repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/bundestag.py
yt_dlp/extractor/bundestag.py
import functools import re from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, bug_reports_message, clean_html, format_field, int_or_none, url_or_none, ) from ..utils.traversal import ( find_element, traverse_obj, ) class BundestagIE(InfoExtractor): _VALID_URL = [ r'https?://dbtg\.tv/[cf]vid/(?P<id>\d+)', r'https?://www\.bundestag\.de/mediathek/?\?(?:[^#]+&)?videoid=(?P<id>\d+)', ] _TESTS = [{ 'url': 'https://dbtg.tv/cvid/7605304', 'info_dict': { 'id': '7605304', 'ext': 'mp4', 'title': '145. Sitzung vom 15.12.2023, TOP 24 Barrierefreiheit', 'description': 'md5:321a9dc6bdad201264c0045efc371561', }, }, { 'url': 'https://www.bundestag.de/mediathek?videoid=7602120&url=L21lZGlhdGhla292ZXJsYXk=&mod=mediathek', 'info_dict': { 'id': '7602120', 'ext': 'mp4', 'title': '130. Sitzung vom 18.10.2023, TOP 1 Befragung der Bundesregierung', 'description': 'Befragung der Bundesregierung', }, }, { 'url': 'https://www.bundestag.de/mediathek?videoid=7604941#url=L21lZGlhdGhla292ZXJsYXk/dmlkZW9pZD03NjA0OTQx&mod=mediathek', 'only_matching': True, }, { 'url': 'http://dbtg.tv/fvid/3594346', 'only_matching': True, }] _OVERLAY_URL = 'https://www.bundestag.de/mediathekoverlay' _INSTANCE_FORMAT = 'https://cldf-wzw-od.r53.cdn.tv1.eu/13014bundestagod/_definst_/13014bundestag/ondemand/3777parlamentsfernsehen/archiv/app144277506/145293313/{0}/{0}_playlist.smil/playlist.m3u8' _SHARE_URL = 'https://webtv.bundestag.de/player/macros/_x_s-144277506/shareData.json?contentId=' _SHARE_AUDIO_REGEX = r'/\d+_(?P<codec>\w+)_(?P<bitrate>\d+)kb_(?P<channels>\w+)_\w+_\d+\.(?P<ext>\w+)' _SHARE_VIDEO_REGEX = r'/\d+_(?P<codec>\w+)_(?P<width>\w+)_(?P<height>\w+)_(?P<bitrate>\d+)kb_\w+_\w+_\d+\.(?P<ext>\w+)' def _bt_extract_share_formats(self, video_id): share_data = self._download_json( f'{self._SHARE_URL}{video_id}', video_id, note='Downloading share format JSON') if traverse_obj(share_data, ('status', 'code', {int})) != 1: 
self.report_warning(format_field( share_data, [('status', 'message', {str})], 'Share API response: %s', default='Unknown Share API Error') + bug_reports_message()) return for name, url in share_data.items(): if not isinstance(name, str) or not url_or_none(url): continue elif name.startswith('audio'): match = re.search(self._SHARE_AUDIO_REGEX, url) yield { 'format_id': name, 'url': url, 'vcodec': 'none', **traverse_obj(match, { 'acodec': 'codec', 'audio_channels': ('channels', {{'mono': 1, 'stereo': 2}.get}), 'abr': ('bitrate', {int_or_none}), 'ext': 'ext', }), } elif name.startswith('download'): match = re.search(self._SHARE_VIDEO_REGEX, url) yield { 'format_id': name, 'url': url, **traverse_obj(match, { 'vcodec': 'codec', 'tbr': ('bitrate', {int_or_none}), 'width': ('width', {int_or_none}), 'height': ('height', {int_or_none}), 'ext': 'ext', }), } def _real_extract(self, url): video_id = self._match_id(url) formats = [] result = {'id': video_id, 'formats': formats} try: formats.extend(self._extract_m3u8_formats( self._INSTANCE_FORMAT.format(video_id), video_id, m3u8_id='instance')) except ExtractorError as error: if isinstance(error.cause, HTTPError) and error.cause.status == 404: raise ExtractorError('Could not find video id', expected=True) self.report_warning(f'Error extracting hls formats: {error}', video_id) formats.extend(self._bt_extract_share_formats(video_id)) if not formats: self.raise_no_formats('Could not find suitable formats', video_id=video_id) result.update(traverse_obj(self._download_webpage( self._OVERLAY_URL, video_id, query={'videoid': video_id, 'view': 'main'}, note='Downloading metadata overlay', fatal=False, ), { 'title': ( {find_element(tag='h3')}, {functools.partial(re.sub, r'<span[^>]*>[^<]+</span>', '')}, {clean_html}), 'description': ({find_element(tag='p')}, {clean_html}), })) return result
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/soundcloud.py
yt_dlp/extractor/soundcloud.py
import functools import itertools import json import re from .common import InfoExtractor, SearchInfoExtractor from ..networking import HEADRequest from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, float_or_none, int_or_none, join_nonempty, mimetype2ext, parse_qs, str_or_none, try_call, unified_timestamp, update_url_query, url_or_none, urlhandle_detect_ext, ) from ..utils.traversal import traverse_obj class SoundcloudEmbedIE(InfoExtractor): _VALID_URL = r'https?://(?:w|player|p)\.soundcloud\.com/player/?.*?\burl=(?P<id>.+)' _EMBED_REGEX = [r'<iframe[^>]+src=(["\'])(?P<url>(?:https?://)?(?:w\.)?soundcloud\.com/player.+?)\1'] _TESTS = [{ # from https://www.soundi.fi/uutiset/ennakkokuuntelussa-timo-kaukolammen-station-to-station-to-station-julkaisua-juhlitaan-tanaan-g-livelabissa/ 'url': 'https://w.soundcloud.com/player/?visual=true&url=https%3A%2F%2Fapi.soundcloud.com%2Fplaylists%2F922213810&show_artwork=true&maxwidth=640&maxheight=960&dnt=1&secret_token=s-ziYey', 'only_matching': True, }] _WEBPAGE_TESTS = [{ 'url': 'https://news.sophos.com/en-us/2023/08/10/s3-ep147-what-if-you-type-in-your-password-during-a-meeting/', 'info_dict': { 'id': '1588847423', 'ext': 'm4a', 'title': 'S3 Ep147: What if you type in your password during a meeting?', 'artists': ['Naked Security'], 'description': 'md5:6931a0630b920413c8c904407bf4b3b2', 'duration': 942.762, 'genres': ['Technology'], 'license': 'all-rights-reserved', 'repost_count': int, 'tags': 'count:4', 'thumbnail': r're:https?://[ai]1\.sndcdn\.com/.+\.(?:jpg|png)', 'timestamp': 1691624365, 'track': 'S3 Ep147: What if you type in your password during a meeting?', 'upload_date': '20230809', 'uploader': 'Naked Security', 'uploader_id': '61390843', 'uploader_url': 'https://soundcloud.com/sophossecurity', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.guitarplayer.com/lessons/november-2023-guitar-player-lesson-audio', 'info_dict': { 'id': '1695754080', 'title': 'A Tribute to Brian 
Setzer’s Guitar Mastery', 'album': 'A Tribute to Brian Setzer’s Guitar Mastery', 'album_artists': ['Guitar Player'], 'album_type': 'playlist', 'description': '', 'uploader': 'Guitar Player', 'uploader_id': '489924156', }, 'playlist_mincount': 7, }] def _real_extract(self, url): query = parse_qs(url) api_url = query['url'][0] secret_token = query.get('secret_token') if secret_token: api_url = update_url_query(api_url, {'secret_token': secret_token[0]}) return self.url_result(api_url) class SoundcloudBaseIE(InfoExtractor): _NETRC_MACHINE = 'soundcloud' _API_V2_BASE = 'https://api-v2.soundcloud.com/' _BASE_URL = 'https://soundcloud.com/' _USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36' _API_AUTH_QUERY_TEMPLATE = '?client_id=%s' _API_AUTH_URL_PW = 'https://api-auth.soundcloud.com/web-auth/sign-in/password%s' _API_VERIFY_AUTH_TOKEN = 'https://api-auth.soundcloud.com/connect/session%s' _HEADERS = {} _IMAGE_REPL_RE = r'-[0-9a-z]+\.(?P<ext>jpg|png)' _TAGS_RE = re.compile(r'"([^"]+)"|([^ ]+)') _ARTWORK_MAP = { 'mini': 16, 'tiny': 20, 'small': 32, 'badge': 47, 't67x67': 67, 'large': 100, 't300x300': 300, 'crop': 400, 't500x500': 500, 'original': 0, } _DEFAULT_FORMATS = ['http_aac', 'hls_aac', 'http_opus', 'hls_opus', 'http_mp3', 'hls_mp3'] @functools.cached_property def _is_requested(self): return re.compile(r'|'.join(set( re.escape(pattern).replace(r'\*', r'.*') if pattern != 'default' else '|'.join(map(re.escape, self._DEFAULT_FORMATS)) for pattern in self._configuration_arg('formats', ['default'], ie_key=SoundcloudIE) ))).fullmatch def _store_client_id(self, client_id): self.cache.store('soundcloud', 'client_id', client_id) def _update_client_id(self): webpage = self._download_webpage('https://soundcloud.com/', None) for src in reversed(re.findall(r'<script[^>]+src="([^"]+)"', webpage)): script = self._download_webpage(src, None, fatal=False) if script: client_id = self._search_regex( 
r'client_id\s*:\s*"([0-9a-zA-Z]{32})"', script, 'client id', default=None) if client_id: self._CLIENT_ID = client_id self._store_client_id(client_id) return raise ExtractorError('Unable to extract client id') def _call_api(self, *args, **kwargs): non_fatal = kwargs.get('fatal') is False if non_fatal: del kwargs['fatal'] query = kwargs.get('query', {}).copy() for _ in range(2): query['client_id'] = self._CLIENT_ID kwargs['query'] = query try: return self._download_json(*args, **kwargs) except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status in (401, 403): self._store_client_id(None) self._update_client_id() continue elif non_fatal: self.report_warning(str(e)) return False raise def _initialize_pre_login(self): self._CLIENT_ID = self.cache.load('soundcloud', 'client_id') or 'a3e059563d7fd3372b49b37f00a00bcf' def _verify_oauth_token(self, token): if self._request_webpage( self._API_VERIFY_AUTH_TOKEN % (self._API_AUTH_QUERY_TEMPLATE % self._CLIENT_ID), None, note='Verifying login token...', fatal=False, data=json.dumps({'session': {'access_token': token}}).encode()): self._HEADERS['Authorization'] = f'OAuth {token}' self.report_login() else: self.report_warning('Provided authorization token is invalid. Continuing as guest') def _real_initialize(self): if self._HEADERS: return if token := try_call(lambda: self._get_cookies(self._BASE_URL)['oauth_token'].value): self._verify_oauth_token(token) def _perform_login(self, username, password): if username != 'oauth': raise ExtractorError( 'Login using username and password is not currently supported. 
' 'Use "--username oauth --password <oauth_token>" to login using an oauth token, ' f'or else {self._login_hint(method="cookies")}', expected=True) if self._HEADERS: return self._verify_oauth_token(password) r''' def genDevId(): def genNumBlock(): return ''.join([str(random.randrange(10)) for i in range(6)]) return '-'.join([genNumBlock() for i in range(4)]) payload = { 'client_id': self._CLIENT_ID, 'recaptcha_pubkey': 'null', 'recaptcha_response': 'null', 'credentials': { 'identifier': username, 'password': password }, 'signature': self.sign(username, password, self._CLIENT_ID), 'device_id': genDevId(), 'user_agent': self._USER_AGENT } response = self._call_api( self._API_AUTH_URL_PW % (self._API_AUTH_QUERY_TEMPLATE % self._CLIENT_ID), None, note='Verifying login token...', fatal=False, data=json.dumps(payload).encode()) if token := traverse_obj(response, ('session', 'access_token', {str})): self._HEADERS['Authorization'] = f'OAuth {token}' self.report_login() return raise ExtractorError('Unable to get access token, login may have failed', expected=True) ''' # signature generation def sign(self, user, pw, clid): a = 33 i = 1 s = 440123 w = 117 u = 1800000 l = 1042 b = 37 k = 37 c = 5 n = '0763ed7314c69015fd4a0dc16bbf4b90' # _KEY y = '8' # _REV r = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36' # _USER_AGENT e = user # _USERNAME t = clid # _CLIENT_ID d = '-'.join([str(mInt) for mInt in [a, i, s, w, u, l, b, k]]) h = n + y + d + r + e + t + d + n m = 8011470 for f in range(len(h)): m = (m >> 1) + ((1 & m) << 23) m += ord(h[f]) m &= 16777215 # c is not even needed return f'{y}:{d}:{m:x}:{c}' def _extract_info_dict(self, info, full_title=None, secret_token=None, extract_flat=False): track_id = str(info['id']) format_urls = set() formats = [] has_drm = False query = {'client_id': self._CLIENT_ID} if secret_token: query['secret_token'] = secret_token if not extract_flat and info.get('downloadable') 
and info.get('has_downloads_left'): try: # Do not use _call_api(); HTTP Error codes have different meanings for this request download_data = self._download_json( f'{self._API_V2_BASE}tracks/{track_id}/download', track_id, 'Downloading original download format info JSON', query=query, headers=self._HEADERS) except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 401: self.report_warning( 'Original download format is only available ' f'for registered users. {self._login_hint()}') elif isinstance(e.cause, HTTPError) and e.cause.status == 403: self.write_debug('Original download format is not available for this client') else: self.report_warning(e.msg) download_data = None if redirect_url := traverse_obj(download_data, ('redirectUri', {url_or_none})): urlh = self._request_webpage( HEADRequest(redirect_url), track_id, 'Checking original download format availability', 'Original download format is not available', fatal=False) if urlh: format_url = urlh.url format_urls.add(format_url) formats.append({ 'format_id': 'download', 'ext': urlhandle_detect_ext(urlh), 'filesize': int_or_none(urlh.headers.get('Content-Length')), 'url': format_url, 'quality': 10, 'format_note': 'Original', 'vcodec': 'none', }) def invalid_url(url): return not url or url in format_urls # New API for t in traverse_obj(info, ('media', 'transcodings', lambda _, v: url_or_none(v['url']) and v['preset'])): if extract_flat: break format_url = t['url'] preset = t['preset'] preset_base = preset.partition('_')[0] protocol = traverse_obj(t, ('format', 'protocol', {str})) or 'http' if protocol.startswith(('ctr-', 'cbc-')): has_drm = True continue if protocol == 'progressive': protocol = 'http' if protocol != 'hls' and '/hls' in format_url: protocol = 'hls' if protocol == 'encrypted-hls' or '/encrypted-hls' in format_url: protocol = 'hls-aes' short_identifier = f'{protocol}_{preset_base}' if preset_base == 'abr': self.write_debug(f'Skipping broken "{short_identifier}" format') 
continue if not self._is_requested(short_identifier): self.write_debug(f'"{short_identifier}" is not a requested format, skipping') continue # XXX: if not extract_flat, 429 error must be caught where _extract_info_dict is called stream_url = traverse_obj(self._call_api( format_url, track_id, f'Downloading {short_identifier} format info JSON', query=query, headers=self._HEADERS), ('url', {url_or_none})) if invalid_url(stream_url): continue format_urls.add(stream_url) mime_type = traverse_obj(t, ('format', 'mime_type', {str})) codec = self._search_regex(r'codecs="([^"]+)"', mime_type, 'codec', default=None) ext = { 'mp4a': 'm4a', 'opus': 'opus', }.get(codec[:4] if codec else None) or mimetype2ext(mime_type, default=None) if not ext or ext == 'm3u8': ext = preset_base is_premium = t.get('quality') == 'hq' abr = int_or_none( self._search_regex(r'(\d+)k$', preset, 'abr', default=None) or self._search_regex(r'\.(\d+)\.(?:opus|mp3)[/?]', stream_url, 'abr', default=None) or (256 if (is_premium and 'aac' in preset) else None)) is_preview = (t.get('snipped') or '/preview/' in format_url or re.search(r'/(?:preview|playlist)/0/30/', stream_url)) formats.append({ 'format_id': join_nonempty(protocol, preset, is_preview and 'preview', delim='_'), 'url': stream_url, 'ext': ext, 'acodec': codec, 'vcodec': 'none', 'abr': abr, 'protocol': 'm3u8_native' if protocol in ('hls', 'hls-aes') else 'http', 'container': 'm4a_dash' if ext == 'm4a' else None, 'quality': 5 if is_premium else 0 if (abr and abr >= 160) else -1, 'format_note': 'Premium' if is_premium else None, 'preference': -10 if is_preview else None, }) if not formats: if has_drm: self.report_drm(track_id) if info.get('policy') == 'BLOCK': self.raise_geo_restricted(metadata_available=True) user = info.get('user') or {} thumbnails = [] artwork_url = info.get('artwork_url') thumbnail = artwork_url or user.get('avatar_url') if url_or_none(thumbnail): if mobj := re.search(self._IMAGE_REPL_RE, thumbnail): for image_id, size in 
self._ARTWORK_MAP.items(): # Soundcloud serves JPEG regardless of URL's ext *except* for "original" thumb ext = mobj.group('ext') if image_id == 'original' else 'jpg' i = { 'id': image_id, 'url': re.sub(self._IMAGE_REPL_RE, f'-{image_id}.{ext}', thumbnail), } if image_id == 'tiny' and not artwork_url: size = 18 elif image_id == 'original': i['preference'] = 10 if size: i.update({ 'width': size, 'height': size, }) thumbnails.append(i) else: thumbnails = [{'url': thumbnail}] def extract_count(key): return int_or_none(info.get(f'{key}_count')) return { 'id': track_id, 'uploader': user.get('username'), 'uploader_id': str_or_none(user.get('id')) or user.get('permalink'), 'uploader_url': user.get('permalink_url'), 'timestamp': unified_timestamp(info.get('created_at')), 'title': info.get('title'), 'track': info.get('title'), 'description': info.get('description'), 'thumbnails': thumbnails, 'duration': float_or_none(info.get('duration'), 1000), 'webpage_url': info.get('permalink_url'), 'license': info.get('license'), 'view_count': extract_count('playback'), 'like_count': extract_count('favoritings') or extract_count('likes'), 'comment_count': extract_count('comment'), 'repost_count': extract_count('reposts'), 'genres': traverse_obj(info, ('genre', {str}, filter, all, filter)), 'tags': traverse_obj(info, ('tag_list', {self._TAGS_RE.findall}, ..., ..., filter)), 'artists': traverse_obj(info, ('publisher_metadata', 'artist', {str}, filter, all, filter)), 'formats': formats if not extract_flat else None, } @classmethod def _resolv_url(cls, url): return cls._API_V2_BASE + 'resolve?url=' + url class SoundcloudIE(SoundcloudBaseIE): """Information extractor for soundcloud.com To access the media, the uid of the song and a stream token must be extracted from the page source and the script must make a request to media.soundcloud.com/crossdomain.xml. Then the media can be grabbed by requesting from an url composed of the stream token and uid """ _VALID_URL = r'''(?x)^(?:https?://)? 
(?:(?:(?:www\.|m\.)?soundcloud\.com/ (?!stations/track) (?P<uploader>[\w\d-]+)/ (?!(?:tracks|albums|sets(?:/.+?)?|reposts|likes|spotlight|comments)/?(?:$|[?#])) (?P<title>[\w\d-]+) (?:/(?P<token>(?!(?:albums|sets|recommended))[^?]+?))? (?:[?].*)?$) |(?:api(?:-v2)?\.soundcloud\.com/tracks/(?:soundcloud%3Atracks%3A)?(?P<track_id>\d+) (?:/?\?secret_token=(?P<secret_token>[^&]+))?) ) ''' IE_NAME = 'soundcloud' _TESTS = [{ 'url': 'http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy', 'md5': 'de9bac153e7427a7333b4b0c1b6a18d2', 'info_dict': { 'id': '62986583', 'ext': 'opus', 'title': 'Lostin Powers - She so Heavy (SneakPreview) Adrian Ackers Blueprint 1', 'track': 'Lostin Powers - She so Heavy (SneakPreview) Adrian Ackers Blueprint 1', 'description': 'md5:7b6074e00887ad79f59b647c8fb6d5ae', 'uploader': 'E.T. ExTerrestrial Music', 'uploader_id': '1571244', 'timestamp': 1349920598, 'upload_date': '20121011', 'duration': 143.216, 'license': 'all-rights-reserved', 'view_count': int, 'like_count': int, 'comment_count': int, 'repost_count': int, 'thumbnail': r're:https?://[ai]1\.sndcdn\.com/.+\.(?:jpg|png)', 'uploader_url': 'https://soundcloud.com/ethmusic', 'tags': 'count:14', }, }, { # Geo-restricted 'url': 'https://soundcloud.com/the-concept-band/goldrushed-mastered?in=the-concept-band/sets/the-royal-concept-ep', 'info_dict': { 'id': '47127627', 'ext': 'opus', 'title': 'Goldrushed', 'track': 'Goldrushed', 'description': 'md5:c0080b79a3710811d60234f94f391a40', 'uploader': 'The Royal Concept', 'uploader_id': '9615865', 'timestamp': 1337635207, 'upload_date': '20120521', 'duration': 227.103, 'license': 'all-rights-reserved', 'view_count': int, 'like_count': int, 'comment_count': int, 'repost_count': int, 'uploader_url': 'https://soundcloud.com/the-concept-band', 'thumbnail': r're:https?://[ai]1\.sndcdn\.com/.+\.(?:jpg|png)', 'genres': ['Alternative'], 'artists': ['The Royal Concept'], 'tags': [], }, }, { # private link 'url': 
'https://soundcloud.com/jaimemf/youtube-dl-test-video-a-y-baw/s-8Pjrp', 'md5': 'aa0dd32bfea9b0c5ef4f02aacd080604', 'info_dict': { 'id': '123998367', 'ext': 'mp3', 'title': 'Youtube - Dl Test Video \'\' Ä↭', 'track': 'Youtube - Dl Test Video \'\' Ä↭', 'description': 'md5:610b729ee06ac4cedaa28607212948f3', 'uploader': 'jaimeMF', 'uploader_id': '69767071', 'timestamp': 1386604920, 'upload_date': '20131209', 'duration': 9.927, 'license': 'all-rights-reserved', 'view_count': int, 'like_count': int, 'comment_count': int, 'repost_count': int, 'uploader_url': 'https://soundcloud.com/jaimemf', 'thumbnail': r're:https?://[ai]1\.sndcdn\.com/.+\.(?:jpg|png)', 'genres': ['youtubedl'], 'tags': [], }, }, { # private link (alt format) 'url': 'https://api.soundcloud.com/tracks/123998367?secret_token=s-8Pjrp', 'md5': 'aa0dd32bfea9b0c5ef4f02aacd080604', 'info_dict': { 'id': '123998367', 'ext': 'mp3', 'title': 'Youtube - Dl Test Video \'\' Ä↭', 'track': 'Youtube - Dl Test Video \'\' Ä↭', 'description': 'md5:610b729ee06ac4cedaa28607212948f3', 'uploader': 'jaimeMF', 'uploader_id': '69767071', 'timestamp': 1386604920, 'upload_date': '20131209', 'duration': 9.927, 'license': 'all-rights-reserved', 'view_count': int, 'like_count': int, 'comment_count': int, 'repost_count': int, 'uploader_url': 'https://soundcloud.com/jaimemf', 'thumbnail': r're:https?://[ai]1\.sndcdn\.com/.+\.(?:jpg|png)', 'genres': ['youtubedl'], 'tags': [], }, }, { # downloadable song 'url': 'https://soundcloud.com/the80m/the-following', 'md5': 'ecb87d7705d5f53e6c02a63760573c75', # wav: '9ffcddb08c87d74fb5808a3c183a1d04' 'info_dict': { 'id': '343609555', 'ext': 'opus', # wav original available with auth 'title': 'The Following', 'track': 'The Following', 'description': '', 'uploader': '80M', 'uploader_id': '312384765', 'uploader_url': 'https://soundcloud.com/the80m', 'upload_date': '20170922', 'timestamp': 1506120436, 'duration': 397.228, 'thumbnail': r're:https?://[ai]1\.sndcdn\.com/.+\.(?:jpg|png)', 'license': 
'all-rights-reserved', 'like_count': int, 'comment_count': int, 'repost_count': int, 'view_count': int, 'genres': ['Dance & EDM'], 'artists': ['80M'], 'tags': 'count:4', }, 'expected_warnings': ['Original download format is only available for registered users'], }, { # private link, downloadable format # tags with spaces (e.g. "Uplifting Trance", "Ori Uplift") 'url': 'https://soundcloud.com/oriuplift/uponly-238-no-talking-wav/s-AyZUd', 'md5': '2e1530d0e9986a833a67cb34fc90ece0', # wav: '64a60b16e617d41d0bef032b7f55441e' 'info_dict': { 'id': '340344461', 'ext': 'opus', # wav original available with auth 'title': 'Uplifting Only 238 [No Talking] (incl. Alex Feed Guestmix) (Aug 31, 2017) [wav]', 'track': 'Uplifting Only 238 [No Talking] (incl. Alex Feed Guestmix) (Aug 31, 2017) [wav]', 'description': 'md5:fa20ee0fca76a3d6df8c7e57f3715366', 'uploader': 'Ori Uplift Music', 'uploader_id': '12563093', 'timestamp': 1504206263, 'upload_date': '20170831', 'duration': 7449.096, 'license': 'all-rights-reserved', 'view_count': int, 'like_count': int, 'comment_count': int, 'repost_count': int, 'thumbnail': r're:https?://[ai]1\.sndcdn\.com/.+\.(?:jpg|png)', 'uploader_url': 'https://soundcloud.com/oriuplift', 'genres': ['Trance'], 'artists': ['Ori Uplift'], 'tags': 'count:6', }, 'expected_warnings': ['Original download format is only available for registered users'], }, { # no album art, use avatar pic for thumbnail 'url': 'https://soundcloud.com/garyvee/sideways-prod-mad-real', 'md5': '59c7872bc44e5d99b7211891664760c2', 'info_dict': { 'id': '309699954', 'ext': 'mp3', 'title': 'Sideways (Prod. Mad Real)', 'track': 'Sideways (Prod. 
Mad Real)', 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e', 'uploader': 'garyvee', 'uploader_id': '2366352', 'timestamp': 1488152409, 'upload_date': '20170226', 'duration': 207.012, 'thumbnail': r're:https?://[ai]1\.sndcdn\.com/.+\.(?:jpg|png)', 'license': 'all-rights-reserved', 'view_count': int, 'like_count': int, 'comment_count': int, 'repost_count': int, 'uploader_url': 'https://soundcloud.com/garyvee', 'artists': ['MadReal'], 'tags': [], }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://soundcloud.com/giovannisarani/mezzo-valzer', 'md5': '8227c3473a4264df6b02ad7e5b7527ac', 'info_dict': { 'id': '583011102', 'ext': 'm4a', 'title': 'Mezzo Valzer', 'track': 'Mezzo Valzer', 'description': 'md5:f4d5f39d52e0ccc2b4f665326428901a', 'uploader': 'Giovanni Sarani', 'uploader_id': '3352531', 'timestamp': 1551394171, 'upload_date': '20190228', 'duration': 180.134, 'thumbnail': r're:https?://[ai]1\.sndcdn\.com/.+\.(?:jpg|png)', 'license': 'all-rights-reserved', 'view_count': int, 'like_count': int, 'comment_count': int, 'repost_count': int, 'genres': ['Piano'], 'uploader_url': 'https://soundcloud.com/giovannisarani', 'tags': 'count:10', }, 'params': {'skip_download': 'm3u8'}, }, { # .png "original" artwork, 160kbps m4a HLS format 'url': 'https://soundcloud.com/skorxh/audio-dealer', 'info_dict': { 'id': '2011421339', 'ext': 'm4a', 'title': 'audio dealer', 'description': '', 'uploader': '$KORCH', 'uploader_id': '150292288', 'uploader_url': 'https://soundcloud.com/skorxh', 'comment_count': int, 'view_count': int, 'like_count': int, 'repost_count': int, 'duration': 213.469, 'tags': [], 'artists': ['$KORXH'], 'track': 'audio dealer', 'timestamp': 1737143201, 'upload_date': '20250117', 'license': 'all-rights-reserved', 'thumbnail': r're:https?://[ai]1\.sndcdn\.com/.+\.(?:jpg|png)', 'thumbnails': [ {'id': 'mini', 'url': 'https://i1.sndcdn.com/artworks-a1wKGMYNreDLTMrT-fGjRiw-mini.jpg'}, {'id': 'tiny', 'url': 
'https://i1.sndcdn.com/artworks-a1wKGMYNreDLTMrT-fGjRiw-tiny.jpg'}, {'id': 'small', 'url': 'https://i1.sndcdn.com/artworks-a1wKGMYNreDLTMrT-fGjRiw-small.jpg'}, {'id': 'badge', 'url': 'https://i1.sndcdn.com/artworks-a1wKGMYNreDLTMrT-fGjRiw-badge.jpg'}, {'id': 't67x67', 'url': 'https://i1.sndcdn.com/artworks-a1wKGMYNreDLTMrT-fGjRiw-t67x67.jpg'}, {'id': 'large', 'url': 'https://i1.sndcdn.com/artworks-a1wKGMYNreDLTMrT-fGjRiw-large.jpg'}, {'id': 't300x300', 'url': 'https://i1.sndcdn.com/artworks-a1wKGMYNreDLTMrT-fGjRiw-t300x300.jpg'}, {'id': 'crop', 'url': 'https://i1.sndcdn.com/artworks-a1wKGMYNreDLTMrT-fGjRiw-crop.jpg'}, {'id': 't500x500', 'url': 'https://i1.sndcdn.com/artworks-a1wKGMYNreDLTMrT-fGjRiw-t500x500.jpg'}, {'id': 'original', 'url': 'https://i1.sndcdn.com/artworks-a1wKGMYNreDLTMrT-fGjRiw-original.png'}, ], }, 'params': {'skip_download': 'm3u8', 'format': 'hls_aac_160k'}, }, { # AAC HQ format available (account with active subscription needed) 'url': 'https://soundcloud.com/wandw/the-chainsmokers-ft-daya-dont-let-me-down-ww-remix-1', 'only_matching': True, }, { # Go+ (account with active subscription needed) 'url': 'https://soundcloud.com/taylorswiftofficial/look-what-you-made-me-do', 'only_matching': True, }, { 'url': 'https://api.soundcloud.com/tracks/soundcloud%3Atracks%3A1083788353', 'only_matching': True, }] def _real_extract(self, url): mobj = self._match_valid_url(url) track_id = mobj.group('track_id') query = {} if track_id: info_json_url = self._API_V2_BASE + 'tracks/' + track_id full_title = track_id token = mobj.group('secret_token') if token: query['secret_token'] = token else: full_title = resolve_title = '{}/{}'.format(*mobj.group('uploader', 'title')) token = mobj.group('token') if token: resolve_title += f'/{token}' info_json_url = self._resolv_url(self._BASE_URL + resolve_title) info = self._call_api( info_json_url, full_title, 'Downloading info JSON', query=query, headers=self._HEADERS) for retry in self.RetryManager(): try: return 
self._extract_info_dict(info, full_title, token) except ExtractorError as e: if not isinstance(e.cause, HTTPError) or e.cause.status != 429: raise self.report_warning( 'You have reached the API rate limit, which is ~600 requests per ' '10 minutes. Use the --extractor-retries and --retry-sleep options ' 'to configure an appropriate retry count and wait time', only_once=True) retry.error = e.cause class SoundcloudPlaylistBaseIE(SoundcloudBaseIE): def _extract_set(self, playlist, token=None): playlist_id = str(playlist['id']) tracks = playlist.get('tracks') or [] if not all(t.get('permalink_url') for t in tracks) and token: tracks = self._call_api( self._API_V2_BASE + 'tracks', playlist_id, 'Downloading tracks', query={ 'ids': ','.join([str(t['id']) for t in tracks]), 'playlistId': playlist_id, 'playlistSecretToken': token, }, headers=self._HEADERS) album_info = traverse_obj(playlist, { 'album': ('title', {str}), 'album_artist': ('user', 'username', {str}), 'album_type': ('set_type', {str}, {lambda x: x or 'playlist'}), }) entries = [] for track in tracks: track_id = str_or_none(track.get('id')) url = track.get('permalink_url') if not url: if not track_id: continue url = self._API_V2_BASE + 'tracks/' + track_id if token: url += '?secret_token=' + token entries.append(self.url_result( url, SoundcloudIE.ie_key(), track_id, url_transparent=True, **album_info)) return self.playlist_result( entries, playlist_id,
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
true
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/redtube.py
yt_dlp/extractor/redtube.py
from .common import InfoExtractor from ..utils import ( ExtractorError, determine_ext, int_or_none, merge_dicts, str_to_int, unified_strdate, url_or_none, urljoin, ) class RedTubeIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:\w+\.)?redtube\.com(?:\.br)?/|embed\.redtube\.com/\?.*?\bid=)(?P<id>[0-9]+)' _EMBED_REGEX = [r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//embed\.redtube\.com/\?.*?\bid=\d+)'] _TESTS = [{ 'url': 'https://www.redtube.com/38864951', 'md5': '4fba70cbca3aefd25767ab4b523c9878', 'info_dict': { 'id': '38864951', 'ext': 'mp4', 'title': 'Public Sex on the Balcony in Freezing Paris! Amateur Couple LeoLulu', 'description': 'Watch video Public Sex on the Balcony in Freezing Paris! Amateur Couple LeoLulu on Redtube, home of free Blowjob porn videos and Blonde sex movies online. Video length: (10:46) - Uploaded by leolulu - Verified User - Starring Pornstar: Leolulu', 'upload_date': '20210111', 'timestamp': 1610343109, 'duration': 646, 'view_count': int, 'age_limit': 18, 'thumbnail': r're:https://\wi-ph\.rdtcdn\.com/videos/.+/.+\.jpg', }, }, { 'url': 'http://embed.redtube.com/?bgcolor=000000&id=1443286', 'only_matching': True, }, { 'url': 'http://it.redtube.com/66418', 'only_matching': True, }, { 'url': 'https://www.redtube.com.br/103224331', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( f'https://www.redtube.com/{video_id}', video_id) ERRORS = ( (('video-deleted-info', '>This video has been removed'), 'has been removed'), (('private_video_text', '>This video is private', '>Send a friend request to its owner to be able to view it'), 'is private'), ) for patterns, message in ERRORS: if any(p in webpage for p in patterns): raise ExtractorError( f'Video {video_id} {message}', expected=True) info = self._search_json_ld(webpage, video_id, default={}) if not info.get('title'): info['title'] = self._html_search_regex( 
(r'<h(\d)[^>]+class="(?:video_title_text|videoTitle|video_title)[^"]*">(?P<title>(?:(?!\1).)+)</h\1>', r'(?:videoTitle|title)\s*:\s*(["\'])(?P<title>(?:(?!\1).)+)\1'), webpage, 'title', group='title', default=None) or self._og_search_title(webpage) formats = [] sources = self._parse_json( self._search_regex( r'sources\s*:\s*({.+?})', webpage, 'source', default='{}'), video_id, fatal=False) if sources and isinstance(sources, dict): for format_id, format_url in sources.items(): if format_url: formats.append({ 'url': format_url, 'format_id': format_id, 'height': int_or_none(format_id), }) medias = self._parse_json( self._search_regex( r'mediaDefinition["\']?\s*:\s*(\[.+?}\s*\])', webpage, 'media definitions', default='{}'), video_id, fatal=False) for media in medias if isinstance(medias, list) else []: format_url = urljoin('https://www.redtube.com', media.get('videoUrl')) if not format_url: continue format_id = media.get('format') quality = media.get('quality') if format_id == 'hls' or (format_id == 'mp4' and not quality): more_media = self._download_json(format_url, video_id, fatal=False) else: more_media = [media] for media in more_media if isinstance(more_media, list) else []: format_url = url_or_none(media.get('videoUrl')) if not format_url: continue format_id = media.get('format') if format_id == 'hls' or determine_ext(format_url) == 'm3u8': formats.extend(self._extract_m3u8_formats( format_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id=format_id or 'hls', fatal=False)) continue format_id = media.get('quality') formats.append({ 'url': format_url, 'ext': 'mp4', 'format_id': format_id, 'height': int_or_none(format_id), }) if not formats: video_url = self._html_search_regex( r'<source src="(.+?)" type="video/mp4">', webpage, 'video URL') formats.append({'url': video_url, 'ext': 'mp4'}) thumbnail = self._og_search_thumbnail(webpage) upload_date = unified_strdate(self._search_regex( r'<span[^>]+>(?:ADDED|Published on) ([^<]+)<', webpage, 'upload date', 
default=None)) duration = int_or_none(self._og_search_property( 'video:duration', webpage, default=None) or self._search_regex( r'videoDuration\s*:\s*(\d+)', webpage, 'duration', default=None)) view_count = str_to_int(self._search_regex( (r'<div[^>]*>Views</div>\s*<div[^>]*>\s*([\d,.]+)', r'<span[^>]*>VIEWS</span>\s*</td>\s*<td>\s*([\d,.]+)', r'<span[^>]+\bclass=["\']video_view_count[^>]*>\s*([\d,.]+)'), webpage, 'view count', default=None)) # No self-labeling, but they describe themselves as # "Home of Videos Porno" age_limit = 18 return merge_dicts(info, { 'id': video_id, 'ext': 'mp4', 'thumbnail': thumbnail, 'upload_date': upload_date, 'duration': duration, 'view_count': view_count, 'age_limit': age_limit, 'formats': formats, })
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/mit.py
yt_dlp/extractor/mit.py
import json import re from .common import InfoExtractor from .youtube import YoutubeIE from ..utils import ( ExtractorError, clean_html, get_element_by_id, ) class TechTVMITIE(InfoExtractor): IE_NAME = 'techtv.mit.edu' _VALID_URL = r'https?://techtv\.mit\.edu/(?:videos|embeds)/(?P<id>\d+)' _TEST = { 'url': 'http://techtv.mit.edu/videos/25418-mit-dna-learning-center-set', 'md5': '00a3a27ee20d44bcaa0933ccec4a2cf7', 'info_dict': { 'id': '25418', 'ext': 'mp4', 'title': 'MIT DNA and Protein Sets', 'description': 'md5:46f5c69ce434f0a97e7c628cc142802d', }, } def _real_extract(self, url): video_id = self._match_id(url) raw_page = self._download_webpage( f'http://techtv.mit.edu/videos/{video_id}', video_id) clean_page = re.compile(r'<!--.*?-->', re.S).sub('', raw_page) base_url = self._proto_relative_url(self._search_regex( r'ipadUrl: \'(.+?cloudfront.net/)', raw_page, 'base url'), 'http:') formats_json = self._search_regex( r'bitrates: (\[.+?\])', raw_page, 'video formats') formats_mit = json.loads(formats_json) formats = [ { 'format_id': f['label'], 'url': base_url + f['url'].partition(':')[2], 'ext': f['url'].partition(':')[0], 'format': f['label'], 'width': f['width'], 'vbr': f['bitrate'], } for f in formats_mit ] title = get_element_by_id('edit-title', clean_page) description = clean_html(get_element_by_id('edit-description', clean_page)) thumbnail = self._search_regex( r'playlist:.*?url: \'(.+?)\'', raw_page, 'thumbnail', flags=re.DOTALL) return { 'id': video_id, 'title': title, 'formats': formats, 'description': description, 'thumbnail': thumbnail, } class OCWMITIE(InfoExtractor): IE_NAME = 'ocw.mit.edu' _VALID_URL = r'https?://ocw\.mit\.edu/courses/(?P<topic>[a-z0-9\-]+)' _BASE_URL = 'http://ocw.mit.edu/' _TESTS = [ { 'url': 'http://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-041-probabilistic-systems-analysis-and-applied-probability-fall-2010/video-lectures/lecture-7-multiple-variables-expectations-independence/', 'info_dict': { 'id': 
'EObHWIEKGjA', 'ext': 'webm', 'title': 'Lecture 7: Multiple Discrete Random Variables: Expectations, Conditioning, Independence', 'description': 'In this lecture, the professor discussed multiple random variables, expectations, and binomial distribution.', 'upload_date': '20121109', 'uploader_id': 'MIT', 'uploader': 'MIT OpenCourseWare', }, }, { 'url': 'http://ocw.mit.edu/courses/mathematics/18-01sc-single-variable-calculus-fall-2010/1.-differentiation/part-a-definition-and-basic-rules/session-1-introduction-to-derivatives/', 'info_dict': { 'id': '7K1sB05pE0A', 'ext': 'mp4', 'title': 'Session 1: Introduction to Derivatives', 'upload_date': '20090818', 'uploader_id': 'MIT', 'uploader': 'MIT OpenCourseWare', 'description': 'This section contains lecture video excerpts, lecture notes, an interactive mathlet with supporting documents, and problem solving videos.', }, }, ] def _real_extract(self, url): mobj = self._match_valid_url(url) topic = mobj.group('topic') webpage = self._download_webpage(url, topic) title = self._html_search_meta('WT.cg_s', webpage) description = self._html_search_meta('Description', webpage) # search for call to ocw_embed_chapter_media(container_id, media_url, provider, page_url, image_url, start, stop, captions_file) embed_chapter_media = re.search(r'ocw_embed_chapter_media\((.+?)\)', webpage) if embed_chapter_media: metadata = re.sub(r'[\'"]', '', embed_chapter_media.group(1)) metadata = re.split(r', ?', metadata) yt = metadata[1] else: # search for call to ocw_embed_chapter_media(container_id, media_url, provider, page_url, image_url, captions_file) embed_media = re.search(r'ocw_embed_media\((.+?)\)', webpage) if embed_media: metadata = re.sub(r'[\'"]', '', embed_media.group(1)) metadata = re.split(r', ?', metadata) yt = metadata[1] else: raise ExtractorError('Unable to find embedded YouTube video.') video_id = YoutubeIE.extract_id(yt) return { '_type': 'url_transparent', 'id': video_id, 'title': title, 'description': description, 'url': yt, 
'ie_key': 'Youtube', }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/globalplayer.py
yt_dlp/extractor/globalplayer.py
from .common import InfoExtractor from ..utils import ( clean_html, join_nonempty, parse_duration, str_or_none, traverse_obj, unified_strdate, unified_timestamp, urlhandle_detect_ext, ) class GlobalPlayerBaseIE(InfoExtractor): def _get_page_props(self, url, video_id): webpage = self._download_webpage(url, video_id) return self._search_nextjs_data(webpage, video_id)['props']['pageProps'] def _request_ext(self, url, video_id): return urlhandle_detect_ext(self._request_webpage( # Server rejects HEAD requests url, video_id, note='Determining source extension')) def _extract_audio(self, episode, series): return { 'vcodec': 'none', **traverse_obj(series, { 'series': 'title', 'series_id': 'id', 'thumbnail': 'imageUrl', 'uploader': 'itunesAuthor', # podcasts only }), **traverse_obj(episode, { 'id': 'id', 'description': ('description', {clean_html}), 'duration': ('duration', {parse_duration}), 'thumbnail': 'imageUrl', 'url': 'streamUrl', 'timestamp': (('pubDate', 'startDate'), {unified_timestamp}), 'title': 'title', }, get_all=False), } class GlobalPlayerLiveIE(GlobalPlayerBaseIE): _VALID_URL = r'https?://www\.globalplayer\.com/live/(?P<id>\w+)/\w+' _TESTS = [{ 'url': 'https://www.globalplayer.com/live/smoothchill/uk/', 'info_dict': { 'id': '2mx1E', 'ext': 'aac', 'display_id': 'smoothchill-uk', 'title': 're:^Smooth Chill.+$', 'thumbnail': 'https://herald.musicradio.com/media/f296ade8-50c9-4f60-911f-924e96873620.png', 'description': 'Music To Chill To', 'live_status': 'is_live', }, }, { # national station 'url': 'https://www.globalplayer.com/live/heart/uk/', 'info_dict': { 'id': '2mwx4', 'ext': 'aac', 'description': 'turn up the feel good!', 'thumbnail': 'https://herald.musicradio.com/media/49b9e8cb-15bf-4bf2-8c28-a4850cc6b0f3.png', 'live_status': 'is_live', 'title': 're:^Heart UK.+$', 'display_id': 'heart-uk', }, }, { # regional variation 'url': 'https://www.globalplayer.com/live/heart/london/', 'info_dict': { 'id': 'AMqg', 'ext': 'aac', 'thumbnail': 
'https://herald.musicradio.com/media/49b9e8cb-15bf-4bf2-8c28-a4850cc6b0f3.png', 'title': 're:^Heart London.+$', 'live_status': 'is_live', 'display_id': 'heart-london', 'description': 'turn up the feel good!', }, }] def _real_extract(self, url): video_id = self._match_id(url) station = self._get_page_props(url, video_id)['station'] stream_url = station['streamUrl'] return { 'id': station['id'], 'display_id': join_nonempty('brandSlug', 'slug', from_dict=station) or station.get('legacyStationPrefix'), 'url': stream_url, 'ext': self._request_ext(stream_url, video_id), 'vcodec': 'none', 'is_live': True, **traverse_obj(station, { 'title': (('name', 'brandName'), {str_or_none}), 'description': 'tagline', 'thumbnail': 'brandLogo', }, get_all=False), } class GlobalPlayerLivePlaylistIE(GlobalPlayerBaseIE): _VALID_URL = r'https?://www\.globalplayer\.com/playlists/(?P<id>\w+)' _TESTS = [{ # "live playlist" 'url': 'https://www.globalplayer.com/playlists/8bLk/', 'info_dict': { 'id': '8bLk', 'ext': 'aac', 'live_status': 'is_live', 'description': 'md5:e10f5e10b01a7f2c14ba815509fbb38d', 'thumbnail': 'https://images.globalplayer.com/images/551379?width=450&signature=oMLPZIoi5_dBSHnTMREW0Xg76mA=', 'title': 're:^Classic FM Hall of Fame.+$', }, }] def _real_extract(self, url): video_id = self._match_id(url) station = self._get_page_props(url, video_id)['playlistData'] stream_url = station['streamUrl'] return { 'id': video_id, 'url': stream_url, 'ext': self._request_ext(stream_url, video_id), 'vcodec': 'none', 'is_live': True, **traverse_obj(station, { 'title': 'title', 'description': 'description', 'thumbnail': 'image', }), } class GlobalPlayerAudioIE(GlobalPlayerBaseIE): _VALID_URL = r'https?://www\.globalplayer\.com/(?:(?P<podcast>podcasts)/|catchup/\w+/\w+/)(?P<id>\w+)/?(?:$|[?#])' _TESTS = [{ # podcast 'url': 'https://www.globalplayer.com/podcasts/42KuaM/', 'playlist_mincount': 5, 'info_dict': { 'id': '42KuaM', 'title': 'Filthy Ritual', 'thumbnail': 
'md5:60286e7d12d795bd1bbc9efc6cee643e', 'categories': ['Society & Culture', 'True Crime'], 'uploader': 'Global', 'description': 'md5:da5b918eac9ae319454a10a563afacf9', }, }, { # radio catchup 'url': 'https://www.globalplayer.com/catchup/lbc/uk/46vyD7z/', 'playlist_mincount': 3, 'info_dict': { 'id': '46vyD7z', 'description': 'Nick Ferrari At Breakfast is Leading Britain\'s Conversation.', 'title': 'Nick Ferrari', 'thumbnail': 'md5:4df24d8a226f5b2508efbcc6ae874ebf', }, }] def _real_extract(self, url): video_id, podcast = self._match_valid_url(url).group('id', 'podcast') props = self._get_page_props(url, video_id) series = props['podcastInfo'] if podcast else props['catchupInfo'] return { '_type': 'playlist', 'id': video_id, 'entries': [self._extract_audio(ep, series) for ep in traverse_obj( series, ('episodes', lambda _, v: v['id'] and v['streamUrl']))], 'categories': traverse_obj(series, ('categories', ..., 'name')) or None, **traverse_obj(series, { 'description': 'description', 'thumbnail': 'imageUrl', 'title': 'title', 'uploader': 'itunesAuthor', # podcasts only }), } class GlobalPlayerAudioEpisodeIE(GlobalPlayerBaseIE): _VALID_URL = r'https?://www\.globalplayer\.com/(?:(?P<podcast>podcasts)|catchup/\w+/\w+)/episodes/(?P<id>\w+)/?(?:$|[?#])' _TESTS = [{ # podcast 'url': 'https://www.globalplayer.com/podcasts/episodes/7DrfNnE/', 'info_dict': { 'id': '7DrfNnE', 'ext': 'mp3', 'title': 'Filthy Ritual - Trailer', 'description': 'md5:1f1562fd0f01b4773b590984f94223e0', 'thumbnail': 'md5:60286e7d12d795bd1bbc9efc6cee643e', 'duration': 225.0, 'timestamp': 1681254900, 'series': 'Filthy Ritual', 'series_id': '42KuaM', 'upload_date': '20230411', 'uploader': 'Global', }, }, { # radio catchup 'url': 'https://www.globalplayer.com/catchup/lbc/uk/episodes/2zGq26Vcv1fCWhddC4JAwETXWe/', 'info_dict': { 'id': '2zGq26Vcv1fCWhddC4JAwETXWe', 'ext': 'm4a', 'timestamp': 1682056800, 'series': 'Nick Ferrari', 'thumbnail': 'md5:4df24d8a226f5b2508efbcc6ae874ebf', 'upload_date': '20230421', 
'series_id': '46vyD7z', 'description': 'Nick Ferrari At Breakfast is Leading Britain\'s Conversation.', 'title': 'Nick Ferrari', 'duration': 10800.0, }, }] def _real_extract(self, url): video_id, podcast = self._match_valid_url(url).group('id', 'podcast') props = self._get_page_props(url, video_id) episode = props['podcastEpisode'] if podcast else props['catchupEpisode'] return self._extract_audio( episode, traverse_obj(episode, 'podcast', 'show', expected_type=dict) or {}) class GlobalPlayerVideoIE(GlobalPlayerBaseIE): _VALID_URL = r'https?://www\.globalplayer\.com/videos/(?P<id>\w+)' _TESTS = [{ 'url': 'https://www.globalplayer.com/videos/2JsSZ7Gm2uP/', 'info_dict': { 'id': '2JsSZ7Gm2uP', 'ext': 'mp4', 'description': 'md5:6a9f063c67c42f218e42eee7d0298bfd', 'thumbnail': 'md5:d4498af48e15aae4839ce77b97d39550', 'upload_date': '20230420', 'title': 'Treble Malakai Bayoh sings a sublime Handel aria at Classic FM Live', }, }] def _real_extract(self, url): video_id = self._match_id(url) meta = self._get_page_props(url, video_id)['videoData'] return { 'id': video_id, **traverse_obj(meta, { 'url': 'url', 'thumbnail': ('image', 'url'), 'title': 'title', 'upload_date': ('publish_date', {unified_strdate}), 'description': 'description', }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nytimes.py
yt_dlp/extractor/nytimes.py
import json import uuid from .common import InfoExtractor from ..utils import ( ExtractorError, clean_html, determine_ext, extract_attributes, float_or_none, get_elements_html_by_class, int_or_none, merge_dicts, mimetype2ext, parse_iso8601, remove_end, remove_start, str_or_none, traverse_obj, url_or_none, ) class NYTimesBaseIE(InfoExtractor): _DNS_NAMESPACE = uuid.UUID('36dd619a-56dc-595b-9e09-37f4152c7b5d') _TOKEN = 'MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuNIzKBOFB77aT/jN/FQ+/QVKWq5V1ka1AYmCR9hstz1pGNPH5ajOU9gAqta0T89iPnhjwla+3oec/Z3kGjxbpv6miQXufHFq3u2RC6HyU458cLat5kVPSOQCe3VVB5NRpOlRuwKHqn0txfxnwSSj8mqzstR997d3gKB//RO9zE16y3PoWlDQXkASngNJEWvL19iob/xwAkfEWCjyRILWFY0JYX3AvLMSbq7wsqOCE5srJpo7rRU32zsByhsp1D5W9OYqqwDmflsgCEQy2vqTsJjrJohuNg+urMXNNZ7Y3naMoqttsGDrWVxtPBafKMI8pM2ReNZBbGQsQXRzQNo7+QIDAQAB' _GRAPHQL_API = 'https://samizdat-graphql.nytimes.com/graphql/v2' _GRAPHQL_QUERY = '''query VideoQuery($id: String!) { video(id: $id) { ... on Video { bylines { renderedRepresentation } duration firstPublished promotionalHeadline promotionalMedia { ... 
on Image { crops { name renditions { name width height url } } } } renditions { type width height url bitrate } summary } } }''' def _call_api(self, media_id): # reference: `id-to-uri.js` video_uuid = uuid.uuid5(self._DNS_NAMESPACE, 'video') media_uuid = uuid.uuid5(video_uuid, media_id) return traverse_obj(self._download_json( self._GRAPHQL_API, media_id, 'Downloading JSON from GraphQL API', data=json.dumps({ 'query': self._GRAPHQL_QUERY, 'variables': {'id': f'nyt://video/{media_uuid}'}, }, separators=(',', ':')).encode(), headers={ 'Content-Type': 'application/json', 'Nyt-App-Type': 'vhs', 'Nyt-App-Version': 'v3.52.21', 'Nyt-Token': self._TOKEN, 'Origin': 'https://nytimes.com', }, fatal=False), ('data', 'video', {dict})) or {} def _extract_thumbnails(self, thumbs): return traverse_obj(thumbs, (lambda _, v: url_or_none(v['url']), { 'url': 'url', 'width': ('width', {int_or_none}), 'height': ('height', {int_or_none}), }), default=None) def _extract_formats_and_subtitles(self, video_id, content_media_json): urls = [] formats = [] subtitles = {} for video in traverse_obj(content_media_json, ('renditions', ..., {dict})): video_url = video.get('url') format_id = video.get('type') if not video_url or format_id == 'thumbs' or video_url in urls: continue urls.append(video_url) ext = mimetype2ext(video.get('mimetype')) or determine_ext(video_url) if ext == 'm3u8': m3u8_fmts, m3u8_subs = self._extract_m3u8_formats_and_subtitles( video_url, video_id, 'mp4', 'm3u8_native', m3u8_id=format_id or 'hls', fatal=False) formats.extend(m3u8_fmts) self._merge_subtitles(m3u8_subs, target=subtitles) elif ext == 'mpd': continue # all mpd urls give 404 errors else: formats.append({ 'url': video_url, 'format_id': format_id, 'vcodec': video.get('videoencoding') or video.get('video_codec'), 'width': int_or_none(video.get('width')), 'height': int_or_none(video.get('height')), 'filesize': traverse_obj(video, ( ('file_size', 'fileSize'), (None, ('value')), {int_or_none}), get_all=False), 'tbr': 
int_or_none(video.get('bitrate'), 1000) or None, 'ext': ext, }) return formats, subtitles def _extract_video(self, media_id): data = self._call_api(media_id) formats, subtitles = self._extract_formats_and_subtitles(media_id, data) return { 'id': media_id, 'title': data.get('promotionalHeadline'), 'description': data.get('summary'), 'timestamp': parse_iso8601(data.get('firstPublished')), 'duration': float_or_none(data.get('duration'), scale=1000), 'creator': ', '.join(traverse_obj(data, ( # TODO: change to 'creators' 'bylines', ..., 'renderedRepresentation', {lambda x: remove_start(x, 'By ')}))), 'formats': formats, 'subtitles': subtitles, 'thumbnails': self._extract_thumbnails( traverse_obj(data, ('promotionalMedia', 'crops', ..., 'renditions', ...))), } class NYTimesIE(NYTimesBaseIE): _VALID_URL = r'https?://(?:(?:www\.)?nytimes\.com/video/(?:[^/]+/)+?|graphics8\.nytimes\.com/bcvideo/\d+(?:\.\d+)?/iframe/embed\.html\?videoId=)(?P<id>\d+)' _EMBED_REGEX = [r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//graphics8\.nytimes\.com/bcvideo/[^/]+/iframe/embed\.html.+?)\1>'] _TESTS = [{ 'url': 'http://www.nytimes.com/video/opinion/100000002847155/verbatim-what-is-a-photocopier.html?playlistId=100000001150263', 'md5': 'a553aa344014e3723d33893d89d4defc', 'info_dict': { 'id': '100000002847155', 'ext': 'mp4', 'title': 'Verbatim: What Is a Photocopier?', 'description': 'md5:93603dada88ddbda9395632fdc5da260', 'timestamp': 1398646132, 'upload_date': '20140428', 'creator': 'Brett Weiner', 'thumbnail': r're:https?://\w+\.nyt.com/images/.+\.jpg', 'duration': 419, }, }, { 'url': 'http://www.nytimes.com/video/travel/100000003550828/36-hours-in-dubai.html', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) return self._extract_video(video_id) class NYTimesArticleIE(NYTimesBaseIE): _VALID_URL = r'https?://(?:www\.)?nytimes\.com/\d{4}/\d{2}/\d{2}/(?!books|podcasts)[^/?#]+/(?:\w+/)?(?P<id>[^./?#]+)(?:\.html)?' 
_TESTS = [{ 'url': 'http://www.nytimes.com/2015/04/14/business/owner-of-gravity-payments-a-credit-card-processor-is-setting-a-new-minimum-wage-70000-a-year.html?_r=0', 'md5': '3eb5ddb1d6f86254fe4f233826778737', 'info_dict': { 'id': '100000003628438', 'ext': 'mp4', 'title': 'One Company’s New Minimum Wage: $70,000 a Year', 'description': 'md5:89ba9ab67ca767bb92bf823d1f138433', 'timestamp': 1429047468, 'upload_date': '20150414', 'uploader': 'Matthew Williams', 'creator': 'Patricia Cohen', 'thumbnail': r're:https?://\w+\.nyt.com/images/.*\.jpg', 'duration': 119.0, }, 'skip': 'HTTP Error 500: Internal Server Error', }, { # article with audio and no video 'url': 'https://www.nytimes.com/2023/09/29/health/mosquitoes-genetic-engineering.html', 'md5': '2365b3555c8aa7f4dd34ca735ad02e6a', 'info_dict': { 'id': '100000009110381', 'ext': 'mp3', 'title': 'The Gamble: Can Genetically Modified Mosquitoes End Disease?', 'description': 'md5:9ff8b47acbaf7f3ca8c732f5c815be2e', 'timestamp': 1696008129, 'upload_date': '20230929', 'creators': ['Stephanie Nolen', 'Natalija Gormalova'], 'thumbnail': r're:https?://\w+\.nyt.com/images/.*\.jpg', 'duration': 1322, }, }, { # lede_media_block already has sourceId 'url': 'https://www.nytimes.com/2023/11/29/business/dealbook/kamala-harris-biden-voters.html', 'md5': '3eb5ddb1d6f86254fe4f233826778737', 'info_dict': { 'id': '100000009202270', 'ext': 'mp4', 'title': 'Kamala Harris Defends Biden Policies, but Says ‘More Work’ Needed to Reach Voters', 'description': 'md5:de4212a7e19bb89e4fb14210ca915f1f', 'timestamp': 1701290997, 'upload_date': '20231129', 'uploader': 'By The New York Times', 'creators': ['Katie Rogers'], 'thumbnail': r're:https?://\w+\.nyt.com/images/.*\.jpg', 'duration': 97.631, }, 'params': { 'skip_download': 'm3u8', }, }, { # multiple videos in the same article 'url': 'https://www.nytimes.com/2023/12/02/business/air-traffic-controllers-safety.html', 'info_dict': { 'id': 'air-traffic-controllers-safety', 'title': 'Drunk and Asleep on 
the Job: Air Traffic Controllers Pushed to the Brink', 'description': 'md5:549e5a5e935bf7d048be53ba3d2c863d', 'upload_date': '20231202', 'creators': ['Emily Steel', 'Sydney Ember'], 'timestamp': 1701511264, }, 'playlist_count': 3, }, { # lede_media_block does not have sourceId 'url': 'https://www.nytimes.com/2025/04/30/well/move/hip-mobility-routine.html', 'info_dict': { 'id': 'hip-mobility-routine', 'title': 'Tight Hips? These Moves Can Help.', 'description': 'Sitting all day is hard on your hips. Try this simple routine for better mobility.', 'creators': ['Alyssa Ages', 'Theodore Tae'], 'timestamp': 1746003629, 'upload_date': '20250430', }, 'playlist_count': 7, }, { 'url': 'https://www.nytimes.com/2023/12/02/business/media/netflix-squid-game-challenge.html', 'only_matching': True, }] def _extract_content_from_block(self, block): details = traverse_obj(block, { 'id': ('sourceId', {str}), 'uploader': ('bylines', ..., 'renderedRepresentation', {str}), 'duration': (None, (('duration', {float_or_none(scale=1000)}), ('length', {int_or_none}))), 'timestamp': ('firstPublished', {parse_iso8601}), 'series': ('podcastSeries', {str}), }, get_all=False) formats, subtitles = self._extract_formats_and_subtitles(details.get('id'), block) # audio articles will have an url and no formats url = traverse_obj(block, ('fileUrl', {url_or_none})) if not formats and url: formats.append({'url': url, 'vcodec': 'none'}) return { **details, 'thumbnails': self._extract_thumbnails(traverse_obj( block, ('promotionalMedia', 'crops', ..., 'renditions', ...))), 'formats': formats, 'subtitles': subtitles, } def _real_extract(self, url): page_id = self._match_id(url) webpage = self._download_webpage(url, page_id, impersonate=True) art_json = self._search_json( r'window\.__preloadedData\s*=', webpage, 'media details', page_id, transform_source=lambda x: x.replace('undefined', 'null'))['initialData']['data']['article'] content = art_json['sprinkledBody']['content'] blocks = [] block_filter = lambda k, 
v: k == 'media' and v['__typename'] in ('Video', 'Audio') if lede_media_block := traverse_obj(content, (..., 'ledeMedia', block_filter, any)): lede_media_block.setdefault('sourceId', art_json.get('sourceId')) blocks.append(lede_media_block) blocks.extend(traverse_obj(content, (..., block_filter))) if not blocks: raise ExtractorError('Unable to extract any media blocks from webpage') common_info = { 'title': remove_end(self._html_extract_title(webpage), ' - The New York Times'), 'description': traverse_obj(art_json, ( 'sprinkledBody', 'content', ..., 'summary', 'content', ..., 'text', {str}), get_all=False) or self._html_search_meta(['og:description', 'twitter:description'], webpage), 'timestamp': traverse_obj(art_json, ('firstPublished', {parse_iso8601})), 'creators': traverse_obj(art_json, ('bylines', ..., 'creators', ..., 'displayName', {str})), 'thumbnails': self._extract_thumbnails(traverse_obj( art_json, ('promotionalMedia', 'assetCrops', ..., 'renditions', ...))), } entries = [] for block in blocks: entries.append(merge_dicts(self._extract_content_from_block(block), common_info)) if len(entries) > 1: return self.playlist_result(entries, page_id, **common_info) return { 'id': page_id, **entries[0], } class NYTimesCookingIE(NYTimesBaseIE): IE_NAME = 'NYTimesCookingGuide' _VALID_URL = r'https?://cooking\.nytimes\.com/guides/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://cooking.nytimes.com/guides/13-how-to-cook-a-turkey', 'info_dict': { 'id': '13-how-to-cook-a-turkey', 'title': 'How to Cook a Turkey', 'description': 'md5:726cfd3f9b161bdf5c279879e8050ca0', }, 'playlist_count': 2, }, { # single video example 'url': 'https://cooking.nytimes.com/guides/50-how-to-make-mac-and-cheese', 'md5': '64415805fe0b8640fce6b0b9def5989a', 'info_dict': { 'id': '100000005835845', 'ext': 'mp4', 'title': 'How to Make Mac and Cheese', 'description': 'md5:b8f2f33ec1fb7523b21367147c9594f1', 'timestamp': 1522950315, 'upload_date': '20180405', 'duration': 9.51, 'creator': 'Alison Roman', 
'thumbnail': r're:https?://\w+\.nyt.com/images/.*\.jpg', }, }, { 'url': 'https://cooking.nytimes.com/guides/20-how-to-frost-a-cake', 'md5': '64415805fe0b8640fce6b0b9def5989a', 'info_dict': { 'id': '20-how-to-frost-a-cake', 'title': 'How to Frost a Cake', 'description': 'md5:a31fe3b98a8ce7b98aae097730c269cd', }, 'playlist_count': 8, }] def _real_extract(self, url): page_id = self._match_id(url) webpage = self._download_webpage(url, page_id) title = self._html_search_meta(['og:title', 'twitter:title'], webpage) description = self._html_search_meta(['og:description', 'twitter:description'], webpage) lead_video_id = self._search_regex( r'data-video-player-id="(\d+)"></div>', webpage, 'lead video') media_ids = traverse_obj( get_elements_html_by_class('video-item', webpage), (..., {extract_attributes}, 'data-video-id')) if media_ids: media_ids.append(lead_video_id) return self.playlist_result( map(self._extract_video, media_ids), page_id, title, description) return { **self._extract_video(lead_video_id), 'title': title, 'description': description, 'creator': self._search_regex( # TODO: change to 'creators' r'<span itemprop="author">([^<]+)</span></p>', webpage, 'author', default=None), } class NYTimesCookingRecipeIE(InfoExtractor): _VALID_URL = r'https?://cooking\.nytimes\.com/recipes/(?P<id>\d+)' _TESTS = [{ 'url': 'https://cooking.nytimes.com/recipes/1017817-cranberry-curd-tart', 'md5': '579e83bbe8e61e9de67f80edba8a78a8', 'info_dict': { 'id': '1017817', 'ext': 'mp4', 'title': 'Cranberry Curd Tart', 'description': 'md5:ad77a3fc321db636256d4343c5742152', 'timestamp': 1447804800, 'upload_date': '20151118', 'creator': 'David Tanis', 'thumbnail': r're:https?://\w+\.nyt.com/images/.*\.jpg', }, }, { 'url': 'https://cooking.nytimes.com/recipes/1024781-neapolitan-checkerboard-cookies', 'md5': '58df35998241dcf0620e99e646331b42', 'info_dict': { 'id': '1024781', 'ext': 'mp4', 'title': 'Neapolitan Checkerboard Cookies', 'description': 'md5:ba12394c585ababea951cb6d2fcc6631', 
'timestamp': 1701302400, 'upload_date': '20231130', 'creator': 'Sue Li', 'thumbnail': r're:https?://\w+\.nyt.com/images/.*\.jpg', }, }, { 'url': 'https://cooking.nytimes.com/recipes/1019516-overnight-oats', 'md5': '2fe7965a3adc899913b8e25ada360823', 'info_dict': { 'id': '1019516', 'ext': 'mp4', 'timestamp': 1546387200, 'description': 'md5:8856ce10239161bd2596ac335b9f9bfb', 'upload_date': '20190102', 'title': 'Overnight Oats', 'creator': 'Genevieve Ko', 'thumbnail': r're:https?://\w+\.nyt.com/images/.*\.jpg', }, }] def _real_extract(self, url): page_id = self._match_id(url) webpage = self._download_webpage(url, page_id) recipe_data = self._search_nextjs_data(webpage, page_id)['props']['pageProps']['recipe'] formats, subtitles = self._extract_m3u8_formats_and_subtitles( recipe_data['videoSrc'], page_id, 'mp4', m3u8_id='hls') return { **traverse_obj(recipe_data, { 'id': ('id', {str_or_none}), 'title': ('title', {str}), 'description': ('topnote', {clean_html}), 'timestamp': ('publishedAt', {int_or_none}), 'creator': ('contentAttribution', 'cardByline', {str}), }), 'formats': formats, 'subtitles': subtitles, 'thumbnails': [{'url': thumb_url} for thumb_url in traverse_obj( recipe_data, ('image', 'crops', 'recipe', ..., {url_or_none}))], }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/trueid.py
yt_dlp/extractor/trueid.py
from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
    ExtractorError,
    determine_ext,
    int_or_none,
    parse_age_limit,
    traverse_obj,
    unified_timestamp,
    url_or_none,
)


class TrueIDIE(InfoExtractor):
    """Extractor for TrueID (trueid.id / trueid.ph / vn.trueid.net) movies and series."""
    _VALID_URL = r'https?://(?P<domain>vn\.trueid\.net|trueid\.(?:id|ph))/(?:movie|series/[^/]+)/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://trueid.id/movie/XYNlDOZZJzL6/pengabdi-setan/',
        'md5': '2552c7535125885901f1a2a4bcf32ca3',
        'info_dict': {
            'id': 'XYNlDOZZJzL6',
            'ext': 'mp4',
            'title': 'Pengabdi Setan',
            'display_id': 'pengabdi-setan',
            'description': 'md5:b0b41df08601e85e5291496c9bbe52cd',
            'timestamp': 1600243511,
            'categories': ['Film Indonesia', 'Horror', 'Mystery'],
            'release_timestamp': 1593536400,
            'release_year': 1982,
            'cast': list,
            'thumbnail': 'https://cms.dmpcdn.com/movie/2020/09/18/8b6e35c0-f97f-11ea-81fe-c52fc9dd314f_original.png',
            'upload_date': '20200916',
            'release_date': '20200630',
        },
        'expected_warnings': ['Video is geo restricted.'],
    }, {
        'url': 'https://trueid.id/series/zZOBVPb62EwR/qXY73rwyl7oj/one-piece-ep-1/',
        'md5': '1c6d976049bc3c89a8a25aed2c3fb081',
        'info_dict': {
            'id': 'qXY73rwyl7oj',
            'ext': 'mp4',
            'title': 'One Piece Ep. 1',
            'display_id': 'one-piece-ep-1',
            'description': 'md5:13226d603bd03c4150a1cf5758e842ea',
            'timestamp': 1610421085,
            'categories': ['Animation & Cartoon', 'Kids & Family', 'Adventure'],
            'release_timestamp': 1612112400,
            'release_year': 1999,
            'age_limit': 7,
            'cast': ['Kounosuke Uda', 'Junji Shimizu'],
            'thumbnail': 'https://cms.dmpcdn.com/movie/2021/01/13/f84e9e70-5562-11eb-9fe2-dd6c2099a468_original.png',
            'upload_date': '20210112',
            'release_date': '20210131',
        },
        'expected_warnings': ['Video is geo restricted.'],
    }, {
        'url': 'https://vn.trueid.net/series/7DNPM7Bpa9wv/pwLgEQ4Xbda2/haikyu-vua-bong-chuyen-phan-1/',
        'info_dict': {
            'id': 'pwLgEQ4Xbda2',
            'ext': 'mp4',
            'title': 'Haikyu!!: Vua Bóng Chuyền Phần 1 - Tập 1',
            'display_id': 'haikyu-vua-bong-chuyen-phan-1-tap-1',
            'description': 'md5:0374dd44d247799169449ee30cca963a',
            'timestamp': 1629270901,
            'categories': ['Anime', 'Phim Hài', 'Phim Học Đường', 'Phim Thể Thao', 'Shounen'],
            'release_timestamp': 1629270720,
            'release_year': 2014,
            'age_limit': 13,
            'thumbnail': 'https://cms.dmpcdn.com/movie/2021/09/28/b6e7ec00-2039-11ec-8436-974544e5841f_webp_original.jpg',
            'upload_date': '20210818',
            'release_date': '20210818',
        },
        'expected_warnings': ['Video is geo restricted.'],
    }, {
        'url': 'https://trueid.ph/series/l8rvvAw7Jwv8/l8rvvAw7Jwv8/naruto-trailer/',
        'only_matching': True,
    }]
    # Site-specific rating strings that parse_age_limit() does not understand.
    _CUSTOM_RATINGS = {
        'PG': 7,
    }

    def _real_extract(self, url):
        """Extract a TrueID video.

        Downloads the page's Next.js metadata for display fields, then asks
        the streamer API for the actual media URL.  Geo-restriction and other
        API errors are surfaced from the error response body.
        """
        domain, video_id = self._match_valid_url(url).group('domain', 'id')
        webpage = self._download_webpage(url, video_id)
        initial_data = traverse_obj(
            self._search_nextjs_data(webpage, video_id, fatal=False),
            ('props', 'pageProps', 'initialContentData'), default={})

        # Initialize defaults up front.  Previously, a direct (non-HLS/DASH)
        # stream URL left `subs` unbound, and a suppressed raise_no_formats()
        # (with ignore_no_formats_error) left `stream_data`/`formats` unbound,
        # causing a NameError at return time.
        stream_data = None
        formats, subs = [], {}

        try:
            stream_data = self._download_json(
                f'https://{domain}/cmsPostProxy/contents/video/{video_id}/streamer?os=android', video_id, data=b'')['data']
        except ExtractorError as e:
            if not isinstance(e.cause, HTTPError):
                raise e
            errmsg = self._parse_json(e.cause.response.read().decode(), video_id)['meta']['message']
            if 'country' in errmsg:
                # The API reports geo blocks in the error message text.
                self.raise_geo_restricted(
                    errmsg, [initial_data['display_country']] if initial_data.get('display_country') else None, True)
            else:
                self.raise_no_formats(errmsg, video_id=video_id)

        if stream_data:
            stream_url = stream_data['stream']['stream_url']
            stream_ext = determine_ext(stream_url)
            if stream_ext == 'm3u8':
                formats, subs = self._extract_m3u8_formats_and_subtitles(stream_url, video_id, 'mp4')
            elif stream_ext == 'mpd':
                formats, subs = self._extract_mpd_formats_and_subtitles(stream_url, video_id)
            else:
                formats = [{'url': stream_url}]

        thumbnails = [
            {'id': thumb_key, 'url': thumb_url}
            for thumb_key, thumb_url in (initial_data.get('thumb_list') or {}).items()
            if url_or_none(thumb_url)]

        return {
            'id': video_id,
            # Fall back to scraping the localized page title when the
            # metadata blob is missing.
            'title': initial_data.get('title') or self._html_search_regex(
                [r'Nonton (?P<name>.+) Gratis',
                 r'Xem (?P<name>.+) Miễn phí',
                 r'Watch (?P<name>.+) Free'], webpage, 'title', group='name'),
            'display_id': initial_data.get('slug_title'),
            'description': initial_data.get('synopsis'),
            'timestamp': unified_timestamp(initial_data.get('create_date')),
            # 'duration': int_or_none(initial_data.get('duration'), invscale=60),  # duration field must atleast be accurate to the second
            'categories': traverse_obj(initial_data, ('article_category_details', ..., 'name')),
            'release_timestamp': unified_timestamp(initial_data.get('publish_date')),
            'release_year': int_or_none(initial_data.get('release_year')),
            'formats': formats,
            'subtitles': subs,
            'thumbnails': thumbnails,
            'age_limit': self._CUSTOM_RATINGS.get(initial_data.get('rate')) or parse_age_limit(initial_data.get('rate')),
            'cast': traverse_obj(initial_data, (('actor', 'director'), ...)),
            'view_count': int_or_none(initial_data.get('count_views')),
            'like_count': int_or_none(initial_data.get('count_likes')),
            'average_rating': int_or_none(initial_data.get('count_ratings')),
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/teamtreehouse.py
yt_dlp/extractor/teamtreehouse.py
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    clean_html,
    determine_ext,
    float_or_none,
    get_element_by_class,
    get_element_by_id,
    parse_duration,
    remove_end,
    urlencode_postdata,
    urljoin,
)


class TeamTreeHouseIE(InfoExtractor):
    """Extractor for teamtreehouse.com library pages (courses, workshops, videos)."""
    _VALID_URL = r'https?://(?:www\.)?teamtreehouse\.com/library/(?P<id>[^/]+)'
    _TESTS = [{
        # Course
        'url': 'https://teamtreehouse.com/library/introduction-to-user-authentication-in-php',
        'info_dict': {
            'id': 'introduction-to-user-authentication-in-php',
            'title': 'Introduction to User Authentication in PHP',
            'description': 'md5:405d7b4287a159b27ddf30ca72b5b053',
        },
        'playlist_mincount': 24,
    }, {
        # WorkShop
        'url': 'https://teamtreehouse.com/library/deploying-a-react-app',
        'info_dict': {
            'id': 'deploying-a-react-app',
            'title': 'Deploying a React App',
            'description': 'md5:10a82e3ddff18c14ac13581c9b8e5921',
        },
        'playlist_mincount': 4,
    }, {
        # Video
        'url': 'https://teamtreehouse.com/library/application-overview-2',
        'info_dict': {
            'id': 'application-overview-2',
            'ext': 'mp4',
            'title': 'Application Overview',
            'description': 'md5:4b0a234385c27140a4378de5f1e15127',
        },
        'expected_warnings': ['This is just a preview'],
    }]
    _NETRC_MACHINE = 'teamtreehouse'

    def _perform_login(self, username, password):
        """Log in via the site's form; raises ExtractorError on a visible error message."""
        signin_page = self._download_webpage(
            'https://teamtreehouse.com/signin',
            None, 'Downloading signin page')
        # Carry over CSRF/hidden fields from the sign-in form.
        data = self._form_hidden_inputs('new_user_session', signin_page)
        data.update({
            'user_session[email]': username,
            'user_session[password]': password,
        })
        error_message = get_element_by_class('error-message', self._download_webpage(
            'https://teamtreehouse.com/person_session',
            None, 'Logging in',
            data=urlencode_postdata(data)))
        if error_message:
            raise ExtractorError(clean_html(error_message), expected=True)

    def _real_extract(self, url):
        """Extract either a single video or a playlist (course/workshop)."""
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        title = self._html_search_meta(['og:title', 'twitter:title'], webpage)
        description = self._html_search_meta(
            ['description', 'og:description', 'twitter:description'], webpage)
        entries = self._parse_html5_media_entries(url, webpage, display_id)
        if entries:
            # Page contains an HTML5 player -> single video.
            info = entries[0]
            for subtitles in info.get('subtitles', {}).values():
                for subtitle in subtitles:
                    subtitle['ext'] = determine_ext(subtitle['url'], 'srt')
            is_preview = 'data-preview="true"' in webpage
            if is_preview:
                self.report_warning(
                    'This is just a preview. You need to be signed in with a Basic account to download the entire video.', display_id)
                # Previews are fixed-length 30s clips.
                duration = 30
            else:
                duration = float_or_none(self._search_regex(
                    r'data-duration="(\d+)"', webpage, 'duration'), 1000)
                if not duration:
                    duration = parse_duration(get_element_by_id(
                        'video-duration', webpage))
            info.update({
                'id': display_id,
                'title': title,
                'description': description,
                'duration': duration,
            })
            return info
        else:
            # No player on the page -> course or workshop listing.
            def extract_urls(html, extract_info=None):
                # Appends url_transparent entries (into the enclosing
                # `entries` list) for every anchor in `html`.
                for path in re.findall(r'<a[^>]+href="([^"]+)"', html):
                    page_url = urljoin(url, path)
                    entry = {
                        '_type': 'url_transparent',
                        'id': self._match_id(page_url),
                        'url': page_url,
                        # NOTE(review): 'id_key' looks like a typo for
                        # 'ie_key' — confirm against the url_transparent
                        # contract before changing.
                        'id_key': self.ie_key(),
                    }
                    if extract_info:
                        entry.update(extract_info)
                    entries.append(entry)

            workshop_videos = self._search_regex(
                r'(?s)<ul[^>]+id="workshop-videos"[^>]*>(.+?)</ul>',
                webpage, 'workshop videos', default=None)
            if workshop_videos:
                extract_urls(workshop_videos)
            else:
                stages_path = self._search_regex(
                    r'(?s)<div[^>]+id="syllabus-stages"[^>]+data-url="([^"]+)"',
                    webpage, 'stages path')
                if stages_path:
                    stages_page = self._download_webpage(
                        urljoin(url, stages_path), display_id, 'Downloading stages page')
                    for chapter_number, (chapter, steps_list) in enumerate(re.findall(r'(?s)<h2[^>]*>\s*(.+?)\s*</h2>.+?<ul[^>]*>(.+?)</ul>', stages_page), 1):
                        extract_urls(steps_list, {
                            'chapter': chapter,
                            'chapter_number': chapter_number,
                        })
                    title = remove_end(title, ' Course')

            return self.playlist_result(
                entries, display_id, title, description)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/vuclip.py
yt_dlp/extractor/vuclip.py
import re
import urllib.parse

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    parse_duration,
    remove_end,
)


class VuClipIE(InfoExtractor):
    """Extractor for mobile video pages on vuclip.com."""
    _VALID_URL = r'https?://(?:m\.)?vuclip\.com/w\?.*?cid=(?P<id>[0-9]+)'

    _TEST = {
        'url': 'http://m.vuclip.com/w?cid=1129900602&bu=8589892792&frm=w&z=34801&op=0&oc=843169247&section=recommend',
        'info_dict': {
            'id': '1129900602',
            'ext': '3gp',
            'title': 'Top 10 TV Convicts',
            'duration': 733,
        },
    }

    def _real_extract(self, url):
        """Extract a single video, stepping past the ad interstitial if present."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # An interstitial ad page offers a "No" button whose onClick target
        # is the ad-free page; follow it when present.
        ad_match = re.search(
            r'''value="No.*?" onClick="location.href='([^"']+)'"''', webpage)
        if ad_match:
            parsed = urllib.parse.urlparse(url)
            webpage = self._download_webpage(
                parsed.scheme + '://' + parsed.netloc + ad_match.group(1),
                video_id, note='Download post-ad page')

        error_msg = self._html_search_regex(
            r'<p class="message">(.*?)</p>', webpage, 'error message',
            default=None)
        if error_msg:
            raise ExtractorError(
                f'{self.IE_NAME} said: {error_msg}', expected=True)

        # These clowns alternate between two page types
        direct_url = self._search_regex(
            r'<a[^>]+href="([^"]+)"[^>]*><img[^>]+src="[^"]*/play\.gif',
            webpage, 'video URL', default=None)
        if direct_url:
            formats = [{'url': direct_url}]
        else:
            formats = self._parse_html5_media_entries(url, webpage, video_id)[0]['formats']

        raw_title = self._html_search_regex(
            r'<title>(.*?)-\s*Vuclip</title>', webpage, 'title')
        duration_str = self._html_search_regex(
            r'[(>]([0-9]+:[0-9]+)(?:<span|\))', webpage, 'duration', fatal=False)

        return {
            'id': video_id,
            'formats': formats,
            'title': remove_end(raw_title.strip(), ' - Video'),
            'duration': parse_duration(duration_str),
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nationalgeographic.py
yt_dlp/extractor/nationalgeographic.py
from .common import InfoExtractor
from .fox import FOXIE
from ..utils import (
    smuggle_url,
    url_basename,
)


class NationalGeographicVideoIE(InfoExtractor):
    """Extractor for legacy video.nationalgeographic.com pages (ThePlatform-backed)."""
    IE_NAME = 'natgeo:video'
    _VALID_URL = r'https?://video\.nationalgeographic\.com/.*?'

    _TESTS = [
        {
            'url': 'http://video.nationalgeographic.com/video/news/150210-news-crab-mating-vin?source=featuredvideo',
            'md5': '730855d559abbad6b42c2be1fa584917',
            'info_dict': {
                'id': '0000014b-70a1-dd8c-af7f-f7b559330001',
                'ext': 'mp4',
                'title': 'Mating Crabs Busted by Sharks',
                'description': 'md5:16f25aeffdeba55aaa8ec37e093ad8b3',
                'timestamp': 1423523799,
                'upload_date': '20150209',
                'uploader': 'NAGS',
            },
            'add_ie': ['ThePlatform'],
            'skip': 'Redirects to main page',
        },
        {
            'url': 'http://video.nationalgeographic.com/wild/when-sharks-attack/the-real-jaws',
            'md5': '6a3105eb448c070503b3105fb9b320b5',
            'info_dict': {
                'id': 'ngc-I0IauNSWznb_UV008GxSbwY35BZvgi2e',
                'ext': 'mp4',
                'title': 'The Real Jaws',
                'description': 'md5:8d3e09d9d53a85cd397b4b21b2c77be6',
                'timestamp': 1433772632,
                'upload_date': '20150608',
                'uploader': 'NAGS',
            },
            'add_ie': ['ThePlatform'],
            'skip': 'Redirects to main page',
        },
    ]

    def _real_extract(self, url):
        """Delegate to ThePlatform via the GUID embedded in the player markup."""
        name = url_basename(url)

        webpage = self._download_webpage(url, name)
        guid = self._search_regex(
            r'id="(?:videoPlayer|player-container)"[^>]+data-guid="([^"]+)"',
            webpage, 'guid')

        return {
            '_type': 'url_transparent',
            'ie_key': 'ThePlatform',
            'url': smuggle_url(
                f'http://link.theplatform.com/s/ngs/media/guid/2423130747/{guid}?mbr=true',
                {'force_smil_url': True}),
            'id': guid,
        }


class NationalGeographicTVIE(FOXIE):  # XXX: Do not subclass from concrete IE
    """Nat Geo TV watch pages — reuses FOXIE's extraction with NatGeo's API key."""
    _VALID_URL = r'https?://(?:www\.)?nationalgeographic\.com/tv/watch/(?P<id>[\da-fA-F]+)'
    _TESTS = [{
        'url': 'https://www.nationalgeographic.com/tv/watch/6a875e6e734b479beda26438c9f21138/',
        'info_dict': {
            'id': '6a875e6e734b479beda26438c9f21138',
            'ext': 'mp4',
            'title': 'Why Nat Geo? Valley of the Boom',
            'description': 'The lives of prominent figures in the tech world, including their friendships, rivalries, victories and failures.',
            'timestamp': 1542662458,
            'upload_date': '20181119',
            'age_limit': 14,
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'Content not available',
    }]
    # Overrides consumed by FOXIE's extraction logic.
    _HOME_PAGE_URL = 'https://www.nationalgeographic.com/tv/'
    _API_KEY = '238bb0a0c2aba67922c48709ce0c06fd'
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/mbn.py
yt_dlp/extractor/mbn.py
import re

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    unified_strdate,
    url_or_none,
)
from ..utils.traversal import traverse_obj


class MBNIE(InfoExtractor):
    """Extractor for VOD program contents on mbn.co.kr."""
    IE_DESC = 'mbn.co.kr (매일방송)'
    _VALID_URL = r'https?://(?:www\.)?mbn\.co\.kr/vod/programContents/preview(?:list)?/\d+/\d+/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://mbn.co.kr/vod/programContents/previewlist/861/5433/1276155',
        'md5': '85e1694e5b247c04d1386b7e3c90fd76',
        'info_dict': {
            'id': '1276155',
            'ext': 'mp4',
            'title': '결국 사로잡힌 권유리, 그녀를 목숨 걸고 구하려는 정일우!',
            'duration': 3891,
            'release_date': '20210703',
            'thumbnail': 'http://img.vod.mbn.co.kr/mbnvod2img/861/2021/07/03/20210703230811_20_861_1276155_360_7_0.jpg',
            'series': '보쌈 - 운명을 훔치다',
            'episode': 'Episode 19',
            'episode_number': 19,
        },
    }, {
        'url': 'https://www.mbn.co.kr/vod/programContents/previewlist/835/5294/1084744',
        'md5': 'fc65d3aac85e85e0b5056f4ef99cde4a',
        'info_dict': {
            'id': '1084744',
            'ext': 'mp4',
            'title': '김정은♥최원영, 제자리를 찾은 위험한 부부! "결혼은 투쟁이면서, 어려운 방식이야.."',
            'duration': 93,
            'release_date': '20201124',
            'thumbnail': 'http://img.vod.mbn.co.kr/mbnvod2img/835/2020/11/25/20201125000221_21_835_1084744_360_7_0.jpg',
            'series': '나의 위험한 아내',
        },
    }, {
        'url': 'https://www.mbn.co.kr/vod/programContents/preview/952/6088/1054797?next=1',
        'md5': 'c711103c72aeac8323a5cf1751f10097',
        'info_dict': {
            'id': '1054797',
            'ext': 'mp4',
            'title': '[2차 티저] MBN 주말 미니시리즈 <완벽한 결혼의 정석> l 그녀에게 주어진 두 번째 인생',
            'duration': 65,
            'release_date': '20231028',
            'thumbnail': 'http://img.vod.mbn.co.kr/vod2/952/2023/09/11/20230911130223_22_952_1054797_1080_7.jpg',
            'series': '완벽한 결혼의 정석',
        },
    }]

    def _real_extract(self, url):
        """Fetch media info JSON, authenticate each HLS manifest URL, and build formats."""
        content_id = self._match_id(url)
        webpage = self._download_webpage(url, content_id)

        # The content class code is embedded in the page; '20' is used as a
        # fallback when it cannot be found.
        content_cls_cd = self._search_regex(
            r'"\?content_cls_cd=(\d+)&', webpage, 'content cls cd', fatal=False) or '20'
        media_info = self._download_json(
            'https://www.mbn.co.kr/player/mbnVodPlayer_2020.mbn', content_id,
            note='Fetching playback data', query={
                'content_cls_cd': content_cls_cd,
                'content_id': content_id,
                'relay_type': '1',
            })

        formats = []
        for stream_url in traverse_obj(media_info, ('movie_list', ..., 'url', {url_or_none})):
            # Rewrite variant-playlist URLs to the master manifest.
            stream_url = re.sub(r'/(?:chunk|play)list(?:_pd\d+)?\.m3u8', '/manifest.m3u8', stream_url)
            # The CDN requires each manifest URL to be signed by this endpoint.
            final_url = url_or_none(self._download_webpage(
                f'https://www.mbn.co.kr/player/mbnStreamAuth_new_vod.mbn?vod_url={stream_url}',
                content_id, note='Fetching authenticated m3u8 url'))

            formats.extend(self._extract_m3u8_formats(final_url, content_id, fatal=False))

        return {
            'id': content_id,
            **traverse_obj(media_info, {
                'title': ('movie_title', {str}),
                'duration': ('play_sec', {int_or_none}),
                # bcast_date comes dotted (e.g. "2021.07.03"); strip dots
                # before normalizing.
                'release_date': ('bcast_date', {lambda x: x.replace('.', '')}, {unified_strdate}),
                'thumbnail': ('movie_start_Img', {url_or_none}),
                'series': ('prog_nm', {str}),
                'episode_number': ('ad_contentnumber', {int_or_none}),
            }),
            'formats': formats,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/sohu.py
yt_dlp/extractor/sohu.py
import base64
import re
import urllib.parse

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    float_or_none,
    int_or_none,
    traverse_obj,
    try_get,
    unified_timestamp,
    url_or_none,
    urljoin,
)


class SohuIE(InfoExtractor):
    """Extractor for tv.sohu.com / my.tv.sohu.com, including multi-part videos."""
    _VALID_URL = r'https?://(?P<mytv>my\.)?tv\.sohu\.com/.+?/(?(mytv)|n)(?P<id>\d+)\.shtml.*?'

    # Sohu videos give different MD5 sums on Travis CI and my machine
    _TESTS = [{
        'note': 'This video is available only in Mainland China',
        'url': 'http://tv.sohu.com/20130724/n382479172.shtml#super',
        'info_dict': {
            'id': '382479172',
            'ext': 'mp4',
            'title': 'MV:Far East Movement《The Illest》',
        },
        'skip': 'On available in China',
    }, {
        'url': 'http://tv.sohu.com/20150305/n409385080.shtml',
        'info_dict': {
            'id': '409385080',
            'ext': 'mp4',
            'title': '《2015湖南卫视羊年元宵晚会》唐嫣《花好月圆》',
        },
        'skip': 'no longer available',
    }, {
        'url': 'http://my.tv.sohu.com/us/232799889/78693464.shtml',
        'info_dict': {
            'id': '78693464',
            'ext': 'mp4',
            'title': '【爱范品】第31期:MWC见不到的奇葩手机',
            'uploader': '爱范儿视频',
            'duration': 213,
            'timestamp': 1425519600,
            'upload_date': '20150305',
            'thumbnail': 'http://e3f49eaa46b57.cdn.sohucs.com//group1/M10/83/FA/MTAuMTAuODguODA=/6_14cbccdde5eg104SysCutcloud_78693464_7_0b.jpg',
            'tags': ['爱范儿', '爱范品', 'MWC', '手机'],
        },
    }, {
        'note': 'Multipart video',
        'url': 'http://my.tv.sohu.com/pl/8384802/78910339.shtml',
        'info_dict': {
            'id': '78910339',
            'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
            'uploader': '小苍cany',
            'duration': 744.0,
            'timestamp': 1426269360,
            'upload_date': '20150313',
            'thumbnail': 'http://e3f49eaa46b57.cdn.sohucs.com//group1/M11/89/57/MTAuMTAuODguODA=/6_14cea022a1dg102SysCutcloud_78910339_8_0b.jpg',
            'tags': ['小苍MM', '英雄联盟', '实战秘籍'],
        },
        'playlist': [{
            'info_dict': {
                'id': '78910339_part1',
                'ext': 'mp4',
                'duration': 294,
                'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
            },
        }, {
            'info_dict': {
                'id': '78910339_part2',
                'ext': 'mp4',
                'duration': 300,
                'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
            },
        }, {
            'info_dict': {
                'id': '78910339_part3',
                'ext': 'mp4',
                'duration': 150,
                'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
            },
        }],
    }, {
        'note': 'Video with title containing dash',
        'url': 'http://my.tv.sohu.com/us/249884221/78932792.shtml',
        'info_dict': {
            'id': '78932792',
            'ext': 'mp4',
            'title': 'youtube-dl testing video',
            'duration': 360,
            'timestamp': 1426348620,
            'upload_date': '20150314',
            'thumbnail': 'http://e3f49eaa46b57.cdn.sohucs.com//group1/M02/8A/00/MTAuMTAuODguNzk=/6_14cee1be192g102SysCutcloud_78932792_7_7b.jpg',
            'tags': [],
        },
        'params': {
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
        """Extract a (possibly multi-part) Sohu video.

        Fetches per-quality metadata, then negotiates a real CDN URL for every
        clip of every quality (the API may return placeholder hosts that must
        be retried with the CDN id it reports).
        """
        def _fetch_data(vid_id, mytv=False):
            # my.tv.sohu.com and tv.sohu.com use different metadata endpoints.
            if mytv:
                base_data_url = 'http://my.tv.sohu.com/play/videonew.do?vid='
            else:
                base_data_url = 'http://hot.vrs.sohu.com/vrs_flash.action?vid='

            return self._download_json(
                base_data_url + vid_id, video_id,
                f'Downloading JSON data for {vid_id}',
                headers=self.geo_verification_headers())

        mobj = self._match_valid_url(url)
        video_id = mobj.group('id')
        mytv = mobj.group('mytv') is not None

        webpage = self._download_webpage(url, video_id)

        # Strip the site-name suffix from the og:title.
        title = re.sub(r'( - 高清正版在线观看)? - 搜狐视频$', '', self._og_search_title(webpage))

        vid = self._html_search_regex(
            r'var vid ?= ?["\'](\d+)["\']',
            webpage, 'video path')
        vid_data = _fetch_data(vid, mytv)
        if vid_data['play'] != 1:
            if vid_data.get('status') == 12:
                raise ExtractorError(
                    f'{self.IE_NAME} said: There\'s something wrong in the video.',
                    expected=True)
            else:
                self.raise_geo_restricted(
                    f'{self.IE_NAME} said: The video is only licensed to users in Mainland China.')

        # One metadata blob per available quality; reuse the already-fetched
        # blob when its vid matches the page's vid.
        formats_json = {}
        for format_id in ('nor', 'high', 'super', 'ori', 'h2644k', 'h2654k'):
            vid_id = vid_data['data'].get(f'{format_id}Vid')
            if not vid_id:
                continue
            vid_id = str(vid_id)
            formats_json[format_id] = vid_data if vid == vid_id else _fetch_data(vid_id, mytv)

        part_count = vid_data['data']['totalBlocks']

        playlist = []
        for i in range(part_count):
            formats = []
            for format_id, format_data in formats_json.items():
                allot = format_data['allot']

                data = format_data['data']
                clip_url = traverse_obj(data, (('clipsURL', 'mp4PlayUrl'), i, {url_or_none}), get_all=False)
                if not clip_url:
                    raise ExtractorError(f'Unable to extract url for clip {i}')
                su = data['su']

                # 'newflv.sohu.ccgslb.net' is a placeholder host; keep asking
                # the allot server (passing back the reported CDN id) until a
                # real URL comes back, up to 5 retries.
                video_url = 'newflv.sohu.ccgslb.net'
                cdn_id = None
                retries = 0

                while 'newflv.sohu.ccgslb.net' in video_url:
                    params = {
                        'prot': 9,
                        'file': clip_url,
                        'new': su[i],
                        'prod': 'h5n',
                        'rb': 1,
                    }

                    if cdn_id is not None:
                        params['idc'] = cdn_id

                    download_note = f'Downloading {format_id} video URL part {i + 1} of {part_count}'

                    if retries > 0:
                        download_note += f' (retry #{retries})'
                    part_info = self._parse_json(self._download_webpage(
                        f'http://{allot}/?{urllib.parse.urlencode(params)}',
                        video_id, download_note), video_id)

                    video_url = part_info['url']
                    cdn_id = part_info.get('nid')

                    retries += 1
                    if retries > 5:
                        raise ExtractorError('Failed to get video URL')

                formats.append({
                    'url': video_url,
                    'format_id': format_id,
                    'filesize': int_or_none(
                        try_get(data, lambda x: x['clipsBytes'][i])),
                    'width': int_or_none(data.get('width')),
                    'height': int_or_none(data.get('height')),
                    'fps': int_or_none(data.get('fps')),
                })

            playlist.append({
                'id': f'{video_id}_part{i + 1}',
                'title': title,
                'duration': vid_data['data']['clipsDuration'][i],
                'formats': formats,
            })

        if len(playlist) == 1:
            # Single-part video: flatten to a plain entry.
            info = playlist[0]
            info['id'] = video_id
        else:
            info = {
                '_type': 'multi_video',
                'entries': playlist,
                'id': video_id,
                'title': title,
                'duration': traverse_obj(vid_data, ('data', 'totalDuration', {float_or_none})),
            }

        if mytv:
            publish_time = unified_timestamp(self._search_regex(
                r'publishTime:\s*["\'](\d+-\d+-\d+ \d+:\d+)["\']', webpage,
                'publish time', fatal=False))
        else:
            publish_time = traverse_obj(vid_data, ('tv_application_time', {unified_timestamp}))

        return {
            # Published times are in CST (UTC+8); shift to UTC.
            'timestamp': publish_time - 8 * 3600 if publish_time else None,
            **traverse_obj(vid_data, {
                'alt_title': ('data', 'subName', {str}),
                'uploader': ('wm_data', 'wm_username', {str}),
                'thumbnail': ('data', 'coverImg', {url_or_none}),
                'tags': ('data', 'tag', {str.split}),
            }),
            **info,
        }


class SohuVIE(InfoExtractor):
    """Extractor for tv.sohu.com/v/<base64> URLs: decodes the path and delegates to SohuIE."""
    _VALID_URL = r'https?://tv\.sohu\.com/v/(?P<id>[\w=-]+)\.html(?:$|[#?])'

    _TESTS = [{
        'note': 'Multipart video',
        'url': 'https://tv.sohu.com/v/MjAyMzA2MTQvbjYwMTMxNTE5Mi5zaHRtbA==.html',
        'info_dict': {
            'id': '601315192',
            'title': '《淬火丹心》第1集',
            'alt_title': '“点天灯”发生事故',
            'duration': 2701.692,
            'timestamp': 1686758040,
            'upload_date': '20230614',
            'thumbnail': 'http://photocdn.tv.sohu.com/img/20230614/vrsa_hor_1686738763256_454010551.jpg',
        },
        'playlist_mincount': 9,
        'skip': 'Only available in China',
    }, {
        'url': 'https://tv.sohu.com/v/dXMvMjMyNzk5ODg5Lzc4NjkzNDY0LnNodG1s.html',
        'info_dict': {
            'id': '78693464',
            'ext': 'mp4',
            'title': '【爱范品】第31期:MWC见不到的奇葩手机',
            'uploader': '爱范儿视频',
            'duration': 213,
            'timestamp': 1425519600,
            'upload_date': '20150305',
            'thumbnail': 'http://e3f49eaa46b57.cdn.sohucs.com//group1/M10/83/FA/MTAuMTAuODguODA=/6_14cbccdde5eg104SysCutcloud_78693464_7_0b.jpg',
            'tags': ['爱范儿', '爱范品', 'MWC', '手机'],
        },
    }, {
        'note': 'Multipart video',
        'url': 'https://tv.sohu.com/v/dXMvMjQyNTYyMTYzLzc4OTEwMzM5LnNodG1s.html?src=pl',
        'info_dict': {
            'id': '78910339',
            'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
            'uploader': '小苍cany',
            'duration': 744.0,
            'timestamp': 1426269360,
            'upload_date': '20150313',
            'thumbnail': 'http://e3f49eaa46b57.cdn.sohucs.com//group1/M11/89/57/MTAuMTAuODguODA=/6_14cea022a1dg102SysCutcloud_78910339_8_0b.jpg',
            'tags': ['小苍MM', '英雄联盟', '实战秘籍'],
        },
        'playlist_mincount': 3,
    }]

    def _real_extract(self, url):
        """Decode the base64 URL path and hand off to SohuIE."""
        encoded_id = self._match_id(url)
        path = base64.urlsafe_b64decode(encoded_id).decode()
        # Paths like '20130724/n382479172.shtml' live on tv.sohu.com; user
        # uploads live on my.tv.sohu.com.
        subdomain = 'tv' if re.match(r'\d+/n\d+\.shtml', path) else 'my.tv'
        return self.url_result(urljoin(f'http://{subdomain}.sohu.com/', path), SohuIE)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/playwire.py
yt_dlp/extractor/playwire.py
from .common import InfoExtractor
from ..utils import (
    dict_get,
    float_or_none,
)


class PlaywireIE(InfoExtractor):
    """Extractor for Playwire-hosted players (config/cdn.playwire.com)."""
    _VALID_URL = r'https?://(?:config|cdn)\.playwire\.com(?:/v2)?/(?P<publisher_id>\d+)/(?:videos/v2|embed|config)/(?P<id>\d+)'
    _EMBED_REGEX = [r'<script[^>]+data-config=(["\'])(?P<url>(?:https?:)?//config\.playwire\.com/.+?)\1']

    _TESTS = [{
        'url': 'http://config.playwire.com/14907/videos/v2/3353705/player.json',
        'md5': 'e6398701e3595888125729eaa2329ed9',
        'info_dict': {
            'id': '3353705',
            'ext': 'mp4',
            'title': 'S04_RM_UCL_Rus',
            'thumbnail': r're:^https?://.*\.png$',
            'duration': 145.94,
        },
        'skip': 'Invalid URL',
    }, {
        # m3u8 in f4m
        'url': 'http://config.playwire.com/21772/videos/v2/4840492/zeus.json',
        'info_dict': {
            'id': '4840492',
            'ext': 'mp4',
            'title': 'ITV EL SHOW FULL',
        },
        'skip': 'Invalid URL',
    }, {
        # Multiple resolutions while bitrates missing
        'url': 'http://cdn.playwire.com/11625/embed/85228.html',
        'only_matching': True,
    }, {
        'url': 'http://config.playwire.com/12421/videos/v2/3389892/zeus.json',
        'only_matching': True,
    }, {
        'url': 'http://cdn.playwire.com/v2/12342/config/1532636.json',
        'only_matching': True,
    }]
    _WEBPAGE_TESTS = [{
        'url': 'https://www.cinemablend.com/new/First-Joe-Dirt-2-Trailer-Teaser-Stupid-Greatness-70874.html',
        'info_dict': {
            'id': '3519514',
            'ext': 'mp4',
            'title': 'Joe Dirt 2 Beautiful Loser Teaser Trailer',
        },
        'skip': 'Site no longer embeds Playwire',
    }]

    def _real_extract(self, url):
        """Fetch the zeus.json player config and extract F4M-described formats."""
        mobj = self._match_valid_url(url)
        publisher_id, video_id = mobj.group('publisher_id'), mobj.group('id')

        player = self._download_json(
            f'http://config.playwire.com/{publisher_id}/videos/v2/{video_id}/zeus.json',
            video_id)

        title = player['settings']['title']
        duration = float_or_none(player.get('duration'), 1000)

        content = player['content']
        thumbnail = content.get('poster')

        src = content['media']['f4m']

        formats = self._extract_f4m_formats(src, video_id, m3u8_id='hls')
        for a_format in formats:
            # When neither bitrate nor resolution is known, fall back to a
            # crude HD/SD quality hint derived from the URL.
            if not dict_get(a_format, ['tbr', 'width', 'height']):
                a_format['quality'] = 1 if '-hd.' in a_format['url'] else 0

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/euscreen.py
yt_dlp/extractor/euscreen.py
from .common import InfoExtractor
from ..utils import (
    js_to_json,
    parse_duration,
)


class EUScreenIE(InfoExtractor):
    """Extractor for euscreen.eu archive items (two-step LouServlet handshake)."""
    _VALID_URL = r'https?://(?:www\.)?euscreen\.eu/item.html\?id=(?P<id>[^&?$/]+)'

    _TESTS = [{
        'url': 'https://euscreen.eu/item.html?id=EUS_0EBCBF356BFC4E12A014023BA41BD98C',
        'info_dict': {
            'id': 'EUS_0EBCBF356BFC4E12A014023BA41BD98C',
            'ext': 'mp4',
            'title': "L'effondrement du stade du Heysel",
            'alt_title': 'Collapse of the Heysel Stadium',
            'duration': 318.0,
            'description': 'md5:f0ffffdfce6821139357a1b8359d6152',
            'series': 'JA2 DERNIERE',
            'episode': '-',
            'uploader': 'INA / France',
            'thumbnail': 'http://images3.noterik.com/domain/euscreenxl/user/eu_ina/video/EUS_0EBCBF356BFC4E12A014023BA41BD98C/image.jpg',
        },
        'params': {'skip_download': True},
    }]

    # Static fsxml capabilities blob the server expects as the first request
    # body of its handshake (mimics a browser session registration).
    _payload = b'<fsxml><screen><properties><screenId>-1</screenId></properties><capabilities id="1"><properties><platform>Win32</platform><appcodename>Mozilla</appcodename><appname>Netscape</appname><appversion>5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36</appversion><useragent>Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36</useragent><cookiesenabled>true</cookiesenabled><screenwidth>784</screenwidth><screenheight>758</screenheight><orientation>undefined</orientation><smt_browserid>Sat, 07 Oct 2021 08:56:50 GMT</smt_browserid><smt_sessionid>1633769810758</smt_sessionid></properties></capabilities></screen></fsxml>'

    def _real_extract(self, url):
        """Perform the two-request handshake, then parse the JS setVideo/setData blobs."""
        video_id = self._match_id(url)
        # First request returns the arguments that must be echoed back (with
        # 'screenid' re-cased) to obtain the item's JS payload.
        args_for_js_request = self._download_webpage(
            'https://euscreen.eu/lou/LouServlet/domain/euscreenxl/html5application/euscreenxlitem',
            video_id, data=self._payload, query={'actionlist': 'itempage', 'id': video_id})
        info_js = self._download_webpage(
            'https://euscreen.eu/lou/LouServlet/domain/euscreenxl/html5application/euscreenxlitem',
            video_id, data=args_for_js_request.replace('screenid', 'screenId').encode())
        video_json = self._parse_json(
            self._search_regex(r'setVideo\(({.+})\)\(\$end\$\)put', info_js, 'Video JSON'),
            video_id, transform_source=js_to_json)
        meta_json = self._parse_json(
            self._search_regex(r'setData\(({.+})\)\(\$end\$\)', info_js, 'Metadata JSON'),
            video_id, transform_source=js_to_json)
        formats = [{
            'url': source['src'],
        } for source in video_json.get('sources', [])]

        return {
            'id': video_id,
            'title': meta_json.get('originalTitle'),
            'alt_title': meta_json.get('title'),
            'duration': parse_duration(meta_json.get('duration')),
            'description': '{}\n{}'.format(meta_json.get('summaryOriginal', ''), meta_json.get('summaryEnglish', '')),
            'series': meta_json.get('series') or meta_json.get('seriesEnglish'),
            'episode': meta_json.get('episodeNumber'),
            'uploader': meta_json.get('provider'),
            'thumbnail': meta_json.get('screenshot') or video_json.get('screenshot'),
            'formats': formats,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tv24ua.py
yt_dlp/extractor/tv24ua.py
import re

from .common import InfoExtractor
from ..utils import determine_ext, js_to_json, mimetype2ext, traverse_obj


class TV24UAVideoIE(InfoExtractor):
    # Extractor for the 24tv.ua showPlayer.do embed player; also matched as an
    # <iframe> embed inside arbitrary pages via _EMBED_REGEX.
    _VALID_URL = r'https?://24tv\.ua/news/showPlayer\.do.*?(?:\?|&)objectId=(?P<id>\d+)'
    _EMBED_REGEX = [rf'<iframe[^>]+?src=["\']?(?P<url>{_VALID_URL})["\']?']
    IE_NAME = '24tv.ua'
    _TESTS = [{
        'url': 'https://24tv.ua/news/showPlayer.do?objectId=2074790&videoUrl=2022/07/2074790&w=640&h=360',
        'info_dict': {
            'id': '2074790',
            'ext': 'mp4',
            'title': 'У Харкові ворожа ракета прилетіла в будинок, де слухали пісні про "офіцерів-росіян"',
            'thumbnail': r're:^https?://.*\.jpe?g',
        },
    }, {
        'url': 'https://24tv.ua/news/showPlayer.do?videoUrl=2022/07/2074790&objectId=2074790&w=640&h=360',
        'only_matching': True,
    }]
    _WEBPAGE_TESTS = [
        {
            # iframe embed created from share menu.
            'url': 'data:text/html,%3Ciframe%20src=%22https://24tv.ua/news/showPlayer.do?objectId=1886193&videoUrl'
                   '=2022/03/1886193&w=640&h=360%22%20width=%22640%22%20height=%22360%22%20frameborder=%220%22'
                   '%20scrolling=%22no%22%3E%3C/iframe%3E',
            'info_dict': {
                'id': '1886193',
                'ext': 'mp4',
                'title': 'Росіяни руйнують Бородянку на Київщині та стріляють з літаків по мешканцях: шокуючі фото',
                'thumbnail': r're:^https?://.*\.jpe?g',
            },
        },
        {
            'url': 'https://24tv.ua/vipalyuyut-nashi-mista-sela-dsns-pokazali-motoroshni-naslidki_n1883966',
            'info_dict': {
                'id': '1883966',
                'ext': 'mp4',
                'title': 'Випалюють наші міста та села, – моторошні наслідки обстрілів на Чернігівщині',
                'thumbnail': r're:^https?://.*\.jpe?g',
            },
            'params': {'allowed_extractors': ['Generic', '24tv.ua']},
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        formats = []
        subtitles = {}
        # The page may contain several `vPlayConfig.sources = [...]` JS
        # assignments; each is parsed leniently (fatal=False, errnote='')
        # so one malformed blob does not abort extraction.
        for j in re.findall(r'vPlayConfig\.sources\s*=\s*(?P<json>\[{\s*(?s:.+?)\s*}])', webpage):
            sources = self._parse_json(j, video_id, fatal=False, ignore_extra=True, transform_source=js_to_json, errnote='') or []
            for source in sources:
                if mimetype2ext(traverse_obj(source, 'type')) == 'm3u8':
                    # HLS master playlist: expand into per-variant formats.
                    f, s = self._extract_m3u8_formats_and_subtitles(source['src'], video_id)
                    formats.extend(f)
                    self._merge_subtitles(subtitles, s)
                else:
                    # Plain progressive source.
                    formats.append({
                        'url': source['src'],
                        'ext': determine_ext(source['src']),
                    })
        # Prefer the player config's poster; fall back to og:image below.
        thumbnail = traverse_obj(
            self._search_json(
                r'var\s*vPlayConfig\s*=\s*', webpage, 'thumbnail',
                video_id, default=None, transform_source=js_to_json), 'poster')
        return {
            'id': video_id,
            'formats': formats,
            'subtitles': subtitles,
            'thumbnail': thumbnail or self._og_search_thumbnail(webpage),
            'title': self._generic_title('', webpage),
            'description': self._og_search_description(webpage, default=None),
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/mir24tv.py
yt_dlp/extractor/mir24tv.py
from .common import InfoExtractor
from ..utils import parse_qs, url_or_none
from ..utils.traversal import require, traverse_obj


class Mir24TvIE(InfoExtractor):
    """Extractor for mir24.tv news articles with an embedded HLS player."""

    IE_NAME = 'mir24.tv'
    _VALID_URL = r'https?://(?:www\.)?mir24\.tv/news/(?P<id>[0-9]+)/[^/?#]+'
    _TESTS = [{
        'url': 'https://mir24.tv/news/16635210/dni-kultury-rossii-otkrylis-v-uzbekistane.-na-prazdnichnom-koncerte-vystupili-zvezdy-rossijskoj-estrada',
        'info_dict': {
            'id': '16635210',
            'title': 'Дни культуры России открылись в Узбекистане. На праздничном концерте выступили звезды российской эстрады',
            'ext': 'mp4',
            'thumbnail': r're:https://images\.mir24\.tv/.+\.jpg',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # The site blocks default clients, hence impersonation.
        webpage = self._download_webpage(url, video_id, impersonate=True)

        # Locate the embedded player iframe; its query string carries the
        # stream URL in the (possibly repeated) `source` parameter.
        player_url = self._search_regex(
            r'<iframe\b[^>]+\bsrc=["\'](https?://mir24\.tv/players/[^"\']+)',
            webpage, 'iframe URL')

        # Take the last `source` value, normalize protocol-relative URLs,
        # and fail loudly if nothing usable remains.
        hls_url = traverse_obj(player_url, (
            {parse_qs}, 'source', -1, {self._proto_relative_url}, {url_or_none}, {require('m3u8 URL')}))

        fmts, subs = self._extract_m3u8_formats_and_subtitles(hls_url, video_id, 'mp4', m3u8_id='hls')

        page_title = self._og_search_title(webpage, default=None) or self._html_extract_title(webpage)
        return {
            'id': video_id,
            'title': page_title,
            'thumbnail': self._og_search_thumbnail(webpage, default=None),
            'formats': fmts,
            'subtitles': subs,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/safari.py
yt_dlp/extractor/safari.py
import json
import re
import urllib.parse

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    update_url_query,
)


class SafariBaseIE(InfoExtractor):
    # Shared base for the O'Reilly (formerly Safari Books Online) extractors:
    # holds API constants and the interactive login flow.
    _LOGIN_URL = 'https://learning.oreilly.com/accounts/login/'
    _NETRC_MACHINE = 'safari'

    _API_BASE = 'https://learning.oreilly.com/api/v1'
    _API_FORMAT = 'json'

    # Class-level flag: set to True once any instance completes the login
    # check, and consulted by SafariIE to request a Kaltura session.
    LOGGED_IN = False

    def _perform_login(self, username, password):
        _, urlh = self._download_webpage_handle(
            'https://learning.oreilly.com/accounts/login-check/', None,
            'Downloading login page')

        def is_logged(urlh):
            # A logged-in session is redirected to the /home/ page.
            return 'learning.oreilly.com/home/' in urlh.url

        if is_logged(urlh):
            self.LOGGED_IN = True
            return

        # Not logged in: the login-check redirected us to a login page whose
        # query string carries the post-login destination in `next`.
        redirect_url = urlh.url
        parsed_url = urllib.parse.urlparse(redirect_url)
        qs = urllib.parse.parse_qs(parsed_url.query)
        next_uri = urllib.parse.urljoin(
            'https://api.oreilly.com', qs['next'][0])

        # expected_status=400: the API answers 400 with a JSON body on bad
        # credentials, which we want to parse rather than treat as an error.
        auth, urlh = self._download_json_handle(
            'https://www.oreilly.com/member/auth/login/', None, 'Logging in',
            data=json.dumps({
                'email': username,
                'password': password,
                'redirect_uri': next_uri,
            }).encode(), headers={
                'Content-Type': 'application/json',
                'Referer': redirect_url,
            }, expected_status=400)

        credentials = auth.get('credentials')
        if (not auth.get('logged_in') and not auth.get('redirect_uri')
                and credentials):
            # `credentials` holds the server's human-readable failure reason.
            raise ExtractorError(
                f'Unable to login: {credentials}', expected=True)

        # oreilly serves two same instances of the following cookies
        # in Set-Cookie header and expects first one to be actually set
        for cookie in ('groot_sessionid', 'orm-jwt', 'orm-rt'):
            self._apply_first_set_cookie_header(urlh, cookie)

        # Follow the post-login redirect and re-run the logged-in check.
        _, urlh = self._download_webpage_handle(
            auth.get('redirect_uri') or next_uri, None, 'Completing login')

        if is_logged(urlh):
            self.LOGGED_IN = True
            return

        raise ExtractorError('Unable to log in')


class SafariIE(SafariBaseIE):
    IE_NAME = 'safari'
    IE_DESC = 'safaribooksonline.com online video'
    _VALID_URL = r'''(?x)
                        https?://
                            (?:www\.)?(?:safaribooksonline|(?:learning\.)?oreilly)\.com/
                            (?:
                                library/view/[^/]+/(?P<course_id>[^/]+)/(?P<part>[^/?\#&]+)\.html|
                                videos/[^/]+/[^/]+/(?P<reference_id>[^-]+-[^/?\#&]+)
                            )
                    '''

    _TESTS = [{
        'url': 'https://www.safaribooksonline.com/library/view/hadoop-fundamentals-livelessons/9780133392838/part00.html',
        'md5': 'dcc5a425e79f2564148652616af1f2a3',
        'info_dict': {
            'id': '0_qbqx90ic',
            'ext': 'mp4',
            'title': 'Introduction to Hadoop Fundamentals LiveLessons',
            'timestamp': 1437758058,
            'upload_date': '20150724',
            'uploader_id': 'stork',
        },
    }, {
        # non-digits in course id
        'url': 'https://www.safaribooksonline.com/library/view/create-a-nodejs/100000006A0210/part00.html',
        'only_matching': True,
    }, {
        'url': 'https://www.safaribooksonline.com/library/view/learning-path-red/9780134664057/RHCE_Introduction.html',
        'only_matching': True,
    }, {
        'url': 'https://www.safaribooksonline.com/videos/python-programming-language/9780134217314/9780134217314-PYMC_13_00',
        'only_matching': True,
    }, {
        'url': 'https://learning.oreilly.com/videos/hadoop-fundamentals-livelessons/9780133392838/9780133392838-00_SeriesIntro',
        'only_matching': True,
    }, {
        'url': 'https://www.oreilly.com/library/view/hadoop-fundamentals-livelessons/9780133392838/00_SeriesIntro.html',
        'only_matching': True,
    }]

    # Default Kaltura partner/uiconf ids used when the page does not
    # declare its own via data-* attributes.
    _PARTNER_ID = '1926081'
    _UICONF_ID = '29375172'

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)

        reference_id = mobj.group('reference_id')
        if reference_id:
            # /videos/ URLs embed the Kaltura reference id directly.
            video_id = reference_id
            partner_id = self._PARTNER_ID
            ui_id = self._UICONF_ID
        else:
            # /library/view/ URLs: fetch the page (following redirects, which
            # may land on a /videos/ URL) and scrape the Kaltura ids.
            video_id = '{}-{}'.format(mobj.group('course_id'), mobj.group('part'))

            webpage, urlh = self._download_webpage_handle(url, video_id)

            mobj = re.match(self._VALID_URL, urlh.url)
            reference_id = mobj.group('reference_id')
            if not reference_id:
                reference_id = self._search_regex(
                    r'data-reference-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
                    webpage, 'kaltura reference id', group='id')
            partner_id = self._search_regex(
                r'data-partner-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
                webpage, 'kaltura widget id', default=self._PARTNER_ID,
                group='id')
            ui_id = self._search_regex(
                r'data-ui-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
                webpage, 'kaltura uiconf id', default=self._UICONF_ID,
                group='id')

        query = {
            'wid': f'_{partner_id}',
            'uiconf_id': ui_id,
            'flashvars[referenceId]': reference_id,
        }

        if self.LOGGED_IN:
            # A Kaltura session token ('ks') unlocks subscriber-only videos;
            # failure to fetch it is non-fatal (free videos still work).
            kaltura_session = self._download_json(
                f'{self._API_BASE}/player/kaltura_session/?reference_id={reference_id}',
                video_id, 'Downloading kaltura session JSON',
                'Unable to download kaltura session JSON', fatal=False,
                headers={'Accept': 'application/json'})
            if kaltura_session:
                session = kaltura_session.get('session')
                if session:
                    query['flashvars[ks]'] = session

        # Delegate actual format extraction to the Kaltura extractor.
        return self.url_result(update_url_query(
            'https://cdnapisec.kaltura.com/html5/html5lib/v2.37.1/mwEmbedFrame.php', query),
            'Kaltura')


class SafariApiIE(SafariBaseIE):
    IE_NAME = 'safari:api'
    _VALID_URL = r'https?://(?:www\.)?(?:safaribooksonline|(?:learning\.)?oreilly)\.com/api/v1/book/(?P<course_id>[^/]+)/chapter(?:-content)?/(?P<part>[^/?#&]+)\.html'

    _TESTS = [{
        'url': 'https://www.safaribooksonline.com/api/v1/book/9780133392838/chapter/part00.html',
        'only_matching': True,
    }, {
        'url': 'https://www.safaribooksonline.com/api/v1/book/9780134664057/chapter/RHCE_Introduction.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # Resolve a chapter API URL to its web URL and hand off to SafariIE.
        mobj = self._match_valid_url(url)
        part = self._download_json(
            url, '{}/{}'.format(mobj.group('course_id'), mobj.group('part')),
            'Downloading part JSON')
        web_url = part['web_url']
        if 'library/view' in web_url:
            # Rewrite the library URL into the /videos/ form that carries the
            # reference id, built from the part's natural_key pair.
            web_url = web_url.replace('library/view', 'videos')
            natural_keys = part['natural_key']
            web_url = f'{web_url.rsplit("/", 1)[0]}/{natural_keys[0]}-{natural_keys[1][:-5]}'
        return self.url_result(web_url, SafariIE.ie_key())


class SafariCourseIE(SafariBaseIE):
    IE_NAME = 'safari:course'
    IE_DESC = 'safaribooksonline.com online courses'

    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            (?:www\.)?(?:safaribooksonline|(?:learning\.)?oreilly)\.com/
                            (?:
                                library/view/[^/]+|
                                api/v1/book|
                                videos/[^/]+
                            )|
                            techbus\.safaribooksonline\.com
                        )
                        /(?P<id>[^/]+)
                    '''

    _TESTS = [{
        'url': 'https://www.safaribooksonline.com/library/view/hadoop-fundamentals-livelessons/9780133392838/',
        'info_dict': {
            'id': '9780133392838',
            'title': 'Hadoop Fundamentals LiveLessons',
        },
        'playlist_count': 22,
        'skip': 'Requires safaribooksonline account credentials',
    }, {
        'url': 'https://www.safaribooksonline.com/api/v1/book/9781449396459/?override_format=json',
        'only_matching': True,
    }, {
        'url': 'http://techbus.safaribooksonline.com/9780134426365',
        'only_matching': True,
    }, {
        'url': 'https://www.safaribooksonline.com/videos/python-programming-language/9780134217314',
        'only_matching': True,
    }, {
        'url': 'https://learning.oreilly.com/videos/hadoop-fundamentals-livelessons/9780133392838',
        'only_matching': True,
    }, {
        'url': 'https://www.oreilly.com/library/view/hadoop-fundamentals-livelessons/9780133392838/',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Defer to the more specific extractors when they match, since this
        # course pattern overlaps with single-video and API URLs.
        return (False if SafariIE.suitable(url) or SafariApiIE.suitable(url)
                else super().suitable(url))

    def _real_extract(self, url):
        course_id = self._match_id(url)

        course_json = self._download_json(
            f'{self._API_BASE}/book/{course_id}/?override_format={self._API_FORMAT}',
            course_id, 'Downloading course JSON')

        if 'chapters' not in course_json:
            raise ExtractorError(
                f'No chapters found for course {course_id}', expected=True)

        # One playlist entry per chapter, each resolved via SafariApiIE.
        entries = [
            self.url_result(chapter, SafariApiIE.ie_key())
            for chapter in course_json['chapters']]

        course_title = course_json['title']

        return self.playlist_result(entries, course_id, course_title)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/folketinget.py
yt_dlp/extractor/folketinget.py
import urllib.parse

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    parse_duration,
    parse_iso8601,
    xpath_text,
)


class FolketingetIE(InfoExtractor):
    """Extractor for webtv recordings of the Danish parliament (ft.dk)."""

    IE_DESC = 'Folketinget (ft.dk; Danish parliament)'
    _VALID_URL = r'https?://(?:www\.)?ft\.dk/webtv/video/[^?#]*?\.(?P<id>[0-9]+)\.aspx'
    _TEST = {
        'url': 'http://www.ft.dk/webtv/video/20141/eru/td.1165642.aspx?as=1#player',
        'md5': '6269e8626fa1a891bf5369b386ae996a',
        'info_dict': {
            'id': '1165642',
            'ext': 'mp4',
            'title': 'Åbent samråd i Erhvervsudvalget',
            'description': 'Åbent samråd med erhvervs- og vækstministeren om regeringens politik på teleområdet',
            'view_count': int,
            'width': 768,
            'height': 432,
            'tbr': 928000,
            'timestamp': 1416493800,
            'upload_date': '20141120',
            'duration': 3960,
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        title = self._og_search_title(webpage)
        description = self._html_search_regex(
            r'(?s)<div class="video-item-agenda"[^>]*>(.*?)<',
            webpage, 'description', fatal=False)

        # The Flash player embed's query string carries the URL of the
        # metadata XML document that describes the streams.
        player_params = urllib.parse.parse_qs(self._search_regex(
            r'<embed src="http://ft\.arkena\.tv/flash/ftplayer\.swf\?([^"]+)"',
            webpage, 'player params'))
        metadata_url = player_params['xml'][0]
        doc = self._download_xml(metadata_url, video_id)

        # One format per <stream> node, keyed by its declared bitrate.
        formats = []
        for stream in doc.findall('.//streams/stream'):
            bitrate = stream.attrib['bitrate']
            formats.append({
                'format_id': bitrate,
                'url': xpath_text(stream, './url', fatal=True),
                'tbr': int_or_none(bitrate),
            })

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'description': description,
            'timestamp': parse_iso8601(xpath_text(doc, './/date')),
            'width': int_or_none(xpath_text(doc, './/width')),
            'height': int_or_none(xpath_text(doc, './/height')),
            'duration': parse_duration(xpath_text(doc, './/duration')),
            'view_count': int_or_none(xpath_text(doc, './/views')),
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/spreaker.py
yt_dlp/extractor/spreaker.py
import itertools

from .common import InfoExtractor
from ..utils import (
    filter_dict,
    float_or_none,
    int_or_none,
    parse_qs,
    str_or_none,
    try_get,
    unified_timestamp,
    url_or_none,
)
from ..utils.traversal import traverse_obj


def _extract_episode(data, episode_id=None):
    # Build a yt-dlp info dict from a Spreaker API episode object.
    # `episode_id` overrides data['episode_id'] when the caller already
    # knows it (e.g. from the URL).
    title = data['title']
    download_url = data['download_url']

    series = try_get(data, lambda x: x['show']['title'], str)
    uploader = try_get(data, lambda x: x['author']['fullname'], str)

    # Collect all available artwork sizes, largest variant first.
    thumbnails = []
    for image in ('image_original', 'image_medium', 'image'):
        image_url = url_or_none(data.get(f'{image}_url'))
        if image_url:
            thumbnails.append({'url': image_url})

    def stats(key):
        # Counts live either at top level ('<key>s_count') or nested
        # under 'stats' depending on the API endpoint.
        return int_or_none(try_get(
            data,
            (lambda x: x[f'{key}s_count'],
             lambda x: x['stats'][f'{key}s'])))

    def duration(key):
        # API reports milliseconds.
        return float_or_none(data.get(key), scale=1000)

    return {
        'id': str(episode_id or data['episode_id']),
        'url': download_url,
        'display_id': data.get('permalink'),
        'title': title,
        'description': data.get('description'),
        'timestamp': unified_timestamp(data.get('published_at')),
        'uploader': uploader,
        'uploader_id': str_or_none(data.get('author_id')),
        'creator': uploader,
        'duration': duration('duration') or duration('length'),
        'view_count': stats('play'),
        'like_count': stats('like'),
        'comment_count': stats('message'),
        # Spreaker episodes are always served as MP3 downloads.
        'format': 'MPEG Layer 3',
        'format_id': 'mp3',
        'container': 'mp3',
        'ext': 'mp3',
        'thumbnails': thumbnails,
        'series': series,
        # Attribute results from SpreakerShowIE entries to SpreakerIE.
        'extractor_key': SpreakerIE.ie_key(),
    }


class SpreakerIE(InfoExtractor):
    _VALID_URL = [
        r'https?://api\.spreaker\.com/(?:(?:download/)?episode|v2/episodes)/(?P<id>\d+)',
        r'https?://(?:www\.)?spreaker\.com/episode/[^#?/]*?(?P<id>\d+)/?(?:[?#]|$)',
    ]
    _TESTS = [{
        'url': 'https://api.spreaker.com/episode/12534508',
        'info_dict': {
            'id': '12534508',
            'display_id': 'swm-ep15-how-to-market-your-music-part-2',
            'ext': 'mp3',
            'title': 'EP:15 | Music Marketing (Likes) - Part 2',
            'description': 'md5:0588c43e27be46423e183076fa071177',
            'timestamp': 1502250336,
            'upload_date': '20170809',
            'uploader': 'SWM',
            'uploader_id': '9780658',
            'duration': 1063.42,
            'view_count': int,
            'like_count': int,
            'comment_count': int,
            'series': 'Success With Music | SWM',
            'thumbnail': 'https://d3wo5wojvuv7l.cloudfront.net/t_square_limited_160/images.spreaker.com/original/777ce4f96b71b0e1b7c09a5e625210e3.jpg',
            'creators': ['SWM'],
        },
    }, {
        'url': 'https://api.spreaker.com/download/episode/12534508/swm_ep15_how_to_market_your_music_part_2.mp3',
        'only_matching': True,
    }, {
        'url': 'https://api.spreaker.com/v2/episodes/12534508?export=episode_segments',
        'only_matching': True,
    }, {
        'note': 'episode',
        'url': 'https://www.spreaker.com/episode/grunge-music-origins-the-raw-sound-that-defined-a-generation--60269615',
        'info_dict': {
            'id': '60269615',
            'display_id': 'grunge-music-origins-the-raw-sound-that-',
            'ext': 'mp3',
            'title': 'Grunge Music Origins - The Raw Sound that Defined a Generation',
            'description': str,
            'timestamp': 1717468905,
            'upload_date': '20240604',
            'uploader': 'Katie Brown 2',
            'uploader_id': '17733249',
            'duration': 818.83,
            'view_count': int,
            'like_count': int,
            'comment_count': int,
            'series': '90s Grunge',
            'thumbnail': 'https://d3wo5wojvuv7l.cloudfront.net/t_square_limited_160/images.spreaker.com/original/bb0d4178f7cf57cc8786dedbd9c5d969.jpg',
            'creators': ['Katie Brown 2'],
        },
    }, {
        'url': 'https://www.spreaker.com/episode/60269615',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        episode_id = self._match_id(url)
        # Forward an optional access `key` query parameter to the API
        # (needed for private episodes).
        data = self._download_json(
            f'https://api.spreaker.com/v2/episodes/{episode_id}', episode_id,
            query=traverse_obj(parse_qs(url), {'key': ('key', 0)}))['response']['episode']
        return _extract_episode(data, episode_id)


class SpreakerShowIE(InfoExtractor):
    _VALID_URL = [
        r'https?://api\.spreaker\.com/show/(?P<id>\d+)',
        r'https?://(?:www\.)?spreaker\.com/podcast/[\w-]+--(?P<id>[\d]+)',
        r'https?://(?:www\.)?spreaker\.com/show/(?P<id>\d+)/episodes/feed',
    ]
    _TESTS = [{
        'url': 'https://api.spreaker.com/show/4652058',
        'info_dict': {
            'id': '4652058',
        },
        'playlist_mincount': 118,
    }, {
        'url': 'https://www.spreaker.com/podcast/health-wealth--5918323',
        'info_dict': {
            'id': '5918323',
        },
        'playlist_mincount': 60,
    }, {
        'url': 'https://www.spreaker.com/show/5887186/episodes/feed',
        'info_dict': {
            'id': '5887186',
        },
        'playlist_mincount': 290,
    }]

    def _entries(self, show_id, key=None):
        # Page through the show's episode list (100 per page) and yield a
        # fully-populated info dict per episode; stops on the pager's
        # declared last page or on any malformed response.
        for page_num in itertools.count(1):
            episodes = self._download_json(
                f'https://api.spreaker.com/show/{show_id}/episodes',
                show_id, note=f'Downloading JSON page {page_num}', query=filter_dict({
                    'page': page_num,
                    'max_per_page': 100,
                    'key': key,
                }))
            pager = try_get(episodes, lambda x: x['response']['pager'], dict)
            if not pager:
                break
            results = pager.get('results')
            if not results or not isinstance(results, list):
                break
            for result in results:
                if not isinstance(result, dict):
                    continue
                yield _extract_episode(result)
            if page_num == pager.get('last_page'):
                break

    def _real_extract(self, url):
        show_id = self._match_id(url)
        key = traverse_obj(parse_qs(url), ('key', 0))
        return self.playlist_result(self._entries(show_id, key), playlist_id=show_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/mdr.py
yt_dlp/extractor/mdr.py
import urllib.parse

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    int_or_none,
    join_nonempty,
    parse_duration,
    parse_iso8601,
    url_or_none,
    xpath_text,
)


class MDRIE(InfoExtractor):
    # Extractor for MDR.DE (German public broadcaster). Metadata and format
    # URLs come from a per-video `-avCustom.xml` document referenced by the
    # page's player config.
    IE_DESC = 'MDR.DE'
    _VALID_URL = r'https?://(?:www\.)?mdr\.de/(?:.*)/[a-z-]+-?(?P<id>\d+)(?:_.+?)?\.html'

    _GEO_COUNTRIES = ['DE']

    _TESTS = [{
        # MDR regularly deletes its videos
        'url': 'http://www.mdr.de/fakt/video189002.html',
        'only_matching': True,
    }, {
        # audio
        'url': 'http://www.mdr.de/kultur/audio1312272_zc-15948bad_zs-86171fdd.html',
        'md5': '64c4ee50f0a791deb9479cd7bbe9d2fa',
        'info_dict': {
            'id': '1312272',
            'ext': 'mp3',
            'title': 'Feuilleton vom 30. Oktober 2015',
            'duration': 250,
            'uploader': 'MITTELDEUTSCHER RUNDFUNK',
        },
        'skip': '404 not found',
    }, {
        # audio with alternative playerURL pattern
        'url': 'http://www.mdr.de/kultur/videos-und-audios/audio-radio/operation-mindfuck-robert-wilson100.html',
        'info_dict': {
            'id': '100',
            'ext': 'mp4',
            'title': 'Feature: Operation Mindfuck - Robert Anton Wilson',
            'duration': 3239,
            'uploader': 'MITTELDEUTSCHER RUNDFUNK',
        },
        'skip': '404 not found',
    }, {
        'url': 'http://www.mdr.de/mediathek/mdr-videos/a/video-1334.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        # The player config embeds the XML URL with escaped slashes (\/),
        # hence the .replace() after matching.
        data_url = self._search_regex(
            r'(?:dataURL|playerXml(?:["\'])?)\s*:\s*(["\'])(?P<url>.+?-avCustom\.xml)\1',
            webpage, 'data url', group='url').replace(r'\/', '/')

        doc = self._download_xml(
            urllib.parse.urljoin(url, data_url), video_id)

        title = xpath_text(doc, ['./title', './broadcast/broadcastName'], 'title', fatal=True)

        type_ = xpath_text(doc, './type', default=None)

        formats = []
        # Track URLs already handled: the same stream may appear under
        # several <*Url> elements across assets.
        processed_urls = []
        for asset in doc.findall('./assets/asset'):
            for source in (
                    'download',
                    'progressiveDownload',
                    'dynamicHttpStreamingRedirector',
                    'adaptiveHttpStreamingRedirector'):
                url_el = asset.find(f'./{source}Url')
                if url_el is None:
                    continue

                video_url = url_or_none(url_el.text)
                if not video_url or video_url in processed_urls:
                    continue

                processed_urls.append(video_url)

                ext = determine_ext(video_url)
                if ext == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        video_url, video_id, 'mp4', entry_protocol='m3u8_native',
                        quality=1, m3u8_id='HLS', fatal=False))
                elif ext == 'f4m':
                    formats.extend(self._extract_f4m_formats(
                        video_url + '?hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id,
                        quality=1, f4m_id='HDS', fatal=False))
                else:
                    # Progressive source: bitrates are given in kbit/s in the
                    # XML, converted here to bit/s via the 1000 scale.
                    media_type = xpath_text(asset, './mediaType', 'media type', default='MP4')
                    vbr = int_or_none(xpath_text(asset, './bitrateVideo', 'vbr'), 1000)
                    abr = int_or_none(xpath_text(asset, './bitrateAudio', 'abr'), 1000)
                    filesize = int_or_none(xpath_text(asset, './fileSize', 'file size'))

                    f = {
                        'url': video_url,
                        'format_id': join_nonempty(media_type, vbr or abr),
                        'filesize': filesize,
                        'abr': abr,
                        'vbr': vbr,
                    }

                    if vbr:
                        # Only video formats carry dimensions.
                        f.update({
                            'width': int_or_none(xpath_text(asset, './frameWidth', 'width')),
                            'height': int_or_none(xpath_text(asset, './frameHeight', 'height')),
                        })

                    if type_ == 'audio':
                        f['vcodec'] = 'none'

                    formats.append(f)

        description = xpath_text(doc, './broadcast/broadcastDescription', 'description')
        # Prefer the exact broadcast date, then start/end dates as fallbacks.
        timestamp = parse_iso8601(
            xpath_text(
                doc, [
                    './broadcast/broadcastDate',
                    './broadcast/broadcastStartDate',
                    './broadcast/broadcastEndDate'],
                'timestamp', default=None))
        duration = parse_duration(xpath_text(doc, './duration', 'duration'))
        uploader = xpath_text(doc, './rights', 'uploader')

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'timestamp': timestamp,
            'duration': duration,
            'uploader': uploader,
            'formats': formats,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/espn.py
yt_dlp/extractor/espn.py
import base64 import json import re import urllib.parse from .adobepass import AdobePassIE from .common import InfoExtractor from ..utils import ( determine_ext, dict_get, int_or_none, traverse_obj, unified_strdate, unified_timestamp, ) class ESPNIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// (?: (?: (?: (?:(?:\w+\.)+)?espn\.go| (?:www\.)?espn )\.com/ (?: (?: video/(?:clip|iframe/twitter)| ) (?: .*?\?.*?\bid=| /_/id/ )| [^/]+/video/ ) )| (?:www\.)espnfc\.(?:com|us)/(?:video/)?[^/]+/\d+/video/ ) (?P<id>\d+) ''' _TESTS = [{ 'url': 'http://espn.go.com/video/clip?id=10365079', 'info_dict': { 'id': '10365079', 'ext': 'mp4', 'title': '30 for 30 Shorts: Judging Jewell', 'description': 'md5:39370c2e016cb4ecf498ffe75bef7f0f', 'timestamp': 1390936111, 'upload_date': '20140128', 'duration': 1302, 'thumbnail': r're:https://.+\.jpg', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://broadband.espn.go.com/video/clip?id=18910086', 'info_dict': { 'id': '18910086', 'ext': 'mp4', 'title': 'Kyrie spins around defender for two', 'description': 'md5:2b0f5bae9616d26fba8808350f0d2b9b', 'timestamp': 1489539155, 'upload_date': '20170315', }, 'params': { 'skip_download': True, }, 'expected_warnings': ['Unable to download f4m manifest'], }, { 'url': 'http://nonredline.sports.espn.go.com/video/clip?id=19744672', 'only_matching': True, }, { 'url': 'https://cdn.espn.go.com/video/clip/_/id/19771774', 'only_matching': True, }, { 'url': 'http://www.espn.com/video/clip?id=10365079', 'only_matching': True, }, { 'url': 'http://www.espn.com/video/clip/_/id/17989860', 'only_matching': True, }, { 'url': 'https://espn.go.com/video/iframe/twitter/?cms=espn&id=10365079', 'only_matching': True, }, { 'url': 'http://www.espnfc.us/video/espn-fc-tv/86/video/3319154/nashville-unveiled-as-the-newest-club-in-mls', 'only_matching': True, }, { 'url': 'http://www.espnfc.com/english-premier-league/23/video/3324163/premier-league-in-90-seconds-golden-tweets', 'only_matching': True, }, { 'url': 
'http://www.espn.com/espnw/video/26066627/arkansas-gibson-completes-hr-cycle-four-innings', 'only_matching': True, }, { 'url': 'http://www.espn.com/watch/player?id=19141491', 'only_matching': True, }, { 'url': 'http://www.espn.com/watch/player?bucketId=257&id=19505875', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) clip = self._download_json( f'http://api-app.espn.com/v1/video/clips/{video_id}', video_id)['videos'][0] title = clip['headline'] format_urls = set() formats = [] def traverse_source(source, base_source_id=None): for src_id, src_item in source.items(): if src_id == 'alert': continue elif isinstance(src_item, str): extract_source(src_item, base_source_id) elif isinstance(src_item, dict): traverse_source( src_item, f'{base_source_id}-{src_id}' if base_source_id else src_id) def extract_source(source_url, source_id=None): if source_url in format_urls: return format_urls.add(source_url) ext = determine_ext(source_url) if ext == 'smil': formats.extend(self._extract_smil_formats( source_url, video_id, fatal=False)) elif ext == 'f4m': formats.extend(self._extract_f4m_formats( source_url, video_id, f4m_id=source_id, fatal=False)) elif ext == 'm3u8': formats.extend(self._extract_m3u8_formats( source_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id=source_id, fatal=False)) else: f = { 'url': source_url, 'format_id': source_id, } mobj = re.search(r'(\d+)p(\d+)_(\d+)k\.', source_url) if mobj: f.update({ 'height': int(mobj.group(1)), 'fps': int(mobj.group(2)), 'tbr': int(mobj.group(3)), }) if source_id == 'mezzanine': f['quality'] = 1 formats.append(f) links = clip.get('links', {}) traverse_source(links.get('source', {})) traverse_source(links.get('mobile', {})) description = clip.get('caption') or clip.get('description') thumbnail = clip.get('thumbnail') duration = int_or_none(clip.get('duration')) timestamp = unified_timestamp(clip.get('originalPublishDate')) return { 'id': video_id, 'title': title, 'description': 
description, 'thumbnail': thumbnail, 'timestamp': timestamp, 'duration': duration, 'formats': formats, } class ESPNArticleIE(InfoExtractor): _VALID_URL = r'https?://(?:espn\.go|(?:www\.)?espn)\.com/(?:[^/]+/)*(?P<id>[^/]+)' _TESTS = [{ 'url': 'http://espn.go.com/nba/recap?gameId=400793786', 'only_matching': True, }, { 'url': 'http://espn.go.com/blog/golden-state-warriors/post/_/id/593/how-warriors-rapidly-regained-a-winning-edge', 'only_matching': True, }, { 'url': 'http://espn.go.com/sports/endurance/story/_/id/12893522/dzhokhar-tsarnaev-sentenced-role-boston-marathon-bombings', 'only_matching': True, }, { 'url': 'http://espn.go.com/nba/playoffs/2015/story/_/id/12887571/john-wall-washington-wizards-no-swelling-left-hand-wrist-game-5-return', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if (ESPNIE.suitable(url) or WatchESPNIE.suitable(url)) else super().suitable(url) def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) video_id = self._search_regex( r'class=(["\']).*?video-play-button.*?\1[^>]+data-id=["\'](?P<id>\d+)', webpage, 'video id', group='id') return self.url_result( f'http://espn.go.com/video/clip?id={video_id}', ESPNIE.ie_key()) class FiveThirtyEightIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?fivethirtyeight\.com/features/(?P<id>[^/?#]+)' _TEST = { 'url': 'http://fivethirtyeight.com/features/how-the-6-8-raiders-can-still-make-the-playoffs/', 'info_dict': { 'id': '56032156', 'ext': 'flv', 'title': 'FiveThirtyEight: The Raiders can still make the playoffs', 'description': 'Neil Paine breaks down the simplest scenario that will put the Raiders into the playoffs at 8-8.', }, 'params': { 'skip_download': True, }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) embed_url = self._search_regex( r'<iframe[^>]+src=["\'](https?://fivethirtyeight\.abcnews\.go\.com/video/embed/\d+/\d+)', webpage, 'embed url') 
return self.url_result(embed_url, 'AbcNewsVideo') class ESPNCricInfoIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?espncricinfo\.com/(?:cricket-)?videos?/[^#$&?/]+-(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.espncricinfo.com/video/finch-chasing-comes-with-risks-despite-world-cup-trend-1289135', 'info_dict': { 'id': '1289135', 'ext': 'mp4', 'title': 'Finch: Chasing comes with \'risks\' despite World Cup trend', 'description': 'md5:ea32373303e25efbb146efdfc8a37829', 'upload_date': '20211113', 'duration': 96, }, 'params': {'skip_download': True}, }, { 'url': 'https://www.espncricinfo.com/cricket-videos/daryl-mitchell-mitchell-santner-is-one-of-the-best-white-ball-spinners-india-vs-new-zealand-1356225', 'info_dict': { 'id': '1356225', 'ext': 'mp4', 'description': '"Santner has done it for a long time for New Zealand - we\'re lucky to have him"', 'upload_date': '20230128', 'title': 'Mitchell: \'Santner is one of the best white-ball spinners at the moment\'', 'duration': 87, }, 'params': {'skip_download': 'm3u8'}, }] def _real_extract(self, url): video_id = self._match_id(url) data_json = self._download_json( f'https://hs-consumer-api.espncricinfo.com/v1/pages/video/video-details?videoId={video_id}', video_id)['video'] formats, subtitles = [], {} for item in data_json.get('playbacks') or []: if item.get('type') == 'HLS' and item.get('url'): m3u8_frmts, m3u8_subs = self._extract_m3u8_formats_and_subtitles(item['url'], video_id) formats.extend(m3u8_frmts) subtitles = self._merge_subtitles(subtitles, m3u8_subs) elif item.get('type') == 'AUDIO' and item.get('url'): formats.append({ 'url': item['url'], 'vcodec': 'none', }) return { 'id': video_id, 'title': data_json.get('title'), 'description': data_json.get('summary'), 'upload_date': unified_strdate(dict_get(data_json, ('publishedAt', 'recordedAt'))), 'duration': data_json.get('duration'), 'formats': formats, 'subtitles': subtitles, } class WatchESPNIE(AdobePassIE): _VALID_URL = 
r'https?://(?:www\.)?espn\.com/(?:watch|espnplus)/player/_/id/(?P<id>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})' _TESTS = [{ 'url': 'https://www.espn.com/watch/player/_/id/11ce417a-6ac9-42b6-8a15-46aeb9ad5710', 'info_dict': { 'id': '11ce417a-6ac9-42b6-8a15-46aeb9ad5710', 'ext': 'mp4', 'title': 'Abilene Chrstn vs. Texas Tech', 'duration': 14166, 'thumbnail': 'https://s.secure.espncdn.com/stitcher/artwork/collections/media/11ce417a-6ac9-42b6-8a15-46aeb9ad5710/16x9.jpg?timestamp=202407252343&showBadge=true&cb=12&package=ESPN_PLUS', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.espn.com/watch/player/_/id/90a2c85d-75e0-4b1e-a878-8e428a3cb2f3', 'info_dict': { 'id': '90a2c85d-75e0-4b1e-a878-8e428a3cb2f3', 'ext': 'mp4', 'title': 'UC Davis vs. California', 'duration': 9547, 'thumbnail': 'https://artwork.api.espn.com/artwork/collections/media/90a2c85d-75e0-4b1e-a878-8e428a3cb2f3/default?width=640&apikey=1ngjw23osgcis1i1vbj96lmfqs', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.espn.com/watch/player/_/id/c4313bbe-95b5-4bb8-b251-ac143ea0fc54', 'info_dict': { 'id': 'c4313bbe-95b5-4bb8-b251-ac143ea0fc54', 'ext': 'mp4', 'title': 'The College Football Show', 'duration': 3639, 'thumbnail': 'https://artwork.api.espn.com/artwork/collections/media/c4313bbe-95b5-4bb8-b251-ac143ea0fc54/default?width=640&apikey=1ngjw23osgcis1i1vbj96lmfqs', }, 'params': { 'skip_download': True, }, }] _API_KEY = 'ZXNwbiZicm93c2VyJjEuMC4w.ptUt7QxsteaRruuPmGZFaJByOoqKvDP2a5YkInHrc7c' _SOFTWARE_STATEMENT = 
'eyJhbGciOiJSUzI1NiJ9.eyJzdWIiOiIyZGJmZWM4My03OWE1LTQyNzEtYTVmZC04NTZjYTMxMjRjNjMiLCJuYmYiOjE1NDAyMTI3NjEsImlzcyI6ImF1dGguYWRvYmUuY29tIiwiaWF0IjoxNTQwMjEyNzYxfQ.yaK3r4AI2uLVvsyN1GLzqzgzRlxMPtasSaiYYBV0wIstqih5tvjTmeoLmi8Xy9Kp_U7Md-bOffwiyK3srHkpUkhhwXLH2x6RPjmS1tPmhaG7-3LBcHTf2ySPvXhVf7cN4ngldawK4tdtLtsw6rF_JoZE2yaC6XbS2F51nXSFEDDnOQWIHEQRG3aYAj-38P2CLGf7g-Yfhbp5cKXeksHHQ90u3eOO4WH0EAjc9oO47h33U8KMEXxJbvjV5J8Va2G2fQSgLDZ013NBI3kQnE313qgqQh2feQILkyCENpB7g-TVBreAjOaH1fU471htSoGGYepcAXv-UDtpgitDiLy7CQ' def _call_bamgrid_api(self, path, video_id, payload=None, headers={}): if 'Authorization' not in headers: headers['Authorization'] = f'Bearer {self._API_KEY}' parse = urllib.parse.urlencode if path == 'token' else json.dumps return self._download_json( f'https://espn.api.edge.bamgrid.com/{path}', video_id, headers=headers, data=parse(payload).encode()) def _real_extract(self, url): video_id = self._match_id(url) cdn_data = self._download_json( f'https://watch-cdn.product.api.espn.com/api/product/v3/watchespn/web/playback/event?id={video_id}', video_id) video_data = cdn_data['playbackState'] # ESPN+ subscription required, through cookies if 'DTC' in video_data.get('sourceId'): cookie = self._get_cookies(url).get('ESPN-ONESITE.WEB-PROD.token') if not cookie: self.raise_login_required(method='cookies') jwt = self._search_regex(r'=([^|]+)\|', cookie.value, 'cookie jwt') id_token = self._download_json( 'https://registerdisney.go.com/jgc/v6/client/ESPN-ONESITE.WEB-PROD/guest/refresh-auth', None, 'Refreshing token', headers={'Content-Type': 'application/json'}, data=json.dumps({ 'refreshToken': json.loads(base64.urlsafe_b64decode(f'{jwt}==='))['refresh_token'], }).encode())['data']['token']['id_token'] assertion = self._call_bamgrid_api( 'devices', video_id, headers={'Content-Type': 'application/json; charset=UTF-8'}, payload={ 'deviceFamily': 'android', 'applicationRuntime': 'android', 'deviceProfile': 'tv', 'attributes': {}, })['assertion'] token = self._call_bamgrid_api( 
'token', video_id, payload={ 'subject_token': assertion, 'subject_token_type': 'urn:bamtech:params:oauth:token-type:device', 'platform': 'android', 'grant_type': 'urn:ietf:params:oauth:grant-type:token-exchange', })['access_token'] assertion = self._call_bamgrid_api( 'accounts/grant', video_id, payload={'id_token': id_token}, headers={ 'Authorization': token, 'Content-Type': 'application/json; charset=UTF-8', })['assertion'] token = self._call_bamgrid_api( 'token', video_id, payload={ 'subject_token': assertion, 'subject_token_type': 'urn:bamtech:params:oauth:token-type:account', 'platform': 'android', 'grant_type': 'urn:ietf:params:oauth:grant-type:token-exchange', })['access_token'] playback = self._download_json( video_data['videoHref'].format(scenario='browser~ssai'), video_id, headers={ 'Accept': 'application/vnd.media-service+json; version=5', 'Authorization': token, }) m3u8_url, headers = playback['stream']['complete'][0]['url'], {'authorization': token} # No login required elif video_data.get('sourceId') == 'ESPN_FREE': asset = self._download_json( f'https://watch.auth.api.espn.com/video/auth/media/{video_id}/asset?apikey=uiqlbgzdwuru14v627vdusswb', video_id) m3u8_url, headers = asset['stream'], {} # TV Provider required else: resource = self._get_mvpd_resource('espn1', video_data['name'], video_id, None) auth = self._extract_mvpd_auth(url, video_id, 'ESPN', resource, self._SOFTWARE_STATEMENT).encode() asset = self._download_json( f'https://watch.auth.api.espn.com/video/auth/media/{video_id}/asset?apikey=uiqlbgzdwuru14v627vdusswb', video_id, data=f'adobeToken={urllib.parse.quote_plus(base64.b64encode(auth))}&drmSupport=HLS'.encode()) m3u8_url, headers = asset['stream'], {} formats, subtitles = self._extract_m3u8_formats_and_subtitles(m3u8_url, video_id, 'mp4', m3u8_id='hls') return { 'id': video_id, 'duration': traverse_obj(cdn_data, ('tracking', 'duration')), 'title': video_data.get('name'), 'formats': formats, 'subtitles': subtitles, 'thumbnail': 
video_data.get('posterHref'), 'http_headers': headers, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/dacast.py
yt_dlp/extractor/dacast.py
import functools import hashlib import re import time from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, classproperty, float_or_none, parse_qs, traverse_obj, url_or_none, ) class DacastBaseIE(InfoExtractor): _URL_TYPE = None @classproperty def _VALID_URL(cls): return fr'https?://iframe\.dacast\.com/{cls._URL_TYPE}/(?P<user_id>[\w-]+)/(?P<id>[\w-]+)' @classproperty def _EMBED_REGEX(cls): return [rf'<iframe[^>]+\bsrc=["\'](?P<url>{cls._VALID_URL})'] _API_INFO_URL = 'https://playback.dacast.com/content/info' @classmethod def _get_url_from_id(cls, content_id): user_id, media_id = content_id.split(f'-{cls._URL_TYPE}-') return f'https://iframe.dacast.com/{cls._URL_TYPE}/{user_id}/{media_id}' @classmethod def _extract_embed_urls(cls, url, webpage): yield from super()._extract_embed_urls(url, webpage) for content_id in re.findall( rf'<script[^>]+\bsrc=["\']https://player\.dacast\.com/js/player\.js\?contentId=([\w-]+-{cls._URL_TYPE}-[\w-]+)["\']', webpage): yield cls._get_url_from_id(content_id) class DacastVODIE(DacastBaseIE): _URL_TYPE = 'vod' _TESTS = [{ 'url': 'https://iframe.dacast.com/vod/acae82153ef4d7a7344ae4eaa86af534/1c6143e3-5a06-371d-8695-19b96ea49090', 'info_dict': { 'id': '1c6143e3-5a06-371d-8695-19b96ea49090', 'ext': 'mp4', 'uploader_id': 'acae82153ef4d7a7344ae4eaa86af534', 'title': '2_4||Adnexal mass characterisation: O-RADS US and MRI||N. 
Bharwani, London/UK', 'thumbnail': 'https://universe-files.dacast.com/26137208-5858-65c1-5e9a-9d6b6bd2b6c2', }, 'params': {'skip_download': 'm3u8'}, }, { # /uspaes/ in hls_url 'url': 'https://iframe.dacast.com/vod/f9823fc6-faba-b98f-0d00-4a7b50a58c5b/348c5c84-b6af-4859-bb9d-1d01009c795b', 'info_dict': { 'id': '348c5c84-b6af-4859-bb9d-1d01009c795b', 'ext': 'mp4', 'title': 'pl1-edyta-rubas-211124.mp4', 'uploader_id': 'f9823fc6-faba-b98f-0d00-4a7b50a58c5b', 'thumbnail': 'https://universe-files.dacast.com/4d0bd042-a536-752d-fc34-ad2fa44bbcbb.png', }, }] _WEBPAGE_TESTS = [{ 'url': 'https://www.dacast.com/support/knowledgebase/how-can-i-embed-a-video-on-my-website/', 'info_dict': { 'id': 'b6674869-f08a-23c5-1d7b-81f5309e1a90', 'ext': 'mp4', 'title': '4-HowToEmbedVideo.mp4', 'uploader_id': '3b67c4a9-3886-4eb1-d0eb-39b23b14bef3', 'thumbnail': 'https://universe-files.dacast.com/d26ab48f-a52a-8783-c42e-a90290ba06b6.png', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://gist.githubusercontent.com/bashonly/4ad249ef2910346fbdf3809b220f11ee/raw/87349778d4af1a80b1fcc3beb9c88108de5858f5/dacast_embeds.html', 'info_dict': { 'id': 'e7df418e-a83b-7a7f-7b5e-1a667981e8fa', 'ext': 'mp4', 'title': 'Evening Service 2-5-23', 'uploader_id': '943bb1ab3c03695ba85330d92d6d226e', 'thumbnail': 'https://universe-files.dacast.com/337472b3-e92c-2ea4-7eb7-5700da477f67', }, 'params': {'skip_download': 'm3u8'}, }] @functools.cached_property def _usp_signing_secret(self): player_js = self._download_webpage( 'https://player.dacast.com/js/player.js', None, 'Downloading player JS') # Rotates every so often, but hardcode a fallback in case of JS change/breakage before rotation return self._search_regex( r'\bUSP_SIGNING_SECRET\s*=\s*(["\'])(?P<secret>(?:(?!\1).)+)', player_js, 'usp signing secret', group='secret', fatal=False) or 'hGDtqMKYVeFdofrAfFmBcrsakaZELajI' def _real_extract(self, url): user_id, video_id = self._match_valid_url(url).group('user_id', 'id') query = { 'contentId': 
f'{user_id}-vod-{video_id}', 'provider': 'universe', **traverse_obj(url, ({parse_qs}, 'uss_token', {'signedKey': -1})), } info = self._download_json(self._API_INFO_URL, video_id, query=query, fatal=False) access = self._download_json( 'https://playback.dacast.com/content/access', video_id, note='Downloading access JSON', query=query, expected_status=403) error = access.get('error') if error in ('Broadcaster has been blocked', 'Content is offline'): raise ExtractorError(error, expected=True) elif error: raise ExtractorError(f'Dacast API says "{error}"') hls_url = access['hls'] hls_aes = {} if 'DRM_EXT' in hls_url: self.report_drm(video_id) elif '/uspaes/' in hls_url: # Ref: https://player.dacast.com/js/player.js ts = int(time.time()) signature = hashlib.sha1( f'{10413792000 - ts}{ts}{self._usp_signing_secret}'.encode()).digest().hex() hls_aes['uri'] = f'https://keys.dacast.com/uspaes/{video_id}.key?s={signature}&ts={ts}' for retry in self.RetryManager(): try: formats = self._extract_m3u8_formats(hls_url, video_id, 'mp4', m3u8_id='hls') except ExtractorError as e: # CDN will randomly respond with 403 if isinstance(e.cause, HTTPError) and e.cause.status == 403: retry.error = e continue raise return { 'id': video_id, 'uploader_id': user_id, 'formats': formats, 'hls_aes': hls_aes or None, **traverse_obj(info, ('contentInfo', { 'title': 'title', 'duration': ('duration', {float_or_none}), 'thumbnail': ('thumbnailUrl', {url_or_none}), })), } class DacastPlaylistIE(DacastBaseIE): _URL_TYPE = 'playlist' _TESTS = [{ 'url': 'https://iframe.dacast.com/playlist/943bb1ab3c03695ba85330d92d6d226e/b632eb053cac17a9c9a02bcfc827f2d8', 'playlist_mincount': 28, 'info_dict': { 'id': 'b632eb053cac17a9c9a02bcfc827f2d8', 'title': 'Archive Sermons', }, }] _WEBPAGE_TESTS = [{ 'url': 'https://gist.githubusercontent.com/bashonly/7efb606f49f3c6e07ea0327de5a661d1/raw/05a16eac830245ea301fb0a585023bec71e6093c/dacast_playlist_embed.html', 'playlist_mincount': 28, 'info_dict': { 'id': 
'b632eb053cac17a9c9a02bcfc827f2d8', 'title': 'Archive Sermons', }, }] def _real_extract(self, url): user_id, playlist_id = self._match_valid_url(url).group('user_id', 'id') info = self._download_json( self._API_INFO_URL, playlist_id, note='Downloading playlist JSON', query={ 'contentId': f'{user_id}-playlist-{playlist_id}', 'provider': 'universe', })['contentInfo'] def entries(info): for video in traverse_obj(info, ('features', 'playlist', 'contents', lambda _, v: v['id'])): yield self.url_result( DacastVODIE._get_url_from_id(video['id']), DacastVODIE, video['id'], video.get('title')) return self.playlist_result(entries(info), playlist_id, info.get('title'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/jamendo.py
yt_dlp/extractor/jamendo.py
import hashlib import random from .common import InfoExtractor from ..networking import HEADRequest from ..utils import ( clean_html, int_or_none, try_get, urlhandle_detect_ext, ) class JamendoIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// (?: licensing\.jamendo\.com/[^/]+| (?:www\.)?jamendo\.com ) /track/(?P<id>[0-9]+)(?:/(?P<display_id>[^/?#&]+))? ''' _TESTS = [{ 'url': 'https://www.jamendo.com/track/196219/stories-from-emona-i', 'md5': '6e9e82ed6db98678f171c25a8ed09ffd', 'info_dict': { 'id': '196219', 'display_id': 'stories-from-emona-i', 'ext': 'flac', # 'title': 'Maya Filipič - Stories from Emona I', 'title': 'Stories from Emona I', 'artists': ['Maya Filipič'], 'album': 'Between two worlds', 'track': 'Stories from Emona I', 'duration': 210, 'thumbnail': 'https://usercontent.jamendo.com?type=album&id=29279&width=300&trackid=196219', 'timestamp': 1217438117, 'upload_date': '20080730', 'license': 'by-nc-nd', 'view_count': int, 'like_count': int, 'average_rating': int, 'tags': ['piano', 'peaceful', 'newage', 'strings', 'upbeat'], }, }, { 'url': 'https://licensing.jamendo.com/en/track/1496667/energetic-rock', 'only_matching': True, }] def _call_api(self, resource, resource_id, fatal=True): path = f'/api/{resource}s' rand = str(random.random()) return self._download_json( 'https://www.jamendo.com' + path, resource_id, fatal=fatal, query={ 'id[]': resource_id, }, headers={ 'X-Jam-Call': f'${hashlib.sha1((path + rand).encode()).hexdigest()}*{rand}~', })[0] def _real_extract(self, url): track_id, display_id = self._match_valid_url(url).groups() # webpage = self._download_webpage( # 'https://www.jamendo.com/track/' + track_id, track_id) # models = self._parse_json(self._html_search_regex( # r"data-bundled-models='([^']+)", # webpage, 'bundled models'), track_id) # track = models['track']['models'][0] track = self._call_api('track', track_id) title = track_name = track['name'] # get_model = lambda x: try_get(models, lambda y: y[x]['models'][0], dict) or {} # artist = 
get_model('artist') # artist_name = artist.get('name') # if artist_name: # title = '%s - %s' % (artist_name, title) # album = get_model('album') artist = self._call_api('artist', track.get('artistId'), fatal=False) album = self._call_api('album', track.get('albumId'), fatal=False) formats = [{ 'url': f'https://{sub_domain}.jamendo.com/?trackid={track_id}&format={format_id}&from=app-97dab294', 'format_id': format_id, 'ext': ext, 'quality': quality, } for quality, (format_id, sub_domain, ext) in enumerate(( ('mp31', 'mp3l', 'mp3'), ('mp32', 'mp3d', 'mp3'), ('ogg1', 'ogg', 'ogg'), ('flac', 'flac', 'flac'), ))] urls = [] thumbnails = [] for covers in (track.get('cover') or {}).values(): for cover_id, cover_url in covers.items(): if not cover_url or cover_url in urls: continue urls.append(cover_url) urlh = self._request_webpage( HEADRequest(cover_url), track_id, 'Checking thumbnail extension', errnote=False, fatal=False) if not urlh: continue size = int_or_none(cover_id.lstrip('size')) thumbnails.append({ 'id': cover_id, 'ext': urlhandle_detect_ext(urlh, default='jpg'), 'url': cover_url, 'width': size, 'height': size, }) tags = [] for tag in (track.get('tags') or []): tag_name = tag.get('name') if not tag_name: continue tags.append(tag_name) stats = track.get('stats') or {} video_license = track.get('licenseCC') or [] return { 'id': track_id, 'display_id': display_id, 'thumbnails': thumbnails, 'title': title, 'description': track.get('description'), 'duration': int_or_none(track.get('duration')), 'artist': artist.get('name'), 'track': track_name, 'album': album.get('name'), 'formats': formats, 'license': '-'.join(video_license) if video_license else None, 'timestamp': int_or_none(track.get('dateCreated')), 'view_count': int_or_none(stats.get('listenedAll')), 'like_count': int_or_none(stats.get('favorited')), 'average_rating': int_or_none(stats.get('averageNote')), 'tags': tags, } class JamendoAlbumIE(JamendoIE): # XXX: Do not subclass from concrete IE _VALID_URL = 
r'https?://(?:www\.)?jamendo\.com/album/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://www.jamendo.com/album/121486/duck-on-cover', 'info_dict': { 'id': '121486', 'title': 'Duck On Cover', 'description': 'md5:c2920eaeef07d7af5b96d7c64daf1239', }, 'playlist': [{ 'md5': 'e1a2fcb42bda30dfac990212924149a8', 'info_dict': { 'id': '1032333', 'ext': 'flac', 'title': 'Warmachine', 'artist': 'Shearer', 'track': 'Warmachine', 'timestamp': 1368089771, 'upload_date': '20130509', 'view_count': int, 'thumbnail': 'https://usercontent.jamendo.com?type=album&id=121486&width=300&trackid=1032333', 'duration': 190, 'license': 'by', 'album': 'Duck On Cover', 'average_rating': 4, 'tags': ['rock', 'drums', 'bass', 'world', 'punk', 'neutral'], 'like_count': int, }, }, { 'md5': '1f358d7b2f98edfe90fd55dac0799d50', 'info_dict': { 'id': '1032330', 'ext': 'flac', 'title': 'Without Your Ghost', 'artist': 'Shearer', 'track': 'Without Your Ghost', 'timestamp': 1368089771, 'upload_date': '20130509', 'duration': 192, 'tags': ['rock', 'drums', 'bass', 'world', 'punk'], 'album': 'Duck On Cover', 'thumbnail': 'https://usercontent.jamendo.com?type=album&id=121486&width=300&trackid=1032330', 'view_count': int, 'average_rating': 4, 'license': 'by', 'like_count': int, }, }], 'params': { 'playlistend': 2, }, }] def _real_extract(self, url): album_id = self._match_id(url) album = self._call_api('album', album_id) album_name = album.get('name') entries = [] for track in (album.get('tracks') or []): track_id = track.get('id') if not track_id: continue track_id = str(track_id) entries.append({ '_type': 'url_transparent', 'url': 'https://www.jamendo.com/track/' + track_id, 'ie_key': JamendoIE.ie_key(), 'id': track_id, 'album': album_name, }) return self.playlist_result( entries, album_id, album_name, clean_html(try_get(album, lambda x: x['description']['en'], str)))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ku6.py
yt_dlp/extractor/ku6.py
from .common import InfoExtractor class Ku6IE(InfoExtractor): _VALID_URL = r'https?://v\.ku6\.com/show/(?P<id>[a-zA-Z0-9\-\_]+)(?:\.)*html' _TEST = { 'url': 'http://v.ku6.com/show/JG-8yS14xzBr4bCn1pu0xw...html', 'md5': '01203549b9efbb45f4b87d55bdea1ed1', 'info_dict': { 'id': 'JG-8yS14xzBr4bCn1pu0xw', 'ext': 'f4v', 'title': 'techniques test', }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._html_search_regex( r'<h1 title=.*>(.*?)</h1>', webpage, 'title') data_url = f'http://v.ku6.com/fetchVideo4Player/{video_id}.html' json_data = self._download_json(data_url, video_id) download_url = json_data['data']['f'] return { 'id': video_id, 'title': title, 'url': download_url, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/outsidetv.py
yt_dlp/extractor/outsidetv.py
from .common import InfoExtractor class OutsideTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?outsidetv\.com/(?:[^/]+/)*?play/[a-zA-Z0-9]{8}/\d+/\d+/(?P<id>[a-zA-Z0-9]{8})' _TESTS = [{ 'url': 'http://www.outsidetv.com/category/snow/play/ZjQYboH6/1/10/Hdg0jukV/4', 'md5': '192d968fedc10b2f70ec31865ffba0da', 'info_dict': { 'id': 'Hdg0jukV', 'ext': 'mp4', 'title': 'Home - Jackson Ep 1 | Arbor Snowboards', 'description': 'md5:41a12e94f3db3ca253b04bb1e8d8f4cd', 'upload_date': '20181225', 'timestamp': 1545742800, }, }, { 'url': 'http://www.outsidetv.com/home/play/ZjQYboH6/1/10/Hdg0jukV/4', 'only_matching': True, }] def _real_extract(self, url): jw_media_id = self._match_id(url) return self.url_result( 'jwplatform:' + jw_media_id, 'JWPlatform', jw_media_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/icareus.py
yt_dlp/extractor/icareus.py
import re

from .common import InfoExtractor
from ..utils import (
    clean_html,
    determine_ext,
    get_element_by_class,
    int_or_none,
    merge_dicts,
    parse_bitrate,
    parse_resolution,
    remove_end,
    str_or_none,
    url_or_none,
    urlencode_postdata,
)


class IcareusIE(InfoExtractor):
    """Extractor for sites built on the Icareus video platform.

    All listed domains share the same player markup (`_icareus[...]`
    globals) and the same publishing-service JSON API, so a single
    extractor covers them.
    """
    # Domains known to run the Icareus suite; escaped and OR-ed into _VALID_URL
    _DOMAINS = '|'.join(map(re.escape, (
        'asahitv.fi',
        'helsinkikanava.fi',
        'hyvinvointitv.fi',
        'inez.fi',
        'permanto.fi',
        'suite.icareus.com',
        'videos.minifiddlers.org',
    )))
    _VALID_URL = rf'(?P<base_url>https?://(?:www\.)?(?:{_DOMAINS}))/[^?#]+/player/[^?#]+\?(?:[^#]+&)?(?:assetId|eventId)=(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.helsinkikanava.fi/fi_FI/web/helsinkikanava/player/vod?assetId=68021894',
        'md5': 'ca0b62ffc814a5411dfa6349cf5adb8a',
        'info_dict': {
            'id': '68021894',
            'ext': 'mp4',
            'title': 'Perheiden parhaaksi',
            'description': 'md5:295785ea408e5ac00708766465cc1325',
            'thumbnail': 'https://www.helsinkikanava.fi/image/image_gallery?img_id=68022501',
            'upload_date': '20200924',
            'timestamp': 1600938300,
        },
    }, {  # Recorded livestream
        'url': 'https://www.helsinkikanava.fi/fi/web/helsinkikanava/player/event/view?eventId=76241489',
        'md5': '014327e69dfa7b949fcc861f6d162d6d',
        'info_dict': {
            'id': '76258304',
            'ext': 'mp4',
            'title': 'Helsingin kaupungin ja HUSin tiedotustilaisuus koronaepidemiatilanteesta 24.11.2020',
            'description': 'md5:3129d041c6fbbcdc7fe68d9a938fef1c',
            'thumbnail': 'https://icareus-suite.secure2.footprint.net/image/image_gallery?img_id=76288630',
            'upload_date': '20201124',
            'timestamp': 1606206600,
        },
    }, {  # Non-m3u8 stream
        'url': 'https://suite.icareus.com/fi/web/westend-indians/player/vod?assetId=47567389',
        'md5': '72fc04ee971bbedc44405cdf16c990b6',
        'info_dict': {
            'id': '47567389',
            'ext': 'mp4',
            'title': 'Omatoiminen harjoittelu - Laukominen',
            'description': '',
            'thumbnail': 'https://suite.icareus.com/image/image_gallery?img_id=47568162',
            'upload_date': '20200319',
            'timestamp': 1584658080,
        },
    }, {
        'url': 'https://asahitv.fi/fi/web/asahi/player/vod?assetId=89415818',
        'only_matching': True,
    }, {
        'url': 'https://hyvinvointitv.fi/fi/web/hyvinvointitv/player/vod?assetId=89149730',
        'only_matching': True,
    }, {
        'url': 'https://inez.fi/fi/web/inez-media/player/vod?assetId=71328822',
        'only_matching': True,
    }, {
        'url': 'https://www.permanto.fi/fi/web/alfatv/player/vod?assetId=135497515',
        'only_matching': True,
    }, {
        'url': 'https://videos.minifiddlers.org/web/international-minifiddlers/player/vod?assetId=1982759',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        base_url, temp_id = self._match_valid_url(url).groups()
        webpage = self._download_webpage(url, temp_id)

        # The real asset ID may differ from the one in the URL (e.g. recorded
        # livestreams); read it from the page's _icareus globals instead.
        video_id = self._search_regex(r"_icareus\['itemId'\]\s*=\s*'(\d+)'", webpage, 'video_id')
        organization_id = self._search_regex(r"_icareus\['organizationId'\]\s*=\s*'(\d+)'", webpage, 'organization_id')

        # Ask the publishing service (URL also scraped from the page) for
        # playback URLs; the token comes from the same _icareus globals.
        assets = self._download_json(
            self._search_regex(r'var\s+publishingServiceURL\s*=\s*"(http[^"]+)";', webpage, 'api_base'),
            video_id, data=urlencode_postdata({
                'version': '03',
                'action': 'getAssetPlaybackUrls',
                'organizationId': organization_id,
                'assetId': video_id,
                'token': self._search_regex(r"_icareus\['token'\]\s*=\s*'([a-f0-9]+)'", webpage, 'icareus_token'),
            }))

        # Subtitle entries look like (_, description, url); the language tag
        # is the first word of the description with a trailing ':' stripped.
        subtitles = {
            remove_end(sdesc.split(' ')[0], ':'): [{'url': url_or_none(surl)}]
            for _, sdesc, surl in assets.get('subtitles') or []
        }

        # Audio-only renditions; bitrate is parsed from names like "... (128 k)"
        formats = [{
            'format': item.get('name'),
            'format_id': 'audio',
            'vcodec': 'none',
            'url': url_or_none(item['url']),
            'tbr': int_or_none(self._search_regex(
                r'\((\d+)\s*k\)', item.get('name') or '', 'audio bitrate', default=None)),
        } for item in assets.get('audio_urls') or [] if url_or_none(item.get('url'))]

        for item in assets.get('urls') or []:
            video_url = url_or_none(item.get('url'))
            if video_url is None:
                continue
            ext = determine_ext(video_url)
            if ext == 'm3u8':
                fmts, subs = self._extract_m3u8_formats_and_subtitles(
                    video_url, video_id, 'mp4', m3u8_id='hls', fatal=False)
                formats.extend(fmts)
                self._merge_subtitles(subs, target=subtitles)
            else:
                # Progressive rendition; bitrate/resolution are encoded in the name
                fmt = item.get('name')
                formats.append({
                    'url': video_url,
                    'format': fmt,
                    'tbr': parse_bitrate(fmt),
                    'format_id': str_or_none(item.get('id')),
                    **parse_resolution(fmt),
                })

        # Metadata fallback chain: JSON-LD -> getAsset API (needs a second
        # token scraped from the page) -> unpublished/recorded-event markup.
        info, token, live_title = self._search_json_ld(webpage, video_id, default={}), None, None
        if not info:
            token = self._search_regex(
                r'data\s*:\s*{action:"getAsset".*?token:\'([a-f0-9]+)\'}',
                webpage, 'token', default=None)
            if not token:
                live_title = get_element_by_class('unpublished-info-item future-event-title', webpage)

        if token:
            metadata = self._download_json(
                f'{base_url}/icareus-suite-api-portlet/publishing',
                video_id, fatal=False, data=urlencode_postdata({
                    'version': '03',
                    'action': 'getAsset',
                    'organizationId': organization_id,
                    'assetId': video_id,
                    'languageId': 'en_US',
                    'userId': '0',
                    'token': token,
                })) or {}
            info = {
                'title': metadata.get('name'),
                'description': metadata.get('description'),
                'timestamp': int_or_none(metadata.get('date'), scale=1000),  # ms -> s
                'duration': int_or_none(metadata.get('duration')),
                'thumbnail': url_or_none(metadata.get('thumbnailMedium')),
            }
        elif live_title:  # Recorded livestream
            info = {
                'title': live_title,
                'description': get_element_by_class('unpublished-info-item future-event-description', webpage),
                'timestamp': int_or_none(self._search_regex(
                    r'var startEvent\s*=\s*(\d+);', webpage, 'uploadDate', fatal=False), scale=1000),
            }

        thumbnails = info.get('thumbnails') or [{
            'url': url_or_none(info.get('thumbnail') or assets.get('thumbnail')),
        }]

        # merge_dicts lets info (JSON-LD/API metadata) override the defaults,
        # e.g. 'title': None is replaced by the real title when available.
        return merge_dicts({
            'id': video_id,
            'title': None,
            'formats': formats,
            'subtitles': subtitles,
            'description': clean_html(info.get('description')),
            'thumbnails': thumbnails if thumbnails[0]['url'] else None,
        }, info)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/scrippsnetworks.py
yt_dlp/extractor/scrippsnetworks.py
import hashlib
import json

from .anvato import AnvatoIE
from .aws import AWSIE
from .common import InfoExtractor
from ..utils import (
    smuggle_url,
    urlencode_postdata,
    xpath_text,
)


class ScrippsNetworksWatchIE(AWSIE):
    """Extractor for watch.geniuskitchen.com player pages.

    Performs an unauthenticated AWS Cognito -> STS credential exchange,
    calls the SNI video API through the AWS-signed proxy (see AWSIE),
    then delegates playback to the Anvato extractor.
    """
    IE_NAME = 'scrippsnetworks:watch'
    _VALID_URL = r'''(?x)
                    https?://
                        watch\.
                        (?P<site>geniuskitchen)\.com/
                        (?:
                            player\.[A-Z0-9]+\.html\#|
                            show/(?:[^/]+/){2}|
                            player/
                        )
                        (?P<id>\d+)
                    '''
    _TESTS = [{
        'url': 'http://watch.geniuskitchen.com/player/3787617/Ample-Hills-Ice-Cream-Bike/',
        'info_dict': {
            'id': '4194875',
            'ext': 'mp4',
            'title': 'Ample Hills Ice Cream Bike',
            'description': 'Courtney Rada churns up a signature GK Now ice cream with The Scoopmaster.',
            'uploader': 'ANV',
            'upload_date': '20171011',
            'timestamp': 1507698000,
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': [AnvatoIE.ie_key()],
        'skip': '404 Not Found',
    }]

    # Maps the URL's site name to the brand slug used by the SNI API
    _SNI_TABLE = {
        'geniuskitchen': 'genius',
    }

    _AWS_API_KEY = 'E7wSQmq0qK6xPrF13WmzKiHo4BQ7tip4pQcSXVl1'
    _AWS_PROXY_HOST = 'web.api.video.snidigital.com'

    _AWS_USER_AGENT = 'aws-sdk-js/2.80.0 callback'

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        site_id, video_id = mobj.group('site', 'id')

        # Step 1: get an OpenID token for the site's fixed Cognito identity
        aws_identity_id_json = json.dumps({
            'IdentityId': f'{self._AWS_REGION}:7655847c-0ae7-4d9b-80d6-56c062927eb3',
        }).encode()
        token = self._download_json(
            f'https://cognito-identity.{self._AWS_REGION}.amazonaws.com/', video_id,
            data=aws_identity_id_json,
            headers={
                'Accept': '*/*',
                'Content-Type': 'application/x-amz-json-1.1',
                'Referer': url,
                'X-Amz-Content-Sha256': hashlib.sha256(aws_identity_id_json).hexdigest(),
                'X-Amz-Target': 'AWSCognitoIdentityService.GetOpenIdToken',
                'X-Amz-User-Agent': self._AWS_USER_AGENT,
            })['Token']

        # Step 2: trade the OpenID token for temporary STS credentials
        sts = self._download_xml(
            'https://sts.amazonaws.com/', video_id, data=urlencode_postdata({
                'Action': 'AssumeRoleWithWebIdentity',
                'RoleArn': 'arn:aws:iam::710330595350:role/Cognito_WebAPIUnauth_Role',
                'RoleSessionName': 'web-identity',
                'Version': '2011-06-15',
                'WebIdentityToken': token,
            }), headers={
                'Referer': url,
                'X-Amz-User-Agent': self._AWS_USER_AGENT,
                'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
            })

        def get(key):
            # Pull a value from the namespaced STS XML response
            return xpath_text(
                sts, f'.//{{https://sts.amazonaws.com/doc/2011-06-15/}}{key}',
                fatal=True)

        # Step 3: signed API call (AWSIE) to resolve the Anvato media ID
        mcp_id = self._aws_execute_api({
            'uri': f'/1/web/brands/{self._SNI_TABLE[site_id]}/episodes/scrid/{video_id}',
            'access_key': get('AccessKeyId'),
            'secret_key': get('SecretAccessKey'),
            'session_token': get('SessionToken'),
        }, video_id)['results'][0]['mcpId']

        return self.url_result(
            smuggle_url(
                f'anvato:anvato_scripps_app_web_prod_0837996dbe373629133857ae9eb72e740424d80a:{mcp_id}',
                {'geo_countries': ['US']}),
            AnvatoIE.ie_key(), video_id=mcp_id)


class ScrippsNetworksIE(InfoExtractor):
    """Extractor for Scripps-brand video pages (Food Network, HGTV, etc.).

    Each brand maps to a ThePlatform account; extraction is delegated to
    the ThePlatform extractor via a templated media-GUID URL.
    """
    _VALID_URL = r'https?://(?:www\.)?(?P<site>cookingchanneltv|discovery|(?:diy|food)network|hgtv|travelchannel)\.com/videos/[0-9a-z-]+-(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.cookingchanneltv.com/videos/the-best-of-the-best-0260338',
        'info_dict': {
            'id': '0260338',
            'ext': 'mp4',
            'title': 'The Best of the Best',
            'description': 'Catch a new episode of MasterChef Canada Tuedsay at 9/8c.',
            'timestamp': 1475678834,
            'upload_date': '20161005',
            'uploader': 'SCNI-SCND',
            'tags': 'count:10',
            'creator': 'Cooking Channel',
            'duration': 29.995,
            'chapters': [{'start_time': 0.0, 'end_time': 29.995, 'title': '<Untitled Chapter 1>'}],
            'thumbnail': 'https://images.dds.discovery.com/up/tp/Scripps_-_Food_Category_Prod/122/987/0260338_630x355.jpg',
        },
        'add_ie': ['ThePlatform'],
        'expected_warnings': ['No HLS formats found'],
    }, {
        'url': 'https://www.diynetwork.com/videos/diy-barnwood-tablet-stand-0265790',
        'only_matching': True,
    }, {
        'url': 'https://www.foodnetwork.com/videos/chocolate-strawberry-cake-roll-7524591',
        'only_matching': True,
    }, {
        'url': 'https://www.hgtv.com/videos/cookie-decorating-101-0301929',
        'only_matching': True,
    }, {
        'url': 'https://www.travelchannel.com/videos/two-climates-one-bag-5302184',
        'only_matching': True,
    }, {
        'url': 'https://www.discovery.com/videos/guardians-of-the-glades-cooking-with-tom-cobb-5578368',
        'only_matching': True,
    }]
    # ThePlatform account ID per brand site
    _ACCOUNT_MAP = {
        'cookingchanneltv': 2433005105,
        'discovery': 2706091867,
        'diynetwork': 2433004575,
        'foodnetwork': 2433005105,
        'hgtv': 2433004575,
        'travelchannel': 2433005739,
    }
    _TP_TEMPL = 'https://link.theplatform.com/s/ip77QC/media/guid/%d/%s?mbr=true'

    def _real_extract(self, url):
        site, guid = self._match_valid_url(url).groups()
        return self.url_result(smuggle_url(
            self._TP_TEMPL % (self._ACCOUNT_MAP[site], guid),
            {'force_smil_url': True}), 'ThePlatform', guid)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/clippit.py
yt_dlp/extractor/clippit.py
import re

from .common import InfoExtractor
from ..utils import (
    parse_iso8601,
    qualities,
)


class ClippitIE(InfoExtractor):
    """Extractor for clippituser.tv clip pages.

    All data is scraped from the clip page itself: per-quality media URLs
    come from data-sd-file / data-hd-file attributes, and uploader, date
    and thumbnail come from nearby markup.
    """
    _VALID_URL = r'https?://(?:www\.)?clippituser\.tv/c/(?P<id>[a-z]+)'
    _TEST = {
        'url': 'https://www.clippituser.tv/c/evmgm',
        'md5': '963ae7a59a2ec4572ab8bf2f2d2c5f09',
        'info_dict': {
            'id': 'evmgm',
            'ext': 'mp4',
            'title': 'Bye bye Brutus. #BattleBots - Clippit',
            'uploader': 'lizllove',
            'uploader_url': 'https://www.clippituser.tv/p/lizllove',
            'timestamp': 1472183818,
            'upload_date': '20160826',
            'description': 'BattleBots | ABC',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        title = self._html_search_regex(r'<title.*>(.+?)</title>', webpage, 'title')

        FORMATS = ('sd', 'hd')
        quality = qualities(FORMATS)
        formats = []
        for format_id in FORMATS:
            # NB: named `media_url` (not `url`) so the function's `url`
            # parameter is not clobbered inside the loop
            media_url = self._html_search_regex(
                rf'data-{format_id}-file="(.+?)"', webpage, 'url', fatal=False)
            if not media_url:
                continue
            # Height is encoded in the file name, e.g. .../720.mp4
            match = re.search(r'/(?P<height>\d+)\.mp4', media_url)
            formats.append({
                'url': media_url,
                'format_id': format_id,
                'quality': quality(format_id),
                'height': int(match.group('height')) if match else None,
            })

        uploader = self._html_search_regex(r'class="username".*>\s+(.+?)\n',
                                           webpage, 'uploader', fatal=False)
        uploader_url = ('https://www.clippituser.tv/p/' + uploader
                        if uploader else None)

        timestamp = self._html_search_regex(r'datetime="(.+?)"',
                                            webpage, 'date', fatal=False)
        thumbnail = self._html_search_regex(r'data-image="(.+?)"',
                                            webpage, 'thumbnail', fatal=False)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'uploader': uploader,
            'uploader_url': uploader_url,
            'timestamp': parse_iso8601(timestamp),
            'description': self._og_search_description(webpage),
            'thumbnail': thumbnail,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/projectveritas.py
yt_dlp/extractor/projectveritas.py
from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    traverse_obj,
    unified_strdate,
)


class ProjectVeritasIE(InfoExtractor):
    """Extractor for projectveritas.com news/video pages.

    Reads the Gatsby page-data JSON, locates the Mux asset referenced by
    the page, and extracts HLS formats from stream.mux.com.
    """
    _WORKING = False
    _VALID_URL = r'https?://(?:www\.)?projectveritas\.com/(?P<type>news|video)/(?P<id>[^/?#]+)'
    _TESTS = [{
        'url': 'https://www.projectveritas.com/news/exclusive-inside-the-new-york-and-new-jersey-hospitals-battling-coronavirus/',
        'info_dict': {
            'id': '51910aab-365a-5cf1-88f2-8eb1ca5fd3c6',
            'ext': 'mp4',
            'title': 'Exclusive: Inside The New York and New Jersey Hospitals Battling Coronavirus',
            'upload_date': '20200327',
            'thumbnail': 'md5:6076477fe50b03eb8708be9415e18e1c',
        },
    }, {
        'url': 'https://www.projectveritas.com/video/ilhan-omar-connected-ballot-harvester-in-cash-for-ballots-scheme-car-is-full/',
        'info_dict': {
            'id': 'c5aab304-a56b-54b1-9f0b-03b77bc5f2f6',
            'ext': 'mp4',
            'title': 'Ilhan Omar connected Ballot Harvester in cash-for-ballots scheme: "Car is full" of absentee ballots',
            'upload_date': '20200927',
            'thumbnail': 'md5:194b8edf0e2ba64f25500ff4378369a4',
        },
    }]

    def _real_extract(self, url):
        video_id, video_type = self._match_valid_url(url).group('id', 'type')
        # Gatsby static page data for the route
        api_url = f'https://www.projectveritas.com/page-data/{video_type}/{video_id}/page-data.json'
        data_json = self._download_json(api_url, video_id)['result']['data']
        main_data = traverse_obj(data_json, 'video', 'post')
        video_id = main_data['id']
        thumbnail = traverse_obj(main_data, ('image', 'ogImage', 'src'))
        # The Mux asset either sits at the top level or is embedded in the
        # rich-text body content.
        mux_asset = traverse_obj(main_data,
                                 'muxAsset', ('body', 'json', 'content', ..., 'data', 'target', 'fields', 'muxAsset'),
                                 get_all=False, expected_type=dict)
        if not mux_asset:
            raise ExtractorError('No video on the provided url.', expected=True)
        playback_id = traverse_obj(mux_asset, 'playbackId', ('en-US', 'playbackId'))
        formats = self._extract_m3u8_formats(f'https://stream.mux.com/{playback_id}.m3u8', video_id)
        return {
            'id': video_id,
            'title': main_data['title'],
            'upload_date': unified_strdate(main_data.get('date')),
            # thumbnail may be absent; guard against calling .replace on None
            'thumbnail': thumbnail.replace('//', '') if thumbnail else None,
            'formats': formats,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/abc.py
yt_dlp/extractor/abc.py
import hashlib
import hmac
import re
import time

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    dict_get,
    int_or_none,
    js_to_json,
    parse_iso8601,
    str_or_none,
    traverse_obj,
    try_get,
    unescapeHTML,
    update_url_query,
    url_or_none,
)


class ABCIE(InfoExtractor):
    """Extractor for abc.net.au news/btn/listen article pages.

    Media can be embedded in several ways (direct audio download link,
    YouTube link/iframe, or inline JSON arrays); _real_extract probes
    each pattern in turn.
    """
    IE_NAME = 'abc.net.au'
    _VALID_URL = r'https?://(?:www\.)?abc\.net\.au/(?:news|btn|listen)/(?:[^/?#]+/){1,4}(?P<id>\d{5,})'

    _TESTS = [{
        'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334',
        'md5': 'cb3dd03b18455a661071ee1e28344d9f',
        'info_dict': {
            'id': '5868334',
            'ext': 'mp4',
            'title': 'Australia to help staff Ebola treatment centre in Sierra Leone',
            'description': 'md5:809ad29c67a05f54eb41f2a105693a67',
        },
        'skip': 'this video has expired',
    }, {
        'url': 'http://www.abc.net.au/news/2015-08-17/warren-entsch-introduces-same-sex-marriage-bill/6702326',
        'md5': '4ebd61bdc82d9a8b722f64f1f4b4d121',
        'info_dict': {
            'id': 'NvqvPeNZsHU',
            'ext': 'mp4',
            'upload_date': '20150816',
            'uploader': 'ABC News (Australia)',
            'description': 'Government backbencher Warren Entsch introduces a cross-party sponsored bill to legalise same-sex marriage, saying the bill is designed to promote "an inclusive Australia, not a divided one.". Read more here: http://ab.co/1Mwc6ef',
            'uploader_id': 'NewsOnABC',
            'title': 'Marriage Equality: Warren Entsch introduces same sex marriage bill',
        },
        'add_ie': ['Youtube'],
        'skip': 'Not accessible from Travis CI server',
    }, {
        'url': 'http://www.abc.net.au/news/2015-10-23/nab-lifts-interest-rates-following-westpac-and-cba/6880080',
        'md5': 'b96eee7c9edf4fc5a358a0252881cc1f',
        'info_dict': {
            'id': '6880080',
            'ext': 'mp3',
            'title': 'NAB lifts interest rates, following Westpac and CBA - ABC listen',
            'description': 'md5:f13d8edc81e462fce4a0437c7dc04728',
            'thumbnail': r're:https://live-production\.wcms\.abc-cdn\.net\.au/2193d7437c84b25eafd6360c82b5fa21',
        },
    }, {
        'url': 'http://www.abc.net.au/news/2015-10-19/6866214',
        'only_matching': True,
    }, {
        'url': 'https://www.abc.net.au/btn/classroom/wwi-centenary/10527914',
        'info_dict': {
            'id': '10527914',
            'ext': 'mp4',
            'title': 'WWI Centenary - Behind The News',
            'description': 'md5:fa4405939ff750fade46ff0cd4c66a52',
            'thumbnail': r're:https://live-production\.wcms\.abc-cdn\.net\.au/bcc3433c97bf992dff32ec5a768713c9',
        },
    }, {
        'url': 'https://www.abc.net.au/news/programs/the-world/2020-06-10/black-lives-matter-protests-spawn-support-for/12342074',
        'info_dict': {
            'id': '12342074',
            'ext': 'mp4',
            'title': 'Black Lives Matter protests spawn support for Papuans in Indonesia',
            'description': 'md5:625257209f2d14ce23cb4e3785da9beb',
            'thumbnail': r're:https://live-production\.wcms\.abc-cdn\.net\.au/7ee6f190de6d7dbb04203e514bfae9ec',
        },
    }, {
        'url': 'https://www.abc.net.au/btn/newsbreak/btn-newsbreak-20200814/12560476',
        'info_dict': {
            'id': 'tDL8Ld4dK_8',
            'ext': 'mp4',
            'title': 'Fortnite Banned From Apple and Google App Stores',
            'description': 'md5:a6df3f36ce8f816b74af4bd6462f5651',
            'upload_date': '20200813',
            'uploader': 'Behind the News',
            'uploader_id': 'behindthenews',
        },
    }, {
        'url': 'https://www.abc.net.au/news/2023-06-25/wagner-boss-orders-troops-back-to-bases-to-avoid-bloodshed/102520540',
        'info_dict': {
            'id': '102520540',
            'title': 'Wagner Group retreating from Russia, leader Prigozhin to move to Belarus',
            'ext': 'mp4',
            'description': 'Wagner troops leave Rostov-on-Don and\xa0Yevgeny Prigozhin will move to Belarus under a deal brokered by Belarusian President Alexander Lukashenko to end the mutiny.',
            'thumbnail': r're:https://live-production\.wcm\.abc-cdn\.net\.au/0c170f5b57f0105c432f366c0e8e267b',
        },
    }, {
        'url': 'https://www.abc.net.au/listen/programs/the-followers-madness-of-two/presents-followers-madness-of-two/105697646',
        'info_dict': {
            'id': '105697646',
            'title': 'INTRODUCING — The Followers: Madness of Two - ABC listen',
            'ext': 'mp3',
            'description': 'md5:2310cd0d440a4e01656abea15db8d1f3',
            'thumbnail': r're:https://live-production\.wcms\.abc-cdn\.net\.au/90d7078214e5d66553ffb7fcf0da0cda',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # Probe 1: direct audio download link ("Download audio directly")
        mobj = re.search(r'<a\s+href="(?P<url>[^"]+)"\s+data-duration="\d+"\s+title="Download audio directly">', webpage)
        if mobj:
            urls_info = mobj.groupdict()
            youtube = False
            video = False
        else:
            # Probe 2: external YouTube link or embedded YouTube iframe
            mobj = re.search(r'<a href="(?P<url>http://www\.youtube\.com/watch\?v=[^"]+)"><span><strong>External Link:</strong>', webpage)

            if mobj is None:
                mobj = re.search(r'<iframe width="100%" src="(?P<url>//www\.youtube-nocookie\.com/embed/[^?"]+)', webpage)
            if mobj:
                urls_info = mobj.groupdict()
                youtube = True
                video = True

            # Probe 3: inline JSON media arrays embedded in the page
            if mobj is None:
                mobj = re.search(r'(?P<type>)"(?:sources|files|renditions)":\s*(?P<json_data>\[[^\]]+\])', webpage)
                if mobj is None:
                    mobj = re.search(
                        r'inline(?P<type>Video|Audio|YouTube)Data\.push\((?P<json_data>[^)]+)\);',
                        webpage)
                    if mobj is None:
                        # Nothing matched; surface the site's own "expired"
                        # message when present, otherwise a generic error.
                        expired = self._html_search_regex(r'(?s)class="expired-(?:video|audio)".+?<span>(.+?)</span>', webpage, 'expired', None)
                        if expired:
                            raise ExtractorError(f'{self.IE_NAME} said: {expired}', expected=True)
                        raise ExtractorError('Unable to extract video urls')

                urls_info = self._parse_json(
                    mobj.group('json_data'), video_id, transform_source=js_to_json)
                youtube = mobj.group('type') == 'YouTube'
                # NOTE: probe 3's first pattern has an empty 'type' group, so the
                # content-type check on the parsed JSON decides video vs audio
                video = mobj.group('type') == 'Video' or traverse_obj(
                    urls_info, (0, ('contentType', 'MIMEType')), get_all=False) == 'video/mp4'

        if not isinstance(urls_info, list):
            urls_info = [urls_info]

        if youtube:
            # Delegate each YouTube URL; no local formats to build
            return self.playlist_result([
                self.url_result(url_info['url'])
                for url_info in urls_info])

        formats = []
        for url_info in urls_info:
            height = int_or_none(url_info.get('height'))
            bitrate = int_or_none(url_info.get('bitrate'))
            width = int_or_none(url_info.get('width'))
            format_id = None
            # File names end in either _<height>.mp4 or _<bitrate>k.mp4;
            # use whichever to fill in missing metadata
            mobj = re.search(r'_(?:(?P<height>\d+)|(?P<bitrate>\d+)k)\.mp4$', url_info['url'])
            if mobj:
                height_from_url = mobj.group('height')
                if height_from_url:
                    height = height or int_or_none(height_from_url)
                    width = width or int_or_none(url_info.get('label'))
                else:
                    bitrate = bitrate or int_or_none(mobj.group('bitrate'))
                    format_id = str_or_none(url_info.get('label'))
            formats.append({
                'url': url_info['url'],
                'vcodec': url_info.get('codec') if video else 'none',
                'width': width,
                'height': height,
                'tbr': bitrate,
                'filesize': int_or_none(url_info.get('filesize')),
                'format_id': format_id,
            })

        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'formats': formats,
            'description': self._og_search_description(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
        }


class ABCIViewIE(InfoExtractor):
    """Extractor for ABC iview (iview.abc.net.au) video pages.

    Fetches program metadata from the iview API and signs an HLS auth
    request with a hard-coded HMAC key (appears to mimic the Android
    tablet client — unverified beyond what the code shows).
    """
    IE_NAME = 'abc.net.au:iview'
    _VALID_URL = r'https?://iview\.abc\.net\.au/(?:[^/]+/)*video/(?P<id>[^/?#]+)'
    _GEO_COUNTRIES = ['AU']

    _TESTS = [{
        'url': 'https://iview.abc.net.au/show/utopia/series/1/video/CO1211V001S00',
        'md5': '52a942bfd7a0b79a6bfe9b4ce6c9d0ed',
        'info_dict': {
            'id': 'CO1211V001S00',
            'ext': 'mp4',
            'title': 'Series 1 Ep 1 Wood For The Trees',
            'series': 'Utopia',
            'description': 'md5:0cfb2c183c1b952d1548fd65c8a95c00',
            'upload_date': '20230726',
            'uploader_id': 'abc1',
            'series_id': 'CO1211V',
            'episode_id': 'CO1211V001S00',
            'season_number': 1,
            'season': 'Season 1',
            'episode_number': 1,
            'episode': 'Wood For The Trees',
            'thumbnail': 'https://cdn.iview.abc.net.au/thumbs/i/co/CO1211V001S00_5ad8353f4df09_1280.jpg',
            'timestamp': 1690403700,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'note': 'No episode name',
        'url': 'https://iview.abc.net.au/show/gruen/series/11/video/LE1927H001S00',
        'md5': '67715ce3c78426b11ba167d875ac6abf',
        'info_dict': {
            'id': 'LE1927H001S00',
            'ext': 'mp4',
            'title': 'Series 11 Ep 1',
            'series': 'Gruen',
            'description': 'md5:52cc744ad35045baf6aded2ce7287f67',
            'upload_date': '20190925',
            'uploader_id': 'abc1',
            'series_id': 'LE1927H',
            'episode_id': 'LE1927H001S00',
            'season_number': 11,
            'season': 'Season 11',
            'episode_number': 1,
            'episode': 'Episode 1',
            'thumbnail': 'https://cdn.iview.abc.net.au/thumbs/i/le/LE1927H001S00_5d954fbd79e25_1280.jpg',
            'timestamp': 1569445289,
        },
        'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest'],
        'params': {
            'skip_download': True,
        },
    }, {
        'note': 'No episode number',
        'url': 'https://iview.abc.net.au/show/four-corners/series/2022/video/NC2203H039S00',
        'md5': '77cb7d8434440e3b28fbebe331c2456a',
        'info_dict': {
            'id': 'NC2203H039S00',
            'ext': 'mp4',
            'title': 'Series 2022 Locking Up Kids',
            'series': 'Four Corners',
            'description': 'md5:54829ca108846d1a70e1fcce2853e720',
            'upload_date': '20221114',
            'uploader_id': 'abc1',
            'series_id': 'NC2203H',
            'episode_id': 'NC2203H039S00',
            'season_number': 2022,
            'season': 'Season 2022',
            'episode': 'Locking Up Kids',
            'thumbnail': 'https://cdn.iview.abc.net.au/thumbs/i/nc/NC2203H039S00_636d8a0944a22_1920.jpg',
            'timestamp': 1668460497,
        },
        'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest'],
        'params': {
            'skip_download': True,
        },
    }, {
        'note': 'No episode name or number',
        'url': 'https://iview.abc.net.au/show/landline/series/2021/video/RF2004Q043S00',
        'md5': '2e17dec06b13cc81dc119d2565289396',
        'info_dict': {
            'id': 'RF2004Q043S00',
            'ext': 'mp4',
            'title': 'Series 2021',
            'series': 'Landline',
            'description': 'md5:c9f30d9c0c914a7fd23842f6240be014',
            'upload_date': '20211205',
            'uploader_id': 'abc1',
            'series_id': 'RF2004Q',
            'episode_id': 'RF2004Q043S00',
            'season_number': 2021,
            'season': 'Season 2021',
            'thumbnail': 'https://cdn.iview.abc.net.au/thumbs/i/rf/RF2004Q043S00_61a950639dbc0_1920.jpg',
            'timestamp': 1638710705,
        },
        'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest'],
        'params': {
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        video_params = self._download_json(
            'https://iview.abc.net.au/api/programs/' + video_id, video_id)
        title = unescapeHTML(video_params.get('title') or video_params['seriesTitle'])
        stream = next(s for s in video_params['playlist'] if s.get('type') in ('program', 'livestream'))

        # Sign the HLS auth path with a static HMAC-SHA256 key and fetch the
        # resulting hdnea token used to authorize the stream URLs.
        house_number = video_params.get('episodeHouseNumber') or video_id
        path = f'/auth/hls/sign?ts={int(time.time())}&hn={house_number}&d=android-tablet'
        sig = hmac.new(
            b'android.content.res.Resources',
            path.encode(), hashlib.sha256).hexdigest()
        token = self._download_webpage(
            f'http://iview.abc.net.au{path}&sig={sig}', video_id)

        def tokenize_url(url, token):
            # Append the auth token as the hdnea query parameter
            return update_url_query(url, {
                'hdnea': token,
            })

        # Try qualities best-first; stop at the first manifest that yields formats
        for sd in ('1080', '720', 'sd', 'sd-low'):
            sd_url = try_get(
                stream, lambda x: x['streams']['hls'][sd], str)
            if not sd_url:
                continue
            formats = self._extract_m3u8_formats(
                tokenize_url(sd_url, token), video_id, 'mp4',
                entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)
            if formats:
                break
        else:
            formats = []

        subtitles = {}
        src_vtt = stream.get('captions', {}).get('src-vtt')
        if src_vtt:
            subtitles['en'] = [{
                'url': src_vtt,
                'ext': 'vtt',
            }]

        is_live = video_params.get('livestream') == '1'

        return {
            'id': video_id,
            'title': title,
            'description': video_params.get('description'),
            'thumbnail': video_params.get('thumbnail'),
            'duration': int_or_none(video_params.get('eventDuration')),
            'timestamp': parse_iso8601(video_params.get('pubDate'), ' '),
            'series': unescapeHTML(video_params.get('seriesTitle')),
            'series_id': video_params.get('seriesHouseNumber') or video_id[:7],
            # Season/episode are parsed out of titles like "Series 11 Ep 1 ..."
            'season_number': int_or_none(self._search_regex(
                r'\bSeries\s+(\d+)\b', title, 'season number', default=None)),
            'episode_number': int_or_none(self._search_regex(
                r'\bEp\s+(\d+)\b', title, 'episode number', default=None)),
            'episode_id': house_number,
            'episode': self._search_regex(
                r'^(?:Series\s+\d+)?\s*(?:Ep\s+\d+)?\s*(.*)$', title, 'episode', default='') or None,
            'uploader_id': video_params.get('channel'),
            'formats': formats,
            'subtitles': subtitles,
            'is_live': is_live,
        }


class ABCIViewShowSeriesIE(InfoExtractor):
    """Extractor for ABC iview show pages (playlists of episodes).

    Reads the page's __INITIAL_STATE__ JSON; either returns the highlight
    video directly (per --no-playlist) or a playlist of episode URLs
    delegated to ABCIViewIE.
    """
    IE_NAME = 'abc.net.au:iview:showseries'
    _VALID_URL = r'https?://iview\.abc\.net\.au/show/(?P<id>[^/]+)(?:/series/\d+)?$'
    _GEO_COUNTRIES = ['AU']

    _TESTS = [{
        'url': 'https://iview.abc.net.au/show/upper-middle-bogan',
        'info_dict': {
            'id': '124870-1',
            'title': 'Series 1',
            'description': 'md5:93119346c24a7c322d446d8eece430ff',
            'series': 'Upper Middle Bogan',
            'season': 'Series 1',
            'thumbnail': r're:^https?://cdn\.iview\.abc\.net\.au/thumbs/.*\.jpg$',
        },
        'playlist_count': 8,
    }, {
        'url': 'https://iview.abc.net.au/show/upper-middle-bogan',
        'info_dict': {
            'id': 'CO1108V001S00',
            'ext': 'mp4',
            'title': 'Series 1 Ep 1 I\'m A Swan',
            'description': 'md5:7b676758c1de11a30b79b4d301e8da93',
            'series': 'Upper Middle Bogan',
            'uploader_id': 'abc1',
            'upload_date': '20210630',
            'timestamp': 1625036400,
        },
        'params': {
            'noplaylist': True,
            'skip_download': 'm3u8',
        },
    }, {
        # 'videoEpisodes' is a dict with `items` key
        'url': 'https://iview.abc.net.au/show/7-30-mark-humphries-satire',
        'info_dict': {
            'id': '178458-0',
            'title': 'Episodes',
            'description': 'Satirist Mark Humphries brings his unique perspective on current political events for 7.30.',
            'series': '7.30 Mark Humphries Satire',
            'season': 'Episodes',
            'thumbnail': r're:^https?://cdn\.iview\.abc\.net\.au/thumbs/.*\.jpg$',
        },
        'playlist_count': 15,
        'skip': 'This program is not currently available in ABC iview',
    }, {
        'url': 'https://iview.abc.net.au/show/inbestigators',
        'info_dict': {
            'id': '175343-1',
            'title': 'Series 1',
            'description': 'md5:b9976935a6450e5b78ce2a940a755685',
            'series': 'The Inbestigators',
            'season': 'Series 1',
            'thumbnail': r're:^https?://cdn\.iview\.abc\.net\.au/thumbs/.+\.jpg',
        },
        'playlist_count': 17,
    }]

    def _real_extract(self, url):
        show_id = self._match_id(url)
        webpage = self._download_webpage(url, show_id)
        # The state blob is a JS string literal with escaped JSON inside;
        # unescape it before parsing, then drill into the embedded page data.
        video_data = self._search_json(
            r'window\.__INITIAL_STATE__\s*=\s*[\'"]', webpage, 'initial state', show_id,
            transform_source=lambda x: x.encode().decode('unicode_escape'), end_pattern=r'[\'"]\s*;')['route']['pageData']['_embedded']

        highlight = try_get(video_data, lambda x: x['highlightVideo']['shareUrl'])
        if not self._yes_playlist(show_id, bool(highlight), video_label='highlight video'):
            return self.url_result(highlight, ie=ABCIViewIE.ie_key())

        series = video_data['selectedSeries']
        return {
            '_type': 'playlist',
            # videoEpisodes is either a list or a dict with an 'items' key;
            # (None, 'items') in the traversal path handles both shapes
            'entries': [self.url_result(episode_url, ABCIViewIE)
                        for episode_url in traverse_obj(series, (
                            '_embedded', 'videoEpisodes', (None, 'items'), ..., 'shareUrl', {url_or_none}))],
            'id': series.get('id'),
            'title': dict_get(series, ('title', 'displaySubtitle')),
            'description': series.get('description'),
            'series': dict_get(series, ('showTitle', 'displayTitle')),
            'season': dict_get(series, ('title', 'displaySubtitle')),
            'thumbnail': traverse_obj(
                series, 'thumbnail', ('images', lambda _, v: v['name'] == 'seriesThumbnail', 'url'), get_all=False),
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/monstercat.py
yt_dlp/extractor/monstercat.py
import re from .common import InfoExtractor from ..utils import ( clean_html, extract_attributes, int_or_none, strip_or_none, unified_strdate, ) from ..utils.traversal import find_element, traverse_obj class MonstercatIE(InfoExtractor): _VALID_URL = r'https?://www\.monstercat\.com/release/(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.monstercat.com/release/742779548009', 'playlist_count': 20, 'info_dict': { 'title': 'The Secret Language of Trees', 'id': '742779548009', 'thumbnail': 'https://www.monstercat.com/release/742779548009/cover', 'release_date': '20230711', 'album': 'The Secret Language of Trees', 'album_artists': ['BT'], }, }] def _extract_tracks(self, table, album_meta): for td in re.findall(r'<tr[^<]*>((?:(?!</tr>)[\w\W])+)', table): # regex by chatgpt due to lack of get_elements_by_tag title = traverse_obj(td, ( {find_element(cls='d-inline-flex flex-column')}, {lambda x: x.partition(' <span')}, 0, {clean_html})) ids = traverse_obj(td, ( {find_element(cls='btn-play cursor-pointer mr-small', html=True)}, {extract_attributes})) or {} track_id = ids.get('data-track-id') release_id = ids.get('data-release-id') track_number = traverse_obj(td, ({find_element(cls='py-xsmall')}, {int_or_none})) if not track_id or not release_id: self.report_warning(f'Skipping track {track_number}, ID(s) not found') self.write_debug(f'release_id={release_id!r} track_id={track_id!r}') continue yield { **album_meta, 'title': title, 'track': title, 'track_number': track_number, 'artists': traverse_obj(td, ({find_element(cls='d-block fs-xxsmall')}, {clean_html}, all)), 'url': f'https://www.monstercat.com/api/release/{release_id}/track-stream/{track_id}', 'id': track_id, 'ext': 'mp3', } def _real_extract(self, url): url_id = self._match_id(url) html = self._download_webpage(url, url_id) # NB: HTMLParser may choke on this html; use {find_element} or try_call(lambda: get_element...) 
tracklist_table = traverse_obj(html, {find_element(cls='table table-small')}) or '' title = traverse_obj(html, ({find_element(tag='h1')}, {clean_html})) album_meta = { 'title': title, 'album': title, 'thumbnail': f'https://www.monstercat.com/release/{url_id}/cover', 'album_artists': traverse_obj(html, ( {find_element(cls='h-normal text-uppercase mb-desktop-medium mb-smallish')}, {clean_html}, all)), 'release_date': traverse_obj(html, ( {find_element(cls='font-italic mb-medium d-tablet-none d-phone-block')}, {lambda x: x.partition('Released ')}, 2, {strip_or_none}, {unified_strdate})), } return self.playlist_result( self._extract_tracks(tracklist_table, album_meta), playlist_id=url_id, **album_meta)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/aparat.py
yt_dlp/extractor/aparat.py
from .common import InfoExtractor from ..utils import ( get_element_by_id, int_or_none, merge_dicts, mimetype2ext, url_or_none, ) class AparatIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?aparat\.com/(?:v/|video/video/embed/videohash/)(?P<id>[a-zA-Z0-9]+)' _EMBED_REGEX = [r'<iframe .*?src="(?P<url>http://www\.aparat\.com/video/[^"]+)"'] _TESTS = [{ 'url': 'http://www.aparat.com/v/wP8On', 'md5': '131aca2e14fe7c4dcb3c4877ba300c89', 'info_dict': { 'id': 'wP8On', 'ext': 'mp4', 'title': 'تیم گلکسی 11 - زومیت', 'description': 'md5:096bdabcdcc4569f2b8a5e903a3b3028', 'duration': 231, 'timestamp': 1387394859, 'upload_date': '20131218', 'view_count': int, }, }, { # multiple formats 'url': 'https://www.aparat.com/v/8dflw/', 'only_matching': True, }] def _parse_options(self, webpage, video_id, fatal=True): return self._parse_json(self._search_regex( r'options\s*=\s*({.+?})\s*;', webpage, 'options', default='{}'), video_id) def _real_extract(self, url): video_id = self._match_id(url) # If available, provides more metadata webpage = self._download_webpage(url, video_id, fatal=False) options = self._parse_options(webpage, video_id, fatal=False) if not options: webpage = self._download_webpage( 'http://www.aparat.com/video/video/embed/vt/frame/showvideo/yes/videohash/' + video_id, video_id, 'Downloading embed webpage') options = self._parse_options(webpage, video_id) formats = [] for sources in (options.get('multiSRC') or []): for item in sources: if not isinstance(item, dict): continue file_url = url_or_none(item.get('src')) if not file_url: continue item_type = item.get('type') if item_type == 'application/vnd.apple.mpegurl': formats.extend(self._extract_m3u8_formats( file_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) else: ext = mimetype2ext(item.get('type')) label = item.get('label') formats.append({ 'url': file_url, 'ext': ext, 'format_id': 'http-%s' % (label or ext), 'height': int_or_none(self._search_regex( r'(\d+)[pP]', label or 
'', 'height', default=None)), }) info = self._search_json_ld(webpage, video_id, default={}) if not info.get('title'): info['title'] = get_element_by_id('videoTitle', webpage) or \ self._html_search_meta(['og:title', 'twitter:title', 'DC.Title', 'title'], webpage, fatal=True) return merge_dicts(info, { 'id': video_id, 'thumbnail': url_or_none(options.get('poster')), 'duration': int_or_none(options.get('duration')), 'formats': formats, })
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/mojevideo.py
yt_dlp/extractor/mojevideo.py
from .common import InfoExtractor from ..utils import js_to_json, remove_end, update_url_query class MojevideoIE(InfoExtractor): IE_DESC = 'mojevideo.sk' _VALID_URL = r'https?://(?:www\.)?mojevideo\.sk/video/(?P<id>\w+)/(?P<display_id>[\w()]+?)\.html' _TESTS = [{ 'url': 'https://www.mojevideo.sk/video/3d17c/chlapci_dobetonovali_sme_mame_hotovo.html', 'md5': '384a4628bd2bbd261c5206cf77c38c17', 'info_dict': { 'id': '3d17c', 'ext': 'mp4', 'title': 'Chlapci dobetónovali sme, máme hotovo!', 'display_id': 'chlapci_dobetonovali_sme_mame_hotovo', 'description': 'md5:a0822126044050d304a9ef58c92ddb34', 'thumbnail': 'https://fs5.mojevideo.sk/imgfb/250236.jpg', 'duration': 21.0, 'upload_date': '20230919', 'timestamp': 1695129706, 'like_count': int, 'dislike_count': int, 'view_count': int, 'comment_count': int, }, }, { # 720p 'url': 'https://www.mojevideo.sk/video/14677/den_blbec.html', 'md5': '517c3e111c53a67d10b429c1f344ba2f', 'info_dict': { 'id': '14677', 'ext': 'mp4', 'title': 'Deň blbec?', 'display_id': 'den_blbec', 'description': 'I maličkosť vám môže zmeniť celý deň. 
Nikdy nezahadzujte žuvačky na zem!', 'thumbnail': 'https://fs5.mojevideo.sk/imgfb/83575.jpg', 'duration': 100.0, 'upload_date': '20120515', 'timestamp': 1337076481, 'like_count': int, 'dislike_count': int, 'view_count': int, 'comment_count': int, }, }, { # 1080p 'url': 'https://www.mojevideo.sk/video/2feb2/band_maid_onset_(instrumental)_live_zepp_tokyo_(full_hd).html', 'md5': '64599a23d3ac31cf2fe069e4353d8162', 'info_dict': { 'id': '2feb2', 'ext': 'mp4', 'title': 'BAND-MAID - onset (Instrumental) Live - Zepp Tokyo (Full HD)', 'display_id': 'band_maid_onset_(instrumental)_live_zepp_tokyo_(full_hd)', 'description': 'Výborná inštrumentálna skladba od skupiny BAND-MAID.', 'thumbnail': 'https://fs5.mojevideo.sk/imgfb/196274.jpg', 'duration': 240.0, 'upload_date': '20190708', 'timestamp': 1562576592, 'like_count': int, 'dislike_count': int, 'view_count': int, 'comment_count': int, }, }, { # 720p 'url': 'https://www.mojevideo.sk/video/358c8/dva_nissany_skyline_strielaju_v_londyne.html', 'only_matching': True, }, { # 720p 'url': 'https://www.mojevideo.sk/video/2455d/gopro_hero4_session_nova_sportova_vodotesna_kamera.html', 'only_matching': True, }, { # 1080p 'url': 'https://www.mojevideo.sk/video/352ee/amd_rx_6800_xt_vs_nvidia_rtx_3080_(test_v_9_hrach).html', 'only_matching': True, }, { # 1080p 'url': 'https://www.mojevideo.sk/video/2cbeb/trailer_z_avengers_infinity_war.html', 'only_matching': True, }] def _real_extract(self, url): video_id, display_id = self._match_valid_url(url).groups() webpage = self._download_webpage(url, video_id) video_id_dec = self._search_regex( r'\bvId\s*=\s*(\d+)', webpage, 'video id', fatal=False) or str(int(video_id, 16)) video_exp = self._search_regex(r'\bvEx\s*=\s*["\'](\d+)', webpage, 'video expiry') video_hashes = self._search_json( r'\bvHash\s*=', webpage, 'video hashes', video_id, contains_pattern=r'\[(?s:.+)\]', transform_source=js_to_json) formats = [] for video_hash, (suffix, quality, format_note) in zip(video_hashes, [ # noqa: B905 
('', 1, 'normálna kvalita'), ('_lq', 0, 'nízka kvalita'), ('_hd', 2, 'HD-720p'), ('_fhd', 3, 'FULL HD-1080p'), ('_2k', 4, '2K-1440p'), ]): formats.append({ 'format_id': f'mp4-{quality}', 'quality': quality, 'format_note': format_note, 'url': update_url_query( f'https://cache01.mojevideo.sk/securevideos69/{video_id_dec}{suffix}.mp4', { 'md5': video_hash, 'expires': video_exp, }), }) return { 'id': video_id, 'display_id': display_id, 'formats': formats, 'title': (self._og_search_title(webpage, default=None) or remove_end(self._html_extract_title(webpage, 'title'), ' - Mojevideo')), 'description': self._og_search_description(webpage), **self._search_json_ld(webpage, video_id, default={}), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/brilliantpala.py
yt_dlp/extractor/brilliantpala.py
import hashlib from .common import InfoExtractor from ..utils import ( ExtractorError, traverse_obj, urlencode_postdata, ) class BrilliantpalaBaseIE(InfoExtractor): _NETRC_MACHINE = 'brilliantpala' _DOMAIN = '{subdomain}.brilliantpala.org' def _initialize_pre_login(self): self._HOMEPAGE = f'https://{self._DOMAIN}' self._LOGIN_API = f'{self._HOMEPAGE}/login/' self._LOGOUT_DEVICES_API = f'{self._HOMEPAGE}/logout_devices/?next=/' self._CONTENT_API = f'{self._HOMEPAGE}/api/v2.4/contents/{{content_id}}/' self._HLS_AES_URI = f'{self._HOMEPAGE}/api/v2.5/video_contents/{{content_id}}/key/' def _get_logged_in_username(self, url, video_id): webpage, urlh = self._download_webpage_handle(url, video_id) if urlh.url.startswith(self._LOGIN_API): self.raise_login_required() return self._html_search_regex( r'"username"\s*:\s*"(?P<username>[^"]+)"', webpage, 'logged-in username') def _perform_login(self, username, password): login_page, urlh = self._download_webpage_handle( self._LOGIN_API, None, 'Downloading login page', expected_status=401) if urlh.status != 401 and not urlh.url.startswith(self._LOGIN_API): self.write_debug('Cookies are valid, no login required.') return if urlh.status == 401: self.write_debug('Got HTTP Error 401; cookies have been invalidated') login_page = self._download_webpage(self._LOGIN_API, None, 'Re-downloading login page') login_form = self._hidden_inputs(login_page) login_form.update({ 'username': username, 'password': password, }) self._set_cookie(self._DOMAIN, 'csrftoken', login_form['csrfmiddlewaretoken']) logged_page = self._download_webpage( self._LOGIN_API, None, note='Logging in', headers={'Referer': self._LOGIN_API}, data=urlencode_postdata(login_form)) if self._html_search_regex( r'(Your username / email and password)', logged_page, 'auth fail', default=None): raise ExtractorError('wrong username or password', expected=True) # the maximum number of logins is one if self._html_search_regex( r'(Logout Other Devices)', logged_page, 'logout devices 
button', default=None): logout_device_form = self._hidden_inputs(logged_page) self._download_webpage( self._LOGOUT_DEVICES_API, None, headers={'Referer': self._LOGIN_API}, note='Logging out other devices', data=urlencode_postdata(logout_device_form)) def _real_extract(self, url): course_id, content_id = self._match_valid_url(url).group('course_id', 'content_id') video_id = f'{course_id}-{content_id}' username = self._get_logged_in_username(url, video_id) content_json = self._download_json( self._CONTENT_API.format(content_id=content_id), video_id, note='Fetching content info', errnote='Unable to fetch content info') entries = [] for stream in traverse_obj(content_json, ('video', 'streams', lambda _, v: v['id'] and v['url'])): formats = self._extract_m3u8_formats(stream['url'], video_id, fatal=False) if not formats: continue entries.append({ 'id': str(stream['id']), 'title': content_json.get('title'), 'formats': formats, 'hls_aes': {'uri': self._HLS_AES_URI.format(content_id=content_id)}, 'http_headers': {'X-Key': hashlib.sha256(username.encode('ascii')).hexdigest()}, 'thumbnail': content_json.get('cover_image'), }) return self.playlist_result( entries, playlist_id=video_id, playlist_title=content_json.get('title')) class BrilliantpalaElearnIE(BrilliantpalaBaseIE): IE_NAME = 'Brilliantpala:Elearn' IE_DESC = 'VoD on elearn.brilliantpala.org' _VALID_URL = r'https?://elearn\.brilliantpala\.org/courses/(?P<course_id>\d+)/contents/(?P<content_id>\d+)/?' 
_TESTS = [{ 'url': 'https://elearn.brilliantpala.org/courses/42/contents/12345/', 'only_matching': True, }, { 'url': 'https://elearn.brilliantpala.org/courses/98/contents/36683/', 'info_dict': { 'id': '23577', 'ext': 'mp4', 'title': 'Physical World, Units and Measurements - 1', 'thumbnail': 'https://d1j3vi2u94ebt0.cloudfront.net/institute/brilliantpalalms/chapter_contents/26237/e657f81b90874be19795c7ea081f8d5c.png', 'live_status': 'not_live', }, 'params': { 'skip_download': True, }, }] _DOMAIN = BrilliantpalaBaseIE._DOMAIN.format(subdomain='elearn') class BrilliantpalaClassesIE(BrilliantpalaBaseIE): IE_NAME = 'Brilliantpala:Classes' IE_DESC = 'VoD on classes.brilliantpala.org' _VALID_URL = r'https?://classes\.brilliantpala\.org/courses/(?P<course_id>\d+)/contents/(?P<content_id>\d+)/?' _TESTS = [{ 'url': 'https://classes.brilliantpala.org/courses/42/contents/12345/', 'only_matching': True, }, { 'url': 'https://classes.brilliantpala.org/courses/416/contents/25445/', 'info_dict': { 'id': '9128', 'ext': 'mp4', 'title': 'Motion in a Straight Line - Class 1', 'thumbnail': 'https://d3e4y8hquds3ek.cloudfront.net/institute/brilliantpalaelearn/chapter_contents/ff5ba838d0ec43419f67387fe1a01fa8.png', 'live_status': 'not_live', }, 'params': { 'skip_download': True, }, }] _DOMAIN = BrilliantpalaBaseIE._DOMAIN.format(subdomain='classes')
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/wevidi.py
yt_dlp/extractor/wevidi.py
from .common import InfoExtractor from ..utils import clean_html, float_or_none, get_element_by_class, js_to_json, traverse_obj class WeVidiIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?wevidi\.net/watch/(?P<id>[\w-]{11})' _TESTS = [{ 'url': 'https://wevidi.net/watch/2th7UO5F4KV', 'md5': 'b913d1ff5bbad499e2c7ef4aa6d829d7', 'info_dict': { 'id': '2th7UO5F4KV', 'ext': 'mp4', 'title': 'YouTube Alternative: WeVidi - customizable channels & more', 'thumbnail': r're:^https?://.*\.jpg$', 'description': 'md5:73a27d0a87d49fbcc5584566326ebeed', 'uploader': 'eclecRC', 'duration': 932.098, }, }, { 'url': 'https://wevidi.net/watch/ievRuuQHbPS', 'md5': 'ce8a94989a959bff9003fa27ee572935', 'info_dict': { 'id': 'ievRuuQHbPS', 'ext': 'mp4', 'title': 'WeVidi Playlists', 'thumbnail': r're:^https?://.*\.jpg$', 'description': 'md5:32cdfca272687390d9bd9b0c9c6153ee', 'uploader': 'WeVidi', 'duration': 36.1999, }, }, { 'url': 'https://wevidi.net/watch/PcMzDWaQSWb', 'md5': '55ee0d3434be5d9e5cc76b83f2bb57ec', 'info_dict': { 'id': 'PcMzDWaQSWb', 'ext': 'mp4', 'title': 'Cat blep', 'thumbnail': r're:^https?://.*\.jpg$', 'description': 'md5:e2c9e2b54b8bb424cc64937c8fdc068f', 'uploader': 'WeVidi', 'duration': 41.972, }, }, { 'url': 'https://wevidi.net/watch/wJnRqDHNe_u', 'md5': 'c8f263dd47e66cc17546b3abf47b5a77', 'info_dict': { 'id': 'wJnRqDHNe_u', 'ext': 'mp4', 'title': 'Gissy Talks: YouTube Alternatives', 'thumbnail': r're:^https?://.*\.jpg$', 'description': 'md5:e65036f0d4af80e0af191bd11af5195e', 'uploader': 'GissyEva', 'duration': 630.451, }, }, { 'url': 'https://wevidi.net/watch/4m1c4yJR_yc', 'md5': 'c63ce5ca6990dce86855fc02ca5bc1ed', 'info_dict': { 'id': '4m1c4yJR_yc', 'ext': 'mp4', 'title': 'Enough of that! 
- Awesome Exilez Podcast', 'thumbnail': r're:^https?://.*\.jpg$', 'description': 'md5:96af99dd63468b2dfab3020560e3e9b2', 'uploader': 'eclecRC', 'duration': 6.804, }, }] def _extract_formats(self, wvplayer_props): # Taken from WeVidi player JS: https://wevidi.net/layouts/default/static/player.min.js resolution_map = { 1: 144, 2: 240, 3: 360, 4: 480, 5: 720, 6: 1080, } src_path = f'{wvplayer_props["srcVID"]}/{wvplayer_props["srcUID"]}/{wvplayer_props["srcNAME"]}' for res in traverse_obj(wvplayer_props, ('resolutions', ..., {int}, filter)): format_id = str(-(res // -2) - 1) yield { 'acodec': 'mp4a.40.2', 'ext': 'mp4', 'format_id': format_id, 'height': resolution_map.get(res), 'url': f'https://www.wevidi.net/videoplayback/{src_path}/{format_id}', 'vcodec': 'avc1.42E01E', } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) wvplayer_props = self._search_json( r'WVPlayer\(', webpage, 'player', video_id, transform_source=lambda x: js_to_json(x.replace('||', '}'))) return { 'id': video_id, 'title': clean_html(get_element_by_class('video_title', webpage)), 'description': clean_html(get_element_by_class('descr_long', webpage)), 'uploader': clean_html(get_element_by_class('username', webpage)), 'formats': list(self._extract_formats(wvplayer_props)), 'thumbnail': self._og_search_thumbnail(webpage), 'duration': float_or_none(wvplayer_props.get('duration')), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/iwara.py
yt_dlp/extractor/iwara.py
import functools import hashlib import json import time import urllib.parse from .common import InfoExtractor from ..utils import ( ExtractorError, OnDemandPagedList, int_or_none, jwt_decode_hs256, mimetype2ext, qualities, traverse_obj, try_call, unified_timestamp, ) class IwaraBaseIE(InfoExtractor): _NETRC_MACHINE = 'iwara' _USERTOKEN = None _MEDIATOKEN = None def _is_token_expired(self, token, token_type): # User token TTL == ~3 weeks, Media token TTL == ~1 hour if (try_call(lambda: jwt_decode_hs256(token)['exp']) or 0) <= int(time.time() - 120): self.to_screen(f'{token_type} token has expired') return True def _get_user_token(self): username, password = self._get_login_info() if not username or not password: return user_token = IwaraBaseIE._USERTOKEN or self.cache.load(self._NETRC_MACHINE, username) if not user_token or self._is_token_expired(user_token, 'User'): response = self._download_json( 'https://api.iwara.tv/user/login', None, note='Logging in', headers={'Content-Type': 'application/json'}, data=json.dumps({ 'email': username, 'password': password, }).encode(), expected_status=lambda x: True) user_token = traverse_obj(response, ('token', {str})) if not user_token: error = traverse_obj(response, ('message', {str})) if 'invalidLogin' in error: raise ExtractorError('Invalid login credentials', expected=True) else: raise ExtractorError(f'Iwara API said: {error or "nothing"}') self.cache.store(self._NETRC_MACHINE, username, user_token) IwaraBaseIE._USERTOKEN = user_token def _get_media_token(self): self._get_user_token() if not IwaraBaseIE._USERTOKEN: return # user has not passed credentials if not IwaraBaseIE._MEDIATOKEN or self._is_token_expired(IwaraBaseIE._MEDIATOKEN, 'Media'): IwaraBaseIE._MEDIATOKEN = self._download_json( 'https://api.iwara.tv/user/token', None, note='Fetching media token', data=b'', headers={ 'Authorization': f'Bearer {IwaraBaseIE._USERTOKEN}', 'Content-Type': 'application/json', })['accessToken'] return {'Authorization': f'Bearer 
{IwaraBaseIE._MEDIATOKEN}'} def _perform_login(self, username, password): self._get_media_token() class IwaraIE(IwaraBaseIE): IE_NAME = 'iwara' _VALID_URL = r'https?://(?:www\.|ecchi\.)?iwara\.tv/videos?/(?P<id>[a-zA-Z0-9]+)' _TESTS = [{ 'url': 'https://www.iwara.tv/video/k2ayoueezfkx6gvq', 'info_dict': { 'id': 'k2ayoueezfkx6gvq', 'ext': 'mp4', 'age_limit': 18, 'title': 'Defeat of Irybelda - アイリベルダの敗北', 'description': 'md5:70278abebe706647a8b4cb04cf23e0d3', 'uploader': 'Inwerwm', 'uploader_id': 'inwerwm', 'tags': 'count:1', 'like_count': 6133, 'view_count': 1050343, 'comment_count': 1, 'timestamp': 1677843869, 'modified_timestamp': 1679056362, }, 'skip': 'this video cannot be played because of migration', }, { 'url': 'https://iwara.tv/video/1ywe1sbkqwumpdxz5/', 'md5': '7645f966f069b8ec9210efd9130c9aad', 'info_dict': { 'id': '1ywe1sbkqwumpdxz5', 'ext': 'mp4', 'age_limit': 18, 'title': 'Aponia アポニア SEX Party Tonight 手の脱衣 巨乳 ', 'description': 'md5:3f60016fff22060eef1ef26d430b1f67', 'uploader': 'Lyu ya', 'uploader_id': 'user792540', 'tags': [ 'uncategorized', ], 'like_count': int, 'view_count': int, 'comment_count': int, 'timestamp': 1678732213, 'modified_timestamp': int, 'thumbnail': 'https://files.iwara.tv/image/thumbnail/581d12b5-46f4-4f15-beb2-cfe2cde5d13d/thumbnail-00.jpg', 'modified_date': '20230614', 'upload_date': '20230313', }, }, { 'url': 'https://iwara.tv/video/blggmfno8ghl725bg', 'info_dict': { 'id': 'blggmfno8ghl725bg', 'ext': 'mp4', 'age_limit': 18, 'title': 'お外でおしっこしちゃう猫耳ロリメイド', 'description': 'md5:0342ba9bf6db09edbbb28729657c3611', 'uploader': 'Fe_Kurosabi', 'uploader_id': 'fekurosabi', 'tags': [ 'pee', ], 'like_count': int, 'view_count': int, 'comment_count': int, 'timestamp': 1598880567, 'modified_timestamp': int, 'upload_date': '20200831', 'modified_date': '20230605', 'thumbnail': 'https://files.iwara.tv/image/thumbnail/7693e881-d302-42a4-a780-f16d66b5dadd/thumbnail-00.jpg', # 'availability': 'needs_auth', }, }] def _extract_formats(self, video_id, 
fileurl): up = urllib.parse.urlparse(fileurl) q = urllib.parse.parse_qs(up.query) paths = up.path.rstrip('/').split('/') # https://github.com/yt-dlp/yt-dlp/issues/6549#issuecomment-1473771047 x_version = hashlib.sha1('_'.join((paths[-1], q['expires'][0], '5nFp9kmbNnHdAFhaqMvt')).encode()).hexdigest() preference = qualities(['preview', '360', '540', 'Source']) files = self._download_json(fileurl, video_id, headers={'X-Version': x_version}) for fmt in files: yield traverse_obj(fmt, { 'format_id': 'name', 'url': ('src', ('view', 'download'), {self._proto_relative_url}), 'ext': ('type', {mimetype2ext}), 'quality': ('name', {preference}), 'height': ('name', {int_or_none}), }, get_all=False) def _real_extract(self, url): video_id = self._match_id(url) username, _ = self._get_login_info() video_data = self._download_json( f'https://api.iwara.tv/video/{video_id}', video_id, expected_status=lambda x: True, headers=self._get_media_token()) errmsg = video_data.get('message') # at this point we can actually get uploaded user info, but do we need it? if errmsg == 'errors.privateVideo': self.raise_login_required('Private video. 
Login if you have permissions to watch', method='password') elif errmsg == 'errors.notFound' and not username: self.raise_login_required('Video may need login to view', method='password') elif errmsg: # None if success raise ExtractorError(f'Iwara says: {errmsg}') if not video_data.get('fileUrl'): if video_data.get('embedUrl'): return self.url_result(video_data.get('embedUrl')) raise ExtractorError('This video is unplayable', expected=True) return { 'id': video_id, 'age_limit': 18 if video_data.get('rating') == 'ecchi' else 0, # ecchi is 'sexy' in Japanese **traverse_obj(video_data, { 'title': 'title', 'description': 'body', 'uploader': ('user', 'name'), 'uploader_id': ('user', 'username'), 'tags': ('tags', ..., 'id'), 'like_count': 'numLikes', 'view_count': 'numViews', 'comment_count': 'numComments', 'timestamp': ('createdAt', {unified_timestamp}), 'modified_timestamp': ('updatedAt', {unified_timestamp}), 'thumbnail': ('file', 'id', {str}, { lambda x: f'https://files.iwara.tv/image/thumbnail/{x}/thumbnail-00.jpg'}), }), 'formats': list(self._extract_formats(video_id, video_data.get('fileUrl'))), } class IwaraUserIE(IwaraBaseIE): _VALID_URL = r'https?://(?:www\.)?iwara\.tv/profile/(?P<id>[^/?#&]+)' IE_NAME = 'iwara:user' _PER_PAGE = 32 _TESTS = [{ 'url': 'https://iwara.tv/profile/user792540/videos', 'info_dict': { 'id': 'user792540', 'title': 'Lyu ya', }, 'playlist_mincount': 70, }, { 'url': 'https://iwara.tv/profile/theblackbirdcalls/videos', 'info_dict': { 'id': 'theblackbirdcalls', 'title': 'TheBlackbirdCalls', }, 'playlist_mincount': 723, }, { 'url': 'https://iwara.tv/profile/user792540', 'only_matching': True, }, { 'url': 'https://iwara.tv/profile/theblackbirdcalls', 'only_matching': True, }, { 'url': 'https://www.iwara.tv/profile/lumymmd', 'info_dict': { 'id': 'lumymmd', 'title': 'Lumy MMD', }, 'playlist_mincount': 1, }] def _entries(self, playlist_id, user_id, page): videos = self._download_json( 'https://api.iwara.tv/videos', playlist_id, note=f'Downloading 
page {page}', query={ 'page': page, 'sort': 'date', 'user': user_id, 'limit': self._PER_PAGE, }, headers=self._get_media_token()) for x in traverse_obj(videos, ('results', ..., 'id')): yield self.url_result(f'https://iwara.tv/video/{x}') def _real_extract(self, url): playlist_id = self._match_id(url) user_info = self._download_json( f'https://api.iwara.tv/profile/{playlist_id}', playlist_id, note='Requesting user info') user_id = traverse_obj(user_info, ('user', 'id')) return self.playlist_result( OnDemandPagedList( functools.partial(self._entries, playlist_id, user_id), self._PER_PAGE), playlist_id, traverse_obj(user_info, ('user', 'name'))) class IwaraPlaylistIE(IwaraBaseIE): _VALID_URL = r'https?://(?:www\.)?iwara\.tv/playlist/(?P<id>[0-9a-f-]+)' IE_NAME = 'iwara:playlist' _PER_PAGE = 32 _TESTS = [{ 'url': 'https://iwara.tv/playlist/458e5486-36a4-4ac0-b233-7e9eef01025f', 'info_dict': { 'id': '458e5486-36a4-4ac0-b233-7e9eef01025f', }, 'playlist_mincount': 3, }] def _entries(self, playlist_id, first_page, page): videos = self._download_json( 'https://api.iwara.tv/videos', playlist_id, f'Downloading page {page}', query={'page': page, 'limit': self._PER_PAGE}, headers=self._get_media_token()) if page else first_page for x in traverse_obj(videos, ('results', ..., 'id')): yield self.url_result(f'https://iwara.tv/video/{x}') def _real_extract(self, url): playlist_id = self._match_id(url) page_0 = self._download_json( f'https://api.iwara.tv/playlist/{playlist_id}?page=0&limit={self._PER_PAGE}', playlist_id, note='Requesting playlist info', headers=self._get_media_token()) return self.playlist_result( OnDemandPagedList( functools.partial(self._entries, playlist_id, page_0), self._PER_PAGE), playlist_id, traverse_obj(page_0, ('title', 'name')))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/camdemy.py
yt_dlp/extractor/camdemy.py
import re import urllib.parse from .common import InfoExtractor from ..utils import ( clean_html, parse_duration, str_to_int, unified_strdate, ) class CamdemyIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?camdemy\.com/media/(?P<id>\d+)' _TESTS = [{ # single file 'url': 'http://www.camdemy.com/media/5181/', 'md5': '5a5562b6a98b37873119102e052e311b', 'info_dict': { 'id': '5181', 'ext': 'mp4', 'title': 'Ch1-1 Introduction, Signals (02-23-2012)', 'thumbnail': r're:^https?://.*\.jpg$', 'creator': 'ss11spring', 'duration': 1591, 'upload_date': '20130114', 'view_count': int, }, }, { # With non-empty description # webpage returns "No permission or not login" 'url': 'http://www.camdemy.com/media/13885', 'md5': '4576a3bb2581f86c61044822adbd1249', 'info_dict': { 'id': '13885', 'ext': 'mp4', 'title': 'EverCam + Camdemy QuickStart', 'thumbnail': r're:^https?://.*\.jpg$', 'description': 'md5:2a9f989c2b153a2342acee579c6e7db6', 'creator': 'evercam', 'duration': 318, }, }, { # External source (YouTube) 'url': 'http://www.camdemy.com/media/14842', 'info_dict': { 'id': '2vsYQzNIsJo', 'ext': 'mp4', 'title': 'Excel 2013 Tutorial - How to add Password Protection', 'description': 'Excel 2013 Tutorial for Beginners - How to add Password Protection', 'upload_date': '20130211', 'uploader': 'Hun Kim', 'uploader_id': 'hunkimtutorials', }, 'params': { 'skip_download': True, }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) src_from = self._html_search_regex( r"class=['\"]srcFrom['\"][^>]*>Sources?(?:\s+from)?\s*:\s*<a[^>]+(?:href|title)=(['\"])(?P<url>(?:(?!\1).)+)\1", webpage, 'external source', default=None, group='url') if src_from: return self.url_result(src_from) oembed_obj = self._download_json( 'http://www.camdemy.com/oembed/?format=json&url=' + url, video_id) title = oembed_obj['title'] thumb_url = oembed_obj['thumbnail_url'] video_folder = urllib.parse.urljoin(thumb_url, 'video/') file_list_doc = 
self._download_xml( urllib.parse.urljoin(video_folder, 'fileList.xml'), video_id, 'Downloading filelist XML') file_name = file_list_doc.find('./video/item/fileName').text video_url = urllib.parse.urljoin(video_folder, file_name) # Some URLs return "No permission or not login" in a webpage despite being # freely available via oembed JSON URL (e.g. http://www.camdemy.com/media/13885) upload_date = unified_strdate(self._search_regex( r'>published on ([^<]+)<', webpage, 'upload date', default=None)) view_count = str_to_int(self._search_regex( r'role=["\']viewCnt["\'][^>]*>([\d,.]+) views', webpage, 'view count', default=None)) description = self._html_search_meta( 'description', webpage, default=None) or clean_html( oembed_obj.get('description')) return { 'id': video_id, 'url': video_url, 'title': title, 'thumbnail': thumb_url, 'description': description, 'creator': oembed_obj.get('author_name'), 'duration': parse_duration(oembed_obj.get('duration')), 'upload_date': upload_date, 'view_count': view_count, } class CamdemyFolderIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?camdemy\.com/folder/(?P<id>\d+)' _TESTS = [{ # links with trailing slash 'url': 'http://www.camdemy.com/folder/450', 'info_dict': { 'id': '450', 'title': '信號與系統 2012 & 2011 (Signals and Systems)', }, 'playlist_mincount': 145, }, { # links without trailing slash # and multi-page 'url': 'http://www.camdemy.com/folder/853', 'info_dict': { 'id': '853', 'title': '科學計算 - 使用 Matlab', }, 'playlist_mincount': 20, }, { # with displayMode parameter. 
For testing the codes to add parameters 'url': 'http://www.camdemy.com/folder/853/?displayMode=defaultOrderByOrg', 'info_dict': { 'id': '853', 'title': '科學計算 - 使用 Matlab', }, 'playlist_mincount': 20, }] def _real_extract(self, url): folder_id = self._match_id(url) # Add displayMode=list so that all links are displayed in a single page parsed_url = list(urllib.parse.urlparse(url)) query = dict(urllib.parse.parse_qsl(parsed_url[4])) query.update({'displayMode': 'list'}) parsed_url[4] = urllib.parse.urlencode(query) final_url = urllib.parse.urlunparse(parsed_url) page = self._download_webpage(final_url, folder_id) matches = re.findall(r"href='(/media/\d+/?)'", page) entries = [self.url_result('http://www.camdemy.com' + media_path) for media_path in matches] folder_title = self._html_search_meta('keywords', page) return self.playlist_result(entries, folder_id, folder_title)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/mtv.py
yt_dlp/extractor/mtv.py
import base64 import json import time import urllib.parse from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, float_or_none, int_or_none, js_to_json, jwt_decode_hs256, parse_iso8601, parse_qs, update_url, update_url_query, url_or_none, ) from ..utils.traversal import require, traverse_obj class MTVServicesBaseIE(InfoExtractor): _GEO_BYPASS = False _GEO_COUNTRIES = ['US'] _CACHE_SECTION = 'mtvservices' _ACCESS_TOKEN_KEY = 'access' _REFRESH_TOKEN_KEY = 'refresh' _MEDIA_TOKEN_KEY = 'media' _token_cache = {} @staticmethod def _jwt_is_expired(token): return jwt_decode_hs256(token)['exp'] - time.time() < 120 @staticmethod def _get_auth_suite_data(config): return traverse_obj(config, { 'clientId': ('clientId', {str}), 'countryCode': ('countryCode', {str}), }) def _call_auth_api(self, path, config, display_id=None, note=None, data=None, headers=None, query=None): headers = { 'Accept': 'application/json', 'Client-Description': 'deviceName=Chrome Windows;deviceType=desktop;system=Windows NT 10.0', 'Api-Version': '2025-07-09', **(headers or {}), } if data is not None: headers['Content-Type'] = 'application/json' if isinstance(data, dict): data = json.dumps(data, separators=(',', ':')).encode() return self._download_json( f'https://auth.mtvnservices.com/{path}', display_id, note=note or 'Calling authentication API', data=data, headers=headers, query={**self._get_auth_suite_data(config), **(query or {})}) def _get_fresh_access_token(self, config, display_id=None, force_refresh=False): resource_id = config['resourceId'] # resource_id should already be in _token_cache since _get_media_token is the caller tokens = self._token_cache[resource_id] access_token = tokens.get(self._ACCESS_TOKEN_KEY) if not force_refresh and access_token and not self._jwt_is_expired(access_token): return access_token if self._REFRESH_TOKEN_KEY not in tokens: response = self._call_auth_api( 'accessToken', config, display_id, 'Retrieving 
auth tokens', data=b'') else: response = self._call_auth_api( 'accessToken/refresh', config, display_id, 'Refreshing auth tokens', data={'refreshToken': tokens[self._REFRESH_TOKEN_KEY]}, headers={'Authorization': f'Bearer {access_token}'}) tokens[self._ACCESS_TOKEN_KEY] = response['applicationAccessToken'] tokens[self._REFRESH_TOKEN_KEY] = response['deviceRefreshToken'] self.cache.store(self._CACHE_SECTION, resource_id, tokens) return tokens[self._ACCESS_TOKEN_KEY] def _get_media_token(self, video_config, config, display_id=None): resource_id = config['resourceId'] if resource_id in self._token_cache: tokens = self._token_cache[resource_id] else: tokens = self._token_cache[resource_id] = self.cache.load(self._CACHE_SECTION, resource_id) or {} media_token = tokens.get(self._MEDIA_TOKEN_KEY) if media_token and not self._jwt_is_expired(media_token): return media_token access_token = self._get_fresh_access_token(config, display_id) if not jwt_decode_hs256(access_token).get('accessMethods'): # MTVServices uses a custom AdobePass oauth flow which is incompatible with AdobePassIE mso_id = self.get_param('ap_mso') if not mso_id: raise ExtractorError( 'This video is only available for users of participating TV providers. 
' 'Use --ap-mso to specify Adobe Pass Multiple-system operator Identifier and pass ' 'cookies from a browser session where you are signed-in to your provider.', expected=True) auth_suite_data = json.dumps( self._get_auth_suite_data(config), separators=(',', ':')).encode() callback_url = update_url_query(config['callbackURL'], { 'authSuiteData': urllib.parse.quote(base64.b64encode(auth_suite_data).decode()), 'mvpdCode': mso_id, }) auth_url = self._call_auth_api( f'mvpd/{mso_id}/login', config, display_id, 'Retrieving provider authentication URL', query={'callbackUrl': callback_url}, headers={'Authorization': f'Bearer {access_token}'})['authenticationUrl'] res = self._download_webpage_handle(auth_url, display_id, 'Downloading provider auth page') # XXX: The following "provider-specific code" likely only works if mso_id == Comcast_SSO # BEGIN provider-specific code redirect_url = self._search_json( r'initInterstitialRedirect\(', res[0], 'redirect JSON', display_id, transform_source=js_to_json)['continue'] urlh = self._request_webpage(redirect_url, display_id, 'Requesting provider redirect page') authorization_code = parse_qs(urlh.url)['authorizationCode'][-1] # END provider-specific code self._call_auth_api( f'access/mvpd/{mso_id}', config, display_id, 'Submitting authorization code to MTVNServices', query={'authorizationCode': authorization_code}, data=b'', headers={'Authorization': f'Bearer {access_token}'}) access_token = self._get_fresh_access_token(config, display_id, force_refresh=True) tokens[self._MEDIA_TOKEN_KEY] = self._call_auth_api( 'mediaToken', config, display_id, 'Fetching media token', data={ 'content': {('id' if k == 'videoId' else k): v for k, v in video_config.items()}, 'resourceId': resource_id, }, headers={'Authorization': f'Bearer {access_token}'})['mediaToken'] self.cache.store(self._CACHE_SECTION, resource_id, tokens) return tokens[self._MEDIA_TOKEN_KEY] def _real_extract(self, url): display_id = self._match_id(url) try: data = 
self._download_json( update_url(url, query=None), display_id, query={'json': 'true'}) except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 404 and not self.suitable(e.cause.response.url): self.raise_geo_restricted(countries=self._GEO_COUNTRIES) raise flex_wrapper = traverse_obj(data, ( 'children', lambda _, v: v['type'] == 'MainContainer', (None, ('children', lambda _, v: v['type'] == 'AviaWrapper')), 'children', lambda _, v: v['type'] == 'FlexWrapper', {dict}, any)) video_detail = traverse_obj(flex_wrapper, ( (None, ('children', lambda _, v: v['type'] == 'AuthSuiteWrapper')), 'children', lambda _, v: v['type'] == 'Player', 'props', 'videoDetail', {dict}, any)) if not video_detail: video_detail = traverse_obj(data, ( 'children', ..., ('handleTVEAuthRedirection', None), 'videoDetail', {dict}, any, {require('video detail')})) mgid = video_detail['mgid'] video_id = mgid.rpartition(':')[2] service_url = traverse_obj(video_detail, ('videoServiceUrl', {url_or_none}, {update_url(query=None)})) if not service_url: raise ExtractorError('This content is no longer available', expected=True) headers = {} if video_detail.get('authRequired'): # The vast majority of provider-locked content has been moved to Paramount+ # BetIE is the only extractor that is currently known to reach this code path video_config = traverse_obj(flex_wrapper, ( 'children', lambda _, v: v['type'] == 'AuthSuiteWrapper', 'props', 'videoConfig', {dict}, any, {require('video config')})) config = traverse_obj(data, ( 'props', 'authSuiteConfig', {dict}, {require('auth suite config')})) headers['X-VIA-TVE-MEDIATOKEN'] = self._get_media_token(video_config, config, display_id) stream_info = self._download_json( service_url, video_id, 'Downloading API JSON', 'Unable to download API JSON', query={'clientPlatform': 'desktop'}, headers=headers)['stitchedstream'] manifest_type = stream_info['manifesttype'] if manifest_type == 'hls': formats, subtitles = 
self._extract_m3u8_formats_and_subtitles( stream_info['source'], video_id, 'mp4', m3u8_id=manifest_type) elif manifest_type == 'dash': formats, subtitles = self._extract_mpd_formats_and_subtitles( stream_info['source'], video_id, mpd_id=manifest_type) else: self.raise_no_formats(f'Unsupported manifest type "{manifest_type}"') formats, subtitles = [], {} return { **traverse_obj(video_detail, { 'title': ('title', {str}), 'channel': ('channel', 'name', {str}), 'thumbnails': ('images', ..., {'url': ('url', {url_or_none})}), 'description': (('fullDescription', 'description'), {str}, any), 'series': ('parentEntity', 'title', {str}), 'season_number': ('seasonNumber', {int_or_none}), 'episode_number': ('episodeAiringOrder', {int_or_none}), 'duration': ('duration', 'milliseconds', {float_or_none(scale=1000)}), 'timestamp': (( ('originalPublishDate', {parse_iso8601}), ('publishDate', 'timestamp', {int_or_none})), any), 'release_timestamp': (( ('originalAirDate', {parse_iso8601}), ('airDate', 'timestamp', {int_or_none})), any), }), 'id': video_id, 'display_id': display_id, 'formats': formats, 'subtitles': subtitles, } class MTVIE(MTVServicesBaseIE): IE_NAME = 'mtv' _VALID_URL = r'https?://(?:www\.)?mtv\.com/(?:video-clips|episodes)/(?P<id>[\da-z]{6})' _TESTS = [{ 'url': 'https://www.mtv.com/video-clips/syolsj', 'info_dict': { 'id': '213ea7f8-bac7-4a43-8cd5-8d8cb8c8160f', 'ext': 'mp4', 'display_id': 'syolsj', 'title': 'The Challenge: Vets & New Threats', 'description': 'md5:c4d2e90a5fff6463740fbf96b2bb6a41', 'duration': 95.0, 'thumbnail': r're:https://images\.paramount\.tech/uri/mgid:arc:imageassetref', 'series': 'The Challenge', 'season': 'Season 41', 'season_number': 41, 'episode': 'Episode 0', 'episode_number': 0, 'timestamp': 1753945200, 'upload_date': '20250731', 'release_timestamp': 1753945200, 'release_date': '20250731', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.mtv.com/episodes/uzvigh', 'info_dict': { 'id': '364e8b9e-e415-11ef-b405-16fff45bc035', 
'ext': 'mp4', 'display_id': 'uzvigh', 'title': 'CT Tamburello and Johnny Bananas', 'description': 'md5:364cea52001e9c13f92784e3365c6606', 'channel': 'MTV', 'duration': 1260.0, 'thumbnail': r're:https://images\.paramount\.tech/uri/mgid:arc:imageassetref', 'series': 'Ridiculousness', 'season': 'Season 47', 'season_number': 47, 'episode': 'Episode 19', 'episode_number': 19, 'timestamp': 1753318800, 'upload_date': '20250724', 'release_timestamp': 1753318800, 'release_date': '20250724', }, 'params': {'skip_download': 'm3u8'}, }]
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/spiegel.py
yt_dlp/extractor/spiegel.py
from .common import InfoExtractor from .jwplatform import JWPlatformIE class SpiegelIE(InfoExtractor): _UUID_RE = r'[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}' _VALID_URL = rf'https?://(?:www\.)?(?:spiegel|manager-magazin)\.de(?:/[^/]+)+/[^/]*-(?P<id>[0-9]+|{_UUID_RE})(?:-embed|-iframe)?(?:\.html)?(?:$|[#?])' _TESTS = [{ 'url': 'http://www.spiegel.de/video/vulkan-tungurahua-in-ecuador-ist-wieder-aktiv-video-1259285.html', 'md5': '50c7948883ec85a3e431a0a44b7ad1d6', 'info_dict': { 'id': 'II0BUyxY', 'display_id': '1259285', 'ext': 'mp4', 'title': 'Vulkan Tungurahua in Ecuador ist wieder aktiv - DER SPIEGEL - Wissenschaft', 'description': 'md5:8029d8310232196eb235d27575a8b9f4', 'duration': 48.0, 'upload_date': '20130311', 'timestamp': 1362997920, }, }, { 'url': 'http://www.spiegel.de/video/schach-wm-videoanalyse-des-fuenften-spiels-video-1309159.html', 'only_matching': True, }, { 'url': 'https://www.spiegel.de/video/eifel-zoo-aufregung-um-ausgebrochene-raubtiere-video-99018031.html', 'only_matching': True, }, { 'url': 'https://www.spiegel.de/panorama/urteile-im-goldmuenzenprozess-haftstrafen-fuer-clanmitglieder-a-aae8df48-43c1-4c61-867d-23f0a2d254b7', 'only_matching': True, }, { 'url': 'http://www.spiegel.de/video/spiegel-tv-magazin-ueber-guellekrise-in-schleswig-holstein-video-99012776.html', 'only_matching': True, }, { 'url': 'http://www.spiegel.de/sport/sonst/badminton-wm-die-randsportart-soll-populaerer-werden-a-987092.html', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) media_id = self._html_search_regex( r'(&#34;|["\'])mediaId\1\s*:\s*(&#34;|["\'])(?P<id>(?:(?!\2).)+)\2', webpage, 'media id', group='id') return { '_type': 'url_transparent', 'id': video_id, 'display_id': video_id, 'url': f'jwplatform:{media_id}', 'title': self._og_search_title(webpage, default=None), 'ie_key': JWPlatformIE.ie_key(), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ina.py
yt_dlp/extractor/ina.py
from .common import InfoExtractor from ..utils import unified_strdate class InaIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:www|m)\.)?ina\.fr/(?:[^?#]+/)(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://www.ina.fr/video/I12055569/francois-hollande-je-crois-que-c-est-clair-video.html', 'md5': 'c5a09e5cb5604ed10709f06e7a377dda', 'info_dict': { 'id': 'I12055569', 'ext': 'mp4', 'title': 'François Hollande "Je crois que c\'est clair"', 'description': 'md5:19f61e2b4844ed4bb2e3df9ab9f527ff', 'upload_date': '20070712', 'thumbnail': 'https://cdn-hub.ina.fr/notice/690x517/3c4/I12055569.jpeg', }, }, { 'url': 'https://www.ina.fr/video/S806544_001/don-d-organes-des-avancees-mais-d-importants-besoins-video.html', 'only_matching': True, }, { 'url': 'https://www.ina.fr/audio/P16173408', 'only_matching': True, }, { 'url': 'https://www.ina.fr/video/P16173408-video.html', 'only_matching': True, }, { 'url': 'http://m.ina.fr/video/I12055569', 'only_matching': True, }, { 'url': 'https://www.ina.fr/ina-eclaire-actu/video/cpb8205116303/les-jeux-electroniques', 'md5': '4b8284a9a3a184fdc7e744225b8251e7', 'info_dict': { 'id': 'CPB8205116303', 'ext': 'mp4', 'title': 'Les jeux électroniques', 'description': 'md5:e09f7683dad1cc60b74950490127d233', 'upload_date': '19821204', 'duration': 657, 'thumbnail': 'https://cdn-hub.ina.fr/notice/690x517/203/CPB8205116303.jpeg', }, }, { 'url': 'https://www.ina.fr/ina-eclaire-actu/arletty-carriere-conseils-actrice-marcel-carne', 'md5': '743d6f069a00e19dda0da166a54eeccb', 'info_dict': { 'id': 'I22203233', 'ext': 'mp4', 'title': 'Arletty sur le métier d\'actrice', 'description': 'md5:3d89b5e419d8514c934f146045ccdbad', 'upload_date': '19581128', 'thumbnail': 'https://cdn-hub.ina.fr/notice/690x517/082/I22203233.jpeg', }, }, { 'url': 'https://www.ina.fr/ina-eclaire-actu/chasse-croise-sncf-gare-d-austerlitz-vacances-d-ete', 'md5': 'a96fb85e9ba3b5c5b2eeb0c5daa55f2f', 'info_dict': { 'id': 'CAF91038285', 'ext': 'mp4', 'title': 'Les grands départs : les trains', 
'description': 'md5:1630ee819d8d4da97df53459e99f72bb', 'upload_date': '19740801', 'thumbnail': 'https://cdn-hub.ina.fr/notice/690x517/2cf/CAF91038285.jpeg', }, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) api_url = self._html_search_regex(r'asset-details-url\s*=\s*["\'](?P<api_url>[^"\']+)', webpage, 'api_url') asset_id = self._search_regex(r'assets/([^?/]+)', api_url, 'asset_id') api_response = self._download_json(api_url.replace(asset_id, f'{asset_id}.json'), asset_id) return { 'id': asset_id, 'url': api_response['resourceUrl'], 'ext': {'video': 'mp4', 'audio': 'mp3'}.get(api_response.get('type')), 'title': api_response.get('title'), 'description': api_response.get('description'), 'upload_date': unified_strdate(api_response.get('dateOfBroadcast')), 'duration': api_response.get('duration'), 'thumbnail': api_response.get('resourceThumbnail'), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/pornovoisines.py
yt_dlp/extractor/pornovoisines.py
from .common import InfoExtractor from ..utils import ( float_or_none, int_or_none, unified_strdate, ) class PornoVoisinesIE(InfoExtractor): _WORKING = False _VALID_URL = r'https?://(?:www\.)?pornovoisines\.com/videos/show/(?P<id>\d+)/(?P<display_id>[^/.]+)' _TEST = { 'url': 'http://www.pornovoisines.com/videos/show/919/recherche-appartement.html', 'md5': '6f8aca6a058592ab49fe701c8ba8317b', 'info_dict': { 'id': '919', 'display_id': 'recherche-appartement', 'ext': 'mp4', 'title': 'Recherche appartement', 'description': 'md5:fe10cb92ae2dd3ed94bb4080d11ff493', 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20140925', 'duration': 120, 'view_count': int, 'average_rating': float, 'categories': ['Débutante', 'Débutantes', 'Scénario', 'Sodomie'], 'age_limit': 18, 'subtitles': { 'fr': [{ 'ext': 'vtt', }], }, }, } def _real_extract(self, url): mobj = self._match_valid_url(url) video_id = mobj.group('id') display_id = mobj.group('display_id') settings_url = self._download_json( f'http://www.pornovoisines.com/api/video/{video_id}/getsettingsurl/', video_id, note='Getting settings URL')['video_settings_url'] settings = self._download_json(settings_url, video_id)['data'] formats = [] for kind, data in settings['variants'].items(): if kind == 'HLS': formats.extend(self._extract_m3u8_formats( data, video_id, ext='mp4', entry_protocol='m3u8_native', m3u8_id='hls')) elif kind == 'MP4': for item in data: formats.append({ 'url': item['url'], 'height': item.get('height'), 'bitrate': item.get('bitrate'), }) webpage = self._download_webpage(url, video_id) title = self._og_search_title(webpage) description = self._og_search_description(webpage) # The webpage has a bug - there's no space between "thumb" and src= thumbnail = self._html_search_regex( r'<img[^>]+class=([\'"])thumb\1[^>]*src=([\'"])(?P<url>[^"]+)\2', webpage, 'thumbnail', fatal=False, group='url') upload_date = unified_strdate(self._search_regex( r'Le\s*<b>([\d/]+)', webpage, 'upload date', fatal=False)) duration = 
settings.get('main', {}).get('duration') view_count = int_or_none(self._search_regex( r'(\d+) vues', webpage, 'view count', fatal=False)) average_rating = self._search_regex( r'Note\s*:\s*(\d+(?:,\d+)?)', webpage, 'average rating', fatal=False) if average_rating: average_rating = float_or_none(average_rating.replace(',', '.')) categories = self._html_search_regex( r'(?s)Catégories\s*:\s*<b>(.+?)</b>', webpage, 'categories', fatal=False) if categories: categories = [category.strip() for category in categories.split(',')] subtitles = {'fr': [{ 'url': subtitle, } for subtitle in settings.get('main', {}).get('vtt_tracks', {}).values()]} return { 'id': video_id, 'display_id': display_id, 'formats': formats, 'title': title, 'description': description, 'thumbnail': thumbnail, 'upload_date': upload_date, 'duration': duration, 'view_count': view_count, 'average_rating': average_rating, 'categories': categories, 'age_limit': 18, 'subtitles': subtitles, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/toggo.py
yt_dlp/extractor/toggo.py
from .common import InfoExtractor from ..utils import int_or_none, parse_qs class ToggoIE(InfoExtractor): IE_NAME = 'toggo' _VALID_URL = r'https?://(?:www\.)?toggo\.de/(?:toggolino/)?[^/?#]+/(?:folge|video)/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://www.toggo.de/weihnachtsmann--co-kg/folge/ein-geschenk-fuer-zwei', 'info_dict': { 'id': 'VEP2977', 'ext': 'mp4', 'title': 'Ein Geschenk für zwei', 'display_id': 'ein-geschenk-fuer-zwei', 'thumbnail': r're:^https?://.*\.(?:jpg|png)', 'description': 'md5:b7715915bfa47824b4e4ad33fb5962f8', 'release_timestamp': 1637259179, 'series': 'Weihnachtsmann & Co. KG', 'season': 'Weihnachtsmann & Co. KG', 'season_number': 1, 'season_id': 'VST118', 'episode': 'Ein Geschenk für zwei', 'episode_number': 7, 'episode_id': 'VEP2977', 'timestamp': 1581935960, 'uploader_id': '6057955896001', 'upload_date': '20200217', }, 'params': {'skip_download': True}, }, { 'url': 'https://www.toggo.de/grizzy--die-lemminge/folge/ab-durch-die-wand-vogelfrei-rock\'n\'lemming', 'only_matching': True, }, { 'url': 'https://www.toggo.de/toggolino/paw-patrol/folge/der-wetter-zeppelin-der-chili-kochwettbewerb', 'only_matching': True, }, { 'url': 'https://www.toggo.de/toggolino/paw-patrol/video/paw-patrol-rettung-im-anflug', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) data = self._download_json( f'https://production-n.toggo.de/api/assetstore/vod/asset/{display_id}', display_id)['data'] brightcove_id = next( x['value'] for x in data['custom_fields'] if x.get('key') == 'video-cloud-id') info = self._downloader.get_info_extractor('BrightcoveNew').extract( f'http://players.brightcove.net/6057955896001/default_default/index.html?videoId={brightcove_id}') for f in info['formats']: if '/dash/live/cenc/' in f.get('fragment_base_url', ''): # Get hidden non-DRM format f['fragment_base_url'] = f['fragment_base_url'].replace('/cenc/', '/clear/') f['has_drm'] = False if '/fairplay/' in f.get('manifest_url', ''): f['has_drm'] = True 
thumbnails = [{ 'id': name, 'url': url, 'width': int_or_none(next(iter(parse_qs(url).get('width', [])), None)), } for name, url in (data.get('images') or {}).items()] return { **info, 'id': data.get('id'), 'display_id': display_id, 'title': data.get('title'), 'language': data.get('language'), 'thumbnails': thumbnails, 'description': data.get('description'), 'release_timestamp': data.get('earliest_start_date'), 'series': data.get('series_title'), 'season': data.get('season_title'), 'season_number': data.get('season_no'), 'season_id': data.get('season_id'), 'episode': data.get('title'), 'episode_number': data.get('episode_no'), 'episode_id': data.get('id'), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/rtvcplay.py
yt_dlp/extractor/rtvcplay.py
import re from .common import InfoExtractor from ..utils import ( ExtractorError, clean_html, determine_ext, float_or_none, int_or_none, js_to_json, mimetype2ext, traverse_obj, url_or_none, urljoin, ) class RTVCPlayBaseIE(InfoExtractor): _BASE_VALID_URL = r'https?://(?:www\.)?rtvcplay\.co' def _extract_player_config(self, webpage, video_id): return self._search_json( r'<script\b[^>]*>[^<]*(?:var|let|const)\s+config\s*=', re.sub(r'"\s*\+\s*"', '', webpage), 'player_config', video_id, transform_source=js_to_json) def _extract_formats_and_subtitles_player_config(self, player_config, video_id): formats, subtitles = [], {} for source in traverse_obj(player_config, ('sources', ..., lambda _, v: url_or_none(v['url']))): ext = mimetype2ext(source.get('mimetype'), default=determine_ext(source['url'])) if ext == 'm3u8': fmts, subs = self._extract_m3u8_formats_and_subtitles( source['url'], video_id, 'mp4', fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) else: formats.append({ 'url': source['url'], 'ext': ext, }) return formats, subtitles class RTVCPlayIE(RTVCPlayBaseIE): _VALID_URL = RTVCPlayBaseIE._BASE_VALID_URL + r'/(?P<category>(?!embed)[^/]+)/(?:[^?#]+/)?(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://www.rtvcplay.co/en-vivo/canal-institucional', 'info_dict': { 'id': 'canal-institucional', 'title': r're:^Canal Institucional', 'description': 'md5:eff9e548394175928059320c006031ea', 'thumbnail': r're:^https?://.*\.(?:jpg|png)', 'live_status': 'is_live', 'ext': 'mp4', }, 'params': { 'skip_download': 'Livestream', }, }, { 'url': 'https://www.rtvcplay.co/en-vivo/senal-colombia', 'info_dict': { 'id': 'senal-colombia', 'title': r're:^Señal Colombia', 'description': 'md5:799f16a401d97f40c33a2c6a3e2a507b', 'thumbnail': r're:^https?://.*\.(?:jpg|png)', 'live_status': 'is_live', 'ext': 'mp4', }, 'params': { 'skip_download': 'Livestream', }, }, { 'url': 'https://www.rtvcplay.co/en-vivo/radio-nacional', 'info_dict': { 'id': 'radio-nacional', 'title': 
r're:^Radio Nacional', 'description': 'md5:5de009bc6a9fa79d2a6cf0b73f977d53', 'thumbnail': r're:^https?://.*\.(?:jpg|png)', 'live_status': 'is_live', 'ext': 'mp4', }, 'params': { 'skip_download': 'Livestream', }, }, { 'url': 'https://www.rtvcplay.co/peliculas-ficcion/senoritas', 'md5': '1288ee6f6d1330d880f98bff2ed710a3', 'info_dict': { 'id': 'senoritas', 'title': 'Señoritas', 'description': 'md5:f095a2bb52cb6cf279daf6302f86fb32', 'thumbnail': r're:^https?://.*\.(?:jpg|png)', 'ext': 'mp4', }, }, { 'url': 'https://www.rtvcplay.co/competencias-basicas-ciudadanas-y-socioemocionales/profe-en-tu-casa/james-regresa-clases-28022022', 'md5': 'f040a7380a269ad633cf837384d5e9fc', 'info_dict': { 'id': 'james-regresa-clases-28022022', 'title': 'James regresa a clases - 28/02/2022', 'description': 'md5:c5dcdf757c7ab29305e8763c6007e675', 'ext': 'mp4', }, }, { 'url': 'https://www.rtvcplay.co/peliculas-documentales/llinas-el-cerebro-y-el-universo', 'info_dict': { 'id': 'llinas-el-cerebro-y-el-universo', 'title': 'Llinás, el cerebro y el universo', 'description': 'md5:add875bf2309bb52b3e8b9b06116d9b0', 'thumbnail': r're:^https?://.*\.(?:jpg|png)', }, 'playlist_mincount': 3, }, { 'url': 'https://www.rtvcplay.co/competencias-basicas-ciudadanas-y-socioemocionales/profe-en-tu-casa', 'info_dict': { 'id': 'profe-en-tu-casa', 'title': 'Profe en tu casa', 'description': 'md5:47dbe20e263194413b1db2a2805a4f2e', 'thumbnail': r're:^https?://.*\.(?:jpg|png)', }, 'playlist_mincount': 537, }, { 'url': 'https://www.rtvcplay.co/series-al-oido/relato-de-un-naufrago-una-travesia-del-periodismo-a-la-literatura', 'info_dict': { 'id': 'relato-de-un-naufrago-una-travesia-del-periodismo-a-la-literatura', 'title': 'Relato de un náufrago: una travesía del periodismo a la literatura', 'description': 'md5:6da28fdca4a5a568ea47ef65ef775603', 'thumbnail': r're:^https?://.*\.(?:jpg|png)', }, 'playlist_mincount': 5, }, { 'url': 'https://www.rtvcplay.co/series-al-oido/diez-versiones', 'info_dict': { 'id': 
'diez-versiones', 'title': 'Diez versiones', 'description': 'md5:997471ed971cb3fd8e41969457675306', 'thumbnail': r're:^https?://.*\.(?:jpg|png)', }, 'playlist_mincount': 20, }] def _real_extract(self, url): video_id, category = self._match_valid_url(url).group('id', 'category') webpage = self._download_webpage(url, video_id) hydration = self._search_json( r'window\.__RTVCPLAY_STATE__\s*=', webpage, 'hydration', video_id, transform_source=js_to_json)['content']['currentContent'] asset_id = traverse_obj(hydration, ('video', 'assetid')) if asset_id: hls_url = hydration['base_url_hls'].replace('[node:field_asset_id]', asset_id) else: hls_url = traverse_obj(hydration, ('channel', 'hls')) metadata = traverse_obj(hydration, { 'title': 'title', 'description': 'description', 'thumbnail': ((('channel', 'image', 'logo'), ('resource', 'image', 'cover_desktop')), 'path'), }, get_all=False) # Probably it's a program's page if not hls_url: seasons = traverse_obj( hydration, ('widgets', lambda _, y: y['type'] == 'seasonList', 'contents'), get_all=False) if not seasons: podcast_episodes = hydration.get('audios') if not podcast_episodes: raise ExtractorError('Could not find asset_id nor program playlist nor podcast episodes') return self.playlist_result([ self.url_result(episode['file'], url_transparent=True, **traverse_obj(episode, { 'title': 'title', 'description': ('description', {clean_html}), 'episode_number': ('chapter_number', {float_or_none}, {int_or_none}), 'season_number': ('season', {int_or_none}), })) for episode in podcast_episodes], video_id, **metadata) entries = [self.url_result( urljoin(url, episode['slug']), url_transparent=True, **traverse_obj(season, { 'season': 'title', 'season_number': ('season', {int_or_none}), }), **traverse_obj(episode, { 'title': 'title', 'thumbnail': ('image', 'cover', 'path'), 'episode_number': ('chapter_number', {int_or_none}), })) for season in seasons for episode in traverse_obj(season, ('contents', ...))] return 
self.playlist_result(entries, video_id, **metadata) formats, subtitles = self._extract_m3u8_formats_and_subtitles(hls_url, video_id, 'mp4') return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, 'is_live': category == 'en-vivo', **metadata, } class RTVCPlayEmbedIE(RTVCPlayBaseIE): _VALID_URL = RTVCPlayBaseIE._BASE_VALID_URL + r'/embed/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://www.rtvcplay.co/embed/72b0e699-248b-4929-a4a8-3782702fa7f9', 'md5': 'ed529aeaee7aa2a72afe91ac7d1177a8', 'info_dict': { 'id': '72b0e699-248b-4929-a4a8-3782702fa7f9', 'title': 'Tráiler: Señoritas', 'thumbnail': r're:^https?://.*\.(?:jpg|png)', 'ext': 'mp4', }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) player_config = self._extract_player_config(webpage, video_id) formats, subtitles = self._extract_formats_and_subtitles_player_config(player_config, video_id) asset_id = traverse_obj(player_config, ('rtvcplay', 'assetid')) metadata = {} if not asset_id else self._download_json( f'https://cms.rtvcplay.co/api/v1/video/asset-id/{asset_id}', video_id, fatal=False) return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, **traverse_obj(metadata, { 'title': 'title', 'description': 'description', 'thumbnail': ('image', ..., 'thumbnail', 'path'), }, get_all=False), } class RTVCKalturaIE(RTVCPlayBaseIE): _VALID_URL = r'https?://media\.rtvc\.gov\.co/kalturartvc/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://media.rtvc.gov.co/kalturartvc/indexSC.html', 'info_dict': { 'id': 'indexSC', 'title': r're:^Señal Colombia', 'description': 'md5:799f16a401d97f40c33a2c6a3e2a507b', 'thumbnail': r're:^https?://.*\.(?:jpg|png)', 'live_status': 'is_live', 'ext': 'mp4', }, 'params': { 'skip_download': 'Livestream', }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) player_config = self._extract_player_config(webpage, video_id) formats, subtitles = 
self._extract_formats_and_subtitles_player_config(player_config, video_id) channel_id = traverse_obj(player_config, ('rtvcplay', 'channelId')) metadata = {} if not channel_id else self._download_json( f'https://cms.rtvcplay.co/api/v1/taxonomy_term/streaming/{channel_id}', video_id, fatal=False) fmts, subs = self._extract_m3u8_formats_and_subtitles( traverse_obj(metadata, ('channel', 'hls')), video_id, 'mp4', fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, 'is_live': True, **traverse_obj(metadata, { 'title': 'title', 'description': 'description', 'thumbnail': ('channel', 'image', 'logo', 'path'), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/zenporn.py
yt_dlp/extractor/zenporn.py
import base64 import binascii from .common import InfoExtractor from ..utils import ExtractorError, determine_ext, unified_strdate, url_or_none from ..utils.traversal import traverse_obj class ZenPornIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?zenporn\.com/video/(?P<id>\d+)' _TESTS = [{ 'url': 'https://zenporn.com/video/15627016/desi-bhabi-ki-chudai', 'md5': '07bd576b5920714d74975c054ca28dee', 'info_dict': { 'id': '9563799', 'display_id': '15627016', 'ext': 'mp4', 'title': 'md5:669eafd3bbc688aa29770553b738ada2', 'description': '', 'thumbnail': 'md5:2fc044a19bab450fef8f1931e7920a18', 'upload_date': '20230925', 'uploader': 'md5:9fae59847f1f58d1da8f2772016c12f3', 'age_limit': 18, }, }, { 'url': 'https://zenporn.com/video/15570701', 'md5': 'acba0d080d692664fcc8c4e5502b1a67', 'info_dict': { 'id': '2297875', 'display_id': '15570701', 'ext': 'mp4', 'title': 'md5:47aebdf87644ec91e8b1a844bc832451', 'description': '', 'thumbnail': 'https://mstn.nv7s.com/contents/videos_screenshots/2297000/2297875/480x270/1.jpg', 'upload_date': '20230921', 'uploader': 'Lois Clarke', 'age_limit': 18, }, }, { 'url': 'https://zenporn.com/video/8531117/amateur-students-having-a-fuck-fest-at-club/', 'md5': '67411256aa9451449e4d29f3be525541', 'info_dict': { 'id': '12791908', 'display_id': '8531117', 'ext': 'mp4', 'title': 'Amateur students having a fuck fest at club', 'description': '', 'thumbnail': 'https://tn.txxx.tube/contents/videos_screenshots/12791000/12791908/288x162/1.jpg', 'upload_date': '20191005', 'uploader': 'Jackopenass', 'age_limit': 18, }, }, { 'url': 'https://zenporn.com/video/15872038/glad-you-came/', 'md5': '296ccab437f5bac6099433768449d8e1', 'info_dict': { 'id': '111585', 'display_id': '15872038', 'ext': 'mp4', 'title': 'Glad You Came', 'description': '', 'thumbnail': 'https://vpim.m3pd.com/contents/videos_screenshots/111000/111585/480x270/1.jpg', 'upload_date': '20231024', 'uploader': 'Martin Rudenko', 'age_limit': 18, }, }] def _gen_info_url(self, ext_domain, extr_id, 
lifetime=86400): """ This function is a reverse engineering from the website javascript """ result = '/'.join(str(int(extr_id) // i * i) for i in (1_000_000, 1_000, 1)) return f'https://{ext_domain}/api/json/video/{lifetime}/{result}.json' @staticmethod def _decode_video_url(encoded_url): """ This function is a reverse engineering from the website javascript """ # Replace lookalike characters and standardize map translation = str.maketrans('АВСЕМ.,~', 'ABCEM+/=') try: return base64.b64decode(encoded_url.translate(translation), validate=True).decode() except (binascii.Error, ValueError): return None def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) ext_domain, video_id = self._search_regex( r'https://(?P<ext_domain>[\w.-]+\.\w{3})/embed/(?P<extr_id>\d+)/', webpage, 'embed info', group=('ext_domain', 'extr_id')) info_json = self._download_json( self._gen_info_url(ext_domain, video_id), video_id, fatal=False) video_json = self._download_json( f'https://{ext_domain}/api/videofile.php', video_id, query={ 'video_id': video_id, 'lifetime': 8640000, }, note='Downloading video file JSON', errnote='Failed to download video file JSON') decoded_url = self._decode_video_url(video_json[0]['video_url']) if not decoded_url: raise ExtractorError('Unable to decode the video url') return { 'id': video_id, 'display_id': display_id, 'ext': traverse_obj(video_json, (0, 'format', {determine_ext})), 'url': f'https://{ext_domain}{decoded_url}', 'age_limit': 18, **traverse_obj(info_json, ('video', { 'title': ('title', {str}), 'description': ('description', {str}), 'thumbnail': ('thumb', {url_or_none}), 'upload_date': ('post_date', {unified_strdate}), 'uploader': ('user', 'username', {str}), })), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ard.py
yt_dlp/extractor/ard.py
import functools import json import re from .common import InfoExtractor from ..utils import ( OnDemandPagedList, bug_reports_message, determine_ext, int_or_none, join_nonempty, jwt_decode_hs256, make_archive_id, parse_duration, parse_iso8601, remove_start, str_or_none, unified_strdate, update_url, update_url_query, url_or_none, xpath_text, ) from ..utils.traversal import traverse_obj, value class ARDMediathekBaseIE(InfoExtractor): _GEO_COUNTRIES = ['DE'] def _extract_media_info(self, media_info_url, webpage, video_id): media_info = self._download_json( media_info_url, video_id, 'Downloading media JSON') return self._parse_media_info(media_info, video_id, '"fsk"' in webpage) def _parse_media_info(self, media_info, video_id, fsk): formats = self._extract_formats(media_info, video_id) if not formats: if fsk: self.raise_no_formats( 'This video is only available after 20:00', expected=True) elif media_info.get('_geoblocked'): self.raise_geo_restricted( 'This video is not available due to geoblocking', countries=self._GEO_COUNTRIES, metadata_available=True) subtitles = {} subtitle_url = media_info.get('_subtitleUrl') if subtitle_url: subtitles['de'] = [{ 'ext': 'ttml', 'url': subtitle_url, }, { 'ext': 'vtt', 'url': subtitle_url.replace('/ebutt/', '/webvtt/') + '.vtt', }] return { 'id': video_id, 'duration': int_or_none(media_info.get('_duration')), 'thumbnail': media_info.get('_previewImage'), 'is_live': media_info.get('_isLive') is True, 'formats': formats, 'subtitles': subtitles, } def _extract_formats(self, media_info, video_id): type_ = media_info.get('_type') media_array = media_info.get('_mediaArray', []) formats = [] for num, media in enumerate(media_array): for stream in media.get('_mediaStreamArray', []): stream_urls = stream.get('_stream') if not stream_urls: continue if not isinstance(stream_urls, list): stream_urls = [stream_urls] quality = stream.get('_quality') server = stream.get('_server') for stream_url in stream_urls: if not url_or_none(stream_url): 
continue ext = determine_ext(stream_url) if quality != 'auto' and ext in ('f4m', 'm3u8'): continue if ext == 'f4m': formats.extend(self._extract_f4m_formats( update_url_query(stream_url, { 'hdcore': '3.1.1', 'plugin': 'aasp-3.1.1.69.124', }), video_id, f4m_id='hds', fatal=False)) elif ext == 'm3u8': formats.extend(self._extract_m3u8_formats( stream_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) else: if server and server.startswith('rtmp'): f = { 'url': server, 'play_path': stream_url, 'format_id': f'a{num}-rtmp-{quality}', } else: f = { 'url': stream_url, 'format_id': f'a{num}-{ext}-{quality}', } m = re.search( r'_(?P<width>\d+)x(?P<height>\d+)\.mp4$', stream_url) if m: f.update({ 'width': int(m.group('width')), 'height': int(m.group('height')), }) if type_ == 'audio': f['vcodec'] = 'none' formats.append(f) return formats class ARDIE(InfoExtractor): _VALID_URL = r'(?P<mainurl>https?://(?:www\.)?daserste\.de/(?:[^/?#&]+/)+(?P<id>[^/?#&]+))\.html' _TESTS = [{ # available till 7.12.2023 'url': 'https://www.daserste.de/information/talk/maischberger/videos/maischberger-video-424.html', 'md5': '94812e6438488fb923c361a44469614b', 'info_dict': { 'id': 'maischberger-video-424', 'display_id': 'maischberger-video-424', 'ext': 'mp4', 'duration': 4452.0, 'title': 'maischberger am 07.12.2022', 'upload_date': '20221207', 'thumbnail': r're:^https?://.*\.jpg$', }, }, { 'url': 'https://www.daserste.de/information/politik-weltgeschehen/morgenmagazin/videosextern/dominik-kahun-aus-der-nhl-direkt-zur-weltmeisterschaft-100.html', 'only_matching': True, }, { 'url': 'https://www.daserste.de/information/nachrichten-wetter/tagesthemen/videosextern/tagesthemen-17736.html', 'only_matching': True, }, { 'url': 'https://www.daserste.de/unterhaltung/serie/in-aller-freundschaft-die-jungen-aerzte/videos/diversity-tag-sanam-afrashteh100.html', 'only_matching': True, }, { 'url': 
'http://www.daserste.de/information/reportage-dokumentation/dokus/videos/die-story-im-ersten-mission-unter-falscher-flagge-100.html', 'only_matching': True, }, { 'url': 'https://www.daserste.de/unterhaltung/serie/in-aller-freundschaft-die-jungen-aerzte/Drehpause-100.html', 'only_matching': True, }, { 'url': 'https://www.daserste.de/unterhaltung/film/filmmittwoch-im-ersten/videos/making-ofwendezeit-video-100.html', 'only_matching': True, }] def _real_extract(self, url): mobj = self._match_valid_url(url) display_id = mobj.group('id') player_url = mobj.group('mainurl') + '~playerXml.xml' doc = self._download_xml(player_url, display_id) video_node = doc.find('./video') upload_date = unified_strdate(xpath_text( video_node, './broadcastDate')) thumbnail = xpath_text(video_node, './/teaserImage//variant/url') formats = [] for a in video_node.findall('.//asset'): file_name = xpath_text(a, './fileName', default=None) if not file_name: continue format_type = a.attrib.get('type') format_url = url_or_none(file_name) if format_url: ext = determine_ext(file_name) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( format_url, display_id, 'mp4', entry_protocol='m3u8_native', m3u8_id=format_type or 'hls', fatal=False)) continue elif ext == 'f4m': formats.extend(self._extract_f4m_formats( update_url_query(format_url, {'hdcore': '3.7.0'}), display_id, f4m_id=format_type or 'hds', fatal=False)) continue f = { 'format_id': format_type, 'width': int_or_none(xpath_text(a, './frameWidth')), 'height': int_or_none(xpath_text(a, './frameHeight')), 'vbr': int_or_none(xpath_text(a, './bitrateVideo')), 'abr': int_or_none(xpath_text(a, './bitrateAudio')), 'vcodec': xpath_text(a, './codecVideo'), 'tbr': int_or_none(xpath_text(a, './totalBitrate')), } server_prefix = xpath_text(a, './serverPrefix', default=None) if server_prefix: f.update({ 'url': server_prefix, 'playpath': file_name, }) else: if not format_url: continue f['url'] = format_url formats.append(f) _SUB_FORMATS = ( 
('./dataTimedText', 'ttml'), ('./dataTimedTextNoOffset', 'ttml'), ('./dataTimedTextVtt', 'vtt'), ) subtitles = {} for subsel, subext in _SUB_FORMATS: for node in video_node.findall(subsel): subtitles.setdefault('de', []).append({ 'url': node.attrib['url'], 'ext': subext, }) return { 'id': xpath_text(video_node, './videoId', default=display_id), 'formats': formats, 'subtitles': subtitles, 'display_id': display_id, 'title': video_node.find('./title').text, 'duration': parse_duration(video_node.find('./duration').text), 'upload_date': upload_date, 'thumbnail': thumbnail, } class ARDBetaMediathekIE(InfoExtractor): IE_NAME = 'ARDMediathek' _VALID_URL = r'''(?x)https?:// (?:(?:beta|www)\.)?ardmediathek\.de/ (?:[^/]+/)? (?:player|live|video)/ (?:[^?#]+/)? (?P<id>[a-zA-Z0-9]+) /?(?:[?#]|$)''' _GEO_COUNTRIES = ['DE'] _TOKEN_URL = 'https://sso.ardmediathek.de/sso/token' _TESTS = [{ 'url': 'https://www.ardmediathek.de/video/filme-im-mdr/liebe-auf-vier-pfoten/mdr-fernsehen/Y3JpZDovL21kci5kZS9zZW5kdW5nLzI4MjA0MC80MjIwOTEtNDAyNTM0', 'md5': 'b6e8ab03f2bcc6e1f9e6cef25fcc03c4', 'info_dict': { 'display_id': 'Y3JpZDovL21kci5kZS9zZW5kdW5nLzI4MjA0MC80MjIwOTEtNDAyNTM0', 'id': '12939099', 'title': 'Liebe auf vier Pfoten', 'description': r're:^Claudia Schmitt, Anwältin in Salzburg', 'duration': 5222, 'thumbnail': 'https://api.ardmediathek.de/image-service/images/urn:ard:image:aee7cbf8f06de976?w=960&ch=ae4d0f2ee47d8b9b', 'timestamp': 1701343800, 'upload_date': '20231130', 'ext': 'mp4', 'episode': 'Liebe auf vier Pfoten', 'series': 'Filme im MDR', 'age_limit': 0, 'channel': 'MDR', '_old_archive_ids': ['ardbetamediathek Y3JpZDovL21kci5kZS9zZW5kdW5nLzI4MjA0MC80MjIwOTEtNDAyNTM0'], }, }, { 'url': 'https://www.ardmediathek.de/mdr/video/die-robuste-roswita/Y3JpZDovL21kci5kZS9iZWl0cmFnL2Ntcy84MWMxN2MzZC0wMjkxLTRmMzUtODk4ZS0wYzhlOWQxODE2NGI/', 'md5': 'a1dc75a39c61601b980648f7c9f9f71d', 'info_dict': { 'display_id': 'die-robuste-roswita', 'id': '78566716', 'title': 'Die robuste Roswita', 
'description': r're:^Der Mord.*totgeglaubte Ehefrau Roswita', 'duration': 5316, 'thumbnail': 'https://img.ardmediathek.de/standard/00/78/56/67/84/575672121/16x9/960?mandant=ard', 'timestamp': 1596658200, 'upload_date': '20200805', 'ext': 'mp4', }, 'skip': 'Error', }, { 'url': 'https://www.ardmediathek.de/video/tagesschau-oder-tagesschau-20-00-uhr/das-erste/Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhZ2Vzc2NoYXUvZmM4ZDUxMjgtOTE0ZC00Y2MzLTgzNzAtNDZkNGNiZWJkOTll', 'md5': '1e73ded21cb79bac065117e80c81dc88', 'info_dict': { 'id': '10049223', 'ext': 'mp4', 'title': 'tagesschau, 20:00 Uhr', 'timestamp': 1636398000, 'description': 'md5:39578c7b96c9fe50afdf5674ad985e6b', 'upload_date': '20211108', 'display_id': 'Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhZ2Vzc2NoYXUvZmM4ZDUxMjgtOTE0ZC00Y2MzLTgzNzAtNDZkNGNiZWJkOTll', 'duration': 915, 'episode': 'tagesschau, 20:00 Uhr', 'series': 'tagesschau', 'thumbnail': 'https://api.ardmediathek.de/image-service/images/urn:ard:image:fbb21142783b0a49?w=960&ch=ee69108ae344f678', 'channel': 'ARD-Aktuell', '_old_archive_ids': ['ardbetamediathek Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhZ2Vzc2NoYXUvZmM4ZDUxMjgtOTE0ZC00Y2MzLTgzNzAtNDZkNGNiZWJkOTll'], }, }, { 'url': 'https://www.ardmediathek.de/video/7-tage/7-tage-unter-harten-jungs/hr-fernsehen/N2I2YmM5MzgtNWFlOS00ZGFlLTg2NzMtYzNjM2JlNjk4MDg3', 'md5': 'c428b9effff18ff624d4f903bda26315', 'info_dict': { 'id': '94834686', 'ext': 'mp4', 'duration': 2670, 'episode': '7 Tage ... unter harten Jungs', 'description': 'md5:0f215470dcd2b02f59f4bd10c963f072', 'upload_date': '20231005', 'timestamp': 1696491171, 'display_id': 'N2I2YmM5MzgtNWFlOS00ZGFlLTg2NzMtYzNjM2JlNjk4MDg3', 'series': '7 Tage ...', 'channel': 'HR', 'thumbnail': 'https://api.ardmediathek.de/image-service/images/urn:ard:image:430c86d233afa42d?w=960&ch=fa32ba69bc87989a', 'title': '7 Tage ... 
unter harten Jungs', '_old_archive_ids': ['ardbetamediathek N2I2YmM5MzgtNWFlOS00ZGFlLTg2NzMtYzNjM2JlNjk4MDg3'], }, }, { 'url': 'https://www.ardmediathek.de/video/lokalzeit-aus-duesseldorf/lokalzeit-aus-duesseldorf-oder-31-10-2024/wdr-duesseldorf/Y3JpZDovL3dkci5kZS9CZWl0cmFnLXNvcGhvcmEtOWFkMTc0ZWMtMDA5ZS00ZDEwLWFjYjctMGNmNTdhNzVmNzUz', 'info_dict': { 'id': '13847165', 'chapters': 'count:8', 'ext': 'mp4', 'channel': 'WDR', 'display_id': 'Y3JpZDovL3dkci5kZS9CZWl0cmFnLXNvcGhvcmEtOWFkMTc0ZWMtMDA5ZS00ZDEwLWFjYjctMGNmNTdhNzVmNzUz', 'episode': 'Lokalzeit aus Düsseldorf | 31.10.2024', 'series': 'Lokalzeit aus Düsseldorf', 'thumbnail': 'https://api.ardmediathek.de/image-service/images/urn:ard:image:f02ec9bd9b7bd5f6?w=960&ch=612491dcd5e09b0c', 'title': 'Lokalzeit aus Düsseldorf | 31.10.2024', 'upload_date': '20241031', 'timestamp': 1730399400, 'description': 'md5:12db30b3b706314efe3778b8df1a7058', 'duration': 1759, '_old_archive_ids': ['ardbetamediathek Y3JpZDovL3dkci5kZS9CZWl0cmFnLXNvcGhvcmEtOWFkMTc0ZWMtMDA5ZS00ZDEwLWFjYjctMGNmNTdhNzVmNzUz'], }, }, { 'url': 'https://beta.ardmediathek.de/ard/video/Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhdG9ydC9mYmM4NGM1NC0xNzU4LTRmZGYtYWFhZS0wYzcyZTIxNGEyMDE', 'only_matching': True, }, { 'url': 'https://ardmediathek.de/ard/video/saartalk/saartalk-gesellschaftsgift-haltung-gegen-hass/sr-fernsehen/Y3JpZDovL3NyLW9ubGluZS5kZS9TVF84MTY4MA/', 'only_matching': True, }, { 'url': 'https://www.ardmediathek.de/ard/video/trailer/private-eyes-s01-e01/one/Y3JpZDovL3dkci5kZS9CZWl0cmFnLTE1MTgwYzczLWNiMTEtNGNkMS1iMjUyLTg5MGYzOWQxZmQ1YQ/', 'only_matching': True, }, { 'url': 'https://www.ardmediathek.de/ard/player/Y3JpZDovL3N3ci5kZS9hZXgvbzEwNzE5MTU/', 'only_matching': True, }, { 'url': 'https://www.ardmediathek.de/swr/live/Y3JpZDovL3N3ci5kZS8xMzQ4MTA0Mg', 'only_matching': True, }, { 'url': 'https://www.ardmediathek.de/video/coronavirus-update-ndr-info/astrazeneca-kurz-lockdown-und-pims-syndrom-81/ndr/Y3JpZDovL25kci5kZS84NzE0M2FjNi0wMWEwLTQ5ODEtOTE5NS1mOGZhNzdhOTFmOTI/', 
'only_matching': True, }] def _extract_episode_info(self, title): patterns = [ # Pattern for title like "Homo sapiens (S06/E07) - Originalversion" # from: https://www.ardmediathek.de/one/sendung/doctor-who/Y3JpZDovL3dkci5kZS9vbmUvZG9jdG9yIHdobw r'.*(?P<ep_info> \(S(?P<season_number>\d+)/E(?P<episode_number>\d+)\)).*', # E.g.: title="Fritjof aus Norwegen (2) (AD)" # from: https://www.ardmediathek.de/ard/sammlung/der-krieg-und-ich/68cMkqJdllm639Skj4c7sS/ r'.*(?P<ep_info> \((?:Folge |Teil )?(?P<episode_number>\d+)(?:/\d+)?\)).*', r'.*(?P<ep_info>Folge (?P<episode_number>\d+)(?:\:| -|) )\"(?P<episode>.+)\".*', # E.g.: title="Folge 25/42: Symmetrie" # from: https://www.ardmediathek.de/ard/video/grips-mathe/folge-25-42-symmetrie/ard-alpha/Y3JpZDovL2JyLmRlL3ZpZGVvLzMyYzI0ZjczLWQ1N2MtNDAxNC05ZmZhLTFjYzRkZDA5NDU5OQ/ # E.g.: title="Folge 1063 - Vertrauen" # from: https://www.ardmediathek.de/ard/sendung/die-fallers/Y3JpZDovL3N3ci5kZS8yMzAyMDQ4/ r'.*(?P<ep_info>Folge (?P<episode_number>\d+)(?:/\d+)?(?:\:| -|) ).*', # As a fallback use the full title r'(?P<title>.*)', ] return traverse_obj(patterns, (..., {functools.partial(re.match, string=title)}, { 'season_number': ('season_number', {int_or_none}), 'episode_number': ('episode_number', {int_or_none}), 'episode': (( ('episode', {str_or_none}), ('ep_info', {lambda x: title.replace(x, '')}), ('title', {str}), ), {str.strip}), }), get_all=False) def _real_extract(self, url): display_id = self._match_id(url) query = {'embedded': 'false', 'mcV6': 'true'} headers = {} if self._get_cookies(self._TOKEN_URL).get('ams'): token = self._download_json( self._TOKEN_URL, display_id, 'Fetching token for age verification', 'Unable to fetch age verification token', fatal=False) id_token = traverse_obj(token, ('idToken', {str})) decoded_token = traverse_obj(id_token, ({jwt_decode_hs256}, {dict})) user_id = traverse_obj(decoded_token, (('user_id', 'sub'), {str}), get_all=False) if not user_id: self.report_warning('Unable to extract token, 
continuing without authentication') else: headers['x-authorization'] = f'Bearer {id_token}' query['userId'] = user_id if decoded_token.get('age_rating') != 18: self.report_warning('Account is not verified as 18+; video may be unavailable') page_data = self._download_json( f'https://api.ardmediathek.de/page-gateway/pages/ard/item/{display_id}', display_id, query=query, headers=headers) # For user convenience we use the old contentId instead of the longer crid # Ref: https://github.com/yt-dlp/yt-dlp/issues/8731#issuecomment-1874398283 old_id = traverse_obj(page_data, ('tracking', 'atiCustomVars', 'contentId', {int})) if old_id is not None: video_id = str(old_id) archive_ids = [make_archive_id(ARDBetaMediathekIE, display_id)] else: self.report_warning(f'Could not extract contentId{bug_reports_message()}') video_id = display_id archive_ids = None player_data = traverse_obj( page_data, ('widgets', lambda _, v: v['type'] in ('player_ondemand', 'player_live'), {dict}), get_all=False) is_live = player_data.get('type') == 'player_live' media_data = traverse_obj(player_data, ('mediaCollection', 'embedded', {dict})) if player_data.get('blockedByFsk'): self.raise_login_required('This video is only available for age verified users or after 22:00') formats = [] subtitles = {} for stream in traverse_obj(media_data, ('streams', ..., {dict})): kind = stream.get('kind') # Prioritize main stream over sign language and others preference = 1 if kind == 'main' else None for media in traverse_obj(stream, ('media', lambda _, v: url_or_none(v['url']))): media_url = media['url'] audio_kind = traverse_obj(media, ( 'audios', 0, 'kind', {str}), default='').replace('standard', '') lang_code = traverse_obj(media, ('audios', 0, 'languageCode', {str})) or 'deu' lang = join_nonempty(lang_code, audio_kind) language_preference = 10 if lang == 'deu' else -10 if determine_ext(media_url) == 'm3u8': fmts, subs = self._extract_m3u8_formats_and_subtitles( media_url, video_id, m3u8_id=f'hls-{kind}', 
preference=preference, fatal=False, live=is_live) for f in fmts: f['language'] = lang f['language_preference'] = language_preference formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) else: formats.append({ 'url': media_url, 'format_id': f'http-{kind}', 'preference': preference, 'language': lang, 'language_preference': language_preference, **traverse_obj(media, { 'format_note': ('forcedLabel', {str}), 'width': ('maxHResolutionPx', {int_or_none}), 'height': ('maxVResolutionPx', {int_or_none}), 'vcodec': ('videoCodec', {str}), }), }) for sub in traverse_obj(media_data, ('subtitles', ..., {dict})): for sources in traverse_obj(sub, ('sources', lambda _, v: url_or_none(v['url']))): subtitles.setdefault(sub.get('languageCode') or 'deu', []).append({ 'url': sources['url'], 'ext': {'webvtt': 'vtt', 'ebutt': 'ttml'}.get(sources.get('kind')), }) age_limit = traverse_obj(page_data, ('fskRating', {lambda x: remove_start(x, 'FSK')}, {int_or_none})) return { 'id': video_id, 'display_id': display_id, 'formats': formats, 'subtitles': subtitles, 'is_live': is_live, 'age_limit': age_limit, **traverse_obj(media_data, { 'chapters': ('pluginData', 'jumpmarks@all', 'chapterArray', lambda _, v: int_or_none(v['chapterTime']), { 'start_time': ('chapterTime', {int_or_none}), 'title': ('chapterTitle', {str}), }), }), **traverse_obj(media_data, ('meta', { 'title': 'title', 'description': 'synopsis', 'timestamp': ('broadcastedOnDateTime', {parse_iso8601}), 'series': 'seriesTitle', 'thumbnail': ('images', 0, 'url', {url_or_none}), 'duration': ('durationSeconds', {int_or_none}), 'channel': 'clipSourceName', })), **self._extract_episode_info(page_data.get('title')), '_old_archive_ids': archive_ids, } class ARDMediathekCollectionIE(InfoExtractor): _VALID_URL = r'''(?x)https?:// (?:(?:beta|www)\.)?ardmediathek\.de/ (?:[^/?#]+/)? (?P<playlist>sendung|serie|sammlung)/ (?:(?P<display_id>[^?#]+?)/)? 
(?P<id>[a-zA-Z0-9]+) (?:/(?P<season>\d+)(?:/(?P<version>OV|AD))?)?/?(?:[?#]|$)''' _GEO_COUNTRIES = ['DE'] _TESTS = [{ 'url': 'https://www.ardmediathek.de/serie/quiz/staffel-1-originalversion/Y3JpZDovL3dkci5kZS9vbmUvcXVpeg/1/OV', 'info_dict': { 'id': 'Y3JpZDovL3dkci5kZS9vbmUvcXVpeg_1_OV', 'display_id': 'quiz/staffel-1-originalversion', 'title': 'Staffel 1 Originalversion', }, 'playlist_count': 3, }, { 'url': 'https://www.ardmediathek.de/serie/babylon-berlin/staffel-4-mit-audiodeskription/Y3JpZDovL2Rhc2Vyc3RlLmRlL2JhYnlsb24tYmVybGlu/4/AD', 'info_dict': { 'id': 'Y3JpZDovL2Rhc2Vyc3RlLmRlL2JhYnlsb24tYmVybGlu_4_AD', 'display_id': 'babylon-berlin/staffel-4-mit-audiodeskription', 'title': 'Staffel 4 mit Audiodeskription', }, 'playlist_count': 12, }, { 'url': 'https://www.ardmediathek.de/serie/babylon-berlin/staffel-1/Y3JpZDovL2Rhc2Vyc3RlLmRlL2JhYnlsb24tYmVybGlu/1/', 'info_dict': { 'id': 'Y3JpZDovL2Rhc2Vyc3RlLmRlL2JhYnlsb24tYmVybGlu_1', 'display_id': 'babylon-berlin/staffel-1', 'title': 'Staffel 1', }, 'playlist_count': 8, }, { 'url': 'https://www.ardmediathek.de/sendung/tatort/Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhdG9ydA', 'info_dict': { 'id': 'Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhdG9ydA', 'display_id': 'tatort', 'title': 'Tatort', }, 'playlist_mincount': 500, }, { 'url': 'https://www.ardmediathek.de/sammlung/die-kirche-bleibt-im-dorf/5eOHzt8XB2sqeFXbIoJlg2', 'info_dict': { 'id': '5eOHzt8XB2sqeFXbIoJlg2', 'display_id': 'die-kirche-bleibt-im-dorf', 'title': 'Die Kirche bleibt im Dorf', 'description': 'Die Kirche bleibt im Dorf', }, 'playlist_count': 4, }, { # playlist of type 'sendung' 'url': 'https://www.ardmediathek.de/ard/sendung/doctor-who/Y3JpZDovL3dkci5kZS9vbmUvZG9jdG9yIHdobw/', 'only_matching': True, }, { # playlist of type 'serie' 'url': 'https://www.ardmediathek.de/serie/nachtstreife/staffel-1/Y3JpZDovL3N3ci5kZS9zZGIvc3RJZC8xMjQy/1', 'only_matching': True, }, { # playlist of type 'sammlung' 'url': 'https://www.ardmediathek.de/ard/sammlung/team-muenster/5JpTzLSbWUAK8184IOvEir/', 
'only_matching': True, }] _PAGE_SIZE = 100 def _real_extract(self, url): playlist_id, display_id, playlist_type, season_number, version = self._match_valid_url(url).group( 'id', 'display_id', 'playlist', 'season', 'version') def call_api(page_num): api_path = 'compilations/ard' if playlist_type == 'sammlung' else 'widgets/ard/asset' return self._download_json( f'https://api.ardmediathek.de/page-gateway/{api_path}/{playlist_id}', playlist_id, f'Downloading playlist page {page_num}', query={ 'pageNumber': page_num, 'pageSize': self._PAGE_SIZE, **({ 'seasoned': 'true', 'seasonNumber': season_number, 'withOriginalversion': 'true' if version == 'OV' else 'false', 'withAudiodescription': 'true' if version == 'AD' else 'false', } if season_number else {}), }) def fetch_page(page_num): for item in traverse_obj(call_api(page_num), ('teasers', ..., {dict})): item_id = traverse_obj(item, ('links', 'target', ('urlId', 'id')), 'id', get_all=False) if not item_id or item_id == playlist_id: continue item_mode = 'sammlung' if item.get('type') == 'compilation' else 'video' yield self.url_result( f'https://www.ardmediathek.de/{item_mode}/{item_id}', ie=(ARDMediathekCollectionIE if item_mode == 'sammlung' else ARDBetaMediathekIE), **traverse_obj(item, { 'id': ('id', {str}), 'title': ('longTitle', {str}), 'duration': ('duration', {int_or_none}), 'timestamp': ('broadcastedOn', {parse_iso8601}), })) page_data = call_api(0) full_id = join_nonempty(playlist_id, season_number, version, delim='_') return self.playlist_result( OnDemandPagedList(fetch_page, self._PAGE_SIZE), full_id, display_id=display_id, title=page_data.get('title'), description=page_data.get('synopsis')) class ARDAudiothekBaseIE(InfoExtractor): def _graphql_query(self, urn, query): return self._download_json( 'https://api.ardaudiothek.de/graphql', urn, data=json.dumps({ 'query': query, 'variables': {'id': urn}, }).encode(), headers={ 'Content-Type': 'application/json', })['data'] class ARDAudiothekIE(ARDAudiothekBaseIE): 
_VALID_URL = r'https:?//(?:www\.)?ardaudiothek\.de/episode/(?P<id>urn:ard:(?:episode|section|extra):[a-f0-9]{16})' _TESTS = [{ 'url': 'https://www.ardaudiothek.de/episode/urn:ard:episode:eabead1add170e93/', 'info_dict': { 'id': 'urn:ard:episode:eabead1add170e93', 'ext': 'mp3', 'upload_date': '20240717', 'duration': 3339, 'title': 'CAIMAN CLUB (S04E04): Cash Out', 'thumbnail': 'https://api.ardmediathek.de/image-service/images/urn:ard:image:ed64411a07a4b405', 'description': 'md5:0e5d127a3832ae59e8bab40a91a5dadc', 'display_id': 'urn:ard:episode:eabead1add170e93', 'timestamp': 1721181641, 'series': '1LIVE Caiman Club', 'channel': 'WDR', 'episode': 'Episode 4', 'episode_number': 4, }, }, { 'url': 'https://www.ardaudiothek.de/episode/urn:ard:section:855c7a53dac72e0a/', 'info_dict': { 'id': 'urn:ard:section:855c7a53dac72e0a', 'ext': 'mp4', 'upload_date': '20241231', 'duration': 3304, 'title': 'Illegaler DDR-Detektiv: Doberschütz und die letzte Staatsjagd (1/2) - Wendezeit', 'thumbnail': 'https://api.ardmediathek.de/image-service/images/urn:ard:image:b9b4f1e8b93da4dd', 'description': 'md5:3552d571e1959754cff66c1da6c0fdae', 'display_id': 'urn:ard:section:855c7a53dac72e0a', 'timestamp': 1735629900, 'series': 'Auf der Spur – Die ARD Ermittlerkrimis', 'channel': 'ARD', 'episode': 'Episode 1', 'episode_number': 1, }, }, { 'url': 'https://www.ardaudiothek.de/episode/urn:ard:extra:d2fe7303d2dcbf5d/', 'info_dict': { 'id': 'urn:ard:extra:d2fe7303d2dcbf5d', 'ext': 'mp3', 'title': 'Trailer: Fanta Vier Forever, Baby!?!', 'description': 'md5:b64a586f2e976b8bb5ea0a79dbd8751c', 'channel': 'SWR', 'duration': 62, 'thumbnail': 'https://api.ardmediathek.de/image-service/images/urn:ard:image:48d3c255969be803', 'series': 'Fanta Vier Forever, Baby!?!', 'timestamp': 1732108217, 'upload_date': '20241120', }, }] _QUERY_ITEM = '''\ query($id: ID!) 
{ item(id: $id) { audioList { href distributionType audioBitrate audioCodec } show { title } image { url1X1 } programSet { publicationService { organizationName } } description title duration startDate episodeNumber } }''' def _real_extract(self, url): urn = self._match_id(url) item = self._graphql_query(urn, self._QUERY_ITEM)['item'] return { 'id': urn, **traverse_obj(item, { 'formats': ('audioList', lambda _, v: url_or_none(v['href']), { 'url': 'href', 'format_id': ('distributionType', {str}), 'abr': ('audioBitrate', {int_or_none}), 'acodec': ('audioCodec', {str}), 'vcodec': {value('none')}, }), 'channel': ('programSet', 'publicationService', 'organizationName', {str}), 'description': ('description', {str}), 'duration': ('duration', {int_or_none}), 'series': ('show', 'title', {str}), 'episode_number': ('episodeNumber', {int_or_none}), 'thumbnail': ('image', 'url1X1', {url_or_none}, {update_url(query=None)}), 'timestamp': ('startDate', {parse_iso8601}), 'title': ('title', {str}), }), } class ARDAudiothekPlaylistIE(ARDAudiothekBaseIE):
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
true
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/krasview.py
yt_dlp/extractor/krasview.py
import json from .common import InfoExtractor from ..utils import ( int_or_none, js_to_json, ) class KrasViewIE(InfoExtractor): _WORKING = False IE_DESC = 'Красвью' _VALID_URL = r'https?://krasview\.ru/(?:video|embed)/(?P<id>\d+)' _TEST = { 'url': 'http://krasview.ru/video/512228', 'md5': '3b91003cf85fc5db277870c8ebd98eae', 'info_dict': { 'id': '512228', 'ext': 'mp4', 'title': 'Снег, лёд, заносы', 'description': 'Снято в городе Нягань, в Ханты-Мансийском автономном округе.', 'duration': 27, 'thumbnail': r're:^https?://.*\.jpg', }, 'params': { 'skip_download': 'Not accessible from Travis CI server', }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) flashvars = json.loads(js_to_json(self._search_regex( r'video_Init\(({.+?})', webpage, 'flashvars'))) video_url = flashvars['url'] title = self._og_search_title(webpage) description = self._og_search_description(webpage, default=None) thumbnail = flashvars.get('image') or self._og_search_thumbnail(webpage) duration = int_or_none(flashvars.get('duration')) width = int_or_none(self._og_search_property( 'video:width', webpage, 'video width', default=None)) height = int_or_none(self._og_search_property( 'video:height', webpage, 'video height', default=None)) return { 'id': video_id, 'url': video_url, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'width': width, 'height': height, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/novaplay.py
yt_dlp/extractor/novaplay.py
from .common import InfoExtractor
from ..utils import int_or_none, parse_duration, parse_iso8601


class NovaPlayIE(InfoExtractor):
    """Extractor for play.nova.bg, the VOD portal of Bulgarian broadcaster NOVA."""
    _VALID_URL = r'https?://play\.nova\.bg/video/[^?#]+/(?P<id>\d+)'
    _TESTS = [
        {
            'url': 'https://play.nova.bg/video/ochakvaite/season-0/ochakvaite-2022-07-22-sybudi-se-sat/606627',
            'md5': 'd79dff2d09d196c595a7290f48e33399',
            'info_dict': {
                'id': '606627',
                'ext': 'mp4',
                'title': 'Събуди се - събота по NOVA (23.07.2022)',
                'alt_title': 'ochakvaite/season-0/ochakvaite-2022-07-22-sybudi-se-sat',
                'duration': 29.0,
                'timestamp': 1658491547,
                'upload_date': '20220722',
                'thumbnail': 'https://nbg-img.fite.tv/img/606627_460x260.jpg',
                'description': '29 сек',
            },
        },
        {
            'url': 'https://play.nova.bg/video/ochakvaite/season-0/ochakvaite-2022-07-22-cherry-tazi/606609',
            'md5': 'f3e973e2ed1a5b9b3f498b1ab82d01b3',
            'info_dict': {
                'id': '606609',
                'ext': 'mp4',
                'title': 'Черешката на тортата - тази вечер по NOVA (22.07.2022)',
                'alt_title': 'ochakvaite/season-0/ochakvaite-2022-07-22-cherry-tazi',
                'duration': 29.0,
                'timestamp': 1658476303,
                'upload_date': '20220722',
                'thumbnail': 'https://nbg-img.fite.tv/img/606609_460x260.jpg',
                'description': '29 сек',
            },
        },
    ]

    # Anonymous bearer token for the fite.tv streams API; cached on the class
    # after the first fetch so repeated extractions reuse it
    _access_token = None

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Lazily fetch the anonymous client token (only on the first call)
        self._access_token = self._access_token or self._download_json(
            'https://play.nova.bg/api/client', None, note='Fetching access token')['accessToken']
        # Video metadata lives in the embedded Next.js page props
        video_props = self._search_nextjs_data(webpage, video_id)['props']['pageProps']['video']
        # The streams endpoint returns a list; the first entry carries the
        # HLS playback link
        m3u8_url = self._download_json(
            f'https://nbg-api.fite.tv/api/v2/videos/{video_id}/streams',
            video_id, headers={
                'x-flipps-user-agent': 'Flipps/75/9.7',
                'x-flipps-version': '2022-05-17',
                'Authorization': f'Bearer {self._access_token}',
            })[0]['links']['play']['href']
        formats = self._extract_m3u8_formats(m3u8_url, video_id, 'mp4', m3u8_id='hls')

        return {
            'id': video_id,
            'title': video_props['title'],
            'alt_title': video_props.get('slug'),
            'thumbnail': self._og_search_thumbnail(webpage),
            'description': self._og_search_description(webpage),
            'formats': formats,
            'duration': parse_duration(video_props['duration']),
            'timestamp': parse_iso8601(video_props['published_at']),
            'view_count': int_or_none(video_props['view_count']),
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/toypics.py
yt_dlp/extractor/toypics.py
import re

from .common import InfoExtractor


class ToypicsIE(InfoExtractor):
    _WORKING = False
    IE_DESC = 'Toypics video'
    _VALID_URL = r'https?://videos\.toypics\.net/view/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://videos.toypics.net/view/514/chancebulged,-2-1/',
        'md5': '16e806ad6d6f58079d210fe30985e08b',
        'info_dict': {
            'id': '514',
            'ext': 'mp4',
            'title': "Chance-Bulge'd, 2",
            'age_limit': 18,
            'uploader': 'kidsune',
        },
    }

    def _real_extract(self, url):
        """Extract a single Toypics video from its /view/<id> page."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The page embeds a standard HTML5 <video> element
        formats = self._parse_html5_media_entries(
            url, webpage, video_id)[0]['formats']

        title = self._html_search_regex([
            r'<h1[^>]+class=["\']view-video-title[^>]+>([^<]+)</h',
            r'<title>([^<]+) - Toypics</title>',
        ], webpage, 'title')

        uploader = self._html_search_regex(
            r'More videos from <strong>([^<]+)</strong>', webpage, 'uploader',
            fatal=False)

        return {
            'id': video_id,
            'formats': formats,
            'title': title,
            'uploader': uploader,
            'age_limit': 18,
        }


class ToypicsUserIE(InfoExtractor):
    _WORKING = False
    IE_DESC = 'Toypics user profile'
    _VALID_URL = r'https?://videos\.toypics\.net/(?!view)(?P<id>[^/?#&]+)'
    _TEST = {
        'url': 'http://videos.toypics.net/Mikey',
        'info_dict': {
            'id': 'Mikey',
        },
        'playlist_mincount': 19,
    }

    def _real_extract(self, url):
        """Collect all public videos of a user profile into a playlist."""
        username = self._match_id(url)

        profile_page = self._download_webpage(
            url, username, note='Retrieving profile page')

        video_count = int(self._search_regex(
            r'public/">Public Videos \(([0-9]+)\)</a></li>', profile_page,
            'video count'))

        PAGE_SIZE = 8
        urls = []
        # Ceiling division. The previous expression,
        # (video_count + PAGE_SIZE + 1) // PAGE_SIZE, over-counted by one page
        # whenever video_count was a multiple of PAGE_SIZE (e.g. 8 videos ->
        # 2 pages), causing a needless extra request for an empty page.
        page_count = (video_count + PAGE_SIZE - 1) // PAGE_SIZE
        for n in range(1, page_count + 1):
            lpage_url = url + f'/public/{n}'
            lpage = self._download_webpage(
                lpage_url, username,
                note=f'Downloading page {n}/{page_count}')
            urls.extend(
                re.findall(
                    r'<div[^>]+class=["\']preview[^>]+>\s*<a[^>]+href="(https?://videos\.toypics\.net/view/[^"]+)"',
                    lpage))

        return {
            '_type': 'playlist',
            'id': username,
            'entries': [{
                '_type': 'url',
                'url': eurl,
                'ie_key': 'Toypics',
            } for eurl in urls],
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/mirrativ.py
yt_dlp/extractor/mirrativ.py
from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    dict_get,
    traverse_obj,
    try_get,
)


class MirrativBaseIE(InfoExtractor):
    def assert_error(self, response):
        """Raise ExtractorError if the API response carries an error message."""
        error_message = traverse_obj(response, ('status', 'error'))
        if error_message:
            raise ExtractorError(f'Mirrativ says: {error_message}', expected=True)


class MirrativIE(MirrativBaseIE):
    IE_NAME = 'mirrativ'
    _VALID_URL = r'https?://(?:www\.)?mirrativ\.com/live/(?P<id>[^/?#&]+)'

    _TESTS = [{
        'url': 'https://mirrativ.com/live/UQomuS7EMgHoxRHjEhNiHw',
        'info_dict': {
            'id': 'UQomuS7EMgHoxRHjEhNiHw',
            'title': 'ねむいぃ、。『参加型』🔰jcが初めてやるCOD✨初見さん大歓迎💗',
            'is_live': True,
            'description': 'md5:bfcd8f77f2fab24c3c672e5620f3f16e',
            'thumbnail': r're:https?://.+',
            'uploader': '# あ ち ゅ 。💡',
            'uploader_id': '118572165',
            'duration': None,
            'view_count': 1241,
            'release_timestamp': 1646229192,
            'timestamp': 1646229167,
            'was_live': False,
        },
        'skip': 'livestream',
    }, {
        'url': 'https://mirrativ.com/live/POxyuG1KmW2982lqlDTuPw',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(f'https://www.mirrativ.com/live/{video_id}', video_id)
        live_response = self._download_json(f'https://www.mirrativ.com/api/live/live?live_id={video_id}', video_id)
        self.assert_error(live_response)

        # Finished streams expose an archive URL, running ones a live URL
        hls_url = dict_get(live_response, ('archive_url_hls', 'streaming_url_hls'))
        is_live = bool(live_response.get('is_live'))
        if not hls_url:
            raise ExtractorError('Neither archive nor live is available.', expected=True)

        formats = self._extract_m3u8_formats(
            hls_url, video_id,
            ext='mp4', entry_protocol='m3u8_native',
            m3u8_id='hls', live=is_live)

        return {
            'id': video_id,
            # Title fallback chain: og:title -> <title> tag -> API response.
            # The _search_regex call was missing its mandatory `name`
            # argument, which raised TypeError whenever og:title was absent;
            # pass 'title' with default=None so the chain can fall through
            # to the API-provided title as intended.
            'title': self._og_search_title(webpage, default=None) or self._search_regex(
                r'<title>\s*(.+?) - Mirrativ\s*</title>', webpage, 'title', default=None) or live_response.get('title'),
            'is_live': is_live,
            'description': live_response.get('description'),
            'formats': formats,
            'thumbnail': live_response.get('image_url'),
            'uploader': traverse_obj(live_response, ('owner', 'name')),
            'uploader_id': traverse_obj(live_response, ('owner', 'user_id')),
            'duration': try_get(live_response, lambda x: x['ended_at'] - x['started_at']) if not is_live else None,
            'view_count': live_response.get('total_viewer_num'),
            'release_timestamp': live_response.get('started_at'),
            'timestamp': live_response.get('created_at'),
            'was_live': bool(live_response.get('is_archive')),
        }


class MirrativUserIE(MirrativBaseIE):
    IE_NAME = 'mirrativ:user'
    _VALID_URL = r'https?://(?:www\.)?mirrativ\.com/user/(?P<id>\d+)'

    _TESTS = [{
        # Live archive is available up to 3 days
        # see: https://helpfeel.com/mirrativ/%E9%8C%B2%E7%94%BB-5e26d3ad7b59ef0017fb49ac (Japanese)
        'url': 'https://www.mirrativ.com/user/110943130',
        'note': 'multiple archives available',
        'only_matching': True,
    }]

    def _entries(self, user_id):
        """Yield url_results for the user's streams, following API pagination."""
        page = 1
        while page is not None:
            api_response = self._download_json(
                f'https://www.mirrativ.com/api/live/live_history?user_id={user_id}&page={page}', user_id,
                note=f'Downloading page {page}')
            self.assert_error(api_response)
            lives = api_response.get('lives')
            if not lives:
                break
            for live in lives:
                if not live.get('is_archive') and not live.get('is_live'):
                    # neither archive nor live is available, so skip it
                    # or the service will ban your IP address for a while
                    continue
                live_id = live.get('live_id')
                url = f'https://www.mirrativ.com/live/{live_id}'
                yield self.url_result(url, video_id=live_id, video_title=live.get('title'))
            page = api_response.get('next_page')

    def _real_extract(self, url):
        user_id = self._match_id(url)
        user_info = self._download_json(
            f'https://www.mirrativ.com/api/user/profile?user_id={user_id}', user_id,
            note='Downloading user info', fatal=False)
        self.assert_error(user_info)

        return self.playlist_result(
            self._entries(user_id), user_id,
            user_info.get('name'), user_info.get('description'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/sky.py
yt_dlp/extractor/sky.py
import re

from .common import InfoExtractor
from ..utils import (
    extract_attributes,
    strip_or_none,
)


class SkyBaseIE(InfoExtractor):
    # Shared logic for Sky Sports / Sky News pages, which embed videos in
    # "sdc" (Sky Data Component) <div> elements pointing at Brightcove.
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s'
    _SDC_EL_REGEX = r'(?s)(<div[^>]+data-(?:component-name|fn)="sdc-(?:articl|sit)e-video"[^>]*>)'

    def _process_video_element(self, webpage, sdc_el, url):
        """Turn one sdc video <div> into a url_transparent Brightcove result.

        NOTE(review): video_id/video_url/ie_key are only assigned inside the
        'brightcove' branch; a different data-provider value would hit the
        return with unbound locals (NameError). Presumably only brightcove
        occurs in practice — confirm before relying on other providers.
        """
        sdc = extract_attributes(sdc_el)
        provider = sdc.get('data-provider')
        if provider == 'brightcove':
            video_id = sdc['data-video-id']
            # Fallback account/player IDs observed on Sky pages
            account_id = sdc.get('data-account-id') or '6058004172001'
            player_id = sdc.get('data-player-id') or 'RC9PQUaJ6'
            video_url = self.BRIGHTCOVE_URL_TEMPLATE % (account_id, player_id, video_id)
            ie_key = 'BrightcoveNew'

        return {
            '_type': 'url_transparent',
            'id': video_id,
            'url': video_url,
            'ie_key': ie_key,
        }

    def _real_extract(self, url):
        """Default single-video extraction: first sdc element on the page."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        info = self._process_video_element(webpage, self._search_regex(
            self._SDC_EL_REGEX, webpage, 'sdc element'), url)
        # Title/description come from the page, not from Brightcove
        info.update({
            'title': self._og_search_title(webpage),
            'description': strip_or_none(self._og_search_description(webpage)),
        })
        return info


class SkySportsIE(SkyBaseIE):
    IE_NAME = 'sky:sports'
    _VALID_URL = r'https?://(?:www\.)?skysports\.com/watch/video/([^/]+/)*(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://www.skysports.com/watch/video/10328419/bale-its-our-time-to-shine',
        'md5': '77d59166cddc8d3cb7b13e35eaf0f5ec',
        'info_dict': {
            'id': 'o3eWJnNDE6l7kfNO8BOoBlRxXRQ4ANNQ',
            'ext': 'mp4',
            'title': 'Bale: It\'s our time to shine',
            'description': 'md5:e88bda94ae15f7720c5cb467e777bb6d',
        },
        'add_ie': ['BrightcoveNew'],
    }, {
        'url': 'https://www.skysports.com/watch/video/sports/f1/12160544/abu-dhabi-gp-the-notebook',
        'only_matching': True,
    }, {
        'url': 'https://www.skysports.com/watch/video/tv-shows/12118508/rainford-brent-how-ace-programme-helps',
        'only_matching': True,
    }]


class SkyNewsIE(SkyBaseIE):
    IE_NAME = 'sky:news'
    _VALID_URL = r'https?://news\.sky\.com/video/[0-9a-z-]+-(?P<id>[0-9]+)'
    _TEST = {
        'url': 'https://news.sky.com/video/russian-plane-inspected-after-deadly-fire-11712962',
        'md5': '411e8893fd216c75eaf7e4c65d364115',
        'info_dict': {
            'id': 'ref:1ua21xaDE6lCtZDmbYfl8kwsKLooJbNM',
            'ext': 'mp4',
            'title': 'Russian plane inspected after deadly fire',
            'description': 'The Russian Investigative Committee has released video of the wreckage of a passenger plane which caught fire near Moscow.',
            'uploader_id': '6058004172001',
            'timestamp': 1567112345,
            'upload_date': '20190829',
        },
        'add_ie': ['BrightcoveNew'],
    }


class SkyNewsStoryIE(SkyBaseIE):
    IE_NAME = 'sky:news:story'
    _VALID_URL = r'https?://news\.sky\.com/story/[0-9a-z-]+-(?P<id>[0-9]+)'
    _TEST = {
        'url': 'https://news.sky.com/story/budget-2021-chancellor-rishi-sunak-vows-address-will-deliver-strong-economy-fit-for-a-new-age-of-optimism-12445425',
        'info_dict': {
            'id': 'ref:0714acb9-123d-42c8-91b8-5c1bc6c73f20',
            'title': 'md5:e408dd7aad63f31a1817bbe40c7d276f',
            'description': 'md5:a881e12f49212f92be2befe4a09d288a',
            'ext': 'mp4',
            'upload_date': '20211027',
            'timestamp': 1635317494,
            'uploader_id': '6058004172001',
        },
    }

    def _real_extract(self, url):
        # A story page may embed several videos; return them as a playlist
        article_id = self._match_id(url)
        webpage = self._download_webpage(url, article_id)

        entries = [self._process_video_element(webpage, sdc_el, url)
                   for sdc_el in re.findall(self._SDC_EL_REGEX, webpage)]

        return self.playlist_result(
            entries, article_id, self._og_search_title(webpage),
            self._html_search_meta(['og:description', 'description'], webpage))


class SkySportsNewsIE(SkyBaseIE):
    IE_NAME = 'sky:sports:news'
    _VALID_URL = r'https?://(?:www\.)?skysports\.com/([^/]+/)*news/\d+/(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.skysports.com/golf/news/12176/10871916/dustin-johnson-ready-to-conquer-players-championship-at-tpc-sawgrass',
        'info_dict': {
            'id': '10871916',
            'title': 'Dustin Johnson ready to conquer Players Championship at TPC Sawgrass',
            'description': 'Dustin Johnson is confident he can continue his dominant form in 2017 by adding the Players Championship to his list of victories.',
        },
        'playlist_count': 2,
    }

    def _real_extract(self, url):
        # Same playlist shape as SkyNewsStoryIE, built with an explicit loop
        article_id = self._match_id(url)
        webpage = self._download_webpage(url, article_id)

        entries = []
        for sdc_el in re.findall(self._SDC_EL_REGEX, webpage):
            entries.append(self._process_video_element(webpage, sdc_el, url))

        return self.playlist_result(
            entries, article_id, self._og_search_title(webpage),
            self._html_search_meta(['og:description', 'description'], webpage))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/wykop.py
yt_dlp/extractor/wykop.py
import json

from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
    ExtractorError,
    format_field,
    parse_iso8601,
    traverse_obj,
    url_or_none,
)


class WykopBaseIE(InfoExtractor):
    """Shared wykop.pl v3 API plumbing: anonymous auth token + JSON calls."""

    def _get_token(self, force_refresh=False):
        """Return a cached anonymous bearer token, refreshing it on demand."""
        if not force_refresh:
            maybe_cached = self.cache.load('wykop', 'bearer')
            if maybe_cached:
                return maybe_cached

        new_token = traverse_obj(
            self._do_call_api('auth', None, 'Downloading anonymous auth token', data={
                # hardcoded in frontend
                'key': 'w53947240748',
                'secret': 'd537d9e0a7adc1510842059ae5316419',
            }), ('data', 'token'))
        self.cache.store('wykop', 'bearer', new_token)
        return new_token

    def _do_call_api(self, path, video_id, note='Downloading JSON metadata', data=None, headers=None):
        """Low-level API call; `data`, if given, is wrapped as {'data': ...}.

        `headers` previously defaulted to a mutable {} that this method
        mutated, so the shared default dict accumulated a Content-Type entry
        across calls. Default to None and copy, which also avoids mutating a
        caller-supplied dict.
        """
        headers = dict(headers or {})
        if data:
            data = json.dumps({'data': data}).encode()
            headers['Content-Type'] = 'application/json'

        return self._download_json(
            f'https://wykop.pl/api/v3/{path}', video_id,
            note=note, data=data, headers=headers)

    def _call_api(self, path, video_id, note='Downloading JSON metadata'):
        """Authenticated API call, retrying once with a fresh token on 403."""
        token = self._get_token()
        for retrying in range(2):
            try:
                return self._do_call_api(path, video_id, note, headers={'Authorization': f'Bearer {token}'})
            except ExtractorError as e:
                # 403 means the anonymous token expired: refresh and retry once
                if not retrying and isinstance(e.cause, HTTPError) and e.cause.status == 403:
                    token = self._get_token(True)
                    continue
                raise

    def _common_data_extract(self, data):
        """Map fields shared by digs, posts and comments to info-dict keys."""
        author = traverse_obj(data, ('author', 'username'), expected_type=str)

        return {
            '_type': 'url_transparent',
            'display_id': data.get('slug'),
            'url': traverse_obj(data,
                                ('media', 'embed', 'url'),  # what gets an iframe embed
                                ('source', 'url'),  # clickable url (dig only)
                                expected_type=url_or_none),
            'thumbnail': traverse_obj(
                data, ('media', 'photo', 'url'), ('media', 'embed', 'thumbnail'), expected_type=url_or_none),
            'uploader': author,
            'uploader_id': author,
            'uploader_url': format_field(author, None, 'https://wykop.pl/ludzie/%s'),
            'timestamp': parse_iso8601(data.get('created_at'), delimiter=' '),  # time it got submitted
            'like_count': traverse_obj(data, ('votes', 'up'), expected_type=int),
            'dislike_count': traverse_obj(data, ('votes', 'down'), expected_type=int),
            'comment_count': traverse_obj(data, ('comments', 'count'), expected_type=int),
            'age_limit': 18 if data.get('adult') else 0,
            'tags': data.get('tags'),
        }


class WykopDigIE(WykopBaseIE):
    IE_NAME = 'wykop:dig'
    _VALID_URL = r'https?://(?:www\.)?wykop\.pl/link/(?P<id>\d+)'

    _TESTS = [{
        'url': 'https://wykop.pl/link/6912923/najbardziej-zrzedliwy-kot-na-swiecie-i-frozen-planet-ii-i-bbc-earth',
        'info_dict': {
            'id': 'rlSTBvViflc',
            'ext': 'mp4',
            'title': 'Najbardziej zrzędliwy kot na świecie I Frozen Planet II I BBC Earth',
            'display_id': 'najbardziej-zrzedliwy-kot-na-swiecie-i-frozen-planet-ii-i-bbc-earth',
            'description': 'md5:ac0f87dea1cdcb6b0c53f3612a095c87',
            'tags': ['zwierzaczki', 'koty', 'smiesznykotek', 'humor', 'rozrywka', 'ciekawostki'],
            'age_limit': 0,
            'timestamp': 1669154480,
            'release_timestamp': 1669194241,
            'release_date': '20221123',
            'uploader': 'starnak',
            'uploader_id': 'starnak',
            'uploader_url': 'https://wykop.pl/ludzie/starnak',
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'thumbnail': r're:https?://wykop\.pl/cdn/.+',
            'view_count': int,
            'channel': 'BBC Earth',
            'channel_id': 'UCwmZiChSryoWQCZMIQezgTg',
            'channel_url': 'https://www.youtube.com/channel/UCwmZiChSryoWQCZMIQezgTg',
            'categories': ['Pets & Animals'],
            'upload_date': '20220923',
            'duration': 191,
            'channel_follower_count': int,
            'availability': 'public',
            'live_status': 'not_live',
            'playable_in_embed': True,
        },
    }]

    @classmethod
    def suitable(cls, url):
        # Defer to the more specific comment extractor when its URL matches
        return cls._match_valid_url(url) and not WykopDigCommentIE.suitable(url)

    def _real_extract(self, url):
        video_id = self._match_id(url)
        data = self._call_api(f'links/{video_id}', video_id)['data']

        return {
            **self._common_data_extract(data),
            'id': video_id,
            'title': data['title'],
            'description': data.get('description'),
            # time it got "digged" to the homepage
            'release_timestamp': parse_iso8601(data.get('published_at'), delimiter=' '),
        }


class WykopDigCommentIE(WykopBaseIE):
    IE_NAME = 'wykop:dig:comment'
    _VALID_URL = r'https?://(?:www\.)?wykop\.pl/link/(?P<dig_id>\d+)/[^/]+/komentarz/(?P<id>\d+)'

    _TESTS = [{
        'url': 'https://wykop.pl/link/6992589/strollowal-oszusta-przez-ponad-24-minuty-udawal-naiwniaka-i-nagral-rozmowe/komentarz/114540527/podobna-sytuacja-ponizej-ciekawa-dyskusja-z-oszustem-na-sam-koniec-sam-bylem-w-biurze-swiadkiem-podobnej-rozmowy-niemal-zakonczonej-sukcesem-bandyty-g',
        'info_dict': {
            'id': 'u6tEi2FmKZY',
            'ext': 'mp4',
            'title': 'md5:e7c741c5baa7ed6478000caf72865577',
            'display_id': 'md5:45b2d12bd0e262d09cc7cf7abc8412db',
            'description': 'md5:bcec7983429f9c0630f9deb9d3d1ba5e',
            'timestamp': 1674476945,
            'uploader': 'Bartholomew',
            'uploader_id': 'Bartholomew',
            'uploader_url': 'https://wykop.pl/ludzie/Bartholomew',
            'thumbnail': r're:https?://wykop\.pl/cdn/.+',
            'tags': [],
            'availability': 'public',
            'duration': 1838,
            'upload_date': '20230117',
            'categories': ['Entertainment'],
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'channel_follower_count': int,
            'playable_in_embed': True,
            'live_status': 'not_live',
            'age_limit': 0,
            'chapters': 'count:3',
            'channel': 'Poszukiwacze Okazji',
            'channel_id': 'UCzzvJDZThwv06dR4xmzrZBw',
            'channel_url': 'https://www.youtube.com/channel/UCzzvJDZThwv06dR4xmzrZBw',
        },
    }]

    def _real_extract(self, url):
        dig_id, comment_id = self._search_regex(
            self._VALID_URL, url, 'dig and comment ids', group=('dig_id', 'id'))
        data = self._call_api(f'links/{dig_id}/comments/{comment_id}', comment_id)['data']

        return {
            **self._common_data_extract(data),
            'id': comment_id,
            'title': f"{traverse_obj(data, ('author', 'username'))} - {data.get('content') or ''}",
            'description': data.get('content'),
        }


class WykopPostIE(WykopBaseIE):
    IE_NAME = 'wykop:post'
    _VALID_URL = r'https?://(?:www\.)?wykop\.pl/wpis/(?P<id>\d+)'

    _TESTS = [{
        'url': 'https://wykop.pl/wpis/68893343/kot-koty-smiesznykotek',
        'info_dict': {
            'id': 'PL8JMjiUPHUhwc9ZlKa_5IFeBwBV8Xe7jI',
            'title': 'PawelW124 - #kot #koty #smiesznykotek',
            'description': '#kot #koty #smiesznykotek',
            'display_id': 'kot-koty-smiesznykotek',
            'tags': ['kot', 'koty', 'smiesznykotek'],
            'uploader': 'PawelW124',
            'uploader_id': 'PawelW124',
            'uploader_url': 'https://wykop.pl/ludzie/PawelW124',
            'timestamp': 1668938142,
            'age_limit': 0,
            'like_count': int,
            'dislike_count': int,
            'thumbnail': r're:https?://wykop\.pl/cdn/.+',
            'comment_count': int,
            'channel': 'Revan',
            'channel_id': 'UCW9T_-uZoiI7ROARQdTDyOw',
            'channel_url': 'https://www.youtube.com/channel/UCW9T_-uZoiI7ROARQdTDyOw',
            'upload_date': '20221120',
            'modified_date': '20220814',
            'availability': 'public',
            'view_count': int,
        },
        'playlist_mincount': 15,
        'params': {
            'flat_playlist': True,
        },
    }]

    @classmethod
    def suitable(cls, url):
        # Defer to the more specific comment extractor when its URL matches
        return cls._match_valid_url(url) and not WykopPostCommentIE.suitable(url)

    def _real_extract(self, url):
        video_id = self._match_id(url)
        data = self._call_api(f'entries/{video_id}', video_id)['data']

        return {
            **self._common_data_extract(data),
            'id': video_id,
            'title': f"{traverse_obj(data, ('author', 'username'))} - {data.get('content') or ''}",
            'description': data.get('content'),
        }


class WykopPostCommentIE(WykopBaseIE):
    IE_NAME = 'wykop:post:comment'
    _VALID_URL = r'https?://(?:www\.)?wykop\.pl/wpis/(?P<post_id>\d+)/[^/#]+#(?P<id>\d+)'

    _TESTS = [{
        'url': 'https://wykop.pl/wpis/70084873/test-test-test#249303979',
        'info_dict': {
            'id': 'confusedquickarmyant',
            'ext': 'mp4',
            'title': 'tpap - treść komentarza',
            'display_id': 'tresc-komentarza',
            'description': 'treść komentarza',
            'uploader': 'tpap',
            'uploader_id': 'tpap',
            'uploader_url': 'https://wykop.pl/ludzie/tpap',
            'timestamp': 1675349470,
            'upload_date': '20230202',
            'tags': [],
            'duration': 2.12,
            'age_limit': 0,
            'categories': [],
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'thumbnail': r're:https?://wykop\.pl/cdn/.+',
        },
    }]

    def _real_extract(self, url):
        post_id, comment_id = self._search_regex(
            self._VALID_URL, url, 'post and comment ids', group=('post_id', 'id'))
        data = self._call_api(f'entries/{post_id}/comments/{comment_id}', comment_id)['data']

        return {
            **self._common_data_extract(data),
            'id': comment_id,
            'title': f"{traverse_obj(data, ('author', 'username'))} - {data.get('content') or ''}",
            'description': data.get('content'),
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/wikimedia.py
yt_dlp/extractor/wikimedia.py
import re

from .common import InfoExtractor
from ..utils import (
    clean_html,
    get_element_by_class,
    parse_qs,
    remove_start,
    unescapeHTML,
    urljoin,
)


class WikimediaIE(InfoExtractor):
    """Extract a video (plus its subtitle tracks) from a Wikimedia Commons file page."""
    IE_NAME = 'wikimedia.org'
    _VALID_URL = r'https?://commons\.wikimedia\.org/wiki/File:(?P<id>[^/#?]+)\.\w+'
    _TESTS = [{
        'url': 'https://commons.wikimedia.org/wiki/File:Die_Temperaturkurve_der_Erde_(ZDF,_Terra_X)_720p_HD_50FPS.webm',
        'info_dict': {
            'url': 're:https?://upload.wikimedia.org/wikipedia',
            'ext': 'webm',
            'id': 'Die_Temperaturkurve_der_Erde_(ZDF,_Terra_X)_720p_HD_50FPS',
            'title': 'Die Temperaturkurve der Erde (ZDF, Terra X) 720p HD 50FPS.webm - Wikimedia Commons',
            'description': 'md5:7cd84f76e7081f1be033d0b155b4a460',
            'license': 'Creative Commons Attribution 4.0 International',
            'uploader': 'ZDF/Terra X/Gruppe 5/Luise Wagner, Jonas Sichert, Andreas Hougardy',
            'subtitles': 'count:4',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # Subtitle tracks are referenced as TimedText API links embedded in
        # the page markup; dedupe the paths before resolving them
        subtitles = {}
        for api_path in set(re.findall(r'\bsrc\s*=\s*["\'](/w/api[^"]+)["\']', webpage)):
            track_url = urljoin('https://commons.wikimedia.org', unescapeHTML(api_path))
            query = parse_qs(track_url)
            track_lang = query.get('lang', [None])[-1]
            track_ext = query.get('trackformat', [None])[-1]
            if not track_lang or not track_ext:
                continue
            subtitles.setdefault(track_lang, []).append({'ext': track_ext, 'url': track_url})

        return {
            'id': video_id,
            'url': self._html_search_regex(r'<source\s[^>]*\bsrc="([^"]+)"', webpage, 'video URL'),
            'description': clean_html(get_element_by_class('description', webpage)),
            'title': remove_start(self._og_search_title(webpage), 'File:'),
            'license': self._html_search_regex(
                r'licensed under(?: the)? (.+?) license',
                get_element_by_class('licensetpl', webpage), 'license', default=None),
            'uploader': self._html_search_regex(
                r'>\s*Author\s*</td>\s*<td\b[^>]*>\s*([^<]+)\s*</td>', webpage, 'video author', default=None),
            'subtitles': subtitles,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/myspace.py
yt_dlp/extractor/myspace.py
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    parse_iso8601,
)


class MySpaceIE(InfoExtractor):
    # Handles both video pages (/video/<name>/<id>) and song pages
    # (/music/song/<slug>-<song_id>-<other_id>), which need different logic
    _VALID_URL = r'''(?x)
                    https?://
                        myspace\.com/[^/]+/
                        (?P<mediatype>
                            video/[^/]+/(?P<video_id>\d+)|
                            music/song/[^/?#&]+-(?P<song_id>\d+)-\d+(?:[/?#&]|$)
                        )
                    '''
    _TESTS = [{
        'url': 'https://myspace.com/fiveminutestothestage/video/little-big-town/109594919',
        'md5': '9c1483c106f4a695c47d2911feed50a7',
        'info_dict': {
            'id': '109594919',
            'ext': 'mp4',
            'title': 'Little Big Town',
            'description': 'This country quartet was all smiles while playing a sold out show at the Pacific Amphitheatre in Orange County, California.',
            'uploader': 'Five Minutes to the Stage',
            'uploader_id': 'fiveminutestothestage',
            'timestamp': 1414108751,
            'upload_date': '20141023',
        },
    }, {
        # songs
        'url': 'https://myspace.com/killsorrow/music/song/of-weakened-soul...-93388656-103880681',
        'md5': '1d7ee4604a3da226dd69a123f748b262',
        'info_dict': {
            'id': '93388656',
            'ext': 'm4a',
            'title': 'Of weakened soul...',
            'uploader': 'Killsorrow',
            'uploader_id': 'killsorrow',
        },
    }, {
        'url': 'https://myspace.com/starset2/music/song/first-light-95799905-106964426',
        'only_matching': True,
    }, {
        'url': 'https://myspace.com/thelargemouthbassband/music/song/02-pure-eyes.mp3-94422330-105113388',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        video_id = mobj.group('video_id') or mobj.group('song_id')
        is_song = mobj.group('mediatype').startswith('music/song')
        webpage = self._download_webpage(url, video_id)
        # Flash player SWF URL; only needed for the RTMP format below
        player_url = self._search_regex(
            r'videoSwf":"([^"?]*)', webpage, 'player URL', fatal=False)

        def formats_from_stream_urls(stream_url, hls_stream_url, http_stream_url, width=None, height=None):
            """Build the formats list from whichever stream URLs are present.

            Songs are audio-only, so vcodec is forced to 'none' for them.
            """
            formats = []
            vcodec = 'none' if is_song else None
            if hls_stream_url:
                formats.append({
                    'format_id': 'hls',
                    'url': hls_stream_url,
                    'protocol': 'm3u8_native',
                    'ext': 'm4a' if is_song else 'mp4',
                    'vcodec': vcodec,
                })
            if stream_url and player_url:
                # RTMP URLs come as '<base>;<play_path>'
                rtmp_url, play_path = stream_url.split(';', 1)
                formats.append({
                    'format_id': 'rtmp',
                    'url': rtmp_url,
                    'play_path': play_path,
                    'player_url': player_url,
                    'protocol': 'rtmp',
                    'ext': 'flv',
                    'width': width,
                    'height': height,
                    'vcodec': vcodec,
                })
            if http_stream_url:
                formats.append({
                    'format_id': 'http',
                    'url': http_stream_url,
                    'width': width,
                    'height': height,
                    'vcodec': vcodec,
                })
            return formats

        if is_song:
            # songs don't store any useful info in the 'context' variable
            song_data = self._search_regex(
                rf'''<button.*data-song-id=(["\']){video_id}\1.*''',
                webpage, 'song_data', default=None, group=0)
            if song_data is None:
                # some songs in an album are not playable
                self.report_warning(
                    f'{video_id}: No downloadable song on this page')
                return

            def search_data(name):
                # Pull a data-<name>="..." attribute out of the song <button>
                return self._search_regex(
                    rf'''data-{name}=([\'"])(?P<data>.*?)\1''',
                    song_data, name, default='', group='data')

            formats = formats_from_stream_urls(
                search_data('stream-url'), search_data('hls-stream-url'),
                search_data('http-stream-url'))
            if not formats:
                # No direct streams: the song may be hosted on Vevo/YouTube
                vevo_id = search_data('vevo-id')
                youtube_id = search_data('youtube-id')
                if vevo_id:
                    self.to_screen(f'Vevo video detected: {vevo_id}')
                    return self.url_result(f'vevo:{vevo_id}', ie='Vevo')
                elif youtube_id:
                    self.to_screen(f'Youtube video detected: {youtube_id}')
                    return self.url_result(youtube_id, ie='Youtube')
                else:
                    raise ExtractorError(
                        'Found song but don\'t know how to download it')
            return {
                'id': video_id,
                'title': self._og_search_title(webpage),
                'uploader': search_data('artist-name'),
                'uploader_id': search_data('artist-username'),
                'thumbnail': self._og_search_thumbnail(webpage),
                'duration': int_or_none(search_data('duration')),
                'formats': formats,
            }
        else:
            # Video pages expose their metadata in a JS 'context' object
            video = self._parse_json(self._search_regex(
                r'context = ({.*?});', webpage, 'context'),
                video_id)['video']
            formats = formats_from_stream_urls(
                video.get('streamUrl'), video.get('hlsStreamUrl'),
                video.get('mp4StreamUrl'), int_or_none(video.get('width')),
                int_or_none(video.get('height')))
            return {
                'id': video_id,
                'title': video['title'],
                'description': video.get('description'),
                'thumbnail': video.get('imageUrl'),
                'uploader': video.get('artistName'),
                'uploader_id': video.get('artistUsername'),
                'duration': int_or_none(video.get('duration')),
                'timestamp': parse_iso8601(video.get('dateAdded')),
                'formats': formats,
            }


class MySpaceAlbumIE(InfoExtractor):
    IE_NAME = 'MySpace:album'
    _VALID_URL = r'https?://myspace\.com/([^/]+)/music/album/(?P<title>.*-)(?P<id>\d+)'

    _TESTS = [{
        'url': 'https://myspace.com/starset2/music/album/transmissions-19455773',
        'info_dict': {
            'title': 'Transmissions',
            'id': '19455773',
        },
        'playlist_count': 14,
        'skip': 'this album is only available in some countries',
    }, {
        'url': 'https://myspace.com/killsorrow/music/album/the-demo-18596029',
        'info_dict': {
            'title': 'The Demo',
            'id': '18596029',
        },
        'playlist_count': 5,
    }]

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        playlist_id = mobj.group('id')
        display_id = mobj.group('title') + playlist_id
        webpage = self._download_webpage(url, display_id)
        # Track pages are listed as 'music:song' OpenGraph meta tags
        tracks_paths = re.findall(r'"music:song" content="(.*?)"', webpage)
        if not tracks_paths:
            raise ExtractorError(
                f'{display_id}: No songs found, try using proxy',
                expected=True)
        entries = [
            self.url_result(t_path, ie=MySpaceIE.ie_key())
            for t_path in tracks_paths]
        return {
            '_type': 'playlist',
            'id': playlist_id,
            'display_id': display_id,
            'title': self._og_search_title(webpage),
            'entries': entries,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/mediastream.py
yt_dlp/extractor/mediastream.py
import re

from .common import InfoExtractor
from ..utils import (
    clean_html,
    filter_dict,
    parse_qs,
    remove_end,
    traverse_obj,
    update_url_query,
    urljoin,
)


class MediaStreamBaseIE(InfoExtractor):
    # Base for extractors built on the mdstrm.com player platform.
    _EMBED_BASE_URL = 'https://mdstrm.com/embed'
    _BASE_URL_RE = r'https?://mdstrm\.com/(?:embed|live-stream)'

    def _extract_mediastream_urls(self, webpage):
        """Yield every mdstrm.com embed/live-stream URL discoverable in *webpage*.

        Four discovery strategies are tried, in order:
        JSON-LD VideoObject URLs, playerMdStream JS calls, <iframe> embeds,
        and data-video-id attributes on player <div>/<ps-mediastream> tags.
        """
        # 1) JSON-LD: keep only embedUrl/contentUrl values that point at mdstrm.com.
        yield from traverse_obj(list(self._yield_json_ld(webpage, None, default={})), (
            lambda _, v: v['@type'] == 'VideoObject', ('embedUrl', 'contentUrl'),
            {lambda x: x if re.match(rf'{self._BASE_URL_RE}/\w+', x) else None}))

        # 2) Inline JS player initialization: playerMdStream.mdstreamVideo('<id>').
        for mobj in re.finditer(r'<script[^>]+>[^>]*playerMdStream\.mdstreamVideo\(\s*[\'"](?P<video_id>\w+)', webpage):
            yield f'{self._EMBED_BASE_URL}/{mobj.group("video_id")}'

        # 3) Plain iframe embeds pointing directly at mdstrm.com.
        yield from re.findall(
            rf'<iframe[^>]+\bsrc="({self._BASE_URL_RE}/\w+)', webpage)

        # 4) Player markup carrying data-video-id; the optional trailing groups
        #    detect whether the embed is a live stream.
        for mobj in re.finditer(
                r'''(?x)
                    <(?:div|ps-mediastream)[^>]+
                    (class="[^"]*MediaStreamVideoPlayer)[^"]*"[^>]+
                    data-video-id="(?P<video_id>\w+)"
                    (?:\s*data-video-type="(?P<video_type>[^"]+))?
                    (?:[^>]*>\s*<div[^>]+\1[^"]*"[^>]+data-mediastream=["\'][^>]+
                        https://mdstrm\.com/(?P<live>live-stream))?
                ''', webpage):
            # Either an explicit data-video-type="live" or a nested live-stream
            # data-mediastream URL marks the embed as live.
            video_type = 'live-stream' if mobj.group('video_type') == 'live' or mobj.group('live') else 'embed'
            yield f'https://mdstrm.com/{video_type}/{mobj.group("video_id")}'


class MediaStreamIE(MediaStreamBaseIE):
    # Direct extractor for mdstrm.com embed/live-stream pages.
    _VALID_URL = MediaStreamBaseIE._BASE_URL_RE + r'/(?P<id>\w+)'

    _TESTS = [{
        'url': 'https://mdstrm.com/embed/6318e3f1d1d316083ae48831',
        'md5': '97b4f2634b8e8612cc574dfcd504df05',
        'info_dict': {
            'id': '6318e3f1d1d316083ae48831',
            'title': 'Video: Así fue el despido de Thomas Tuchel del Chelsea',
            'description': 'md5:358ce1e1396010d50a1ece1be3633c95',
            'thumbnail': r're:^https?://[^?#]+6318e3f1d1d316083ae48831',
            'ext': 'mp4',
        },
        'params': {'skip_download': 'm3u8'},
    }]

    _WEBPAGE_TESTS = [{
        'url': 'https://www.multimedios.com/video/costa-rica-tv-en-vivo/v2616',
        'info_dict': {
            'id': '5a7b1e63a8da282c34d65445',
            'title': 're:mmtv-costarica',
            'description': 'mmtv-costarica',
            'thumbnail': 're:^https?://[^?#]+5a7b1e63a8da282c34d65445',
            'ext': 'mp4',
            'live_status': 'is_live',
        },
        'params': {'skip_download': 'Livestream'},
    }, {
        'url': 'https://www.multimedios.com/television/clases-de-llaves-y-castigos-quien-sabe-mas',
        'md5': 'de31f0b1ecc321fb35bf22d58734ea40',
        'info_dict': {
            'id': '63731bab8ec9b308a2c9ed28',
            'title': 'Clases de llaves y castigos ¿Quién sabe más?',
            'description': 'md5:1b49aa1ee5a4b32fbd66104b2d629e9d',
            'thumbnail': 're:^https?://[^?#]+63731bab8ec9b308a2c9ed28',
            'ext': 'mp4',
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://www.americatv.com.pe/videos/esto-es-guerra/facundo-gonzalez-sufrio-fuerte-golpe-durante-competencia-frente-hugo-garcia-eeg-noticia-139120',
        'info_dict': {
            'id': '63756df1c638b008a5659dec',
            'title': 'Facundo González sufrió fuerte golpe durante competencia frente a Hugo García en EEG',
            'description': 'md5:9490c034264afd756eef7b2c3adee69e',
            'thumbnail': 're:^https?://[^?#]+63756df1c638b008a5659dec',
            'ext': 'mp4',
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://www.americatv.com.pe/videos/al-fondo-hay-sitio/nuevas-lomas-town-bernardo-mata-se-enfrento-sujeto-luchar-amor-macarena-noticia-139083',
        'info_dict': {
            'id': '637307669609130f74cd3a6e',
            'title': 'Las Nuevas Lomas Town: Bernardo De La Mata se enfrentó a sujeto para luchar por el amor de Macarena',
            'description': 'md5:60d71772f1e1496923539ae58aa17124',
            'thumbnail': 're:^https?://[^?#]+637307669609130f74cd3a6e',
            'ext': 'mp4',
        },
        'params': {'skip_download': 'm3u8'},
    }]

    def _extract_from_webpage(self, url, webpage):
        # Generic-embed hook: delegate every discovered embed URL back to this IE.
        for embed_url in self._extract_mediastream_urls(webpage):
            yield self.url_result(embed_url, MediaStreamIE, None)

    def _real_extract(self, url):
        """Extract formats/subtitles from an mdstrm.com player page."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The player page reports geo-blocking via inline text (Spanish/English variants).
        for message in [
            'Debido a tu ubicación no puedes ver el contenido',
            'You are not allowed to watch this video: Geo Fencing Restriction',
            'Este contenido no está disponible en tu zona geográfica.',
            'El contenido sólo está disponible dentro de',
        ]:
            if message in webpage:
                self.raise_geo_restricted()

        player_config = self._search_json(r'window\.MDSTRM\.OPTIONS\s*=', webpage, 'metadata', video_id)

        formats, subtitles = [], {}
        # player_config['src'] maps protocol name ('hls', 'mpd', ...) -> manifest URL.
        for video_format in player_config['src']:
            if video_format == 'hls':
                # The HLS manifest wants session identifiers scraped from inline
                # JS globals, plus any access_token carried on the page URL.
                params = {
                    'at': 'web-app',
                    'access_token': traverse_obj(parse_qs(url), ('access_token', 0)),
                }
                for name, key in (('MDSTRMUID', 'uid'), ('MDSTRMSID', 'sid'), ('MDSTRMPID', 'pid'), ('VERSION', 'av')):
                    params[key] = self._search_regex(
                        rf'window\.{name}\s*=\s*["\']([^"\']+)["\'];', webpage, key, default=None)

                fmts, subs = self._extract_m3u8_formats_and_subtitles(
                    update_url_query(player_config['src'][video_format], filter_dict(params)), video_id)
                formats.extend(fmts)
                self._merge_subtitles(subs, target=subtitles)
            elif video_format == 'mpd':
                fmts, subs = self._extract_mpd_formats_and_subtitles(player_config['src'][video_format], video_id)
                formats.extend(fmts)
                self._merge_subtitles(subs, target=subtitles)
            else:
                # Unknown protocol: pass the URL through as a direct format.
                formats.append({
                    'url': player_config['src'][video_format],
                })

        return {
            'id': video_id,
            'title': self._og_search_title(webpage) or player_config.get('title'),
            'description': self._og_search_description(webpage),
            'formats': formats,
            'subtitles': subtitles,
            'is_live': player_config.get('type') == 'live',
            'thumbnail': self._og_search_thumbnail(webpage),
        }


class WinSportsVideoIE(MediaStreamBaseIE):
    # winsports.co pages embed mdstrm.com players; resolve and delegate to MediaStreamIE.
    _VALID_URL = r'https?://www\.winsports\.co/videos/(?P<id>[\w-]+)'

    _TESTS = [{
        'url': 'https://www.winsports.co/videos/siempre-castellanos-gran-atajada-del-portero-cardenal-para-evitar-la-caida-de-su-arco-60536',
        'info_dict': {
            'id': '62dc8357162c4b0821fcfb3c',
            'display_id': 'siempre-castellanos-gran-atajada-del-portero-cardenal-para-evitar-la-caida-de-su-arco-60536',
            'title': '¡Siempre Castellanos! Gran atajada del portero \'cardenal\' para evitar la caída de su arco',
            'description': 'md5:eb811b2b2882bdc59431732c06b905f2',
            'thumbnail': r're:^https?://[^?#]+62dc8357162c4b0821fcfb3c',
            'ext': 'mp4',
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://www.winsports.co/videos/observa-aqui-los-goles-del-empate-entre-tolima-y-nacional-60548',
        'info_dict': {
            'id': '62dcb875ef12a5526790b552',
            'display_id': 'observa-aqui-los-goles-del-empate-entre-tolima-y-nacional-60548',
            'title': 'Observa aquí los goles del empate entre Tolima y Nacional',
            'description': 'md5:b19402ba6e46558b93fd24b873eea9c9',
            'thumbnail': r're:^https?://[^?#]+62dcb875ef12a5526790b552',
            'ext': 'mp4',
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://www.winsports.co/videos/equidad-vuelve-defender-su-arco-de-remates-de-junior',
        'info_dict': {
            'id': '63fa7eca72f1741ad3a4d515',
            'display_id': 'equidad-vuelve-defender-su-arco-de-remates-de-junior',
            'title': '⚽ Equidad vuelve a defender su arco de remates de Junior',
            'description': 'Remate de Sierra',
            'thumbnail': r're:^https?://[^?#]+63fa7eca72f1741ad3a4d515',
            'ext': 'mp4',
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://www.winsports.co/videos/bucaramanga-se-quedo-con-el-grito-de-gol-en-la-garganta',
        'info_dict': {
            'id': '6402adb62bbf3b18d454e1b0',
            'display_id': 'bucaramanga-se-quedo-con-el-grito-de-gol-en-la-garganta',
            'title': '⚽Bucaramanga se quedó con el grito de gol en la garganta',
            'description': 'Gol anulado Bucaramanga',
            'thumbnail': r're:^https?://[^?#]+6402adb62bbf3b18d454e1b0',
            'ext': 'mp4',
        },
        'params': {'skip_download': 'm3u8'},
    }]

    def _real_extract(self, url):
        """Resolve the embedded mdstrm.com URL from a Win Sports video page."""
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # Drupal sites serve their settings as JSON in a dedicated <script> tag.
        data = self._search_json(
            r'<script\s*[^>]+data-drupal-selector="drupal-settings-json">', webpage, 'data', display_id)

        # Prefer the ID/URL from the Drupal settings; otherwise fall back to
        # generic embed discovery. A bare ID is resolved against the embed base URL.
        mediastream_url = urljoin(f'{self._EMBED_BASE_URL}/', (
            traverse_obj(data, (
                (('settings', 'mediastream_formatter', ..., 'mediastream_id'), 'url'), {str}), get_all=False)
            or next(self._extract_mediastream_urls(webpage), None)))

        if not mediastream_url:
            self.raise_no_formats('No MediaStream embed found in webpage')

        # Strip the site-name suffix from the page title.
        title = clean_html(remove_end(
            self._search_json_ld(webpage, display_id, expected_type='VideoObject', default={}).get('title')
            or self._og_search_title(webpage), '| Win Sports'))

        # url_transparent keeps our title/display_id while MediaStreamIE
        # supplies the formats.
        return self.url_result(
            mediastream_url, MediaStreamIE, display_id, url_transparent=True,
            display_id=display_id, video_title=title)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/xnxx.py
yt_dlp/extractor/xnxx.py
import re

from .common import InfoExtractor
from ..utils import (
    NO_DEFAULT,
    determine_ext,
    int_or_none,
    str_to_int,
)


class XNXXIE(InfoExtractor):
    _VALID_URL = r'https?://(?:video|www)\.xnxx3?\.com/video-?(?P<id>[0-9a-z]+)/'
    _TESTS = [{
        'url': 'http://www.xnxx.com/video-55awb78/skyrim_test_video',
        'md5': '7583e96c15c0f21e9da3453d9920fbba',
        'info_dict': {
            'id': '55awb78',
            'ext': 'mp4',
            'title': 'Skyrim Test Video',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 469,
            'view_count': int,
            'age_limit': 18,
        },
    }, {
        'url': 'http://video.xnxx.com/video1135332/lida_naked_funny_actress_5_',
        'only_matching': True,
    }, {
        'url': 'http://www.xnxx.com/video-55awb78/',
        'only_matching': True,
    }, {
        'url': 'http://www.xnxx3.com/video-55awb78/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Extract a single XNXX video page into an info dict."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        def get(meta, default=NO_DEFAULT, fatal=True):
            # Page metadata is exposed through inline JS calls such as
            # setVideoTitle('...'); pull the quoted argument out.
            return self._search_regex(
                rf'set{meta}\s*\(\s*(["\'])(?P<value>(?:(?!\1).)+)\1',
                webpage, meta, default=default,
                fatal=fatal, group='value')

        title = self._og_search_title(webpage, default=None) or get('VideoTitle')

        # Stream URLs come from setVideoUrlLow/High (progressive) and
        # setVideoHLS (m3u8) calls in the page.
        formats = []
        stream_call_re = r'setVideo(?:Url(?P<id>Low|High)|HLS)\s*\(\s*(?P<q>["\'])(?P<url>(?:https?:)?//.+?)(?P=q)'
        for match in re.finditer(stream_call_re, webpage):
            stream_url = match.group('url')
            if determine_ext(stream_url) == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    stream_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    quality=1, m3u8_id='hls', fatal=False))
                continue
            quality_name = match.group('id')
            if quality_name:
                quality_name = quality_name.lower()
            formats.append({
                'url': stream_url,
                'format_id': quality_name,
                'quality': -1 if quality_name == 'low' else 0,
            })

        # Fall back through the two thumbnail setters when OG data is absent.
        thumbnail = self._og_search_thumbnail(webpage, default=None) or get(
            'ThumbUrl', fatal=False) or get('ThumbUrl169', fatal=False)

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'duration': int_or_none(self._og_search_property('duration', webpage)),
            'view_count': str_to_int(self._search_regex(
                r'id=["\']nb-views-number[^>]+>([\d,.]+)', webpage, 'view count',
                default=None)),
            'age_limit': 18,
            'formats': formats,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/fujitv.py
yt_dlp/extractor/fujitv.py
from .common import InfoExtractor
from ..networking import HEADRequest


class FujiTVFODPlus7IE(InfoExtractor):
    # Fuji TV FOD (Fuji On Demand) extractor.
    _VALID_URL = r'https?://fod\.fujitv\.co\.jp/title/(?P<sid>[0-9a-z]{4})/(?P<id>[0-9a-z]+)'
    _BASE_URL = 'https://i.fod.fujitv.co.jp/'
    # Maps HLS variant bitrate (tbr, kbps) -> (height, width); note the value
    # tuples are consumed in (height, width) order in _real_extract.
    _BITRATE_MAP = {
        300: (320, 180),
        800: (640, 360),
        1200: (1280, 720),
        2000: (1280, 720),
        4000: (1920, 1080),
    }

    _TESTS = [{
        'url': 'https://fod.fujitv.co.jp/title/5d40/5d40110076',
        'info_dict': {
            'id': '5d40110076',
            'ext': 'ts',
            'title': '#1318 『まる子、まぼろしの洋館を見る』の巻',
            'series': 'ちびまる子ちゃん',
            'series_id': '5d40',
            'description': 'md5:b3f51dbfdda162ac4f789e0ff4d65750',
            'thumbnail': 'https://i.fod.fujitv.co.jp/img/program/5d40/episode/5d40110076_a.jpg',
        },
    }, {
        'url': 'https://fod.fujitv.co.jp/title/5d40/5d40810083',
        'info_dict': {
            'id': '5d40810083',
            'ext': 'ts',
            'title': '#1324 『まる子とオニの子』の巻/『結成!2月をムダにしない会』の巻',
            'description': 'md5:3972d900b896adc8ab1849e310507efa',
            'series': 'ちびまる子ちゃん',
            'series_id': '5d40',
            'thumbnail': 'https://i.fod.fujitv.co.jp/img/program/5d40/episode/5d40810083_a.jpg'},
        'skip': 'Video available only in one week',
    }]

    def _real_extract(self, url):
        """Extract an FOD episode; metadata requires the 'CT' login cookie."""
        series_id, video_id = self._match_valid_url(url).groups()
        # HEAD request primes cookies for the subsequent API calls.
        self._request_webpage(HEADRequest(url), video_id)
        json_info = {}
        # The 'CT' cookie carries the bearer token for the metadata API;
        # without it only formats (no title/description) can be extracted.
        token = self._get_cookies(url).get('CT')
        if token:
            json_info = self._download_json(
                f'https://fod-sp.fujitv.co.jp/apps/api/episode/detail/?ep_id={video_id}&is_premium=false',
                video_id, headers={'x-authorization': f'Bearer {token.value}'}, fatal=False)
        else:
            self.report_warning(f'The token cookie is needed to extract video metadata. {self._login_hint("cookies")}')
        formats, subtitles = [], {}
        src_json = self._download_json(f'{self._BASE_URL}abrjson_v2/tv_android/{video_id}', video_id)
        for src in src_json['video_selector']:
            if not src.get('url'):
                continue
            fmt, subs = self._extract_m3u8_formats_and_subtitles(src['url'], video_id, 'ts')
            for f in fmt:
                # Fill height/width from the bitrate map; strict=False tolerates
                # an unknown tbr (empty tuple -> no update).
                f.update(dict(zip(('height', 'width'),
                                  self._BITRATE_MAP.get(f.get('tbr'), ()), strict=False)))
            formats.extend(fmt)
            subtitles = self._merge_subtitles(subtitles, subs)

        return {
            'id': video_id,
            'title': json_info.get('ep_title'),
            'series': json_info.get('lu_title'),
            'series_id': series_id,
            'description': json_info.get('ep_description'),
            'formats': formats,
            'subtitles': subtitles,
            'thumbnail': f'{self._BASE_URL}img/program/{series_id}/episode/{video_id}_a.jpg',
            # Sort purely by bitrate since height/width may be missing.
            '_format_sort_fields': ('tbr', ),
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/caracoltv.py
yt_dlp/extractor/caracoltv.py
import base64
import json
import uuid

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    js_to_json,
    traverse_obj,
    urljoin,
)


class CaracolTvPlayIE(InfoExtractor):
    # Caracol TV Play (backed by the inmobly.com API).
    _VALID_URL = r'https?://play\.caracoltv\.com/videoDetails/(?P<id>[^/?#]+)'
    _NETRC_MACHINE = 'caracoltv-play'

    _TESTS = [{
        'url': 'https://play.caracoltv.com/videoDetails/OTo4NGFmNjUwOWQ2ZmM0NTg2YWRiOWU0MGNhOWViOWJkYQ==',
        'info_dict': {
            'id': 'OTo4NGFmNjUwOWQ2ZmM0NTg2YWRiOWU0MGNhOWViOWJkYQ==',
            'title': 'La teoría del promedio',
            'description': 'md5:1cdd6d2c13f19ef0d9649ab81a023ac3',
        },
        'playlist_count': 6,
    }, {
        'url': 'https://play.caracoltv.com/videoDetails/OTo3OWM4ZTliYzQxMmM0MTMxYTk4Mjk2YjdjNGQ4NGRkOQ==/ella?season=0',
        'info_dict': {
            'id': 'OTo3OWM4ZTliYzQxMmM0MTMxYTk4Mjk2YjdjNGQ4NGRkOQ==',
            'title': 'Ella',
            'description': 'md5:a639b1feb5ddcc0cff92a489b4e544b8',
        },
        'playlist_count': 10,
    }, {
        'url': 'https://play.caracoltv.com/videoDetails/OTpiYTY1YTVmOTI5MzI0ZWJhOGZiY2Y3MmRlOWZlYmJkOA==/la-vuelta-al-mundo-en-80-risas-2022?season=0',
        'info_dict': {
            'id': 'OTpiYTY1YTVmOTI5MzI0ZWJhOGZiY2Y3MmRlOWZlYmJkOA==',
            'title': 'La vuelta al mundo en 80 risas 2022',
            'description': 'md5:e97aac36106e5c37ebf947b3350106a4',
        },
        'playlist_count': 17,
    }, {
        'url': 'https://play.caracoltv.com/videoDetails/MzoxX3BwbjRmNjB1',
        'only_matching': True,
    }]

    # Session token obtained by _perform_login; shared across extractions.
    _USER_TOKEN = None

    def _extract_app_token(self, webpage):
        """Return the Basic-auth token (base64 of key:secret) for the OAuth endpoint.

        key/secret come from the site's coreConfig.js mediation block; hardcoded
        values observed on the live site serve as the fallback.
        """
        config_js_path = self._search_regex(
            r'<script[^>]+src\s*=\s*"([^"]+coreConfig.js[^"]+)', webpage, 'config js url', fatal=False)

        mediation_config = {} if not config_js_path else self._search_json(
            r'mediation\s*:', self._download_webpage(
                urljoin('https://play.caracoltv.com/', config_js_path), None, fatal=False, note='Extracting JS config'),
            'mediation_config', None, transform_source=js_to_json, fatal=False)

        key = traverse_obj(
            mediation_config, ('live', 'key')) or '795cd9c089a1fc48094524a5eba85a3fca1331817c802f601735907c8bbb4f50'
        secret = traverse_obj(
            mediation_config, ('live', 'secret')) or '64dec00a6989ba83d087621465b5e5d38bdac22033b0613b659c442c78976fa0'

        return base64.b64encode(f'{key}:{secret}'.encode()).decode()

    def _perform_login(self, email, password):
        """Two-step login: app-token -> OAuth bearer token -> user token."""
        webpage = self._download_webpage('https://play.caracoltv.com/', None, fatal=False)
        app_token = self._extract_app_token(webpage)

        bearer_token = self._download_json(
            'https://eu-gateway.inmobly.com/applications/oauth', None, data=b'', note='Retrieving bearer token',
            headers={'Authorization': f'Basic {app_token}'})['token']

        self._USER_TOKEN = self._download_json(
            'https://eu-gateway.inmobly.com/user/login', None, note='Performing login', headers={
                'Content-Type': 'application/json',
                'Authorization': f'Bearer {bearer_token}',
            }, data=json.dumps({
                'device_data': {
                    'device_id': str(uuid.uuid4()),
                    'device_token': '',
                    'device_type': 'web',
                },
                'login_data': {
                    'enabled': True,
                    'email': email,
                    'password': password,
                },
            }).encode())['user_token']

    def _extract_video(self, video_data, series_id=None, season_id=None, season_number=None):
        """Map one API item to an info dict; entry_type == 3 marks a livestream."""
        formats, subtitles = self._extract_m3u8_formats_and_subtitles(video_data['stream_url'], series_id, 'mp4')

        return {
            'id': video_data['id'],
            'title': video_data.get('name'),
            'description': video_data.get('description'),
            'formats': formats,
            'subtitles': subtitles,
            'thumbnails': traverse_obj(
                video_data, ('extra_thumbs', ..., {'url': 'thumb_url', 'height': 'height', 'width': 'width'})),
            'series_id': series_id,
            'season_id': season_id,
            'season_number': int_or_none(season_number),
            'episode_number': int_or_none(video_data.get('item_order')),
            'is_live': video_data.get('entry_type') == 3,
        }

    def _extract_series_seasons(self, seasons, series_id):
        """Yield info dicts for every episode of every season (one feed call each)."""
        for season in seasons:
            api_response = self._download_json(
                'https://eu-gateway.inmobly.com/feed', series_id, query={'season_id': season['id']},
                headers={'Authorization': f'Bearer {self._USER_TOKEN}'})

            season_number = season.get('order')
            for episode in api_response['items']:
                yield self._extract_video(episode, series_id, season['id'], season_number)

    def _real_extract(self, url):
        series_id = self._match_id(url)

        # Fall back to the site's guest account when the user did not log in.
        if self._USER_TOKEN is None:
            self._perform_login('guest@inmobly.com', 'Test@gus1')

        api_response = self._download_json(
            'https://eu-gateway.inmobly.com/feed', series_id, query={'include_ids': series_id},
            headers={'Authorization': f'Bearer {self._USER_TOKEN}'})['items'][0]

        # Items without seasons are single videos; otherwise build a playlist.
        if not api_response.get('seasons'):
            return self._extract_video(api_response)

        return self.playlist_result(
            self._extract_series_seasons(api_response['seasons'], series_id),
            series_id, **traverse_obj(api_response, {
                'title': 'name',
                'description': 'description',
            }))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/stripchat.py
yt_dlp/extractor/stripchat.py
from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    UserNotLive,
    lowercase_escape,
    traverse_obj,
)


class StripchatIE(InfoExtractor):
    # Livestream extractor for stripchat.com model rooms.
    _VALID_URL = r'https?://stripchat\.com/(?P<id>[^/?#]+)'
    _TESTS = [{
        'url': 'https://stripchat.com/Joselin_Flower',
        'info_dict': {
            'id': 'Joselin_Flower',
            'ext': 'mp4',
            'title': 're:^Joselin_Flower [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'description': str,
            'is_live': True,
            'age_limit': 18,
        },
        'skip': 'Room is offline',
    }, {
        'url': 'https://stripchat.com/Rakhijaan@xh',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Extract the live HLS stream for a model room, or raise if unavailable."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id, headers=self.geo_verification_headers())

        # All page state lives in the preloaded Redux store.
        data = self._search_json(
            r'<script\b[^>]*>\s*window\.__PRELOADED_STATE__\s*=',
            webpage, 'data', video_id, transform_source=lowercase_escape)

        # A populated viewCam.show dict means the model is in a private show.
        if traverse_obj(data, ('viewCam', 'show', {dict})):
            raise ExtractorError('Model is in a private show', expected=True)
        if not traverse_obj(data, ('viewCam', 'model', 'isLive', {bool})):
            raise UserNotLive(video_id=video_id)

        model_id = data['viewCam']['model']['id']

        formats = []
        # HLS hosts are currently found in .configV3.static.features.hlsFallback.fallbackDomains[]
        # The rest of the path is for backwards compatibility and to guard against A/B testing
        for host in traverse_obj(data, ((('config', 'data'), ('configV3', 'static')), (
                (('features', 'featuresV2'), 'hlsFallback', 'fallbackDomains', ...), 'hlsStreamHost'))):
            # Try each candidate host until one yields a working manifest.
            formats = self._extract_m3u8_formats(
                f'https://edge-hls.{host}/hls/{model_id}/master/{model_id}_auto.m3u8',
                video_id, ext='mp4', m3u8_id='hls', fatal=False, live=True)
            if formats:
                break
        if not formats:
            self.raise_no_formats('Unable to extract stream host', video_id=video_id)

        return {
            'id': video_id,
            'title': video_id,
            'description': self._og_search_description(webpage),
            'is_live': True,
            'formats': formats,
            # Stripchat declares the RTA meta-tag, but in an non-standard format so _rta_search() can't be used
            'age_limit': 18,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/vimeo.py
yt_dlp/extractor/vimeo.py
import base64 import functools import itertools import json import re import time import urllib.parse from .common import InfoExtractor from ..networking import HEADRequest, Request from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, OnDemandPagedList, clean_html, determine_ext, filter_dict, get_element_by_class, int_or_none, join_nonempty, js_to_json, jwt_decode_hs256, merge_dicts, mimetype2ext, parse_filesize, parse_iso8601, parse_qs, qualities, smuggle_url, str_or_none, try_call, try_get, unified_timestamp, unsmuggle_url, url_basename, url_or_none, urlencode_postdata, urlhandle_detect_ext, urljoin, ) from ..utils.traversal import require, traverse_obj class VimeoBaseInfoExtractor(InfoExtractor): _NETRC_MACHINE = 'vimeo' _LOGIN_REQUIRED = False _LOGIN_URL = 'https://vimeo.com/log_in' _REFERER_HINT = ( 'Cannot download embed-only video without embedding URL. Please call yt-dlp ' 'with the URL of the page that embeds this video.') _DEFAULT_CLIENT = 'web' _DEFAULT_AUTHED_CLIENT = 'web' _CLIENT_HEADERS = { 'Accept': 'application/vnd.vimeo.*+json; version=3.4.10', 'Accept-Language': 'en', } _CLIENT_CONFIGS = { 'android': { 'CACHE_KEY': 'oauth-token-android', 'CACHE_ONLY': True, 'VIEWER_JWT': False, 'REQUIRES_AUTH': False, 'AUTH': 'NzRmYTg5YjgxMWExY2JiNzUwZDg1MjhkMTYzZjQ4YWYyOGEyZGJlMTp4OGx2NFd3QnNvY1lkamI2UVZsdjdDYlNwSDUrdm50YzdNNThvWDcwN1JrenJGZC9tR1lReUNlRjRSVklZeWhYZVpRS0tBcU9YYzRoTGY2Z1dlVkJFYkdJc0dMRHpoZWFZbU0reDRqZ1dkZ1diZmdIdGUrNUM5RVBySlM0VG1qcw==', 'USER_AGENT': 'com.vimeo.android.videoapp (OnePlus, ONEPLUS A6003, OnePlus, Android 14/34 Version 11.8.1) Kotlin VimeoNetworking/3.12.0', 'VIDEOS_FIELDS': ( 'uri', 'name', 'description', 'type', 'link', 'player_embed_url', 'duration', 'width', 'language', 'height', 'embed', 'created_time', 'modified_time', 'release_time', 'content_rating', 'content_rating_class', 'rating_mod_locked', 'license', 'privacy', 'pictures', 'tags', 'stats', 'categories', 'uploader', 'metadata', 'user', 
'files', 'download', 'app', 'play', 'status', 'resource_key', 'badge', 'upload', 'transcode', 'is_playable', 'has_audio', ), }, 'ios': { 'CACHE_KEY': 'oauth-token-ios', 'CACHE_ONLY': True, 'VIEWER_JWT': False, 'REQUIRES_AUTH': False, 'AUTH': 'MTMxNzViY2Y0NDE0YTQ5YzhjZTc0YmU0NjVjNDQxYzNkYWVjOWRlOTpHKzRvMmgzVUh4UkxjdU5FRW80cDNDbDhDWGR5dVJLNUJZZ055dHBHTTB4V1VzaG41bEx1a2hiN0NWYWNUcldSSW53dzRUdFRYZlJEZmFoTTArOTBUZkJHS3R4V2llYU04Qnl1bERSWWxUdXRidjNqR2J4SHFpVmtFSUcyRktuQw==', 'USER_AGENT': 'Vimeo/11.10.0 (com.vimeo; build:250424.164813.0; iOS 18.4.1) Alamofire/5.9.0 VimeoNetworking/5.0.0', 'VIDEOS_FIELDS': ( 'uri', 'name', 'description', 'type', 'link', 'player_embed_url', 'duration', 'width', 'language', 'height', 'embed', 'created_time', 'modified_time', 'release_time', 'content_rating', 'content_rating_class', 'rating_mod_locked', 'license', 'config_url', 'embed_player_config_url', 'privacy', 'pictures', 'tags', 'stats', 'categories', 'uploader', 'metadata', 'user', 'files', 'download', 'app', 'play', 'status', 'resource_key', 'badge', 'upload', 'transcode', 'is_playable', 'has_audio', ), }, 'web': { 'CACHE_ONLY': False, 'VIEWER_JWT': True, 'REQUIRES_AUTH': True, 'USER_AGENT': None, 'VIDEOS_FIELDS': ( 'config_url', 'created_time', 'description', 'license', 'metadata.connections.comments.total', 'metadata.connections.likes.total', 'release_time', 'stats.plays', ), }, } _oauth_tokens = {} _viewer_info = None @staticmethod def _smuggle_referrer(url, referrer_url): return smuggle_url(url, {'referer': referrer_url}) def _unsmuggle_headers(self, url): """@returns (url, smuggled_data, headers)""" url, data = unsmuggle_url(url, {}) headers = self.get_param('http_headers').copy() if 'referer' in data: headers['Referer'] = data['referer'] return url, data, headers def _jwt_is_expired(self, token): return jwt_decode_hs256(token)['exp'] - time.time() < 120 def _fetch_viewer_info(self, display_id=None): if self._viewer_info and not self._jwt_is_expired(self._viewer_info['jwt']): 
return self._viewer_info self._viewer_info = self._download_json( 'https://vimeo.com/_next/viewer', display_id, 'Downloading web token info', 'Failed to download web token info', headers={'Accept': 'application/json'}) return self._viewer_info @property def _is_logged_in(self): return 'vimeo' in self._get_cookies('https://vimeo.com') def _perform_login(self, username, password): if self._is_logged_in: return viewer = self._fetch_viewer_info() data = { 'action': 'login', 'email': username, 'password': password, 'service': 'vimeo', 'token': viewer['xsrft'], } try: self._download_webpage( self._LOGIN_URL, None, 'Logging in', data=urlencode_postdata(data), headers={ 'Content-Type': 'application/x-www-form-urlencoded', 'Referer': self._LOGIN_URL, }) except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status in (404, 405, 418): raise ExtractorError( 'Unable to log in: bad username or password', expected=True) raise ExtractorError('Unable to log in') # Clear unauthenticated viewer info self._viewer_info = None def _real_initialize(self): if self._is_logged_in: return if self._LOGIN_REQUIRED: self.raise_login_required() if self._DEFAULT_CLIENT != 'web': return for client_name, client_config in self._CLIENT_CONFIGS.items(): if not client_config['CACHE_ONLY']: continue cache_key = client_config['CACHE_KEY'] if cache_key not in self._oauth_tokens: if token := self.cache.load(self._NETRC_MACHINE, cache_key): self._oauth_tokens[cache_key] = token if self._oauth_tokens.get(cache_key): self._DEFAULT_CLIENT = client_name self.write_debug( f'Found cached {client_name} token; using {client_name} as default API client') return def _get_video_password(self): password = self.get_param('videopassword') if password is None: raise ExtractorError( 'This video is protected by a password, use the --video-password option', expected=True) return password def _verify_video_password(self, video_id, path=None): video_password = self._get_video_password() token = 
self._fetch_viewer_info(video_id)['xsrft'] url = join_nonempty('https://vimeo.com', path, video_id, delim='/') try: self._request_webpage( f'{url}/password', video_id, 'Submitting video password', data=json.dumps({ 'password': video_password, 'token': token, }, separators=(',', ':')).encode(), headers={ 'Accept': '*/*', 'Content-Type': 'application/json', 'Referer': url, }, impersonate=True) except ExtractorError as error: if isinstance(error.cause, HTTPError) and error.cause.status == 418: raise ExtractorError('Wrong password', expected=True) raise def _extract_config_url(self, webpage, **kwargs): return self._html_search_regex( r'\bdata-config-url="([^"]+)"', webpage, 'config URL', **kwargs) def _extract_vimeo_config(self, webpage, video_id, *args, **kwargs): vimeo_config = self._search_regex( r'vimeo\.config\s*=\s*(?:({.+?})|_extend\([^,]+,\s+({.+?})\));', webpage, 'vimeo config', *args, **kwargs) if vimeo_config: return self._parse_json(vimeo_config, video_id) def _parse_config(self, config, video_id): video_data = config['video'] video_title = video_data.get('title') live_event = video_data.get('live_event') or {} live_status = { 'pending': 'is_upcoming', 'active': 'is_upcoming', 'started': 'is_live', 'ended': 'post_live', }.get(live_event.get('status')) is_live = live_status == 'is_live' request = config.get('request') or {} formats = [] subtitles = {} config_files = video_data.get('files') or request.get('files') or {} for f in (config_files.get('progressive') or []): video_url = f.get('url') if not video_url: continue formats.append({ 'url': video_url, 'format_id': 'http-{}'.format(f.get('quality')), 'source_preference': 10, 'width': int_or_none(f.get('width')), 'height': int_or_none(f.get('height')), 'fps': int_or_none(f.get('fps')), 'tbr': int_or_none(f.get('bitrate')), }) # TODO: fix handling of 308 status code returned for live archive manifest requests QUALITIES = ('low', 'medium', 'high') quality = qualities(QUALITIES) sep_pattern = r'/sep/video/' for 
files_type in ('hls', 'dash'): for cdn_name, cdn_data in (try_get(config_files, lambda x: x[files_type]['cdns']) or {}).items(): # TODO: Also extract 'avc_url'? Investigate if there are 'hevc_url', 'av1_url'? manifest_url = cdn_data.get('url') if not manifest_url: continue format_id = f'{files_type}-{cdn_name}' sep_manifest_urls = [] if re.search(sep_pattern, manifest_url): for suffix, repl in (('', 'video'), ('_sep', 'sep/video')): sep_manifest_urls.append((format_id + suffix, re.sub( sep_pattern, f'/{repl}/', manifest_url))) else: sep_manifest_urls = [(format_id, manifest_url)] for f_id, m_url in sep_manifest_urls: if files_type == 'hls': fmts, subs = self._extract_m3u8_formats_and_subtitles( m_url, video_id, 'mp4', live=is_live, m3u8_id=f_id, note=f'Downloading {cdn_name} m3u8 information', fatal=False) # m3u8 doesn't give audio bitrates; need to prioritize based on GROUP-ID # See: https://github.com/yt-dlp/yt-dlp/issues/10854 for f in fmts: if mobj := re.search(rf'audio-({"|".join(QUALITIES)})', f['format_id']): f['quality'] = quality(mobj.group(1)) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) elif files_type == 'dash': if 'json=1' in m_url: real_m_url = (self._download_json(m_url, video_id, fatal=False) or {}).get('url') if real_m_url: m_url = real_m_url fmts, subs = self._extract_mpd_formats_and_subtitles( m_url.replace('/master.json', '/master.mpd'), video_id, f_id, f'Downloading {cdn_name} MPD information', fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) live_archive = live_event.get('archive') or {} live_archive_source_url = live_archive.get('source_url') if live_archive_source_url and live_archive.get('status') == 'done': formats.append({ 'format_id': 'live-archive-source', 'url': live_archive_source_url, 'quality': 10, }) for tt in (request.get('text_tracks') or []): subtitles.setdefault(tt['lang'], []).append({ 'ext': 'vtt', 'url': urljoin('https://player.vimeo.com/', tt['url']), }) thumbnails = [] 
if not is_live: for key, thumb in (video_data.get('thumbs') or {}).items(): thumbnails.append({ 'id': key, 'width': int_or_none(key), 'url': thumb, }) thumbnails.extend(traverse_obj(video_data, (('thumbnail', 'thumbnail_url'), {'url': {url_or_none}}))) owner = video_data.get('owner') or {} video_uploader_url = owner.get('url') return { 'id': str_or_none(video_data.get('id')) or video_id, 'title': video_title, 'uploader': owner.get('name'), 'uploader_id': video_uploader_url.split('/')[-1] if video_uploader_url else None, 'uploader_url': video_uploader_url, 'thumbnails': thumbnails, 'duration': int_or_none(video_data.get('duration')), 'chapters': sorted(traverse_obj(config, ( 'embed', 'chapters', lambda _, v: int(v['timecode']) is not None, { 'title': ('title', {str}), 'start_time': ('timecode', {int_or_none}), })), key=lambda c: c['start_time']) or None, 'formats': formats, 'subtitles': subtitles, 'live_status': live_status, 'release_timestamp': traverse_obj(live_event, ('ingest', ( ('scheduled_start_time', {parse_iso8601}), ('start_time', {int_or_none}), ), any)), # Note: Bitrates are completely broken. Single m3u8 may contain entries in kbps and bps # at the same time without actual units specified. 
'_format_sort_fields': ('quality', 'res', 'fps', 'hdr:12', 'source'), } def _fetch_oauth_token(self, client): client_config = self._CLIENT_CONFIGS[client] if client_config['VIEWER_JWT']: return f'jwt {self._fetch_viewer_info()["jwt"]}' cache_key = client_config['CACHE_KEY'] if not self._oauth_tokens.get(cache_key): self._oauth_tokens[cache_key] = self.cache.load(self._NETRC_MACHINE, cache_key) if not self._oauth_tokens.get(cache_key): if client_config['CACHE_ONLY']: raise ExtractorError( f'The {client} client is unable to fetch new OAuth tokens ' f'and is only intended for use with previously cached tokens', expected=True) self._oauth_tokens[cache_key] = self._download_json( 'https://api.vimeo.com/oauth/authorize/client', None, f'Fetching {client} OAuth token', f'Failed to fetch {client} OAuth token', headers={ 'Authorization': f'Basic {client_config["AUTH"]}', 'User-Agent': client_config['USER_AGENT'], **self._CLIENT_HEADERS, }, data=urlencode_postdata({ 'grant_type': 'client_credentials', 'scope': 'private public create edit delete interact upload purchased stats video_files', }, quote_via=urllib.parse.quote))['access_token'] self.cache.store(self._NETRC_MACHINE, cache_key, self._oauth_tokens[cache_key]) return f'Bearer {self._oauth_tokens[cache_key]}' def _get_requested_client(self): if client := self._configuration_arg('client', [None], ie_key=VimeoIE)[0]: if client not in self._CLIENT_CONFIGS: raise ExtractorError( f'Unsupported API client "{client}" requested. 
' f'Supported clients are: {", ".join(self._CLIENT_CONFIGS)}', expected=True) self.write_debug( f'Using {client} API client as specified by extractor argument', only_once=True) return client if self._is_logged_in: return self._DEFAULT_AUTHED_CLIENT return self._DEFAULT_CLIENT def _call_videos_api(self, video_id, unlisted_hash=None, path=None, *, force_client=None, query=None, **kwargs): client = force_client or self._get_requested_client() client_config = self._CLIENT_CONFIGS[client] if client_config['REQUIRES_AUTH'] and not self._is_logged_in: self.raise_login_required(f'The {client} client only works when logged-in') return self._download_json( join_nonempty( 'https://api.vimeo.com/videos', join_nonempty(video_id, unlisted_hash, delim=':'), path, delim='/'), video_id, f'Downloading {client} API JSON', f'Unable to download {client} API JSON', headers=filter_dict({ 'Authorization': self._fetch_oauth_token(client), 'User-Agent': client_config['USER_AGENT'], **self._CLIENT_HEADERS, }), query={ 'fields': ','.join(client_config['VIDEOS_FIELDS']), **(query or {}), }, **kwargs) def _extract_original_format(self, url, video_id, unlisted_hash=None): # Original/source formats are only available when logged in if not self._is_logged_in: return None policy = self._configuration_arg('original_format_policy', ['auto'], ie_key=VimeoIE)[0] if policy == 'never': return None try: download_data = self._download_json( url, video_id, 'Loading download config JSON', query=filter_dict({ 'action': 'load_download_config', 'unlisted_hash': unlisted_hash, }), headers={ 'Accept': 'application/json', 'X-Requested-With': 'XMLHttpRequest', }) except ExtractorError as error: self.write_debug(f'Unable to load download config JSON: {error.cause}') download_data = None source_file = traverse_obj(download_data, ('source_file', {dict})) or {} download_url = traverse_obj(source_file, ('download_url', {url_or_none})) if download_url and not source_file.get('is_cold') and not 
source_file.get('is_defrosting'): source_name = source_file.get('public_name', 'Original') if self._is_valid_url(download_url, video_id, f'{source_name} video'): ext = (try_get( source_file, lambda x: x['extension'], str) or determine_ext( download_url, None) or 'mp4').lower() return { 'url': download_url, 'ext': ext, 'width': int_or_none(source_file.get('width')), 'height': int_or_none(source_file.get('height')), 'filesize': parse_filesize(source_file.get('size')), 'format_id': source_name, 'quality': 1, } # Most web client API requests are subject to rate-limiting (429) when logged-in. # Requesting only the 'privacy' field is NOT rate-limited, # so first we should check if video even has 'download' formats available try: privacy_info = self._call_videos_api( video_id, unlisted_hash, force_client='web', query={'fields': 'privacy'}) except ExtractorError as error: self.write_debug(f'Unable to download privacy info: {error.cause}') return None if not traverse_obj(privacy_info, ('privacy', 'download', {bool})): msg = f'{video_id}: Vimeo says this video is not downloadable' if policy != 'always': self.write_debug( f'{msg}, so yt-dlp is not attempting to extract the original/source format. 
' f'To try anyways, use --extractor-args "vimeo:original_format_policy=always"') return None self.write_debug(f'{msg}; attempting to extract original/source format anyways') original_response = self._call_videos_api( video_id, unlisted_hash, force_client='web', query={'fields': 'download'}, fatal=False) for download_data in traverse_obj(original_response, ('download', ..., {dict})): download_url = download_data.get('link') if not download_url or download_data.get('quality') != 'source': continue ext = determine_ext(parse_qs(download_url).get('filename', [''])[0].lower(), default_ext=None) if not ext: urlh = self._request_webpage( HEADRequest(download_url), video_id, fatal=False, note='Determining source extension') ext = urlh and urlhandle_detect_ext(urlh) return { 'url': download_url, 'ext': ext or 'unknown_video', 'format_id': download_data.get('public_name', 'Original'), 'width': int_or_none(download_data.get('width')), 'height': int_or_none(download_data.get('height')), 'fps': int_or_none(download_data.get('fps')), 'filesize': int_or_none(download_data.get('size')), 'quality': 1, } @staticmethod def _get_embed_params(is_embed, referer): return { 'is_embed': 'true' if is_embed else 'false', 'referrer': urllib.parse.urlparse(referer).hostname if referer and is_embed else '', } def _get_album_data_and_hashed_pass(self, album_id, is_embed, referer): viewer = self._fetch_viewer_info(album_id) jwt = viewer['jwt'] album = self._download_json( 'https://api.vimeo.com/albums/' + album_id, album_id, headers={'Authorization': 'jwt ' + jwt, 'Accept': 'application/json'}, query={**self._get_embed_params(is_embed, referer), 'fields': 'description,name,privacy'}) hashed_pass = None if traverse_obj(album, ('privacy', 'view')) == 'password': password = self.get_param('videopassword') if not password: raise ExtractorError( 'This album is protected by a password, use the --video-password option', expected=True) try: hashed_pass = self._download_json( 
f'https://vimeo.com/showcase/{album_id}/auth', album_id, 'Verifying the password', data=urlencode_postdata({ 'password': password, 'token': viewer['xsrft'], }), headers={ 'X-Requested-With': 'XMLHttpRequest', })['hashed_pass'] except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 401: raise ExtractorError('Wrong password', expected=True) raise return album, hashed_pass class VimeoIE(VimeoBaseInfoExtractor): """Information extractor for vimeo.com.""" # _VALID_URL matches Vimeo URLs _VALID_URL = r'''(?x) https?:// (?: (?: www| player ) \. )? vimeo\.com/ (?: (?P<u>user)| (?!(?:channels|album|showcase)/[^/?#]+/?(?:$|[?#])|[^/]+/review/|ondemand/) (?:(?!event/).*?/)?? (?P<q> (?: play_redirect_hls| moogaloop\.swf)\?clip_id= )? (?:videos?/)? ) (?P<id>[0-9]+) (?(u) /(?!videos|likes)[^/?#]+/?| (?(q)|/(?P<unlisted_hash>[\da-f]{10}))? ) (?:(?(q)[&]|(?(u)|/?)[?]).*?)?(?:[#].*)?$ ''' IE_NAME = 'vimeo' _EMBED_REGEX = [ # iframe r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//player\.vimeo\.com/video/\d+.*?)\1', # Embedded (swf embed) Vimeo player r'<embed[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?vimeo\.com/moogaloop\.swf.+?)\1', # Non-standard embedded Vimeo player r'<video[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?vimeo\.com/[0-9]+)\1', ] _TESTS = [{ 'url': 'http://vimeo.com/56015672#at=0', 'md5': '8879b6cc097e987f02484baf890129e5', 'info_dict': { 'id': '56015672', 'ext': 'mp4', 'title': "youtube-dl test video '' ä↭𝕐-BaW jenozKc", 'description': 'md5:2d3305bad981a06ff79f027f19865021', 'timestamp': 1355990239, 'upload_date': '20121220', 'uploader_url': r're:https?://(?:www\.)?vimeo\.com/user7108434', 'uploader_id': 'user7108434', 'uploader': 'Filippo Valsorda', 'duration': 10, 'license': 'by-sa', }, 'params': { 'format': 'best[protocol=https]', }, 'skip': 'No longer available', }, { 'url': 'https://player.vimeo.com/video/54469442', 'md5': '619b811a4417aa4abe78dc653becf511', 'note': 'Videos that embed the url in the player page', 
'info_dict': { 'id': '54469442', 'ext': 'mp4', 'title': 'Kathy Sierra: Building the minimum Badass User, Business of Software 2012', 'uploader': 'Business of Software', 'uploader_id': 'businessofsoftware', 'uploader_url': 'https://vimeo.com/businessofsoftware', 'duration': 3610, 'thumbnail': r're:https?://i\.vimeocdn\.com/video/.+', }, 'params': {'format': 'best[protocol=https]'}, 'expected_warnings': ['Failed to parse XML: not well-formed'], }, { 'url': 'http://vimeo.com/68375962', 'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7', 'note': 'Video protected with password', 'info_dict': { 'id': '68375962', 'ext': 'mp4', 'title': 'youtube-dl password protected test video', 'timestamp': 1371214555, 'upload_date': '20130614', 'release_timestamp': 1371214555, 'release_date': '20130614', 'uploader_id': 'user18948128', 'uploader_url': 'https://vimeo.com/user18948128', 'uploader': 'Jaime Marquínez Ferrándiz', 'duration': 10, 'comment_count': int, 'like_count': int, 'thumbnail': r're:https?://i\.vimeocdn\.com/video/.+', }, 'params': { 'format': 'best[protocol=https]', 'videopassword': 'youtube-dl', }, 'expected_warnings': ['Failed to parse XML: not well-formed'], }, { 'url': 'http://vimeo.com/channels/keypeele/75629013', 'md5': '2f86a05afe9d7abc0b9126d229bbe15d', 'info_dict': { 'id': '75629013', 'ext': 'mp4', 'title': 'Key & Peele: Terrorist Interrogation', 'description': 'md5:6173f270cd0c0119f22817204b3eb86c', 'uploader_id': 'atencio', 'uploader_url': 'https://vimeo.com/atencio', 'uploader': 'Peter Atencio', 'channel_id': 'keypeele', 'channel_url': 'https://vimeo.com/channels/keypeele', 'timestamp': 1380339469, 'upload_date': '20130928', 'duration': 187, 'thumbnail': r're:https?://i\.vimeocdn\.com/video/.+', 'view_count': int, 'comment_count': int, 'like_count': int, }, 'params': {'format': 'http-1080p'}, 'expected_warnings': ['Failed to parse XML: not well-formed'], }, { 'url': 'http://vimeo.com/76979871', 'note': 'Video with subtitles', 'info_dict': { 'id': '76979871', 'ext': 
'mp4', 'title': 'The New Vimeo Player (You Know, For Videos)', 'description': str, # FIXME: Dynamic SEO spam description 'timestamp': 1381860509, 'upload_date': '20131015', 'release_timestamp': 1381860509, 'release_date': '20131015', 'uploader_id': 'staff', 'uploader_url': 'https://vimeo.com/staff', 'uploader': 'Vimeo', 'duration': 62, 'comment_count': int, 'like_count': int, 'thumbnail': r're:https?://i\.vimeocdn\.com/video/.+', 'subtitles': { 'de': 'count:2', 'en': 'count:2', 'es': 'count:2', 'fr': 'count:2', }, }, 'expected_warnings': [ 'Ignoring subtitle tracks found in the HLS manifest', 'Failed to parse XML: not well-formed', ], }, { # from https://www.ouya.tv/game/Pier-Solar-and-the-Great-Architects/ 'url': 'https://player.vimeo.com/video/98044508', 'note': 'The js code contains assignments to the same variable as the config', 'info_dict': { 'id': '98044508', 'ext': 'mp4', 'title': 'Pier Solar OUYA Official Trailer', 'uploader': 'Tulio Gonçalves', 'uploader_id': 'user28849593', 'uploader_url': 'https://vimeo.com/user28849593', 'duration': 118, 'thumbnail': r're:https?://i\.vimeocdn\.com/video/.+', }, 'expected_warnings': ['Failed to parse XML: not well-formed'], }, { # contains Original format 'url': 'https://vimeo.com/33951933', # 'md5': '53c688fa95a55bf4b7293d37a89c5c53', 'info_dict': { 'id': '33951933', 'ext': 'mp4', 'title': 'FOX CLASSICS - Forever Classic ID - A Full Minute', 'uploader': 'The DMCI', 'uploader_id': 'dmci', 'uploader_url': 'https://vimeo.com/dmci', 'timestamp': 1324361742, 'upload_date': '20111220', 'description': 'md5:f37b4ad0f3ded6fa16f38ecde16c3c44', 'duration': 60, 'comment_count': int, 'thumbnail': r're:https?://i\.vimeocdn\.com/video/.+', 'like_count': int, 'release_timestamp': 1324361742, 'release_date': '20111220', }, # 'params': {'format': 'Original'}, 'expected_warnings': ['Failed to parse XML: not well-formed'], }, { 'note': 'Contains source format not accessible in webpage', 'url': 'https://vimeo.com/393756517', # 'md5': 
'c464af248b592190a5ffbb5d33f382b0', 'info_dict': { 'id': '393756517', # 'ext': 'mov', 'ext': 'mp4', 'timestamp': 1582660091, 'uploader_id': 'frameworkla', 'title': 'Straight To Hell - Sabrina: Netflix', 'uploader': 'Framework Studio', 'upload_date': '20200225', 'duration': 176, 'thumbnail': r're:https?://i\.vimeocdn\.com/video/.+',
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
true
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/freetv.py
yt_dlp/extractor/freetv.py
import itertools import re from .common import InfoExtractor from ..utils import int_or_none, traverse_obj, urlencode_postdata class FreeTvBaseIE(InfoExtractor): def _get_api_response(self, content_id, resource_type, postdata): return self._download_json( 'https://www.freetv.com/wordpress/wp-admin/admin-ajax.php', content_id, data=urlencode_postdata(postdata), note=f'Downloading {content_id} {resource_type} JSON')['data'] class FreeTvMoviesIE(FreeTvBaseIE): _VALID_URL = r'https?://(?:www\.)?freetv\.com/peliculas/(?P<id>[^/]+)' _TESTS = [{ 'url': 'https://www.freetv.com/peliculas/atrapame-si-puedes/', 'md5': 'dc62d5abf0514726640077cd1591aa92', 'info_dict': { 'id': '428021', 'title': 'Atrápame Si Puedes', 'description': 'md5:ca63bc00898aeb2f64ec87c6d3a5b982', 'ext': 'mp4', }, }, { 'url': 'https://www.freetv.com/peliculas/monstruoso/', 'md5': '509c15c68de41cb708d1f92d071f20aa', 'info_dict': { 'id': '377652', 'title': 'Monstruoso', 'description': 'md5:333fc19ee327b457b980e54a911ea4a3', 'ext': 'mp4', }, }] def _extract_video(self, content_id, action='olyott_video_play'): api_response = self._get_api_response(content_id, 'video', { 'action': action, 'contentID': content_id, }) video_id, video_url = api_response['displayMeta']['contentID'], api_response['displayMeta']['streamURLVideo'] formats, subtitles = self._extract_m3u8_formats_and_subtitles(video_url, video_id, 'mp4') return { 'id': video_id, 'title': traverse_obj(api_response, ('displayMeta', 'title')), 'description': traverse_obj(api_response, ('displayMeta', 'desc')), 'formats': formats, 'subtitles': subtitles, } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) return self._extract_video( self._search_regex(( r'class=["\'][^>]+postid-(?P<video_id>\d+)', r'<link[^>]+freetv.com/\?p=(?P<video_id>\d+)', r'<div[^>]+data-params=["\'][^>]+post_id=(?P<video_id>\d+)', ), webpage, 'video id', group='video_id')) class FreeTvIE(FreeTvBaseIE): IE_NAME = 
'freetv:series' _VALID_URL = r'https?://(?:www\.)?freetv\.com/series/(?P<id>[^/]+)' _TESTS = [{ 'url': 'https://www.freetv.com/series/el-detective-l/', 'info_dict': { 'id': 'el-detective-l', 'title': 'El Detective L', 'description': 'md5:f9f1143bc33e9856ecbfcbfb97a759be', }, 'playlist_count': 24, }, { 'url': 'https://www.freetv.com/series/esmeraldas/', 'info_dict': { 'id': 'esmeraldas', 'title': 'Esmeraldas', 'description': 'md5:43d7ec45bd931d8268a4f5afaf4c77bf', }, 'playlist_count': 62, }, { 'url': 'https://www.freetv.com/series/las-aventuras-de-leonardo/', 'info_dict': { 'id': 'las-aventuras-de-leonardo', 'title': 'Las Aventuras de Leonardo', 'description': 'md5:0c47130846c141120a382aca059288f6', }, 'playlist_count': 13, }, ] def _extract_series_season(self, season_id, series_title): episodes = self._get_api_response(season_id, 'series', { 'contentID': season_id, 'action': 'olyott_get_dynamic_series_content', 'type': 'list', 'perPage': '1000', })['1'] for episode in episodes: video_id = str(episode['contentID']) formats, subtitles = self._extract_m3u8_formats_and_subtitles(episode['streamURL'], video_id, 'mp4') yield { 'id': video_id, 'title': episode.get('fullTitle'), 'description': episode.get('description'), 'formats': formats, 'subtitles': subtitles, 'thumbnail': episode.get('thumbnail'), 'series': series_title, 'series_id': traverse_obj(episode, ('contentMeta', 'displayMeta', 'seriesID')), 'season_id': traverse_obj(episode, ('contentMeta', 'displayMeta', 'seasonID')), 'season_number': traverse_obj( episode, ('contentMeta', 'displayMeta', 'seasonNum'), expected_type=int_or_none), 'episode_number': traverse_obj( episode, ('contentMeta', 'displayMeta', 'episodeNum'), expected_type=int_or_none), } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) title = self._html_search_regex( r'<h1[^>]+class=["\']synopis[^>]>(?P<title>[^<]+)', webpage, 'title', group='title', fatal=False) description = 
self._html_search_regex( r'<div[^>]+class=["\']+synopis content[^>]><p>(?P<description>[^<]+)', webpage, 'description', group='description', fatal=False) return self.playlist_result( itertools.chain.from_iterable( self._extract_series_season(season_id, title) for season_id in re.findall(r'<option[^>]+value=["\'](\d+)["\']', webpage)), display_id, title, description)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/rule34video.py
yt_dlp/extractor/rule34video.py
import re from .common import InfoExtractor from ..utils import ( clean_html, extract_attributes, get_element_by_attribute, get_element_by_class, get_element_html_by_class, get_elements_by_class, int_or_none, parse_count, parse_duration, unescapeHTML, ) from ..utils.traversal import traverse_obj class Rule34VideoIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?rule34video\.com/videos?/(?P<id>\d+)' _TESTS = [ { 'url': 'https://rule34video.com/video/3065157/shot-it-mmd-hmv/', 'md5': 'ffccac2c23799dabbd192621ae4d04f3', 'info_dict': { 'id': '3065157', 'ext': 'mp4', 'title': 'Shot It-(mmd hmv)', 'thumbnail': 'https://rule34video.com/contents/videos_screenshots/3065000/3065157/preview.jpg', 'duration': 347.0, 'age_limit': 18, 'view_count': int, 'like_count': int, 'comment_count': int, 'timestamp': 1639872000, 'description': 'https://discord.gg/aBqPrHSHvv', 'upload_date': '20211219', 'uploader': 'Sweet HMV', 'uploader_url': 'https://rule34video.com/members/22119/', 'categories': ['3D', 'MMD', 'iwara'], 'tags': 'mincount:10', }, }, { 'url': 'https://rule34video.com/videos/3065296/lara-in-trouble-ep-7-wildeerstudio/', 'md5': '6bb5169f9f6b38cd70882bf2e64f6b86', 'info_dict': { 'id': '3065296', 'ext': 'mp4', 'title': 'Lara in Trouble Ep. 
7 [WildeerStudio]', 'thumbnail': 'https://rule34video.com/contents/videos_screenshots/3065000/3065296/preview.jpg', 'duration': 938.0, 'age_limit': 18, 'view_count': int, 'like_count': int, 'comment_count': int, 'timestamp': 1640131200, 'description': '', 'creators': ['WildeerStudio'], 'upload_date': '20211222', 'uploader': 'CerZule', 'uploader_url': 'https://rule34video.com/members/36281/', 'categories': ['3D', 'Tomb Raider'], 'tags': 'mincount:40', }, }, ] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) formats = [] for mobj in re.finditer(r'<a[^>]+href="(?P<video_url>[^"]+download=true[^"]+)".*>(?P<ext>[^\s]+) (?P<quality>[^<]+)p</a>', webpage): url, ext, quality = mobj.groups() formats.append({ 'url': url, 'ext': ext.lower(), 'quality': quality, }) categories, creators, uploader, uploader_url = [None] * 4 for col in get_elements_by_class('col', webpage): label = clean_html(get_element_by_class('label', col)) if label == 'Categories:': categories = list(map(clean_html, get_elements_by_class('item', col))) elif label == 'Artist:': creators = list(map(clean_html, get_elements_by_class('item', col))) elif label == 'Uploaded By:': uploader = clean_html(get_element_by_class('name', col)) uploader_url = extract_attributes(get_element_html_by_class('name', col) or '').get('href') return { **traverse_obj(self._search_json_ld(webpage, video_id, default={}), ({ 'title': 'title', 'view_count': 'view_count', 'like_count': 'like_count', 'duration': 'duration', 'timestamp': 'timestamp', 'description': 'description', 'thumbnail': ('thumbnails', 0, 'url'), })), 'id': video_id, 'formats': formats, 'title': self._html_extract_title(webpage), 'thumbnail': self._html_search_regex( r'preview_url:\s+\'([^\']+)\'', webpage, 'thumbnail', default=None), 'duration': parse_duration(self._html_search_regex( r'"icon-clock"></i>\s+<span>((?:\d+:?)+)', webpage, 'duration', default=None)), 'view_count': 
int_or_none(self._html_search_regex( r'"icon-eye"></i>\s+<span>([ \d]+)', webpage, 'views', default='').replace(' ', '')), 'like_count': parse_count(get_element_by_class('voters count', webpage)), 'comment_count': int_or_none(self._search_regex( r'[^(]+\((\d+)\)', get_element_by_attribute('href', '#tab_comments', webpage), 'comment count', fatal=False)), 'age_limit': 18, 'creators': creators, 'uploader': uploader, 'uploader_url': uploader_url, 'categories': categories, 'tags': list(map(unescapeHTML, re.findall( r'<a class="tag_item"[^>]+\bhref="https://rule34video\.com/tags/\d+/"[^>]*>(?P<tag>[^>]*)</a>', webpage))), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/zapiks.py
yt_dlp/extractor/zapiks.py
import re from .common import InfoExtractor from ..utils import ( int_or_none, parse_duration, parse_iso8601, xpath_text, xpath_with_ns, ) class ZapiksIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?zapiks\.(?:fr|com)/(?:(?:[a-z]{2}/)?(?P<display_id>.+?)\.html|index\.php\?.*\bmedia_id=(?P<id>\d+))' _EMBED_REGEX = [r'<iframe[^>]+src="(?P<url>https?://(?:www\.)?zapiks\.fr/index\.php\?.+?)"'] _TESTS = [{ 'url': 'http://www.zapiks.fr/ep2s3-bon-appetit-eh-be-viva.html', 'md5': 'aeb3c473b2d564b2d46d664d28d5f050', 'info_dict': { 'id': '80798', 'ext': 'mp4', 'title': 'EP2S3 - Bon Appétit - Eh bé viva les pyrénées con!', 'description': 'md5:7054d6f6f620c6519be1fe710d4da847', 'thumbnail': r're:https?://zpks\.com/.+\.jpg', 'duration': 528, 'timestamp': 1359044972, 'upload_date': '20130124', 'view_count': int, }, }, { 'url': 'http://www.zapiks.com/ep3s5-bon-appetit-baqueira-m-1.html', 'only_matching': True, }, { 'url': 'http://www.zapiks.com/nl/ep3s5-bon-appetit-baqueira-m-1.html', 'only_matching': True, }, { 'url': 'http://www.zapiks.fr/index.php?action=playerIframe&amp;media_id=118046&amp;width=640&amp;height=360&amp;autoStart=false&amp;language=fr', 'only_matching': True, }] _WEBPAGE_TESTS = [{ 'url': 'https://www.skipass.com/news/116090-bon-appetit-s5ep3-baqueira-mi-cor.html', 'info_dict': { 'id': '118046', 'ext': 'mp4', 'title': 'EP3S5 - Bon Appétit - Baqueira Mi Corazon !', 'thumbnail': r're:https?://zpks\.com/.+\.jpg', }, }] def _real_extract(self, url): mobj = self._match_valid_url(url) video_id = mobj.group('id') display_id = mobj.group('display_id') or video_id webpage = self._download_webpage(url, display_id) if not video_id: video_id = self._search_regex( r'data-media-id="(\d+)"', webpage, 'video id') playlist = self._download_xml( f'http://www.zapiks.fr/view/index.php?action=playlist&media_id={video_id}&lang=en', display_id) NS_MAP = { 'jwplayer': 'http://rss.jwpcdn.com/', } def ns(path): return xpath_with_ns(path, NS_MAP) item = 
playlist.find('./channel/item') title = xpath_text(item, 'title', 'title') or self._og_search_title(webpage) description = self._og_search_description(webpage, default=None) thumbnail = xpath_text( item, ns('./jwplayer:image'), 'thumbnail') or self._og_search_thumbnail(webpage, default=None) duration = parse_duration(self._html_search_meta( 'duration', webpage, 'duration', default=None)) timestamp = parse_iso8601(self._html_search_meta( 'uploadDate', webpage, 'upload date', default=None), ' ') view_count = int_or_none(self._search_regex( r'UserPlays:(\d+)', webpage, 'view count', default=None)) comment_count = int_or_none(self._search_regex( r'UserComments:(\d+)', webpage, 'comment count', default=None)) formats = [] for source in item.findall(ns('./jwplayer:source')): format_id = source.attrib['label'] f = { 'url': source.attrib['file'], 'format_id': format_id, } m = re.search(r'^(?P<height>\d+)[pP]', format_id) if m: f['height'] = int(m.group('height')) formats.append(f) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'timestamp': timestamp, 'view_count': view_count, 'comment_count': comment_count, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/hotnewhiphop.py
yt_dlp/extractor/hotnewhiphop.py
import base64 from .common import InfoExtractor from ..networking import HEADRequest, Request from ..utils import ExtractorError, urlencode_postdata class HotNewHipHopIE(InfoExtractor): _WORKING = False _VALID_URL = r'https?://(?:www\.)?hotnewhiphop\.com/.*\.(?P<id>.*)\.html' _TEST = { 'url': 'http://www.hotnewhiphop.com/freddie-gibbs-lay-it-down-song.1435540.html', 'md5': '2c2cd2f76ef11a9b3b581e8b232f3d96', 'info_dict': { 'id': '1435540', 'ext': 'mp3', 'title': 'Freddie Gibbs - Lay It Down', }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) video_url_base64 = self._search_regex( r'data-path="(.*?)"', webpage, 'video URL', default=None) if video_url_base64 is None: video_url = self._search_regex( r'"contentUrl" content="(.*?)"', webpage, 'content URL') return self.url_result(video_url, ie='Youtube') reqdata = urlencode_postdata([ ('mediaType', 's'), ('mediaId', video_id), ]) r = Request( 'http://www.hotnewhiphop.com/ajax/media/getActions/', data=reqdata) r.headers['Content-Type'] = 'application/x-www-form-urlencoded' mkd = self._download_json( r, video_id, note='Requesting media key', errnote='Could not download media key') if 'mediaKey' not in mkd: raise ExtractorError('Did not get a media key') redirect_url = base64.b64decode(video_url_base64).decode('utf-8') redirect_req = HEADRequest(redirect_url) req = self._request_webpage( redirect_req, video_id, note='Resolving final URL', errnote='Could not resolve final URL') video_url = req.url if video_url.endswith('.html'): raise ExtractorError('Redirect failed') video_title = self._og_search_title(webpage).strip() return { 'id': video_id, 'url': video_url, 'title': video_title, 'thumbnail': self._og_search_thumbnail(webpage), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/lifenews.py
yt_dlp/extractor/lifenews.py
import re import urllib.parse from .common import InfoExtractor from ..utils import ( ExtractorError, determine_ext, int_or_none, parse_iso8601, remove_end, ) class LifeNewsIE(InfoExtractor): IE_NAME = 'life' IE_DESC = 'Life.ru' _VALID_URL = r'https?://life\.ru/t/[^/]+/(?P<id>\d+)' _TESTS = [{ # single video embedded via video/source 'url': 'https://life.ru/t/новости/98736', 'md5': '77c95eaefaca216e32a76a343ad89d23', 'info_dict': { 'id': '98736', 'ext': 'mp4', 'title': 'Мужчина нашел дома архив оборонного завода', 'description': 'md5:3b06b1b39b5e2bea548e403d99b8bf26', 'timestamp': 1344154740, 'upload_date': '20120805', 'view_count': int, }, }, { # single video embedded via iframe 'url': 'https://life.ru/t/новости/152125', 'md5': '77d19a6f0886cd76bdbf44b4d971a273', 'info_dict': { 'id': '152125', 'ext': 'mp4', 'title': 'В Сети появилось видео захвата «Правым сектором» колхозных полей ', 'description': 'Жители двух поселков Днепропетровской области не простили радикалам угрозу лишения плодородных земель и пошли в лобовую. 
', 'timestamp': 1427961840, 'upload_date': '20150402', 'view_count': int, }, }, { # two videos embedded via iframe 'url': 'https://life.ru/t/новости/153461', 'info_dict': { 'id': '153461', 'title': 'В Москве спасли потерявшегося медвежонка, который спрятался на дереве', 'description': 'Маленький хищник не смог найти дорогу домой и обрел временное убежище на тополе недалеко от жилого массива, пока его не нашла соседская собака.', 'timestamp': 1430825520, 'view_count': int, }, 'playlist': [{ 'md5': '9b6ef8bc0ffa25aebc8bdb40d89ab795', 'info_dict': { 'id': '153461-video1', 'ext': 'mp4', 'title': 'В Москве спасли потерявшегося медвежонка, который спрятался на дереве (Видео 1)', 'description': 'Маленький хищник не смог найти дорогу домой и обрел временное убежище на тополе недалеко от жилого массива, пока его не нашла соседская собака.', 'timestamp': 1430825520, 'upload_date': '20150505', }, }, { 'md5': 'ebb3bf3b1ce40e878d0d628e93eb0322', 'info_dict': { 'id': '153461-video2', 'ext': 'mp4', 'title': 'В Москве спасли потерявшегося медвежонка, который спрятался на дереве (Видео 2)', 'description': 'Маленький хищник не смог найти дорогу домой и обрел временное убежище на тополе недалеко от жилого массива, пока его не нашла соседская собака.', 'timestamp': 1430825520, 'upload_date': '20150505', }, }], }, { 'url': 'https://life.ru/t/новости/213035', 'only_matching': True, }, { 'url': 'https://life.ru/t/%D0%BD%D0%BE%D0%B2%D0%BE%D1%81%D1%82%D0%B8/153461', 'only_matching': True, }, { 'url': 'https://life.ru/t/новости/411489/manuel_vals_nazval_frantsiiu_tsieliu_nomier_odin_dlia_ighil', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) video_urls = re.findall( r'<video[^>]+><source[^>]+src=["\'](.+?)["\']', webpage) iframe_links = re.findall( r'<iframe[^>]+src=["\']((?:https?:)?//embed\.life\.ru/(?:embed|video)/.+?)["\']', webpage) if not video_urls and not iframe_links: raise ExtractorError(f'No 
media links available for {video_id}') title = remove_end( self._og_search_title(webpage), ' - Life.ru') description = self._og_search_description(webpage) view_count = self._html_search_regex( r'<div[^>]+class=(["\']).*?\bhits-count\b.*?\1[^>]*>\s*(?P<value>\d+)\s*</div>', webpage, 'view count', fatal=False, group='value') timestamp = parse_iso8601(self._search_regex( r'<time[^>]+datetime=(["\'])(?P<value>.+?)\1', webpage, 'upload date', fatal=False, group='value')) common_info = { 'description': description, 'view_count': int_or_none(view_count), 'timestamp': timestamp, } def make_entry(video_id, video_url, index=None): cur_info = dict(common_info) cur_info.update({ 'id': video_id if not index else f'{video_id}-video{index}', 'url': video_url, 'title': title if not index else f'{title} (Видео {index})', }) return cur_info def make_video_entry(video_id, video_url, index=None): video_url = urllib.parse.urljoin(url, video_url) return make_entry(video_id, video_url, index) def make_iframe_entry(video_id, video_url, index=None): video_url = self._proto_relative_url(video_url, 'http:') cur_info = make_entry(video_id, video_url, index) cur_info['_type'] = 'url_transparent' return cur_info if len(video_urls) == 1 and not iframe_links: return make_video_entry(video_id, video_urls[0]) if len(iframe_links) == 1 and not video_urls: return make_iframe_entry(video_id, iframe_links[0]) entries = [] if video_urls: for num, video_url in enumerate(video_urls, 1): entries.append(make_video_entry(video_id, video_url, num)) if iframe_links: for num, iframe_link in enumerate(iframe_links, len(video_urls) + 1): entries.append(make_iframe_entry(video_id, iframe_link, num)) playlist = common_info.copy() playlist.update(self.playlist_result(entries, video_id, title, description)) return playlist class LifeEmbedIE(InfoExtractor): IE_NAME = 'life:embed' _VALID_URL = r'https?://embed\.life\.ru/(?:embed|video)/(?P<id>[\da-f]{32})' _TESTS = [{ 'url': 
'http://embed.life.ru/embed/e50c2dec2867350528e2574c899b8291', 'md5': 'b889715c9e49cb1981281d0e5458fbbe', 'info_dict': { 'id': 'e50c2dec2867350528e2574c899b8291', 'ext': 'mp4', 'title': 'e50c2dec2867350528e2574c899b8291', 'thumbnail': r're:http://.*\.jpg', }, }, { # with 1080p 'url': 'https://embed.life.ru/video/e50c2dec2867350528e2574c899b8291', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) thumbnail = None formats = [] def extract_m3u8(manifest_url): formats.extend(self._extract_m3u8_formats( manifest_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='m3u8')) def extract_original(original_url): formats.append({ 'url': original_url, 'format_id': determine_ext(original_url, None), 'quality': 1, }) playlist = self._parse_json( self._search_regex( r'options\s*=\s*({.+?});', webpage, 'options', default='{}'), video_id).get('playlist', {}) if playlist: master = playlist.get('master') if isinstance(master, str) and determine_ext(master) == 'm3u8': extract_m3u8(urllib.parse.urljoin(url, master)) original = playlist.get('original') if isinstance(original, str): extract_original(original) thumbnail = playlist.get('image') # Old rendition fallback if not formats: for video_url in re.findall(r'"file"\s*:\s*"([^"]+)', webpage): video_url = urllib.parse.urljoin(url, video_url) if determine_ext(video_url) == 'm3u8': extract_m3u8(video_url) else: extract_original(video_url) thumbnail = thumbnail or self._search_regex( r'"image"\s*:\s*"([^"]+)', webpage, 'thumbnail', default=None) return { 'id': video_id, 'title': video_id, 'thumbnail': thumbnail, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/telegraaf.py
yt_dlp/extractor/telegraaf.py
from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, parse_iso8601, try_get, ) class TelegraafIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?telegraaf\.nl/video/(?P<id>\d+)' _TEST = { 'url': 'https://www.telegraaf.nl/video/734366489/historisch-scheepswrak-slaat-na-100-jaar-los', 'info_dict': { 'id': 'gaMItuoSeUg2', 'ext': 'mp4', 'title': 'Historisch scheepswrak slaat na 100 jaar los', 'description': 'md5:6f53b7c4f55596722ac24d6c0ec00cfb', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 55, 'timestamp': 1572805527, 'upload_date': '20191103', }, 'params': { # m3u8 download 'skip_download': True, }, } def _real_extract(self, url): article_id = self._match_id(url) video_id = self._download_json( 'https://app.telegraaf.nl/graphql', article_id, headers={'User-Agent': 'De Telegraaf/6.8.11 (Android 11; en_US)'}, query={ 'query': '''{ article(uid: %s) { videos { videoId } } }''' % article_id, # noqa: UP031 })['data']['article']['videos'][0]['videoId'] item = self._download_json( f'https://content.tmgvideo.nl/playlist/item={video_id}/playlist.json', video_id)['items'][0] title = item['title'] formats = [] locations = item.get('locations') or {} for location in locations.get('adaptive', []): manifest_url = location.get('src') if not manifest_url: continue ext = determine_ext(manifest_url) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( manifest_url, video_id, ext='mp4', m3u8_id='hls', fatal=False)) elif ext == 'mpd': formats.extend(self._extract_mpd_formats( manifest_url, video_id, mpd_id='dash', fatal=False)) else: self.report_warning(f'Unknown adaptive format {ext}') for location in locations.get('progressive', []): src = try_get(location, lambda x: x['sources'][0]['src']) if not src: continue label = location.get('label') formats.append({ 'url': src, 'width': int_or_none(location.get('width')), 'height': int_or_none(location.get('height')), 'format_id': 'http' + (f'-{label}' if label else ''), }) return { 'id': video_id, 
'title': title, 'description': item.get('description'), 'formats': formats, 'duration': int_or_none(item.get('duration')), 'thumbnail': item.get('poster'), 'timestamp': parse_iso8601(item.get('datecreated'), ' '), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/pr0gramm.py
yt_dlp/extractor/pr0gramm.py
import datetime as dt import functools import json import urllib.parse from .common import InfoExtractor from ..utils import ( ExtractorError, float_or_none, int_or_none, make_archive_id, mimetype2ext, str_or_none, urljoin, ) from ..utils.traversal import traverse_obj class Pr0grammIE(InfoExtractor): _VALID_URL = r'https?://pr0gramm\.com\/(?:[^/?#]+/)+(?P<id>[\d]+)(?:[/?#:]|$)' _TESTS = [{ 'url': 'https://pr0gramm.com/new/video/5466437', 'info_dict': { 'id': '5466437', 'ext': 'mp4', 'title': 'pr0gramm-5466437 by g11st', 'tags': ['Neon Genesis Evangelion', 'Touhou Project', 'Fly me to the Moon', 'Marisad', 'Marisa Kirisame', 'video', 'sound', 'Marisa', 'Anime'], 'uploader': 'g11st', 'uploader_id': '394718', 'timestamp': 1671590240, 'upload_date': '20221221', 'like_count': int, 'dislike_count': int, 'age_limit': 0, 'thumbnail': r're:^https://thumb\.pr0gramm\.com/.*\.jpg', '_old_archive_ids': ['pr0grammstatic 5466437'], }, }, { 'url': 'https://pr0gramm.com/new/3052805:comment28391322', 'info_dict': { 'id': '3052805', 'ext': 'mp4', 'title': 'pr0gramm-3052805 by Hansking1', 'tags': 'count:15', 'uploader': 'Hansking1', 'uploader_id': '385563', 'timestamp': 1552930408, 'upload_date': '20190318', 'like_count': int, 'dislike_count': int, 'age_limit': 0, 'thumbnail': r're:^https://thumb\.pr0gramm\.com/.*\.jpg', '_old_archive_ids': ['pr0grammstatic 3052805'], }, }, { # Requires verified account 'url': 'https://pr0gramm.com/new/Gianna%20Michaels/5848332', 'info_dict': { 'id': '5848332', 'ext': 'mp4', 'title': 'pr0gramm-5848332 by erd0pfel', 'tags': 'count:18', 'uploader': 'erd0pfel', 'uploader_id': '349094', 'timestamp': 1694489652, 'upload_date': '20230912', 'like_count': int, 'dislike_count': int, 'age_limit': 18, 'thumbnail': r're:^https://thumb\.pr0gramm\.com/.*\.jpg', '_old_archive_ids': ['pr0grammstatic 5848332'], }, }, { 'url': 'https://pr0gramm.com/top/5895149', 'info_dict': { 'id': '5895149', 'ext': 'mp4', 'title': 'pr0gramm-5895149 by algoholigSeeManThrower', 'tags': 
'count:19', 'uploader': 'algoholigSeeManThrower', 'uploader_id': '457556', 'timestamp': 1697580902, 'upload_date': '20231018', 'like_count': int, 'dislike_count': int, 'age_limit': 0, 'thumbnail': 'https://thumb.pr0gramm.com/2023/10/18/db47bb3db5e1a1b3.jpg', '_old_archive_ids': ['pr0grammstatic 5895149'], }, }, { 'url': 'https://pr0gramm.com/static/5466437', 'only_matching': True, }, { 'url': 'https://pr0gramm.com/new/rowan%20atkinson%20herr%20bohne/3052805', 'only_matching': True, }, { 'url': 'https://pr0gramm.com/user/froschler/dafur-ist-man-hier/5091290', 'only_matching': True, }] BASE_URL = 'https://pr0gramm.com' @functools.cached_property def _is_logged_in(self): return 'pp' in self._get_cookies(self.BASE_URL) @functools.cached_property def _maximum_flags(self): # We need to guess the flags for the content otherwise the api will raise an error # We can guess the maximum allowed flags for the account from the cookies # Bitflags are (msbf): pol, nsfp, nsfl, nsfw, sfw flags = 0b10001 if self._is_logged_in: flags |= 0b01000 cookies = self._get_cookies(self.BASE_URL) if 'me' not in cookies: self._download_webpage(self.BASE_URL, None, 'Refreshing verification information') if traverse_obj(cookies, ('me', {lambda x: x.value}, {urllib.parse.unquote}, {json.loads}, 'verified')): flags |= 0b00110 return flags def _call_api(self, endpoint, video_id, query={}, note='Downloading API json'): data = self._download_json( f'https://pr0gramm.com/api/items/{endpoint}', video_id, note, query=query, expected_status=403) error = traverse_obj(data, ('error', {str})) if error in ('nsfwRequired', 'nsflRequired', 'nsfpRequired', 'verificationRequired'): if not self._is_logged_in: self.raise_login_required() raise ExtractorError(f'Unverified account cannot access NSFW/NSFL ({error})', expected=True) elif error: message = traverse_obj(data, ('msg', {str})) or error raise ExtractorError(f'API returned error: {message}', expected=True) return data @staticmethod def 
_create_source_url(path): return urljoin('https://img.pr0gramm.com', path) def _real_extract(self, url): video_id = self._match_id(url) video_info = traverse_obj( self._call_api('get', video_id, {'id': video_id, 'flags': self._maximum_flags}), ('items', 0, {dict})) source = video_info.get('image') if not source or not source.endswith('mp4'): self.raise_no_formats('Could not extract a video', expected=bool(source), video_id=video_id) metadata = self._call_api('info', video_id, {'itemId': video_id}, note='Downloading tags') tags = traverse_obj(metadata, ('tags', ..., 'tag', {str})) # Sorted by "confidence", higher confidence = earlier in list confidences = traverse_obj(metadata, ('tags', ..., 'confidence', ({int}, {float}))) if confidences: tags = [tag for _, tag in sorted(zip(confidences, tags), reverse=True)] # noqa: B905 formats = traverse_obj(video_info, ('variants', ..., { 'format_id': ('name', {str}), 'url': ('path', {self._create_source_url}), 'ext': ('mimeType', {mimetype2ext}), 'vcodec': ('codec', {str}), 'width': ('width', {int_or_none}), 'height': ('height', {int_or_none}), 'bitrate': ('bitRate', {float_or_none}), 'filesize': ('fileSize', {int_or_none}), })) if video_info.get('variants') else [{ 'ext': 'mp4', 'format_id': 'source', **traverse_obj(video_info, { 'url': ('image', {self._create_source_url}), 'width': ('width', {int_or_none}), 'height': ('height', {int_or_none}), }), }] subtitles = {} for subtitle in traverse_obj(video_info, ('subtitles', lambda _, v: v['language'])): subtitles.setdefault(subtitle['language'], []).append(traverse_obj(subtitle, { 'url': ('path', {self._create_source_url}), 'note': ('label', {str}), })) return { 'id': video_id, 'title': f'pr0gramm-{video_id} by {video_info.get("user")}', 'tags': tags, 'formats': formats, 'subtitles': subtitles, 'age_limit': 18 if traverse_obj(video_info, ('flags', {0b110.__and__})) else 0, '_old_archive_ids': [make_archive_id('Pr0grammStatic', video_id)], **traverse_obj(video_info, { 'uploader': 
('user', {str}), 'uploader_id': ('userId', {str_or_none}), 'like_count': ('up', {int}), 'dislike_count': ('down', {int}), 'timestamp': ('created', {int}), 'upload_date': ('created', {int}, {dt.date.fromtimestamp}, {lambda x: x.strftime('%Y%m%d')}), 'thumbnail': ('thumb', {urljoin('https://thumb.pr0gramm.com')}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/simplecast.py
yt_dlp/extractor/simplecast.py
from .common import InfoExtractor from ..utils import ( clean_podcast_url, int_or_none, parse_iso8601, strip_or_none, try_get, urlencode_postdata, ) class SimplecastBaseIE(InfoExtractor): _UUID_REGEX = r'[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12}' _API_BASE = 'https://api.simplecast.com/' def _call_api(self, path_tmpl, video_id): return self._download_json( self._API_BASE + path_tmpl % video_id, video_id) def _call_search_api(self, resource, resource_id, resource_url): return self._download_json( f'https://api.simplecast.com/{resource}s/search', resource_id, data=urlencode_postdata({'url': resource_url})) def _parse_episode(self, episode): episode_id = episode['id'] title = episode['title'].strip() audio_file = episode.get('audio_file') or {} audio_file_url = audio_file.get('url') or episode.get('audio_file_url') or episode['enclosure_url'] season = episode.get('season') or {} season_href = season.get('href') season_id = None if season_href: season_id = self._search_regex( rf'https?://api.simplecast.com/seasons/({self._UUID_REGEX})', season_href, 'season id', default=None) webpage_url = episode.get('episode_url') channel_url = None if webpage_url: channel_url = self._search_regex( r'(https?://[^/]+\.simplecast\.com)', webpage_url, 'channel url', default=None) return { 'id': episode_id, 'display_id': episode.get('slug'), 'title': title, 'url': clean_podcast_url(audio_file_url), 'webpage_url': webpage_url, 'channel_url': channel_url, 'series': try_get(episode, lambda x: x['podcast']['title']), 'season_number': int_or_none(season.get('number')), 'season_id': season_id, 'thumbnail': episode.get('image_url'), 'episode_id': episode_id, 'episode_number': int_or_none(episode.get('number')), 'description': strip_or_none(episode.get('description')), 'timestamp': parse_iso8601(episode.get('published_at')), 'duration': int_or_none(episode.get('duration')), 'filesize': int_or_none(audio_file.get('size') or episode.get('audio_file_size')), } class SimplecastIE(SimplecastBaseIE): 
IE_NAME = 'simplecast' _VALID_URL = rf'https?://(?:api\.simplecast\.com/episodes|player\.simplecast\.com)/(?P<id>{SimplecastBaseIE._UUID_REGEX})' _EMBED_REGEX = [rf'''(?x)<iframe[^>]+src=["\'] (?P<url>https?://(?: embed\.simplecast\.com/[0-9a-f]{8}| player\.simplecast\.com/{SimplecastBaseIE._UUID_REGEX} ))'''] _COMMON_TEST_INFO = { 'display_id': 'errant-signal-chris-franklin-new-wave-video-essays', 'id': 'b6dc49a2-9404-4853-9aa9-9cfc097be876', 'ext': 'mp3', 'title': 'Errant Signal - Chris Franklin & New Wave Video Essays', 'channel_url': 'https://the-re-bind-io-podcast.simplecast.com', 'episode': 'Episode 1', 'episode_number': 1, 'episode_id': 'b6dc49a2-9404-4853-9aa9-9cfc097be876', 'description': 'md5:34752789d3d2702e2d2c975fbd14f357', 'season': 'Season 1', 'season_number': 1, 'season_id': 'e23df0da-bae4-4531-8bbf-71364a88dc13', 'series': 'The RE:BIND.io Podcast', 'duration': 5343, 'timestamp': 1580979475, 'upload_date': '20200206', } _TESTS = [{ 'url': 'https://api.simplecast.com/episodes/b6dc49a2-9404-4853-9aa9-9cfc097be876', 'md5': '8c93be7be54251bf29ee97464eabe61c', 'info_dict': _COMMON_TEST_INFO, }, { 'url': 'https://player.simplecast.com/b6dc49a2-9404-4853-9aa9-9cfc097be876', 'only_matching': True, }] _WEBPAGE_TESTS = [{ # FIXME: Embed detection 'url': 'https://poddtoppen.se/podcast/1498417306/the-rebindio-podcast/errant-signal-chris-franklin-new-wave-video-essays', 'md5': '8c93be7be54251bf29ee97464eabe61c', 'info_dict': { 'id': 'b6dc49a2-9404-4853-9aa9-9cfc097be876', 'ext': 'mp3', 'title': 'Errant Signal - Chris Franklin & New Wave Video Essays', 'channel_url': 'https://the-re-bind-io-podcast.simplecast.com', 'description': 'md5:34752789d3d2702e2d2c975fbd14f357', 'display_id': 'errant-signal-chris-franklin-new-wave-video-essays', 'duration': 5343, 'episode': 'Episode 1', 'episode_id': 'b6dc49a2-9404-4853-9aa9-9cfc097be876', 'episode_number': 1, 'season': 'Season 1', 'season_id': 'e23df0da-bae4-4531-8bbf-71364a88dc13', 'season_number': 1, 'series': 'The 
RE:BIND.io Podcast', 'timestamp': 1580979475, 'upload_date': '20200206', }, }] def _real_extract(self, url): episode_id = self._match_id(url) episode = self._call_api('episodes/%s', episode_id) return self._parse_episode(episode) class SimplecastEpisodeIE(SimplecastBaseIE): IE_NAME = 'simplecast:episode' _VALID_URL = r'https?://(?!api\.)[^/]+\.simplecast\.com/episodes/(?P<id>[^/?&#]+)' _TESTS = [{ 'url': 'https://the-re-bind-io-podcast.simplecast.com/episodes/errant-signal-chris-franklin-new-wave-video-essays', 'md5': '8c93be7be54251bf29ee97464eabe61c', 'info_dict': SimplecastIE._COMMON_TEST_INFO, }] def _real_extract(self, url): mobj = self._match_valid_url(url) episode = self._call_search_api( 'episode', mobj.group(1), mobj.group(0)) return self._parse_episode(episode) class SimplecastPodcastIE(SimplecastBaseIE): IE_NAME = 'simplecast:podcast' _VALID_URL = r'https?://(?!(?:api|cdn|embed|feeds|player)\.)(?P<id>[^/]+)\.simplecast\.com(?!/episodes/[^/?&#]+)' _TESTS = [{ 'url': 'https://the-re-bind-io-podcast.simplecast.com', 'playlist_mincount': 32, 'info_dict': { 'id': '07d28d26-7522-42eb-8c53-2bdcfc81c43c', 'title': 'The RE:BIND.io Podcast', }, }, { 'url': 'https://the-re-bind-io-podcast.simplecast.com/episodes', 'only_matching': True, }] def _real_extract(self, url): subdomain = self._match_id(url) site = self._call_search_api('site', subdomain, url) podcast = site['podcast'] podcast_id = podcast['id'] podcast_title = podcast.get('title') def entries(): episodes = self._call_api('podcasts/%s/episodes', podcast_id) for episode in (episodes.get('collection') or []): info = self._parse_episode(episode) info['series'] = podcast_title yield info return self.playlist_result(entries(), podcast_id, podcast_title)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/pbs.py
yt_dlp/extractor/pbs.py
import re from .common import InfoExtractor from ..utils import ( US_RATINGS, ExtractorError, determine_ext, float_or_none, int_or_none, js_to_json, orderedSet, strip_jsonp, strip_or_none, traverse_obj, unified_strdate, url_or_none, ) class PBSIE(InfoExtractor): _STATIONS = ( (r'(?:video|www|player)\.pbs\.org', 'PBS: Public Broadcasting Service'), # http://www.pbs.org/ (r'video\.aptv\.org', 'APT - Alabama Public Television (WBIQ)'), # http://aptv.org/ (r'video\.gpb\.org', 'GPB/Georgia Public Broadcasting (WGTV)'), # http://www.gpb.org/ (r'video\.mpbonline\.org', 'Mississippi Public Broadcasting (WMPN)'), # http://www.mpbonline.org (r'video\.wnpt\.org', 'Nashville Public Television (WNPT)'), # http://www.wnpt.org (r'video\.wfsu\.org', 'WFSU-TV (WFSU)'), # http://wfsu.org/ (r'video\.wsre\.org', 'WSRE (WSRE)'), # http://www.wsre.org (r'video\.wtcitv\.org', 'WTCI (WTCI)'), # http://www.wtcitv.org (r'video\.pba\.org', 'WPBA/Channel 30 (WPBA)'), # http://pba.org/ (r'video\.alaskapublic\.org', 'Alaska Public Media (KAKM)'), # http://alaskapublic.org/kakm # (r'kuac\.org', 'KUAC (KUAC)'), # http://kuac.org/kuac-tv/ # (r'ktoo\.org', '360 North (KTOO)'), # http://www.ktoo.org/ # (r'azpm\.org', 'KUAT 6 (KUAT)'), # http://www.azpm.org/ (r'video\.azpbs\.org', 'Arizona PBS (KAET)'), # http://www.azpbs.org (r'portal\.knme\.org', 'KNME-TV/Channel 5 (KNME)'), # http://www.newmexicopbs.org/ (r'video\.vegaspbs\.org', 'Vegas PBS (KLVX)'), # http://vegaspbs.org/ (r'watch\.aetn\.org', 'AETN/ARKANSAS ETV NETWORK (KETS)'), # http://www.aetn.org/ (r'video\.ket\.org', 'KET (WKLE)'), # http://www.ket.org/ (r'video\.wkno\.org', 'WKNO/Channel 10 (WKNO)'), # http://www.wkno.org/ (r'video\.lpb\.org', 'LPB/LOUISIANA PUBLIC BROADCASTING (WLPB)'), # http://www.lpb.org/ (r'videos\.oeta\.tv', 'OETA (KETA)'), # http://www.oeta.tv (r'video\.optv\.org', 'Ozarks Public Television (KOZK)'), # http://www.optv.org/ (r'watch\.wsiu\.org', 'WSIU Public Broadcasting (WSIU)'), # http://www.wsiu.org/ 
(r'video\.keet\.org', 'KEET TV (KEET)'), # http://www.keet.org (r'pbs\.kixe\.org', 'KIXE/Channel 9 (KIXE)'), # http://kixe.org/ (r'video\.kpbs\.org', 'KPBS San Diego (KPBS)'), # http://www.kpbs.org/ (r'video\.kqed\.org', 'KQED (KQED)'), # http://www.kqed.org (r'vids\.kvie\.org', 'KVIE Public Television (KVIE)'), # http://www.kvie.org (r'(?:video\.|www\.)pbssocal\.org', 'PBS SoCal/KOCE (KOCE)'), # http://www.pbssocal.org/ (r'video\.valleypbs\.org', 'ValleyPBS (KVPT)'), # http://www.valleypbs.org/ (r'video\.cptv\.org', 'CONNECTICUT PUBLIC TELEVISION (WEDH)'), # http://cptv.org (r'watch\.knpb\.org', 'KNPB Channel 5 (KNPB)'), # http://www.knpb.org/ (r'video\.soptv\.org', 'SOPTV (KSYS)'), # http://www.soptv.org # (r'klcs\.org', 'KLCS/Channel 58 (KLCS)'), # http://www.klcs.org # (r'krcb\.org', 'KRCB Television & Radio (KRCB)'), # http://www.krcb.org # (r'kvcr\.org', 'KVCR TV/DT/FM :: Vision for the Future (KVCR)'), # http://kvcr.org (r'video\.rmpbs\.org', 'Rocky Mountain PBS (KRMA)'), # http://www.rmpbs.org (r'video\.kenw\.org', 'KENW-TV3 (KENW)'), # http://www.kenw.org (r'video\.kued\.org', 'KUED Channel 7 (KUED)'), # http://www.kued.org (r'video\.wyomingpbs\.org', 'Wyoming PBS (KCWC)'), # http://www.wyomingpbs.org (r'video\.cpt12\.org', 'Colorado Public Television / KBDI 12 (KBDI)'), # http://www.cpt12.org/ (r'video\.kbyueleven\.org', 'KBYU-TV (KBYU)'), # http://www.kbyutv.org/ (r'(?:video\.|www\.)thirteen\.org', 'Thirteen/WNET New York (WNET)'), # http://www.thirteen.org (r'video\.wgbh\.org', 'WGBH/Channel 2 (WGBH)'), # http://wgbh.org (r'video\.wgby\.org', 'WGBY (WGBY)'), # http://www.wgby.org (r'watch\.njtvonline\.org', 'NJTV Public Media NJ (WNJT)'), # http://www.njtvonline.org/ # (r'ripbs\.org', 'Rhode Island PBS (WSBE)'), # http://www.ripbs.org/home/ (r'watch\.wliw\.org', 'WLIW21 (WLIW)'), # http://www.wliw.org/ (r'video\.mpt\.tv', 'mpt/Maryland Public Television (WMPB)'), # http://www.mpt.org (r'watch\.weta\.org', 'WETA Television and Radio (WETA)'), # 
http://www.weta.org (r'video\.whyy\.org', 'WHYY (WHYY)'), # http://www.whyy.org (r'video\.wlvt\.org', 'PBS 39 (WLVT)'), # http://www.wlvt.org/ (r'video\.wvpt\.net', 'WVPT - Your Source for PBS and More! (WVPT)'), # http://www.wvpt.net (r'video\.whut\.org', 'Howard University Television (WHUT)'), # http://www.whut.org (r'video\.wedu\.org', 'WEDU PBS (WEDU)'), # http://www.wedu.org (r'video\.wgcu\.org', 'WGCU Public Media (WGCU)'), # http://www.wgcu.org/ # (r'wjct\.org', 'WJCT Public Broadcasting (WJCT)'), # http://www.wjct.org (r'video\.wpbt2\.org', 'WPBT2 (WPBT)'), # http://www.wpbt2.org (r'video\.wucftv\.org', 'WUCF TV (WUCF)'), # http://wucftv.org (r'video\.wuft\.org', 'WUFT/Channel 5 (WUFT)'), # http://www.wuft.org (r'watch\.wxel\.org', 'WXEL/Channel 42 (WXEL)'), # http://www.wxel.org/home/ (r'video\.wlrn\.org', 'WLRN/Channel 17 (WLRN)'), # http://www.wlrn.org/ (r'video\.wusf\.usf\.edu', 'WUSF Public Broadcasting (WUSF)'), # http://wusf.org/ (r'video\.scetv\.org', 'ETV (WRLK)'), # http://www.scetv.org (r'video\.unctv\.org', 'UNC-TV (WUNC)'), # http://www.unctv.org/ # (r'pbsguam\.org', 'PBS Guam (KGTF)'), # http://www.pbsguam.org/ (r'video\.pbshawaii\.org', 'PBS Hawaii - Oceanic Cable Channel 10 (KHET)'), # http://www.pbshawaii.org/ (r'video\.idahoptv\.org', 'Idaho Public Television (KAID)'), # http://idahoptv.org (r'video\.ksps\.org', 'KSPS (KSPS)'), # http://www.ksps.org/home/ (r'watch\.opb\.org', 'OPB (KOPB)'), # http://www.opb.org (r'watch\.nwptv\.org', 'KWSU/Channel 10 & KTNW/Channel 31 (KWSU)'), # http://www.kwsu.org (r'video\.will\.illinois\.edu', 'WILL-TV (WILL)'), # http://will.illinois.edu/ (r'video\.networkknowledge\.tv', 'Network Knowledge - WSEC/Springfield (WSEC)'), # http://www.wsec.tv (r'video\.wttw\.com', 'WTTW11 (WTTW)'), # http://www.wttw.com/ # (r'wtvp\.org', 'WTVP & WTVP.org, Public Media for Central Illinois (WTVP)'), # http://www.wtvp.org/ (r'video\.iptv\.org', 'Iowa Public Television/IPTV (KDIN)'), # http://www.iptv.org/ 
(r'video\.ninenet\.org', 'Nine Network (KETC)'), # http://www.ninenet.org (r'video\.wfwa\.org', 'PBS39 Fort Wayne (WFWA)'), # http://wfwa.org/ (r'video\.wfyi\.org', 'WFYI Indianapolis (WFYI)'), # http://www.wfyi.org (r'video\.mptv\.org', 'Milwaukee Public Television (WMVS)'), # http://www.mptv.org (r'video\.wnin\.org', 'WNIN (WNIN)'), # http://www.wnin.org/ (r'video\.wnit\.org', 'WNIT Public Television (WNIT)'), # http://www.wnit.org/ (r'video\.wpt\.org', 'WPT (WPNE)'), # http://www.wpt.org/ (r'video\.wvut\.org', 'WVUT/Channel 22 (WVUT)'), # http://wvut.org/ (r'video\.weiu\.net', 'WEIU/Channel 51 (WEIU)'), # http://www.weiu.net (r'video\.wqpt\.org', 'WQPT-TV (WQPT)'), # http://www.wqpt.org (r'video\.wycc\.org', 'WYCC PBS Chicago (WYCC)'), # http://www.wycc.org # (r'lakeshorepublicmedia\.org', 'Lakeshore Public Television (WYIN)'), # http://lakeshorepublicmedia.org/ (r'video\.wipb\.org', 'WIPB-TV (WIPB)'), # http://wipb.org (r'video\.indianapublicmedia\.org', 'WTIU (WTIU)'), # http://indianapublicmedia.org/tv/ (r'watch\.cetconnect\.org', 'CET (WCET)'), # http://www.cetconnect.org (r'video\.thinktv\.org', 'ThinkTVNetwork (WPTD)'), # http://www.thinktv.org (r'video\.wbgu\.org', 'WBGU-TV (WBGU)'), # http://wbgu.org (r'video\.wgvu\.org', 'WGVU TV (WGVU)'), # http://www.wgvu.org/ (r'video\.netnebraska\.org', 'NET1 (KUON)'), # http://netnebraska.org (r'video\.pioneer\.org', 'Pioneer Public Television (KWCM)'), # http://www.pioneer.org (r'watch\.sdpb\.org', 'SDPB Television (KUSD)'), # http://www.sdpb.org (r'video\.tpt\.org', 'TPT (KTCA)'), # http://www.tpt.org (r'watch\.ksmq\.org', 'KSMQ (KSMQ)'), # http://www.ksmq.org/ (r'watch\.kpts\.org', 'KPTS/Channel 8 (KPTS)'), # http://www.kpts.org/ (r'watch\.ktwu\.org', 'KTWU/Channel 11 (KTWU)'), # http://ktwu.org # (r'shptv\.org', 'Smoky Hills Public Television (KOOD)'), # http://www.shptv.org # (r'kcpt\.org', 'KCPT Kansas City Public Television (KCPT)'), # http://kcpt.org/ # (r'blueridgepbs\.org', 'Blue Ridge PBS (WBRA)'), # 
http://www.blueridgepbs.org/ (r'watch\.easttennesseepbs\.org', 'East Tennessee PBS (WSJK)'), # http://easttennesseepbs.org (r'video\.wcte\.tv', 'WCTE-TV (WCTE)'), # http://www.wcte.org (r'video\.wljt\.org', 'WLJT, Channel 11 (WLJT)'), # http://wljt.org/ (r'video\.wosu\.org', 'WOSU TV (WOSU)'), # http://wosu.org/ (r'video\.woub\.org', 'WOUB/WOUC (WOUB)'), # http://woub.org/tv/index.php?section=5 (r'video\.wvpublic\.org', 'WVPB (WVPB)'), # http://wvpublic.org/ (r'video\.wkyupbs\.org', 'WKYU-PBS (WKYU)'), # http://www.wkyupbs.org # (r'wyes\.org', 'WYES-TV/New Orleans (WYES)'), # http://www.wyes.org (r'video\.kera\.org', 'KERA 13 (KERA)'), # http://www.kera.org/ (r'video\.mpbn\.net', 'MPBN (WCBB)'), # http://www.mpbn.net/ (r'video\.mountainlake\.org', 'Mountain Lake PBS (WCFE)'), # http://www.mountainlake.org/ (r'video\.nhptv\.org', 'NHPTV (WENH)'), # http://nhptv.org/ (r'video\.vpt\.org', 'Vermont PBS (WETK)'), # http://www.vpt.org (r'video\.witf\.org', 'witf (WITF)'), # http://www.witf.org (r'watch\.wqed\.org', 'WQED Multimedia (WQED)'), # http://www.wqed.org/ (r'video\.wmht\.org', 'WMHT Educational Telecommunications (WMHT)'), # http://www.wmht.org/home/ (r'video\.deltabroadcasting\.org', 'Q-TV (WDCQ)'), # http://www.deltabroadcasting.org (r'video\.dptv\.org', 'WTVS Detroit Public TV (WTVS)'), # http://www.dptv.org/ (r'video\.wcmu\.org', 'CMU Public Television (WCMU)'), # http://www.wcmu.org (r'video\.wkar\.org', 'WKAR-TV (WKAR)'), # http://wkar.org/ (r'wnmuvideo\.nmu\.edu', 'WNMU-TV Public TV 13 (WNMU)'), # http://wnmutv.nmu.edu (r'video\.wdse\.org', 'WDSE - WRPT (WDSE)'), # http://www.wdse.org/ (r'video\.wgte\.org', 'WGTE TV (WGTE)'), # http://www.wgte.org (r'video\.lptv\.org', 'Lakeland Public Television (KAWE)'), # http://www.lakelandptv.org # (r'prairiepublic\.org', 'PRAIRIE PUBLIC (KFME)'), # http://www.prairiepublic.org/ (r'video\.kmos\.org', 'KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS)'), # http://www.kmos.org/ (r'watch\.montanapbs\.org', 'MontanaPBS 
(KUSM)'), # http://montanapbs.org (r'video\.krwg\.org', 'KRWG/Channel 22 (KRWG)'), # http://www.krwg.org (r'video\.kacvtv\.org', 'KACV (KACV)'), # http://www.panhandlepbs.org/home/ (r'video\.kcostv\.org', 'KCOS/Channel 13 (KCOS)'), # www.kcostv.org (r'video\.wcny\.org', 'WCNY/Channel 24 (WCNY)'), # http://www.wcny.org (r'video\.wned\.org', 'WNED (WNED)'), # http://www.wned.org/ (r'watch\.wpbstv\.org', 'WPBS (WPBS)'), # http://www.wpbstv.org (r'video\.wskg\.org', 'WSKG Public TV (WSKG)'), # http://wskg.org (r'video\.wxxi\.org', 'WXXI (WXXI)'), # http://wxxi.org (r'video\.wpsu\.org', 'WPSU (WPSU)'), # http://www.wpsu.org # (r'wqln\.org', 'WQLN/Channel 54 (WQLN)'), # http://www.wqln.org (r'on-demand\.wvia\.org', 'WVIA Public Media Studios (WVIA)'), # http://www.wvia.org/ (r'video\.wtvi\.org', 'WTVI (WTVI)'), # http://www.wtvi.org/ # (r'whro\.org', 'WHRO (WHRO)'), # http://whro.org (r'video\.westernreservepublicmedia\.org', 'Western Reserve PBS (WNEO)'), # http://www.WesternReservePublicMedia.org/ (r'video\.ideastream\.org', 'WVIZ/PBS ideastream (WVIZ)'), # http://www.wviz.org/ (r'video\.kcts9\.org', 'KCTS 9 (KCTS)'), # http://kcts9.org/ (r'video\.basinpbs\.org', 'Basin PBS (KPBT)'), # http://www.basinpbs.org (r'video\.houstonpbs\.org', 'KUHT / Channel 8 (KUHT)'), # http://www.houstonpublicmedia.org/ # (r'tamu\.edu', 'KAMU - TV (KAMU)'), # http://KAMU.tamu.edu # (r'kedt\.org', 'KEDT/Channel 16 (KEDT)'), # http://www.kedt.org (r'video\.klrn\.org', 'KLRN (KLRN)'), # http://www.klrn.org (r'video\.klru\.tv', 'KLRU (KLRU)'), # http://www.klru.org # (r'kmbh\.org', 'KMBH-TV (KMBH)'), # http://www.kmbh.org # (r'knct\.org', 'KNCT (KNCT)'), # http://www.knct.org # (r'ktxt\.org', 'KTTZ-TV (KTXT)'), # http://www.ktxt.org (r'video\.wtjx\.org', 'WTJX Channel 12 (WTJX)'), # http://www.wtjx.org/ (r'video\.ideastations\.org', 'WCVE PBS (WCVE)'), # http://ideastations.org/ (r'video\.kbtc\.org', 'KBTC Public Television (KBTC)'), # http://kbtc.org ) IE_NAME = 'pbs' IE_DESC = 'Public 
Broadcasting Service (PBS) and member stations: {}'.format(', '.join(list(zip(*_STATIONS, strict=True))[1])) _VALID_URL = r'''(?x)https?:// (?: # Player (?:video|player)\.pbs\.org/(?:widget/)?partnerplayer/(?P<player_id>[^/?#]+) | # Direct video URL, or article with embedded player (?:{})/(?: (?:(?:vir|port)alplayer|video)/(?P<id>[0-9]+)(?:[?/#]|$) | (?:[^/?#]+/){{1,5}}(?P<presumptive_id>[^/?#]+?)(?:\.html)?/?(?:$|[?#]) ) ) '''.format('|'.join(next(zip(*_STATIONS, strict=True)))) _GEO_COUNTRIES = ['US'] _TESTS = [ { 'url': 'http://www.pbs.org/tpt/constitution-usa-peter-sagal/watch/a-more-perfect-union/', 'md5': '173dc391afd361fa72eab5d3d918968d', 'info_dict': { 'id': '2365006249', 'ext': 'mp4', 'title': 'Constitution USA with Peter Sagal - A More Perfect Union', 'description': 'md5:31b664af3c65fd07fa460d306b837d00', 'duration': 3190, }, 'skip': 'dead URL', }, { 'url': 'https://www.thirteen.org/programs/the-woodwrights-shop/carving-away-with-mary-may-tioglz/', 'info_dict': { 'id': '3004803331', 'ext': 'mp4', 'title': "The Woodwright's Shop - Carving Away with Mary May", 'description': 'md5:7cbaaaa8b9bcc78bd8f0e31911644e28', 'duration': 1606, 'display_id': 'carving-away-with-mary-may-tioglz', 'chapters': [], 'thumbnail': 'https://image.pbs.org/video-assets/NcnTxNl-asset-mezzanine-16x9-K0Keoyv.jpg', }, }, { 'url': 'http://www.pbs.org/wgbh/pages/frontline/losing-iraq/', 'md5': '372b12b670070de39438b946474df92f', 'info_dict': { 'id': '2365297690', 'ext': 'mp4', 'title': 'FRONTLINE - Losing Iraq', 'description': 'md5:5979a4d069b157f622d02bff62fbe654', 'duration': 5050, 'chapters': [ {'start_time': 0.0, 'end_time': 1234.0, 'title': 'After Saddam, Chaos'}, {'start_time': 1233.0, 'end_time': 1719.0, 'title': 'The Insurgency Takes Root'}, {'start_time': 1718.0, 'end_time': 2461.0, 'title': 'A Light Footprint'}, {'start_time': 2460.0, 'end_time': 3589.0, 'title': 'The Surge '}, {'start_time': 3588.0, 'end_time': 4355.0, 'title': 'The Withdrawal '}, {'start_time': 4354.0, 
'end_time': 5051.0, 'title': 'ISIS on the March '}, ], 'display_id': 'losing-iraq', 'thumbnail': 'https://image.pbs.org/video-assets/pbs/frontline/138098/images/mezzanine_401.jpg', }, }, { 'url': 'http://www.pbs.org/newshour/bb/education-jan-june12-cyberschools_02-23/', 'md5': 'b19856d7f5351b17a5ab1dc6a64be633', 'info_dict': { 'id': '2201174722', 'ext': 'mp4', 'title': 'PBS NewsHour - Cyber Schools Gain Popularity, but Quality Questions Persist', 'description': 'md5:86ab9a3d04458b876147b355788b8781', 'duration': 801, }, }, { 'url': 'http://www.pbs.org/wnet/gperf/dudamel-conducts-verdi-requiem-hollywood-bowl-full-episode/3374/', 'md5': 'c62859342be2a0358d6c9eb306595978', 'info_dict': { 'id': '2365297708', 'ext': 'mp4', 'title': 'Great Performances - Dudamel Conducts Verdi Requiem at the Hollywood Bowl - Full', 'description': 'md5:657897370e09e2bc6bf0f8d2cd313c6b', 'duration': 6559, 'thumbnail': r're:^https?://.*\.jpg$', }, }, { 'url': 'http://www.pbs.org/wgbh/nova/earth/killer-typhoon.html', 'md5': '908f3e5473a693b266b84e25e1cf9703', 'info_dict': { 'id': '2365160389', 'display_id': 'killer-typhoon', 'ext': 'mp4', 'description': 'md5:c741d14e979fc53228c575894094f157', 'title': 'NOVA - Killer Typhoon', 'duration': 3172, 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20140122', 'age_limit': 10, }, }, { 'url': 'http://www.pbs.org/wgbh/pages/frontline/united-states-of-secrets/', 'info_dict': { 'id': 'united-states-of-secrets', }, 'playlist_count': 2, }, { 'url': 'http://www.pbs.org/wgbh/americanexperience/films/great-war/', 'info_dict': { 'id': 'great-war', }, 'playlist_count': 3, }, { 'url': 'http://www.pbs.org/wgbh/americanexperience/films/death/player/', 'info_dict': { 'id': '2276541483', 'display_id': 'player', 'ext': 'mp4', 'title': 'American Experience - Death and the Civil War, Chapter 1', 'description': 'md5:67fa89a9402e2ee7d08f53b920674c18', 'duration': 682, 'thumbnail': r're:^https?://.*\.jpg$', }, 'params': { 'skip_download': True, # requires ffmpeg }, 
}, { 'url': 'http://www.pbs.org/video/2365245528/', 'md5': '115223d41bd55cda8ae5cd5ed4e11497', 'info_dict': { 'id': '2365245528', 'display_id': '2365245528', 'ext': 'mp4', 'title': 'FRONTLINE - United States of Secrets (Part One)', 'description': 'md5:55756bd5c551519cc4b7703e373e217e', 'duration': 6851, 'thumbnail': r're:^https?://.*\.jpg$', }, }, { # Video embedded in iframe containing angle brackets as attribute's value (e.g. # "<iframe style='position: absolute;<br />\ntop: 0; left: 0;' ...", see # https://github.com/ytdl-org/youtube-dl/issues/7059) 'url': 'http://www.pbs.org/food/features/a-chefs-life-season-3-episode-5-prickly-business/', 'md5': '59b0ef5009f9ac8a319cc5efebcd865e', 'info_dict': { 'id': '2365546844', 'display_id': 'a-chefs-life-season-3-episode-5-prickly-business', 'ext': 'mp4', 'title': "A Chef's Life - Season 3, Ep. 5: Prickly Business", 'description': 'md5:c0ff7475a4b70261c7e58f493c2792a5', 'duration': 1480, 'thumbnail': r're:^https?://.*\.jpg$', }, }, { # Frontline video embedded via flp2012.js 'url': 'http://www.pbs.org/wgbh/pages/frontline/the-atomic-artists', 'info_dict': { 'id': '2070868960', 'display_id': 'the-atomic-artists', 'ext': 'mp4', 'title': 'FRONTLINE - The Atomic Artists', 'description': 'md5:f677e4520cfacb4a5ce1471e31b57800', 'duration': 723, 'thumbnail': r're:^https?://.*\.jpg$', }, 'params': { 'skip_download': True, # requires ffmpeg }, }, { # Serves hd only via wigget/partnerplayer page 'url': 'http://www.pbs.org/video/2365641075/', 'md5': 'fdf907851eab57211dd589cf12006666', 'info_dict': { 'id': '2365641075', 'ext': 'mp4', 'title': 'FRONTLINE - Netanyahu at War', 'duration': 6852, 'thumbnail': r're:^https?://.*\.jpg$', 'formats': 'mincount:8', }, }, { # https://github.com/ytdl-org/youtube-dl/issues/13801 'url': 'https://www.pbs.org/video/pbs-newshour-full-episode-july-31-2017-1501539057/', 'info_dict': { 'id': '3003333873', 'ext': 'mp4', 'title': 'PBS NewsHour - full episode July 31, 2017', 'description': 
'md5:d41d8cd98f00b204e9800998ecf8427e', 'duration': 3265, 'thumbnail': r're:^https?://.*\.jpg$', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.pbs.org/wgbh/roadshow/watch/episode/2105-indianapolis-hour-2/', 'info_dict': { 'id': '2365936247', 'ext': 'mp4', 'title': 'Antiques Roadshow - Indianapolis, Hour 2', 'description': 'md5:524b32249db55663e7231b6b8d1671a2', 'duration': 3180, 'thumbnail': r're:^https?://.*\.jpg$', }, 'params': { 'skip_download': True, }, 'expected_warnings': ['HTTP Error 403: Forbidden'], }, { 'url': 'https://www.pbs.org/wgbh/masterpiece/episodes/victoria-s2-e1/', 'info_dict': { 'id': '3007193718', 'ext': 'mp4', 'title': "Victoria - A Soldier's Daughter / The Green-Eyed Monster", 'description': 'md5:37efbac85e0c09b009586523ec143652', 'duration': 6292, 'thumbnail': r're:^https?://.*\.(?:jpg|JPG)$', }, 'params': { 'skip_download': True, }, 'expected_warnings': ['HTTP Error 403: Forbidden'], }, { 'url': 'https://player.pbs.org/partnerplayer/tOz9tM5ljOXQqIIWke53UA==/', 'info_dict': { 'id': '3011407934', 'ext': 'mp4', 'title': 'Stories from the Stage - Road Trip', 'duration': 1619, 'thumbnail': r're:^https?://.*\.(?:jpg|JPG)$', }, 'params': { 'skip_download': True, }, 'expected_warnings': ['HTTP Error 403: Forbidden'], }, { 'url': 'https://www.pbssocal.org/shows/newshour/clip/capehart-johnson-1715984001', 'info_dict': { 'id': '3091549094', 'ext': 'mp4', 'title': 'PBS NewsHour - Capehart and Johnson on the unusual Biden-Trump debate plans', 'description': 'Capehart and Johnson on how the Biden-Trump debates could shape the campaign season', 'display_id': 'capehart-johnson-1715984001', 'duration': 593, 'thumbnail': 'https://image.pbs.org/video-assets/mF3oSVn-asset-mezzanine-16x9-QeXjXPy.jpg', 'chapters': [], }, }, { 'url': 'http://player.pbs.org/widget/partnerplayer/2365297708/?start=0&end=0&chapterbar=false&endscreen=false&topbar=true', 'only_matching': True, }, { 'url': 'http://watch.knpb.org/video/2365616055/', 'only_matching': 
True, }, { 'url': 'https://player.pbs.org/portalplayer/3004638221/?uid=', 'only_matching': True, }, ] _ERRORS = { 101: 'We\'re sorry, but this video is not yet available.', 403: 'We\'re sorry, but this video is not available in your region due to right restrictions.', 404: 'We are experiencing technical difficulties that are preventing us from playing the video at this time. Please check back again soon.', 410: 'This video has expired and is no longer available for online streaming.', } def _real_initialize(self): cookie = (self._download_json( 'http://localization.services.pbs.org/localize/auto/cookie/', None, headers=self.geo_verification_headers(), fatal=False) or {}).get('cookie') if cookie: station = self._search_regex(r'#?s=\["([^"]+)"', cookie, 'station') if station: self._set_cookie('.pbs.org', 'pbsol.station', station) def _extract_webpage(self, url): mobj = self._match_valid_url(url) description = None presumptive_id = mobj.group('presumptive_id') display_id = presumptive_id if presumptive_id: webpage = self._download_webpage(url, display_id) description = strip_or_none(self._og_search_description( webpage, default=None) or self._html_search_meta( 'description', webpage, default=None)) upload_date = unified_strdate(self._search_regex( r'<input type="hidden" id="air_date_[0-9]+" value="([^"]+)"', webpage, 'upload date', default=None)) # tabbed frontline videos MULTI_PART_REGEXES = ( r'<div[^>]+class="videotab[^"]*"[^>]+vid="(\d+)"', r'<a[^>]+href=["\']#(?:video-|part)\d+["\'][^>]+data-cove[Ii]d=["\'](\d+)', ) for p in MULTI_PART_REGEXES: tabbed_videos = orderedSet(re.findall(p, webpage)) if tabbed_videos: return tabbed_videos, presumptive_id, upload_date, description MEDIA_ID_REGEXES = [ r"div\s*:\s*'videoembed'\s*,\s*mediaid\s*:\s*'(\d+)'", # frontline video embed r'class="coveplayerid">([^<]+)<', # coveplayer r'<section[^>]+data-coveid="(\d+)"', # coveplayer from http://www.pbs.org/wgbh/frontline/film/real-csi/ 
r'\sclass="passportcoveplayer"[^>]*\sdata-media="(\d+)', # https://www.thirteen.org/programs/the-woodwrights-shop/who-wrote-the-book-of-sloyd-fggvvq/ r'<input type="hidden" id="pbs_video_id_[0-9]+" value="([0-9]+)"/>', # jwplayer r"(?s)window\.PBS\.playerConfig\s*=\s*{.*?id\s*:\s*'([0-9]+)',", r'<div[^>]+\bdata-cove-id=["\'](\d+)"', # http://www.pbs.org/wgbh/roadshow/watch/episode/2105-indianapolis-hour-2/ r'<iframe[^>]+\bsrc=["\'](?:https?:)?//video\.pbs\.org/widget/partnerplayer/(\d+)', # https://www.pbs.org/wgbh/masterpiece/episodes/victoria-s2-e1/ r'\bhttps?://player\.pbs\.org/[\w-]+player/(\d+)', # last pattern to avoid false positives ] media_id = self._search_regex( MEDIA_ID_REGEXES, webpage, 'media ID', fatal=False, default=None) if media_id: return media_id, presumptive_id, upload_date, description # Frontline video embedded via flp video_id = self._search_regex( r'videoid\s*:\s*"([\d+a-z]{7,})"', webpage, 'videoid', default=None) if video_id: # pkg_id calculation is reverse engineered from # http://www.pbs.org/wgbh/pages/frontline/js/flp2012.js prg_id = self._search_regex( r'videoid\s*:\s*"([\d+a-z]{7,})"', webpage, 'videoid')[7:] if 'q' in prg_id: prg_id = prg_id.split('q')[1] prg_id = int(prg_id, 16) getdir = self._download_json( 'http://www.pbs.org/wgbh/pages/frontline/.json/getdir/getdir%d.json' % prg_id, presumptive_id, 'Downloading getdir JSON', transform_source=strip_jsonp) return getdir['mid'], presumptive_id, upload_date, description for iframe in re.findall(r'(?s)<iframe(.+?)></iframe>', webpage): url = self._search_regex( r'src=(["\'])(?P<url>.+?partnerplayer.+?)\1', iframe, 'player URL', default=None, group='url') if url: break if not url: url = self._og_search_url(webpage) mobj = re.match( self._VALID_URL, self._proto_relative_url(url.strip())) player_id = mobj.group('player_id') if not display_id: display_id = player_id if player_id: player_page = self._download_webpage( url, display_id, note='Downloading player page', errnote='Could not 
download player page') video_id = self._search_regex( r'<div\s+id=["\']video_(\d+)', player_page, 'video ID', default=None) if not video_id: video_info = self._extract_video_data( player_page, 'video data', display_id) video_id = str( video_info.get('id') or video_info['contentID']) else: video_id = mobj.group('id') display_id = video_id return video_id, display_id, None, description def _extract_video_data(self, string, name, video_id, fatal=True): return self._parse_json( self._search_regex( [r'(?s)PBS\.videoData\s*=\s*({.+?});\n', r'window\.videoBridge\s*=\s*({.+?});'], string, name, default='{}'), video_id, transform_source=js_to_json, fatal=fatal) def _real_extract(self, url): video_id, display_id, upload_date, description = self._extract_webpage(url) if isinstance(video_id, list): entries = [self.url_result( f'http://video.pbs.org/video/{vid_id}', 'PBS', vid_id) for vid_id in video_id] return self.playlist_result(entries, display_id) info = {} redirects = [] redirect_urls = set() def extract_redirect_urls(info): for encoding_name in ('recommended_encoding', 'alternate_encoding'): redirect = info.get(encoding_name) if not redirect: continue redirect_url = redirect.get('url') if redirect_url and redirect_url not in redirect_urls: redirects.append(redirect) redirect_urls.add(redirect_url) encodings = info.get('encodings') if isinstance(encodings, list): for encoding in encodings: encoding_url = url_or_none(encoding) if encoding_url and encoding_url not in redirect_urls: redirects.append({'url': encoding_url}) redirect_urls.add(encoding_url) chapters = [] # Player pages may also serve different qualities for page in ('widget/partnerplayer', 'portalplayer'): player = self._download_webpage( f'http://player.pbs.org/{page}/{video_id}', display_id, f'Downloading {page} page', fatal=False) if player: video_info = self._extract_video_data( player, f'{page} video data', display_id, fatal=False)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
true
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/voxmedia.py
yt_dlp/extractor/voxmedia.py
import urllib.parse from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, try_get, unified_timestamp, ) class VoxMediaVolumeIE(InfoExtractor): _VALID_URL = r'https?://volume\.vox-cdn\.com/embed/(?P<id>[0-9a-f]{9})' def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) setup = self._parse_json(self._search_regex( r'setup\s*=\s*({.+});', webpage, 'setup'), video_id) player_setup = setup.get('player_setup') or setup video_data = player_setup.get('video') or {} formatted_metadata = video_data.get('formatted_metadata') or {} info = { 'id': video_id, 'title': player_setup.get('title') or video_data.get('title_short'), 'description': video_data.get('description_long') or video_data.get('description_short'), 'thumbnail': formatted_metadata.get('thumbnail') or video_data.get('brightcove_thumbnail'), 'timestamp': unified_timestamp(formatted_metadata.get('video_publish_date')), } asset = try_get(setup, lambda x: x['embed_assets']['chorus'], dict) or {} formats = [] hls_url = asset.get('hls_url') if hls_url: formats.extend(self._extract_m3u8_formats( hls_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) mp4_url = asset.get('mp4_url') if mp4_url: tbr = self._search_regex(r'-(\d+)k\.', mp4_url, 'bitrate', default=None) format_id = 'http' if tbr: format_id += '-' + tbr formats.append({ 'format_id': format_id, 'url': mp4_url, 'tbr': int_or_none(tbr), }) if formats: info['formats'] = formats info['duration'] = int_or_none(asset.get('duration')) return info for provider_video_type in ('youtube', 'brightcove'): provider_video_id = video_data.get(f'{provider_video_type}_id') if not provider_video_id: continue if provider_video_type == 'brightcove': # TODO: Find embed example or confirm that Vox has stopped using Brightcove raise ExtractorError('Vox Brightcove embeds are currently unsupported') else: info.update({ '_type': 'url_transparent', 'url': provider_video_id if 
provider_video_type == 'youtube' else f'{provider_video_type}:{provider_video_id}', 'ie_key': provider_video_type.capitalize(), }) return info raise ExtractorError('Unable to find provider video id') class VoxMediaIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?(?:(?:theverge|vox|sbnation|eater|polygon|curbed|racked|funnyordie)\.com|recode\.net)/(?:[^/]+/)*(?P<id>[^/?]+)' _EMBED_REGEX = [r'<iframe[^>]+?src="(?P<url>https?://(?:www\.)?funnyordie\.com/embed/[^"]+)"'] _TESTS = [{ # FIXME: Unsupported iframe embed # Volume embed, Youtube 'url': 'http://www.theverge.com/2014/6/27/5849272/material-world-how-google-discovered-what-software-is-made-of', 'info_dict': { 'id': 'j4mLW6x17VM', 'ext': 'mp4', 'title': 'Material world: how Google discovered what software is made of', 'description': 'md5:dfc17e7715e3b542d66e33a109861382', 'upload_date': '20190710', 'uploader_id': 'TheVerge', 'uploader': 'The Verge', }, 'add_ie': ['Youtube'], }, { # Volume embed, Youtube 'url': 'http://www.theverge.com/2014/10/21/7025853/google-nexus-6-hands-on-photos-video-android-phablet', 'md5': 'fd19aa0cf3a0eea515d4fd5c8c0e9d68', 'info_dict': { 'id': 'Gy8Md3Eky38', 'ext': 'mp4', 'title': 'The Nexus 6: hands-on with Google\'s phablet', 'description': 'md5:d9f0216e5fb932dd2033d6db37ac3f1d', 'uploader_id': 'TheVerge', 'upload_date': '20141021', 'uploader': 'The Verge', 'timestamp': 1413907200, }, 'add_ie': ['Youtube'], 'skip': 'similar to the previous test', }, { # Volume embed, Youtube 'url': 'http://www.vox.com/2016/3/31/11336640/mississippi-lgbt-religious-freedom-bill', 'info_dict': { 'id': '22986359b', 'ext': 'mp4', 'title': "Mississippi's laws are so bad that its anti-LGBTQ law isn't needed to allow discrimination", 'description': 'md5:fc1317922057de31cd74bce91eb1c66c', 'upload_date': '20150915', 'timestamp': 1442332800, 'duration': 285, }, 'add_ie': ['Youtube'], 'skip': 'similar to the previous test', }, { # youtube embed 'url': 'http://www.vox.com/2016/3/24/11291692/robot-dance', 'md5': 
'83b3080489fb103941e549352d3e0977', 'info_dict': { 'id': 'FcNHTJU1ufM', 'ext': 'mp4', 'title': 'How "the robot" became the greatest novelty dance of all time', 'description': 'md5:b081c0d588b8b2085870cda55e6da176', 'upload_date': '20160324', 'uploader_id': 'voxdotcom', 'uploader': 'Vox', }, 'add_ie': ['Youtube'], 'skip': 'Page no longer contain videos', }, { # SBN.VideoLinkset.entryGroup multiple ooyala embeds 'url': 'http://www.sbnation.com/college-football-recruiting/2015/2/3/7970291/national-signing-day-rationalizations-itll-be-ok-itll-be-ok', 'info_dict': { 'id': 'national-signing-day-rationalizations-itll-be-ok-itll-be-ok', 'title': '25 lies you will tell yourself on National Signing Day', 'description': 'It\'s the most self-delusional time of the year, and everyone\'s gonna tell the same lies together!', }, 'playlist': [{ 'md5': '721fededf2ab74ae4176c8c8cbfe092e', 'info_dict': { 'id': 'p3cThlMjE61VDi_SD9JlIteSNPWVDBB9', 'ext': 'mp4', 'title': 'Buddy Hield vs Steph Curry (and the world)', 'description': 'Let’s dissect only the most important Final Four storylines.', }, }, { 'md5': 'bf0c5cc115636af028be1bab79217ea9', 'info_dict': { 'id': 'BmbmVjMjE6esPHxdALGubTrouQ0jYLHj', 'ext': 'mp4', 'title': 'Chasing Cinderella 2016: Syracuse basketball', 'description': 'md5:e02d56b026d51aa32c010676765a690d', }, }], 'skip': 'Page no longer contain videos', }] _WEBPAGE_TESTS = [{ 'url': 'http://www.theguardian.com/world/2014/mar/11/obama-zach-galifianakis-between-two-ferns', 'info_dict': { 'id': '18e820ec3f', 'ext': 'mp4', 'title': 'Between Two Ferns with Zach Galifianakis: President Barack Obama', }, 'skip': 'Invalid URL', }] def _real_extract(self, url): display_id = self._match_id(url) webpage = urllib.parse.unquote(self._download_webpage(url, display_id)) def create_entry(provider_video_id, provider_video_type, title=None, description=None): video_url = { 'youtube': '%s', 'volume': 'http://volume.vox-cdn.com/embed/%s', }[provider_video_type] % provider_video_id return { 
'_type': 'url_transparent', 'url': video_url, 'title': title or self._og_search_title(webpage), 'description': description or self._og_search_description(webpage), } entries = [] entries_data = self._search_regex([ r'Chorus\.VideoContext\.addVideo\((\[{.+}\])\);', r'var\s+entry\s*=\s*({.+});', r'SBN\.VideoLinkset\.entryGroup\(\s*(\[.+\])', ], webpage, 'video data', default=None) if entries_data: entries_data = self._parse_json(entries_data, display_id) if isinstance(entries_data, dict): entries_data = [entries_data] for video_data in entries_data: provider_video_id = video_data.get('provider_video_id') provider_video_type = video_data.get('provider_video_type') if provider_video_id and provider_video_type: entries.append(create_entry( provider_video_id, provider_video_type, video_data.get('title'), video_data.get('description'))) volume_uuid = self._search_regex( r'data-volume-uuid="([^"]+)"', webpage, 'volume uuid', default=None) if volume_uuid: entries.append(create_entry(volume_uuid, 'volume')) if len(entries) == 1: return entries[0] else: return self.playlist_result(entries, display_id, self._og_search_title(webpage), self._og_search_description(webpage))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/elpais.py
yt_dlp/extractor/elpais.py
from .common import InfoExtractor from ..utils import strip_jsonp, unified_strdate class ElPaisIE(InfoExtractor): _VALID_URL = r'https?://(?:[^.]+\.)?elpais\.com/.*/(?P<id>[^/#?]+)\.html(?:$|[?#])' IE_DESC = 'El País' _TESTS = [{ 'url': 'http://blogs.elpais.com/la-voz-de-inaki/2014/02/tiempo-nuevo-recetas-viejas.html', 'md5': '98406f301f19562170ec071b83433d55', 'info_dict': { 'id': 'tiempo-nuevo-recetas-viejas', 'ext': 'mp4', 'title': 'Tiempo nuevo, recetas viejas', 'description': 'De lunes a viernes, a partir de las ocho de la mañana, Iñaki Gabilondo nos cuenta su visión de la actualidad nacional e internacional.', 'upload_date': '20140206', }, }, { 'url': 'http://elcomidista.elpais.com/elcomidista/2016/02/24/articulo/1456340311_668921.html#?id_externo_nwl=newsletter_diaria20160303t', 'md5': '3bd5b09509f3519d7d9e763179b013de', 'info_dict': { 'id': '1456340311_668921', 'ext': 'mp4', 'title': 'Cómo hacer el mejor café con cafetera italiana', 'description': 'Que sí, que las cápsulas son cómodas. Pero si le pides algo más a la vida, quizá deberías aprender a usar bien la cafetera italiana. 
No tienes más que ver este vídeo y seguir sus siete normas básicas.', 'upload_date': '20160303', }, }, { 'url': 'http://elpais.com/elpais/2017/01/26/ciencia/1485456786_417876.html', 'md5': '9c79923a118a067e1a45789e1e0b0f9c', 'info_dict': { 'id': '1485456786_417876', 'ext': 'mp4', 'title': 'Hallado un barco de la antigua Roma que naufragó en Baleares hace 1.800 años', 'description': 'La nave portaba cientos de ánforas y se hundió cerca de la isla de Cabrera por razones desconocidas', 'upload_date': '20170127', }, }, { 'url': 'http://epv.elpais.com/epv/2017/02/14/programa_la_voz_de_inaki/1487062137_075943.html', 'info_dict': { 'id': '1487062137_075943', 'ext': 'mp4', 'title': 'Disyuntivas', 'description': 'md5:a0fb1485c4a6a8a917e6f93878e66218', 'upload_date': '20170214', }, 'params': { 'skip_download': True, }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) prefix = self._html_search_regex( r'var\s+url_cache\s*=\s*"([^"]+)";', webpage, 'URL prefix') id_multimedia = self._search_regex( r"id_multimedia\s*=\s*'([^']+)'", webpage, 'ID multimedia', default=None) if id_multimedia: url_info = self._download_json( 'http://elpais.com/vdpep/1/?pepid=' + id_multimedia, video_id, transform_source=strip_jsonp) video_suffix = url_info['mp4'] else: video_suffix = self._search_regex( r"(?:URLMediaFile|urlVideo_\d+)\s*=\s*url_cache\s*\+\s*'([^']+)'", webpage, 'video URL') video_url = prefix + video_suffix thumbnail_suffix = self._search_regex( r"(?:URLMediaStill|urlFotogramaFijo_\d+)\s*=\s*url_cache\s*\+\s*'([^']+)'", webpage, 'thumbnail URL', default=None) thumbnail = ( None if thumbnail_suffix is None else prefix + thumbnail_suffix) or self._og_search_thumbnail(webpage) title = self._html_search_regex( (r"tituloVideo\s*=\s*'([^']+)'", r'<h2 class="entry-header entry-title.*?>(.*?)</h2>', r'<h1[^>]+class="titulo"[^>]*>([^<]+)'), webpage, 'title', default=None) or self._og_search_title(webpage) upload_date = 
unified_strdate(self._search_regex( r'<p class="date-header date-int updated"\s+title="([^"]+)">', webpage, 'upload date', default=None) or self._html_search_meta( 'datePublished', webpage, 'timestamp')) return { 'id': video_id, 'url': video_url, 'title': title, 'description': self._og_search_description(webpage), 'thumbnail': thumbnail, 'upload_date': upload_date, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/sejmpl.py
yt_dlp/extractor/sejmpl.py
import datetime as dt from .common import InfoExtractor from .redge import RedCDNLivxIE from ..utils import ( clean_html, join_nonempty, js_to_json, strip_or_none, update_url_query, ) from ..utils.traversal import traverse_obj def is_dst(date): last_march = dt.datetime(date.year, 3, 31) last_october = dt.datetime(date.year, 10, 31) last_sunday_march = last_march - dt.timedelta(days=last_march.isoweekday() % 7) last_sunday_october = last_october - dt.timedelta(days=last_october.isoweekday() % 7) return last_sunday_march.replace(hour=2) <= date <= last_sunday_october.replace(hour=3) def rfc3339_to_atende(date): date = dt.datetime.fromisoformat(date) date = date + dt.timedelta(hours=1 if is_dst(date) else 0) return int((date.timestamp() - 978307200) * 1000) class SejmIE(InfoExtractor): _VALID_URL = ( r'https?://(?:www\.)?sejm\.gov\.pl/[Ss]ejm(?P<term>\d+)\.nsf/transmisje(?:_arch)?\.xsp(?:\?[^#]*)?#(?P<id>[\dA-F]+)', r'https?://(?:www\.)?sejm\.gov\.pl/[Ss]ejm(?P<term>\d+)\.nsf/transmisje(?:_arch)?\.xsp\?(?:[^#]+&)?unid=(?P<id>[\dA-F]+)', r'https?://sejm-embed\.redcdn\.pl/[Ss]ejm(?P<term>\d+)\.nsf/VideoFrame\.xsp/(?P<id>[\dA-F]+)', ) IE_NAME = 'sejm' _TESTS = [{ # multiple cameras, polish SL iterpreter 'url': 'https://www.sejm.gov.pl/Sejm10.nsf/transmisje_arch.xsp#6181EF1AD9CEEBB5C1258A6D006452B5', 'info_dict': { 'id': '6181EF1AD9CEEBB5C1258A6D006452B5', 'title': '1. posiedzenie Sejmu X kadencji', 'duration': 20145, 'live_status': 'was_live', 'location': 'Sala Posiedzeń', }, 'playlist': [{ 'info_dict': { 'id': 'ENC01-722340000000-722360145000', 'ext': 'mp4', 'duration': 20145, 'title': '1. posiedzenie Sejmu X kadencji - ENC01', 'live_status': 'was_live', }, }, { 'info_dict': { 'id': 'ENC30-722340000000-722360145000', 'ext': 'mp4', 'duration': 20145, 'title': '1. posiedzenie Sejmu X kadencji - ENC30', 'live_status': 'was_live', }, }, { 'info_dict': { 'id': 'ENC31-722340000000-722360145000', 'ext': 'mp4', 'duration': 20145, 'title': '1. 
posiedzenie Sejmu X kadencji - ENC31', 'live_status': 'was_live', }, }, { 'info_dict': { 'id': 'ENC32-722340000000-722360145000', 'ext': 'mp4', 'duration': 20145, 'title': '1. posiedzenie Sejmu X kadencji - ENC32', 'live_status': 'was_live', }, }, { # sign lang interpreter 'info_dict': { 'id': 'Migacz-ENC01-1-722340000000-722360145000', 'ext': 'mp4', 'duration': 20145, 'title': '1. posiedzenie Sejmu X kadencji - Migacz-ENC01', 'live_status': 'was_live', }, }], }, { 'url': 'https://www.sejm.gov.pl/Sejm8.nsf/transmisje.xsp?unid=9377A9D65518E9A5C125808E002E9FF2', 'info_dict': { 'id': '9377A9D65518E9A5C125808E002E9FF2', 'title': 'Debata "Lepsza Polska: obywatelska"', 'description': 'KP .Nowoczesna', 'duration': 8770, 'live_status': 'was_live', 'location': 'sala kolumnowa im. Kazimierza Pużaka (bud. C-D)', }, 'playlist': [{ 'info_dict': { 'id': 'ENC08-1-503831270000-503840040000', 'ext': 'mp4', 'duration': 8770, 'title': 'Debata "Lepsza Polska: obywatelska" - ENC08', 'live_status': 'was_live', }, }], }, { # 7th term is very special, since it does not use redcdn livx 'url': 'https://www.sejm.gov.pl/sejm7.nsf/transmisje_arch.xsp?rok=2015&month=11#A6E6D475ECCC6FE5C1257EF90034817F', 'info_dict': { 'id': 'A6E6D475ECCC6FE5C1257EF90034817F', 'title': 'Konferencja prasowa - Stanowisko SLD ws. składu nowego rządu', 'description': 'SLD - Biuro Prasowe Klubu', 'duration': 514, 'location': 'sala 101/bud. C', 'live_status': 'was_live', }, 'playlist': [{ 'info_dict': { 'id': 'A6E6D475ECCC6FE5C1257EF90034817F', 'ext': 'mp4', 'title': 'Konferencja prasowa - Stanowisko SLD ws. 
składu nowego rządu', 'duration': 514, }, }], }, { 'url': 'https://sejm-embed.redcdn.pl/Sejm10.nsf/VideoFrame.xsp/FED58EABB97FBD53C1258A7400386492', 'only_matching': True, }] def _real_extract(self, url): term, video_id = self._match_valid_url(url).group('term', 'id') frame = self._download_webpage( f'https://sejm-embed.redcdn.pl/Sejm{term}.nsf/VideoFrame.xsp/{video_id}', video_id) # despite it says "transmisje_arch", it works for live streams too! data = self._download_json( f'https://www.sejm.gov.pl/Sejm{term}.nsf/transmisje_arch.xsp/json/{video_id}', video_id) params = data['params'] title = strip_or_none(data.get('title')) if data.get('status') == 'VIDEO_ENDED': live_status = 'was_live' elif data.get('status') == 'VIDEO_PLAYING': live_status = 'is_live' else: live_status = None self.report_warning(f'unknown status: {data.get("status")}') start_time = rfc3339_to_atende(params['start']) # current streams have a stop time of *expected* end of session, but actual times # can change during the transmission. setting a stop_time would artificially # end the stream at that time, while the session actually keeps going. 
if live_status == 'was_live': stop_time = rfc3339_to_atende(params['stop']) duration = (stop_time - start_time) // 1000 else: stop_time, duration = None, None entries = [] def add_entry(file, legacy_file=False): if not file: return file = self._proto_relative_url(file) if not legacy_file: file = update_url_query(file, {'startTime': start_time}) if stop_time is not None: file = update_url_query(file, {'stopTime': stop_time}) stream_id = self._search_regex(r'/o2/sejm/([^/]+)/[^./]+\.livx', file, 'stream id') common_info = { 'url': file, 'duration': duration, } if legacy_file: entries.append({ **common_info, 'id': video_id, 'title': title, }) else: entries.append({ **common_info, '_type': 'url_transparent', 'ie_key': RedCDNLivxIE.ie_key(), 'id': stream_id, 'title': join_nonempty(title, stream_id, delim=' - '), }) cameras = self._search_json( r'var\s+cameras\s*=', frame, 'camera list', video_id, contains_pattern=r'\[(?s:.+)\]', transform_source=js_to_json, fatal=False) or [] for camera_file in traverse_obj(cameras, (..., 'file', {dict})): if camera_file.get('flv'): add_entry(camera_file['flv']) elif camera_file.get('mp4'): # this is only a thing in 7th term. no streams before, and starting 8th it's redcdn livx add_entry(camera_file['mp4'], legacy_file=True) else: self.report_warning('Unknown camera stream type found') if params.get('mig'): add_entry(self._search_regex(r"var sliUrl\s*=\s*'([^']+)'", frame, 'sign language interpreter url', fatal=False)) return { '_type': 'playlist', 'entries': entries, 'id': video_id, 'title': title, 'description': clean_html(data.get('desc')) or None, 'duration': duration, 'live_status': live_status, 'location': strip_or_none(data.get('location')), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/go.py
yt_dlp/extractor/go.py
import re from .adobepass import AdobePassIE from ..utils import ( ExtractorError, determine_ext, int_or_none, join_nonempty, parse_age_limit, unified_timestamp, urlencode_postdata, ) from ..utils.traversal import traverse_obj class GoIE(AdobePassIE): _SITE_INFO = { 'abc': { 'brand': '001', 'requestor_id': 'dtci', 'provider_id': 'ABC', 'software_statement': 'eyJhbGciOiJSUzI1NiJ9.eyJzdWIiOiI4OTcwMjlkYS0yYjM1LTQyOWUtYWQ0NS02ZjZiZjVkZTdhOTUiLCJuYmYiOjE2MjAxNzM5NjksImlzcyI6ImF1dGguYWRvYmUuY29tIiwiaWF0IjoxNjIwMTczOTY5fQ.SC69DVJWSL8sIe-vVUrP6xS_kzHKqwz9PdKYexs_y-f7Vin6mM-7S-W1TE_-K55O0pyf-TL4xYgvm6LIye8CckG-nZfVwNPV4huduov0jmIcxCQFeUwkHULG2IaA44wfBVUBdaHgkhPweZ2amjycO_IXtez-gBXOLbE3B7Gx9j_5ISCFtyVUblThKfoGyQv6KT6t8Vpmc4ZSKCCQp74KWFFypydb9ucego1taW_nQD06Cdf4yByLd6NaTBceMcIKbug9b9gxFm3XBgJ5q3z7KGo1Kr6XalAV5j4m-fQ91wczlTilX8FM4AljMupyRM9mA_aEADILQ4hS79q4SM0w6w', }, 'freeform': { 'brand': '002', 'requestor_id': 'ABCFamily', 'provider_id': 'ABCFamily', 'software_statement': 'eyJhbGciOiJSUzI1NiJ9.eyJzdWIiOiJhZWM2MGYyNC0xYzRjLTQ1NzQtYjc0Zi03ZmM4N2E5YWMzMzgiLCJuYmYiOjE1ODc2NjU5MjMsImlzcyI6ImF1dGguYWRvYmUuY29tIiwiaWF0IjoxNTg3NjY1OTIzfQ.flCn3dhvmvPnWmV0JV8Fm0YFyj07yPez9-n1GFEwVIm_S2wQVWbWyJhqsAyLZVFrhOMZYTqmPS3OHxGwTwXkEYn6PD7o_vIVG3oqi-Xn1m5jRt_Gazw5qEtpat6VE7bvKGSD3ZhcidOrsCk8NcYyq75u61NHDvSl81pcedJjVRVUpsqrEwmo0aVbA0C8PX3ri0mEbGvkMKvHn8E60xp-PSE-VK8SDT0plwPu_TwUszkZ6-_I8_2xcv_WBqcXFkAVg7Q-iNJXgQvmNsrpcrYuLvi6hEH4ZLtoDcXU6MhwTQAJTiHSo8x9aHX1_qFP09CzlNOFQbC2ZEJdP9SvA53SLQ', }, 'disneynow': { 'brand': '011', # also: '004', '008', '009' 'requestor_id': 'DisneyChannels', 'provider_id': 'DisneyChannels', 'software_statement': 
'eyJhbGciOiJSUzI1NiJ9.eyJzdWIiOiI1MzAzNTRiOS04NDNiLTRkNjAtYTQ3ZS0yNzk1MzlkOTIyNTciLCJuYmYiOjE1NTg5ODc0NDksImlzcyI6ImF1dGguYWRvYmUuY29tIiwiaWF0IjoxNTU4OTg3NDQ5fQ.Jud6YS6-J2h0h6po0oMheDym0qRTJQGj4kzacrz4DFuEwhcBkkykW6pF5pKuAUJy9HCZ40oDAHe2KcTlDJjCZF5tDaUEfdihakZ9cC_rG7MU-QoRne8qaB_dPDKwGuk-ZyWD8eV3zwTJmbGo8hDxYTEU81YNCxwhyc_BPDr5TYiubbmpP3_pTnXmSpuL58isJ2peSKWlX9BacuXtBY25c_QnPFKk-_EETm7IHkTpDazde1QfHWGu4s4yJpKGk8RVVujVG6h6ELlL-ZeYLilBm7iS7h1TYG1u7fJhyZRL7isaom6NvAzsvN3ngss1fLwt8decP8wzdFHrbYTdTjW8qw', 'resource_id': 'Disney', }, 'fxnetworks': { 'brand': '025', # also: '020' 'requestor_id': 'dtci', 'provider_id': 'fx', # also 'fxx', 'fxm' 'software_statement': 'eyJhbGciOiJSUzI1NiJ9.eyJzdWIiOiIzYWRhYWZiNC02OTAxLTRlYzktOTdmNy1lYWZkZTJkODJkN2EiLCJuYmYiOjE1NjIwMjQwNzYsImlzcyI6ImF1dGguYWRvYmUuY29tIiwiaWF0IjoxNTYyMDI0MDc2fQ.dhKMpZK50AObbZYrMiYPSfWtzXHUaeMP3jrIY4Cgfvh0GaEgk0Mns_zp78jypFeZgRtPVleQMQDNq2YEloRLcAGqP1aa6WVDglnK77ZWUm4IKai14Rwf3A6YBhSRoO2_lMmUGkuTf6gZY-kMIPqBYKqzTQiQl4HbniPFodIzFRiuI9QJVrkoyTGrJL4oqiX08PoFI3Z-TOti1Heu3EbFC-GveQHhlinYrzU7rbiAqLEz7FImtfBDsnXX1Y3uJDLYM3Bq4Oh0nrzTv1Fd62wNsCNErHHIbELidh1zZF0ujvt7ReuZUwAitm0UhEJ7OxNOUbEQWtae6pVNscvdvTFMpg', }, 'nationalgeographic': { 'brand': '026', # also '023' 'requestor_id': 'dtci', 'provider_id': 'ngc', # also 'ngw' 'software_statement': 'eyJhbGciOiJSUzI1NiJ9.eyJzdWIiOiIxMzE4YTM1Ni05Mjc4LTQ4NjEtYTFmNi1jMTIzMzg1ZWMzYzMiLCJuYmYiOjE1NjIwMjM4MjgsImlzcyI6ImF1dGguYWRvYmUuY29tIiwiaWF0IjoxNTYyMDIzODI4fQ.Le-2OzF9-jrhJ7ZfWtLWk5iSHGVZoxeU1w0_fO--Heli0OwRZsRq2slSmx-oZTzxuWmAgDEiBkWSDcDK6sM25DrCLsdsJa3MBuZ-slBRtH8aq3HpNoqqLkU-vg6gRUEKMtwBUtwCu_9aKUCayYtndWv4b1DjVQeSrteOW5NNudWVYleAe0kxeNJQHo5If9SCzDudKVJktFUjhNks4QPOC_uONPkRRlL9D0fNvtOY-LRFckfcHhf5z9l1iZjeukV0YhdKnuw1wyiaWrQXBUDiBfbkCRd2DM-KnelqPxfiXCaTjGKDURRBO3pz33ebge3IFXSiU5vl4qHQ8xvunzGpFw', }, } _URL_PATH_RE = r'(?:video|episode|movies-and-specials)/(?P<id>[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12})' _VALID_URL = [ fr'https?://(?:www\.)?(?P<site>abc)\.com/{_URL_PATH_RE}', 
fr'https?://(?:www\.)?(?P<site>freeform)\.com/{_URL_PATH_RE}', fr'https?://(?:www\.)?(?P<site>disneynow)\.com/{_URL_PATH_RE}', fr'https?://fxnow\.(?P<site>fxnetworks)\.com/{_URL_PATH_RE}', fr'https?://(?:www\.)?(?P<site>nationalgeographic)\.com/tv/{_URL_PATH_RE}', ] _TESTS = [{ 'url': 'https://abc.com/episode/4192c0e6-26e5-47a8-817b-ce8272b9e440/playlist/PL551127435', 'info_dict': { 'id': 'VDKA10805898', 'ext': 'mp4', 'title': 'Switch the Flip', 'description': 'To help get Brian’s life in order, Stewie and Brian swap bodies using a machine that Stewie invents.', 'age_limit': 14, 'duration': 1297, 'thumbnail': r're:https?://.+/.+\.jpg', 'series': 'Family Guy', 'season': 'Season 16', 'season_number': 16, 'episode': 'Episode 17', 'episode_number': 17, 'timestamp': 1746082800.0, 'upload_date': '20250501', }, 'params': {'skip_download': 'm3u8'}, 'skip': 'This video requires AdobePass MSO credentials', }, { 'url': 'https://disneynow.com/episode/21029660-ba06-4406-adb0-a9a78f6e265e/playlist/PL553044961', 'info_dict': { 'id': 'VDKA39546942', 'ext': 'mp4', 'title': 'Zero Friends Again', 'description': 'Relationships fray under the pressures of a difficult journey.', 'age_limit': 0, 'duration': 1721, 'thumbnail': r're:https?://.+/.+\.jpg', 'series': 'Star Wars: Skeleton Crew', 'season': 'Season 1', 'season_number': 1, 'episode': 'Episode 6', 'episode_number': 6, 'timestamp': 1746946800.0, 'upload_date': '20250511', }, 'params': {'skip_download': 'm3u8'}, 'skip': 'This video requires AdobePass MSO credentials', }, { 'url': 'https://fxnow.fxnetworks.com/episode/09f4fa6f-c293-469e-aebe-32c9ca5842a7/playlist/PL554408064', 'info_dict': { 'id': 'VDKA38112033', 'ext': 'mp4', 'title': 'The Return of Jerry', 'description': 'The vampires’ long-lost fifth roommate returns. 
Written by Paul Simms; directed by Kyle Newacheck.', 'age_limit': 17, 'duration': 1493, 'thumbnail': r're:https?://.+/.+\.jpg', 'series': 'What We Do in the Shadows', 'season': 'Season 6', 'season_number': 6, 'episode': 'Episode 1', 'episode_number': 1, 'timestamp': 1729573200.0, 'upload_date': '20241022', }, 'params': {'skip_download': 'm3u8'}, 'skip': 'This video requires AdobePass MSO credentials', }, { 'url': 'https://www.freeform.com/episode/bda0eaf7-761a-4838-aa44-96f794000844/playlist/PL553044961', 'info_dict': { 'id': 'VDKA39007340', 'ext': 'mp4', 'title': 'Angel\'s Landing', 'description': 'md5:91bf084e785c968fab16734df7313446', 'age_limit': 14, 'duration': 2523, 'thumbnail': r're:https?://.+/.+\.jpg', 'series': 'How I Escaped My Cult', 'season': 'Season 1', 'season_number': 1, 'episode': 'Episode 2', 'episode_number': 2, 'timestamp': 1740038400.0, 'upload_date': '20250220', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.nationalgeographic.com/tv/episode/ca694661-1186-41ae-8089-82f64d69b16d/playlist/PL554408064', 'info_dict': { 'id': 'VDKA39492078', 'ext': 'mp4', 'title': 'Heart of the Emperors', 'description': 'md5:4fc50a2878f030bb3a7eac9124dca677', 'age_limit': 0, 'duration': 2775, 'thumbnail': r're:https?://.+/.+\.jpg', 'series': 'Secrets of the Penguins', 'season': 'Season 1', 'season_number': 1, 'episode': 'Episode 1', 'episode_number': 1, 'timestamp': 1745204400.0, 'upload_date': '20250421', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.freeform.com/movies-and-specials/c38281fc-9f8f-47c7-8220-22394f9df2e1', 'only_matching': True, }, { 'url': 'https://abc.com/video/219a454a-172c-41bf-878a-d169e6bc0bdc/playlist/PL5523098420', 'only_matching': True, }] def _extract_videos(self, brand, video_id='-1', show_id='-1'): display_id = video_id if video_id != '-1' else show_id return self._download_json( f'http://api.contents.watchabc.go.com/vp2/ws/contents/3000/videos/{brand}/001/-1/{show_id}/-1/{video_id}/-1/-1.json', 
display_id)['video'] def _extract_global_var(self, name, webpage, video_id): return self._search_json( fr'window\[["\']{re.escape(name)}["\']\]\s*=', webpage, f'{name.strip("_")} JSON', video_id) def _real_extract(self, url): site, display_id = self._match_valid_url(url).group('site', 'id') webpage = self._download_webpage(url, display_id) config = self._extract_global_var('__CONFIG__', webpage, display_id) data = self._extract_global_var(config['globalVar'], webpage, display_id) video_id = traverse_obj(data, ( 'page', 'content', 'video', 'layout', (('video', 'id'), 'videoid'), {str}, any)) if not video_id: video_id = self._search_regex([ # data-track-video_id="VDKA39492078" # data-track-video_id_code="vdka39492078" # data-video-id="'VDKA3609139'" r'data-(?:track-)?video[_-]id(?:_code)?=["\']*((?:vdka|VDKA)\d+)', # page.analytics.videoIdCode r'\bvideoIdCode["\']\s*:\s*["\']((?:vdka|VDKA)\d+)'], webpage, 'video ID') site_info = self._SITE_INFO[site] brand = site_info['brand'] video_data = self._extract_videos(brand, video_id)[0] video_id = video_data['id'] title = video_data['title'] formats = [] subtitles = {} for asset in video_data.get('assets', {}).get('asset', []): asset_url = asset.get('value') if not asset_url: continue format_id = asset.get('format') ext = determine_ext(asset_url) if ext == 'm3u8': video_type = video_data.get('type') data = { 'video_id': video_id, 'video_type': video_type, 'brand': brand, 'device': '001', 'app_name': 'webplayer-abc', } if video_data.get('accesslevel') == '1': provider_id = site_info['provider_id'] software_statement = traverse_obj(data, ('app', 'config', ( ('features', 'auth', 'softwareStatement'), ('tvAuth', 'SOFTWARE_STATEMENTS', 'PRODUCTION'), ), {str}, any)) or site_info['software_statement'] resource = site_info.get('resource_id') or self._get_mvpd_resource( provider_id, title, video_id, None) auth = self._extract_mvpd_auth( url, video_id, site_info['requestor_id'], resource, software_statement) data.update({ 'token': 
auth, 'token_type': 'ap', 'adobe_requestor_id': provider_id, }) else: self._initialize_geo_bypass({'countries': ['US']}) entitlement = self._download_json( 'https://prod.gatekeeper.us-abc.symphony.edgedatg.go.com/vp2/ws-secure/entitlement/2020/playmanifest_secure.json', video_id, data=urlencode_postdata(data)) errors = entitlement.get('errors', {}).get('errors', []) if errors: for error in errors: if error.get('code') == 1002: self.raise_geo_restricted( error['message'], countries=['US']) error_message = ', '.join([error['message'] for error in errors]) raise ExtractorError(f'{self.IE_NAME} said: {error_message}', expected=True) asset_url += '?' + entitlement['entitlement']['uplynkData']['sessionKey'] fmts, subs = self._extract_m3u8_formats_and_subtitles( asset_url, video_id, 'mp4', m3u8_id=format_id or 'hls', fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) else: f = { 'format_id': format_id, 'url': asset_url, 'ext': ext, } if re.search(r'(?:/mp4/source/|_source\.mp4)', asset_url): f.update({ 'format_id': (f'{format_id}-' if format_id else '') + 'SOURCE', 'quality': 1, }) else: mobj = re.search(r'/(\d+)x(\d+)/', asset_url) if mobj: height = int(mobj.group(2)) f.update({ 'format_id': join_nonempty(format_id, f'{height}P'), 'width': int(mobj.group(1)), 'height': height, }) formats.append(f) for cc in video_data.get('closedcaption', {}).get('src', []): cc_url = cc.get('value') if not cc_url: continue ext = determine_ext(cc_url) if ext == 'xml': ext = 'ttml' subtitles.setdefault(cc.get('lang'), []).append({ 'url': cc_url, 'ext': ext, }) thumbnails = [] for thumbnail in video_data.get('thumbnails', {}).get('thumbnail', []): thumbnail_url = thumbnail.get('value') if not thumbnail_url: continue thumbnails.append({ 'url': thumbnail_url, 'width': int_or_none(thumbnail.get('width')), 'height': int_or_none(thumbnail.get('height')), }) return { 'id': video_id, 'title': title, 'description': video_data.get('longdescription') or 
video_data.get('description'), 'duration': int_or_none(video_data.get('duration', {}).get('value'), 1000), 'age_limit': parse_age_limit(video_data.get('tvrating', {}).get('rating')), 'episode_number': int_or_none(video_data.get('episodenumber')), 'series': video_data.get('show', {}).get('title'), 'season_number': int_or_none(video_data.get('season', {}).get('num')), 'thumbnails': thumbnails, 'formats': formats, 'subtitles': subtitles, 'timestamp': unified_timestamp(traverse_obj(video_data, ('airdates', 'airdate', 0))), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/alibaba.py
yt_dlp/extractor/alibaba.py
from .common import InfoExtractor from ..utils import int_or_none, str_or_none, url_or_none from ..utils.traversal import traverse_obj class AlibabaIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?alibaba\.com/product-detail/[\w-]+_(?P<id>\d+)\.html' _TESTS = [{ 'url': 'https://www.alibaba.com/product-detail/Kids-Entertainment-Bouncer-Bouncy-Castle-Waterslide_1601271126969.html', 'info_dict': { 'id': '6000280444270', 'display_id': '1601271126969', 'ext': 'mp4', 'title': 'Kids Entertainment Bouncer Bouncy Castle Waterslide Juex Gonflables Commercial Inflatable Tropical Water Slide', 'duration': 30, 'thumbnail': 'https://sc04.alicdn.com/kf/Hc5bb391974454af18c7a4f91cbe4062bg.jpg_120x120.jpg', }, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) product_data = self._search_json( r'window\.detailData\s*=', webpage, 'detail data', display_id)['globalData']['product'] return { **traverse_obj(product_data, ('mediaItems', lambda _, v: v['type'] == 'video' and v['videoId'], any, { 'id': ('videoId', {int}, {str_or_none}), 'duration': ('duration', {int_or_none}), 'thumbnail': ('videoCoverUrl', {url_or_none}), 'formats': ('videoUrl', lambda _, v: url_or_none(v['videoUrl']), { 'url': 'videoUrl', 'format_id': ('definition', {str_or_none}), 'tbr': ('bitrate', {int_or_none}), 'width': ('width', {int_or_none}), 'height': ('height', {int_or_none}), 'filesize': ('length', {int_or_none}), }), })), 'title': traverse_obj(product_data, ('subject', {str})), 'display_id': display_id, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/pialive.py
yt_dlp/extractor/pialive.py
from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    clean_html,
    extract_attributes,
    get_element_by_class,
    get_element_html_by_class,
    multipart_encode,
    str_or_none,
    unified_timestamp,
    url_or_none,
)
from ..utils.traversal import traverse_obj


class PiaLiveIE(InfoExtractor):
    _VALID_URL = r'https?://player\.pia-live\.jp/stream/(?P<id>[\w-]+)'
    _PLAYER_ROOT_URL = 'https://player.pia-live.jp/'
    _PIA_LIVE_API_URL = 'https://api.pia-live.jp'
    _API_KEY = 'kfds)FKFps-dms9e'
    _TESTS = [{
        'url': 'https://player.pia-live.jp/stream/4JagFBEIM14s_hK9aXHKf3k3F3bY5eoHFQxu68TC6krUDqGOwN4d61dCWQYOd6CTxl4hjya9dsfEZGsM4uGOUdax60lEI4twsXGXf7crmz8Gk__GhupTrWxA7RFRVt76',
        'info_dict': {
            'id': '88f3109a-f503-4d0f-a9f7-9f39ac745d84',
            'display_id': '2431867_001',
            'title': 'こながめでたい日2024の視聴ページ | PIA LIVE STREAM(ぴあライブストリーム)',
            'live_status': 'was_live',
            'comment_count': int,
        },
        'params': {
            'getcomments': True,
            'skip_download': True,
            'ignore_no_formats_error': True,
        },
        'skip': 'The video is no longer available',
    }, {
        'url': 'https://player.pia-live.jp/stream/4JagFBEIM14s_hK9aXHKf3k3F3bY5eoHFQxu68TC6krJdu0GVBVbVy01IwpJ6J3qBEm3d9TCTt1d0eWpsZGj7DrOjVOmS7GAWGwyscMgiThopJvzgWC4H5b-7XQjAfRZ',
        'info_dict': {
            'id': '9ce8b8ba-f6d1-4d1f-83a0-18c3148ded93',
            'display_id': '2431867_002',
            'title': 'こながめでたい日2024の視聴ページ | PIA LIVE STREAM(ぴあライブストリーム)',
            'live_status': 'was_live',
            'comment_count': int,
        },
        'params': {
            'getcomments': True,
            'skip_download': True,
            'ignore_no_formats_error': True,
        },
        'skip': 'The video is no longer available',
    }]

    def _extract_var(self, variable, html):
        # JS string assignment: var/const/let <name> = "value"
        return self._search_regex(
            rf'(?:var|const|let)\s+{variable}\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1',
            html, f'variable {variable}', group='value')

    def _real_extract(self, url):
        video_key = self._match_id(url)
        webpage = self._download_webpage(url, video_key)
        program_code = self._extract_var('programCode', webpage)
        article_code = self._extract_var('articleCode', webpage)
        title = self._html_extract_title(webpage)

        if get_element_html_by_class('play-end', webpage):
            raise ExtractorError('The video is no longer available', expected=True, video_id=program_code)

        if start_info := clean_html(get_element_by_class('play-waiting__date', webpage)):
            # Upcoming stream: parse the announced JST start date/time
            date, time = self._search_regex(
                r'(?P<date>\d{4}/\d{1,2}/\d{1,2})\([月火水木金土日]\)(?P<time>\d{2}:\d{2})',
                start_info, 'start_info', fatal=False, group=('date', 'time'))
            if date and time:
                release_timestamp_str = f'{date} {time} +09:00'
                release_timestamp = unified_timestamp(release_timestamp_str)
                self.raise_no_formats(f'The video will be available after {release_timestamp_str}', expected=True)
                return {
                    'id': program_code,
                    'title': title,
                    'live_status': 'is_upcoming',
                    'release_timestamp': release_timestamp,
                }

        payload, content_type = multipart_encode({
            'play_url': video_key,
            'api_key': self._API_KEY,
        })
        api_data_and_headers = {
            'data': payload,
            'headers': {'Content-Type': content_type, 'Referer': self._PLAYER_ROOT_URL},
        }

        player_tag_list = self._download_json(
            f'{self._PIA_LIVE_API_URL}/perf/player-tag-list/{program_code}', program_code,
            'Fetching player tag list', 'Unable to fetch player tag list', **api_data_and_headers)

        return self.url_result(
            extract_attributes(player_tag_list['data']['movie_one_tag'])['src'],
            url_transparent=True, title=title, display_id=program_code,
            __post_extractor=self.extract_comments(program_code, article_code, api_data_and_headers))

    def _get_comments(self, program_code, article_code, api_data_and_headers):
        chat_room_url = traverse_obj(self._download_json(
            f'{self._PIA_LIVE_API_URL}/perf/chat-tag-list/{program_code}/{article_code}', program_code,
            'Fetching chat info', 'Unable to fetch chat info', fatal=False, **api_data_and_headers),
            ('data', 'chat_one_tag', {extract_attributes}, 'src', {url_or_none}))
        if not chat_room_url:
            return
        comment_page = self._download_webpage(
            chat_room_url, program_code, 'Fetching comment page', 'Unable to fetch comment page',
            fatal=False, headers={'Referer': self._PLAYER_ROOT_URL})
        if not comment_page:
            return
        # Comments are positional arrays: [timestamp, role, author, text, id]
        yield from traverse_obj(self._search_json(
            r'var\s+_history\s*=', comment_page, 'comment list', program_code,
            contains_pattern=r'\[(?s:.+)\]', fatal=False), (..., {
                'timestamp': (0, {int}),
                'author_is_uploader': (1, {lambda x: x == 2}),
                'author': (2, {str}),
                'text': (3, {str}),
                'id': (4, {str_or_none}),
            }))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/canalsurmas.py
yt_dlp/extractor/canalsurmas.py
import json
import time

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    float_or_none,
    jwt_decode_hs256,
    parse_iso8601,
    url_or_none,
    variadic,
)
from ..utils.traversal import traverse_obj


class CanalsurmasIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?canalsurmas\.es/videos/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.canalsurmas.es/videos/44006-el-gran-queo-1-lora-del-rio-sevilla-20072014',
        'md5': '861f86fdc1221175e15523047d0087ef',
        'info_dict': {
            'id': '44006',
            'ext': 'mp4',
            'title': 'Lora del Río (Sevilla)',
            'description': 'md5:3d9ee40a9b1b26ed8259e6b71ed27b8b',
            'thumbnail': 'https://cdn2.rtva.interactvty.com/content_cards/00f3e8f67b0a4f3b90a4a14618a48b0d.jpg',
            'timestamp': 1648123182,
            'upload_date': '20220324',
        },
    }]
    _API_BASE = 'https://api-rtva.interactvty.com'
    _access_token = None

    @staticmethod
    def _is_jwt_expired(token):
        # Treat tokens within 5 minutes of expiry as already expired
        return jwt_decode_hs256(token)['exp'] - time.time() < 300

    def _call_api(self, endpoint, video_id, fields=None):
        # Lazily (re-)fetch the JWT before any API call
        if not self._access_token or self._is_jwt_expired(self._access_token):
            self._access_token = self._download_json(
                f'{self._API_BASE}/jwt/token/', None, 'Downloading access token',
                'Failed to download access token',
                headers={'Content-Type': 'application/json'},
                data=json.dumps({
                    'username': 'canalsur_demo',
                    'password': 'dsUBXUcI',
                }).encode())['access']

        return self._download_json(
            f'{self._API_BASE}/api/2.0/contents/{endpoint}/{video_id}/', video_id,
            f'Downloading {endpoint} API JSON', f'Failed to download {endpoint} API JSON',
            headers={'Authorization': f'jwtok {self._access_token}'},
            query={'optional_fields': ','.join(variadic(fields))} if fields else None)

    def _real_extract(self, url):
        video_id = self._match_id(url)
        video_info = self._call_api('content', video_id, fields=[
            'description', 'image', 'duration', 'created_at', 'tags',
        ])
        stream_info = self._call_api('content_resources', video_id, 'media_url')

        formats, subtitles = [], {}
        for stream_url in traverse_obj(stream_info, ('results', ..., 'media_url', {url_or_none})):
            if determine_ext(stream_url) == 'm3u8':
                fmts, subs = self._extract_m3u8_formats_and_subtitles(
                    stream_url, video_id, m3u8_id='hls', fatal=False)
                formats.extend(fmts)
                self._merge_subtitles(subs, target=subtitles)
            else:
                formats.append({'url': stream_url})

        return {
            'id': video_id,
            'formats': formats,
            'subtitles': subtitles,
            **traverse_obj(video_info, {
                'title': ('name', {str.strip}),
                'description': ('description', {str}),
                'thumbnail': ('image', {url_or_none}),
                'duration': ('duration', {float_or_none}),
                'timestamp': ('created_at', {parse_iso8601}),
                'tags': ('tags', ..., {str}),
            }),
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/yapfiles.py
yt_dlp/extractor/yapfiles.py
from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    qualities,
    url_or_none,
)


class YapFilesIE(InfoExtractor):
    _WORKING = False
    _YAPFILES_URL = r'//(?:(?:www|api)\.)?yapfiles\.ru/get_player/*\?.*?\bv=(?P<id>\w+)'
    _VALID_URL = rf'https?:{_YAPFILES_URL}'
    _EMBED_REGEX = [rf'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?{_YAPFILES_URL}.*?)\1']
    _TESTS = [{
        # with hd
        'url': 'http://www.yapfiles.ru/get_player/?v=vMDE1NjcyNDUt0413',
        'md5': '2db19e2bfa2450568868548a1aa1956c',
        'info_dict': {
            'id': 'vMDE1NjcyNDUt0413',
            'ext': 'mp4',
            'title': 'Самый худший пароль WIFI',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 72,
        },
    }, {
        # without hd
        'url': 'https://api.yapfiles.ru/get_player/?uid=video_player_1872528&plroll=1&adv=1&v=vMDE4NzI1Mjgt690b',
        'only_matching': True,
    }]
    _WEBPAGE_TESTS = [{
        # FIXME: Update _VALID_URL
        'url': 'https://www.yapfiles.ru/show/3397030/e34b69aa03829d513d7dc3ace6ec9631.mp4.html',
        'info_dict': {
            'id': 'vMDE4NzI1Mjgt690b',
            'ext': 'mp4',
            'title': 'Котята',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id, fatal=False)

        player_url = None
        query = {}
        if webpage:
            player_url = self._search_regex(
                r'player\.init\s*\(\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage,
                'player url', default=None, group='url')

        if not player_url:
            # Fall back to the load API when the player URL is not embedded
            player_url = f'http://api.yapfiles.ru/load/{video_id}/'
            query = {
                'md5': 'ded5f369be61b8ae5f88e2eeb2f3caff',
                'type': 'json',
                'ref': url,
            }

        player = self._download_json(
            player_url, video_id, query=query)['player']

        playlist_url = player['playlist']
        title = player['title']
        thumbnail = player.get('poster')

        if title == 'Ролик удален' or 'deleted.jpg' in (thumbnail or ''):
            raise ExtractorError(
                f'Video {video_id} has been removed', expected=True)

        playlist = self._download_json(
            playlist_url, video_id)['player']['main']

        hd_height = int_or_none(player.get('hd'))

        QUALITIES = ('sd', 'hd')
        quality_key = qualities(QUALITIES)
        formats = []
        for format_id in QUALITIES:
            is_hd = format_id == 'hd'
            format_url = url_or_none(playlist.get(
                'file%s' % ('_hd' if is_hd else '')))
            if not format_url:
                continue
            formats.append({
                'url': format_url,
                'format_id': format_id,
                'quality': quality_key(format_id),
                'height': hd_height if is_hd else None,
            })

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'duration': int_or_none(player.get('length')),
            'formats': formats,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/uktvplay.py
yt_dlp/extractor/uktvplay.py
from .common import InfoExtractor


class UKTVPlayIE(InfoExtractor):
    _VALID_URL = r'https?://uktvplay\.(?:uktv\.)?co\.uk/(?:.+?\?.*?\bvideo=|([^/]+/)*)(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://uktvplay.uktv.co.uk/shows/world-at-war/c/200/watch-online/?video=2117008346001',
        'info_dict': {
            'id': '2117008346001',
            'ext': 'mp4',
            'title': 'Pincers',
            'description': 'Pincers',
            'uploader_id': '1242911124001',
            'upload_date': '20130124',
            'timestamp': 1359049267,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'expected_warnings': ['Failed to download MPD manifest'],
    }, {
        'url': 'https://uktvplay.uktv.co.uk/shows/africa/watch-online/5983349675001',
        'only_matching': True,
    }, {
        'url': 'https://uktvplay.co.uk/shows/hornby-a-model-world/series-1/episode-1/6276739790001?autoplaying=true',
        'only_matching': True,
    }]
    # BRIGHTCOVE_URL_TEMPLATE = 'https://players.brightcove.net/1242911124001/OrCyvJ2gyL_default/index.html?videoId=%s'
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1242911124001/H1xnMOqP_default/index.html?videoId=%s'

    def _real_extract(self, url):
        # Thin wrapper: delegate playback to the Brightcove player page
        video_id = self._match_id(url)
        return self.url_result(
            self.BRIGHTCOVE_URL_TEMPLATE % video_id, 'BrightcoveNew', video_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/crooksandliars.py
yt_dlp/extractor/crooksandliars.py
from .common import InfoExtractor
from ..utils import (
    int_or_none,
    qualities,
)


class CrooksAndLiarsIE(InfoExtractor):
    _VALID_URL = r'https?://embed\.crooksandliars\.com/(?:embed|v)/(?P<id>[A-Za-z0-9]+)'
    _EMBED_REGEX = [r'<(?:iframe[^>]+src|param[^>]+value)=(["\'])(?P<url>(?:https?:)?//embed\.crooksandliars\.com/(?:embed|v)/.+?)\1']
    _TESTS = [{
        'url': 'https://embed.crooksandliars.com/embed/8RUoRhRi',
        'info_dict': {
            'id': '8RUoRhRi',
            'ext': 'mp4',
            'title': 'Fox & Friends Says Protecting Atheists From Discrimination Is Anti-Christian!',
            'description': 'md5:e1a46ad1650e3a5ec7196d432799127f',
            'thumbnail': r're:https?://crooksandliars\.com/files/.+',
            'timestamp': 1428207000,
            'upload_date': '20150405',
            'uploader': 'Heather',
            'duration': 236,
        },
    }, {
        'url': 'http://embed.crooksandliars.com/v/MTE3MjUtMzQ2MzA',
        'only_matching': True,
    }]
    _WEBPAGE_TESTS = [{
        'url': 'https://crooksandliars.com/2015/04/fox-friends-says-protecting-atheists',
        'info_dict': {
            'id': '8RUoRhRi',
            'ext': 'mp4',
            'title': 'Fox & Friends Says Protecting Atheists From Discrimination Is Anti-Christian!',
            'description': 'md5:e1a46ad1650e3a5ec7196d432799127f',
            'duration': 236,
            'thumbnail': r're:https?://crooksandliars\.com/files/.+',
            'timestamp': 1428207000,
            'upload_date': '20150405',
            'uploader': 'Heather',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(
            f'http://embed.crooksandliars.com/embed/{video_id}', video_id)

        manifest = self._search_json(r'var\s+manifest\s*=', webpage, 'manifest JSON', video_id)

        quality = qualities(('webm_low', 'mp4_low', 'webm_high', 'mp4_high'))

        # Only video flavors; the manifest may also list audio-only entries
        formats = [{
            'url': item['url'],
            'format_id': item['type'],
            'quality': quality(item['type']),
        } for item in manifest['flavors'] if item['mime'].startswith('video/')]

        return {
            'url': url,
            'id': video_id,
            'title': manifest['title'],
            'description': manifest.get('description'),
            'thumbnail': self._proto_relative_url(manifest.get('poster')),
            'timestamp': int_or_none(manifest.get('created')),
            'uploader': manifest.get('author'),
            'duration': int_or_none(manifest.get('duration')),
            'formats': formats,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nfl.py
yt_dlp/extractor/nfl.py
import base64
import json
import re
import time
import uuid

from .anvato import AnvatoIE
from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    clean_html,
    determine_ext,
    get_element_by_class,
    int_or_none,
    make_archive_id,
    url_or_none,
    urlencode_postdata,
)
from ..utils.traversal import traverse_obj


class NFLBaseIE(InfoExtractor):
    _VALID_URL_BASE = r'''(?x)
        https?://
            (?P<host>
                (?:www\.)?
                (?:
                    (?:
                        nfl|
                        buffalobills|
                        miamidolphins|
                        patriots|
                        newyorkjets|
                        baltimoreravens|
                        bengals|
                        clevelandbrowns|
                        steelers|
                        houstontexans|
                        colts|
                        jaguars|
                        (?:titansonline|tennesseetitans)|
                        denverbroncos|
                        (?:kc)?chiefs|
                        raiders|
                        chargers|
                        dallascowboys|
                        giants|
                        philadelphiaeagles|
                        (?:redskins|washingtonfootball)|
                        chicagobears|
                        detroitlions|
                        packers|
                        vikings|
                        atlantafalcons|
                        panthers|
                        neworleanssaints|
                        buccaneers|
                        azcardinals|
                        (?:stlouis|the)rams|
                        49ers|
                        seahawks
                    )\.com|
                    .+?\.clubs\.nfl\.com
                )
            )/
    '''
    _VIDEO_CONFIG_REGEX = r'<script[^>]+id="[^"]*video-config-[0-9a-f]{8}-(?:[0-9a-f]{4}-){3}[0-9a-f]{12}[^"]*"[^>]*>\s*({.+});?\s*</script>'
    _ANVATO_PREFIX = 'anvato:GXvEgwyJeWem8KCYXfeoHWknwP48Mboj:'

    _CLIENT_DATA = {
        'clientKey': '4cFUW6DmwJpzT9L7LrG3qRAcABG5s04g',
        'clientSecret': 'CZuvCL49d9OwfGsR',
        'deviceId': str(uuid.uuid4()),
        'deviceInfo': base64.b64encode(json.dumps({
            'model': 'desktop',
            'version': 'Chrome',
            'osName': 'Windows',
            'osVersion': '10.0',
        }, separators=(',', ':')).encode()).decode(),
        'networkType': 'other',
        'peacockUUID': 'undefined',
    }
    _ACCOUNT_INFO = {}
    _API_KEY = '3_Qa8TkWpIB8ESCBT8tY2TukbVKgO5F6BJVc7N1oComdwFzI7H2L9NOWdm11i_BY9f'

    _TOKEN = None
    _TOKEN_EXPIRY = 0

    def _get_account_info(self):
        cookies = self._get_cookies('https://auth-id.nfl.com/')
        login_token = traverse_obj(cookies, (
            (f'glt_{self._API_KEY}', lambda k, _: k.startswith('glt_')), {lambda x: x.value}), get_all=False)
        if not login_token:
            self.raise_login_required()
        if 'ucid' not in cookies:
            raise ExtractorError(
                'Required cookies for the auth-id.nfl.com domain were not found among passed cookies. '
                'If using --cookies, these cookies must be exported along with .nfl.com cookies, '
                'or else try using --cookies-from-browser instead', expected=True)

        account = self._download_json(
            'https://auth-id.nfl.com/accounts.getAccountInfo', None,
            note='Downloading account info', data=urlencode_postdata({
                'include': 'profile,data',
                'lang': 'en',
                'APIKey': self._API_KEY,
                'sdk': 'js_latest',
                'login_token': login_token,
                'authMode': 'cookie',
                'pageURL': 'https://www.nfl.com/',
                'sdkBuild': traverse_obj(cookies, (
                    'gig_canary_ver', {lambda x: x.value.partition('-')[0]}), default='15170'),
                'format': 'json',
            }), headers={'Content-Type': 'application/x-www-form-urlencoded'})

        self._ACCOUNT_INFO = traverse_obj(account, {
            'signatureTimestamp': 'signatureTimestamp',
            'uid': 'UID',
            'uidSignature': 'UIDSignature',
        })

        if len(self._ACCOUNT_INFO) != 3:
            raise ExtractorError('Failed to retrieve account info with provided cookies', expected=True)

    def _get_auth_token(self):
        # Reuse a cached token that is still valid for at least 30 seconds
        if self._TOKEN and self._TOKEN_EXPIRY > int(time.time() + 30):
            return

        token = self._download_json(
            'https://api.nfl.com/identity/v3/token%s' % (
                '/refresh' if self._ACCOUNT_INFO.get('refreshToken') else ''),
            None, headers={'Content-Type': 'application/json'}, note='Downloading access token',
            data=json.dumps({**self._CLIENT_DATA, **self._ACCOUNT_INFO}, separators=(',', ':')).encode())

        self._TOKEN = token['accessToken']
        self._TOKEN_EXPIRY = token['expiresIn']
        self._ACCOUNT_INFO['refreshToken'] = token['refreshToken']

    def _extract_video(self, mcp_id, is_live=False):
        self._get_auth_token()
        data = self._download_json(
            f'https://api.nfl.com/play/v1/asset/{mcp_id}', mcp_id, headers={
                'Authorization': f'Bearer {self._TOKEN}',
                'Accept': 'application/json',
                'Content-Type': 'application/json',
            }, data=json.dumps({'init': True, 'live': is_live}, separators=(',', ':')).encode())
        formats, subtitles = self._extract_m3u8_formats_and_subtitles(
            data['accessUrl'], mcp_id, 'mp4', m3u8_id='hls')

        return {
            'id': mcp_id,
            'formats': formats,
            'subtitles': subtitles,
            'is_live': is_live,
            '_old_archive_ids': [make_archive_id(AnvatoIE, mcp_id)],
            **traverse_obj(data, ('metadata', {
                'title': ('event', ('def_title', 'friendlyName'), {str}, any),
                'description': ('event', 'def_description', {str}),
                'duration': ('event', 'duration', {int_or_none}),
                'thumbnails': ('thumbnails', ..., 'url', {'url': {url_or_none}}),
            })),
        }

    def _parse_video_config(self, video_config, display_id):
        video_config = self._parse_json(video_config, display_id)
        is_live = traverse_obj(video_config, ('live', {bool})) or False
        item = video_config['playlist'][0]
        if mcp_id := item.get('mcpID'):
            return self._extract_video(mcp_id, is_live=is_live)

        info = {'id': item.get('id') or item['entityId']}

        item_url = item['url']
        ext = determine_ext(item_url)
        if ext == 'm3u8':
            info['formats'] = self._extract_m3u8_formats(item_url, info['id'], 'mp4')
        else:
            info['url'] = item_url
            if item.get('audio') is True:
                info['vcodec'] = 'none'

        thumbnails = None
        if image_url := traverse_obj(item, 'imageSrc', 'posterImage', expected_type=url_or_none):
            thumbnails = [{
                'url': image_url,
                'ext': determine_ext(image_url, 'jpg'),
            }]

        info.update({
            **traverse_obj(item, {
                'title': ('title', {str}),
                'description': ('description', {clean_html}),
            }),
            'is_live': is_live,
            'thumbnails': thumbnails,
        })
        return info


class NFLIE(NFLBaseIE):
    IE_NAME = 'nfl.com'
    _VALID_URL = NFLBaseIE._VALID_URL_BASE + r'(?:videos?|listen|audio)/(?P<id>[^/#?&]+)'
    _TESTS = [{
        'url': 'https://www.nfl.com/videos/baker-mayfield-s-game-changing-plays-from-3-td-game-week-14',
        'info_dict': {
            'id': '899441',
            'ext': 'mp4',
            'title': "Baker Mayfield's game-changing plays from 3-TD game Week 14",
            'description': 'md5:85e05a3cc163f8c344340f220521136d',
            'thumbnail': r're:https?://.+\.jpg',
            'duration': 157,
            '_old_archive_ids': ['anvato 899441'],
        },
    }, {
        'url': 'https://www.chiefs.com/listen/patrick-mahomes-travis-kelce-react-to-win-over-dolphins-the-breakdown',
        'md5': '92a517f05bd3eb50fe50244bc621aec8',
        'info_dict': {
            'id': '8b7c3625-a461-4751-8db4-85f536f2bbd0',
            'ext': 'mp3',
            'title': 'Patrick Mahomes, Travis Kelce React to Win Over Dolphins | The Breakdown',
            'description': 'md5:12ada8ee70e6762658c30e223e095075',
            'thumbnail': 'https://static.clubs.nfl.com/image/private/t_editorial_landscape_12_desktop/v1571153441/chiefs/rfljejccnyhhkpkfq855',
        },
    }, {
        'url': 'https://www.buffalobills.com/video/buffalo-bills-military-recognition-week-14',
        'only_matching': True,
    }, {
        'url': 'https://www.raiders.com/audio/instant-reactions-raiders-week-14-loss-to-indianapolis-colts-espn-jason-fitz',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        return self._parse_video_config(self._search_regex(
            self._VIDEO_CONFIG_REGEX, webpage, 'video config'), display_id)


class NFLArticleIE(NFLBaseIE):
    IE_NAME = 'nfl.com:article'
    _VALID_URL = NFLBaseIE._VALID_URL_BASE + r'news/(?P<id>[^/#?&]+)'
    _TEST = {
        'url': 'https://www.buffalobills.com/news/the-only-thing-we-ve-earned-is-the-noise-bills-coaches-discuss-handling-rising-e',
        'info_dict': {
            'id': 'the-only-thing-we-ve-earned-is-the-noise-bills-coaches-discuss-handling-rising-e',
            'title': "'The only thing we've earned is the noise' | Bills coaches discuss handling rising expectations",
        },
        'playlist_count': 4,
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        def entries():
            # Articles may embed several video-config blobs
            for video_config in re.findall(self._VIDEO_CONFIG_REGEX, webpage):
                yield self._parse_video_config(video_config, display_id)

        title = clean_html(get_element_by_class(
            'nfl-c-article__title', webpage)) or self._html_search_meta(
            ['og:title', 'twitter:title'], webpage)

        return self.playlist_result(entries(), display_id, title)


class NFLPlusReplayIE(NFLBaseIE):
    IE_NAME = 'nfl.com:plus:replay'
    _VALID_URL = r'https?://(?:www\.)?nfl\.com/plus/games/(?P<slug>[\w-]+)(?:/(?P<id>\d+))?'
    _TESTS = [{
        'url': 'https://www.nfl.com/plus/games/giants-at-vikings-2022-post-1/1572108',
        'info_dict': {
            'id': '1572108',
            'ext': 'mp4',
            'title': 'New York Giants at Minnesota Vikings',
            'description': 'New York Giants play the Minnesota Vikings at U.S. Bank Stadium on January 15, 2023',
            'uploader': 'NFL',
            'upload_date': '20230116',
            'timestamp': 1673864520,
            'duration': 7157,
            'categories': ['Game Highlights'],
            'tags': ['Minnesota Vikings', 'New York Giants', 'Minnesota Vikings vs. New York Giants'],
            'thumbnail': r're:^https?://.*\.jpg',
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'note': 'Subscription required',
        'url': 'https://www.nfl.com/plus/games/giants-at-vikings-2022-post-1',
        'playlist_count': 4,
        'info_dict': {
            'id': 'giants-at-vikings-2022-post-1',
        },
    }, {
        'note': 'Subscription required',
        'url': 'https://www.nfl.com/plus/games/giants-at-patriots-2011-pre-4',
        'playlist_count': 2,
        'info_dict': {
            'id': 'giants-at-patriots-2011-pre-4',
        },
    }, {
        'note': 'Subscription required',
        'url': 'https://www.nfl.com/plus/games/giants-at-patriots-2011-pre-4',
        'info_dict': {
            'id': '950701',
            'ext': 'mp4',
            'title': 'Giants @ Patriots',
            'description': 'Giants at Patriots on September 01, 2011',
            'uploader': 'NFL',
            'upload_date': '20210724',
            'timestamp': 1627085874,
            'duration': 1532,
            'categories': ['Game Highlights'],
            'tags': ['play-by-play'],
            'thumbnail': r're:^https?://.*\.jpg',
        },
        'params': {
            'skip_download': 'm3u8',
            'extractor_args': {'nflplusreplay': {'type': ['condensed_game']}},
        },
    }]

    _REPLAY_TYPES = {
        'full_game': 'Full Game',
        'full_game_spanish': 'Full Game - Spanish',
        'condensed_game': 'Condensed Game',
        'all_22': 'All-22',
    }

    def _real_initialize(self):
        self._get_account_info()

    def _real_extract(self, url):
        slug, video_id = self._match_valid_url(url).group('slug', 'id')
        requested_types = self._configuration_arg('type', ['all'])
        if 'all' in requested_types:
            requested_types = list(self._REPLAY_TYPES.keys())
        requested_types = traverse_obj(self._REPLAY_TYPES, (None, requested_types))

        if not video_id:
            self._get_auth_token()
            headers = {'Authorization': f'Bearer {self._TOKEN}'}
            game_id = self._download_json(
                f'https://api.nfl.com/football/v2/games/externalId/slug/{slug}', slug,
                'Downloading game ID', query={'withExternalIds': 'true'}, headers=headers)['id']
            replays = self._download_json(
                'https://api.nfl.com/content/v1/videos/replays', slug, 'Downloading replays JSON',
                query={'gameId': game_id}, headers=headers)
            if len(requested_types) == 1:
                video_id = traverse_obj(replays, (
                    'items', lambda _, v: v['subType'] == requested_types[0], 'mcpPlaybackId'), get_all=False)

        if video_id:
            return self._extract_video(video_id)

        def entries():
            for replay in traverse_obj(
                replays, ('items', lambda _, v: v['mcpPlaybackId'] and v['subType'] in requested_types),
            ):
                yield self._extract_video(replay['mcpPlaybackId'])

        return self.playlist_result(entries(), slug)


class NFLPlusEpisodeIE(NFLBaseIE):
    IE_NAME = 'nfl.com:plus:episode'
    _VALID_URL = r'https?://(?:www\.)?nfl\.com/plus/episodes/(?P<id>[\w-]+)'
    _TESTS = [{
        'note': 'Subscription required',
        'url': 'https://www.nfl.com/plus/episodes/kurt-s-qb-insider-conference-championships',
        'info_dict': {
            'id': '1576832',
            'ext': 'mp4',
            'title': 'Conference Championships',
            'description': 'md5:944f7fab56f7a37430bf8473f5473857',
            'uploader': 'NFL',
            'upload_date': '20230127',
            'timestamp': 1674782760,
            'duration': 730,
            'categories': ['Analysis'],
            'tags': ['Cincinnati Bengals at Kansas City Chiefs (2022-POST-3)'],
            'thumbnail': r're:^https?://.*\.jpg',
        },
        'params': {'skip_download': 'm3u8'},
    }]

    def _real_initialize(self):
        self._get_account_info()

    def _real_extract(self, url):
        slug = self._match_id(url)
        self._get_auth_token()
        video_id = self._download_json(
            f'https://api.nfl.com/content/v1/videos/episodes/{slug}', slug, headers={
                'Authorization': f'Bearer {self._TOKEN}',
            })['mcpPlaybackId']
        return self._extract_video(video_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/amazon.py
yt_dlp/extractor/amazon.py
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    clean_html,
    float_or_none,
    get_element_by_attribute,
    get_element_by_class,
    int_or_none,
    js_to_json,
    traverse_obj,
    url_or_none,
)


class AmazonStoreIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?amazon\.(?:[a-z]{2,3})(?:\.[a-z]{2})?/(?:[^/]+/)?(?:dp|gp/product)/(?P<id>[^/&#$?]+)'

    _TESTS = [{
        'url': 'https://www.amazon.co.uk/dp/B098XNCHLD/',
        'info_dict': {
            'id': 'B098XNCHLD',
            'title': str,
        },
        'playlist_mincount': 1,
        'playlist': [{
            'info_dict': {
                'id': 'A1F83G8C2ARO7P',
                'ext': 'mp4',
                'title': 'mcdodo usb c cable 100W 5a',
                'thumbnail': r're:^https?://.*\.jpg$',
                'duration': 34,
            },
        }],
        'expected_warnings': ['Unable to extract data'],
    }, {
        'url': 'https://www.amazon.in/Sony-WH-1000XM4-Cancelling-Headphones-Bluetooth/dp/B0863TXGM3',
        'info_dict': {
            'id': 'B0863TXGM3',
            'title': str,
        },
        'playlist_mincount': 4,
        'expected_warnings': ['Unable to extract data'],
    }, {
        'url': 'https://www.amazon.com/dp/B0845NXCXF/',
        'info_dict': {
            'id': 'B0845NXCXF',
            'title': str,
        },
        'playlist-mincount': 1,
        'expected_warnings': ['Unable to extract data'],
    }, {
        'url': 'https://www.amazon.es/Samsung-Smartphone-s-AMOLED-Quad-c%C3%A1mara-espa%C3%B1ola/dp/B08WX337PQ',
        'info_dict': {
            'id': 'B08WX337PQ',
            'title': str,
        },
        'playlist_mincount': 1,
        'expected_warnings': ['Unable to extract data'],
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        # The data blob is not always present on first fetch; retry
        for retry in self.RetryManager():
            webpage = self._download_webpage(url, playlist_id)
            try:
                data_json = self._search_json(
                    r'var\s?obj\s?=\s?jQuery\.parseJSON\(\'', webpage, 'data', playlist_id,
                    transform_source=js_to_json)
            except ExtractorError as e:
                retry.error = e

        entries = [{
            'id': video['marketPlaceID'],
            'url': video['url'],
            'title': video.get('title'),
            'thumbnail': video.get('thumbUrl') or video.get('thumb'),
            'duration': video.get('durationSeconds'),
            'height': int_or_none(video.get('videoHeight')),
            'width': int_or_none(video.get('videoWidth')),
        } for video in (data_json.get('videos') or []) if video.get('isVideo') and video.get('url')]

        return self.playlist_result(entries, playlist_id=playlist_id, playlist_title=data_json.get('title'))


class AmazonReviewsIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?amazon\.(?:[a-z]{2,3})(?:\.[a-z]{2})?/gp/customer-reviews/(?P<id>[^/&#$?]+)'
    _TESTS = [{
        'url': 'https://www.amazon.com/gp/customer-reviews/R10VE9VUSY19L3/ref=cm_cr_arp_d_rvw_ttl',
        'info_dict': {
            'id': 'R10VE9VUSY19L3',
            'ext': 'mp4',
            'title': 'Get squad #Suspicious',
            'description': 'md5:7012695052f440a1e064e402d87e0afb',
            'uploader': 'Kimberly Cronkright',
            'average_rating': 1.0,
            'thumbnail': r're:^https?://.*\.jpg$',
        },
        'expected_warnings': ['Review body was not found in webpage'],
    }, {
        'url': 'https://www.amazon.com/gp/customer-reviews/R10VE9VUSY19L3/ref=cm_cr_arp_d_rvw_ttl?language=es_US',
        'info_dict': {
            'id': 'R10VE9VUSY19L3',
            'ext': 'mp4',
            'title': 'Get squad #Suspicious',
            'description': 'md5:7012695052f440a1e064e402d87e0afb',
            'uploader': 'Kimberly Cronkright',
            'average_rating': 1.0,
            'thumbnail': r're:^https?://.*\.jpg$',
        },
        'expected_warnings': ['Review body was not found in webpage'],
    }, {
        'url': 'https://www.amazon.in/gp/customer-reviews/RV1CO8JN5VGXV/',
        'info_dict': {
            'id': 'RV1CO8JN5VGXV',
            'ext': 'mp4',
            'title': 'Not sure about its durability',
            'description': 'md5:1a252c106357f0a3109ebf37d2e87494',
            'uploader': 'Shoaib Gulzar',
            'average_rating': 2.0,
            'thumbnail': r're:^https?://.*\.jpg$',
        },
        'expected_warnings': ['Review body was not found in webpage'],
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        for retry in self.RetryManager():
            webpage = self._download_webpage(url, video_id)
            review_body = get_element_by_attribute('data-hook', 'review-body', webpage)
            if not review_body:
                retry.error = ExtractorError('Review body was not found in webpage', expected=True)

        formats, subtitles = [], {}

        manifest_url = self._search_regex(
            r'data-video-url="([^"]+)"', review_body, 'm3u8 url', default=None)
        if url_or_none(manifest_url):
            fmts, subtitles = self._extract_m3u8_formats_and_subtitles(
                manifest_url, video_id, 'mp4', fatal=False)
            formats.extend(fmts)

        video_url = self._search_regex(
            r'<input[^>]+\bvalue="([^"]+)"[^>]+\bclass="video-url"', review_body,
            'mp4 url', default=None)
        if url_or_none(video_url):
            formats.append({
                'url': video_url,
                'ext': 'mp4',
                'format_id': 'http-mp4',
            })

        if not formats:
            self.raise_no_formats('No video found for this customer review', expected=True)

        return {
            'id': video_id,
            'title': (clean_html(get_element_by_attribute('data-hook', 'review-title', webpage))
                      or self._html_extract_title(webpage)),
            'description': clean_html(traverse_obj(re.findall(
                r'<span(?:\s+class="cr-original-review-content")?>(.+?)</span>', review_body), -1)),
            'uploader': clean_html(get_element_by_class('a-profile-name', webpage)),
            'average_rating': float_or_none(clean_html(get_element_by_attribute(
                'data-hook', 'review-star-rating', webpage) or '').partition(' ')[0]),
            'thumbnail': self._search_regex(
                r'data-thumbnail-url="([^"]+)"', review_body, 'thumbnail', default=None),
            'formats': formats,
            'subtitles': subtitles,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/cracked.py
yt_dlp/extractor/cracked.py
import re from .common import InfoExtractor from .youtube import YoutubeIE from ..utils import ( parse_iso8601, str_to_int, ) class CrackedIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?cracked\.com/video_(?P<id>\d+)_[\da-z-]+\.html' _TESTS = [{ 'url': 'http://www.cracked.com/video_19070_if-animal-actors-got-e21-true-hollywood-stories.html', 'md5': '89b90b9824e3806ca95072c4d78f13f7', 'info_dict': { 'id': '19070', 'ext': 'mp4', 'title': 'If Animal Actors Got E! True Hollywood Stories', 'timestamp': 1404954000, 'upload_date': '20140710', }, }, { # youtube embed 'url': 'http://www.cracked.com/video_19006_4-plot-holes-you-didnt-notice-in-your-favorite-movies.html', 'md5': 'ccd52866b50bde63a6ef3b35016ba8c7', 'info_dict': { 'id': 'EjI00A3rZD0', 'ext': 'mp4', 'title': "4 Plot Holes You Didn't Notice in Your Favorite Movies - The Spit Take", 'description': 'md5:c603708c718b796fe6079e2b3351ffc7', 'upload_date': '20140725', 'uploader_id': 'Cracked', 'uploader': 'Cracked', }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) youtube_url = YoutubeIE._extract_url(webpage) if youtube_url: return self.url_result(youtube_url, ie=YoutubeIE.ie_key()) video_url = self._html_search_regex( [r'var\s+CK_vidSrc\s*=\s*"([^"]+)"', r'<video\s+src="([^"]+)"'], webpage, 'video URL') title = self._search_regex( [r'property="?og:title"?\s+content="([^"]+)"', r'class="?title"?>([^<]+)'], webpage, 'title') description = self._search_regex( r'name="?(?:og:)?description"?\s+content="([^"]+)"', webpage, 'description', default=None) timestamp = self._html_search_regex( r'"date"\s*:\s*"([^"]+)"', webpage, 'upload date', fatal=False) if timestamp: timestamp = parse_iso8601(timestamp[:-6]) view_count = str_to_int(self._html_search_regex( r'<span\s+class="?views"? 
id="?viewCounts"?>([\d,\.]+) Views</span>', webpage, 'view count', fatal=False)) comment_count = str_to_int(self._html_search_regex( r'<span\s+id="?commentCounts"?>([\d,\.]+)</span>', webpage, 'comment count', fatal=False)) m = re.search(r'_(?P<width>\d+)X(?P<height>\d+)\.mp4$', video_url) if m: width = int(m.group('width')) height = int(m.group('height')) else: width = height = None return { 'id': video_id, 'url': video_url, 'title': title, 'description': description, 'timestamp': timestamp, 'view_count': view_count, 'comment_count': comment_count, 'height': height, 'width': width, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/rentv.py
yt_dlp/extractor/rentv.py
from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, url_or_none, ) class RENTVIE(InfoExtractor): _WORKING = False _VALID_URL = r'(?:rentv:|https?://(?:www\.)?ren\.tv/(?:player|video/epizod)/)(?P<id>\d+)' _TESTS = [{ 'url': 'http://ren.tv/video/epizod/118577', 'md5': 'd91851bf9af73c0ad9b2cdf76c127fbb', 'info_dict': { 'id': '118577', 'ext': 'mp4', 'title': 'Документальный спецпроект: "Промывка мозгов. Технологии XXI века"', 'timestamp': 1472230800, 'upload_date': '20160826', }, }, { 'url': 'http://ren.tv/player/118577', 'only_matching': True, }, { 'url': 'rentv:118577', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage('http://ren.tv/player/' + video_id, video_id) config = self._parse_json(self._search_regex( r'config\s*=\s*({.+})\s*;', webpage, 'config'), video_id) title = config['title'] formats = [] for video in config['src']: src = url_or_none(video.get('src')) if not src: continue ext = determine_ext(src) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( src, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) else: formats.append({ 'url': src, }) return { 'id': video_id, 'title': title, 'description': config.get('description'), 'thumbnail': config.get('image'), 'duration': int_or_none(config.get('duration')), 'timestamp': int_or_none(config.get('date')), 'formats': formats, } class RENTVArticleIE(InfoExtractor): _WORKING = False _VALID_URL = r'https?://(?:www\.)?ren\.tv/novosti/\d{4}-\d{2}-\d{2}/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'http://ren.tv/novosti/2016-10-26/video-mikroavtobus-popavshiy-v-dtp-s-gruzovikami-v-podmoskove-prevratilsya-v', 'md5': 'ebd63c4680b167693745ab91343df1d6', 'info_dict': { 'id': '136472', 'ext': 'mp4', 'title': 'Видео: микроавтобус, попавший в ДТП с грузовиками в Подмосковье, превратился в груду металла', 'description': 'Жертвами столкновения двух фур и микроавтобуса, по последним данным, стали семь 
человек.', }, }, { # TODO: invalid m3u8 'url': 'http://ren.tv/novosti/2015-09-25/sluchaynyy-prohozhiy-poymal-avtougonshchika-v-murmanske-video', 'info_dict': { 'id': 'playlist', 'ext': 'mp4', 'title': 'Случайный прохожий поймал автоугонщика в Мурманске. ВИДЕО | РЕН ТВ', 'uploader': 'ren.tv', }, 'params': { # m3u8 downloads 'skip_download': True, }, 'skip': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) drupal_settings = self._parse_json(self._search_regex( r'jQuery\.extend\(Drupal\.settings\s*,\s*({.+?})\);', webpage, 'drupal settings'), display_id) entries = [] for config_profile in drupal_settings.get('ren_jwplayer', {}).values(): media_id = config_profile.get('mediaid') if not media_id: continue media_id = str(media_id) entries.append(self.url_result('rentv:' + media_id, 'RENTV', media_id)) return self.playlist_result(entries, display_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/voicy.py
yt_dlp/extractor/voicy.py
import itertools from .common import InfoExtractor from ..utils import ( ExtractorError, smuggle_url, str_or_none, traverse_obj, unified_strdate, unsmuggle_url, ) class VoicyBaseIE(InfoExtractor): def _extract_from_playlist_data(self, value): voice_id = str(value.get('PlaylistId')) upload_date = unified_strdate(value.get('Published'), False) items = [self._extract_single_article(voice_data) for voice_data in value['VoiceData']] return { '_type': 'multi_video', 'entries': items, 'id': voice_id, 'title': str(value.get('PlaylistName')), 'uploader': value.get('SpeakerName'), 'uploader_id': str_or_none(value.get('SpeakerId')), 'channel': value.get('ChannelName'), 'channel_id': str_or_none(value.get('ChannelId')), 'upload_date': upload_date, } def _extract_single_article(self, entry): formats = [{ 'url': entry['VoiceHlsFile'], 'format_id': 'hls', 'ext': 'm4a', 'acodec': 'aac', 'vcodec': 'none', 'protocol': 'm3u8_native', }, { 'url': entry['VoiceFile'], 'format_id': 'mp3', 'ext': 'mp3', 'acodec': 'mp3', 'vcodec': 'none', }] return { 'id': str(entry.get('ArticleId')), 'title': entry.get('ArticleTitle'), 'description': entry.get('MediaName'), 'formats': formats, } def _call_api(self, url, video_id, **kwargs): response = self._download_json(url, video_id, **kwargs) if response.get('Status') != 0: message = traverse_obj(response, ('Value', 'Error', 'Message'), expected_type=str) if not message: message = 'There was a error in the response: %d' % response.get('Status') raise ExtractorError(message, expected=False) return response.get('Value') class VoicyIE(VoicyBaseIE): _WORKING = False IE_NAME = 'voicy' _VALID_URL = r'https?://voicy\.jp/channel/(?P<channel_id>\d+)/(?P<id>\d+)' ARTICLE_LIST_API_URL = 'https://vmw.api.voicy.jp/articles_list?channel_id=%s&pid=%s' _TESTS = [{ 'url': 'https://voicy.jp/channel/1253/122754', 'info_dict': { 'id': '122754', 'title': '1/21(木)声日記:ついに原稿終わった!!', 'uploader': 'ちょまど@ ITエンジニアなオタク', 'uploader_id': '7339', }, 'playlist_mincount': 9, }] def 
_real_extract(self, url): mobj = self._match_valid_url(url) assert mobj voice_id = mobj.group('id') channel_id = mobj.group('channel_id') url, article_list = unsmuggle_url(url) if not article_list: article_list = self._call_api(self.ARTICLE_LIST_API_URL % (channel_id, voice_id), voice_id) return self._extract_from_playlist_data(article_list) class VoicyChannelIE(VoicyBaseIE): _WORKING = False IE_NAME = 'voicy:channel' _VALID_URL = r'https?://voicy\.jp/channel/(?P<id>\d+)' PROGRAM_LIST_API_URL = 'https://vmw.api.voicy.jp/program_list/all?channel_id=%s&limit=20&public_type=3%s' _TESTS = [{ 'url': 'https://voicy.jp/channel/1253/', 'info_dict': { 'id': '7339', 'title': 'ゆるふわ日常ラジオ #ちょまラジ', 'uploader': 'ちょまど@ ITエンジニアなオタク', 'uploader_id': '7339', }, 'playlist_mincount': 54, }] @classmethod def suitable(cls, url): return not VoicyIE.suitable(url) and super().suitable(url) def _entries(self, channel_id): pager = '' for count in itertools.count(1): article_list = self._call_api(self.PROGRAM_LIST_API_URL % (channel_id, pager), channel_id, note=f'Paging #{count}') playlist_data = article_list.get('PlaylistData') if not playlist_data: break yield from playlist_data last = playlist_data[-1] pager = '&pid=%d&p_date=%s&play_count=%s' % (last['PlaylistId'], last['Published'], last['PlayCount']) def _real_extract(self, url): channel_id = self._match_id(url) articles = self._entries(channel_id) first_article = next(articles, None) title = traverse_obj(first_article, ('ChannelName', ), expected_type=str) speaker_name = traverse_obj(first_article, ('SpeakerName', ), expected_type=str) if not title and speaker_name: title = f'Uploads from {speaker_name}' if not title: title = f'Uploads from channel ID {channel_id}' articles = itertools.chain([first_article], articles) if first_article else articles playlist = ( self.url_result(smuggle_url('https://voicy.jp/channel/%s/%d' % (channel_id, value['PlaylistId']), value), VoicyIE.ie_key()) for value in articles) return { '_type': 'playlist', 
'entries': playlist, 'id': channel_id, 'title': title, 'channel': speaker_name, 'channel_id': channel_id, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/fuyintv.py
yt_dlp/extractor/fuyintv.py
from .common import InfoExtractor from ..utils import traverse_obj class FuyinTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?fuyin\.tv/html/(?:\d+)/(?P<id>\d+)\.html' _TESTS = [{ 'url': 'https://www.fuyin.tv/html/2733/44129.html', 'info_dict': { 'id': '44129', 'ext': 'mp4', 'title': '第1集', 'description': 'md5:21a3d238dc8d49608e1308e85044b9c3', }, }] def _real_extract(self, url): video_id = self._match_id(url) json_data = self._download_json( 'https://www.fuyin.tv/api/api/tv.movie/url', video_id, query={'urlid': f'{video_id}'}) webpage = self._download_webpage(url, video_id, fatal=False) return { 'id': video_id, 'title': traverse_obj(json_data, ('data', 'title')), 'url': json_data['data']['url'], 'ext': 'mp4', 'description': self._html_search_meta('description', webpage), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/eroprofile.py
yt_dlp/extractor/eroprofile.py
import re import urllib.parse from .common import InfoExtractor from ..utils import ( ExtractorError, merge_dicts, ) class EroProfileIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?eroprofile\.com/m/videos/view/(?P<id>[^/]+)' _LOGIN_URL = 'http://www.eroprofile.com/auth/auth.php?' _NETRC_MACHINE = 'eroprofile' _TESTS = [{ 'url': 'http://www.eroprofile.com/m/videos/view/sexy-babe-softcore', 'md5': 'c26f351332edf23e1ea28ce9ec9de32f', 'info_dict': { 'id': '3733775', 'display_id': 'sexy-babe-softcore', 'ext': 'm4v', 'title': 'sexy babe softcore', 'thumbnail': r're:https?://.*\.jpg', 'age_limit': 18, }, 'skip': 'Video not found', }, { 'url': 'http://www.eroprofile.com/m/videos/view/Try-It-On-Pee_cut_2-wmv-4shared-com-file-sharing-download-movie-file', 'md5': '1baa9602ede46ce904c431f5418d8916', 'info_dict': { 'id': '1133519', 'ext': 'm4v', 'title': 'Try It On Pee_cut_2.wmv - 4shared.com - file sharing - download movie file', 'thumbnail': r're:https?://.*\.jpg', 'age_limit': 18, }, 'skip': 'Requires login', }] def _perform_login(self, username, password): query = urllib.parse.urlencode({ 'username': username, 'password': password, 'url': 'http://www.eroprofile.com/', }) login_url = self._LOGIN_URL + query login_page = self._download_webpage(login_url, None, False) m = re.search(r'Your username or password was incorrect\.', login_page) if m: raise ExtractorError( 'Wrong username and/or password.', expected=True) self.report_login() redirect_url = self._search_regex( r'<script[^>]+?src="([^"]+)"', login_page, 'login redirect url') self._download_webpage(redirect_url, None, False) def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) m = re.search(r'You must be logged in to view this video\.', webpage) if m: self.raise_login_required('This video requires login') video_id = self._search_regex( [r"glbUpdViews\s*\('\d*','(\d+)'", r'p/report/video/(\d+)'], webpage, 'video id', default=None) title = 
self._html_search_regex( (r'Title:</th><td>([^<]+)</td>', r'<h1[^>]*>(.+?)</h1>'), webpage, 'title') info = self._parse_html5_media_entries(url, webpage, video_id)[0] return merge_dicts(info, { 'id': video_id, 'display_id': display_id, 'title': title, 'age_limit': 18, }) class EroProfileAlbumIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?eroprofile\.com/m/videos/album/(?P<id>[^/]+)' IE_NAME = 'EroProfile:album' _TESTS = [{ 'url': 'https://www.eroprofile.com/m/videos/album/BBW-2-893', 'info_dict': { 'id': 'BBW-2-893', 'title': 'BBW 2', }, 'playlist_mincount': 486, }, ] def _extract_from_page(self, page): for url in re.findall(r'href=".*?(/m/videos/view/[^"]+)"', page): yield self.url_result(f'https://www.eroprofile.com{url}', EroProfileIE.ie_key()) def _entries(self, playlist_id, first_page): yield from self._extract_from_page(first_page) page_urls = re.findall(rf'href=".*?(/m/videos/album/{playlist_id}\?pnum=(\d+))"', first_page) max_page = max(int(n) for _, n in page_urls) for n in range(2, max_page + 1): url = f'https://www.eroprofile.com/m/videos/album/{playlist_id}?pnum={n}' yield from self._extract_from_page( self._download_webpage(url, playlist_id, note=f'Downloading playlist page {int(n) - 1}')) def _real_extract(self, url): playlist_id = self._match_id(url) first_page = self._download_webpage(url, playlist_id, note='Downloading playlist') playlist_title = self._search_regex( r'<title>Album: (.*) - EroProfile</title>', first_page, 'playlist_title') return self.playlist_result(self._entries(playlist_id, first_page), playlist_id, playlist_title)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/bluesky.py
yt_dlp/extractor/bluesky.py
from .common import InfoExtractor from ..utils import ( ExtractorError, format_field, int_or_none, mimetype2ext, orderedSet, parse_iso8601, truncate_string, update_url_query, url_basename, url_or_none, variadic, ) from ..utils.traversal import traverse_obj class BlueskyIE(InfoExtractor): _VALID_URL = [ r'https?://(?:www\.)?(?:bsky\.app|main\.bsky\.dev)/profile/(?P<handle>[\w.:%-]+)/post/(?P<id>\w+)', r'at://(?P<handle>[\w.:%-]+)/app\.bsky\.feed\.post/(?P<id>\w+)', ] _TESTS = [{ 'url': 'https://bsky.app/profile/blu3blue.bsky.social/post/3l4omssdl632g', 'md5': '375539c1930ab05d15585ed772ab54fd', 'info_dict': { 'id': '3l4omssdl632g', 'ext': 'mp4', 'uploader': 'Blu3Blu3Lilith', 'uploader_id': 'blu3blue.bsky.social', 'uploader_url': 'https://bsky.app/profile/blu3blue.bsky.social', 'channel_id': 'did:plc:pzdr5ylumf7vmvwasrpr5bf2', 'channel_url': 'https://bsky.app/profile/did:plc:pzdr5ylumf7vmvwasrpr5bf2', 'thumbnail': r're:https://video.bsky.app/watch/.*\.jpg$', 'title': 'OMG WE HAVE VIDEOS NOW', 'description': 'OMG WE HAVE VIDEOS NOW', 'upload_date': '20240921', 'timestamp': 1726940605, 'like_count': int, 'repost_count': int, 'comment_count': int, 'tags': [], }, }, { 'url': 'https://bsky.app/profile/bsky.app/post/3l3vgf77uco2g', 'md5': 'b9e344fdbce9f2852c668a97efefb105', 'info_dict': { 'id': '3l3vgf77uco2g', 'ext': 'mp4', 'uploader': 'Bluesky', 'uploader_id': 'bsky.app', 'uploader_url': 'https://bsky.app/profile/bsky.app', 'channel_id': 'did:plc:z72i7hdynmk6r22z27h6tvur', 'channel_url': 'https://bsky.app/profile/did:plc:z72i7hdynmk6r22z27h6tvur', 'thumbnail': r're:https://video.bsky.app/watch/.*\.jpg$', 'title': 'Bluesky now has video! Update your app to version 1.91 or refresh on ...', 'alt_title': 'Bluesky video feature announcement', 'description': r're:(?s)Bluesky now has video! 
.{239}', 'upload_date': '20240911', 'timestamp': 1726074716, 'like_count': int, 'repost_count': int, 'comment_count': int, 'tags': [], 'subtitles': { 'en': 'mincount:1', }, }, }, { 'url': 'https://main.bsky.dev/profile/souris.moe/post/3l4qhp7bcs52c', 'md5': '5f2df8c200b5633eb7fb2c984d29772f', 'info_dict': { 'id': '3l4qhp7bcs52c', 'ext': 'mp4', 'uploader': 'souris', 'uploader_id': 'souris.moe', 'uploader_url': 'https://bsky.app/profile/souris.moe', 'channel_id': 'did:plc:tj7g244gl5v6ai6cm4f4wlqp', 'channel_url': 'https://bsky.app/profile/did:plc:tj7g244gl5v6ai6cm4f4wlqp', 'thumbnail': r're:https://video.bsky.app/watch/.*\.jpg$', 'title': 'Bluesky video #3l4qhp7bcs52c', 'upload_date': '20240922', 'timestamp': 1727003838, 'like_count': int, 'repost_count': int, 'comment_count': int, 'tags': [], }, }, { 'url': 'https://bsky.app/profile/de1.pds.tentacle.expert/post/3l3w4tnezek2e', 'md5': 'cc0110ed1f6b0247caac8234cc1e861d', 'info_dict': { 'id': '3l3w4tnezek2e', 'ext': 'mp4', 'uploader': 'clean', 'uploader_id': 'de1.pds.tentacle.expert', 'uploader_url': 'https://bsky.app/profile/de1.pds.tentacle.expert', 'channel_id': 'did:web:de1.tentacle.expert', 'channel_url': 'https://bsky.app/profile/did:web:de1.tentacle.expert', 'thumbnail': r're:https://video.bsky.app/watch/.*\.jpg$', 'title': 'Bluesky video #3l3w4tnezek2e', 'upload_date': '20240911', 'timestamp': 1726098823, 'like_count': int, 'repost_count': int, 'comment_count': int, 'tags': [], }, }, { 'url': 'https://bsky.app/profile/yunayuispink.bsky.social/post/3l7gqcfes742o', 'info_dict': { 'id': 'XxK3t_5V3ao', 'ext': 'mp4', 'uploader': 'yunayu', 'uploader_id': '@yunayuispink', 'uploader_url': 'https://www.youtube.com/@yunayuispink', 'channel': 'yunayu', 'channel_id': 'UCPLvXnHa7lTyNoR_dGsU14w', 'channel_url': 'https://www.youtube.com/channel/UCPLvXnHa7lTyNoR_dGsU14w', 'thumbnail': 'https://i.ytimg.com/vi_webp/XxK3t_5V3ao/maxresdefault.webp', 'description': r're:Have a good goodx10000day', 'title': '5min vs 5hours drawing', 
'availability': 'public', 'live_status': 'not_live', 'playable_in_embed': True, 'upload_date': '20241026', 'timestamp': 1729967784, 'duration': 321, 'age_limit': 0, 'like_count': int, 'view_count': int, 'comment_count': int, 'channel_follower_count': int, 'categories': ['Entertainment'], 'tags': [], 'chapters': list, 'heatmap': 'count:100', }, 'add_ie': ['Youtube'], }, { 'url': 'https://bsky.app/profile/endshark.bsky.social/post/3jzxjkcemae2m', 'info_dict': { 'id': '222792849', 'ext': 'mp3', 'uploader': 'LASERBAT', 'uploader_id': 'laserbatx', 'uploader_url': 'https://laserbatx.bandcamp.com', 'artists': ['LASERBAT'], 'album_artists': ['LASERBAT'], 'album': 'Hari Nezumi [EP]', 'track': 'Forward to the End', 'title': 'LASERBAT - Forward to the End', 'thumbnail': 'https://f4.bcbits.com/img/a2507705510_5.jpg', 'duration': 228.571, 'track_id': '222792849', 'release_date': '20230423', 'upload_date': '20230423', 'timestamp': 1682276040.0, 'release_timestamp': 1682276040.0, 'track_number': 1, }, 'add_ie': ['Bandcamp'], }, { 'url': 'https://bsky.app/profile/dannybhoix.bsky.social/post/3l6oe5mtr2c2j', 'md5': 'b9e344fdbce9f2852c668a97efefb105', 'info_dict': { 'id': '3l3vgf77uco2g', 'ext': 'mp4', 'uploader': 'Bluesky', 'uploader_id': 'bsky.app', 'uploader_url': 'https://bsky.app/profile/bsky.app', 'channel_id': 'did:plc:z72i7hdynmk6r22z27h6tvur', 'channel_url': 'https://bsky.app/profile/did:plc:z72i7hdynmk6r22z27h6tvur', 'thumbnail': r're:https://video.bsky.app/watch/.*\.jpg$', 'title': 'Bluesky now has video! Update your app to version 1.91 or refresh on ...', 'alt_title': 'Bluesky video feature announcement', 'description': r're:(?s)Bluesky now has video! 
.{239}', 'upload_date': '20240911', 'timestamp': 1726074716, 'like_count': int, 'repost_count': int, 'comment_count': int, 'tags': [], 'subtitles': { 'en': 'mincount:1', }, }, }, { 'url': 'https://bsky.app/profile/cinny.bun.how/post/3l7rdfxhyds2f', 'md5': '8775118b235cf9fa6b5ad30f95cda75c', 'info_dict': { 'id': '3l7rdfxhyds2f', 'ext': 'mp4', 'uploader': 'cinnamon 🐇 🏳️‍⚧️', 'uploader_id': 'cinny.bun.how', 'uploader_url': 'https://bsky.app/profile/cinny.bun.how', 'channel_id': 'did:plc:7x6rtuenkuvxq3zsvffp2ide', 'channel_url': 'https://bsky.app/profile/did:plc:7x6rtuenkuvxq3zsvffp2ide', 'thumbnail': r're:https://video.bsky.app/watch/.*\.jpg$', 'title': 'crazy that i look like this tbh', 'description': 'crazy that i look like this tbh', 'upload_date': '20241030', 'timestamp': 1730332128, 'like_count': int, 'repost_count': int, 'comment_count': int, 'tags': ['sexual'], 'age_limit': 18, }, }, { 'url': 'at://did:plc:ia76kvnndjutgedggx2ibrem/app.bsky.feed.post/3l6zrz6zyl2dr', 'md5': '71b0eb6d85d03145e6af6642c7fc6d78', 'info_dict': { 'id': '3l6zrz6zyl2dr', 'ext': 'mp4', 'uploader': 'mary🐇', 'uploader_id': 'mary.my.id', 'uploader_url': 'https://bsky.app/profile/mary.my.id', 'channel_id': 'did:plc:ia76kvnndjutgedggx2ibrem', 'channel_url': 'https://bsky.app/profile/did:plc:ia76kvnndjutgedggx2ibrem', 'thumbnail': r're:https://video.bsky.app/watch/.*\.jpg$', 'title': 'Bluesky video #3l6zrz6zyl2dr', 'upload_date': '20241021', 'timestamp': 1729523172, 'like_count': int, 'repost_count': int, 'comment_count': int, 'tags': [], }, }, { 'url': 'https://bsky.app/profile/purpleicetea.bsky.social/post/3l7gv55dc2o2w', 'info_dict': { 'id': '3l7gv55dc2o2w', }, 'playlist': [{ 'info_dict': { 'id': '3l7gv55dc2o2w', 'ext': 'mp4', 'upload_date': '20241026', 'description': 'One of my favorite videos', 'comment_count': int, 'uploader_url': 'https://bsky.app/profile/purpleicetea.bsky.social', 'uploader': 'Purple.Ice.Tea', 'thumbnail': r're:https://video.bsky.app/watch/.*\.jpg$', 'channel_url': 
'https://bsky.app/profile/did:plc:bjh5ffwya5f53dfy47dezuwx', 'like_count': int, 'channel_id': 'did:plc:bjh5ffwya5f53dfy47dezuwx', 'repost_count': int, 'timestamp': 1729973202, 'tags': [], 'uploader_id': 'purpleicetea.bsky.social', 'title': 'One of my favorite videos', }, }, { 'info_dict': { 'id': '3l77u64l7le2e', 'ext': 'mp4', 'title': "hearing people on twitter say that bluesky isn't funny yet so post t...", 'like_count': int, 'uploader_id': 'thafnine.net', 'uploader_url': 'https://bsky.app/profile/thafnine.net', 'upload_date': '20241024', 'channel_url': 'https://bsky.app/profile/did:plc:6ttyq36rhiyed7wu3ws7dmqj', 'description': r're:(?s)hearing people on twitter say that bluesky .{93}', 'tags': [], 'alt_title': 'md5:9b1ee1937fb3d1a81e932f9ec14d560e', 'uploader': 'T9', 'channel_id': 'did:plc:6ttyq36rhiyed7wu3ws7dmqj', 'thumbnail': r're:https://video.bsky.app/watch/.*\.jpg$', 'timestamp': 1729731642, 'comment_count': int, 'repost_count': int, }, }], }] _BLOB_URL_TMPL = '{}/xrpc/com.atproto.sync.getBlob' def _get_service_endpoint(self, did, video_id): if did.startswith('did:web:'): url = f'https://{did[8:]}/.well-known/did.json' else: url = f'https://plc.directory/{did}' services = self._download_json( url, video_id, 'Fetching service endpoint', 'Falling back to bsky.social', fatal=False) return traverse_obj( services, ('service', lambda _, x: x['type'] == 'AtprotoPersonalDataServer', 'serviceEndpoint', {url_or_none}, any)) or 'https://bsky.social' def _extract_post(self, handle, post_id): return self._download_json( 'https://public.api.bsky.app/xrpc/app.bsky.feed.getPostThread', post_id, query={ 'uri': f'at://{handle}/app.bsky.feed.post/{post_id}', 'depth': 0, 'parentHeight': 0, })['thread']['post'] def _real_extract(self, url): handle, video_id = self._match_valid_url(url).group('handle', 'id') post = self._extract_post(handle, video_id) entries = [] # app.bsky.embed.video.view/app.bsky.embed.external.view entries.extend(self._extract_videos(post, video_id)) # 
app.bsky.embed.recordWithMedia.view entries.extend(self._extract_videos( post, video_id, embed_path=('embed', 'media'), record_subpath=('embed', 'media'))) # app.bsky.embed.record.view if nested_post := traverse_obj(post, ('embed', 'record', ('record', None), {dict}, any)): entries.extend(self._extract_videos( nested_post, video_id, embed_path=('embeds', 0), record_path='value')) if not entries: raise ExtractorError('No video could be found in this post', expected=True) if len(entries) == 1: return entries[0] return self.playlist_result(entries, video_id) @staticmethod def _build_profile_url(path): return format_field(path, None, 'https://bsky.app/profile/%s', default=None) def _extract_videos(self, root, video_id, embed_path='embed', record_path='record', record_subpath='embed'): embed_path = variadic(embed_path, (str, bytes, dict, set)) record_path = variadic(record_path, (str, bytes, dict, set)) record_subpath = variadic(record_subpath, (str, bytes, dict, set)) entries = [] if external_uri := traverse_obj(root, ( ((*record_path, *record_subpath), embed_path), 'external', 'uri', {url_or_none}, any)): entries.append(self.url_result(external_uri)) if playlist := traverse_obj(root, (*embed_path, 'playlist', {url_or_none})): formats, subtitles = self._extract_m3u8_formats_and_subtitles( playlist, video_id, 'mp4', m3u8_id='hls', fatal=False) else: return entries video_cid = traverse_obj( root, (*embed_path, 'cid', {str}), (*record_path, *record_subpath, 'video', 'ref', '$link', {str})) did = traverse_obj(root, ('author', 'did', {str})) if did and video_cid: endpoint = self._get_service_endpoint(did, video_id) formats.append({ 'format_id': 'blob', 'quality': 1, 'url': update_url_query( self._BLOB_URL_TMPL.format(endpoint), {'did': did, 'cid': video_cid}), **traverse_obj(root, (*embed_path, 'aspectRatio', { 'width': ('width', {int_or_none}), 'height': ('height', {int_or_none}), })), **traverse_obj(root, (*record_path, *record_subpath, 'video', { 'filesize': ('size', 
{int_or_none}), 'ext': ('mimeType', {mimetype2ext}), })), }) for sub_data in traverse_obj(root, ( *record_path, *record_subpath, 'captions', lambda _, v: v['file']['ref']['$link'])): subtitles.setdefault(sub_data.get('lang') or 'und', []).append({ 'url': update_url_query( self._BLOB_URL_TMPL.format(endpoint), {'did': did, 'cid': sub_data['file']['ref']['$link']}), 'ext': traverse_obj(sub_data, ('file', 'mimeType', {mimetype2ext})), }) entries.append({ 'id': video_id, 'formats': formats, 'subtitles': subtitles, **traverse_obj(root, { 'id': ('uri', {url_basename}), 'thumbnail': (*embed_path, 'thumbnail', {url_or_none}), 'alt_title': (*embed_path, 'alt', {str}, filter), 'uploader': ('author', 'displayName', {str}), 'uploader_id': ('author', 'handle', {str}), 'uploader_url': ('author', 'handle', {self._build_profile_url}), 'channel_id': ('author', 'did', {str}), 'channel_url': ('author', 'did', {self._build_profile_url}), 'like_count': ('likeCount', {int_or_none}), 'repost_count': ('repostCount', {int_or_none}), 'comment_count': ('replyCount', {int_or_none}), 'timestamp': ('indexedAt', {parse_iso8601}), 'tags': ('labels', ..., 'val', {str}, all, {orderedSet}), 'age_limit': ( 'labels', ..., 'val', {lambda x: 18 if x in ('sexual', 'porn', 'graphic-media') else None}, any), 'description': (*record_path, 'text', {str}, filter), 'title': (*record_path, 'text', {lambda x: x.replace('\n', ' ')}, {truncate_string(left=72)}), }), }) return entries
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/pornbox.py
yt_dlp/extractor/pornbox.py
from .common import InfoExtractor from ..utils import ( int_or_none, parse_duration, parse_iso8601, qualities, str_or_none, traverse_obj, url_or_none, ) class PornboxIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?pornbox\.com/application/watch-page/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://pornbox.com/application/watch-page/212108', 'md5': '3ff6b6e206f263be4c5e987a3162ac6e', 'info_dict': { 'id': '212108', 'ext': 'mp4', 'title': 'md5:ececc5c6e6c9dd35d290c45fed05fd49', 'uploader': 'Lily Strong', 'timestamp': 1665871200, 'upload_date': '20221015', 'age_limit': 18, 'availability': 'needs_auth', 'duration': 1505, 'cast': ['Lily Strong', 'John Strong'], 'tags': 'count:11', 'description': 'md5:589c7f33e183aa8aa939537300efb859', 'thumbnail': r're:^https?://cdn-image\.gtflixtv\.com.*\.jpg.*$', }, }, { 'url': 'https://pornbox.com/application/watch-page/216045', 'info_dict': { 'id': '216045', 'title': 'md5:3e48528e73a9a2b12f7a2772ed0b26a2', 'description': 'md5:3e631dcaac029f15ed434e402d1b06c7', 'uploader': 'VK Studio', 'timestamp': 1618264800, 'upload_date': '20210412', 'age_limit': 18, 'availability': 'premium_only', 'duration': 2710, 'cast': 'count:3', 'tags': 'count:29', 'thumbnail': r're:^https?://cdn-image\.gtflixtv\.com.*\.jpg.*$', 'subtitles': 'count:6', }, 'params': { 'skip_download': True, 'ignore_no_formats_error': True, }, 'expected_warnings': [ 'You are either not logged in or do not have access to this scene', 'No video formats found', 'Requested format is not available'], }] def _real_extract(self, url): video_id = self._match_id(url) public_data = self._download_json(f'https://pornbox.com/contents/{video_id}', video_id) subtitles = {country_code: [{ 'url': f'https://pornbox.com/contents/{video_id}/subtitles/{country_code}', 'ext': 'srt', }] for country_code in traverse_obj(public_data, ('subtitles', ..., {str}))} is_free_scene = traverse_obj( public_data, ('price', 'is_available_for_free', {bool}), default=False) metadata = { 'id': video_id, 
**traverse_obj(public_data, { 'title': ('scene_name', {str.strip}), 'description': ('small_description', {str.strip}), 'uploader': 'studio', 'duration': ('runtime', {parse_duration}), 'cast': (('models', 'male_models'), ..., 'model_name'), 'thumbnail': ('player_poster', {url_or_none}), 'tags': ('niches', ..., 'niche'), }), 'age_limit': 18, 'timestamp': parse_iso8601(traverse_obj( public_data, ('studios', 'release_date'), 'publish_date')), 'availability': self._availability(needs_auth=True, needs_premium=not is_free_scene), 'subtitles': subtitles, } if not public_data.get('is_purchased') or not is_free_scene: self.raise_login_required( 'You are either not logged in or do not have access to this scene', metadata_available=True) return metadata media_id = traverse_obj(public_data, ( 'medias', lambda _, v: v['title'] == 'Full video', 'media_id', {int}), get_all=False) if not media_id: self.raise_no_formats('Could not find stream id', video_id=video_id) stream_data = self._download_json( f'https://pornbox.com/media/{media_id}/stream', video_id=video_id, note='Getting manifest urls') get_quality = qualities(['web', 'vga', 'hd', '1080p', '4k', '8k']) metadata['formats'] = traverse_obj(stream_data, ('qualities', lambda _, v: v['src'], { 'url': 'src', 'vbr': ('bitrate', {int_or_none(scale=1000)}), 'format_id': ('quality', {str_or_none}), 'quality': ('quality', {get_quality}), 'width': ('size', {lambda x: int(x[:-1])}), })) return metadata
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/periscope.py
yt_dlp/extractor/periscope.py
from .common import InfoExtractor from ..utils import ( int_or_none, parse_iso8601, unescapeHTML, ) from ..utils.traversal import traverse_obj class PeriscopeBaseIE(InfoExtractor): _M3U8_HEADERS = { 'Referer': 'https://www.periscope.tv/', } def _call_api(self, method, query, item_id): return self._download_json( f'https://api.periscope.tv/api/v2/{method}', item_id, query=query) def _parse_broadcast_data(self, broadcast, video_id): title = broadcast.get('status') or 'Periscope Broadcast' uploader = broadcast.get('user_display_name') or broadcast.get('username') title = f'{uploader} - {title}' if uploader else title thumbnails = [{ 'url': broadcast[image], } for image in ('image_url', 'image_url_medium', 'image_url_small') if broadcast.get(image)] return { 'id': broadcast.get('id') or video_id, 'title': title, 'timestamp': parse_iso8601(broadcast.get('created_at')) or int_or_none( broadcast.get('created_at_ms'), scale=1000), 'release_timestamp': int_or_none(broadcast.get('scheduled_start_ms'), scale=1000), 'uploader': uploader, 'uploader_id': broadcast.get('user_id') or broadcast.get('username'), 'thumbnails': thumbnails, 'view_count': int_or_none(broadcast.get('total_watched')), 'concurrent_view_count': int_or_none(broadcast.get('total_watching')), 'tags': broadcast.get('tags'), 'live_status': { 'running': 'is_live', 'not_started': 'is_upcoming', }.get(traverse_obj(broadcast, ('state', {str.lower}))) or 'was_live', } @staticmethod def _extract_common_format_info(broadcast): return broadcast.get('state').lower(), int_or_none(broadcast.get('width')), int_or_none(broadcast.get('height')) @staticmethod def _add_width_and_height(f, width, height): for key, val in (('width', width), ('height', height)): if not f.get(key): f[key] = val def _extract_pscp_m3u8_formats(self, m3u8_url, video_id, format_id, state, width, height, fatal=True): m3u8_formats = self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native' if state in ('ended', 'timed_out') else 
'm3u8', m3u8_id=format_id, fatal=fatal, headers=self._M3U8_HEADERS) if len(m3u8_formats) == 1: self._add_width_and_height(m3u8_formats[0], width, height) for f in m3u8_formats: f.setdefault('http_headers', {}).update(self._M3U8_HEADERS) return m3u8_formats class PeriscopeIE(PeriscopeBaseIE): IE_DESC = 'Periscope' IE_NAME = 'periscope' _VALID_URL = r'https?://(?:www\.)?(?:periscope|pscp)\.tv/[^/]+/(?P<id>[^/?#]+)' _EMBED_REGEX = [r'<iframe[^>]+src=([\'"])(?P<url>(?:https?:)?//(?:www\.)?(?:periscope|pscp)\.tv/(?:(?!\1).)+)\1'] # Alive example URLs can be found here https://www.periscope.tv/ _TESTS = [{ 'url': 'https://www.periscope.tv/w/aJUQnjY3MjA3ODF8NTYxMDIyMDl2zCg2pECBgwTqRpQuQD352EMPTKQjT4uqlM3cgWFA-g==', 'md5': '65b57957972e503fcbbaeed8f4fa04ca', 'info_dict': { 'id': '56102209', 'ext': 'mp4', 'title': 'Bec Boop - 🚠✈️🇬🇧 Fly above #London in Emirates Air Line cable car at night 🇬🇧✈️🚠 #BoopScope 🎀💗', 'timestamp': 1438978559, 'upload_date': '20150807', 'uploader': 'Bec Boop', 'uploader_id': '1465763', }, 'skip': 'Expires in 24 hours', }, { 'url': 'https://www.periscope.tv/w/1ZkKzPbMVggJv', 'only_matching': True, }, { 'url': 'https://www.periscope.tv/bastaakanoggano/1OdKrlkZZjOJX', 'only_matching': True, }, { 'url': 'https://www.periscope.tv/w/1ZkKzPbMVggJv', 'only_matching': True, }] def _real_extract(self, url): token = self._match_id(url) stream = self._call_api( 'accessVideoPublic', {'broadcast_id': token}, token) broadcast = stream['broadcast'] info = self._parse_broadcast_data(broadcast, token) state = broadcast.get('state').lower() width = int_or_none(broadcast.get('width')) height = int_or_none(broadcast.get('height')) def add_width_and_height(f): for key, val in (('width', width), ('height', height)): if not f.get(key): f[key] = val video_urls = set() formats = [] for format_id in ('replay', 'rtmp', 'hls', 'https_hls', 'lhls', 'lhlsweb'): video_url = stream.get(format_id + '_url') if not video_url or video_url in video_urls: continue 
video_urls.add(video_url) if format_id != 'rtmp': m3u8_formats = self._extract_pscp_m3u8_formats( video_url, token, format_id, state, width, height, False) formats.extend(m3u8_formats) continue rtmp_format = { 'url': video_url, 'ext': 'flv' if format_id == 'rtmp' else 'mp4', } self._add_width_and_height(rtmp_format) formats.append(rtmp_format) info['formats'] = formats return info class PeriscopeUserIE(PeriscopeBaseIE): _VALID_URL = r'https?://(?:www\.)?(?:periscope|pscp)\.tv/(?P<id>[^/]+)/?$' IE_DESC = 'Periscope user videos' IE_NAME = 'periscope:user' _TEST = { 'url': 'https://www.periscope.tv/LularoeHusbandMike/', 'info_dict': { 'id': 'LularoeHusbandMike', 'title': 'LULAROE HUSBAND MIKE', 'description': 'md5:6cf4ec8047768098da58e446e82c82f0', }, # Periscope only shows videos in the last 24 hours, so it's possible to # get 0 videos 'playlist_mincount': 0, } def _real_extract(self, url): user_name = self._match_id(url) webpage = self._download_webpage(url, user_name) data_store = self._parse_json( unescapeHTML(self._search_regex( r'data-store=(["\'])(?P<data>.+?)\1', webpage, 'data store', default='{}', group='data')), user_name) user = next(iter(data_store['UserCache']['users'].values()))['user'] user_id = user['id'] session_id = data_store['SessionToken']['public']['broadcastHistory']['token']['session_id'] broadcasts = self._call_api( 'getUserBroadcastsPublic', {'user_id': user_id, 'session_id': session_id}, user_name)['broadcasts'] broadcast_ids = [ broadcast['id'] for broadcast in broadcasts if broadcast.get('id')] title = user.get('display_name') or user.get('username') or user_name description = user.get('description') entries = [ self.url_result( f'https://www.periscope.tv/{user_name}/{broadcast_id}') for broadcast_id in broadcast_ids] return self.playlist_result(entries, user_id, title, description)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/sendtonews.py
yt_dlp/extractor/sendtonews.py
import re from .common import InfoExtractor from ..utils import ( determine_protocol, float_or_none, int_or_none, parse_iso8601, unescapeHTML, update_url_query, ) class SendtoNewsIE(InfoExtractor): _WORKING = False _VALID_URL = r'https?://embed\.sendtonews\.com/player2/embedplayer\.php\?.*\bSC=(?P<id>[0-9A-Za-z-]+)' _TEST = { # From http://cleveland.cbslocal.com/2016/05/16/indians-score-season-high-15-runs-in-blowout-win-over-reds-rapid-reaction/ 'url': 'http://embed.sendtonews.com/player2/embedplayer.php?SC=GxfCe0Zo7D-175909-5588&type=single&autoplay=on&sound=YES', 'info_dict': { 'id': 'GxfCe0Zo7D-175909-5588', }, 'playlist_count': 8, # test the first video only to prevent lengthy tests 'playlist': [{ 'info_dict': { 'id': '240385', 'ext': 'mp4', 'title': 'Indians introduce Encarnacion', 'description': 'Indians president of baseball operations Chris Antonetti and Edwin Encarnacion discuss the slugger\'s three-year contract with Cleveland', 'duration': 137.898, 'thumbnail': r're:https?://.*\.jpg$', 'upload_date': '20170105', 'timestamp': 1483649762, }, }], 'params': { # m3u8 download 'skip_download': True, }, } _URL_TEMPLATE = '//embed.sendtonews.com/player2/embedplayer.php?SC=%s' @classmethod def _extract_embed_urls(cls, url, webpage): mobj = re.search(r'''(?x)<script[^>]+src=([\'"]) (?:https?:)?//embed\.sendtonews\.com/player/responsiveembed\.php\? 
.*\bSC=(?P<SC>[0-9a-zA-Z-]+).* \1>''', webpage) if mobj: sc = mobj.group('SC') yield cls._URL_TEMPLATE % sc def _real_extract(self, url): playlist_id = self._match_id(url) data_url = update_url_query( url.replace('embedplayer.php', 'data_read.php'), {'cmd': 'loadInitial'}) playlist_data = self._download_json(data_url, playlist_id) entries = [] for video in playlist_data['playlistData'][0]: info_dict = self._parse_jwplayer_data( video['jwconfiguration'], require_title=False, m3u8_id='hls', rtmp_params={'no_resume': True}) for f in info_dict['formats']: if f.get('tbr'): continue tbr = int_or_none(self._search_regex( r'/(\d+)k/', f['url'], 'bitrate', default=None)) if not tbr: continue f.update({ 'format_id': f'{determine_protocol(f)}-{tbr}', 'tbr': tbr, }) thumbnails = [] if video.get('thumbnailUrl'): thumbnails.append({ 'id': 'normal', 'url': video['thumbnailUrl'], }) if video.get('smThumbnailUrl'): thumbnails.append({ 'id': 'small', 'url': video['smThumbnailUrl'], }) info_dict.update({ 'title': video['S_headLine'].strip(), 'description': unescapeHTML(video.get('S_fullStory')), 'thumbnails': thumbnails, 'duration': float_or_none(video.get('SM_length')), 'timestamp': parse_iso8601(video.get('S_sysDate'), delimiter=' '), # 'tbr' was explicitly set to be preferred over 'height' originally, # So this is being kept unless someone can confirm this is unnecessary '_format_sort_fields': ('tbr', 'res'), }) entries.append(info_dict) return self.playlist_result(entries, playlist_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/freesound.py
yt_dlp/extractor/freesound.py
import re from .common import InfoExtractor from ..utils import ( float_or_none, get_element_by_class, get_element_by_id, unified_strdate, ) class FreesoundIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?freesound\.org/people/[^/]+/sounds/(?P<id>[^/]+)' _TEST = { 'url': 'http://www.freesound.org/people/miklovan/sounds/194503/', 'md5': '12280ceb42c81f19a515c745eae07650', 'info_dict': { 'id': '194503', 'ext': 'mp3', 'title': 'gulls in the city.wav', 'description': 'the sounds of seagulls in the city', 'duration': 130.233, 'uploader': 'miklovan', 'upload_date': '20130715', 'tags': list, }, } def _real_extract(self, url): audio_id = self._match_id(url) webpage = self._download_webpage(url, audio_id) audio_url = self._og_search_property('audio', webpage, 'song url') title = self._og_search_property('audio:title', webpage, 'song title') description = self._html_search_regex( r'(?s)id=["\']sound_description["\'][^>]*>(.+?)</div>', webpage, 'description', fatal=False) duration = float_or_none( get_element_by_class('duration', webpage), scale=1000) upload_date = unified_strdate(get_element_by_id('sound_date', webpage)) uploader = self._og_search_property( 'audio:artist', webpage, 'uploader', fatal=False) channels = self._html_search_regex( r'Channels</dt><dd>(.+?)</dd>', webpage, 'channels info', fatal=False) tags_str = get_element_by_class('tags', webpage) tags = re.findall(r'<a[^>]+>([^<]+)', tags_str) if tags_str else None audio_url = re.sub(r'^https?://freesound\.org(https?://)', r'\1', audio_url) audio_urls = [audio_url] LQ_FORMAT = '-lq.mp3' if LQ_FORMAT in audio_url: audio_urls.append(audio_url.replace(LQ_FORMAT, '-hq.mp3')) formats = [{ 'url': format_url, 'format_note': channels, 'quality': quality, } for quality, format_url in enumerate(audio_urls)] return { 'id': audio_id, 'title': title, 'description': description, 'duration': duration, 'uploader': uploader, 'upload_date': upload_date, 'tags': tags, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/rtnews.py
yt_dlp/extractor/rtnews.py
import re from .common import InfoExtractor from ..utils import js_to_json class RTNewsIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?rt\.com/[^/]+/(?:[^/]+/)?(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.rt.com/sport/546301-djokovic-arrives-belgrade-crowds/', 'playlist_mincount': 2, 'info_dict': { 'id': '546301', 'title': 'Crowds gather to greet deported Djokovic as he returns to Serbia (VIDEO)', 'description': 'md5:1d5bfe1a988d81fd74227cfdf93d314d', 'thumbnail': 'https://cdni.rt.com/files/2022.01/article/61e587a085f540102c3386c1.png', }, }, { 'url': 'https://www.rt.com/shows/in-question/535980-plot-to-assassinate-julian-assange/', 'playlist_mincount': 1, 'info_dict': { 'id': '535980', 'title': 'The plot to assassinate Julian Assange', 'description': 'md5:55279ce5e4441dc1d16e2e4a730152cd', 'thumbnail': 'https://cdni.rt.com/files/2021.09/article/615226f42030274e8879b53d.png', }, 'playlist': [{ 'info_dict': { 'id': '6152271d85f5400464496162', 'ext': 'mp4', 'title': '6152271d85f5400464496162', }, }], }] def _entries(self, webpage): video_urls = set(re.findall(r'https://cdnv\.rt\.com/.*[a-f0-9]+\.mp4', webpage)) for v_url in video_urls: v_id = re.search(r'([a-f0-9]+)\.mp4', v_url).group(1) if v_id: yield { 'id': v_id, 'title': v_id, 'url': v_url, } def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) return { '_type': 'playlist', 'id': playlist_id, 'entries': self._entries(webpage), 'title': self._og_search_title(webpage), 'description': self._og_search_description(webpage), 'thumbnail': self._og_search_thumbnail(webpage), } class RTDocumentryIE(InfoExtractor): _VALID_URL = r'https?://rtd\.rt\.com/(?:(?:series|shows)/[^/]+|films)/(?P<id>[^/?$&#]+)' _TESTS = [{ 'url': 'https://rtd.rt.com/films/escobars-hitman/', 'info_dict': { 'id': 'escobars-hitman', 'ext': 'mp4', 'title': "Escobar's Hitman. 
Former drug-gang killer, now loved and loathed in Colombia", 'description': 'md5:647c76984b7cb9a8b52a567e87448d88', 'thumbnail': 'https://cdni.rt.com/rtd-files/films/escobars-hitman/escobars-hitman_11.jpg', 'average_rating': 8.53, 'duration': 3134.0, }, 'params': {'skip_download': True}, }, { 'url': 'https://rtd.rt.com/shows/the-kalashnikova-show-military-secrets-anna-knishenko/iskander-tactical-system-natos-headache/', 'info_dict': { 'id': 'iskander-tactical-system-natos-headache', 'ext': 'mp4', 'title': "Iskander tactical system. NATO's headache | The Kalashnikova Show. Episode 10", 'description': 'md5:da7c24a0aa67bc2bb88c86658508ca87', 'thumbnail': 'md5:89de8ce38c710b7c501ff02d47e2aa89', 'average_rating': 9.27, 'duration': 274.0, 'timestamp': 1605726000, 'view_count': int, 'upload_date': '20201118', }, 'params': {'skip_download': True}, }, { 'url': 'https://rtd.rt.com/series/i-am-hacked-trailer/introduction-to-safe-digital-life-ep2/', 'info_dict': { 'id': 'introduction-to-safe-digital-life-ep2', 'ext': 'mp4', 'title': 'How to Keep your Money away from Hackers | I am Hacked. 
Episode 2', 'description': 'md5:c46fa9a5af86c0008c45a3940a8cce87', 'thumbnail': 'md5:a5e81b9bf5aed8f5e23d9c053601b825', 'average_rating': 10.0, 'duration': 1524.0, 'timestamp': 1636977600, 'view_count': int, 'upload_date': '20211115', }, 'params': {'skip_download': True}, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) ld_json = self._search_json_ld(webpage, None, fatal=False) if not ld_json: self.raise_no_formats('No video/audio found at the provided url.', expected=True) media_json = self._parse_json( self._search_regex(r'(?s)\'Med\'\s*:\s*\[\s*({.+})\s*\]\s*};', webpage, 'media info'), video_id, transform_source=js_to_json) if 'title' not in ld_json and 'title' in media_json: ld_json['title'] = media_json['title'] formats = [{'url': src['file']} for src in media_json.get('sources') or [] if src.get('file')] return { 'id': video_id, 'thumbnail': media_json.get('image'), 'formats': formats, **ld_json, } class RTDocumentryPlaylistIE(InfoExtractor): _VALID_URL = r'https?://rtd\.rt\.com/(?:series|shows)/(?P<id>[^/]+)/$' _TESTS = [{ 'url': 'https://rtd.rt.com/series/i-am-hacked-trailer/', 'playlist_mincount': 6, 'info_dict': { 'id': 'i-am-hacked-trailer', }, }, { 'url': 'https://rtd.rt.com/shows/the-kalashnikova-show-military-secrets-anna-knishenko/', 'playlist_mincount': 34, 'info_dict': { 'id': 'the-kalashnikova-show-military-secrets-anna-knishenko', }, }] def _entries(self, webpage, playlist_id): video_urls = set(re.findall(r'list-2__link\s*"\s*href="([^"]+)"', webpage)) for v_url in video_urls: if playlist_id not in v_url: continue yield self.url_result( f'https://rtd.rt.com{v_url}', ie=RTDocumentryIE.ie_key()) def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) return { '_type': 'playlist', 'id': playlist_id, 'entries': self._entries(webpage, playlist_id), } class RuptlyIE(InfoExtractor): _VALID_URL = 
r'https?://(?:www\.)?ruptly\.tv/[a-z]{2}/videos/(?P<id>\d+-\d+)' _TESTS = [{ 'url': 'https://www.ruptly.tv/en/videos/20220112-020-Japan-Double-trouble-Tokyo-zoo-presents-adorable-panda-twins', 'info_dict': { 'id': '20220112-020', 'ext': 'mp4', 'title': 'Japan: Double trouble! Tokyo zoo presents adorable panda twins | Video Ruptly', 'description': 'md5:85a8da5fdb31486f0562daf4360ce75a', 'thumbnail': 'https://storage.ruptly.tv/thumbnails/20220112-020/i6JQKnTNpYuqaXsR/i6JQKnTNpYuqaXsR.jpg', }, 'params': {'skip_download': True}, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) m3u8_url = self._search_regex(r'preview_url"\s?:\s?"(https?://storage\.ruptly\.tv/video_projects/.+\.m3u8)"', webpage, 'm3u8 url', fatal=False) if not m3u8_url: self.raise_no_formats('No video/audio found at the provided url.', expected=True) formats, subs = self._extract_m3u8_formats_and_subtitles(m3u8_url, video_id, ext='mp4') return { 'id': video_id, 'formats': formats, 'subtitles': subs, 'title': self._og_search_title(webpage), 'description': self._og_search_description(webpage), 'thumbnail': self._og_search_thumbnail(webpage), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/iqiyi.py
yt_dlp/extractor/iqiyi.py
import hashlib import itertools import re import time import urllib.parse from .common import InfoExtractor from .openload import PhantomJSwrapper from ..utils import ( ExtractorError, clean_html, float_or_none, format_field, get_element_by_attribute, get_element_by_id, int_or_none, js_to_json, parse_age_limit, parse_duration, parse_iso8601, parse_resolution, qualities, remove_start, str_or_none, traverse_obj, urljoin, ) def md5_text(text): return hashlib.md5(text.encode()).hexdigest() class IqiyiIE(InfoExtractor): IE_NAME = 'iqiyi' IE_DESC = '爱奇艺' _VALID_URL = r'https?://(?:(?:[^.]+\.)?iqiyi\.com|www\.pps\.tv)/.+\.html' _TESTS = [{ 'url': 'http://www.iqiyi.com/v_19rrojlavg.html', # MD5 checksum differs on my machine and Travis CI 'info_dict': { 'id': '9c1fb1b99d192b21c559e5a1a2cb3c73', 'ext': 'mp4', 'title': '美国德州空中惊现奇异云团 酷似UFO', }, }, { 'url': 'http://www.iqiyi.com/v_19rrhnnclk.html', 'md5': 'b7dc800a4004b1b57749d9abae0472da', 'info_dict': { 'id': 'e3f585b550a280af23c98b6cb2be19fb', 'ext': 'mp4', # This can be either Simplified Chinese or Traditional Chinese 'title': r're:^(?:名侦探柯南 国语版:第752集 迫近灰原秘密的黑影 下篇|名偵探柯南 國語版:第752集 迫近灰原秘密的黑影 下篇)$', }, 'skip': 'Geo-restricted to China', }, { 'url': 'http://www.iqiyi.com/w_19rt6o8t9p.html', 'only_matching': True, }, { 'url': 'http://www.iqiyi.com/a_19rrhbc6kt.html', 'only_matching': True, }, { 'url': 'http://yule.iqiyi.com/pcb.html', 'info_dict': { 'id': '4a0af228fddb55ec96398a364248ed7f', 'ext': 'mp4', 'title': '第2017-04-21期 女艺人频遭极端粉丝骚扰', }, }, { # VIP-only video. 
The first 2 parts (6 minutes) are available without login # MD5 sums omitted as values are different on Travis CI and my machine 'url': 'http://www.iqiyi.com/v_19rrny4w8w.html', 'info_dict': { 'id': 'f3cf468b39dddb30d676f89a91200dc1', 'ext': 'mp4', 'title': '泰坦尼克号', }, 'skip': 'Geo-restricted to China', }, { 'url': 'http://www.iqiyi.com/a_19rrhb8ce1.html', 'info_dict': { 'id': '202918101', 'title': '灌篮高手 国语版', }, 'playlist_count': 101, }, { 'url': 'http://www.pps.tv/w_19rrbav0ph.html', 'only_matching': True, }] _FORMATS_MAP = { '96': 1, # 216p, 240p '1': 2, # 336p, 360p '2': 3, # 480p, 504p '21': 4, # 504p '4': 5, # 720p '17': 5, # 720p '5': 6, # 1072p, 1080p '18': 7, # 1080p } def get_raw_data(self, tvid, video_id): tm = int(time.time() * 1000) key = 'd5fb4bd9d50c4be6948c97edd7254b0e' sc = md5_text(str(tm) + key + tvid) params = { 'tvid': tvid, 'vid': video_id, 'src': '76f90cbd92f94a2e925d83e8ccd22cb7', 'sc': sc, 't': tm, } return self._download_json( f'http://cache.m.iqiyi.com/jp/tmts/{tvid}/{video_id}/', video_id, transform_source=lambda s: remove_start(s, 'var tvInfoJs='), query=params, headers=self.geo_verification_headers()) def _extract_playlist(self, webpage): PAGE_SIZE = 50 links = re.findall( r'<a[^>]+class="site-piclist_pic_link"[^>]+href="(http://www\.iqiyi\.com/.+\.html)"', webpage) if not links: return album_id = self._search_regex( r'albumId\s*:\s*(\d+),', webpage, 'album ID') album_title = self._search_regex( r'data-share-title="([^"]+)"', webpage, 'album title', fatal=False) entries = list(map(self.url_result, links)) # Start from 2 because links in the first page are already on webpage for page_num in itertools.count(2): pagelist_page = self._download_webpage( f'http://cache.video.qiyi.com/jp/avlist/{album_id}/{page_num}/{PAGE_SIZE}/', album_id, note=f'Download playlist page {page_num}', errnote=f'Failed to download playlist page {page_num}') pagelist = self._parse_json( remove_start(pagelist_page, 'var tvInfoJs='), album_id) vlist = 
pagelist['data']['vlist'] for item in vlist: entries.append(self.url_result(item['vurl'])) if len(vlist) < PAGE_SIZE: break return self.playlist_result(entries, album_id, album_title) def _real_extract(self, url): webpage = self._download_webpage( url, 'temp_id', note='download video page') # There's no simple way to determine whether an URL is a playlist or not # Sometimes there are playlist links in individual videos, so treat it # as a single video first tvid = self._search_regex( r'data-(?:player|shareplattrigger)-tvid\s*=\s*[\'"](\d+)', webpage, 'tvid', default=None) if tvid is None: playlist_result = self._extract_playlist(webpage) if playlist_result: return playlist_result raise ExtractorError('Can\'t find any video') video_id = self._search_regex( r'data-(?:player|shareplattrigger)-videoid\s*=\s*[\'"]([a-f\d]+)', webpage, 'video_id') formats = [] for _ in range(5): raw_data = self.get_raw_data(tvid, video_id) if raw_data['code'] != 'A00000': if raw_data['code'] == 'A00111': self.raise_geo_restricted() raise ExtractorError('Unable to load data. 
Error code: ' + raw_data['code']) data = raw_data['data'] for stream in data['vidl']: if 'm3utx' not in stream: continue vd = str(stream['vd']) formats.append({ 'url': stream['m3utx'], 'format_id': vd, 'ext': 'mp4', 'quality': self._FORMATS_MAP.get(vd, -1), 'protocol': 'm3u8_native', }) if formats: break self._sleep(5, video_id) title = (get_element_by_id('widget-videotitle', webpage) or clean_html(get_element_by_attribute('class', 'mod-play-tit', webpage)) or self._html_search_regex(r'<span[^>]+data-videochanged-title="word"[^>]*>([^<]+)</span>', webpage, 'title')) return { 'id': video_id, 'title': title, 'formats': formats, } class IqIE(InfoExtractor): IE_NAME = 'iq.com' IE_DESC = 'International version of iQiyi' _VALID_URL = r'https?://(?:www\.)?iq\.com/play/(?:[\w%-]*-)?(?P<id>\w+)' _TESTS = [{ 'url': 'https://www.iq.com/play/one-piece-episode-1000-1ma1i6ferf4', 'md5': '2d7caf6eeca8a32b407094b33b757d39', 'info_dict': { 'ext': 'mp4', 'id': '1ma1i6ferf4', 'title': '航海王 第1000集', 'description': 'Subtitle available on Sunday 4PM(GMT+8).', 'duration': 1430, 'timestamp': 1637488203, 'upload_date': '20211121', 'episode_number': 1000, 'episode': 'Episode 1000', 'series': 'One Piece', 'age_limit': 13, 'average_rating': float, }, 'params': { 'format': '500', }, 'expected_warnings': ['format is restricted'], }, { # VIP-restricted video 'url': 'https://www.iq.com/play/mermaid-in-the-fog-2021-gbdpx13bs4', 'only_matching': True, }] _BID_TAGS = { '100': '240P', '200': '360P', '300': '480P', '500': '720P', '600': '1080P', '610': '1080P50', '700': '2K', '800': '4K', } _LID_TAGS = { '1': 'zh_CN', '2': 'zh_TW', '3': 'en', '4': 'ko', '5': 'ja', '18': 'th', '21': 'my', '23': 'vi', '24': 'id', '26': 'es', '27': 'pt', '28': 'ar', } _DASH_JS = ''' console.log(page.evaluate(function() { var tvid = "%(tvid)s"; var vid = "%(vid)s"; var src = "%(src)s"; var uid = "%(uid)s"; var dfp = "%(dfp)s"; var mode = "%(mode)s"; var lang = "%(lang)s"; var bid_list = %(bid_list)s; var ut_list = 
%(ut_list)s; var tm = new Date().getTime(); var cmd5x_func = %(cmd5x_func)s; var cmd5x_exporter = {}; cmd5x_func({}, cmd5x_exporter, {}); var cmd5x = cmd5x_exporter.cmd5x; var authKey = cmd5x(cmd5x('') + tm + '' + tvid); var k_uid = Array.apply(null, Array(32)).map(function() {return Math.floor(Math.random() * 15).toString(16)}).join(''); var dash_paths = {}; bid_list.forEach(function(bid) { var query = { 'tvid': tvid, 'bid': bid, 'ds': 1, 'vid': vid, 'src': src, 'vt': 0, 'rs': 1, 'uid': uid, 'ori': 'pcw', 'ps': 1, 'k_uid': k_uid, 'pt': 0, 'd': 0, 's': '', 'lid': '', 'slid': 0, 'cf': '', 'ct': '', 'authKey': authKey, 'k_tag': 1, 'ost': 0, 'ppt': 0, 'dfp': dfp, 'prio': JSON.stringify({ 'ff': 'f4v', 'code': 2 }), 'k_err_retries': 0, 'up': '', 'su': 2, 'applang': lang, 'sver': 2, 'X-USER-MODE': mode, 'qd_v': 2, 'tm': tm, 'qdy': 'a', 'qds': 0, 'k_ft1': '143486267424900', 'k_ft4': '1572868', 'k_ft7': '4', 'k_ft5': '1', 'bop': JSON.stringify({ 'version': '10.0', 'dfp': dfp }), }; var enc_params = []; for (var prop in query) { enc_params.push(encodeURIComponent(prop) + '=' + encodeURIComponent(query[prop])); } ut_list.forEach(function(ut) { enc_params.push('ut=' + ut); }) var dash_path = '/dash?' 
+ enc_params.join('&'); dash_path += '&vf=' + cmd5x(dash_path); dash_paths[bid] = dash_path; }); return JSON.stringify(dash_paths); })); saveAndExit(); ''' def _extract_vms_player_js(self, webpage, video_id): player_js_cache = self.cache.load('iq', 'player_js') if player_js_cache: return player_js_cache webpack_js_url = self._proto_relative_url(self._search_regex( r'<script src="((?:https?:)?//stc\.iqiyipic\.com/_next/static/chunks/webpack-\w+\.js)"', webpage, 'webpack URL')) webpack_js = self._download_webpage(webpack_js_url, video_id, note='Downloading webpack JS', errnote='Unable to download webpack JS') webpack_map = self._search_json( r'["\']\s*\+\s*', webpack_js, 'JS locations', video_id, contains_pattern=r'{\s*(?:\d+\s*:\s*["\'][\da-f]+["\']\s*,?\s*)+}', end_pattern=r'\[\w+\]\+["\']\.js', transform_source=js_to_json) replacement_map = self._search_json( r'["\']\s*\+\(\s*', webpack_js, 'replacement map', video_id, contains_pattern=r'{\s*(?:\d+\s*:\s*["\'][\w.-]+["\']\s*,?\s*)+}', end_pattern=r'\[\w+\]\|\|\w+\)\+["\']\.', transform_source=js_to_json, fatal=False) or {} for module_index in reversed(webpack_map): real_module = replacement_map.get(module_index) or module_index module_js = self._download_webpage( f'https://stc.iqiyipic.com/_next/static/chunks/{real_module}.{webpack_map[module_index]}.js', video_id, note=f'Downloading #{module_index} module JS', errnote='Unable to download module JS', fatal=False) or '' if 'vms request' in module_js: self.cache.store('iq', 'player_js', module_js) return module_js raise ExtractorError('Unable to extract player JS') def _extract_cmd5x_function(self, webpage, video_id): return self._search_regex(r',\s*(function\s*\([^\)]*\)\s*{\s*var _qda.+_qdc\(\)\s*})\s*,', self._extract_vms_player_js(webpage, video_id), 'signature function') def _update_bid_tags(self, webpage, video_id): extracted_bid_tags = self._search_json( r'function\s*\([^)]*\)\s*\{\s*"use strict";?\s*var \w\s*=\s*', self._extract_vms_player_js(webpage, 
video_id), 'video tags', video_id, contains_pattern=r'{\s*\d+\s*:\s*\{\s*nbid\s*:.+}\s*}', end_pattern=r'\s*,\s*\w\s*=\s*\{\s*getNewVd', fatal=False, transform_source=js_to_json) if not extracted_bid_tags: return self._BID_TAGS = { bid: traverse_obj(extracted_bid_tags, (bid, 'value'), expected_type=str, default=self._BID_TAGS.get(bid)) for bid in extracted_bid_tags } def _get_cookie(self, name, default=None): cookie = self._get_cookies('https://iq.com/').get(name) return cookie.value if cookie else default def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) self._update_bid_tags(webpage, video_id) next_props = self._search_nextjs_data(webpage, video_id)['props'] page_data = next_props['initialState']['play'] video_info = page_data['curVideoInfo'] uid = traverse_obj( self._parse_json( self._get_cookie('I00002', '{}'), video_id, transform_source=urllib.parse.unquote, fatal=False), ('data', 'uid'), default=0) if uid: vip_data = self._download_json( 'https://pcw-api.iq.com/api/vtype', video_id, note='Downloading VIP data', errnote='Unable to download VIP data', query={ 'batch': 1, 'platformId': 3, 'modeCode': self._get_cookie('mod', 'intl'), 'langCode': self._get_cookie('lang', 'en_us'), 'deviceId': self._get_cookie('QC005', ''), }, fatal=False) ut_list = traverse_obj(vip_data, ('data', 'all_vip', ..., 'vipType'), expected_type=str_or_none) else: ut_list = ['0'] # bid 0 as an initial format checker dash_paths = self._parse_json(PhantomJSwrapper(self, timeout=120_000).get( url, note2='Executing signature code (this may take a couple minutes)', html='<!DOCTYPE html>', video_id=video_id, jscode=self._DASH_JS % { 'tvid': video_info['tvId'], 'vid': video_info['vid'], 'src': traverse_obj(next_props, ('initialProps', 'pageProps', 'ptid'), expected_type=str, default='04022001010011000000'), 'uid': uid, 'dfp': self._get_cookie('dfp', ''), 'mode': self._get_cookie('mod', 'intl'), 'lang': self._get_cookie('lang', 'en_us'), 
'bid_list': '[' + ','.join(['0', *self._BID_TAGS.keys()]) + ']', 'ut_list': '[' + ','.join(ut_list) + ']', 'cmd5x_func': self._extract_cmd5x_function(webpage, video_id), })[1].strip(), video_id) formats, subtitles = [], {} initial_format_data = self._download_json( urljoin('https://cache-video.iq.com', dash_paths['0']), video_id, note='Downloading initial video format info', errnote='Unable to download initial video format info')['data'] preview_time = traverse_obj( initial_format_data, ('boss_ts', (None, 'data'), ('previewTime', 'rtime')), expected_type=float_or_none, get_all=False) if traverse_obj(initial_format_data, ('boss_ts', 'data', 'prv'), expected_type=int_or_none): self.report_warning('This preview video is limited{}'.format(format_field(preview_time, None, ' to %s seconds'))) # TODO: Extract audio-only formats for bid in set(traverse_obj(initial_format_data, ('program', 'video', ..., 'bid'), expected_type=str_or_none)): dash_path = dash_paths.get(bid) if not dash_path: self.report_warning(f'Unknown format id: {bid}. 
It is currently not being extracted') continue format_data = traverse_obj(self._download_json( urljoin('https://cache-video.iq.com', dash_path), video_id, note=f'Downloading format data for {self._BID_TAGS[bid]}', errnote='Unable to download format data', fatal=False), 'data', expected_type=dict) video_format = traverse_obj(format_data, ('program', 'video', lambda _, v: str(v['bid']) == bid), expected_type=dict, get_all=False) or {} extracted_formats = [] if video_format.get('m3u8Url'): extracted_formats.extend(self._extract_m3u8_formats( urljoin(format_data.get('dm3u8', 'https://cache-m.iq.com/dc/dt/'), video_format['m3u8Url']), 'mp4', m3u8_id=bid, fatal=False)) if video_format.get('mpdUrl'): # TODO: Properly extract mpd hostname extracted_formats.extend(self._extract_mpd_formats( urljoin(format_data.get('dm3u8', 'https://cache-m.iq.com/dc/dt/'), video_format['mpdUrl']), mpd_id=bid, fatal=False)) if video_format.get('m3u8'): ff = video_format.get('ff', 'ts') if ff == 'ts': m3u8_formats, _ = self._parse_m3u8_formats_and_subtitles( video_format['m3u8'], ext='mp4', m3u8_id=bid, fatal=False) extracted_formats.extend(m3u8_formats) elif ff == 'm4s': mpd_data = traverse_obj( self._parse_json(video_format['m3u8'], video_id, fatal=False), ('payload', ..., 'data'), expected_type=str) if not mpd_data: continue mpd_formats, _ = self._parse_mpd_formats_and_subtitles( mpd_data, bid, format_data.get('dm3u8', 'https://cache-m.iq.com/dc/dt/')) extracted_formats.extend(mpd_formats) else: self.report_warning(f'{ff} formats are currently not supported') if not extracted_formats: if video_format.get('s'): self.report_warning(f'{self._BID_TAGS[bid]} format is restricted') else: self.report_warning(f'Unable to extract {self._BID_TAGS[bid]} format') for f in extracted_formats: f.update({ 'quality': qualities(list(self._BID_TAGS.keys()))(bid), 'format_note': self._BID_TAGS[bid], **parse_resolution(video_format.get('scrsz')), }) formats.extend(extracted_formats) for sub_format in 
traverse_obj(initial_format_data, ('program', 'stl', ...), expected_type=dict): lang = self._LID_TAGS.get(str_or_none(sub_format.get('lid')), sub_format.get('_name')) subtitles.setdefault(lang, []).extend([{ 'ext': format_ext, 'url': urljoin(initial_format_data.get('dstl', 'http://meta.video.iqiyi.com'), sub_format[format_key]), } for format_key, format_ext in [('srt', 'srt'), ('webvtt', 'vtt')] if sub_format.get(format_key)]) extra_metadata = page_data.get('albumInfo') if video_info.get('albumId') and page_data.get('albumInfo') else video_info return { 'id': video_id, 'title': video_info['name'], 'formats': formats, 'subtitles': subtitles, 'description': video_info.get('mergeDesc'), 'duration': parse_duration(video_info.get('len')), 'age_limit': parse_age_limit(video_info.get('rating')), 'average_rating': traverse_obj(page_data, ('playScoreInfo', 'score'), expected_type=float_or_none), 'timestamp': parse_iso8601(video_info.get('isoUploadDate')), 'categories': traverse_obj(extra_metadata, ('videoTagMap', ..., ..., 'name'), expected_type=str), 'cast': traverse_obj(extra_metadata, ('actorArr', ..., 'name'), expected_type=str), 'episode_number': int_or_none(video_info.get('order')) or None, 'series': video_info.get('albumName'), } class IqAlbumIE(InfoExtractor): IE_NAME = 'iq.com:album' _VALID_URL = r'https?://(?:www\.)?iq\.com/album/(?:[\w%-]*-)?(?P<id>\w+)' _TESTS = [{ 'url': 'https://www.iq.com/album/one-piece-1999-1bk9icvr331', 'info_dict': { 'id': '1bk9icvr331', 'title': 'One Piece', 'description': 'Subtitle available on Sunday 4PM(GMT+8).', }, 'playlist_mincount': 238, }, { # Movie/single video 'url': 'https://www.iq.com/album/九龙城寨-2021-22yjnij099k', 'info_dict': { 'ext': 'mp4', 'id': '22yjnij099k', 'title': '九龙城寨', 'description': 'md5:8a09f50b8ba0db4dc69bc7c844228044', 'duration': 5000, 'timestamp': 1641911371, 'upload_date': '20220111', 'series': '九龙城寨', 'cast': ['Shi Yan Neng', 'Yu Lang', 'Peter lv', 'Sun Zi Jun', 'Yang Xiao Bo'], 'age_limit': 13, 
'average_rating': float, }, 'expected_warnings': ['format is restricted'], }] def _entries(self, album_id_num, page_ranges, album_id=None, mode_code='intl', lang_code='en_us'): for page_range in page_ranges: page = self._download_json( f'https://pcw-api.iq.com/api/episodeListSource/{album_id_num}', album_id, note=f'Downloading video list episodes {page_range.get("msg", "")}', errnote='Unable to download video list', query={ 'platformId': 3, 'modeCode': mode_code, 'langCode': lang_code, 'endOrder': page_range['to'], 'startOrder': page_range['from'], }) for video in page['data']['epg']: yield self.url_result('https://www.iq.com/play/%s' % (video.get('playLocSuffix') or video['qipuIdStr']), IqIE.ie_key(), video.get('qipuIdStr'), video.get('name')) def _real_extract(self, url): album_id = self._match_id(url) webpage = self._download_webpage(url, album_id) next_data = self._search_nextjs_data(webpage, album_id) album_data = next_data['props']['initialState']['album']['videoAlbumInfo'] if album_data.get('videoType') == 'singleVideo': return self.url_result(f'https://www.iq.com/play/{album_id}', IqIE.ie_key()) return self.playlist_result( self._entries(album_data['albumId'], album_data['totalPageRange'], album_id, traverse_obj(next_data, ('props', 'initialProps', 'pageProps', 'modeCode')), traverse_obj(next_data, ('props', 'initialProps', 'pageProps', 'langCode'))), album_id, album_data.get('name'), album_data.get('desc'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/godtube.py
yt_dlp/extractor/godtube.py
from .common import InfoExtractor from ..utils import ( parse_duration, parse_iso8601, ) class GodTubeIE(InfoExtractor): _WORKING = False _VALID_URL = r'https?://(?:www\.)?godtube\.com/watch/\?v=(?P<id>[\da-zA-Z]+)' _TESTS = [ { 'url': 'https://www.godtube.com/watch/?v=0C0CNNNU', 'md5': '77108c1e4ab58f48031101a1a2119789', 'info_dict': { 'id': '0C0CNNNU', 'ext': 'mp4', 'title': 'Woman at the well.', 'duration': 159, 'timestamp': 1205712000, 'uploader': 'beverlybmusic', 'upload_date': '20080317', 'thumbnail': r're:^https?://.*\.jpg$', }, }, ] def _real_extract(self, url): mobj = self._match_valid_url(url) video_id = mobj.group('id') config = self._download_xml( f'http://www.godtube.com/resource/mediaplayer/{video_id.lower()}.xml', video_id, 'Downloading player config XML') video_url = config.find('file').text uploader = config.find('author').text timestamp = parse_iso8601(config.find('date').text) duration = parse_duration(config.find('duration').text) thumbnail = config.find('image').text media = self._download_xml( f'http://www.godtube.com/media/xml/?v={video_id}', video_id, 'Downloading media XML') title = media.find('title').text return { 'id': video_id, 'url': video_url, 'title': title, 'thumbnail': thumbnail, 'timestamp': timestamp, 'uploader': uploader, 'duration': duration, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/qqmusic.py
yt_dlp/extractor/qqmusic.py
import base64 import functools import json import random import time from .common import InfoExtractor from ..utils import ( ExtractorError, OnDemandPagedList, clean_html, int_or_none, join_nonempty, js_to_json, str_or_none, strip_jsonp, traverse_obj, url_or_none, urljoin, ) class QQMusicBaseIE(InfoExtractor): def _get_cookie(self, key, default=None): return getattr(self._get_cookies('https://y.qq.com').get(key), 'value', default) def _get_g_tk(self): n = 5381 for c in self._get_cookie('qqmusic_key', ''): n += (n << 5) + ord(c) return n & 2147483647 def _get_uin(self): return int_or_none(self._get_cookie('uin')) or 0 @property def is_logged_in(self): return bool(self._get_uin() and self._get_cookie('fqm_pvqid')) # Reference: m_r_GetRUin() in top_player.js # http://imgcache.gtimg.cn/music/portal_v3/y/top_player.js @staticmethod def _m_r_get_ruin(): cur_ms = int(time.time() * 1000) % 1000 return int(round(random.random() * 2147483647) * cur_ms % 1E10) def _download_init_data(self, url, mid, fatal=True): webpage = self._download_webpage(url, mid, fatal=fatal) return self._search_json(r'window\.__INITIAL_DATA__\s*=', webpage, 'init data', mid, transform_source=js_to_json, fatal=fatal) def _make_fcu_req(self, req_dict, mid, headers={}, **kwargs): return self._download_json( 'https://u.y.qq.com/cgi-bin/musicu.fcg', mid, data=json.dumps({ 'comm': { 'cv': 0, 'ct': 24, 'format': 'json', 'uin': self._get_uin(), }, **req_dict, }, separators=(',', ':')).encode(), headers=headers, **kwargs) class QQMusicIE(QQMusicBaseIE): IE_NAME = 'qqmusic' IE_DESC = 'QQ音乐' _VALID_URL = r'https?://y\.qq\.com/n/ryqq/songDetail/(?P<id>[0-9A-Za-z]+)' _TESTS = [{ 'url': 'https://y.qq.com/n/ryqq/songDetail/004Ti8rT003TaZ', 'md5': 'd7adc5c438d12e2cb648cca81593fd47', 'info_dict': { 'id': '004Ti8rT003TaZ', 'ext': 'mp3', 'title': '永夜のパレード (永夜的游行)', 'album': '幻想遊園郷 -Fantastic Park-', 'release_date': '20111230', 'duration': 281, 'creators': ['ケーキ姫', 'JUMA'], 'genres': ['Pop'], 'description': 
'md5:b5261f3d595657ae561e9e6aee7eb7d9', 'size': 4501244, 'thumbnail': r're:^https?://.*\.jpg(?:$|[#?])', 'subtitles': 'count:1', }, }, { 'url': 'https://y.qq.com/n/ryqq/songDetail/004295Et37taLD', 'md5': '5f1e6cea39e182857da7ffc5ef5e6bb8', 'info_dict': { 'id': '004295Et37taLD', 'ext': 'mp3', 'title': '可惜没如果', 'album': '新地球 - 人 (Special Edition)', 'release_date': '20150129', 'duration': 298, 'creators': ['林俊杰'], 'genres': ['Pop'], 'description': 'md5:f568421ff618d2066e74b65a04149c4e', 'thumbnail': r're:^https?://.*\.jpg(?:$|[#?])', }, 'skip': 'premium member only', }, { 'note': 'There is no mp3-320 version of this song.', 'url': 'https://y.qq.com/n/ryqq/songDetail/004MsGEo3DdNxV', 'md5': '028aaef1ae13d8a9f4861a92614887f9', 'info_dict': { 'id': '004MsGEo3DdNxV', 'ext': 'mp3', 'title': '如果', 'album': '新传媒电视连续剧金曲系列II', 'release_date': '20050626', 'duration': 220, 'creators': ['李季美'], 'genres': [], 'description': 'md5:fc711212aa623b28534954dc4bd67385', 'size': 3535730, 'thumbnail': r're:^https?://.*\.jpg(?:$|[#?])', }, }, { 'note': 'lyrics not in .lrc format', 'url': 'https://y.qq.com/n/ryqq/songDetail/001JyApY11tIp6', 'info_dict': { 'id': '001JyApY11tIp6', 'ext': 'mp3', 'title': 'Shadows Over Transylvania', 'release_date': '19970225', 'creator': 'Dark Funeral', 'description': 'md5:c9b20210587cbcd6836a1c597bab4525', 'thumbnail': r're:^https?://.*\.jpg(?:$|[#?])', }, 'params': {'skip_download': True}, 'skip': 'no longer available', }] _FORMATS = { 'F000': {'name': 'flac', 'prefix': 'F000', 'ext': 'flac', 'preference': 60}, 'A000': {'name': 'ape', 'prefix': 'A000', 'ext': 'ape', 'preference': 50}, 'M800': {'name': '320mp3', 'prefix': 'M800', 'ext': 'mp3', 'preference': 40, 'abr': 320}, 'M500': {'name': '128mp3', 'prefix': 'M500', 'ext': 'mp3', 'preference': 30, 'abr': 128}, 'C400': {'name': '96aac', 'prefix': 'C400', 'ext': 'm4a', 'preference': 20, 'abr': 96}, 'C200': {'name': '48aac', 'prefix': 'C200', 'ext': 'm4a', 'preference': 20, 'abr': 48}, } def _real_extract(self, 
url): mid = self._match_id(url) init_data = self._download_init_data(url, mid, fatal=False) info_data = self._make_fcu_req({'info': { 'module': 'music.pf_song_detail_svr', 'method': 'get_song_detail_yqq', 'param': { 'song_mid': mid, 'song_type': 0, }, }}, mid, note='Downloading song info')['info']['data']['track_info'] media_mid = info_data['file']['media_mid'] data = self._make_fcu_req({ 'req_1': { 'module': 'vkey.GetVkeyServer', 'method': 'CgiGetVkey', 'param': { 'guid': str(self._m_r_get_ruin()), 'songmid': [mid] * len(self._FORMATS), 'songtype': [0] * len(self._FORMATS), 'uin': str(self._get_uin()), 'loginflag': 1, 'platform': '20', 'filename': [f'{f["prefix"]}{media_mid}.{f["ext"]}' for f in self._FORMATS.values()], }, }, 'req_2': { 'module': 'music.musichallSong.PlayLyricInfo', 'method': 'GetPlayLyricInfo', 'param': {'songMID': mid}, }, }, mid, note='Downloading formats and lyric', headers=self.geo_verification_headers()) code = traverse_obj(data, ('req_1', 'code', {int})) if code != 0: raise ExtractorError(f'Failed to download format info, error code {code or "unknown"}') formats = [] for media_info in traverse_obj(data, ( 'req_1', 'data', 'midurlinfo', lambda _, v: v['songmid'] == mid and v['purl']), ): format_key = traverse_obj(media_info, ('filename', {str}, {lambda x: x[:4]})) format_info = self._FORMATS.get(format_key) or {} format_id = format_info.get('name') formats.append({ 'url': urljoin('https://dl.stream.qqmusic.qq.com', media_info['purl']), 'format': format_id, 'format_id': format_id, 'size': traverse_obj(info_data, ('file', f'size_{format_id}', {int_or_none})), 'quality': format_info.get('preference'), 'abr': format_info.get('abr'), 'ext': format_info.get('ext'), 'vcodec': 'none', }) if not formats and not self.is_logged_in: self.raise_login_required() if traverse_obj(data, ('req_2', 'code')): self.report_warning(f'Failed to download lyric, error {data["req_2"]["code"]!r}') lrc_content = traverse_obj(data, ('req_2', 'data', 'lyric', {lambda x: 
base64.b64decode(x).decode('utf-8')})) info_dict = { 'id': mid, 'formats': formats, **traverse_obj(info_data, { 'title': ('title', {str}), 'album': ('album', 'title', {str}, filter), 'release_date': ('time_public', {lambda x: x.replace('-', '') or None}), 'creators': ('singer', ..., 'name', {str}), 'alt_title': ('subtitle', {str}, filter), 'duration': ('interval', {int_or_none}), }), **traverse_obj(init_data, ('detail', { 'thumbnail': ('picurl', {url_or_none}), 'description': ('info', 'intro', 'content', ..., 'value', {str}), 'genres': ('info', 'genre', 'content', ..., 'value', {str}, all), }), get_all=False), } if lrc_content: info_dict['subtitles'] = {'origin': [{'ext': 'lrc', 'data': lrc_content}]} info_dict['description'] = join_nonempty(info_dict.get('description'), lrc_content, delim='\n') return info_dict class QQMusicSingerIE(QQMusicBaseIE): IE_NAME = 'qqmusic:singer' IE_DESC = 'QQ音乐 - 歌手' _VALID_URL = r'https?://y\.qq\.com/n/ryqq/singer/(?P<id>[0-9A-Za-z]+)' _TESTS = [{ 'url': 'https://y.qq.com/n/ryqq/singer/001BLpXF2DyJe2', 'info_dict': { 'id': '001BLpXF2DyJe2', 'title': '林俊杰', 'description': 'md5:10624ce73b06fa400bc846f59b0305fa', 'thumbnail': r're:^https?://.*\.jpg(?:$|[#?])', }, 'playlist_mincount': 100, }, { 'url': 'https://y.qq.com/n/ryqq/singer/000Q00f213YzNV', 'info_dict': { 'id': '000Q00f213YzNV', 'title': '桃几OvO', 'description': '小破站小唱见~希望大家喜欢听我唱歌~!', 'thumbnail': r're:^https?://.*\.jpg(?:$|[#?])', }, 'playlist_count': 12, 'playlist': [{ 'info_dict': { 'id': '0016cvsy02mmCl', 'ext': 'mp3', 'title': '群青', 'album': '桃几2021年翻唱集', 'release_date': '20210913', 'duration': 248, 'creators': ['桃几OvO'], 'genres': ['Pop'], 'description': 'md5:4296005a04edcb5cdbe0889d5055a7ae', 'size': 3970822, 'thumbnail': r're:^https?://.*\.jpg(?:$|[#?])', }, }], }] _PAGE_SIZE = 50 def _fetch_page(self, mid, page_size, page_num): data = self._make_fcu_req({'req_1': { 'module': 'music.web_singer_info_svr', 'method': 'get_singer_detail_info', 'param': { 'sort': 5, 
'singermid': mid, 'sin': page_num * page_size, 'num': page_size, }}}, mid, note=f'Downloading page {page_num}') yield from traverse_obj(data, ('req_1', 'data', 'songlist', ..., {lambda x: self.url_result( f'https://y.qq.com/n/ryqq/songDetail/{x["mid"]}', QQMusicIE, x['mid'], x.get('title'))})) def _real_extract(self, url): mid = self._match_id(url) init_data = self._download_init_data(url, mid, fatal=False) return self.playlist_result( OnDemandPagedList(functools.partial(self._fetch_page, mid, self._PAGE_SIZE), self._PAGE_SIZE), mid, **traverse_obj(init_data, ('singerDetail', { 'title': ('basic_info', 'name', {str}), 'description': ('ex_info', 'desc', {str}), 'thumbnail': ('pic', 'pic', {url_or_none}), }))) class QQPlaylistBaseIE(InfoExtractor): def _extract_entries(self, info_json, path): for song in traverse_obj(info_json, path): song_mid = song.get('songmid') if not song_mid: continue yield self.url_result( f'https://y.qq.com/n/ryqq/songDetail/{song_mid}', QQMusicIE, song_mid, song.get('songname')) class QQMusicAlbumIE(QQPlaylistBaseIE): IE_NAME = 'qqmusic:album' IE_DESC = 'QQ音乐 - 专辑' _VALID_URL = r'https?://y\.qq\.com/n/ryqq/albumDetail/(?P<id>[0-9A-Za-z]+)' _TESTS = [{ 'url': 'https://y.qq.com/n/ryqq/albumDetail/000gXCTb2AhRR1', 'info_dict': { 'id': '000gXCTb2AhRR1', 'title': '我们都是这样长大的', 'description': 'md5:179c5dce203a5931970d306aa9607ea6', }, 'playlist_count': 4, }, { 'url': 'https://y.qq.com/n/ryqq/albumDetail/002Y5a3b3AlCu3', 'info_dict': { 'id': '002Y5a3b3AlCu3', 'title': '그리고…', 'description': 'md5:a48823755615508a95080e81b51ba729', }, 'playlist_count': 8, }] def _real_extract(self, url): mid = self._match_id(url) album_json = self._download_json( 'http://i.y.qq.com/v8/fcg-bin/fcg_v8_album_info_cp.fcg', mid, 'Download album page', query={'albummid': mid, 'format': 'json'})['data'] entries = self._extract_entries(album_json, ('list', ...)) return self.playlist_result(entries, mid, **traverse_obj(album_json, { 'title': ('name', {str}), 'description': 
('desc', {str.strip}), })) class QQMusicToplistIE(QQPlaylistBaseIE): IE_NAME = 'qqmusic:toplist' IE_DESC = 'QQ音乐 - 排行榜' _VALID_URL = r'https?://y\.qq\.com/n/ryqq/toplist/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://y.qq.com/n/ryqq/toplist/123', 'info_dict': { 'id': '123', 'title': r're:美国热门音乐榜 \d{4}-\d{2}-\d{2}', 'description': '美国热门音乐榜,每周一更新。', }, 'playlist_count': 95, }, { 'url': 'https://y.qq.com/n/ryqq/toplist/3', 'info_dict': { 'id': '3', 'title': r're:巅峰榜·欧美 \d{4}-\d{2}-\d{2}', 'description': 'md5:4def03b60d3644be4c9a36f21fd33857', }, 'playlist_count': 100, }, { 'url': 'https://y.qq.com/n/ryqq/toplist/106', 'info_dict': { 'id': '106', 'title': r're:韩国Mnet榜 \d{4}-\d{2}-\d{2}', 'description': 'md5:cb84b325215e1d21708c615cac82a6e7', }, 'playlist_count': 50, }] def _real_extract(self, url): list_id = self._match_id(url) toplist_json = self._download_json( 'http://i.y.qq.com/v8/fcg-bin/fcg_v8_toplist_cp.fcg', list_id, note='Download toplist page', query={'type': 'toplist', 'topid': list_id, 'format': 'json'}) return self.playlist_result( self._extract_entries(toplist_json, ('songlist', ..., 'data')), list_id, playlist_title=join_nonempty(*traverse_obj( toplist_json, ((('topinfo', 'ListName'), 'update_time'), None)), delim=' '), playlist_description=traverse_obj(toplist_json, ('topinfo', 'info'))) class QQMusicPlaylistIE(QQPlaylistBaseIE): IE_NAME = 'qqmusic:playlist' IE_DESC = 'QQ音乐 - 歌单' _VALID_URL = r'https?://y\.qq\.com/n/ryqq/playlist/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://y.qq.com/n/ryqq/playlist/1374105607', 'info_dict': { 'id': '1374105607', 'title': '易入人心的华语民谣', 'description': '民谣的歌曲易于传唱、、歌词朗朗伤口、旋律简单温馨。属于那种才入耳孔。却上心头的感觉。没有太多的复杂情绪。简单而直接地表达乐者的情绪,就是这样的简单才易入人心。', }, 'playlist_count': 20, }] def _real_extract(self, url): list_id = self._match_id(url) list_json = self._download_json( 'http://i.y.qq.com/qzone-music/fcg-bin/fcg_ucc_getcdinfo_byids_cp.fcg', list_id, 'Download list page', query={'type': 1, 'json': 1, 'utf8': 1, 'onlysong': 0, 'disstid': list_id}, 
transform_source=strip_jsonp, headers={'Referer': url}) if not len(list_json.get('cdlist', [])): raise ExtractorError(join_nonempty( 'Unable to get playlist info', join_nonempty('code', 'subcode', from_dict=list_json), list_json.get('msg'), delim=': ')) entries = self._extract_entries(list_json, ('cdlist', 0, 'songlist', ...)) return self.playlist_result(entries, list_id, **traverse_obj(list_json, ('cdlist', 0, { 'title': ('dissname', {str}), 'description': ('desc', {clean_html}), }))) class QQMusicVideoIE(QQMusicBaseIE): IE_NAME = 'qqmusic:mv' IE_DESC = 'QQ音乐 - MV' _VALID_URL = r'https?://y\.qq\.com/n/ryqq/mv/(?P<id>[0-9A-Za-z]+)' _TESTS = [{ 'url': 'https://y.qq.com/n/ryqq/mv/002Vsarh3SVU8K', 'info_dict': { 'id': '002Vsarh3SVU8K', 'ext': 'mp4', 'title': 'The Chant (Extended Mix / Audio)', 'description': '', 'thumbnail': r're:^https?://.*\.jpg(?:$|[#?])', 'release_timestamp': 1688918400, 'release_date': '20230709', 'duration': 313, 'creators': ['Duke Dumont'], 'view_count': int, }, }] def _parse_url_formats(self, url_data): return traverse_obj(url_data, ('mp4', lambda _, v: v['freeflow_url'], { 'url': ('freeflow_url', 0, {url_or_none}), 'filesize': ('fileSize', {int_or_none}), 'format_id': ('newFileType', {str_or_none}), })) def _real_extract(self, url): video_id = self._match_id(url) video_info = self._make_fcu_req({ 'mvInfo': { 'module': 'music.video.VideoData', 'method': 'get_video_info_batch', 'param': { 'vidlist': [video_id], 'required': [ 'vid', 'type', 'sid', 'cover_pic', 'duration', 'singers', 'video_pay', 'hint', 'code', 'msg', 'name', 'desc', 'playcnt', 'pubdate', 'play_forbid_reason'], }, }, 'mvUrl': { 'module': 'music.stream.MvUrlProxy', 'method': 'GetMvUrls', 'param': {'vids': [video_id]}, }, }, video_id, headers=self.geo_verification_headers()) if traverse_obj(video_info, ('mvInfo', 'data', video_id, 'play_forbid_reason')) == 3: self.raise_geo_restricted() return { 'id': video_id, 'formats': self._parse_url_formats(traverse_obj(video_info, ('mvUrl', 
'data', video_id))), **traverse_obj(video_info, ('mvInfo', 'data', video_id, { 'title': ('name', {str}), 'description': ('desc', {str}), 'thumbnail': ('cover_pic', {url_or_none}), 'release_timestamp': ('pubdate', {int_or_none}), 'duration': ('duration', {int_or_none}), 'creators': ('singers', ..., 'name', {str}), 'view_count': ('playcnt', {int_or_none}), })), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/antenna.py
yt_dlp/extractor/antenna.py
import urllib.parse from .common import InfoExtractor from ..networking import HEADRequest from ..utils import ( ExtractorError, determine_ext, make_archive_id, scale_thumbnails_to_max_format_width, ) class AntennaBaseIE(InfoExtractor): def _download_and_extract_api_data(self, video_id, netloc, cid=None): info = self._download_json(f'{self.http_scheme()}//{netloc}{self._API_PATH}', video_id, query={'cid': cid or video_id}) if not info.get('url'): raise ExtractorError(f'No source found for {video_id}') ext = determine_ext(info['url']) if ext == 'm3u8': formats, subs = self._extract_m3u8_formats_and_subtitles(info['url'], video_id, 'mp4') else: formats, subs = [{'url': info['url'], 'format_id': ext}], {} thumbnails = scale_thumbnails_to_max_format_width( formats, [{'url': info['thumb']}], r'(?<=/imgHandler/)\d+') if info.get('thumb') else [] return { 'id': video_id, 'title': info.get('title'), 'thumbnails': thumbnails, 'formats': formats, 'subtitles': subs, } class AntennaGrWatchIE(AntennaBaseIE): IE_NAME = 'antenna:watch' IE_DESC = 'antenna.gr and ant1news.gr videos' _VALID_URL = r'https?://(?P<netloc>(?:www\.)?(?:antenna|ant1news)\.gr)/watch/(?P<id>\d+)/' _API_PATH = '/templates/data/player' _TESTS = [{ 'url': 'https://www.ant1news.gr/watch/1506168/ant1-news-09112021-stis-18-45', 'md5': 'c472d9dd7cd233c63aff2ea42201cda6', 'info_dict': { 'id': '1506168', 'ext': 'mp4', 'title': 'md5:0ad00fa66ecf8aa233d26ab0dba7514a', 'description': 'md5:18665af715a6dcfeac1d6153a44f16b0', 'thumbnail': r're:https://ant1media\.azureedge\.net/imgHandler/\d+/26d46bf6-8158-4f02-b197-7096c714b2de\.jpg', }, }, { 'url': 'https://www.antenna.gr/watch/1643812/oi-prodotes-epeisodio-01', 'md5': '8f6f7dd3b1dba4d835ba990e25f31243', 'info_dict': { 'id': '1643812', 'ext': 'mp4', 'format_id': 'mp4', 'title': 'ΟΙ ΠΡΟΔΟΤΕΣ – ΕΠΕΙΣΟΔΙΟ 01', 'thumbnail': r're:https://ant1media\.azureedge\.net/imgHandler/\d+/b3d63096-e72d-43c4-87a0-00d4363d242f\.jpg', }, }] def _real_extract(self, url): video_id, netloc = 
self._match_valid_url(url).group('id', 'netloc') webpage = self._download_webpage(url, video_id) info = self._download_and_extract_api_data(video_id, netloc) info['description'] = self._og_search_description(webpage, default=None) info['_old_archive_ids'] = [make_archive_id('Ant1NewsGrWatch', video_id)] return info class Ant1NewsGrArticleIE(AntennaBaseIE): IE_NAME = 'ant1newsgr:article' IE_DESC = 'ant1news.gr articles' _VALID_URL = r'https?://(?:www\.)?ant1news\.gr/[^/]+/article/(?P<id>\d+)/' _TESTS = [{ 'url': 'https://www.ant1news.gr/afieromata/article/549468/o-tzeims-mpont-sta-meteora-oi-apeiles-kai-o-xesikomos-ton-kalogeron', 'md5': '57eb8d12181f0fa2b14b0b138e1de9b6', 'info_dict': { 'id': '_xvg/m_cmbatw=', 'ext': 'mp4', 'title': 'md5:a93e8ecf2e4073bfdffcb38f59945411', 'timestamp': 1666166520, 'upload_date': '20221019', 'thumbnail': 'https://ant1media.azureedge.net/imgHandler/1920/756206d2-d640-40e2-b201-3555abdfc0db.jpg', }, }, { 'url': 'https://ant1news.gr/Society/article/620286/symmoria-anilikon-dikigoros-thymaton-ithelan-na-toys-apoteleiosoyn', 'info_dict': { 'id': '620286', 'title': 'md5:91fe569e952e4d146485740ae927662b', }, 'playlist_mincount': 2, 'params': { 'skip_download': True, }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) info = self._search_json_ld(webpage, video_id, expected_type='NewsArticle') embed_urls = list(Ant1NewsGrEmbedIE._extract_embed_urls(url, webpage)) if not embed_urls: raise ExtractorError(f'no videos found for {video_id}', expected=True) return self.playlist_from_matches( embed_urls, video_id, info.get('title'), ie=Ant1NewsGrEmbedIE.ie_key(), video_kwargs={'url_transparent': True, 'timestamp': info.get('timestamp')}) class Ant1NewsGrEmbedIE(AntennaBaseIE): IE_NAME = 'ant1newsgr:embed' IE_DESC = 'ant1news.gr embedded videos' _BASE_PLAYER_URL_RE = r'(?:https?:)?//(?:[a-zA-Z0-9\-]+\.)?(?:antenna|ant1news)\.gr/templates/pages/player' _VALID_URL = 
rf'{_BASE_PLAYER_URL_RE}\?([^#]+&)?cid=(?P<id>[^#&]+)' _EMBED_REGEX = [rf'<iframe[^>]+?src=(?P<_q1>["\'])(?P<url>{_BASE_PLAYER_URL_RE}\?(?:(?!(?P=_q1)).)+)(?P=_q1)'] _API_PATH = '/templates/data/jsonPlayer' _TESTS = [{ 'url': 'https://www.antenna.gr/templates/pages/player?cid=3f_li_c_az_jw_y_u=&w=670&h=377', 'md5': 'dfc58c3a11a5a9aad2ba316ed447def3', 'info_dict': { 'id': '3f_li_c_az_jw_y_u=', 'ext': 'mp4', 'title': 'md5:a30c93332455f53e1e84ae0724f0adf7', 'thumbnail': 'https://ant1media.azureedge.net/imgHandler/640/bbe31201-3f09-4a4e-87f5-8ad2159fffe2.jpg', }, }] def _real_extract(self, url): video_id = self._match_id(url) canonical_url = self._request_webpage( HEADRequest(url), video_id, note='Resolve canonical player URL', errnote='Could not resolve canonical player URL').url _, netloc, _, _, query, _ = urllib.parse.urlparse(canonical_url) cid = urllib.parse.parse_qs(query)['cid'][0] return self._download_and_extract_api_data(video_id, netloc, cid=cid)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/douyutv.py
yt_dlp/extractor/douyutv.py
import hashlib import time import urllib import uuid from .common import InfoExtractor from .openload import PhantomJSwrapper from ..utils import ( ExtractorError, UserNotLive, determine_ext, int_or_none, js_to_json, parse_resolution, str_or_none, traverse_obj, unescapeHTML, url_or_none, urlencode_postdata, urljoin, ) class DouyuBaseIE(InfoExtractor): def _download_cryptojs_md5(self, video_id): for url in [ # XXX: Do NOT use cdn.bootcdn.net; ref: https://sansec.io/research/polyfill-supply-chain-attack 'https://cdnjs.cloudflare.com/ajax/libs/crypto-js/3.1.2/rollups/md5.js', 'https://unpkg.com/cryptojslib@3.1.2/rollups/md5.js', ]: js_code = self._download_webpage( url, video_id, note='Downloading signing dependency', fatal=False) if js_code: self.cache.store('douyu', 'crypto-js-md5', js_code) return js_code raise ExtractorError('Unable to download JS dependency (crypto-js/md5)') def _get_cryptojs_md5(self, video_id): return self.cache.load( 'douyu', 'crypto-js-md5', min_ver='2024.07.04') or self._download_cryptojs_md5(video_id) def _calc_sign(self, sign_func, video_id, a): b = uuid.uuid4().hex c = round(time.time()) js_script = f'{self._get_cryptojs_md5(video_id)};{sign_func};console.log(ub98484234("{a}","{b}","{c}"))' phantom = PhantomJSwrapper(self) result = phantom.execute(js_script, video_id, note='Executing JS signing script').strip() return {i: v[0] for i, v in urllib.parse.parse_qs(result).items()} def _search_js_sign_func(self, webpage, fatal=True): # The greedy look-behind ensures last possible script tag is matched return self._search_regex( r'(?:<script.*)?<script[^>]*>(.*?ub98484234.*?)</script>', webpage, 'JS sign func', fatal=fatal) class DouyuTVIE(DouyuBaseIE): IE_DESC = '斗鱼直播' _VALID_URL = r'https?://(?:www\.)?douyu(?:tv)?\.com/(topic/\w+\?rid=|(?:[^/]+/))*(?P<id>[A-Za-z0-9]+)' _TESTS = [{ 'url': 'https://www.douyu.com/pigff', 'info_dict': { 'id': '24422', 'display_id': 'pigff', 'ext': 'mp4', 'title': 're:^【PIGFF】.* [0-9]{4}-[0-9]{2}-[0-9]{2} 
[0-9]{2}:[0-9]{2}$', 'description': r'≥15级牌子看鱼吧置顶帖进粉丝vx群', 'thumbnail': str, 'uploader': 'pigff', 'is_live': True, 'live_status': 'is_live', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.douyutv.com/85982', 'info_dict': { 'id': '85982', 'display_id': '85982', 'ext': 'flv', 'title': 're:^小漠从零单排记!——CSOL2躲猫猫 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'description': 'md5:746a2f7a253966a06755a912f0acc0d2', 'thumbnail': r're:^https?://.*\.png', 'uploader': 'douyu小漠', 'is_live': True, }, 'params': { 'skip_download': True, }, 'skip': 'Room not found', }, { 'url': 'http://www.douyutv.com/17732', 'info_dict': { 'id': '17732', 'display_id': '17732', 'ext': 'flv', 'title': 're:^清晨醒脑!根本停不下来! [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'description': r're:.*m7show@163\.com.*', 'thumbnail': r're:^https?://.*\.png', 'uploader': '7师傅', 'is_live': True, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.douyu.com/topic/ydxc?rid=6560603', 'info_dict': { 'id': '6560603', 'display_id': '6560603', 'ext': 'flv', 'title': 're:^阿余:新年快乐恭喜发财! 
[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'description': 're:.*直播时间.*', 'thumbnail': r're:^https?://.*\.png', 'uploader': '阿涛皎月Carry', 'live_status': 'is_live', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.douyu.com/xiaocang', 'only_matching': True, }, { # \"room_id\" 'url': 'http://www.douyu.com/t/lpl', 'only_matching': True, }] def _get_sign_func(self, room_id, video_id): return self._download_json( f'https://www.douyu.com/swf_api/homeH5Enc?rids={room_id}', video_id, note='Getting signing script')['data'][f'room{room_id}'] def _extract_stream_formats(self, stream_formats): formats = [] for stream_info in traverse_obj(stream_formats, (..., 'data')): stream_url = urljoin( traverse_obj(stream_info, 'rtmp_url'), traverse_obj(stream_info, 'rtmp_live')) if stream_url: rate_id = traverse_obj(stream_info, ('rate', {int_or_none})) rate_info = traverse_obj(stream_info, ('multirates', lambda _, v: v['rate'] == rate_id), get_all=False) ext = determine_ext(stream_url) formats.append({ 'url': stream_url, 'format_id': str_or_none(rate_id), 'ext': 'mp4' if ext == 'm3u8' else ext, 'protocol': 'm3u8_native' if ext == 'm3u8' else 'https', 'quality': rate_id % -10000 if rate_id is not None else None, **traverse_obj(rate_info, { 'format': ('name', {str_or_none}), 'tbr': ('bit', {int_or_none}), }), }) return formats def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) room_id = self._search_regex(r'\$ROOM\.room_id\s*=\s*(\d+)', webpage, 'room id') if self._search_regex(r'"videoLoop"\s*:\s*(\d+)', webpage, 'loop', default='') == '1': raise UserNotLive('The channel is auto-playing VODs', video_id=video_id) if self._search_regex(r'\$ROOM\.show_status\s*=\s*(\d+)', webpage, 'status', default='') == '2': raise UserNotLive(video_id=video_id) # Grab metadata from API params = { 'aid': 'wp', 'client_sys': 'wp', 'time': int(time.time()), } params['auth'] = hashlib.md5( 
f'room/{room_id}?{urllib.parse.urlencode(params)}zNzMV1y4EMxOHS6I5WKm'.encode()).hexdigest() room = traverse_obj(self._download_json( f'http://www.douyutv.com/api/v1/room/{room_id}', video_id, note='Downloading room info', query=params, fatal=False), 'data') # 1 = live, 2 = offline if traverse_obj(room, 'show_status') == '2': raise UserNotLive(video_id=video_id) js_sign_func = self._search_js_sign_func(webpage, fatal=False) or self._get_sign_func(room_id, video_id) form_data = { 'rate': 0, **self._calc_sign(js_sign_func, video_id, room_id), } stream_formats = [self._download_json( f'https://www.douyu.com/lapi/live/getH5Play/{room_id}', video_id, note='Downloading livestream format', data=urlencode_postdata(form_data))] for rate_id in traverse_obj(stream_formats[0], ('data', 'multirates', ..., 'rate')): if rate_id != traverse_obj(stream_formats[0], ('data', 'rate')): form_data['rate'] = rate_id stream_formats.append(self._download_json( f'https://www.douyu.com/lapi/live/getH5Play/{room_id}', video_id, note=f'Downloading livestream format {rate_id}', data=urlencode_postdata(form_data))) return { 'id': room_id, 'formats': self._extract_stream_formats(stream_formats), 'is_live': True, **traverse_obj(room, { 'display_id': ('url', {str}, {lambda i: i[1:]}), 'title': ('room_name', {str}, {unescapeHTML}), 'description': ('show_details', {str}), 'uploader': ('nickname', {str}), 'thumbnail': ('room_src', {url_or_none}), }), } class DouyuShowIE(DouyuBaseIE): _VALID_URL = r'https?://v(?:mobile)?\.douyu\.com/show/(?P<id>[0-9a-zA-Z]+)' _TESTS = [{ 'url': 'https://v.douyu.com/show/mPyq7oVNe5Yv1gLY', 'info_dict': { 'id': 'mPyq7oVNe5Yv1gLY', 'ext': 'mp4', 'title': '四川人小时候的味道“蒜苗回锅肉”,传统菜不能丢,要常做来吃', 'duration': 633, 'thumbnail': str, 'uploader': '美食作家王刚V', 'uploader_id': 'OVAO4NVx1m7Q', 'timestamp': 1661850002, 'upload_date': '20220830', 'view_count': int, 'tags': ['美食', '美食综合'], }, }, { 'url': 'https://vmobile.douyu.com/show/rjNBdvnVXNzvE2yw', 'only_matching': True, }] _FORMATS = { 
'super': '原画', 'high': '超清', 'normal': '高清', } _QUALITIES = { 'super': -1, 'high': -2, 'normal': -3, } _RESOLUTIONS = { 'super': '1920x1080', 'high': '1280x720', 'normal': '852x480', } def _real_extract(self, url): url = url.replace('vmobile.', 'v.') video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) video_info = self._search_json( r'<script>\s*window\.\$DATA\s*=', webpage, 'video info', video_id, transform_source=js_to_json) js_sign_func = self._search_js_sign_func(webpage) form_data = { 'vid': video_id, **self._calc_sign(js_sign_func, video_id, video_info['ROOM']['point_id']), } url_info = self._download_json( 'https://v.douyu.com/api/stream/getStreamUrl', video_id, data=urlencode_postdata(form_data), note='Downloading video formats') formats = [] for name, url in traverse_obj(url_info, ('data', 'thumb_video', {dict.items}, ...)): video_url = traverse_obj(url, ('url', {url_or_none})) if video_url: ext = determine_ext(video_url) formats.append({ 'format': self._FORMATS.get(name), 'format_id': name, 'url': video_url, 'quality': self._QUALITIES.get(name), 'ext': 'mp4' if ext == 'm3u8' else ext, 'protocol': 'm3u8_native' if ext == 'm3u8' else 'https', **parse_resolution(self._RESOLUTIONS.get(name)), }) else: self.to_screen( f'"{self._FORMATS.get(name, name)}" format may require logging in. {self._login_hint()}') return { 'id': video_id, 'formats': formats, **traverse_obj(video_info, ('DATA', { 'title': ('content', 'title', {str}), 'uploader': ('content', 'author', {str}), 'uploader_id': ('content', 'up_id', {str_or_none}), 'duration': ('content', 'video_duration', {int_or_none}), 'thumbnail': ('content', 'video_pic', {url_or_none}), 'timestamp': ('content', 'create_time', {int_or_none}), 'view_count': ('content', 'view_num', {int_or_none}), 'tags': ('videoTag', ..., 'tagName', {str}), })), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ninenow.py
yt_dlp/extractor/ninenow.py
from .brightcove import BrightcoveNewIE from .common import InfoExtractor from ..utils import ( float_or_none, int_or_none, parse_iso8601, parse_resolution, str_or_none, url_or_none, ) from ..utils.traversal import ( get_first, require, traverse_obj, value, ) class NineNowIE(InfoExtractor): IE_NAME = '9now.com.au' _VALID_URL = r'https?://(?:www\.)?9now\.com\.au/(?:[^/?#]+/){2}(?P<id>(?P<type>clip|episode)-[^/?#]+)' _GEO_BYPASS = False _TESTS = [{ # clip 'url': 'https://www.9now.com.au/today/season-2025/clip-cm8hw9h5z00080hquqa5hszq7', 'info_dict': { 'id': '6370295582112', 'ext': 'mp4', 'title': 'Would Karl Stefanovic be able to land a plane?', 'description': 'The Today host\'s skills are put to the test with the latest simulation tech.', 'uploader_id': '4460760524001', 'duration': 197.376, 'tags': ['flights', 'technology', 'Karl Stefanovic'], 'season': 'Season 2025', 'season_number': 2025, 'series': 'TODAY', 'timestamp': 1742507988, 'upload_date': '20250320', 'release_timestamp': 1742507983, 'release_date': '20250320', 'thumbnail': r're:https?://.+/1920x0/.+\.jpg', }, 'params': { 'skip_download': 'HLS/DASH fragments and mp4 URLs are geo-restricted; only available in AU', }, }, { # episode 'url': 'https://www.9now.com.au/afl-footy-show/2016/episode-19', 'only_matching': True, }, { # DRM protected 'url': 'https://www.9now.com.au/andrew-marrs-history-of-the-world/season-1/episode-1', 'only_matching': True, }, { # episode of series 'url': 'https://www.9now.com.au/lego-masters/season-3/episode-3', 'info_dict': { 'id': '6308830406112', 'title': 'Episode 3', 'ext': 'mp4', 'season_number': 3, 'episode_number': 3, 'description': 'In the first elimination of the competition, teams will have 10 hours to build a world inside a snow globe.', 'uploader_id': '4460760524001', 'timestamp': 1619002200, 'upload_date': '20210421', 'duration': 3574.085, 'thumbnail': r're:https?://.+/1920x0/.+\.jpg', 'tags': ['episode'], 'series': 'Lego Masters', 'season': 'Season 3', 'episode': 
'Episode 3', 'release_timestamp': 1619002200, 'release_date': '20210421', }, 'params': { 'skip_download': 'HLS/DASH fragments and mp4 URLs are geo-restricted; only available in AU', }, }, { 'url': 'https://www.9now.com.au/married-at-first-sight/season-12/episode-1', 'info_dict': { 'id': '6367798770112', 'ext': 'mp4', 'title': 'Episode 1', 'description': r're:The cultural sensation of Married At First Sight returns with our first weddings! .{90}$', 'uploader_id': '4460760524001', 'duration': 5415.079, 'thumbnail': r're:https?://.+/1920x0/.+\.png', 'tags': ['episode'], 'season': 'Season 12', 'season_number': 12, 'episode': 'Episode 1', 'episode_number': 1, 'series': 'Married at First Sight', 'timestamp': 1737973800, 'upload_date': '20250127', 'release_timestamp': 1737973800, 'release_date': '20250127', }, 'params': { 'skip_download': 'HLS/DASH fragments and mp4 URLs are geo-restricted; only available in AU', }, }] BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/4460760524001/default_default/index.html?videoId={}' def _real_extract(self, url): display_id, video_type = self._match_valid_url(url).group('id', 'type') webpage = self._download_webpage(url, display_id) common_data = get_first(self._search_nextjs_v13_data(webpage, display_id), ('payload', {dict})) if traverse_obj(common_data, (video_type, 'video', 'drm', {bool})): self.report_drm(display_id) brightcove_id = traverse_obj(common_data, ( video_type, 'video', ( ('brightcoveId', {str}), ('referenceId', {str}, {lambda x: f'ref:{x}' if x else None}), ), any, {require('brightcove ID')})) return { '_type': 'url_transparent', 'ie_key': BrightcoveNewIE.ie_key(), 'url': self.BRIGHTCOVE_URL_TEMPLATE.format(brightcove_id), **traverse_obj(common_data, { 'id': (video_type, 'video', 'id', {int}, ({str_or_none}, {value(brightcove_id)}), any), 'title': (video_type, 'name', {str}), 'description': (video_type, 'description', {str}), 'duration': (video_type, 'video', 'duration', {float_or_none(scale=1000)}), 'tags': 
(video_type, 'tags', ..., 'name', {str}, all, filter), 'series': ('tvSeries', 'name', {str}), 'season_number': ('season', 'seasonNumber', {int_or_none}), 'episode_number': ('episode', 'episodeNumber', {int_or_none}), 'timestamp': ('episode', 'airDate', {parse_iso8601}), 'release_timestamp': (video_type, 'availability', {parse_iso8601}), 'thumbnails': (video_type, 'image', 'sizes', {dict.items}, lambda _, v: url_or_none(v[1]), { 'id': 0, 'url': 1, 'width': (1, {parse_resolution}, 'width'), }), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/indavideo.py
yt_dlp/extractor/indavideo.py
from .common import InfoExtractor from ..utils import ( int_or_none, parse_age_limit, parse_iso8601, time_seconds, update_url_query, ) class IndavideoEmbedIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:embed\.)?indavideo\.hu/player/video/|assets\.indavideo\.hu/swf/player\.swf\?.*\b(?:v(?:ID|id))=)(?P<id>[\da-f]+)' # Some example URLs covered by generic extractor: # https://index.indavideo.hu/video/Hod_Nemetorszagban # https://auto.indavideo.hu/video/Sajat_utanfutoban_a_kis_tacsko # https://film.indavideo.hu/video/f_farkaslesen # https://palyazat.indavideo.hu/video/Embertelen_dal_Dodgem_egyuttes _EMBED_REGEX = [r'<iframe[^>]+\bsrc=["\'](?P<url>(?:https?:)//embed\.indavideo\.hu/player/video/[\da-f]+)'] _TESTS = [{ 'url': 'https://indavideo.hu/player/video/1bdc3c6d80/', 'md5': 'c8a507a1c7410685f83a06eaeeaafeab', 'info_dict': { 'id': '1837039', 'ext': 'mp4', 'title': 'Cicatánc', 'description': '', 'uploader': 'cukiajanlo', 'uploader_id': '83729', 'thumbnail': r're:https?://pics\.indavideo\.hu/videos/.+\.jpg', 'timestamp': 1439193826, 'upload_date': '20150810', 'duration': 72, 'age_limit': 0, 'tags': 'count:5', }, }, { 'url': 'https://embed.indavideo.hu/player/video/1bdc3c6d80?autostart=1&hide=1', 'only_matching': True, }] _WEBPAGE_TESTS = [{ 'url': 'https://indavideo.hu/video/Vicces_cica_1', 'info_dict': { 'id': '1335611', 'ext': 'mp4', 'title': 'Vicces cica', 'description': 'Játszik a tablettel. 
:D', 'thumbnail': r're:https?://pics\.indavideo\.hu/videos/.+\.jpg', 'uploader': 'Jet_Pack', 'uploader_id': '491217', 'timestamp': 1390821212, 'upload_date': '20140127', 'duration': 7, 'age_limit': 0, 'tags': 'count:2', }, }, { 'url': 'https://palyazat.indavideo.hu/video/RUSH_1', 'info_dict': { 'id': '3808180', 'ext': 'mp4', 'title': 'RUSH', 'age_limit': 0, 'description': '', 'duration': 650, 'tags': 'count:2', 'thumbnail': r're:https?://pics\.indavideo\.hu/videos/.+\.jpg', 'timestamp': 1729136266, 'upload_date': '20241017', 'uploader': '7summerfilms', 'uploader_id': '1628496', }, }] def _real_extract(self, url): video_id = self._match_id(url) video = self._download_json( f'https://amfphp.indavideo.hu/SYm0json.php/player.playerHandler.getVideoData/{video_id}/', video_id, query={'_': time_seconds()})['data'] video_urls = [] video_files = video.get('video_files') if isinstance(video_files, list): video_urls.extend(video_files) elif isinstance(video_files, dict): video_urls.extend(video_files.values()) video_urls = list(set(video_urls)) filesh = video.get('filesh') or {} formats = [] for video_url in video_urls: height = int_or_none(self._search_regex( r'\.(\d{3,4})\.mp4(?:\?|$)', video_url, 'height', default=None)) if not height and len(filesh) == 1: height = int_or_none(next(iter(filesh.keys()))) token = filesh.get(str(height)) if token is None: continue formats.append({ 'url': update_url_query(video_url, {'token': token}), 'height': height, }) timestamp = video.get('date') if timestamp: # upload date is in CEST timestamp = parse_iso8601(timestamp + ' +0200', ' ') thumbnails = [{ 'url': self._proto_relative_url(thumbnail), } for thumbnail in video.get('thumbnails', [])] tags = [tag['title'] for tag in video.get('tags') or []] return { 'id': video.get('id') or video_id, 'title': video.get('title'), 'description': video.get('description'), 'thumbnails': thumbnails, 'uploader': video.get('user_name'), 'uploader_id': video.get('user_id'), 'timestamp': timestamp, 
'duration': int_or_none(video.get('length')), 'age_limit': parse_age_limit(video.get('age_limit')), 'tags': tags, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/shahid.py
yt_dlp/extractor/shahid.py
import json import math import re from .aws import AWSIE from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, InAdvancePagedList, clean_html, int_or_none, parse_iso8601, str_or_none, urlencode_postdata, ) class ShahidBaseIE(AWSIE): _AWS_PROXY_HOST = 'api2.shahid.net' _AWS_API_KEY = '2RRtuMHx95aNI1Kvtn2rChEuwsCogUd4samGPjLh' _VALID_URL_BASE = r'https?://shahid\.mbc\.net/[a-z]{2}/' def _handle_error(self, e): fail_data = self._parse_json( e.cause.response.read().decode('utf-8'), None, fatal=False) if fail_data: faults = fail_data.get('faults', []) faults_message = ', '.join([clean_html(fault['userMessage']) for fault in faults if fault.get('userMessage')]) if faults_message: raise ExtractorError(faults_message, expected=True) def _call_api(self, path, video_id, request=None): query = {} if request: query['request'] = json.dumps(request) try: return self._aws_execute_api({ 'uri': '/proxy/v2/' + path, 'access_key': 'AKIAI6X4TYCIXM2B7MUQ', 'secret_key': '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', }, video_id, query) except ExtractorError as e: if isinstance(e.cause, HTTPError): self._handle_error(e) raise class ShahidIE(ShahidBaseIE): _NETRC_MACHINE = 'shahid' _VALID_URL = ShahidBaseIE._VALID_URL_BASE + r'(?:serie|show|movie)s/[^/]+/(?P<type>episode|clip|movie)-(?P<id>\d+)' _TESTS = [{ 'url': 'https://shahid.mbc.net/ar/shows/%D9%85%D8%AA%D8%AD%D9%81-%D8%A7%D9%84%D8%AF%D8%AD%D9%8A%D8%AD-%D8%A7%D9%84%D9%85%D9%88%D8%B3%D9%85-1-%D9%83%D9%84%D9%8A%D8%A8-1/clip-816924', 'info_dict': { 'id': '816924', 'ext': 'mp4', 'title': 'متحف الدحيح الموسم 1 كليب 1', 'timestamp': 1602806400, 'upload_date': '20201016', 'description': 'برومو', 'duration': 22, 'categories': ['كوميديا'], }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'https://shahid.mbc.net/ar/movies/%D8%A7%D9%84%D9%82%D9%86%D8%A7%D8%B5%D8%A9/movie-151746', 'only_matching': True, }, { # shahid plus subscriber only 'url': 
'https://shahid.mbc.net/ar/series/%D9%85%D8%B1%D8%A7%D9%8A%D8%A7-2011-%D8%A7%D9%84%D9%85%D9%88%D8%B3%D9%85-1-%D8%A7%D9%84%D8%AD%D9%84%D9%82%D8%A9-1/episode-90511', 'only_matching': True, }, { 'url': 'https://shahid.mbc.net/en/shows/Ramez-Fi-Al-Shallal-season-1-episode-1/episode-359319', 'only_matching': True, }] def _perform_login(self, username, password): try: user_data = self._download_json( 'https://shahid.mbc.net/wd/service/users/login', None, 'Logging in', data=json.dumps({ 'email': username, 'password': password, 'basic': 'false', }).encode(), headers={ 'Content-Type': 'application/json; charset=UTF-8', })['user'] except ExtractorError as e: if isinstance(e.cause, HTTPError): self._handle_error(e) raise self._download_webpage( 'https://shahid.mbc.net/populateContext', None, 'Populate Context', data=urlencode_postdata({ 'firstName': user_data['firstName'], 'lastName': user_data['lastName'], 'userName': user_data['email'], 'csg_user_name': user_data['email'], 'subscriberId': user_data['id'], 'sessionId': user_data['sessionId'], })) def _real_extract(self, url): page_type, video_id = self._match_valid_url(url).groups() if page_type == 'clip': page_type = 'episode' playout = self._call_api( 'playout/new/url/' + video_id, video_id)['playout'] if not self.get_param('allow_unplayable_formats') and playout.get('drm'): self.report_drm(video_id) formats = self._extract_m3u8_formats(re.sub( # https://docs.aws.amazon.com/mediapackage/latest/ug/manifest-filtering.html r'aws\.manifestfilter=[\w:;,-]+&?', '', playout['url']), video_id, 'mp4') # video = self._call_api( # 'product/id', video_id, { # 'id': video_id, # 'productType': 'ASSET', # 'productSubType': page_type.upper() # })['productModel'] response = self._download_json( f'http://api.shahid.net/api/v1_1/{page_type}/{video_id}', video_id, 'Downloading video JSON', query={ 'apiKey': 'sh@hid0nlin3', 'hash': 'b2wMCTHpSmyxGqQjJFOycRmLSex+BpTK/ooxy6vHaqs=', }) data = response.get('data', {}) error = data.get('error') if 
error: raise ExtractorError( '{} returned error: {}'.format(self.IE_NAME, '\n'.join(error.values())), expected=True) video = data[page_type] title = video['title'] categories = [ category['name'] for category in video.get('genres', []) if 'name' in category] return { 'id': video_id, 'title': title, 'description': video.get('description'), 'thumbnail': video.get('thumbnailUrl'), 'duration': int_or_none(video.get('duration')), 'timestamp': parse_iso8601(video.get('referenceDate')), 'categories': categories, 'series': video.get('showTitle') or video.get('showName'), 'season': video.get('seasonTitle'), 'season_number': int_or_none(video.get('seasonNumber')), 'season_id': str_or_none(video.get('seasonId')), 'episode_number': int_or_none(video.get('number')), 'episode_id': video_id, 'formats': formats, } class ShahidShowIE(ShahidBaseIE): _VALID_URL = ShahidBaseIE._VALID_URL_BASE + r'(?:show|serie)s/[^/]+/(?:show|series)-(?P<id>\d+)' _TESTS = [{ 'url': 'https://shahid.mbc.net/ar/shows/%D8%B1%D8%A7%D9%85%D8%B2-%D9%82%D8%B1%D8%B4-%D8%A7%D9%84%D8%A8%D8%AD%D8%B1/show-79187', 'info_dict': { 'id': '79187', 'title': 'رامز قرش البحر', 'description': 'md5:c88fa7e0f02b0abd39d417aee0d046ff', }, 'playlist_mincount': 32, }, { 'url': 'https://shahid.mbc.net/ar/series/How-to-live-Longer-(The-Big-Think)/series-291861', 'only_matching': True, }] _PAGE_SIZE = 30 def _real_extract(self, url): show_id = self._match_id(url) product = self._call_api( 'playableAsset', show_id, {'showId': show_id})['productModel'] playlist = product['playlist'] playlist_id = playlist['id'] show = product.get('show', {}) def page_func(page_num): playlist = self._call_api( 'product/playlist', show_id, { 'playListId': playlist_id, 'pageNumber': page_num, 'pageSize': 30, 'sorts': [{ 'order': 'DESC', 'type': 'SORTDATE', }], }) for product in playlist.get('productList', {}).get('products', []): product_url = product.get('productUrl', []).get('url') if not product_url: continue yield self.url_result( product_url, 
'Shahid', str_or_none(product.get('id')), product.get('title')) entries = InAdvancePagedList( page_func, math.ceil(playlist['count'] / self._PAGE_SIZE), self._PAGE_SIZE) return self.playlist_result( entries, show_id, show.get('title'), show.get('description'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nuevo.py
yt_dlp/extractor/nuevo.py
from .common import InfoExtractor from ..utils import float_or_none, xpath_text class NuevoBaseIE(InfoExtractor): def _extract_nuevo(self, config_url, video_id, headers={}): config = self._download_xml( config_url, video_id, transform_source=lambda s: s.strip(), headers=headers) title = xpath_text(config, './title', 'title', fatal=True).strip() video_id = xpath_text(config, './mediaid', default=video_id) thumbnail = xpath_text(config, ['./image', './thumb']) duration = float_or_none(xpath_text(config, './duration')) formats = [] for element_name, format_id in (('file', 'sd'), ('filehd', 'hd')): video_url = xpath_text(config, element_name) if video_url: formats.append({ 'url': video_url, 'format_id': format_id, }) self._check_formats(formats, video_id) return { 'id': video_id, 'title': title, 'thumbnail': thumbnail, 'duration': duration, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false