repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/genius.py
yt_dlp/extractor/genius.py
from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    js_to_json,
    smuggle_url,
    str_or_none,
    traverse_obj,
    unescapeHTML,
)


class GeniusIE(InfoExtractor):
    # Matches both /videos/<slug> pages and /a/<slug> article pages
    # (the `article` group is only set for the latter).
    _VALID_URL = r'https?://(?:www\.)?genius\.com/(?:videos|(?P<article>a))/(?P<id>[^?/#]+)'
    _TESTS = [{
        'url': 'https://genius.com/videos/Vince-staples-breaks-down-the-meaning-of-when-sparks-fly',
        'md5': '64c2ad98cfafcfda23bfa0ad0c512f4c',
        'info_dict': {
            'id': '6313303597112',
            'ext': 'mp4',
            'title': 'Vince Staples Breaks Down The Meaning Of “When Sparks Fly”',
            'description': 'md5:bc15e00342c537c0039d414423ae5752',
            'tags': 'count:1',
            'uploader_id': '4863540648001',
            'duration': 388.416,
            'upload_date': '20221005',
            'timestamp': 1664982341,
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }, {
        'url': 'https://genius.com/videos/Breaking-down-drakes-certified-lover-boy-kanye-beef-way-2-sexy-cudi',
        'md5': 'b8ed87a5efd1473bd027c20a969d4060',
        'info_dict': {
            'id': '6271792014001',
            'ext': 'mp4',
            'title': 'md5:c6355f7fa8a70bc86492a3963919fc15',
            'description': 'md5:1774638c31548b31b037c09e9b821393',
            'tags': 'count:3',
            'uploader_id': '4863540648001',
            'duration': 2685.099,
            'upload_date': '20210909',
            'timestamp': 1631209167,
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }, {
        'url': 'https://genius.com/a/cordae-anderson-paak-break-down-the-meaning-of-two-tens',
        'md5': 'f98a4e03b16b0a2821bd6e52fb3cc9d7',
        'info_dict': {
            'id': '6321509903112',
            'ext': 'mp4',
            'title': 'Cordae & Anderson .Paak Breaks Down The Meaning Of “Two Tens”',
            'description': 'md5:1255f0e1161d07342ce56a8464ac339d',
            'tags': ['song id: 5457554'],
            'uploader_id': '4863540648001',
            'duration': 361.813,
            'upload_date': '20230301',
            'timestamp': 1677703908,
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }]

    def _real_extract(self, url):
        """Locate the Brightcove video ID in the page and delegate to BrightcoveNew."""
        display_id, is_article = self._match_valid_url(url).group('id', 'article')
        webpage = self._download_webpage(url, display_id)

        # Page data is embedded HTML-escaped inside a <meta itemprop="page_data"> tag
        metadata = self._search_json(
            r'<meta content="', webpage, 'metadata', display_id,
            end_pattern=r'"\s+itemprop="page_data"', transform_source=unescapeHTML)
        video_id = traverse_obj(metadata, (
            (('article', 'media', ...), ('video', None)),
            ('provider_id', (
                'dfp_kv', lambda _, v: v['name'] == 'brightcove_video_id', 'values', ...))),
            get_all=False)
        if not video_id:
            # Not all article pages have videos, expect the error
            raise ExtractorError('Brightcove video ID not found in webpage', expected=bool(is_article))

        config = self._search_json(r'var\s*APP_CONFIG\s*=', webpage, 'config', video_id, default={})
        # Fall back to known-good account/player IDs when APP_CONFIG is absent
        account_id = config.get('brightcove_account_id', '4863540648001')
        player_id = traverse_obj(
            config, 'brightcove_standard_web_player_id',
            'brightcove_standard_no_autoplay_web_player_id',
            'brightcove_modal_web_player_id',
            'brightcove_song_story_web_player_id', default='S1ZcmcOC1x')

        return self.url_result(
            smuggle_url(
                f'https://players.brightcove.net/{account_id}/{player_id}_default/index.html?videoId={video_id}',
                {'referrer': url}),
            'BrightcoveNew', video_id)


class GeniusLyricsIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?genius\.com/(?P<id>[^?/#]+)-lyrics(?:[?/#]|$)'
    _TESTS = [{
        'url': 'https://genius.com/Lil-baby-heyy-lyrics',
        'playlist_mincount': 2,
        'info_dict': {
            'id': '8454545',
            'title': 'Heyy',
            'description': 'Heyy by Lil Baby',
        },
    }, {
        'url': 'https://genius.com/Outkast-two-dope-boyz-in-a-cadillac-lyrics',
        'playlist_mincount': 1,
        'info_dict': {
            'id': '36239',
            'title': 'Two Dope Boyz (In a Cadillac)',
            'description': 'Two Dope Boyz (In a Cadillac) by OutKast',
        },
    }, {
        'url': 'https://genius.com/Playboi-carti-rip-lyrics',
        'playlist_mincount': 1,
        'info_dict': {
            'id': '3710582',
            'title': 'R.I.P.',
            'description': 'R.I.P. by Playboi Carti',
        },
    }]

    def _real_extract(self, url):
        """Build a playlist of the song's media embeds from the preloaded Redux state."""
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        # State is serialized as a JS string passed to JSON.parse(...)
        json_string = self._search_json(
            r'window\.__PRELOADED_STATE__\s*=\s*JSON\.parse\(', webpage, 'json string',
            display_id, transform_source=js_to_json, contains_pattern=r'\'{(?s:.+)}\'')
        song_info = self._parse_json(json_string, display_id)
        song_id = str_or_none(traverse_obj(song_info, ('songPage', 'song')))
        if not song_id:
            raise ExtractorError('Song id not found in webpage')

        title = traverse_obj(
            song_info, ('songPage', 'trackingData', lambda _, x: x['key'] == 'Title', 'value'),
            get_all=False, default='untitled')
        artist = traverse_obj(
            song_info, ('songPage', 'trackingData', lambda _, x: x['key'] == 'Primary Artist', 'value'),
            get_all=False, default='unknown artist')
        media = traverse_obj(
            song_info, ('entities', 'songs', song_id, 'media'), expected_type=list, default=[])

        entries = []
        for m in media:
            if m.get('type') in ('video', 'audio') and m.get('url'):
                if m.get('provider') == 'spotify':
                    # Spotify embeds are DRM'd previews; not downloadable
                    self.to_screen(f'{song_id}: Skipping Spotify audio embed')
                else:
                    entries.append(self.url_result(m['url']))

        return self.playlist_result(entries, song_id, title, f'{title} by {artist}')
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/adn.py
yt_dlp/extractor/adn.py
import base64
import binascii
import json
import os
import random
import time

from .common import InfoExtractor
from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
from ..networking.exceptions import HTTPError
from ..utils import (
    ExtractorError,
    ass_subtitles_timecode,
    bytes_to_long,
    float_or_none,
    int_or_none,
    join_nonempty,
    long_to_bytes,
    parse_iso8601,
    pkcs1pad,
    str_or_none,
    strip_or_none,
    try_get,
    unified_strdate,
    urlencode_postdata,
)
from ..utils.traversal import traverse_obj


class ADNBaseIE(InfoExtractor):
    IE_DESC = 'Animation Digital Network'
    _NETRC_MACHINE = 'animationdigitalnetwork'
    _BASE = 'animationdigitalnetwork.fr'
    _API_BASE_URL = f'https://gw.api.{_BASE}/'
    _PLAYER_BASE_URL = f'{_API_BASE_URL}player/'
    # Shared mutable header store; filled in by login / _real_extract
    _HEADERS = {}
    _LOGIN_ERR_MESSAGE = 'Unable to log in'
    # (modulus, exponent) used to RSA-encrypt the per-session AES key + token
    _RSA_KEY = (0x9B42B08905199A5CCE2026274399CA560ECB209EE9878A708B1C0812E1BB8CB5D1FB7441861147C1A1F2F3A0476DD63A9CAC20D3E983613346850AA6CB38F16DC7D720FD7D86FC6E5B3D5BBC72E14CD0BF9E869F2CEA2CCAD648F1DCE38F1FF916CEFB2D339B64AA0264372344BC775E265E8A852F88144AB0BD9AA06C1A4ABB, 65537)
    # Mappings from ADN's JSON alignment names to SSA \a alignment codes
    _POS_ALIGN_MAP = {
        'start': 1,
        'end': 3,
    }
    _LINE_ALIGN_MAP = {
        'middle': 8,
        'end': 4,
    }


class ADNIE(ADNBaseIE):
    _VALID_URL = r'https?://(?:www\.)?animationdigitalnetwork\.com/(?:(?P<lang>de)/)?video/[^/?#]+/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://animationdigitalnetwork.com/video/558-fruits-basket/9841-episode-1-a-ce-soir',
        'md5': '1c9ef066ceb302c86f80c2b371615261',
        'info_dict': {
            'id': '9841',
            'ext': 'mp4',
            'title': 'Fruits Basket - Episode 1',
            'description': 'md5:14be2f72c3c96809b0ca424b0097d336',
            'series': 'Fruits Basket',
            'duration': 1437,
            'release_date': '20190405',
            'comment_count': int,
            'average_rating': float,
            'season_number': 1,
            'episode': 'À ce soir !',
            'episode_number': 1,
            'thumbnail': str,
            'season': 'Season 1',
        },
        'skip': 'Only available in French and German speaking Europe',
    }, {
        'url': 'https://animationdigitalnetwork.com/de/video/973-the-eminence-in-shadow/23550-folge-1',
        'md5': '5c5651bf5791fa6fcd7906012b9d94e8',
        'info_dict': {
            'id': '23550',
            'ext': 'mp4',
            'episode_number': 1,
            'duration': 1417,
            'release_date': '20231004',
            'series': 'The Eminence in Shadow',
            'season_number': 2,
            'episode': str,
            'title': str,
            'thumbnail': str,
            'season': 'Season 2',
            'comment_count': int,
            'average_rating': float,
            'description': str,
        },
        # 'skip': 'Only available in French and German speaking Europe',
    }]

    def _get_subtitles(self, sub_url, video_id):
        """Download, AES-decrypt and convert ADN subtitles to JSON + SSA tracks."""
        if not sub_url:
            return None

        enc_subtitles = self._download_webpage(
            sub_url, video_id, 'Downloading subtitles location', fatal=False) or '{}'
        subtitle_location = (self._parse_json(enc_subtitles, video_id, fatal=False) or {}).get('location')
        if subtitle_location:
            enc_subtitles = self._download_webpage(
                subtitle_location, video_id, 'Downloading subtitles data',
                fatal=False, headers={'Origin': 'https://' + self._BASE})
        if not enc_subtitles:
            return None

        # http://animationdigitalnetwork.fr/components/com_vodvideo/videojs/adn-vjs.min.js
        # Payload layout: first 24 base64 chars are the IV, the rest is ciphertext;
        # the AES key is our random _K plus a fixed site suffix.
        dec_subtitles = unpad_pkcs7(aes_cbc_decrypt_bytes(
            base64.b64decode(enc_subtitles[24:]),
            binascii.unhexlify(self._K + '7fac1178830cfe0c'),
            base64.b64decode(enc_subtitles[:24])))
        subtitles_json = self._parse_json(dec_subtitles.decode(), None, fatal=False)
        if not subtitles_json:
            return None

        subtitles = {}
        for sub_lang, sub in subtitles_json.items():
            ssa = '''[Script Info]
ScriptType:V4.00
[V4 Styles]
Format: Name,Fontname,Fontsize,PrimaryColour,SecondaryColour,TertiaryColour,BackColour,Bold,Italic,BorderStyle,Outline,Shadow,Alignment,MarginL,MarginR,MarginV,AlphaLevel,Encoding
Style: Default,Arial,18,16777215,16777215,16777215,0,-1,0,1,1,0,2,20,20,20,0,0
[Events]
Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
            for current in sub:
                start, end, text, line_align, position_align = (
                    float_or_none(current.get('startTime')),
                    float_or_none(current.get('endTime')),
                    current.get('text'), current.get('lineAlign'),
                    current.get('positionAlign'))
                if start is None or end is None or text is None:
                    continue
                alignment = self._POS_ALIGN_MAP.get(position_align, 2) + self._LINE_ALIGN_MAP.get(line_align, 0)
                ssa += os.linesep + 'Dialogue: Marked=0,{},{},Default,,0,0,0,,{}{}'.format(
                    ass_subtitles_timecode(start),
                    ass_subtitles_timecode(end),
                    '{\\a%d}' % alignment if alignment != 2 else '',
                    text.replace('\n', '\\N').replace('<i>', '{\\i1}').replace('</i>', '{\\i0}'))

            if sub_lang == 'vostf':
                sub_lang = 'fr'
            elif sub_lang == 'vostde':
                sub_lang = 'de'
            subtitles.setdefault(sub_lang, []).extend([{
                'ext': 'json',
                'data': json.dumps(sub),
            }, {
                'ext': 'ssa',
                'data': ssa,
            }])
        return subtitles

    def _perform_login(self, username, password):
        """Log in and stash the bearer token in the shared headers; warn on failure."""
        try:
            access_token = (self._download_json(
                self._API_BASE_URL + 'authentication/login', None,
                'Logging in', self._LOGIN_ERR_MESSAGE, fatal=False,
                data=urlencode_postdata({
                    'password': password,
                    'rememberMe': False,
                    'source': 'Web',
                    'username': username,
                })) or {}).get('accessToken')
            if access_token:
                self._HEADERS['Authorization'] = f'Bearer {access_token}'
        except ExtractorError as e:
            message = None
            if isinstance(e.cause, HTTPError) and e.cause.status == 401:
                resp = self._parse_json(
                    e.cause.response.read().decode(), None, fatal=False) or {}
                message = resp.get('message') or resp.get('code')
            self.report_warning(message or self._LOGIN_ERR_MESSAGE)

    def _real_extract(self, url):
        lang, video_id = self._match_valid_url(url).group('lang', 'id')
        self._HEADERS['X-Target-Distribution'] = lang or 'fr'
        video_base_url = self._PLAYER_BASE_URL + f'video/{video_id}/'
        player = self._download_json(
            video_base_url + 'configuration', video_id,
            'Downloading player config JSON metadata',
            headers=self._HEADERS)['player']
        options = player['options']

        user = options['user']
        if not user.get('hasAccess'):
            start_date = traverse_obj(options, ('video', 'startDate', {str}))
            if (parse_iso8601(start_date) or 0) > time.time():
                raise ExtractorError(f'This video is not available yet. Release date: {start_date}', expected=True)
            self.raise_login_required('This video requires a subscription', method='password')

        token = self._download_json(
            user.get('refreshTokenUrl') or (self._PLAYER_BASE_URL + 'refresh/token'),
            video_id, 'Downloading access token', headers={
                'X-Player-Refresh-Token': user['refreshToken'],
            }, data=b'')['token']

        links_url = try_get(options, lambda x: x['video']['url']) or (video_base_url + 'link')
        # Random 16-hex-char AES key half; also used later to decrypt subtitles
        self._K = ''.join(random.choices('0123456789abcdef', k=16))
        message = list(json.dumps({
            'k': self._K,
            't': token,
        }).encode())

        # Sometimes authentication fails for no good reason, retry with
        # a different random padding
        links_data = None
        for _ in range(3):
            padded_message = bytes(pkcs1pad(message, 128))
            n, e = self._RSA_KEY
            encrypted_message = long_to_bytes(pow(bytes_to_long(padded_message), e, n))
            authorization = base64.b64encode(encrypted_message).decode()

            try:
                links_data = self._download_json(
                    links_url, video_id, 'Downloading links JSON metadata', headers={
                        'X-Player-Token': authorization,
                        **self._HEADERS,
                    }, query={
                        'freeWithAds': 'true',
                        'adaptive': 'false',
                        'withMetadata': 'true',
                        'source': 'Web',
                    })
                break
            except ExtractorError as e:
                if not isinstance(e.cause, HTTPError):
                    raise e

                if e.cause.status == 401:
                    # This usually goes away with a different random pkcs1pad, so retry
                    continue

                error = self._parse_json(e.cause.response.read(), video_id)
                message = error.get('message')
                if e.cause.status == 403 and error.get('code') == 'player-bad-geolocation-country':
                    self.raise_geo_restricted(msg=message)
                raise ExtractorError(message)
        else:
            raise ExtractorError('Giving up retrying')

        links = links_data.get('links') or {}
        metas = links_data.get('metadata') or {}
        sub_url = (links.get('subtitles') or {}).get('all')
        video_info = links_data.get('video') or {}
        title = metas['title']

        formats = []
        for format_id, qualities in (links.get('streaming') or {}).items():
            if not isinstance(qualities, dict):
                continue
            for quality, load_balancer_url in qualities.items():
                load_balancer_data = self._download_json(
                    load_balancer_url, video_id,
                    f'Downloading {format_id} {quality} JSON metadata',
                    headers=self._HEADERS, fatal=False) or {}
                m3u8_url = load_balancer_data.get('location')
                if not m3u8_url:
                    continue
                m3u8_formats = self._extract_m3u8_formats(
                    m3u8_url, video_id, 'mp4', 'm3u8_native',
                    m3u8_id=format_id, fatal=False)
                # Tag audio language by ADN's format naming (vf=French, vde=German)
                if format_id == 'vf':
                    for f in m3u8_formats:
                        f['language'] = 'fr'
                elif format_id == 'vde':
                    for f in m3u8_formats:
                        f['language'] = 'de'
                formats.extend(m3u8_formats)

        if not formats:
            self.raise_login_required('This video requires a subscription', method='password')

        video = (self._download_json(
            self._API_BASE_URL + f'video/{video_id}', video_id,
            'Downloading additional video metadata', fatal=False,
            headers=self._HEADERS) or {}).get('video') or {}
        show = video.get('show') or {}

        return {
            'id': video_id,
            'title': title,
            'description': strip_or_none(metas.get('summary') or video.get('summary')),
            'thumbnail': video_info.get('image') or player.get('image'),
            'formats': formats,
            'subtitles': self.extract_subtitles(sub_url, video_id),
            'episode': metas.get('subtitle') or video.get('name'),
            'episode_number': int_or_none(video.get('shortNumber')),
            'series': show.get('title'),
            'season_number': int_or_none(video.get('season')),
            'duration': int_or_none(video_info.get('duration') or video.get('duration')),
            'release_date': unified_strdate(video.get('releaseDate')),
            'average_rating': float_or_none(video.get('rating') or metas.get('rating')),
            'comment_count': int_or_none(video.get('commentsCount')),
        }


class ADNSeasonIE(ADNBaseIE):
    _VALID_URL = r'https?://(?:www\.)?animationdigitalnetwork\.com/(?:(?P<lang>de)/)?video/(?P<id>\d+)[^/?#]*/?(?:$|[#?])'
    _TESTS = [{
        'url': 'https://animationdigitalnetwork.com/video/911-tokyo-mew-mew-new',
        'playlist_count': 12,
        'info_dict': {
            'id': '911',
            'title': 'Tokyo Mew Mew New',
        },
        # 'skip': 'Only available in French end German speaking Europe',
    }]

    def _real_extract(self, url):
        """List every episode of a show as ADNIE playlist entries."""
        lang, video_show_slug = self._match_valid_url(url).group('lang', 'id')
        self._HEADERS['X-Target-Distribution'] = lang or 'fr'
        show = self._download_json(
            f'{self._API_BASE_URL}show/{video_show_slug}/', video_show_slug,
            'Downloading show JSON metadata', headers=self._HEADERS)['show']
        show_id = str(show['id'])
        episodes = self._download_json(
            f'{self._API_BASE_URL}video/show/{show_id}', video_show_slug,
            'Downloading episode list', headers=self._HEADERS, query={
                'order': 'asc',
                'limit': '-1',
            })

        def entries():
            for episode_id in traverse_obj(episodes, ('videos', ..., 'id', {str_or_none})):
                yield self.url_result(join_nonempty(
                    'https://animationdigitalnetwork.com', lang, 'video',
                    video_show_slug, episode_id, delim='/'), ADNIE, episode_id)

        return self.playlist_result(entries(), show_id, show.get('title'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/rtlnl.py
yt_dlp/extractor/rtlnl.py
from .common import InfoExtractor
from ..utils import (
    int_or_none,
    parse_duration,
)


class RtlNlIE(InfoExtractor):
    IE_NAME = 'rtl.nl'
    IE_DESC = 'rtl.nl and rtlxl.nl'
    _EMBED_REGEX = [r'<iframe[^>]+?\bsrc=(?P<q1>[\'"])(?P<url>(?:https?:)?//(?:(?:www|static)\.)?rtl\.nl/(?:system/videoplayer/[^"]+(?:video_)?)?embed[^"]+)(?P=q1)']
    _VALID_URL = r'''(?x)
        https?://(?:(?:www|static)\.)?
        (?:
            rtlxl\.nl/(?:[^\#]*\#!|programma)/[^/]+/|
            rtl\.nl/(?:(?:system/videoplayer/(?:[^/]+/)+(?:video_)?embed\.html|embed)\b.+?\buuid=|video/)|
            embed\.rtl\.nl/\#uuid=
        )
        (?P<id>[0-9a-f-]+)'''

    _TESTS = [{
        # new URL schema
        'url': 'https://www.rtlxl.nl/programma/rtl-nieuws/0bd1384d-d970-3086-98bb-5c104e10c26f',
        'md5': '490428f1187b60d714f34e1f2e3af0b6',
        'info_dict': {
            'id': '0bd1384d-d970-3086-98bb-5c104e10c26f',
            'ext': 'mp4',
            'title': 'RTL Nieuws',
            'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
            'timestamp': 1593293400,
            'upload_date': '20200627',
            'duration': 661.08,
        },
    }, {
        # old URL schema
        'url': 'http://www.rtlxl.nl/#!/rtl-nieuws-132237/82b1aad1-4a14-3d7b-b554-b0aed1b2c416',
        'md5': '473d1946c1fdd050b2c0161a4b13c373',
        'info_dict': {
            'id': '82b1aad1-4a14-3d7b-b554-b0aed1b2c416',
            'ext': 'mp4',
            'title': 'RTL Nieuws',
            'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
            'timestamp': 1461951000,
            'upload_date': '20160429',
            'duration': 1167.96,
        },
        'skip': '404',
    }, {
        # best format available a3t
        'url': 'http://www.rtl.nl/system/videoplayer/derden/rtlnieuws/video_embed.html#uuid=84ae5571-ac25-4225-ae0c-ef8d9efb2aed/autoplay=false',
        'md5': 'dea7474214af1271d91ef332fb8be7ea',
        'info_dict': {
            'id': '84ae5571-ac25-4225-ae0c-ef8d9efb2aed',
            'ext': 'mp4',
            'timestamp': 1424039400,
            'title': 'RTL Nieuws - Nieuwe beelden Kopenhagen: chaos direct na aanslag',
            'thumbnail': r're:^https?://screenshots\.rtl\.nl/(?:[^/]+/)*sz=[0-9]+x[0-9]+/uuid=84ae5571-ac25-4225-ae0c-ef8d9efb2aed$',
            'upload_date': '20150215',
            'description': 'Er zijn nieuwe beelden vrijgegeven die vlak na de aanslag in Kopenhagen zijn gemaakt. Op de video is goed te zien hoe omstanders zich bekommeren om één van de slachtoffers, terwijl de eerste agenten ter plaatse komen.',
        },
    }, {
        # empty synopsis and missing episodes (see https://github.com/ytdl-org/youtube-dl/issues/6275)
        # best format available nettv
        'url': 'http://www.rtl.nl/system/videoplayer/derden/rtlnieuws/video_embed.html#uuid=f536aac0-1dc3-4314-920e-3bd1c5b3811a/autoplay=false',
        'info_dict': {
            'id': 'f536aac0-1dc3-4314-920e-3bd1c5b3811a',
            'ext': 'mp4',
            'title': 'RTL Nieuws - Meer beelden van overval juwelier',
            'thumbnail': r're:^https?://screenshots\.rtl\.nl/(?:[^/]+/)*sz=[0-9]+x[0-9]+/uuid=f536aac0-1dc3-4314-920e-3bd1c5b3811a$',
            'timestamp': 1437233400,
            'upload_date': '20150718',
            'duration': 30.474,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # encrypted m3u8 streams, georestricted
        'url': 'http://www.rtlxl.nl/#!/afl-2-257632/52a74543-c504-4cde-8aa8-ec66fe8d68a7',
        'only_matching': True,
    }, {
        'url': 'http://www.rtl.nl/system/videoplayer/derden/embed.html#!/uuid=bb0353b0-d6a4-1dad-90e9-18fe75b8d1f0',
        'only_matching': True,
    }, {
        'url': 'http://rtlxl.nl/?_ga=1.204735956.572365465.1466978370#!/rtl-nieuws-132237/3c487912-023b-49ac-903e-2c5d79f8410f',
        'only_matching': True,
    }, {
        'url': 'https://www.rtl.nl/video/c603c9c2-601d-4b5e-8175-64f1e942dc7d/',
        'only_matching': True,
    }, {
        'url': 'https://static.rtl.nl/embed/?uuid=1a2970fc-5c0b-43ff-9fdc-927e39e6d1bc&autoplay=false&publicatiepunt=rtlnieuwsnl',
        'only_matching': True,
    }, {
        # new embed URL schema
        'url': 'https://embed.rtl.nl/#uuid=84ae5571-ac25-4225-ae0c-ef8d9efb2aed/autoplay=false',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        uuid = self._match_id(url)
        info = self._download_json(
            f'http://www.rtl.nl/system/s4m/vfd/version=2/uuid={uuid}/fmt=adaptive/', uuid)

        material = info['material'][0]
        title = info['abstracts'][0]['name']
        subtitle = material.get('title')
        if subtitle:
            title += f' - {subtitle}'
        description = material.get('synopsis')

        meta = info.get('meta', {})
        videopath = material['videopath']
        m3u8_url = meta.get('videohost', 'http://manifest.us.rtl.nl') + videopath

        formats = self._extract_m3u8_formats(
            m3u8_url, uuid, 'mp4', m3u8_id='hls', fatal=False)

        thumbnails = []
        # BUGFIX: the second key previously carried literal double quotes
        # ('"thumb_base_url"'), which can never match a meta key, so
        # thumb-based thumbnails were silently dropped.
        for p in ('poster_base_url', 'thumb_base_url'):
            if not meta.get(p):
                continue

            thumbnails.append({
                'url': self._proto_relative_url(meta[p] + uuid),
                'width': int_or_none(self._search_regex(
                    r'/sz=([0-9]+)', meta[p], 'thumbnail width', fatal=False)),
                'height': int_or_none(self._search_regex(
                    r'/sz=[0-9]+x([0-9]+)', meta[p], 'thumbnail height', fatal=False)),
            })

        return {
            'id': uuid,
            'title': title,
            'formats': formats,
            'timestamp': material['original_date'],
            'description': description,
            'duration': parse_duration(material.get('duration')),
            'thumbnails': thumbnails,
        }


class RTLLuBaseIE(InfoExtractor):
    # Regexes matching the custom <rtl-player>/<rtl-audioplayer> web components
    _MEDIA_REGEX = {
        'video': r'<rtl-player\s[^>]*\bhls\s*=\s*"([^"]+)',
        'audio': r'<rtl-audioplayer\s[^>]*\bsrc\s*=\s*"([^"]+)',
        'thumbnail': r'<rtl-player\s[^>]*\bposter\s*=\s*"([^"]+)',
    }

    def get_media_url(self, webpage, video_id, media_type):
        """Return the first URL of the given media type found in the page, or None."""
        return self._search_regex(self._MEDIA_REGEX[media_type], webpage, f'{media_type} url', default=None)

    def get_formats_and_subtitles(self, webpage, video_id):
        """Collect HLS video formats/subtitles and/or a direct audio format."""
        video_url, audio_url = self.get_media_url(webpage, video_id, 'video'), self.get_media_url(webpage, video_id, 'audio')

        formats, subtitles = [], {}
        if video_url is not None:
            formats, subtitles = self._extract_m3u8_formats_and_subtitles(video_url, video_id)
        if audio_url is not None:
            formats.append({'url': audio_url, 'ext': 'mp3', 'vcodec': 'none'})

        return formats, subtitles

    def _real_extract(self, url):
        video_id = self._match_id(url)
        is_live = video_id in ('live', 'live-2', 'lauschteren')

        # TODO: extract comment from https://www.rtl.lu/comments?status=1&order=desc&context=news|article|<video_id>
        # we can get the context from <rtl-comments context=<context>> in the webpage
        webpage = self._download_webpage(url, video_id)

        formats, subtitles = self.get_formats_and_subtitles(webpage, video_id)

        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage, default=None),
            'formats': formats,
            'subtitles': subtitles,
            'thumbnail': self.get_media_url(webpage, video_id, 'thumbnail') or self._og_search_thumbnail(webpage, default=None),
            'is_live': is_live,
        }


class RTLLuTeleVODIE(RTLLuBaseIE):
    IE_NAME = 'rtl.lu:tele-vod'
    _VALID_URL = r'https?://(?:www\.)?rtl\.lu/(tele/(?P<slug>[\w-]+)/v/|video/)(?P<id>\d+)(\.html)?'
    _TESTS = [{
        'url': 'https://www.rtl.lu/tele/de-journal-vun-der-tele/v/3266757.html',
        'info_dict': {
            'id': '3266757',
            'title': 'Informatiounsversammlung Héichwaasser',
            'ext': 'mp4',
            'thumbnail': 'https://replay-assets.rtl.lu/2021/11/16/d3647fc4-470d-11ec-adc2-3a00abd6e90f_00008.jpg',
            'description': 'md5:b1db974408cc858c9fd241812e4a2a14',
        },
    }, {
        'url': 'https://www.rtl.lu/video/3295215',
        'info_dict': {
            'id': '3295215',
            'title': 'Kulturassisen iwwer d\'Bestandsopnam vum Lëtzebuerger Konscht',
            'ext': 'mp4',
            'thumbnail': 'https://replay-assets.rtl.lu/2022/06/28/0000_3295215_0000.jpg',
            'description': 'md5:85bcd4e0490aa6ec969d9bf16927437b',
        },
    }]


class RTLLuArticleIE(RTLLuBaseIE):
    IE_NAME = 'rtl.lu:article'
    _VALID_URL = r'https?://(?:(www|5minutes|today)\.)rtl\.lu/(?:[\w-]+)/(?:[\w-]+)/a/(?P<id>\d+)\.html'
    _TESTS = [{
        # Audio-only
        'url': 'https://www.rtl.lu/sport/news/a/1934360.html',
        'info_dict': {
            'id': '1934360',
            'ext': 'mp3',
            'thumbnail': 'https://static.rtl.lu/rtl2008.lu/nt/p/2022/06/28/19/e4b37d66ddf00bab4c45617b91a5bb9b.jpeg',
            'description': 'md5:5eab4a2a911c1fff7efc1682a38f9ef7',
            'title': 'md5:40aa85f135578fbd549d3c9370321f99',
        },
    }, {
        # 5minutes
        'url': 'https://5minutes.rtl.lu/espace-frontaliers/frontaliers-en-questions/a/1853173.html',
        'info_dict': {
            'id': '1853173',
            'ext': 'mp4',
            'description': 'md5:ac031da0740e997a5cf4633173634fee',
            'title': 'md5:87e17722ed21af0f24be3243f4ec0c46',
            'thumbnail': 'https://replay-assets.rtl.lu/2022/01/26/screenshot_20220126104933_3274749_12b249833469b0d6e4440a1dec83cdfa.jpg',
        },
    }, {
        # today.lu
        'url': 'https://today.rtl.lu/entertainment/news/a/1936203.html',
        'info_dict': {
            'id': '1936203',
            'ext': 'mp4',
            'title': 'Once Upon A Time...zu Lëtzebuerg: The Three Witches\' Tower',
            'description': 'The witchy theme continues in the latest episode of Once Upon A Time...',
            'thumbnail': 'https://replay-assets.rtl.lu/2022/07/02/screenshot_20220702122859_3290019_412dc5185951b7f6545a4039c8be9235.jpg',
        },
    }]


class RTLLuLiveIE(RTLLuBaseIE):
    _VALID_URL = r'https?://www\.rtl\.lu/(?:tele|radio)/(?P<id>live(?:-\d+)?|lauschteren)'
    _TESTS = [{
        # Tele:live
        'url': 'https://www.rtl.lu/tele/live',
        'info_dict': {
            'id': 'live',
            'ext': 'mp4',
            'live_status': 'is_live',
            'title': r're:RTL - Télé LIVE \d{4}-\d{2}-\d{2} \d{2}:\d{2}',
            'thumbnail': 'https://static.rtl.lu/livestream/channel1.jpg',
        },
    }, {
        # Tele:live-2
        'url': 'https://www.rtl.lu/tele/live-2',
        'info_dict': {
            'id': 'live-2',
            'ext': 'mp4',
            'live_status': 'is_live',
            'title': r're:RTL - Télé LIVE \d{4}-\d{2}-\d{2} \d{2}:\d{2}',
            'thumbnail': 'https://static.rtl.lu/livestream/channel2.jpg',
        },
    }, {
        # Radio:lauschteren
        'url': 'https://www.rtl.lu/radio/lauschteren',
        'info_dict': {
            'id': 'lauschteren',
            'ext': 'mp4',
            'live_status': 'is_live',
            'title': r're:RTL - Radio LIVE \d{4}-\d{2}-\d{2} \d{2}:\d{2}',
            'thumbnail': 'https://static.rtl.lu/livestream/rtlradiowebtv.jpg',
        },
    }]


class RTLLuRadioIE(RTLLuBaseIE):
    _VALID_URL = r'https?://www\.rtl\.lu/radio/(?:[\w-]+)/s/(?P<id>\d+)(\.html)?'
    _TESTS = [{
        'url': 'https://www.rtl.lu/radio/5-vir-12/s/4033058.html',
        'info_dict': {
            'id': '4033058',
            'ext': 'mp3',
            'description': 'md5:f855a4f3e3235393ae47ed1db5d934b9',
            'title': '5 vir 12 - Stau um Stau',
            'thumbnail': 'https://static.rtl.lu/rtlg//2022/06/24/c9c19e5694a14be46a3647a3760e1f62.jpg',
        },
    }]
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/bilibili.py
yt_dlp/extractor/bilibili.py
import base64 import functools import hashlib import itertools import json import math import random import re import string import time import urllib.parse import uuid from .common import InfoExtractor, SearchInfoExtractor from ..dependencies import Cryptodome from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, GeoRestrictedError, InAdvancePagedList, OnDemandPagedList, bool_or_none, determine_ext, filter_dict, float_or_none, format_field, get_element_by_class, int_or_none, join_nonempty, make_archive_id, merge_dicts, mimetype2ext, parse_count, parse_qs, parse_resolution, qualities, smuggle_url, srt_subtitles_timecode, str_or_none, traverse_obj, unified_timestamp, unsmuggle_url, url_or_none, urlencode_postdata, variadic, ) class BilibiliBaseIE(InfoExtractor): _HEADERS = {'Referer': 'https://www.bilibili.com/'} _FORMAT_ID_RE = re.compile(r'-(\d+)\.m4s\?') _WBI_KEY_CACHE_TIMEOUT = 30 # exact expire timeout is unclear, use 30s for one session _wbi_key_cache = {} @property def is_logged_in(self): return bool(self._get_cookies('https://api.bilibili.com').get('SESSDATA')) def _check_missing_formats(self, play_info, formats): parsed_qualities = set(traverse_obj(formats, (..., 'quality'))) missing_formats = join_nonempty(*[ traverse_obj(fmt, 'new_description', 'display_desc', 'quality') for fmt in traverse_obj(play_info, ( 'support_formats', lambda _, v: v['quality'] not in parsed_qualities))], delim=', ') if missing_formats: self.to_screen( f'Format(s) {missing_formats} are missing; you have to ' f'become a premium member to download them. 
{self._login_hint()}') def extract_formats(self, play_info): format_names = { r['quality']: traverse_obj(r, 'new_description', 'display_desc') for r in traverse_obj(play_info, ('support_formats', lambda _, v: v['quality'])) } audios = traverse_obj(play_info, ('dash', (None, 'dolby'), 'audio', ..., {dict})) flac_audio = traverse_obj(play_info, ('dash', 'flac', 'audio')) if flac_audio: audios.append(flac_audio) formats = [{ 'url': traverse_obj(audio, 'baseUrl', 'base_url', 'url'), 'ext': mimetype2ext(traverse_obj(audio, 'mimeType', 'mime_type')), 'acodec': traverse_obj(audio, ('codecs', {str.lower})), 'vcodec': 'none', 'tbr': float_or_none(audio.get('bandwidth'), scale=1000), 'filesize': int_or_none(audio.get('size')), 'format_id': str_or_none(audio.get('id')), } for audio in audios] formats.extend({ 'url': traverse_obj(video, 'baseUrl', 'base_url', 'url'), 'ext': mimetype2ext(traverse_obj(video, 'mimeType', 'mime_type')), 'fps': float_or_none(traverse_obj(video, 'frameRate', 'frame_rate')), 'width': int_or_none(video.get('width')), 'height': int_or_none(video.get('height')), 'vcodec': video.get('codecs'), 'acodec': 'none' if audios else None, 'dynamic_range': {126: 'DV', 125: 'HDR10'}.get(int_or_none(video.get('id'))), 'tbr': float_or_none(video.get('bandwidth'), scale=1000), 'filesize': int_or_none(video.get('size')), 'quality': int_or_none(video.get('id')), 'format_id': traverse_obj( video, (('baseUrl', 'base_url'), {self._FORMAT_ID_RE.search}, 1), ('id', {str_or_none}), get_all=False), 'format': format_names.get(video.get('id')), } for video in traverse_obj(play_info, ('dash', 'video', ...))) if formats: self._check_missing_formats(play_info, formats) fragments = traverse_obj(play_info, ('durl', lambda _, v: url_or_none(v['url']), { 'url': ('url', {url_or_none}), 'duration': ('length', {float_or_none(scale=1000)}), 'filesize': ('size', {int_or_none}), })) if fragments: formats.append({ 'url': fragments[0]['url'], 'filesize': sum(traverse_obj(fragments, (..., 
'filesize'))), **({ 'fragments': fragments, 'protocol': 'http_dash_segments', } if len(fragments) > 1 else {}), **traverse_obj(play_info, { 'quality': ('quality', {int_or_none}), 'format_id': ('quality', {str_or_none}), 'format_note': ('quality', {lambda x: format_names.get(x)}), 'duration': ('timelength', {float_or_none(scale=1000)}), }), **parse_resolution(format_names.get(play_info.get('quality'))), }) return formats def _get_wbi_key(self, video_id): if time.time() < self._wbi_key_cache.get('ts', 0) + self._WBI_KEY_CACHE_TIMEOUT: return self._wbi_key_cache['key'] session_data = self._download_json( 'https://api.bilibili.com/x/web-interface/nav', video_id, note='Downloading wbi sign') lookup = ''.join(traverse_obj(session_data, ( 'data', 'wbi_img', ('img_url', 'sub_url'), {lambda x: x.rpartition('/')[2].partition('.')[0]}))) # from getMixinKey() in the vendor js mixin_key_enc_tab = [ 46, 47, 18, 2, 53, 8, 23, 32, 15, 50, 10, 31, 58, 3, 45, 35, 27, 43, 5, 49, 33, 9, 42, 19, 29, 28, 14, 39, 12, 38, 41, 13, 37, 48, 7, 16, 24, 55, 40, 61, 26, 17, 0, 1, 60, 51, 30, 4, 22, 25, 54, 21, 56, 59, 6, 63, 57, 62, 11, 36, 20, 34, 44, 52, ] self._wbi_key_cache.update({ 'key': ''.join(lookup[i] for i in mixin_key_enc_tab)[:32], 'ts': time.time(), }) return self._wbi_key_cache['key'] def _sign_wbi(self, params, video_id): params['wts'] = round(time.time()) params = { k: ''.join(filter(lambda char: char not in "!'()*", str(v))) for k, v in sorted(params.items()) } query = urllib.parse.urlencode(params) params['w_rid'] = hashlib.md5(f'{query}{self._get_wbi_key(video_id)}'.encode()).hexdigest() return params def _download_playinfo(self, bvid, cid, headers=None, query=None): params = {'bvid': bvid, 'cid': cid, 'fnval': 4048, **(query or {})} if self.is_logged_in: params.pop('try_look', None) if qn := params.get('qn'): note = f'Downloading video format {qn} for cid {cid}' else: note = f'Downloading video formats for cid {cid}' return self._download_json( 
'https://api.bilibili.com/x/player/wbi/playurl', bvid, query=self._sign_wbi(params, bvid), headers=headers, note=note)['data'] def json2srt(self, json_data): srt_data = '' for idx, line in enumerate(json_data.get('body') or []): srt_data += (f'{idx + 1}\n' f'{srt_subtitles_timecode(line["from"])} --> {srt_subtitles_timecode(line["to"])}\n' f'{line["content"]}\n\n') return srt_data def _get_subtitles(self, video_id, cid, aid=None): subtitles = { 'danmaku': [{ 'ext': 'xml', 'url': f'https://comment.bilibili.com/{cid}.xml', }], } video_info = self._download_json( 'https://api.bilibili.com/x/player/wbi/v2', video_id, query={'aid': aid, 'cid': cid} if aid else {'bvid': video_id, 'cid': cid}, note=f'Extracting subtitle info {cid}', headers=self._HEADERS) if traverse_obj(video_info, ('data', 'need_login_subtitle')): self.report_warning( f'Subtitles are only available when logged in. {self._login_hint()}', only_once=True) for s in traverse_obj(video_info, ( 'data', 'subtitle', 'subtitles', lambda _, v: v['subtitle_url'] and v['lan'])): subtitles.setdefault(s['lan'], []).append({ 'ext': 'srt', 'data': self.json2srt(self._download_json(s['subtitle_url'], video_id)), }) return subtitles def _get_chapters(self, aid, cid): chapters = aid and cid and self._download_json( 'https://api.bilibili.com/x/player/wbi/v2', aid, query={'aid': aid, 'cid': cid}, note='Extracting chapters', fatal=False, headers=self._HEADERS) return traverse_obj(chapters, ('data', 'view_points', ..., { 'title': 'content', 'start_time': 'from', 'end_time': 'to', })) or None def _get_comments(self, aid): for idx in itertools.count(1): replies = traverse_obj( self._download_json( f'https://api.bilibili.com/x/v2/reply?pn={idx}&oid={aid}&type=1&jsonp=jsonp&sort=2&_=1567227301685', aid, note=f'Extracting comments from page {idx}', fatal=False), ('data', 'replies')) if not replies: return for children in map(self._get_all_children, replies): yield from children def _get_all_children(self, reply): yield { 'author': 
traverse_obj(reply, ('member', 'uname')), 'author_id': traverse_obj(reply, ('member', 'mid')), 'id': reply.get('rpid'), 'text': traverse_obj(reply, ('content', 'message')), 'timestamp': reply.get('ctime'), 'parent': reply.get('parent') or 'root', } for children in map(self._get_all_children, traverse_obj(reply, ('replies', ...))): yield from children def _get_episodes_from_season(self, ss_id, url): season_info = self._download_json( 'https://api.bilibili.com/pgc/web/season/section', ss_id, note='Downloading season info', query={'season_id': ss_id}, headers={'Referer': url, **self.geo_verification_headers()}) for entry in traverse_obj(season_info, ( 'result', 'main_section', 'episodes', lambda _, v: url_or_none(v['share_url']) and v['id'])): yield self.url_result(entry['share_url'], BiliBiliBangumiIE, str_or_none(entry.get('id'))) def _get_divisions(self, video_id, graph_version, edges, edge_id, cid_edges=None): cid_edges = cid_edges or {} division_data = self._download_json( 'https://api.bilibili.com/x/stein/edgeinfo_v2', video_id, query={'graph_version': graph_version, 'edge_id': edge_id, 'bvid': video_id}, note=f'Extracting divisions from edge {edge_id}') edges.setdefault(edge_id, {}).update( traverse_obj(division_data, ('data', 'story_list', lambda _, v: v['edge_id'] == edge_id, { 'title': ('title', {str}), 'cid': ('cid', {int_or_none}), }), get_all=False)) edges[edge_id].update(traverse_obj(division_data, ('data', { 'title': ('title', {str}), 'choices': ('edges', 'questions', ..., 'choices', ..., { 'edge_id': ('id', {int_or_none}), 'cid': ('cid', {int_or_none}), 'text': ('option', {str}), }), }))) # use dict to combine edges that use the same video section (same cid) cid_edges.setdefault(edges[edge_id]['cid'], {})[edge_id] = edges[edge_id] for choice in traverse_obj(edges, (edge_id, 'choices', ...)): if choice['edge_id'] not in edges: edges[choice['edge_id']] = {'cid': choice['cid']} self._get_divisions(video_id, graph_version, edges, choice['edge_id'], 
cid_edges=cid_edges) return cid_edges def _get_interactive_entries(self, video_id, cid, metainfo, headers=None): graph_version = traverse_obj( self._download_json( 'https://api.bilibili.com/x/player/wbi/v2', video_id, 'Extracting graph version', query={'bvid': video_id, 'cid': cid}, headers=headers), ('data', 'interaction', 'graph_version', {int_or_none})) cid_edges = self._get_divisions(video_id, graph_version, {1: {'cid': cid}}, 1) for cid, edges in cid_edges.items(): play_info = self._download_playinfo(video_id, cid, headers=headers, query={'try_look': 1}) yield { **metainfo, 'id': f'{video_id}_{cid}', 'title': f'{metainfo.get("title")} - {next(iter(edges.values())).get("title")}', 'formats': self.extract_formats(play_info), 'description': f'{json.dumps(edges, ensure_ascii=False)}\n{metainfo.get("description", "")}', 'duration': float_or_none(play_info.get('timelength'), scale=1000), 'subtitles': self.extract_subtitles(video_id, cid), } class BiliBiliIE(BilibiliBaseIE): _VALID_URL = r'https?://(?:www\.)?bilibili\.com/(?:video/|festival/[^/?#]+\?(?:[^#]*&)?bvid=)(?P<prefix>[aAbB][vV])(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://www.bilibili.com/video/BV13x41117TL', 'info_dict': { 'id': 'BV13x41117TL', 'title': '阿滴英文|英文歌分享#6 "Closer', 'ext': 'mp4', 'description': '滴妹今天唱Closer給你聽! 有史以来,被推最多次也是最久的歌曲,其实歌词跟我原本想像差蛮多的,不过还是好听! 
微博@阿滴英文', 'uploader_id': '65880958', 'uploader': '阿滴英文', 'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$', 'duration': 554.117, 'tags': list, 'comment_count': int, 'upload_date': '20170301', 'timestamp': 1488353834, 'like_count': int, 'view_count': int, '_old_archive_ids': ['bilibili 8903802_part1'], }, }, { 'note': 'old av URL version', 'url': 'http://www.bilibili.com/video/av1074402/', 'info_dict': { 'id': 'BV11x411K7CN', 'ext': 'mp4', 'title': '【金坷垃】金泡沫', 'uploader': '菊子桑', 'uploader_id': '156160', 'duration': 308.36, 'upload_date': '20140420', 'timestamp': 1397983878, 'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923', 'like_count': int, 'comment_count': int, 'view_count': int, 'tags': list, 'thumbnail': r're:^https?://.*\.(jpg|jpeg)$', '_old_archive_ids': ['bilibili 1074402_part1'], }, 'params': {'skip_download': True}, }, { 'note': 'Anthology', 'url': 'https://www.bilibili.com/video/BV1bK411W797', 'info_dict': { 'id': 'BV1bK411W797', 'title': '物语中的人物是如何吐槽自己的OP的', }, 'playlist_count': 23, 'playlist': [{ 'info_dict': { 'id': 'BV1bK411W797_p1', 'ext': 'mp4', 'title': '物语中的人物是如何吐槽自己的OP的 p01 Staple Stable/战场原+羽川', 'tags': 'count:10', 'timestamp': 1589601697, 'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$', 'uploader': '打牌还是打桩', 'uploader_id': '150259984', 'like_count': int, 'comment_count': int, 'upload_date': '20200516', 'view_count': int, 'description': 'md5:e3c401cf7bc363118d1783dd74068a68', 'duration': 90.314, '_old_archive_ids': ['bilibili 498159642_part1'], }, }], 'params': {'playlist_items': '2'}, }, { 'note': 'Specific page of Anthology', 'url': 'https://www.bilibili.com/video/BV1bK411W797?p=1', 'info_dict': { 'id': 'BV1bK411W797_p1', 'ext': 'mp4', 'title': '物语中的人物是如何吐槽自己的OP的 p01 Staple Stable/战场原+羽川', 'tags': 'count:10', 'timestamp': 1589601697, 'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$', 'uploader': '打牌还是打桩', 'uploader_id': '150259984', 'like_count': int, 'comment_count': int, 'upload_date': '20200516', 'view_count': int, 'description': 
'md5:e3c401cf7bc363118d1783dd74068a68', 'duration': 90.314, '_old_archive_ids': ['bilibili 498159642_part1'], }, }, { 'url': 'https://www.bilibili.com/video/av8903802/', 'info_dict': { 'id': 'BV13x41117TL', 'ext': 'mp4', 'title': '阿滴英文|英文歌分享#6 "Closer', 'upload_date': '20170301', 'description': 'md5:3b1b9e25b78da4ef87e9b548b88ee76a', 'timestamp': 1488353834, 'uploader_id': '65880958', 'uploader': '阿滴英文', 'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$', 'duration': 554.117, 'tags': list, 'comment_count': int, 'view_count': int, 'like_count': int, '_old_archive_ids': ['bilibili 8903802_part1'], }, 'params': { 'skip_download': True, }, }, { 'note': 'video has chapter', 'url': 'https://www.bilibili.com/video/BV1vL411G7N7/', 'info_dict': { 'id': 'BV1vL411G7N7', 'ext': 'mp4', 'title': '如何为你的B站视频添加进度条分段', 'timestamp': 1634554558, 'upload_date': '20211018', 'description': 'md5:a9a3d6702b3a94518d419b2e9c320a6d', 'tags': list, 'uploader': '爱喝咖啡的当麻', 'duration': 669.482, 'uploader_id': '1680903', 'chapters': 'count:6', 'comment_count': int, 'view_count': int, 'like_count': int, 'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$', '_old_archive_ids': ['bilibili 463665680_part1'], }, 'params': {'skip_download': True}, }, { 'note': 'video redirects to festival page', 'url': 'https://www.bilibili.com/video/BV1wP4y1P72h', 'info_dict': { 'id': 'BV1wP4y1P72h', 'ext': 'mp4', 'title': '牛虎年相交之际,一首传统民族打击乐《牛斗虎》祝大家新春快乐,虎年大吉!【bilibili音乐虎闹新春】', 'timestamp': 1643947497, 'upload_date': '20220204', 'description': 'md5:8681a0d4d2c06b4ae27e59c8080a7fe6', 'uploader': '叨叨冯聊音乐', 'duration': 246.719, 'uploader_id': '528182630', 'view_count': int, 'like_count': int, 'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$', '_old_archive_ids': ['bilibili 893839363_part1'], }, }, { 'note': 'newer festival video', 'url': 'https://www.bilibili.com/festival/2023honkaiimpact3gala?bvid=BV1ay4y1d77f', 'info_dict': { 'id': 'BV1ay4y1d77f', 'ext': 'mp4', 'title': '【崩坏3新春剧场】为特别的你送上祝福!', 'timestamp': 1674273600, 
'upload_date': '20230121', 'description': 'md5:58af66d15c6a0122dc30c8adfd828dd8', 'uploader': '果蝇轰', 'duration': 1111.722, 'uploader_id': '8469526', 'view_count': int, 'like_count': int, 'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$', '_old_archive_ids': ['bilibili 778246196_part1'], }, }, { 'note': 'legacy flv/mp4 video', 'url': 'https://www.bilibili.com/video/BV1ms411Q7vw/?p=4', 'info_dict': { 'id': 'BV1ms411Q7vw_p4', 'title': '[搞笑]【动画】云南方言快乐生产线出品 p04 新烧包谷之漫游桃花岛', 'timestamp': 1458222815, 'upload_date': '20160317', 'description': '云南方言快乐生产线出品', 'duration': float, 'uploader': '一笑颠天', 'uploader_id': '3916081', 'view_count': int, 'comment_count': int, 'like_count': int, 'tags': list, 'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$', '_old_archive_ids': ['bilibili 4120229_part4'], }, 'params': {'extractor_args': {'bilibili': {'prefer_multi_flv': ['32']}}}, 'playlist_count': 19, 'playlist': [{ 'info_dict': { 'id': 'BV1ms411Q7vw_p4_0', 'ext': 'flv', 'title': '[搞笑]【动画】云南方言快乐生产线出品 p04 新烧包谷之漫游桃花岛', 'duration': 399.102, }, }], }, { 'note': 'legacy mp4-only video', 'url': 'https://www.bilibili.com/video/BV1nx411u79K', 'info_dict': { 'id': 'BV1nx411u79K', 'ext': 'mp4', 'title': '【练习室】201603声乐练习《No Air》with VigoVan', 'timestamp': 1508893551, 'upload_date': '20171025', 'description': '@ZERO-G伯远\n声乐练习 《No Air》with Vigo Van', 'duration': 80.384, 'uploader': '伯远', 'uploader_id': '10584494', 'comment_count': int, 'view_count': int, 'like_count': int, 'tags': list, 'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$', '_old_archive_ids': ['bilibili 15700301_part1'], }, }, { 'note': 'interactive/split-path video', 'url': 'https://www.bilibili.com/video/BV1af4y1H7ga/', 'info_dict': { 'id': 'BV1af4y1H7ga', 'title': '【互动游戏】花了大半年时间做的自我介绍~请查收!!', 'timestamp': 1630500414, 'upload_date': '20210901', 'description': 'md5:01113e39ab06e28042d74ac356a08786', 'tags': list, 'uploader': '钉宫妮妮Ninico', 'duration': 1503, 'uploader_id': '8881297', 'comment_count': int, 'view_count': int, 'like_count': 
int, 'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$', '_old_archive_ids': ['bilibili 292734508_part1'], }, 'playlist_count': 33, 'playlist': [{ 'info_dict': { 'id': 'BV1af4y1H7ga_400950101', 'ext': 'mp4', 'title': '【互动游戏】花了大半年时间做的自我介绍~请查收!! - 听见猫猫叫~', 'timestamp': 1630500414, 'upload_date': '20210901', 'description': 'md5:db66ac7a2813a94b8291dbce990cc5b2', 'tags': list, 'uploader': '钉宫妮妮Ninico', 'duration': 11.605, 'uploader_id': '8881297', 'comment_count': int, 'view_count': int, 'like_count': int, 'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$', '_old_archive_ids': ['bilibili 292734508_part1'], }, }], }, { 'note': 'redirect from bvid to bangumi link via redirect_url', 'url': 'https://www.bilibili.com/video/BV1TE411f7f1', 'info_dict': { 'id': '288525', 'title': '李永乐老师 钱学森弹道和乘波体飞行器是什么?', 'ext': 'mp4', 'series': '我和我的祖国', 'series_id': '4780', 'season': '幕后纪实', 'season_id': '28609', 'season_number': 1, 'episode': '钱学森弹道和乘波体飞行器是什么?', 'episode_id': '288525', 'episode_number': 105, 'duration': 1183.957, 'timestamp': 1571648124, 'upload_date': '20191021', 'thumbnail': r're:https?://.*\.(jpg|jpeg|png)$', }, }, { 'note': 'redirect from aid to bangumi link via redirect_url', 'url': 'https://www.bilibili.com/video/av114868162141203', 'info_dict': { 'id': '1933368', 'title': 'PV 引爆变革的起点', 'ext': 'mp4', 'duration': 63.139, 'series': '时光代理人', 'series_id': '5183', 'season': '第三季', 'season_number': 4, 'season_id': '105212', 'episode': '引爆变革的起点', 'episode_number': 1, 'episode_id': '1933368', 'timestamp': 1752849001, 'upload_date': '20250718', 'thumbnail': r're:https?://.*\.(jpg|jpeg|png)$', }, }, { 'note': 'video has subtitles, which requires login', 'url': 'https://www.bilibili.com/video/BV12N4y1M7rh', 'info_dict': { 'id': 'BV12N4y1M7rh', 'ext': 'mp4', 'title': 'md5:96e8bb42c2b432c0d4ce3434a61479c1', 'tags': list, 'description': 'md5:afde2b7ba9025c01d9e3dde10de221e4', 'duration': 313.557, 'upload_date': '20220709', 'uploader': '小夫太渴', 'timestamp': 1657347907, 'uploader_id': 
'1326814124', 'comment_count': int, 'view_count': int, 'like_count': int, 'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$', 'subtitles': 'count:2', # login required for CC subtitle '_old_archive_ids': ['bilibili 898179753_part1'], }, 'params': {'listsubtitles': True}, 'skip': 'login required for subtitle', }, { 'url': 'https://www.bilibili.com/video/BV1jL41167ZG/', 'info_dict': { 'id': 'BV1jL41167ZG', 'title': '一场大火引发的离奇死亡!古典推理经典短篇集《不可能犯罪诊断书》!', 'ext': 'mp4', }, 'skip': 'supporter-only video', }, { 'url': 'https://www.bilibili.com/video/BV1Ks411f7aQ/', 'info_dict': { 'id': 'BV1Ks411f7aQ', 'title': '【BD1080P】狼与香辛料I【华盟】', 'ext': 'mp4', }, 'skip': 'login required', }, { 'url': 'https://www.bilibili.com/video/BV1GJ411x7h7/', 'info_dict': { 'id': 'BV1GJ411x7h7', 'title': '【官方 MV】Never Gonna Give You Up - Rick Astley', 'ext': 'mp4', }, 'skip': 'geo-restricted', }, { 'note': 'has - in the last path segment of the url', 'url': 'https://www.bilibili.com/festival/bh3-7th?bvid=BV1tr4y1f7p2&', 'only_matching': True, }] def _real_extract(self, url): video_id, prefix = self._match_valid_url(url).group('id', 'prefix') headers = self.geo_verification_headers() webpage, urlh = self._download_webpage_handle(url, video_id, headers=headers) if not self._match_valid_url(urlh.url): return self.url_result(urlh.url) headers['Referer'] = url initial_state = self._search_json(r'window\.__INITIAL_STATE__\s*=', webpage, 'initial state', video_id, default=None) if not initial_state: if self._search_json(r'\bwindow\._riskdata_\s*=', webpage, 'risk', video_id, default={}).get('v_voucher'): raise ExtractorError('You have exceeded the rate limit. 
Try again later', expected=True) query = {'platform': 'web'} prefix = prefix.upper() if prefix == 'BV': query['bvid'] = prefix + video_id elif prefix == 'AV': query['aid'] = video_id detail = self._download_json( 'https://api.bilibili.com/x/web-interface/wbi/view/detail', video_id, note='Downloading redirection URL', errnote='Failed to download redirection URL', query=self._sign_wbi(query, video_id), headers=headers) new_url = traverse_obj(detail, ('data', 'View', 'redirect_url', {url_or_none})) if new_url and BiliBiliBangumiIE.suitable(new_url): return self.url_result(new_url, BiliBiliBangumiIE) raise ExtractorError('Unable to extract initial state') if traverse_obj(initial_state, ('error', 'trueCode')) == -403: self.raise_login_required() if traverse_obj(initial_state, ('error', 'trueCode')) == -404: raise ExtractorError( 'This video may be deleted or geo-restricted. ' 'You might want to try a VPN or a proxy server (with --proxy)', expected=True) is_festival = 'videoData' not in initial_state if is_festival: video_data = initial_state['videoInfo'] else: video_data = initial_state['videoData'] video_id, title = video_data['bvid'], video_data.get('title') # Bilibili anthologies are similar to playlists but all videos share the same video ID as the anthology itself. 
page_list_json = (not is_festival and traverse_obj( self._download_json( 'https://api.bilibili.com/x/player/pagelist', video_id, fatal=False, query={'bvid': video_id, 'jsonp': 'jsonp'}, note='Extracting videos in anthology', headers=headers), 'data', expected_type=list)) or [] is_anthology = len(page_list_json) > 1 part_id = int_or_none(parse_qs(url).get('p', [None])[-1]) if is_anthology and not part_id and self._yes_playlist(video_id, video_id): return self.playlist_from_matches( page_list_json, video_id, title, ie=BiliBiliIE, getter=lambda entry: f'https://www.bilibili.com/video/{video_id}?p={entry["page"]}') if is_anthology: part_id = part_id or 1 title += f' p{part_id:02d} {traverse_obj(page_list_json, (part_id - 1, "part")) or ""}' aid = video_data.get('aid') old_video_id = format_field(aid, None, f'%s_part{part_id or 1}') cid = traverse_obj(video_data, ('pages', part_id - 1, 'cid')) if part_id else video_data.get('cid') festival_info = {} if is_festival: festival_info = traverse_obj(initial_state, { 'uploader': ('videoInfo', 'upName'), 'uploader_id': ('videoInfo', 'upMid', {str_or_none}), 'like_count': ('videoStatus', 'like', {int_or_none}), 'thumbnail': ('sectionEpisodes', lambda _, v: v['bvid'] == video_id, 'cover'), }, get_all=False) metainfo = { **traverse_obj(initial_state, { 'uploader': ('upData', 'name'), 'uploader_id': ('upData', 'mid', {str_or_none}), 'like_count': ('videoData', 'stat', 'like', {int_or_none}), 'tags': ('tags', ..., 'tag_name'), 'thumbnail': ('videoData', 'pic', {url_or_none}), }), **festival_info, **traverse_obj(video_data, { 'description': 'desc', 'timestamp': ('pubdate', {int_or_none}), 'view_count': (('viewCount', ('stat', 'view')), {int_or_none}), 'comment_count': ('stat', 'reply', {int_or_none}), }, get_all=False), 'id': f'{video_id}{format_field(part_id, None, "_p%d")}', '_old_archive_ids': [make_archive_id(self, old_video_id)] if old_video_id else None, 'title': title, 'http_headers': {'Referer': url}, } is_interactive = 
traverse_obj(video_data, ('rights', 'is_stein_gate')) if is_interactive: return self.playlist_result( self._get_interactive_entries(video_id, cid, metainfo, headers=headers), **metainfo, duration=traverse_obj(initial_state, ('videoData', 'duration', {int_or_none})), __post_extractor=self.extract_comments(aid))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
true
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/redbee.py
yt_dlp/extractor/redbee.py
import json import re import time import urllib.parse import uuid from .common import InfoExtractor from ..utils import ( ExtractorError, float_or_none, int_or_none, strip_or_none, traverse_obj, try_call, unified_timestamp, ) class RedBeeBaseIE(InfoExtractor): _DEVICE_ID = str(uuid.uuid4()) @property def _API_URL(self): """ Ref: https://apidocs.emp.ebsd.ericsson.net Subclasses must set _REDBEE_CUSTOMER, _REDBEE_BUSINESS_UNIT """ return f'https://exposure.api.redbee.live/v2/customer/{self._REDBEE_CUSTOMER}/businessunit/{self._REDBEE_BUSINESS_UNIT}' def _get_bearer_token(self, asset_id, jwt=None): request = { 'deviceId': self._DEVICE_ID, 'device': { 'deviceId': self._DEVICE_ID, 'name': 'Mozilla Firefox 102', 'type': 'WEB', }, } if jwt: request['jwt'] = jwt return self._download_json( f'{self._API_URL}/auth/{"gigyaLogin" if jwt else "anonymous"}', asset_id, data=json.dumps(request).encode(), headers={ 'Content-Type': 'application/json;charset=utf-8', })['sessionToken'] def _get_formats_and_subtitles(self, asset_id, **kwargs): bearer_token = self._get_bearer_token(asset_id, **kwargs) api_response = self._download_json( f'{self._API_URL}/entitlement/{asset_id}/play', asset_id, headers={ 'Authorization': f'Bearer {bearer_token}', 'Accept': 'application/json, text/plain, */*', }) formats, subtitles = [], {} for format_data in api_response['formats']: if not format_data.get('mediaLocator'): continue fmts, subs = [], {} if format_data.get('format') == 'DASH': fmts, subs = self._extract_mpd_formats_and_subtitles( format_data['mediaLocator'], asset_id, fatal=False) elif format_data.get('format') == 'SMOOTHSTREAMING': fmts, subs = self._extract_ism_formats_and_subtitles( format_data['mediaLocator'], asset_id, fatal=False) elif format_data.get('format') == 'HLS': fmts, subs = self._extract_m3u8_formats_and_subtitles( format_data['mediaLocator'], asset_id, fatal=False) if format_data.get('drm'): for f in fmts: f['has_drm'] = True formats.extend(fmts) self._merge_subtitles(subs, 
target=subtitles) return formats, subtitles class ParliamentLiveUKIE(RedBeeBaseIE): IE_NAME = 'parliamentlive.tv' IE_DESC = 'UK parliament videos' _VALID_URL = r'(?i)https?://(?:www\.)?parliamentlive\.tv/Event/Index/(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})' _REDBEE_CUSTOMER = 'UKParliament' _REDBEE_BUSINESS_UNIT = 'ParliamentLive' _TESTS = [{ 'url': 'http://parliamentlive.tv/Event/Index/c1e9d44d-fd6c-4263-b50f-97ed26cc998b', 'info_dict': { 'id': 'c1e9d44d-fd6c-4263-b50f-97ed26cc998b', 'ext': 'mp4', 'title': 'Home Affairs Committee', 'timestamp': 1395153872, 'upload_date': '20140318', 'thumbnail': r're:https?://[^?#]+c1e9d44d-fd6c-4263-b50f-97ed26cc998b[^/]*/thumbnail', }, }, { 'url': 'http://parliamentlive.tv/event/index/3f24936f-130f-40bf-9a5d-b3d6479da6a4', 'only_matching': True, }, { 'url': 'https://parliamentlive.tv/Event/Index/27cf25e4-e77b-42a3-93c5-c815cd6d7377', 'info_dict': { 'id': '27cf25e4-e77b-42a3-93c5-c815cd6d7377', 'ext': 'mp4', 'title': 'House of Commons', 'timestamp': 1658392447, 'upload_date': '20220721', 'thumbnail': r're:https?://[^?#]+27cf25e4-e77b-42a3-93c5-c815cd6d7377[^/]*/thumbnail', }, }] def _real_extract(self, url): video_id = self._match_id(url) formats, subtitles = self._get_formats_and_subtitles(video_id) video_info = self._download_json( f'https://www.parliamentlive.tv/Event/GetShareVideo/{video_id}', video_id, fatal=False) return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, 'title': traverse_obj(video_info, ('event', 'title')), 'thumbnail': traverse_obj(video_info, 'thumbnailUrl'), 'timestamp': traverse_obj( video_info, ('event', 'publishedStartTime'), expected_type=unified_timestamp), '_format_sort_fields': ('res', 'proto'), } class RTBFIE(RedBeeBaseIE): _WORKING = False _VALID_URL = r'''(?x) https?://(?:www\.)?rtbf\.be/ (?: video/[^?]+\?.*\bid=| ouftivi/(?:[^/]+/)*[^?]+\?.*\bvideoId=| auvio/[^/]+\?.*\b(?P<live>l)?id= )(?P<id>\d+)''' _NETRC_MACHINE = 'rtbf' _REDBEE_CUSTOMER = 'RTBF' 
_REDBEE_BUSINESS_UNIT = 'Auvio' _TESTS = [{ 'url': 'https://www.rtbf.be/video/detail_les-diables-au-coeur-episode-2?id=1921274', 'md5': '8c876a1cceeb6cf31b476461ade72384', 'info_dict': { 'id': '1921274', 'ext': 'mp4', 'title': 'Les Diables au coeur (épisode 2)', 'description': '(du 25/04/2014)', 'duration': 3099.54, 'upload_date': '20140425', 'timestamp': 1398456300, }, 'skip': 'No longer available', }, { # geo restricted 'url': 'http://www.rtbf.be/ouftivi/heros/detail_scooby-doo-mysteres-associes?id=1097&videoId=2057442', 'only_matching': True, }, { 'url': 'http://www.rtbf.be/ouftivi/niouzz?videoId=2055858', 'only_matching': True, }, { 'url': 'http://www.rtbf.be/auvio/detail_jeudi-en-prime-siegfried-bracke?id=2102996', 'only_matching': True, }, { # Live 'url': 'https://www.rtbf.be/auvio/direct_pure-fm?lid=134775', 'only_matching': True, }, { # Audio 'url': 'https://www.rtbf.be/auvio/detail_cinq-heures-cinema?id=2360811', 'only_matching': True, }, { # With Subtitle 'url': 'https://www.rtbf.be/auvio/detail_les-carnets-du-bourlingueur?id=2361588', 'only_matching': True, }, { 'url': 'https://www.rtbf.be/auvio/detail_investigation?id=2921926', 'md5': 'd5d11bb62169fef38d7ce7ac531e034f', 'info_dict': { 'id': '2921926', 'ext': 'mp4', 'title': 'Le handicap un confinement perpétuel - Maladie de Lyme', 'description': 'md5:dcbd5dcf6015488c9069b057c15ccc52', 'duration': 5258.8, 'upload_date': '20220727', 'timestamp': 1658934000, 'series': '#Investigation', 'thumbnail': r're:^https?://[^?&]+\.jpg$', }, }, { 'url': 'https://www.rtbf.be/auvio/detail_la-belgique-criminelle?id=2920492', 'md5': '054f9f143bc79c89647c35e5a7d35fa8', 'info_dict': { 'id': '2920492', 'ext': 'mp4', 'title': '04 - Le crime de la rue Royale', 'description': 'md5:0c3da1efab286df83f2ab3f8f96bd7a6', 'duration': 1574.6, 'upload_date': '20220723', 'timestamp': 1658596887, 'series': 'La Belgique criminelle - TV', 'thumbnail': r're:^https?://[^?&]+\.jpg$', }, }] _IMAGE_HOST = 'http://ds1.ds.static.rtbf.be' 
_PROVIDERS = { 'YOUTUBE': 'Youtube', 'DAILYMOTION': 'Dailymotion', 'VIMEO': 'Vimeo', } _QUALITIES = [ ('mobile', 'SD'), ('web', 'MD'), ('high', 'HD'), ] _LOGIN_URL = 'https://login.rtbf.be/accounts.login' _GIGYA_API_KEY = '3_kWKuPgcdAybqnqxq_MvHVk0-6PN8Zk8pIIkJM_yXOu-qLPDDsGOtIDFfpGivtbeO' _LOGIN_COOKIE_ID = f'glt_{_GIGYA_API_KEY}' def _perform_login(self, username, password): if self._get_cookies(self._LOGIN_URL).get(self._LOGIN_COOKIE_ID): return self._set_cookie('.rtbf.be', 'gmid', 'gmid.ver4', secure=True, expire_time=time.time() + 3600) login_response = self._download_json( self._LOGIN_URL, None, data=urllib.parse.urlencode({ 'loginID': username, 'password': password, 'APIKey': self._GIGYA_API_KEY, 'targetEnv': 'jssdk', 'sessionExpiration': '-2', }).encode(), headers={ 'Content-Type': 'application/x-www-form-urlencoded', }) if login_response['statusCode'] != 200: raise ExtractorError('Login failed. Server message: {}'.format(login_response['errorMessage']), expected=True) self._set_cookie('.rtbf.be', self._LOGIN_COOKIE_ID, login_response['sessionInfo']['login_token'], secure=True, expire_time=time.time() + 3600) def _get_formats_and_subtitles(self, url, media_id): login_token = self._get_cookies(url).get(self._LOGIN_COOKIE_ID) if not login_token: self.raise_login_required() session_jwt = try_call(lambda: self._get_cookies(url)['rtbf_jwt'].value) or self._download_json( 'https://login.rtbf.be/accounts.getJWT', media_id, query={ 'login_token': login_token.value, 'APIKey': self._GIGYA_API_KEY, 'sdk': 'js_latest', 'authMode': 'cookie', 'pageURL': url, 'sdkBuild': '13273', 'format': 'json', })['id_token'] return super()._get_formats_and_subtitles(media_id, jwt=session_jwt) def _real_extract(self, url): live, media_id = self._match_valid_url(url).groups() embed_page = self._download_webpage( 'https://www.rtbf.be/auvio/embed/' + ('direct' if live else 'media'), media_id, query={'id': media_id}) media_data = self._html_search_regex(r'data-media="([^"]+)"', embed_page, 
'media data', fatal=False) if not media_data: if re.search(r'<div[^>]+id="js-error-expired"[^>]+class="(?![^"]*hidden)', embed_page): raise ExtractorError('Livestream has ended.', expected=True) if re.search(r'<div[^>]+id="js-sso-connect"[^>]+class="(?![^"]*hidden)', embed_page): self.raise_login_required() raise ExtractorError('Could not find media data') data = self._parse_json(media_data, media_id) error = data.get('error') if error: raise ExtractorError(f'{self.IE_NAME} said: {error}', expected=True) provider = data.get('provider') if provider in self._PROVIDERS: return self.url_result(data['url'], self._PROVIDERS[provider]) title = traverse_obj(data, 'subtitle', 'title') is_live = data.get('isLive') height_re = r'-(\d+)p\.' formats, subtitles = [], {} # The old api still returns m3u8 and mpd manifest for livestreams, but these are 'fake' # since all they contain is a 20s video that is completely unrelated. # https://github.com/yt-dlp/yt-dlp/issues/4656#issuecomment-1214461092 m3u8_url = None if data.get('isLive') else traverse_obj(data, 'urlHlsAes128', 'urlHls') if m3u8_url: fmts, subs = self._extract_m3u8_formats_and_subtitles( m3u8_url, media_id, 'mp4', m3u8_id='hls', fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) fix_url = lambda x: x.replace('//rtbf-vod.', '//rtbf.') if '/geo/drm/' in x else x http_url = data.get('url') if formats and http_url and re.search(height_re, http_url): http_url = fix_url(http_url) for m3u8_f in formats[:]: height = m3u8_f.get('height') if not height: continue f = m3u8_f.copy() del f['protocol'] f.update({ 'format_id': m3u8_f['format_id'].replace('hls-', 'http-'), 'url': re.sub(height_re, '-%dp.' 
% height, http_url), }) formats.append(f) else: sources = data.get('sources') or {} for key, format_id in self._QUALITIES: format_url = sources.get(key) if not format_url: continue height = int_or_none(self._search_regex( height_re, format_url, 'height', default=None)) formats.append({ 'format_id': format_id, 'url': fix_url(format_url), 'height': height, }) mpd_url = None if data.get('isLive') else data.get('urlDash') if mpd_url and (self.get_param('allow_unplayable_formats') or not data.get('drm')): fmts, subs = self._extract_mpd_formats_and_subtitles( mpd_url, media_id, mpd_id='dash', fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) audio_url = data.get('urlAudio') if audio_url: formats.append({ 'format_id': 'audio', 'url': audio_url, 'vcodec': 'none', }) for track in (data.get('tracks') or {}).values(): sub_url = track.get('url') if not sub_url: continue subtitles.setdefault(track.get('lang') or 'fr', []).append({ 'url': sub_url, }) if not formats: fmts, subs = self._get_formats_and_subtitles(url, f'live_{media_id}' if is_live else media_id) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) return { 'id': media_id, 'formats': formats, 'title': title, 'description': strip_or_none(data.get('description')), 'thumbnail': data.get('thumbnail'), 'duration': float_or_none(data.get('realDuration')), 'timestamp': int_or_none(data.get('liveFrom')), 'series': data.get('programLabel'), 'subtitles': subtitles, 'is_live': is_live, '_format_sort_fields': ('res', 'proto'), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/box.py
yt_dlp/extractor/box.py
import json import urllib.parse from .common import InfoExtractor from ..utils import ( ExtractorError, parse_iso8601, update_url_query, url_or_none, ) from ..utils.traversal import traverse_obj class BoxIE(InfoExtractor): _VALID_URL = r'https?://(?:[^.]+\.)?(?P<service>app|ent)\.box\.com/s/(?P<shared_name>[^/?#]+)(?:/file/(?P<id>\d+))?' _TESTS = [{ 'url': 'https://mlssoccer.app.box.com/s/0evd2o3e08l60lr4ygukepvnkord1o1x/file/510727257538', 'md5': '1f81b2fd3960f38a40a3b8823e5fcd43', 'info_dict': { 'id': '510727257538', 'ext': 'mp4', 'title': 'Garber St. Louis will be 28th MLS team +scarving.mp4', 'uploader': '', 'timestamp': 1566320259, 'upload_date': '20190820', 'uploader_id': '235196876', }, 'params': {'skip_download': 'dash fragment too small'}, }, { 'url': 'https://utexas.app.box.com/s/2x6vanv85fdl8j2eqlcxmv0gp1wvps6e', 'info_dict': { 'id': '787379022466', 'ext': 'mp4', 'title': 'Webinar recording: Take the Leap!.mp4', 'uploader': 'Patricia Mosele', 'timestamp': 1615824864, 'upload_date': '20210315', 'uploader_id': '239068974', }, 'params': {'skip_download': 'dash fragment too small'}, }, { 'url': 'https://thejacksonlaboratory.ent.box.com/s/2x09dm6vcg6y28o0oox1so4l0t8wzt6l/file/1536173056065', 'info_dict': { 'id': '1536173056065', 'ext': 'mp4', 'uploader_id': '18523128264', 'uploader': 'Lexi Hennigan', 'title': 'iPSC Symposium recording part 1.mp4', 'timestamp': 1716228343, 'upload_date': '20240520', }, 'params': {'skip_download': 'dash fragment too small'}, }] def _real_extract(self, url): shared_name, file_id, service = self._match_valid_url(url).group('shared_name', 'id', 'service') webpage = self._download_webpage(url, file_id or shared_name) if not file_id: post_stream_data = self._search_json( r'Box\.postStreamData\s*=', webpage, 'Box post-stream data', shared_name) shared_item = traverse_obj( post_stream_data, ('/app-api/enduserapp/shared-item', {dict})) or {} if shared_item.get('itemType') != 'file': raise ExtractorError('The requested resource is not a 
file', expected=True) file_id = str(shared_item['itemID']) request_token = self._search_json( r'Box\.config\s*=', webpage, 'Box config', file_id)['requestToken'] access_token = self._download_json( f'https://{service}.box.com/app-api/enduserapp/elements/tokens', file_id, 'Downloading token JSON metadata', data=json.dumps({'fileIDs': [file_id]}).encode(), headers={ 'Content-Type': 'application/json', 'X-Request-Token': request_token, 'X-Box-EndUser-API': 'sharedName=' + shared_name, })[file_id]['read'] shared_link = f'https://{service}.box.com/s/{shared_name}' f = self._download_json( 'https://api.box.com/2.0/files/' + file_id, file_id, 'Downloading file JSON metadata', headers={ 'Authorization': 'Bearer ' + access_token, 'BoxApi': 'shared_link=' + shared_link, 'X-Rep-Hints': '[dash]', # TODO: extract `hls` formats }, query={ 'fields': 'authenticated_download_url,created_at,created_by,description,extension,is_download_available,name,representations,size', }) title = f['name'] query = { 'access_token': access_token, 'shared_link': shared_link, } formats = [] for url_tmpl in traverse_obj(f, ( 'representations', 'entries', lambda _, v: v['representation'] == 'dash', 'content', 'url_template', {url_or_none}, )): manifest_url = update_url_query(url_tmpl.replace('{+asset_path}', 'manifest.mpd'), query) fmts = self._extract_mpd_formats(manifest_url, file_id) for fmt in fmts: fmt['extra_param_to_segment_url'] = urllib.parse.urlparse(manifest_url).query formats.extend(fmts) creator = f.get('created_by') or {} return { 'id': file_id, 'title': title, 'formats': formats, 'description': f.get('description') or None, 'uploader': creator.get('name'), 'timestamp': parse_iso8601(f.get('created_at')), 'uploader_id': creator.get('id'), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/oftv.py
yt_dlp/extractor/oftv.py
from .common import InfoExtractor from .zype import ZypeIE from ..utils import traverse_obj class OfTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?of\.tv/video/(?P<id>\w+)' _TESTS = [{ 'url': 'https://of.tv/video/627d7d95b353db0001dadd1a', 'md5': 'cb9cd5db3bb9ee0d32bfd7e373d6ef0a', 'info_dict': { 'id': '627d7d95b353db0001dadd1a', 'ext': 'mp4', 'title': 'E1: Jacky vs Eric', 'thumbnail': r're:^https?://.*\.jpg', 'average_rating': 0, 'description': 'md5:dd16e3e2a8d27d922e7a989f85986853', 'display_id': '', 'duration': 1423, 'timestamp': 1652391300, 'upload_date': '20220512', 'view_count': 0, 'creator': 'This is Fire', }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) info = next(ZypeIE.extract_from_webpage(self._downloader, url, webpage)) info['_type'] = 'url_transparent' info['creator'] = self._search_regex(r'<a[^>]+class=\"creator-name\"[^>]+>([^<]+)', webpage, 'creator') return info class OfTVPlaylistIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?of\.tv/creators/(?P<id>[a-zA-Z0-9-]+)/?(?:$|[?#])' _TESTS = [{ 'url': 'https://of.tv/creators/this-is-fire/', 'playlist_count': 8, 'info_dict': { 'id': 'this-is-fire', }, }] def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) json_match = self._search_json( r'var\s*remaining_videos\s*=', webpage, 'oftv playlists', playlist_id, contains_pattern=r'\[.+\]') return self.playlist_from_matches( traverse_obj(json_match, (..., 'discovery_url')), playlist_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/screencast.py
yt_dlp/extractor/screencast.py
import urllib.parse from .common import InfoExtractor from ..utils import ExtractorError class ScreencastIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?screencast\.com/t/(?P<id>[a-zA-Z0-9]+)' _TESTS = [{ 'url': 'http://www.screencast.com/t/3ZEjQXlT', 'md5': '917df1c13798a3e96211dd1561fded83', 'info_dict': { 'id': '3ZEjQXlT', 'ext': 'm4v', 'title': 'Color Measurement with Ocean Optics Spectrometers', 'description': 'md5:240369cde69d8bed61349a199c5fb153', 'thumbnail': r're:^https?://.*\.(?:gif|jpg)$', }, }, { 'url': 'http://www.screencast.com/t/V2uXehPJa1ZI', 'md5': 'e8e4b375a7660a9e7e35c33973410d34', 'info_dict': { 'id': 'V2uXehPJa1ZI', 'ext': 'mov', 'title': 'The Amadeus Spectrometer', 'description': 're:^In this video, our friends at.*To learn more about Amadeus, visit', 'thumbnail': r're:^https?://.*\.(?:gif|jpg)$', }, }, { 'url': 'http://www.screencast.com/t/aAB3iowa', 'md5': 'dedb2734ed00c9755761ccaee88527cd', 'info_dict': { 'id': 'aAB3iowa', 'ext': 'mp4', 'title': 'Google Earth Export', 'description': 'Provides a demo of a CommunityViz export to Google Earth, one of the 3D viewing options.', 'thumbnail': r're:^https?://.*\.(?:gif|jpg)$', }, }, { 'url': 'http://www.screencast.com/t/X3ddTrYh', 'md5': '669ee55ff9c51988b4ebc0877cc8b159', 'info_dict': { 'id': 'X3ddTrYh', 'ext': 'wmv', 'title': 'Toolkit 6 User Group Webinar (2014-03-04) - Default Judgment and First Impression', 'description': 'md5:7b9f393bc92af02326a5c5889639eab0', 'thumbnail': r're:^https?://.*\.(?:gif|jpg)$', }, }, { 'url': 'http://screencast.com/t/aAB3iowa', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) video_url = self._html_search_regex( r'<embed name="Video".*?src="([^"]+)"', webpage, 'QuickTime embed', default=None) if video_url is None: flash_vars_s = self._html_search_regex( r'<param name="flashVars" value="([^"]+)"', webpage, 'flash vars', default=None) if not flash_vars_s: flash_vars_s = 
self._html_search_regex( r'<param name="initParams" value="([^"]+)"', webpage, 'flash vars', default=None) if flash_vars_s: flash_vars_s = flash_vars_s.replace(',', '&') if flash_vars_s: flash_vars = urllib.parse.parse_qs(flash_vars_s) video_url_raw = urllib.parse.quote( flash_vars['content'][0]) video_url = video_url_raw.replace('http%3A', 'http:') if video_url is None: video_meta = self._html_search_meta( 'og:video', webpage, default=None) if video_meta: video_url = self._search_regex( r'src=(.*?)(?:$|&)', video_meta, 'meta tag video URL', default=None) if video_url is None: video_url = self._html_search_regex( r'MediaContentUrl["\']\s*:(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 'video url', default=None, group='url') if video_url is None: video_url = self._html_search_meta( 'og:video', webpage, default=None) if video_url is None: raise ExtractorError('Cannot find video') title = self._og_search_title(webpage, default=None) if title is None: title = self._html_search_regex( [r'<b>Title:</b> ([^<]+)</div>', r'class="tabSeperator">></span><span class="tabText">(.+?)<', r'<title>([^<]+)</title>'], webpage, 'title') thumbnail = self._og_search_thumbnail(webpage) description = self._og_search_description(webpage, default=None) if description is None: description = self._html_search_meta('description', webpage) return { 'id': video_id, 'url': video_url, 'title': title, 'description': description, 'thumbnail': thumbnail, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/bigo.py
yt_dlp/extractor/bigo.py
from .common import InfoExtractor from ..utils import ExtractorError, UserNotLive, urlencode_postdata class BigoIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?bigo\.tv/(?:[a-z]{2,}/)?(?P<id>[^/]+)' _TESTS = [{ 'url': 'https://www.bigo.tv/ja/221338632', 'info_dict': { 'id': '6576287577575737440', 'title': '土よ〜💁‍♂️ 休憩室/REST room', 'thumbnail': r're:https?://.+', 'uploader': '✨Shin💫', 'uploader_id': '221338632', 'is_live': True, }, 'skip': 'livestream', }, { 'url': 'https://www.bigo.tv/th/Tarlerm1304', 'only_matching': True, }, { 'url': 'https://bigo.tv/115976881', 'only_matching': True, }] def _real_extract(self, url): user_id = self._match_id(url) info_raw = self._download_json( 'https://ta.bigo.tv/official_website/studio/getInternalStudioInfo', user_id, data=urlencode_postdata({'siteId': user_id}), headers={'Accept': 'application/json'}) if not isinstance(info_raw, dict): raise ExtractorError('Received invalid JSON data') if info_raw.get('code'): raise ExtractorError( 'Bigo says: {} (code {})'.format(info_raw.get('msg'), info_raw.get('code')), expected=True) info = info_raw.get('data') or {} if not info.get('alive'): raise UserNotLive(video_id=user_id) formats, subs = self._extract_m3u8_formats_and_subtitles( info.get('hls_src'), user_id, 'mp4', 'm3u8') return { 'id': info.get('roomId') or user_id, 'title': info.get('roomTopic') or info.get('nick_name') or user_id, 'formats': formats, 'subtitles': subs, 'thumbnail': info.get('snapshot'), 'uploader': info.get('nick_name'), 'uploader_id': user_id, 'is_live': True, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tonline.py
yt_dlp/extractor/tonline.py
from .common import InfoExtractor from ..utils import int_or_none, join_nonempty class TOnlineIE(InfoExtractor): _WORKING = False _ENABLED = None # XXX: pass through to GenericIE IE_NAME = 't-online.de' _VALID_URL = r'https?://(?:www\.)?t-online\.de/tv/(?:[^/]+/)*id_(?P<id>\d+)' _TEST = { 'url': 'http://www.t-online.de/tv/sport/fussball/id_79166266/drittes-remis-zidane-es-muss-etwas-passieren-.html', 'md5': '7d94dbdde5f9d77c5accc73c39632c29', 'info_dict': { 'id': '79166266', 'ext': 'mp4', 'title': 'Drittes Remis! Zidane: "Es muss etwas passieren"', 'description': 'Es läuft nicht rund bei Real Madrid. Das 1:1 gegen den SD Eibar war das dritte Unentschieden in Folge in der Liga.', }, } def _real_extract(self, url): video_id = self._match_id(url) video_data = self._download_json( f'http://www.t-online.de/tv/id_{video_id}/tid_json_video', video_id) title = video_data['subtitle'] formats = [] for asset in video_data.get('assets', []): asset_source = asset.get('source') or asset.get('source2') if not asset_source: continue formats.append({ 'format_id': join_nonempty('type', 'profile', from_dict=asset), 'url': asset_source, }) thumbnails = [] for image in video_data.get('images', []): image_source = image.get('source') if not image_source: continue thumbnails.append({ 'url': image_source, }) return { 'id': video_id, 'title': title, 'description': video_data.get('description'), 'duration': int_or_none(video_data.get('duration')), 'thumbnails': thumbnails, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/microsoftembed.py
yt_dlp/extractor/microsoftembed.py
import re

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    parse_iso8601,
    parse_resolution,
    traverse_obj,
    unified_timestamp,
    url_basename,
    url_or_none,
)


class MicrosoftEmbedIE(InfoExtractor):
    """Extractor for microsoft.com's generic embedded video player."""

    _VALID_URL = r'https?://(?:www\.)?microsoft\.com/(?:[^/]+/)?videoplayer/embed/(?P<id>[a-z0-9A-Z]+)'

    _TESTS = [{
        'url': 'https://www.microsoft.com/en-us/videoplayer/embed/RWL07e',
        'md5': 'eb0ae9007f9b305f9acd0a03e74cb1a9',
        'info_dict': {
            'id': 'RWL07e',
            'title': 'Microsoft for Public Health and Social Services',
            'ext': 'mp4',
            'thumbnail': 'http://img-prod-cms-rt-microsoft-com.akamaized.net/cms/api/am/imageFileData/RWL7Ju?ver=cae5',
            'age_limit': 0,
            'timestamp': 1631658316,
            'upload_date': '20210914',
        },
        'expected_warnings': ['Failed to parse XML: syntax error: line 1, column 0'],
    }]
    _API_URL = 'https://prod-video-cms-rt-microsoft-com.akamaized.net/vhs/api/videos/'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        metadata = self._download_json(self._API_URL + video_id, video_id)

        formats = []
        # Dispatch on the CMS stream type; anything unrecognized is treated
        # as a direct progressive download.
        for source_type, source in metadata['streams'].items():
            if source_type == 'smooth_Streaming':
                formats.extend(self._extract_ism_formats(source['url'], video_id, 'mss', fatal=False))
            elif source_type == 'apple_HTTP_Live_Streaming':
                formats.extend(self._extract_m3u8_formats(source['url'], video_id, 'mp4', fatal=False))
            elif source_type == 'mPEG_DASH':
                formats.extend(self._extract_mpd_formats(source['url'], video_id, fatal=False))
            else:
                formats.append({
                    'format_id': source_type,
                    'url': source['url'],
                    'height': source.get('heightPixels'),
                    'width': source.get('widthPixels'),
                })

        # 'captions' maps language code -> caption descriptor (VTT URLs).
        subtitles = {
            lang: [{
                'url': data.get('url'),
                'ext': 'vtt',
            }] for lang, data in traverse_obj(metadata, 'captions', default={}).items()
        }

        thumbnails = [{
            'url': thumb.get('url'),
            'width': thumb.get('width') or None,
            'height': thumb.get('height') or None,
        } for thumb in traverse_obj(metadata, ('snippet', 'thumbnails', ...))]
        # Reuses the format de-duplication helper on thumbnail dicts —
        # presumably dedupes by URL; confirm against InfoExtractor.
        self._remove_duplicate_formats(thumbnails)

        return {
            'id': video_id,
            'title': traverse_obj(metadata, ('snippet', 'title')),
            'timestamp': unified_timestamp(traverse_obj(metadata, ('snippet', 'activeStartDate'))),
            'age_limit': int_or_none(traverse_obj(metadata, ('snippet', 'minimumAge'))) or 0,
            'formats': formats,
            'subtitles': subtitles,
            'thumbnails': thumbnails,
        }


class MicrosoftMediusBaseIE(InfoExtractor):
    """Shared helpers for medius.microsoft.com-backed extractors."""

    @staticmethod
    def _sub_to_dict(subtitle_list):
        """Convert a flat list of subtitle dicts into a {lang: [subs]} mapping.

        Each entry's 'tag' key is popped and used as the language key
        ('und' when absent).
        """
        subtitles = {}
        for sub in subtitle_list:
            subtitles.setdefault(sub.pop('tag', 'und'), []).append(sub)
        return subtitles

    def _extract_ism(self, ism_url, video_id, fatal=True):
        # Deprioritize non-English tracks so English is preferred by default.
        formats = self._extract_ism_formats(ism_url, video_id, fatal=fatal)
        for fmt in formats:
            if fmt['language'] != 'eng' and 'English' not in fmt['format_id']:
                fmt['language_preference'] = -10
        return formats


class MicrosoftMediusIE(MicrosoftMediusBaseIE):
    _VALID_URL = r'https?://medius\.microsoft\.com/Embed/(?:Video\?id=|video-nc/|VideoDetails/)(?P<id>[\da-f-]+)'

    _TESTS = [{
        'url': 'https://medius.microsoft.com/Embed/video-nc/9640d86c-f513-4889-959e-5dace86e7d2b',
        'info_dict': {
            'id': '9640d86c-f513-4889-959e-5dace86e7d2b',
            'ext': 'ismv',
            'title': 'Rapidly code, test and ship from secure cloud developer environments',
            'description': 'md5:33c8e4facadc438613476eea24165f71',
            'thumbnail': r're:https://mediusimg\.event\.microsoft\.com/video-\d+/thumbnail\.jpg.*',
            'subtitles': 'count:30',
        },
    }, {
        'url': 'https://medius.microsoft.com/Embed/video-nc/81215af5-c813-4dcd-aede-94f4e1a7daa3',
        'info_dict': {
            'id': '81215af5-c813-4dcd-aede-94f4e1a7daa3',
            'ext': 'ismv',
            'title': 'Microsoft Build opening',
            'description': 'md5:43455096141077a1f23144cab8cec1cb',
            'thumbnail': r're:https://mediusimg\.event\.microsoft\.com/video-\d+/thumbnail\.jpg.*',
            'subtitles': 'count:31',
        },
    }, {
        'url': 'https://medius.microsoft.com/Embed/VideoDetails/78493569-9b3b-4a85-a409-ee76e789e25c',
        'info_dict': {
            'id': '78493569-9b3b-4a85-a409-ee76e789e25c',
            'ext': 'ismv',
            'title': ' Anomaly Detection & Root cause at Edge',
            'description': 'md5:f8f1ad93d7918649bfb97fa081b03b83',
            'thumbnail': r're:https://mediusdownload.event.microsoft.com/asset.*\.jpg.*',
            'subtitles': 'count:17',
        },
    }, {
        'url': 'https://medius.microsoft.com/Embed/Video?id=0dc69bda-079b-4070-a7db-a8da1a06a9c7',
        'only_matching': True,
    }, {
        'url': 'https://medius.microsoft.com/Embed/video-nc/fe823a91-959c-465b-96d4-8f4db624f72c',
        'only_matching': True,
    }]

    def _extract_subtitle(self, webpage, video_id):
        # Primary source: the captionsConfiguration JSON object embedded in
        # the page; fallback: scrape .vtt URLs out of inline JS and derive
        # the language tag from the filename (…_<tag>.vtt).
        captions = traverse_obj(
            self._search_json(r'const\s+captionsConfiguration\s*=', webpage, 'captions', video_id, default=None),
            ('languageList', lambda _, v: url_or_none(v['src']), {
                'url': 'src',
                'tag': ('srclang', {str}),
                'name': ('kind', {str}),
            })) or [{'url': url, 'tag': url_basename(url).split('.vtt')[0].split('_')[-1]}
                    for url in re.findall(r'var\s+file\s+=\s+\{[^}]+\'(https://[^\']+\.vtt\?[^\']+)', webpage)]

        return self._sub_to_dict(captions)

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # All URL variants are normalized to the video-nc embed page.
        webpage = self._download_webpage(f'https://medius.microsoft.com/Embed/video-nc/{video_id}', video_id)

        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage),
            'formats': self._extract_ism(
                self._search_regex(r'StreamUrl\s*=\s*"([^"]+manifest)"', webpage, 'ism url'), video_id),
            'thumbnail': self._og_search_thumbnail(webpage),
            'subtitles': self._extract_subtitle(webpage, video_id),
        }


class MicrosoftLearnPlaylistIE(InfoExtractor):
    """Playlist extractor for learn.microsoft.com shows and events."""

    _VALID_URL = r'https?://learn\.microsoft\.com/(?:[\w-]+/)?(?P<type>shows|events)/(?P<id>[\w-]+)/?(?:[?#]|$)'
    _TESTS = [{
        'url': 'https://learn.microsoft.com/en-us/shows/bash-for-beginners',
        'info_dict': {
            'id': 'bash-for-beginners',
            'title': 'Bash for Beginners',
            'description': 'md5:16a91c07222117d1e00912f0dbc02c2c',
        },
        'playlist_count': 20,
    }, {
        'url': 'https://learn.microsoft.com/en-us/events/build-2022',
        'info_dict': {
            'id': 'build-2022',
            'title': 'Microsoft Build 2022 - Events',
            'description': 'md5:c16b43848027df837b22c6fbac7648d3',
        },
        'playlist_count': 201,
    }]

    def _entries(self, url_base, video_id):
        # Page through the content-browser API via $skip until the reported
        # total 'count' is reached or a page comes back empty.
        skip = 0
        while True:
            playlist_info = self._download_json(url_base, video_id, f'Downloading entries {skip}', query={
                'locale': 'en-us',
                '$skip': skip,
            })
            url_paths = traverse_obj(playlist_info, ('results', ..., 'url', {str}))
            for url_path in url_paths:
                yield self.url_result(f'https://learn.microsoft.com/en-us{url_path}')
            skip += len(url_paths)
            if skip >= playlist_info.get('count', 0) or not url_paths:
                break

    def _real_extract(self, url):
        playlist_id, playlist_type = self._match_valid_url(url).group('id', 'type')
        webpage = self._download_webpage(url, playlist_id)

        metainfo = {
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage),
        }
        # Shows are made of episodes; events are made of sessions.
        sub_type = 'episodes' if playlist_type == 'shows' else 'sessions'

        url_base = f'https://learn.microsoft.com/api/contentbrowser/search/{playlist_type}/{playlist_id}/{sub_type}'
        return self.playlist_result(self._entries(url_base, playlist_id), playlist_id, **metainfo)


class MicrosoftLearnEpisodeIE(MicrosoftMediusBaseIE):
    _VALID_URL = r'https?://learn\.microsoft\.com/(?:[\w-]+/)?shows/[\w-]+/(?P<id>[^?#/]+)'
    _TESTS = [{
        'url': 'https://learn.microsoft.com/en-us/shows/bash-for-beginners/what-is-the-difference-between-a-terminal-and-a-shell-2-of-20-bash-for-beginners/',
        'info_dict': {
            'id': 'd44e1a03-a0e5-45c2-9496-5c9fa08dc94c',
            'ext': 'ismv',
            'title': 'What is the Difference Between a Terminal and a Shell? (Part 2 of 20)',
            'description': 'md5:7bbbfb593d21c2cf2babc3715ade6b88',
            'timestamp': 1676339547,
            'upload_date': '20230214',
            'thumbnail': r're:https://learn\.microsoft\.com/video/media/.+\.png',
            'subtitles': 'count:14',
        },
    }, {
        'url': 'https://learn.microsoft.com/en-gb/shows/on-demand-instructor-led-training-series/az-900-module-1',
        'info_dict': {
            'id': '4fe10f7c-d83c-463b-ac0e-c30a8195e01b',
            'ext': 'mp4',
            'title': 'AZ-900 Cloud fundamentals (1 of 6)',
            'description': 'md5:3c2212ce865e9142f402c766441bd5c9',
            'thumbnail': r're:https://.+/.+\.jpg',
            'timestamp': 1706605184,
            'upload_date': '20240130',
        },
        'params': {'format': 'bv[protocol=https]'},
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The page's entryId meta points to the public video API record.
        entry_id = self._html_search_meta('entryId', webpage, 'entryId', fatal=True)
        video_info = self._download_json(
            f'https://learn.microsoft.com/api/video/public/v1/entries/{entry_id}', video_id)

        formats = []
        # Adaptive manifests first (ISM/HLS/DASH), then fixed-quality
        # progressive MP4s, then the audio-only rendition.
        if ism_url := traverse_obj(video_info, ('publicVideo', 'adaptiveVideoUrl', {url_or_none})):
            formats.extend(self._extract_ism(ism_url, video_id, fatal=False))
        if hls_url := traverse_obj(video_info, ('publicVideo', 'adaptiveVideoHLSUrl', {url_or_none})):
            formats.extend(self._extract_m3u8_formats(hls_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
        if mpd_url := traverse_obj(video_info, ('publicVideo', 'adaptiveVideoDashUrl', {url_or_none})):
            formats.extend(self._extract_mpd_formats(mpd_url, video_id, mpd_id='dash', fatal=False))
        for key in ('low', 'medium', 'high'):
            if video_url := traverse_obj(video_info, ('publicVideo', f'{key}QualityVideoUrl', {url_or_none})):
                formats.append({
                    'url': video_url,
                    'format_id': f'video-http-{key}',
                    'acodec': 'none',
                    **parse_resolution(video_url),
                })
        if audio_url := traverse_obj(video_info, ('publicVideo', 'audioUrl', {url_or_none})):
            formats.append({
                'url': audio_url,
                'format_id': 'audio-http',
                'vcodec': 'none',
            })

        return {
            'id': entry_id,
            'formats': formats,
            'subtitles': self._sub_to_dict(traverse_obj(video_info, (
                'publicVideo', 'captions', lambda _, v: url_or_none(v['url']), {
                    'tag': ('language', {str}),
                    'url': 'url',
                }))),
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage),
            **traverse_obj(video_info, {
                'timestamp': ('createTime', {parse_iso8601}),
                'thumbnails': ('publicVideo', 'thumbnailOtherSizes', ..., {'url': {url_or_none}}),
            }),
        }


class MicrosoftLearnSessionIE(InfoExtractor):
    _VALID_URL = r'https?://learn\.microsoft\.com/(?:[\w-]+/)?events/[\w-]+/(?P<id>[^?#/]+)'
    _TESTS = [{
        'url': 'https://learn.microsoft.com/en-us/events/build-2022/ts01-rapidly-code-test-ship-from-secure-cloud-developer-environments',
        'info_dict': {
            'id': '9640d86c-f513-4889-959e-5dace86e7d2b',
            'ext': 'ismv',
            'title': 'Rapidly code, test and ship from secure cloud developer environments - Events',
            'description': 'md5:f26c1a85d41c1cffd27a0279254a25c3',
            'timestamp': 1653408600,
            'upload_date': '20220524',
            'thumbnail': r're:https://mediusimg\.event\.microsoft\.com/video-\d+/thumbnail\.jpg.*',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        metainfo = {
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage),
            'timestamp': parse_iso8601(self._html_search_meta('startDate', webpage, 'startDate')),
        }

        # Delegate to MicrosoftMediusIE for the actual media, keeping this
        # page's metadata via url_transparent.
        return self.url_result(
            self._html_search_meta('externalVideoUrl', webpage, 'videoUrl', fatal=True),
            url_transparent=True, ie=MicrosoftMediusIE, **metainfo)


class MicrosoftBuildIE(InfoExtractor):
    # Matches both a single session (UUID) and the whole 'sessions' listing.
    _VALID_URL = [
        r'https?://build\.microsoft\.com/[\w-]+/sessions/(?P<id>[\da-f-]+)',
        r'https?://build\.microsoft\.com/[\w-]+/(?P<id>sessions)/?(?:[?#]|$)',
    ]
    _TESTS = [{
        'url': 'https://build.microsoft.com/en-US/sessions/b49feb31-afcd-4217-a538-d3ca1d171198?source=sessions',
        'info_dict': {
            'id': 'aee55fb5-fcf9-4b38-b764-a3527cb57554',
            'ext': 'ismv',
            'title': 'Microsoft Build opening keynote',
            'description': 'md5:d38338f336ef4b6ef9ad2a7466a76655',
            'timestamp': 1716307200,
            'upload_date': '20240521',
            'thumbnail': r're:https://mediusimg\.event\.microsoft\.com/video-\d+/thumbnail\.jpg.*',
        },
    }, {
        'url': 'https://build.microsoft.com/en-US/sessions',
        'info_dict': {
            'id': 'sessions',
        },
        'playlist_mincount': 418,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # The API returns every session; build url_result entries for all of
        # them, then either return the whole playlist or select one by id.
        entries = [
            self.url_result(
                video_info['onDemand'], ie=MicrosoftMediusIE, url_transparent=True,
                **traverse_obj(video_info, {
                    'id': ('sessionId', {str}),
                    'title': ('title', {str}),
                    'description': ('description', {str}),
                    'timestamp': ('startDateTime', {parse_iso8601}),
                }))
            for video_info in self._download_json(
                'https://api-v2.build.microsoft.com/api/session/all/en-US', video_id, 'Downloading video info')
        ]
        if video_id == 'sessions':
            return self.playlist_result(entries, video_id)
        else:
            return traverse_obj(entries, (lambda _, v: v['id'] == video_id), get_all=False)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/lynda.py
yt_dlp/extractor/lynda.py
import itertools
import re
import urllib.parse

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    urlencode_postdata,
)


class LyndaBaseIE(InfoExtractor):
    """Shared login machinery for lynda.com extractors."""

    _SIGNIN_URL = 'https://www.lynda.com/signin/lynda'
    _PASSWORD_URL = 'https://www.lynda.com/signin/password'
    _USER_URL = 'https://www.lynda.com/signin/user'
    _ACCOUNT_CREDENTIALS_HINT = 'Use --username and --password options to provide lynda.com account credentials.'
    _NETRC_MACHINE = 'lynda'

    @staticmethod
    def _check_error(json_string, key_or_keys):
        """Raise an expected ExtractorError if any of the given keys carries
        a non-empty error message in the login response."""
        keys = [key_or_keys] if isinstance(key_or_keys, str) else key_or_keys
        for key in keys:
            error = json_string.get(key)
            if error:
                raise ExtractorError(f'Unable to login: {error}', expected=True)

    def _perform_login_step(self, form_html, fallback_action_url, extra_form_data, note, referrer_url):
        """Submit one stage of the multi-step signin form.

        Returns (response_json, action_url) so the next step can use the
        resolved action URL as its referrer.
        """
        action_url = self._search_regex(
            r'<form[^>]+action=(["\'])(?P<url>.+?)\1', form_html, 'post url',
            default=fallback_action_url, group='url')

        # The form action may be relative; resolve it against the signin page.
        if not action_url.startswith('http'):
            action_url = urllib.parse.urljoin(self._SIGNIN_URL, action_url)

        form_data = self._hidden_inputs(form_html)
        form_data.update(extra_form_data)

        # 418/500 are returned with a JSON error body we want to inspect
        # rather than treat as a transport failure.
        response = self._download_json(
            action_url, None, note,
            data=urlencode_postdata(form_data),
            headers={
                'Referer': referrer_url,
                'X-Requested-With': 'XMLHttpRequest',
            }, expected_status=(418, 500))

        self._check_error(response, ('email', 'password', 'ErrorMessage'))

        return response, action_url

    def _perform_login(self, username, password):
        # Step 1: download signin page
        signin_page = self._download_webpage(
            self._SIGNIN_URL, None, 'Downloading signin page')

        # Already logged in
        if any(re.search(p, signin_page) for p in (
                r'isLoggedIn\s*:\s*true', r'logout\.aspx', r'>Log out<')):
            return

        # Step 2: submit email
        signin_form = self._search_regex(
            r'(?s)(<form[^>]+data-form-name=["\']signin["\'][^>]*>.+?</form>)',
            signin_page, 'signin form')
        # BUGFIX: these two calls previously targeted self._login_step, which
        # does not exist (the method is defined as _perform_login_step above),
        # so any login attempt raised AttributeError.
        signin_page, signin_url = self._perform_login_step(
            signin_form, self._PASSWORD_URL, {'email': username},
            'Submitting email', self._SIGNIN_URL)

        # Step 3: submit password
        password_form = signin_page['body']
        self._perform_login_step(
            password_form, self._USER_URL, {'email': username, 'password': password},
            'Submitting password', signin_url)


class LyndaIE(LyndaBaseIE):
    IE_NAME = 'lynda'
    IE_DESC = 'lynda.com videos'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:www\.)?(?:lynda\.com|educourse\.ga)/
                        (?:
                            (?:[^/]+/){2,3}(?P<course_id>\d+)|
                            player/embed
                        )/
                        (?P<id>\d+)
                    '''
    # Matches the "[H:MM:SS.mmm]" prefix of transcript lines.
    _TIMECODE_REGEX = r'\[(?P<timecode>\d+:\d+:\d+[\.,]\d+)\]'

    _TESTS = [{
        'url': 'https://www.lynda.com/Bootstrap-tutorials/Using-exercise-files/110885/114408-4.html',
        # md5 is unstable
        'info_dict': {
            'id': '114408',
            'ext': 'mp4',
            'title': 'Using the exercise files',
            'duration': 68,
        },
    }, {
        'url': 'https://www.lynda.com/player/embed/133770?tr=foo=1;bar=g;fizz=rt&fs=0',
        'only_matching': True,
    }, {
        'url': 'https://educourse.ga/Bootstrap-tutorials/Using-exercise-files/110885/114408-4.html',
        'only_matching': True,
    }, {
        'url': 'https://www.lynda.com/de/Graphic-Design-tutorials/Willkommen-Grundlagen-guten-Gestaltung/393570/393572-4.html',
        'only_matching': True,
    }, {
        # Status="NotFound", Message="Transcript not found"
        'url': 'https://www.lynda.com/ASP-NET-tutorials/What-you-should-know/5034180/2811512-4.html',
        'only_matching': True,
    }]

    def _raise_unavailable(self, video_id):
        self.raise_login_required(
            f'Video {video_id} is only available for members')

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        video_id = mobj.group('id')
        course_id = mobj.group('course_id')

        query = {
            'videoId': video_id,
            'type': 'video',
        }

        video = self._download_json(
            'https://www.lynda.com/ajax/player', video_id,
            'Downloading video JSON', fatal=False, query=query)

        # Fallback scenario: the player endpoint failed, use the per-course
        # play API plus conviva metadata instead.
        if not video:
            query['courseId'] = course_id

            play = self._download_json(
                f'https://www.lynda.com/ajax/course/{course_id}/{video_id}/play',
                video_id, 'Downloading play JSON')

            if not play:
                self._raise_unavailable(video_id)

            formats = []
            for formats_dict in play:
                urls = formats_dict.get('urls')
                if not isinstance(urls, dict):
                    continue
                cdn = formats_dict.get('name')
                for format_id, format_url in urls.items():
                    if not format_url:
                        continue
                    formats.append({
                        'url': format_url,
                        'format_id': f'{cdn}-{format_id}' if cdn else format_id,
                        'height': int_or_none(format_id),
                    })

            conviva = self._download_json(
                'https://www.lynda.com/ajax/player/conviva', video_id,
                'Downloading conviva JSON', query=query)

            return {
                'id': video_id,
                'title': conviva['VideoTitle'],
                'description': conviva.get('VideoDescription'),
                'release_year': int_or_none(conviva.get('ReleaseYear')),
                'duration': int_or_none(conviva.get('Duration')),
                'creator': conviva.get('Author'),
                'formats': formats,
            }

        if 'Status' in video:
            raise ExtractorError(
                'lynda returned error: {}'.format(video['Message']), expected=True)

        if video.get('HasAccess') is False:
            self._raise_unavailable(video_id)

        video_id = str(video.get('ID') or video_id)
        duration = int_or_none(video.get('DurationInSeconds'))
        title = video['Title']

        formats = []

        fmts = video.get('Formats')
        if fmts:
            formats.extend([{
                'url': f['Url'],
                'ext': f.get('Extension'),
                'width': int_or_none(f.get('Width')),
                'height': int_or_none(f.get('Height')),
                'filesize': int_or_none(f.get('FileSize')),
                'format_id': str(f.get('Resolution')) if f.get('Resolution') else None,
            } for f in fmts if f.get('Url')])

        prioritized_streams = video.get('PrioritizedStreams')
        if prioritized_streams:
            for prioritized_stream_id, prioritized_stream in prioritized_streams.items():
                formats.extend([{
                    'url': video_url,
                    'height': int_or_none(format_id),
                    'format_id': f'{prioritized_stream_id}-{format_id}',
                } for format_id, video_url in prioritized_stream.items()])

        self._check_formats(formats, video_id)

        subtitles = self.extract_subtitles(video_id)

        return {
            'id': video_id,
            'title': title,
            'duration': duration,
            'subtitles': subtitles,
            'formats': formats,
        }

    def _fix_subtitles(self, subs):
        """Convert the transcript JSON into SRT text.

        Each caption's end time is the next caption's start time, so the
        list is walked pairwise; returns None when nothing usable remains.
        """
        srt = ''
        seq_counter = 0
        for seq_current, seq_next in itertools.pairwise(subs):
            m_current = re.match(self._TIMECODE_REGEX, seq_current['Timecode'])
            if m_current is None:
                continue
            m_next = re.match(self._TIMECODE_REGEX, seq_next['Timecode'])
            if m_next is None:
                continue
            appear_time = m_current.group('timecode')
            disappear_time = m_next.group('timecode')
            text = seq_current['Caption'].strip()
            if text:
                seq_counter += 1
                srt += f'{seq_counter}\r\n{appear_time} --> {disappear_time}\r\n{text}\r\n\r\n'
        if srt:
            return srt

    def _get_subtitles(self, video_id):
        url = f'https://www.lynda.com/ajax/player?videoId={video_id}&type=transcript'
        subs = self._download_webpage(
            url, video_id, 'Downloading subtitles JSON', fatal=False)
        # The endpoint answers with an XML-ish error blob when there is no
        # transcript; bail out before attempting JSON parsing.
        if not subs or 'Status="NotFound"' in subs:
            return {}
        subs = self._parse_json(subs, video_id, fatal=False)
        if not subs:
            return {}
        fixed_subs = self._fix_subtitles(subs)
        if fixed_subs:
            return {'en': [{'ext': 'srt', 'data': fixed_subs}]}
        return {}


class LyndaCourseIE(LyndaBaseIE):
    IE_NAME = 'lynda:course'
    IE_DESC = 'lynda.com online courses'

    # Course link equals to welcome/introduction video link of same course
    # We will recognize it as course link
    _VALID_URL = r'https?://(?:www|m)\.(?:lynda\.com|educourse\.ga)/(?P<coursepath>(?:[^/]+/){2,3}(?P<courseid>\d+))-2\.html'
    _TESTS = [{
        'url': 'https://www.lynda.com/Graphic-Design-tutorials/Grundlagen-guten-Gestaltung/393570-2.html',
        'only_matching': True,
    }, {
        'url': 'https://www.lynda.com/de/Graphic-Design-tutorials/Grundlagen-guten-Gestaltung/393570-2.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        course_path = mobj.group('coursepath')
        course_id = mobj.group('courseid')

        item_template = f'https://www.lynda.com/{course_path}/%s-4.html'

        course = self._download_json(
            f'https://www.lynda.com/ajax/player?courseId={course_id}&type=course',
            course_id, 'Downloading course JSON', fatal=False)

        # Fallback: scrape video ids straight off the course webpage when the
        # course API is unavailable.
        if not course:
            webpage = self._download_webpage(url, course_id)
            entries = [
                self.url_result(
                    item_template % video_id, ie=LyndaIE.ie_key(),
                    video_id=video_id)
                for video_id in re.findall(
                    r'data-video-id=["\'](\d+)', webpage)]
            return self.playlist_result(
                entries, course_id,
                self._og_search_title(webpage, fatal=False),
                self._og_search_description(webpage))

        if course.get('Status') == 'NotFound':
            raise ExtractorError(
                f'Course {course_id} does not exist', expected=True)

        unaccessible_videos = 0
        entries = []

        # Might want to extract videos right here from video['Formats'] as it seems 'Formats' is not provided
        # by single video API anymore

        for chapter in course['Chapters']:
            for video in chapter.get('Videos', []):
                if video.get('HasAccess') is False:
                    unaccessible_videos += 1
                    continue
                video_id = video.get('ID')
                if video_id:
                    entries.append({
                        '_type': 'url_transparent',
                        'url': item_template % video_id,
                        'ie_key': LyndaIE.ie_key(),
                        'chapter': chapter.get('Title'),
                        'chapter_number': int_or_none(chapter.get('ChapterIndex')),
                        'chapter_id': str(chapter.get('ID')),
                    })

        if unaccessible_videos > 0:
            self.report_warning(
                f'{unaccessible_videos} videos are only available for members (or paid members) '
                f'and will not be downloaded. {self._ACCOUNT_CREDENTIALS_HINT}')

        course_title = course.get('Title')
        course_description = course.get('Description')

        return self.playlist_result(entries, course_id, course_title, course_description)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/imdb.py
yt_dlp/extractor/imdb.py
import base64
import json
import re

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    int_or_none,
    mimetype2ext,
    qualities,
    traverse_obj,
    try_get,
    url_or_none,
)


class ImdbIE(InfoExtractor):
    IE_NAME = 'imdb'
    IE_DESC = 'Internet Movie Database trailers'
    _VALID_URL = r'https?://(?:www|m)\.imdb\.com/(?:video|title|list).*?[/-]vi(?P<id>\d+)'

    _TESTS = [{
        'url': 'http://www.imdb.com/video/imdb/vi2524815897',
        'info_dict': {
            'id': '2524815897',
            'ext': 'mp4',
            'title': 'No. 2',
            'description': 'md5:87bd0bdc61e351f21f20d2d7441cb4e7',
            'duration': 152,
            'thumbnail': r're:^https?://.+\.jpg',
        },
    }, {
        'url': 'https://www.imdb.com/video/vi3516832537',
        'info_dict': {
            'id': '3516832537',
            'ext': 'mp4',
            'title': 'Paul: U.S. Trailer #1',
            'description': 'md5:17fcc4fe11ec29b4399be9d4c5ef126c',
            'duration': 153,
            'thumbnail': r're:^https?://.+\.jpg',
        },
    }, {
        'url': 'http://www.imdb.com/video/_/vi2524815897',
        'only_matching': True,
    }, {
        'url': 'http://www.imdb.com/title/tt1667889/?ref_=ext_shr_eml_vi#lb-vi2524815897',
        'only_matching': True,
    }, {
        'url': 'http://www.imdb.com/title/tt1667889/#lb-vi2524815897',
        'only_matching': True,
    }, {
        'url': 'http://www.imdb.com/videoplayer/vi1562949145',
        'only_matching': True,
    }, {
        'url': 'http://www.imdb.com/title/tt4218696/videoplayer/vi2608641561',
        'only_matching': True,
    }, {
        'url': 'https://www.imdb.com/list/ls009921623/videoplayer/vi260482329',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(f'https://www.imdb.com/video/vi{video_id}', video_id)
        # Playback metadata lives in the Next.js data blob on the video page.
        info = self._search_nextjs_data(webpage, video_id)
        video_info = traverse_obj(info, ('props', 'pageProps', 'videoPlaybackData', 'video'), default={})
        title = (traverse_obj(video_info, ('name', 'value'), ('primaryTitle', 'titleText', 'text'))
                 or self._html_search_meta(('og:title', 'twitter:title'), webpage, default=None)
                 or self._html_extract_title(webpage))
        # Fallback: the legacy playback-data endpoint, which takes its request
        # parameters as a base64-encoded JSON 'key' query argument.
        data = video_info.get('playbackURLs') or try_get(self._download_json(
            'https://www.imdb.com/ve/data/VIDEO_PLAYBACK_DATA', video_id,
            query={
                'key': base64.b64encode(json.dumps({
                    'type': 'VIDEO_PLAYER',
                    'subType': 'FORCE_LEGACY',
                    'id': f'vi{video_id}',
                }).encode()).decode(),
            }), lambda x: x[0]['videoLegacyEncodings'])
        # Rank known quality labels; unknown labels sort lowest.
        quality = qualities(('SD', '480p', '720p', '1080p'))
        formats, subtitles = [], {}
        for encoding in data:
            if not encoding or not isinstance(encoding, dict):
                continue
            video_url = url_or_none(encoding.get('url'))
            if not video_url:
                continue
            ext = mimetype2ext(encoding.get(
                'mimeType')) or determine_ext(video_url)
            if ext == 'm3u8':
                # HLS manifests get a slight preference over progressive URLs.
                fmts, subs = self._extract_m3u8_formats_and_subtitles(
                    video_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    preference=1, m3u8_id='hls', fatal=False)
                subtitles = self._merge_subtitles(subtitles, subs)
                formats.extend(fmts)
                continue
            format_id = traverse_obj(encoding, ('displayName', 'value'), 'definition')
            formats.append({
                'format_id': format_id,
                'url': video_url,
                'ext': ext,
                'quality': quality(format_id),
            })

        return {
            'id': video_id,
            'title': title,
            'alt_title': info.get('videoSubTitle'),
            'formats': formats,
            'description': try_get(video_info, lambda x: x['description']['value']),
            'thumbnail': url_or_none(try_get(video_info, lambda x: x['thumbnail']['url'])),
            'duration': int_or_none(try_get(video_info, lambda x: x['runtime']['value'])),
            'subtitles': subtitles,
        }


class ImdbListIE(InfoExtractor):
    IE_NAME = 'imdb:list'
    IE_DESC = 'Internet Movie Database lists'
    # Negative lookahead keeps single-video playerlinks for ImdbIE.
    _VALID_URL = r'https?://(?:www\.)?imdb\.com/list/ls(?P<id>\d{9})(?!/videoplayer/vi\d+)'
    _TEST = {
        'url': 'https://www.imdb.com/list/ls009921623/',
        'info_dict': {
            'id': '009921623',
            'title': 'The Bourne Legacy',
            'description': 'A list of trailers, clips, and more from The Bourne Legacy, starring Jeremy Renner and Rachel Weisz.',
        },
        'playlist_count': 8,
    }

    def _real_extract(self, url):
        list_id = self._match_id(url)
        webpage = self._download_webpage(url, list_id)
        # Each list item links to its videoplayer page; hand those to ImdbIE.
        entries = [
            self.url_result('http://www.imdb.com' + m, 'Imdb')
            for m in re.findall(rf'href="(/list/ls{list_id}/videoplayer/vi[^"]+)"', webpage)]

        list_title = self._html_search_regex(
            r'<h1[^>]+class="[^"]*header[^"]*"[^>]*>(.*?)</h1>',
            webpage, 'list title')
        list_description = self._html_search_regex(
            r'<div[^>]+class="[^"]*list-description[^"]*"[^>]*><p>(.*?)</p>',
            webpage, 'list description')

        return self.playlist_result(entries, list_id, list_title, list_description)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nick.py
yt_dlp/extractor/nick.py
from .mtv import MTVServicesBaseIE


class NickIE(MTVServicesBaseIE):
    """Extractor for nick.com clips and episodes.

    All extraction logic is inherited from MTVServicesBaseIE; this class
    only supplies the URL pattern and test cases.
    """

    IE_NAME = 'nick.com'
    _VALID_URL = r'https?://(?:www\.)?nick\.com/(?:video-clips|episodes)/(?P<id>[\da-z]{6})'
    _TESTS = [{
        'url': 'https://www.nick.com/episodes/u3smw8/wylde-pak-best-summer-ever-season-1-ep-1',
        'info_dict': {
            'id': 'eb9d4db0-274a-11ef-a913-0e37995d42c9',
            'ext': 'mp4',
            'display_id': 'u3smw8',
            'title': 'Best Summer Ever?',
            'description': 'md5:c737a0ade3fbc09d569c3b3d029a7792',
            'channel': 'Nickelodeon',
            'duration': 1296.0,
            'thumbnail': r're:https://assets\.nick\.com/uri/mgid:arc:imageassetref:',
            'series': 'Wylde Pak',
            'season': 'Season 1',
            'season_number': 1,
            'episode': 'Episode 1',
            'episode_number': 1,
            'timestamp': 1746100800,
            'upload_date': '20250501',
            'release_timestamp': 1746100800,
            'release_date': '20250501',
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://www.nick.com/video-clips/0p4706/spongebob-squarepants-spongebob-loving-the-krusty-krab-for-7-minutes',
        'info_dict': {
            'id': '4aac2228-5295-4076-b986-159513cf4ce4',
            'ext': 'mp4',
            'display_id': '0p4706',
            'title': 'SpongeBob Loving the Krusty Krab for 7 Minutes!',
            'description': 'md5:72bf59babdf4e6d642187502864e111d',
            'duration': 423.423,
            'thumbnail': r're:https://assets\.nick\.com/uri/mgid:arc:imageassetref:',
            'series': 'SpongeBob SquarePants',
            'season': 'Season 0',
            'season_number': 0,
            'episode': 'Episode 0',
            'episode_number': 0,
            'timestamp': 1663819200,
            'upload_date': '20220922',
        },
        'params': {'skip_download': 'm3u8'},
    }]
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/radiojavan.py
yt_dlp/extractor/radiojavan.py
import re

from .common import InfoExtractor
from ..utils import (
    parse_resolution,
    str_to_int,
    unified_strdate,
    urlencode_postdata,
    urljoin,
)


class RadioJavanIE(InfoExtractor):
    """Extractor for radiojavan.com music videos (currently marked broken)."""

    _WORKING = False
    _VALID_URL = r'https?://(?:www\.)?radiojavan\.com/videos/video/(?P<id>[^/]+)/?'
    _TEST = {
        'url': 'http://www.radiojavan.com/videos/video/chaartaar-ashoobam',
        'md5': 'e85208ffa3ca8b83534fca9fe19af95b',
        'info_dict': {
            'id': 'chaartaar-ashoobam',
            'ext': 'mp4',
            'title': 'Chaartaar - Ashoobam',
            'thumbnail': r're:^https?://.*\.jpe?g$',
            'upload_date': '20150215',
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Resolve the CDN host for this video; fall back to host1 when the
        # endpoint omits it.
        download_host = self._download_json(
            'https://www.radiojavan.com/videos/video_host', video_id,
            data=urlencode_postdata({'id': video_id}),
            headers={
                'Content-Type': 'application/x-www-form-urlencoded',
                'Referer': url,
            }).get('host', 'https://host1.rjmusicmedia.com')

        webpage = self._download_webpage(url, video_id)

        formats = []
        # Inline JS assigns per-quality paths like RJ.video480p = '...'; the
        # backreference \2 matches the opening quote character.
        for format_id, _, video_path in re.findall(
                r'RJ\.video(?P<format_id>\d+[pPkK])\s*=\s*(["\'])(?P<url>(?:(?!\2).)+)\2',
                webpage):
            f = parse_resolution(format_id)
            f.update({
                'url': urljoin(download_host, video_path),
                'format_id': format_id,
            })
            formats.append(f)

        title = self._og_search_title(webpage)
        thumbnail = self._og_search_thumbnail(webpage)

        upload_date = unified_strdate(self._search_regex(
            r'class="date_added">Date added: ([^<]+)<',
            webpage, 'upload date', fatal=False))

        view_count = str_to_int(self._search_regex(
            r'class="views">Plays: ([\d,]+)',
            webpage, 'view count', fatal=False))
        like_count = str_to_int(self._search_regex(
            r'class="rating">([\d,]+) likes',
            webpage, 'like count', fatal=False))
        dislike_count = str_to_int(self._search_regex(
            r'class="rating">([\d,]+) dislikes',
            webpage, 'dislike count', fatal=False))

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'view_count': view_count,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'formats': formats,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/videa.py
yt_dlp/extractor/videa.py
import base64 import random import string import struct from .common import InfoExtractor from ..compat import compat_ord from ..utils import ( ExtractorError, int_or_none, mimetype2ext, parse_codecs, parse_qs, update_url_query, urljoin, xpath_element, xpath_text, ) class VideaIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// videa(?:kid)?\.hu/ (?: videok/(?:[^/]+/)*[^?#&]+-| (?:videojs_)?player\?.*?\bv=| player/v/ ) (?P<id>[^?#&]+) ''' _EMBED_REGEX = [r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//videa\.hu/player\?.*?\bv=.+?)\1'] _TESTS = [{ 'url': 'http://videa.hu/videok/allatok/az-orult-kigyasz-285-kigyot-kigyo-8YfIAjxwWGwT8HVQ', 'md5': '97a7af41faeaffd9f1fc864a7c7e7603', 'info_dict': { 'id': '8YfIAjxwWGwT8HVQ', 'ext': 'mp4', 'title': 'Az őrült kígyász 285 kígyót enged szabadon', 'thumbnail': r're:https?://videa\.hu/static/still/.+', 'duration': 21, 'age_limit': 0, }, }, { 'url': 'http://videa.hu/videok/origo/jarmuvek/supercars-elozes-jAHDWfWSJH5XuFhH', 'md5': 'd57ccd8812c7fd491d33b1eab8c99975', 'info_dict': { 'id': 'jAHDWfWSJH5XuFhH', 'ext': 'mp4', 'title': 'Supercars előzés', 'thumbnail': r're:https?://videa\.hu/static/still/.+', 'duration': 64, 'age_limit': 0, }, }, { 'url': 'http://videa.hu/player?v=8YfIAjxwWGwT8HVQ', 'md5': '97a7af41faeaffd9f1fc864a7c7e7603', 'info_dict': { 'id': '8YfIAjxwWGwT8HVQ', 'ext': 'mp4', 'title': 'Az őrült kígyász 285 kígyót enged szabadon', 'thumbnail': r're:https?://videa\.hu/static/still/.+', 'duration': 21, 'age_limit': 0, }, }, { 'url': 'http://videa.hu/player/v/8YfIAjxwWGwT8HVQ?autoplay=1', 'only_matching': True, }, { 'url': 'https://videakid.hu/videok/origo/jarmuvek/supercars-elozes-jAHDWfWSJH5XuFhH', 'only_matching': True, }, { 'url': 'https://videakid.hu/player?v=8YfIAjxwWGwT8HVQ', 'only_matching': True, }, { 'url': 'https://videakid.hu/player/v/8YfIAjxwWGwT8HVQ?autoplay=1', 'only_matching': True, }] _WEBPAGE_TESTS = [{ 'url': 'https://www.kapucziner.hu/', 'info_dict': { 'id': '95yhJCdK2dX1T5Nh', 'ext': 'mp4', 'title': 
'Nemzetközi díjat kapott a győri kávémanufaktúra', 'age_limit': 0, 'duration': 207, 'thumbnail': r're:https?://videa\.hu/static/still/.+', }, }, { # FIXME: No video formats found 'url': 'https://hirtv.hu/hirtv_kesleltetett', 'info_dict': { 'id': 'IDRqF7W9X0GXHGj1', 'ext': 'mp4', 'title': 'Hír TV - 60 perccel késleltetett adás', }, }] _STATIC_SECRET = 'xHb0ZvME5q8CBcoQi6AngerDu3FGO9fkUlwPmLVY_RTzj2hJIS4NasXWKy1td7p' @staticmethod def rc4(cipher_text, key): res = b'' key_len = len(key) S = list(range(256)) j = 0 for i in range(256): j = (j + S[i] + ord(key[i % key_len])) % 256 S[i], S[j] = S[j], S[i] i = 0 j = 0 for m in range(len(cipher_text)): i = (i + 1) % 256 j = (j + S[i]) % 256 S[i], S[j] = S[j], S[i] k = S[(S[i] + S[j]) % 256] res += struct.pack('B', k ^ compat_ord(cipher_text[m])) return res.decode() def _real_extract(self, url): video_id = self._match_id(url) video_page = self._download_webpage(url, video_id) if 'videa.hu/player' in url: player_url = url player_page = video_page else: player_url = self._search_regex( r'<iframe.*?src="(/player\?[^"]+)"', video_page, 'player url') player_url = urljoin(url, player_url) player_page = self._download_webpage(player_url, video_id) nonce = self._search_regex( r'_xt\s*=\s*"([^"]+)"', player_page, 'nonce') l = nonce[:32] s = nonce[32:] result = '' for i in range(32): result += s[i - (self._STATIC_SECRET.index(l[i]) - 31)] query = parse_qs(player_url) random_seed = ''.join(random.choices(string.ascii_letters + string.digits, k=8)) query['_s'] = random_seed query['_t'] = result[:16] b64_info, handle = self._download_webpage_handle( 'http://videa.hu/player/xml', video_id, query=query) if b64_info.startswith('<?xml'): info = self._parse_xml(b64_info, video_id) else: key = result[16:] + random_seed + handle.headers['x-videa-xs'] info = self._parse_xml(self.rc4( base64.b64decode(b64_info), key), video_id) video = xpath_element(info, './video', 'video') if video is None: raise ExtractorError(xpath_element( info, './error', 
fatal=True), expected=True) sources = xpath_element( info, './video_sources', 'sources', fatal=True) hash_values = xpath_element( info, './hash_values', 'hash values', fatal=False) title = xpath_text(video, './title', fatal=True) formats = [] for source in sources.findall('./video_source'): source_url = source.text source_name = source.get('name') source_exp = source.get('exp') if not (source_url and source_name): continue hash_value = ( xpath_text(hash_values, 'hash_value_' + source_name) if hash_values is not None else None) if hash_value and source_exp: source_url = update_url_query(source_url, { 'md5': hash_value, 'expires': source_exp, }) f = parse_codecs(source.get('codecs')) f.update({ 'url': self._proto_relative_url(source_url), 'ext': mimetype2ext(source.get('mimetype')) or 'mp4', 'format_id': source.get('name'), 'width': int_or_none(source.get('width')), 'height': int_or_none(source.get('height')), }) formats.append(f) thumbnail = self._proto_relative_url(xpath_text(video, './poster_src')) age_limit = None is_adult = xpath_text(video, './is_adult_content', default=None) if is_adult: age_limit = 18 if is_adult == '1' else 0 return { 'id': video_id, 'title': title, 'thumbnail': thumbnail, 'duration': int_or_none(xpath_text(video, './duration')), 'age_limit': age_limit, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/videodetective.py
yt_dlp/extractor/videodetective.py
from .common import InfoExtractor from .internetvideoarchive import InternetVideoArchiveIE class VideoDetectiveIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?videodetective\.com/[^/]+/[^/]+/(?P<id>\d+)' _TEST = { 'url': 'http://www.videodetective.com/movies/kick-ass-2/194487', 'info_dict': { 'id': '194487', 'ext': 'mp4', 'title': 'Kick-Ass 2', 'description': 'md5:c189d5b7280400630a1d3dd17eaa8d8a', }, 'params': { # m3u8 download 'skip_download': True, }, } def _real_extract(self, url): video_id = self._match_id(url) query = 'customerid=69249&publishedid=' + video_id return self.url_result( InternetVideoArchiveIE._build_json_url(query), ie=InternetVideoArchiveIE.ie_key())
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/lnk.py
yt_dlp/extractor/lnk.py
from .common import InfoExtractor from ..utils import ( format_field, int_or_none, unified_strdate, ) class LnkIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?lnk\.lt/[^/]+/(?P<id>\d+)' _TESTS = [{ 'url': 'https://lnk.lt/zinios/79791', 'info_dict': { 'id': '79791', 'ext': 'mp4', 'title': 'LNK.lt: Viešintų gyventojai sukilo prieš radijo bangų siųstuvą', 'description': 'Svarbiausios naujienos trumpai, LNK žinios ir Info dienos pokalbiai.', 'view_count': int, 'duration': 233, 'upload_date': '20191123', 'thumbnail': r're:^https?://.*\.jpg$', 'episode_number': 13431, 'series': 'Naujausi žinių reportažai', 'episode': 'Episode 13431', }, 'params': {'skip_download': True}, }, { 'url': 'https://lnk.lt/istorijos-trumpai/152546', 'info_dict': { 'id': '152546', 'ext': 'mp4', 'title': 'Radžio koncertas gaisre ', 'description': 'md5:0666b5b85cb9fc7c1238dec96f71faba', 'view_count': int, 'duration': 54, 'upload_date': '20220105', 'thumbnail': r're:^https?://.*\.jpg$', 'episode_number': 1036, 'series': 'Istorijos trumpai', 'episode': 'Episode 1036', }, 'params': {'skip_download': True}, }, { 'url': 'https://lnk.lt/gyvunu-pasaulis/151549', 'info_dict': { 'id': '151549', 'ext': 'mp4', 'title': 'Gyvūnų pasaulis', 'description': '', 'view_count': int, 'duration': 1264, 'upload_date': '20220108', 'thumbnail': r're:^https?://.*\.jpg$', 'episode_number': 16, 'series': 'Gyvūnų pasaulis', 'episode': 'Episode 16', }, 'params': {'skip_download': True}, }] def _real_extract(self, url): video_id = self._match_id(url) video_json = self._download_json(f'https://lnk.lt/api/video/video-config/{video_id}', video_id)['videoInfo'] formats, subtitles = [], {} if video_json.get('videoUrl'): fmts, subs = self._extract_m3u8_formats_and_subtitles(video_json['videoUrl'], video_id) formats.extend(fmts) subtitles = self._merge_subtitles(subtitles, subs) if video_json.get('videoFairplayUrl') and not video_json.get('drm'): fmts, subs = 
self._extract_m3u8_formats_and_subtitles(video_json['videoFairplayUrl'], video_id) formats.extend(fmts) subtitles = self._merge_subtitles(subtitles, subs) return { 'id': video_id, 'title': video_json.get('title'), 'description': video_json.get('description'), 'view_count': video_json.get('viewsCount'), 'duration': video_json.get('duration'), 'upload_date': unified_strdate(video_json.get('airDate')), 'thumbnail': format_field(video_json, 'posterImage', 'https://lnk.lt/all-images/%s'), 'episode_number': int_or_none(video_json.get('episodeNumber')), 'series': video_json.get('programTitle'), 'formats': formats, 'subtitles': subtitles, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/sbscokr.py
yt_dlp/extractor/sbscokr.py
from .common import InfoExtractor from ..utils import ( clean_html, int_or_none, parse_iso8601, parse_resolution, url_or_none, ) from ..utils.traversal import traverse_obj class SBSCoKrIE(InfoExtractor): IE_NAME = 'sbs.co.kr' _VALID_URL = [r'https?://allvod\.sbs\.co\.kr/allvod/vod(?:Package)?EndPage\.do\?(?:[^#]+&)?mdaId=(?P<id>\d+)', r'https?://programs\.sbs\.co\.kr/(?:enter|drama|culture|sports|plus|mtv|kth)/[a-z0-9]+/(?:vod|clip|movie)/\d+/(?P<id>(?:OC)?\d+)'] _TESTS = [{ 'url': 'https://programs.sbs.co.kr/enter/dongsang2/clip/52007/OC467706746?div=main_pop_clip', 'md5': 'c3f6d45e1fb5682039d94cda23c36f19', 'info_dict': { 'id': 'OC467706746', 'ext': 'mp4', 'title': '‘아슬아슬’ 박군♥한영의 새 집 인테리어 대첩♨', 'description': 'md5:6a71eb1979ee4a94ea380310068ccab4', 'thumbnail': 'https://img2.sbs.co.kr/ops_clip_img/2023/10/10/34c4c0f9-a9a5-4ff6-a92e-9bb4b5f6fa65915w1280.jpg', 'release_timestamp': 1696889400, 'release_date': '20231009', 'view_count': int, 'like_count': int, 'duration': 238, 'age_limit': 15, 'series': '동상이몽2_너는 내 운명', 'episode': '레이디제인, ‘혼전임신설’ ‘3개월’ 앞당긴 결혼식 비하인드 스토리 최초 공개!', 'episode_number': 311, }, }, { 'url': 'https://allvod.sbs.co.kr/allvod/vodPackageEndPage.do?mdaId=22000489324&combiId=PA000000284&packageType=A&isFreeYN=', 'md5': 'bf46b2e89fda7ae7de01f5743cef7236', 'info_dict': { 'id': '22000489324', 'ext': 'mp4', 'title': '[다시보기] 트롤리 15회', 'description': 'md5:0e55d74bef1ac55c61ae90c73ac485f4', 'thumbnail': 'https://img2.sbs.co.kr/img/sbs_cms/WE/2023/02/14/arC1676333794938-1280-720.jpg', 'release_timestamp': 1676325600, 'release_date': '20230213', 'view_count': int, 'like_count': int, 'duration': 5931, 'age_limit': 15, 'series': '트롤리', 'episode': '이거 다 거짓말이야', 'episode_number': 15, }, }, { 'url': 'https://programs.sbs.co.kr/enter/fourman/vod/69625/22000508948', 'md5': '41e8ae4cc6c8424f4e4d76661a4becbf', 'info_dict': { 'id': '22000508948', 'ext': 'mp4', 'title': '[다시보기] 신발 벗고 돌싱포맨 104회', 'description': 'md5:c6a247383c4dd661e4b956bf4d3b586e', 'thumbnail': 
'https://img2.sbs.co.kr/img/sbs_cms/WE/2023/08/30/2vb1693355446261-1280-720.jpg', 'release_timestamp': 1693342800, 'release_date': '20230829', 'view_count': int, 'like_count': int, 'duration': 7036, 'age_limit': 15, 'series': '신발 벗고 돌싱포맨', 'episode': '돌싱포맨 저격수들 등장!', 'episode_number': 104, }, }] def _call_api(self, video_id, rscuse=''): return self._download_json( f'https://api.play.sbs.co.kr/1.0/sbs_vodall/{video_id}', video_id, note=f'Downloading m3u8 information {rscuse}', query={ 'platform': 'pcweb', 'protocol': 'download', 'absolute_show': 'Y', 'service': 'program', 'ssl': 'Y', 'rscuse': rscuse, }) def _real_extract(self, url): video_id = self._match_id(url) details = self._call_api(video_id) source = traverse_obj(details, ('vod', 'source', 'mediasource', {dict})) or {} formats = [] for stream in traverse_obj(details, ( 'vod', 'source', 'mediasourcelist', lambda _, v: v['mediaurl'] or v['mediarscuse'], ), default=[source]): if not stream.get('mediaurl'): new_source = traverse_obj( self._call_api(video_id, rscuse=stream['mediarscuse']), ('vod', 'source', 'mediasource', {dict})) or {} if new_source.get('mediarscuse') == source.get('mediarscuse') or not new_source.get('mediaurl'): continue stream = new_source formats.append({ 'url': stream['mediaurl'], 'format_id': stream.get('mediarscuse'), 'format_note': stream.get('medianame'), **parse_resolution(stream.get('quality')), 'preference': int_or_none(stream.get('mediarscuse')), }) caption_url = traverse_obj(details, ('vod', 'source', 'subtitle', {url_or_none})) return { 'id': video_id, **traverse_obj(details, ('vod', { 'title': ('info', 'title'), 'duration': ('info', 'duration', {int_or_none}), 'view_count': ('info', 'viewcount', {int_or_none}), 'like_count': ('info', 'likecount', {int_or_none}), 'description': ('info', 'synopsis', {clean_html}), 'episode': ('info', 'content', ('contenttitle', 'title')), 'episode_number': ('info', 'content', 'number', {int_or_none}), 'series': ('info', 'program', 'programtitle'), 
'age_limit': ('info', 'targetage', {int_or_none}), 'release_timestamp': ('info', 'broaddate', {parse_iso8601}), 'thumbnail': ('source', 'thumbnail', 'origin', {url_or_none}), }), get_all=False), 'formats': formats, 'subtitles': {'ko': [{'url': caption_url}]} if caption_url else None, } class SBSCoKrAllvodProgramIE(InfoExtractor): IE_NAME = 'sbs.co.kr:allvod_program' _VALID_URL = r'https?://allvod\.sbs\.co\.kr/allvod/vod(?:Free)?ProgramDetail\.do\?(?:[^#]+&)?pgmId=(?P<id>P?\d+)' _TESTS = [{ 'url': 'https://allvod.sbs.co.kr/allvod/vodFreeProgramDetail.do?type=legend&pgmId=22000010159&listOrder=vodCntAsc', 'info_dict': { '_type': 'playlist', 'id': '22000010159', }, 'playlist_count': 18, }, { 'url': 'https://allvod.sbs.co.kr/allvod/vodProgramDetail.do?pgmId=P460810577', 'info_dict': { '_type': 'playlist', 'id': 'P460810577', }, 'playlist_count': 13, }] def _real_extract(self, url): program_id = self._match_id(url) details = self._download_json( 'https://allvod.sbs.co.kr/allvod/vodProgramDetail/vodProgramDetailAjax.do', program_id, note='Downloading program details', query={ 'pgmId': program_id, 'currentCount': '10000', }) return self.playlist_result( [self.url_result(f'https://allvod.sbs.co.kr/allvod/vodEndPage.do?mdaId={video_id}', SBSCoKrIE) for video_id in traverse_obj(details, ('list', ..., 'mdaId'))], program_id) class SBSCoKrProgramsVodIE(InfoExtractor): IE_NAME = 'sbs.co.kr:programs_vod' _VALID_URL = r'https?://programs\.sbs\.co\.kr/(?:enter|drama|culture|sports|plus|mtv)/(?P<id>[a-z0-9]+)/vods' _TESTS = [{ 'url': 'https://programs.sbs.co.kr/culture/morningwide/vods/65007', 'info_dict': { '_type': 'playlist', 'id': '00000210215', }, 'playlist_mincount': 9782, }, { 'url': 'https://programs.sbs.co.kr/enter/dongsang2/vods/52006', 'info_dict': { '_type': 'playlist', 'id': '22000010476', }, 'playlist_mincount': 312, }] def _real_extract(self, url): program_slug = self._match_id(url) program_id = self._download_json( 
f'https://static.apis.sbs.co.kr/program-api/1.0/menu/{program_slug}', program_slug, note='Downloading program menu data')['program']['programid'] return self.url_result( f'https://allvod.sbs.co.kr/allvod/vodProgramDetail.do?pgmId={program_id}', SBSCoKrAllvodProgramIE)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/googledrive.py
yt_dlp/extractor/googledrive.py
import re from .common import InfoExtractor from ..utils import ( determine_ext, extract_attributes, filter_dict, get_element_by_class, get_element_html_by_id, int_or_none, mimetype2ext, parse_duration, str_or_none, update_url_query, url_or_none, ) from ..utils.traversal import traverse_obj, value class GoogleDriveIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// (?: (?:docs|drive|drive\.usercontent)\.google\.com/ (?: (?:uc|open|download)\?.*?id=| file/d/ )| video\.google\.com/get_player\?.*?docid= ) (?P<id>[a-zA-Z0-9_-]{28,}) ''' _TESTS = [{ 'url': 'https://drive.google.com/file/d/0ByeS4oOUV-49Zzh4R1J6R09zazQ/edit?pli=1', 'md5': '5c602afbbf2c1db91831f5d82f678554', 'info_dict': { 'id': '0ByeS4oOUV-49Zzh4R1J6R09zazQ', 'ext': 'mp4', 'title': 'Big Buck Bunny.mp4', 'duration': 45.069, 'thumbnail': r're:https://lh3\.googleusercontent\.com/drive-storage/', }, }, { # has itag 50 which is not in YoutubeIE._formats (royalty Free music from 1922) 'url': 'https://drive.google.com/uc?id=1IP0o8dHcQrIHGgVyp0Ofvx2cGfLzyO1x', 'md5': '322db8d63dd19788c04050a4bba67073', 'info_dict': { 'id': '1IP0o8dHcQrIHGgVyp0Ofvx2cGfLzyO1x', 'ext': 'mp3', 'title': 'My Buddy - Henry Burr - Gus Kahn - Walter Donaldson.mp3', 'duration': 184.68, }, }, { # Has subtitle track 'url': 'https://drive.google.com/file/d/1RAGWRgzn85TXCaCk4gxnwF6TGUaZatzE/view', 'md5': '05488c528da6ef737ec8c962bfa9724e', 'info_dict': { 'id': '1RAGWRgzn85TXCaCk4gxnwF6TGUaZatzE', 'ext': 'mp4', 'title': 'test.mp4', 'duration': 9.999, 'thumbnail': r're:https://lh3\.googleusercontent\.com/drive-storage/', }, }, { # Has subtitle track with kind 'asr' 'url': 'https://drive.google.com/file/d/1Prvv9-mtDDfN_gkJgtt1OFvIULK8c3Ev/view', 'md5': 'ccae12d07f18b5988900b2c8b92801fc', 'info_dict': { 'id': '1Prvv9-mtDDfN_gkJgtt1OFvIULK8c3Ev', 'ext': 'mp4', 'title': 'LEE NA GYUNG-3410-VOICE_MESSAGE.mp4', 'duration': 8.766, 'thumbnail': r're:https://lh3\.googleusercontent\.com/drive-storage/', }, }, { # video can't be watched anonymously due to 
view count limit reached, # but can be downloaded (see https://github.com/ytdl-org/youtube-dl/issues/14046) 'url': 'https://drive.google.com/file/d/0B-vUyvmDLdWDcEt4WjBqcmI2XzQ/view', 'only_matching': True, }, { # video id is longer than 28 characters 'url': 'https://drive.google.com/file/d/1ENcQ_jeCuj7y19s66_Ou9dRP4GKGsodiDQ/edit', 'only_matching': True, }, { 'url': 'https://drive.google.com/open?id=0B2fjwgkl1A_CX083Tkowdmt6d28', 'only_matching': True, }, { 'url': 'https://drive.google.com/uc?id=0B2fjwgkl1A_CX083Tkowdmt6d28', 'only_matching': True, }, { 'url': 'https://drive.usercontent.google.com/download?id=0ByeS4oOUV-49Zzh4R1J6R09zazQ', 'only_matching': True, }] @classmethod def _extract_embed_urls(cls, url, webpage): mobj = re.search( r'<iframe[^>]+src="https?://(?:video\.google\.com/get_player\?.*?docid=|(?:docs|drive)\.google\.com/file/d/)(?P<id>[a-zA-Z0-9_-]{28,})', webpage) if mobj: yield 'https://drive.google.com/file/d/{}'.format(mobj.group('id')) @staticmethod def _construct_subtitle_url(base_url, video_id, language, fmt, kind): return update_url_query( base_url, filter_dict({ 'hl': 'en-US', 'v': video_id, 'type': 'track', 'lang': language, 'fmt': fmt, 'kind': kind, })) def _get_subtitles(self, video_id, video_info): subtitles = {} timed_text_base_url = traverse_obj(video_info, ('timedTextDetails', 'timedTextBaseUrl', {url_or_none})) if not timed_text_base_url: return subtitles subtitle_data = self._download_xml( timed_text_base_url, video_id, 'Downloading subtitles XML', fatal=False, query={ 'hl': 'en-US', 'type': 'list', 'tlangs': 1, 'v': video_id, 'vssids': 1, }) subtitle_formats = traverse_obj(subtitle_data, (lambda _, v: v.tag == 'format', {lambda x: x.get('fmt_code')}, {str})) for track in traverse_obj(subtitle_data, (lambda _, v: v.tag == 'track' and v.get('lang_code'))): language = track.get('lang_code') subtitles.setdefault(language, []).extend([{ 'url': self._construct_subtitle_url( timed_text_base_url, video_id, language, sub_fmt, 
track.get('kind')), 'name': track.get('lang_original'), 'ext': sub_fmt, } for sub_fmt in subtitle_formats]) return subtitles def _real_extract(self, url): video_id = self._match_id(url) video_info = self._download_json( f'https://content-workspacevideo-pa.googleapis.com/v1/drive/media/{video_id}/playback', video_id, 'Downloading video webpage', query={'key': 'AIzaSyDVQw45DwoYh632gvsP5vPDqEKvb-Ywnb8'}, headers={'Referer': 'https://drive.google.com/'}) formats = [] for fmt in traverse_obj(video_info, ( 'mediaStreamingData', 'formatStreamingData', ('adaptiveTranscodes', 'progressiveTranscodes'), lambda _, v: url_or_none(v['url']))): formats.append({ **traverse_obj(fmt, { 'url': 'url', 'format_id': ('itag', {int}, {str_or_none}), }), **traverse_obj(fmt, ('transcodeMetadata', { 'ext': ('mimeType', {mimetype2ext}), 'width': ('width', {int_or_none}), 'height': ('height', {int_or_none}), 'fps': ('videoFps', {int_or_none}), 'filesize': ('contentLength', {int_or_none}), 'vcodec': ((('videoCodecString', {str}), {value('none')}), any), 'acodec': ((('audioCodecString', {str}), {value('none')}), any), })), 'downloader_options': { 'http_chunk_size': 10 << 20, }, }) title = traverse_obj(video_info, ('mediaMetadata', 'title', {str})) source_url = update_url_query( 'https://drive.usercontent.google.com/download', { 'id': video_id, 'export': 'download', 'confirm': 't', }) def request_source_file(source_url, kind, data=None): return self._request_webpage( source_url, video_id, note=f'Requesting {kind} file', errnote=f'Unable to request {kind} file', fatal=False, data=data) urlh = request_source_file(source_url, 'source') if urlh: def add_source_format(urlh): nonlocal title if not title: title = self._search_regex( r'\bfilename="([^"]+)"', urlh.headers.get('Content-Disposition'), 'title', default=None) formats.append({ # Use redirect URLs as download URLs in order to calculate # correct cookies in _calc_cookies. 
# Using original URLs may result in redirect loop due to # google.com's cookies mistakenly used for googleusercontent.com # redirect URLs (see #23919). 'url': urlh.url, 'ext': determine_ext(title, 'mp4').lower(), 'format_id': 'source', 'quality': 1, }) if urlh.headers.get('Content-Disposition'): add_source_format(urlh) else: confirmation_webpage = self._webpage_read_content( urlh, url, video_id, note='Downloading confirmation page', errnote='Unable to confirm download', fatal=False) if confirmation_webpage: confirmed_source_url = extract_attributes( get_element_html_by_id('download-form', confirmation_webpage) or '').get('action') if confirmed_source_url: urlh = request_source_file(confirmed_source_url, 'confirmed source', data=b'') if urlh and urlh.headers.get('Content-Disposition'): add_source_format(urlh) else: self.report_warning( get_element_by_class('uc-error-subcaption', confirmation_webpage) or get_element_by_class('uc-error-caption', confirmation_webpage) or 'unable to extract confirmation code') return { 'id': video_id, 'title': title, **traverse_obj(video_info, { 'duration': ('mediaMetadata', 'duration', {parse_duration}), 'thumbnails': ('thumbnails', lambda _, v: url_or_none(v['url']), { 'url': 'url', 'ext': ('mimeType', {mimetype2ext}), 'width': ('width', {int}), 'height': ('height', {int}), }), }), 'formats': formats, 'subtitles': self.extract_subtitles(video_id, video_info), } class GoogleDriveFolderIE(InfoExtractor): IE_NAME = 'GoogleDrive:Folder' _VALID_URL = r'https?://(?:docs|drive)\.google\.com/drive/folders/(?P<id>[\w-]{28,})' _TESTS = [{ 'url': 'https://drive.google.com/drive/folders/1dQ4sx0-__Nvg65rxTSgQrl7VyW_FZ9QI', 'info_dict': { 'id': '1dQ4sx0-__Nvg65rxTSgQrl7VyW_FZ9QI', 'title': 'Forrest', }, 'playlist_count': 3, }] _BOUNDARY = '=====vc17a3rwnndj=====' _REQUEST = 
"/drive/v2beta/files?openDrive=true&reason=102&syncType=0&errorRecovery=false&q=trashed%20%3D%20false%20and%20'{folder_id}'%20in%20parents&fields=kind%2CnextPageToken%2Citems(kind%2CmodifiedDate%2CmodifiedByMeDate%2ClastViewedByMeDate%2CfileSize%2Cowners(kind%2CpermissionId%2Cid)%2ClastModifyingUser(kind%2CpermissionId%2Cid)%2ChasThumbnail%2CthumbnailVersion%2Ctitle%2Cid%2CresourceKey%2Cshared%2CsharedWithMeDate%2CuserPermission(role)%2CexplicitlyTrashed%2CmimeType%2CquotaBytesUsed%2Ccopyable%2CfileExtension%2CsharingUser(kind%2CpermissionId%2Cid)%2Cspaces%2Cversion%2CteamDriveId%2ChasAugmentedPermissions%2CcreatedDate%2CtrashingUser(kind%2CpermissionId%2Cid)%2CtrashedDate%2Cparents(id)%2CshortcutDetails(targetId%2CtargetMimeType%2CtargetLookupStatus)%2Ccapabilities(canCopy%2CcanDownload%2CcanEdit%2CcanAddChildren%2CcanDelete%2CcanRemoveChildren%2CcanShare%2CcanTrash%2CcanRename%2CcanReadTeamDrive%2CcanMoveTeamDriveItem)%2Clabels(starred%2Ctrashed%2Crestricted%2Cviewed))%2CincompleteSearch&appDataFilter=NO_APP_DATA&spaces=drive&pageToken={page_token}&maxResults=50&supportsTeamDrives=true&includeItemsFromAllDrives=true&corpora=default&orderBy=folder%2Ctitle_natural%20asc&retryCount=0&key={key} HTTP/1.1" _DATA = f'''--{_BOUNDARY} content-type: application/http content-transfer-encoding: binary GET %s --{_BOUNDARY} ''' def _call_api(self, folder_id, key, data, **kwargs): response = self._download_webpage( 'https://clients6.google.com/batch/drive/v2beta', folder_id, data=data.encode(), headers={ 'Content-Type': 'text/plain;charset=UTF-8;', 'Origin': 'https://drive.google.com', }, query={ '$ct': f'multipart/mixed; boundary="{self._BOUNDARY}"', 'key': key, }, **kwargs) return self._search_json('', response, 'api response', folder_id, **kwargs) or {} def _get_folder_items(self, folder_id, key): page_token = '' while page_token is not None: request = self._REQUEST.format(folder_id=folder_id, page_token=page_token, key=key) page = self._call_api(folder_id, key, self._DATA % 
request) yield from page['items'] page_token = page.get('nextPageToken') def _real_extract(self, url): folder_id = self._match_id(url) webpage = self._download_webpage(url, folder_id) key = self._search_regex(r'"(\w{39})"', webpage, 'key') folder_info = self._call_api(folder_id, key, self._DATA % f'/drive/v2beta/files/{folder_id} HTTP/1.1', fatal=False) return self.playlist_from_matches( self._get_folder_items(folder_id, key), folder_id, folder_info.get('title'), ie=GoogleDriveIE, getter=lambda item: f'https://drive.google.com/file/d/{item["id"]}')
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/vrsquare.py
yt_dlp/extractor/vrsquare.py
import itertools from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, clean_html, extract_attributes, parse_duration, parse_qs, ) from ..utils.traversal import ( find_element, find_elements, traverse_obj, ) class VrSquareIE(InfoExtractor): IE_NAME = 'vrsquare' IE_DESC = 'VR SQUARE' _BASE_URL = 'https://livr.jp' _VALID_URL = r'https?://livr\.jp/contents/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://livr.jp/contents/P470896661', 'info_dict': { 'id': 'P470896661', 'ext': 'mp4', 'title': 'そこ曲がったら、櫻坂? 7年間お疲れ様!菅井友香の卒業を祝う会!前半 2022年11月6日放送分', 'description': 'md5:523726dc835aa8014dfe1e2b38d36cd1', 'duration': 1515.0, 'tags': 'count:2', 'thumbnail': r're:https?://media\.livr\.jp/vod/img/.+\.jpg', }, }, { 'url': 'https://livr.jp/contents/P589523973', 'info_dict': { 'id': 'P589523973', 'ext': 'mp4', 'title': '薄闇に仰ぐ しだれ桜の妖艶', 'description': 'md5:a042f517b2cbb4ed6746707afec4d306', 'duration': 1084.0, 'tags': list, 'thumbnail': r're:https?://media\.livr\.jp/vod/img/.+\.jpg', }, 'skip': 'Paid video', }, { 'url': 'https://livr.jp/contents/P316939908', 'info_dict': { 'id': 'P316939908', 'ext': 'mp4', 'title': '2024年5月16日(木) 「今日は誰に恋をする?」公演 小栗有以 生誕祭', 'description': 'md5:2110bdcf947f28bd7d06ec420e51b619', 'duration': 8559.0, 'tags': list, 'thumbnail': r're:https?://media\.livr\.jp/vod/img/.+\.jpg', }, 'skip': 'Premium channel subscribers only', }, { # Accessible only in the VR SQUARE app 'url': 'https://livr.jp/contents/P126481458', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) status = self._download_json( f'{self._BASE_URL}/webApi/contentsStatus/{video_id}', video_id, 'Checking contents status', fatal=False) if traverse_obj(status, 'result_code') == '40407': self.raise_login_required('Unable to access this video') try: web_api = self._download_json( f'{self._BASE_URL}/webApi/play/url/{video_id}', video_id) except ExtractorError as e: 
if isinstance(e.cause, HTTPError) and e.cause.status == 500: raise ExtractorError('VR SQUARE app-only videos are not supported', expected=True) raise return { 'id': video_id, 'title': self._html_search_meta(['og:title', 'twitter:title'], webpage), 'description': self._html_search_meta('description', webpage), 'formats': self._extract_m3u8_formats(traverse_obj(web_api, ( 'urls', ..., 'url', any)), video_id, 'mp4', fatal=False), 'thumbnail': self._html_search_meta('og:image', webpage), **traverse_obj(webpage, { 'duration': ({find_element(cls='layout-product-data-time')}, {parse_duration}), 'tags': ({find_elements(cls='search-tag')}, ..., {clean_html}), }), } class VrSquarePlaylistBaseIE(InfoExtractor): _BASE_URL = 'https://livr.jp' def _fetch_vids(self, source, keys=()): for url_path in traverse_obj(source, ( *keys, {find_elements(cls='video', html=True)}, ..., {extract_attributes}, 'data-url', {str}, filter), ): yield self.url_result( f'{self._BASE_URL}/contents/{url_path.removeprefix("/contents/")}', VrSquareIE) def _entries(self, path, display_id, query=None): for page in itertools.count(1): ajax = self._download_json( f'{self._BASE_URL}{path}', display_id, f'Downloading playlist JSON page {page}', query={'p': page, **(query or {})}) yield from self._fetch_vids(ajax, ('contents_render_list', ...)) if not traverse_obj(ajax, (('has_next', 'hasNext'), {bool}, any)): break class VrSquareChannelIE(VrSquarePlaylistBaseIE): IE_NAME = 'vrsquare:channel' _VALID_URL = r'https?://livr\.jp/channel/(?P<id>\w+)' _TESTS = [{ 'url': 'https://livr.jp/channel/H372648599', 'info_dict': { 'id': 'H372648599', 'title': 'AKB48+チャンネル', }, 'playlist_mincount': 502, }] def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) return self.playlist_result( self._entries(f'/ajax/channel/{playlist_id}', playlist_id), playlist_id, self._html_search_meta('og:title', webpage)) class VrSquareSearchIE(VrSquarePlaylistBaseIE): IE_NAME = 
'vrsquare:search' _VALID_URL = r'https?://livr\.jp/web-search/?\?(?:[^#]+&)?w=[^#]+' _TESTS = [{ 'url': 'https://livr.jp/web-search?w=%23%E5%B0%8F%E6%A0%97%E6%9C%89%E4%BB%A5', 'info_dict': { 'id': '#小栗有以', }, 'playlist_mincount': 60, }] def _real_extract(self, url): search_query = parse_qs(url)['w'][0] return self.playlist_result( self._entries('/ajax/web-search', search_query, {'w': search_query}), search_query) class VrSquareSectionIE(VrSquarePlaylistBaseIE): IE_NAME = 'vrsquare:section' _VALID_URL = r'https?://livr\.jp/(?:category|headline)/(?P<id>\w+)' _TESTS = [{ 'url': 'https://livr.jp/category/C133936275', 'info_dict': { 'id': 'C133936275', 'title': 'そこ曲がったら、櫻坂?VR', }, 'playlist_mincount': 308, }, { 'url': 'https://livr.jp/headline/A296449604', 'info_dict': { 'id': 'A296449604', 'title': 'AKB48 アフターVR', }, 'playlist_mincount': 22, }] def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) return self.playlist_result( self._fetch_vids(webpage), playlist_id, self._html_search_meta('og:title', webpage))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/cybrary.py
yt_dlp/extractor/cybrary.py
from .common import InfoExtractor from ..utils import ( ExtractorError, smuggle_url, str_or_none, traverse_obj, urlencode_postdata, ) class CybraryBaseIE(InfoExtractor): _API_KEY = 'AIzaSyCX9ru6j70PX2My1Eq6Q1zoMAhuTdXlzSw' _ENDPOINTS = { 'course': 'https://app.cybrary.it/courses/api/catalog/browse/course/{}', 'course_enrollment': 'https://app.cybrary.it/courses/api/catalog/{}/enrollment', 'enrollment': 'https://app.cybrary.it/courses/api/enrollment/{}', 'launch': 'https://app.cybrary.it/courses/api/catalog/{}/launch', 'vimeo_oembed': 'https://vimeo.com/api/oembed.json?url=https://vimeo.com/{}', } _NETRC_MACHINE = 'cybrary' _TOKEN = None def _perform_login(self, username, password): CybraryBaseIE._TOKEN = self._download_json( f'https://identitytoolkit.googleapis.com/v1/accounts:signInWithPassword?key={self._API_KEY}', None, data=urlencode_postdata({'email': username, 'password': password, 'returnSecureToken': True}), note='Logging in')['idToken'] def _real_initialize(self): if not self._TOKEN: self.raise_login_required(method='password') def _call_api(self, endpoint, item_id): return self._download_json( self._ENDPOINTS[endpoint].format(item_id), item_id, note=f'Downloading {endpoint} JSON metadata', headers={'Authorization': f'Bearer {self._TOKEN}'}) def _get_vimeo_id(self, activity_id): launch_api = self._call_api('launch', activity_id) if launch_api.get('url'): return self._search_regex(r'https?://player\.vimeo\.com/video/(?P<vimeo_id>[0-9]+)', launch_api['url'], 'vimeo_id') return traverse_obj(launch_api, ('vendor_data', 'content', ..., 'videoId'), get_all=False) class CybraryIE(CybraryBaseIE): _VALID_URL = r'https?://app\.cybrary\.it/immersive/(?P<enrollment>[0-9]+)/activity/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://app.cybrary.it/immersive/12487950/activity/63102', 'md5': '9ae12d37e555cb2ed554223a71a701d0', 'info_dict': { 'id': '646609770', 'ext': 'mp4', 'title': 'Getting Started', 'thumbnail': 
'https://i.vimeocdn.com/video/1301817996-76a268f0c56cff18a5cecbbdc44131eb9dda0c80eb0b3a036_1280', 'series_id': '63111', 'uploader_url': 'https://vimeo.com/user30867300', 'duration': 88, 'uploader_id': 'user30867300', 'series': 'Cybrary Orientation', 'uploader': 'Cybrary', 'chapter': 'Cybrary Orientation Series', 'chapter_id': '63110', }, 'expected_warnings': ['No authenticators for vimeo'], }, { 'url': 'https://app.cybrary.it/immersive/12747143/activity/52686', 'md5': '62f26547dccc59c44363e2a13d4ad08d', 'info_dict': { 'id': '445638073', 'ext': 'mp4', 'title': 'Azure Virtual Network IP Addressing', 'thumbnail': 'https://i.vimeocdn.com/video/936667051-1647ace66c627d4a2382185e0dae8deb830309bfddd53f8b2367b2f91e92ed0e-d_1280', 'series_id': '52733', 'uploader_url': 'https://vimeo.com/user30867300', 'duration': 426, 'uploader_id': 'user30867300', 'series': 'AZ-500: Microsoft Azure Security Technologies', 'uploader': 'Cybrary', 'chapter': 'Implement Network Security', 'chapter_id': '52693', }, 'expected_warnings': ['No authenticators for vimeo'], }] def _real_extract(self, url): activity_id, enrollment_id = self._match_valid_url(url).group('id', 'enrollment') course = self._call_api('enrollment', enrollment_id)['content'] activity = traverse_obj(course, ('learning_modules', ..., 'activities', lambda _, v: int(activity_id) == v['id']), get_all=False) if activity.get('type') not in ['Video Activity', 'Lesson Activity']: raise ExtractorError('The activity is not a video', expected=True) module = next((m for m in course.get('learning_modules') or [] if int(activity_id) in traverse_obj(m, ('activities', ..., 'id'))), None) vimeo_id = self._get_vimeo_id(activity_id) return { '_type': 'url_transparent', 'series': traverse_obj(course, ('content_description', 'title')), 'series_id': str_or_none(traverse_obj(course, ('content_description', 'id'))), 'id': vimeo_id, 'chapter': module.get('title'), 'chapter_id': str_or_none(module.get('id')), 'title': activity.get('title'), 'url': 
smuggle_url(f'https://player.vimeo.com/video/{vimeo_id}', {'referer': 'https://api.cybrary.it'}), } class CybraryCourseIE(CybraryBaseIE): _VALID_URL = r'https?://app\.cybrary\.it/browse/course/(?P<id>[\w-]+)/?(?:$|[#?])' _TESTS = [{ 'url': 'https://app.cybrary.it/browse/course/az-500-microsoft-azure-security-technologies', 'info_dict': { 'id': '898', 'title': 'AZ-500: Microsoft Azure Security Technologies', 'description': 'md5:69549d379c0fc1dec92926d4e8b6fbd4', }, 'playlist_count': 59, }, { 'url': 'https://app.cybrary.it/browse/course/cybrary-orientation', 'info_dict': { 'id': '1245', 'title': 'Cybrary Orientation', 'description': 'md5:9e69ff66b32fe78744e0ad4babe2e88e', }, 'playlist_count': 4, }] def _real_extract(self, url): course_id = self._match_id(url) course = self._call_api('course', course_id) enrollment_info = self._call_api('course_enrollment', course['id']) entries = [self.url_result( f'https://app.cybrary.it/immersive/{enrollment_info["id"]}/activity/{activity["id"]}') for activity in traverse_obj(course, ('content_item', 'learning_modules', ..., 'activities', ...))] return self.playlist_result( entries, traverse_obj(course, ('content_item', 'id'), expected_type=str_or_none), course.get('title'), course.get('short_description'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/peertube.py
yt_dlp/extractor/peertube.py
import functools import re from .common import InfoExtractor from ..utils import ( OnDemandPagedList, format_field, int_or_none, parse_resolution, str_or_none, try_get, unified_timestamp, url_or_none, urljoin, ) class PeerTubeIE(InfoExtractor): _INSTANCES_RE = r'''(?: # Taken from https://instances.joinpeertube.org/instances 0ch\.tv| 3dctube\.3dcandy\.social| all\.electric\.kitchen| alterscope\.fr| anarchy\.tube| apathy\.tv| apertatube\.net| archive\.nocopyrightintended\.tv| archive\.reclaim\.tv| area51\.media| astrotube-ufe\.obspm\.fr| astrotube\.obspm\.fr| audio\.freediverse\.com| azxtube\.youssefc\.tn| bark\.video| battlepenguin\.video| bava\.tv| bee-tube\.fr| beetoons\.tv| biblion\.refchat\.net| biblioteca\.theowlclub\.net| bideoak\.argia\.eus| bideoteka\.eus| birdtu\.be| bitcointv\.com| bonn\.video| breeze\.tube| brioco\.live| brocosoup\.fr| canal\.facil\.services| canard\.tube| cdn01\.tilvids\.com| celluloid-media\.huma-num\.fr| chicago1\.peertube\.support| cliptube\.org| cloudtube\.ise\.fraunhofer\.de| comf\.tube| comics\.peertube\.biz| commons\.tube| communitymedia\.video| conspiracydistillery\.com| crank\.recoil\.org| dalek\.zone| dalliance\.network| dangly\.parts| darkvapor\.nohost\.me| daschauher\.aksel\.rocks| digitalcourage\.video| displayeurope\.video| ds106\.tv| dud-video\.inf\.tu-dresden\.de| dud175\.inf\.tu-dresden\.de| dytube\.com| ebildungslabor\.video| evangelisch\.video| fair\.tube| fedi\.video| fedimovie\.com| fediverse\.tv| film\.k-prod\.fr| flipboard\.video| foss\.video| fossfarmers\.company| fotogramas\.politicaconciencia\.org| freediverse\.com| freesoto-u2151\.vm\.elestio\.app| freesoto\.tv| garr\.tv| greatview\.video| grypstube\.uni-greifswald\.de| habratube\.site| ilbjach\.ru| infothema\.net| itvplus\.iiens\.net| johnydeep\.net| juggling\.digital| jupiter\.tube| kadras\.live| kino\.kompot\.si| kino\.schuerz\.at| kinowolnosc\.pl| kirche\.peertube-host\.de| kiwi\.froggirl\.club| kodcast\.com| kolektiva\.media| kpop\.22x22\.ru| kumi\.tube| 
la2\.peertube\.support| la3\.peertube\.support| la4\.peertube\.support| lastbreach\.tv| lawsplaining\.peertube\.biz| leopard\.tube| live\.codinglab\.ch| live\.libratoi\.org| live\.oldskool\.fi| live\.solari\.com| lucarne\.balsamine\.be| luxtube\.lu| makertube\.net| media\.econoalchemist\.com| media\.exo\.cat| media\.fsfe\.org| media\.gzevd\.de| media\.interior\.edu\.uy| media\.krashboyz\.org| media\.mzhd\.de| media\.smz-ma\.de| media\.theplattform\.net| media\.undeadnetwork\.de| medias\.debrouillonet\.org| medias\.pingbase\.net| mediatube\.fermalo\.fr| melsungen\.peertube-host\.de| merci-la-police\.fr| mindlyvideos\.com| mirror\.peertube\.metalbanana\.net| mirrored\.rocks| mix\.video| mountaintown\.video| movies\.metricsmaster\.eu| mtube\.mooo\.com| mytube\.kn-cloud\.de| mytube\.le5emeaxe\.fr| mytube\.madzel\.de| nadajemy\.com| nanawel-peertube\.dyndns\.org| neat\.tube| nethack\.tv| nicecrew\.tv| nightshift\.minnix\.dev| nolog\.media| nyltube\.nylarea\.com| ocfedtest\.hosted\.spacebear\.ee| openmedia\.edunova\.it| p2ptv\.ru| p\.eertu\.be| p\.lu| pastafriday\.club| patriottube\.sonsofliberty\.red| pcbu\.nl| peer\.azurs\.fr| peer\.d0g4\.me| peer\.lukeog\.com| peer\.madiator\.cloud| peer\.raise-uav\.com| peershare\.togart\.de| peertube-blablalinux\.be| peertube-demo\.learning-hub\.fr| peertube-docker\.cpy\.re| peertube-eu\.howlround\.com| peertube-u5014\.vm\.elestio\.app| peertube-us\.howlround\.com| peertube\.020\.pl| peertube\.0x5e\.eu| peertube\.1984\.cz| peertube\.2i2l\.net| peertube\.adjutor\.xyz| peertube\.adresse\.data\.gouv\.fr| peertube\.alpharius\.io| peertube\.am-networks\.fr| peertube\.anduin\.net| peertube\.anti-logic\.com| peertube\.arch-linux\.cz| peertube\.art3mis\.de| peertube\.artsrn\.ualberta\.ca| peertube\.askan\.info| peertube\.astral0pitek\.synology\.me| peertube\.atsuchan\.page| peertube\.automat\.click| peertube\.b38\.rural-it\.org| peertube\.be| peertube\.beeldengeluid\.nl| peertube\.bgzashtita\.es| peertube\.bike| peertube\.bildung-ekhn\.de| 
peertube\.biz| peertube\.br0\.fr| peertube\.bridaahost\.ynh\.fr| peertube\.bubbletea\.dev| peertube\.bubuit\.net| peertube\.cabaal\.net| peertube\.chatinbit\.com| peertube\.chaunchy\.com| peertube\.chir\.rs| peertube\.christianpacaud\.com| peertube\.chtisurel\.net| peertube\.chuggybumba\.com| peertube\.cipherbliss\.com| peertube\.cirkau\.art| peertube\.cloud\.nerdraum\.de| peertube\.cloud\.sans\.pub| peertube\.coko\.foundation| peertube\.communecter\.org| peertube\.concordia\.social| peertube\.corrigan\.xyz| peertube\.cpge-brizeux\.fr| peertube\.ctseuro\.com| peertube\.cuatrolibertades\.org| peertube\.cube4fun\.net| peertube\.dair-institute\.org| peertube\.davigge\.com| peertube\.dc\.pini\.fr| peertube\.deadtom\.me| peertube\.debian\.social| peertube\.delta0189\.xyz| peertube\.demonix\.fr| peertube\.designersethiques\.org| peertube\.desmu\.fr| peertube\.devol\.it| peertube\.dk| peertube\.doesstuff\.social| peertube\.eb8\.org| peertube\.education-forum\.com| peertube\.elforcer\.ru| peertube\.em\.id\.lv| peertube\.ethibox\.fr| peertube\.eu\.org| peertube\.european-pirates\.eu| peertube\.eus| peertube\.euskarabildua\.eus| peertube\.expi\.studio| peertube\.familie-berner\.de| peertube\.familleboisteau\.fr| peertube\.fedihost\.website| peertube\.fenarinarsa\.com| peertube\.festnoz\.de| peertube\.forteza\.fr| peertube\.freestorm\.online| peertube\.functional\.cafe| peertube\.gaminglinux\.fr| peertube\.gargantia\.fr| peertube\.geekgalaxy\.fr| peertube\.gemlog\.ca| peertube\.genma\.fr| peertube\.get-racing\.de| peertube\.ghis94\.ovh| peertube\.gidikroon\.eu| peertube\.giftedmc\.com| peertube\.grosist\.fr| peertube\.gruntwerk\.org| peertube\.gsugambit\.com| peertube\.hackerfoo\.com| peertube\.hellsite\.net| peertube\.helvetet\.eu| peertube\.histoirescrepues\.fr| peertube\.home\.x0r\.fr| peertube\.hyperfreedom\.org| peertube\.ichigo\.everydayimshuflin\.com| peertube\.ifwo\.eu| peertube\.in\.ua| peertube\.inapurna\.org| peertube\.informaction\.info| peertube\.interhop\.org| 
peertube\.it| peertube\.it-arts\.net| peertube\.jensdiemer\.de| peertube\.johntheserg\.al| peertube\.kaleidos\.net| peertube\.kalua\.im| peertube\.kcore\.org| peertube\.keazilla\.net| peertube\.klaewyss\.fr| peertube\.kleph\.eu| peertube\.kodein\.be| peertube\.kooperatywa\.tech| peertube\.kriom\.net| peertube\.kx\.studio| peertube\.kyriog\.eu| peertube\.la-famille-muller\.fr| peertube\.labeuropereunion\.eu| peertube\.lagvoid\.com| peertube\.lhc\.net\.br| peertube\.libresolutions\.network| peertube\.libretic\.fr| peertube\.librosphere\.fr| peertube\.logilab\.fr| peertube\.lon\.tv| peertube\.louisematic\.site| peertube\.luckow\.org| peertube\.luga\.at| peertube\.lyceeconnecte\.fr| peertube\.madixam\.xyz| peertube\.magicstone\.dev| peertube\.marienschule\.de| peertube\.marud\.fr| peertube\.maxweiss\.io| peertube\.miguelcr\.me| peertube\.mikemestnik\.net| peertube\.mobilsicher\.de| peertube\.monlycee\.net| peertube\.mxinfo\.fr| peertube\.naln1\.ca| peertube\.netzbegruenung\.de| peertube\.nicolastissot\.fr| peertube\.nogafam\.fr| peertube\.normalgamingcommunity\.cz| peertube\.nz| peertube\.offerman\.com| peertube\.ohioskates\.com| peertube\.onionstorm\.net| peertube\.opencloud\.lu| peertube\.otakufarms\.com| peertube\.paladyn\.org| peertube\.pix-n-chill\.fr| peertube\.r2\.enst\.fr| peertube\.r5c3\.fr| peertube\.redpill-insight\.com| peertube\.researchinstitute\.at| peertube\.revelin\.fr| peertube\.rlp\.schule| peertube\.rokugan\.fr| peertube\.rougevertbleu\.tv| peertube\.roundpond\.net| peertube\.rural-it\.org| peertube\.satoshishop\.de| peertube\.scyldings\.com| peertube\.securitymadein\.lu| peertube\.semperpax\.com| peertube\.semweb\.pro| peertube\.sensin\.eu| peertube\.sidh\.bzh| peertube\.skorpil\.cz| peertube\.smertrios\.com| peertube\.sqweeb\.net| peertube\.stattzeitung\.org| peertube\.stream| peertube\.su| peertube\.swrs\.net| peertube\.takeko\.cyou| peertube\.taxinachtegel\.de| peertube\.teftera\.com| peertube\.teutronic-services\.de| peertube\.ti-fr\.com| 
peertube\.tiennot\.net| peertube\.tmp\.rcp\.tf| peertube\.tspu\.edu\.ru| peertube\.tv| peertube\.tweb\.tv| peertube\.underworld\.fr| peertube\.vapronva\.pw| peertube\.veen\.world| peertube\.vesdia\.eu| peertube\.virtual-assembly\.org| peertube\.viviers-fibre\.net| peertube\.vlaki\.cz| peertube\.wiesbaden\.social| peertube\.wivodaim\.net| peertube\.wtf| peertube\.wtfayla\.net| peertube\.xrcb\.cat| peertube\.xwiki\.com| peertube\.zd\.do| peertube\.zetamc\.net| peertube\.zmuuf\.org| peertube\.zoz-serv\.org| peertube\.zwindler\.fr| peervideo\.ru| periscope\.numenaute\.org| pete\.warpnine\.de| petitlutinartube\.fr| phijkchu\.com| phoenixproject\.group| piraten\.space| pirtube\.calut\.fr| pityu\.flaki\.hu| play\.mittdata\.se| player\.ojamajo\.moe| podlibre\.video| portal\.digilab\.nfa\.cz| private\.fedimovie\.com| pt01\.lehrerfortbildung-bw\.de| pt\.diaspodon\.fr| pt\.freedomwolf\.cc| pt\.gordons\.gen\.nz| pt\.ilyamikcoder\.com| pt\.irnok\.net| pt\.mezzo\.moe| pt\.na4\.eu| pt\.netcraft\.ch| pt\.rwx\.ch| pt\.sfunk1x\.com| pt\.thishorsie\.rocks| pt\.vern\.cc| ptb\.lunarviews\.net| ptube\.de| ptube\.ranranhome\.info| puffy\.tube| puppet\.zone| qtube\.qlyoung\.net| quantube\.win| rankett\.net| replay\.jres\.org| review\.peertube\.biz| sdmtube\.fr| secure\.direct-live\.net| secure\.scanovid\.com| seka\.pona\.la| serv3\.wiki-tube\.de| skeptube\.fr| social\.fedimovie\.com| socpeertube\.ru| sovran\.video| special\.videovortex\.tv| spectra\.video| stl1988\.peertube-host\.de| stream\.biovisata\.lt| stream\.conesphere\.cloud| stream\.elven\.pw| stream\.jurnalfm\.md| stream\.k-prod\.fr| stream\.litera\.tools| stream\.nuemedia\.se| stream\.rlp-media\.de| stream\.vrse\.be| studios\.racer159\.com| styxhexenhammer666\.com| syrteplay\.obspm\.fr| t\.0x0\.st| tbh\.co-shaoghal\.net| test-fab\.ynh\.fr| testube\.distrilab\.fr| tgi\.hosted\.spacebear\.ee| theater\.ethernia\.net| thecool\.tube| thevideoverse\.com| tilvids\.com| tinkerbetter\.tube| tinsley\.video| trailers\.ddigest\.com| 
tube-action-educative\.apps\.education\.fr| tube-arts-lettres-sciences-humaines\.apps\.education\.fr| tube-cycle-2\.apps\.education\.fr| tube-cycle-3\.apps\.education\.fr| tube-education-physique-et-sportive\.apps\.education\.fr| tube-enseignement-professionnel\.apps\.education\.fr| tube-institutionnel\.apps\.education\.fr| tube-langues-vivantes\.apps\.education\.fr| tube-maternelle\.apps\.education\.fr| tube-numerique-educatif\.apps\.education\.fr| tube-sciences-technologies\.apps\.education\.fr| tube-test\.apps\.education\.fr| tube1\.perron-service\.de| tube\.9minuti\.it| tube\.abolivier\.bzh| tube\.alado\.space| tube\.amic37\.fr| tube\.area404\.cloud| tube\.arthack\.nz| tube\.asulia\.fr| tube\.awkward\.company| tube\.azbyka\.ru| tube\.azkware\.net| tube\.bartrip\.me\.uk| tube\.belowtoxic\.media| tube\.bingle\.plus| tube\.bit-friends\.de| tube\.bstly\.de| tube\.chosto\.me| tube\.cms\.garden| tube\.communia\.org| tube\.cyberia\.club| tube\.cybershock\.life| tube\.dembased\.xyz| tube\.dev\.displ\.eu| tube\.digitalesozialearbeit\.de| tube\.distrilab\.fr| tube\.doortofreedom\.org| tube\.dsocialize\.net| tube\.e-jeremy\.com| tube\.ebin\.club| tube\.elemac\.fr| tube\.erzbistum-hamburg\.de| tube\.exozy\.me| tube\.fdn\.fr| tube\.fedi\.quebec| tube\.fediverse\.at| tube\.felinn\.org| tube\.flokinet\.is| tube\.foad\.me\.uk| tube\.freepeople\.fr| tube\.friloux\.me| tube\.froth\.zone| tube\.fulda\.social| tube\.futuretic\.fr| tube\.g1zm0\.de| tube\.g4rf\.net| tube\.gaiac\.io| tube\.geekyboo\.net| tube\.genb\.de| tube\.ghk-academy\.info| tube\.gi-it\.de| tube\.grap\.coop| tube\.graz\.social| tube\.grin\.hu| tube\.hokai\.lol| tube\.int5\.net| tube\.interhacker\.space| tube\.invisible\.ch| tube\.io18\.top| tube\.itsg\.host| tube\.jeena\.net| tube\.kh-berlin\.de| tube\.kockatoo\.org| tube\.kotur\.org| tube\.koweb\.fr| tube\.la-dina\.net| tube\.lab\.nrw| tube\.lacaveatonton\.ovh| tube\.laurent-malys\.fr| tube\.leetdreams\.ch| tube\.linkse\.media| tube\.lokad\.com| 
tube\.lucie-philou\.com| tube\.media-techport\.de| tube\.morozoff\.pro| tube\.neshweb\.net| tube\.nestor\.coop| tube\.network\.europa\.eu| tube\.nicfab\.eu| tube\.nieuwwestbrabant\.nl| tube\.nogafa\.org| tube\.novg\.net| tube\.nox-rhea\.org| tube\.nuagelibre\.fr| tube\.numerique\.gouv\.fr| tube\.nuxnik\.com| tube\.nx12\.net| tube\.octaplex\.net| tube\.oisux\.org| tube\.okcinfo\.news| tube\.onlinekirche\.net| tube\.opportunis\.me| tube\.oraclefilms\.com| tube\.org\.il| tube\.pacapime\.ovh| tube\.parinux\.org| tube\.pastwind\.top| tube\.picasoft\.net| tube\.pilgerweg-21\.de| tube\.pmj\.rocks| tube\.pol\.social| tube\.ponsonaille\.fr| tube\.portes-imaginaire\.org| tube\.public\.apolut\.net| tube\.pustule\.org| tube\.pyngu\.com| tube\.querdenken-711\.de| tube\.rebellion\.global| tube\.reseau-canope\.fr| tube\.rhythms-of-resistance\.org| tube\.risedsky\.ovh| tube\.rooty\.fr| tube\.rsi\.cnr\.it| tube\.ryne\.moe| tube\.schleuss\.online| tube\.schule\.social| tube\.sekretaerbaer\.net| tube\.shanti\.cafe| tube\.shela\.nu| tube\.skrep\.in| tube\.sleeping\.town| tube\.sp-codes\.de| tube\.spdns\.org| tube\.systerserver\.net| tube\.systest\.eu| tube\.tappret\.fr| tube\.techeasy\.org| tube\.thierrytalbert\.fr| tube\.tinfoil-hat\.net| tube\.toldi\.eu| tube\.tpshd\.de| tube\.trax\.im| tube\.troopers\.agency| tube\.ttk\.is| tube\.tuxfriend\.fr| tube\.tylerdavis\.xyz| tube\.ullihome\.de| tube\.ulne\.be| tube\.undernet\.uy| tube\.vrpnet\.org| tube\.wolfe\.casa| tube\.xd0\.de| tube\.xn--baw-joa\.social| tube\.xy-space\.de| tube\.yapbreak\.fr| tubedu\.org| tubulus\.openlatin\.org| turtleisland\.video| tututu\.tube| tv\.adast\.dk| tv\.adn\.life| tv\.arns\.lt| tv\.atmx\.ca| tv\.based\.quest| tv\.farewellutopia\.com| tv\.filmfreedom\.net| tv\.gravitons\.org| tv\.io\.seg\.br| tv\.lumbung\.space| tv\.pirateradio\.social| tv\.pirati\.cz| tv\.santic-zombie\.ru| tv\.undersco\.re| tv\.zonepl\.net| tvox\.ru| twctube\.twc-zone\.eu| twobeek\.com| urbanists\.video| v\.9tail\.net| 
v\.basspistol\.org| v\.j4\.lc| v\.kisombrella\.top| v\.koa\.im| v\.kyaru\.xyz| v\.lor\.sh| v\.mkp\.ca| v\.posm\.gay| v\.slaycer\.top| veedeo\.org| vhs\.absturztau\.be| vid\.cthos\.dev| vid\.kinuseka\.us| vid\.mkp\.ca| vid\.nocogabriel\.fr| vid\.norbipeti\.eu| vid\.northbound\.online| vid\.ohboii\.de| vid\.plantplotting\.co\.uk| vid\.pretok\.tv| vid\.prometheus\.systems| vid\.soafen\.love| vid\.twhtv\.club| vid\.wildeboer\.net| video-cave-v2\.de| video-liberty\.com| video\.076\.ne\.jp| video\.1146\.nohost\.me| video\.9wd\.eu| video\.abraum\.de| video\.ados\.accoord\.fr| video\.amiga-ng\.org| video\.anartist\.org| video\.asgardius\.company| video\.audiovisuel-participatif\.org|
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
true
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/turner.py
yt_dlp/extractor/turner.py
import re from .adobepass import AdobePassIE from ..utils import ( ExtractorError, determine_ext, fix_xml_ampersands, float_or_none, int_or_none, parse_duration, strip_or_none, update_url_query, url_or_none, xpath_attr, xpath_text, ) class TurnerBaseIE(AdobePassIE): _AKAMAI_SPE_TOKEN_CACHE = {} def _extract_timestamp(self, video_data): return int_or_none(xpath_attr(video_data, 'dateCreated', 'uts')) def _add_akamai_spe_token(self, tokenizer_src, video_url, content_id, ap_data, software_statement, custom_tokenizer_query=None): secure_path = self._search_regex(r'https?://[^/]+(.+/)', video_url, 'secure path') + '*' token = self._AKAMAI_SPE_TOKEN_CACHE.get(secure_path) if not token: query = { 'path': secure_path, } if custom_tokenizer_query: query.update(custom_tokenizer_query) else: query['videoId'] = content_id if ap_data.get('auth_required'): query['accessToken'] = self._extract_mvpd_auth( ap_data['url'], content_id, ap_data['site_name'], ap_data['site_name'], software_statement) auth = self._download_xml( tokenizer_src, content_id, query=query) error_msg = xpath_text(auth, 'error/msg') if error_msg: raise ExtractorError(error_msg, expected=True) token = xpath_text(auth, 'token') if not token: return video_url self._AKAMAI_SPE_TOKEN_CACHE[secure_path] = token return video_url + '?hdnea=' + token def _extract_cvp_info(self, data_src, video_id, software_statement, path_data={}, ap_data={}, fatal=False): video_data = self._download_xml( data_src, video_id, transform_source=lambda s: fix_xml_ampersands(s).strip(), fatal=fatal) if not video_data: return {} video_id = video_data.attrib['id'] title = xpath_text(video_data, 'headline', fatal=True) content_id = xpath_text(video_data, 'contentId') or video_id # rtmp_src = xpath_text(video_data, 'akamai/src') # if rtmp_src: # split_rtmp_src = rtmp_src.split(',') # if len(split_rtmp_src) == 2: # rtmp_src = split_rtmp_src[1] # aifp = xpath_text(video_data, 'akamai/aifp', default='') urls = [] formats = [] thumbnails = [] 
subtitles = {} rex = re.compile( r'(?P<width>[0-9]+)x(?P<height>[0-9]+)(?:_(?P<bitrate>[0-9]+))?') # Possible formats locations: files/file, files/groupFiles/files # and maybe others for video_file in video_data.findall('.//file'): video_url = url_or_none(video_file.text.strip()) if not video_url: continue ext = determine_ext(video_url) if video_url.startswith('/mp4:protected/'): continue # TODO: Correct extraction for these files # protected_path_data = path_data.get('protected') # if not protected_path_data or not rtmp_src: # continue # protected_path = self._search_regex( # r'/mp4:(.+)\.[a-z0-9]', video_url, 'secure path') # auth = self._download_webpage( # protected_path_data['tokenizer_src'], query={ # 'path': protected_path, # 'videoId': content_id, # 'aifp': aifp, # }) # token = xpath_text(auth, 'token') # if not token: # continue # video_url = rtmp_src + video_url + '?' + token elif video_url.startswith('/secure/'): secure_path_data = path_data.get('secure') if not secure_path_data: continue video_url = self._add_akamai_spe_token( secure_path_data['tokenizer_src'], secure_path_data['media_src'] + video_url, content_id, ap_data, software_statement) elif not re.match('https?://', video_url): base_path_data = path_data.get(ext, path_data.get('default', {})) media_src = base_path_data.get('media_src') if not media_src: continue video_url = media_src + video_url if video_url in urls: continue urls.append(video_url) format_id = video_file.get('bitrate') if ext in ('scc', 'srt', 'vtt'): subtitles.setdefault('en', []).append({ 'ext': ext, 'url': video_url, }) elif ext == 'png': thumbnails.append({ 'id': format_id, 'url': video_url, }) elif ext == 'smil': formats.extend(self._extract_smil_formats( video_url, video_id, fatal=False)) elif re.match(r'https?://[^/]+\.akamaihd\.net/[iz]/', video_url): formats.extend(self._extract_akamai_formats( video_url, video_id, { 'hds': path_data.get('f4m', {}).get('host'), # nba.cdn.turner.com, ht.cdn.turner.com, ht2.cdn.turner.com 
# ht3.cdn.turner.com, i.cdn.turner.com, s.cdn.turner.com # ssl.cdn.turner.com 'http': 'pmd.cdn.turner.com', })) elif ext == 'm3u8': m3u8_formats = self._extract_m3u8_formats( video_url, video_id, 'mp4', m3u8_id=format_id or 'hls', fatal=False) if '/secure/' in video_url and '?hdnea=' in video_url: for f in m3u8_formats: f['downloader_options'] = {'ffmpeg_args': ['-seekable', '0']} formats.extend(m3u8_formats) elif ext == 'f4m': formats.extend(self._extract_f4m_formats( update_url_query(video_url, {'hdcore': '3.7.0'}), video_id, f4m_id=format_id or 'hds', fatal=False)) else: f = { 'format_id': format_id, 'url': video_url, 'ext': ext, } mobj = rex.search(video_url) if mobj: f.update({ 'width': int(mobj.group('width')), 'height': int(mobj.group('height')), 'tbr': int_or_none(mobj.group('bitrate')), }) elif isinstance(format_id, str): if format_id.isdigit(): f['tbr'] = int(format_id) else: mobj = re.match(r'ios_(audio|[0-9]+)$', format_id) if mobj: if mobj.group(1) == 'audio': f.update({ 'vcodec': 'none', 'ext': 'm4a', }) else: f['tbr'] = int(mobj.group(1)) formats.append(f) for source in video_data.findall('closedCaptions/source'): for track in source.findall('track'): track_url = url_or_none(track.get('url')) if not track_url or track_url.endswith('/big'): continue lang = track.get('lang') or track.get('label') or 'en' subtitles.setdefault(lang, []).append({ 'url': track_url, 'ext': { 'scc': 'scc', 'webvtt': 'vtt', 'smptett': 'tt', }.get(source.get('format')), }) thumbnails.extend({ 'id': image.get('cut') or image.get('name'), 'url': image.text, 'width': int_or_none(image.get('width')), 'height': int_or_none(image.get('height')), } for image in video_data.findall('images/image')) is_live = xpath_text(video_data, 'isLive') == 'true' return { 'id': video_id, 'title': title, 'formats': formats, 'subtitles': subtitles, 'thumbnails': thumbnails, 'thumbnail': xpath_text(video_data, 'poster'), 'description': strip_or_none(xpath_text(video_data, 'description')), 'duration': 
parse_duration(xpath_text(video_data, 'length') or xpath_text(video_data, 'trt')), 'timestamp': self._extract_timestamp(video_data), 'upload_date': xpath_attr(video_data, 'metas', 'version'), 'series': xpath_text(video_data, 'showTitle'), 'season_number': int_or_none(xpath_text(video_data, 'seasonNumber')), 'episode_number': int_or_none(xpath_text(video_data, 'episodeNumber')), 'is_live': is_live, } def _extract_ngtv_info(self, media_id, tokenizer_query, software_statement, ap_data=None): if not isinstance(ap_data, dict): ap_data = {} is_live = ap_data.get('is_live') streams_data = self._download_json( f'https://medium.ngtv.io/media/{media_id}/tv', media_id)['media']['tv'] duration = None chapters = [] formats = [] for supported_type in ('unprotected', 'bulkaes'): stream_data = streams_data.get(supported_type, {}) m3u8_url = stream_data.get('secureUrl') or stream_data.get('url') if not m3u8_url: continue if stream_data.get('playlistProtection') == 'spe': m3u8_url = self._add_akamai_spe_token( 'https://token.ngtv.io/token/token_spe', m3u8_url, media_id, ap_data, software_statement, tokenizer_query) formats.extend(self._extract_m3u8_formats( m3u8_url, media_id, 'mp4', m3u8_id='hls', live=is_live, fatal=False)) duration = float_or_none(stream_data.get('totalRuntime')) if not chapters and not is_live: for chapter in stream_data.get('contentSegments', []): start_time = float_or_none(chapter.get('start')) chapter_duration = float_or_none(chapter.get('duration')) if start_time is None or chapter_duration is None: continue chapters.append({ 'start_time': start_time, 'end_time': start_time + chapter_duration, }) if is_live: for f in formats: # Prevent ffmpeg from adding its own http headers or else we get HTTP Error 403 f['downloader_options'] = {'ffmpeg_args': ['-seekable', '0', '-icy', '0']} return { 'formats': formats, 'chapters': chapters, 'duration': duration, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/dw.py
yt_dlp/extractor/dw.py
import urllib.parse from .common import InfoExtractor from ..utils import ( int_or_none, unified_strdate, url_or_none, ) class DWIE(InfoExtractor): _WORKING = False _ENABLED = None # XXX: pass through to GenericIE IE_NAME = 'dw' _VALID_URL = r'https?://(?:www\.)?dw\.com/(?:[^/]+/)+(?:av|e)-(?P<id>\d+)' _TESTS = [{ # video 'url': 'http://www.dw.com/en/intelligent-light/av-19112290', 'md5': 'fb9dfd9520811d3ece80f04befd73428', 'info_dict': { 'id': '19112290', 'ext': 'mp4', 'title': 'Intelligent light', 'description': 'md5:90e00d5881719f2a6a5827cb74985af1', 'upload_date': '20160605', }, }, { # audio 'url': 'http://www.dw.com/en/worldlink-my-business/av-19111941', 'md5': '2814c9a1321c3a51f8a7aeb067a360dd', 'info_dict': { 'id': '19111941', 'ext': 'mp3', 'title': 'WorldLink: My business', 'description': 'md5:bc9ca6e4e063361e21c920c53af12405', 'upload_date': '20160311', }, }, { # DW documentaries, only last for one or two weeks 'url': 'http://www.dw.com/en/documentaries-welcome-to-the-90s-2016-05-21/e-19220158-9798', 'md5': '56b6214ef463bfb9a3b71aeb886f3cf1', 'info_dict': { 'id': '19274438', 'ext': 'mp4', 'title': 'Welcome to the 90s – Hip Hop', 'description': 'Welcome to the 90s - The Golden Decade of Hip Hop', 'upload_date': '20160521', }, 'skip': 'Video removed', }] def _real_extract(self, url): media_id = self._match_id(url) webpage = self._download_webpage(url, media_id) hidden_inputs = self._hidden_inputs(webpage) title = hidden_inputs['media_title'] media_id = hidden_inputs.get('media_id') or media_id direct_url = url_or_none(hidden_inputs.get('file_name')) if direct_url: formats = [{'url': hidden_inputs['file_name']}] else: formats = self._extract_smil_formats( f'http://www.dw.com/smil/v-{media_id}', media_id, transform_source=lambda s: s.replace( 'rtmp://tv-od.dw.de/flash/', 'http://tv-download.dw.de/dwtv_video/flv/')) upload_date = hidden_inputs.get('display_date') if not upload_date: upload_date = self._html_search_regex( 
r'<span[^>]+class="date">([0-9.]+)\s*\|', webpage, 'upload date', default=None) upload_date = unified_strdate(upload_date) return { 'id': media_id, 'title': title, 'description': self._og_search_description(webpage), 'thumbnail': hidden_inputs.get('preview_image'), 'duration': int_or_none(hidden_inputs.get('file_duration')), 'upload_date': upload_date, 'formats': formats, } class DWArticleIE(InfoExtractor): _WORKING = False _ENABLED = None # XXX: pass through to GenericIE IE_NAME = 'dw:article' _VALID_URL = r'https?://(?:www\.)?dw\.com/(?:[^/]+/)+a-(?P<id>\d+)' _TEST = { 'url': 'http://www.dw.com/en/no-hope-limited-options-for-refugees-in-idomeni/a-19111009', 'md5': '8ca657f9d068bbef74d6fc38b97fc869', 'info_dict': { 'id': '19105868', 'ext': 'mp4', 'title': 'The harsh life of refugees in Idomeni', 'description': 'md5:196015cc7e48ebf474db9399420043c7', 'upload_date': '20160310', }, } def _real_extract(self, url): article_id = self._match_id(url) webpage = self._download_webpage(url, article_id) hidden_inputs = self._hidden_inputs(webpage) media_id = hidden_inputs['media_id'] media_path = self._search_regex(rf'href="([^"]+av-{media_id})"\s+class="overlayLink"', webpage, 'media url') media_url = urllib.parse.urljoin(url, media_path) return self.url_result(media_url, 'DW', media_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ustudio.py
yt_dlp/extractor/ustudio.py
from .common import InfoExtractor from ..utils import ( int_or_none, unescapeHTML, unified_strdate, ) class UstudioIE(InfoExtractor): IE_NAME = 'ustudio' _VALID_URL = r'https?://(?:(?:www|v1)\.)?ustudio\.com/video/(?P<id>[^/]+)/(?P<display_id>[^/?#&]+)' _TEST = { 'url': 'http://ustudio.com/video/Uxu2my9bgSph/san_francisco_golden_gate_bridge', 'md5': '58bbfca62125378742df01fc2abbdef6', 'info_dict': { 'id': 'Uxu2my9bgSph', 'display_id': 'san_francisco_golden_gate_bridge', 'ext': 'mp4', 'title': 'San Francisco: Golden Gate Bridge', 'description': 'md5:23925500697f2c6d4830e387ba51a9be', 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20111107', 'uploader': 'Tony Farley', }, } def _real_extract(self, url): video_id, display_id = self._match_valid_url(url).groups() config = self._download_xml( f'http://v1.ustudio.com/embed/{video_id}/ustudio/config.xml', display_id) def extract(kind): return [{ 'url': unescapeHTML(item.attrib['url']), 'width': int_or_none(item.get('width')), 'height': int_or_none(item.get('height')), } for item in config.findall(f'./qualities/quality/{kind}') if item.get('url')] formats = extract('video') webpage = self._download_webpage(url, display_id) title = self._og_search_title(webpage) upload_date = unified_strdate(self._search_regex( r'(?s)Uploaded by\s*.+?\s*on\s*<span>([^<]+)</span>', webpage, 'upload date', fatal=False)) uploader = self._search_regex( r'Uploaded by\s*<a[^>]*>([^<]+)<', webpage, 'uploader', fatal=False) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': self._og_search_description(webpage), 'thumbnails': extract('image'), 'upload_date': upload_date, 'uploader': uploader, 'formats': formats, } class UstudioEmbedIE(InfoExtractor): IE_NAME = 'ustudio:embed' _VALID_URL = r'https?://(?:(?:app|embed)\.)?ustudio\.com/embed/(?P<uid>[^/]+)/(?P<id>[^/]+)' _TEST = { 'url': 'http://app.ustudio.com/embed/DeN7VdYRDKhP/Uw7G1kMCe65T', 'md5': '47c0be52a09b23a7f40de9469cec58f4', 'info_dict': { 'id': 
'Uw7G1kMCe65T', 'ext': 'mp4', 'title': '5 Things IT Should Know About Video', 'description': 'md5:93d32650884b500115e158c5677d25ad', 'uploader_id': 'DeN7VdYRDKhP', }, } def _real_extract(self, url): uploader_id, video_id = self._match_valid_url(url).groups() video_data = self._download_json( f'http://app.ustudio.com/embed/{uploader_id}/{video_id}/config.json', video_id)['videos'][0] title = video_data['name'] formats = [] for ext, qualities in video_data.get('transcodes', {}).items(): for quality in qualities: quality_url = quality.get('url') if not quality_url: continue height = int_or_none(quality.get('height')) formats.append({ 'format_id': f'{ext}-{height}p' if height else ext, 'url': quality_url, 'width': int_or_none(quality.get('width')), 'height': height, }) thumbnails = [] for image in video_data.get('images', []): image_url = image.get('url') if not image_url: continue thumbnails.append({ 'url': image_url, }) return { 'id': video_id, 'title': title, 'description': video_data.get('description'), 'duration': int_or_none(video_data.get('duration')), 'uploader_id': uploader_id, 'tags': video_data.get('keywords'), 'thumbnails': thumbnails, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/abematv.py
yt_dlp/extractor/abematv.py
import base64 import binascii import functools import hashlib import hmac import io import json import re import time import urllib.parse import uuid from .common import InfoExtractor from ..aes import aes_ecb_decrypt from ..networking import RequestHandler, Response from ..networking.exceptions import TransportError from ..utils import ( ExtractorError, OnDemandPagedList, decode_base_n, int_or_none, time_seconds, traverse_obj, update_url, update_url_query, ) class AbemaLicenseRH(RequestHandler): _SUPPORTED_URL_SCHEMES = ('abematv-license',) _SUPPORTED_PROXY_SCHEMES = None _SUPPORTED_FEATURES = None RH_NAME = 'abematv_license' _STRTABLE = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' _HKEY = b'3AF0298C219469522A313570E8583005A642E73EDD58E3EA2FB7339D3DF1597E' def __init__(self, *, ie: 'AbemaTVIE', **kwargs): super().__init__(**kwargs) self.ie = ie def _send(self, request): url = request.url ticket = urllib.parse.urlparse(url).netloc try: response_data = self._get_videokey_from_ticket(ticket) except ExtractorError as e: raise TransportError(cause=e.cause) from e except (IndexError, KeyError, TypeError) as e: raise TransportError(cause=repr(e)) from e return Response( io.BytesIO(response_data), url, headers={'Content-Length': str(len(response_data))}) def _get_videokey_from_ticket(self, ticket): to_show = self.ie.get_param('verbose', False) media_token = self.ie._get_media_token(to_show=to_show) license_response = self.ie._download_json( 'https://license.abema.io/abematv-hls', None, note='Requesting playback license' if to_show else False, query={'t': media_token}, data=json.dumps({ 'kv': 'a', 'lt': ticket, }).encode(), headers={ 'Content-Type': 'application/json', }) res = decode_base_n(license_response['k'], table=self._STRTABLE) encvideokey = list(res.to_bytes(16, 'big')) h = hmac.new( binascii.unhexlify(self._HKEY), (license_response['cid'] + self.ie._DEVICE_ID).encode(), digestmod=hashlib.sha256) enckey = list(h.digest()) return 
bytes(aes_ecb_decrypt(encvideokey, enckey)) class AbemaTVBaseIE(InfoExtractor): _NETRC_MACHINE = 'abematv' _USERTOKEN = None _DEVICE_ID = None _MEDIATOKEN = None _SECRETKEY = b'v+Gjs=25Aw5erR!J8ZuvRrCx*rGswhB&qdHd_SYerEWdU&a?3DzN9BRbp5KwY4hEmcj5#fykMjJ=AuWz5GSMY-d@H7DMEh3M@9n2G552Us$$k9cD=3TxwWe86!x#Zyhe' @classmethod def _generate_aks(cls, deviceid): deviceid = deviceid.encode() # add 1 hour and then drop minute and secs ts_1hour = int((time_seconds() // 3600 + 1) * 3600) time_struct = time.gmtime(ts_1hour) ts_1hour_str = str(ts_1hour).encode() tmp = None def mix_once(nonce): nonlocal tmp h = hmac.new(cls._SECRETKEY, digestmod=hashlib.sha256) h.update(nonce) tmp = h.digest() def mix_tmp(count): nonlocal tmp for _ in range(count): mix_once(tmp) def mix_twist(nonce): nonlocal tmp mix_once(base64.urlsafe_b64encode(tmp).rstrip(b'=') + nonce) mix_once(cls._SECRETKEY) mix_tmp(time_struct.tm_mon) mix_twist(deviceid) mix_tmp(time_struct.tm_mday % 5) mix_twist(ts_1hour_str) mix_tmp(time_struct.tm_hour % 5) return base64.urlsafe_b64encode(tmp).rstrip(b'=').decode('utf-8') def _get_device_token(self): if self._USERTOKEN: return self._USERTOKEN self._downloader._request_director.add_handler(AbemaLicenseRH(ie=self, logger=None)) username, _ = self._get_login_info() auth_cache = username and self.cache.load(self._NETRC_MACHINE, username, min_ver='2024.01.19') AbemaTVBaseIE._USERTOKEN = auth_cache and auth_cache.get('usertoken') if AbemaTVBaseIE._USERTOKEN: # try authentication with locally stored token try: AbemaTVBaseIE._DEVICE_ID = auth_cache.get('device_id') self._get_media_token(True) return except ExtractorError as e: self.report_warning(f'Failed to login with cached user token; obtaining a fresh one ({e})') AbemaTVBaseIE._DEVICE_ID = str(uuid.uuid4()) aks = self._generate_aks(self._DEVICE_ID) user_data = self._download_json( 'https://api.abema.io/v1/users', None, note='Authorizing', data=json.dumps({ 'deviceId': self._DEVICE_ID, 'applicationKeySecret': aks, }).encode(), 
headers={ 'Content-Type': 'application/json', }) AbemaTVBaseIE._USERTOKEN = user_data['token'] return self._USERTOKEN def _get_media_token(self, invalidate=False, to_show=True): if not invalidate and self._MEDIATOKEN: return self._MEDIATOKEN AbemaTVBaseIE._MEDIATOKEN = self._download_json( 'https://api.abema.io/v1/media/token', None, note='Fetching media token' if to_show else False, query={ 'osName': 'android', 'osVersion': '6.0.1', 'osLang': 'ja_JP', 'osTimezone': 'Asia/Tokyo', 'appId': 'tv.abema', 'appVersion': '3.27.1', }, headers={ 'Authorization': f'bearer {self._get_device_token()}', })['token'] return self._MEDIATOKEN def _perform_login(self, username, password): self._get_device_token() if self.cache.load(self._NETRC_MACHINE, username, min_ver='2024.01.19') and self._get_media_token(): self.write_debug('Skipping logging in') return if '@' in username: # don't strictly check if it's email address or not ep, method = 'user/email', 'email' else: ep, method = 'oneTimePassword', 'userId' login_response = self._download_json( f'https://api.abema.io/v1/auth/{ep}', None, note='Logging in', data=json.dumps({ method: username, 'password': password, }).encode(), headers={ 'Authorization': f'bearer {self._get_device_token()}', 'Origin': 'https://abema.tv', 'Referer': 'https://abema.tv/', 'Content-Type': 'application/json', }) AbemaTVBaseIE._USERTOKEN = login_response['token'] self._get_media_token(True) auth_cache = { 'device_id': AbemaTVBaseIE._DEVICE_ID, 'usertoken': AbemaTVBaseIE._USERTOKEN, } self.cache.store(self._NETRC_MACHINE, username, auth_cache) def _call_api(self, endpoint, video_id, query=None, note='Downloading JSON metadata'): return self._download_json( f'https://api.abema.io/{endpoint}', video_id, query=query or {}, note=note, headers={ 'Authorization': f'bearer {self._get_device_token()}', }) def _extract_breadcrumb_list(self, webpage, video_id): for jld in re.finditer( 
r'(?is)</span></li></ul><script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>', webpage): jsonld = self._parse_json(jld.group('json_ld'), video_id, fatal=False) if traverse_obj(jsonld, '@type') != 'BreadcrumbList': continue items = traverse_obj(jsonld, ('itemListElement', ..., 'name')) if items: return items return [] class AbemaTVIE(AbemaTVBaseIE): _VALID_URL = r'https?://abema\.tv/(?P<type>now-on-air|video/episode|channels/.+?/slots)/(?P<id>[^?/]+)' _TESTS = [{ 'url': 'https://abema.tv/video/episode/194-25_s2_p1', 'info_dict': { 'id': '194-25_s2_p1', 'title': '第1話 「チーズケーキ」 「モーニング再び」', 'series': '異世界食堂2', 'season': 'シーズン2', 'season_number': 2, 'episode': '第1話 「チーズケーキ」 「モーニング再び」', 'episode_number': 1, }, 'skip': 'expired', }, { 'url': 'https://abema.tv/channels/anime-live2/slots/E8tvAnMJ7a9a5d', 'info_dict': { 'id': 'E8tvAnMJ7a9a5d', 'title': 'ゆるキャン△ SEASON2 全話一挙【無料ビデオ72時間】', 'series': 'ゆるキャン△ SEASON2', 'episode': 'ゆるキャン△ SEASON2 全話一挙【無料ビデオ72時間】', 'season_number': 2, 'episode_number': 1, 'description': 'md5:9c5a3172ae763278f9303922f0ea5b17', }, 'skip': 'expired', }, { 'url': 'https://abema.tv/video/episode/87-877_s1282_p31047', 'info_dict': { 'id': 'E8tvAnMJ7a9a5d', 'title': '第5話『光射す』', 'description': 'md5:56d4fc1b4f7769ded5f923c55bb4695d', 'thumbnail': r're:https://hayabusa\.io/.+', 'series': '相棒', 'episode': '第5話『光射す』', }, 'skip': 'expired', }, { 'url': 'https://abema.tv/now-on-air/abema-anime', 'info_dict': { 'id': 'abema-anime', # this varies # 'title': '女子高生の無駄づかい 全話一挙【無料ビデオ72時間】', 'description': 'md5:55f2e61f46a17e9230802d7bcc913d5f', 'is_live': True, }, 'skip': 'Not supported until yt-dlp implements native live downloader OR AbemaTV can start a local HTTP server', }] _TIMETABLE = None def _real_extract(self, url): # starting download using infojson from this extractor is undefined behavior, # and never be fixed in the future; you must trigger downloads by directly specifying URL. 
# (unless there's a way to hook before downloading by extractor) video_id, video_type = self._match_valid_url(url).group('id', 'type') headers = { 'Authorization': 'Bearer ' + self._get_device_token(), } video_type = video_type.split('/')[-1] webpage = self._download_webpage(url, video_id) canonical_url = self._search_regex( r'<link\s+rel="canonical"\s*href="(.+?)"', webpage, 'canonical URL', default=url) info = self._search_json_ld(webpage, video_id, default={}) title = self._search_regex( r'<span\s*class=".+?EpisodeTitleBlock__title">(.+?)</span>', webpage, 'title', default=None) if not title: jsonld = None for jld in re.finditer( r'(?is)<span\s*class="com-m-Thumbnail__image">(?:</span>)?<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>', webpage): jsonld = self._parse_json(jld.group('json_ld'), video_id, fatal=False) if jsonld: break if jsonld: title = jsonld.get('caption') if not title and video_type == 'now-on-air': if not self._TIMETABLE: # cache the timetable because it goes to 5MiB in size (!!) self._TIMETABLE = self._download_json( 'https://api.abema.io/v1/timetable/dataSet?debug=false', video_id, headers=headers) now = time_seconds(hours=9) for slot in self._TIMETABLE.get('slots', []): if slot.get('channelId') != video_id: continue if slot['startAt'] <= now and now < slot['endAt']: title = slot['title'] break # read breadcrumb on top of page breadcrumb = self._extract_breadcrumb_list(webpage, video_id) if breadcrumb: # breadcrumb list translates to: (e.g. 
1st test for this IE) # Home > Anime (genre) > Isekai Shokudo 2 (series name) > Episode 1 "Cheese cakes" "Morning again" (episode title) # hence this works info['series'] = breadcrumb[-2] info['episode'] = breadcrumb[-1] if not title: title = info['episode'] description = self._html_search_regex( (r'<p\s+class="com-video-EpisodeDetailsBlock__content"><span\s+class=".+?">(.+?)</span></p><div', r'<span\s+class=".+?SlotSummary.+?">(.+?)</span></div><div'), webpage, 'description', default=None, group=1) if not description: og_desc = self._html_search_meta( ('description', 'og:description', 'twitter:description'), webpage) if og_desc: description = re.sub(r'''(?sx) ^(.+?)(?: アニメの動画を無料で見るならABEMA!| # anime 等、.+ # applies for most of categories )? ''', r'\1', og_desc) # canonical URL may contain season and episode number mobj = re.search(r's(\d+)_p(\d+)$', canonical_url) if mobj: seri = int_or_none(mobj.group(1), default=float('inf')) epis = int_or_none(mobj.group(2), default=float('inf')) info['season_number'] = seri if seri < 100 else None # some anime like Detective Conan (though not available in AbemaTV) # has more than 1000 episodes (1026 as of 2021/11/15) info['episode_number'] = epis if epis < 2000 else None is_live, m3u8_url = False, None availability = 'public' if video_type == 'now-on-air': is_live = True channel_url = 'https://api.abema.io/v1/channels' if video_id == 'news-global': channel_url = update_url_query(channel_url, {'division': '1'}) onair_channels = self._download_json(channel_url, video_id) for ch in onair_channels['channels']: if video_id == ch['id']: m3u8_url = ch['playback']['hls'] break else: raise ExtractorError(f'Cannot find on-air {video_id} channel.', expected=True) elif video_type == 'episode': api_response = self._download_json( f'https://api.abema.io/v1/video/programs/{video_id}', video_id, note='Checking playability', headers=headers) if not traverse_obj(api_response, ('label', 'free', {bool})): # cannot acquire decryption key for these 
streams self.report_warning('This is a premium-only stream') availability = 'premium_only' info.update(traverse_obj(api_response, { 'series': ('series', 'title'), 'season': ('season', 'name'), 'season_number': ('season', 'sequence'), 'episode_number': ('episode', 'number'), })) if not title: title = traverse_obj(api_response, ('episode', 'title')) if not description: description = traverse_obj(api_response, ('episode', 'content')) m3u8_url = f'https://vod-abematv.akamaized.net/program/{video_id}/playlist.m3u8' elif video_type == 'slots': api_response = self._download_json( f'https://api.abema.io/v1/media/slots/{video_id}', video_id, note='Checking playability', headers=headers) if not traverse_obj(api_response, ('slot', 'flags', 'timeshiftFree'), default=False): self.report_warning('This is a premium-only stream') availability = 'premium_only' m3u8_url = f'https://vod-abematv.akamaized.net/slot/{video_id}/playlist.m3u8' else: raise ExtractorError('Unreachable') if is_live: self.report_warning("This is a livestream; yt-dlp doesn't support downloading natively, but FFmpeg cannot handle m3u8 manifests from AbemaTV") self.report_warning('Please consider using Streamlink to download these streams (https://github.com/streamlink/streamlink)') formats = self._extract_m3u8_formats( m3u8_url, video_id, ext='mp4', live=is_live) info.update({ 'id': video_id, 'title': title, 'description': description, 'formats': formats, 'is_live': is_live, 'availability': availability, }) if thumbnail := update_url(self._og_search_thumbnail(webpage, default=''), query=None): info['thumbnails'] = [{'url': thumbnail}] return info class AbemaTVTitleIE(AbemaTVBaseIE): _VALID_URL = r'https?://abema\.tv/video/title/(?P<id>[^?/#]+)/?(?:\?(?:[^#]+&)?s=(?P<season>[^&#]+))?' 
_PAGE_SIZE = 25 _TESTS = [{ 'url': 'https://abema.tv/video/title/90-1887', 'info_dict': { 'id': '90-1887', 'title': 'シャッフルアイランド', 'description': 'md5:61b2425308f41a5282a926edda66f178', }, 'playlist_mincount': 2, }, { 'url': 'https://abema.tv/video/title/193-132', 'info_dict': { 'id': '193-132', 'title': '真心が届く~僕とスターのオフィス・ラブ!?~', 'description': 'md5:9b59493d1f3a792bafbc7319258e7af8', }, 'playlist_mincount': 16, }, { 'url': 'https://abema.tv/video/title/25-1nzan-whrxe', 'info_dict': { 'id': '25-1nzan-whrxe', 'title': 'ソードアート・オンライン', 'description': 'md5:c094904052322e6978495532bdbf06e6', }, 'playlist_mincount': 25, }, { 'url': 'https://abema.tv/video/title/26-2mzbynr-cph?s=26-2mzbynr-cph_s40', 'info_dict': { 'title': '〈物語〉シリーズ', 'id': '26-2mzbynr-cph', 'description': 'md5:e67873de1c88f360af1f0a4b84847a52', }, 'playlist_count': 59, }] def _fetch_page(self, playlist_id, series_version, season_id, page): query = { 'seriesVersion': series_version, 'offset': str(page * self._PAGE_SIZE), 'order': 'seq', 'limit': str(self._PAGE_SIZE), } if season_id: query['seasonId'] = season_id programs = self._call_api( f'v1/video/series/{playlist_id}/programs', playlist_id, note=f'Downloading page {page + 1}', query=query) yield from ( self.url_result(f'https://abema.tv/video/episode/{x}') for x in traverse_obj(programs, ('programs', ..., 'id'))) def _entries(self, playlist_id, series_version, season_id): return OnDemandPagedList( functools.partial(self._fetch_page, playlist_id, series_version, season_id), self._PAGE_SIZE) def _real_extract(self, url): playlist_id, season_id = self._match_valid_url(url).group('id', 'season') series_info = self._call_api(f'v1/video/series/{playlist_id}', playlist_id) return self.playlist_result( self._entries(playlist_id, series_info['version'], season_id), playlist_id=playlist_id, playlist_title=series_info.get('title'), playlist_description=series_info.get('content'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/cozytv.py
yt_dlp/extractor/cozytv.py
from .common import InfoExtractor from ..utils import unified_strdate class CozyTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?cozy\.tv/(?P<uploader>[^/]+)/replays/(?P<id>[^/$#&?]+)' _TESTS = [{ 'url': 'https://cozy.tv/beardson/replays/2021-11-19_1', 'info_dict': { 'id': 'beardson-2021-11-19_1', 'ext': 'mp4', 'title': 'pokemon pt2', 'uploader': 'beardson', 'upload_date': '20211119', 'was_live': True, 'duration': 7981, }, 'params': {'skip_download': True}, }] def _real_extract(self, url): uploader, date = self._match_valid_url(url).groups() video_id = f'{uploader}-{date}' data_json = self._download_json(f'https://api.cozy.tv/cache/{uploader}/replay/{date}', video_id) formats, subtitles = self._extract_m3u8_formats_and_subtitles( f'https://cozycdn.foxtrotstream.xyz/replays/{uploader}/{date}/index.m3u8', video_id, ext='mp4') return { 'id': video_id, 'title': data_json.get('title'), 'uploader': data_json.get('user') or uploader, 'upload_date': unified_strdate(data_json.get('date')), 'was_live': True, 'duration': data_json.get('duration'), 'formats': formats, 'subtitles': subtitles, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/wppilot.py
yt_dlp/extractor/wppilot.py
import json import random import re from .common import InfoExtractor from ..utils import ( ExtractorError, try_get, ) class WPPilotBaseIE(InfoExtractor): _VIDEO_URL = 'https://pilot.wp.pl/api/v1/channel/%s' _VIDEO_GUEST_URL = 'https://pilot.wp.pl/api/v1/guest/channel/%s' _HEADERS_WEB = { 'Content-Type': 'application/json; charset=UTF-8', 'Referer': 'https://pilot.wp.pl/tv/', } def _get_channel_list(self, cache=True): if cache is True: cache_res = self.cache.load('wppilot', 'channel-list') if cache_res: return cache_res, True webpage = self._download_webpage('https://pilot.wp.pl/tv/', None, 'Downloading webpage') page_data_base_url = self._search_regex( r'<script src="(https://wp-pilot-gatsby\.wpcdn\.pl/v[\d.-]+/desktop)', webpage, 'gatsby build version') + '/page-data' page_data = self._download_json(f'{page_data_base_url}/tv/page-data.json', None, 'Downloading page data') for qhash in page_data['staticQueryHashes']: qhash_content = self._download_json( f'{page_data_base_url}/sq/d/{qhash}.json', None, 'Searching for channel list') channel_list = try_get(qhash_content, lambda x: x['data']['allChannels']['nodes']) if channel_list is None: continue self.cache.store('wppilot', 'channel-list', channel_list) return channel_list, False raise ExtractorError('Unable to find the channel list') def _parse_channel(self, chan): return { 'id': str(chan['id']), 'title': chan['name'], 'is_live': True, 'thumbnails': [{ 'id': key, 'url': chan[key], } for key in ('thumbnail', 'thumbnail_mobile', 'icon') if chan.get(key)], } class WPPilotIE(WPPilotBaseIE): _VALID_URL = r'(?:https?://pilot\.wp\.pl/tv/?#|wppilot:)(?P<id>[a-z\d-]+)' IE_NAME = 'wppilot' _TESTS = [{ 'url': 'https://pilot.wp.pl/tv/#telewizja-wp-hd', 'info_dict': { 'id': '158', 'ext': 'mp4', 'title': 'Telewizja WP HD', }, 'params': { 'format': 'bestvideo', }, }, { # audio only 'url': 'https://pilot.wp.pl/tv/#radio-nowy-swiat', 'info_dict': { 'id': '238', 'ext': 'm4a', 'title': 'Radio Nowy Świat', }, 'params': { 'format': 
'bestaudio', }, }, { 'url': 'wppilot:9', 'only_matching': True, }] def _get_channel(self, id_or_slug): video_list, is_cached = self._get_channel_list(cache=True) key = 'id' if re.match(r'^\d+$', id_or_slug) else 'slug' for video in video_list: if video.get(key) == id_or_slug: return self._parse_channel(video) # if cached channel not found, download and retry if is_cached: video_list, _ = self._get_channel_list(cache=False) for video in video_list: if video.get(key) == id_or_slug: return self._parse_channel(video) raise ExtractorError('Channel not found') def _real_extract(self, url): video_id = self._match_id(url) channel = self._get_channel(video_id) video_id = str(channel['id']) is_authorized = next((c for c in self.cookiejar if c.name == 'netviapisessid'), None) # cookies starting with "g:" are assigned to guests is_authorized = is_authorized is not None and not is_authorized.value.startswith('g:') video = self._download_json( (self._VIDEO_URL if is_authorized else self._VIDEO_GUEST_URL) % video_id, video_id, query={ 'device_type': 'web', }, headers=self._HEADERS_WEB, expected_status=(200, 422)) stream_token = try_get(video, lambda x: x['_meta']['error']['info']['stream_token']) if stream_token: close = self._download_json( 'https://pilot.wp.pl/api/v1/channels/close', video_id, 'Invalidating previous stream session', headers=self._HEADERS_WEB, data=json.dumps({ 'channelId': video_id, 't': stream_token, }).encode()) if try_get(close, lambda x: x['data']['status']) == 'ok': return self.url_result(url, ie=WPPilotIE.ie_key()) formats = [] for fmt in video['data']['stream_channel']['streams']: # live DASH does not work for now # if fmt['type'] == 'dash@live:abr': # formats.extend( # self._extract_mpd_formats( # random.choice(fmt['url']), video_id)) if fmt['type'] == 'hls@live:abr': formats.extend( self._extract_m3u8_formats( random.choice(fmt['url']), video_id, live=True)) channel['formats'] = formats return channel class WPPilotChannelsIE(WPPilotBaseIE): _VALID_URL 
= r'(?:https?://pilot\.wp\.pl/(?:tv/?)?(?:\?[^#]*)?#?|wppilot:)$' IE_NAME = 'wppilot:channels' _TESTS = [{ 'url': 'wppilot:', 'info_dict': { 'id': 'wppilot', 'title': 'WP Pilot', }, 'playlist_mincount': 100, }, { 'url': 'https://pilot.wp.pl/', 'only_matching': True, }] def _entries(self): channel_list, _ = self._get_channel_list() for chan in channel_list: entry = self._parse_channel(chan) entry.update({ '_type': 'url_transparent', 'url': f'wppilot:{chan["id"]}', 'ie_key': WPPilotIE.ie_key(), }) yield entry def _real_extract(self, url): return self.playlist_result(self._entries(), 'wppilot', 'WP Pilot')
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/leeco.py
yt_dlp/extractor/leeco.py
import base64 import datetime as dt import hashlib import re import time import urllib.parse from .common import InfoExtractor from ..compat import compat_ord from ..utils import ( ExtractorError, determine_ext, encode_data_uri, int_or_none, orderedSet, parse_iso8601, str_or_none, url_basename, urshift, ) class LeIE(InfoExtractor): IE_DESC = '乐视网' _VALID_URL = r'https?://(?:www\.le\.com/ptv/vplay|(?:sports\.le|(?:www\.)?lesports)\.com/(?:match|video))/(?P<id>\d+)\.html' _GEO_COUNTRIES = ['CN'] _URL_TEMPLATE = 'http://www.le.com/ptv/vplay/%s.html' _TESTS = [{ 'url': 'http://www.le.com/ptv/vplay/22005890.html', 'md5': 'edadcfe5406976f42f9f266057ee5e40', 'info_dict': { 'id': '22005890', 'ext': 'mp4', 'title': '第87届奥斯卡颁奖礼完美落幕 《鸟人》成最大赢家', 'description': 'md5:a9cb175fd753e2962176b7beca21a47c', }, 'params': { 'hls_prefer_native': True, }, }, { 'url': 'http://www.le.com/ptv/vplay/1415246.html', 'info_dict': { 'id': '1415246', 'ext': 'mp4', 'title': '美人天下01', 'description': 'md5:28942e650e82ed4fcc8e4de919ee854d', }, 'params': { 'hls_prefer_native': True, }, }, { 'note': 'This video is available only in Mainland China, thus a proxy is needed', 'url': 'http://www.le.com/ptv/vplay/1118082.html', 'md5': '2424c74948a62e5f31988438979c5ad1', 'info_dict': { 'id': '1118082', 'ext': 'mp4', 'title': '与龙共舞 完整版', 'description': 'md5:7506a5eeb1722bb9d4068f85024e3986', }, 'params': { 'hls_prefer_native': True, }, }, { 'url': 'http://sports.le.com/video/25737697.html', 'only_matching': True, }, { 'url': 'http://www.lesports.com/match/1023203003.html', 'only_matching': True, }, { 'url': 'http://sports.le.com/match/1023203003.html', 'only_matching': True, }] # ror() and calc_time_key() are reversed from a embedded swf file in LetvPlayer.swf def ror(self, param1, param2): _loc3_ = 0 while _loc3_ < param2: param1 = urshift(param1, 1) + ((param1 & 1) << 31) _loc3_ += 1 return param1 def calc_time_key(self, param1): _loc2_ = 185025305 return self.ror(param1, _loc2_ % 17) ^ _loc2_ # see 
M3U8Encryption class in KLetvPlayer.swf @staticmethod def decrypt_m3u8(encrypted_data): if encrypted_data[:5].decode('utf-8').lower() != 'vc_01': return encrypted_data encrypted_data = encrypted_data[5:] _loc4_ = bytearray(2 * len(encrypted_data)) for idx, val in enumerate(encrypted_data): b = compat_ord(val) _loc4_[2 * idx] = b // 16 _loc4_[2 * idx + 1] = b % 16 idx = len(_loc4_) - 11 _loc4_ = _loc4_[idx:] + _loc4_[:idx] _loc7_ = bytearray(len(encrypted_data)) for i in range(len(encrypted_data)): _loc7_[i] = _loc4_[2 * i] * 16 + _loc4_[2 * i + 1] return bytes(_loc7_) def _check_errors(self, play_json): # Check for errors playstatus = play_json['msgs']['playstatus'] if playstatus['status'] == 0: flag = playstatus['flag'] if flag == 1: self.raise_geo_restricted() else: raise ExtractorError('Generic error. flag = %d' % flag, expected=True) def _real_extract(self, url): media_id = self._match_id(url) page = self._download_webpage(url, media_id) play_json_flash = self._download_json( 'http://player-pc.le.com/mms/out/video/playJson', media_id, 'Downloading flash playJson data', query={ 'id': media_id, 'platid': 1, 'splatid': 105, 'format': 1, 'source': 1000, 'tkey': self.calc_time_key(int(time.time())), 'domain': 'www.le.com', 'region': 'cn', }, headers=self.geo_verification_headers()) self._check_errors(play_json_flash) def get_flash_urls(media_url, format_id): nodes_data = self._download_json( media_url, media_id, f'Download JSON metadata for format {format_id}', query={ 'm3v': 1, 'format': 1, 'expect': 3, 'tss': 'ios', }) req = self._request_webpage( nodes_data['nodelist'][0]['location'], media_id, note=f'Downloading m3u8 information for format {format_id}') m3u8_data = self.decrypt_m3u8(req.read()) return { 'hls': encode_data_uri(m3u8_data, 'application/vnd.apple.mpegurl'), } extracted_formats = [] formats = [] playurl = play_json_flash['msgs']['playurl'] play_domain = playurl['domain'][0] for format_id, format_data in playurl.get('dispatch', []).items(): if 
format_id in extracted_formats: continue extracted_formats.append(format_id) media_url = play_domain + format_data[0] for protocol, format_url in get_flash_urls(media_url, format_id).items(): f = { 'url': format_url, 'ext': determine_ext(format_data[1]), 'format_id': f'{protocol}-{format_id}', 'protocol': 'm3u8_native' if protocol == 'hls' else 'http', 'quality': int_or_none(format_id), } if format_id[-1:] == 'p': f['height'] = int_or_none(format_id[:-1]) formats.append(f) publish_time = parse_iso8601(self._html_search_regex( r'发布时间&nbsp;([^<>]+) ', page, 'publish time', default=None), delimiter=' ', timezone=dt.timedelta(hours=8)) description = self._html_search_meta('description', page, fatal=False) return { 'id': media_id, 'formats': formats, 'title': playurl['title'], 'thumbnail': playurl['pic'], 'description': description, 'timestamp': publish_time, '_format_sort_fields': ('res', 'quality'), } class LePlaylistIE(InfoExtractor): _VALID_URL = r'https?://[a-z]+\.le\.com/(?!video)[a-z]+/(?P<id>[a-z0-9_]+)' _TESTS = [{ 'url': 'http://www.le.com/tv/46177.html', 'info_dict': { 'id': '46177', 'title': '美人天下', 'description': 'md5:395666ff41b44080396e59570dbac01c', }, 'playlist_count': 35, }, { 'url': 'http://tv.le.com/izt/wuzetian/index.html', 'info_dict': { 'id': 'wuzetian', 'title': '武媚娘传奇', 'description': 'md5:e12499475ab3d50219e5bba00b3cb248', }, # This playlist contains some extra videos other than the drama itself 'playlist_mincount': 96, }, { 'url': 'http://tv.le.com/pzt/lswjzzjc/index.shtml', # This series is moved to http://www.le.com/tv/10005297.html 'only_matching': True, }, { 'url': 'http://www.le.com/comic/92063.html', 'only_matching': True, }, { 'url': 'http://list.le.com/listn/c1009_sc532002_d2_p1_o1.html', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if LeIE.suitable(url) else super().suitable(url) def _real_extract(self, url): playlist_id = self._match_id(url) page = self._download_webpage(url, playlist_id) # Currently 
old domain names are still used in playlists media_ids = orderedSet(re.findall( r'<a[^>]+href="http://www\.letv\.com/ptv/vplay/(\d+)\.html', page)) entries = [self.url_result(LeIE._URL_TEMPLATE % media_id, ie='Le') for media_id in media_ids] title = self._html_search_meta('keywords', page, fatal=False).split(',')[0] description = self._html_search_meta('description', page, fatal=False) return self.playlist_result(entries, playlist_id, playlist_title=title, playlist_description=description) class LetvCloudIE(InfoExtractor): # Most of *.letv.com is changed to *.le.com on 2016/01/02 # but yuntv.letv.com is kept, so also keep the extractor name IE_DESC = '乐视云' _VALID_URL = r'https?://yuntv\.letv\.com/bcloud.html\?.+' _TESTS = [{ 'url': 'http://yuntv.letv.com/bcloud.html?uu=p7jnfw5hw9&vu=467623dedf', 'md5': '26450599afd64c513bc77030ad15db44', 'info_dict': { 'id': 'p7jnfw5hw9_467623dedf', 'ext': 'mp4', 'title': 'Video p7jnfw5hw9_467623dedf', }, }, { 'url': 'http://yuntv.letv.com/bcloud.html?uu=p7jnfw5hw9&vu=ec93197892&pu=2c7cd40209&auto_play=1&gpcflag=1&width=640&height=360', 'md5': 'e03d9cc8d9c13191e1caf277e42dbd31', 'info_dict': { 'id': 'p7jnfw5hw9_ec93197892', 'ext': 'mp4', 'title': 'Video p7jnfw5hw9_ec93197892', }, }, { 'url': 'http://yuntv.letv.com/bcloud.html?uu=p7jnfw5hw9&vu=187060b6fd', 'md5': 'cb988699a776b22d4a41b9d43acfb3ac', 'info_dict': { 'id': 'p7jnfw5hw9_187060b6fd', 'ext': 'mp4', 'title': 'Video p7jnfw5hw9_187060b6fd', }, }] @staticmethod def sign_data(obj): if obj['cf'] == 'flash': salt = '2f9d6924b33a165a6d8b5d3d42f4f987' items = ['cf', 'format', 'ran', 'uu', 'ver', 'vu'] elif obj['cf'] == 'html5': salt = 'fbeh5player12c43eccf2bec3300344' items = ['cf', 'ran', 'uu', 'bver', 'vu'] input_data = ''.join([item + obj[item] for item in items]) + salt obj['sign'] = hashlib.md5(input_data.encode()).hexdigest() def _get_formats(self, cf, uu, vu, media_id): def get_play_json(cf, timestamp): data = { 'cf': cf, 'ver': '2.2', 'bver': 'firefox44.0', 'format': 'json', 
'uu': uu, 'vu': vu, 'ran': str(timestamp), } self.sign_data(data) return self._download_json( 'http://api.letvcloud.com/gpc.php?' + urllib.parse.urlencode(data), media_id, f'Downloading playJson data for type {cf}') play_json = get_play_json(cf, time.time()) # The server time may be different from local time if play_json.get('code') == 10071: play_json = get_play_json(cf, play_json['timestamp']) if not play_json.get('data'): if play_json.get('message'): raise ExtractorError('Letv cloud said: {}'.format(play_json['message']), expected=True) elif play_json.get('code'): raise ExtractorError('Letv cloud returned error %d' % play_json['code'], expected=True) else: raise ExtractorError('Letv cloud returned an unknown error') def b64decode(s): return base64.b64decode(s).decode('utf-8') formats = [] for media in play_json['data']['video_info']['media'].values(): play_url = media['play_url'] url = b64decode(play_url['main_url']) decoded_url = b64decode(url_basename(url)) formats.append({ 'url': url, 'ext': determine_ext(decoded_url), 'format_id': str_or_none(play_url.get('vtype')), 'format_note': str_or_none(play_url.get('definition')), 'width': int_or_none(play_url.get('vwidth')), 'height': int_or_none(play_url.get('vheight')), }) return formats def _real_extract(self, url): uu_mobj = re.search(r'uu=([\w]+)', url) vu_mobj = re.search(r'vu=([\w]+)', url) if not uu_mobj or not vu_mobj: raise ExtractorError(f'Invalid URL: {url}', expected=True) uu = uu_mobj.group(1) vu = vu_mobj.group(1) media_id = uu + '_' + vu formats = self._get_formats('flash', uu, vu, media_id) + self._get_formats('html5', uu, vu, media_id) return { 'id': media_id, 'title': f'Video {media_id}', 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/hellporno.py
yt_dlp/extractor/hellporno.py
from .common import InfoExtractor from ..utils import ( int_or_none, merge_dicts, remove_end, unified_timestamp, ) class HellPornoIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?hellporno\.(?:com/videos|net/v)/(?P<id>[^/]+)' _TESTS = [{ 'url': 'http://hellporno.com/videos/dixie-is-posing-with-naked-ass-very-erotic/', 'md5': 'f0a46ebc0bed0c72ae8fe4629f7de5f3', 'info_dict': { 'id': '149116', 'display_id': 'dixie-is-posing-with-naked-ass-very-erotic', 'ext': 'mp4', 'title': 'Dixie is posing with naked ass very erotic', 'description': 'md5:9a72922749354edb1c4b6e540ad3d215', 'categories': list, 'thumbnail': r're:https?://.*\.jpg$', 'duration': 240, 'timestamp': 1398762720, 'upload_date': '20140429', 'view_count': int, 'age_limit': 18, }, }, { 'url': 'http://hellporno.net/v/186271/', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) title = remove_end(self._html_extract_title(webpage), ' - Hell Porno') info = self._parse_html5_media_entries(url, webpage, display_id)[0] video_id = self._search_regex( (r'chs_object\s*=\s*["\'](\d+)', r'params\[["\']video_id["\']\]\s*=\s*(\d+)'), webpage, 'video id', default=display_id) description = self._search_regex( r'class=["\']desc_video_view_v2[^>]+>([^<]+)', webpage, 'description', fatal=False) categories = [ c.strip() for c in self._html_search_meta( 'keywords', webpage, 'categories', default='').split(',') if c.strip()] duration = int_or_none(self._og_search_property( 'video:duration', webpage, fatal=False)) timestamp = unified_timestamp(self._og_search_property( 'video:release_date', webpage, fatal=False)) view_count = int_or_none(self._search_regex( r'>Views\s+(\d+)', webpage, 'view count', fatal=False)) return merge_dicts(info, { 'id': video_id, 'display_id': display_id, 'title': title, 'description': description, 'categories': categories, 'duration': duration, 'timestamp': timestamp, 'view_count': view_count, 'age_limit': 18, })
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ccma.py
yt_dlp/extractor/ccma.py
from .common import InfoExtractor from ..utils import ( clean_html, determine_ext, int_or_none, parse_duration, parse_resolution, try_get, unified_timestamp, url_or_none, ) class CCMAIE(InfoExtractor): IE_DESC = '3Cat, TV3 and Catalunya Ràdio' _VALID_URL = r'https?://(?:www\.)?3cat\.cat/(?:3cat|tv3/sx3)/[^/?#]+/(?P<type>video|audio)/(?P<id>\d+)' _TESTS = [{ # ccma.cat/tv3/alacarta/ URLs redirect to 3cat.cat/3cat/ 'url': 'https://www.3cat.cat/3cat/lespot-de-la-marato-de-tv3/video/5630208/', 'md5': '7296ca43977c8ea4469e719c609b0871', 'info_dict': { 'id': '5630208', 'ext': 'mp4', 'title': 'L\'espot de La Marató 2016: Ictus i les lesions medul·lars i cerebrals traumàtiques', 'description': 'md5:f12987f320e2f6e988e9908e4fe97765', 'timestamp': 1478608140, 'upload_date': '20161108', 'age_limit': 0, 'alt_title': 'EsportMarató2016WEB_PerPublicar', 'duration': 79, 'thumbnail': 'https://img.3cat.cat/multimedia/jpg/4/6/1478536106664.jpg', 'series': 'Dedicada a l\'ictus i les lesions medul·lars i cerebrals traumàtiques', 'categories': ['Divulgació'], }, }, { # ccma.cat/catradio/alacarta/ URLs redirect to 3cat.cat/3cat/ 'url': 'https://www.3cat.cat/3cat/el-consell-de-savis-analitza-el-derbi/audio/943685/', 'md5': 'fa3e38f269329a278271276330261425', 'info_dict': { 'id': '943685', 'ext': 'mp3', 'title': 'El Consell de Savis analitza el derbi', 'description': 'md5:e2a3648145f3241cb9c6b4b624033e53', 'upload_date': '20161217', 'timestamp': 1482011700, 'vcodec': 'none', 'categories': ['Esports'], 'series': 'Tot gira', 'duration': 821, 'thumbnail': 'https://img.3cat.cat/multimedia/jpg/8/9/1482002602598.jpg', }, }, { 'url': 'https://www.3cat.cat/3cat/crims-josep-tallada-lespereu-me-part-1/video/6031387/', 'md5': '27493513d08a3e5605814aee9bb778d2', 'info_dict': { 'id': '6031387', 'ext': 'mp4', 'title': 'T1xC5 - Josep Talleda, l\'"Espereu-me" (part 1)', 'description': 'md5:7cbdafb640da9d0d2c0f62bad1e74e60', 'timestamp': 1582577919, 'upload_date': '20200224', 'subtitles': 'mincount:1', 
'age_limit': 13, 'series': 'Crims', 'thumbnail': 'https://img.3cat.cat/multimedia/jpg/1/9/1582564376991.jpg', 'duration': 3203, 'categories': ['Divulgació'], 'alt_title': 'Crims - 5 - Josep Talleda, l\'"Espereu-me" (1a part) - Josep Talleda, l\'"Espereu-me" (part 1)', 'episode_number': 5, 'episode': 'Episode 5', }, }, { 'url': 'https://www.3cat.cat/tv3/sx3/una-mosca-volava-per-la-llum/video/5759227/', 'info_dict': { 'id': '5759227', 'ext': 'mp4', 'title': 'Una mosca volava per la llum', 'alt_title': '17Z004Ç UNA MOSCA VOLAVA PER LA LLUM', 'description': 'md5:9ab64276944b0825336f4147f13f7854', 'series': 'Mic', 'upload_date': '20180411', 'timestamp': 1523440105, 'duration': 160, 'age_limit': 0, 'thumbnail': 'https://img.3cat.cat/multimedia/jpg/6/1/1524071667216.jpg', 'categories': ['Música'], }, }] def _real_extract(self, url): media_type, media_id = self._match_valid_url(url).group('type', 'id') media = self._download_json( 'http://api-media.3cat.cat/pvideo/media.jsp', media_id, query={ 'media': media_type, 'idint': media_id, 'format': 'dm', }) formats = [] media_url = media['media']['url'] if isinstance(media_url, list): for format_ in media_url: format_url = url_or_none(format_.get('file')) if not format_url: continue if determine_ext(format_url) == 'mpd': formats.extend(self._extract_mpd_formats( format_url, media_id, mpd_id='dash', fatal=False)) continue label = format_.get('label') f = parse_resolution(label) f.update({ 'url': format_url, 'format_id': label, }) formats.append(f) else: formats.append({ 'url': media_url, 'vcodec': 'none' if media_type == 'audio' else None, }) informacio = media['informacio'] title = informacio['titol'] durada = informacio.get('durada') or {} duration = int_or_none(durada.get('milisegons'), 1000) or parse_duration(durada.get('text')) tematica = try_get(informacio, lambda x: x['tematica']['text']) data_utc = try_get(informacio, lambda x: x['data_emissio']['utc']) timestamp = unified_timestamp(data_utc) subtitles = {} subtitols = 
media.get('subtitols') or [] if isinstance(subtitols, dict): subtitols = [subtitols] for st in subtitols: sub_url = st.get('url') if sub_url: subtitles.setdefault( st.get('iso') or st.get('text') or 'ca', []).append({ 'url': sub_url, }) thumbnails = [] imatges = media.get('imatges', {}) if imatges: thumbnail_url = imatges.get('url') if thumbnail_url: thumbnails = [{ 'url': thumbnail_url, 'width': int_or_none(imatges.get('amplada')), 'height': int_or_none(imatges.get('alcada')), }] age_limit = None codi_etic = try_get(informacio, lambda x: x['codi_etic']['id']) if codi_etic: codi_etic_s = codi_etic.split('_') if len(codi_etic_s) == 2: if codi_etic_s[1] == 'TP': age_limit = 0 else: age_limit = int_or_none(codi_etic_s[1]) return { 'id': media_id, 'title': title, 'description': clean_html(informacio.get('descripcio')), 'duration': duration, 'timestamp': timestamp, 'thumbnails': thumbnails, 'subtitles': subtitles, 'formats': formats, 'age_limit': age_limit, 'alt_title': informacio.get('titol_complet'), 'episode_number': int_or_none(informacio.get('capitol')), 'categories': [tematica] if tematica else None, 'series': informacio.get('programa'), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tvopengr.py
yt_dlp/extractor/tvopengr.py
from .common import InfoExtractor from ..utils import ( determine_ext, get_elements_text_and_html_by_attribute, scale_thumbnails_to_max_format_width, ) class TVOpenGrBaseIE(InfoExtractor): def _return_canonical_url(self, url, video_id): webpage = self._download_webpage(url, video_id) canonical_url = self._og_search_url(webpage) title = self._og_search_title(webpage) return self.url_result(canonical_url, ie=TVOpenGrWatchIE.ie_key(), video_id=video_id, video_title=title) class TVOpenGrWatchIE(TVOpenGrBaseIE): IE_NAME = 'tvopengr:watch' IE_DESC = 'tvopen.gr (and ethnos.gr) videos' _VALID_URL = r'https?://(?P<netloc>(?:www\.)?(?:tvopen|ethnos)\.gr)/watch/(?P<id>\d+)/(?P<slug>[^/]+)' _API_ENDPOINT = 'https://www.tvopen.gr/templates/data/player' _TESTS = [{ 'url': 'https://www.ethnos.gr/watch/101009/nikoskaprabelosdenexoymekanenanasthenhsemethmethmetallaxhomikron', 'info_dict': { 'id': '101009', 'title': 'md5:51f68773dcb6c70498cd326f45fefdf0', 'display_id': 'nikoskaprabelosdenexoymekanenanasthenhsemethmethmetallaxhomikron', 'description': 'md5:78fff49f18fb3effe41b070e5c7685d6', 'duration': 246.0, 'thumbnail': 'https://opentv-static.siliconweb.com/imgHandler/1920/d573ba71-ec5f-43c6-b4cb-d181f327d3a8.jpg', 'ext': 'mp4', 'upload_date': '20220109', 'timestamp': 1641686400, }, }, { 'url': 'https://www.tvopen.gr/watch/100979/se28099agapaomenalla7cepeisodio267cmhthrargiapashskakias', 'info_dict': { 'id': '100979', 'title': 'md5:e021f3001e16088ee40fa79b20df305b', 'display_id': 'se28099agapaomenalla7cepeisodio267cmhthrargiapashskakias', 'description': 'md5:ba17db53954134eb8d625d199e2919fb', 'duration': 2420.0, 'thumbnail': 'https://opentv-static.siliconweb.com/imgHandler/1920/9bb71cf1-21da-43a9-9d65-367950fde4e3.jpg', 'ext': 'mp4', 'upload_date': '20220108', 'timestamp': 1641600000, }, }] def _extract_formats_and_subs(self, response, video_id): formats, subs = [], {} for format_id, format_url in response.items(): if format_id not in ('stream', 'httpstream', 'mpegdash'): continue 
ext = determine_ext(format_url) if ext == 'm3u8': formats_, subs_ = self._extract_m3u8_formats_and_subtitles( format_url, video_id, 'mp4', m3u8_id=format_id, fatal=False) elif ext == 'mpd': formats_, subs_ = self._extract_mpd_formats_and_subtitles( format_url, video_id, 'mp4', fatal=False) else: formats.append({ 'url': format_url, 'format_id': format_id, }) continue formats.extend(formats_) self._merge_subtitles(subs_, target=subs) return formats, subs def _real_extract(self, url): netloc, video_id, display_id = self._match_valid_url(url).group('netloc', 'id', 'slug') if netloc.find('tvopen.gr') == -1: return self._return_canonical_url(url, video_id) webpage = self._download_webpage(url, video_id) info = self._search_json_ld(webpage, video_id, expected_type='VideoObject') info['formats'], info['subtitles'] = self._extract_formats_and_subs( self._download_json(self._API_ENDPOINT, video_id, query={'cid': video_id}), video_id) info['thumbnails'] = scale_thumbnails_to_max_format_width( info['formats'], info['thumbnails'], r'(?<=/imgHandler/)\d+') description, _html = next(get_elements_text_and_html_by_attribute('class', 'description', webpage)) if description and _html.startswith('<span '): info['description'] = description info['id'] = video_id info['display_id'] = display_id return info class TVOpenGrEmbedIE(TVOpenGrBaseIE): IE_NAME = 'tvopengr:embed' IE_DESC = 'tvopen.gr embedded videos' _VALID_URL = r'(?:https?:)?//(?:www\.|cdn\.|)(?:tvopen|ethnos).gr/embed/(?P<id>\d+)' _EMBED_REGEX = [rf'''<iframe[^>]+?src=(?P<_q1>["'])(?P<url>{_VALID_URL})(?P=_q1)'''] _TESTS = [{ 'url': 'https://cdn.ethnos.gr/embed/100963', 'info_dict': { 'id': '100963', 'display_id': 'koronoiosapotoysdieythyntestonsxoleionselftestgiaosoysdenbrhkan', 'title': 'md5:2c71876fadf0cda6043da0da5fca2936', 'description': 'md5:17482b4432e5ed30eccd93b05d6ea509', 'duration': 118.0, 'thumbnail': 'https://opentv-static.siliconweb.com/imgHandler/1920/5804e07f-799a-4247-a696-33842c94ca37.jpg', 'ext': 'mp4', 
'upload_date': '20220108', 'timestamp': 1641600000, }, }] _WEBPAGE_TESTS = [{ 'url': 'https://www.ethnos.gr/World/article/190604/hparosiaxekinoynoisynomiliessthgeneyhmethskiatoypolemoypanoapothnoykrania', 'info_dict': { 'id': '101119', 'ext': 'mp4', 'title': 'Οι καρποί των διαπραγματεύσεων ΗΠΑ-Ρωσίας | Ώρα Ελλάδος 7:00 > Ρεπορτάζ', 'description': 'Ξεκινούν οι διαπραγματεύσεις ανάμεσα σε Ηνωμένες Πολιτείες και Ρωσία για την Ουκρανία.', 'display_id': 'oikarpoitondiapragmateyseonhparosias', 'duration': 421.0, 'thumbnail': r're:https?://opentv-static\.siliconweb\.com/imgHandler/.+\.jpg', 'timestamp': 1641772800, 'upload_date': '20220110', }, }] def _real_extract(self, url): video_id = self._match_id(url) return self._return_canonical_url(url, video_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/beacon.py
yt_dlp/extractor/beacon.py
import json from .common import InfoExtractor from ..utils import ( ExtractorError, parse_iso8601, traverse_obj, ) class BeaconTvIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?beacon\.tv/content/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://beacon.tv/content/welcome-to-beacon', 'md5': 'b3f5932d437f288e662f10f3bfc5bd04', 'info_dict': { 'id': 'welcome-to-beacon', 'ext': 'mp4', 'upload_date': '20240509', 'description': 'md5:ea2bd32e71acf3f9fca6937412cc3563', 'thumbnail': 'https://cdn.jwplayer.com/v2/media/I4CkkEvN/poster.jpg?width=720', 'title': 'Your home for Critical Role!', 'timestamp': 1715227200, 'duration': 105.494, }, }, { 'url': 'https://beacon.tv/content/re-slayers-take-trailer', 'md5': 'd879b091485dbed2245094c8152afd89', 'info_dict': { 'id': 're-slayers-take-trailer', 'ext': 'mp4', 'title': 'The Re-Slayer’s Take | Official Trailer', 'timestamp': 1715189040, 'upload_date': '20240508', 'duration': 53.249, 'thumbnail': 'https://cdn.jwplayer.com/v2/media/PW5ApIw3/poster.jpg?width=720', }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) content_data = traverse_obj(self._search_nextjs_data(webpage, video_id), ( 'props', 'pageProps', '__APOLLO_STATE__', lambda k, v: k.startswith('Content:') and v['slug'] == video_id, any)) if not content_data: raise ExtractorError('Failed to extract content data') jwplayer_data = traverse_obj(content_data, ( (('contentVideo', 'video', 'videoData'), ('contentPodcast', 'podcast', 'audioData')), {json.loads}, {dict}, any)) if not jwplayer_data: if content_data.get('contentType') not in ('videoPodcast', 'video', 'podcast'): raise ExtractorError('Content is not a video/podcast', expected=True) if traverse_obj(content_data, ('contentTier', '__ref')) != 'MemberTier:65b258d178f89be87b4dc0a4': self.raise_login_required('This video/podcast is for members only') raise ExtractorError('Failed to extract content') return { **self._parse_jwplayer_data(jwplayer_data, video_id), 
**traverse_obj(content_data, { 'title': ('title', {str}), 'description': ('description', {str}), 'timestamp': ('publishedAt', {parse_iso8601}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/bandcamp.py
yt_dlp/extractor/bandcamp.py
import json import random import re import time from .common import InfoExtractor from ..utils import ( ExtractorError, clean_html, extract_attributes, float_or_none, format_field, int_or_none, join_nonempty, parse_filesize, parse_qs, str_or_none, strftime_or_none, try_get, unified_timestamp, update_url_query, url_or_none, urljoin, ) from ..utils.traversal import find_element, find_elements, traverse_obj class BandcampIE(InfoExtractor): _VALID_URL = r'https?://(?P<uploader>[^/]+)\.bandcamp\.com/track/(?P<id>[^/?#&]+)' _EMBED_REGEX = [r'<meta property="og:url"[^>]*?content="(?P<url>.*?bandcamp\.com.*?)"'] _TESTS = [{ 'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song', 'md5': 'c557841d5e50261777a6585648adf439', 'info_dict': { 'id': '1812978515', 'ext': 'mp3', 'title': 'youtube-dl "\'/\\ä↭ - youtube-dl "\'/\\ä↭ - youtube-dl test song "\'/\\ä↭', 'duration': 9.8485, 'uploader': 'youtube-dl "\'/\\ä↭', 'upload_date': '20121129', 'thumbnail': r're:https?://f4\.bcbits\.com/img/.+\.jpg', 'timestamp': 1354224127, 'track': 'youtube-dl "\'/\\ä↭ - youtube-dl test song "\'/\\ä↭', 'track_id': '1812978515', 'uploader_url': 'https://youtube-dl.bandcamp.com', 'uploader_id': 'youtube-dl', 'artists': ['youtube-dl "\'/\\ä↭'], 'album_artists': ['youtube-dl "\'/\\ä↭'], }, 'skip': 'There is a limit of 200 free downloads / month for the test song', }, { # free download 'url': 'http://benprunty.bandcamp.com/track/lanius-battle', 'info_dict': { 'id': '2650410135', 'ext': 'm4a', 'title': 'Ben Prunty - Lanius (Battle)', 'uploader': 'Ben Prunty', 'thumbnail': r're:https?://f4\.bcbits\.com/img/.+\.jpg', 'timestamp': 1396508491, 'upload_date': '20140403', 'release_timestamp': 1396483200, 'release_date': '20140403', 'duration': 260.877, 'track': 'Lanius (Battle)', 'track_number': 1, 'track_id': '2650410135', 'album': 'FTL: Advanced Edition Soundtrack', 'uploader_url': 'https://benprunty.bandcamp.com', 'uploader_id': 'benprunty', 'tags': ['soundtrack', 'chiptunes', 'cinematic', 
'electronic', 'video game music', 'California'], 'artists': ['Ben Prunty'], 'album_artists': ['Ben Prunty'], }, }, { # no free download, mp3 128 'url': 'https://relapsealumni.bandcamp.com/track/hail-to-fire', 'md5': 'fec12ff55e804bb7f7ebeb77a800c8b7', 'info_dict': { 'id': '2584466013', 'ext': 'mp3', 'title': 'Mastodon - Hail to Fire', 'uploader': 'Mastodon', 'thumbnail': r're:https?://f4\.bcbits\.com/img/.+\.jpg', 'timestamp': 1322005399, 'upload_date': '20111122', 'release_timestamp': 1076112000, 'release_date': '20040207', 'duration': 120.79, 'track': 'Hail to Fire', 'track_number': 5, 'track_id': '2584466013', 'album': 'Call of the Mastodon', 'uploader_url': 'https://relapsealumni.bandcamp.com', 'uploader_id': 'relapsealumni', 'tags': ['Philadelphia'], 'artists': ['Mastodon'], 'album_artists': ['Mastodon'], }, }, { # track from compilation album (artist/album_artist difference) 'url': 'https://diskotopia.bandcamp.com/track/safehouse', 'md5': '19c5337bca1428afa54129f86a2f6a69', 'info_dict': { 'id': '1978174799', 'ext': 'mp3', 'title': 'submerse - submerse - Safehouse', 'uploader': 'submerse', 'thumbnail': r're:https?://f4\.bcbits\.com/img/.+\.jpg', 'timestamp': 1480779297, 'upload_date': '20161203', 'release_timestamp': 1481068800, 'release_date': '20161207', 'duration': 154.066, 'track': 'submerse - Safehouse', 'track_number': 3, 'track_id': '1978174799', 'album': 'DSK F/W 2016-2017 Free Compilation', 'uploader_url': 'https://diskotopia.bandcamp.com', 'uploader_id': 'diskotopia', 'tags': ['Japan'], 'artists': ['submerse'], 'album_artists': ['Diskotopia'], }, }] _WEBPAGE_TESTS = [{ # FIXME: Embed detection 'url': 'https://www.punknews.org/article/85809/stay-inside-super-sonic', 'info_dict': { 'id': '2475540375', 'ext': 'mp3', 'title': 'Stay Inside - Super Sonic', 'album': 'Lunger', 'album_artists': ['Stay Inside'], 'artists': ['Stay Inside'], 'duration': 166.157, 'release_date': '20251003', 'release_timestamp': 1759449600.0, 'thumbnail': 
r're:https?://f4\.bcbits\.com/img/.+\.jpg', 'timestamp': 1749473029.0, 'track': 'Super Sonic', 'track_id': '2475540375', 'track_number': 3, 'upload_date': '20250609', 'uploader': 'Stay Inside', 'uploader_id': 'stayinside', 'uploader_url': 'https://stayinside.bandcamp.com', }, }] def _extract_data_attr(self, webpage, video_id, attr='tralbum', fatal=True): return self._parse_json(self._html_search_regex( rf'data-{attr}=(["\'])({{.+?}})\1', webpage, attr + ' data', group=2), video_id, fatal=fatal) def _real_extract(self, url): title, uploader = self._match_valid_url(url).group('id', 'uploader') webpage = self._download_webpage(url, title) tralbum = self._extract_data_attr(webpage, title) thumbnail = self._og_search_thumbnail(webpage) track_id = None track = None track_number = None duration = None formats = [] track_info = try_get(tralbum, lambda x: x['trackinfo'][0], dict) if track_info: file_ = track_info.get('file') if isinstance(file_, dict): for format_id, format_url in file_.items(): if not url_or_none(format_url): continue ext, abr_str = format_id.split('-', 1) formats.append({ 'format_id': format_id, 'url': self._proto_relative_url(format_url, 'http:'), 'ext': ext, 'vcodec': 'none', 'acodec': ext, 'abr': int_or_none(abr_str), }) track = track_info.get('title') track_id = str_or_none( track_info.get('track_id') or track_info.get('id')) track_number = int_or_none(track_info.get('track_num')) duration = float_or_none(track_info.get('duration')) embed = self._extract_data_attr(webpage, title, 'embed', False) current = tralbum.get('current') or {} artist = embed.get('artist') or current.get('artist') or tralbum.get('artist') album_artist = self._html_search_regex( r'<h3 class="albumTitle">[\S\s]*?by\s*<span>\s*<a href="[^>]+">\s*([^>]+?)\s*</a>', webpage, 'album artist', fatal=False) timestamp = unified_timestamp( current.get('publish_date') or tralbum.get('album_publish_date')) download_link = tralbum.get('freeDownloadPage') if download_link: track_id = 
str(tralbum['id']) download_webpage = self._download_webpage( download_link, track_id, 'Downloading free downloads page') blob = self._extract_data_attr(download_webpage, track_id, 'blob') info = try_get( blob, (lambda x: x['digital_items'][0], lambda x: x['download_items'][0]), dict) if info: downloads = info.get('downloads') if isinstance(downloads, dict): if not track: track = info.get('title') if not artist: artist = info.get('artist') if not thumbnail: thumbnail = info.get('thumb_url') download_formats = {} download_formats_list = blob.get('download_formats') if isinstance(download_formats_list, list): for f in blob['download_formats']: name, ext = f.get('name'), f.get('file_extension') if all(isinstance(x, str) for x in (name, ext)): download_formats[name] = ext.strip('.') for format_id, f in downloads.items(): format_url = f.get('url') if not format_url: continue # Stat URL generation algorithm is reverse engineered from # download_*_bundle_*.js stat_url = update_url_query( format_url.replace('/download/', '/statdownload/'), { '.rand': int(time.time() * 1000 * random.random()), }) format_id = f.get('encoding_name') or format_id stat = self._download_json( stat_url, track_id, f'Downloading {format_id} JSON', transform_source=lambda s: s[s.index('{'):s.rindex('}') + 1], fatal=False) if not stat: continue retry_url = url_or_none(stat.get('retry_url')) if not retry_url: continue formats.append({ 'url': self._proto_relative_url(retry_url, 'http:'), 'ext': download_formats.get(format_id), 'format_id': format_id, 'format_note': f.get('description'), 'filesize': parse_filesize(f.get('size_mb')), 'vcodec': 'none', 'acodec': format_id.split('-')[0], }) title = f'{artist} - {track}' if artist else track if not duration: duration = float_or_none(self._html_search_meta( 'duration', webpage, default=None)) return { 'id': track_id, 'title': title, 'thumbnail': thumbnail, 'uploader': artist, 'uploader_id': uploader, 'uploader_url': f'https://{uploader}.bandcamp.com', 
'timestamp': timestamp, 'release_timestamp': unified_timestamp(tralbum.get('album_release_date')), 'duration': duration, 'track': track, 'track_number': track_number, 'track_id': track_id, 'artist': artist, 'album': embed.get('album_title'), 'album_artist': album_artist, 'formats': formats, 'tags': traverse_obj(webpage, ({find_elements(cls='tag')}, ..., {clean_html})), } class BandcampAlbumIE(BandcampIE): # XXX: Do not subclass from concrete IE IE_NAME = 'Bandcamp:album' _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com/album/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1', 'playlist': [ { 'md5': '39bc1eded3476e927c724321ddf116cf', 'info_dict': { 'id': '1353101989', 'ext': 'mp3', 'title': 'Blazo - Intro', 'thumbnail': r're:https?://f4\.bcbits\.com/img/.+\.jpg', 'timestamp': 1311756226, 'upload_date': '20110727', 'uploader': 'Blazo', 'album_artists': ['Blazo'], 'uploader_url': 'https://blazo.bandcamp.com', 'release_date': '20110727', 'release_timestamp': 1311724800.0, 'track': 'Intro', 'uploader_id': 'blazo', 'track_number': 1, 'album': 'Jazz Format Mixtape vol.1', 'artists': ['Blazo'], 'duration': 19.335, 'track_id': '1353101989', }, }, { 'md5': '1a2c32e2691474643e912cc6cd4bffaa', 'info_dict': { 'id': '38097443', 'ext': 'mp3', 'title': 'Blazo - Kero One - Keep It Alive (Blazo remix)', 'thumbnail': r're:https?://f4\.bcbits\.com/img/.+\.jpg', 'timestamp': 1311757238, 'upload_date': '20110727', 'uploader': 'Blazo', 'track': 'Kero One - Keep It Alive (Blazo remix)', 'release_date': '20110727', 'track_id': '38097443', 'track_number': 2, 'duration': 181.467, 'uploader_url': 'https://blazo.bandcamp.com', 'album': 'Jazz Format Mixtape vol.1', 'uploader_id': 'blazo', 'album_artists': ['Blazo'], 'artists': ['Blazo'], 'release_timestamp': 1311724800.0, }, }, ], 'info_dict': { 'title': 'Jazz Format Mixtape vol.1', 'id': 'jazz-format-mixtape-vol-1', 'uploader_id': 'blazo', 'description': 
'md5:38052a93217f3ffdc033cd5dbbce2989', }, 'params': { 'playlistend': 2, }, 'skip': 'Bandcamp imposes download limits.', }, { 'url': 'http://nightbringer.bandcamp.com/album/hierophany-of-the-open-grave', 'info_dict': { 'title': 'Hierophany of the Open Grave', 'uploader_id': 'nightbringer', 'id': 'hierophany-of-the-open-grave', }, 'playlist_mincount': 9, }, { # with escaped quote in title 'url': 'https://jstrecords.bandcamp.com/album/entropy-ep', 'info_dict': { 'title': '"Entropy" EP', 'uploader_id': 'jstrecords', 'id': 'entropy-ep', 'description': 'md5:0ff22959c943622972596062f2f366a5', }, 'playlist_mincount': 3, }, { # not all tracks have songs 'url': 'https://insulters.bandcamp.com/album/we-are-the-plague', 'info_dict': { 'id': 'we-are-the-plague', 'title': 'WE ARE THE PLAGUE', 'uploader_id': 'insulters', 'description': 'md5:b3cf845ee41b2b1141dc7bde9237255f', }, 'playlist_count': 2, }] @classmethod def suitable(cls, url): return (False if BandcampWeeklyIE.suitable(url) or BandcampIE.suitable(url) else super().suitable(url)) def _real_extract(self, url): uploader_id, album_id = self._match_valid_url(url).groups() playlist_id = album_id or uploader_id webpage = self._download_webpage(url, playlist_id) tralbum = self._extract_data_attr(webpage, playlist_id) track_info = tralbum.get('trackinfo') if not track_info: raise ExtractorError('The page doesn\'t contain any tracks') # Only tracks with duration info have songs entries = [ self.url_result( urljoin(url, t['title_link']), BandcampIE.ie_key(), str_or_none(t.get('track_id') or t.get('id')), t.get('title')) for t in track_info if t.get('duration')] current = tralbum.get('current') or {} return { '_type': 'playlist', 'uploader_id': uploader_id, 'id': playlist_id, 'title': current.get('title'), 'description': current.get('about'), 'entries': entries, } class BandcampWeeklyIE(BandcampIE): # XXX: Do not subclass from concrete IE IE_NAME = 'Bandcamp:weekly' _VALID_URL = 
r'https?://(?:www\.)?bandcamp\.com/radio/?\?(?:[^#]+&)?show=(?P<id>\d+)' _TESTS = [{ 'url': 'https://bandcamp.com/radio?show=224', 'md5': '61acc9a002bed93986b91168aa3ab433', 'info_dict': { 'id': '224', 'ext': 'mp3', 'title': 'Bandcamp Weekly, 2017-04-04', 'description': 'md5:5d48150916e8e02d030623a48512c874', 'thumbnail': 'https://f4.bcbits.com/img/9982549_0.jpg', 'series': 'Bandcamp Weekly', 'episode_id': '224', 'release_timestamp': 1491264000, 'release_date': '20170404', 'duration': 5829.77, }, 'params': { 'format': 'mp3-128', }, }, { 'url': 'https://bandcamp.com/radio/?foo=bar&show=224', 'only_matching': True, }] def _real_extract(self, url): show_id = self._match_id(url) audio_data = self._download_json( 'https://bandcamp.com/api/bcradio_api/1/get_show', show_id, 'Downloading radio show JSON', data=json.dumps({'id': show_id}).encode(), headers={'Content-Type': 'application/json'})['radioShowAudio'] stream_url = audio_data['streamUrl'] format_id = traverse_obj(stream_url, ({parse_qs}, 'enc', -1)) encoding, _, bitrate_str = (format_id or '').partition('-') webpage = self._download_webpage(url, show_id, fatal=False) metadata = traverse_obj( self._extract_data_attr(webpage, show_id, 'blob', fatal=False), ('appData', 'shows', lambda _, v: str(v['showId']) == show_id, any)) or {} series_title = audio_data.get('title') or metadata.get('title') release_timestamp = unified_timestamp(audio_data.get('date')) or unified_timestamp(metadata.get('date')) return { 'id': show_id, 'episode_id': show_id, 'title': join_nonempty(series_title, strftime_or_none(release_timestamp, '%Y-%m-%d'), delim=', '), 'series': series_title, 'thumbnail': format_field(metadata, 'imageId', 'https://f4.bcbits.com/img/%s_0.jpg', default=None), 'description': metadata.get('desc') or metadata.get('short_desc'), 'duration': float_or_none(audio_data.get('duration')), 'release_timestamp': release_timestamp, 'formats': [{ 'url': stream_url, 'format_id': format_id, 'ext': encoding or 'mp3', 'acodec': 
encoding or None, 'vcodec': 'none', 'abr': int_or_none(bitrate_str), }], } class BandcampUserIE(InfoExtractor): IE_NAME = 'Bandcamp:user' _VALID_URL = r'https?://(?!www\.)(?P<id>[^.]+)\.bandcamp\.com(?:/music)?/?(?:[#?]|$)' _TESTS = [{ # Type 1 Bandcamp user page. 'url': 'https://adrianvonziegler.bandcamp.com', 'info_dict': { 'id': 'adrianvonziegler', 'title': 'Discography of adrianvonziegler', }, 'playlist_mincount': 23, }, { # Bandcamp user page with only one album 'url': 'http://dotscale.bandcamp.com', 'info_dict': { 'id': 'dotscale', 'title': 'Discography of dotscale', }, 'playlist_count': 1, }, { # Type 2 Bandcamp user page. 'url': 'https://nightcallofficial.bandcamp.com', 'info_dict': { 'id': 'nightcallofficial', 'title': 'Discography of nightcallofficial', }, 'playlist_count': 4, }, { 'url': 'https://steviasphere.bandcamp.com/music', 'playlist_mincount': 47, 'info_dict': { 'id': 'steviasphere', 'title': 'Discography of steviasphere', }, }, { 'url': 'https://coldworldofficial.bandcamp.com/music', 'playlist_mincount': 7, 'info_dict': { 'id': 'coldworldofficial', 'title': 'Discography of coldworldofficial', }, }, { 'url': 'https://nuclearwarnowproductions.bandcamp.com/music', 'playlist_mincount': 399, 'info_dict': { 'id': 'nuclearwarnowproductions', 'title': 'Discography of nuclearwarnowproductions', }, }] def _yield_items(self, webpage): yield from ( re.findall(r'<li data-item-id=["\'][^>]+>\s*<a href=["\'](?![^"\'/]*?/merch)([^"\']+)', webpage) or re.findall(r'<div[^>]+trackTitle["\'][^"\']+["\']([^"\']+)', webpage)) yield from traverse_obj(webpage, ( {find_element(id='music-grid', html=True)}, {extract_attributes}, 'data-client-items', {json.loads}, ..., 'page_url', {str})) def _real_extract(self, url): uploader = self._match_id(url) webpage = self._download_webpage(url, uploader) return self.playlist_from_matches( self._yield_items(webpage), uploader, f'Discography of {uploader}', getter=urljoin(url))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/mzaalo.py
yt_dlp/extractor/mzaalo.py
from .common import InfoExtractor from ..utils import ( parse_age_limit, parse_duration, traverse_obj, url_or_none, ) class MzaaloIE(InfoExtractor): _VALID_URL = r'(?i)https?://(?:www\.)?mzaalo\.com/(?:play|watch)/(?P<type>movie|original|clip)/(?P<id>[a-f0-9-]+)/[\w-]+' _TESTS = [{ # Movies 'url': 'https://www.mzaalo.com/play/movie/c0958d9f-f90e-4503-a755-44358758921d/Jamun', 'info_dict': { 'id': 'c0958d9f-f90e-4503-a755-44358758921d', 'title': 'Jamun', 'ext': 'mp4', 'description': 'md5:24fe9ebb9bbe5b36f7b54b90ab1e2f31', 'thumbnails': 'count:15', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 5527.0, 'language': 'hin', 'categories': ['Drama'], 'age_limit': 13, }, 'params': {'skip_download': 'm3u8'}, }, { # Shows 'url': 'https://www.mzaalo.com/play/original/93d42b2b-f373-4c2d-bca4-997412cb069d/Modi-Season-2-CM-TO-PM/Episode-1:Decision,-Not-Promises', 'info_dict': { 'id': '93d42b2b-f373-4c2d-bca4-997412cb069d', 'title': 'Episode 1:Decision, Not Promises', 'ext': 'mp4', 'description': 'md5:16f76058432a54774fbb2561a1955652', 'thumbnails': 'count:22', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 2040.0, 'language': 'hin', 'categories': ['Drama'], 'age_limit': 13, }, 'params': {'skip_download': 'm3u8'}, }, { # Streams/Clips 'url': 'https://www.mzaalo.com/play/clip/83cdbcb5-400a-42f1-a1d2-459053cfbda5/Manto-Ki-Kahaaniya', 'info_dict': { 'id': '83cdbcb5-400a-42f1-a1d2-459053cfbda5', 'title': 'Manto Ki Kahaaniya', 'ext': 'mp4', 'description': 'md5:c3c5f1d05f0fd1bfcb05b673d1cc9f2f', 'thumbnails': 'count:3', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 1937.0, 'language': 'hin', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://mzaalo.com/watch/MOVIE/389c892d-0b65-4019-bf73-d4edcb1c014f/Chalo-Dilli', 'only_matching': True, }] def _real_extract(self, url): video_id, type_ = self._match_valid_url(url).group('id', 'type') path = (f'partner/streamurl?&assetId={video_id}&getClipDetails=YES' if type_ == 'clip' else 
f'api/v2/player/details?assetType={type_.upper()}&assetId={video_id}') data = self._download_json( f'https://production.mzaalo.com/platform/{path}', video_id, headers={ 'Ocp-Apim-Subscription-Key': '1d0caac2702049b89a305929fdf4cbae', })['data'] formats = self._extract_m3u8_formats(data['streamURL'], video_id) subtitles = {} for subs_lang, subs_url in traverse_obj(data, ('subtitles', {dict.items}, ...)): if url_or_none(subs_url): subtitles[subs_lang] = [{'url': subs_url, 'ext': 'vtt'}] lang = traverse_obj(data, ('language', {str.lower})) for f in formats: f['language'] = lang return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, **traverse_obj(data, { 'title': ('title', {str}), 'description': ('description', {str}), 'duration': ('duration', {parse_duration}), 'age_limit': ('maturity_rating', {parse_age_limit}), 'thumbnails': ('images', ..., {'url': {url_or_none}}), 'categories': ('genre', ..., {str}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/gbnews.py
yt_dlp/extractor/gbnews.py
from .common import InfoExtractor from ..utils import ( ExtractorError, extract_attributes, get_elements_html_by_class, url_or_none, ) from ..utils.traversal import traverse_obj class GBNewsIE(InfoExtractor): IE_DESC = 'GB News clips, features and live streams' _VALID_URL = r'https?://(?:www\.)?gbnews\.(?:uk|com)/(?:\w+/)?(?P<id>[^#?]+)' _PLATFORM = 'safari' _SSMP_URL = 'https://mm-v2.simplestream.com/ssmp/api.php' _TESTS = [{ 'url': 'https://www.gbnews.com/news/bbc-claudine-gay-harvard-university-antisemitism-row', 'info_dict': { 'id': '52264136', 'ext': 'mp4', 'thumbnail': r're:https?://www\.gbnews\.\w+/.+\.(?:jpe?g|png|webp)', 'display_id': 'bbc-claudine-gay-harvard-university-antisemitism-row', 'description': 'The post was criticised by former employers of the broadcaster', 'title': 'BBC deletes post after furious backlash over headline downplaying antisemitism', }, }, { 'url': 'https://www.gbnews.com/royal/prince-harry-in-love-with-kate-meghan-markle-jealous-royal', 'info_dict': { 'id': '52328390', 'ext': 'mp4', 'thumbnail': r're:https?://www\.gbnews\.\w+/.+\.(?:jpe?g|png|webp)', 'display_id': 'prince-harry-in-love-with-kate-meghan-markle-jealous-royal', 'description': 'Ingrid Seward has published 17 books documenting the highs and lows of the Royal Family', 'title': 'Royal author claims Prince Harry was \'in love\' with Kate - Meghan was \'jealous\'', }, }, { 'url': 'https://www.gbnews.uk/watchlive', 'info_dict': { 'id': '1069', 'ext': 'mp4', 'thumbnail': r're:https?://www\.gbnews\.\w+/.+\.(?:jpe?g|png|webp)', 'display_id': 'watchlive', 'live_status': 'is_live', 'title': r're:^GB News Live', }, 'params': {'skip_download': 'm3u8'}, }] _SS_ENDPOINTS = None def _get_ss_endpoint(self, data_id, data_env): if not self._SS_ENDPOINTS: self._SS_ENDPOINTS = {} if not data_id: data_id = 'GB003' if not data_env: data_env = 'production' key = data_id, data_env result = self._SS_ENDPOINTS.get(key) if result: return result json_data = self._download_json( self._SSMP_URL, 
None, 'Downloading Simplestream JSON metadata', query={ 'id': data_id, 'env': data_env, }) meta_url = traverse_obj(json_data, ('response', 'api_hostname', {url_or_none})) if not meta_url: raise ExtractorError('No API host found') self._SS_ENDPOINTS[key] = meta_url return meta_url def _real_extract(self, url): display_id = self._match_id(url).rpartition('/')[2] webpage = self._download_webpage(url, display_id) video_data = None elements = get_elements_html_by_class('simplestream', webpage) for html_tag in elements: attributes = extract_attributes(html_tag) if 'sidebar' not in (attributes.get('class') or ''): video_data = attributes if not video_data: raise ExtractorError('Could not find video element', expected=True) endpoint_url = self._get_ss_endpoint(video_data.get('data-id'), video_data.get('data-env')) uvid = video_data['data-uvid'] video_type = video_data.get('data-type') if not video_type or video_type == 'vod': video_type = 'show' stream_data = self._download_json( f'{endpoint_url}/api/{video_type}/stream/{uvid}', uvid, 'Downloading stream JSON', query={ 'key': video_data.get('data-key'), 'platform': self._PLATFORM, }) if traverse_obj(stream_data, 'drm'): self.report_drm(uvid) return { 'id': uvid, 'display_id': display_id, 'title': self._og_search_title(webpage, default=None), 'description': self._og_search_description(webpage, default=None), 'formats': self._extract_m3u8_formats(traverse_obj(stream_data, ( 'response', 'stream', {url_or_none})), uvid, 'mp4'), 'thumbnail': self._og_search_thumbnail(webpage, default=None), 'is_live': video_type == 'live', }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/kinja.py
yt_dlp/extractor/kinja.py
import urllib.parse from .common import InfoExtractor from ..utils import ( int_or_none, parse_iso8601, strip_or_none, try_get, ) class KinjaEmbedIE(InfoExtractor): IE_NAME = 'kinja:embed' _DOMAIN_REGEX = r'''(?:[^.]+\.)? (?: avclub| clickhole| deadspin| gizmodo| jalopnik| jezebel| kinja| kotaku| lifehacker| splinternews| the(?:inventory|onion|root|takeout) )\.com''' _COMMON_REGEX = r'''/ (?: ajax/inset| embed/video )/iframe\?.*?\bid=''' _VALID_URL = rf'''(?x)https?://{_DOMAIN_REGEX}{_COMMON_REGEX} (?P<type> fb| imgur| instagram| jwp(?:layer)?-video| kinjavideo| mcp| megaphone| soundcloud(?:-playlist)?| tumblr-post| twitch-stream| twitter| ustream-channel| vimeo| vine| youtube-(?:list|video) )-(?P<id>[^&]+)''' _EMBED_REGEX = [rf'(?x)<iframe[^>]+?src=(?P<q>["\'])(?P<url>(?:(?:https?:)?//{_DOMAIN_REGEX})?{_COMMON_REGEX}(?:(?!\1).)+)\1'] _TESTS = [{ 'url': 'https://kinja.com/ajax/inset/iframe?id=fb-10103303356633621', 'only_matching': True, }, { 'url': 'https://kinja.com/ajax/inset/iframe?id=kinjavideo-100313', 'only_matching': True, }, { 'url': 'https://kinja.com/ajax/inset/iframe?id=megaphone-PPY1300931075', 'only_matching': True, }, { 'url': 'https://kinja.com/ajax/inset/iframe?id=soundcloud-128574047', 'only_matching': True, }, { 'url': 'https://kinja.com/ajax/inset/iframe?id=soundcloud-playlist-317413750', 'only_matching': True, }, { 'url': 'https://kinja.com/ajax/inset/iframe?id=tumblr-post-160130699814-daydreams-at-midnight', 'only_matching': True, }, { 'url': 'https://kinja.com/ajax/inset/iframe?id=twitch-stream-libratus_extra', 'only_matching': True, }, { 'url': 'https://kinja.com/ajax/inset/iframe?id=twitter-1068875942473404422', 'only_matching': True, }, { 'url': 'https://kinja.com/ajax/inset/iframe?id=ustream-channel-10414700', 'only_matching': True, }, { 'url': 'https://kinja.com/ajax/inset/iframe?id=vimeo-120153502', 'only_matching': True, }, { 'url': 'https://kinja.com/ajax/inset/iframe?id=vine-5BlvV5qqPrD', 'only_matching': True, }, { 'url': 
'https://kinja.com/ajax/inset/iframe?id=youtube-list-BCQ3KyrPjgA/PLE6509247C270A72E', 'only_matching': True, }, { 'url': 'https://kinja.com/ajax/inset/iframe?id=youtube-video-00QyL0AgPAE', 'only_matching': True, }] _WEBPAGE_TESTS = [{ 'url': 'http://www.clickhole.com/video/dont-understand-bitcoin-man-will-mumble-explanatio-2537', 'info_dict': { 'id': '106351', 'ext': 'mp4', 'title': 'Don’t Understand Bitcoin? This Man Will Mumble An Explanation At You', }, 'skip': 'Invalid URL', }] _JWPLATFORM_PROVIDER = ('cdn.jwplayer.com/v2/media/', 'JWPlatform') _PROVIDER_MAP = { 'fb': ('facebook.com/video.php?v=', 'Facebook'), 'imgur': ('imgur.com/', 'Imgur'), 'instagram': ('instagram.com/p/', 'Instagram'), 'jwplayer-video': _JWPLATFORM_PROVIDER, 'jwp-video': _JWPLATFORM_PROVIDER, 'megaphone': ('player.megaphone.fm/', 'Generic'), 'soundcloud': ('api.soundcloud.com/tracks/', 'Soundcloud'), 'soundcloud-playlist': ('api.soundcloud.com/playlists/', 'SoundcloudPlaylist'), 'tumblr-post': ('%s.tumblr.com/post/%s', 'Tumblr'), 'twitch-stream': ('twitch.tv/', 'TwitchStream'), 'twitter': ('twitter.com/i/cards/tfw/v1/', 'TwitterCard'), 'ustream-channel': ('ustream.tv/embed/', 'Ustream'), 'vimeo': ('vimeo.com/', 'Vimeo'), 'vine': ('vine.co/v/', 'Vine'), 'youtube-list': ('youtube.com/embed/%s?list=%s', 'YoutubePlaylist'), 'youtube-video': ('youtube.com/embed/', 'Youtube'), } def _real_extract(self, url): video_type, video_id = self._match_valid_url(url).groups() provider = self._PROVIDER_MAP.get(video_type) if provider: video_id = urllib.parse.unquote(video_id) if video_type == 'tumblr-post': video_id, blog = video_id.split('-', 1) result_url = provider[0] % (blog, video_id) elif video_type == 'youtube-list': video_id, playlist_id = video_id.split('/') result_url = provider[0] % (video_id, playlist_id) else: result_url = provider[0] + video_id return self.url_result('http://' + result_url, provider[1]) if video_type == 'kinjavideo': data = self._download_json( 
'https://kinja.com/api/core/video/views/videoById', video_id, query={'videoId': video_id})['data'] title = data['title'] formats = [] for k in ('signedPlaylist', 'streaming'): m3u8_url = data.get(k + 'Url') if m3u8_url: formats.extend(self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) thumbnail = None poster = data.get('poster') or {} poster_id = poster.get('id') if poster_id: thumbnail = 'https://i.kinja-img.com/gawker-media/image/upload/{}.{}'.format(poster_id, poster.get('format') or 'jpg') return { 'id': video_id, 'title': title, 'description': strip_or_none(data.get('description')), 'formats': formats, 'tags': data.get('tags'), 'timestamp': int_or_none(try_get( data, lambda x: x['postInfo']['publishTimeMillis']), 1000), 'thumbnail': thumbnail, 'uploader': data.get('network'), } else: video_data = self._download_json( 'https://api.vmh.univision.com/metadata/v1/content/' + video_id, video_id)['videoMetadata'] iptc = video_data['photoVideoMetadataIPTC'] title = iptc['title']['en'] fmg = video_data.get('photoVideoMetadata_fmg') or {} tvss_domain = fmg.get('tvssDomain') or 'https://auth.univision.com' data = self._download_json( tvss_domain + '/api/v3/video-auth/url-signature-tokens', video_id, query={'mcpids': video_id})['data'][0] formats = [] rendition_url = data.get('renditionUrl') if rendition_url: formats = self._extract_m3u8_formats( rendition_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) fallback_rendition_url = data.get('fallbackRenditionUrl') if fallback_rendition_url: formats.append({ 'format_id': 'fallback', 'tbr': int_or_none(self._search_regex( r'_(\d+)\.mp4', fallback_rendition_url, 'bitrate', default=None)), 'url': fallback_rendition_url, }) return { 'id': video_id, 'title': title, 'thumbnail': try_get(iptc, lambda x: x['cloudinaryLink']['link'], str), 'uploader': fmg.get('network'), 'duration': int_or_none(iptc.get('fileDuration')), 'formats': formats, 'description': try_get(iptc, 
lambda x: x['description']['en'], str), 'timestamp': parse_iso8601(iptc.get('dateReleased')), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/rtve.py
yt_dlp/extractor/rtve.py
import base64 import io import struct import urllib.parse from .common import InfoExtractor from ..utils import ( ExtractorError, InAdvancePagedList, clean_html, determine_ext, float_or_none, int_or_none, make_archive_id, parse_iso8601, qualities, url_or_none, ) from ..utils.traversal import subs_list_to_dict, traverse_obj class RTVEBaseIE(InfoExtractor): # Reimplementation of https://js2.rtve.es/pages/app-player/3.5.1/js/pf_video.js @staticmethod def _decrypt_url(png): encrypted_data = io.BytesIO(base64.b64decode(png)[8:]) while True: length_data = encrypted_data.read(4) length = struct.unpack('!I', length_data)[0] chunk_type = encrypted_data.read(4) if chunk_type == b'IEND': break data = encrypted_data.read(length) if chunk_type == b'tEXt': data = bytes(filter(None, data)) alphabet_data, _, url_data = data.partition(b'#') quality_str, _, url_data = url_data.rpartition(b'%%') quality_str = quality_str.decode() or '' alphabet = RTVEBaseIE._get_alphabet(alphabet_data) url = RTVEBaseIE._get_url(alphabet, url_data) yield quality_str, url encrypted_data.read(4) # CRC @staticmethod def _get_url(alphabet, url_data): url = '' f = 0 e = 3 b = 1 for char in url_data.decode('iso-8859-1'): if f == 0: l = int(char) * 10 f = 1 else: if e == 0: l += int(char) url += alphabet[l] e = (b + 3) % 4 f = 0 b += 1 else: e -= 1 return url @staticmethod def _get_alphabet(alphabet_data): alphabet = [] e = 0 d = 0 for char in alphabet_data.decode('iso-8859-1'): if d == 0: alphabet.append(char) d = e = (e + 1) % 4 else: d -= 1 return alphabet def _extract_png_formats_and_subtitles(self, video_id, media_type='videos'): formats, subtitles = [], {} q = qualities(['Media', 'Alta', 'HQ', 'HD_READY', 'HD_FULL']) for manager in ('rtveplayw', 'default'): png = self._download_webpage( f'http://www.rtve.es/ztnr/movil/thumbnail/{manager}/{media_type}/{video_id}.png', video_id, 'Downloading url information', query={'q': 'v2'}, fatal=False) if not png: continue for quality, video_url in 
self._decrypt_url(png): ext = determine_ext(video_url) if ext == 'm3u8': fmts, subs = self._extract_m3u8_formats_and_subtitles( video_url, video_id, 'mp4', m3u8_id='hls', fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) elif ext == 'mpd': fmts, subs = self._extract_mpd_formats_and_subtitles( video_url, video_id, 'dash', fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) else: formats.append({ 'format_id': quality, 'quality': q(quality), 'url': video_url, }) return formats, subtitles def _parse_metadata(self, metadata): return traverse_obj(metadata, { 'title': ('title', {str.strip}), 'alt_title': ('alt', {str.strip}), 'description': ('description', {clean_html}), 'timestamp': ('dateOfEmission', {parse_iso8601(delimiter=' ')}), 'release_timestamp': ('publicationDate', {parse_iso8601(delimiter=' ')}), 'modified_timestamp': ('modificationDate', {parse_iso8601(delimiter=' ')}), 'thumbnail': (('thumbnail', 'image', 'imageSEO'), {url_or_none}, any), 'duration': ('duration', {float_or_none(scale=1000)}), 'is_live': ('live', {bool}), 'series': (('programTitle', ('programInfo', 'title')), {clean_html}, any), }) class RTVEALaCartaIE(RTVEBaseIE): IE_NAME = 'rtve.es:alacarta' IE_DESC = 'RTVE a la carta and Play' _VALID_URL = [ r'https?://(?:www\.)?rtve\.es/(?:m/)?(?:(?:alacarta|play)/videos|filmoteca)/(?!directo)(?:[^/?#]+/){2}(?P<id>\d+)', r'https?://(?:www\.)?rtve\.es/infantil/serie/[^/?#]+/video/[^/?#]+/(?P<id>\d+)', ] _TESTS = [{ 'url': 'http://www.rtve.es/alacarta/videos/la-aventura-del-saber/aventuraentornosilla/3088905/', 'md5': 'a964547824359a5753aef09d79fe984b', 'info_dict': { 'id': '3088905', 'ext': 'mp4', 'title': 'En torno a la silla', 'duration': 1216.981, 'series': 'La aventura del Saber', 'thumbnail': 'https://img2.rtve.es/v/aventuraentornosilla_3088905.png', }, }, { 'note': 'Live stream', 'url': 'http://www.rtve.es/alacarta/videos/television/24h-live/1694255/', 'info_dict': { 'id': '1694255', 'ext': 
'mp4', 'title': 're:^24H LIVE [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'is_live': True, 'live_status': 'is_live', 'thumbnail': r're:https://img2\.rtve\.es/v/.*\.png', }, 'params': { 'skip_download': 'live stream', }, }, { 'url': 'http://www.rtve.es/alacarta/videos/servir-y-proteger/servir-proteger-capitulo-104/4236788/', 'md5': 'f3cf0d1902d008c48c793e736706c174', 'info_dict': { 'id': '4236788', 'ext': 'mp4', 'title': 'Episodio 104', 'duration': 3222.8, 'thumbnail': r're:https://img2\.rtve\.es/v/.*\.png', 'series': 'Servir y proteger', }, }, { 'url': 'http://www.rtve.es/m/alacarta/videos/cuentame-como-paso/cuentame-como-paso-t16-ultimo-minuto-nuestra-vida-capitulo-276/2969138/?media=tve', 'only_matching': True, }, { 'url': 'http://www.rtve.es/filmoteca/no-do/not-1-introduccion-primer-noticiario-espanol/1465256/', 'only_matching': True, }, { 'url': 'https://www.rtve.es/play/videos/saber-vivir/07-07-24/16177116/', 'md5': 'a5b24fcdfa3ff5cb7908aba53d22d4b6', 'info_dict': { 'id': '16177116', 'ext': 'mp4', 'title': 'Saber vivir - 07/07/24', 'thumbnail': r're:https://img2\.rtve\.es/v/.*\.png', 'duration': 2162.68, 'series': 'Saber vivir', }, }, { 'url': 'https://www.rtve.es/infantil/serie/agus-lui-churros-crafts/video/gusano/7048976/', 'info_dict': { 'id': '7048976', 'ext': 'mp4', 'title': 'Gusano', 'thumbnail': r're:https://img2\.rtve\.es/v/.*\.png', 'duration': 292.86, 'series': 'Agus & Lui: Churros y Crafts', '_old_archive_ids': ['rtveinfantil 7048976'], }, }] def _get_subtitles(self, video_id): subtitle_data = self._download_json( f'https://api2.rtve.es/api/videos/{video_id}/subtitulos.json', video_id, 'Downloading subtitles info') return traverse_obj(subtitle_data, ('page', 'items', ..., { 'id': ('lang', {str}), 'url': ('src', {url_or_none}), }, all, {subs_list_to_dict(lang='es')})) def _real_extract(self, url): video_id = self._match_id(url) metadata = self._download_json( f'http://www.rtve.es/api/videos/{video_id}/config/alacarta_videos.json', 
video_id)['page']['items'][0] if metadata['state'] == 'DESPU': raise ExtractorError('The video is no longer available', expected=True) formats, subtitles = self._extract_png_formats_and_subtitles(video_id) self._merge_subtitles(self.extract_subtitles(video_id), target=subtitles) is_infantil = urllib.parse.urlparse(url).path.startswith('/infantil/') return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, **self._parse_metadata(metadata), '_old_archive_ids': [make_archive_id('rtveinfantil', video_id)] if is_infantil else None, } class RTVEAudioIE(RTVEBaseIE): IE_NAME = 'rtve.es:audio' IE_DESC = 'RTVE audio' _VALID_URL = r'https?://(?:www\.)?rtve\.es/(alacarta|play)/audios/(?:[^/?#]+/){2}(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.rtve.es/alacarta/audios/a-hombros-de-gigantes/palabra-ingeniero-codigos-informaticos-27-04-21/5889192/', 'md5': 'ae06d27bff945c4e87a50f89f6ce48ce', 'info_dict': { 'id': '5889192', 'ext': 'mp3', 'title': 'Códigos informáticos', 'alt_title': 'Códigos informáticos - Escuchar ahora', 'duration': 349.440, 'series': 'A hombros de gigantes', 'description': 'md5:72b0d7c1ca20fd327bdfff7ac0171afb', 'thumbnail': 'https://img2.rtve.es/a/palabra-ingeniero-codigos-informaticos-270421_5889192.png', }, }, { 'url': 'https://www.rtve.es/play/audios/en-radio-3/ignatius-farray/5791165/', 'md5': '072855ab89a9450e0ba314c717fa5ebc', 'info_dict': { 'id': '5791165', 'ext': 'mp3', 'title': 'Ignatius Farray', 'alt_title': 'En Radio 3 - Ignatius Farray - 13/02/21 - escuchar ahora', 'thumbnail': r're:https?://.+/1613243011863.jpg', 'duration': 3559.559, 'series': 'En Radio 3', 'description': 'md5:124aa60b461e0b1724a380bad3bc4040', }, }, { 'url': 'https://www.rtve.es/play/audios/frankenstein-o-el-moderno-prometeo/capitulo-26-ultimo-muerte-victor-juan-jose-plans-mary-shelley/6082623/', 'md5': '0eadab248cc8dd193fa5765712e84d5c', 'info_dict': { 'id': '6082623', 'ext': 'mp3', 'title': 'Capítulo 26 y último: La muerte de Victor', 'alt_title': 'Frankenstein o el 
moderno Prometeo - Capítulo 26 y último: La muerte de Victor', 'thumbnail': r're:https?://.+/1632147445707.jpg', 'duration': 3174.086, 'series': 'Frankenstein o el moderno Prometeo', 'description': 'md5:4ee6fcb82ebe2e46d267e1d1c1a8f7b5', }, }] def _real_extract(self, url): audio_id = self._match_id(url) metadata = self._download_json( f'https://www.rtve.es/api/audios/{audio_id}.json', audio_id)['page']['items'][0] formats, subtitles = self._extract_png_formats_and_subtitles(audio_id, media_type='audios') return { 'id': audio_id, 'formats': formats, 'subtitles': subtitles, **self._parse_metadata(metadata), } class RTVELiveIE(RTVEBaseIE): IE_NAME = 'rtve.es:live' IE_DESC = 'RTVE.es live streams' _VALID_URL = [ r'https?://(?:www\.)?rtve\.es/directo/(?P<id>[a-zA-Z0-9-]+)', r'https?://(?:www\.)?rtve\.es/play/videos/directo/[^/?#]+/(?P<id>[a-zA-Z0-9-]+)', ] _TESTS = [{ 'url': 'http://www.rtve.es/directo/la-1/', 'info_dict': { 'id': 'la-1', 'ext': 'mp4', 'live_status': 'is_live', 'title': str, 'description': str, 'thumbnail': r're:https://img\d\.rtve\.es/resources/thumbslive/\d+\.jpg', 'timestamp': int, 'upload_date': str, }, 'params': {'skip_download': 'live stream'}, }, { 'url': 'https://www.rtve.es/play/videos/directo/deportes/tdp/', 'info_dict': { 'id': 'tdp', 'ext': 'mp4', 'live_status': 'is_live', 'title': str, 'description': str, 'thumbnail': r're:https://img2\d\.rtve\.es/resources/thumbslive/\d+\.jpg', 'timestamp': int, 'upload_date': str, }, 'params': {'skip_download': 'live stream'}, }, { 'url': 'http://www.rtve.es/play/videos/directo/canales-lineales/la-1/', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) data_setup = self._search_json( r'<div[^>]+class="[^"]*videoPlayer[^"]*"[^>]*data-setup=\'', webpage, 'data_setup', video_id) formats, subtitles = self._extract_png_formats_and_subtitles(data_setup['idAsset']) return { 'id': video_id, **self._search_json_ld(webpage, 
video_id, fatal=False), 'title': self._html_extract_title(webpage), 'formats': formats, 'subtitles': subtitles, 'is_live': True, } class RTVETelevisionIE(InfoExtractor): IE_NAME = 'rtve.es:television' _VALID_URL = r'https?://(?:www\.)?rtve\.es/television/[^/?#]+/[^/?#]+/(?P<id>\d+).shtml' _TEST = { 'url': 'https://www.rtve.es/television/20091103/video-inedito-del-8o-programa/299020.shtml', 'info_dict': { 'id': '572515', 'ext': 'mp4', 'title': 'Clase inédita', 'duration': 335.817, 'thumbnail': r're:https://img2\.rtve\.es/v/.*\.png', 'series': 'El coro de la cárcel', }, 'params': { 'skip_download': True, }, } def _real_extract(self, url): page_id = self._match_id(url) webpage = self._download_webpage(url, page_id) play_url = self._html_search_meta('contentUrl', webpage) if play_url is None: raise ExtractorError('The webpage doesn\'t contain any video', expected=True) return self.url_result(play_url, ie=RTVEALaCartaIE.ie_key()) class RTVEProgramIE(RTVEBaseIE): IE_NAME = 'rtve.es:program' IE_DESC = 'RTVE.es programs' _VALID_URL = r'https?://(?:www\.)?rtve\.es/play/videos/(?P<id>[\w-]+)/?(?:[?#]|$)' _TESTS = [{ 'url': 'https://www.rtve.es/play/videos/saber-vivir/', 'info_dict': { 'id': '111570', 'title': 'Saber vivir - Programa de ciencia y futuro en RTVE Play', }, 'playlist_mincount': 400, }] _PAGE_SIZE = 60 def _fetch_page(self, program_id, page_num): return self._download_json( f'https://www.rtve.es/api/programas/{program_id}/videos', program_id, note=f'Downloading page {page_num}', query={ 'type': 39816, 'page': page_num, 'size': 60, }) def _entries(self, page_data): for video in traverse_obj(page_data, ('page', 'items', lambda _, v: url_or_none(v['htmlUrl']))): yield self.url_result( video['htmlUrl'], RTVEALaCartaIE, url_transparent=True, **traverse_obj(video, { 'id': ('id', {str}), 'title': ('longTitle', {str}), 'description': ('shortDescription', {str}), 'duration': ('duration', {float_or_none(scale=1000)}), 'series': (('programInfo', 'title'), {str}, any), 
'season_number': ('temporadaOrden', {int_or_none}), 'season_id': ('temporadaId', {str}), 'season': ('temporada', {str}), 'episode_number': ('episode', {int_or_none}), 'episode': ('title', {str}), 'thumbnail': ('thumbnail', {url_or_none}), }), ) def _real_extract(self, url): program_slug = self._match_id(url) program_page = self._download_webpage(url, program_slug) program_id = self._html_search_meta('DC.identifier', program_page, 'Program ID', fatal=True) first_page = self._fetch_page(program_id, 1) page_count = traverse_obj(first_page, ('page', 'totalPages', {int})) or 1 entries = InAdvancePagedList( lambda idx: self._entries(self._fetch_page(program_id, idx + 1) if idx else first_page), page_count, self._PAGE_SIZE) return self.playlist_result(entries, program_id, self._html_extract_title(program_page))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/twitch.py
yt_dlp/extractor/twitch.py
import collections import itertools import json import random import re import urllib.parse from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, UserNotLive, base_url, clean_html, dict_get, float_or_none, int_or_none, join_nonempty, make_archive_id, parse_duration, parse_iso8601, parse_qs, qualities, str_or_none, try_get, unified_timestamp, update_url_query, url_or_none, urljoin, ) from ..utils.traversal import traverse_obj, value class TwitchBaseIE(InfoExtractor): _VALID_URL_BASE = r'https?://(?:(?:www|go|m)\.)?twitch\.tv' _API_BASE = 'https://api.twitch.tv' _USHER_BASE = 'https://usher.ttvnw.net' _LOGIN_FORM_URL = 'https://www.twitch.tv/login' _LOGIN_POST_URL = 'https://passport.twitch.tv/login' _NETRC_MACHINE = 'twitch' _OPERATION_HASHES = { 'CollectionSideBar': '016e1e4ccee0eb4698eb3bf1a04dc1c077fb746c78c82bac9a8f0289658fbd1a', 'FilterableVideoTower_Videos': '67004f7881e65c297936f32c75246470629557a393788fb5a69d6d9a25a8fd5f', 'ClipsCards__User': '90c33f5e6465122fba8f9371e2a97076f9ed06c6fed3788d002ab9eba8f91d88', 'ShareClipRenderStatus': '1844261bb449fa51e6167040311da4a7a5f1c34fe71c71a3e0c4f551bc30c698', 'ChannelCollectionsContent': '5247910a19b1cd2b760939bf4cba4dcbd3d13bdf8c266decd16956f6ef814077', 'StreamMetadata': 'b57f9b910f8cd1a4659d894fe7550ccc81ec9052c01e438b290fd66a040b9b93', 'ComscoreStreamingQuery': 'e1edae8122517d013405f237ffcc124515dc6ded82480a88daef69c83b53ac01', 'VideoPreviewOverlay': '9515480dee68a77e667cb19de634739d33f243572b007e98e67184b1a5d8369f', 'VideoMetadata': '45111672eea2e507f8ba44d101a61862f9c56b11dee09a15634cb75cb9b9084d', 'VideoPlayer_ChapterSelectButtonVideo': '71835d5ef425e154bf282453a926d99b328cdc5e32f36d3a209d0f4778b41203', 'VideoPlayer_VODSeekbarPreviewVideo': '07e99e4d56c5a7c67117a154777b0baf85a5ffefa393b213f4bc712ccaf85dd6', } @property def _CLIENT_ID(self): return self._configuration_arg( 'client_id', ['ue6666qo983tsx6so1t0vnawi233wa'], ie_key='Twitch', 
casesense=True)[0] def _perform_login(self, username, password): def fail(message): raise ExtractorError( f'Unable to login. Twitch said: {message}', expected=True) def login_step(page, urlh, note, data): form = self._hidden_inputs(page) form.update(data) page_url = urlh.url post_url = self._search_regex( r'<form[^>]+action=(["\'])(?P<url>.+?)\1', page, 'post url', default=self._LOGIN_POST_URL, group='url') post_url = urljoin(page_url, post_url) headers = { 'Referer': page_url, 'Origin': 'https://www.twitch.tv', 'Content-Type': 'text/plain;charset=UTF-8', } response = self._download_json( post_url, None, note, data=json.dumps(form).encode(), headers=headers, expected_status=400) error = dict_get(response, ('error', 'error_description', 'error_code')) if error: fail(error) if 'Authenticated successfully' in response.get('message', ''): return None, None redirect_url = urljoin( post_url, response.get('redirect') or response['redirect_path']) return self._download_webpage_handle( redirect_url, None, 'Downloading login redirect page', headers=headers) login_page, handle = self._download_webpage_handle( self._LOGIN_FORM_URL, None, 'Downloading login page') # Some TOR nodes and public proxies are blocked completely if 'blacklist_message' in login_page: fail(clean_html(login_page)) redirect_page, handle = login_step( login_page, handle, 'Logging in', { 'username': username, 'password': password, 'client_id': self._CLIENT_ID, }) # Successful login if not redirect_page: return if re.search(r'(?i)<form[^>]+id="two-factor-submit"', redirect_page) is not None: # TODO: Add mechanism to request an SMS or phone call tfa_token = self._get_tfa_info('two-factor authentication token') login_step(redirect_page, handle, 'Submitting TFA token', { 'authy_token': tfa_token, 'remember_2fa': 'true', }) def _prefer_source(self, formats): try: source = next(f for f in formats if f['format_id'] == 'Source') source['quality'] = 10 except StopIteration: for f in formats: if '/chunked/' in 
f['url']: f.update({ 'quality': 10, 'format_note': 'Source', }) def _download_base_gql(self, video_id, ops, note, fatal=True): headers = { 'Content-Type': 'text/plain;charset=UTF-8', 'Client-ID': self._CLIENT_ID, } gql_auth = self._get_cookies('https://gql.twitch.tv').get('auth-token') if gql_auth: headers['Authorization'] = 'OAuth ' + gql_auth.value return self._download_json( 'https://gql.twitch.tv/gql', video_id, note, data=json.dumps(ops).encode(), headers=headers, fatal=fatal) def _download_gql(self, video_id, ops, note, fatal=True): for op in ops: op['extensions'] = { 'persistedQuery': { 'version': 1, 'sha256Hash': self._OPERATION_HASHES[op['operationName']], }, } return self._download_base_gql(video_id, ops, note) def _download_access_token(self, video_id, token_kind, param_name): method = f'{token_kind}PlaybackAccessToken' ops = { 'query': '''{ %s( %s: "%s", params: { platform: "web", playerBackend: "mediaplayer", playerType: "site" } ) { value signature } }''' % (method, param_name, video_id), # noqa: UP031 } return self._download_base_gql( video_id, ops, f'Downloading {token_kind} access token GraphQL')['data'][method] def _get_thumbnails(self, thumbnail): return [{ 'url': re.sub(r'\d+x\d+(\.\w+)($|(?=[?#]))', r'0x0\g<1>', thumbnail), 'preference': 1, }, { 'url': thumbnail, }] if thumbnail else None def _extract_twitch_m3u8_formats(self, path, video_id, token, signature, live_from_start=False): try: formats = self._extract_m3u8_formats( f'{self._USHER_BASE}/{path}/{video_id}.m3u8', video_id, 'mp4', query={ 'allow_source': 'true', 'allow_audio_only': 'true', 'allow_spectre': 'true', 'p': random.randint(1000000, 10000000), 'platform': 'web', 'player': 'twitchweb', 'supported_codecs': 'av1,h265,h264', 'playlist_include_framerate': 'true', 'sig': signature, 'token': token, }) except ExtractorError as e: if ( not isinstance(e.cause, HTTPError) or e.cause.status != 403 or e.cause.response.get_header('content-type') != 'application/json' ): raise error_info = 
traverse_obj(e.cause.response.read(), ({json.loads}, 0, {dict})) or {} if error_info.get('error_code') in ('vod_manifest_restricted', 'unauthorized_entitlements'): common_msg = 'access to this subscriber-only content' if self._get_cookies('https://gql.twitch.tv').get('auth-token'): raise ExtractorError(f'Your account does not have {common_msg}', expected=True) self.raise_login_required(f'You must be logged into an account that has {common_msg}') if error_msg := join_nonempty('error_code', 'error', from_dict=error_info, delim=': '): raise ExtractorError(error_msg, expected=True) raise for fmt in formats: if fmt.get('vcodec') and fmt['vcodec'].startswith('av01'): # mpegts does not yet have proper support for av1 fmt.setdefault('downloader_options', {}).update({'ffmpeg_args_out': ['-f', 'mp4']}) if live_from_start: fmt.setdefault('downloader_options', {}).update({'ffmpeg_args': ['-live_start_index', '0']}) fmt['is_from_start'] = True return formats class TwitchVodIE(TwitchBaseIE): IE_NAME = 'twitch:vod' _VALID_URL = r'''(?x) https?:// (?: (?:(?:www|go|m)\.)?twitch\.tv/(?:[^/]+/v(?:ideo)?|videos)/| player\.twitch\.tv/\?.*?\bvideo=v?| www\.twitch\.tv/[^/]+/schedule\?vodID= ) (?P<id>\d+) ''' _TESTS = [{ 'url': 'http://www.twitch.tv/riotgames/v/6528877?t=5m10s', 'info_dict': { 'id': 'v6528877', 'ext': 'mp4', 'title': 'LCK Summer Split - Week 6 Day 1', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 17208, 'timestamp': 1435131734, 'upload_date': '20150624', 'uploader': 'Riot Games', 'uploader_id': 'riotgames', 'view_count': int, 'start_time': 310, 'chapters': [ { 'start_time': 0, 'end_time': 17208, 'title': 'League of Legends', }, ], 'live_status': 'was_live', }, 'params': { # m3u8 download 'skip_download': True, }, }, { # Untitled broadcast (title is None) 'url': 'http://www.twitch.tv/belkao_o/v/11230755', 'info_dict': { 'id': 'v11230755', 'ext': 'mp4', 'title': 'Untitled Broadcast', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 1638, 'timestamp': 1439746708, 
'upload_date': '20150816', 'uploader': 'BelkAO_o', 'uploader_id': 'belkao_o', 'view_count': int, }, 'params': { # m3u8 download 'skip_download': True, }, 'skip': 'HTTP Error 404: Not Found', }, { 'url': 'http://player.twitch.tv/?t=5m10s&video=v6528877', 'only_matching': True, }, { 'url': 'https://www.twitch.tv/videos/6528877', 'only_matching': True, }, { 'url': 'https://m.twitch.tv/beagsandjam/v/247478721', 'only_matching': True, }, { 'url': 'https://www.twitch.tv/northernlion/video/291940395', 'only_matching': True, }, { 'url': 'https://player.twitch.tv/?video=480452374', 'only_matching': True, }, { 'url': 'https://www.twitch.tv/videos/635475444', 'info_dict': { 'id': 'v635475444', 'ext': 'mp4', 'title': 'Riot Games', 'duration': 11643, 'uploader': 'Riot Games', 'uploader_id': 'riotgames', 'timestamp': 1590770569, 'upload_date': '20200529', 'chapters': [ { 'start_time': 0, 'end_time': 573, 'title': 'League of Legends', }, { 'start_time': 573, 'end_time': 3922, 'title': 'Legends of Runeterra', }, { 'start_time': 3922, 'end_time': 11643, 'title': 'Art', }, ], 'live_status': 'was_live', 'thumbnail': r're:^https?://.*\.jpg$', 'view_count': int, }, 'params': { 'skip_download': True, }, }, { 'note': 'Storyboards', 'url': 'https://www.twitch.tv/videos/635475444', 'info_dict': { 'id': 'v635475444', 'format_id': 'sb0', 'ext': 'mhtml', 'title': 'Riot Games', 'duration': 11643, 'uploader': 'Riot Games', 'uploader_id': 'riotgames', 'timestamp': 1590770569, 'upload_date': '20200529', 'chapters': [ { 'start_time': 0, 'end_time': 573, 'title': 'League of Legends', }, { 'start_time': 573, 'end_time': 3922, 'title': 'Legends of Runeterra', }, { 'start_time': 3922, 'end_time': 11643, 'title': 'Art', }, ], 'live_status': 'was_live', 'thumbnail': r're:^https?://.*\.jpg$', 'view_count': int, 'columns': int, 'rows': int, }, 'params': { 'format': 'mhtml', 'skip_download': True, }, }, { 'note': 'VOD with single chapter', 'url': 'https://www.twitch.tv/videos/1536751224', 'info_dict': { 
'id': 'v1536751224', 'ext': 'mp4', 'title': 'Porter Robinson Star Guardian Stream Tour with LilyPichu', 'duration': 8353, 'uploader': 'Riot Games', 'uploader_id': 'riotgames', 'timestamp': 1658267731, 'upload_date': '20220719', 'chapters': [ { 'start_time': 0, 'end_time': 8353, 'title': 'League of Legends', }, ], 'live_status': 'was_live', 'thumbnail': r're:^https?://.*\.jpg$', 'view_count': int, }, 'params': { 'skip_download': True, }, 'expected_warnings': ['Unable to download JSON metadata: HTTP Error 403: Forbidden'], }, { 'url': 'https://www.twitch.tv/tangotek/schedule?vodID=1822395420', 'only_matching': True, }] def _download_info(self, item_id): data = self._download_gql( item_id, [{ 'operationName': 'VideoMetadata', 'variables': { 'channelLogin': '', 'videoID': item_id, }, }, { 'operationName': 'VideoPlayer_ChapterSelectButtonVideo', 'variables': { 'includePrivate': False, 'videoID': item_id, }, }, { 'operationName': 'VideoPlayer_VODSeekbarPreviewVideo', 'variables': { 'includePrivate': False, 'videoID': item_id, }, }], 'Downloading stream metadata GraphQL') video = traverse_obj(data, (..., 'data', 'video'), get_all=False) if video is None: raise ExtractorError(f'Video {item_id} does not exist', expected=True) video['moments'] = traverse_obj(data, (..., 'data', 'video', 'moments', 'edges', ..., 'node')) video['storyboard'] = traverse_obj( data, (..., 'data', 'video', 'seekPreviewsURL', {url_or_none}), get_all=False) return video def _extract_info(self, info): status = info.get('status') if status == 'recording': is_live = True elif status == 'recorded': is_live = False else: is_live = None _QUALITIES = ('small', 'medium', 'large') quality_key = qualities(_QUALITIES) thumbnails = [] preview = info.get('preview') if isinstance(preview, dict): for thumbnail_id, thumbnail_url in preview.items(): thumbnail_url = url_or_none(thumbnail_url) if not thumbnail_url: continue if thumbnail_id not in _QUALITIES: continue thumbnails.append({ 'url': thumbnail_url, 
'preference': quality_key(thumbnail_id), }) return { 'id': info['_id'], 'title': info.get('title') or 'Untitled Broadcast', 'description': info.get('description'), 'duration': int_or_none(info.get('length')), 'thumbnails': thumbnails, 'uploader': info.get('channel', {}).get('display_name'), 'uploader_id': info.get('channel', {}).get('name'), 'timestamp': parse_iso8601(info.get('recorded_at')), 'view_count': int_or_none(info.get('views')), 'is_live': is_live, 'was_live': True, } def _extract_chapters(self, info, item_id): if not info.get('moments'): game = traverse_obj(info, ('game', 'displayName')) if game: yield {'title': game} return for moment in info['moments']: start_time = int_or_none(moment.get('positionMilliseconds'), 1000) duration = int_or_none(moment.get('durationMilliseconds'), 1000) name = str_or_none(moment.get('description')) if start_time is None or duration is None: self.report_warning(f'Important chapter information missing for chapter {name}', item_id) continue yield { 'start_time': start_time, 'end_time': start_time + duration, 'title': name, } def _extract_info_gql(self, info, item_id): vod_id = info.get('id') or item_id # id backward compatibility for download archives if vod_id[0] != 'v': vod_id = f'v{vod_id}' thumbnail = url_or_none(info.get('previewThumbnailURL')) is_live = None if thumbnail: if re.findall(r'/404_processing_[^.?#]+\.png', thumbnail): # False positive for is_live if info.get('broadcastType') == 'HIGHLIGHT' # See https://github.com/yt-dlp/yt-dlp/issues/14455 is_live = info.get('broadcastType') == 'ARCHIVE' thumbnail = None else: is_live = False return { 'id': vod_id, 'title': info.get('title') or 'Untitled Broadcast', 'description': info.get('description'), 'duration': int_or_none(info.get('lengthSeconds')), 'thumbnails': self._get_thumbnails(thumbnail), 'uploader': try_get(info, lambda x: x['owner']['displayName'], str), 'uploader_id': try_get(info, lambda x: x['owner']['login'], str), 'timestamp': 
unified_timestamp(info.get('publishedAt')), 'view_count': int_or_none(info.get('viewCount')), 'chapters': list(self._extract_chapters(info, item_id)), 'is_live': is_live, 'was_live': True, } def _extract_storyboard(self, item_id, storyboard_json_url, duration): if not duration or not storyboard_json_url: return spec = self._download_json(storyboard_json_url, item_id, 'Downloading storyboard metadata JSON', fatal=False) or [] # sort from highest quality to lowest # This makes sb0 the highest-quality format, sb1 - lower, etc which is consistent with youtube sb ordering spec.sort(key=lambda x: int_or_none(x.get('width')) or 0, reverse=True) base = base_url(storyboard_json_url) for i, s in enumerate(spec): count = int_or_none(s.get('count')) images = s.get('images') if not (images and count): continue fragment_duration = duration / len(images) yield { 'format_id': f'sb{i}', 'format_note': 'storyboard', 'ext': 'mhtml', 'protocol': 'mhtml', 'acodec': 'none', 'vcodec': 'none', 'url': urljoin(base, images[0]), 'width': int_or_none(s.get('width')), 'height': int_or_none(s.get('height')), 'fps': count / duration, 'rows': int_or_none(s.get('rows')), 'columns': int_or_none(s.get('cols')), 'fragments': [{ 'url': urljoin(base, path), 'duration': fragment_duration, } for path in images], } def _real_extract(self, url): vod_id = self._match_id(url) video = self._download_info(vod_id) info = self._extract_info_gql(video, vod_id) access_token = self._download_access_token(vod_id, 'video', 'id') formats = self._extract_twitch_m3u8_formats( 'vod', vod_id, access_token['value'], access_token['signature'], live_from_start=self.get_param('live_from_start')) formats.extend(self._extract_storyboard(vod_id, video.get('storyboard'), info.get('duration'))) self._prefer_source(formats) info['formats'] = formats parsed_url = urllib.parse.urlparse(url) query = urllib.parse.parse_qs(parsed_url.query) if 't' in query: info['start_time'] = parse_duration(query['t'][0]) if info.get('timestamp') is 
not None: info['subtitles'] = { 'rechat': [{ 'url': update_url_query( f'https://api.twitch.tv/v5/videos/{vod_id}/comments', { 'client_id': self._CLIENT_ID, }), 'ext': 'json', }], } return info def _make_video_result(node): assert isinstance(node, dict) video_id = node.get('id') if not video_id: return return { '_type': 'url_transparent', 'ie_key': TwitchVodIE.ie_key(), 'id': 'v' + video_id, 'url': f'https://www.twitch.tv/videos/{video_id}', 'title': node.get('title'), 'thumbnail': node.get('previewThumbnailURL'), 'duration': float_or_none(node.get('lengthSeconds')), 'view_count': int_or_none(node.get('viewCount')), } class TwitchCollectionIE(TwitchBaseIE): IE_NAME = 'twitch:collection' _VALID_URL = r'https?://(?:(?:www|go|m)\.)?twitch\.tv/collections/(?P<id>[^/]+)' _TESTS = [{ 'url': 'https://www.twitch.tv/collections/o9zZer3IQBhTJw', 'info_dict': { 'id': 'o9zZer3IQBhTJw', 'title': 'Playthrough Archives', }, 'playlist_mincount': 21, }] _OPERATION_NAME = 'CollectionSideBar' def _real_extract(self, url): collection_id = self._match_id(url) collection = self._download_gql( collection_id, [{ 'operationName': self._OPERATION_NAME, 'variables': {'collectionID': collection_id}, }], 'Downloading collection GraphQL')[0]['data']['collection'] title = collection.get('title') entries = [] for edge in collection['items']['edges']: if not isinstance(edge, dict): continue node = edge.get('node') if not isinstance(node, dict): continue video = _make_video_result(node) if video: entries.append(video) return self.playlist_result( entries, playlist_id=collection_id, playlist_title=title) class TwitchPlaylistBaseIE(TwitchBaseIE): _PAGE_LIMIT = 100 def _entries(self, channel_name, *args): """ Subclasses must define _make_variables() and _extract_entry(), as well as set _OPERATION_NAME, _ENTRY_KIND, _EDGE_KIND, and _NODE_KIND """ cursor = None variables_common = self._make_variables(channel_name, *args) entries_key = f'{self._ENTRY_KIND}s' for page_num in itertools.count(1): variables = 
variables_common.copy() variables['limit'] = self._PAGE_LIMIT if cursor: variables['cursor'] = cursor page = self._download_gql( channel_name, [{ 'operationName': self._OPERATION_NAME, 'variables': variables, }], f'Downloading {self._NODE_KIND}s GraphQL page {page_num}', fatal=False) # Avoid extracting random/unrelated entries when channel_name doesn't exist # See https://github.com/yt-dlp/yt-dlp/issues/15450 if traverse_obj(page, (0, 'data', 'user', 'id', {str})) == '': raise ExtractorError(f'Channel "{channel_name}" not found', expected=True) if not page: break edges = try_get( page, lambda x: x[0]['data']['user'][entries_key]['edges'], list) if not edges: break for edge in edges: if not isinstance(edge, dict): continue if edge.get('__typename') != self._EDGE_KIND: continue node = edge.get('node') if not isinstance(node, dict): continue if node.get('__typename') != self._NODE_KIND: continue entry = self._extract_entry(node) if entry: cursor = edge.get('cursor') yield entry if not cursor or not isinstance(cursor, str): break class TwitchVideosBaseIE(TwitchPlaylistBaseIE): _OPERATION_NAME = 'FilterableVideoTower_Videos' _ENTRY_KIND = 'video' _EDGE_KIND = 'VideoEdge' _NODE_KIND = 'Video' @staticmethod def _make_variables(channel_name, broadcast_type, sort): return { 'channelOwnerLogin': channel_name, 'broadcastType': broadcast_type, 'videoSort': sort.upper(), } class TwitchVideosIE(TwitchVideosBaseIE): IE_NAME = 'twitch:videos' _VALID_URL = r'https?://(?:(?:www|go|m)\.)?twitch\.tv/(?P<id>[^/]+)/(?:videos|profile)' _TESTS = [{ # All Videos sorted by Date 'url': 'https://www.twitch.tv/spamfish/videos?filter=all', 'info_dict': { 'id': 'spamfish', 'title': 'spamfish - All Videos sorted by Date', }, 'playlist_mincount': 751, }, { # All Videos sorted by Popular 'url': 'https://www.twitch.tv/spamfish/videos?filter=all&sort=views', 'info_dict': { 'id': 'spamfish', 'title': 'spamfish - All Videos sorted by Popular', }, 'playlist_mincount': 754, }, { # TODO: Investigate why 
we get 0 entries # Past Broadcasts sorted by Date 'url': 'https://www.twitch.tv/spamfish/videos?filter=archives', 'info_dict': { 'id': 'spamfish', 'title': 'spamfish - Past Broadcasts sorted by Date', }, 'playlist_mincount': 27, }, { # Highlights sorted by Date 'url': 'https://www.twitch.tv/spamfish/videos?filter=highlights', 'info_dict': { 'id': 'spamfish', 'title': 'spamfish - Highlights sorted by Date', }, 'playlist_mincount': 751, }, { # TODO: Investigate why we get 0 entries # Uploads sorted by Date 'url': 'https://www.twitch.tv/esl_csgo/videos?filter=uploads&sort=time', 'info_dict': { 'id': 'esl_csgo', 'title': 'esl_csgo - Uploads sorted by Date', }, 'playlist_mincount': 5, }, { # TODO: Investigate why we get 0 entries # Past Premieres sorted by Date 'url': 'https://www.twitch.tv/spamfish/videos?filter=past_premieres', 'info_dict': { 'id': 'spamfish', 'title': 'spamfish - Past Premieres sorted by Date', }, 'playlist_mincount': 1, }, { 'url': 'https://www.twitch.tv/spamfish/videos/all', 'only_matching': True, }, { 'url': 'https://m.twitch.tv/spamfish/videos/all', 'only_matching': True, }, { 'url': 'https://www.twitch.tv/spamfish/videos', 'only_matching': True, }] Broadcast = collections.namedtuple('Broadcast', ['type', 'label']) _DEFAULT_BROADCAST = Broadcast(None, 'All Videos') _BROADCASTS = { 'archives': Broadcast('ARCHIVE', 'Past Broadcasts'), 'highlights': Broadcast('HIGHLIGHT', 'Highlights'), 'uploads': Broadcast('UPLOAD', 'Uploads'), 'past_premieres': Broadcast('PAST_PREMIERE', 'Past Premieres'), 'all': _DEFAULT_BROADCAST, } _DEFAULT_SORTED_BY = 'Date' _SORTED_BY = { 'time': _DEFAULT_SORTED_BY, 'views': 'Popular', } @classmethod def suitable(cls, url): return (False if any(ie.suitable(url) for ie in ( TwitchVideosClipsIE, TwitchVideosCollectionsIE)) else super().suitable(url)) @staticmethod def _extract_entry(node): return _make_video_result(node) def _real_extract(self, url): channel_name = self._match_id(url) qs = parse_qs(url) video_filter = 
qs.get('filter', ['all'])[0] sort = qs.get('sort', ['time'])[0] broadcast = self._BROADCASTS.get(video_filter, self._DEFAULT_BROADCAST) return self.playlist_result( self._entries(channel_name, broadcast.type, sort), playlist_id=channel_name, playlist_title=( f'{channel_name} - {broadcast.label} ' f'sorted by {self._SORTED_BY.get(sort, self._DEFAULT_SORTED_BY)}')) class TwitchVideosClipsIE(TwitchPlaylistBaseIE): IE_NAME = 'twitch:videos:clips' _VALID_URL = r'https?://(?:(?:www|go|m)\.)?twitch\.tv/(?P<id>[^/]+)/(?:clips|videos/*?\?.*?\bfilter=clips)' _TESTS = [{ # Clips 'url': 'https://www.twitch.tv/vanillatv/clips?filter=clips&range=all', 'info_dict': { 'id': 'vanillatv', 'title': 'vanillatv - Clips Top All', }, 'playlist_mincount': 1, }, { 'url': 'https://www.twitch.tv/dota2ruhub/videos?filter=clips&range=7d', 'only_matching': True, }] Clip = collections.namedtuple('Clip', ['filter', 'label']) _DEFAULT_CLIP = Clip('LAST_WEEK', 'Top 7D') _RANGE = { '24hr': Clip('LAST_DAY', 'Top 24H'), '7d': _DEFAULT_CLIP, '30d': Clip('LAST_MONTH', 'Top 30D'), 'all': Clip('ALL_TIME', 'Top All'), } # NB: values other than 20 result in skipped videos _PAGE_LIMIT = 20 _OPERATION_NAME = 'ClipsCards__User' _ENTRY_KIND = 'clip' _EDGE_KIND = 'ClipEdge' _NODE_KIND = 'Clip' @staticmethod def _make_variables(channel_name, channel_filter): return { 'login': channel_name, 'criteria': { 'filter': channel_filter, }, } @staticmethod def _extract_entry(node): assert isinstance(node, dict) clip_url = url_or_none(node.get('url')) if not clip_url: return return { '_type': 'url_transparent', 'ie_key': TwitchClipsIE.ie_key(), 'id': node.get('id'), 'url': clip_url, 'title': node.get('title'), 'thumbnail': node.get('thumbnailURL'), 'duration': float_or_none(node.get('durationSeconds')), 'timestamp': unified_timestamp(node.get('createdAt')), 'view_count': int_or_none(node.get('viewCount')),
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
true
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nerdcubed.py
yt_dlp/extractor/nerdcubed.py
from .common import InfoExtractor from .youtube import YoutubeIE from ..utils import parse_iso8601, url_or_none from ..utils.traversal import traverse_obj class NerdCubedFeedIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?nerdcubed\.co\.uk/?(?:$|[#?])' _TEST = { 'url': 'http://www.nerdcubed.co.uk/', 'info_dict': { 'id': 'nerdcubed-feed', 'title': 'nerdcubed.co.uk feed', }, 'playlist_mincount': 5500, } def _extract_video(self, feed_entry): return self.url_result( f'https://www.youtube.com/watch?v={feed_entry["id"]}', YoutubeIE, **traverse_obj(feed_entry, { 'id': ('id', {str}), 'title': ('title', {str}), 'description': ('description', {str}), 'timestamp': ('publishedAt', {parse_iso8601}), 'channel': ('source', 'name', {str}), 'channel_id': ('source', 'id', {str}), 'channel_url': ('source', 'url', {str}), 'thumbnail': ('thumbnail', 'source', {url_or_none}), }), url_transparent=True) def _real_extract(self, url): video_id = 'nerdcubed-feed' feed = self._download_json('https://www.nerdcubed.co.uk/_/cdn/videos.json', video_id) return self.playlist_result( map(self._extract_video, traverse_obj(feed, ('videos', lambda _, v: v['id']))), video_id, 'nerdcubed.co.uk feed')
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/cjsw.py
yt_dlp/extractor/cjsw.py
from .common import InfoExtractor from ..utils import ( determine_ext, unescapeHTML, ) class CJSWIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?cjsw\.com/program/(?P<program>[^/]+)/episode/(?P<id>\d+)' _TESTS = [{ 'url': 'http://cjsw.com/program/freshly-squeezed/episode/20170620', 'md5': 'cee14d40f1e9433632c56e3d14977120', 'info_dict': { 'id': '91d9f016-a2e7-46c5-8dcb-7cbcd7437c41', 'ext': 'mp3', 'title': 'Freshly Squeezed – Episode June 20, 2017', 'description': 'md5:c967d63366c3898a80d0c7b0ff337202', 'series': 'Freshly Squeezed', 'episode_id': '20170620', }, }, { # no description 'url': 'http://cjsw.com/program/road-pops/episode/20170707/', 'only_matching': True, }] def _real_extract(self, url): mobj = self._match_valid_url(url) program, episode_id = mobj.group('program', 'id') audio_id = f'{program}/{episode_id}' webpage = self._download_webpage(url, episode_id) title = unescapeHTML(self._search_regex( (r'<h1[^>]+class=["\']episode-header__title["\'][^>]*>(?P<title>[^<]+)', r'data-audio-title=(["\'])(?P<title>(?:(?!\1).)+)\1'), webpage, 'title', group='title')) audio_url = self._search_regex( r'<button[^>]+data-audio-src=(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 'audio url', group='url') audio_id = self._search_regex( r'/([\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})\.mp3', audio_url, 'audio id', default=audio_id) formats = [{ 'url': audio_url, 'ext': determine_ext(audio_url, 'mp3'), 'vcodec': 'none', }] description = self._html_search_regex( r'<p>(?P<description>.+?)</p>', webpage, 'description', default=None) series = self._search_regex( r'data-showname=(["\'])(?P<name>(?:(?!\1).)+)\1', webpage, 'series', default=program, group='name') return { 'id': audio_id, 'title': title, 'description': description, 'formats': formats, 'series': series, 'episode_id': episode_id, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/toongoggles.py
yt_dlp/extractor/toongoggles.py
from .common import InfoExtractor from ..utils import ( int_or_none, parse_duration, ) class ToonGogglesIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?toongoggles\.com/shows/(?P<show_id>\d+)(?:/[^/]+/episodes/(?P<episode_id>\d+))?' _TESTS = [{ 'url': 'http://www.toongoggles.com/shows/217143/bernard-season-2/episodes/217147/football', 'md5': '18289fc2b951eff6b953a9d8f01e6831', 'info_dict': { 'id': '217147', 'ext': 'mp4', 'title': 'Football', 'uploader_id': '1', 'description': 'Bernard decides to play football in order to be better than Lloyd and tries to beat him no matter how, he even cheats.', 'upload_date': '20160718', 'timestamp': 1468879330, }, }, { 'url': 'http://www.toongoggles.com/shows/227759/om-nom-stories-around-the-world', 'info_dict': { 'id': '227759', 'title': 'Om Nom Stories Around The World', }, 'playlist_mincount': 11, }] def _call_api(self, action, page_id, query): query.update({ 'for_ng': 1, 'for_web': 1, 'show_meta': 1, 'version': 7.0, }) return self._download_json('http://api.toongoggles.com/' + action, page_id, query=query) def _parse_episode_data(self, episode_data): title = episode_data['episode_name'] return { '_type': 'url_transparent', 'id': episode_data['episode_id'], 'title': title, 'url': 'kaltura:513551:' + episode_data['entry_id'], 'thumbnail': episode_data.get('thumbnail_url'), 'description': episode_data.get('description'), 'duration': parse_duration(episode_data.get('hms')), 'series': episode_data.get('show_name'), 'season_number': int_or_none(episode_data.get('season_num')), 'episode_id': episode_data.get('episode_id'), 'episode': title, 'episode_number': int_or_none(episode_data.get('episode_num')), 'categories': episode_data.get('categories'), 'ie_key': 'Kaltura', } def _real_extract(self, url): show_id, episode_id = self._match_valid_url(url).groups() if episode_id: episode_data = self._call_api('search', episode_id, { 'filter': 'episode', 'id': episode_id, })['objects'][0] return self._parse_episode_data(episode_data) 
else: show_data = self._call_api('getepisodesbyshow', show_id, { 'max': 1000000000, 'showid': show_id, }) entries = [] for episode_data in show_data.get('objects', []): entries.append(self._parse_episode_data(episode_data)) return self.playlist_result(entries, show_id, show_data.get('show_name'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ninegag.py
yt_dlp/extractor/ninegag.py
from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, traverse_obj, unescapeHTML, url_or_none, ) class NineGagIE(InfoExtractor): IE_NAME = '9gag' IE_DESC = '9GAG' _VALID_URL = r'https?://(?:www\.)?9gag\.com/gag/(?P<id>[^/?&#]+)' _TESTS = [{ 'url': 'https://9gag.com/gag/ae5Ag7B', 'info_dict': { 'id': 'ae5Ag7B', 'ext': 'webm', 'title': 'Capybara Agility Training', 'upload_date': '20191108', 'timestamp': 1573237208, 'thumbnail': 'https://img-9gag-fun.9cache.com/photo/ae5Ag7B_460s.jpg', 'categories': ['Awesome'], 'tags': ['Awesome'], 'duration': 44, 'like_count': int, 'dislike_count': int, 'comment_count': int, }, }, { # HTML escaped title 'url': 'https://9gag.com/gag/av5nvyb', 'only_matching': True, }, { # Non Anonymous Uploader 'url': 'https://9gag.com/gag/ajgp66G', 'info_dict': { 'id': 'ajgp66G', 'ext': 'webm', 'title': 'Master Shifu! Or Splinter! You decide:', 'upload_date': '20220806', 'timestamp': 1659803411, 'thumbnail': 'https://img-9gag-fun.9cache.com/photo/ajgp66G_460s.jpg', 'categories': ['Funny'], 'tags': ['Funny'], 'duration': 26, 'like_count': int, 'dislike_count': int, 'comment_count': int, 'uploader': 'Peter Klaus', 'uploader_id': 'peterklaus12', 'uploader_url': 'https://9gag.com/u/peterklaus12', }, }] def _real_extract(self, url): post_id = self._match_id(url) post = self._download_json( 'https://9gag.com/v1/post', post_id, query={ 'id': post_id, }, impersonate=True)['data']['post'] if post.get('type') != 'Animated': self.raise_no_formats( 'The given url does not contain a video', expected=True) duration = None formats = [] thumbnails = [] for key, image in (post.get('images') or {}).items(): image_url = url_or_none(image.get('url')) if not image_url: continue ext = determine_ext(image_url) image_id = key.strip('image') common = { 'url': image_url, 'width': int_or_none(image.get('width')), 'height': int_or_none(image.get('height')), } if ext in ('jpg', 'png'): webp_url = image.get('webpUrl') if webp_url: t = common.copy() 
t.update({ 'id': image_id + '-webp', 'url': webp_url, }) thumbnails.append(t) common.update({ 'id': image_id, 'ext': ext, }) thumbnails.append(common) elif ext in ('webm', 'mp4'): if not duration: duration = int_or_none(image.get('duration')) common['acodec'] = 'none' if image.get('hasAudio') == 0 else None for vcodec in ('vp8', 'vp9', 'h265'): c_url = image.get(vcodec + 'Url') if not c_url: continue c_f = common.copy() c_f.update({ 'format_id': image_id + '-' + vcodec, 'url': c_url, 'vcodec': vcodec, }) formats.append(c_f) common.update({ 'ext': ext, 'format_id': image_id, }) formats.append(common) section = traverse_obj(post, ('postSection', 'name')) tags = None post_tags = post.get('tags') if post_tags: tags = [] for tag in post_tags: tag_key = tag.get('key') if not tag_key: continue tags.append(tag_key) return { 'id': post_id, 'title': unescapeHTML(post.get('title')), 'timestamp': int_or_none(post.get('creationTs')), 'duration': duration, 'uploader': traverse_obj(post, ('creator', 'fullName')), 'uploader_id': traverse_obj(post, ('creator', 'username')), 'uploader_url': url_or_none(traverse_obj(post, ('creator', 'profileUrl'))), 'formats': formats, 'thumbnails': thumbnails, 'like_count': int_or_none(post.get('upVoteCount')), 'dislike_count': int_or_none(post.get('downVoteCount')), 'comment_count': int_or_none(post.get('commentsCount')), 'age_limit': 18 if post.get('nsfw') == 1 else None, 'categories': [section] if section else None, 'tags': tags, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tarangplus.py
yt_dlp/extractor/tarangplus.py
import base64 import binascii import functools import re from .common import InfoExtractor from ..dependencies import Cryptodome from ..utils import ( ExtractorError, OnDemandPagedList, clean_html, extract_attributes, urljoin, ) from ..utils.traversal import ( find_element, find_elements, require, traverse_obj, ) class TarangPlusBaseIE(InfoExtractor): _BASE_URL = 'https://tarangplus.in' class TarangPlusVideoIE(TarangPlusBaseIE): IE_NAME = 'tarangplus:video' _VALID_URL = r'https?://(?:www\.)?tarangplus\.in/(?:movies|[^#?/]+/[^#?/]+)/(?!episodes)(?P<id>[^#?/]+)' _TESTS = [{ 'url': 'https://tarangplus.in/tarangaplus-originals/khitpit/khitpit-ep-10', 'md5': '78ce056cee755687b8a48199909ecf53', 'info_dict': { 'id': '67b8206719521d054c0059b7', 'display_id': 'khitpit-ep-10', 'ext': 'mp4', 'title': 'Khitpit Ep-10', 'description': 'md5:a45b805cb628e15c853d78b0406eab48', 'thumbnail': r're:https?://.*\.jpg', 'duration': 756.0, 'timestamp': 1740355200, 'upload_date': '20250224', 'media_type': 'episode', 'categories': ['Originals'], }, }, { 'url': 'https://tarangplus.in/tarang-serials/bada-bohu/bada-bohu-ep-233', 'md5': 'b4f9beb15172559bb362203b4f48382e', 'info_dict': { 'id': '680b9d6c19521d054c007782', 'display_id': 'bada-bohu-ep-233', 'ext': 'mp4', 'title': 'Bada Bohu | Ep -233', 'description': 'md5:e6b8e7edc9e60b92c1b390f8789ecd69', 'thumbnail': r're:https?://.*\.jpg', 'duration': 1392.0, 'timestamp': 1745539200, 'upload_date': '20250425', 'media_type': 'episode', 'categories': ['Prime'], }, }, { 'url': 'https://tarangplus.in/short/ai-maa/ai-maa', 'only_matching': True, }, { 'url': 'https://tarangplus.in/shows/tarang-cine-utsav-2024/tarang-cine-utsav-2024-seg-1', 'only_matching': True, }, { 'url': 'https://tarangplus.in/music-videos/chori-chori-bohu-chori-songs/nijara-laguchu-dhire-dhire', 'only_matching': True, }, { 'url': 'https://tarangplus.in/kids-shows/chhota-jaga/chhota-jaga-ep-33-jamidar-ra-khajana-adaya', 'only_matching': True, }, { 'url': 
'https://tarangplus.in/movies/swayambara', 'only_matching': True, }] def decrypt(self, data, key): if not Cryptodome.AES: raise ExtractorError('pycryptodomex not found. Please install', expected=True) iv = binascii.unhexlify('00000000000000000000000000000000') cipher = Cryptodome.AES.new(base64.b64decode(key), Cryptodome.AES.MODE_CBC, iv) return cipher.decrypt(base64.b64decode(data)).decode('utf-8') def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) hidden_inputs_data = self._hidden_inputs(webpage) json_ld_data = self._search_json_ld(webpage, display_id) json_ld_data.pop('url', None) iframe_url = traverse_obj(webpage, ( {find_element(tag='iframe', attr='src', value=r'.+[?&]contenturl=.+', html=True, regex=True)}, {extract_attributes}, 'src', {require('iframe URL')})) # Can't use parse_qs here since it would decode the encrypted base64 `+` chars to spaces content = self._search_regex(r'[?&]contenturl=(.+)', iframe_url, 'content') encrypted_data, _, attrs = content.partition('|') metadata = { m.group('k'): m.group('v') for m in re.finditer(r'(?:^|\|)(?P<k>[a-z_]+)=(?P<v>(?:(?!\|[a-z_]+=).)+)', attrs) } m3u8_url = self.decrypt(encrypted_data, metadata['key']) return { 'id': display_id, # Fallback 'display_id': display_id, **json_ld_data, **traverse_obj(metadata, { 'id': ('content_id', {str}), 'title': ('title', {str}), 'thumbnail': ('image', {str}), }), **traverse_obj(hidden_inputs_data, { 'id': ('content_id', {str}), 'media_type': ('theme_type', {str}), 'categories': ('genre', {str}, filter, all, filter), }), 'formats': self._extract_m3u8_formats(m3u8_url, display_id), } class TarangPlusEpisodesIE(TarangPlusBaseIE): IE_NAME = 'tarangplus:episodes' _VALID_URL = r'https?://(?:www\.)?tarangplus\.in/(?P<type>[^#?/]+)/(?P<id>[^#?/]+)/episodes/?(?:$|[?#])' _TESTS = [{ 'url': 'https://tarangplus.in/tarangaplus-originals/balijatra/episodes', 'info_dict': { 'id': 'balijatra', 'title': 'Balijatra', }, 
'playlist_mincount': 7, }, { 'url': 'https://tarangplus.in/tarang-serials/bada-bohu/episodes', 'info_dict': { 'id': 'bada-bohu', 'title': 'Bada Bohu', }, 'playlist_mincount': 236, }, { 'url': 'https://tarangplus.in/shows/dr-nonsense/episodes', 'info_dict': { 'id': 'dr-nonsense', 'title': 'Dr. Nonsense', }, 'playlist_mincount': 15, }] _PAGE_SIZE = 20 def _entries(self, playlist_url, playlist_id, page): data = self._download_json( playlist_url, playlist_id, f'Downloading playlist JSON page {page + 1}', query={'page_no': page}) for item in traverse_obj(data, ('items', ..., {str})): yield self.url_result( urljoin(self._BASE_URL, item.split('$')[3]), TarangPlusVideoIE) def _real_extract(self, url): url_type, display_id = self._match_valid_url(url).group('type', 'id') series_url = f'{self._BASE_URL}/{url_type}/{display_id}' webpage = self._download_webpage(series_url, display_id) entries = OnDemandPagedList( functools.partial(self._entries, f'{series_url}/episodes', display_id), self._PAGE_SIZE) return self.playlist_result( entries, display_id, self._hidden_inputs(webpage).get('title')) class TarangPlusPlaylistIE(TarangPlusBaseIE): IE_NAME = 'tarangplus:playlist' _VALID_URL = r'https?://(?:www\.)?tarangplus\.in/(?P<id>[^#?/]+)/all/?(?:$|[?#])' _TESTS = [{ 'url': 'https://tarangplus.in/chhota-jaga/all', 'info_dict': { 'id': 'chhota-jaga', 'title': 'Chhota Jaga', }, 'playlist_mincount': 33, }, { 'url': 'https://tarangplus.in/kids-yali-show/all', 'info_dict': { 'id': 'kids-yali-show', 'title': 'Yali', }, 'playlist_mincount': 10, }, { 'url': 'https://tarangplus.in/trailer/all', 'info_dict': { 'id': 'trailer', 'title': 'Trailer', }, 'playlist_mincount': 57, }, { 'url': 'https://tarangplus.in/latest-songs/all', 'info_dict': { 'id': 'latest-songs', 'title': 'Latest Songs', }, 'playlist_mincount': 46, }, { 'url': 'https://tarangplus.in/premium-serials-episodes/all', 'info_dict': { 'id': 'premium-serials-episodes', 'title': 'Primetime Latest Episodes', }, 'playlist_mincount': 
100, }] def _entries(self, webpage): for url_path in traverse_obj(webpage, ( {find_elements(cls='item')}, ..., {find_elements(tag='a', attr='href', value='/.+', html=True, regex=True)}, ..., {extract_attributes}, 'href', )): yield self.url_result(urljoin(self._BASE_URL, url_path), TarangPlusVideoIE) def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) return self.playlist_result( self._entries(webpage), display_id, traverse_obj(webpage, ({find_element(id='al_title')}, {clean_html})))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tvw.py
yt_dlp/extractor/tvw.py
import json from .common import InfoExtractor from ..utils import ( clean_html, extract_attributes, parse_qs, remove_end, require, unified_timestamp, url_or_none, ) from ..utils.traversal import find_element, find_elements, traverse_obj class TvwIE(InfoExtractor): IE_NAME = 'tvw' _VALID_URL = [ r'https?://(?:www\.)?tvw\.org/video/(?P<id>[^/?#]+)', r'https?://(?:www\.)?tvw\.org/watch/?\?(?:[^#]+&)?eventID=(?P<id>\d+)', ] _TESTS = [{ 'url': 'https://tvw.org/video/billy-frank-jr-statue-maquette-unveiling-ceremony-2024011211/', 'md5': '9ceb94fe2bb7fd726f74f16356825703', 'info_dict': { 'id': '2024011211', 'ext': 'mp4', 'title': 'Billy Frank Jr. Statue Maquette Unveiling Ceremony', 'thumbnail': r're:^https?://.*\.(?:jpe?g|png)$', 'description': 'md5:58a8150017d985b4f377e11ee8f6f36e', 'timestamp': 1704902400, 'upload_date': '20240110', 'location': 'Legislative Building', 'display_id': 'billy-frank-jr-statue-maquette-unveiling-ceremony-2024011211', 'categories': ['General Interest'], }, }, { 'url': 'https://tvw.org/video/ebeys-landing-state-park-2024081007/', 'md5': '71e87dae3deafd65d75ff3137b9a32fc', 'info_dict': { 'id': '2024081007', 'ext': 'mp4', 'title': 'Ebey\'s Landing State Park', 'thumbnail': r're:^https?://.*\.(?:jpe?g|png)$', 'description': 'md5:50c5bd73bde32fa6286a008dbc853386', 'timestamp': 1724310900, 'upload_date': '20240822', 'location': 'Ebey’s Landing State Park', 'display_id': 'ebeys-landing-state-park-2024081007', 'categories': ['Washington State Parks'], }, }, { 'url': 'https://tvw.org/video/home-warranties-workgroup-2', 'md5': 'f678789bf94d07da89809f213cf37150', 'info_dict': { 'id': '1999121000', 'ext': 'mp4', 'title': 'Home Warranties Workgroup', 'thumbnail': r're:^https?://.*\.(?:jpe?g|png)$', 'description': 'md5:861396cc523c9641d0dce690bc5c35f3', 'timestamp': 946389600, 'upload_date': '19991228', 'display_id': 'home-warranties-workgroup-2', 'categories': ['Legislative'], }, }, { 'url': 
'https://tvw.org/video/washington-to-washington-a-new-space-race-2022041111/?eventID=2022041111', 'md5': '6f5551090b351aba10c0d08a881b4f30', 'info_dict': { 'id': '2022041111', 'ext': 'mp4', 'title': 'Washington to Washington - A New Space Race', 'thumbnail': r're:^https?://.*\.(?:jpe?g|png)$', 'description': 'md5:f65a24eec56107afbcebb3aa5cd26341', 'timestamp': 1650394800, 'upload_date': '20220419', 'location': 'Hayner Media Center', 'display_id': 'washington-to-washington-a-new-space-race-2022041111', 'categories': ['Washington to Washington', 'General Interest'], }, }, { 'url': 'https://tvw.org/watch?eventID=2025041235', 'md5': '7d697c02f110b37d6a47622ea608ca90', 'info_dict': { 'id': '2025041235', 'ext': 'mp4', 'title': 'Legislative Review - Medicaid Postpartum Bill Sparks Debate & Senate Approves Automatic Voter Registration', 'thumbnail': r're:^https?://.*\.(?:jpe?g|png)$', 'description': 'md5:37d0f3a9187ae520aac261b3959eaee6', 'timestamp': 1745006400, 'upload_date': '20250418', 'location': 'Hayner Media Center', 'categories': ['Legislative Review'], }, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) client_id = self._html_search_meta('clientID', webpage, fatal=True) video_id = self._html_search_meta('eventID', webpage, fatal=True) video_data = self._download_json( 'https://api.v3.invintus.com/v2/Event/getDetailed', video_id, headers={ 'authorization': 'embedder', 'wsc-api-key': '7WhiEBzijpritypp8bqcU7pfU9uicDR', }, data=json.dumps({ 'clientID': client_id, 'eventID': video_id, 'showStreams': True, }).encode())['data'] formats = [] subtitles = {} for stream_url in traverse_obj(video_data, ('streamingURIs', ..., {url_or_none})): fmts, subs = self._extract_m3u8_formats_and_subtitles( stream_url, video_id, 'mp4', m3u8_id='hls', fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) if caption_url := traverse_obj(video_data, ('captionPath', {url_or_none})): 
subtitles.setdefault('en', []).append({'url': caption_url, 'ext': 'vtt'}) return { 'id': video_id, 'display_id': display_id, 'formats': formats, 'subtitles': subtitles, 'title': remove_end(self._og_search_title(webpage, default=None), ' - TVW'), 'description': self._og_search_description(webpage, default=None), **traverse_obj(video_data, { 'title': ('title', {str}), 'description': ('description', {clean_html}), 'categories': ('categories', ..., {str}), 'thumbnail': ('videoThumbnail', {url_or_none}), 'timestamp': ('startDateTime', {unified_timestamp}), 'location': ('locationName', {str}), 'is_live': ('eventStatus', {lambda x: x == 'live'}), }), } class TvwNewsIE(InfoExtractor): IE_NAME = 'tvw:news' _VALID_URL = r'https?://(?:www\.)?tvw\.org/\d{4}/\d{2}/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://tvw.org/2024/01/the-impact-issues-to-watch-in-the-2024-legislative-session/', 'info_dict': { 'id': 'the-impact-issues-to-watch-in-the-2024-legislative-session', 'title': 'The Impact - Issues to Watch in the 2024 Legislative Session', 'description': 'md5:65f0b33ec8f18ff1cd401c5547aa5441', }, 'playlist_count': 6, }, { 'url': 'https://tvw.org/2024/06/the-impact-water-rights-and-the-skookumchuck-dam-debate/', 'info_dict': { 'id': 'the-impact-water-rights-and-the-skookumchuck-dam-debate', 'title': 'The Impact - Water Rights and the Skookumchuck Dam Debate', 'description': 'md5:185f3a2350ef81e3fa159ac3e040a94b', }, 'playlist_count': 1, }] def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) video_ids = traverse_obj(webpage, ( {find_elements(cls='invintus-player', html=True)}, ..., {extract_attributes}, 'data-eventid')) return self.playlist_from_matches( video_ids, playlist_id, playlist_title=remove_end(self._og_search_title(webpage, default=None), ' - TVW'), playlist_description=self._og_search_description(webpage, default=None), getter=lambda x: f'https://tvw.org/watch?eventID={x}', ie=TvwIE) class 
TvwTvChannelsIE(InfoExtractor): IE_NAME = 'tvw:tvchannels' _VALID_URL = r'https?://(?:www\.)?tvw\.org/tvchannels/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://tvw.org/tvchannels/air/', 'info_dict': { 'id': 'air', 'ext': 'mp4', 'title': r're:TVW Cable Channel Live Stream', 'thumbnail': r're:https?://.+/.+\.(?:jpe?g|png)$', 'live_status': 'is_live', }, }, { 'url': 'https://tvw.org/tvchannels/tvw2/', 'info_dict': { 'id': 'tvw2', 'ext': 'mp4', 'title': r're:TVW-2 Broadcast Channel', 'thumbnail': r're:https?://.+/.+\.(?:jpe?g|png)$', 'live_status': 'is_live', }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) m3u8_url = traverse_obj(webpage, ( {find_element(id='invintus-persistent-stream-frame', html=True)}, {extract_attributes}, 'src', {parse_qs}, 'encoder', 0, {json.loads}, 'live247URI', {url_or_none}, {require('stream url')})) return { 'id': video_id, 'formats': self._extract_m3u8_formats(m3u8_url, video_id, 'mp4', m3u8_id='hls', live=True), 'title': remove_end(self._og_search_title(webpage, default=None), ' - TVW'), 'thumbnail': self._og_search_thumbnail(webpage, default=None), 'is_live': True, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ivi.py
yt_dlp/extractor/ivi.py
import json import re from .common import InfoExtractor from ..dependencies import Cryptodome from ..utils import ExtractorError, int_or_none, qualities class IviIE(InfoExtractor): IE_DESC = 'ivi.ru' IE_NAME = 'ivi' _VALID_URL = r'https?://(?:www\.)?ivi\.(?:ru|tv)/(?:watch/(?:[^/]+/)?|video/player\?.*?videoId=)(?P<id>\d+)' _EMBED_REGEX = [r'<embed[^>]+?src=(["\'])(?P<url>https?://(?:www\.)?ivi\.ru/video/player.+?)\1'] _GEO_BYPASS = False _GEO_COUNTRIES = ['RU'] _LIGHT_KEY = b'\xf1\x02\x32\xb7\xbc\x5c\x7a\xe8\xf7\x96\xc1\x33\x2b\x27\xa1\x8c' _LIGHT_URL = 'https://api.ivi.ru/light/' _TESTS = [ # Single movie { 'url': 'http://www.ivi.ru/watch/53141', 'md5': '6ff5be2254e796ed346251d117196cf4', 'info_dict': { 'id': '53141', 'ext': 'mp4', 'title': 'Иван Васильевич меняет профессию', 'description': 'md5:b924063ea1677c8fe343d8a72ac2195f', 'duration': 5498, 'thumbnail': r're:^https?://.*\.jpg$', }, 'skip': 'Only works from Russia', }, # Serial's series { 'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa/9549', 'md5': '221f56b35e3ed815fde2df71032f4b3e', 'info_dict': { 'id': '9549', 'ext': 'mp4', 'title': 'Двое из ларца - Дело Гольдберга (1 часть)', 'series': 'Двое из ларца', 'season': 'Сезон 1', 'season_number': 1, 'episode': 'Дело Гольдберга (1 часть)', 'episode_number': 1, 'duration': 2655, 'thumbnail': r're:^https?://.*\.jpg$', }, 'skip': 'Only works from Russia', }, { # with MP4-HD720 format 'url': 'http://www.ivi.ru/watch/146500', 'md5': 'd63d35cdbfa1ea61a5eafec7cc523e1e', 'info_dict': { 'id': '146500', 'ext': 'mp4', 'title': 'Кукла', 'description': 'md5:ffca9372399976a2d260a407cc74cce6', 'duration': 5599, 'thumbnail': r're:^https?://.*\.jpg$', }, 'skip': 'Only works from Russia', }, { 'url': 'https://www.ivi.tv/watch/33560/', 'only_matching': True, }, ] # Sorted by quality _KNOWN_FORMATS = ( 'MP4-low-mobile', 'MP4-mobile', 'FLV-lo', 'MP4-lo', 'FLV-hi', 'MP4-hi', 'MP4-SHQ', 'MP4-HD720', 'MP4-HD1080') def _real_extract(self, url): video_id = self._match_id(url) data = 
json.dumps({ 'method': 'da.content.get', 'params': [ video_id, { 'site': 's%d', 'referrer': f'http://www.ivi.ru/watch/{video_id}', 'contentid': video_id, }, ], }) for site in (353, 183): content_data = (data % site).encode() if site == 353: if not Cryptodome.CMAC: continue timestamp = (self._download_json( self._LIGHT_URL, video_id, 'Downloading timestamp JSON', data=json.dumps({ 'method': 'da.timestamp.get', 'params': [], }).encode(), fatal=False) or {}).get('result') if not timestamp: continue query = { 'ts': timestamp, 'sign': Cryptodome.CMAC.new(self._LIGHT_KEY, timestamp.encode() + content_data, Cryptodome.Blowfish).hexdigest(), } else: query = {} video_json = self._download_json( self._LIGHT_URL, video_id, 'Downloading video JSON', data=content_data, query=query) error = video_json.get('error') if error: origin = error.get('origin') message = error.get('message') or error.get('user_message') extractor_msg = 'Unable to download video %s' if origin == 'NotAllowedForLocation': self.raise_geo_restricted(message, self._GEO_COUNTRIES) elif origin == 'NoRedisValidData': extractor_msg = 'Video %s does not exist' elif site == 353: continue elif not Cryptodome.CMAC: raise ExtractorError('pycryptodomex not found. 
Please install', expected=True) elif message: extractor_msg += ': ' + message raise ExtractorError(extractor_msg % video_id, expected=True) else: break result = video_json['result'] title = result['title'] quality = qualities(self._KNOWN_FORMATS) formats = [] for f in result.get('files', []): f_url = f.get('url') content_format = f.get('content_format') if not f_url: continue if (not self.get_param('allow_unplayable_formats') and ('-MDRM-' in content_format or '-FPS-' in content_format)): continue formats.append({ 'url': f_url, 'format_id': content_format, 'quality': quality(content_format), 'filesize': int_or_none(f.get('size_in_bytes')), }) compilation = result.get('compilation') episode = title if compilation else None title = f'{compilation} - {title}' if compilation is not None else title thumbnails = [{ 'url': preview['url'], 'id': preview.get('content_format'), } for preview in result.get('preview', []) if preview.get('url')] webpage = self._download_webpage(url, video_id) season = self._search_regex( r'<li[^>]+class="season active"[^>]*><a[^>]+>([^<]+)', webpage, 'season', default=None) season_number = int_or_none(self._search_regex( r'<li[^>]+class="season active"[^>]*><a[^>]+data-season(?:-index)?="(\d+)"', webpage, 'season number', default=None)) episode_number = int_or_none(self._search_regex( r'[^>]+itemprop="episode"[^>]*>\s*<meta[^>]+itemprop="episodeNumber"[^>]+content="(\d+)', webpage, 'episode number', default=None)) description = self._og_search_description(webpage, default=None) or self._html_search_meta( 'description', webpage, 'description', default=None) return { 'id': video_id, 'title': title, 'series': compilation, 'season': season, 'season_number': season_number, 'episode': episode, 'episode_number': episode_number, 'thumbnails': thumbnails, 'description': description, 'duration': int_or_none(result.get('duration')), 'formats': formats, } class IviCompilationIE(InfoExtractor): IE_DESC = 'ivi.ru compilations' IE_NAME = 'ivi:compilation' 
_VALID_URL = r'https?://(?:www\.)?ivi\.ru/watch/(?!\d+)(?P<compilationid>[a-z\d_-]+)(?:/season(?P<seasonid>\d+))?$' _TESTS = [{ 'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa', 'info_dict': { 'id': 'dvoe_iz_lartsa', 'title': 'Двое из ларца (2006 - 2008)', }, 'playlist_mincount': 24, }, { 'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa/season1', 'info_dict': { 'id': 'dvoe_iz_lartsa/season1', 'title': 'Двое из ларца (2006 - 2008) 1 сезон', }, 'playlist_mincount': 12, }] def _extract_entries(self, html, compilation_id): return [ self.url_result( f'http://www.ivi.ru/watch/{compilation_id}/{serie}', IviIE.ie_key()) for serie in re.findall( rf'<a\b[^>]+\bhref=["\']/watch/{compilation_id}/(\d+)["\']', html)] def _real_extract(self, url): mobj = self._match_valid_url(url) compilation_id = mobj.group('compilationid') season_id = mobj.group('seasonid') if season_id is not None: # Season link season_page = self._download_webpage( url, compilation_id, f'Downloading season {season_id} web page') playlist_id = f'{compilation_id}/season{season_id}' playlist_title = self._html_search_meta('title', season_page, 'title') entries = self._extract_entries(season_page, compilation_id) else: # Compilation link compilation_page = self._download_webpage(url, compilation_id, 'Downloading compilation web page') playlist_id = compilation_id playlist_title = self._html_search_meta('title', compilation_page, 'title') seasons = re.findall( rf'<a href="/watch/{compilation_id}/season(\d+)', compilation_page) if not seasons: # No seasons in this compilation entries = self._extract_entries(compilation_page, compilation_id) else: entries = [] for season_id in seasons: season_page = self._download_webpage( f'http://www.ivi.ru/watch/{compilation_id}/season{season_id}', compilation_id, f'Downloading season {season_id} web page') entries.extend(self._extract_entries(season_page, compilation_id)) return self.playlist_result(entries, playlist_id, playlist_title)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/dumpert.py
yt_dlp/extractor/dumpert.py
from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, qualities, ) class DumpertIE(InfoExtractor): _VALID_URL = r'''(?x) (?P<protocol>https?)://(?:(?:www|legacy)\.)?dumpert\.nl/(?: (?:mediabase|embed|item)/| [^#]*[?&]selectedId= )(?P<id>[0-9]+[/_][0-9a-zA-Z]+)''' _TESTS = [{ 'url': 'https://www.dumpert.nl/item/6646981_951bc60f', 'md5': '1b9318d7d5054e7dcb9dc7654f21d643', 'info_dict': { 'id': '6646981/951bc60f', 'ext': 'mp4', 'title': 'Ik heb nieuws voor je', 'description': 'Niet schrikken hoor', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 9, 'view_count': int, 'like_count': int, }, }, { 'url': 'https://www.dumpert.nl/embed/6675421_dc440fe7', 'only_matching': True, }, { 'url': 'http://legacy.dumpert.nl/mediabase/6646981/951bc60f', 'only_matching': True, }, { 'url': 'http://legacy.dumpert.nl/embed/6675421/dc440fe7', 'only_matching': True, }, { 'url': 'https://www.dumpert.nl/item/100031688_b317a185', 'info_dict': { 'id': '100031688/b317a185', 'ext': 'mp4', 'title': 'Epic schijnbeweging', 'description': '<p>Die zag je niet eh</p>', 'thumbnail': r're:^https?://.*\.(?:jpg|png)$', 'duration': 12, 'view_count': int, 'like_count': int, }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.dumpert.nl/toppers?selectedId=100031688_b317a185', 'only_matching': True, }, { 'url': 'https://www.dumpert.nl/latest?selectedId=100031688_b317a185', 'only_matching': True, }, { 'url': 'https://www.dumpert.nl/?selectedId=100031688_b317a185', 'only_matching': True, }, { 'url': 'https://www.dumpert.nl/toppers/dag?selectedId=100086074_f5cef3ac', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url).replace('_', '/') item = self._download_json( 'http://api-live.dumpert.nl/mobile_api/json/info/' + video_id.replace('/', '_'), video_id)['items'][0] title = item['title'] media = next(m for m in item['media'] if m.get('mediatype') == 'VIDEO') quality = qualities(['flv', 'mobile', 'tablet', '720p', '1080p']) formats = [] 
for variant in media.get('variants', []): uri = variant.get('uri') if not uri: continue version = variant.get('version') preference = quality(version) if determine_ext(uri) == 'm3u8': formats.extend(self._extract_m3u8_formats( uri, video_id, 'mp4', m3u8_id=version, quality=preference)) else: formats.append({ 'url': uri, 'format_id': version, 'quality': preference, }) thumbnails = [] stills = item.get('stills') or {} for t in ('thumb', 'still'): for s in ('', '-medium', '-large'): still_id = t + s still_url = stills.get(still_id) if not still_url: continue thumbnails.append({ 'id': still_id, 'url': still_url, }) stats = item.get('stats') or {} return { 'id': video_id, 'title': title, 'description': item.get('description'), 'thumbnails': thumbnails, 'formats': formats, 'duration': int_or_none(media.get('duration')), 'like_count': int_or_none(stats.get('kudos_total')), 'view_count': int_or_none(stats.get('views_total')), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/bandlab.py
yt_dlp/extractor/bandlab.py
from .common import InfoExtractor from ..utils import ( ExtractorError, float_or_none, format_field, int_or_none, parse_iso8601, parse_qs, truncate_string, url_or_none, ) from ..utils.traversal import traverse_obj, value class BandlabBaseIE(InfoExtractor): def _call_api(self, endpoint, asset_id, **kwargs): headers = kwargs.pop('headers', None) or {} return self._download_json( f'https://www.bandlab.com/api/v1.3/{endpoint}/{asset_id}', asset_id, headers={ 'accept': 'application/json', 'referer': 'https://www.bandlab.com/', 'x-client-id': 'BandLab-Web', 'x-client-version': '10.1.124', **headers, }, **kwargs) def _parse_revision(self, revision_data, url=None): return { 'vcodec': 'none', 'media_type': 'revision', 'extractor_key': BandlabIE.ie_key(), 'extractor': BandlabIE.IE_NAME, **traverse_obj(revision_data, { 'webpage_url': ( 'id', ({value(url)}, {format_field(template='https://www.bandlab.com/revision/%s')}), filter, any), 'id': (('revisionId', 'id'), {str}, any), 'title': ('song', 'name', {str}), 'track': ('song', 'name', {str}), 'url': ('mixdown', 'file', {url_or_none}), 'thumbnail': ('song', 'picture', 'url', {url_or_none}), 'description': ('description', {str}), 'uploader': ('creator', 'name', {str}), 'uploader_id': ('creator', 'username', {str}), 'timestamp': ('createdOn', {parse_iso8601}), 'duration': ('mixdown', 'duration', {float_or_none}), 'view_count': ('counters', 'plays', {int_or_none}), 'like_count': ('counters', 'likes', {int_or_none}), 'comment_count': ('counters', 'comments', {int_or_none}), 'genres': ('genres', ..., 'name', {str}), }), } def _parse_track(self, track_data, url=None): return { 'vcodec': 'none', 'media_type': 'track', 'extractor_key': BandlabIE.ie_key(), 'extractor': BandlabIE.IE_NAME, **traverse_obj(track_data, { 'webpage_url': ( 'id', ({value(url)}, {format_field(template='https://www.bandlab.com/post/%s')}), filter, any), 'id': (('revisionId', 'id'), {str}, any), 'url': ('track', 'sample', 'audioUrl', {url_or_none}), 'title': 
('track', 'name', {str}), 'track': ('track', 'name', {str}), 'description': ('caption', {str}), 'thumbnail': ('track', 'picture', ('original', 'url'), {url_or_none}, any), 'view_count': ('counters', 'plays', {int_or_none}), 'like_count': ('counters', 'likes', {int_or_none}), 'comment_count': ('counters', 'comments', {int_or_none}), 'duration': ('track', 'sample', 'duration', {float_or_none}), 'uploader': ('creator', 'name', {str}), 'uploader_id': ('creator', 'username', {str}), 'timestamp': ('createdOn', {parse_iso8601}), }), } def _parse_video(self, video_data, url=None): return { 'media_type': 'video', 'extractor_key': BandlabIE.ie_key(), 'extractor': BandlabIE.IE_NAME, **traverse_obj(video_data, { 'id': ('id', {str}), 'webpage_url': ( 'id', ({value(url)}, {format_field(template='https://www.bandlab.com/post/%s')}), filter, any), 'url': ('video', 'url', {url_or_none}), 'title': ('caption', {lambda x: x.replace('\n', ' ')}, {truncate_string(left=72)}), 'description': ('caption', {str}), 'thumbnail': ('video', 'picture', 'url', {url_or_none}), 'view_count': ('video', 'counters', 'plays', {int_or_none}), 'like_count': ('video', 'counters', 'likes', {int_or_none}), 'comment_count': ('counters', 'comments', {int_or_none}), 'duration': ('video', 'duration', {float_or_none}), 'uploader': ('creator', 'name', {str}), 'uploader_id': ('creator', 'username', {str}), }), } class BandlabIE(BandlabBaseIE): _VALID_URL = [ r'https?://(?:www\.)?bandlab.com/(?P<url_type>track|post|revision)/(?P<id>[\da-f_-]+)', r'https?://(?:www\.)?bandlab.com/(?P<url_type>embed)/\?(?:[^#]*&)?id=(?P<id>[\da-f-]+)', ] _EMBED_REGEX = [rf'<iframe[^>]+src=[\'"](?P<url>{_VALID_URL[1]})[\'"]'] _TESTS = [{ 'url': 'https://www.bandlab.com/track/04b37e88dba24967b9dac8eb8567ff39_07d7f906fc96ee11b75e000d3a428fff', 'md5': '46f7b43367dd268bbcf0bbe466753b2c', 'info_dict': { 'id': '02d7f906-fc96-ee11-b75e-000d3a428fff', 'ext': 'm4a', 'uploader_id': 'ender_milze', 'track': 'sweet black', 'description': 'composed 
by juanjn3737', 'timestamp': 1702171963, 'view_count': int, 'like_count': int, 'duration': 54.629999999999995, 'title': 'sweet black', 'upload_date': '20231210', 'thumbnail': 'https://bl-prod-images.azureedge.net/v1.0/songs/fa082beb-b856-4730-9170-a57e4e32cc2c/', 'genres': ['Lofi'], 'uploader': 'ender milze', 'comment_count': int, 'media_type': 'revision', }, }, { # Same track as above but post URL 'url': 'https://www.bandlab.com/post/07d7f906-fc96-ee11-b75e-000d3a428fff', 'md5': '46f7b43367dd268bbcf0bbe466753b2c', 'info_dict': { 'id': '02d7f906-fc96-ee11-b75e-000d3a428fff', 'ext': 'm4a', 'uploader_id': 'ender_milze', 'track': 'sweet black', 'description': 'composed by juanjn3737', 'timestamp': 1702171973, 'view_count': int, 'like_count': int, 'duration': 54.629999999999995, 'title': 'sweet black', 'upload_date': '20231210', 'thumbnail': 'https://bl-prod-images.azureedge.net/v1.0/songs/fa082beb-b856-4730-9170-a57e4e32cc2c/', 'genres': ['Lofi'], 'uploader': 'ender milze', 'comment_count': int, 'media_type': 'revision', }, }, { # SharedKey Example 'url': 'https://www.bandlab.com/track/048916c2-c6da-ee11-85f9-6045bd2e11f9?sharedKey=0NNWX8qYAEmI38lWAzCNDA', 'md5': '15174b57c44440e2a2008be9cae00250', 'info_dict': { 'id': '038916c2-c6da-ee11-85f9-6045bd2e11f9', 'ext': 'm4a', 'comment_count': int, 'genres': ['Other'], 'uploader_id': 'user8353034818103753', 'thumbnail': 'https://bl-prod-images.azureedge.net/v1.0/songs/51b18363-da23-4b9b-a29c-2933a3e561ca/', 'timestamp': 1709625771, 'track': 'PodcastMaerchen4b', 'duration': 468.14, 'view_count': int, 'description': 'Podcast: Neues aus der Märchenwelt', 'like_count': int, 'upload_date': '20240305', 'uploader': 'Erna Wageneder', 'title': 'PodcastMaerchen4b', 'media_type': 'revision', }, }, { # Different Revision selected 'url': 'https://www.bandlab.com/track/130343fc-148b-ea11-96d2-0003ffd1fc09?revId=110343fc-148b-ea11-96d2-0003ffd1fc09', 'md5': '74e055ef9325d63f37088772fbfe4454', 'info_dict': { 'id': 
'110343fc-148b-ea11-96d2-0003ffd1fc09', 'ext': 'm4a', 'timestamp': 1588273294, 'thumbnail': 'https://bl-prod-images.azureedge.net/v1.0/users/b612e533-e4f7-4542-9f50-3fcfd8dd822c/', 'description': 'Final Revision.', 'title': 'Replay ( Instrumental)', 'uploader': 'David R Sparks', 'uploader_id': 'davesnothome69', 'view_count': int, 'comment_count': int, 'track': 'Replay ( Instrumental)', 'genres': ['Rock'], 'upload_date': '20200430', 'like_count': int, 'duration': 279.43, 'media_type': 'revision', }, }, { # Video 'url': 'https://www.bandlab.com/post/5cdf9036-3857-ef11-991a-6045bd36e0d9', 'md5': '8caa2ef28e86c1dacf167293cfdbeba9', 'info_dict': { 'id': '5cdf9036-3857-ef11-991a-6045bd36e0d9', 'ext': 'mp4', 'duration': 44.705, 'thumbnail': 'https://bl-prod-images.azureedge.net/v1.0/videos/67c6cef1-cef6-40d3-831e-a55bc1dcb972/', 'comment_count': int, 'title': 'backing vocals', 'uploader_id': 'marliashya', 'uploader': 'auraa', 'like_count': int, 'description': 'backing vocals', 'media_type': 'video', }, }, { # Embed Example 'url': 'https://www.bandlab.com/embed/?blur=false&id=014de0a4-7d82-ea11-a94c-0003ffd19c0f', 'md5': 'a4ad05cb68c54faaed9b0a8453a8cf4a', 'info_dict': { 'id': '014de0a4-7d82-ea11-a94c-0003ffd19c0f', 'ext': 'm4a', 'comment_count': int, 'genres': ['Electronic'], 'uploader': 'Charlie Henson', 'timestamp': 1587328674, 'upload_date': '20200419', 'view_count': int, 'track': 'Positronic Meltdown', 'duration': 318.55, 'thumbnail': 'https://bl-prod-images.azureedge.net/v1.0/songs/87165bc3-5439-496e-b1f7-a9f13b541ff2/', 'description': 'Checkout my tracks at AOMX http://aomxsounds.com/', 'uploader_id': 'microfreaks', 'title': 'Positronic Meltdown', 'like_count': int, 'media_type': 'revision', }, }, { # Track without revisions available 'url': 'https://www.bandlab.com/track/55767ac51789ea11a94c0003ffd1fc09_2f007b0a37b94ec7a69bc25ae15108a5', 'md5': 'f05d68a3769952c2d9257c473e14c15f', 'info_dict': { 'id': 
'55767ac51789ea11a94c0003ffd1fc09_2f007b0a37b94ec7a69bc25ae15108a5', 'ext': 'm4a', 'track': 'insame', 'like_count': int, 'duration': 84.03, 'title': 'insame', 'view_count': int, 'comment_count': int, 'uploader': 'Sorakime', 'uploader_id': 'sorakime', 'thumbnail': 'https://bl-prod-images.azureedge.net/v1.0/users/572a351a-0f3a-4c6a-ac39-1a5defdeeb1c/', 'timestamp': 1691162128, 'upload_date': '20230804', 'media_type': 'track', }, }, { 'url': 'https://www.bandlab.com/revision/014de0a4-7d82-ea11-a94c-0003ffd19c0f', 'only_matching': True, }] _WEBPAGE_TESTS = [{ 'url': 'https://phantomluigi.github.io/', 'info_dict': { 'id': 'e14223c3-7871-ef11-bdfd-000d3a980db3', 'ext': 'm4a', 'view_count': int, 'upload_date': '20240913', 'uploader_id': 'phantommusicofficial', 'timestamp': 1726194897, 'uploader': 'Phantom', 'comment_count': int, 'genres': ['Progresive Rock'], 'description': 'md5:a38cd668f7a2843295ef284114f18429', 'duration': 225.23, 'like_count': int, 'title': 'Vermilion Pt. 2 (Cover)', 'track': 'Vermilion Pt. 
2 (Cover)', 'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/songs/62b10750-7aef-4f42-ad08-1af52f577e97/', 'media_type': 'revision', }, }] def _real_extract(self, url): display_id, url_type = self._match_valid_url(url).group('id', 'url_type') qs = parse_qs(url) revision_id = traverse_obj(qs, (('revId', 'id'), 0, any)) if url_type == 'revision': revision_id = display_id revision_data = None if not revision_id: post_data = self._call_api( 'posts', display_id, note='Downloading post data', query=traverse_obj(qs, {'sharedKey': ('sharedKey', 0)})) revision_id = traverse_obj(post_data, (('revisionId', ('revision', 'id')), {str}, any)) revision_data = traverse_obj(post_data, ('revision', {dict})) if not revision_data and not revision_id: post_type = post_data.get('type') if post_type == 'Video': return self._parse_video(post_data, url=url) if post_type == 'Track': return self._parse_track(post_data, url=url) raise ExtractorError(f'Could not extract data for post type {post_type!r}') if not revision_data: revision_data = self._call_api( 'revisions', revision_id, note='Downloading revision data', query={'edit': 'false'}) return self._parse_revision(revision_data, url=url) class BandlabPlaylistIE(BandlabBaseIE): _VALID_URL = [ r'https?://(?:www\.)?bandlab.com/(?:[\w]+/)?(?P<type>albums|collections)/(?P<id>[\da-f-]+)', r'https?://(?:www\.)?bandlab.com/(?P<type>embed)/collection/\?(?:[^#]*&)?id=(?P<id>[\da-f-]+)', ] _EMBED_REGEX = [rf'<iframe[^>]+src=[\'"](?P<url>{_VALID_URL[1]})[\'"]'] _TESTS = [{ 'url': 'https://www.bandlab.com/davesnothome69/albums/89b79ea6-de42-ed11-b495-00224845aac7', 'info_dict': { 'thumbnail': 'https://bl-prod-images.azureedge.net/v1.3/albums/69507ff3-579a-45be-afca-9e87eddec944/', 'release_date': '20221003', 'title': 'Remnants', 'album': 'Remnants', 'like_count': int, 'album_type': 'LP', 'description': 'A collection of some feel good, rock hits.', 'comment_count': int, 'view_count': int, 'id': '89b79ea6-de42-ed11-b495-00224845aac7', 'uploader': 
'David R Sparks', 'uploader_id': 'davesnothome69', }, 'playlist_count': 10, }, { 'url': 'https://www.bandlab.com/slytheband/collections/955102d4-1040-ef11-86c3-000d3a42581b', 'info_dict': { 'id': '955102d4-1040-ef11-86c3-000d3a42581b', 'timestamp': 1720762659, 'view_count': int, 'title': 'My Shit 🖤', 'uploader_id': 'slytheband', 'uploader': '𝓢𝓛𝓨', 'upload_date': '20240712', 'like_count': int, 'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/collections/2c64ca12-b180-4b76-8587-7a8da76bddc8/', }, 'playlist_count': 15, }, { # Embeds can contain both albums and collections with the same URL pattern. This is an album 'url': 'https://www.bandlab.com/embed/collection/?id=12cc6f7f-951b-ee11-907c-00224844f303', 'info_dict': { 'id': '12cc6f7f-951b-ee11-907c-00224844f303', 'release_date': '20230706', 'description': 'This is a collection of songs I created when I had an Amiga computer.', 'view_count': int, 'title': 'Mark Salud The Amiga Collection', 'uploader_id': 'mssirmooth1962', 'comment_count': int, 'thumbnail': 'https://bl-prod-images.azureedge.net/v1.3/albums/d618bd7b-0537-40d5-bdd8-61b066e77d59/', 'like_count': int, 'uploader': 'Mark Salud', 'album': 'Mark Salud The Amiga Collection', 'album_type': 'LP', }, 'playlist_count': 24, }, { # Tracks without revision id 'url': 'https://www.bandlab.com/embed/collection/?id=e98aafb5-d932-ee11-b8f0-00224844c719', 'info_dict': { 'like_count': int, 'uploader_id': 'sorakime', 'comment_count': int, 'uploader': 'Sorakime', 'view_count': int, 'description': 'md5:4ec31c568a5f5a5a2b17572ea64c3825', 'release_date': '20230812', 'title': 'Art', 'album': 'Art', 'album_type': 'Album', 'id': 'e98aafb5-d932-ee11-b8f0-00224844c719', 'thumbnail': 'https://bl-prod-images.azureedge.net/v1.3/albums/20c890de-e94a-4422-828a-2da6377a13c8/', }, 'playlist_count': 13, }, { 'url': 'https://www.bandlab.com/albums/89b79ea6-de42-ed11-b495-00224845aac7', 'only_matching': True, }] def _entries(self, album_data): for post in traverse_obj(album_data, 
('posts', lambda _, v: v['type'])): post_type = post['type'] if post_type == 'Revision': yield self._parse_revision(post.get('revision')) elif post_type == 'Track': yield self._parse_track(post) elif post_type == 'Video': yield self._parse_video(post) else: self.report_warning(f'Skipping unknown post type: "{post_type}"') def _real_extract(self, url): playlist_id, playlist_type = self._match_valid_url(url).group('id', 'type') endpoints = { 'albums': ['albums'], 'collections': ['collections'], 'embed': ['collections', 'albums'], }.get(playlist_type) for endpoint in endpoints: playlist_data = self._call_api( endpoint, playlist_id, note=f'Downloading {endpoint[:-1]} data', fatal=False, expected_status=404) if not playlist_data.get('errorCode'): playlist_type = endpoint break if error_code := playlist_data.get('errorCode'): raise ExtractorError(f'Could not find playlist data. Error code: "{error_code}"') return self.playlist_result( self._entries(playlist_data), playlist_id, **traverse_obj(playlist_data, { 'title': ('name', {str}), 'description': ('description', {str}), 'uploader': ('creator', 'name', {str}), 'uploader_id': ('creator', 'username', {str}), 'timestamp': ('createdOn', {parse_iso8601}), 'release_date': ('releaseDate', {lambda x: x.replace('-', '')}, filter), 'thumbnail': ('picture', ('original', 'url'), {url_or_none}, any), 'like_count': ('counters', 'likes', {int_or_none}), 'comment_count': ('counters', 'comments', {int_or_none}), 'view_count': ('counters', 'plays', {int_or_none}), }), **(traverse_obj(playlist_data, { 'album': ('name', {str}), 'album_type': ('type', {str}), }) if playlist_type == 'albums' else {}))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/allstar.py
yt_dlp/extractor/allstar.py
import functools import json from .common import InfoExtractor from ..utils import ( ExtractorError, OnDemandPagedList, int_or_none, join_nonempty, parse_qs, urljoin, ) from ..utils.traversal import traverse_obj _FIELDS = ''' _id clipImageSource clipImageThumb clipLink clipTitle createdDate shareId user { _id } username views''' _EXTRA_FIELDS = ''' clipLength clipSizeBytes''' _QUERIES = { 'clip': '''query ($id: String!) { video: getClip(clipIdentifier: $id) { %s %s } }''' % (_FIELDS, _EXTRA_FIELDS), # noqa: UP031 'montage': '''query ($id: String!) { video: getMontage(clipIdentifier: $id) { %s } }''' % _FIELDS, # noqa: UP031 'Clips': '''query ($page: Int!, $user: String!, $game: Int) { videos: clips(search: createdDate, page: $page, user: $user, mobile: false, game: $game) { data { %s %s } } }''' % (_FIELDS, _EXTRA_FIELDS), # noqa: UP031 'Montages': '''query ($page: Int!, $user: String!) { videos: montages(search: createdDate, page: $page, user: $user) { data { %s } } }''' % _FIELDS, # noqa: UP031 'Mobile Clips': '''query ($page: Int!, $user: String!) 
{ videos: clips(search: createdDate, page: $page, user: $user, mobile: true) { data { %s %s } } }''' % (_FIELDS, _EXTRA_FIELDS), # noqa: UP031 } class AllstarBaseIE(InfoExtractor): @staticmethod def _parse_video_data(video_data): def media_url_or_none(path): return urljoin('https://media.allstar.gg/', path) info = traverse_obj(video_data, { 'id': ('_id', {str}), 'display_id': ('shareId', {str}), 'title': ('clipTitle', {str}), 'url': ('clipLink', {media_url_or_none}), 'thumbnails': (('clipImageThumb', 'clipImageSource'), {'url': {media_url_or_none}}), 'duration': ('clipLength', {int_or_none}), 'filesize': ('clipSizeBytes', {int_or_none}), 'timestamp': ('createdDate', {int_or_none(scale=1000)}), 'uploader': ('username', {str}), 'uploader_id': ('user', '_id', {str}), 'view_count': ('views', {int_or_none}), }) if info.get('id') and info.get('url'): basename = 'clip' if '/clips/' in info['url'] else 'montage' info['webpage_url'] = f'https://allstar.gg/{basename}?{basename}={info["id"]}' info.update({ 'extractor_key': AllstarIE.ie_key(), 'extractor': AllstarIE.IE_NAME, 'uploader_url': urljoin('https://allstar.gg/u/', info.get('uploader_id')), }) return info def _call_api(self, query, variables, path, video_id=None, note=None): response = self._download_json( 'https://a1.allstar.gg/graphql', video_id, note=note, headers={'content-type': 'application/json'}, data=json.dumps({'variables': variables, 'query': query}).encode()) errors = traverse_obj(response, ('errors', ..., 'message', {str})) if errors: raise ExtractorError('; '.join(errors)) return traverse_obj(response, path) class AllstarIE(AllstarBaseIE): _VALID_URL = r'https?://(?:www\.)?allstar\.gg/(?P<type>(?:clip|montage))\?(?P=type)=(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://allstar.gg/clip?clip=64482c2da9eec30008a67d1b', 'info_dict': { 'id': '64482c2da9eec30008a67d1b', 'title': '4K on Inferno', 'url': 'md5:66befb5381eef0c9456026386c25fa55', 'thumbnail': r're:https://media\.allstar\.gg/.+\.(?:png|jpg)$', 
'uploader': 'chrk.', 'ext': 'mp4', 'duration': 20, 'filesize': 21199257, 'timestamp': 1682451501, 'uploader_id': '62b8bdfc9021052f7905882d', 'uploader_url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d', 'upload_date': '20230425', 'view_count': int, }, }, { 'url': 'https://allstar.gg/clip?clip=8LJLY4JKB', 'info_dict': { 'id': '64a1ec6b887f4c0008dc50b8', 'display_id': '8LJLY4JKB', 'title': 'AK-47 3K on Mirage', 'url': 'md5:dde224fd12f035c0e2529a4ae34c4283', 'ext': 'mp4', 'thumbnail': r're:https://media\.allstar\.gg/.+\.(?:png|jpg)$', 'duration': 16, 'filesize': 30175859, 'timestamp': 1688333419, 'uploader': 'cherokee', 'uploader_id': '62b8bdfc9021052f7905882d', 'uploader_url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d', 'upload_date': '20230702', 'view_count': int, }, }, { 'url': 'https://allstar.gg/montage?montage=643e64089da7e9363e1fa66c', 'info_dict': { 'id': '643e64089da7e9363e1fa66c', 'display_id': 'APQLGM2IMXW', 'title': 'cherokee Rapid Fire Snipers Montage', 'url': 'md5:a3ee356022115db2b27c81321d195945', 'thumbnail': r're:https://media\.allstar\.gg/.+\.(?:png|jpg)$', 'ext': 'mp4', 'timestamp': 1681810448, 'uploader': 'cherokee', 'uploader_id': '62b8bdfc9021052f7905882d', 'uploader_url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d', 'upload_date': '20230418', 'view_count': int, }, }, { 'url': 'https://allstar.gg/montage?montage=RILJMH6QOS', 'info_dict': { 'id': '64a2697372ce3703de29e868', 'display_id': 'RILJMH6QOS', 'title': 'cherokee Rapid Fire Snipers Montage', 'url': 'md5:d5672e6f88579730c2310a80fdbc4030', 'thumbnail': r're:https://media\.allstar\.gg/.+\.(?:png|jpg)$', 'ext': 'mp4', 'timestamp': 1688365434, 'uploader': 'cherokee', 'uploader_id': '62b8bdfc9021052f7905882d', 'uploader_url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d', 'upload_date': '20230703', 'view_count': int, }, }] def _real_extract(self, url): query_id, video_id = self._match_valid_url(url).group('type', 'id') return self._parse_video_data( self._call_api( 
_QUERIES.get(query_id), {'id': video_id}, ('data', 'video'), video_id)) class AllstarProfileIE(AllstarBaseIE): _VALID_URL = r'https?://(?:www\.)?allstar\.gg/(?:profile\?user=|u/)(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://allstar.gg/profile?user=62b8bdfc9021052f7905882d', 'info_dict': { 'id': '62b8bdfc9021052f7905882d-clips', 'title': 'cherokee - Clips', }, 'playlist_mincount': 15, }, { 'url': 'https://allstar.gg/u/cherokee?game=730&view=Clips', 'info_dict': { 'id': '62b8bdfc9021052f7905882d-clips-730', 'title': 'cherokee - Clips - 730', }, 'playlist_mincount': 15, }, { 'url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d?view=Montages', 'info_dict': { 'id': '62b8bdfc9021052f7905882d-montages', 'title': 'cherokee - Montages', }, 'playlist_mincount': 4, }, { 'url': 'https://allstar.gg/profile?user=cherokee&view=Mobile Clips', 'info_dict': { 'id': '62b8bdfc9021052f7905882d-mobile', 'title': 'cherokee - Mobile Clips', }, 'playlist_mincount': 1, }] _PAGE_SIZE = 10 def _get_page(self, user_id, display_id, game, query, page_num): page_num += 1 for video_data in self._call_api( query, { 'user': user_id, 'page': page_num, 'game': game, }, ('data', 'videos', 'data'), display_id, f'Downloading page {page_num}'): yield self._parse_video_data(video_data) def _real_extract(self, url): display_id = self._match_id(url) profile_data = self._download_json( urljoin('https://api.allstar.gg/v1/users/profile/', display_id), display_id) user_id = traverse_obj(profile_data, ('data', ('_id'), {str})) if not user_id: raise ExtractorError('Unable to extract the user id') username = traverse_obj(profile_data, ('data', 'profile', ('username'), {str})) url_query = parse_qs(url) game = traverse_obj(url_query, ('game', 0, {int_or_none})) query_id = traverse_obj(url_query, ('view', 0), default='Clips') if query_id not in ('Clips', 'Montages', 'Mobile Clips'): raise ExtractorError(f'Unsupported playlist URL type {query_id!r}') return self.playlist_result( OnDemandPagedList( functools.partial( 
self._get_page, user_id, display_id, game, _QUERIES.get(query_id)), self._PAGE_SIZE), playlist_id=join_nonempty(user_id, query_id.lower().split()[0], game), playlist_title=join_nonempty((username or display_id), query_id, game, delim=' - '))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/moviezine.py
yt_dlp/extractor/moviezine.py
from .common import InfoExtractor class MoviezineIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?moviezine\.se/video/(?P<id>[^?#]+)' _TEST = { 'url': 'http://www.moviezine.se/video/205866', 'info_dict': { 'id': '205866', 'ext': 'mp4', 'title': 'Oculus - Trailer 1', 'description': 'md5:40cc6790fc81d931850ca9249b40e8a4', 'thumbnail': r're:http://.*\.jpg', }, } def _real_extract(self, url): mobj = self._match_valid_url(url) video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) jsplayer = self._download_webpage(f'http://www.moviezine.se/api/player.js?video={video_id}', video_id, 'Downloading js api player') formats = [{ 'format_id': 'sd', 'url': self._html_search_regex(r'file: "(.+?)",', jsplayer, 'file'), 'quality': 0, 'ext': 'mp4', }] return { 'id': video_id, 'title': self._search_regex(r'title: "(.+?)",', jsplayer, 'title'), 'thumbnail': self._search_regex(r'image: "(.+?)",', jsplayer, 'image'), 'formats': formats, 'description': self._og_search_description(webpage), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/clubic.py
yt_dlp/extractor/clubic.py
from .common import InfoExtractor from ..utils import ( clean_html, qualities, ) class ClubicIE(InfoExtractor): _WORKING = False _VALID_URL = r'https?://(?:www\.)?clubic\.com/video/(?:[^/]+/)*video.*-(?P<id>[0-9]+)\.html' _TESTS = [{ 'url': 'http://www.clubic.com/video/clubic-week/video-clubic-week-2-0-le-fbi-se-lance-dans-la-photo-d-identite-448474.html', 'md5': '1592b694ba586036efac1776b0b43cd3', 'info_dict': { 'id': '448474', 'ext': 'mp4', 'title': 'Clubic Week 2.0 : le FBI se lance dans la photo d\u0092identité', 'description': 're:Gueule de bois chez Nokia. Le constructeur a indiqué cette.*', 'thumbnail': r're:^http://img\.clubic\.com/.*\.jpg$', }, }, { 'url': 'http://www.clubic.com/video/video-clubic-week-2-0-apple-iphone-6s-et-plus-mais-surtout-le-pencil-469792.html', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) player_url = f'http://player.m6web.fr/v1/player/clubic/{video_id}.html' player_page = self._download_webpage(player_url, video_id) config = self._parse_json(self._search_regex( r'(?m)M6\.Player\.config\s*=\s*(\{.+?\});$', player_page, 'configuration'), video_id) video_info = config['videoInfo'] sources = config['sources'] quality_order = qualities(['sd', 'hq']) formats = [{ 'format_id': src['streamQuality'], 'url': src['src'], 'quality': quality_order(src['streamQuality']), } for src in sources] return { 'id': video_id, 'title': video_info['title'], 'formats': formats, 'description': clean_html(video_info.get('description')), 'thumbnail': config.get('poster'), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/theguardian.py
yt_dlp/extractor/theguardian.py
import itertools from .common import InfoExtractor from ..utils import ( clean_html, extract_attributes, get_element_by_class, get_element_html_by_class, get_elements_html_by_class, parse_qs, traverse_obj, unified_strdate, urljoin, ) class TheGuardianPodcastIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?theguardian\.com/\w+/audio/\d{4}/\w{3}/\d{1,2}/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://www.theguardian.com/news/audio/2023/nov/03/we-are-just-getting-started-the-plastic-eating-bacteria-that-could-change-the-world-podcast', 'md5': 'd1771744681789b4cd7da2a08e487702', 'info_dict': { 'id': 'we-are-just-getting-started-the-plastic-eating-bacteria-that-could-change-the-world-podcast', 'ext': 'mp3', 'title': '‘We are just getting started’: the plastic-eating bacteria that could change the world – podcast', 'description': 'md5:cfd3df2791d394d2ab62cd571d5207ee', 'creator': 'Stephen Buranyi', 'thumbnail': 'md5:73c12558fcb3b0e2a59422bfb33b3f79', 'release_date': '20231103', }, }, { 'url': 'https://www.theguardian.com/news/audio/2023/oct/30/the-trials-of-robert-habeck-is-the-worlds-most-powerful-green-politician-doomed-to-fail-podcast', 'md5': 'd1771744681789b4cd7da2a08e487702', 'info_dict': { 'id': 'the-trials-of-robert-habeck-is-the-worlds-most-powerful-green-politician-doomed-to-fail-podcast', 'ext': 'mp3', 'title': 'The trials of Robert Habeck: is the world’s most powerful green politician doomed to fail? 
– podcast', 'description': 'md5:1b5cf6582d1771c6b7077784b5456994', 'creator': 'Philip Oltermann', 'thumbnail': 'md5:6e5c5ec43843e956e20be793722e9080', 'release_date': '20231030', }, }, { 'url': 'https://www.theguardian.com/football/audio/2023/nov/06/arsenal-feel-hard-done-by-and-luton-hold-liverpool-football-weekly', 'md5': 'a2fcff6f8e060a95b1483295273dc35e', 'info_dict': { 'id': 'arsenal-feel-hard-done-by-and-luton-hold-liverpool-football-weekly', 'ext': 'mp3', 'title': 'Arsenal feel hard done by and Luton hold Liverpool – Football Weekly', 'description': 'md5:286a9fbddaeb7c83cc65d1c4a5330b2a', 'creator': 'Max Rushden', 'thumbnail': 'md5:93eb7d6440f1bb94eb3a6cad63f48afd', 'release_date': '20231106', }, }, { 'url': 'https://www.theguardian.com/politics/audio/2023/nov/02/the-covid-inquiry-politics-weekly-uk-podcast', 'md5': '06a0f7e9701a80c8064a5d35690481ec', 'info_dict': { 'id': 'the-covid-inquiry-politics-weekly-uk-podcast', 'ext': 'mp3', 'title': 'The Covid inquiry | Politics Weekly UK - podcast', 'description': 'md5:207c98859c14903582b17d25b014046e', 'creator': 'Gaby Hinsliff', 'thumbnail': 'md5:28932a7b5a25b057be330d2ed70ea7f3', 'release_date': '20231102', }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) return { 'id': video_id, 'title': self._og_search_title(webpage) or get_element_by_class('content__headline', webpage), 'description': self._og_search_description(webpage), 'creator': self._html_search_meta('author', webpage), 'thumbnail': self._og_search_thumbnail(webpage), 'release_date': unified_strdate(self._html_search_meta('article:published_time', webpage)), 'url': extract_attributes(get_element_html_by_class( 'podcast__player', webpage) or '').get('data-source'), } class TheGuardianPodcastPlaylistIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?theguardian\.com/\w+/series/(?P<id>[\w-]+)(?:\?page=\d+)?' 
_TESTS = [{ 'url': 'https://www.theguardian.com/football/series/theguardianswomensfootballweekly', 'info_dict': { 'id': 'theguardianswomensfootballweekly', 'title': "The Guardian's Women's Football Weekly", 'description': 'md5:e2cc021311e582d29935a73614a43f51', }, 'playlist_mincount': 69, }, { 'url': 'https://www.theguardian.com/news/series/todayinfocus?page=2', 'info_dict': { 'id': 'todayinfocus', 'title': 'Today in Focus', 'description': 'md5:0f097764fc0d359e0b6eb537be0387e2', }, 'playlist_mincount': 1261, }, { 'url': 'https://www.theguardian.com/news/series/the-audio-long-read', 'info_dict': { 'id': 'the-audio-long-read', 'title': 'The Audio Long Read', 'description': 'md5:5462994a27527309562b25b6defc4ef3', }, 'playlist_mincount': 996, }] def _entries(self, url, playlist_id): for page in itertools.count(1): webpage, urlh = self._download_webpage_handle( url, playlist_id, f'Downloading page {page}', query={'page': page}) if 'page' not in parse_qs(urlh.url): break episodes = get_elements_html_by_class('fc-item--type-media', webpage) yield from traverse_obj(episodes, (..., {extract_attributes}, 'data-id')) def _real_extract(self, url): podcast_id = self._match_id(url) webpage = self._download_webpage(url, podcast_id) title = clean_html(get_element_by_class( 'index-page-header__title', webpage) or get_element_by_class('flagship-audio__title', webpage)) description = self._og_search_description(webpage) or self._html_search_meta( 'description', webpage) return self.playlist_from_matches( self._entries(url, podcast_id), podcast_id, title, description=description, ie=TheGuardianPodcastIE, getter=urljoin('https://www.theguardian.com'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/franceinter.py
yt_dlp/extractor/franceinter.py
from .common import InfoExtractor from ..utils import month_by_name class FranceInterIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?franceinter\.fr/emissions/(?P<id>[^?#]+)' _TEST = { 'url': 'https://www.franceinter.fr/emissions/affaires-sensibles/affaires-sensibles-07-septembre-2016', 'md5': '9e54d7bdb6fdc02a841007f8a975c094', 'info_dict': { 'id': 'affaires-sensibles/affaires-sensibles-07-septembre-2016', 'ext': 'mp3', 'title': 'Affaire Cahuzac : le contentieux du compte en Suisse', 'description': 'md5:401969c5d318c061f86bda1fa359292b', 'thumbnail': r're:^https?://.*\.jpg', 'upload_date': '20160907', }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) video_url = self._search_regex( r'(?s)<div[^>]+class=["\']page-diffusion["\'][^>]*>.*?<button[^>]+data-url=(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 'video url', group='url') title = self._og_search_title(webpage) description = self._og_search_description(webpage) thumbnail = self._html_search_meta(['og:image', 'twitter:image'], webpage) upload_date_str = self._search_regex( r'class=["\']\s*cover-emission-period\s*["\'][^>]*>[^<]+\s+(\d{1,2}\s+[^\s]+\s+\d{4})<', webpage, 'upload date', fatal=False) if upload_date_str: upload_date_list = upload_date_str.split() upload_date_list.reverse() upload_date_list[1] = '%02d' % (month_by_name(upload_date_list[1], lang='fr') or 0) upload_date_list[2] = '%02d' % int(upload_date_list[2]) upload_date = ''.join(upload_date_list) else: upload_date = None return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'upload_date': upload_date, 'formats': [{ 'url': video_url, 'vcodec': 'none', }], }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/on24.py
yt_dlp/extractor/on24.py
from .common import InfoExtractor from ..utils import ( int_or_none, strip_or_none, try_get, urljoin, ) class On24IE(InfoExtractor): IE_NAME = 'on24' IE_DESC = 'ON24' _ID_RE = r'(?P<id>\d{7})' _KEY_RE = r'(?P<key>[0-9A-F]{32})' _URL_BASE_RE = r'https?://event\.on24\.com' _URL_QUERY_RE = rf'(?:[^#]*&)?eventid={_ID_RE}&(?:[^#]+&)?key={_KEY_RE}' _VALID_URL = [ rf'{_URL_BASE_RE}/wcc/r/{_ID_RE}/{_KEY_RE}', rf'{_URL_BASE_RE}/eventRegistration/console/(?:EventConsoleApollo\.jsp|apollox/mainEvent/?)\?{_URL_QUERY_RE}', rf'{_URL_BASE_RE}/eventRegistration/EventLobbyServlet/?\?{_URL_QUERY_RE}', ] _TESTS = [{ 'url': 'https://event.on24.com/eventRegistration/console/EventConsoleApollo.jsp?uimode=nextgeneration&eventid=2197467&sessionid=1&key=5DF57BE53237F36A43B478DD36277A84&contenttype=A&eventuserid=305999&playerwidth=1000&playerheight=650&caller=previewLobby&text_language_id=en&format=fhaudio&newConsole=false', 'info_dict': { 'id': '2197467', 'ext': 'wav', 'title': 'Pearson Test of English General/Pearson English International Certificate Teacher Training Guide', 'upload_date': '20200219', 'timestamp': 1582149600.0, 'view_count': int, }, }, { 'url': 'https://event.on24.com/wcc/r/2639291/82829018E813065A122363877975752E?mode=login&email=johnsmith@gmail.com', 'only_matching': True, }, { 'url': 'https://event.on24.com/eventRegistration/console/EventConsoleApollo.jsp?&eventid=2639291&sessionid=1&username=&partnerref=&format=fhvideo1&mobile=&flashsupportedmobiledevice=&helpcenter=&key=82829018E813065A122363877975752E&newConsole=true&nxChe=true&newTabCon=true&text_language_id=en&playerwidth=748&playerheight=526&eventuserid=338788762&contenttype=A&mediametricsessionid=384764716&mediametricid=3558192&usercd=369267058&mode=launch', 'only_matching': True, }, { 'url': 'https://event.on24.com/eventRegistration/EventLobbyServlet?target=reg20.jsp&eventid=3543176&key=BC0F6B968B67C34B50D461D40FDB3E18&groupId=3143628', 'only_matching': True, }, { 'url': 
'https://event.on24.com/eventRegistration/console/apollox/mainEvent?&eventid=4843671&sessionid=1&username=&partnerref=&format=fhvideo1&mobile=&flashsupportedmobiledevice=&helpcenter=&key=4EAC9B5C564CC98FF29E619B06A2F743&newConsole=true&nxChe=true&newTabCon=true&consoleEarEventConsole=false&consoleEarCloudApi=false&text_language_id=en&playerwidth=748&playerheight=526&referrer=https%3A%2F%2Fevent.on24.com%2Finterface%2Fregistration%2Fautoreg%2Findex.html%3Fsessionid%3D1%26eventid%3D4843671%26key%3D4EAC9B5C564CC98FF29E619B06A2F743%26email%3D000a3e42-7952-4dd6-8f8a-34c38ea3cf02%2540platform%26firstname%3Ds%26lastname%3Ds%26deletecookie%3Dtrue%26event_email%3DN%26marketing_email%3DN%26std1%3D0642572014177%26std2%3D0642572014179%26std3%3D550165f7-a44e-4725-9fe6-716f89908c2b%26std4%3D0&eventuserid=745776448&contenttype=A&mediametricsessionid=640613707&mediametricid=6810717&usercd=745776448&mode=launch', 'only_matching': True, }] def _real_extract(self, url): event_id, event_key = self._match_valid_url(url).group('id', 'key') event_data = self._download_json( 'https://event.on24.com/apic/utilApp/EventConsoleCachedServlet', event_id, query={ 'eventId': event_id, 'displayProfile': 'player', 'key': event_key, 'contentType': 'A', }) event_id = str(try_get(event_data, lambda x: x['presentationLogInfo']['eventid'])) or event_id language = event_data.get('localelanguagecode') formats = [] for media in event_data.get('mediaUrlInfo', []): media_url = urljoin('https://event.on24.com/media/news/corporatevideo/events/', str(media.get('url'))) if not media_url: continue media_type = media.get('code') if media_type == 'fhvideo1': formats.append({ 'format_id': 'video', 'url': media_url, 'language': language, 'ext': 'mp4', 'vcodec': 'avc1.640020', 'acodec': 'mp4a.40.2', }) elif media_type == 'audio': formats.append({ 'format_id': 'audio', 'url': media_url, 'language': language, 'ext': 'wav', 'vcodec': 'none', 'acodec': 'wav', }) return { 'id': event_id, 'title': 
strip_or_none(event_data.get('description')), 'timestamp': int_or_none(try_get(event_data, lambda x: x['session']['startdate']), 1000), 'webpage_url': f'https://event.on24.com/wcc/r/{event_id}/{event_key}', 'view_count': event_data.get('registrantcount'), 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/rozhlas.py
yt_dlp/extractor/rozhlas.py
import itertools from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, extract_attributes, int_or_none, remove_start, str_or_none, traverse_obj, unified_timestamp, url_or_none, ) class RozhlasIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?prehravac\.rozhlas\.cz/audio/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'http://prehravac.rozhlas.cz/audio/3421320', 'md5': '504c902dbc9e9a1fd50326eccf02a7e2', 'info_dict': { 'id': '3421320', 'ext': 'mp3', 'title': 'Echo Pavla Klusáka (30.06.2015 21:00)', 'description': 'Osmdesátiny Terryho Rileyho jsou skvělou příležitostí proletět se elektronickými i akustickými díly zakladatatele minimalismu, který je aktivní už přes padesát let', }, }, { 'url': 'http://prehravac.rozhlas.cz/audio/3421320/embed', 'only_matching': True, }] def _real_extract(self, url): audio_id = self._match_id(url) webpage = self._download_webpage( f'http://prehravac.rozhlas.cz/audio/{audio_id}', audio_id) title = self._html_search_regex( r'<h3>(.+?)</h3>\s*<p[^>]*>.*?</p>\s*<div[^>]+id=["\']player-track', webpage, 'title', default=None) or remove_start( self._og_search_title(webpage), 'Radio Wave - ') description = self._html_search_regex( r'<p[^>]+title=(["\'])(?P<url>(?:(?!\1).)+)\1[^>]*>.*?</p>\s*<div[^>]+id=["\']player-track', webpage, 'description', fatal=False, group='url') duration = int_or_none(self._search_regex( r'data-duration=["\'](\d+)', webpage, 'duration', default=None)) return { 'id': audio_id, 'url': f'http://media.rozhlas.cz/_audio/{audio_id}.mp3', 'title': title, 'description': description, 'duration': duration, 'vcodec': 'none', } class RozhlasBaseIE(InfoExtractor): def _extract_formats(self, entry, audio_id): formats = [] for audio in traverse_obj(entry, ('audioLinks', lambda _, v: url_or_none(v['url']))): ext = audio.get('variant') for retry in self.RetryManager(): if retry.attempt > 1: self._sleep(1, audio_id) try: if ext == 'dash': formats.extend(self._extract_mpd_formats( 
audio['url'], audio_id, mpd_id=ext)) elif ext == 'hls': formats.extend(self._extract_m3u8_formats( audio['url'], audio_id, 'm4a', m3u8_id=ext)) else: formats.append({ 'url': audio['url'], 'ext': ext, 'format_id': ext, 'abr': int_or_none(audio.get('bitrate')), 'acodec': ext, 'vcodec': 'none', }) except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 429: retry.error = e.cause else: self.report_warning(e.msg) return formats class RozhlasVltavaIE(RozhlasBaseIE): _VALID_URL = r'https?://(?:\w+\.rozhlas|english\.radio)\.cz/[\w-]+-(?P<id>\d+)' _TESTS = [{ 'url': 'https://wave.rozhlas.cz/papej-masicko-porcujeme-a-bilancujeme-filmy-a-serialy-ktere-letos-zabily-8891337', 'md5': 'ba2fdbc1242fc16771c7695d271ec355', 'info_dict': { 'id': '8891337', 'title': 'md5:21f99739d04ab49d8c189ec711eef4ec', }, 'playlist_count': 1, 'playlist': [{ 'md5': 'ba2fdbc1242fc16771c7695d271ec355', 'info_dict': { 'id': '10520988', 'ext': 'mp3', 'title': 'Papej masíčko! Porcujeme a bilancujeme filmy a seriály, které to letos zabily', 'description': 'md5:1c6d29fb9564e1f17fc1bb83ae7da0bc', 'duration': 1574, 'artist': 'Aleš Stuchlý', 'channel_id': 'radio-wave', }, }], }, { 'url': 'https://wave.rozhlas.cz/poslechnete-si-neklid-podcastovy-thriller-o-vine-strachu-a-vztahu-ktery-zasel-8554744', 'info_dict': { 'id': '8554744', 'title': 'Poslechněte si Neklid. Podcastový thriller o vině, strachu a vztahu, který zašel příliš daleko', }, 'playlist_count': 5, 'playlist': [{ 'md5': '93d4109cf8f40523699ae9c1d4600bdd', 'info_dict': { 'id': '9890713', 'ext': 'mp3', 'title': 'Neklid #1', 'description': '1. díl: Neklid: 1. díl', 'duration': 1025, 'artist': 'Josef Kokta', 'channel_id': 'radio-wave', 'chapter': 'Neklid #1', 'chapter_number': 1, }, }, { 'md5': 'e9763235be4a6dcf94bc8a5bac1ca126', 'info_dict': { 'id': '9890716', 'ext': 'mp3', 'title': 'Neklid #2', 'description': '2. díl: Neklid: 2. 
díl', 'duration': 768, 'artist': 'Josef Kokta', 'channel_id': 'radio-wave', 'chapter': 'Neklid #2', 'chapter_number': 2, }, }, { 'md5': '00b642ea94b78cc949ac84da09f87895', 'info_dict': { 'id': '9890722', 'ext': 'mp3', 'title': 'Neklid #3', 'description': '3. díl: Neklid: 3. díl', 'duration': 607, 'artist': 'Josef Kokta', 'channel_id': 'radio-wave', 'chapter': 'Neklid #3', 'chapter_number': 3, }, }, { 'md5': 'faef97b1b49da7df874740f118c19dea', 'info_dict': { 'id': '9890728', 'ext': 'mp3', 'title': 'Neklid #4', 'description': '4. díl: Neklid: 4. díl', 'duration': 621, 'artist': 'Josef Kokta', 'channel_id': 'radio-wave', 'chapter': 'Neklid #4', 'chapter_number': 4, }, }, { 'md5': '6e729fa39b647325b868d419c76f3efa', 'info_dict': { 'id': '9890734', 'ext': 'mp3', 'title': 'Neklid #5', 'description': '5. díl: Neklid: 5. díl', 'duration': 908, 'artist': 'Josef Kokta', 'channel_id': 'radio-wave', 'chapter': 'Neklid #5', 'chapter_number': 5, }, }], }, { 'url': 'https://dvojka.rozhlas.cz/karel-siktanc-cerny-jezdec-bily-kun-napinava-pohadka-o-tajemnem-prizraku-8946969', 'info_dict': { 'id': '8946969', 'title': 'Karel Šiktanc: Černý jezdec, bílý kůň. Napínavá pohádka o tajemném přízraku', }, 'playlist_count': 1, 'playlist': [{ 'info_dict': { 'id': '10631121', 'ext': 'm4a', 'title': 'Karel Šiktanc: Černý jezdec, bílý kůň. 
Napínavá pohádka o tajemném přízraku', 'description': 'Karel Šiktanc: Černý jezdec, bílý kůň', 'duration': 2656, 'artist': 'Tvůrčí skupina Drama a literatura', 'channel_id': 'dvojka', }, }], 'params': {'skip_download': 'dash'}, }] def _extract_video(self, entry): audio_id = entry['meta']['ga']['contentId'] chapter_number = traverse_obj(entry, ('meta', 'ga', 'contentSerialPart', {int_or_none})) return { 'id': audio_id, 'chapter': traverse_obj(entry, ('meta', 'ga', 'contentNameShort')) if chapter_number else None, 'chapter_number': chapter_number, 'formats': self._extract_formats(entry, audio_id), **traverse_obj(entry, { 'title': ('meta', 'ga', 'contentName'), 'description': 'title', 'duration': ('duration', {int_or_none}), 'artist': ('meta', 'ga', 'contentAuthor'), 'channel_id': ('meta', 'ga', 'contentCreator'), }), } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) # FIXME: Use get_element_text_and_html_by_tag when it accepts less strict html data = self._parse_json(extract_attributes(self._search_regex( r'(<div class="mujRozhlasPlayer" data-player=\'[^\']+\'>)', webpage, 'player'))['data-player'], video_id)['data'] return { '_type': 'playlist', 'id': str_or_none(data.get('embedId')) or video_id, 'title': traverse_obj(data, ('series', 'title')), 'entries': map(self._extract_video, data['playlist']), } class MujRozhlasIE(RozhlasBaseIE): _VALID_URL = r'https?://(?:www\.)?mujrozhlas\.cz/(?:[^/]+/)*(?P<id>[^/?#&]+)' _TESTS = [{ # single episode extraction 'url': 'https://www.mujrozhlas.cz/vykopavky/ach-jo-zase-teleci-rizek-je-mnohem-min-cesky-nez-jsme-si-mysleli', 'md5': '6f8fd68663e64936623e67c152a669e0', 'info_dict': { 'id': '10787730', 'ext': 'mp3', 'title': 'Ach jo, zase to telecí! 
Řízek je mnohem míň český, než jsme si mysleli', 'description': 'md5:db7141e9caaedc9041ec7cefb9a62908', 'timestamp': 1684915200, 'modified_timestamp': 1687550432, 'series': 'Vykopávky', 'thumbnail': 'https://portal.rozhlas.cz/sites/default/files/images/84377046610af6ddc54d910b1dd7a22b.jpg', 'channel_id': 'radio-wave', 'upload_date': '20230524', 'modified_date': '20230623', }, }, { # serial extraction 'url': 'https://www.mujrozhlas.cz/radiokniha/jaroslava-janackova-pribeh-tajemneho-psani-o-pramenech-genezi-babicky', 'playlist_mincount': 7, 'info_dict': { 'id': 'bb2b5f4e-ffb4-35a6-a34a-046aa62d6f6b', 'title': 'Jaroslava Janáčková: Příběh tajemného psaní. O pramenech a genezi Babičky', 'description': 'md5:7434d8fac39ac9fee6df098e11dfb1be', }, }, { # show extraction 'url': 'https://www.mujrozhlas.cz/nespavci', 'playlist_mincount': 14, 'info_dict': { 'id': '09db9b37-d0f4-368c-986a-d3439f741f08', 'title': 'Nespavci', 'description': 'md5:c430adcbf9e2b9eac88b745881e814dc', }, }, { # serialPart 'url': 'https://www.mujrozhlas.cz/povidka/gustavo-adolfo-becquer-hora-duchu', 'info_dict': { 'id': '8889035', 'ext': 'm4a', 'title': 'Gustavo Adolfo Bécquer: Hora duchů', 'description': 'md5:343a15257b376c276e210b78e900ffea', 'chapter': 'Hora duchů a Polibek – dva tajemné příběhy Gustava Adolfa Bécquera', 'thumbnail': 'https://portal.rozhlas.cz/sites/default/files/images/2adfe1387fb140634be725c1ccf26214.jpg', 'timestamp': 1708173000, 'episode': 'Episode 1', 'episode_number': 1, 'series': 'Povídka', 'modified_date': '20240217', 'upload_date': '20240217', 'modified_timestamp': 1708173198, 'channel_id': 'vltava', }, 'params': {'skip_download': 'dash'}, }] def _call_api(self, path, item_id, msg='API JSON'): return self._download_json( f'https://api.mujrozhlas.cz/{path}/{item_id}', item_id, note=f'Downloading {msg}', errnote=f'Failed to download {msg}')['data'] def _extract_audio_entry(self, entry): audio_id = entry['meta']['ga']['contentId'] return { 'id': audio_id, 'formats': 
self._extract_formats(entry['attributes'], audio_id), **traverse_obj(entry, { 'title': ('attributes', 'title'), 'description': ('attributes', 'description'), 'episode_number': ('attributes', 'part'), 'series': ('attributes', 'mirroredShow', 'title'), 'chapter': ('attributes', 'mirroredSerial', 'title'), 'artist': ('meta', 'ga', 'contentAuthor'), 'channel_id': ('meta', 'ga', 'contentCreator'), 'timestamp': ('attributes', 'since', {unified_timestamp}), 'modified_timestamp': ('attributes', 'updated', {unified_timestamp}), 'thumbnail': ('attributes', 'asset', 'url', {url_or_none}), }), } def _entries(self, api_url, playlist_id): for page in itertools.count(1): episodes = self._download_json( api_url, playlist_id, note=f'Downloading episodes page {page}', errnote=f'Failed to download episodes page {page}', fatal=False) for episode in traverse_obj(episodes, ('data', lambda _, v: v['meta']['ga']['contentId'])): yield self._extract_audio_entry(episode) api_url = traverse_obj(episodes, ('links', 'next', {url_or_none})) if not api_url: break def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) info = self._search_json(r'\bvar\s+dl\s*=', webpage, 'info json', display_id) entity = info['siteEntityBundle'] if entity in ('episode', 'serialPart'): return self._extract_audio_entry(self._call_api( 'episodes', info['contentId'], 'episode info API JSON')) elif entity in ('show', 'serial'): playlist_id = info['contentShow'].split(':')[0] if entity == 'show' else info['contentId'] data = self._call_api(f'{entity}s', playlist_id, f'{entity} playlist JSON') api_url = data['relationships']['episodes']['links']['related'] return self.playlist_result( self._entries(api_url, playlist_id), playlist_id, **traverse_obj(data, ('attributes', { 'title': 'title', 'description': 'description', }))) else: # `entity == 'person'` not implemented yet by API, ref: # 
https://api.mujrozhlas.cz/persons/8367e456-2a57-379a-91bb-e699619bea49/participation raise ExtractorError(f'Unsupported entity type "{entity}"')
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nitter.py
yt_dlp/extractor/nitter.py
import random import re import urllib.parse from .common import InfoExtractor from ..utils import ( determine_ext, parse_count, remove_end, unified_timestamp, ) class NitterIE(InfoExtractor): # Taken from https://github.com/zedeus/nitter/wiki/Instances NON_HTTP_INSTANCES = ( '3nzoldnxplag42gqjs23xvghtzf6t6yzssrtytnntc6ppc7xxuoneoad.onion', 'nitter.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd.onion', 'nitter7bryz3jv7e3uekphigvmoyoem4al3fynerxkj22dmoxoq553qd.onion', 'npf37k3mtzwxreiw52ccs5ay4e6qt2fkcs2ndieurdyn2cuzzsfyfvid.onion', 'nitter.v6vgyqpa7yefkorazmg5d5fimstmvm2vtbirt6676mt7qmllrcnwycqd.onion', 'i23nv6w3juvzlw32xzoxcqzktegd4i4fu3nmnc2ewv4ggiu4ledwklad.onion', '26oq3gioiwcmfojub37nz5gzbkdiqp7fue5kvye7d4txv4ny6fb4wwid.onion', 'vfaomgh4jxphpbdfizkm5gbtjahmei234giqj4facbwhrfjtcldauqad.onion', 'iwgu3cv7ywf3gssed5iqtavmrlszgsxazkmwwnt4h2kdait75thdyrqd.onion', 'erpnncl5nhyji3c32dcfmztujtl3xaddqb457jsbkulq24zqq7ifdgad.onion', 'ckzuw5misyahmg7j5t5xwwuj3bwy62jfolxyux4brfflramzsvvd3syd.onion', 'jebqj47jgxleaiosfcxfibx2xdahjettuydlxbg64azd4khsxv6kawid.onion', 'nttr2iupbb6fazdpr2rgbooon2tzbbsvvkagkgkwohhodjzj43stxhad.onion', 'nitraeju2mipeziu2wtcrqsxg7h62v5y4eqgwi75uprynkj74gevvuqd.onion', 'nitter.lqs5fjmajyp7rvp4qvyubwofzi6d4imua7vs237rkc4m5qogitqwrgyd.onion', 'ibsboeui2im5o7dxnik3s5yghufumgy5abevtij5nbizequfpu4qi4ad.onion', 'ec5nvbycpfa5k6ro77blxgkyrzbkv7uy6r5cngcbkadtjj2733nm3uyd.onion', 'nitter.i2p', 'u6ikd6zndl3c4dsdq4mmujpntgeevdk5qzkfb57r4tnfeccrn2qa.b32.i2p', 'nitterlgj3n5fgwesu3vxc5h67ruku33nqaoeoocae2mvlzhsu6k7fqd.onion', ) HTTP_INSTANCES = ( 'nitter.lacontrevoie.fr', 'nitter.fdn.fr', 'nitter.1d4.us', 'nitter.kavin.rocks', 'nitter.unixfox.eu', 'nitter.domain.glass', 'nitter.namazso.eu', 'birdsite.xanny.family', 'nitter.moomoo.me', 'bird.trom.tf', 'nitter.it', 'twitter.censors.us', 'nitter.grimneko.de', 'twitter.076.ne.jp', 'nitter.fly.dev', 'notabird.site', 'nitter.weiler.rocks', 'nitter.sethforprivacy.com', 'nitter.cutelab.space', 'nitter.nl', 
'nitter.mint.lgbt', 'nitter.bus-hit.me', 'nitter.esmailelbob.xyz', 'tw.artemislena.eu', 'nitter.winscloud.net', 'nitter.tiekoetter.com', 'nitter.spaceint.fr', 'nitter.privacy.com.de', 'nitter.poast.org', 'nitter.bird.froth.zone', 'nitter.dcs0.hu', 'twitter.dr460nf1r3.org', 'nitter.garudalinux.org', 'twitter.femboy.hu', 'nitter.cz', 'nitter.privacydev.net', 'nitter.evil.site', 'tweet.lambda.dance', 'nitter.kylrth.com', 'nitter.foss.wtf', 'nitter.priv.pw', 'nitter.tokhmi.xyz', 'nitter.catalyst.sx', 'unofficialbird.com', 'nitter.projectsegfau.lt', 'nitter.eu.projectsegfau.lt', 'singapore.unofficialbird.com', 'canada.unofficialbird.com', 'india.unofficialbird.com', 'nederland.unofficialbird.com', 'uk.unofficialbird.com', 'n.l5.ca', 'nitter.slipfox.xyz', 'nitter.soopy.moe', 'nitter.qwik.space', 'read.whatever.social', 'nitter.rawbit.ninja', 'nt.vern.cc', 'ntr.odyssey346.dev', 'nitter.ir', 'nitter.privacytools.io', 'nitter.sneed.network', 'n.sneed.network', 'nitter.manasiwibi.com', 'nitter.smnz.de', 'nitter.twei.space', 'nitter.inpt.fr', 'nitter.d420.de', 'nitter.caioalonso.com', 'nitter.at', 'nitter.drivet.xyz', 'nitter.pw', 'nitter.nicfab.eu', 'bird.habedieeh.re', 'nitter.hostux.net', 'nitter.adminforge.de', 'nitter.platypush.tech', 'nitter.mask.sh', 'nitter.pufe.org', 'nitter.us.projectsegfau.lt', 'nitter.arcticfoxes.net', 't.com.sb', 'nitter.kling.gg', 'nitter.ktachibana.party', 'nitter.riverside.rocks', 'nitter.girlboss.ceo', 'nitter.lunar.icu', 'twitter.moe.ngo', 'nitter.freedit.eu', 'ntr.frail.duckdns.org', 'nitter.librenode.org', 'n.opnxng.com', 'nitter.plus.st', ) DEAD_INSTANCES = ( # maintenance 'nitter.ethibox.fr', # official, rate limited 'nitter.net', # offline 'is-nitter.resolv.ee', 'lu-nitter.resolv.ee', 'nitter.13ad.de', 'nitter.40two.app', 'nitter.cattube.org', 'nitter.cc', 'nitter.dark.fail', 'nitter.himiko.cloud', 'nitter.koyu.space', 'nitter.mailstation.de', 'nitter.mastodont.cat', 'nitter.tedomum.net', 'nitter.tokhmi.xyz', 
'nitter.weaponizedhumiliation.com', 'nitter.vxempire.xyz', 'tweet.lambda.dance', 'nitter.ca', 'nitter.42l.fr', 'nitter.pussthecat.org', 'nitter.nixnet.services', 'nitter.eu', 'nitter.actionsack.com', 'nitter.hu', 'twitr.gq', 'nittereu.moomoo.me', 'bird.from.tf', 'twitter.grimneko.de', 'nitter.alefvanoon.xyz', 'n.hyperborea.cloud', 'twitter.mstdn.social', 'nitter.silkky.cloud', 'nttr.stream', 'fuckthesacklers.network', 'nitter.govt.land', 'nitter.datatunnel.xyz', 'de.nttr.stream', 'twtr.bch.bar', 'nitter.exonip.de', 'nitter.mastodon.pro', 'nitter.notraxx.ch', 'nitter.skrep.in', 'nitter.snopyta.org', ) INSTANCES = NON_HTTP_INSTANCES + HTTP_INSTANCES + DEAD_INSTANCES _INSTANCES_RE = f'(?:{"|".join(map(re.escape, INSTANCES))})' _VALID_URL = fr'https?://{_INSTANCES_RE}/(?P<uploader_id>.+)/status/(?P<id>[0-9]+)(#.)?' current_instance = random.choice(HTTP_INSTANCES) _TESTS = [ { # GIF (wrapped in mp4) 'url': f'https://{current_instance}/firefox/status/1314279897502629888#m', 'info_dict': { 'id': '1314279897502629888', 'ext': 'mp4', 'title': 'md5:7890a9277da4639ab624dd899424c5d8', 'description': 'md5:5fea96a4d3716c350f8b95b21b3111fe', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'Firefox 🔥', 'uploader_id': 'firefox', 'uploader_url': f'https://{current_instance}/firefox', 'upload_date': '20201008', 'timestamp': 1602183720, 'like_count': int, 'repost_count': int, 'comment_count': int, }, }, { # normal video 'url': f'https://{current_instance}/Le___Doc/status/1299715685392756737#m', 'info_dict': { 'id': '1299715685392756737', 'ext': 'mp4', 'title': 're:^.* - "Je ne prédis jamais rien"\nD Raoult, Août 2020...', 'description': '"Je ne prédis jamais rien"\nD Raoult, Août 2020...', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 're:^Le *Doc', 'uploader_id': 'Le___Doc', 'uploader_url': f'https://{current_instance}/Le___Doc', 'upload_date': '20200829', 'timestamp': 1598711340, 'view_count': int, 'like_count': int, 'repost_count': int, 'comment_count': int, }, }, { # video 
embed in a "Streaming Political Ads" box 'url': f'https://{current_instance}/mozilla/status/1321147074491092994#m', 'info_dict': { 'id': '1321147074491092994', 'ext': 'mp4', 'title': 'md5:8290664aabb43b9189145c008386bf12', 'description': 'md5:9cf2762d49674bc416a191a689fb2aaa', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'Mozilla', 'uploader_id': 'mozilla', 'uploader_url': f'https://{current_instance}/mozilla', 'upload_date': '20201027', 'timestamp': 1603820940, 'view_count': int, 'like_count': int, 'repost_count': int, 'comment_count': int, }, 'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest'], }, { # not the first tweet but main-tweet 'url': f'https://{current_instance}/firefox/status/1354848277481414657#m', 'info_dict': { 'id': '1354848277481414657', 'ext': 'mp4', 'title': 'md5:bef647f03bd1c6b15b687ea70dfc9700', 'description': 'md5:5efba25e2f9dac85ebcd21160cb4341f', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'Firefox 🔥', 'uploader_id': 'firefox', 'uploader_url': f'https://{current_instance}/firefox', 'upload_date': '20210128', 'timestamp': 1611855960, 'view_count': int, 'like_count': int, 'repost_count': int, 'comment_count': int, }, }, { # no OpenGraph title 'url': f'https://{current_instance}/LocalBateman/status/1678455464038735895#m', 'info_dict': { 'id': '1678455464038735895', 'ext': 'mp4', 'title': 'Your Typical Local Man - Local man, what did Romanians ever do to you?', 'description': 'Local man, what did Romanians ever do to you?', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'Your Typical Local Man', 'uploader_id': 'LocalBateman', 'uploader_url': f'https://{current_instance}/LocalBateman', 'upload_date': '20230710', 'timestamp': 1689009900, 'view_count': int, 'like_count': int, 'repost_count': int, 'comment_count': int, }, 'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest'], 'params': {'skip_download': 'm3u8'}, }, ] def _real_extract(self, url): video_id, uploader_id = 
self._match_valid_url(url).group('id', 'uploader_id') parsed_url = urllib.parse.urlparse(url) base_url = f'{parsed_url.scheme}://{parsed_url.netloc}' self._set_cookie(parsed_url.netloc, 'hlsPlayback', 'on') full_webpage = webpage = self._download_webpage(url, video_id) main_tweet_start = full_webpage.find('class="main-tweet"') if main_tweet_start > 0: webpage = full_webpage[main_tweet_start:] video_url = '{}{}'.format(base_url, self._html_search_regex( r'(?:<video[^>]+data-url|<source[^>]+src)="([^"]+)"', webpage, 'video url')) ext = determine_ext(video_url) if ext == 'unknown_video': formats = self._extract_m3u8_formats(video_url, video_id, ext='mp4') else: formats = [{ 'url': video_url, 'ext': ext, }] title = description = self._og_search_description(full_webpage, default=None) or self._html_search_regex( r'<div class="tweet-content[^>]+>([^<]+)</div>', webpage, 'title', fatal=False) uploader_id = self._html_search_regex( r'<a class="username"[^>]+title="@([^"]+)"', webpage, 'uploader id', fatal=False) or uploader_id uploader = self._html_search_regex( r'<a class="fullname"[^>]+title="([^"]+)"', webpage, 'uploader name', fatal=False) if uploader: title = f'{uploader} - {title}' counts = { f'{x[0]}_count': self._html_search_regex( fr'<span[^>]+class="icon-{x[1]}[^>]*></span>([^<]*)</div>', webpage, f'{x[0]} count', fatal=False) for x in (('view', 'play'), ('like', 'heart'), ('repost', 'retweet'), ('comment', 'comment')) } counts = {field: 0 if count == '' else parse_count(count) for field, count in counts.items()} thumbnail = ( self._html_search_meta('og:image', full_webpage, 'thumbnail url') or remove_end('{}{}'.format(base_url, self._html_search_regex( r'<video[^>]+poster="([^"]+)"', webpage, 'thumbnail url', fatal=False)), '%3Asmall')) thumbnails = [ {'id': id_, 'url': f'{thumbnail}%3A{id_}'} for id_ in ('thumb', 'small', 'large', 'medium', 'orig') ] date = self._html_search_regex( r'<span[^>]+class="tweet-date"[^>]*><a[^>]+title="([^"]+)"', webpage, 'upload 
date', default='').replace('·', '') return { 'id': video_id, 'title': title, 'description': description, 'uploader': uploader, 'timestamp': unified_timestamp(date), 'uploader_id': uploader_id, 'uploader_url': f'{base_url}/{uploader_id}', 'formats': formats, 'thumbnails': thumbnails, 'thumbnail': thumbnail, **counts, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/kelbyone.py
yt_dlp/extractor/kelbyone.py
from .common import InfoExtractor from ..utils import int_or_none class KelbyOneIE(InfoExtractor): _WORKING = False _VALID_URL = r'https?://members\.kelbyone\.com/course/(?P<id>[^$&?#/]+)' _TESTS = [{ 'url': 'https://members.kelbyone.com/course/glyn-dewis-mastering-selections/', 'playlist_mincount': 1, 'info_dict': { 'id': 'glyn-dewis-mastering-selections', 'title': 'Trailer - Mastering Selections in Photoshop', }, 'playlist': [{ 'info_dict': { 'id': 'MkiOnLqK', 'ext': 'mp4', 'title': 'Trailer - Mastering Selections in Photoshop', 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e', 'thumbnail': 'https://content.jwplatform.com/v2/media/MkiOnLqK/poster.jpg?width=720', 'timestamp': 1601568639, 'duration': 90, 'upload_date': '20201001', }, }], }] def _entries(self, playlist): for item in playlist: video_id = item['mediaid'] thumbnails = [{ 'url': image.get('src'), 'width': int_or_none(image.get('width')), } for image in item.get('images') or []] formats, subtitles = [], {} for source in item.get('sources') or []: if not source.get('file'): continue if source.get('type') == 'application/vnd.apple.mpegurl': fmts, subs = self._extract_m3u8_formats_and_subtitles(source['file'], video_id) formats.extend(fmts) subtitles = self._merge_subtitles(subs, subtitles) elif source.get('type') == 'audio/mp4': formats.append({ 'format_id': source.get('label'), 'url': source['file'], 'vcodec': 'none', }) else: formats.append({ 'format_id': source.get('label'), 'height': source.get('height'), 'width': source.get('width'), 'url': source['file'], }) for track in item.get('tracks'): if track.get('kind') == 'captions' and track.get('file'): subtitles.setdefault('en', []).append({ 'url': track['file'], }) yield { 'id': video_id, 'title': item['title'], 'description': item.get('description'), 'thumbnails': thumbnails, 'thumbnail': item.get('image'), 'timestamp': item.get('pubdate'), 'duration': item.get('duration'), 'formats': formats, 'subtitles': subtitles, } def _real_extract(self, url): 
item_id = self._match_id(url) webpage = self._download_webpage(url, item_id) playlist_url = self._html_search_regex(r'playlist"\:"(https.*content\.jwplatform\.com.*json)"', webpage, 'playlist url').replace('\\', '') course_data = self._download_json(playlist_url, item_id) return self.playlist_result(self._entries(course_data['playlist']), item_id, course_data.get('title'), course_data.get('description'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/arte.py
yt_dlp/extractor/arte.py
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    GeoRestrictedError,
    int_or_none,
    join_nonempty,
    parse_iso8601,
    parse_qs,
    strip_or_none,
    traverse_obj,
    url_or_none,
)


class ArteTVBaseIE(InfoExtractor):
    # Site languages supported by arte.tv, as a regex alternation
    _ARTE_LANGUAGES = 'fr|de|en|es|it|pl'
    _API_BASE = 'https://api.arte.tv/api/player/v2'


class ArteTVIE(ArteTVBaseIE):
    _VALID_URL = rf'''(?x)
        (?:https?://
            (?:
                (?:www\.)?arte\.tv/(?P<lang>{ArteTVBaseIE._ARTE_LANGUAGES})/videos|
                api\.arte\.tv/api/player/v\d+/config/(?P<lang_2>{ArteTVBaseIE._ARTE_LANGUAGES})
            )
        |arte://program)
            /(?P<id>\d{{6}}-\d{{3}}-[AF]|LIVE)
    '''
    _TESTS = [{
        'url': 'https://www.arte.tv/en/videos/088501-000-A/mexico-stealing-petrol-to-survive/',
        'only_matching': True,
    }, {
        'note': 'No alt_title',
        'url': 'https://www.arte.tv/fr/videos/110371-000-A/la-chaleur-supplice-des-arbres-de-rue/',
        'only_matching': True,
    }, {
        'url': 'https://api.arte.tv/api/player/v2/config/de/100605-013-A',
        'only_matching': True,
    }, {
        'url': 'https://api.arte.tv/api/player/v2/config/de/LIVE',
        'only_matching': True,
    }, {
        'url': 'https://www.arte.tv/de/videos/110203-006-A/zaz/',
        'only_matching': True,
    }, {
        'url': 'https://www.arte.tv/fr/videos/109067-000-A/la-loi-de-teheran/',
        'info_dict': {
            'id': '109067-000-A',
            'ext': 'mp4',
            'description': 'md5:d2ca367b8ecee028dddaa8bd1aebc739',
            'thumbnail': r're:https?://api-cdn\.arte\.tv/img/v2/image/.+',
            'timestamp': 1713927600,
            'duration': 7599,
            'title': 'La loi de Téhéran',
            'upload_date': '20240424',
            'subtitles': {
                'fr': 'mincount:1',
                'fr-acc': 'mincount:1',
                'fr-forced': 'mincount:1',
            },
        },
        'skip': 'Invalid URL',
    }, {
        'note': 'age-restricted',
        'url': 'https://www.arte.tv/de/videos/006785-000-A/the-element-of-crime/',
        'info_dict': {
            'id': '006785-000-A',
            'description': 'md5:c2f94fdfefc8a280e4dab68ab96ab0ba',
            'title': 'The Element of Crime',
            'thumbnail': r're:https?://api-cdn\.arte\.tv/img/v2/image/.+',
            'timestamp': 1696111200,
            'duration': 5849,
            'upload_date': '20230930',
            'ext': 'mp4',
        },
        'skip': '404 Not Found',
    }]
    _GEO_BYPASS = True
    _LANG_MAP = {  # ISO639 -> French abbreviations
        'fr': 'F',
        'de': 'A',
        'en': 'E[ANG]',
        'es': 'E[ESP]',
        'it': 'E[ITA]',
        'pl': 'E[POL]',
        # XXX: probably means mixed; <https://www.arte.tv/en/videos/107710-029-A/dispatches-from-ukraine-local-journalists-report/>
        # uses this code for audio that happens to be in Ukrainian, but the manifest uses the ISO code 'mul' (mixed)
        'mul': 'EU',
    }
    # Parses version codes such as 'VOF-STMF' into voice/subtitle components
    _VERSION_CODE_RE = re.compile(r'''(?x)
        V
        (?P<original_voice>O?)
        (?P<vlang>[FA]|E\[[A-Z]+\]|EU)?
        (?P<audio_desc>AUD|)
        (?:
            (?P<has_sub>-ST)
            (?P<sdh_sub>M?)
            (?P<sub_lang>[FA]|E\[[A-Z]+\]|EU)
        )?
    ''')

    # all obtained by exhaustive testing
    _COUNTRIES_MAP = {
        'DE_FR': (
            'BL', 'DE', 'FR', 'GF', 'GP', 'MF', 'MQ', 'NC',
            'PF', 'PM', 'RE', 'WF', 'YT',
        ),
        # with both of the below 'BE' sometimes works, sometimes doesn't
        'EUR_DE_FR': (
            'AT', 'BL', 'CH', 'DE', 'FR', 'GF', 'GP', 'LI',
            'MC', 'MF', 'MQ', 'NC', 'PF', 'PM', 'RE', 'WF',
            'YT',
        ),
        'SAT': (
            'AD', 'AT', 'AX', 'BG', 'BL', 'CH', 'CY', 'CZ',
            'DE', 'DK', 'EE', 'ES', 'FI', 'FR', 'GB', 'GF',
            'GR', 'HR', 'HU', 'IE', 'IS', 'IT', 'KN', 'LI',
            'LT', 'LU', 'LV', 'MC', 'MF', 'MQ', 'MT', 'NC',
            'NL', 'NO', 'PF', 'PL', 'PM', 'PT', 'RE', 'RO',
            'SE', 'SI', 'SK', 'SM', 'VA', 'WF', 'YT',
        ),
    }

    @staticmethod
    def _fix_accessible_subs_locale(subs):
        # Re-key subtitle tracks so accessible ('-acc') and forced ('-forced')
        # variants do not collide with the plain language code
        updated_subs = {}
        for lang, sub_formats in subs.items():
            for fmt in sub_formats:
                url = fmt.get('url') or ''
                suffix = ('acc' if url.endswith('-MAL.m3u8')
                          else 'forced' if '_VO' not in url
                          else None)
                updated_subs.setdefault(join_nonempty(lang, suffix), []).append(fmt)
        return updated_subs

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        video_id = mobj.group('id')
        lang = mobj.group('lang') or mobj.group('lang_2')
        language_code = self._LANG_MAP.get(lang)

        # 'x-validated-age' unlocks age-restricted player configs
        config = self._download_json(f'{self._API_BASE}/config/{lang}/{video_id}', video_id, headers={
            'x-validated-age': '18',
        })

        geoblocking = traverse_obj(config, ('data', 'attributes', 'restriction', 'geoblocking')) or {}
        if geoblocking.get('restrictedArea'):
            raise GeoRestrictedError(f'Video restricted to {geoblocking["code"]!r}',
                                     countries=self._COUNTRIES_MAP.get(geoblocking['code'], ('DE', 'FR')))

        if not traverse_obj(config, ('data', 'attributes', 'rights')):
            # Eg: https://www.arte.tv/de/videos/097407-215-A/28-minuten
            # Eg: https://www.arte.tv/es/videos/104351-002-A/serviteur-du-peuple-1-23
            raise ExtractorError(
                'Video is not available in this language edition of Arte or broadcast rights expired',
                expected=True)

        formats, subtitles = [], {}
        secondary_formats = []
        for stream in config['data']['attributes']['streams']:
            # official player contains code like `e.get("versions")[0].eStat.ml5`
            stream_version = stream['versions'][0]
            stream_version_code = stream_version['eStat']['ml5']

            lang_pref = -1
            m = self._VERSION_CODE_RE.match(stream_version_code)
            if m:
                # Build a 6-bit preference score from the parsed version code
                lang_pref = int(''.join('01'[x] for x in (
                    m.group('vlang') == language_code,  # we prefer voice in the requested language
                    not m.group('audio_desc'),  # and not the audio description version
                    bool(m.group('original_voice')),  # but if voice is not in the requested language, at least choose the original voice
                    m.group('sub_lang') == language_code,  # if subtitles are present, we prefer them in the requested language
                    not m.group('has_sub'),  # but we prefer no subtitles otherwise
                    not m.group('sdh_sub'),  # and we prefer not the hard-of-hearing subtitles if there are subtitles
                )))

            short_label = traverse_obj(stream_version, 'shortLabel', expected_type=str, default='?')
            if 'HLS' in stream['protocol']:
                fmts, subs = self._extract_m3u8_formats_and_subtitles(
                    stream['url'], video_id=video_id, ext='mp4', m3u8_id=stream_version_code, fatal=False)
                for fmt in fmts:
                    fmt.update({
                        'format_note': f'{stream_version.get("label", "unknown")} [{short_label}]',
                        'language_preference': lang_pref,
                    })
                # closed-caption / hardsubbed variants are demoted below the rest
                if any(map(short_label.startswith, ('cc', 'OGsub'))):
                    secondary_formats.extend(fmts)
                else:
                    formats.extend(fmts)
                subs = self._fix_accessible_subs_locale(subs)
                self._merge_subtitles(subs, target=subtitles)
            elif stream['protocol'] in ('HTTPS', 'RTMP'):
                formats.append({
                    'format_id': f'{stream["protocol"]}-{stream_version_code}',
                    'url': stream['url'],
                    'format_note': f'{stream_version.get("label", "unknown")} [{short_label}]',
                    'language_preference': lang_pref,
                    # 'ext': 'mp4',  # XXX: may or may not be necessary, at least for HTTPS
                })
            else:
                self.report_warning(f'Skipping stream with unknown protocol {stream["protocol"]}')

        formats.extend(secondary_formats)
        self._remove_duplicate_formats(formats)

        metadata = config['data']['attributes']['metadata']

        return {
            'id': metadata['providerId'],
            'webpage_url': traverse_obj(metadata, ('link', 'url')),
            'title': traverse_obj(metadata, 'subtitle', 'title'),
            'alt_title': metadata.get('subtitle') and metadata.get('title'),
            'description': metadata.get('description'),
            'duration': traverse_obj(metadata, ('duration', 'seconds')),
            'language': metadata.get('language'),
            'timestamp': traverse_obj(config, ('data', 'attributes', 'rights', 'begin'), expected_type=parse_iso8601),
            'is_live': config['data']['attributes'].get('live', False),
            'formats': formats,
            'subtitles': subtitles,
            'thumbnails': [
                {'url': image['url'], 'id': image.get('caption')}
                for image in metadata.get('images') or [] if url_or_none(image.get('url'))
            ],
            # TODO: chapters may also be in stream['segments']?
            'chapters': traverse_obj(config, ('data', 'attributes', 'chapters', 'elements', ..., {
                'start_time': 'startTime',
                'title': 'title',
            })) or None,
        }


class ArteTVEmbedIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?arte\.tv/player/v\d+/index\.php\?.*?\bjson_url=.+'
    _EMBED_REGEX = [r'<(?:iframe|script)[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?arte\.tv/player/v\d+/index\.php\?.*?\bjson_url=.+?)\1']
    _TESTS = [{
        'url': 'https://www.arte.tv/player/v5/index.php?json_url=https%3A%2F%2Fapi.arte.tv%2Fapi%2Fplayer%2Fv2%2Fconfig%2Fde%2F100605-013-A&lang=de&autoplay=true&mute=0100605-013-A',
        'info_dict': {
            'id': '100605-013-A',
            'ext': 'mp4',
            'title': 'United we Stream November Lockdown Edition #13',
            'description': 'md5:be40b667f45189632b78c1425c7c2ce1',
            'upload_date': '20201116',
        },
        'skip': 'No video available',
    }, {
        'url': 'https://www.arte.tv/player/v3/index.php?json_url=https://api.arte.tv/api/player/v2/config/de/100605-013-A',
        'only_matching': True,
    }]
    _WEBPAGE_TESTS = [{
        # FIXME: Embed detection
        'url': 'https://timesofmalta.com/article/watch-sunken-warships-north-sea-arte.1108358',
        'info_dict': {
            'id': '110288-000-A',
            'ext': 'mp4',
            'title': 'Danger on the Seabed',
            'alt_title': 'Sunken Warships in the North Sea',
            'description': 'md5:a2c84cbad37d280bddb6484087120add',
            'duration': 3148,
            'thumbnail': r're:https?://api-cdn\.arte\.tv/img/v2/image/.+',
            'timestamp': 1741686820,
            'upload_date': '20250311',
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        # FIXME: Embed detection
        'url': 'https://www.eurockeennes.fr/en-live/',
        'info_dict': {
            'id': 'en-live',
            'title': 'Les Eurocks en live | Les Eurockéennes de Belfort – 3-4-5-6 juillet 2025 sur la Presqu&#039;Île du Malsaucy',
        },
        'playlist_count': 4,
    }]

    def _real_extract(self, url):
        # The embed URL carries the player-config JSON URL; delegate to ArteTVIE
        qs = parse_qs(url)
        json_url = qs['json_url'][0]
        video_id = ArteTVIE._match_id(json_url)
        return self.url_result(
            json_url, ie=ArteTVIE.ie_key(), video_id=video_id)


class ArteTVPlaylistIE(ArteTVBaseIE):
    _VALID_URL = rf'https?://(?:www\.)?arte\.tv/(?P<lang>{ArteTVBaseIE._ARTE_LANGUAGES})/videos/(?P<id>RC-\d{{6}})'
    _TESTS = [{
        'url': 'https://www.arte.tv/en/videos/RC-016954/earn-a-living/',
        'only_matching': True,
    }, {
        'url': 'https://www.arte.tv/pl/videos/RC-014123/arte-reportage/',
        'playlist_mincount': 100,
        'info_dict': {
            'description': 'md5:84e7bf1feda248bc325ebfac818c476e',
            'id': 'RC-014123',
            'title': 'ARTE Reportage - najlepsze reportaże',
        },
    }]

    def _real_extract(self, url):
        lang, playlist_id = self._match_valid_url(url).group('lang', 'id')
        playlist = self._download_json(
            f'{self._API_BASE}/playlist/{lang}/{playlist_id}', playlist_id)['data']['attributes']

        # only items that actually carry a playable config URL are kept
        entries = [{
            '_type': 'url_transparent',
            'url': video['config']['url'],
            'ie_key': ArteTVIE.ie_key(),
            'id': video.get('providerId'),
            'title': video.get('title'),
            'alt_title': video.get('subtitle'),
            'thumbnail': url_or_none(traverse_obj(video, ('mainImage', 'url'))),
            'duration': int_or_none(traverse_obj(video, ('duration', 'seconds'))),
        } for video in traverse_obj(playlist, ('items', lambda _, v: v['config']['url']))]

        return self.playlist_result(entries, playlist_id,
                                    traverse_obj(playlist, ('metadata', 'title')),
                                    traverse_obj(playlist, ('metadata', 'description')))


class ArteTVCategoryIE(ArteTVBaseIE):
    _VALID_URL = rf'https?://(?:www\.)?arte\.tv/(?P<lang>{ArteTVBaseIE._ARTE_LANGUAGES})/videos/(?P<id>[\w-]+(?:/[\w-]+)*)/?\s*$'
    _TESTS = [{
        'url': 'https://www.arte.tv/en/videos/politics-and-society/',
        'info_dict': {
            'id': 'politics-and-society',
            'title': 'Politics and society',
            'description': 'Watch documentaries and reportage about politics, society and current affairs.',
        },
        'playlist_mincount': 3,
    }]

    @classmethod
    def suitable(cls, url):
        # Defer to the more specific single-video and playlist extractors
        return (
            not any(ie.suitable(url) for ie in (ArteTVIE, ArteTVPlaylistIE))
            and super().suitable(url))

    def _real_extract(self, url):
        lang, playlist_id = self._match_valid_url(url).groups()
        webpage = self._download_webpage(url, playlist_id)

        # Collect links on the category page that other Arte extractors handle
        items = []
        for video in re.finditer(
                rf'<a\b[^>]*?href\s*=\s*(?P<q>"|\'|\b)(?P<url>https?://www\.arte\.tv/{lang}/videos/[\w/-]+)(?P=q)',
                webpage):
            video = video.group('url')
            if video == url:
                continue
            if any(ie.suitable(video) for ie in (ArteTVIE, ArteTVPlaylistIE)):
                items.append(video)

        title = strip_or_none(self._generic_title('', webpage, default='').rsplit('|', 1)[0]) or None

        return self.playlist_from_matches(items, playlist_id=playlist_id, playlist_title=title,
                                          description=self._og_search_description(webpage, default=None))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/whowatch.py
yt_dlp/extractor/whowatch.py
from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    qualities,
    try_call,
    try_get,
)


class WhoWatchIE(InfoExtractor):
    IE_NAME = 'whowatch'
    _VALID_URL = r'https?://whowatch\.tv/viewer/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://whowatch.tv/viewer/21450171',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # result discarded — presumably fetched to establish session cookies; TODO confirm
        self._download_webpage(url, video_id)
        metadata = self._download_json(f'https://api.whowatch.tv/lives/{video_id}', video_id)
        live_data = self._download_json(f'https://api.whowatch.tv/lives/{video_id}/play', video_id)

        title = try_call(
            lambda: live_data['share_info']['live_title'][1:-1],
            lambda: metadata['live']['title'],
            expected_type=str)

        hls_url = live_data.get('hls_url')
        if not hls_url:
            raise ExtractorError(live_data.get('error_message') or 'The user is offline.', expected=True)

        QUALITIES = qualities(['low', 'medium', 'high', 'veryhigh'])
        formats = []

        for i, fmt in enumerate(live_data.get('streams') or []):
            name = fmt.get('quality') or fmt.get('name') or str(i)
            # NOTE(review): this rebinds the outer `hls_url`, so the fallback
            # extraction after the loop uses the LAST stream's URL (possibly
            # None), not live_data['hls_url'] — looks unintended; confirm
            hls_url = fmt.get('hls_url')
            rtmp_url = fmt.get('rtmp_url')
            audio_only = fmt.get('audio_only')
            quality = QUALITIES(fmt.get('quality'))
            if hls_url:
                hls_fmts = self._extract_m3u8_formats(
                    hls_url, video_id, ext='mp4', m3u8_id=f'hls-{name}', quality=quality)
                formats.extend(hls_fmts)
            else:
                hls_fmts = []

            # RTMP url for audio_only is same as high format, so skip it
            if rtmp_url and not audio_only:
                formats.append({
                    'url': rtmp_url,
                    'format_id': f'rtmp-{name}',
                    'ext': 'mp4',
                    'protocol': 'rtmp_ffmpeg',  # ffmpeg can, while rtmpdump can't
                    'vcodec': 'h264',
                    'acodec': 'aac',
                    'quality': quality,
                    'format_note': fmt.get('label'),
                    # note: HLS and RTMP have same resolution for now, so it's acceptable
                    'width': try_get(hls_fmts, lambda x: x[0]['width'], int),
                    'height': try_get(hls_fmts, lambda x: x[0]['height'], int),
                })

        # This contains the same formats as the above manifests and is used only as a fallback
        formats.extend(self._extract_m3u8_formats(
            hls_url, video_id, ext='mp4', m3u8_id='hls'))
        self._remove_duplicate_formats(formats)

        uploader_url = try_get(metadata, lambda x: x['live']['user']['user_path'], str)
        if uploader_url:
            uploader_url = f'https://whowatch.tv/profile/{uploader_url}'
        uploader_id = str(try_get(metadata, lambda x: x['live']['user']['id'], int))
        uploader = try_get(metadata, lambda x: x['live']['user']['name'], str)
        thumbnail = try_get(metadata, lambda x: x['live']['latest_thumbnail_url'], str)
        # started_at is in milliseconds, hence scale=1000
        timestamp = int_or_none(try_get(metadata, lambda x: x['live']['started_at'], int), scale=1000)
        view_count = try_get(metadata, lambda x: x['live']['total_view_count'], int)
        comment_count = try_get(metadata, lambda x: x['live']['comment_count'], int)

        return {
            'id': video_id,
            'title': title,
            'uploader_id': uploader_id,
            'uploader_url': uploader_url,
            'uploader': uploader,
            'formats': formats,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'view_count': view_count,
            'comment_count': comment_count,
            'is_live': True,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/polsatgo.py
yt_dlp/extractor/polsatgo.py
import json
import uuid

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    try_get,
    url_or_none,
)


class PolsatGoIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?polsat(?:box)?go\.pl/.+/(?P<id>[0-9a-fA-F]+)(?:[/#?]|$)'
    _TESTS = [{
        'url': 'https://polsatgo.pl/wideo/seriale/swiat-wedlug-kiepskich/5024045/sezon-1/5028300/swiat-wedlug-kiepskich-odcinek-88/4121',
        'info_dict': {
            'id': '4121',
            'ext': 'mp4',
            'title': 'Świat według Kiepskich - Odcinek 88',
            'age_limit': 12,
        },
    }]

    def _extract_formats(self, sources, video_id):
        # Each playable source needs a separate 'getPseudoLicense' RPC call
        # to resolve its actual media URL
        for source in sources or []:
            if not source.get('id'):
                continue
            url = url_or_none(self._call_api(
                'drm', video_id, 'getPseudoLicense',
                {'mediaId': video_id, 'sourceId': source['id']}).get('url'))
            if not url:
                continue
            yield {
                'url': url,
                # 'quality' presumably looks like e.g. '1080p' — trailing letter stripped; TODO confirm
                'height': int_or_none(try_get(source, lambda x: x['quality'][:-1])),
            }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        media = self._call_api('navigation', video_id, 'prePlayData', {'mediaId': video_id})['mediaItem']

        formats = list(self._extract_formats(
            try_get(media, lambda x: x['playback']['mediaSources']), video_id))

        return {
            'id': video_id,
            'title': media['displayInfo']['title'],
            'formats': formats,
            'age_limit': int_or_none(media['displayInfo']['ageGroup']),
        }

    def _call_api(self, endpoint, media_id, method, params):
        """POST a JSON-RPC request to the Polsat Go mobile backend and return its 'result'."""
        rand_uuid = str(uuid.uuid4())
        res = self._download_json(
            f'https://b2c-mobile.redefine.pl/rpc/{endpoint}/', media_id,
            note=f'Downloading {method} JSON metadata',
            data=json.dumps({
                'method': method,
                'id': '2137',
                'jsonrpc': '2.0',
                'params': {
                    **params,
                    # mimic the Android mobile app
                    'userAgentData': {
                        'deviceType': 'mobile',
                        'application': 'native',
                        'os': 'android',
                        'build': 10003,
                        'widevine': False,
                        'portal': 'pg',
                        'player': 'cpplayer',
                    },
                    'deviceId': {
                        'type': 'other',
                        'value': rand_uuid,
                    },
                    'clientId': rand_uuid,
                    'cpid': 1,
                },
            }).encode(),
            headers={'Content-type': 'application/json'})
        if not res.get('result'):
            if res['error']['code'] == 13404:
                raise ExtractorError('This video is either unavailable in your region or is DRM protected', expected=True)
            raise ExtractorError(f'Solorz said: {res["error"]["message"]} - {res["error"]["data"]["userMessage"]}')
        return res['result']
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/gaia.py
yt_dlp/extractor/gaia.py
import urllib.parse

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    str_or_none,
    strip_or_none,
    try_get,
    urlencode_postdata,
)


class GaiaIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?gaia\.com/video/(?P<id>[^/?]+).*?\bfullplayer=(?P<type>feature|preview)'
    _TESTS = [{
        'url': 'https://www.gaia.com/video/connecting-universal-consciousness?fullplayer=feature',
        'info_dict': {
            'id': '89356',
            'ext': 'mp4',
            'title': 'Connecting with Universal Consciousness',
            'description': 'md5:844e209ad31b7d31345f5ed689e3df6f',
            'upload_date': '20151116',
            'timestamp': 1447707266,
            'duration': 936,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'https://www.gaia.com/video/connecting-universal-consciousness?fullplayer=preview',
        'info_dict': {
            'id': '89351',
            'ext': 'mp4',
            'title': 'Connecting with Universal Consciousness',
            'description': 'md5:844e209ad31b7d31345f5ed689e3df6f',
            'upload_date': '20151116',
            'timestamp': 1447707266,
            'duration': 53,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }]
    _NETRC_MACHINE = 'gaia'
    # JWT auth token, populated from the browser cookie or by _perform_login
    _jwt = None

    def _real_initialize(self):
        # Reuse an existing browser session: the 'auth' cookie contains a
        # URL-encoded JSON blob with the JWT
        auth = self._get_cookies('https://www.gaia.com/').get('auth')
        if auth:
            auth = self._parse_json(
                urllib.parse.unquote(auth.value), None, fatal=False)
            self._jwt = auth.get('jwt')

    def _perform_login(self, username, password):
        if self._jwt:
            return  # already authenticated via cookies
        auth = self._download_json(
            'https://auth.gaia.com/v1/login',
            None, data=urlencode_postdata({
                'username': username,
                'password': password,
            }))
        if auth.get('success') is False:
            raise ExtractorError(', '.join(auth['messages']), expected=True)
        self._jwt = auth.get('jwt')

    def _real_extract(self, url):
        display_id, vtype = self._match_valid_url(url).groups()
        # resolve the URL slug to a numeric node id
        node_id = self._download_json(
            'https://brooklyn.gaia.com/pathinfo', display_id, query={
                'path': 'video/' + display_id,
            })['id']
        node = self._download_json(
            'https://brooklyn.gaia.com/node/%d' % node_id, node_id)
        vdata = node[vtype]  # 'feature' or 'preview' variant of the video
        media_id = str(vdata['nid'])
        title = node['title']
        headers = None
        if self._jwt:
            headers = {'Authorization': 'Bearer ' + self._jwt}
        media = self._download_json(
            'https://brooklyn.gaia.com/media/' + media_id,
            media_id, headers=headers)
        formats = self._extract_m3u8_formats(
            media['mediaUrls']['bcHLS'], media_id, 'mp4')
        subtitles = {}
        text_tracks = media.get('textTracks', {})
        for key in ('captions', 'subtitles'):
            for lang, sub_url in text_tracks.get(key, {}).items():
                subtitles.setdefault(lang, []).append({
                    'url': sub_url,
                })
        fivestar = node.get('fivestar', {})
        fields = node.get('fields', {})

        def get_field_value(key, value_key='value'):
            # fields are lists of {value_key: ...} dicts; take the first entry
            return try_get(fields, lambda x: x[key][0][value_key])

        return {
            'id': media_id,
            'display_id': display_id,
            'title': title,
            'formats': formats,
            'description': strip_or_none(get_field_value('body') or get_field_value('teaser')),
            'timestamp': int_or_none(node.get('created')),
            'subtitles': subtitles,
            'duration': int_or_none(vdata.get('duration')),
            'like_count': int_or_none(try_get(fivestar, lambda x: x['up_count']['value'])),
            'dislike_count': int_or_none(try_get(fivestar, lambda x: x['down_count']['value'])),
            'comment_count': int_or_none(node.get('comment_count')),
            'series': try_get(node, lambda x: x['series']['title'], str),
            'season_number': int_or_none(get_field_value('season')),
            'season_id': str_or_none(get_field_value('series_nid', 'nid')),
            'episode_number': int_or_none(get_field_value('episode')),
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/naver.py
yt_dlp/extractor/naver.py
import base64 import hashlib import hmac import itertools import json import re import time import urllib.parse from .common import InfoExtractor from ..utils import ( ExtractorError, dict_get, int_or_none, join_nonempty, merge_dicts, parse_iso8601, traverse_obj, try_get, unified_timestamp, update_url_query, url_or_none, ) class NaverBaseIE(InfoExtractor): _CAPTION_EXT_RE = r'\.(?:ttml|vtt)' @staticmethod # NB: Used in WeverseIE def process_subtitles(vod_data, process_url): ret = {'subtitles': {}, 'automatic_captions': {}} for caption in traverse_obj(vod_data, ('captions', 'list', ...)): caption_url = caption.get('source') if not caption_url: continue type_ = 'automatic_captions' if caption.get('type') == 'auto' else 'subtitles' lang = caption.get('locale') or join_nonempty('language', 'country', from_dict=caption) or 'und' if caption.get('type') == 'fan': lang += '_fan{}'.format(next(i for i in itertools.count(1) if f'{lang}_fan{i}' not in ret[type_])) ret[type_].setdefault(lang, []).extend({ 'url': sub_url, 'name': join_nonempty('label', 'fanName', from_dict=caption, delim=' - '), } for sub_url in process_url(caption_url)) return ret def _extract_video_info(self, video_id, vid, key): video_data = self._download_json( 'http://play.rmcnmv.naver.com/vod/play/v2.0/' + vid, video_id, query={ 'key': key, }) meta = video_data['meta'] title = meta['subject'] formats = [] get_list = lambda x: try_get(video_data, lambda y: y[x + 's']['list'], list) or [] def extract_formats(streams, stream_type, query={}): for stream in streams: stream_url = stream.get('source') if not stream_url: continue stream_url = update_url_query(stream_url, query) encoding_option = stream.get('encodingOption', {}) bitrate = stream.get('bitrate', {}) formats.append({ 'format_id': '{}_{}'.format(stream.get('type') or stream_type, dict_get(encoding_option, ('name', 'id'))), 'url': stream_url, 'ext': 'mp4', 'width': int_or_none(encoding_option.get('width')), 'height': 
int_or_none(encoding_option.get('height')), 'vbr': int_or_none(bitrate.get('video')), 'abr': int_or_none(bitrate.get('audio')), 'filesize': int_or_none(stream.get('size')), 'protocol': 'm3u8_native' if stream_type == 'HLS' else None, 'extra_param_to_segment_url': urllib.parse.urlencode(query, doseq=True) if stream_type == 'HLS' else None, }) extract_formats(get_list('video'), 'H264') for stream_set in video_data.get('streams', []): query = {} for param in stream_set.get('keys', []): query[param['name']] = param['value'] stream_type = stream_set.get('type') videos = stream_set.get('videos') if videos: extract_formats(videos, stream_type, query) elif stream_type == 'HLS': stream_url = stream_set.get('source') if not stream_url: continue formats.extend(self._extract_m3u8_formats( update_url_query(stream_url, query), video_id, 'mp4', 'm3u8_native', m3u8_id=stream_type, fatal=False)) replace_ext = lambda x, y: re.sub(self._CAPTION_EXT_RE, '.' + y, x) def get_subs(caption_url): if re.search(self._CAPTION_EXT_RE, caption_url): return [ replace_ext(caption_url, 'ttml'), replace_ext(caption_url, 'vtt'), ] return [caption_url] user = meta.get('user', {}) return { 'id': video_id, 'title': title, 'formats': formats, 'thumbnail': try_get(meta, lambda x: x['cover']['source']), 'view_count': int_or_none(meta.get('count')), 'uploader_id': user.get('id'), 'uploader': user.get('name'), 'uploader_url': user.get('url'), **self.process_subtitles(video_data, get_subs), } def _call_api(self, path, video_id): api_endpoint = f'https://apis.naver.com/now_web2/now_web_api/v1{path}' key = b'nbxvs5nwNG9QKEWK0ADjYA4JZoujF4gHcIwvoCxFTPAeamq5eemvt5IWAYXxrbYM' msgpad = int(time.time() * 1000) md = base64.b64encode(hmac.HMAC( key, f'{api_endpoint[:255]}{msgpad}'.encode(), digestmod=hashlib.sha1).digest()).decode() return self._download_json(api_endpoint, video_id=video_id, headers=self.geo_verification_headers(), query={ 'msgpad': msgpad, 'md': md, })['result'] class NaverIE(NaverBaseIE): 
_VALID_URL = r'https?://(?:m\.)?tv(?:cast)?\.naver\.com/(?:v|embed)/(?P<id>\d+)' _GEO_BYPASS = False _TESTS = [{ 'url': 'http://tv.naver.com/v/81652', 'info_dict': { 'id': '81652', 'ext': 'mp4', 'title': '[9월 모의고사 해설강의][수학_김상희] 수학 A형 16~20번', 'description': '메가스터디 수학 김상희 선생님이 9월 모의고사 수학A형 16번에서 20번까지 해설강의를 공개합니다.', 'timestamp': 1378200754, 'upload_date': '20130903', 'uploader': '메가스터디, 합격불변의 법칙', 'uploader_id': 'megastudy', 'uploader_url': 'https://tv.naver.com/megastudy', 'view_count': int, 'like_count': int, 'comment_count': int, 'duration': 2118, 'thumbnail': r're:^https?://.*\.jpg', }, }, { 'url': 'http://tv.naver.com/v/395837', 'md5': '7791205fa89dbed2f5e3eb16d287ff05', 'info_dict': { 'id': '395837', 'ext': 'mp4', 'title': '9년이 지나도 아픈 기억, 전효성의 아버지', 'description': 'md5:c76be23e21403a6473d8119678cdb5cb', 'timestamp': 1432030253, 'upload_date': '20150519', 'uploader': '4가지쇼', 'uploader_id': '4show', 'uploader_url': 'https://tv.naver.com/4show', 'view_count': int, 'like_count': int, 'comment_count': int, 'duration': 277, 'thumbnail': r're:^https?://.*\.jpg', }, }, { 'url': 'https://tv.naver.com/v/67838091', 'md5': '126ea384ab033bca59672c12cca7a6be', 'info_dict': { 'id': '67838091', 'ext': 'mp4', 'title': '[라인W 날씨] 내일 아침 서울 체감 -19도…호남·충남 대설', 'description': 'md5:fe026e25634c85845698aed4b59db5a7', 'timestamp': 1736347853, 'upload_date': '20250108', 'uploader': 'KBS뉴스', 'uploader_id': 'kbsnews', 'uploader_url': 'https://tv.naver.com/kbsnews', 'view_count': int, 'like_count': int, 'comment_count': int, 'duration': 69, 'thumbnail': r're:^https?://.*\.jpg', }, 'params': {'format': 'HLS_144P'}, }, { 'url': 'http://tvcast.naver.com/v/81652', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) data = self._call_api(f'/clips/{video_id}/play-info', video_id) vid = traverse_obj(data, ('clip', 'videoId', {str})) in_key = traverse_obj(data, ('play', 'inKey', {str})) if not vid or not in_key: raise ExtractorError('Unable to extract video 
info') info = self._extract_video_info(video_id, vid, in_key) info.update(traverse_obj(data, ('clip', { 'title': 'title', 'description': 'description', 'timestamp': ('firstExposureDatetime', {parse_iso8601}), 'duration': ('playTime', {int_or_none}), 'like_count': ('likeItCount', {int_or_none}), 'view_count': ('playCount', {int_or_none}), 'comment_count': ('commentCount', {int_or_none}), 'thumbnail': ('thumbnailImageUrl', {url_or_none}), 'uploader': 'channelName', 'uploader_id': 'channelId', 'uploader_url': ('channelUrl', {url_or_none}), 'age_limit': ('adultVideo', {lambda x: 19 if x else None}), }))) return info class NaverLiveIE(NaverBaseIE): IE_NAME = 'Naver:live' _VALID_URL = r'https?://(?:m\.)?tv(?:cast)?\.naver\.com/l/(?P<id>\d+)' _GEO_BYPASS = False _TESTS = [{ 'url': 'https://tv.naver.com/l/127062', 'info_dict': { 'id': '127062', 'ext': 'mp4', 'live_status': 'is_live', 'channel': '뉴스는 YTN', 'channel_id': 'ytnnews24', 'title': 're:^대한민국 24시간 뉴스 채널 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'description': 'md5:f938b5956711beab6f882314ffadf4d5', 'start_time': 1677752280, 'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)', 'like_count': int, }, }, { 'url': 'https://tv.naver.com/l/140535', 'info_dict': { 'id': '140535', 'ext': 'mp4', 'live_status': 'is_live', 'channel': 'KBS뉴스', 'channel_id': 'kbsnews', 'start_time': 1696867320, 'title': 're:^언제 어디서나! 
KBS 뉴스 24 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'description': 'md5:6ad419c0bf2f332829bda3f79c295284', 'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)', 'like_count': int, }, }, { 'url': 'https://tv.naver.com/l/54887', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) data = self._call_api(f'/live-end/normal/{video_id}/play-info?renewLastPlayDate=true', video_id) status = traverse_obj(data, ('live', 'liveStatus')) if status == 'CLOSED': raise ExtractorError('Stream is offline.', expected=True) elif status != 'OPENED': raise ExtractorError(f'Unknown status {status!r}') return { 'id': video_id, 'formats': self._extract_m3u8_formats( traverse_obj(data, ('playbackBody', {json.loads}, 'media', 0, 'path')), video_id, live=True), **traverse_obj(data, ('live', { 'title': 'title', 'channel': 'channelName', 'channel_id': 'channelId', 'description': 'description', 'like_count': (('likeCount', 'likeItCount'), {int_or_none}), 'thumbnail': ('thumbnailImageUrl', {url_or_none}), 'start_time': (('startTime', 'startDateTime', 'startYmdt'), {parse_iso8601}), }), get_all=False), 'is_live': True, } class NaverNowIE(NaverBaseIE): IE_NAME = 'navernow' _VALID_URL = r'https?://now\.naver\.com/s/now\.(?P<id>\w+)' _API_URL = 'https://apis.naver.com/now_web/oldnow_web/v4' _TESTS = [{ 'url': 'https://now.naver.com/s/now.4759?shareReplayId=26331132#replay=', 'md5': 'e05854162c21c221481de16b2944a0bc', 'info_dict': { 'id': '4759-26331132', 'title': '아이키X노제\r\n💖꽁냥꽁냥💖(1)', 'ext': 'mp4', 'thumbnail': r're:^https?://.*\.jpg', 'timestamp': 1650369600, 'upload_date': '20220419', 'uploader_id': 'now', 'view_count': int, 'uploader_url': 'https://now.naver.com/show/4759', 'uploader': '아이키의 떰즈업', }, 'params': { 'noplaylist': True, }, }, { 'url': 'https://now.naver.com/s/now.4759?shareHightlight=26601461#highlight=', 'md5': '9f6118e398aa0f22b2152f554ea7851b', 'info_dict': { 'id': '4759-26601461', 'title': '아이키: 나 리정한테 흔들렸어,,, 질투 폭발하는 노제 여보😾 [아이키의 떰즈업]ㅣ네이버 NOW.', 
'ext': 'mp4', 'thumbnail': r're:^https?://.*\.jpg', 'upload_date': '20220504', 'timestamp': 1651648311, 'uploader_id': 'now', 'view_count': int, 'uploader_url': 'https://now.naver.com/show/4759', 'uploader': '아이키의 떰즈업', }, 'params': { 'noplaylist': True, }, }, { 'url': 'https://now.naver.com/s/now.4759', 'info_dict': { 'id': '4759', 'title': '아이키의 떰즈업', }, 'playlist_mincount': 101, }, { 'url': 'https://now.naver.com/s/now.4759?shareReplayId=26331132#replay', 'info_dict': { 'id': '4759', 'title': '아이키의 떰즈업', }, 'playlist_mincount': 101, }, { 'url': 'https://now.naver.com/s/now.4759?shareHightlight=26601461#highlight=', 'info_dict': { 'id': '4759', 'title': '아이키의 떰즈업', }, 'playlist_mincount': 101, }, { 'url': 'https://now.naver.com/s/now.kihyunplay?shareReplayId=30573291#replay', 'only_matching': True, }] def _extract_replay(self, show_id, replay_id): vod_info = self._download_json(f'{self._API_URL}/shows/now.{show_id}/vod/{replay_id}', replay_id) in_key = self._download_json(f'{self._API_URL}/shows/now.{show_id}/vod/{replay_id}/inkey', replay_id)['inKey'] return merge_dicts({ 'id': f'{show_id}-{replay_id}', 'title': traverse_obj(vod_info, ('episode', 'title')), 'timestamp': unified_timestamp(traverse_obj(vod_info, ('episode', 'start_time'))), 'thumbnail': vod_info.get('thumbnail_image_url'), }, self._extract_video_info(replay_id, vod_info['video_id'], in_key)) def _extract_show_replays(self, show_id): page_size = 15 page = 1 while True: show_vod_info = self._download_json( f'{self._API_URL}/vod-shows/now.{show_id}', show_id, query={'page': page, 'page_size': page_size}, note=f'Downloading JSON vod list for show {show_id} - page {page}', )['response']['result'] for v in show_vod_info.get('vod_list') or []: yield self._extract_replay(show_id, v['id']) if len(show_vod_info.get('vod_list') or []) < page_size: break page += 1 def _extract_show_highlights(self, show_id, highlight_id=None): page_size = 10 page = 1 while True: highlights_videos = self._download_json( 
f'{self._API_URL}/shows/now.{show_id}/highlights/videos/', show_id, query={'page': page, 'page_size': page_size}, note=f'Downloading JSON highlights for show {show_id} - page {page}') for highlight in highlights_videos.get('results') or []: if highlight_id and highlight.get('clip_no') != int(highlight_id): continue yield merge_dicts({ 'id': f'{show_id}-{highlight["clip_no"]}', 'title': highlight.get('title'), 'timestamp': unified_timestamp(highlight.get('regdate')), 'thumbnail': highlight.get('thumbnail_url'), }, self._extract_video_info(highlight['clip_no'], highlight['video_id'], highlight['video_inkey'])) if len(highlights_videos.get('results') or []) < page_size: break page += 1 def _extract_highlight(self, show_id, highlight_id): try: return next(self._extract_show_highlights(show_id, highlight_id)) except StopIteration: raise ExtractorError(f'Unable to find highlight {highlight_id} for show {show_id}') def _real_extract(self, url): show_id = self._match_id(url) qs = urllib.parse.parse_qs(urllib.parse.urlparse(url).query) if not self._yes_playlist(show_id, qs.get('shareHightlight')): return self._extract_highlight(show_id, qs['shareHightlight'][0]) elif not self._yes_playlist(show_id, qs.get('shareReplayId')): return self._extract_replay(show_id, qs['shareReplayId'][0]) show_info = self._download_json( f'{self._API_URL}/shows/now.{show_id}/', show_id, note=f'Downloading JSON vod list for show {show_id}') return self.playlist_result( itertools.chain(self._extract_show_replays(show_id), self._extract_show_highlights(show_id)), show_id, show_info.get('title'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/presstv.py
yt_dlp/extractor/presstv.py
from .common import InfoExtractor from ..utils import remove_start class PressTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?presstv\.ir/[^/]+/(?P<y>\d+)/(?P<m>\d+)/(?P<d>\d+)/(?P<id>\d+)/(?P<display_id>[^/]+)?' _TEST = { 'url': 'http://www.presstv.ir/Detail/2016/04/09/459911/Australian-sewerage-treatment-facility-/', 'md5': '5d7e3195a447cb13e9267e931d8dd5a5', 'info_dict': { 'id': '459911', 'display_id': 'Australian-sewerage-treatment-facility-', 'ext': 'mp4', 'title': 'Organic mattresses used to clean waste water', 'upload_date': '20160409', 'thumbnail': r're:^https?://.*\.jpg', 'description': 'md5:20002e654bbafb6908395a5c0cfcd125', }, } def _real_extract(self, url): mobj = self._match_valid_url(url) video_id = mobj.group('id') display_id = mobj.group('display_id') or video_id webpage = self._download_webpage(url, display_id) # extract video URL from webpage video_url = self._hidden_inputs(webpage)['inpPlayback'] # build list of available formats # specified in http://www.presstv.ir/Scripts/playback.js base_url = 'http://192.99.219.222:82/presstv' _formats = [ (180, '_low200.mp4'), (360, '_low400.mp4'), (720, '_low800.mp4'), (1080, '.mp4'), ] formats = [{ 'url': base_url + video_url[:-4] + extension, 'format_id': f'{height}p', 'height': height, } for height, extension in _formats] # extract video metadata title = remove_start( self._html_search_meta('title', webpage, fatal=True), 'PressTV-') thumbnail = self._og_search_thumbnail(webpage) description = self._og_search_description(webpage) upload_date = '%04d%02d%02d' % ( int(mobj.group('y')), int(mobj.group('m')), int(mobj.group('d')), ) return { 'id': video_id, 'display_id': display_id, 'title': title, 'formats': formats, 'thumbnail': thumbnail, 'upload_date': upload_date, 'description': description, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/atscaleconf.py
yt_dlp/extractor/atscaleconf.py
import re from .common import InfoExtractor class AtScaleConfEventIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?atscaleconference\.com/events/(?P<id>[^/&$?]+)' _TESTS = [{ 'url': 'https://atscaleconference.com/events/data-scale-spring-2022/', 'playlist_mincount': 13, 'info_dict': { 'id': 'data-scale-spring-2022', 'title': 'Data @Scale Spring 2022', 'description': 'md5:7d7ca1c42ac9c6d8a785092a1aea4b55', }, }, { 'url': 'https://atscaleconference.com/events/video-scale-2021/', 'playlist_mincount': 14, 'info_dict': { 'id': 'video-scale-2021', 'title': 'Video @Scale 2021', 'description': 'md5:7d7ca1c42ac9c6d8a785092a1aea4b55', }, }] def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) return self.playlist_from_matches( re.findall(r'data-url\s*=\s*"(https?://(?:www\.)?atscaleconference\.com/videos/[^"]+)"', webpage), ie='Generic', playlist_id=playlist_id, title=self._og_search_title(webpage), description=self._og_search_description(webpage))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/drtv.py
yt_dlp/extractor/drtv.py
import json import uuid from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, mimetype2ext, parse_iso8601, try_call, update_url_query, url_or_none, ) from ..utils.traversal import traverse_obj SERIES_API = 'https://production-cdn.dr-massive.com/api/page?device=web_browser&item_detail_expand=all&lang=da&max_list_prefetch=3&path=%s' class DRTVIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// (?: (?:www\.)?dr\.dk/tv/se(?:/ondemand)?/(?:[^/?#]+/)*| (?:www\.)?(?:dr\.dk|dr-massive\.com)/drtv/(?:se|episode|program)/ ) (?P<id>[\da-z_-]+) ''' _GEO_BYPASS = False _GEO_COUNTRIES = ['DK'] IE_NAME = 'drtv' _TESTS = [{ 'url': 'https://www.dr.dk/tv/se/boern/ultra/klassen-ultra/klassen-darlig-taber-10', 'md5': '25e659cccc9a2ed956110a299fdf5983', 'info_dict': { 'id': 'klassen-darlig-taber-10', 'ext': 'mp4', 'title': 'Klassen - Dårlig taber (10)', 'description': 'md5:815fe1b7fa656ed80580f31e8b3c79aa', 'timestamp': 1539085800, 'upload_date': '20181009', 'duration': 606.84, 'series': 'Klassen', 'season': 'Klassen I', 'season_number': 1, 'season_id': 'urn:dr:mu:bundle:57d7e8216187a4031cfd6f6b', 'episode': 'Episode 10', 'episode_number': 10, 'release_year': 2016, }, 'expected_warnings': ['Unable to download f4m manifest'], 'skip': 'this video has been removed', }, { # with SignLanguage formats 'url': 'https://www.dr.dk/tv/se/historien-om-danmark/-/historien-om-danmark-stenalder', 'info_dict': { 'id': '00831690010', 'ext': 'mp4', 'title': 'Historien om Danmark: Stenalder', 'description': 'md5:8c66dcbc1669bbc6f873879880f37f2a', 'timestamp': 1546628400, 'upload_date': '20190104', 'duration': 3504.619, 'formats': 'mincount:20', 'release_year': 2017, 'season_id': 'urn:dr:mu:bundle:5afc03ad6187a4065ca5fd35', 'season_number': 1, 'season': 'Historien om Danmark', 'series': 'Historien om Danmark', }, 'skip': 'this video has been removed', }, { 'url': 'https://www.dr.dk/drtv/se/frank-and-kastaniegaarden_71769', 'info_dict': { 'id': '00951930010', 'ext': 'mp4', 
'title': 'Frank & Kastaniegaarden', 'description': 'md5:974e1780934cf3275ef10280204bccb0', 'release_timestamp': 1546545600, 'release_date': '20190103', 'duration': 2576, 'season': 'Frank & Kastaniegaarden', 'season_id': '67125', 'release_year': 2019, 'season_number': 2019, 'series': 'Frank & Kastaniegaarden', 'episode_number': 1, 'episode': 'Frank & Kastaniegaarden', 'thumbnail': r're:https?://.+', }, 'params': { 'skip_download': True, }, }, { # Foreign and Regular subtitle track 'url': 'https://www.dr.dk/drtv/se/spise-med-price_-pasta-selv_397445', 'info_dict': { 'id': '00212301010', 'ext': 'mp4', 'episode_number': 1, 'title': 'Spise med Price: Pasta Selv', 'alt_title': '1. Pasta Selv', 'release_date': '20230807', 'description': 'md5:2da9060524fed707810d71080b3d0cd8', 'duration': 1750, 'season': 'Spise med Price', 'release_timestamp': 1691438400, 'season_id': '397440', 'episode': 'Spise med Price: Pasta Selv', 'thumbnail': r're:https?://.+', 'season_number': 15, 'series': 'Spise med Price', 'release_year': 2022, 'subtitles': 'mincount:2', }, 'params': { 'skip_download': 'm3u8', }, }, { 'url': 'https://www.dr.dk/drtv/episode/bonderoeven_71769', 'only_matching': True, }, { 'url': 'https://dr-massive.com/drtv/se/bonderoeven_71769', 'only_matching': True, }, { 'url': 'https://www.dr.dk/drtv/program/jagten_220924', 'only_matching': True, }] SUBTITLE_LANGS = { 'DanishLanguageSubtitles': 'da', 'ForeignLanguageSubtitles': 'da_foreign', 'CombinedLanguageSubtitles': 'da_combined', } _TOKEN = None def _real_initialize(self): if self._TOKEN: return token_response = self._download_json( 'https://isl.dr-massive.com/api/authorization/anonymous-sso', None, note='Downloading anonymous token', headers={ 'content-type': 'application/json', }, query={ 'device': 'phone_android', 'lang': 'da', 'supportFallbackToken': 'true', }, data=json.dumps({ 'deviceId': str(uuid.uuid4()), 'scopes': ['Catalog'], 'optout': True, }).encode()) self._TOKEN = traverse_obj( token_response, (lambda _, x: 
x['type'] == 'UserAccount', 'value', {str}), get_all=False) if not self._TOKEN: raise ExtractorError('Unable to get anonymous token') def _real_extract(self, url): url_slug = self._match_id(url) webpage = self._download_webpage(url, url_slug) json_data = self._search_json( r'window\.__data\s*=', webpage, 'data', url_slug, fatal=False) or {} item = traverse_obj( json_data, ('cache', 'page', ..., (None, ('entries', 0)), 'item', {dict}), get_all=False) if item: item_id = item.get('id') else: item_id = url_slug.rsplit('_', 1)[-1] item = self._download_json( f'https://production-cdn.dr-massive.com/api/items/{item_id}', item_id, note='Attempting to download backup item data', query={ 'device': 'web_browser', 'expand': 'all', 'ff': 'idp,ldp,rpt', 'geoLocation': 'dk', 'isDeviceAbroad': 'false', 'lang': 'da', 'segments': 'drtv,optedout', 'sub': 'Anonymous', }) video_id = try_call(lambda: item['customId'].rsplit(':', 1)[-1]) or item_id stream_data = self._download_json( f'https://production.dr-massive.com/api/account/items/{item_id}/videos', video_id, note='Downloading stream data', query={ 'delivery': 'stream', 'device': 'web_browser', 'ff': 'idp,ldp,rpt', 'lang': 'da', 'resolution': 'HD-1080', 'sub': 'Anonymous', }, headers={'authorization': f'Bearer {self._TOKEN}'}) formats = [] subtitles = {} for stream in traverse_obj(stream_data, (lambda _, x: x['url'])): format_id = stream.get('format', 'na') access_service = stream.get('accessService') preference = None subtitle_suffix = '' if access_service in ('SpokenSubtitles', 'SignLanguage', 'VisuallyInterpreted'): preference = -1 format_id += f'-{access_service}' subtitle_suffix = f'-{access_service}' elif access_service == 'StandardVideo': preference = 1 fmts, subs = self._extract_m3u8_formats_and_subtitles( stream.get('url'), video_id, ext='mp4', preference=preference, m3u8_id=format_id, fatal=False) formats.extend(fmts) api_subtitles = traverse_obj(stream, ('subtitles', lambda _, v: url_or_none(v['link']), {dict})) if not 
api_subtitles: self._merge_subtitles(subs, target=subtitles) for sub_track in api_subtitles: lang = sub_track.get('language') or 'da' subtitles.setdefault(self.SUBTITLE_LANGS.get(lang, lang) + subtitle_suffix, []).append({ 'url': sub_track['link'], 'ext': mimetype2ext(sub_track.get('format')) or 'vtt', }) if not formats and traverse_obj(item, ('season', 'customFields', 'IsGeoRestricted')): self.raise_geo_restricted(countries=self._GEO_COUNTRIES) return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, **traverse_obj(item, { 'title': 'title', 'alt_title': 'contextualTitle', 'description': 'description', 'thumbnail': ('images', 'wallpaper'), 'release_timestamp': ('customFields', 'BroadcastTimeDK', {parse_iso8601}), 'duration': ('duration', {int_or_none}), 'series': ('season', 'show', 'title'), 'season': ('season', 'title'), 'season_number': ('season', 'seasonNumber', {int_or_none}), 'season_id': 'seasonId', 'episode': 'episodeName', 'episode_number': ('episodeNumber', {int_or_none}), 'release_year': ('releaseYear', {int_or_none}), }), } class DRTVLiveIE(InfoExtractor): IE_NAME = 'drtv:live' _VALID_URL = r'https?://(?:www\.)?dr\.dk/(?:tv|TV)/live/(?P<id>[\da-z-]+)' _GEO_COUNTRIES = ['DK'] _TEST = { 'url': 'https://www.dr.dk/tv/live/dr1', 'info_dict': { 'id': 'dr1', 'ext': 'mp4', 'title': 're:^DR1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', }, 'params': { # m3u8 download 'skip_download': True, }, } def _real_extract(self, url): channel_id = self._match_id(url) channel_data = self._download_json( 'https://www.dr.dk/mu-online/api/1.0/channel/' + channel_id, channel_id) title = channel_data['Title'] formats = [] for streaming_server in channel_data.get('StreamingServers', []): server = streaming_server.get('Server') if not server: continue link_type = streaming_server.get('LinkType') for quality in streaming_server.get('Qualities', []): for stream in quality.get('Streams', []): stream_path = stream.get('Stream') if not stream_path: continue stream_url = 
update_url_query( f'{server}/{stream_path}', {'b': ''}) if link_type == 'HLS': formats.extend(self._extract_m3u8_formats( stream_url, channel_id, 'mp4', m3u8_id=link_type, fatal=False, live=True)) elif link_type == 'HDS': formats.extend(self._extract_f4m_formats(update_url_query( f'{server}/{stream_path}', {'hdcore': '3.7.0'}), channel_id, f4m_id=link_type, fatal=False)) return { 'id': channel_id, 'title': title, 'thumbnail': channel_data.get('PrimaryImageUri'), 'formats': formats, 'is_live': True, } class DRTVSeasonIE(InfoExtractor): IE_NAME = 'drtv:season' _VALID_URL = r'https?://(?:www\.)?(?:dr\.dk|dr-massive\.com)/drtv/saeson/(?P<display_id>[\w-]+)_(?P<id>\d+)' _GEO_COUNTRIES = ['DK'] _TESTS = [{ 'url': 'https://www.dr.dk/drtv/saeson/frank-and-kastaniegaarden_9008', 'info_dict': { 'id': '9008', 'display_id': 'frank-and-kastaniegaarden', 'title': 'Frank & Kastaniegaarden', 'series': 'Frank & Kastaniegaarden', 'season_number': 2008, 'alt_title': 'Season 2008', }, 'playlist_mincount': 8, }, { 'url': 'https://www.dr.dk/drtv/saeson/frank-and-kastaniegaarden_8761', 'info_dict': { 'id': '8761', 'display_id': 'frank-and-kastaniegaarden', 'title': 'Frank & Kastaniegaarden', 'series': 'Frank & Kastaniegaarden', 'season_number': 2009, 'alt_title': 'Season 2009', }, 'playlist_mincount': 19, }] def _real_extract(self, url): display_id, season_id = self._match_valid_url(url).group('display_id', 'id') data = self._download_json(SERIES_API % f'/saeson/{display_id}_{season_id}', display_id) entries = [{ '_type': 'url', 'url': f'https://www.dr.dk/drtv{episode["path"]}', 'ie_key': DRTVIE.ie_key(), 'title': episode.get('title'), 'alt_title': episode.get('contextualTitle'), 'episode': episode.get('episodeName'), 'description': episode.get('shortDescription'), 'series': traverse_obj(data, ('entries', 0, 'item', 'title')), 'season_number': traverse_obj(data, ('entries', 0, 'item', 'seasonNumber')), 'episode_number': episode.get('episodeNumber'), } for episode in traverse_obj(data, 
('entries', 0, 'item', 'episodes', 'items'))] return { '_type': 'playlist', 'id': season_id, 'display_id': display_id, 'title': traverse_obj(data, ('entries', 0, 'item', 'title')), 'alt_title': traverse_obj(data, ('entries', 0, 'item', 'contextualTitle')), 'series': traverse_obj(data, ('entries', 0, 'item', 'title')), 'entries': entries, 'season_number': traverse_obj(data, ('entries', 0, 'item', 'seasonNumber')), } class DRTVSeriesIE(InfoExtractor): IE_NAME = 'drtv:series' _VALID_URL = r'https?://(?:www\.)?(?:dr\.dk|dr-massive\.com)/drtv/serie/(?P<display_id>[\w-]+)_(?P<id>\d+)' _GEO_COUNTRIES = ['DK'] _TESTS = [{ 'url': 'https://www.dr.dk/drtv/serie/frank-and-kastaniegaarden_6954', 'info_dict': { 'id': '6954', 'display_id': 'frank-and-kastaniegaarden', 'title': 'Frank & Kastaniegaarden', 'series': 'Frank & Kastaniegaarden', 'alt_title': '', }, 'playlist_mincount': 15, }] def _real_extract(self, url): display_id, series_id = self._match_valid_url(url).group('display_id', 'id') data = self._download_json(SERIES_API % f'/serie/{display_id}_{series_id}', display_id) entries = [{ '_type': 'url', 'url': f'https://www.dr.dk/drtv{season.get("path")}', 'ie_key': DRTVSeasonIE.ie_key(), 'title': season.get('title'), 'alt_title': season.get('contextualTitle'), 'series': traverse_obj(data, ('entries', 0, 'item', 'title')), 'season_number': traverse_obj(data, ('entries', 0, 'item', 'seasonNumber')), } for season in traverse_obj(data, ('entries', 0, 'item', 'show', 'seasons', 'items'))] return { '_type': 'playlist', 'id': series_id, 'display_id': display_id, 'title': traverse_obj(data, ('entries', 0, 'item', 'title')), 'alt_title': traverse_obj(data, ('entries', 0, 'item', 'contextualTitle')), 'series': traverse_obj(data, ('entries', 0, 'item', 'title')), 'entries': entries, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/bfmtv.py
yt_dlp/extractor/bfmtv.py
import re from .common import InfoExtractor from ..utils import ExtractorError, extract_attributes class BFMTVBaseIE(InfoExtractor): _VALID_URL_BASE = r'https?://(?:www\.|rmc\.)?bfmtv\.com/' _VALID_URL_TMPL = _VALID_URL_BASE + r'(?:[^/]+/)*[^/?&#]+_%s[A-Z]-(?P<id>\d{12})\.html' _VIDEO_BLOCK_REGEX = r'(<div[^>]+class="video_block[^"]*"[^>]*>.*?</div>)' _VIDEO_ELEMENT_REGEX = r'(<video-js[^>]+>)' BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s' def _extract_video(self, video_block): video_element = self._search_regex( self._VIDEO_ELEMENT_REGEX, video_block, 'video element', default=None) if video_element: video_element_attrs = extract_attributes(video_element) video_id = video_element_attrs.get('data-video-id') if not video_id: return account_id = video_element_attrs.get('data-account') or '876450610001' player_id = video_element_attrs.get('adjustplayer') or '19dszYXgm' else: video_block_attrs = extract_attributes(video_block) video_id = video_block_attrs.get('videoid') if not video_id: return account_id = video_block_attrs.get('accountid') or '876630703001' player_id = video_block_attrs.get('playerid') or 'KbPwEbuHx' return self.url_result( self.BRIGHTCOVE_URL_TEMPLATE % (account_id, player_id, video_id), 'BrightcoveNew', video_id) class BFMTVIE(BFMTVBaseIE): IE_NAME = 'bfmtv' _VALID_URL = BFMTVBaseIE._VALID_URL_TMPL % 'V' _TESTS = [{ 'url': 'https://www.bfmtv.com/politique/emmanuel-macron-l-islam-est-une-religion-qui-vit-une-crise-aujourd-hui-partout-dans-le-monde_VN-202010020146.html', 'info_dict': { 'id': '6196747868001', 'ext': 'mp4', 'title': 'Emmanuel Macron: "L\'Islam est une religion qui vit une crise aujourd’hui, partout dans le monde"', 'description': 'Le Président s\'exprime sur la question du séparatisme depuis les Mureaux, dans les Yvelines.', 'uploader_id': '876450610001', 'upload_date': '20201002', 'timestamp': 1601629620, 'duration': 44.757, 'tags': ['bfmactu', 'politique'], 'thumbnail': 
'https://cf-images.eu-west-1.prod.boltdns.net/v1/static/876450610001/5041f4c1-bc48-4af8-a256-1b8300ad8ef0/cf2f9114-e8e2-4494-82b4-ab794ea4bc7d/1920x1080/match/image.jpg', }, }] def _real_extract(self, url): bfmtv_id = self._match_id(url) webpage = self._download_webpage(url, bfmtv_id) video = self._extract_video(self._search_regex( self._VIDEO_BLOCK_REGEX, webpage, 'video block')) if not video: raise ExtractorError('Failed to extract video') return video class BFMTVLiveIE(BFMTVBaseIE): IE_NAME = 'bfmtv:live' _VALID_URL = BFMTVBaseIE._VALID_URL_BASE + '(?P<id>(?:[^/]+/)?en-direct)' _TESTS = [{ 'url': 'https://www.bfmtv.com/en-direct/', 'info_dict': { 'id': '6346069778112', 'ext': 'mp4', 'title': r're:^Le Live BFM TV \d{4}-\d{2}-\d{2} \d{2}:\d{2}$', 'uploader_id': '876450610001', 'upload_date': '20240202', 'timestamp': 1706887572, 'live_status': 'is_live', 'thumbnail': r're:https://.+/image\.jpg', 'tags': [], }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.bfmtv.com/economie/en-direct/', 'only_matching': True, }] def _real_extract(self, url): bfmtv_id = self._match_id(url) webpage = self._download_webpage(url, bfmtv_id) video = self._extract_video(self._search_regex( self._VIDEO_BLOCK_REGEX, webpage, 'video block')) if not video: raise ExtractorError('Failed to extract video') return video class BFMTVArticleIE(BFMTVBaseIE): IE_NAME = 'bfmtv:article' _VALID_URL = BFMTVBaseIE._VALID_URL_TMPL % 'A' _TESTS = [{ 'url': 'https://www.bfmtv.com/sante/covid-19-un-responsable-de-l-institut-pasteur-se-demande-quand-la-france-va-se-reconfiner_AV-202101060198.html', 'info_dict': { 'id': '202101060198', 'title': 'Covid-19: un responsable de l\'Institut Pasteur se demande "quand la France va se reconfiner"', 'description': 'md5:947974089c303d3ac6196670ae262843', }, 'playlist_count': 2, }, { 'url': 'https://www.bfmtv.com/international/pour-bolsonaro-le-bresil-est-en-faillite-mais-il-ne-peut-rien-faire_AD-202101060232.html', 'only_matching': True, }, { 'url': 
'https://www.bfmtv.com/sante/covid-19-oui-le-vaccin-de-pfizer-distribue-en-france-a-bien-ete-teste-sur-des-personnes-agees_AN-202101060275.html', 'only_matching': True, }, { 'url': 'https://rmc.bfmtv.com/actualites/societe/transports/ce-n-est-plus-tout-rentable-le-bioethanol-e85-depasse-1eu-le-litre-des-automobilistes-regrettent_AV-202301100268.html', 'info_dict': { 'id': '6318445464112', 'ext': 'mp4', 'title': 'Le plein de bioéthanol fait de plus en plus mal à la pompe', 'uploader_id': '876630703001', 'upload_date': '20230110', 'timestamp': 1673341692, 'duration': 109.269, 'tags': ['rmc', 'show', 'apolline de malherbe', 'info', 'talk', 'matinale', 'radio'], 'thumbnail': 'https://cf-images.eu-west-1.prod.boltdns.net/v1/static/876630703001/5bef74b8-9d5e-4480-a21f-60c2e2480c46/96c88b74-f9db-45e1-8040-e199c5da216c/1920x1080/match/image.jpg', }, }] def _entries(self, webpage): for video_block_el in re.findall(self._VIDEO_BLOCK_REGEX, webpage): video = self._extract_video(video_block_el) if video: yield video def _real_extract(self, url): bfmtv_id = self._match_id(url) webpage = self._download_webpage(url, bfmtv_id) return self.playlist_result( self._entries(webpage), bfmtv_id, self._og_search_title(webpage, fatal=False), self._html_search_meta(['og:description', 'description'], webpage))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/groupon.py
yt_dlp/extractor/groupon.py
from .common import InfoExtractor class GrouponIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?groupon\.com/deals/(?P<id>[^/?#&]+)' _TEST = { 'url': 'https://www.groupon.com/deals/bikram-yoga-huntington-beach-2#ooid=tubGNycTo_9Uxg82uESj4i61EYX8nyuf', 'info_dict': { 'id': 'bikram-yoga-huntington-beach-2', 'title': '$49 for 10 Yoga Classes or One Month of Unlimited Classes at Bikram Yoga Huntington Beach ($180 Value)', 'description': 'Studio kept at 105 degrees and 40% humidity with anti-microbial and anti-slip Flotex flooring; certified instructors', }, 'playlist': [{ 'md5': '42428ce8a00585f9bc36e49226eae7a1', 'info_dict': { 'id': 'fk6OhWpXgIQ', 'ext': 'mp4', 'title': 'Bikram Yoga Huntington Beach | Orange County !tubGNycTo@9Uxg82uESj4i61EYX8nyuf', 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e', 'duration': 45, 'upload_date': '20160405', 'uploader_id': 'groupon', 'uploader': 'Groupon', }, 'add_ie': ['Youtube'], }], 'params': { 'skip_download': True, }, } _PROVIDERS = { 'youtube': ('%s', 'Youtube'), } def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) payload = self._parse_json(self._search_regex( r'(?:var\s+|window\.)payload\s*=\s*(.*?);\n', webpage, 'payload'), playlist_id) videos = payload['carousel'].get('dealVideos', []) entries = [] for v in videos: provider = v.get('provider') video_id = v.get('media') or v.get('id') or v.get('baseURL') if not provider or not video_id: continue url_pattern, ie_key = self._PROVIDERS.get(provider.lower()) if not url_pattern: self.report_warning( f'{playlist_id}: Unsupported video provider {provider}, skipping video') continue entries.append(self.url_result(url_pattern % video_id, ie_key)) return { '_type': 'playlist', 'id': playlist_id, 'entries': entries, 'title': self._og_search_title(webpage), 'description': self._og_search_description(webpage), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ukcolumn.py
yt_dlp/extractor/ukcolumn.py
from .common import InfoExtractor from .vimeo import VimeoIE from .youtube import YoutubeIE from ..utils import ( ExtractorError, unescapeHTML, urljoin, ) class UkColumnIE(InfoExtractor): _WORKING = False IE_NAME = 'ukcolumn' _VALID_URL = r'(?i)https?://(?:www\.)?ukcolumn\.org(/index\.php)?/(?:video|ukcolumn-news)/(?P<id>[-a-z0-9]+)' _TESTS = [{ 'url': 'https://www.ukcolumn.org/ukcolumn-news/uk-column-news-28th-april-2021', 'info_dict': { 'id': '541632443', 'ext': 'mp4', 'title': 'UK Column News - 28th April 2021', 'uploader_id': 'ukcolumn', 'uploader': 'UK Column', }, 'add_ie': [VimeoIE.ie_key()], 'expected_warnings': ['Unable to download JSON metadata'], 'params': { 'skip_download': 'Handled by Vimeo', }, }, { 'url': 'https://www.ukcolumn.org/video/insight-eu-military-unification', 'info_dict': { 'id': 'Fzbnb9t7XAw', 'ext': 'mp4', 'title': 'Insight: EU Military Unification', 'uploader_id': 'ukcolumn', 'description': 'md5:29a207965271af89baa0bc191f5de576', 'uploader': 'UK Column', 'upload_date': '20170514', }, 'add_ie': [YoutubeIE.ie_key()], 'params': { 'skip_download': 'Handled by Youtube', }, }, { 'url': 'https://www.ukcolumn.org/index.php/ukcolumn-news/uk-column-news-30th-april-2021', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) oembed_url = urljoin(url, unescapeHTML(self._search_regex( r'<iframe[^>]+src=(["\'])(?P<url>/media/oembed\?url=.+?)\1', webpage, 'OEmbed URL', group='url'))) oembed_webpage = self._download_webpage( oembed_url, display_id, note='Downloading OEmbed page') ie, video_url = YoutubeIE, YoutubeIE._extract_url(oembed_webpage) if not video_url: ie, video_url = VimeoIE, VimeoIE._extract_url(url, oembed_webpage) if not video_url: raise ExtractorError('No embedded video found') return { '_type': 'url_transparent', 'title': self._og_search_title(webpage), 'url': video_url, 'ie_key': ie.ie_key(), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/cinetecamilano.py
yt_dlp/extractor/cinetecamilano.py
import json from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, float_or_none, parse_iso8601, strip_or_none, traverse_obj, try_get, urljoin, ) class CinetecaMilanoIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?cinetecamilano\.it/film/(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.cinetecamilano.it/film/1942', 'info_dict': { 'id': '1942', 'ext': 'mp4', 'title': 'Il draghetto Gris\u00f9 (4 episodi)', 'release_date': '20220129', 'thumbnail': r're:.+\.png', 'description': 'md5:5328cbe080b93224712b6f17fcaf2c01', 'modified_date': '20200520', 'duration': 3139, 'release_timestamp': 1643446208, 'modified_timestamp': int, }, }] def _real_extract(self, url): video_id = self._match_id(url) try: film_json = self._download_json( f'https://www.cinetecamilano.it/api/catalogo/{video_id}/?', video_id, headers={ 'Referer': url, 'Authorization': try_get(self._get_cookies('https://www.cinetecamilano.it'), lambda x: f'Bearer {x["cnt-token"].value}') or '', }) except ExtractorError as e: if ((isinstance(e.cause, HTTPError) and e.cause.status == 500) or isinstance(e.cause, json.JSONDecodeError)): self.raise_login_required(method='cookies') raise if not film_json.get('success') or not film_json.get('archive'): raise ExtractorError('Video information not found') archive = film_json['archive'] return { 'id': video_id, 'title': archive.get('title'), 'description': strip_or_none(archive.get('description')), 'duration': float_or_none(archive.get('duration'), invscale=60), 'release_timestamp': parse_iso8601(archive.get('updated_at'), delimiter=' '), 'modified_timestamp': parse_iso8601(archive.get('created_at'), delimiter=' '), 'thumbnail': urljoin(url, try_get(archive, lambda x: x['thumb']['src'].replace('/public/', '/storage/'))), 'formats': self._extract_m3u8_formats( urljoin(url, traverse_obj(archive, ('drm', 'hls'))), video_id, 'mp4'), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/teamcoco.py
yt_dlp/extractor/teamcoco.py
import json import re from .turner import TurnerBaseIE from ..utils import ( ExtractorError, clean_html, determine_ext, make_archive_id, merge_dicts, mimetype2ext, parse_duration, parse_qs, traverse_obj, unified_timestamp, url_or_none, urljoin, ) class TeamcocoBaseIE(TurnerBaseIE): _QUALITIES = { 'low': (480, 272), 'sd': (640, 360), 'hd': (1280, 720), 'uhd': (1920, 1080), } def _get_formats_and_subtitles(self, info, video_id): formats, subtitles = [], {} for src in traverse_obj(info, ('src', ..., {dict})): format_id = src.get('label') src_url = src.get('src') if re.match(r'https?:/[^/]', src_url): src_url = src_url.replace(':/', '://', 1) ext = determine_ext(src_url, mimetype2ext(src.get('type'))) if not format_id or not src_url: continue elif format_id == 'hls' or ext == 'm3u8': fmts, subs = self._extract_m3u8_formats_and_subtitles( src_url, video_id, 'mp4', m3u8_id=format_id, fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) elif format_id in self._QUALITIES: if src_url.startswith('/mp4:protected/'): # TODO: Correct extraction for these files continue formats.append({ 'url': src_url, 'ext': ext, 'format_id': format_id, 'width': self._QUALITIES[format_id][0], 'height': self._QUALITIES[format_id][1], }) return formats, subtitles class TeamcocoIE(TeamcocoBaseIE): _VALID_URL = r'https?://(?:www\.)?teamcoco\.com/(?P<id>([^/]+/)*[^/?#]+)' _TESTS = [ { 'url': 'http://teamcoco.com/video/mary-kay-remote', 'info_dict': { 'id': '80187', 'display_id': 'video_mary-kay-remote', 'ext': 'mp4', 'title': 'Conan Becomes A Mary Kay Beauty Consultant', 'description': 'md5:9fb64e45b5aef6b2af1b67612b36c162', 'thumbnail': 'https://teamcoco.com/image/thumb?id=80187', 'upload_date': '20140402', 'timestamp': 1396440000, }, 'params': { 'skip_download': 'm3u8', }, }, { 'url': 'http://teamcoco.com/video/louis-ck-interview-george-w-bush', 'info_dict': { 'id': '19705', 'display_id': 'video_louis-ck-interview-george-w-bush', 'ext': 'mp4', 'title': 'Louis C.K. 
Interview Pt. 1 11/3/11', 'description': 'Louis C.K. got starstruck by George W. Bush, so what? Part one.', 'thumbnail': 'https://teamcoco.com/image/thumb?id=19705', 'upload_date': '20111104', 'timestamp': 1320408000, }, 'params': { 'skip_download': 'm3u8', }, }, { 'url': 'http://teamcoco.com/video/timothy-olyphant-drinking-whiskey', 'info_dict': { 'id': '88748', 'display_id': 'video_timothy-olyphant-drinking-whiskey', 'ext': 'mp4', 'title': 'Timothy Olyphant Raises A Toast To “Justified”', 'description': 'md5:15501f23f020e793aeca761205e42c24', 'upload_date': '20150415', 'timestamp': 1429099200, 'thumbnail': 'https://teamcoco.com/image/thumb?id=88748', }, }, { 'url': 'http://teamcoco.com/video/full-episode-mon-6-1-joel-mchale-jake-tapper-and-musical-guest-courtney-barnett?playlist=x;eyJ0eXBlIjoidGFnIiwiaWQiOjl9', 'info_dict': { 'id': '89341', 'ext': 'mp4', 'title': 'Full Episode - Mon. 6/1 - Joel McHale, Jake Tapper, And Musical Guest Courtney Barnett', 'description': 'Guests: Joel McHale, Jake Tapper, And Musical Guest Courtney Barnett', }, 'skip': 'This video is no longer available.', }, { 'url': 'http://teamcoco.com/video/the-conan-audiencey-awards-for-04/25/18', 'only_matching': True, }, { 'url': 'http://teamcoco.com/italy/conan-jordan-schlansky-hit-the-streets-of-florence', 'only_matching': True, }, { 'url': 'http://teamcoco.com/haiti/conan-s-haitian-history-lesson', 'only_matching': True, }, { 'url': 'http://teamcoco.com/israel/conan-hits-the-streets-beaches-of-tel-aviv', 'only_matching': True, }, ] def _real_extract(self, url): display_id = self._match_id(url).replace('/', '_') webpage = self._download_webpage(url, display_id) data = self._search_nextjs_data(webpage, display_id)['props']['pageProps']['pageData'] info = merge_dicts(*traverse_obj(data, ( 'blocks', lambda _, v: v['name'] in ('meta-tags', 'video-player', 'video-info'), 'props', {dict}))) thumbnail = traverse_obj( info, (('image', 'poster'), {urljoin('https://teamcoco.com/')}), get_all=False) 
video_id = traverse_obj(parse_qs(thumbnail), ('id', 0)) or display_id formats, subtitles = self._get_formats_and_subtitles(info, video_id) return { 'id': video_id, 'display_id': display_id, 'formats': formats, 'subtitles': subtitles, 'thumbnail': thumbnail, **traverse_obj(info, { 'title': 'title', 'description': (('descriptionHtml', 'description'), {clean_html}), 'timestamp': ('publishedOn', {lambda x: f'{x} 12:00AM'}, {unified_timestamp}), }, get_all=False), } class ConanClassicIE(TeamcocoBaseIE): _WORKING = False _VALID_URL = r'https?://(?:(?:www\.)?conanclassic|conan25\.teamcoco)\.com/(?P<id>([^/]+/)*[^/?#]+)' _TESTS = [{ 'url': 'https://conanclassic.com/video/ice-cube-kevin-hart-conan-share-lyft', 'info_dict': { 'id': '74709', 'ext': 'mp4', 'title': 'Ice Cube, Kevin Hart, & Conan Share A Lyft Car', 'display_id': 'video/ice-cube-kevin-hart-conan-share-lyft', 'description': 'The stars of "Ride Along" teach Conan how to roll around Hollywood.', 'thumbnail': 'http://cdn.teamcococdn.com/image/640x360/lyft-5bd75f82b616c.png', 'duration': 570.0, 'upload_date': '20131211', 'timestamp': 1386721620, '_old_archive_ids': ['teamcoco 74709'], }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://conan25.teamcoco.com/video/ice-cube-kevin-hart-conan-share-lyft', 'only_matching': True, }] _GRAPHQL_QUERY = '''query find($id: ID!) { findRecord(id: $id) { ... on MetaInterface { id title teaser publishOn slug thumb { ... on FileInterface { id path preview mime } } } ... on Video { videoType duration isLive youtubeId turnerMediaId turnerMediaAuthToken airDate } ... 
on Episode { airDate seasonNumber episodeNumber guestNames } } findRecordVideoMetadata(id: $id) { turnerMediaId turnerMediaAuthToken duration src } }''' def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) data = self._search_nextjs_data(webpage, display_id)['props']['pageProps']['pageData'] video_id = traverse_obj( data, ('blocks', ..., 'props', 'fieldDefs', lambda _, v: v['name'] == 'incomingVideoId', 'value'), ('blocks', ..., 'props', 'fields', 'incomingVideoRecord', 'id'), get_all=False) if not video_id: self.raise_no_formats('Unable to extract video ID from webpage', expected=True) response = self._download_json( 'https://conanclassic.com/api/legacy/graphql', video_id, data=json.dumps({ 'query': self._GRAPHQL_QUERY, 'variables': {'id': video_id}, }, separators=(',', ':')).encode(), headers={ 'Content-Type': 'application/json', }) info = traverse_obj(response, ('data', 'findRecord', { 'title': 'title', 'description': 'teaser', 'thumbnail': ('thumb', 'preview', {url_or_none}), 'duration': ('duration', {parse_duration}), 'timestamp': ('publishOn', {unified_timestamp}), })) media_id = traverse_obj( response, ('data', ('findRecord', 'findRecordVideoMetadata'), 'turnerMediaId'), get_all=False) if media_id: token = traverse_obj( response, ('data', ('findRecord', 'findRecordVideoMetadata'), 'turnerMediaAuthToken'), get_all=False) if not token: raise ExtractorError('No Turner Media auth token found in API response') self._initialize_geo_bypass({ 'countries': ['US'], }) info.update(self._extract_ngtv_info(media_id, { 'accessToken': token, 'accessTokenType': 'jws', }, None)) # TODO: the None arg needs to be the AdobePass software_statement else: formats, subtitles = self._get_formats_and_subtitles( traverse_obj(response, ('data', 'findRecordVideoMetadata')), video_id) info.update({ 'formats': formats, 'subtitles': subtitles, }) return { 'id': video_id, 'display_id': display_id, '_old_archive_ids': 
[make_archive_id('Teamcoco', video_id)], **info, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/vgtv.py
yt_dlp/extractor/vgtv.py
import re from .common import InfoExtractor from .xstream import XstreamIE from ..utils import ( ExtractorError, float_or_none, try_get, ) class VGTVIE(XstreamIE): # XXX: Do not subclass from concrete IE IE_DESC = 'VGTV, BTTV, FTV, Aftenposten and Aftonbladet' _GEO_BYPASS = False _HOST_TO_APPNAME = { 'tv.vg.no': 'vgtv', 'vgtv.no': 'vgtv', 'bt.no/tv': 'bttv', 'aftenbladet.no/tv': 'satv', 'fvn.no/fvntv': 'fvntv', 'aftenposten.no/webtv': 'aptv', 'ap.vgtv.no/webtv': 'aptv', 'tv.aftonbladet.se': 'abtv', # obsolete URL schemas, kept in order to save one HTTP redirect 'tv.aftonbladet.se/abtv': 'abtv', 'www.aftonbladet.se/tv': 'abtv', } _APP_NAME_TO_VENDOR = { 'vgtv': 'vgtv', 'bttv': 'bt', 'satv': 'sa', 'fvntv': 'fvn', 'aptv': 'ap', 'abtv': 'ab', } _VALID_URL = r'''(?x) (?:https?://(?:www\.)? (?P<host> {} ) /? (?: (?:\#!/)?(?:video|live)/| embed?.*id=| a(?:rticles)?/ )| (?P<appname> {} ):) (?P<id>\d+) '''.format('|'.join(_HOST_TO_APPNAME.keys()), '|'.join(_APP_NAME_TO_VENDOR.keys())) _TESTS = [ { # streamType: vod 'url': 'http://www.vgtv.no/#!/video/84196/hevnen-er-soet-episode-10-abu', 'md5': 'b8be7a234cebb840c0d512c78013e02f', 'info_dict': { 'id': '84196', 'ext': 'mp4', 'title': 'Hevnen er søt: Episode 10 - Abu', 'description': 'md5:e25e4badb5f544b04341e14abdc72234', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 648.000, 'timestamp': 1404626400, 'upload_date': '20140706', 'view_count': int, }, }, { # streamType: wasLive 'url': 'http://www.vgtv.no/#!/live/100764/opptak-vgtv-foelger-em-kvalifiseringen', 'info_dict': { 'id': '100764', 'ext': 'flv', 'title': 'OPPTAK: VGTV følger EM-kvalifiseringen', 'description': 'md5:3772d9c0dc2dff92a886b60039a7d4d3', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 9103.0, 'timestamp': 1410113864, 'upload_date': '20140907', 'view_count': int, }, 'params': { # m3u8 download 'skip_download': True, }, 'skip': 'Video is no longer available', }, { # streamType: wasLive 'url': 'http://www.vgtv.no/#!/live/113063/direkte-v75-fra-solvalla', 
'info_dict': { 'id': '113063', 'ext': 'mp4', 'title': 'V75 fra Solvalla 30.05.15', 'description': 'md5:b3743425765355855f88e096acc93231', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 25966, 'timestamp': 1432975582, 'upload_date': '20150530', 'view_count': int, }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://www.aftenposten.no/webtv/#!/video/21039/trailer-sweatshop-i-can-t-take-any-more', 'md5': 'fd828cd29774a729bf4d4425fe192972', 'info_dict': { 'id': '21039', 'ext': 'mp4', 'title': 'TRAILER: «SWEATSHOP» - I can´t take any more', 'description': 'md5:21891f2b0dd7ec2f78d84a50e54f8238', 'duration': 66, 'timestamp': 1417002452, 'upload_date': '20141126', 'view_count': int, }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'https://tv.vg.no/video/241779/politiets-ekstremkjoering', 'only_matching': True, }, { 'url': 'http://www.bt.no/tv/#!/video/100250/norling-dette-er-forskjellen-paa-1-divisjon-og-eliteserien', 'only_matching': True, }, { 'url': 'http://ap.vgtv.no/webtv#!/video/111084/de-nye-bysyklene-lettere-bedre-gir-stoerre-hjul-og-feste-til-mobil', 'only_matching': True, }, { # geoblocked 'url': 'http://www.vgtv.no/#!/video/127205/inside-the-mind-of-favela-funk', 'only_matching': True, }, { 'url': 'https://tv.aftonbladet.se/video/36015/vulkanutbrott-i-rymden-nu-slapper-nasa-bilderna', 'only_matching': True, }, { 'url': 'http://tv.aftonbladet.se/abtv/articles/36015', 'only_matching': True, }, { 'url': 'https://www.aftonbladet.se/tv/a/36015', 'only_matching': True, }, { 'url': 'abtv:140026', 'only_matching': True, }, { 'url': 'http://www.vgtv.no/video/84196/hevnen-er-soet-episode-10-abu', 'only_matching': True, }, ] def _real_extract(self, url): mobj = self._match_valid_url(url) video_id = mobj.group('id') host = mobj.group('host') appname = self._HOST_TO_APPNAME[host] if host else mobj.group('appname') vendor = self._APP_NAME_TO_VENDOR[appname] data = self._download_json( 
f'http://svp.vg.no/svp/api/v1/{vendor}/assets/{video_id}?appName={appname}-website', video_id, 'Downloading media JSON') if data.get('status') == 'inactive': raise ExtractorError( f'Video {video_id} is no longer available', expected=True) info = { 'formats': [], } if len(video_id) == 5: if appname == 'bttv': info = self._extract_video_info('btno', video_id) streams = data['streamUrls'] stream_type = data.get('streamType') is_live = stream_type == 'live' formats = [] hls_url = streams.get('hls') if hls_url: formats.extend(self._extract_m3u8_formats( hls_url, video_id, 'mp4', live=is_live, m3u8_id='hls', fatal=False)) hds_url = streams.get('hds') if hds_url: hdcore_sign = 'hdcore=3.7.0' f4m_formats = self._extract_f4m_formats( hds_url + f'?{hdcore_sign}', video_id, f4m_id='hds', fatal=False) if f4m_formats: for entry in f4m_formats: # URLs without the extra param induce an 404 error entry.update({'extra_param_to_segment_url': hdcore_sign}) formats.append(entry) mp4_urls = streams.get('pseudostreaming') or [] mp4_url = streams.get('mp4') if mp4_url: mp4_urls.append(mp4_url) for mp4_url in mp4_urls: format_info = { 'url': mp4_url, } mobj = re.search(r'(\d+)_(\d+)_(\d+)', mp4_url) if mobj: tbr = int(mobj.group(3)) format_info.update({ 'width': int(mobj.group(1)), 'height': int(mobj.group(2)), 'tbr': tbr, 'format_id': f'mp4-{tbr}', }) formats.append(format_info) info['formats'].extend(formats) if not info['formats']: properties = try_get( data, lambda x: x['streamConfiguration']['properties'], list) if properties and 'geoblocked' in properties: raise self.raise_geo_restricted( countries=[host.rpartition('.')[-1].partition('/')[0].upper()]) info.update({ 'id': video_id, 'title': data['title'], 'description': data['description'], 'thumbnail': data['images']['main'] + '?t[]=900x506q80', 'timestamp': data['published'], 'duration': float_or_none(data['duration'], 1000), 'view_count': data['displays'], 'is_live': is_live, }) return info class BTArticleIE(InfoExtractor): 
IE_NAME = 'bt:article' IE_DESC = 'Bergens Tidende Articles' _VALID_URL = r'https?://(?:www\.)?bt\.no/(?:[^/]+/)+(?P<id>[^/]+)-\d+\.html' _TEST = { 'url': 'http://www.bt.no/nyheter/lokalt/Kjemper-for-internatet-1788214.html', 'md5': '2acbe8ad129b3469d5ae51b1158878df', 'info_dict': { 'id': '23199', 'ext': 'mp4', 'title': 'Alrekstad internat', 'description': 'md5:dc81a9056c874fedb62fc48a300dac58', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 191, 'timestamp': 1289991323, 'upload_date': '20101117', 'view_count': int, }, } def _real_extract(self, url): webpage = self._download_webpage(url, self._match_id(url)) video_id = self._search_regex( r'<video[^>]+data-id="(\d+)"', webpage, 'video id') return self.url_result(f'bttv:{video_id}', 'VGTV') class BTVestlendingenIE(InfoExtractor): IE_NAME = 'bt:vestlendingen' IE_DESC = 'Bergens Tidende - Vestlendingen' _VALID_URL = r'https?://(?:www\.)?bt\.no/spesial/vestlendingen/#!/(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.bt.no/spesial/vestlendingen/#!/86588', 'md5': 'd7d17e3337dc80de6d3a540aefbe441b', 'info_dict': { 'id': '86588', 'ext': 'mov', 'title': 'Otto Wollertsen', 'description': 'Vestlendingen Otto Fredrik Wollertsen', 'timestamp': 1430473209, 'upload_date': '20150501', }, 'skip': '404 Error', }, { 'url': 'http://www.bt.no/spesial/vestlendingen/#!/86255', 'md5': 'a2893f8632e96389f4bdf36aa9463ceb', 'info_dict': { 'id': '86255', 'ext': 'mov', 'title': 'Du må tåle å fryse og være sulten', 'description': 'md5:b8046f4d022d5830ddab04865791d063', 'upload_date': '20150321', 'timestamp': 1426942023, }, }] def _real_extract(self, url): return self.url_result(f'bttv:{self._match_id(url)}', 'VGTV')
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tbs.py
yt_dlp/extractor/tbs.py
import re import urllib.parse from .turner import TurnerBaseIE from ..utils import ( float_or_none, int_or_none, make_archive_id, strip_or_none, ) from ..utils.traversal import traverse_obj class TBSIE(TurnerBaseIE): _SITE_INFO = { 'tbs': ('TBS', 'eyJhbGciOiJSUzI1NiJ9.eyJzdWIiOiJkZTA0NTYxZS1iMTFhLTRlYTgtYTg5NC01NjI3MGM1NmM2MWIiLCJuYmYiOjE1MzcxODkzOTAsImlzcyI6ImF1dGguYWRvYmUuY29tIiwiaWF0IjoxNTM3MTg5MzkwfQ.Z7ny66kaqNDdCHf9Y9KsV12LrBxrLkGGxlYe2XGm6qsw2T-k1OCKC1TMzeqiZP735292MMRAQkcJDKrMIzNbAuf9nCdIcv4kE1E2nqUnjPMBduC1bHffZp8zlllyrN2ElDwM8Vhwv_5nElLRwWGEt0Kaq6KJAMZA__WDxKWC18T-wVtsOZWXQpDqO7nByhfj2t-Z8c3TUNVsA_wHgNXlkzJCZ16F2b7yGLT5ZhLPupOScd3MXC5iPh19HSVIok22h8_F_noTmGzmMnIRQi6bWYWK2zC7TQ_MsYHfv7V6EaG5m1RKZTV6JAwwoJQF_9ByzarLV1DGwZxD9-eQdqswvg'), 'tntdrama': ('TNT', 'eyJhbGciOiJSUzI1NiJ9.eyJzdWIiOiIwOTMxYTU4OS1jZjEzLTRmNjMtYTJmYy03MzhjMjE1NWU5NjEiLCJuYmYiOjE1MzcxOTA4MjcsImlzcyI6ImF1dGguYWRvYmUuY29tIiwiaWF0IjoxNTM3MTkwODI3fQ.AucKvtws7oekTXi80_zX4-BlgJD9GLvlOI9FlBCjdlx7Pa3eJ0AqbogynKMiatMbnLOTMHGjd7tTiq422unmZjBz70dhePAe9BbW0dIo7oQ57vZ-VBYw_tWYRPmON61MwAbLVlqROD3n_zURs85S8TlkQx9aNx9x_riGGELjd8l05CVa_pOluNhYvuIFn6wmrASOKI1hNEblBDWh468UWP571-fe4zzi0rlYeeHd-cjvtWvOB3bQsWrUVbK4pRmqvzEH59j0vNF-ihJF9HncmUicYONe47Mib3elfMok23v4dB1_UAlQY_oawfNcynmEnJQCcqFmbHdEwTW6gMiYsA'), 'trutv': ('truTV', 'eyJhbGciOiJSUzI1NiJ9.eyJzdWIiOiJhYzQyOTkwMi0xMDYzLTQyNTQtYWJlYS1iZTY2ODM4MTVmZGIiLCJuYmYiOjE1MzcxOTA4NjgsImlzcyI6ImF1dGguYWRvYmUuY29tIiwiaWF0IjoxNTM3MTkwODY4fQ.ewXl5LDMDvvx3nDXV4jCdSwUq_sOluKoOVsIjznAo6Zo4zrGe9rjlZ9DOmQKW66g6VRMexJsJ5vM1EkY8TC5-YcQw_BclK1FPGO1rH3Wf7tX_l0b1BVbSJQKIj9UgqDp_QbGcBXz24kN4So3U22mhs6di9PYyyfG68ccKL2iRprcVKWCslIHwUF-T7FaEqb0K57auilxeW1PONG2m-lIAcZ62DUwqXDWvw0CRoWI08aVVqkkhnXaSsQfLs5Ph1Pfh9Oq3g_epUm9Ss45mq6XM7gbOb5omTcKLADRKK-PJVB_JXnZnlsXbG0ttKE1cTKJ738qu7j4aipYTf-W0nKF5Q'), } _VALID_URL = fr'''(?x) https?://(?:www\.)?(?P<site>{"|".join(map(re.escape, _SITE_INFO))})\.com (?P<path>/(?: (?P<watch>watch(?:tnt|tbs|trutv))| 
movies|shows/[^/?#]+/(?:clips|season-\d+/episode-\d+) )/(?P<id>[^/?#]+)) ''' _TESTS = [{ 'url': 'https://www.tbs.com/shows/american-dad/season-6/episode-12/you-debt-your-life', 'info_dict': { 'id': '984bdcd8db0cc00dc699927f2a411c8c6e0e48f3', 'ext': 'mp4', 'title': 'You Debt Your Life', 'description': 'md5:f211cfeb9187fd3cdb53eb0e8930d499', 'duration': 1231.0, 'thumbnail': r're:https://images\.tbs\.com/tbs/.+\.(?:jpe?g|png)', 'chapters': 'count:4', 'season': 'Season 6', 'season_number': 6, 'episode': 'Episode 12', 'episode_number': 12, 'timestamp': 1478276239, 'upload_date': '20161104', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.tntdrama.com/shows/the-librarians-the-next-chapter/season-1/episode-10/and-going-medieval', 'info_dict': { 'id': 'e487b31b663a8001864f62fd20907782f7b8ccb8', 'ext': 'mp4', 'title': 'And Going Medieval', 'description': 'md5:5aed0ae23a6cf148a02fe3c1be8359fa', 'duration': 2528.0, 'thumbnail': r're:https://images\.tntdrama\.com/tnt/.+\.(?:jpe?g|png)', 'chapters': 'count:7', 'season': 'Season 1', 'season_number': 1, 'episode': 'Episode 10', 'episode_number': 10, 'timestamp': 1743107520, 'upload_date': '20250327', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.trutv.com/shows/the-carbonaro-effect/season-1/episode-1/got-the-bug-out', 'info_dict': { 'id': 'b457dd7458fd9e64b596355950b13a1ca799dc39', 'ext': 'mp4', 'title': 'Got the Bug Out', 'description': 'md5:9eeddf6248f73517b0e5969b8a43c025', 'duration': 1283.0, 'thumbnail': r're:https://images\.trutv\.com/tru/.+\.(?:jpe?g|png)', 'chapters': 'count:4', 'season': 'Season 1', 'season_number': 1, 'episode': 'Episode 1', 'episode_number': 1, 'timestamp': 1570040829, 'upload_date': '20191002', '_old_archive_ids': ['trutv b457dd7458fd9e64b596355950b13a1ca799dc39'], }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'http://www.tntdrama.com/shows/the-alienist/clips/monster', 'only_matching': True, }, { 'url': 
'http://www.tbs.com/shows/search-party/season-1/episode-1/explicit-the-mysterious-disappearance-of-the-girl-no-one-knew', 'only_matching': True, }, { 'url': 'http://www.tntdrama.com/movies/star-wars-a-new-hope', 'only_matching': True, }, { 'url': 'https://www.trutv.com/shows/impractical-jokers/season-9/episode-1/you-dirty-dog', 'only_matching': True, }, { 'url': 'https://www.trutv.com/watchtrutv/east', 'only_matching': True, }, { 'url': 'https://www.tbs.com/watchtbs/east', 'only_matching': True, }, { 'url': 'https://www.tntdrama.com/watchtnt/east', 'only_matching': True, }] def _real_extract(self, url): site, path, display_id, watch = self._match_valid_url(url).group('site', 'path', 'id', 'watch') is_live = bool(watch) webpage = self._download_webpage(url, display_id) drupal_settings = self._search_json( r'<script\b[^>]+\bdata-drupal-selector="drupal-settings-json"[^>]*>', webpage, 'drupal settings', display_id) video_data = next(v for v in drupal_settings['turner_playlist'] if is_live or v.get('url') == path) media_id = video_data['mediaID'] title = video_data['title'] tokenizer_query = urllib.parse.parse_qs(urllib.parse.urlparse( drupal_settings['ngtv_token_url']).query) auth_info = traverse_obj(drupal_settings, ('top2', {dict})) or {} site_name = auth_info.get('siteName') or self._SITE_INFO[site][0] software_statement = auth_info.get('softwareStatement') or self._SITE_INFO[site][1] info = self._extract_ngtv_info( media_id, tokenizer_query, software_statement, { 'url': url, 'site_name': site_name, 'auth_required': video_data.get('authRequired') == '1' or is_live, 'is_live': is_live, }) thumbnails = [] for image_id, image in video_data.get('images', {}).items(): image_url = image.get('url') if not image_url or image.get('type') != 'video': continue i = { 'id': image_id, 'url': image_url, } mobj = re.search(r'(\d+)x(\d+)', image_url) if mobj: i.update({ 'width': int(mobj.group(1)), 'height': int(mobj.group(2)), }) thumbnails.append(i) info.update({ 'id': media_id, 
'title': title, 'description': strip_or_none(video_data.get('descriptionNoTags') or video_data.get('shortDescriptionNoTags')), 'duration': float_or_none(video_data.get('duration')) or info.get('duration'), 'timestamp': int_or_none(video_data.get('created')), 'season_number': int_or_none(video_data.get('season')), 'episode_number': int_or_none(video_data.get('episode')), 'thumbnails': thumbnails, 'is_live': is_live, }) if site == 'trutv': info['_old_archive_ids'] = [make_archive_id(site, media_id)] return info
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/common.py
yt_dlp/extractor/common.py
import base64 import collections import contextlib import functools import getpass import http.client import http.cookiejar import http.cookies import inspect import itertools import json import math import netrc import os import random import re import subprocess import sys import time import types import urllib.parse import urllib.request import xml.etree.ElementTree from ..compat import ( compat_etree_fromstring, compat_expanduser, urllib_req_to_req, ) from ..cookies import LenientSimpleCookie from ..downloader.f4m import get_base_url, remove_encrypted_media from ..downloader.hls import HlsFD from ..globals import plugin_ies_overrides from ..networking import HEADRequest, Request from ..networking.exceptions import ( HTTPError, IncompleteRead, TransportError, network_exceptions, ) from ..utils import ( IDENTITY, JSON_LD_RE, NO_DEFAULT, ExtractorError, FormatSorter, GeoRestrictedError, GeoUtils, ISO639Utils, LenientJSONDecoder, Popen, RegexNotFoundError, RetryManager, UnsupportedError, age_restricted, base_url, bug_reports_message, classproperty, clean_html, deprecation_warning, determine_ext, dict_get, encode_data_uri, extract_attributes, filter_dict, fix_xml_ampersands, float_or_none, format_field, int_or_none, join_nonempty, js_to_json, mimetype2ext, netrc_from_content, orderedSet, parse_bitrate, parse_codecs, parse_duration, parse_iso8601, parse_m3u8_attributes, parse_resolution, qualities, sanitize_url, smuggle_url, str_or_none, str_to_int, strip_or_none, traverse_obj, truncate_string, try_call, try_get, unescapeHTML, unified_strdate, unified_timestamp, url_basename, url_or_none, urlhandle_detect_ext, urljoin, variadic, xpath_element, xpath_text, xpath_with_ns, ) from ..utils._utils import _request_dump_filename from ..utils.jslib import devalue class InfoExtractor: """Information Extractor class. Information extractors are the classes that, given a URL, extract information about the video (or videos) the URL refers to. 
This information includes the real video URL, the video title, author and others. The information is stored in a dictionary which is then passed to the YoutubeDL. The YoutubeDL processes this information possibly downloading the video to the file system, among other possible outcomes. The type field determines the type of the result. By far the most common value (and the default if _type is missing) is "video", which indicates a single video. For a video, the dictionaries must include the following fields: id: Video identifier. title: Video title, unescaped. Set to an empty string if video has no title as opposed to "None" which signifies that the extractor failed to obtain a title Additionally, it must contain either a formats entry or a url one: formats: A list of dictionaries for each format available, ordered from worst to best quality. Potential fields: * url The mandatory URL representing the media: for plain file media - HTTP URL of this file, for RTMP - RTMP URL, for HLS - URL of the M3U8 media playlist, for HDS - URL of the F4M manifest, for DASH - HTTP URL to plain file media (in case of unfragmented media) - URL of the MPD manifest or base URL representing the media if MPD manifest is parsed from a string (in case of fragmented media) for MSS - URL of the ISM manifest. * request_data Data to send in POST request to the URL * manifest_url The URL of the manifest file in case of fragmented media: for HLS - URL of the M3U8 master playlist, for HDS - URL of the F4M manifest, for DASH - URL of the MPD manifest, for MSS - URL of the ISM manifest. * manifest_stream_number (For internal use only) The index of the stream in the manifest file * ext Will be calculated from URL if missing * format A human-readable description of the format ("mp4 container with h264/opus"). Calculated from the format_id, width, height. and format_note fields if missing. * format_id A short description of the format ("mp4_h264_opus" or "19"). 
Technically optional, but strongly recommended. * format_note Additional info about the format ("3D" or "DASH video") * width Width of the video, if known * height Height of the video, if known * aspect_ratio Aspect ratio of the video, if known Automatically calculated from width and height * resolution Textual description of width and height Automatically calculated from width and height * dynamic_range The dynamic range of the video. One of: "SDR" (None), "HDR10", "HDR10+, "HDR12", "HLG, "DV" * tbr Average bitrate of audio and video in kbps (1000 bits/sec) * abr Average audio bitrate in kbps (1000 bits/sec) * acodec Name of the audio codec in use * asr Audio sampling rate in Hertz * audio_channels Number of audio channels * vbr Average video bitrate in kbps (1000 bits/sec) * fps Frame rate * vcodec Name of the video codec in use * container Name of the container format * filesize The number of bytes, if known in advance * filesize_approx An estimate for the number of bytes * player_url SWF Player URL (used for rtmpdump). * protocol The protocol that will be used for the actual download, lower-case. One of "http", "https" or one of the protocols defined in downloader.PROTOCOL_MAP * fragment_base_url Base URL for fragments. Each fragment's path value (if present) will be relative to this URL. * fragments A list of fragments of a fragmented media. Each fragment entry must contain either an url or a path. If an url is present it should be considered by a client. Otherwise both path and fragment_base_url must be present. Here is the list of all potential fields: * "url" - fragment's URL * "path" - fragment's path relative to fragment_base_url * "duration" (optional, int or float) * "filesize" (optional, int) * hls_media_playlist_data The M3U8 media playlist data as a string. Only use if the data must be modified during extraction and the native HLS downloader should bypass requesting the URL. 
Does not apply if ffmpeg is used as external downloader * is_from_start Is a live format that can be downloaded from the start. Boolean * preference Order number of this format. If this field is present and not None, the formats get sorted by this field, regardless of all other values. -1 for default (order by other properties), -2 or smaller for less than default. < -1000 to hide the format (if there is another one which is strictly better) * language Language code, e.g. "de" or "en-US". * language_preference Is this in the language mentioned in the URL? 10 if it's what the URL is about, -1 for default (don't know), -10 otherwise, other values reserved for now. * quality Order number of the video quality of this format, irrespective of the file format. -1 for default (order by other properties), -2 or smaller for less than default. * source_preference Order number for this video source (quality takes higher priority) -1 for default (order by other properties), -2 or smaller for less than default. * http_headers A dictionary of additional HTTP headers to add to the request. * stretched_ratio If given and not 1, indicates that the video's pixels are not square. width : height ratio as float. * no_resume The server does not support resuming the (HTTP or RTMP) download. Boolean. * has_drm True if the format has DRM and cannot be downloaded. 'maybe' if the format may have DRM and has to be tested before download. * extra_param_to_segment_url A query string to append to each fragment's URL, or to update each existing query string with. If it is an HLS stream with an AES-128 decryption key, the query parameters will be passed to the key URI as well, unless there is an `extra_param_to_key_url` given, or unless an external key URI is provided via `hls_aes`. Only applied by the native HLS/DASH downloaders. * extra_param_to_key_url A query string to append to the URL of the format's HLS AES-128 decryption key. Only applied by the native HLS downloader. 
* hls_aes A dictionary of HLS AES-128 decryption information used by the native HLS downloader to override the values in the media playlist when an '#EXT-X-KEY' tag is present in the playlist: * uri The URI from which the key will be downloaded * key The key (as hex) used to decrypt fragments. If `key` is given, any key URI will be ignored * iv The IV (as hex) used to decrypt fragments * impersonate Impersonate target(s). Can be any of the following entities: * an instance of yt_dlp.networking.impersonate.ImpersonateTarget * a string in the format of CLIENT[:OS] * a list or a tuple of CLIENT[:OS] strings or ImpersonateTarget instances * a boolean value; True means any impersonate target is sufficient * available_at Unix timestamp of when a format will be available to download * downloader_options A dictionary of downloader options (For internal use only) * http_chunk_size Chunk size for HTTP downloads * ffmpeg_args Extra arguments for ffmpeg downloader (input) * ffmpeg_args_out Extra arguments for ffmpeg downloader (output) * ws (NiconicoLiveFD only) WebSocketResponse * ws_url (NiconicoLiveFD only) Websockets URL * max_quality (NiconicoLiveFD only) Max stream quality string * is_dash_periods Whether the format is a result of merging multiple DASH periods. RTMP formats can also have the additional fields: page_url, app, play_path, tc_url, flash_version, rtmp_live, rtmp_conn, rtmp_protocol, rtmp_real_time url: Final video URL. ext: Video filename extension. format: The video format, defaults to ext (used for --get-format) player_url: SWF Player URL (used for rtmpdump). The following fields are optional: direct: True if a direct video file was given (must only be set by GenericIE) alt_title: A secondary title of the video. display_id: An alternative identifier for the video, not necessarily unique, but available before title. 
Typically, id is something like "4234987", title "Dancing naked mole rats", and display_id "dancing-naked-mole-rats" thumbnails: A list of dictionaries, with the following entries: * "id" (optional, string) - Thumbnail format ID * "url" * "ext" (optional, string) - actual image extension if not given in URL * "preference" (optional, int) - quality of the image * "width" (optional, int) * "height" (optional, int) * "resolution" (optional, string "{width}x{height}", deprecated) * "filesize" (optional, int) * "http_headers" (dict) - HTTP headers for the request thumbnail: Full URL to a video thumbnail image. description: Full video description. uploader: Full name of the video uploader. license: License name the video is licensed under. creators: List of creators of the video. timestamp: UNIX timestamp of the moment the video was uploaded upload_date: Video upload date in UTC (YYYYMMDD). If not explicitly set, calculated from timestamp release_timestamp: UNIX timestamp of the moment the video was released. If it is not clear whether to use timestamp or this, use the former release_date: The date (YYYYMMDD) when the video was released in UTC. If not explicitly set, calculated from release_timestamp release_year: Year (YYYY) as integer when the video or album was released. To be used if no exact release date is known. If not explicitly set, calculated from release_date. modified_timestamp: UNIX timestamp of the moment the video was last modified. modified_date: The date (YYYYMMDD) when the video was last modified in UTC. If not explicitly set, calculated from modified_timestamp uploader_id: Nickname or id of the video uploader. uploader_url: Full URL to a personal webpage of the video uploader. channel: Full name of the channel the video is uploaded on. Note that channel fields may or may not repeat uploader fields. This depends on a particular extractor. channel_id: Id of the channel. channel_url: Full URL to a channel webpage. 
channel_follower_count: Number of followers of the channel. channel_is_verified: Whether the channel is verified on the platform. location: Physical location where the video was filmed. subtitles: The available subtitles as a dictionary in the format {tag: subformats}. "tag" is usually a language code, and "subformats" is a list sorted from lower to higher preference, each element is a dictionary with the "ext" entry and one of: * "data": The subtitles file contents * "url": A URL pointing to the subtitles file It can optionally also have: * "name": Name or description of the subtitles * "http_headers": A dictionary of additional HTTP headers to add to the request. * "impersonate": Impersonate target(s); same as the "formats" field "ext" will be calculated from URL if missing automatic_captions: Like 'subtitles'; contains automatically generated captions instead of normal subtitles duration: Length of the video in seconds, as an integer or float. view_count: How many users have watched the video on the platform. concurrent_view_count: How many users are currently watching the video on the platform. 
save_count: Number of times the video has been saved or bookmarked like_count: Number of positive ratings of the video dislike_count: Number of negative ratings of the video repost_count: Number of reposts of the video average_rating: Average rating given by users, the scale used depends on the webpage comment_count: Number of comments on the video comments: A list of comments, each with one or more of the following properties (all but one of text or html optional): * "author" - human-readable name of the comment author * "author_id" - user ID of the comment author * "author_thumbnail" - The thumbnail of the comment author * "author_url" - The url to the comment author's page * "author_is_verified" - Whether the author is verified on the platform * "author_is_uploader" - Whether the comment is made by the video uploader * "id" - Comment ID * "html" - Comment as HTML * "text" - Plain text of the comment * "timestamp" - UNIX timestamp of comment * "parent" - ID of the comment this one is replying to. Set to "root" to indicate that this is a comment to the original video. * "like_count" - Number of positive ratings of the comment * "dislike_count" - Number of negative ratings of the comment * "is_favorited" - Whether the comment is marked as favorite by the video uploader * "is_pinned" - Whether the comment is pinned to the top of the comments age_limit: Age restriction for the video, as an integer (years) webpage_url: The URL to the video webpage, if given to yt-dlp it should allow to get the same result again. (It will be set by YoutubeDL if it's missing) categories: A list of categories that the video falls in, for example ["Sports", "Berlin"] tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"] cast: A list of the video cast is_live: True, False, or None (=unknown). Whether this video is a live stream that goes on instead of a fixed-length video. was_live: True, False, or None (=unknown). Whether this video was originally a live stream. 
live_status: None (=unknown), 'is_live', 'is_upcoming', 'was_live', 'not_live', or 'post_live' (was live, but VOD is not yet processed) If absent, automatically set from is_live, was_live start_time: Time in seconds where the reproduction should start, as specified in the URL. end_time: Time in seconds where the reproduction should end, as specified in the URL. chapters: A list of dictionaries, with the following entries: * "start_time" - The start time of the chapter in seconds * "end_time" - The end time of the chapter in seconds (optional: core code can determine this value from the next chapter's start_time or the video's duration) * "title" (optional, string) heatmap: A list of dictionaries, with the following entries: * "start_time" - The start time of the data point in seconds * "end_time" - The end time of the data point in seconds * "value" - The normalized value of the data point (float between 0 and 1) playable_in_embed: Whether this video is allowed to play in embedded players on other sites. Can be True (=always allowed), False (=never allowed), None (=unknown), or a string specifying the criteria for embedability; e.g. 'whitelist' availability: Under what condition the video is available. One of 'private', 'premium_only', 'subscriber_only', 'needs_auth', 'unlisted' or 'public'. Use 'InfoExtractor._availability' to set it media_type: The type of media as classified by the site, e.g. "episode", "clip", "trailer" _old_archive_ids: A list of old archive ids needed for backward compatibility. Use yt_dlp.utils.make_archive_id to generate ids _format_sort_fields: A list of fields to use for sorting formats __post_extractor: A function to be called just before the metadata is written to either disk, logger or console. The function must return a dict which will be added to the info_dict. This is useful for additional information that is time-consuming to extract. Note that the fields thus extracted will not be available to output template and match_filter. 
So, only "comments" and "comment_count" are currently allowed to be extracted via this method. The following fields should only be used when the video belongs to some logical chapter or section: chapter: Name or title of the chapter the video belongs to. chapter_number: Number of the chapter the video belongs to, as an integer. chapter_id: Id of the chapter the video belongs to, as a unicode string. The following fields should only be used when the video is an episode of some series, programme or podcast: series: Title of the series or programme the video episode belongs to. series_id: Id of the series or programme the video episode belongs to, as a unicode string. season: Title of the season the video episode belongs to. season_number: Number of the season the video episode belongs to, as an integer. season_id: Id of the season the video episode belongs to, as a unicode string. episode: Title of the video episode. Unlike mandatory video title field, this field should denote the exact title of the video episode without any kind of decoration. episode_number: Number of the video episode within a season, as an integer. episode_id: Id of the video episode, as a unicode string. The following fields should only be used when the media is a track or a part of a music album: track: Title of the track. track_number: Number of the track within an album or a disc, as an integer. track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii), as a unicode string. artists: List of artists of the track. composers: List of composers of the piece. genres: List of genres of the track. album: Title of the album the track belongs to. album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc). album_artists: List of all artists appeared on the album. E.g. ["Ash Borer", "Fell Voices"] or ["Various Artists"]. Useful for splits and compilations. disc_number: Number of the disc or other physical medium the track belongs to, as an integer. 
The following fields should only be set for clips that should be cut from the original video: section_start: Start time of the section in seconds section_end: End time of the section in seconds The following fields should only be set for storyboards: rows: Number of rows in each storyboard fragment, as an integer columns: Number of columns in each storyboard fragment, as an integer The following fields are deprecated and should not be set by new code: composer: Use "composers" instead. Composer(s) of the piece, comma-separated. artist: Use "artists" instead. Artist(s) of the track, comma-separated. genre: Use "genres" instead. Genre(s) of the track, comma-separated. album_artist: Use "album_artists" instead. All artists appeared on the album, comma-separated. creator: Use "creators" instead. The creator of the video. Unless mentioned otherwise, the fields should be Unicode strings. Unless mentioned otherwise, None is equivalent to absence of information. _type "playlist" indicates multiple videos. There must be a key "entries", which is a list, an iterable, or a PagedList object, each element of which is a valid dictionary by this specification. Additionally, playlists can have "id", "title", and any other relevant attributes with the same semantics as videos (see above). It can also have the following optional fields: playlist_count: The total number of videos in a playlist. If not given, YoutubeDL tries to calculate it from "entries" _type "multi_video" indicates that there are multiple videos that form a single show, for examples multiple acts of an opera or TV episode. It must have an entries key like a playlist and contain all the keys required for a video at the same time. _type "url" indicates that the video must be extracted from another location, possibly by a different extractor. Its only required key is: "url" - the next URL to extract. The key "ie_key" can be set to the class name (minus the trailing "IE", e.g. 
"Youtube") if the extractor class is known in advance. Additionally, the dictionary may have any properties of the resolved entity known in advance, for example "title" if the title of the referred video is known ahead of time. _type "url_transparent" entities have the same specification as "url", but indicate that the given additional information is more precise than the one associated with the resolved URL. This is useful when a site employs a video service that hosts the video and its technical metadata, but that video service does not embed a useful title, description etc. Subclasses of this should also be added to the list of extractors and should define _VALID_URL as a regexp or a Sequence of regexps, and re-define the _real_extract() and (optionally) _real_initialize() methods. Subclasses may also override suitable() if necessary, but ensure the function signature is preserved and that this function imports everything it needs (except other extractors), so that lazy_extractors works correctly. Subclasses can define a list of _EMBED_REGEX, which will be searched for in the HTML of Generic webpages. It may also override _extract_embed_urls or _extract_from_webpage as necessary. While these are normally classmethods, _extract_from_webpage is allowed to be an instance method. _extract_from_webpage may raise self.StopExtraction to stop further processing of the webpage and obtain exclusive rights to it. This is useful when the extractor cannot reliably be matched using just the URL, e.g. invidious/peertube instances Embed-only extractors can be defined by setting _VALID_URL = False. To support username + password (or netrc) login, the extractor must define a _NETRC_MACHINE and re-define _perform_login(username, password) and (optionally) _initialize_pre_login() methods. The _perform_login method will be called between _initialize_pre_login and _real_initialize if credentials are passed by the user. 
In cases where it is necessary to have the login process as part of the extraction rather than initialization, _perform_login can be left undefined. _GEO_BYPASS attribute may be set to False in order to disable geo restriction bypass mechanisms for a particular extractor. Though it won't disable explicit geo restriction bypass based on country code provided with geo_bypass_country. _GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted countries for this extractor. One of these countries will be used by geo restriction bypass mechanism right away in order to bypass geo restriction, of course, if the mechanism is not disabled. _GEO_IP_BLOCKS attribute may contain a list of presumably geo unrestricted IP blocks in CIDR notation for this extractor. One of these IP blocks will be used by geo restriction bypass mechanism similarly to _GEO_COUNTRIES. The _ENABLED attribute should be set to False for IEs that are disabled by default and must be explicitly enabled. The _WORKING attribute should be set to False for broken IEs in order to warn the users and skip the tests. """ _ready = False _downloader = None _x_forwarded_for_ip = None _GEO_BYPASS = True _GEO_COUNTRIES = None _GEO_IP_BLOCKS = None _WORKING = True _ENABLED = True _NETRC_MACHINE = None IE_DESC = None SEARCH_KEY = None _VALID_URL = None _EMBED_REGEX = [] def _login_hint(self, method=NO_DEFAULT, netrc=None): password_hint = f'--username and --password, --netrc-cmd, or --netrc ({netrc or self._NETRC_MACHINE}) to provide account credentials' cookies_hint = 'See https://github.com/yt-dlp/yt-dlp/wiki/FAQ#how-do-i-pass-cookies-to-yt-dlp for how to manually pass cookies' return { None: '', 'any': f'Use --cookies, --cookies-from-browser, {password_hint}. {cookies_hint}', 'password': f'Use {password_hint}',
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
true
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/podchaser.py
yt_dlp/extractor/podchaser.py
import functools
import json

from .common import InfoExtractor
from ..utils import (
    OnDemandPagedList,
    float_or_none,
    int_or_none,
    orderedSet,
    str_or_none,
    unified_timestamp,
    url_or_none,
)
from ..utils.traversal import require, traverse_obj


class PodchaserIE(InfoExtractor):
    """Extractor for podchaser.com podcasts and single episodes.

    A URL without an episode part yields a paged playlist of the podcast's
    episodes; a URL with an episode part yields that single episode.
    """
    _VALID_URL = r'https?://(?:www\.)?podchaser\.com/podcasts/[\w-]+-(?P<podcast_id>\d+)(?:/episodes/[\w-]+-(?P<id>\d+))?'
    _PAGE_SIZE = 100
    _TESTS = [{
        'url': 'https://www.podchaser.com/podcasts/cum-town-36924/episodes/ep-285-freeze-me-off-104365585',
        'info_dict': {
            'id': '104365585',
            'title': 'Ep. 285 – freeze me off',
            'description': 'cam ahn',
            'thumbnail': r're:https?://.+/.+\.jpg',
            'ext': 'mp3',
            'categories': ['Comedy', 'News', 'Politics', 'Arts'],
            'tags': ['comedy', 'dark humor'],
            'series': 'The Adam Friedland Show Podcast',
            'duration': 3708,
            'timestamp': 1636531259,
            'upload_date': '20211110',
            'average_rating': 4.0,
            'series_id': '36924',
        },
    }, {
        'url': 'https://www.podchaser.com/podcasts/the-bone-zone-28853',
        'info_dict': {
            'id': '28853',
            'title': 'The Bone Zone',
            'description': r're:The official home of the Bone Zone podcast.+',
        },
        'playlist_mincount': 275,
    }, {
        'url': 'https://www.podchaser.com/podcasts/sean-carrolls-mindscape-scienc-699349/episodes',
        'info_dict': {
            'id': '699349',
            'title': 'Sean Carroll\'s Mindscape: Science, Society, Philosophy, Culture, Arts, and Ideas',
            'description': 'md5:2cbd8f4749891a84dc8235342e0b5ff1',
        },
        'playlist_mincount': 225,
    }]

    @staticmethod
    def _parse_episode(episode, podcast):
        """Build an info dict for one episode, merging podcast-level metadata."""
        result = traverse_obj(episode, {
            'id': ('id', {int}, {str_or_none}, {require('episode ID')}),
            'title': ('title', {str}),
            'description': ('description', {str}),
            'url': ('audio_url', {url_or_none}),
            'thumbnail': ('image_url', {url_or_none}),
            'duration': ('length', {int_or_none}),
            'timestamp': ('air_date', {unified_timestamp}),
            'average_rating': ('rating', {float_or_none}),
        })
        result.update(traverse_obj(podcast, {
            'series': ('title', {str}),
            'series_id': ('id', {int}, {str_or_none}),
            'categories': (('summary', None), 'categories', ..., 'text', {str}, filter, all, {orderedSet}),
            'tags': ('tags', ..., 'text', {str}),
        }))
        # Podcasts are audio-only; there is never a video stream
        result['vcodec'] = 'none'

        series_id = result.get('series_id')
        if series_id:
            # Reconstruct a canonical webpage URL from the (truncated) slugs
            slug_of_podcast = traverse_obj(podcast, ('slug', {str})) or 'podcast'
            slug_of_episode = traverse_obj(episode, ('slug', {str})) or 'episode'
            podcast_segment = '-'.join((slug_of_podcast[:30].rstrip('-'), series_id))
            episode_segment = '-'.join((slug_of_episode[:30].rstrip('-'), result['id']))
            result['webpage_url'] = '/'.join((
                'https://www.podchaser.com/podcasts', podcast_segment, episode_segment))

        return result

    def _call_api(self, path, *args, **kwargs):
        """Fetch a JSON document from the Podchaser API endpoint *path*."""
        return self._download_json(f'https://api.podchaser.com/{path}', *args, **kwargs)

    def _fetch_page(self, podcast_id, podcast, page):
        """Yield parsed episodes for one page of the episode listing."""
        listing = self._call_api(
            'list/episode', podcast_id,
            headers={'Content-Type': 'application/json;charset=utf-8'},
            data=json.dumps({
                'start': page * self._PAGE_SIZE,
                'count': self._PAGE_SIZE,
                'sort_order': 'SORT_ORDER_RECENT',
                'filters': {
                    'podcast_id': podcast_id,
                },
                'options': {},
            }).encode())
        yield from (self._parse_episode(entry, podcast) for entry in listing['entities'])

    def _real_extract(self, url):
        podcast_id, episode_id = self._match_valid_url(url).group('podcast_id', 'id')
        podcast = self._call_api(f'podcasts/{podcast_id}', episode_id or podcast_id)

        if episode_id:
            episode = self._call_api(f'podcasts/{podcast_id}/episodes/{episode_id}/player_ids', episode_id)
            return self._parse_episode(episode, podcast)

        # No episode in the URL: return the whole podcast as a lazy playlist
        entries = OnDemandPagedList(
            functools.partial(self._fetch_page, podcast_id, podcast), self._PAGE_SIZE)
        return self.playlist_result(
            entries, str_or_none(podcast.get('id')),
            podcast.get('title'), podcast.get('description'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/theholetv.py
yt_dlp/extractor/theholetv.py
from .common import InfoExtractor from ..utils import extract_attributes, remove_end class TheHoleTvIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?the-hole\.tv/episodes/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://the-hole.tv/episodes/gromkii-vopros-sergey-orlov', 'md5': 'fea6682f47786f3ae5a6cbd635ec4bf9', 'info_dict': { 'id': 'gromkii-vopros-sergey-orlov', 'ext': 'mp4', 'title': 'Сергей Орлов — Громкий вопрос', 'thumbnail': 'https://assets-cdn.the-hole.tv/images/t8gan4n6zn627e7wni11b2uemqts', 'description': 'md5:45741a9202331f995d9fb76996759379', }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) player_attrs = extract_attributes(self._search_regex( r'(<div[^>]*\bdata-controller="player"[^>]*>)', webpage, 'video player')) formats, subtitles = self._extract_m3u8_formats_and_subtitles( player_attrs['data-player-source-value'], video_id, 'mp4') return { 'id': video_id, 'title': remove_end(self._html_extract_title(webpage), ' — The Hole'), 'description': self._og_search_description(webpage), 'thumbnail': player_attrs.get('data-player-poster-value'), 'formats': formats, 'subtitles': subtitles, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tubetugraz.py
yt_dlp/extractor/tubetugraz.py
from .common import InfoExtractor
from ..utils import (
    float_or_none,
    parse_resolution,
    traverse_obj,
    urlencode_postdata,
    variadic,
)


class TubeTuGrazBaseIE(InfoExtractor):
    """Shared logic for tube.tugraz.at: Shibboleth login and episode/format extraction."""
    _NETRC_MACHINE = 'tubetugraz'

    # Opencast-style search endpoint returning episode metadata as JSON
    _API_EPISODE = 'https://tube.tugraz.at/search/episode.json'
    # Each recording may be offered as a lecture-hall capture ("presentation")
    # and a speaker camera ("presenter")
    _FORMAT_TYPES = ('presentation', 'presenter')

    def _perform_login(self, username, password):
        """Log in via the Shibboleth SSO flow, with optional OTP second factor.

        Every network step is fatal=False: on failure we return silently and
        extraction proceeds unauthenticated (a warning is emitted where the
        failure is clearly a credentials problem).
        """
        urlh = self._request_webpage(
            'https://tube.tugraz.at/Shibboleth.sso/Login?target=/paella/ui/index.html',
            None, fatal=False, note='downloading login page',
            errnote='unable to fetch login page')
        if not urlh:
            return

        response = self._download_webpage_handle(
            urlh.url, None, fatal=False, headers={'referer': urlh.url},
            note='logging in', errnote='unable to log in',
            data=urlencode_postdata({
                'lang': 'de',
                '_eventId_proceed': '',
                'j_username': username,
                'j_password': password,
            }))
        if not response:
            return

        content, urlh = response
        # Landing back on the player UI means the login round-trip succeeded
        if urlh.url == 'https://tube.tugraz.at/paella/ui/index.html':
            return

        # Not logged in yet: either the password was wrong, or the IdP is now
        # asking for a one-time password (the German OTP prompt below)
        if not self._html_search_regex(
                r'<p\b[^>]*>(Bitte geben Sie einen OTP-Wert ein:)</p>',
                content, 'TFA prompt', default=None):
            self.report_warning('unable to login: incorrect password')
            return

        urlh = self._request_webpage(
            urlh.url, None, fatal=False, headers={'referer': urlh.url},
            note='logging in with TFA', errnote='unable to log in with TFA',
            data=urlencode_postdata({
                'lang': 'de',
                '_eventId_proceed': '',
                'j_tokenNumber': self._get_tfa_info(),
            }))
        if not urlh or urlh.url == 'https://tube.tugraz.at/paella/ui/index.html':
            return

        self.report_warning('unable to login: incorrect TFA code')

    def _extract_episode(self, episode_info):
        """Turn one search-API episode record into an info dict.

        Falls back from the 'mediapackage' fields to the flat Dublin-Core
        fields (dcTitle/dcCreator/dcExtent/dcIsPartOf) where available.
        """
        video_id = episode_info.get('id')
        formats = list(self._extract_formats(
            traverse_obj(episode_info, ('mediapackage', 'media', 'track')), video_id))

        title = traverse_obj(episode_info, ('mediapackage', 'title'), 'dcTitle')
        series_title = traverse_obj(episode_info, ('mediapackage', 'seriestitle'))
        # 'creator' may be a single value or a list; normalize and join
        creator = ', '.join(variadic(traverse_obj(
            episode_info, ('mediapackage', 'creators', 'creator'), 'dcCreator', default='')))
        return {
            'id': video_id,
            'title': title,
            'creator': creator or None,
            'duration': traverse_obj(episode_info, ('mediapackage', 'duration'), 'dcExtent'),
            'series': series_title,
            'series_id': traverse_obj(episode_info, ('mediapackage', 'series'), 'dcIsPartOf'),
            # Only set 'episode' when a series title exists
            'episode': series_title and title,
            'formats': formats,
        }

    def _set_format_type(self, formats, fmt_type):
        """Tag *formats* in place with their stream type; deprioritize non-presentation streams."""
        for f in formats:
            f['format_note'] = fmt_type
            if not fmt_type.startswith(self._FORMAT_TYPES[0]):
                f['preference'] = -2
        return formats

    def _extract_formats(self, format_list, video_id):
        """Yield formats from the track list, then fall back to guessed Wowza manifests.

        The HLS/DASH fallback URLs are only probed for transports that did not
        already appear in the track list (has_hls/has_dash flags).
        """
        has_hls, has_dash = False, False

        for format_info in format_list or []:
            url = traverse_obj(format_info, ('tags', 'url'), 'url')
            if url is None:
                continue

            fmt_type = format_info.get('type') or 'unknown'
            transport = (format_info.get('transport') or 'https').lower()

            if transport == 'https':
                # Direct progressive download; bitrates come in bit/s
                formats = [{
                    'url': url,
                    'abr': float_or_none(traverse_obj(format_info, ('audio', 'bitrate')), 1000),
                    'vbr': float_or_none(traverse_obj(format_info, ('video', 'bitrate')), 1000),
                    'fps': traverse_obj(format_info, ('video', 'framerate')),
                    **parse_resolution(traverse_obj(format_info, ('video', 'resolution'))),
                }]
            elif transport == 'hls':
                has_hls, formats = True, self._extract_m3u8_formats(
                    url, video_id, 'mp4', fatal=False,
                    note=f'downloading {fmt_type} HLS manifest')
            elif transport == 'dash':
                has_dash, formats = True, self._extract_mpd_formats(
                    url, video_id, fatal=False,
                    note=f'downloading {fmt_type} DASH manifest')
            else:
                # RTMP, HDS, SMOOTH, and unknown formats
                # - RTMP url fails on every tested entry until now
                # - HDS url 404's on every tested entry until now
                # - SMOOTH url 404's on every tested entry until now
                continue

            yield from self._set_format_type(formats, fmt_type)

        # TODO: Add test for these
        for fmt_type in self._FORMAT_TYPES:
            if not has_hls:
                hls_formats = self._extract_m3u8_formats(
                    f'https://wowza.tugraz.at/matterhorn_engage/smil:engage-player_{video_id}_{fmt_type}.smil/playlist.m3u8',
                    video_id, 'mp4', fatal=False,
                    note=f'Downloading {fmt_type} HLS manifest', errnote=False) or []
                yield from self._set_format_type(hls_formats, fmt_type)

            if not has_dash:
                dash_formats = self._extract_mpd_formats(
                    f'https://wowza.tugraz.at/matterhorn_engage/smil:engage-player_{video_id}_{fmt_type}.smil/manifest_mpm4sav_mvlist.mpd',
                    video_id, fatal=False,
                    note=f'Downloading {fmt_type} DASH manifest', errnote=False)
                yield from self._set_format_type(dash_formats, fmt_type)


class TubeTuGrazIE(TubeTuGrazBaseIE):
    """Extractor for single tube.tugraz.at recordings."""
    IE_DESC = 'tube.tugraz.at'

    _VALID_URL = r'''(?x)
        https?://tube\.tugraz\.at/(?:
            paella/ui/watch\.html\?(?:[^#]*&)?id=|
            portal/watch/
        )(?P<id>[0-9a-fA-F]{8}-(?:[0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12})
    '''
    _TESTS = [
        {
            'url': 'https://tube.tugraz.at/paella/ui/watch.html?id=f2634392-e40e-4ac7-9ddc-47764aa23d40',
            'md5': 'a23a3d5c9aaca2b84932fdba66e17145',
            'info_dict': {
                'id': 'f2634392-e40e-4ac7-9ddc-47764aa23d40',
                'ext': 'mp4',
                'title': '#6 (23.11.2017)',
                'episode': '#6 (23.11.2017)',
                'series': '[INB03001UF] Einführung in die strukturierte Programmierung',
                'duration': 3295818,
                'series_id': 'b1192fff-2aa7-4bf0-a5cf-7b15c3bd3b34',
                'creators': ['Safran C'],
            },
        }, {
            'url': 'https://tube.tugraz.at/paella/ui/watch.html?id=2df6d787-e56a-428d-8ef4-d57f07eef238',
            'md5': 'de0d854a56bf7318d2b693fe1adb89a5',
            'info_dict': {
                'id': '2df6d787-e56a-428d-8ef4-d57f07eef238',
                'title': 'TubeTuGraz video #2df6d787-e56a-428d-8ef4-d57f07eef238',
                'ext': 'mp4',
            },
            'expected_warnings': ['Extractor failed to obtain "title"'],
        }, {
            # Portal URL format
            'url': 'https://tube.tugraz.at/portal/watch/ab28ec60-8cbe-4f1a-9b96-a95add56c612',
            'only_matching': True,
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        episode_data = self._download_json(
            self._API_EPISODE, video_id,
            query={'id': video_id, 'limit': 1}, note='Downloading episode metadata')

        # If the search API returns nothing, fall back to a minimal record so
        # the Wowza manifest fallback in _extract_formats can still run
        episode_info = traverse_obj(
            episode_data, ('search-results', 'result'), default={'id': video_id})
        return self._extract_episode(episode_info)


class TubeTuGrazSeriesIE(TubeTuGrazBaseIE):
    """Extractor for whole tube.tugraz.at series (playlists)."""
    _VALID_URL = r'''(?x)
        https?://tube\.tugraz\.at/paella/ui/browse\.html\?series=
        (?P<id>[0-9a-fA-F]{8}-(?:[0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12})
    '''
    _TESTS = [{
        'url': 'https://tube.tugraz.at/paella/ui/browse.html?series=0e6351b7-c372-491e-8a49-2c9b7e21c5a6',
        'id': '0e6351b7-c372-491e-8a49-2c9b7e21c5a6',
        'info_dict': {
            'id': '0e6351b7-c372-491e-8a49-2c9b7e21c5a6',
            'title': '[209351] Strassenwesen',
        },
        'playlist': [
            {
                'info_dict': {
                    'id': 'ee17ce5d-34e2-48b7-a76a-fed148614e11',
                    'series_id': '0e6351b7-c372-491e-8a49-2c9b7e21c5a6',
                    'ext': 'mp4',
                    'title': '#4 Detailprojekt',
                    'episode': '#4 Detailprojekt',
                    'series': '[209351] Strassenwesen',
                    'creator': 'Neuhold R',
                    'duration': 6127024,
                },
            },
            {
                'info_dict': {
                    'id': '87350498-799a-44d3-863f-d1518a98b114',
                    'series_id': '0e6351b7-c372-491e-8a49-2c9b7e21c5a6',
                    'ext': 'mp4',
                    'title': '#3 Generelles Projekt',
                    'episode': '#3 Generelles Projekt',
                    'series': '[209351] Strassenwesen',
                    'creator': 'Neuhold R',
                    'duration': 5374422,
                },
            },
            {
                'info_dict': {
                    'id': '778599ea-489e-4189-9e05-3b4888e19bcd',
                    'series_id': '0e6351b7-c372-491e-8a49-2c9b7e21c5a6',
                    'ext': 'mp4',
                    'title': '#2 Vorprojekt',
                    'episode': '#2 Vorprojekt',
                    'series': '[209351] Strassenwesen',
                    'creator': 'Neuhold R',
                    'duration': 5566404,
                },
            },
            {
                'info_dict': {
                    'id': '75e4c71c-d99d-4e56-b0e6-4f2bcdf11f29',
                    'series_id': '0e6351b7-c372-491e-8a49-2c9b7e21c5a6',
                    'ext': 'mp4',
                    'title': '#1 Variantenstudium',
                    'episode': '#1 Variantenstudium',
                    'series': '[209351] Strassenwesen',
                    'creator': 'Neuhold R',
                    'duration': 5420200,
                },
            },
        ],
        'playlist_mincount': 4,
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        episodes_data = self._download_json(
            self._API_EPISODE, playlist_id,
            query={'sid': playlist_id}, note='Downloading episode list')
        # Series title lives in a separate (best-effort) metadata endpoint
        series_data = self._download_json(
            'https://tube.tugraz.at/series/series.json', playlist_id,
            fatal=False, note='downloading series metadata',
            errnote='failed to download series metadata',
            query={
                'seriesId': playlist_id,
                'count': 1,
                'sort': 'TITLE',
            })
        return self.playlist_result(
            map(self._extract_episode, episodes_data['search-results']['result']), playlist_id,
            traverse_obj(series_data, ('catalogs', 0, 'http://purl.org/dc/terms/', 'title', 0, 'value')))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/testurl.py
yt_dlp/extractor/testurl.py
import re from .common import InfoExtractor from ..utils import ExtractorError class TestURLIE(InfoExtractor): """ Allows addressing of the test cases as test:yout.*be_1 """ IE_DESC = False # Do not list _VALID_URL = r'test(?:url)?:(?P<extractor>.*?)(?:_(?P<num>\d+|all))?$' def _real_extract(self, url): from . import gen_extractor_classes extractor_id, num = self._match_valid_url(url).group('extractor', 'num') if not extractor_id: return {'id': ':test', 'title': '', 'url': url} rex = re.compile(extractor_id, flags=re.IGNORECASE) matching_extractors = [e for e in gen_extractor_classes() if rex.search(e.IE_NAME)] if len(matching_extractors) == 0: raise ExtractorError(f'No extractors matching {extractor_id!r} found', expected=True) elif len(matching_extractors) > 1: extractor = next(( # Check for exact match ie for ie in matching_extractors if ie.IE_NAME.lower() == extractor_id.lower() ), None) or next(( # Check for exact match without plugin suffix ie for ie in matching_extractors if ie.IE_NAME.split('+')[0].lower() == extractor_id.lower() ), None) if not extractor: raise ExtractorError( 'Found multiple matching extractors: {}'.format(' '.join(ie.IE_NAME for ie in matching_extractors)), expected=True) else: extractor = matching_extractors[0] testcases = tuple(extractor.get_testcases(True)) if num == 'all': return self.playlist_result( [self.url_result(tc['url'], extractor) for tc in testcases], url, f'{extractor.IE_NAME} tests') try: tc = testcases[int(num or 0)] except IndexError: raise ExtractorError( f'Test case {num or 0} not found, got only {len(testcases)} tests', expected=True) self.to_screen(f'Test URL: {tc["url"]}') return self.url_result(tc['url'], extractor)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/stv.py
yt_dlp/extractor/stv.py
from .common import InfoExtractor
from ..utils import (
    float_or_none,
    int_or_none,
    smuggle_url,
    str_or_none,
    try_get,
)


class STVPlayerIE(InfoExtractor):
    """Extractor for player.stv.tv; delegates actual playback to Brightcove."""
    IE_NAME = 'stv:player'
    _VALID_URL = r'https?://player\.stv\.tv/(?P<type>episode|video)/(?P<id>[a-z0-9]{4})'
    _TESTS = [{
        # shortform
        'url': 'https://player.stv.tv/video/4gwd/emmerdale/60-seconds-on-set-with-laura-norton/',
        'md5': '5adf9439c31d554f8be0707c7abe7e0a',
        'info_dict': {
            'id': '5333973339001',
            'ext': 'mp4',
            'upload_date': '20170301',
            'title': '60 seconds on set with Laura Norton',
            'description': "How many questions can Laura - a.k.a Kerry Wyatt - answer in 60 seconds? Let's find out!",
            'timestamp': 1488388054,
            'uploader_id': '1486976045',
        },
        'skip': 'this resource is unavailable outside of the UK',
    }, {
        # episodes
        'url': 'https://player.stv.tv/episode/4125/jennifer-saunders-memory-lane',
        'only_matching': True,
    }]
    # %s is the Brightcove video id resolved from the STV API
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1486976045/default_default/index.html?videoId=%s'
    # Maps the URL path type to the STV API resource name
    _PTYPE_MAP = {
        'episode': 'episodes',
        'video': 'shortform',
    }

    def _real_extract(self, url):
        ptype, video_id = self._match_valid_url(url).groups()
        webpage = self._download_webpage(url, video_id, fatal=False) or ''
        props = self._search_nextjs_data(webpage, video_id, default={}).get('props') or {}
        # The Next.js page may already embed the API response; probe its cache
        # first to avoid an extra request. Schema assumed from observed pages —
        # keys are API paths, values are cached responses; TODO confirm.
        player_api_cache = try_get(
            props, lambda x: x['initialReduxState']['playerApiCache']) or {}

        api_path, resp = None, {}
        for k, v in player_api_cache.items():
            if k.startswith(('/episodes/', '/shortform/')):
                api_path, resp = k, v
                break
        else:
            # for/else: no cached entry found — build the API path ourselves
            episode_id = str_or_none(try_get(
                props, lambda x: x['pageProps']['episodeId']))
            api_path = f'/{self._PTYPE_MAP[ptype]}/{episode_id or video_id}'

        result = resp.get('results')
        if not result:
            # Cache miss (or cached entry had no results): hit the API directly
            resp = self._download_json(
                'https://player.api.stv.tv/v1' + api_path, video_id)
            result = resp['results']

        video = result['video']
        # From here on, use the Brightcove id as the canonical video id
        video_id = str(video['id'])

        subtitles = {}
        _subtitles = result.get('_subtitles') or {}
        for ext, sub_url in _subtitles.items():
            # API labels the format 'webvtt'; yt-dlp's extension for it is 'vtt'
            subtitles.setdefault('en', []).append({
                'ext': 'vtt' if ext == 'webvtt' else ext,
                'url': sub_url,
            })

        programme = result.get('programme') or {}
        if programme.get('drmEnabled'):
            self.report_drm(video_id)

        # url_transparent: BrightcoveNew does the heavy lifting, we only
        # supply the metadata the STV API knows better
        return {
            '_type': 'url_transparent',
            'id': video_id,
            'url': smuggle_url(self.BRIGHTCOVE_URL_TEMPLATE % video_id, {'geo_countries': ['GB']}),
            'description': result.get('summary'),
            'duration': float_or_none(video.get('length'), 1000),
            'subtitles': subtitles,
            'view_count': int_or_none(result.get('views')),
            'series': programme.get('name') or programme.get('shortName'),
            'ie_key': 'BrightcoveNew',
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/hidive.py
yt_dlp/extractor/hidive.py
from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    try_get,
    url_or_none,
    urlencode_postdata,
)


class HiDiveIE(InfoExtractor):
    """Extractor for hidive.com streams (login required for most content)."""
    _VALID_URL = r'https?://(?:www\.)?hidive\.com/stream/(?P<id>(?P<title>[^/]+)/(?P<key>[^/?#&]+))'
    # Using X-Forwarded-For results in 403 HTTP error for HLS fragments,
    # so disabling geo bypass completely
    _GEO_BYPASS = False
    _NETRC_MACHINE = 'hidive'
    _LOGIN_URL = 'https://www.hidive.com/account/login'

    _TESTS = [{
        'url': 'https://www.hidive.com/stream/the-comic-artist-and-his-assistants/s01e001',
        'info_dict': {
            'id': 'the-comic-artist-and-his-assistants/s01e001',
            'ext': 'mp4',
            'title': 'the-comic-artist-and-his-assistants/s01e001',
            'series': 'the-comic-artist-and-his-assistants',
            'season_number': 1,
            'episode_number': 1,
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'Requires Authentication',
    }]

    def _perform_login(self, username, password):
        """Log in with the site's HTML form, then select a profile if needed."""
        webpage = self._download_webpage(self._LOGIN_URL, None)
        form = self._search_regex(
            r'(?s)<form[^>]+action="/account/login"[^>]*>(.+?)</form>',
            webpage, 'login form', default=None)
        if not form:
            # No form found — presumably already logged in via cookies
            return
        data = self._hidden_inputs(form)
        data.update({
            'Email': username,
            'Password': password,
        })
        login_webpage = self._download_webpage(
            self._LOGIN_URL, None, 'Logging in', data=urlencode_postdata(data))
        # If the user has multiple profiles on their account, select one. For now pick the first profile.
        profile_id = self._search_regex(
            r'<button [^>]+?data-profile-id="(\w+)"', login_webpage, 'profile id', default=None)
        if profile_id is None:
            return  # If only one profile, Hidive auto-selects it
        self._request_webpage(
            'https://www.hidive.com/ajax/chooseprofile', None,
            data=urlencode_postdata({
                'profileId': profile_id,
                'hash': self._search_regex(
                    r'\<button [^>]+?data-hash="(\w+)"', login_webpage, 'profile id hash'),
                'returnUrl': '/dashboard',
            }))

    def _call_api(self, video_id, title, key, data=None, **kwargs):
        """POST to the play-settings endpoint and return the parsed JSON (or {}).

        *data* may supply extra POST fields; Title/Key/PlayerId are always set.
        (Default changed from a mutable ``{}`` to ``None`` — same behavior,
        avoids the shared-mutable-default pitfall.)
        """
        data = {
            **(data or {}),
            'Title': title,
            'Key': key,
            'PlayerId': 'f4f895ce1ca713ba263b91caeb1daa2d08904783',
        }
        return self._download_json(
            'https://www.hidive.com/play/settings', video_id,
            data=urlencode_postdata(data), **kwargs) or {}

    def _real_extract(self, url):
        video_id, title, key = self._match_valid_url(url).group('id', 'title', 'key')
        settings = self._call_api(video_id, title, key)

        restriction = settings.get('restrictionReason')
        if restriction == 'RegionRestricted':
            self.raise_geo_restricted()
        if restriction and restriction != 'None':
            raise ExtractorError(
                f'{self.IE_NAME} said: {restriction}', expected=True)

        # parsed_urls starts with {None} so renditions without an HLS URL
        # (url_or_none -> None) are skipped by the membership test below
        formats, parsed_urls = [], {None}
        for rendition_id, rendition in settings['renditions'].items():
            # Rendition ids look like '<audio>_<version>_<extra>' — assumed
            # always three underscore-separated parts; TODO confirm
            audio, version, extra = rendition_id.split('_')
            m3u8_url = url_or_none(try_get(rendition, lambda x: x['bitrates']['hls']))
            if m3u8_url not in parsed_urls:
                parsed_urls.add(m3u8_url)
                frmt = self._extract_m3u8_formats(
                    m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id=rendition_id, fatal=False)
                for f in frmt:
                    f['language'] = audio
                    f['format_note'] = f'{version}, {extra}'
                formats.extend(frmt)

        # Second pass collects subtitles; kept separate from the format pass
        # so parsed_urls dedup order stays identical to the original behavior
        subtitles = {}
        for rendition_id, rendition in settings['renditions'].items():
            audio, version, extra = rendition_id.split('_')
            for cc_file in rendition.get('ccFiles') or []:
                cc_url = url_or_none(try_get(cc_file, lambda x: x[2]))
                # cc_file is a list: prefer the display name (normalized),
                # falling back to the language code at index 0
                cc_lang = try_get(cc_file, (lambda x: x[1].replace(' ', '-').lower(), lambda x: x[0]), str)
                if cc_url not in parsed_urls and cc_lang:
                    parsed_urls.add(cc_url)
                    subtitles.setdefault(cc_lang, []).append({'url': cc_url})

        return {
            'id': video_id,
            'title': video_id,
            'subtitles': subtitles,
            'formats': formats,
            'series': title,
            'season_number': int_or_none(
                self._search_regex(r's(\d+)', key, 'season number', default=None)),
            'episode_number': int_or_none(
                self._search_regex(r'e(\d+)', key, 'episode number', default=None)),
            'http_headers': {'Referer': url},
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/rcs.py
yt_dlp/extractor/rcs.py
import re from .common import InfoExtractor from ..networking import HEADRequest from ..utils import ( ExtractorError, base_url, clean_html, extract_attributes, get_element_html_by_class, get_element_html_by_id, int_or_none, js_to_json, mimetype2ext, sanitize_url, traverse_obj, try_call, url_basename, urljoin, ) class RCSBaseIE(InfoExtractor): # based on VideoPlayerLoader.prototype.getVideoSrc # and VideoPlayerLoader.prototype.transformSrc from # https://js2.corriereobjects.it/includes2013/LIBS/js/corriere_video.sjs _UUID_RE = r'[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}' _RCS_ID_RE = r'[\w-]+-\d{10}' _MIGRATION_MAP = { 'videoamica-vh.akamaihd': 'amica', 'media2-amica-it.akamaized': 'amica', 'corrierevam-vh.akamaihd': 'corriere', 'media2vam-corriere-it.akamaized': 'corriere', 'cormezzogiorno-vh.akamaihd': 'corrieredelmezzogiorno', 'media2vam-mezzogiorno-corriere-it.akamaized': 'corrieredelmezzogiorno', 'corveneto-vh.akamaihd': 'corrieredelveneto', 'media2vam-veneto-corriere-it.akamaized': 'corrieredelveneto', 'corbologna-vh.akamaihd': 'corrieredibologna', 'media2vam-bologna-corriere-it.akamaized': 'corrieredibologna', 'corfiorentino-vh.akamaihd': 'corrierefiorentino', 'media2vam-fiorentino-corriere-it.akamaized': 'corrierefiorentino', 'corinnovazione-vh.akamaihd': 'corriereinnovazione', 'media2-gazzanet-gazzetta-it.akamaized': 'gazzanet', 'videogazzanet-vh.akamaihd': 'gazzanet', 'videogazzaworld-vh.akamaihd': 'gazzaworld', 'gazzettavam-vh.akamaihd': 'gazzetta', 'media2vam-gazzetta-it.akamaized': 'gazzetta', 'videoiodonna-vh.akamaihd': 'iodonna', 'media2-leitv-it.akamaized': 'leitv', 'videoleitv-vh.akamaihd': 'leitv', 'videoliving-vh.akamaihd': 'living', 'media2-living-corriere-it.akamaized': 'living', 'media2-oggi-it.akamaized': 'oggi', 'videooggi-vh.akamaihd': 'oggi', 'media2-quimamme-it.akamaized': 'quimamme', 'quimamme-vh.akamaihd': 'quimamme', 'videorunning-vh.akamaihd': 'running', 'media2-style-corriere-it.akamaized': 'style', 'style-vh.akamaihd': 
'style', 'videostyle-vh.akamaihd': 'style', 'media2-stylepiccoli-it.akamaized': 'stylepiccoli', 'stylepiccoli-vh.akamaihd': 'stylepiccoli', 'doveviaggi-vh.akamaihd': 'viaggi', 'media2-doveviaggi-it.akamaized': 'viaggi', 'media2-vivimilano-corriere-it.akamaized': 'vivimilano', 'vivimilano-vh.akamaihd': 'vivimilano', 'media2-youreporter-it.akamaized': 'youreporter', } def _get_video_src(self, video): for source in traverse_obj(video, ( 'mediaProfile', 'mediaFile', lambda _, v: v.get('mimeType'))): url = source['value'] for s, r in ( ('media2vam.corriere.it.edgesuite.net', 'media2vam-corriere-it.akamaized.net'), ('media.youreporter.it.edgesuite.net', 'media-youreporter-it.akamaized.net'), ('corrierepmd.corriere.it.edgesuite.net', 'corrierepmd-corriere-it.akamaized.net'), ('media2vam-corriere-it.akamaized.net/fcs.quotidiani/vr/videos/', 'video.corriere.it/vr360/videos/'), ('http://', 'https://'), ): url = url.replace(s, r) type_ = mimetype2ext(source['mimeType']) if type_ == 'm3u8' and '-vh.akamaihd' in url: # still needed for some old content: see _TESTS #3 matches = re.search(r'(?:https?:)?//(?P<host>[\w\.\-]+)\.net/i(?P<path>.+)$', url) if matches: url = f'https://vod.rcsobjects.it/hls/{self._MIGRATION_MAP[matches.group("host")]}{matches.group("path")}' if traverse_obj(video, ('mediaProfile', 'geoblocking')) or ( type_ == 'm3u8' and 'fcs.quotidiani_!' 
in url): url = url.replace('vod.rcsobjects', 'vod-it.rcsobjects') if type_ == 'm3u8' and 'vod' in url: url = url.replace('.csmil', '.urlset') if type_ == 'mp3': url = url.replace('media2vam-corriere-it.akamaized.net', 'vod.rcsobjects.it/corriere') yield { 'type': type_, 'url': url, 'bitrate': source.get('bitrate'), } def _create_http_formats(self, m3u8_formats, video_id): for f in m3u8_formats: if f['vcodec'] == 'none': continue http_url = re.sub(r'(https?://[^/]+)/hls/([^?#]+?\.mp4).+', r'\g<1>/\g<2>', f['url']) if http_url == f['url']: continue http_f = f.copy() del http_f['manifest_url'] format_id = try_call(lambda: http_f['format_id'].replace('hls-', 'https-')) urlh = self._request_webpage(HEADRequest(http_url), video_id, fatal=False, note=f'Check filesize for {format_id}') if not urlh: continue http_f.update({ 'format_id': format_id, 'url': http_url, 'protocol': 'https', 'filesize_approx': int_or_none(urlh.headers.get('Content-Length', None)), }) yield http_f def _create_formats(self, sources, video_id): for source in sources: if source['type'] == 'm3u8': m3u8_formats = self._extract_m3u8_formats( source['url'], video_id, 'mp4', m3u8_id='hls', fatal=False) yield from m3u8_formats yield from self._create_http_formats(m3u8_formats, video_id) elif source['type'] == 'mp3': yield { 'format_id': 'https-mp3', 'ext': 'mp3', 'acodec': 'mp3', 'vcodec': 'none', 'abr': source.get('bitrate'), 'url': source['url'], } def _real_extract(self, url): cdn, video_id = self._match_valid_url(url).group('cdn', 'id') display_id, video_data = None, None if re.match(self._UUID_RE, video_id) or re.match(self._RCS_ID_RE, video_id): url = f'https://video.{cdn}/video-json/{video_id}' else: webpage = self._download_webpage(url, video_id) data_config = get_element_html_by_id('divVideoPlayer', webpage) or get_element_html_by_class('divVideoPlayer', webpage) if data_config: data_config = self._parse_json( extract_attributes(data_config).get('data-config'), video_id, fatal=False) or {} if 
data_config.get('newspaper'): cdn = f'{data_config["newspaper"]}.it' display_id, video_id = video_id, data_config.get('uuid') or video_id url = f'https://video.{cdn}/video-json/{video_id}' else: json_url = self._search_regex( r'''(?x)url\s*=\s*(["']) (?P<url> (?:https?:)?//video\.rcs\.it /fragment-includes/video-includes/[^"']+?\.json )\1;''', webpage, video_id, group='url', default=None) if json_url: video_data = self._download_json(sanitize_url(json_url, scheme='https'), video_id) display_id, video_id = video_id, video_data.get('id') or video_id if not video_data: webpage = self._download_webpage(url, video_id) video_data = self._search_json( '##start-video##', webpage, 'video data', video_id, default=None, end_pattern='##end-video##', transform_source=js_to_json) if not video_data: # try search for iframes emb = RCSEmbedsIE._extract_url(webpage) if emb: return { '_type': 'url_transparent', 'url': emb, 'ie_key': RCSEmbedsIE.ie_key(), } if not video_data: raise ExtractorError('Video data not found in the page') return { 'id': video_id, 'display_id': display_id, 'title': video_data.get('title'), 'description': (clean_html(video_data.get('description')) or clean_html(video_data.get('htmlDescription')) or self._html_search_meta('description', webpage)), 'uploader': video_data.get('provider') or cdn, 'formats': list(self._create_formats(self._get_video_src(video_data), video_id)), } class RCSEmbedsIE(RCSBaseIE): _VALID_URL = r'''(?x) https?://(?P<vid>video)\. (?P<cdn> (?: rcs| (?:corriere\w+\.)?corriere| (?:gazzanet\.)?gazzetta )\.it) /video-embed/(?P<id>[^/=&\?]+?)(?:$|\?)''' _EMBED_REGEX = [r'''(?x) (?: data-frame-src=| <iframe[^\n]+src= ) (["']) (?P<url>(?:https?:)?//video\. (?: rcs| (?:corriere\w+\.)?corriere| (?:gazzanet\.)?gazzetta ) \.it/video-embed/.+?) 
\1'''] _TESTS = [{ 'url': 'https://video.rcs.it/video-embed/iodonna-0001585037', 'md5': '0faca97df525032bb9847f690bc3720c', 'info_dict': { 'id': 'iodonna-0001585037', 'ext': 'mp4', 'title': 'Sky Arte racconta Madonna nella serie "Artist to icon"', 'description': 'md5:65b09633df9ffee57f48b39e34c9e067', 'uploader': 'rcs.it', }, }, { 'url': 'https://video.gazzanet.gazzetta.it/video-embed/gazzanet-mo05-0000260789', 'only_matching': True, }, { 'url': 'https://video.gazzetta.it/video-embed/49612410-00ca-11eb-bcd8-30d4253e0140', 'only_matching': True, }] _WEBPAGE_TESTS = [{ 'url': 'https://www.iodonna.it/video-iodonna/personaggi-video/monica-bellucci-piu-del-lavoro-oggi-per-me-sono-importanti-lamicizia-e-la-famiglia/', 'info_dict': { 'id': 'iodonna-0002033648', 'ext': 'mp4', 'title': 'Monica Bellucci: «Più del lavoro, oggi per me sono importanti l\'amicizia e la famiglia»', 'description': 'md5:daea6d9837351e56b1ab615c06bebac1', 'uploader': 'rcs.it', }, }] @staticmethod def _sanitize_url(url): url = sanitize_url(url, scheme='https') return urljoin(base_url(url), url_basename(url)) @classmethod def _extract_embed_urls(cls, url, webpage): return map(cls._sanitize_url, super()._extract_embed_urls(url, webpage)) class RCSIE(RCSBaseIE): _VALID_URL = r'''(?x)https?://(?P<vid>video|viaggi)\. (?P<cdn> (?: corrieredelmezzogiorno\. |corrieredelveneto\. |corrieredibologna\. |corrierefiorentino\. 
)?corriere\.it |(?:gazzanet\.)?gazzetta\.it) /(?!video-embed/)[^?#]+?/(?P<id>[^/\?]+)(?=\?|/$|$)''' _TESTS = [{ # json iframe directly from id 'url': 'https://video.corriere.it/sport/formula-1/vettel-guida-ferrari-sf90-mugello-suo-fianco-c-elecrerc-bendato-video-esilarante/b727632a-f9d0-11ea-91b0-38d50a849abb', 'md5': '14946840dec46ecfddf66ba4eea7d2b2', 'info_dict': { 'id': 'b727632a-f9d0-11ea-91b0-38d50a849abb', 'ext': 'mp4', 'title': 'Vettel guida la Ferrari SF90 al Mugello e al suo fianco c\'è Leclerc (bendato): il video è esilarante', 'description': 'md5:3915ce5ebb3d2571deb69a5eb85ac9b5', 'uploader': 'Corriere Tv', }, }, { # search for video id inside the page 'url': 'https://viaggi.corriere.it/video/norvegia-il-nuovo-ponte-spettacolare-sopra-la-cascata-di-voringsfossen/', 'md5': 'f22a92d9e666e80f2fffbf2825359c81', 'info_dict': { 'id': '5b7cd134-e2c1-11ea-89b3-b56dd0df2aa2', 'display_id': 'norvegia-il-nuovo-ponte-spettacolare-sopra-la-cascata-di-voringsfossen', 'ext': 'mp4', 'title': 'La nuova spettacolare attrazione in Norvegia: il ponte sopra Vøringsfossen', 'description': 'md5:18b35a291f6746c0c8dacd16e5f5f4f8', 'uploader': 'DOVE Viaggi', }, }, { # only audio format https://github.com/yt-dlp/yt-dlp/issues/5683 'url': 'https://video.corriere.it/cronaca/audio-telefonata-il-papa-becciu-santita-lettera-che-mi-ha-inviato-condanna/b94c0d20-70c2-11ed-9572-e4b947a0ebd2', 'md5': 'aaffb08d02f2ce4292a4654694c78150', 'info_dict': { 'id': 'b94c0d20-70c2-11ed-9572-e4b947a0ebd2', 'ext': 'mp3', 'title': 'L\'audio della telefonata tra il Papa e Becciu: «Santità, la lettera che mi ha inviato è una condanna»', 'description': 'md5:c0ddb61bd94a8d4e0d4bb9cda50a689b', 'uploader': 'Corriere Tv', 'formats': [{'format_id': 'https-mp3', 'ext': 'mp3'}], }, }, { # old content still needs cdn migration 'url': 'https://viaggi.corriere.it/video/milano-varallo-sesia-sul-treno-a-vapore/', 'md5': '2dfdce7af249654ad27eeba03fe1e08d', 'info_dict': { 'id': 'd8f6c8d0-f7d7-11e8-bfca-f74cf4634191', 
'display_id': 'milano-varallo-sesia-sul-treno-a-vapore', 'ext': 'mp4', 'title': 'Milano-Varallo Sesia sul treno a vapore', 'description': 'md5:6348f47aac230397fe341a74f7678d53', 'uploader': 'DOVE Viaggi', }, }, { 'url': 'https://video.corriere.it/video-360/metro-copenaghen-tutta-italiana/a248a7f0-e2db-11e9-9830-af2de6b1f945', 'only_matching': True, }] class RCSVariousIE(RCSBaseIE): _VALID_URL = r'''(?x)https?://www\. (?P<cdn> leitv\.it| youreporter\.it| amica\.it )/(?:[^/]+/)?(?P<id>[^/]+?)(?:$|\?|/)''' _TESTS = [{ 'url': 'https://www.leitv.it/benessere/mal-di-testa/', 'md5': '3b7a683d105a7313ec7513b014443631', 'info_dict': { 'id': 'leitv-0000125151', 'display_id': 'mal-di-testa', 'ext': 'mp4', 'title': 'Cervicalgia e mal di testa, il video con i suggerimenti dell\'esperto', 'description': 'md5:ae21418f34cee0b8d02a487f55bcabb5', 'uploader': 'leitv.it', }, }, { 'url': 'https://www.youreporter.it/fiume-sesia-3-ottobre-2020/', 'md5': '3989b6d603482611a2abd2f32b79f739', 'info_dict': { 'id': 'youreporter-0000332574', 'display_id': 'fiume-sesia-3-ottobre-2020', 'ext': 'mp4', 'title': 'Fiume Sesia 3 ottobre 2020', 'description': 'md5:0070eef1cc884d13c970a4125063de55', 'uploader': 'youreporter.it', }, }, { 'url': 'https://www.amica.it/video-post/saint-omer-al-cinema-il-film-leone-dargento-che-ribalta-gli-stereotipi/', 'md5': '187cce524dfd0343c95646c047375fc4', 'info_dict': { 'id': 'amica-0001225365', 'display_id': 'saint-omer-al-cinema-il-film-leone-dargento-che-ribalta-gli-stereotipi', 'ext': 'mp4', 'title': '"Saint Omer": al cinema il film Leone d\'argento che ribalta gli stereotipi', 'description': 'md5:b1c8869c2dcfd6073a2a311ba0008aa8', 'uploader': 'rcs.it', }, }]
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/uol.py
yt_dlp/extractor/uol.py
import urllib.parse from .common import InfoExtractor from ..utils import ( clean_html, int_or_none, parse_duration, parse_iso8601, qualities, update_url_query, ) class UOLIE(InfoExtractor): IE_NAME = 'uol.com.br' _VALID_URL = r'https?://(?:.+?\.)?uol\.com\.br/.*?(?:(?:mediaId|v)=|view/(?:[a-z0-9]+/)?|video(?:=|/(?:\d{4}/\d{2}/\d{2}/)?))(?P<id>\d+|[\w-]+-[A-Z0-9]+)' _TESTS = [{ 'url': 'http://player.mais.uol.com.br/player_video_v3.swf?mediaId=15951931', 'md5': '4f1e26683979715ff64e4e29099cf020', 'info_dict': { 'id': '15951931', 'ext': 'mp4', 'title': 'Miss simpatia é encontrada morta', 'description': 'md5:3f8c11a0c0556d66daf7e5b45ef823b2', 'timestamp': 1470421860, 'upload_date': '20160805', }, }, { 'url': 'http://tvuol.uol.com.br/video/incendio-destroi-uma-das-maiores-casas-noturnas-de-londres-04024E9A3268D4C95326', 'md5': '2850a0e8dfa0a7307e04a96c5bdc5bc2', 'info_dict': { 'id': '15954259', 'ext': 'mp4', 'title': 'Incêndio destrói uma das maiores casas noturnas de Londres', 'description': 'Em Londres, um incêndio destruiu uma das maiores boates da cidade. 
Não há informações sobre vítimas.', 'timestamp': 1470674520, 'upload_date': '20160808', }, }, { 'url': 'http://mais.uol.com.br/static/uolplayer/index.html?mediaId=15951931', 'only_matching': True, }, { 'url': 'http://mais.uol.com.br/view/15954259', 'only_matching': True, }, { 'url': 'http://noticias.band.uol.com.br/brasilurgente/video/2016/08/05/15951931/miss-simpatia-e-encontrada-morta.html', 'only_matching': True, }, { 'url': 'http://videos.band.uol.com.br/programa.asp?e=noticias&pr=brasil-urgente&v=15951931&t=Policia-desmonte-base-do-PCC-na-Cracolandia', 'only_matching': True, }, { 'url': 'http://mais.uol.com.br/view/cphaa0gl2x8r/incendio-destroi-uma-das-maiores-casas-noturnas-de-londres-04024E9A3268D4C95326', 'only_matching': True, }, { 'url': 'http://noticias.uol.com.br//videos/assistir.htm?video=rafaela-silva-inspira-criancas-no-judo-04024D983968D4C95326', 'only_matching': True, }, { 'url': 'http://mais.uol.com.br/view/e0qbgxid79uv/15275470', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) video_data = self._download_json( # https://api.mais.uol.com.br/apiuol/v4/player/data/[MEDIA_ID] 'https://api.mais.uol.com.br/apiuol/v3/media/detail/' + video_id, video_id)['item'] media_id = str(video_data['mediaId']) title = video_data['title'] ver = video_data.get('revision', 2) uol_formats = self._download_json( f'https://croupier.mais.uol.com.br/v3/formats/{media_id}/jsonp', media_id) quality = qualities(['mobile', 'WEBM', '360p', '720p', '1080p']) formats = [] for format_id, f in uol_formats.items(): if not isinstance(f, dict): continue f_url = f.get('url') or f.get('secureUrl') if not f_url: continue query = { 'ver': ver, 'r': 'http://mais.uol.com.br', } for k in ('token', 'sign'): v = f.get(k) if v: query[k] = v f_url = update_url_query(f_url, query) if format_id == 'HLS': m3u8_formats = self._extract_m3u8_formats( f_url, media_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) encoded_query = urllib.parse.urlencode(query) 
for m3u8_f in m3u8_formats: m3u8_f['extra_param_to_segment_url'] = encoded_query m3u8_f['url'] = update_url_query(m3u8_f['url'], query) formats.extend(m3u8_formats) continue formats.append({ 'format_id': format_id, 'url': f_url, 'quality': quality(format_id), }) tags = [] for tag in video_data.get('tags', []): tag_description = tag.get('description') if not tag_description: continue tags.append(tag_description) thumbnails = [] for q in ('Small', 'Medium', 'Wmedium', 'Large', 'Wlarge', 'Xlarge'): q_url = video_data.get('thumb' + q) if not q_url: continue thumbnails.append({ 'id': q, 'url': q_url, }) return { 'id': media_id, 'title': title, 'description': clean_html(video_data.get('description')), 'thumbnails': thumbnails, 'duration': parse_duration(video_data.get('duration')), 'tags': tags, 'formats': formats, 'timestamp': parse_iso8601(video_data.get('publishDate'), ' '), 'view_count': int_or_none(video_data.get('viewsQtty')), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/fancode.py
yt_dlp/extractor/fancode.py
from .common import InfoExtractor from ..utils import ExtractorError, mimetype2ext, parse_iso8601, try_get class FancodeVodIE(InfoExtractor): _WORKING = False IE_NAME = 'fancode:vod' _VALID_URL = r'https?://(?:www\.)?fancode\.com/video/(?P<id>[0-9]+)\b' _TESTS = [{ 'url': 'https://fancode.com/video/15043/match-preview-pbks-vs-mi', 'params': { 'skip_download': True, }, 'info_dict': { 'id': '6249806281001', 'ext': 'mp4', 'title': 'Match Preview: PBKS vs MI', 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1619081590, 'view_count': int, 'like_count': int, 'upload_date': '20210422', 'uploader_id': '6008340455001', }, }, { 'url': 'https://fancode.com/video/15043', 'only_matching': True, }] _ACCESS_TOKEN = None _NETRC_MACHINE = 'fancode' _LOGIN_HINT = 'Use "--username refresh --password <refresh_token>" to login using a refresh token' headers = { 'content-type': 'application/json', 'origin': 'https://fancode.com', 'referer': 'https://fancode.com', } def _perform_login(self, username, password): # Access tokens are shortlived, so get them using the refresh token. if username != 'refresh': self.report_warning(f'Login using username and password is not currently supported. {self._LOGIN_HINT}') self.report_login() data = '''{ "query":"mutation RefreshToken($refreshToken: String\\u0021) { refreshToken(refreshToken: $refreshToken) { accessToken }}", "variables":{ "refreshToken":"%s" }, "operationName":"RefreshToken" }''' % password # noqa: UP031 token_json = self.download_gql('refresh token', data, 'Getting the Access token') self._ACCESS_TOKEN = try_get(token_json, lambda x: x['data']['refreshToken']['accessToken']) if self._ACCESS_TOKEN is None: self.report_warning('Failed to get Access token') else: self.headers.update({'Authorization': f'Bearer {self._ACCESS_TOKEN}'}) def _check_login_required(self, is_available, is_premium): msg = None if is_premium and self._ACCESS_TOKEN is None: msg = f'This video is only available for registered users. 
{self._LOGIN_HINT}' elif not is_available and self._ACCESS_TOKEN is not None: msg = 'This video isn\'t available to the current logged in account' if msg: self.raise_login_required(msg, metadata_available=True, method=None) def download_gql(self, variable, data, note, fatal=False, headers=headers): return self._download_json( 'https://www.fancode.com/graphql', variable, data=data.encode(), note=note, headers=headers, fatal=fatal) def _real_extract(self, url): BRIGHTCOVE_URL_TEMPLATE = 'https://players.brightcove.net/%s/default_default/index.html?videoId=%s' video_id = self._match_id(url) brightcove_user_id = '6008340455001' data = '''{ "query":"query Video($id: Int\\u0021, $filter: SegmentFilter) { media(id: $id, filter: $filter) { id contentId title contentId publishedTime totalViews totalUpvotes provider thumbnail { src } mediaSource {brightcove } duration isPremium isUserEntitled tags duration }}", "variables":{ "id":%s, "filter":{ "contentDataType":"DEFAULT" } }, "operationName":"Video" }''' % video_id # noqa: UP031 metadata_json = self.download_gql(video_id, data, note='Downloading metadata') media = try_get(metadata_json, lambda x: x['data']['media'], dict) or {} brightcove_video_id = try_get(media, lambda x: x['mediaSource']['brightcove'], str) if brightcove_video_id is None: raise ExtractorError('Unable to extract brightcove Video ID') is_premium = media.get('isPremium') self._check_login_required(media.get('isUserEntitled'), is_premium) return { '_type': 'url_transparent', 'url': BRIGHTCOVE_URL_TEMPLATE % (brightcove_user_id, brightcove_video_id), 'ie_key': 'BrightcoveNew', 'id': video_id, 'title': media['title'], 'like_count': media.get('totalUpvotes'), 'view_count': media.get('totalViews'), 'tags': media.get('tags'), 'release_timestamp': parse_iso8601(media.get('publishedTime')), 'availability': self._availability(needs_premium=is_premium), } class FancodeLiveIE(FancodeVodIE): # XXX: Do not subclass from concrete IE _WORKING = False IE_NAME = 
'fancode:live' _VALID_URL = r'https?://(www\.)?fancode\.com/match/(?P<id>[0-9]+).+' _TESTS = [{ 'url': 'https://fancode.com/match/35328/cricket-fancode-ecs-hungary-2021-bub-vs-blb?slug=commentary', 'info_dict': { 'id': '35328', 'ext': 'mp4', 'title': 'BUB vs BLB', 'timestamp': 1624863600, 'is_live': True, 'upload_date': '20210628', }, 'skip': 'Ended', }, { 'url': 'https://fancode.com/match/35328/', 'only_matching': True, }, { 'url': 'https://fancode.com/match/35567?slug=scorecard', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) data = '''{ "query":"query MatchResponse($id: Int\\u0021, $isLoggedIn: Boolean\\u0021) { match: matchWithScores(id: $id) { id matchDesc mediaId videoStreamId videoStreamUrl { ...VideoSource } liveStreams { videoStreamId videoStreamUrl { ...VideoSource } contentId } name startTime streamingStatus isPremium isUserEntitled @include(if: $isLoggedIn) status metaTags bgImage { src } sport { name slug } tour { id name } squads { name shortName } liveStreams { contentId } mediaId }}fragment VideoSource on VideoSource { title description posterUrl url deliveryType playerType}", "variables":{ "id":%s, "isLoggedIn":true }, "operationName":"MatchResponse" }''' % video_id # noqa: UP031 info_json = self.download_gql(video_id, data, 'Info json') match_info = try_get(info_json, lambda x: x['data']['match']) if match_info.get('streamingStatus') != 'STARTED': raise ExtractorError('The stream can\'t be accessed', expected=True) self._check_login_required(match_info.get('isUserEntitled'), True) # all live streams are premium only return { 'id': video_id, 'title': match_info.get('name'), 'formats': self._extract_akamai_formats(try_get(match_info, lambda x: x['videoStreamUrl']['url']), video_id), 'ext': mimetype2ext(try_get(match_info, lambda x: x['videoStreamUrl']['deliveryType'])), 'is_live': True, 'release_timestamp': parse_iso8601(match_info.get('startTime')), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/toutv.py
yt_dlp/extractor/toutv.py
import json from .radiocanada import RadioCanadaIE from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, int_or_none, merge_dicts, ) class TouTvIE(RadioCanadaIE): # XXX: Do not subclass from concrete IE _NETRC_MACHINE = 'toutv' IE_NAME = 'tou.tv' _VALID_URL = r'https?://ici\.tou\.tv/(?P<id>[a-zA-Z0-9_-]+(?:/S[0-9]+[EC][0-9]+)?)' _TESTS = [{ 'url': 'http://ici.tou.tv/garfield-tout-court/S2015E17', 'info_dict': { 'id': '122017', 'ext': 'mp4', 'title': 'Saison 2015 Épisode 17', 'description': 'La photo de famille 2', 'upload_date': '20100717', }, 'params': { # m3u8 download 'skip_download': True, }, 'skip': '404 Not Found', }, { 'url': 'http://ici.tou.tv/hackers', 'only_matching': True, }, { 'url': 'https://ici.tou.tv/l-age-adulte/S01C501', 'only_matching': True, }] _CLIENT_KEY = '90505c8d-9c34-4f34-8da1-3a85bdc6d4f4' def _perform_login(self, username, password): try: self._access_token = self._download_json( 'https://services.radio-canada.ca/toutv/profiling/accounts/login', None, 'Logging in', data=json.dumps({ 'ClientId': self._CLIENT_KEY, 'ClientSecret': '34026772-244b-49b6-8b06-317b30ac9a20', 'Email': username, 'Password': password, 'Scope': 'id.write media-validation.read', }).encode(), headers={ 'Authorization': 'client-key ' + self._CLIENT_KEY, 'Content-Type': 'application/json;charset=utf-8', })['access_token'] except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 401: error = self._parse_json(e.cause.response.read().decode(), None)['Message'] raise ExtractorError(error, expected=True) raise self._claims = self._call_api('validation/v2/getClaims')['claims'] def _real_extract(self, url): path = self._match_id(url) metadata = self._download_json( f'https://services.radio-canada.ca/toutv/presentation/{path}', path, query={ 'client_key': self._CLIENT_KEY, 'device': 'web', 'version': 4, }) # IsDrm does not necessarily mean the video is DRM protected (see # 
https://github.com/ytdl-org/youtube-dl/issues/13994). if not self.get_param('allow_unplayable_formats') and metadata.get('IsDrm'): self.report_warning('This video is probably DRM protected.', path) video_id = metadata['IdMedia'] details = metadata['Details'] return merge_dicts({ 'id': video_id, 'title': details.get('OriginalTitle'), 'description': details.get('Description'), 'thumbnail': details.get('ImageUrl'), 'duration': int_or_none(details.get('LengthInSeconds')), 'series': metadata.get('ProgramTitle'), 'season_number': int_or_none(metadata.get('SeasonNumber')), 'season': metadata.get('SeasonTitle'), 'episode_number': int_or_none(metadata.get('EpisodeNumber')), 'episode': metadata.get('EpisodeTitle'), }, self._extract_info(metadata.get('AppCode', 'toutv'), video_id))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/youporn.py
yt_dlp/extractor/youporn.py
import itertools import re from .common import InfoExtractor from ..utils import ( ExtractorError, clean_html, extract_attributes, get_element_by_class, get_element_by_id, get_elements_html_by_class, int_or_none, merge_dicts, parse_count, parse_qs, traverse_obj, unified_strdate, url_or_none, urljoin, ) class YouPornIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?youporn\.com/(?:watch|embed)/(?P<id>\d+)(?:/(?P<display_id>[^/?#&]+))?/?(?:[#?]|$)' _EMBED_REGEX = [r'<iframe[^>]+\bsrc=["\'](?P<url>(?:https?:)?//(?:www\.)?youporn\.com/embed/\d+)'] _TESTS = [{ 'url': 'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/', 'md5': '3744d24c50438cf5b6f6d59feb5055c2', 'info_dict': { 'id': '505835', 'display_id': 'sex-ed-is-it-safe-to-masturbate-daily', 'ext': 'mp4', 'title': 'Sex Ed: Is It Safe To Masturbate Daily?', 'description': 'Love & Sex Answers: http://bit.ly/DanAndJenn -- Is It Unhealthy To Masturbate Daily?', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 210, 'uploader': 'Ask Dan And Jennifer', 'upload_date': '20101217', 'average_rating': int, 'view_count': int, 'categories': list, 'tags': list, 'age_limit': 18, }, 'skip': 'This video has been deactivated', }, { # Unknown uploader 'url': 'http://www.youporn.com/watch/561726/big-tits-awesome-brunette-on-amazing-webcam-show/?from=related3&al=2&from_id=561726&pos=4', 'info_dict': { 'id': '561726', 'display_id': 'big-tits-awesome-brunette-on-amazing-webcam-show', 'ext': 'mp4', 'title': 'Big Tits Awesome Brunette On amazing webcam show', 'description': 'http://sweetlivegirls.com Big Tits Awesome Brunette On amazing webcam show.mp4', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'Unknown', 'upload_date': '20110418', 'average_rating': int, 'view_count': int, 'categories': list, 'tags': list, 'age_limit': 18, }, 'params': { 'skip_download': True, }, 'skip': '404', }, { 'url': 'https://www.youporn.com/embed/505835/sex-ed-is-it-safe-to-masturbate-daily/', 'only_matching': True, }, { 'url': 
'http://www.youporn.com/watch/505835', 'only_matching': True, }, { 'url': 'https://www.youporn.com/watch/13922959/femdom-principal/', 'only_matching': True, }, { 'url': 'https://www.youporn.com/watch/16290308/tinderspecial-trailer1/', 'info_dict': { 'id': '16290308', 'age_limit': 18, 'categories': [], 'display_id': 'tinderspecial-trailer1', 'duration': 298.0, 'ext': 'mp4', 'upload_date': '20201123', 'uploader': 'Ersties', 'tags': [], 'thumbnail': r're:https://.+\.jpg', 'timestamp': 1606147564, 'title': 'Tinder In Real Life', 'view_count': int, }, }] def _real_extract(self, url): video_id, display_id = self._match_valid_url(url).group('id', 'display_id') self._set_cookie('.youporn.com', 'age_verified', '1') webpage = self._download_webpage(f'https://www.youporn.com/watch/{video_id}', video_id) watchable = self._search_regex( r'''(<div\s[^>]*\bid\s*=\s*('|")?watch-container(?(2)\2|(?!-)\b)[^>]*>)''', webpage, 'watchability', default=None) if not watchable: msg = re.split(r'\s{2}', clean_html(get_element_by_id('mainContent', webpage)) or '')[0] raise ExtractorError( f'{self.IE_NAME} says: {msg}' if msg else 'Video unavailable', expected=True) player_vars = self._search_json(r'\bplayervars\s*:', webpage, 'player vars', video_id) definitions = player_vars['mediaDefinitions'] def get_format_data(data, stream_type): info_url = traverse_obj(data, (lambda _, v: v['format'] == stream_type, 'videoUrl', {url_or_none}, any)) if not info_url: return [] return traverse_obj( self._download_json(info_url, video_id, f'Downloading {stream_type} info JSON', fatal=False), lambda _, v: v['format'] == stream_type and url_or_none(v['videoUrl'])) formats = [] # Try to extract only the actual master m3u8 first, avoiding the duplicate single resolution "master" m3u8s for hls_url in traverse_obj(get_format_data(definitions, 'hls'), ( lambda _, v: not isinstance(v['defaultQuality'], bool), 'videoUrl'), (..., 'videoUrl')): formats.extend(self._extract_m3u8_formats(hls_url, video_id, 'mp4', 
fatal=False, m3u8_id='hls')) for definition in get_format_data(definitions, 'mp4'): f = traverse_obj(definition, { 'url': 'videoUrl', 'filesize': ('videoSize', {int_or_none}), }) height = int_or_none(definition.get('quality')) # Video URL's path looks like this: # /201012/17/505835/720p_1500k_505835/YouPorn%20-%20Sex%20Ed%20Is%20It%20Safe%20To%20Masturbate%20Daily.mp4 # /201012/17/505835/vl_240p_240k_505835/YouPorn%20-%20Sex%20Ed%20Is%20It%20Safe%20To%20Masturbate%20Daily.mp4 # /videos/201703/11/109285532/1080P_4000K_109285532.mp4 # We will benefit from it by extracting some metadata mobj = re.search(r'(?P<height>\d{3,4})[pP]_(?P<bitrate>\d+)[kK]_\d+', definition['videoUrl']) if mobj: if not height: height = int(mobj.group('height')) bitrate = int(mobj.group('bitrate')) f.update({ 'format_id': f'{height}p-{bitrate}k', 'tbr': bitrate, }) f['height'] = height formats.append(f) title = self._html_search_regex( r'(?s)<div[^>]+class=["\']watchVideoTitle[^>]+>(.+?)</div>', webpage, 'title', default=None) or self._og_search_title( webpage, default=None) or self._html_search_meta( 'title', webpage, fatal=True) description = self._html_search_regex( r'(?s)<div[^>]+\bid=["\']description["\'][^>]*>(.+?)</div>', webpage, 'description', default=None) or self._og_search_description( webpage, default=None) thumbnail = self._search_regex( r'(?:imageurl\s*=|poster\s*:)\s*(["\'])(?P<thumbnail>.+?)\1', webpage, 'thumbnail', fatal=False, group='thumbnail') duration = traverse_obj(player_vars, ('duration', {int_or_none})) if duration is None: duration = int_or_none(self._html_search_meta( 'video:duration', webpage, 'duration', fatal=False)) uploader = self._html_search_regex( r'(?s)<div[^>]+class=["\']submitByLink["\'][^>]*>(.+?)</div>', webpage, 'uploader', fatal=False) upload_date = unified_strdate(self._html_search_regex( (r'UPLOADED:\s*<span>([^<]+)', r'Date\s+[Aa]dded:\s*<span>([^<]+)', r'''(?s)<div[^>]+class=["']videoInfo(?:Date|Time)\b[^>]*>(.+?)</div>''', 
r'(?s)<label\b[^>]*>Uploaded[^<]*</label>\s*<span\b[^>]*>(.+?)</span>'), webpage, 'upload date', fatal=False)) age_limit = self._rta_search(webpage) view_count = None views = self._search_regex( r'(<div [^>]*\bdata-value\s*=[^>]+>)\s*<label>Views:</label>', webpage, 'views', default=None) if views: view_count = parse_count(extract_attributes(views).get('data-value')) comment_count = parse_count(self._search_regex( r'>All [Cc]omments? \(([\d,.]+)\)', webpage, 'comment count', default=None)) def extract_tag_box(regex, title): tag_box = self._search_regex(regex, webpage, title, default=None) if not tag_box: return [] return re.findall(r'<a[^>]+href=[^>]+>([^<]+)', tag_box) categories = extract_tag_box( r'(?s)Categories:.*?</[^>]+>(.+?)</div>', 'categories') tags = extract_tag_box( r'(?s)Tags:.*?</div>\s*<div[^>]+class=["\']tagBoxContent["\'][^>]*>(.+?)</div>', 'tags') data = self._search_json_ld(webpage, video_id, expected_type='VideoObject', fatal=False) data.pop('url', None) result = merge_dicts(data, { 'id': video_id, 'display_id': display_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'uploader': uploader, 'upload_date': upload_date, 'view_count': view_count, 'comment_count': comment_count, 'categories': categories, 'tags': tags, 'age_limit': age_limit, 'formats': formats, }) # Remove SEO spam "description" description = result.get('description') if description and description.startswith(f'Watch {result.get("title")} online'): del result['description'] return result class YouPornListBaseIE(InfoExtractor): def _get_next_url(self, url, pl_id, html): return urljoin(url, self._search_regex( r'''<a [^>]*?\bhref\s*=\s*("|')(?P<url>(?:(?!\1)[^>])+)\1''', get_element_by_id('next', html) or '', 'next page', group='url', default=None)) @classmethod def _get_title_from_slug(cls, title_slug): return re.sub(r'[_-]', ' ', title_slug) def _entries(self, url, pl_id, html=None, page_num=None): start = page_num or 1 for page in 
itertools.count(start): if not html: html = self._download_webpage( url, pl_id, note=f'Downloading page {page}', fatal=page == start) if not html: return for element in get_elements_html_by_class('video-title', html): if video_url := traverse_obj(element, ({extract_attributes}, 'href', {urljoin(url)})): yield self.url_result(video_url) if page_num is not None: return next_url = self._get_next_url(url, pl_id, html) if not next_url or next_url == url: return url = next_url html = None def _real_extract(self, url, html=None): m_dict = self._match_valid_url(url).groupdict() pl_id, page_type, sort = (m_dict.get(k) for k in ('id', 'type', 'sort')) qs = {k: v[-1] for k, v in parse_qs(url).items() if v} base_id = pl_id or 'YouPorn' title = self._get_title_from_slug(base_id) if page_type: title = f'{page_type.capitalize()} {title}' base_id = [base_id.lower()] if sort is None: title += ' videos' else: title = f'{title} videos by {re.sub(r"[_-]", " ", sort)}' base_id.append(sort) if qs: filters = list(map('='.join, sorted(qs.items()))) title += f' ({",".join(filters)})' base_id.extend(filters) pl_id = '/'.join(base_id) return self.playlist_result( self._entries(url, pl_id, html=html, page_num=int_or_none(qs.get('page'))), playlist_id=pl_id, playlist_title=title) class YouPornCategoryIE(YouPornListBaseIE): IE_DESC = 'YouPorn category, with sorting, filtering and pagination' _VALID_URL = r'''(?x) https?://(?:www\.)?youporn\.com/ (?P<type>category)/(?P<id>[^/?#&]+) (?:/(?P<sort>popular|views|rating|time|duration))?/?(?:[#?]|$) ''' _TESTS = [{ 'note': 'Full list with pagination', 'url': 'https://www.youporn.com/category/popular-with-women/popular/', 'info_dict': { 'id': 'popular-with-women/popular', 'title': 'Category popular with women videos by popular', }, 'playlist_mincount': 39, }, { 'note': 'Filtered paginated list with single page result', 'url': 'https://www.youporn.com/category/popular-with-women/duration/?min_minutes=10', 'info_dict': { 'id': 
'popular-with-women/duration/min_minutes=10', 'title': 'Category popular with women videos by duration (min_minutes=10)', }, 'playlist_mincount': 2, # 'playlist_maxcount': 30, }, { 'note': 'Single page of full list', 'url': 'https://www.youporn.com/category/popular-with-women/popular?page=1', 'info_dict': { 'id': 'popular-with-women/popular/page=1', 'title': 'Category popular with women videos by popular (page=1)', }, 'playlist_count': 36, }] class YouPornChannelIE(YouPornListBaseIE): IE_DESC = 'YouPorn channel, with sorting and pagination' _VALID_URL = r'''(?x) https?://(?:www\.)?youporn\.com/ (?P<type>channel)/(?P<id>[^/?#&]+) (?:/(?P<sort>rating|views|duration))?/?(?:[#?]|$) ''' _TESTS = [{ 'note': 'Full list with pagination', 'url': 'https://www.youporn.com/channel/x-feeds/', 'info_dict': { 'id': 'x-feeds', 'title': 'Channel X-Feeds videos', }, 'playlist_mincount': 37, }, { 'note': 'Single page of full list (no filters here)', 'url': 'https://www.youporn.com/channel/x-feeds/duration?page=1', 'info_dict': { 'id': 'x-feeds/duration/page=1', 'title': 'Channel X-Feeds videos by duration (page=1)', }, 'playlist_count': 24, }] @staticmethod def _get_title_from_slug(title_slug): return re.sub(r'_', ' ', title_slug).title() class YouPornCollectionIE(YouPornListBaseIE): IE_DESC = 'YouPorn collection (user playlist), with sorting and pagination' _VALID_URL = r'''(?x) https?://(?:www\.)?youporn\.com/ (?P<type>collection)s/videos/(?P<id>\d+) (?:/(?P<sort>rating|views|time|duration))?/?(?:[#?]|$) ''' _TESTS = [{ 'note': 'Full list with pagination', 'url': 'https://www.youporn.com/collections/videos/33044251/', 'info_dict': { 'id': '33044251', 'title': 'Collection Sexy Lips videos', 'uploader': 'ph-littlewillyb', }, 'playlist_mincount': 50, }, { 'note': 'Single page of full list (no filters here)', 'url': 'https://www.youporn.com/collections/videos/33044251/time?page=1', 'info_dict': { 'id': '33044251/time/page=1', 'title': 'Collection Sexy Lips videos by time (page=1)', 
'uploader': 'ph-littlewillyb', }, 'playlist_count': 20, }] def _real_extract(self, url): pl_id = self._match_id(url) html = self._download_webpage(url, pl_id) playlist = super()._real_extract(url, html=html) infos = re.sub(r'\s+', ' ', clean_html(get_element_by_class( 'collection-infos', html)) or '') title, uploader = self._search_regex( r'^\s*Collection: (?P<title>.+?) \d+ VIDEOS \d+ VIEWS \d+ days LAST UPDATED From: (?P<uploader>[\w_-]+)', infos, 'title/uploader', group=('title', 'uploader'), default=(None, None)) if title: playlist.update({ 'title': playlist['title'].replace(playlist['id'].split('/')[0], title), 'uploader': uploader, }) return playlist class YouPornTagIE(YouPornListBaseIE): IE_DESC = 'YouPorn tag (porntags), with sorting, filtering and pagination' _VALID_URL = r'''(?x) https?://(?:www\.)?youporn\.com/ porn(?P<type>tag)s/(?P<id>[^/?#&]+) (?:/(?P<sort>views|rating|time|duration))?/?(?:[#?]|$) ''' _TESTS = [{ 'note': 'Full list with pagination', 'url': 'https://www.youporn.com/porntags/austrian', 'info_dict': { 'id': 'austrian', 'title': 'Tag austrian videos', }, 'playlist_mincount': 33, 'expected_warnings': ['YouPorn tag pages are not correctly cached'], }, { 'note': 'Filtered paginated list with single page result', 'url': 'https://www.youporn.com/porntags/austrian/duration/?min_minutes=10', 'info_dict': { 'id': 'austrian/duration/min_minutes=10', 'title': 'Tag austrian videos by duration (min_minutes=10)', }, 'playlist_mincount': 10, # number of videos per page is (row x col) 2x3 + 6x4 + 2, or + 3, # or more, varying with number of ads; let's set max as 9x4 # NB col 1 may not be shown in non-JS page with site CSS and zoom 100% # 'playlist_maxcount': 32, 'expected_warnings': ['YouPorn tag pages are not correctly cached'], }, { 'note': 'Single page of full list', 'url': 'https://www.youporn.com/porntags/austrian/?page=1', 'info_dict': { 'id': 'austrian/page=1', 'title': 'Tag austrian videos (page=1)', }, 'playlist_mincount': 32, # 
'playlist_maxcount': 34, 'expected_warnings': ['YouPorn tag pages are not correctly cached'], }] def _real_extract(self, url): self.report_warning( 'YouPorn tag pages are not correctly cached and ' 'often return incorrect results', only_once=True) return super()._real_extract(url) class YouPornStarIE(YouPornListBaseIE): IE_DESC = 'YouPorn Pornstar, with description, sorting and pagination' _VALID_URL = r'''(?x) https?://(?:www\.)?youporn\.com/ (?P<type>pornstar)/(?P<id>[^/?#&]+) (?:/(?P<sort>rating|views|duration))?/?(?:[#?]|$) ''' _TESTS = [{ 'note': 'Full list with pagination', 'url': 'https://www.youporn.com/pornstar/daynia/', 'info_dict': { 'id': 'daynia', 'title': 'Pornstar Daynia videos', 'description': r're:Daynia Rank \d+ Videos \d+ Views [\d,.]+ .+ Subscribers \d+', }, 'playlist_mincount': 40, }, { 'note': 'Single page of full list (no filters here)', 'url': 'https://www.youporn.com/pornstar/daynia/?page=1', 'info_dict': { 'id': 'daynia/page=1', 'title': 'Pornstar Daynia videos (page=1)', 'description': 're:.{180,}', }, 'playlist_count': 26, }] @staticmethod def _get_title_from_slug(title_slug): return re.sub(r'_', ' ', title_slug).title() def _real_extract(self, url): pl_id = self._match_id(url) html = self._download_webpage(url, pl_id) playlist = super()._real_extract(url, html=html) INFO_ELEMENT_RE = r'''(?x) <div [^>]*\bclass\s*=\s*('|")(?:[\w$-]+\s+|\s)*?pornstar-info-wrapper(?:\s+[\w$-]+|\s)*\1[^>]*> (?P<info>[\s\S]+?)(?:</div>\s*){6,} ''' if infos := self._search_regex(INFO_ELEMENT_RE, html, 'infos', group='info', default=''): infos = re.sub( r'(?:\s*nl=nl)+\s*', ' ', re.sub(r'(?u)\s+', ' ', clean_html(re.sub('\n', 'nl=nl', infos)))).replace('ribe Subsc', '') return { **playlist, 'description': infos.strip() or None, } class YouPornVideosIE(YouPornListBaseIE): IE_DESC = 'YouPorn video (browse) playlists, with sorting, filtering and pagination' _VALID_URL = r'''(?x) https?://(?:www\.)?youporn\.com/ (?:(?P<id>browse)/)? 
(?P<sort>(?(id) (?:duration|rating|time|views)| (?:most_(?:favou?rit|view)ed|recommended|top_rated)?)) (?:[/#?]|$) ''' _TESTS = [{ 'note': 'Full list with pagination (too long for test)', 'url': 'https://www.youporn.com/', 'info_dict': { 'id': 'youporn', 'title': 'YouPorn videos', }, 'only_matching': True, }, { 'note': 'Full list with pagination (too long for test)', 'url': 'https://www.youporn.com/recommended', 'info_dict': { 'id': 'youporn/recommended', 'title': 'YouPorn videos by recommended', }, 'only_matching': True, }, { 'note': 'Full list with pagination (too long for test)', 'url': 'https://www.youporn.com/top_rated', 'info_dict': { 'id': 'youporn/top_rated', 'title': 'YouPorn videos by top rated', }, 'only_matching': True, }, { 'note': 'Full list with pagination (too long for test)', 'url': 'https://www.youporn.com/browse/time', 'info_dict': { 'id': 'browse/time', 'title': 'YouPorn videos by time', }, 'only_matching': True, }, { 'note': 'Filtered paginated list with single page result', 'url': 'https://www.youporn.com/most_favorited/?res=VR&max_minutes=2', 'info_dict': { 'id': 'youporn/most_favorited/max_minutes=2/res=VR', 'title': 'YouPorn videos by most favorited (max_minutes=2,res=VR)', }, 'playlist_mincount': 10, # 'playlist_maxcount': 28, }, { 'note': 'Filtered paginated list with several pages', 'url': 'https://www.youporn.com/most_favorited/?res=VR&max_minutes=5', 'info_dict': { 'id': 'youporn/most_favorited/max_minutes=5/res=VR', 'title': 'YouPorn videos by most favorited (max_minutes=5,res=VR)', }, 'playlist_mincount': 45, }, { 'note': 'Single page of full list', 'url': 'https://www.youporn.com/browse/time?page=1', 'info_dict': { 'id': 'browse/time/page=1', 'title': 'YouPorn videos by time (page=1)', }, 'playlist_count': 36, }] @staticmethod def _get_title_from_slug(title_slug): return 'YouPorn' if title_slug == 'browse' else title_slug
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/goodgame.py
yt_dlp/extractor/goodgame.py
from .common import InfoExtractor from ..utils import ( int_or_none, str_or_none, traverse_obj, url_or_none, ) class GoodGameIE(InfoExtractor): IE_NAME = 'goodgame:stream' _VALID_URL = r'https?://goodgame\.ru/(?!channel/)(?P<id>[\w.*-]+)' _TESTS = [{ 'url': 'https://goodgame.ru/TGW#autoplay', 'info_dict': { 'id': '7998', 'ext': 'mp4', 'channel_id': '7998', 'title': r're:шоуматч Happy \(NE\) vs Fortitude \(UD\), потом ладдер и дс \d{4}-\d{2}-\d{2} \d{2}:\d{2}$', 'channel_url': 'https://goodgame.ru/TGW', 'thumbnail': 'https://hls.goodgame.ru/previews/7998_240.jpg', 'uploader': 'TGW', 'channel': 'JosephStalin', 'live_status': 'is_live', 'age_limit': 18, 'channel_follower_count': int, 'uploader_id': '2899', 'concurrent_view_count': int, }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://goodgame.ru/Mr.Gray', 'only_matching': True, }, { 'url': 'https://goodgame.ru/HeDoPa3yMeHue*', 'only_matching': True, }] def _real_extract(self, url): channel_name = self._match_id(url) response = self._download_json(f'https://goodgame.ru/api/4/users/{channel_name}/stream', channel_name) player_id = response['streamkey'] formats, subtitles = [], {} if response.get('status'): formats, subtitles = self._extract_m3u8_formats_and_subtitles( f'https://hls.goodgame.ru/manifest/{player_id}_master.m3u8', channel_name, 'mp4', live=True) else: self.raise_no_formats('User is offline', expected=True, video_id=channel_name) return { 'id': player_id, 'formats': formats, 'subtitles': subtitles, 'is_live': bool(formats), **traverse_obj(response, { 'title': ('title', {str}), 'channel': ('channelkey', {str}), 'channel_id': ('id', {str_or_none}), 'channel_url': ('link', {url_or_none}), 'uploader': ('streamer', 'username', {str}), 'uploader_id': ('streamer', 'id', {str_or_none}), 'thumbnail': ('preview', {url_or_none}, {self._proto_relative_url}), 'concurrent_view_count': ('viewers', {int_or_none}), 'channel_follower_count': ('followers', {int_or_none}), 'age_limit': ('adult', {bool}, {lambda x: 
18 if x else None}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/zoom.py
yt_dlp/extractor/zoom.py
from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, js_to_json, parse_filesize, parse_qs, parse_resolution, str_or_none, update_url_query, url_basename, urlencode_postdata, urljoin, ) from ..utils.traversal import traverse_obj class ZoomIE(InfoExtractor): IE_NAME = 'zoom' _VALID_URL = r'(?P<base_url>https?://(?:[^.]+\.)?zoom\.us/)rec(?:ording)?/(?P<type>play|share)/(?P<id>[\w.-]+)' _TESTS = [{ 'url': 'https://economist.zoom.us/rec/play/dUk_CNBETmZ5VA2BwEl-jjakPpJ3M1pcfVYAPRsoIbEByGsLjUZtaa4yCATQuOL3der8BlTwxQePl_j0.EImBkXzTIaPvdZO5', 'md5': 'ab445e8c911fddc4f9adc842c2c5d434', 'info_dict': { 'id': 'dUk_CNBETmZ5VA2BwEl-jjakPpJ3M1pcfVYAPRsoIbEByGsLjUZtaa4yCATQuOL3der8BlTwxQePl_j0.EImBkXzTIaPvdZO5', 'ext': 'mp4', 'title': 'China\'s "two sessions" and the new five-year plan', }, 'skip': 'Recording requires email authentication to access', }, { # play URL 'url': 'https://ffgolf.zoom.us/rec/play/qhEhXbrxq1Zoucx8CMtHzq1Z_2YZRPVCqWK_K-2FkEGRsSLDeOX8Tu4P6jtjZcRry8QhIbvKZdtr4UNo.QcPn2debFskI9whJ', 'md5': '2c4b1c4e5213ebf9db293e88d9385bee', 'info_dict': { 'id': 'qhEhXbrxq1Zoucx8CMtHzq1Z_2YZRPVCqWK_K-2FkEGRsSLDeOX8Tu4P6jtjZcRry8QhIbvKZdtr4UNo.QcPn2debFskI9whJ', 'ext': 'mp4', 'title': 'Prépa AF2023 - Séance 5 du 11 avril - R20/VM/GO', }, }, { # share URL 'url': 'https://us02web.zoom.us/rec/share/hkUk5Zxcga0nkyNGhVCRfzkA2gX_mzgS3LpTxEEWJz9Y_QpIQ4mZFOUx7KZRZDQA.9LGQBdqmDAYgiZ_8', 'md5': '90fdc7cfcaee5d52d1c817fc03c43c9b', 'info_dict': { 'id': 'hkUk5Zxcga0nkyNGhVCRfzkA2gX_mzgS3LpTxEEWJz9Y_QpIQ4mZFOUx7KZRZDQA.9LGQBdqmDAYgiZ_8', 'ext': 'mp4', 'title': 'Timea Andrea Lelik\'s Personal Meeting Room', }, 'skip': 'This recording has expired', }, { # view_with_share URL 'url': 'https://cityofdetroit.zoom.us/rec/share/VjE-5kW3xmgbEYqR5KzRgZ1OFZvtMtiXk5HyRJo5kK4m5PYE6RF4rF_oiiO_9qaM.UTAg1MI7JSnF3ZjX', 'md5': 'bdc7867a5934c151957fb81321b3c024', 'info_dict': { 'id': 'VjE-5kW3xmgbEYqR5KzRgZ1OFZvtMtiXk5HyRJo5kK4m5PYE6RF4rF_oiiO_9qaM.UTAg1MI7JSnF3ZjX', 'ext': 
'mp4', 'title': 'February 2022 Detroit Revenue Estimating Conference', 'duration': 7299, 'formats': 'mincount:3', }, }] def _get_page_data(self, webpage, video_id): return self._search_json( r'window\.__data__\s*=', webpage, 'data', video_id, transform_source=js_to_json) def _get_real_webpage(self, url, base_url, video_id, url_type): webpage = self._download_webpage(url, video_id, note=f'Downloading {url_type} webpage') try: form = self._form_hidden_inputs('password_form', webpage) except ExtractorError: return webpage password = self.get_param('videopassword') if not password: raise ExtractorError( 'This video is protected by a passcode, use the --video-password option', expected=True) is_meeting = form.get('useWhichPasswd') == 'meeting' validation = self._download_json( base_url + 'rec/validate%s_passwd' % ('_meet' if is_meeting else ''), video_id, 'Validating passcode', 'Wrong passcode', data=urlencode_postdata({ 'id': form[('meet' if is_meeting else 'file') + 'Id'], 'passwd': password, 'action': form.get('action'), })) if not validation.get('status'): raise ExtractorError(validation['errorMessage'], expected=True) return self._download_webpage(url, video_id, note=f'Re-downloading {url_type} webpage') def _real_extract(self, url): base_url, url_type, video_id = self._match_valid_url(url).group('base_url', 'type', 'id') query = {} start_params = traverse_obj(url, {'startTime': ({parse_qs}, 'startTime', -1)}) if url_type == 'share': webpage = self._get_real_webpage(url, base_url, video_id, 'share') meeting_id = self._get_page_data(webpage, video_id)['meetingId'] redirect_path = self._download_json( f'{base_url}nws/recording/1.0/play/share-info/{meeting_id}', video_id, note='Downloading share info JSON')['result']['redirectUrl'] url = update_url_query(urljoin(base_url, redirect_path), start_params) query['continueMode'] = 'true' webpage = self._get_real_webpage(url, base_url, video_id, 'play') file_id = self._get_page_data(webpage, video_id)['fileId'] if not 
file_id: # When things go wrong, file_id can be empty string raise ExtractorError('Unable to extract file ID') query.update(start_params) data = self._download_json( f'{base_url}nws/recording/1.0/play/info/{file_id}', video_id, query=query, note='Downloading play info JSON')['result'] subtitles = {} for _type in ('transcript', 'cc', 'chapter'): if data.get(f'{_type}Url'): subtitles[_type] = [{ 'url': urljoin(base_url, data[f'{_type}Url']), 'ext': 'vtt', }] formats = [] if data.get('viewMp4Url'): formats.append({ 'format_note': 'Camera stream', 'url': data['viewMp4Url'], 'width': int_or_none(traverse_obj(data, ('viewResolvtions', 0))), 'height': int_or_none(traverse_obj(data, ('viewResolvtions', 1))), 'format_id': 'view', 'ext': 'mp4', 'filesize_approx': parse_filesize(str_or_none(traverse_obj(data, ('recording', 'fileSizeInMB')))), 'preference': 0, }) if data.get('shareMp4Url'): formats.append({ 'format_note': 'Screen share stream', 'url': data['shareMp4Url'], 'width': int_or_none(traverse_obj(data, ('shareResolvtions', 0))), 'height': int_or_none(traverse_obj(data, ('shareResolvtions', 1))), 'format_id': 'share', 'ext': 'mp4', 'preference': -1, }) view_with_share_url = data.get('viewMp4WithshareUrl') if view_with_share_url: formats.append({ **parse_resolution(self._search_regex( r'_(\d+x\d+)\.mp4', url_basename(view_with_share_url), 'resolution', default=None)), 'format_note': 'Screen share with camera', 'url': view_with_share_url, 'format_id': 'view_with_share', 'ext': 'mp4', 'preference': 1, }) return { 'id': video_id, 'title': str_or_none(traverse_obj(data, ('meet', 'topic'))), 'duration': int_or_none(data.get('duration')), 'subtitles': subtitles, 'formats': formats, 'http_headers': { 'Referer': base_url, }, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ertgr.py
yt_dlp/extractor/ertgr.py
import json import re from .common import InfoExtractor from ..utils import ( ExtractorError, clean_html, determine_ext, dict_get, int_or_none, merge_dicts, parse_age_limit, parse_iso8601, parse_qs, str_or_none, try_get, url_or_none, variadic, ) from ..utils.traversal import traverse_obj class ERTFlixBaseIE(InfoExtractor): def _call_api( self, video_id, method='Player/AcquireContent', api_version=1, param_headers=None, data=None, headers=None, **params): platform_codename = {'platformCodename': 'www'} headers_as_param = {'X-Api-Date-Format': 'iso', 'X-Api-Camel-Case': False} headers_as_param.update(param_headers or {}) headers = headers or {} if data: headers['Content-Type'] = headers_as_param['Content-Type'] = 'application/json;charset=utf-8' data = json.dumps(merge_dicts(platform_codename, data)).encode() query = merge_dicts( {} if data else platform_codename, {'$headers': json.dumps(headers_as_param)}, params) response = self._download_json( f'https://api.app.ertflix.gr/v{api_version!s}/{method}', video_id, fatal=False, query=query, data=data, headers=headers) if try_get(response, lambda x: x['Result']['Success']) is True: return response def _call_api_get_tiles(self, video_id, *tile_ids): requested_tile_ids = [video_id, *tile_ids] requested_tiles = [{'Id': tile_id} for tile_id in requested_tile_ids] tiles_response = self._call_api( video_id, method='Tile/GetTiles', api_version=2, data={'RequestedTiles': requested_tiles}) tiles = try_get(tiles_response, lambda x: x['Tiles'], list) or [] if tile_ids: if sorted([tile['Id'] for tile in tiles]) != sorted(requested_tile_ids): raise ExtractorError('Requested tiles not found', video_id=video_id) return tiles try: return next(tile for tile in tiles if tile['Id'] == video_id) except StopIteration: raise ExtractorError('No matching tile found', video_id=video_id) class ERTFlixCodenameIE(ERTFlixBaseIE): IE_NAME = 'ertflix:codename' IE_DESC = 'ERTFLIX videos by codename' _VALID_URL = r'ertflix:(?P<id>[\w-]+)' _TESTS = [{ 
'url': 'ertflix:monogramma-praxitelis-tzanoylinos', 'info_dict': { 'id': 'monogramma-praxitelis-tzanoylinos', 'ext': 'mp4', 'title': 'monogramma-praxitelis-tzanoylinos', }, }] def _extract_formats_and_subs(self, video_id): media_info = self._call_api(video_id, codename=video_id) formats, subtitles = [], {} for media in traverse_obj(media_info, ( 'MediaFiles', lambda _, v: v['RoleCodename'] == 'main', 'Formats', lambda _, v: url_or_none(v['Url']))): fmt_url = media['Url'] ext = determine_ext(fmt_url) if ext == 'm3u8': fmts, subs = self._extract_m3u8_formats_and_subtitles( fmt_url, video_id, m3u8_id='hls', ext='mp4', fatal=False) elif ext == 'mpd': fmts, subs = self._extract_mpd_formats_and_subtitles( fmt_url, video_id, mpd_id='dash', fatal=False) else: formats.append({ 'url': fmt_url, 'format_id': str_or_none(media.get('Id')), }) continue formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) return formats, subtitles def _real_extract(self, url): video_id = self._match_id(url) formats, subs = self._extract_formats_and_subs(video_id) if formats: return { 'id': video_id, 'formats': formats, 'subtitles': subs, 'title': self._generic_title(url), } class ERTFlixIE(ERTFlixBaseIE): IE_NAME = 'ertflix' IE_DESC = 'ERTFLIX videos' _VALID_URL = r'https?://www\.ertflix\.gr/(?:[^/]+/)?(?:series|vod)/(?P<id>[a-z]{3}\.\d+)' _TESTS = [{ 'url': 'https://www.ertflix.gr/vod/vod.173258-aoratoi-ergates', 'md5': '6479d5e60fd7e520b07ba5411dcdd6e7', 'info_dict': { 'id': 'aoratoi-ergates', 'ext': 'mp4', 'title': 'md5:c1433d598fbba0211b0069021517f8b4', 'description': 'md5:01a64d113c31957eb7eb07719ab18ff4', 'thumbnail': r're:https?://.+\.jpg', 'episode_id': 'vod.173258', 'timestamp': 1639648800, 'upload_date': '20211216', 'duration': 3166, 'age_limit': 8, }, 'skip': 'Invalid URL', }, { 'url': 'https://www.ertflix.gr/series/ser.3448-monogramma', 'info_dict': { 'id': 'ser.3448', 'age_limit': 8, 'title': 'Monogramma', 'description': 'md5:e30cc640e6463da87f210a8ed10b2439', }, 
'playlist_mincount': 64, }, { 'url': 'https://www.ertflix.gr/series/ser.3448-monogramma?season=1', 'info_dict': { 'id': 'ser.3448', 'age_limit': 8, 'title': 'Monogramma', 'description': 'md5:e30cc640e6463da87f210a8ed10b2439', }, 'playlist_mincount': 66, }, { 'url': 'https://www.ertflix.gr/series/ser.3448-monogramma?season=1&season=2021%20-%202022', 'info_dict': { 'id': 'ser.3448', 'age_limit': 8, 'title': 'Monogramma', 'description': 'md5:e30cc640e6463da87f210a8ed10b2439', }, 'playlist_mincount': 25, }, { 'url': 'https://www.ertflix.gr/series/ser.164991-to-diktuo-1?season=1-9', 'info_dict': { 'id': 'ser.164991', 'age_limit': 8, 'title': 'The Network', 'description': 'The first Greek show featuring topics exclusively around the internet.', }, 'playlist_mincount': 0, }, { 'url': 'https://www.ertflix.gr/en/vod/vod.127652-ta-kalytera-mas-chronia-ep1-mia-volta-sto-feggari', 'only_matching': True, }] def _extract_episode(self, episode): codename = try_get(episode, lambda x: x['Codename'], str) title = episode.get('Title') description = clean_html(dict_get(episode, ('ShortDescription', 'TinyDescription'))) if not codename or not title or not episode.get('HasPlayableStream', True): return thumbnail = next(( url_or_none(thumb.get('Url')) for thumb in variadic(dict_get(episode, ('Images', 'Image')) or {}) if thumb.get('IsMain')), None) return { '_type': 'url_transparent', 'thumbnail': thumbnail, 'id': codename, 'episode_id': episode.get('Id'), 'title': title, 'alt_title': episode.get('Subtitle'), 'description': description, 'timestamp': parse_iso8601(episode.get('PublishDate')), 'duration': episode.get('DurationSeconds'), 'age_limit': self._parse_age_rating(episode), 'url': f'ertflix:{codename}', } @staticmethod def _parse_age_rating(info_dict): return parse_age_limit( info_dict.get('AgeRating') or (info_dict.get('IsAdultContent') and 18) or (info_dict.get('IsKidsContent') and 0)) def _extract_series(self, video_id, season_titles=None, season_numbers=None): media_info = 
self._call_api(video_id, method='Tile/GetSeriesDetails', id=video_id) series = try_get(media_info, lambda x: x['Series'], dict) or {} series_info = { 'age_limit': self._parse_age_rating(series), 'title': series.get('Title'), 'description': dict_get(series, ('ShortDescription', 'TinyDescription')), } if season_numbers: season_titles = season_titles or [] for season in try_get(series, lambda x: x['Seasons'], list) or []: if season.get('SeasonNumber') in season_numbers and season.get('Title'): season_titles.append(season['Title']) def gen_episode(m_info, season_titles): for episode_group in try_get(m_info, lambda x: x['EpisodeGroups'], list) or []: if season_titles and episode_group.get('Title') not in season_titles: continue episodes = try_get(episode_group, lambda x: x['Episodes'], list) if not episodes: continue season_info = { 'season': episode_group.get('Title'), 'season_number': int_or_none(episode_group.get('SeasonNumber')), } try: episodes = [(int(ep['EpisodeNumber']), ep) for ep in episodes] episodes.sort() except (KeyError, ValueError): episodes = enumerate(episodes, 1) for n, episode in episodes: info = self._extract_episode(episode) if info is None: continue info['episode_number'] = n info.update(season_info) yield info return self.playlist_result( gen_episode(media_info, season_titles), playlist_id=video_id, **series_info) def _real_extract(self, url): video_id = self._match_id(url) if video_id.startswith('ser.'): param_season = parse_qs(url).get('season', [None]) param_season = [ (have_number, int_or_none(v) if have_number else str_or_none(v)) for have_number, v in [(int_or_none(ps) is not None, ps) for ps in param_season] if v is not None ] season_kwargs = { k: [v for is_num, v in param_season if is_num is c] or None for k, c in [('season_titles', False), ('season_numbers', True)] } return self._extract_series(video_id, **season_kwargs) return self._extract_episode(self._call_api_get_tiles(video_id)) class ERTWebtvEmbedIE(InfoExtractor): IE_NAME = 
'ertwebtv:embed' IE_DESC = 'ert.gr webtv embedded videos' _BASE_PLAYER_URL_RE = re.escape('//www.ert.gr/webtv/live-uni/vod/dt-uni-vod.php') _VALID_URL = rf'https?:{_BASE_PLAYER_URL_RE}\?([^#]+&)?f=(?P<id>[^#&]+)' _EMBED_REGEX = [rf'<iframe[^>]+?src=(?P<_q1>["\'])(?P<url>(?:https?:)?{_BASE_PLAYER_URL_RE}\?(?:(?!(?P=_q1)).)+)(?P=_q1)'] _TESTS = [{ 'url': 'https://www.ert.gr/webtv/live-uni/vod/dt-uni-vod.php?f=trailers/E2251_TO_DIKTYO_E09_16-01_1900.mp4&bgimg=/photos/2022/1/to_diktio_ep09_i_istoria_tou_diadiktiou_stin_Ellada_1021x576.jpg', 'md5': 'f9e9900c25c26f4ecfbddbb4b6305854', 'info_dict': { 'id': 'trailers/E2251_TO_DIKTYO_E09_16-01_1900.mp4', 'title': 'md5:914f06a73cd8b62fbcd6fb90c636e497', 'ext': 'mp4', 'thumbnail': 'https://program.ert.gr/photos/2022/1/to_diktio_ep09_i_istoria_tou_diadiktiou_stin_Ellada_1021x576.jpg', }, 'skip': 'Invalid URL', }] _WEBPAGE_TESTS = [{ 'url': 'https://www.ertnews.gr/video/manolis-goyalles-o-anthropos-piso-apo-ti-diadiktyaki-vasilopita/', 'info_dict': { 'id': '2022/tv/news-themata-ianouarios/20220114-apotis6-gouales-pita.mp4', 'ext': 'mp4', 'title': 'VOD - 2022/tv/news-themata-ianouarios/20220114-apotis6-gouales-pita.mp4', 'thumbnail': r're:https?://www\.ert\.gr/themata/photos/.+\.jpg', }, }] def _real_extract(self, url): video_id = self._match_id(url) formats, subs = self._extract_m3u8_formats_and_subtitles( f'https://mediastream.ert.gr/vodedge/_definst_/mp4:dvrorigin/{video_id}/playlist.m3u8', video_id, 'mp4') thumbnail_id = parse_qs(url).get('bgimg', [None])[0] if thumbnail_id and not thumbnail_id.startswith('http'): thumbnail_id = f'https://program.ert.gr{thumbnail_id}' return { 'id': video_id, 'title': f'VOD - {video_id}', 'thumbnail': thumbnail_id, 'formats': formats, 'subtitles': subs, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/threespeak.py
yt_dlp/extractor/threespeak.py
import re from .common import InfoExtractor from ..utils import ( try_get, unified_strdate, ) class ThreeSpeakIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?3speak\.tv/watch\?v\=[^/]+/(?P<id>[^/$&#?]+)' _TESTS = [{ 'url': 'https://3speak.tv/watch?v=dannyshine/wjgoxyfy', 'info_dict': { 'id': 'wjgoxyfy', 'ext': 'mp4', 'title': 'Can People who took the Vax think Critically', 'uploader': 'dannyshine', 'description': 'md5:181aa7ccb304afafa089b5af3bca7a10', 'tags': ['sex', 'covid', 'antinatalism', 'comedy', 'vaccines'], 'thumbnail': 'https://img.3speakcontent.co/wjgoxyfy/thumbnails/default.png', 'upload_date': '20211021', 'duration': 2703.867833, 'filesize': 1620054781, }, 'params': {'skip_download': True}, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) json_str = self._html_search_regex(r'JSON\.parse\(\'([^\']+)\'\)', webpage, 'json') # The json string itself is escaped. Hence the double parsing data_json = self._parse_json(self._parse_json(f'"{json_str}"', video_id), video_id) video_json = self._parse_json(data_json['json_metadata'], video_id) formats, subtitles = [], {} og_m3u8 = self._html_search_regex(r'<meta\s?property=\"ogvideo\"\s?content=\"([^\"]+)\">', webpage, 'og m3u8', fatal=False) if og_m3u8: https_frmts, https_subs = self._extract_m3u8_formats_and_subtitles(og_m3u8, video_id, fatal=False, m3u8_id='https') formats.extend(https_frmts) subtitles = self._merge_subtitles(subtitles, https_subs) ipfs_m3u8 = try_get(video_json, lambda x: x['video']['info']['ipfs']) if ipfs_m3u8: ipfs_frmts, ipfs_subs = self._extract_m3u8_formats_and_subtitles( f'https://ipfs.3speak.tv/ipfs/{ipfs_m3u8}', video_id, fatal=False, m3u8_id='ipfs') formats.extend(ipfs_frmts) subtitles = self._merge_subtitles(subtitles, ipfs_subs) mp4_file = try_get(video_json, lambda x: x['video']['info']['file']) if mp4_file: formats.append({ 'url': f'https://threespeakvideo.b-cdn.net/{video_id}/{mp4_file}', 'ext': 'mp4', 
'format_id': 'https-mp4', 'duration': try_get(video_json, lambda x: x['video']['info']['duration']), 'filesize': try_get(video_json, lambda x: x['video']['info']['filesize']), 'quality': 11, 'format_note': 'Original file', }) return { 'id': video_id, 'title': data_json.get('title') or data_json.get('root_title'), 'uploader': data_json.get('author'), 'description': try_get(video_json, lambda x: x['video']['content']['description']), 'tags': try_get(video_json, lambda x: x['video']['content']['tags']), 'thumbnail': try_get(video_json, lambda x: x['image'][0]), 'upload_date': unified_strdate(data_json.get('created')), 'formats': formats, 'subtitles': subtitles, } class ThreeSpeakUserIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?3speak\.tv/user/(?P<id>[^/$&?#]+)' _TESTS = [{ 'url': 'https://3speak.tv/user/theycallmedan', 'info_dict': { 'id': 'theycallmedan', }, 'playlist_mincount': 115, }] def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) entries = [ self.url_result( f'https://3speak.tv/watch?v={video}', ie=ThreeSpeakIE.ie_key()) for video in re.findall(r'data-payout\s?\=\s?\"([^\"]+)\"', webpage) if video ] return self.playlist_result(entries, playlist_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/audiomack.py
yt_dlp/extractor/audiomack.py
import itertools import time from .common import InfoExtractor from .soundcloud import SoundcloudIE from ..utils import ( ExtractorError, url_basename, ) class AudiomackIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?audiomack\.com/(?:song/|(?=.+/song/))(?P<id>[\w/-]+)' IE_NAME = 'audiomack' _TESTS = [ # hosted on audiomack { 'url': 'http://www.audiomack.com/song/roosh-williams/extraordinary', 'info_dict': { 'id': '310086', 'ext': 'mp3', 'uploader': 'Roosh Williams', 'title': 'Extraordinary', }, }, # audiomack wrapper around soundcloud song # Needs new test URL. { 'add_ie': ['Soundcloud'], 'url': 'http://www.audiomack.com/song/hip-hop-daily/black-mamba-freestyle', 'info_dict': { 'id': '258901379', 'ext': 'mp3', 'description': 'mamba day freestyle for the legend Kobe Bryant ', 'title': 'Black Mamba Freestyle [Prod. By Danny Wolf]', 'uploader': 'ILOVEMAKONNEN', 'upload_date': '20160414', }, 'skip': 'Song has been removed from the site', }, ] def _real_extract(self, url): # URLs end with [uploader name]/song/[uploader title] # this title is whatever the user types in, and is rarely # the proper song title. 
Real metadata is in the api response album_url_tag = self._match_id(url).replace('/song/', '/') # Request the extended version of the api for extra fields like artist and title api_response = self._download_json( 'http://www.audiomack.com/api/music/url/song/%s?extended=1&_=%d' % ( album_url_tag, time.time()), album_url_tag) # API is inconsistent with errors if 'url' not in api_response or not api_response['url'] or 'error' in api_response: raise ExtractorError(f'Invalid url {url}') # Audiomack wraps a lot of soundcloud tracks in their branded wrapper # if so, pass the work off to the soundcloud extractor if SoundcloudIE.suitable(api_response['url']): return self.url_result(api_response['url'], SoundcloudIE.ie_key()) return { 'id': str(api_response.get('id', album_url_tag)), 'uploader': api_response.get('artist'), 'title': api_response.get('title'), 'url': api_response['url'], } class AudiomackAlbumIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?audiomack\.com/(?:album/|(?=.+/album/))(?P<id>[\w/-]+)' IE_NAME = 'audiomack:album' _TESTS = [ # Standard album playlist { 'url': 'http://www.audiomack.com/album/flytunezcom/tha-tour-part-2-mixtape', 'playlist_count': 11, 'info_dict': { 'id': '812251', 'title': 'Tha Tour: Part 2 (Official Mixtape)', }, }, # Album playlist ripped from fakeshoredrive with no metadata { 'url': 'http://www.audiomack.com/album/fakeshoredrive/ppp-pistol-p-project', 'info_dict': { 'title': 'PPP (Pistol P Project)', 'id': '837572', }, 'playlist': [{ 'info_dict': { 'title': 'PPP (Pistol P Project) - 8. Real (prod by SYK SENSE )', 'id': '837576', 'ext': 'mp3', 'uploader': 'Lil Herb a.k.a. G Herbo', }, }, { 'info_dict': { 'title': 'PPP (Pistol P Project) - 10. 4 Minutes Of Hell Part 4 (prod by DY OF 808 MAFIA)', 'id': '837580', 'ext': 'mp3', 'uploader': 'Lil Herb a.k.a. 
G Herbo', }, }], }, ] def _real_extract(self, url): # URLs end with [uploader name]/album/[uploader title] # this title is whatever the user types in, and is rarely # the proper song title. Real metadata is in the api response album_url_tag = self._match_id(url).replace('/album/', '/') result = {'_type': 'playlist', 'entries': []} # There is no one endpoint for album metadata - instead it is included/repeated in each song's metadata # Therefore we don't know how many songs the album has and must infi-loop until failure for track_no in itertools.count(): # Get song's metadata api_response = self._download_json( 'http://www.audiomack.com/api/music/url/album/%s/%d?extended=1&_=%d' % (album_url_tag, track_no, time.time()), album_url_tag, note=f'Querying song information ({track_no + 1})') # Total failure, only occurs when url is totally wrong # Won't happen in middle of valid playlist (next case) if 'url' not in api_response or 'error' in api_response: raise ExtractorError(f'Invalid url for track {track_no} of album url {url}') # URL is good but song id doesn't exist - usually means end of playlist elif not api_response['url']: break else: # Pull out the album metadata and add to result (if it exists) for resultkey, apikey in [('id', 'album_id'), ('title', 'album_title')]: if apikey in api_response and resultkey not in result: result[resultkey] = str(api_response[apikey]) song_id = url_basename(api_response['url']).rpartition('.')[0] result['entries'].append({ 'id': str(api_response.get('id', song_id)), 'uploader': api_response.get('artist'), 'title': api_response.get('title', song_id), 'url': api_response['url'], }) return result
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/chzzk.py
yt_dlp/extractor/chzzk.py
from .common import InfoExtractor from ..utils import ( UserNotLive, float_or_none, int_or_none, parse_iso8601, url_or_none, ) from ..utils.traversal import traverse_obj class CHZZKLiveIE(InfoExtractor): IE_NAME = 'chzzk:live' _VALID_URL = r'https?://chzzk\.naver\.com/live/(?P<id>[\da-f]+)' _TESTS = [{ 'url': 'https://chzzk.naver.com/live/c68b8ef525fb3d2fa146344d84991753', 'info_dict': { 'id': 'c68b8ef525fb3d2fa146344d84991753', 'ext': 'mp4', 'title': str, 'channel': '진짜도현', 'channel_id': 'c68b8ef525fb3d2fa146344d84991753', 'channel_is_verified': False, 'thumbnail': r're:https?://.+/.+\.jpg', 'timestamp': 1705510344, 'upload_date': '20240117', 'live_status': 'is_live', 'view_count': int, 'concurrent_view_count': int, }, 'skip': 'The channel is not currently live', }] def _real_extract(self, url): channel_id = self._match_id(url) live_detail = self._download_json( f'https://api.chzzk.naver.com/service/v3/channels/{channel_id}/live-detail', channel_id, note='Downloading channel info', errnote='Unable to download channel info')['content'] if live_detail.get('status') == 'CLOSE': raise UserNotLive(video_id=channel_id) live_playback = self._parse_json(live_detail['livePlaybackJson'], channel_id) thumbnails = [] thumbnail_template = traverse_obj( live_playback, ('thumbnail', 'snapshotThumbnailTemplate', {url_or_none})) if thumbnail_template and '{type}' in thumbnail_template: for width in traverse_obj(live_playback, ('thumbnail', 'types', ..., {str})): thumbnails.append({ 'id': width, 'url': thumbnail_template.replace('{type}', width), 'width': int_or_none(width), }) formats, subtitles = [], {} for media in traverse_obj(live_playback, ('media', lambda _, v: url_or_none(v['path']))): is_low_latency = media.get('mediaId') == 'LLHLS' fmts, subs = self._extract_m3u8_formats_and_subtitles( media['path'], channel_id, 'mp4', fatal=False, live=True, m3u8_id='hls-ll' if is_low_latency else 'hls') for f in fmts: if is_low_latency: f['source_preference'] = -2 if 
'-afragalow.stream-audio.stream' in f['format_id']: f['quality'] = -2 formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) return { 'id': channel_id, 'is_live': True, 'formats': formats, 'subtitles': subtitles, 'thumbnails': thumbnails, **traverse_obj(live_detail, { 'title': ('liveTitle', {str}), 'timestamp': ('openDate', {parse_iso8601(delimiter=' ')}), 'concurrent_view_count': ('concurrentUserCount', {int_or_none}), 'view_count': ('accumulateCount', {int_or_none}), 'channel': ('channel', 'channelName', {str}), 'channel_id': ('channel', 'channelId', {str}), 'channel_is_verified': ('channel', 'verifiedMark', {bool}), }), } class CHZZKVideoIE(InfoExtractor): IE_NAME = 'chzzk:video' _VALID_URL = r'https?://chzzk\.naver\.com/video/(?P<id>\d+)' _TESTS = [{ 'url': 'https://chzzk.naver.com/video/1754', 'md5': 'b0c0c1bb888d913b93d702b1512c7f06', 'info_dict': { 'id': '1754', 'ext': 'mp4', 'title': '치지직 테스트 방송', 'channel': '침착맨', 'channel_id': 'bb382c2c0cc9fa7c86ab3b037fb5799c', 'channel_is_verified': False, 'thumbnail': r're:https?://.+/.+\.jpg', 'duration': 15577, 'timestamp': 1702970505.417, 'upload_date': '20231219', 'view_count': int, }, 'skip': 'Replay video is expired', }, { # Manually uploaded video 'url': 'https://chzzk.naver.com/video/1980', 'info_dict': { 'id': '1980', 'ext': 'mp4', 'title': '※시청주의※한번보면 잊기 힘든 영상', 'channel': '라디유radiyu', 'channel_id': '68f895c59a1043bc5019b5e08c83a5c5', 'channel_is_verified': False, 'thumbnail': r're:https?://.+/.+\.jpg', 'duration': 95, 'timestamp': 1703102631.722, 'upload_date': '20231220', 'view_count': int, }, }, { # Partner channel replay video 'url': 'https://chzzk.naver.com/video/2458', 'info_dict': { 'id': '2458', 'ext': 'mp4', 'title': '첫 방송', 'channel': '강지', 'channel_id': 'b5ed5db484d04faf4d150aedd362f34b', 'channel_is_verified': True, 'thumbnail': r're:https?://.+/.+\.jpg', 'duration': 4433, 'timestamp': 1703307460.214, 'upload_date': '20231223', 'view_count': int, }, }, { # video_status == 'NONE' but is 
downloadable 'url': 'https://chzzk.naver.com/video/6325166', 'info_dict': { 'id': '6325166', 'ext': 'mp4', 'title': '와이프 숙제빼주기', 'channel': '이 다', 'channel_id': '0076a519f147ee9fd0959bf02f9571ca', 'channel_is_verified': False, 'view_count': int, 'duration': 28167, 'thumbnail': r're:https?://.+/.+\.jpg', 'timestamp': 1742139216.86, 'upload_date': '20250316', 'live_status': 'was_live', }, 'params': {'skip_download': 'm3u8'}, }] def _real_extract(self, url): video_id = self._match_id(url) video_meta = self._download_json( f'https://api.chzzk.naver.com/service/v3/videos/{video_id}', video_id, note='Downloading video info', errnote='Unable to download video info')['content'] live_status = 'was_live' if video_meta.get('liveOpenDate') else 'not_live' video_status = video_meta.get('vodStatus') if video_status == 'ABR_HLS': formats, subtitles = self._extract_mpd_formats_and_subtitles( f'https://apis.naver.com/neonplayer/vodplay/v1/playback/{video_meta["videoId"]}', video_id, query={ 'key': video_meta['inKey'], 'env': 'real', 'lc': 'en_US', 'cpl': 'en_US', }) else: fatal = video_status == 'UPLOAD' playback = self._parse_json(video_meta['liveRewindPlaybackJson'], video_id, fatal=fatal) formats, subtitles = self._extract_m3u8_formats_and_subtitles( traverse_obj(playback, ('media', 0, 'path')), video_id, 'mp4', m3u8_id='hls', fatal=fatal) if formats and video_status != 'UPLOAD': self.write_debug(f'Video found with status: "{video_status}"') elif not formats: self.raise_no_formats( f'Unknown video status detected: "{video_status}"', expected=True, video_id=video_id) formats, subtitles = [], {} live_status = 'post_live' if live_status == 'was_live' else None return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, 'live_status': live_status, **traverse_obj(video_meta, { 'title': ('videoTitle', {str}), 'thumbnail': ('thumbnailImageUrl', {url_or_none}), 'timestamp': ('publishDateAt', {float_or_none(scale=1000)}), 'view_count': ('readCount', {int_or_none}), 'duration': 
('duration', {int_or_none}), 'channel': ('channel', 'channelName', {str}), 'channel_id': ('channel', 'channelId', {str}), 'channel_is_verified': ('channel', 'verifiedMark', {bool}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/varzesh3.py
yt_dlp/extractor/varzesh3.py
from .common import InfoExtractor from ..utils import ( clean_html, parse_qs, remove_start, ) class Varzesh3IE(InfoExtractor): _WORKING = False _VALID_URL = r'https?://(?:www\.)?video\.varzesh3\.com/(?:[^/]+/)+(?P<id>[^/]+)/?' _TESTS = [{ 'url': 'http://video.varzesh3.com/germany/bundesliga/5-%D9%88%D8%A7%DA%A9%D9%86%D8%B4-%D8%A8%D8%B1%D8%AA%D8%B1-%D8%AF%D8%B1%D9%88%D8%A7%D8%B2%D9%87%E2%80%8C%D8%A8%D8%A7%D9%86%D8%A7%D9%86%D8%9B%D9%87%D9%81%D8%AA%D9%87-26-%D8%A8%D9%88%D9%86%D8%AF%D8%B3/', 'md5': '2a933874cb7dce4366075281eb49e855', 'info_dict': { 'id': '76337', 'ext': 'mp4', 'title': '۵ واکنش برتر دروازه‌بانان؛هفته ۲۶ بوندسلیگا', 'description': 'فصل ۲۰۱۵-۲۰۱۴', 'thumbnail': r're:^https?://.*\.jpg$', }, 'skip': 'HTTP 404 Error', }, { 'url': 'http://video.varzesh3.com/video/112785/%D8%AF%D9%84%D9%87-%D8%B9%D9%84%DB%8C%D8%9B-%D8%B3%D8%AA%D8%A7%D8%B1%D9%87-%D9%86%D9%88%D8%B8%D9%87%D9%88%D8%B1-%D9%84%DB%8C%DA%AF-%D8%A8%D8%B1%D8%AA%D8%B1-%D8%AC%D8%B2%DB%8C%D8%B1%D9%87', 'md5': '841b7cd3afbc76e61708d94e53a4a4e7', 'info_dict': { 'id': '112785', 'ext': 'mp4', 'title': 'دله علی؛ ستاره نوظهور لیگ برتر جزیره', 'description': 'فوتبال 120', }, 'expected_warnings': ['description'], }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) video_url = self._search_regex( r'<source[^>]+src="([^"]+)"', webpage, 'video url') title = remove_start(self._html_extract_title(webpage), 'ویدیو ورزش 3 | ') description = self._html_search_regex( r'(?s)<div class="matn">(.+?)</div>', webpage, 'description', default=None) if description is None: description = clean_html(self._html_search_meta('description', webpage)) thumbnail = self._og_search_thumbnail(webpage, default=None) if thumbnail is None: fb_sharer_url = self._search_regex( r'<a[^>]+href="(https?://www\.facebook\.com/sharer/sharer\.php?[^"]+)"', webpage, 'facebook sharer URL', fatal=False) sharer_params = parse_qs(fb_sharer_url) thumbnail = sharer_params.get('p[images][0]', 
[None])[0] video_id = self._search_regex( r"<link[^>]+rel='(?:canonical|shortlink)'[^>]+href='/\?p=([^']+)'", webpage, display_id, default=None) if video_id is None: video_id = self._search_regex( r'var\s+VideoId\s*=\s*(\d+);', webpage, 'video id', default=display_id) return { 'url': video_url, 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tele5.py
yt_dlp/extractor/tele5.py
import functools from .dplay import DiscoveryPlusBaseIE from ..utils import join_nonempty from ..utils.traversal import traverse_obj class Tele5IE(DiscoveryPlusBaseIE): _VALID_URL = r'https?://(?:www\.)?tele5\.de/(?P<parent_slug>[\w-]+)/(?P<slug_a>[\w-]+)(?:/(?P<slug_b>[\w-]+))?' _TESTS = [{ # slug_a and slug_b 'url': 'https://tele5.de/mediathek/stargate-atlantis/quarantane', 'info_dict': { 'id': '6852024', 'ext': 'mp4', 'title': 'Quarantäne', 'description': 'md5:6af0373bd0fcc4f13e5d47701903d675', 'episode': 'Episode 73', 'episode_number': 73, 'season': 'Season 4', 'season_number': 4, 'series': 'Stargate Atlantis', 'upload_date': '20240525', 'timestamp': 1716643200, 'duration': 2503.2, 'thumbnail': 'https://eu1-prod-images.disco-api.com/2024/05/21/c81fcb45-8902-309b-badb-4e6d546b575d.jpeg', 'creators': ['Tele5'], 'tags': [], }, }, { # only slug_a 'url': 'https://tele5.de/mediathek/inside-out', 'info_dict': { 'id': '6819502', 'ext': 'mp4', 'title': 'Inside out', 'description': 'md5:7e5f32ed0be5ddbd27713a34b9293bfd', 'series': 'Inside out', 'upload_date': '20240523', 'timestamp': 1716494400, 'duration': 5343.4, 'thumbnail': 'https://eu1-prod-images.disco-api.com/2024/05/15/181eba3c-f9f0-3faf-b14d-0097050a3aa4.jpeg', 'creators': ['Tele5'], 'tags': [], }, }, { # playlist 'url': 'https://tele5.de/mediathek/schlefaz', 'info_dict': { 'id': 'mediathek-schlefaz', }, 'playlist_mincount': 3, }] def _real_extract(self, url): parent_slug, slug_a, slug_b = self._match_valid_url(url).group('parent_slug', 'slug_a', 'slug_b') playlist_id = join_nonempty(parent_slug, slug_a, slug_b, delim='-') query = {'environment': 'tele5', 'v': '2'} if not slug_b: endpoint = f'page/{slug_a}' query['parent_slug'] = parent_slug else: endpoint = f'videos/{slug_b}' query['filter[show.slug]'] = slug_a cms_data = self._download_json(f'https://de-api.loma-cms.com/feloma/{endpoint}/', playlist_id, query=query) return self.playlist_result(map( functools.partial(self._get_disco_api_info, url, 
disco_host='eu1-prod.disco-api.com', realm='dmaxde', country='DE'), traverse_obj(cms_data, ('blocks', ..., 'videoId', {str}))), playlist_id) def _update_disco_api_headers(self, headers, disco_base, display_id, realm): headers.update({ 'x-disco-params': f'realm={realm}', 'x-disco-client': 'Alps:HyogaPlayer:0.0.0', 'Authorization': self._get_auth(disco_base, display_id, realm), })
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ign.py
yt_dlp/extractor/ign.py
import re import urllib.parse from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, determine_ext, extract_attributes, int_or_none, merge_dicts, parse_iso8601, strip_or_none, traverse_obj, url_or_none, urljoin, ) class IGNBaseIE(InfoExtractor): def _call_api(self, slug): return self._download_json( f'http://apis.ign.com/{self._PAGE_TYPE}/v3/{self._PAGE_TYPE}s/slug/{slug}', slug) def _checked_call_api(self, slug): try: return self._call_api(slug) except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 404: e.cause.args = e.cause.args or [ e.cause.response.url, e.cause.status, e.cause.reason] raise ExtractorError( 'Content not found: expired?', cause=e.cause, expected=True) raise def _extract_video_info(self, video, fatal=True): video_id = video['videoId'] formats = [] refs = traverse_obj(video, 'refs', expected_type=dict) or {} m3u8_url = url_or_none(refs.get('m3uUrl')) if m3u8_url: formats.extend(self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) f4m_url = url_or_none(refs.get('f4mUrl')) if f4m_url: formats.extend(self._extract_f4m_formats( f4m_url, video_id, f4m_id='hds', fatal=False)) for asset in (video.get('assets') or []): asset_url = url_or_none(asset.get('url')) if not asset_url: continue formats.append({ 'url': asset_url, 'tbr': int_or_none(asset.get('bitrate'), 1000), 'fps': int_or_none(asset.get('frame_rate')), 'height': int_or_none(asset.get('height')), 'width': int_or_none(asset.get('width')), }) mezzanine_url = traverse_obj( video, ('system', 'mezzanineUrl'), expected_type=url_or_none) if mezzanine_url: formats.append({ 'ext': determine_ext(mezzanine_url, 'mp4'), 'format_id': 'mezzanine', 'quality': 1, 'url': mezzanine_url, }) thumbnails = traverse_obj( video, ('thumbnails', ..., {'url': 'url'}), expected_type=url_or_none) tags = traverse_obj( video, ('tags', ..., 'displayName'), expected_type=lambda x: x.strip() 
or None) metadata = traverse_obj(video, 'metadata', expected_type=dict) or {} title = traverse_obj( metadata, 'longTitle', 'title', 'name', expected_type=lambda x: x.strip() or None) return { 'id': video_id, 'title': title, 'description': strip_or_none(metadata.get('description')), 'timestamp': parse_iso8601(metadata.get('publishDate')), 'duration': int_or_none(metadata.get('duration')), 'thumbnails': thumbnails, 'formats': formats, 'tags': tags, } class IGNIE(IGNBaseIE): """ Extractor for some of the IGN sites, like www.ign.com, es.ign.com de.ign.com. Some videos of it.ign.com are also supported """ _VIDEO_PATH_RE = r'/(?:\d{4}/\d{2}/\d{2}/)?(?P<id>.+?)' _PLAYLIST_PATH_RE = r'(?:/?\?(?P<filt>[^&#]+))?' _VALID_URL = ( r'https?://(?:.+?\.ign|www\.pcmag)\.com/videos(?:{})'.format('|'.join((_VIDEO_PATH_RE + r'(?:[/?&#]|$)', _PLAYLIST_PATH_RE)))) IE_NAME = 'ign.com' _PAGE_TYPE = 'video' _TESTS = [{ 'url': 'http://www.ign.com/videos/2013/06/05/the-last-of-us-review', 'md5': 'd2e1586d9987d40fad7867bf96a018ea', 'info_dict': { 'id': '8f862beef863986b2785559b9e1aa599', 'ext': 'mp4', 'title': 'The Last of Us Review', 'description': 'md5:c8946d4260a4d43a00d5ae8ed998870c', 'timestamp': 1370440800, 'upload_date': '20130605', 'tags': 'count:9', 'display_id': 'the-last-of-us-review', 'thumbnail': 'https://assets1.ignimgs.com/vid/thumbnails/user/2014/03/26/lastofusreviewmimig2.jpg', 'duration': 440, }, 'params': { 'nocheckcertificate': True, }, }, { 'url': 'http://www.pcmag.com/videos/2015/01/06/010615-whats-new-now-is-gogo-snooping-on-your-data', 'md5': 'f1581a6fe8c5121be5b807684aeac3f6', 'info_dict': { 'id': 'ee10d774b508c9b8ec07e763b9125b91', 'ext': 'mp4', 'title': 'What\'s New Now: Is GoGo Snooping on Your Data?', 'description': 'md5:817a20299de610bd56f13175386da6fa', 'timestamp': 1420571160, 'upload_date': '20150106', 'tags': 'count:4', }, 'skip': '404 Not Found', }, { 'url': 'https://www.ign.com/videos/is-a-resident-evil-4-remake-on-the-way-ign-daily-fix', 'only_matching': 
True, }] @classmethod def _extract_embed_urls(cls, url, webpage): grids = re.findall( r'''(?s)<section\b[^>]+\bclass\s*=\s*['"](?:[\w-]+\s+)*?content-feed-grid(?!\B|-)[^>]+>(.+?)</section[^>]*>''', webpage) return filter( None, (urljoin(url, m.group('path')) for m in re.finditer( rf'''<a\b[^>]+\bhref\s*=\s*('|")(?P<path>/videos{cls._VIDEO_PATH_RE})\1''', grids[0] if grids else ''))) def _real_extract(self, url): display_id, filt = self._match_valid_url(url).group('id', 'filt') if display_id: return self._extract_video(url, display_id) return self._extract_playlist(url, filt or 'all') def _extract_playlist(self, url, display_id): webpage = self._download_webpage(url, display_id) return self.playlist_result( (self.url_result(u, self.ie_key()) for u in self._extract_embed_urls(url, webpage)), playlist_id=display_id) def _extract_video(self, url, display_id): video = self._checked_call_api(display_id) info = self._extract_video_info(video) return merge_dicts({ 'display_id': display_id, }, info) class IGNVideoIE(IGNBaseIE): _VALID_URL = r'https?://.+?\.ign\.com/(?:[a-z]{2}/)?[^/]+/(?P<id>\d+)/(?:video|trailer)/' _TESTS = [{ 'url': 'http://me.ign.com/en/videos/112203/video/how-hitman-aims-to-be-different-than-every-other-s', 'md5': 'dd9aca7ed2657c4e118d8b261e5e9de1', 'info_dict': { 'id': 'e9be7ea899a9bbfc0674accc22a36cc8', 'ext': 'mp4', 'title': 'How Hitman Aims to Be Different Than Every Other Stealth Game - NYCC 2015', 'description': 'Taking out assassination targets in Hitman has never been more stylish.', 'timestamp': 1444665600, 'upload_date': '20151012', 'display_id': '112203', 'thumbnail': 'https://sm.ign.com/ign_me/video/h/how-hitman/how-hitman-aims-to-be-different-than-every-other-s_8z14.jpg', 'duration': 298, 'tags': 'count:13', }, 'expected_warnings': ['HTTP Error 400: Bad Request'], }, { 'url': 'http://me.ign.com/ar/angry-birds-2/106533/video/lrd-ldyy-lwl-lfylm-angry-birds', 'only_matching': True, }, { # Youtube embed 'url': 
'https://me.ign.com/ar/ratchet-clank-rift-apart/144327/trailer/embed', 'only_matching': True, }, { # Twitter embed 'url': 'http://adria.ign.com/sherlock-season-4/9687/trailer/embed', 'only_matching': True, }, { # Vimeo embed 'url': 'https://kr.ign.com/bic-2018/3307/trailer/embed', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) parsed_url = urllib.parse.urlparse(url) embed_url = urllib.parse.urlunparse( parsed_url._replace(path=parsed_url.path.rsplit('/', 1)[0] + '/embed')) webpage, urlh = self._download_webpage_handle(embed_url, video_id) new_url = urlh.url ign_url = urllib.parse.parse_qs( urllib.parse.urlparse(new_url).query).get('url', [None])[-1] if ign_url: return self.url_result(ign_url, IGNIE.ie_key()) video = self._search_regex(r'(<div\b[^>]+\bdata-video-id\s*=\s*[^>]+>)', webpage, 'video element', fatal=False) if not video: if new_url == url: raise ExtractorError('Redirect loop: ' + url) return self.url_result(new_url) video = extract_attributes(video) video_data = video.get('data-settings') or '{}' video_data = self._parse_json(video_data, video_id)['video'] info = self._extract_video_info(video_data) return merge_dicts({ 'display_id': video_id, }, info) class IGNArticleIE(IGNBaseIE): _VALID_URL = r'https?://.+?\.ign\.com/(?:articles(?:/\d{4}/\d{2}/\d{2})?|(?:[a-z]{2}/)?(?:[\w-]+/)*?feature/\d+)/(?P<id>[^/?&#]+)' _PAGE_TYPE = 'article' _TESTS = [{ 'url': 'http://me.ign.com/en/feature/15775/100-little-things-in-gta-5-that-will-blow-your-mind', 'info_dict': { 'id': '72113', 'title': '100 Little Things in GTA 5 That Will Blow Your Mind', }, 'playlist': [ { 'info_dict': { 'id': '5ebbd138523268b93c9141af17bec937', 'ext': 'mp4', 'title': 'Grand Theft Auto V Video Review', 'description': 'Rockstar drops the mic on this generation of games. 
Watch our review of the masterly Grand Theft Auto V.', 'timestamp': 1379339880, 'upload_date': '20130916', 'tags': 'count:12', 'thumbnail': 'https://assets1.ignimgs.com/thumbs/userUploaded/2021/8/16/gta-v-heistsjpg-e94705-1629138553533.jpeg', 'display_id': 'grand-theft-auto-v-video-review', 'duration': 501, }, }, { 'info_dict': { 'id': '638672ee848ae4ff108df2a296418ee2', 'ext': 'mp4', 'title': 'GTA 5 In Slow Motion', 'description': 'The twisted beauty of GTA 5 in stunning slow motion.', 'timestamp': 1386878820, 'upload_date': '20131212', 'duration': 202, 'tags': 'count:25', 'display_id': 'gta-5-in-slow-motion', 'thumbnail': 'https://assets1.ignimgs.com/vid/thumbnails/user/2013/11/03/GTA-SLO-MO-1.jpg', }, }, ], 'params': { 'skip_download': True, }, 'expected_warnings': ['Backend fetch failed'], }, { 'url': 'http://www.ign.com/articles/2014/08/15/rewind-theater-wild-trailer-gamescom-2014?watch', 'info_dict': { 'id': '53ee806780a81ec46e0790f8', 'title': 'Rewind Theater - Wild Trailer Gamescom 2014', }, 'playlist_count': 1, 'expected_warnings': ['Backend fetch failed'], }, { # videoId pattern 'url': 'http://www.ign.com/articles/2017/06/08/new-ducktales-short-donalds-birthday-doesnt-go-as-planned', 'only_matching': True, }, { # Youtube embed 'url': 'https://www.ign.com/articles/2021-mvp-named-in-puppy-bowl-xvii', 'only_matching': True, }, { # IMDB embed 'url': 'https://www.ign.com/articles/2014/08/07/sons-of-anarchy-final-season-trailer', 'only_matching': True, }, { # Facebook embed 'url': 'https://www.ign.com/articles/2017/09/20/marvels-the-punisher-watch-the-new-trailer-for-the-netflix-series', 'only_matching': True, }, { # Brightcove embed 'url': 'https://www.ign.com/articles/2016/01/16/supergirl-goes-flying-with-martian-manhunter-in-new-clip', 'only_matching': True, }] def _checked_call_api(self, slug): try: return self._call_api(slug) except ExtractorError as e: if isinstance(e.cause, HTTPError): e.cause.args = e.cause.args or [ e.cause.response.url, 
e.cause.status, e.cause.reason] if e.cause.status == 404: raise ExtractorError( 'Content not found: expired?', cause=e.cause, expected=True) elif e.cause.status == 503: self.report_warning(str(e.cause)) return raise def _real_extract(self, url): display_id = self._match_id(url) article = self._checked_call_api(display_id) if article: # obsolete ? def entries(): media_url = traverse_obj( article, ('mediaRelations', 0, 'media', 'metadata', 'url'), expected_type=url_or_none) if media_url: yield self.url_result(media_url, IGNIE.ie_key()) for content in (article.get('content') or []): for video_url in re.findall(r'(?:\[(?:ignvideo\s+url|youtube\s+clip_id)|<iframe[^>]+src)="([^"]+)"', content): if url_or_none(video_url): yield self.url_result(video_url) return self.playlist_result( entries(), article.get('articleId'), traverse_obj( article, ('metadata', 'headline'), expected_type=lambda x: x.strip() or None)) webpage = self._download_webpage(url, display_id) playlist_id = self._html_search_meta('dable:item_id', webpage, default=None) if playlist_id: def entries(): for m in re.finditer( r'''(?s)<object\b[^>]+\bclass\s*=\s*("|')ign-videoplayer\1[^>]*>(?P<params>.+?)</object''', webpage): flashvars = self._search_regex( r'''(<param\b[^>]+\bname\s*=\s*("|')flashvars\2[^>]*>)''', m.group('params'), 'flashvars', default='') flashvars = urllib.parse.parse_qs(extract_attributes(flashvars).get('value') or '') v_url = url_or_none((flashvars.get('url') or [None])[-1]) if v_url: yield self.url_result(v_url) else: playlist_id = self._search_regex( r'''\bdata-post-id\s*=\s*("|')(?P<id>[\da-f]+)\1''', webpage, 'id', group='id', default=None) nextjs_data = self._search_nextjs_data(webpage, display_id) def entries(): for player in traverse_obj( nextjs_data, ('props', 'apolloState', 'ROOT_QUERY', lambda k, _: k.startswith('videoPlayerProps('), '__ref')): # skip promo links (which may not always be served, eg GH CI servers) if traverse_obj(nextjs_data, ('props', 'apolloState', 
player.replace('PlayerProps', 'ModernContent')), expected_type=dict): continue video = traverse_obj(nextjs_data, ('props', 'apolloState', player), expected_type=dict) or {} info = self._extract_video_info(video, fatal=False) if info: yield merge_dicts({ 'display_id': display_id, }, info) return self.playlist_result( entries(), playlist_id or display_id, re.sub(r'\s+-\s+IGN\s*$', '', self._og_search_title(webpage, default='')) or None)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/bleacherreport.py
yt_dlp/extractor/bleacherreport.py
from .amp import AMPIE from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, parse_iso8601, str_or_none, ) class BleacherReportIE(InfoExtractor): _WORKING = False _VALID_URL = r'https?://(?:www\.)?bleacherreport\.com/articles/(?P<id>\d+)' _TESTS = [{ 'url': 'http://bleacherreport.com/articles/2496438-fsu-stat-projections-is-jalen-ramsey-best-defensive-player-in-college-football', 'md5': 'a3ffc3dc73afdbc2010f02d98f990f20', 'info_dict': { 'id': '2496438', 'ext': 'mp4', 'title': 'FSU Stat Projections: Is Jalen Ramsey Best Defensive Player in College Football?', 'uploader_id': '3992341', 'description': 'CFB, ACC, Florida State', 'timestamp': 1434380212, 'upload_date': '20150615', 'uploader': 'Team Stream Now ', }, 'skip': 'Video removed', }, { 'url': 'http://bleacherreport.com/articles/2586817-aussie-golfers-get-fright-of-their-lives-after-being-chased-by-angry-kangaroo', 'md5': '6a5cd403418c7b01719248ca97fb0692', 'info_dict': { 'id': '2586817', 'ext': 'webm', 'title': 'Aussie Golfers Get Fright of Their Lives After Being Chased by Angry Kangaroo', 'timestamp': 1446839961, 'uploader': 'Sean Fay', 'description': 'md5:b1601e2314c4d8eec23b6eafe086a757', 'uploader_id': '6466954', 'upload_date': '20151011', }, 'add_ie': ['Youtube'], }] def _real_extract(self, url): article_id = self._match_id(url) article_data = self._download_json(f'http://api.bleacherreport.com/api/v1/articles/{article_id}', article_id)['article'] thumbnails = [] primary_photo = article_data.get('primaryPhoto') if primary_photo: thumbnails = [{ 'url': primary_photo['url'], 'width': primary_photo.get('width'), 'height': primary_photo.get('height'), }] info = { '_type': 'url_transparent', 'id': article_id, 'title': article_data['title'], 'uploader': article_data.get('author', {}).get('name'), 'uploader_id': str_or_none(article_data.get('authorId')), 'timestamp': parse_iso8601(article_data.get('createdAt')), 'thumbnails': thumbnails, 'comment_count': 
int_or_none(article_data.get('commentsCount')), 'view_count': int_or_none(article_data.get('hitCount')), } video = article_data.get('video') if video: video_type = video['type'] if video_type in ('cms.bleacherreport.com', 'vid.bleacherreport.com'): info['url'] = 'http://bleacherreport.com/video_embed?id={}'.format(video['id']) elif video_type == 'youtube.com': info['url'] = video['id'] elif video_type == 'vine.co': info['url'] = 'https://vine.co/v/{}'.format(video['id']) else: info['url'] = video_type + video['id'] return info else: raise ExtractorError('no video in the article', expected=True) class BleacherReportCMSIE(AMPIE): _WORKING = False _VALID_URL = r'https?://(?:www\.)?bleacherreport\.com/video_embed\?id=(?P<id>[0-9a-f-]{36}|\d{5})' _TESTS = [{ 'url': 'http://bleacherreport.com/video_embed?id=8fd44c2f-3dc5-4821-9118-2c825a98c0e1&library=video-cms', 'md5': '670b2d73f48549da032861130488c681', 'info_dict': { 'id': '8fd44c2f-3dc5-4821-9118-2c825a98c0e1', 'ext': 'mp4', 'title': 'Cena vs. Rollins Would Expose the Heavyweight Division', 'description': 'md5:984afb4ade2f9c0db35f3267ed88b36e', 'upload_date': '20150723', 'timestamp': 1437679032, }, 'expected_warnings': [ 'Unable to download f4m manifest', ], }] def _real_extract(self, url): video_id = self._match_id(url) info = self._extract_feed_info(f'http://vid.bleacherreport.com/videos/{video_id}.akamai') info['id'] = video_id return info
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tass.py
yt_dlp/extractor/tass.py
import json from .common import InfoExtractor from ..utils import ( js_to_json, qualities, ) class TassIE(InfoExtractor): _WORKING = False _VALID_URL = r'https?://(?:tass\.ru|itar-tass\.com)/[^/]+/(?P<id>\d+)' _TESTS = [ { 'url': 'http://tass.ru/obschestvo/1586870', 'md5': '3b4cdd011bc59174596b6145cda474a4', 'info_dict': { 'id': '1586870', 'ext': 'mp4', 'title': 'Посетителям московского зоопарка показали красную панду', 'description': 'Приехавшую из Дублина Зейну можно увидеть в павильоне "Кошки тропиков"', 'thumbnail': r're:^https?://.*\.jpg$', }, }, { 'url': 'http://itar-tass.com/obschestvo/1600009', 'only_matching': True, }, ] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) sources = json.loads(js_to_json(self._search_regex( r'(?s)sources\s*:\s*(\[.+?\])', webpage, 'sources'))) quality = qualities(['sd', 'hd']) formats = [] for source in sources: video_url = source.get('file') if not video_url or not video_url.startswith('http') or not video_url.endswith('.mp4'): continue label = source.get('label') formats.append({ 'url': video_url, 'format_id': label, 'quality': quality(label), }) return { 'id': video_id, 'title': self._og_search_title(webpage), 'description': self._og_search_description(webpage), 'thumbnail': self._og_search_thumbnail(webpage), 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/asobichannel.py
yt_dlp/extractor/asobichannel.py
from .common import InfoExtractor from ..utils import ( ExtractorError, clean_html, merge_dicts, parse_iso8601, url_or_none, ) from ..utils.traversal import traverse_obj class AsobiChannelBaseIE(InfoExtractor): _MICROCMS_HEADER = {'X-MICROCMS-API-KEY': 'qRaKehul9AHU8KtL0dnq1OCLKnFec6yrbcz3'} def _extract_info(self, metadata): return traverse_obj(metadata, { 'id': ('id', {str}), 'title': ('title', {str}), 'description': ('body', {clean_html}), 'thumbnail': ('contents', 'video_thumb', 'url', {url_or_none}), 'timestamp': ('publishedAt', {parse_iso8601}), 'modified_timestamp': ('updatedAt', {parse_iso8601}), 'channel': ('channel', 'name', {str}), 'channel_id': ('channel', 'id', {str}), }) class AsobiChannelIE(AsobiChannelBaseIE): IE_NAME = 'asobichannel' IE_DESC = 'ASOBI CHANNEL' _VALID_URL = r'https?://asobichannel\.asobistore\.jp/watch/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://asobichannel.asobistore.jp/watch/1ypp48qd32p', 'md5': '39df74e872afe032c4eb27b89144fc92', 'info_dict': { 'id': '1ypp48qd32p', 'ext': 'mp4', 'title': 'アイドルマスター ミリオンライブ! 765プロch 原っぱ通信 #1', 'description': 'md5:b930bd2199c9b2fd75951ce4aaa7efd2', 'thumbnail': 'https://images.microcms-assets.io/assets/d2420de4b9194e11beb164f99edb1f95/a8e6f84119f54eb9ab4ce16729239905/%E3%82%B5%E3%83%A0%E3%83%8D%20(1).png', 'timestamp': 1697098247, 'upload_date': '20231012', 'modified_timestamp': 1698381162, 'modified_date': '20231027', 'channel': 'アイドルマスター', 'channel_id': 'idolmaster', }, }, { 'url': 'https://asobichannel.asobistore.jp/watch/redigiwnjzqj', 'md5': '229fa8fb5c591c75ce8c37a497f113f6', 'info_dict': { 'id': 'redigiwnjzqj', 'ext': 'mp4', 'title': '【おまけ放送】アイドルマスター ミリオンライブ! 
765プロch 原っぱ通信 #1', 'description': 'md5:7d9cd35fb54425a6967822bd564ea2d9', 'thumbnail': 'https://images.microcms-assets.io/assets/d2420de4b9194e11beb164f99edb1f95/20e5c1d6184242eebc2512a5dec59bf0/P1_%E5%8E%9F%E3%81%A3%E3%81%B1%E3%82%B5%E3%83%A0%E3%83%8D.png', 'modified_timestamp': 1697797125, 'modified_date': '20231020', 'timestamp': 1697261769, 'upload_date': '20231014', 'channel': 'アイドルマスター', 'channel_id': 'idolmaster', }, }] _survapi_header = None def _real_initialize(self): token = self._download_json( 'https://asobichannel-api.asobistore.jp/api/v1/vspf/token', None, note='Retrieving API token') self._survapi_header = {'Authorization': f'Bearer {token}'} def _process_vod(self, video_id, metadata): content_id = metadata['contents']['video_id'] vod_data = self._download_json( f'https://survapi.channel.or.jp/proxy/v1/contents/{content_id}/get_by_cuid', video_id, headers=self._survapi_header, note='Downloading vod data') return { 'formats': self._extract_m3u8_formats(vod_data['ex_content']['streaming_url'], video_id), } def _process_live(self, video_id, metadata): content_id = metadata['contents']['video_id'] event_data = self._download_json( f'https://survapi.channel.or.jp/ex/events/{content_id}?embed=channel', video_id, headers=self._survapi_header, note='Downloading event data') player_type = traverse_obj(event_data, ('data', 'Player_type', {str})) if player_type == 'poster': self.raise_no_formats('Live event has not yet started', expected=True) live_status = 'is_upcoming' formats = [] elif player_type == 'player': live_status = 'is_live' formats = self._extract_m3u8_formats( event_data['data']['Channel']['Custom_live_url'], video_id, live=True) else: raise ExtractorError('Unsupported player type {player_type!r}') return { 'release_timestamp': traverse_obj(metadata, ('period', 'start', {parse_iso8601})), 'live_status': live_status, 'formats': formats, } def _real_extract(self, url): video_id = self._match_id(url) metadata = self._download_json( 
f'https://channel.microcms.io/api/v1/media/{video_id}', video_id, headers=self._MICROCMS_HEADER) info = self._extract_info(metadata) video_type = traverse_obj(metadata, ('contents', 'video_type', 0, {str})) if video_type == 'VOD': return merge_dicts(info, self._process_vod(video_id, metadata)) if video_type == 'LIVE': return merge_dicts(info, self._process_live(video_id, metadata)) raise ExtractorError(f'Unexpected video type {video_type!r}') class AsobiChannelTagURLIE(AsobiChannelBaseIE): IE_NAME = 'asobichannel:tag' IE_DESC = 'ASOBI CHANNEL' _VALID_URL = r'https?://asobichannel\.asobistore\.jp/tag/(?P<id>[a-z0-9-_]+)' _TESTS = [{ 'url': 'https://asobichannel.asobistore.jp/tag/bjhh-nbcja', 'info_dict': { 'id': 'bjhh-nbcja', 'title': 'アイドルマスター ミリオンライブ! 765プロch 原っぱ通信', }, 'playlist_mincount': 16, }, { 'url': 'https://asobichannel.asobistore.jp/tag/hvm5qw3c6od', 'info_dict': { 'id': 'hvm5qw3c6od', 'title': 'アイマスMOIW2023ラジオ', }, 'playlist_mincount': 13, }] def _real_extract(self, url): tag_id = self._match_id(url) webpage = self._download_webpage(url, tag_id) title = traverse_obj(self._search_nextjs_data( webpage, tag_id, fatal=False), ('props', 'pageProps', 'data', 'name', {str})) media = self._download_json( f'https://channel.microcms.io/api/v1/media?limit=999&filters=(tag[contains]{tag_id})', tag_id, headers=self._MICROCMS_HEADER) def entries(): for metadata in traverse_obj(media, ('contents', lambda _, v: v['id'])): yield { '_type': 'url', 'url': f'https://asobichannel.asobistore.jp/watch/{metadata["id"]}', 'ie_key': AsobiChannelIE.ie_key(), **self._extract_info(metadata), } return self.playlist_result(entries(), tag_id, title)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/radlive.py
yt_dlp/extractor/radlive.py
import json from .common import InfoExtractor from ..utils import ( ExtractorError, format_field, traverse_obj, try_get, unified_timestamp, ) class RadLiveIE(InfoExtractor): IE_NAME = 'radlive' _VALID_URL = r'https?://(?:www\.)?rad\.live/content/(?P<content_type>feature|episode)/(?P<id>[a-f0-9-]+)' _TESTS = [{ 'url': 'https://rad.live/content/feature/dc5acfbc-761b-4bec-9564-df999905116a', 'md5': '6219d5d31d52de87d21c9cf5b7cb27ff', 'info_dict': { 'id': 'dc5acfbc-761b-4bec-9564-df999905116a', 'ext': 'mp4', 'title': 'Deathpact - Digital Mirage 2 [Full Set]', 'language': 'en', 'thumbnail': 'https://static.12core.net/cb65ae077a079c68380e38f387fbc438.png', 'description': '', 'release_timestamp': 1600185600.0, 'channel': 'Proximity', 'channel_id': '9ce6dd01-70a4-4d59-afb6-d01f807cd009', 'channel_url': 'https://rad.live/content/channel/9ce6dd01-70a4-4d59-afb6-d01f807cd009', }, }, { 'url': 'https://rad.live/content/episode/bbcf66ec-0d02-4ca0-8dc0-4213eb2429bf', 'md5': '40b2175f347592125d93e9a344080125', 'info_dict': { 'id': 'bbcf66ec-0d02-4ca0-8dc0-4213eb2429bf', 'ext': 'mp4', 'title': 'E01: Bad Jokes 1', 'language': 'en', 'thumbnail': 'https://lsp.littlstar.com/channels/WHISTLE/BAD_JOKES/SEASON_1/BAD_JOKES_101/poster.jpg', 'description': 'Bad Jokes - Champions, Adam Pally, Super Troopers, Team Edge and 2Hype', 'episode': 'E01: Bad Jokes 1', 'episode_number': 1, 'episode_id': '336', }, }] def _real_extract(self, url): content_type, video_id = self._match_valid_url(url).groups() webpage = self._download_webpage(url, video_id) content_info = json.loads(self._search_regex( r'<script[^>]*type=([\'"])application/json\1[^>]*>(?P<json>{.+?})</script>', webpage, 'video info', group='json'))['props']['pageProps']['initialContentData'] video_info = content_info[content_type] if not video_info: raise ExtractorError('Unable to extract video info, make sure the URL is valid') formats = self._extract_m3u8_formats(video_info['assets']['videos'][0]['url'], video_id) data = 
video_info.get('structured_data', {}) release_date = unified_timestamp(traverse_obj(data, ('releasedEvent', 'startDate'))) channel = next(iter(content_info.get('channels', [])), {}) channel_id = channel.get('lrn', '').split(':')[-1] or None result = { 'id': video_id, 'title': video_info['title'], 'formats': formats, 'language': traverse_obj(data, ('potentialAction', 'target', 'inLanguage')), 'thumbnail': traverse_obj(data, ('image', 'contentUrl')), 'description': data.get('description'), 'release_timestamp': release_date, 'channel': channel.get('name'), 'channel_id': channel_id, 'channel_url': format_field(channel_id, None, 'https://rad.live/content/channel/%s'), } if content_type == 'episode': result.update({ # TODO: Get season number when downloading single episode 'episode': video_info.get('title'), 'episode_number': video_info.get('number'), 'episode_id': video_info.get('id'), }) return result class RadLiveSeasonIE(RadLiveIE): # XXX: Do not subclass from concrete IE IE_NAME = 'radlive:season' _VALID_URL = r'https?://(?:www\.)?rad\.live/content/season/(?P<id>[a-f0-9-]+)' _TESTS = [{ 'url': 'https://rad.live/content/season/08a290f7-c9ef-4e22-9105-c255995a2e75', 'md5': '40b2175f347592125d93e9a344080125', 'info_dict': { 'id': '08a290f7-c9ef-4e22-9105-c255995a2e75', 'title': 'Bad Jokes - Season 1', }, 'playlist_mincount': 5, }] @classmethod def suitable(cls, url): return False if RadLiveIE.suitable(url) else super().suitable(url) def _real_extract(self, url): season_id = self._match_id(url) webpage = self._download_webpage(url, season_id) content_info = json.loads(self._search_regex( r'<script[^>]*type=([\'"])application/json\1[^>]*>(?P<json>{.+?})</script>', webpage, 'video info', group='json'))['props']['pageProps']['initialContentData'] video_info = content_info['season'] entries = [{ '_type': 'url_transparent', 'id': episode['structured_data']['url'].split('/')[-1], 'url': episode['structured_data']['url'], 'series': try_get(content_info, lambda x: 
x['series']['title']), 'season': video_info['title'], 'season_number': video_info.get('number'), 'season_id': video_info.get('id'), 'ie_key': RadLiveIE.ie_key(), } for episode in video_info['episodes']] return self.playlist_result(entries, season_id, video_info.get('title')) class RadLiveChannelIE(RadLiveIE): # XXX: Do not subclass from concrete IE IE_NAME = 'radlive:channel' _VALID_URL = r'https?://(?:www\.)?rad\.live/content/channel/(?P<id>[a-f0-9-]+)' _TESTS = [{ 'url': 'https://rad.live/content/channel/5c4d8df4-6fa0-413c-81e3-873479b49274', 'md5': '625156a08b7f2b0b849f234e664457ac', 'info_dict': { 'id': '5c4d8df4-6fa0-413c-81e3-873479b49274', 'title': 'Whistle Sports', }, 'playlist_mincount': 7, }] _QUERY = ''' query WebChannelListing ($lrn: ID!) { channel (id:$lrn) { name features { structured_data } } }''' @classmethod def suitable(cls, url): return False if RadLiveIE.suitable(url) else super().suitable(url) def _real_extract(self, url): channel_id = self._match_id(url) graphql = self._download_json( 'https://content.mhq.12core.net/graphql', channel_id, headers={'Content-Type': 'application/json'}, data=json.dumps({ 'query': self._QUERY, 'variables': {'lrn': f'lrn:12core:media:content:channel:{channel_id}'}, }).encode()) data = traverse_obj(graphql, ('data', 'channel')) if not data: raise ExtractorError('Unable to extract video info, make sure the URL is valid') entries = [{ '_type': 'url_transparent', 'url': feature['structured_data']['url'], 'ie_key': RadLiveIE.ie_key(), } for feature in data['features']] return self.playlist_result(entries, channel_id, data.get('name'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/cloudycdn.py
yt_dlp/extractor/cloudycdn.py
import re from .common import InfoExtractor from ..utils import ( int_or_none, parse_iso8601, url_or_none, urlencode_postdata, ) from ..utils.traversal import traverse_obj class CloudyCDNIE(InfoExtractor): _VALID_URL = r'(?:https?:)?//embed\.(?P<domain>cloudycdn\.services|backscreen\.com)/(?P<site_id>[^/?#]+)/media/(?P<id>[\w-]+)' _EMBED_REGEX = [rf'<iframe[^>]+\bsrc=[\'"](?P<url>{_VALID_URL})'] _TESTS = [{ 'url': 'https://embed.cloudycdn.services/ltv/media/46k_d23-6000-105?', 'md5': '64f72a360ca530d5ed89c77646c9eee5', 'info_dict': { 'id': '46k_d23-6000-105', 'ext': 'mp4', 'timestamp': 1700589151, 'duration': 1442, 'upload_date': '20231121', 'title': 'D23-6000-105_cetstud', 'thumbnail': 'https://store.bstrm.net/tmsp00060/assets/media/660858/placeholder1700589200.jpg', }, }, { 'url': 'https://embed.cloudycdn.services/izm/media/26e_lv-8-5-1', 'md5': '798828a479151e2444d8dcfbec76e482', 'info_dict': { 'id': '26e_lv-8-5-1', 'ext': 'mp4', 'title': 'LV-8-5-1', 'timestamp': 1669767167, 'thumbnail': 'https://store.bstrm.net/tmsp00120/assets/media/488306/placeholder1679423604.jpg', 'duration': 1205, 'upload_date': '20221130', }, }, { # Video-only m3u8 formats need manual fixup 'url': 'https://embed.cloudycdn.services/ltv/media/08j_d24-6000-074', 'md5': 'fc472e40f6e6238446509be411c920e2', 'info_dict': { 'id': '08j_d24-6000-074', 'ext': 'mp4', 'upload_date': '20240620', 'duration': 1673, 'title': 'D24-6000-074-cetstud', 'timestamp': 1718902233, 'thumbnail': 'https://store.bstrm.net/tmsp00060/assets/media/788392/placeholder1718903938.jpg', }, 'params': {'format': 'bv'}, }, { 'url': 'https://embed.backscreen.com/ltv/media/32j_z25-0600-127?', 'md5': '9b6fa09ac1a4de53d4f42b94affc3b42', 'info_dict': { 'id': '32j_z25-0600-127', 'ext': 'mp4', 'title': 'Z25-0600-127-DZ', 'duration': 1906, 'thumbnail': 'https://store.bstrm.net/tmsp00060/assets/media/977427/placeholder1746633646.jpg', 'timestamp': 1746632402, 'upload_date': '20250507', }, }] _WEBPAGE_TESTS = [{ 'url': 
'https://www.tavaklase.lv/video/es-esmu-mina-um-2/', 'md5': '63074e8e6c84ac2a01f2fb8bf03b8f43', 'info_dict': { 'id': 'cqd_lib-2', 'ext': 'mp4', 'upload_date': '20230223', 'duration': 629, 'thumbnail': 'https://store.bstrm.net/tmsp00120/assets/media/518407/placeholder1678748124.jpg', 'timestamp': 1677181513, 'title': 'LIB-2', }, }] def _real_extract(self, url): domain, site_id, video_id = self._match_valid_url(url).group('domain', 'site_id', 'id') data = self._download_json( f'https://player.{domain}/player/{site_id}/media/{video_id}/', video_id, data=urlencode_postdata({ 'version': '6.4.0', 'referer': url, })) formats, subtitles = [], {} for m3u8_url in traverse_obj(data, ('source', 'sources', ..., 'src', {url_or_none})): fmts, subs = self._extract_m3u8_formats_and_subtitles(m3u8_url, video_id, fatal=False) for fmt in fmts: if re.search(r'chunklist_b\d+_vo_', fmt['url']): fmt['acodec'] = 'none' formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, **traverse_obj(data, { 'title': ('name', {str}), 'duration': ('duration', {int_or_none}), 'timestamp': ('upload_date', {parse_iso8601}), 'thumbnail': ('source', 'poster', {url_or_none}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/cda.py
yt_dlp/extractor/cda.py
import base64 import codecs import datetime as dt import hashlib import hmac import json import random import re import urllib.parse from .common import InfoExtractor from ..compat import compat_ord from ..utils import ( ExtractorError, OnDemandPagedList, determine_ext, float_or_none, int_or_none, merge_dicts, multipart_encode, parse_duration, try_call, url_or_none, urljoin, ) from ..utils.traversal import traverse_obj class CDAIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:(?:www|m)\.)?cda\.pl/video|ebd\.cda\.pl/[0-9]+x[0-9]+)/(?P<id>[0-9a-z]+)' _NETRC_MACHINE = 'cdapl' _BASE_URL = 'https://www.cda.pl' _BASE_API_URL = 'https://api.cda.pl' _API_HEADERS = { 'Accept': 'application/vnd.cda.public+json', } # hardcoded in the app _LOGIN_REQUEST_AUTH = 'Basic YzU3YzBlZDUtYTIzOC00MWQwLWI2NjQtNmZmMWMxY2Y2YzVlOklBTm95QlhRRVR6U09MV1hnV3MwMW0xT2VyNWJNZzV4clRNTXhpNGZJUGVGZ0lWUlo5UGVYTDhtUGZaR1U1U3Q' _BEARER_CACHE = 'cda-bearer' _TESTS = [{ 'url': 'http://www.cda.pl/video/5749950c', 'md5': '6f844bf51b15f31fae165365707ae970', 'info_dict': { 'id': '5749950c', 'ext': 'mp4', 'height': 720, 'title': 'Oto dlaczego przed zakrętem należy zwolnić.', 'description': 'md5:269ccd135d550da90d1662651fcb9772', 'thumbnail': r're:^https?://.*\.jpg$', 'average_rating': float, 'duration': 39, 'age_limit': 0, 'upload_date': '20160221', 'timestamp': 1456078244, }, }, { 'url': 'http://www.cda.pl/video/57413289', 'md5': 'a88828770a8310fc00be6c95faf7f4d5', 'info_dict': { 'id': '57413289', 'ext': 'mp4', 'title': 'Lądowanie na lotnisku na Maderze', 'description': 'md5:60d76b71186dcce4e0ba6d4bbdb13e1a', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'crash404', 'average_rating': float, 'duration': 137, 'age_limit': 0, 'upload_date': '20160220', 'timestamp': 1455968218, }, }, { # Age-restricted with vfilm redirection 'url': 'https://www.cda.pl/video/8753244c4', 'md5': 'd8eeb83d63611289507010d3df3bb8b3', 'info_dict': { 'id': '8753244c4', 'ext': 'mp4', 'title': '[18+] Bez Filtra: Rezerwowe Psy czyli... 
najwulgarniejsza polska gra?', 'description': 'md5:ae80bac31bd6a9f077a6cce03c7c077e', 'height': 1080, 'uploader': 'arhn eu', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 991, 'age_limit': 18, 'average_rating': float, 'timestamp': 1633888264, 'upload_date': '20211010', }, }, { # Age-restricted without vfilm redirection 'url': 'https://www.cda.pl/video/17028157b8', 'md5': 'c1fe5ff4582bace95d4f0ce0fbd0f992', 'info_dict': { 'id': '17028157b8', 'ext': 'mp4', 'title': 'STENDUPY MICHAŁ OGIŃSKI', 'description': 'md5:5851f3272bfc31f762d616040a1d609a', 'height': 480, 'uploader': 'oginski', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 18855, 'age_limit': 18, 'average_rating': float, 'timestamp': 1699705901, 'upload_date': '20231111', }, }, { 'url': 'http://ebd.cda.pl/0x0/5749950c', 'only_matching': True, }, { 'url': 'https://m.cda.pl/video/617297677', 'only_matching': True, }] def _download_age_confirm_page(self, url, video_id, *args, **kwargs): data, content_type = multipart_encode({'age_confirm': ''}) return self._download_webpage( url, video_id, *args, data=data, headers={ 'Referer': url, 'Content-Type': content_type, }, **kwargs) def _perform_login(self, username, password): app_version = '1.2.255 build 21541' android_version = random.randrange(8, 14) phone_model = random.choice(( # x-kom.pl top selling Android smartphones, as of 2022-12-26 # https://www.x-kom.pl/g-4/c/1590-smartfony-i-telefony.html?f201-system-operacyjny=61322-android 'ASUS ZenFone 8', 'Motorola edge 20 5G', 'Motorola edge 30 neo 5G', 'Motorola moto g22', 'OnePlus Nord 2T 5G', 'Samsung Galaxy A32 SM‑A325F', 'Samsung Galaxy M13', 'Samsung Galaxy S20 FE 5G', 'Xiaomi 11T', 'Xiaomi POCO M4 Pro', 'Xiaomi Redmi 10', 'Xiaomi Redmi 10C', 'Xiaomi Redmi 9C NFC', 'Xiaomi Redmi Note 10 Pro', 'Xiaomi Redmi Note 11 Pro', 'Xiaomi Redmi Note 11', 'Xiaomi Redmi Note 11S 5G', 'Xiaomi Redmi Note 11S', 'realme 10', 'realme 9 Pro+', 'vivo Y33s', )) self._API_HEADERS['User-Agent'] = f'pl.cda 1.0 (version 
{app_version}; Android {android_version}; {phone_model})' cached_bearer = self.cache.load(self._BEARER_CACHE, username) or {} if cached_bearer.get('valid_until', 0) > dt.datetime.now().timestamp() + 5: self._API_HEADERS['Authorization'] = f'Bearer {cached_bearer["token"]}' return password_hash = base64.urlsafe_b64encode(hmac.new( b's01m1Oer5IANoyBXQETzSOLWXgWs01m1Oer5bMg5xrTMMxRZ9Pi4fIPeFgIVRZ9PeXL8mPfXQETZGUAN5StRZ9P', ''.join(f'{bytes((bt & 255, )).hex():0>2}' for bt in hashlib.md5(password.encode()).digest()).encode(), hashlib.sha256).digest()).decode().replace('=', '') token_res = self._download_json( f'{self._BASE_API_URL}/oauth/token', None, 'Logging in', data=b'', headers={**self._API_HEADERS, 'Authorization': self._LOGIN_REQUEST_AUTH}, query={ 'grant_type': 'password', 'login': username, 'password': password_hash, }) self.cache.store(self._BEARER_CACHE, username, { 'token': token_res['access_token'], 'valid_until': token_res['expires_in'] + dt.datetime.now().timestamp(), }) self._API_HEADERS['Authorization'] = f'Bearer {token_res["access_token"]}' def _real_extract(self, url): video_id = self._match_id(url) if 'Authorization' in self._API_HEADERS: return self._api_extract(video_id) else: return self._web_extract(video_id) def _api_extract(self, video_id): meta = self._download_json( f'{self._BASE_API_URL}/video/{video_id}', video_id, headers=self._API_HEADERS)['video'] uploader = traverse_obj(meta, ('author', 'login', {str})) formats = [{ 'url': quality['file'], 'format': quality.get('title'), 'resolution': quality.get('name'), 'height': try_call(lambda: int(quality['name'][:-1])), 'filesize': quality.get('length'), } for quality in meta['qualities'] if quality.get('file')] if meta.get('premium') and not meta.get('premium_free') and not formats: raise ExtractorError( 'Video requires CDA Premium - subscription needed', expected=True) return { 'id': video_id, 'title': meta.get('title'), 'description': meta.get('description'), 'uploader': None if uploader == 
'anonim' else uploader, 'average_rating': float_or_none(meta.get('rating')), 'thumbnail': meta.get('thumb'), 'formats': formats, 'duration': meta.get('duration'), 'age_limit': 18 if meta.get('for_adults') else 0, 'view_count': meta.get('views'), } def _web_extract(self, video_id): self._set_cookie('cda.pl', 'cda.player', 'html5') webpage, urlh = self._download_webpage_handle( f'{self._BASE_URL}/video/{video_id}/vfilm', video_id) if 'Ten film jest dostępny dla użytkowników premium' in webpage: self.raise_login_required('This video is only available for premium users') if re.search(r'niedostępn[ey] w(?:&nbsp;|\s+)Twoim kraju\s*<', webpage): self.raise_geo_restricted() need_confirm_age = False if self._html_search_regex(r'(<button[^>]+name="[^"]*age_confirm[^"]*")', webpage, 'birthday validate form', default=None): webpage = self._download_age_confirm_page( urlh.url, video_id, note='Confirming age') need_confirm_age = True formats = [] uploader = self._search_regex(r'''(?x) <(span|meta)[^>]+itemprop=(["\'])author\2[^>]*> (?:<\1[^>]*>[^<]*</\1>|(?!</\1>)(?:.|\n))*? 
<(span|meta)[^>]+itemprop=(["\'])name\4[^>]*>(?P<uploader>[^<]+)</\3> ''', webpage, 'uploader', default=None, group='uploader') average_rating = self._search_regex( (r'<(?:span|meta)[^>]+itemprop=(["\'])ratingValue\1[^>]*>(?P<rating_value>[0-9.]+)', r'<span[^>]+\bclass=["\']rating["\'][^>]*>(?P<rating_value>[0-9.]+)'), webpage, 'rating', fatal=False, group='rating_value') info_dict = { 'id': video_id, 'title': self._og_search_title(webpage), 'description': self._og_search_description(webpage), 'uploader': uploader, 'average_rating': float_or_none(average_rating), 'thumbnail': self._og_search_thumbnail(webpage), 'formats': formats, 'duration': None, 'age_limit': 18 if need_confirm_age else 0, } info = self._search_json_ld(webpage, video_id, default={}) # Source: https://www.cda.pl/js/player.js?t=1606154898 def decrypt_file(a): for p in ('_XDDD', '_CDA', '_ADC', '_CXD', '_QWE', '_Q5', '_IKSDE'): a = a.replace(p, '') a = urllib.parse.unquote(a) b = [] for c in a: f = compat_ord(c) b.append(chr(33 + (f + 14) % 94) if 33 <= f <= 126 else chr(f)) a = ''.join(b) a = a.replace('.cda.mp4', '') for p in ('.2cda.pl', '.3cda.pl'): a = a.replace(p, '.cda.pl') if '/upstream' in a: a = a.replace('/upstream', '.mp4/upstream') return 'https://' + a return 'https://' + a + '.mp4' def extract_format(page, version): json_str = self._html_search_regex( r'player_data=(\\?["\'])(?P<player_data>.+?)\1', page, f'{version} player_json', fatal=False, group='player_data') if not json_str: return player_data = self._parse_json( json_str, f'{version} player_data', fatal=False) if not player_data: return video = player_data.get('video') if not video or 'file' not in video: self.report_warning(f'Unable to extract {version} version information') return video_quality = video.get('quality') qualities = video.get('qualities', {}) video_quality = next((k for k, v in qualities.items() if v == video_quality), video_quality) if video.get('file'): if video['file'].startswith('uggc'): video['file'] = 
codecs.decode(video['file'], 'rot_13') if video['file'].endswith('adc.mp4'): video['file'] = video['file'].replace('adc.mp4', '.mp4') elif not video['file'].startswith('http'): video['file'] = decrypt_file(video['file']) info_dict['formats'].append({ 'url': video['file'], 'format_id': video_quality, 'height': int_or_none(video_quality[:-1]), }) for quality, cda_quality in qualities.items(): if quality == video_quality: continue data = {'jsonrpc': '2.0', 'method': 'videoGetLink', 'id': 2, 'params': [video_id, cda_quality, video.get('ts'), video.get('hash2'), {}]} data = json.dumps(data).encode() response = self._download_json( f'https://www.cda.pl/video/{video_id}', video_id, headers={ 'Content-Type': 'application/json', 'X-Requested-With': 'XMLHttpRequest', }, data=data, note=f'Fetching {quality} url', errnote=f'Failed to fetch {quality} url', fatal=False) if ( traverse_obj(response, ('result', 'status')) != 'ok' or not traverse_obj(response, ('result', 'resp', {url_or_none})) ): continue video_url = response['result']['resp'] ext = determine_ext(video_url) if ext == 'mpd': info_dict['formats'].extend(self._extract_mpd_formats( video_url, video_id, mpd_id='dash', fatal=False)) elif ext == 'm3u8': info_dict['formats'].extend(self._extract_m3u8_formats( video_url, video_id, 'mp4', m3u8_id='hls', fatal=False)) else: info_dict['formats'].append({ 'url': video_url, 'format_id': quality, 'height': int_or_none(quality[:-1]), }) if not info_dict['duration']: info_dict['duration'] = parse_duration(video.get('duration')) extract_format(webpage, 'default') for href, resolution in re.findall( r'<a[^>]+data-quality="[^"]+"[^>]+href="([^"]+)"[^>]+class="quality-btn"[^>]*>([0-9]+p)', webpage): if need_confirm_age: handler = self._download_age_confirm_page else: handler = self._download_webpage webpage = handler( urljoin(self._BASE_URL, href), video_id, f'Downloading {resolution} version information', fatal=False) if not webpage: # Manually report warning because empty page is 
returned when # invalid version is requested. self.report_warning(f'Unable to download {resolution} version information') continue extract_format(webpage, resolution) return merge_dicts(info_dict, info) class CDAFolderIE(InfoExtractor): _MAX_PAGE_SIZE = 36 _VALID_URL = r'https?://(?:(?:www|m)\.)?cda\.pl/(?P<channel>[\w-]+)/folder/(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.cda.pl/domino264/folder/31188385', 'info_dict': { 'id': '31188385', 'title': 'SERIA DRUGA', }, 'playlist_mincount': 13, }, { 'url': 'https://www.cda.pl/smiechawaTV/folder/2664592/vfilm', 'info_dict': { 'id': '2664592', 'title': 'VideoDowcipy - wszystkie odcinki', }, 'playlist_mincount': 71, }, { 'url': 'https://www.cda.pl/DeliciousBeauty/folder/19129979/vfilm', 'info_dict': { 'id': '19129979', 'title': 'TESTY KOSMETYKÓW', }, 'playlist_mincount': 139, }, { 'url': 'https://www.cda.pl/FILMY-SERIALE-ANIME-KRESKOWKI-BAJKI/folder/18493422', 'only_matching': True, }, { 'url': 'https://m.cda.pl/smiechawaTV/folder/2664592/vfilm', 'only_matching': True, }] def _real_extract(self, url): folder_id, channel = self._match_valid_url(url).group('id', 'channel') webpage = self._download_webpage(url, folder_id) def extract_page_entries(page): webpage = self._download_webpage( f'https://www.cda.pl/{channel}/folder/{folder_id}/vfilm/{page + 1}', folder_id, f'Downloading page {page + 1}', expected_status=404) items = re.findall(r'<a[^>]+href="/video/([0-9a-z]+)"', webpage) for video_id in items: yield self.url_result(f'https://www.cda.pl/video/{video_id}', CDAIE, video_id) return self.playlist_result( OnDemandPagedList(extract_page_entries, self._MAX_PAGE_SIZE), folder_id, self._og_search_title(webpage))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false