repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ufctv.py
yt_dlp/extractor/ufctv.py
from .imggaming import ImgGamingBaseIE


class UFCTVIE(ImgGamingBaseIE):
    # UFC Fight Pass / ufc.tv — all real extraction logic lives in
    # ImgGamingBaseIE; this subclass only binds the site-specific config.
    _VALID_URL = ImgGamingBaseIE._VALID_URL_TEMPL % r'(?:(?:app|www)\.)?(?:ufc\.tv|(?:ufc)?fightpass\.com)|ufcfightpass\.img(?:dge|gaming)\.com'
    _NETRC_MACHINE = 'ufctv'  # machine name looked up in ~/.netrc for credentials
    _REALM = 'ufc'  # ImgGaming auth realm for this tenant

class UFCArabiaIE(ImgGamingBaseIE):
    # UFC Arabia — same ImgGaming platform, different tenant/realm.
    _VALID_URL = ImgGamingBaseIE._VALID_URL_TEMPL % r'(?:(?:app|www)\.)?ufcarabia\.(?:ae|com)'
    _NETRC_MACHINE = 'ufcarabia'
    _REALM = 'admufc'
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/skylinewebcams.py
yt_dlp/extractor/skylinewebcams.py
from .common import InfoExtractor


class SkylineWebcamsIE(InfoExtractor):
    """Extractor for live webcam streams hosted on skylinewebcams.com."""

    _WORKING = False
    _VALID_URL = r'https?://(?:www\.)?skylinewebcams\.com/[^/]+/webcam/(?:[^/]+/)+(?P<id>[^/]+)\.html'
    _TEST = {
        'url': 'https://www.skylinewebcams.com/it/webcam/italia/lazio/roma/scalinata-piazza-di-spagna-barcaccia.html',
        'info_dict': {
            'id': 'scalinata-piazza-di-spagna-barcaccia',
            'ext': 'mp4',
            'title': 're:^Live Webcam Scalinata di Piazza di Spagna - La Barcaccia [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'description': 'Roma, veduta sulla Scalinata di Piazza di Spagna e sulla Barcaccia',
            'is_live': True,
        },
        'params': {
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        webcam_id = self._match_id(url)
        page = self._download_webpage(url, webcam_id)

        # The player config embeds the HLS manifest as `url:` or `source:`.
        m3u8_url = self._search_regex(
            r'(?:url|source)\s*:\s*(["\'])(?P<url>(?:https?:)?//.+?\.m3u8.*?)\1', page,
            'stream url', group='url')

        return {
            'id': webcam_id,
            'url': m3u8_url,
            'ext': 'mp4',
            'title': self._og_search_title(page),
            'description': self._og_search_description(page),
            'is_live': True,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/hrti.py
yt_dlp/extractor/hrti.py
import json

from .common import InfoExtractor
from ..networking import Request
from ..networking.exceptions import HTTPError
from ..utils import (
    ExtractorError,
    clean_html,
    int_or_none,
    parse_age_limit,
    try_get,
)


class HRTiBaseIE(InfoExtractor):
    """
    Base Information Extractor for Croatian Radiotelevision
    video on demand site https://hrti.hrt.hr
    Reverse engineered from the JavaScript app in app.min.js
    """
    _NETRC_MACHINE = 'hrti'
    _APP_LANGUAGE = 'hr'
    _APP_VERSION = '1.1'
    _APP_PUBLICATION_ID = 'all_in_one'
    _API_URL = 'http://clientapi.hrt.hr/client_api.php/config/identify/format/json'
    # Streaming token obtained by _perform_login; None until login succeeds
    _token = None

    def _initialize_pre_login(self):
        """Register a client session with the API and discover the
        search/login/logout endpoint URLs used by the other methods."""
        init_data = {
            'application_publication_id': self._APP_PUBLICATION_ID,
        }
        uuid = self._download_json(
            self._API_URL, None, note='Downloading uuid',
            errnote='Unable to download uuid',
            data=json.dumps(init_data).encode())['uuid']
        app_data = {
            'uuid': uuid,
            'application_publication_id': self._APP_PUBLICATION_ID,
            'application_version': self._APP_VERSION,
        }
        # The identify endpoint requires a PUT for the second phase
        req = Request(self._API_URL, data=json.dumps(app_data).encode())
        req.get_method = lambda: 'PUT'
        resources = self._download_json(
            req, None, note='Downloading session information',
            errnote='Unable to download session information')
        self._session_id = resources['session_id']
        modules = resources['modules']
        self._search_url = modules['vod_catalog']['resources']['search']['uri'].format(
            language=self._APP_LANGUAGE,
            application_id=self._APP_PUBLICATION_ID)
        self._login_url = (modules['user']['resources']['login']['uri']
                           + '/format/json').format(session_id=self._session_id)
        self._logout_url = modules['user']['resources']['logout']['uri']

    def _perform_login(self, username, password):
        """Log in and store the secure streaming token on self._token.

        Raises ExtractorError with the API's message when login fails.
        """
        auth_data = {
            'username': username,
            'password': password,
        }
        try:
            auth_info = self._download_json(
                self._login_url, None, note='Logging in', errnote='Unable to log in',
                data=json.dumps(auth_data).encode())
        except ExtractorError as e:
            if isinstance(e.cause, HTTPError) and e.cause.status == 406:
                # HTTP 406 still carries a JSON error body describing the
                # failure. response.read() already returns bytes, which
                # json.loads/_parse_json accept directly — the previous
                # `.read().encode()` raised AttributeError (bytes has no
                # encode method) before the error could be reported.
                auth_info = self._parse_json(e.cause.response.read(), None)
            else:
                raise
        error_message = auth_info.get('error', {}).get('message')
        if error_message:
            raise ExtractorError(
                f'{self.IE_NAME} said: {error_message}', expected=True)
        self._token = auth_info['secure_streaming_token']

    def _real_initialize(self):
        if not self._token:
            # TODO: figure out authentication with cookies
            self.raise_login_required(method='password')


class HRTiIE(HRTiBaseIE):
    _VALID_URL = r'''(?x)
                    (?:
                        hrti:(?P<short_id>[0-9]+)|
                        https?://
                            hrti\.hrt\.hr/(?:\#/)?video/show/(?P<id>[0-9]+)/(?P<display_id>[^/]+)?
                    )
                    '''
    _TESTS = [{
        'url': 'https://hrti.hrt.hr/#/video/show/2181385/republika-dokumentarna-serija-16-hd',
        'info_dict': {
            'id': '2181385',
            'display_id': 'republika-dokumentarna-serija-16-hd',
            'ext': 'mp4',
            'title': 'REPUBLIKA, dokumentarna serija (1/6) (HD)',
            'description': 'md5:48af85f620e8e0e1df4096270568544f',
            'duration': 2922,
            'view_count': int,
            'average_rating': int,
            'episode_number': int,
            'season_number': int,
            'age_limit': 12,
        },
        'skip': 'Requires account credentials',
    }, {
        'url': 'https://hrti.hrt.hr/#/video/show/2181385/',
        'only_matching': True,
    }, {
        'url': 'hrti:2181385',
        'only_matching': True,
    }, {
        'url': 'https://hrti.hrt.hr/video/show/3873068/cuvar-dvorca-dramska-serija-14',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        # `hrti:<id>` URLs only fill short_id; site URLs fill id/display_id
        video_id = mobj.group('short_id') or mobj.group('id')
        display_id = mobj.group('display_id') or video_id

        video = self._download_json(
            f'{self._search_url}/video_id/{video_id}/format/json',
            display_id, 'Downloading video metadata JSON')['video'][0]

        title_info = video['title']
        title = title_info['title_long']

        movie = video['video_assets']['movie'][0]
        # The manifest URL is a template with a TOKEN placeholder
        m3u8_url = movie['url'].format(TOKEN=self._token)
        formats = self._extract_m3u8_formats(
            m3u8_url, display_id, 'mp4', entry_protocol='m3u8_native',
            m3u8_id='hls')

        description = clean_html(title_info.get('summary_long'))
        age_limit = parse_age_limit(video.get('parental_control', {}).get('rating'))
        view_count = int_or_none(video.get('views'))
        average_rating = int_or_none(video.get('user_rating'))
        duration = int_or_none(movie.get('duration'))

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'duration': duration,
            'view_count': view_count,
            'average_rating': average_rating,
            'age_limit': age_limit,
            'formats': formats,
        }


class HRTiPlaylistIE(HRTiBaseIE):
    _VALID_URL = r'https?://hrti\.hrt\.hr/(?:#/)?video/list/category/(?P<id>[0-9]+)/(?P<display_id>[^/]+)?'
    _TESTS = [{
        'url': 'https://hrti.hrt.hr/#/video/list/category/212/ekumena',
        'info_dict': {
            'id': '212',
            'title': 'ekumena',
        },
        'playlist_mincount': 8,
        'skip': 'Requires account credentials',
    }, {
        'url': 'https://hrti.hrt.hr/#/video/list/category/212/',
        'only_matching': True,
    }, {
        'url': 'https://hrti.hrt.hr/video/list/category/212/ekumena',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        category_id = mobj.group('id')
        display_id = mobj.group('display_id') or category_id

        response = self._download_json(
            f'{self._search_url}/category_id/{category_id}/format/json',
            display_id, 'Downloading video metadata JSON')

        # Prefer the curated "alternatives" listing; fall back to the flat
        # videos array when it is absent
        video_ids = try_get(
            response, lambda x: x['video_listings'][0]['alternatives'][0]['list'],
            list) or [video['id'] for video in response.get('videos', []) if video.get('id')]

        entries = [self.url_result(f'hrti:{video_id}') for video_id in video_ids]

        return self.playlist_result(entries, category_id, display_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/cctv.py
yt_dlp/extractor/cctv.py
import re

from .common import InfoExtractor
from ..utils import (
    float_or_none,
    try_get,
    unified_timestamp,
)


class CCTVIE(InfoExtractor):
    # Extractor for China Central Television / CNTV / NCPA pages. The page
    # embeds a hex GUID under one of several historical variable names (see
    # the comments on each test case), which is then resolved through the
    # vdn.apps.cntv.cn video-info API.
    IE_DESC = '央视网'
    _VALID_URL = r'https?://(?:(?:[^/]+)\.(?:cntv|cctv)\.(?:com|cn)|(?:www\.)?ncpa-classic\.com)/(?:[^/]+/)*?(?P<id>[^/?#&]+?)(?:/index)?(?:\.s?html|[?#&]|$)'
    _TESTS = [{
        # fo.addVariable("videoCenterId","id")
        'url': 'http://sports.cntv.cn/2016/02/12/ARTIaBRxv4rTT1yWf1frW2wi160212.shtml',
        'md5': 'd61ec00a493e09da810bf406a078f691',
        'info_dict': {
            'id': '5ecdbeab623f4973b40ff25f18b174e8',
            'ext': 'mp4',
            'title': '[NBA]二少联手砍下46分 雷霆主场击败鹈鹕(快讯)',
            'description': 'md5:7e14a5328dc5eb3d1cd6afbbe0574e95',
            'duration': 98,
            'uploader': 'songjunjie',
            'timestamp': 1455279956,
            'upload_date': '20160212',
        },
    }, {
        # var guid = "id"
        'url': 'http://tv.cctv.com/2016/02/05/VIDEUS7apq3lKrHG9Dncm03B160205.shtml',
        'info_dict': {
            'id': 'efc5d49e5b3b4ab2b34f3a502b73d3ae',
            'ext': 'mp4',
            'title': '[赛车]“车王”舒马赫恢复情况成谜(快讯)',
            'description': '2月4日,蒙特泽莫罗透露了关于“车王”舒马赫恢复情况,但情况是否属实遭到了质疑。',
            'duration': 37,
            'uploader': 'shujun',
            'timestamp': 1454677291,
            'upload_date': '20160205',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # changePlayer('id')
        'url': 'http://english.cntv.cn/special/four_comprehensives/index.shtml',
        'info_dict': {
            'id': '4bb9bb4db7a6471ba85fdeda5af0381e',
            'ext': 'mp4',
            'title': 'NHnews008 ANNUAL POLITICAL SEASON',
            'description': 'Four Comprehensives',
            'duration': 60,
            'uploader': 'zhangyunlei',
            'timestamp': 1425385521,
            'upload_date': '20150303',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # loadvideo('id')
        'url': 'http://cctv.cntv.cn/lm/tvseries_russian/yilugesanghua/index.shtml',
        'info_dict': {
            'id': 'b15f009ff45c43968b9af583fc2e04b2',
            'ext': 'mp4',
            'title': 'Путь,усыпанный космеями Серия 1',
            'description': 'Путь, усыпанный космеями',
            'duration': 2645,
            'uploader': 'renxue',
            'timestamp': 1477479241,
            'upload_date': '20161026',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # var initMyAray = 'id'
        'url': 'http://www.ncpa-classic.com/2013/05/22/VIDE1369219508996867.shtml',
        'info_dict': {
            'id': 'a194cfa7f18c426b823d876668325946',
            'ext': 'mp4',
            'title': '小泽征尔音乐塾 音乐梦想无国界',
            'duration': 2173,
            'timestamp': 1369248264,
            'upload_date': '20130522',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # videoCenterId: "id"
        'url': 'http://news.cctv.com/2024/02/21/ARTIcU5tKIOIF2myEGCATkLo240221.shtml',
        'info_dict': {
            'id': '5c846c0518444308ba32c4159df3b3e0',
            'ext': 'mp4',
            'title': '《平“语”近人——习近平喜欢的典故》第三季 第5集:风物长宜放眼量',
            'uploader': 'yangjuan',
            'timestamp': 1708554940,
            'upload_date': '20240221',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # var ids = ["id"]
        'url': 'http://www.ncpa-classic.com/clt/more/416/index.shtml',
        'info_dict': {
            'id': 'a8606119a4884588a79d81c02abecc16',
            'ext': 'mp3',
            'title': '来自维也纳的新年贺礼',
            'description': 'md5:f13764ae8dd484e84dd4b39d5bcba2a7',
            'duration': 1578,
            'uploader': 'djy',
            'timestamp': 1482942419,
            'upload_date': '20161228',
        },
        'params': {
            'skip_download': True,
        },
        'expected_warnings': ['Failed to download m3u8 information'],
    }, {
        'url': 'http://ent.cntv.cn/2016/01/18/ARTIjprSSJH8DryTVr5Bx8Wb160118.shtml',
        'only_matching': True,
    }, {
        'url': 'http://tv.cntv.cn/video/C39296/e0210d949f113ddfb38d31f00a4e5c44',
        'only_matching': True,
    }, {
        'url': 'http://english.cntv.cn/2016/09/03/VIDEhnkB5y9AgHyIEVphCEz1160903.shtml',
        'only_matching': True,
    }, {
        'url': 'http://tv.cctv.com/2016/09/07/VIDE5C1FnlX5bUywlrjhxXOV160907.shtml',
        'only_matching': True,
    }, {
        'url': 'http://tv.cntv.cn/video/C39296/95cfac44cabd3ddc4a9438780a4e5c44',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Resolve the page's embedded GUID and build HTTP + HLS formats."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The real video id is a hex GUID embedded in the page under one of
        # several variable names used by different site generations; the URL
        # slug matched above is only used to fetch the page.
        video_id = self._search_regex(
            [r'var\s+guid\s*=\s*["\']([\da-fA-F]+)',
             r'videoCenterId(?:["\']\s*,|:)\s*["\']([\da-fA-F]+)',
             r'changePlayer\s*\(\s*["\']([\da-fA-F]+)',
             r'load[Vv]ideo\s*\(\s*["\']([\da-fA-F]+)',
             r'var\s+initMyAray\s*=\s*["\']([\da-fA-F]+)',
             r'var\s+ids\s*=\s*\[["\']([\da-fA-F]+)'],
            webpage, 'video id')

        data = self._download_json(
            'http://vdn.apps.cntv.cn/api/getHttpVideoInfo.do', video_id,
            query={
                'pid': video_id,
                'url': url,
                'idl': 32,
                'idlr': 32,
                'modifyed': 'false',
            })

        title = data['title']

        formats = []

        video = data.get('video')
        if isinstance(video, dict):
            # lowChapters (quality 0) and chapters (quality 1) hold direct
            # HTTP URLs; only the first chapter of each is used here.
            for quality, chapters_key in enumerate(('lowChapters', 'chapters')):
                video_url = try_get(
                    video, lambda x: x[chapters_key][0]['url'], str)
                if video_url:
                    formats.append({
                        'url': video_url,
                        'format_id': 'http',
                        'quality': quality,
                        # Sample clip
                        'preference': -10,
                    })

        hls_url = try_get(data, lambda x: x['hls_url'], str)
        if hls_url:
            # Strip the bitrate cap so the master playlist exposes all renditions
            hls_url = re.sub(r'maxbr=\d+&?', '', hls_url)
            formats.extend(self._extract_m3u8_formats(
                hls_url, video_id, 'mp4', entry_protocol='m3u8_native',
                m3u8_id='hls', fatal=False))

        uploader = data.get('editer_name')
        description = self._html_search_meta(
            'description', webpage, default=None)
        timestamp = unified_timestamp(data.get('f_pgmtime'))
        duration = float_or_none(try_get(video, lambda x: x['totalLength']))

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'uploader': uploader,
            'timestamp': timestamp,
            'duration': duration,
            'formats': formats,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/laxarxames.py
yt_dlp/extractor/laxarxames.py
import json

from .brightcove import BrightcoveNewIE
from .common import InfoExtractor
from ..utils import ExtractorError
from ..utils.traversal import traverse_obj


class LaXarxaMesIE(InfoExtractor):
    # Extractor for laxarxames.cat; videos are delegated to Brightcove after
    # resolving the Brightcove video id through the site's authenticated API.
    _VALID_URL = r'https?://(?:www\.)?laxarxames\.cat/(?:[^/?#]+/)*?(player|movie-details)/(?P<id>\d+)'
    _NETRC_MACHINE = 'laxarxames'
    # Bearer token obtained by _perform_login; None until login succeeds
    _TOKEN = None
    _TESTS = [{
        'url': 'https://www.laxarxames.cat/player/3459421',
        'md5': '0966f46c34275934c19af78f3df6e2bc',
        'info_dict': {
            'id': '6339612436112',
            'ext': 'mp4',
            'title': 'Resum | UA Horta — UD Viladecans',
            'timestamp': 1697905186,
            'thumbnail': r're:https?://.*\.jpg',
            'description': '',
            'upload_date': '20231021',
            'duration': 129.44,
            'tags': ['ott', 'esports', '23-24', ' futbol', ' futbol-partits', 'elit', 'resum'],
            'uploader_id': '5779379807001',
        },
        'skip': 'Requires login',
    }]

    def _perform_login(self, username, password):
        """Exchange credentials for a bearer token and cache it on the class.

        A 401 response is tolerated (expected_status) so the JSON error body
        can be inspected; a missing token then raises ExtractorError.
        """
        if self._TOKEN:
            return
        login = self._download_json(
            'https://api.laxarxames.cat/Authorization/SignIn', None, note='Logging in', headers={
                'X-Tenantorigin': 'https://laxarxames.cat',
                'Content-Type': 'application/json',
            }, data=json.dumps({
                'Username': username,
                'Password': password,
                'Device': {
                    'PlatformCode': 'WEB',
                    'Name': 'Mac OS ()',
                },
            }).encode(), expected_status=401)

        self._TOKEN = traverse_obj(login, ('AuthorizationToken', 'Token', {str}))
        if not self._TOKEN:
            raise ExtractorError('Login failed', expected=True)

    def _real_extract(self, url):
        video_id = self._match_id(url)
        if not self._TOKEN:
            self.raise_login_required()

        media_play_info = self._download_json(
            'https://api.laxarxames.cat/Media/GetMediaPlayInfo', video_id,
            data=json.dumps({
                'MediaId': int(video_id),
                'StreamType': 'MAIN',
            }).encode(), headers={
                'Authorization': f'Bearer {self._TOKEN}',
                'X-Tenantorigin': 'https://laxarxames.cat',
                'Content-Type': 'application/json',
            })

        # ContentUrl holds the Brightcove video id (not an actual URL)
        if not traverse_obj(media_play_info, ('ContentUrl', {str})):
            self.raise_no_formats('No video found', expected=True)

        return self.url_result(
            f'https://players.brightcove.net/5779379807001/default_default/index.html?videoId={media_play_info["ContentUrl"]}',
            BrightcoveNewIE, video_id, media_play_info.get('Title'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/slideslive.py
yt_dlp/extractor/slideslive.py
import re
import urllib.parse
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    parse_qs,
    smuggle_url,
    traverse_obj,
    unified_timestamp,
    update_url_query,
    url_or_none,
    xpath_text,
)


class SlidesLiveIE(InfoExtractor):
    # Extractor for slideslive.com presentations. Player metadata comes from
    # a custom m3u8-like document (#EXT-SL-* tags); slide imagery comes from
    # either a JSON or an XML manifest, and some slides are themselves videos
    # which are emitted as extra playlist entries.
    _VALID_URL = r'https?://slideslive\.com/(?:embed/(?:presentation/)?)?(?P<id>[0-9]+)'
    _TESTS = [{
        # service_name = yoda, only XML slides info
        'url': 'https://slideslive.com/38902413/gcc-ia16-backend',
        'info_dict': {
            'id': '38902413',
            'ext': 'mp4',
            'title': 'GCC IA16 backend',
            'timestamp': 1697793372,
            'upload_date': '20231020',
            'thumbnail': r're:^https?://.*\.jpg',
            'thumbnails': 'count:42',
            'chapters': 'count:41',
            'duration': 1638,
        },
        'params': {
            'skip_download': 'm3u8',
        },
    }, {
        # service_name = yoda, /v7/ slides
        'url': 'https://slideslive.com/38935785',
        'info_dict': {
            'id': '38935785',
            'ext': 'mp4',
            'title': 'Offline Reinforcement Learning: From Algorithms to Practical Challenges',
            'upload_date': '20231020',
            'timestamp': 1697807002,
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:640',
            'chapters': 'count:639',
            'duration': 9832,
        },
        'params': {
            'skip_download': 'm3u8',
        },
    }, {
        # service_name = yoda, /v1/ slides
        'url': 'https://slideslive.com/38973182/how-should-a-machine-learning-researcher-think-about-ai-ethics',
        'info_dict': {
            'id': '38973182',
            'ext': 'mp4',
            'title': 'How Should a Machine Learning Researcher Think About AI Ethics?',
            'upload_date': '20231020',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1697822521,
            'thumbnails': 'count:3',
            'chapters': 'count:2',
            'duration': 5889,
        },
        'params': {
            'skip_download': 'm3u8',
        },
    }, {
        # formerly youtube, converted to native
        'url': 'https://slideslive.com/38897546/special-metaprednaska-petra-ludwiga-hodnoty-pro-lepsi-spolecnost',
        'md5': '8a79b5e3d700837f40bd2afca3c8fa01',
        'info_dict': {
            'id': '38897546',
            'ext': 'mp4',
            'title': 'SPECIÁL: Meta-přednáška Petra Ludwiga - Hodnoty pro lepší společnost',
            'thumbnail': r're:^https?://.*\.jpg',
            'upload_date': '20231029',
            'timestamp': 1698588144,
            'thumbnails': 'count:169',
            'chapters': 'count:168',
            'duration': 6827,
        },
        'params': {
            'skip_download': 'm3u8',
        },
    }, {
        # embed-only presentation, only XML slides info
        'url': 'https://slideslive.com/embed/presentation/38925850',
        'info_dict': {
            'id': '38925850',
            'ext': 'mp4',
            'title': 'Towards a Deep Network Architecture for Structured Smoothness',
            'thumbnail': r're:^https?://.*\.jpg',
            'thumbnails': 'count:8',
            'timestamp': 1697803109,
            'upload_date': '20231020',
            'chapters': 'count:7',
            'duration': 326,
        },
        'params': {
            'skip_download': 'm3u8',
        },
    }, {
        # embed-only presentation, only JSON slides info, /v5/ slides (.png)
        'url': 'https://slideslive.com/38979920/',
        'info_dict': {
            'id': '38979920',
            'ext': 'mp4',
            'title': 'MoReL: Multi-omics Relational Learning',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:7',
            'timestamp': 1697824939,
            'upload_date': '20231020',
            'chapters': 'count:6',
            'duration': 171,
        },
        'params': {
            'skip_download': 'm3u8',
        },
    }, {
        # /v2/ slides (.jpg)
        'url': 'https://slideslive.com/38954074',
        'info_dict': {
            'id': '38954074',
            'ext': 'mp4',
            'title': 'Decentralized Attribution of Generative Models',
            'thumbnail': r're:^https?://.*\.jpg',
            'thumbnails': 'count:16',
            'timestamp': 1697814901,
            'upload_date': '20231020',
            'chapters': 'count:15',
            'duration': 306,
        },
        'params': {
            'skip_download': 'm3u8',
        },
    }, {
        # /v4/ slides (.png)
        'url': 'https://slideslive.com/38979570/',
        'info_dict': {
            'id': '38979570',
            'ext': 'mp4',
            'title': 'Efficient Active Search for Combinatorial Optimization Problems',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:9',
            'timestamp': 1697824757,
            'upload_date': '20231020',
            'chapters': 'count:8',
            'duration': 295,
        },
        'params': {
            'skip_download': 'm3u8',
        },
    }, {
        # /v10/ slides
        'url': 'https://slideslive.com/embed/presentation/38979880?embed_parent_url=https%3A%2F%2Fedit.videoken.com%2F',
        'info_dict': {
            'id': '38979880',
            'ext': 'mp4',
            'title': 'The Representation Power of Neural Networks',
            'timestamp': 1697824919,
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:22',
            'upload_date': '20231020',
            'chapters': 'count:21',
            'duration': 294,
        },
        'params': {
            'skip_download': 'm3u8',
        },
    }, {
        # /v7/ slides, 2 video slides
        'url': 'https://slideslive.com/embed/presentation/38979682?embed_container_origin=https%3A%2F%2Fedit.videoken.com',
        'playlist_count': 3,
        'info_dict': {
            'id': '38979682-playlist',
            'title': 'LoRA: Low-Rank Adaptation of Large Language Models',
        },
        'playlist': [{
            'info_dict': {
                'id': '38979682',
                'ext': 'mp4',
                'title': 'LoRA: Low-Rank Adaptation of Large Language Models',
                'timestamp': 1697824815,
                'thumbnail': r're:^https?://.*\.(?:jpg|png)',
                'thumbnails': 'count:30',
                'upload_date': '20231020',
                'chapters': 'count:31',
                'duration': 272,
            },
        }, {
            'info_dict': {
                'id': '38979682-021',
                'ext': 'mp4',
                'title': 'LoRA: Low-Rank Adaptation of Large Language Models - Slide 021',
                'duration': 3,
                'timestamp': 1697824815,
                'upload_date': '20231020',
            },
        }, {
            'info_dict': {
                'id': '38979682-024',
                'ext': 'mp4',
                'title': 'LoRA: Low-Rank Adaptation of Large Language Models - Slide 024',
                'duration': 4,
                'timestamp': 1697824815,
                'upload_date': '20231020',
            },
        }],
        'params': {
            'skip_download': 'm3u8',
        },
    }, {
        # /v6/ slides, 1 video slide, edit.videoken.com embed
        'url': 'https://slideslive.com/38979481/',
        'playlist_count': 2,
        'info_dict': {
            'id': '38979481-playlist',
            'title': 'How to Train Your MAML to Excel in Few-Shot Classification',
        },
        'playlist': [{
            'info_dict': {
                'id': '38979481',
                'ext': 'mp4',
                'title': 'How to Train Your MAML to Excel in Few-Shot Classification',
                'timestamp': 1697824716,
                'thumbnail': r're:^https?://.*\.(?:jpg|png)',
                'thumbnails': 'count:43',
                'upload_date': '20231020',
                'chapters': 'count:43',
                'duration': 315,
            },
        }, {
            'info_dict': {
                'id': '38979481-013',
                'ext': 'mp4',
                'title': 'How to Train Your MAML to Excel in Few-Shot Classification - Slide 013',
                'duration': 3,
                'timestamp': 1697824716,
                'upload_date': '20231020',
            },
        }],
        'params': {
            'skip_download': 'm3u8',
        },
    }, {
        # /v3/ slides, .jpg and .png, formerly service_name = youtube, now native
        'url': 'https://slideslive.com/embed/38932460/',
        'info_dict': {
            'id': '38932460',
            'ext': 'mp4',
            'title': 'Active Learning for Hierarchical Multi-Label Classification',
            'duration': 941,
            'thumbnail': r're:https?://.+/.+\.(?:jpg|png)',
            'chapters': 'count:20',
            'timestamp': 1708338974,
            'upload_date': '20240219',
        },
        'params': {
            'skip_download': 'm3u8',
        },
    }, {
        # /v3/ slides, .png only, service_name = yoda
        'url': 'https://slideslive.com/38983994',
        'info_dict': {
            'id': '38983994',
            'ext': 'mp4',
            'title': 'Zero-Shot AutoML with Pretrained Models',
            'timestamp': 1697826708,
            'upload_date': '20231020',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:23',
            'chapters': 'count:22',
            'duration': 295,
        },
        'params': {
            'skip_download': 'm3u8',
        },
    }, {
        # service_name = yoda
        'url': 'https://slideslive.com/38903721/magic-a-scientific-resurrection-of-an-esoteric-legend',
        'only_matching': True,
    }, {
        # dead link, service_name = url
        'url': 'https://slideslive.com/38922070/learning-transferable-skills-1',
        'only_matching': True,
    }, {
        # dead link, service_name = vimeo
        'url': 'https://slideslive.com/38921896/retrospectives-a-venue-for-selfreflection-in-ml-research-3',
        'only_matching': True,
    }]
    _WEBPAGE_TESTS = [{
        # only XML slides info
        'url': 'https://iclr.cc/virtual_2020/poster_Hklr204Fvr.html',
        'info_dict': {
            'id': '38925850',
            'ext': 'mp4',
            'title': 'Towards a Deep Network Architecture for Structured Smoothness',
            'thumbnail': r're:^https?://.*\.jpg',
            'thumbnails': 'count:8',
            'timestamp': 1697803109,
            'upload_date': '20231020',
            'chapters': 'count:7',
            'duration': 326,
        },
        'params': {
            'skip_download': 'm3u8',
        },
    }]

    @classmethod
    def _extract_embed_urls(cls, url, webpage):
        """Yield embed URLs for SlidesLive players found in a third-party page."""
        # Reference: https://slideslive.com/embed_presentation.js
        for embed_id in re.findall(
                r'(?s)new\s+SlidesLiveEmbed\s*\([^)]+\bpresentationId:\s*["\'](\d+)["\']', webpage):
            url_parsed = urllib.parse.urlparse(url)
            origin = f'{url_parsed.scheme}://{url_parsed.netloc}'
            # Pass the embedding page/origin along so domain-restricted
            # embeds can be fetched (see _real_extract)
            yield update_url_query(
                f'https://slideslive.com/embed/presentation/{embed_id}', {
                    'embed_parent_url': url,
                    'embed_container_origin': origin,
                })

    def _download_embed_webpage_handle(self, video_id, headers):
        """Fetch the embed page, mapping Referer/Origin headers into the
        query parameters the embed endpoint also expects."""
        return self._download_webpage_handle(
            f'https://slideslive.com/embed/presentation/{video_id}', video_id,
            headers=headers, query=traverse_obj(headers, {
                'embed_parent_url': 'Referer',
                'embed_container_origin': 'Origin',
            }))

    def _extract_custom_m3u8_info(self, m3u8_data):
        """Parse SlidesLive's custom #EXT-SL-* tags from the player document
        into a plain dict (see `lookup` for the recognized keys)."""
        m3u8_dict = {}

        lookup = {
            'PRESENTATION-TITLE': 'title',
            'PRESENTATION-UPDATED-AT': 'timestamp',
            'PRESENTATION-THUMBNAIL': 'thumbnail',
            'PLAYLIST-TYPE': 'playlist_type',
            'VOD-VIDEO-SERVICE-NAME': 'service_name',
            'VOD-VIDEO-ID': 'service_id',
            'VOD-VIDEO-SERVERS': 'video_servers',
            'VOD-SUBTITLES': 'subtitles',
            'VOD-SLIDES-JSON-URL': 'slides_json_url',
            'VOD-SLIDES-XML-URL': 'slides_xml_url',
        }

        for line in m3u8_data.splitlines():
            if not line.startswith('#EXT-SL-'):
                continue
            tag, _, value = line.partition(':')
            key = lookup.get(tag[8:])  # strip the '#EXT-SL-' prefix
            if not key:
                continue
            m3u8_dict[key] = value

        # Some values are stringified JSON arrays
        for key in ('video_servers', 'subtitles'):
            if key in m3u8_dict:
                m3u8_dict[key] = self._parse_json(m3u8_dict[key], None, fatal=False) or []

        return m3u8_dict

    def _extract_formats_and_duration(self, cdn_hostname, path, video_id, skip_duration=False):
        """Return (formats, duration) for a yoda-hosted video: HLS first,
        then DASH; duration is probed from whichever manifest succeeds."""
        formats, duration = [], None

        hls_formats = self._extract_m3u8_formats(
            f'https://{cdn_hostname}/{path}/master.m3u8',
            video_id, 'mp4', m3u8_id='hls', fatal=False, live=True)
        if hls_formats:
            if not skip_duration:
                duration = self._extract_m3u8_vod_duration(
                    hls_formats[0]['url'], video_id,
                    note='Extracting duration from HLS manifest')
            formats.extend(hls_formats)

        dash_formats = self._extract_mpd_formats(
            f'https://{cdn_hostname}/{path}/master.mpd', video_id,
            mpd_id='dash', fatal=False)
        if dash_formats:
            if not duration and not skip_duration:
                duration = self._extract_mpd_vod_duration(
                    f'https://{cdn_hostname}/{path}/master.mpd', video_id,
                    note='Extracting duration from DASH manifest')
            formats.extend(dash_formats)

        return formats, duration

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Forward embed_parent_url/embed_container_origin (if present in the
        # URL) as Referer/Origin for domain-restricted embeds
        webpage, urlh = self._download_embed_webpage_handle(
            video_id, headers=traverse_obj(parse_qs(url), {
                'Referer': ('embed_parent_url', -1),
                'Origin': ('embed_container_origin', -1)}))
        redirect_url = urlh.url
        if 'domain_not_allowed' in redirect_url:
            # Retry with the allowed domain advertised in the redirect URL
            domain = traverse_obj(parse_qs(redirect_url), ('allowed_domains[]', ...), get_all=False)
            if not domain:
                raise ExtractorError(
                    'This is an embed-only presentation. Try passing --referer', expected=True)
            webpage, _ = self._download_embed_webpage_handle(video_id, headers={
                'Referer': f'https://{domain}/',
                'Origin': f'https://{domain}',
            })

        player_token = self._search_regex(r'data-player-token="([^"]+)"', webpage, 'player token')
        player_data = self._download_webpage(
            f'https://slideslive.com/player/{video_id}', video_id,
            note='Downloading player info', query={'player_token': player_token})
        player_info = self._extract_custom_m3u8_info(player_data)

        service_name = player_info['service_name'].lower()
        assert service_name in ('url', 'yoda', 'vimeo', 'youtube')
        service_id = player_info['service_id']

        # Template filled as (video_id, slide_name, extension) below
        slide_url_template = 'https://slides.slideslive.com/%s/slides/original/%s%s'
        slides, slides_info = {}, []

        if player_info.get('slides_json_url'):
            slides = self._download_json(
                player_info['slides_json_url'], video_id, fatal=False,
                note='Downloading slides JSON', errnote=False) or {}
            slide_ext_default = '.png'
            slide_quality = traverse_obj(slides, ('slide_qualities', 0))
            if slide_quality:
                slide_ext_default = '.jpg'
                slide_url_template = f'https://cdn.slideslive.com/data/presentations/%s/slides/{slide_quality}/%s%s'
            for slide_id, slide in enumerate(traverse_obj(slides, ('slides', ...), expected_type=dict), 1):
                slides_info.append((
                    slide_id, traverse_obj(slide, ('image', 'name')),
                    traverse_obj(slide, ('image', 'extname'), default=slide_ext_default),
                    int_or_none(slide.get('time'), scale=1000)))

        if not slides and player_info.get('slides_xml_url'):
            # Legacy XML manifest fallback
            slides = self._download_xml(
                player_info['slides_xml_url'], video_id, fatal=False,
                note='Downloading slides XML', errnote='Failed to download slides info')
            if isinstance(slides, xml.etree.ElementTree.Element):
                slide_url_template = 'https://cdn.slideslive.com/data/presentations/%s/slides/big/%s%s'
                for slide_id, slide in enumerate(slides.findall('./slide')):
                    slides_info.append((
                        slide_id, xpath_text(slide, './slideName', 'name'), '.jpg',
                        int_or_none(xpath_text(slide, './timeSec', 'time'))))

        # Each slide becomes a thumbnail and a chapter boundary
        chapters, thumbnails = [], []
        if url_or_none(player_info.get('thumbnail')):
            thumbnails.append({'id': 'cover', 'url': player_info['thumbnail']})
        for slide_id, slide_path, slide_ext, start_time in slides_info:
            if slide_path:
                thumbnails.append({
                    'id': f'{slide_id:03d}',
                    'url': slide_url_template % (video_id, slide_path, slide_ext),
                })
            chapters.append({
                'title': f'Slide {slide_id:03d}',
                'start_time': start_time,
            })

        subtitles = {}
        for sub in traverse_obj(player_info, ('subtitles', ...), expected_type=dict):
            webvtt_url = url_or_none(sub.get('webvtt_url'))
            if not webvtt_url:
                continue
            subtitles.setdefault(sub.get('language') or 'en', []).append({
                'url': webvtt_url,
                'ext': 'vtt',
            })

        info = {
            'id': video_id,
            'title': player_info.get('title') or self._html_search_meta('title', webpage, default=''),
            'timestamp': unified_timestamp(player_info.get('timestamp')),
            'is_live': player_info.get('playlist_type') != 'vod',
            'thumbnails': thumbnails,
            'chapters': chapters,
            'subtitles': subtitles,
        }

        if service_name == 'url':
            # service_id is a direct media URL
            info['url'] = service_id
        elif service_name == 'yoda':
            # SlidesLive's own CDN
            formats, duration = self._extract_formats_and_duration(
                player_info['video_servers'][0], service_id, video_id)
            info.update({
                'duration': duration,
                'formats': formats,
            })
        else:
            # vimeo/youtube — delegate to the corresponding extractor
            info.update({
                '_type': 'url_transparent',
                'url': service_id,
                'ie_key': service_name.capitalize(),
                'display_id': video_id,
            })
            if service_name == 'vimeo':
                info['url'] = smuggle_url(
                    f'https://player.vimeo.com/video/{service_id}', {'referer': url})

        # Slides that are themselves videos become extra playlist entries
        video_slides = traverse_obj(slides, ('slides', ..., 'video', 'id'))
        if not video_slides:
            return info

        def entries():
            yield info

            service_data = self._download_json(
                f'https://slideslive.com/player/{video_id}/slides_video_service_data',
                video_id, fatal=False, query={
                    'player_token': player_token,
                    'videos': ','.join(video_slides),
                }, note='Downloading video slides info',
                errnote='Failed to download video slides info') or {}

            for slide_id, slide in enumerate(traverse_obj(slides, ('slides', ...)), 1):
                if traverse_obj(slide, ('video', 'service')) != 'yoda':
                    continue
                video_path = traverse_obj(slide, ('video', 'id'))
                cdn_hostname = traverse_obj(service_data, (
                    video_path, 'video_servers', ...), get_all=False)
                if not cdn_hostname or not video_path:
                    continue
                formats, _ = self._extract_formats_and_duration(
                    cdn_hostname, video_path, video_id, skip_duration=True)
                if not formats:
                    continue
                yield {
                    'id': f'{video_id}-{slide_id:03d}',
                    'title': f'{info["title"]} - Slide {slide_id:03d}',
                    'timestamp': info['timestamp'],
                    'duration': int_or_none(traverse_obj(slide, ('video', 'duration_ms')), scale=1000),
                    'formats': formats,
                }

        return self.playlist_result(entries(), f'{video_id}-playlist', info['title'])
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/clyp.py
yt_dlp/extractor/clyp.py
from .common import InfoExtractor from ..utils import ( float_or_none, parse_qs, unified_timestamp, ) class ClypIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?clyp\.it/(?P<id>[a-z0-9]+)' _TESTS = [{ 'url': 'https://clyp.it/iynkjk4b', 'md5': '4bc6371c65210e7b372097fce4d92441', 'info_dict': { 'id': 'iynkjk4b', 'ext': 'ogg', 'title': 'research', 'description': '#Research', 'duration': 51.278, 'timestamp': 1435524981, 'upload_date': '20150628', }, }, { 'url': 'https://clyp.it/b04p1odi?token=b0078e077e15835845c528a44417719d', 'info_dict': { 'id': 'b04p1odi', 'ext': 'ogg', 'title': 'GJ! (Reward Edit)', 'description': 'Metal Resistance (THE ONE edition)', 'duration': 177.789, 'timestamp': 1528241278, 'upload_date': '20180605', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://clyp.it/v42214lc', 'md5': '4aca4dfc3236fb6d6ddc4ea08314f33f', 'info_dict': { 'id': 'v42214lc', 'ext': 'wav', 'title': 'i dont wanna go (old version)', 'duration': 113.528, 'timestamp': 1607348505, 'upload_date': '20201207', }, }] def _real_extract(self, url): audio_id = self._match_id(url) qs = parse_qs(url) token = qs.get('token', [None])[0] query = {} if token: query['token'] = token metadata = self._download_json( f'https://api.clyp.it/{audio_id}', audio_id, query=query) formats = [] for secure in ('', 'Secure'): for ext in ('Ogg', 'Mp3'): format_id = f'{secure}{ext}' format_url = metadata.get(f'{format_id}Url') if format_url: formats.append({ 'url': format_url, 'format_id': format_id, 'vcodec': 'none', 'acodec': ext.lower(), }) page = self._download_webpage(url, video_id=audio_id) wav_url = self._html_search_regex( r'var\s*wavStreamUrl\s*=\s*["\'](?P<url>https?://[^\'"]+)', page, 'url', default=None) if wav_url: formats.append({ 'url': wav_url, 'format_id': 'wavStreamUrl', 'vcodec': 'none', 'acodec': 'wav', }) title = metadata['Title'] description = metadata.get('Description') duration = float_or_none(metadata.get('Duration')) timestamp = 
unified_timestamp(metadata.get('DateCreated')) return { 'id': audio_id, 'title': title, 'description': description, 'duration': duration, 'timestamp': timestamp, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/massengeschmacktv.py
yt_dlp/extractor/massengeschmacktv.py
import re from .common import InfoExtractor from ..utils import ( clean_html, determine_ext, int_or_none, js_to_json, mimetype2ext, parse_filesize, ) class MassengeschmackTVIE(InfoExtractor): IE_NAME = 'massengeschmack.tv' _VALID_URL = r'https?://(?:www\.)?massengeschmack\.tv/play/(?P<id>[^?&#]+)' _TEST = { 'url': 'https://massengeschmack.tv/play/fktv202', 'md5': '9996f314994a49fefe5f39aa1b07ae21', 'info_dict': { 'id': 'fktv202', 'ext': 'mp4', 'title': 'Fernsehkritik-TV #202', 'thumbnail': 'https://cache.massengeschmack.tv/img/mag/fktv202.jpg', }, } def _real_extract(self, url): episode = self._match_id(url) webpage = self._download_webpage(url, episode) sources = self._parse_json(self._search_regex(r'(?s)MEDIA\s*=\s*(\[.+?\]);', webpage, 'media'), episode, js_to_json) formats = [] for source in sources: furl = source.get('src') if not furl: continue furl = self._proto_relative_url(furl) ext = determine_ext(furl) or mimetype2ext(source.get('type')) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( furl, episode, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) else: formats.append({ 'url': furl, 'format_id': determine_ext(furl), }) for (durl, format_id, width, height, filesize) in re.findall(r'''(?x) <a[^>]+?href="(?P<url>(?:https:)?//[^"]+)".*? <strong>(?P<format_id>.+?)</strong>.*? <small>(?:(?P<width>\d+)x(?P<height>\d+))?\s+?\((?P<filesize>[\d,]+\s*[GM]iB)\)</small> ''', webpage): formats.append({ 'url': durl, 'format_id': format_id, 'width': int_or_none(width), 'height': int_or_none(height), 'filesize': parse_filesize(filesize), 'vcodec': 'none' if format_id.startswith('Audio') else None, }) return { 'id': episode, 'title': clean_html(self._html_search_regex( r'<span[^>]+\bid=["\']clip-title["\'][^>]*>([^<]+)', webpage, 'title', fatal=False)), 'formats': formats, 'thumbnail': self._search_regex(r'POSTER\s*=\s*"([^"]+)', webpage, 'thumbnail', fatal=False), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tv5mondeplus.py
yt_dlp/extractor/tv5mondeplus.py
import urllib.parse from .common import InfoExtractor from ..utils import ( clean_html, determine_ext, extract_attributes, get_element_by_class, get_element_html_by_class, int_or_none, url_or_none, ) from ..utils.traversal import traverse_obj class TV5MondePlusIE(InfoExtractor): IE_NAME = 'TV5MONDE' _VALID_URL = r'https?://(?:www\.)?tv5monde\.com/tv/video/(?P<id>[^/?#]+)' _TESTS = [{ # documentary 'url': 'https://www.tv5monde.com/tv/video/65931-baudouin-l-heritage-d-un-roi-baudouin-l-heritage-d-un-roi', 'md5': 'd2a708902d3df230a357c99701aece05', 'info_dict': { 'id': '3FPa7JMu21_6D4BA7b', 'display_id': '65931-baudouin-l-heritage-d-un-roi-baudouin-l-heritage-d-un-roi', 'ext': 'mp4', 'title': "Baudouin, l'héritage d'un roi", 'thumbnail': 'https://psi.tv5monde.com/upsilon-images/960x540/6f/baudouin-f49c6b0e.jpg', 'duration': 4842, 'upload_date': '20240130', 'timestamp': 1706641242, 'episode': "BAUDOUIN, L'HERITAGE D'UN ROI", 'description': 'md5:78125c74a5cac06d7743a2d09126edad', 'series': "Baudouin, l'héritage d'un roi", }, }, { # series episode 'url': 'https://www.tv5monde.com/tv/video/52952-toute-la-vie-mardi-23-mars-2021', 'md5': 'f5e09637cadd55639c05874e22eb56bf', 'info_dict': { 'id': 'obRRZ8m6g9_6D4BA7b', 'display_id': '52952-toute-la-vie-mardi-23-mars-2021', 'ext': 'mp4', 'title': 'Toute la vie', 'description': 'md5:a824a2e1dfd94cf45fa379a1fb43ce65', 'thumbnail': 'https://psi.tv5monde.com/media/image/960px/5880553.jpg', 'duration': 2526, 'upload_date': '20230721', 'timestamp': 1689971646, 'series': 'Toute la vie', 'episode': 'Mardi 23 mars 2021', }, }, { # movie 'url': 'https://www.tv5monde.com/tv/video/8771-ce-fleuve-qui-nous-charrie-ce-fleuve-qui-nous-charrie-p001-ce-fleuve-qui-nous-charrie', 'md5': '87cefc34e10a6bf4f7823cccd7b36eb2', 'info_dict': { 'id': 'DOcfvdLKXL_6D4BA7b', 'display_id': '8771-ce-fleuve-qui-nous-charrie-ce-fleuve-qui-nous-charrie-p001-ce-fleuve-qui-nous-charrie', 'ext': 'mp4', 'title': 'Ce fleuve qui nous charrie', 'description': 
'md5:62ba3f875343c7fc4082bdfbbc1be992', 'thumbnail': 'https://psi.tv5monde.com/media/image/960px/5476617.jpg', 'duration': 5300, 'upload_date': '20210822', 'timestamp': 1629594105, 'episode': 'CE FLEUVE QUI NOUS CHARRIE-P001-CE FLEUVE QUI NOUS CHARRIE', 'series': 'Ce fleuve qui nous charrie', }, }, { # news 'url': 'https://www.tv5monde.com/tv/video/70402-tv5monde-le-journal-edition-du-08-05-24-11h', 'md5': 'c62977d6d10754a2ecebba70ad370479', 'info_dict': { 'id': 'LgQFrOCNsc_6D4BA7b', 'display_id': '70402-tv5monde-le-journal-edition-du-08-05-24-11h', 'ext': 'mp4', 'title': 'TV5MONDE, le journal', 'description': 'md5:777dc209eaa4423b678477c36b0b04a8', 'thumbnail': 'https://psi.tv5monde.com/media/image/960px/6184105.jpg', 'duration': 854, 'upload_date': '20240508', 'timestamp': 1715159640, 'series': 'TV5MONDE, le journal', 'episode': 'EDITION DU 08/05/24 - 11H', }, }] _GEO_BYPASS = False @staticmethod def _extract_subtitles(data_captions): subtitles = {} for f in traverse_obj(data_captions, ('files', lambda _, v: url_or_none(v['file']))): subtitles.setdefault(f.get('label') or 'fra', []).append({'url': f['file']}) return subtitles def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id, impersonate=True) if ">Ce programme n'est malheureusement pas disponible pour votre zone géographique.<" in webpage: self.raise_geo_restricted(countries=['FR']) vpl_data = extract_attributes(self._search_regex( r'(<[^>]+class="video_player_loader"[^>]+>)', webpage, 'video player loader')) video_files = self._parse_json( vpl_data['data-broadcast'], display_id) formats = [] video_id = None def process_video_files(v): nonlocal video_id for video_file in v: v_url = video_file.get('url') if not v_url: continue if video_file.get('type') == 'application/deferred': d_param = urllib.parse.quote(v_url) token = video_file.get('token') if not token: continue deferred_json = self._download_json( 
f'https://api.tv5monde.com/player/asset/{d_param}/resolve?condenseKS=true', display_id, 'Downloading deferred info', fatal=False, impersonate=True, headers={'Authorization': f'Bearer {token}'}) v_url = traverse_obj(deferred_json, (0, 'url', {url_or_none})) if not v_url: continue # data-guid from the webpage isn't stable, use the material id from the json urls video_id = self._search_regex( r'materials/([\da-zA-Z]{10}_[\da-fA-F]{7})/', v_url, 'video id', default=None) process_video_files(deferred_json) video_format = video_file.get('format') or determine_ext(v_url) if video_format == 'm3u8': formats.extend(self._extract_m3u8_formats( v_url, display_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) elif video_format == 'mpd': formats.extend(self._extract_mpd_formats( v_url, display_id, fatal=False)) else: formats.append({ 'url': v_url, 'format_id': video_format, }) process_video_files(video_files) metadata = self._parse_json( vpl_data.get('data-metadata') or '{}', display_id, fatal=False) if not video_id: video_id = self._search_regex( (r'data-guid=["\']([\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})', r'id_contenu["\']\s:\s*(\d+)'), webpage, 'video id', default=display_id) return { **traverse_obj(metadata, ('content', { 'id': ('id', {str}), 'title': ('title', {str}), 'episode': ('title', {str}), 'series': ('series', {str}), 'timestamp': ('publishDate_ts', {int_or_none}), 'duration': ('duration', {int_or_none}), })), 'id': video_id, 'display_id': display_id, 'title': clean_html(get_element_by_class('main-title', webpage)), 'description': clean_html(get_element_by_class('text', get_element_html_by_class('ep-summary', webpage) or '')), 'thumbnail': url_or_none(vpl_data.get('data-image')), 'formats': formats, 'subtitles': self._extract_subtitles(self._parse_json( traverse_obj(vpl_data, ('data-captions', {str}), default='{}'), display_id, fatal=False)), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/bundesliga.py
yt_dlp/extractor/bundesliga.py
from .common import InfoExtractor from .jwplatform import JWPlatformIE class BundesligaIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?bundesliga\.com/[a-z]{2}/bundesliga/videos(?:/[^?]+)?\?vid=(?P<id>[a-zA-Z0-9]{8})' _TESTS = [ { 'url': 'https://www.bundesliga.com/en/bundesliga/videos?vid=bhhHkKyN', 'md5': '8fc3b25cd12440e3a8cdc51f1493849c', 'info_dict': { 'id': 'bhhHkKyN', 'ext': 'mp4', 'title': 'Watch: Alphonso Davies and Jeremie Frimpong head-to-head', 'thumbnail': 'https://cdn.jwplayer.com/v2/media/bhhHkKyN/poster.jpg?width=720', 'upload_date': '20220928', 'duration': 146, 'timestamp': 1664366511, 'description': 'md5:803d4411bd134140c774021dd4b7598b', }, }, { 'url': 'https://www.bundesliga.com/en/bundesliga/videos/latest-features/T8IKc8TX?vid=ROHjs06G', 'only_matching': True, }, { 'url': 'https://www.bundesliga.com/en/bundesliga/videos/goals?vid=mOG56vWA', 'only_matching': True, }, ] def _real_extract(self, url): video_id = self._match_id(url) return self.url_result(f'jwplatform:{video_id}', JWPlatformIE, video_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/disney.py
yt_dlp/extractor/disney.py
import re from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, join_nonempty, unified_strdate, update_url_query, ) class DisneyIE(InfoExtractor): _VALID_URL = r'''(?x) https?://(?P<domain>(?:[^/]+\.)?(?:disney\.[a-z]{2,3}(?:\.[a-z]{2})?|disney(?:(?:me|latino)\.com|turkiye\.com\.tr|channel\.de)|(?:starwars|marvelkids)\.com))/(?:(?:embed/|(?:[^/]+/)+[\w-]+-)(?P<id>[a-z0-9]{24})|(?:[^/]+/)?(?P<display_id>[^/?#]+))''' _TESTS = [{ # Disney.EmbedVideo 'url': 'http://video.disney.com/watch/moana-trailer-545ed1857afee5a0ec239977', 'info_dict': { 'id': '545ed1857afee5a0ec239977', 'ext': 'mp4', 'title': 'Moana - Trailer', 'description': 'A fun adventure for the entire Family! Bring home Moana on Digital HD Feb 21 & Blu-ray March 7', 'upload_date': '20170112', }, 'params': { # m3u8 download 'skip_download': True, }, }, { # Grill.burger 'url': 'http://www.starwars.com/video/rogue-one-a-star-wars-story-intro-featurette', 'info_dict': { 'id': '5454e9f4e9804a552e3524c8', 'ext': 'mp4', 'title': '"Intro" Featurette: Rogue One: A Star Wars Story', 'upload_date': '20170104', 'description': 'Go behind-the-scenes of Rogue One: A Star Wars Story in this featurette with Director Gareth Edwards and the cast of the film.', }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://videos.disneylatino.com/ver/spider-man-de-regreso-a-casa-primer-adelanto-543a33a1850bdcfcca13bae2', 'only_matching': True, }, { 'url': 'http://video.en.disneyme.com/watch/future-worm/robo-carp-2001-544b66002aa7353cdd3f5114', 'only_matching': True, }, { 'url': 'http://video.disneyturkiye.com.tr/izle/7c-7-cuceler/kimin-sesi-zaten-5456f3d015f6b36c8afdd0e2', 'only_matching': True, }, { 'url': 'http://disneyjunior.disney.com/embed/546a4798ddba3d1612e4005d', 'only_matching': True, }, { 'url': 'http://www.starwars.com/embed/54690d1e6c42e5f09a0fb097', 'only_matching': True, }, { 'url': 'http://spiderman.marvelkids.com/embed/522900d2ced3c565e4cc0677', 'only_matching': True, 
}, { 'url': 'http://spiderman.marvelkids.com/videos/contest-of-champions-part-four-clip-1', 'only_matching': True, }, { 'url': 'http://disneyjunior.en.disneyme.com/dj/watch-my-friends-tigger-and-pooh-promo', 'only_matching': True, }, { 'url': 'http://disneychannel.de/sehen/soy-luna-folge-118-5518518987ba27f3cc729268', 'only_matching': True, }, { 'url': 'http://disneyjunior.disney.com/galactech-the-galactech-grab-galactech-an-admiral-rescue', 'only_matching': True, }] def _real_extract(self, url): domain, video_id, display_id = self._match_valid_url(url).groups() if not video_id: webpage = self._download_webpage(url, display_id) grill = re.sub(r'"\s*\+\s*"', '', self._search_regex( r'Grill\.burger\s*=\s*({.+})\s*:', webpage, 'grill data')) page_data = next(s for s in self._parse_json(grill, display_id)['stack'] if s.get('type') == 'video') video_data = page_data['data'][0] else: webpage = self._download_webpage( f'http://{domain}/embed/{video_id}', video_id) page_data = self._parse_json(self._search_regex( r'Disney\.EmbedVideo\s*=\s*({.+});', webpage, 'embed data'), video_id) video_data = page_data['video'] for external in video_data.get('externals', []): if external.get('source') == 'vevo': return self.url_result('vevo:' + external['data_id'], 'Vevo') video_id = video_data['id'] title = video_data['title'] formats = [] for flavor in video_data.get('flavors', []): flavor_format = flavor.get('format') flavor_url = flavor.get('url') if not flavor_url or not re.match(r'https?://', flavor_url) or flavor_format == 'mp4_access': continue tbr = int_or_none(flavor.get('bitrate')) if tbr == 99999: # wrong ks(Kaltura Signature) causes 404 Error flavor_url = update_url_query(flavor_url, {'ks': ''}) m3u8_formats = self._extract_m3u8_formats( flavor_url, video_id, 'mp4', m3u8_id=flavor_format, fatal=False) for f in m3u8_formats: # Apple FairPlay if '/fpshls/' in f['url']: continue formats.append(f) continue ext = determine_ext(flavor_url) if flavor_format == 'applehttp' or ext 
== 'm3u8': ext = 'mp4' width = int_or_none(flavor.get('width')) height = int_or_none(flavor.get('height')) formats.append({ 'format_id': join_nonempty(flavor_format, tbr), 'url': flavor_url, 'width': width, 'height': height, 'tbr': tbr, 'ext': ext, 'vcodec': 'none' if (width == 0 and height == 0) else None, }) if not formats and video_data.get('expired'): self.raise_no_formats( '{} said: {}'.format(self.IE_NAME, page_data['translations']['video_expired']), expected=True) subtitles = {} for caption in video_data.get('captions', []): caption_url = caption.get('url') caption_format = caption.get('format') if not caption_url or caption_format.startswith('unknown'): continue subtitles.setdefault(caption.get('language', 'en'), []).append({ 'url': caption_url, 'ext': { 'webvtt': 'vtt', }.get(caption_format, caption_format), }) return { 'id': video_id, 'title': title, 'description': video_data.get('description') or video_data.get('short_desc'), 'thumbnail': video_data.get('thumb') or video_data.get('thumb_secure'), 'duration': int_or_none(video_data.get('duration_sec')), 'upload_date': unified_strdate(video_data.get('publish_date')), 'formats': formats, 'subtitles': subtitles, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/zaiko.py
yt_dlp/extractor/zaiko.py
import base64 from .common import InfoExtractor from ..utils import ( ExtractorError, extract_attributes, int_or_none, str_or_none, traverse_obj, try_call, unescapeHTML, url_basename, url_or_none, ) class ZaikoBaseIE(InfoExtractor): def _download_real_webpage(self, url, video_id): webpage, urlh = self._download_webpage_handle(url, video_id) final_url = urlh.url if 'zaiko.io/login' in final_url: self.raise_login_required() elif '/_buy/' in final_url: raise ExtractorError('Your account does not have tickets to this event', expected=True) return webpage def _parse_vue_element_attr(self, name, string, video_id): page_elem = self._search_regex(rf'(<{name}[^>]+>)', string, name) attrs = {} for key, value in extract_attributes(page_elem).items(): if key.startswith(':'): attrs[key[1:]] = self._parse_json( value, video_id, transform_source=unescapeHTML, fatal=False) return attrs class ZaikoIE(ZaikoBaseIE): _VALID_URL = r'https?://(?:[\w-]+\.)?zaiko\.io/event/(?P<id>\d+)/stream(?:/\d+)+' _TESTS = [{ 'url': 'https://zaiko.io/event/324868/stream/20571/20571', 'info_dict': { 'id': '324868', 'ext': 'mp4', 'title': 'ZAIKO STREAMING TEST', 'alt_title': '[VOD] ZAIKO STREAMING TEST_20210603(Do Not Delete)', 'uploader_id': '454', 'uploader': 'ZAIKO ZERO', 'release_timestamp': 1583809200, 'thumbnail': r're:^https://[\w.-]+/\w+/\w+', 'thumbnails': 'maxcount:2', 'release_date': '20200310', 'categories': ['Tech House'], 'live_status': 'was_live', }, 'params': {'skip_download': 'm3u8'}, 'skip': 'Your account does not have tickets to this event', }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_real_webpage(url, video_id) stream_meta = self._parse_vue_element_attr('stream-page', webpage, video_id) player_page = self._download_webpage( stream_meta['stream-access']['video_source'], video_id, 'Downloading player page', headers={'referer': 'https://zaiko.io/'}) player_meta = self._parse_vue_element_attr('player', player_page, video_id) initial_event_info 
= traverse_obj(player_meta, ('initial_event_info', {dict})) or {} status = traverse_obj(initial_event_info, ('status', {str})) live_status, msg, expected = { 'vod': ('was_live', 'No VOD stream URL was found', False), 'archiving': ('post_live', 'Event VOD is still being processed', True), 'deleting': ('post_live', 'This event has ended', True), 'deleted': ('post_live', 'This event has ended', True), 'error': ('post_live', 'This event has ended', True), 'disconnected': ('post_live', 'Stream has been disconnected', True), 'live_to_disconnected': ('post_live', 'Stream has been disconnected', True), 'live': ('is_live', 'No livestream URL found was found', False), 'waiting': ('is_upcoming', 'Live event has not yet started', True), 'cancelled': ('not_live', 'Event has been cancelled', True), }.get(status) or ('not_live', f'Unknown event status "{status}"', False) if traverse_obj(initial_event_info, ('is_jwt_protected', {bool})): stream_url = self._download_json( initial_event_info['jwt_token_url'], video_id, 'Downloading JWT-protected stream URL', 'Failed to download JWT-protected stream URL')['playback_url'] else: stream_url = traverse_obj(initial_event_info, ('endpoint', {url_or_none})) formats = self._extract_m3u8_formats( stream_url, video_id, live=True, fatal=False) if stream_url else [] if not formats: self.raise_no_formats(msg, expected=expected) thumbnail_urls = [ traverse_obj(initial_event_info, ('poster_url', {url_or_none})), self._og_search_thumbnail(self._download_webpage( f'https://zaiko.io/event/{video_id}', video_id, 'Downloading event page', fatal=False) or ''), ] return { 'id': video_id, 'formats': formats, 'live_status': live_status, **traverse_obj(stream_meta, { 'title': ('event', 'name', {str}), 'uploader': ('profile', 'name', {str}), 'uploader_id': ('profile', 'id', {str_or_none}), 'release_timestamp': ('stream', 'start', 'timestamp', {int_or_none}), 'categories': ('event', 'genres', ..., filter), }), 'alt_title': traverse_obj(initial_event_info, 
('title', {str})), 'thumbnails': [{'url': url, 'id': url_basename(url)} for url in thumbnail_urls if url_or_none(url)], } class ZaikoETicketIE(ZaikoBaseIE): _VALID_URL = r'https?://(?:www.)?zaiko\.io/account/eticket/(?P<id>[\w=-]{49})' _TESTS = [{ 'url': 'https://zaiko.io/account/eticket/TZjMwMzQ2Y2EzMXwyMDIzMDYwNzEyMTMyNXw1MDViOWU2Mw==', 'playlist_count': 1, 'info_dict': { 'id': 'f30346ca31-20230607121325-505b9e63', 'title': 'ZAIKO STREAMING TEST', 'thumbnail': 'https://media.zkocdn.net/pf_1/1_3wdyjcjyupseatkwid34u', }, 'skip': 'Only available with the ticketholding account', }] def _real_extract(self, url): ticket_id = self._match_id(url) ticket_id = try_call( lambda: base64.urlsafe_b64decode(ticket_id[1:]).decode().replace('|', '-')) or ticket_id webpage = self._download_real_webpage(url, ticket_id) eticket = self._parse_vue_element_attr('eticket', webpage, ticket_id) return self.playlist_result( [self.url_result(stream, ZaikoIE) for stream in traverse_obj(eticket, ('streams', ..., 'url'))], ticket_id, **traverse_obj(eticket, ('ticket-details', { 'title': 'event_name', 'thumbnail': 'event_img_url', })))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/telegram.py
yt_dlp/extractor/telegram.py
import re from .common import InfoExtractor from ..utils import ( clean_html, format_field, get_element_by_class, parse_duration, parse_qs, traverse_obj, unified_timestamp, update_url_query, url_basename, ) class TelegramEmbedIE(InfoExtractor): IE_NAME = 'telegram:embed' _VALID_URL = r'https?://t\.me/(?P<channel_id>[^/]+)/(?P<id>\d+)' _TESTS = [{ 'url': 'https://t.me/europa_press/613', 'md5': 'dd707708aea958c11a590e8068825f22', 'info_dict': { 'id': '613', 'ext': 'mp4', 'title': 'md5:6ce2d7e8d56eda16d80607b23db7b252', 'description': 'md5:6ce2d7e8d56eda16d80607b23db7b252', 'channel_id': 'europa_press', 'channel': 'Europa Press ✔', 'thumbnail': r're:^https?://.+', 'timestamp': 1635631203, 'upload_date': '20211030', 'duration': 61, }, }, { # 2-video post 'url': 'https://t.me/vorposte/29342', 'info_dict': { 'id': 'vorposte-29342', 'title': 'Форпост 29342', 'description': 'md5:9d92e22169a3e136d5d69df25f82c3dc', }, 'playlist_count': 2, 'params': { 'skip_download': True, }, }, { # 2-video post with --no-playlist 'url': 'https://t.me/vorposte/29343', 'md5': '1724e96053c18e788c8464038876e245', 'info_dict': { 'id': '29343', 'ext': 'mp4', 'title': 'md5:9d92e22169a3e136d5d69df25f82c3dc', 'description': 'md5:9d92e22169a3e136d5d69df25f82c3dc', 'channel_id': 'vorposte', 'channel': 'Форпост', 'thumbnail': r're:^https?://.+', 'timestamp': 1666384480, 'upload_date': '20221021', 'duration': 35, }, 'params': { 'noplaylist': True, }, }, { # 2-video post with 'single' query param 'url': 'https://t.me/vorposte/29342?single', 'md5': 'd20b202f1e41400a9f43201428add18f', 'info_dict': { 'id': '29342', 'ext': 'mp4', 'title': 'md5:9d92e22169a3e136d5d69df25f82c3dc', 'description': 'md5:9d92e22169a3e136d5d69df25f82c3dc', 'channel_id': 'vorposte', 'channel': 'Форпост', 'thumbnail': r're:^https?://.+', 'timestamp': 1666384480, 'upload_date': '20221021', 'duration': 33, }, }] def _real_extract(self, url): channel_id, msg_id = self._match_valid_url(url).group('channel_id', 'id') embed = 
self._download_webpage( url, msg_id, query={'embed': '1', 'single': []}, note='Downloading embed frame') def clean_text(html_class, html): text = clean_html(get_element_by_class(html_class, html)) return text.replace('\n', ' ') if text else None description = clean_text('tgme_widget_message_text', embed) message = { 'title': description or '', 'description': description, 'channel': clean_text('tgme_widget_message_author', embed), 'channel_id': channel_id, 'timestamp': unified_timestamp(self._search_regex( r'<time[^>]*datetime="([^"]*)"', embed, 'timestamp', fatal=False)), } videos = [] for video in re.findall(r'<a class="tgme_widget_message_video_player(?s:.+?)</time>', embed): video_url = self._search_regex( r'<video[^>]+src="([^"]+)"', video, 'video URL', fatal=False) webpage_url = self._search_regex( r'<a class="tgme_widget_message_video_player[^>]+href="([^"]+)"', video, 'webpage URL', fatal=False) if not video_url or not webpage_url: continue formats = [{ 'url': video_url, 'ext': 'mp4', }] videos.append({ 'id': url_basename(webpage_url), 'webpage_url': update_url_query(webpage_url, {'single': True}), 'duration': parse_duration(self._search_regex( r'<time[^>]+duration[^>]*>([\d:]+)</time>', video, 'duration', fatal=False)), 'thumbnail': self._search_regex( r'tgme_widget_message_video_thumb"[^>]+background-image:url\(\'([^\']+)\'\)', video, 'thumbnail', fatal=False), 'formats': formats, **message, }) playlist_id = None if len(videos) > 1 and 'single' not in parse_qs(url, keep_blank_values=True): playlist_id = f'{channel_id}-{msg_id}' if self._yes_playlist(playlist_id, msg_id): return self.playlist_result( videos, playlist_id, format_field(message, 'channel', f'%s {msg_id}'), description) else: return traverse_obj(videos, lambda _, x: x['id'] == msg_id, get_all=False)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/duboku.py
yt_dlp/extractor/duboku.py
import base64 import re import urllib.parse from .common import InfoExtractor from ..utils import ( ExtractorError, clean_html, extract_attributes, get_elements_by_class, int_or_none, js_to_json, smuggle_url, unescapeHTML, ) def _get_elements_by_tag_and_attrib(html, tag=None, attribute=None, value=None, escape_value=True): """Return the content of the tag with the specified attribute in the passed HTML document""" if tag is None: tag = '[a-zA-Z0-9:._-]+' if attribute is None: attribute = '' else: attribute = rf'\s+(?P<attribute>{re.escape(attribute)})' if value is None: value = '' else: value = re.escape(value) if escape_value else value value = f'=[\'"]?(?P<value>{value})[\'"]?' retlist = [] for m in re.finditer(rf'''(?xs) <(?P<tag>{tag}) (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*? {attribute}{value} (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*? \s*> (?P<content>.*?) </\1> ''', html): retlist.append(m) return retlist def _get_element_by_tag_and_attrib(html, tag=None, attribute=None, value=None, escape_value=True): retval = _get_elements_by_tag_and_attrib(html, tag, attribute, value, escape_value) return retval[0] if retval else None class DubokuIE(InfoExtractor): IE_NAME = 'duboku' IE_DESC = 'www.duboku.io' _VALID_URL = r'(?:https?://[^/]+\.duboku\.io/vodplay/)(?P<id>[0-9]+-[0-9-]+)\.html.*' _TESTS = [{ 'url': 'https://w.duboku.io/vodplay/1575-1-1.html', 'info_dict': { 'id': '1575-1-1', 'ext': 'mp4', 'series': '白色月光', 'title': 'contains:白色月光', 'season_number': 1, 'episode_number': 1, 'season': 'Season 1', 'episode_id': '1', 'season_id': '1', 'episode': 'Episode 1', }, 'params': { 'skip_download': 'm3u8 download', }, }, { 'url': 'https://w.duboku.io/vodplay/1588-1-1.html', 'info_dict': { 'id': '1588-1-1', 'ext': 'mp4', 'series': '亲爱的自己', 'title': 'contains:第1集', 'season_number': 1, 'episode_number': 1, 'episode': 'Episode 1', 'season': 'Season 1', 'episode_id': '1', 'season_id': '1', }, 'params': { 'skip_download': 'm3u8 
download', }, }] _PLAYER_DATA_PATTERN = r'player_data\s*=\s*(\{\s*(.*)})\s*;?\s*</script' def _real_extract(self, url): video_id = self._match_id(url) temp = video_id.split('-') series_id = temp[0] season_id = temp[1] episode_id = temp[2] webpage_url = f'https://w.duboku.io/vodplay/{video_id}.html' webpage_html = self._download_webpage(webpage_url, video_id) # extract video url player_data = self._search_regex( self._PLAYER_DATA_PATTERN, webpage_html, 'player_data') player_data = self._parse_json(player_data, video_id, js_to_json) # extract title temp = get_elements_by_class('title', webpage_html) series_title = None title = None for html in temp: mobj = re.search(r'<a\s+.*>(.*)</a>', html) if mobj: href = extract_attributes(mobj.group(0)).get('href') if href: mobj1 = re.search(r'/(\d+)\.html', href) if mobj1 and mobj1.group(1) == series_id: series_title = clean_html(mobj.group(0)) series_title = re.sub(r'[\s\r\n\t]+', ' ', series_title) title = clean_html(html) title = re.sub(r'[\s\r\n\t]+', ' ', title) break data_url = player_data.get('url') if not data_url: raise ExtractorError('Cannot find url in player_data') player_encrypt = player_data.get('encrypt') if player_encrypt == 1: data_url = urllib.parse.unquote(data_url) elif player_encrypt == 2: data_url = urllib.parse.unquote(base64.b64decode(data_url).decode('ascii')) # if it is an embedded iframe, maybe it's an external source headers = {'Referer': webpage_url} if player_data.get('from') == 'iframe': # use _type url_transparent to retain the meaningful details # of the video. 
return { '_type': 'url_transparent', 'url': smuggle_url(data_url, {'referer': webpage_url}), 'id': video_id, 'title': title, 'series': series_title, 'season_number': int_or_none(season_id), 'season_id': season_id, 'episode_number': int_or_none(episode_id), 'episode_id': episode_id, } formats = self._extract_m3u8_formats(data_url, video_id, 'mp4', headers=headers) return { 'id': video_id, 'title': title, 'series': series_title, 'season_number': int_or_none(season_id), 'season_id': season_id, 'episode_number': int_or_none(episode_id), 'episode_id': episode_id, 'formats': formats, 'http_headers': headers, } class DubokuPlaylistIE(InfoExtractor): IE_NAME = 'duboku:list' IE_DESC = 'www.duboku.io entire series' _VALID_URL = r'(?:https?://[^/]+\.duboku\.io/voddetail/)(?P<id>[0-9]+)\.html.*' _TESTS = [{ 'url': 'https://w.duboku.io/voddetail/1575.html', 'info_dict': { 'id': 'startswith:1575', 'title': '白色月光', }, 'playlist_count': 12, }, { 'url': 'https://w.duboku.io/voddetail/1554.html', 'info_dict': { 'id': 'startswith:1554', 'title': '以家人之名', }, 'playlist_mincount': 30, }] def _real_extract(self, url): mobj = self._match_valid_url(url) if mobj is None: raise ExtractorError(f'Invalid URL: {url}') series_id = mobj.group('id') fragment = urllib.parse.urlparse(url).fragment webpage_url = f'https://w.duboku.io/voddetail/{series_id}.html' webpage_html = self._download_webpage(webpage_url, series_id) # extract title title = _get_element_by_tag_and_attrib(webpage_html, 'h1', 'class', 'title') title = unescapeHTML(title.group('content')) if title else None if not title: title = self._html_search_meta('keywords', webpage_html) if not title: title = _get_element_by_tag_and_attrib(webpage_html, 'title') title = unescapeHTML(title.group('content')) if title else None # extract playlists playlists = {} for div in _get_elements_by_tag_and_attrib( webpage_html, attribute='id', value='playlist\\d+', escape_value=False): playlist_id = div.group('value') playlist = [] for a in 
_get_elements_by_tag_and_attrib( div.group('content'), 'a', 'href', value='[^\'"]+?', escape_value=False): playlist.append({ 'href': unescapeHTML(a.group('value')), 'title': unescapeHTML(a.group('content')), }) playlists[playlist_id] = playlist # select the specified playlist if url fragment exists playlist = None playlist_id = None if fragment: playlist = playlists.get(fragment) playlist_id = fragment else: first = next(iter(playlists.items()), None) if first: (playlist_id, playlist) = first if not playlist: raise ExtractorError( f'Cannot find {fragment}' if fragment else 'Cannot extract playlist') # return url results return self.playlist_result([ self.url_result( urllib.parse.urljoin('https://w.duboku.io', x['href']), ie=DubokuIE.ie_key(), video_title=x.get('title')) for x in playlist], series_id + '#' + playlist_id, title)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/oktoberfesttv.py
yt_dlp/extractor/oktoberfesttv.py
from .common import InfoExtractor class OktoberfestTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?oktoberfest-tv\.de/[^/]+/[^/]+/video/(?P<id>[^/?#]+)' _TEST = { 'url': 'http://www.oktoberfest-tv.de/de/kameras/video/hb-zelt', 'info_dict': { 'id': 'hb-zelt', 'ext': 'mp4', 'title': 're:^Live-Kamera: Hofbräuzelt [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'thumbnail': r're:^https?://.*\.jpg$', 'is_live': True, }, 'params': { 'skip_download': True, }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._html_search_regex( r'<h1><strong>.*?</strong>(.*?)</h1>', webpage, 'title') clip = self._search_regex( r"clip:\s*\{\s*url:\s*'([^']+)'", webpage, 'clip') ncurl = self._search_regex( r"netConnectionUrl:\s*'([^']+)'", webpage, 'rtmp base') video_url = ncurl + clip thumbnail = self._search_regex( r"canvas:\s*\{\s*backgroundImage:\s*'url\(([^)]+)\)'", webpage, 'thumbnail', fatal=False) return { 'id': video_id, 'title': title, 'url': video_url, 'ext': 'mp4', 'is_live': True, 'thumbnail': thumbnail, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nowcanal.py
yt_dlp/extractor/nowcanal.py
from .brightcove import BrightcoveNewIE from .common import InfoExtractor class NowCanalIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?nowcanal\.pt(?:/[\w-]+)+/detalhe/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://www.nowcanal.pt/ultimas/detalhe/pedro-sousa-hjulmand-pode-ter-uma-saida-limpa-do-sporting-daqui-a-um-ano', 'md5': '047f17cb783e66e467d703e704bbc95d', 'info_dict': { 'id': '6376598467112', 'ext': 'mp4', 'title': 'Pedro Sousa «Hjulmand pode ter uma saída limpa do Sporting daqui a um ano»', 'description': '', 'uploader_id': '6108484330001', 'duration': 65.237, 'thumbnail': r're:^https://.+\.jpg', 'timestamp': 1754440620, 'upload_date': '20250806', 'tags': ['now'], }, }, { 'url': 'https://www.nowcanal.pt/programas/frente-a-frente/detalhe/frente-a-frente-eva-cruzeiro-ps-e-rita-matias-chega', 'only_matching': True, }] _BC_URL_TMPL = 'https://players.brightcove.net/6108484330001/chhIqzukMq_default/index.html?videoId={}' def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) video_id = self._search_json( r'videoHandler\.addBrightcoveVideoWithJson\(\[', webpage, 'video data', display_id)['brightcoveVideoId'] return self.url_result(self._BC_URL_TMPL.format(video_id), BrightcoveNewIE)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/dailymail.py
yt_dlp/extractor/dailymail.py
from .common import InfoExtractor from ..utils import ( determine_protocol, int_or_none, join_nonempty, try_get, unescapeHTML, ) class DailyMailIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?dailymail\.co\.uk/(?:video/[^/]+/video-|embed/video/)(?P<id>[0-9]+)' _EMBED_REGEX = [r'<iframe\b[^>]+\bsrc=["\'](?P<url>(?:https?:)?//(?:www\.)?dailymail\.co\.uk/embed/video/\d+\.html)'] _TESTS = [{ 'url': 'http://www.dailymail.co.uk/video/tvshowbiz/video-1295863/The-Mountain-appears-sparkling-water-ad-Heavy-Bubbles.html', 'md5': 'f6129624562251f628296c3a9ffde124', 'info_dict': { 'id': '1295863', 'ext': 'mp4', 'title': 'The Mountain appears in sparkling water ad for \'Heavy Bubbles\'', 'description': 'md5:a93d74b6da172dd5dc4d973e0b766a84', 'thumbnail': r're:https?://i\.dailymail\.co\.uk/.+\.jpg', }, }, { 'url': 'http://www.dailymail.co.uk/embed/video/1295863.html', 'only_matching': True, }] _WEBPAGE_TESTS = [{ 'url': 'https://www.daily-news.gr/lifestyle/%ce%b7-%cf%84%cf%81%ce%b1%ce%b3%ce%bf%cf%85%ce%b4%ce%af%cf%83%cf%84%cf%81%ce%b9%ce%b1-jessie-j-%ce%bc%ce%bf%ce%b9%cf%81%ce%ac%cf%83%cf%84%ce%b7%ce%ba%ce%b5-%cf%83%cf%85%ce%b3%ce%ba%ce%bb%ce%bf%ce%bd/', 'info_dict': { 'id': '3463585', 'ext': 'mp4', 'title': 'Jessie J reveals she has undergone surgery as she shares clips', 'description': 'md5:9fa9a25feca5b656b0b4a39c922fad1e', 'thumbnail': r're:https?://i\.dailymail\.co\.uk/.+\.jpg', }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) video_data = self._parse_json(self._search_regex( r"data-opts='({.+?})'", webpage, 'video data'), video_id) title = unescapeHTML(video_data['title']) sources_url = (try_get( video_data, (lambda x: x['plugins']['sources']['url'], lambda x: x['sources']['url']), str) or f'http://www.dailymail.co.uk/api/player/{video_id}/video-sources.json') video_sources = self._download_json(sources_url, video_id) body = video_sources.get('body') if body: video_sources = body formats = [] for 
rendition in video_sources['renditions']: rendition_url = rendition.get('url') if not rendition_url: continue tbr = int_or_none(rendition.get('encodingRate'), 1000) container = rendition.get('videoContainer') is_hls = container == 'M2TS' protocol = 'm3u8_native' if is_hls else determine_protocol({'url': rendition_url}) formats.append({ 'format_id': join_nonempty('hls' if is_hls else protocol, tbr), 'url': rendition_url, 'width': int_or_none(rendition.get('frameWidth')), 'height': int_or_none(rendition.get('frameHeight')), 'tbr': tbr, 'vcodec': rendition.get('videoCodec'), 'container': container, 'protocol': protocol, 'ext': 'mp4' if is_hls else None, }) return { 'id': video_id, 'title': title, 'description': unescapeHTML(video_data.get('descr')), 'thumbnail': video_data.get('poster') or video_data.get('thumbnail'), 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/svt.py
yt_dlp/extractor/svt.py
import json import re from .common import InfoExtractor from ..utils import ( determine_ext, dict_get, int_or_none, try_get, unified_timestamp, ) from ..utils.traversal import ( require, traverse_obj, ) class SVTBaseIE(InfoExtractor): _GEO_COUNTRIES = ['SE'] def _extract_video(self, video_info, video_id): is_live = dict_get(video_info, ('live', 'simulcast'), default=False) m3u8_protocol = 'm3u8' if is_live else 'm3u8_native' formats = [] subtitles = {} for vr in video_info['videoReferences']: player_type = vr.get('playerType') or vr.get('format') vurl = vr['url'] ext = determine_ext(vurl) if ext == 'm3u8': fmts, subs = self._extract_m3u8_formats_and_subtitles( vurl, video_id, ext='mp4', entry_protocol=m3u8_protocol, m3u8_id=player_type, fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) elif ext == 'f4m': formats.extend(self._extract_f4m_formats( vurl + '?hdcore=3.3.0', video_id, f4m_id=player_type, fatal=False)) elif ext == 'mpd': fmts, subs = self._extract_mpd_formats_and_subtitles( vurl, video_id, mpd_id=player_type, fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) else: formats.append({ 'format_id': player_type, 'url': vurl, }) rights = try_get(video_info, lambda x: x['rights'], dict) or {} if not formats and rights.get('geoBlockedSweden'): self.raise_geo_restricted( 'This video is only available in Sweden', countries=self._GEO_COUNTRIES, metadata_available=True) subtitle_references = dict_get(video_info, ('subtitles', 'subtitleReferences')) if isinstance(subtitle_references, list): for sr in subtitle_references: subtitle_url = sr.get('url') subtitle_lang = sr.get('language', 'sv') if subtitle_url: sub = { 'url': subtitle_url, } if determine_ext(subtitle_url) == 'm3u8': # XXX: no way of testing, is it ever hit? 
sub['ext'] = 'vtt' subtitles.setdefault(subtitle_lang, []).append(sub) title = video_info.get('title') series = video_info.get('programTitle') season_number = int_or_none(video_info.get('season')) episode = video_info.get('episodeTitle') episode_number = int_or_none(video_info.get('episodeNumber')) timestamp = unified_timestamp(rights.get('validFrom')) duration = int_or_none(dict_get(video_info, ('materialLength', 'contentDuration'))) age_limit = None adult = dict_get( video_info, ('inappropriateForChildren', 'blockedForChildren'), skip_false_values=False) if adult is not None: age_limit = 18 if adult else 0 return { 'id': video_id, 'title': title, 'formats': formats, 'subtitles': self._fixup_subtitles(subtitles), 'duration': duration, 'timestamp': timestamp, 'age_limit': age_limit, 'series': series, 'season_number': season_number, 'episode': episode, 'episode_number': episode_number, 'is_live': is_live, } @staticmethod def _fixup_subtitles(subtitles): # See: https://github.com/yt-dlp/yt-dlp/issues/14020 fixed_subtitles = {} for lang, subs in subtitles.items(): for sub in subs: fixed_lang = f'{lang}-forced' if 'text-open' in sub['url'] else lang fixed_subtitles.setdefault(fixed_lang, []).append(sub) return fixed_subtitles class SVTPlayIE(SVTBaseIE): IE_NAME = 'svt:play' IE_DESC = 'SVT Play and Öppet arkiv' _VALID_URL = r'''(?x) (?: (?: svt:| https?://(?:www\.)?svt\.se/barnkanalen/barnplay/[^/]+/ ) (?P<svt_id>[^/?#&]+)| https?://(?:www\.)?(?:svtplay|oppetarkiv)\.se/(?:video|klipp|kanaler)/(?P<id>[^/?#&]+) (?:.*?(?:modalId|id)=(?P<modal_id>[\da-zA-Z-]+))? ) ''' _TESTS = [{ 'url': 'https://www.svtplay.se/video/eXYgwZb/sverige-och-kriget/1-utbrottet', 'md5': '2382036fd6f8c994856c323fe51c426e', 'info_dict': { 'id': 'ePBvGRq', 'ext': 'mp4', 'title': '1. Utbrottet', 'description': 'md5:02291cc3159dbc9aa95d564e77a8a92b', 'series': 'Sverige och kriget', 'episode': '1. 
Utbrottet', 'timestamp': 1746921600, 'upload_date': '20250511', 'duration': 3585, 'thumbnail': r're:^https?://(?:.*[\.-]jpg|www.svtstatic.se/image/.*)$', 'age_limit': 0, 'subtitles': {'sv': 'count:3', 'sv-forced': 'count:3'}, }, 'params': { 'skip_download': 'm3u8', }, }, { 'url': 'https://www.svtplay.se/video/30479064', 'md5': '2382036fd6f8c994856c323fe51c426e', 'info_dict': { 'id': '8zVbDPA', 'ext': 'mp4', 'title': 'Designdrömmar i Stenungsund', 'timestamp': 1615770000, 'upload_date': '20210315', 'duration': 3519, 'thumbnail': r're:^https?://(?:.*[\.-]jpg|www.svtstatic.se/image/.*)$', 'age_limit': 0, 'subtitles': { 'sv': [{ 'ext': 'vtt', }], }, }, 'params': { 'skip_download': 'm3u8', }, 'skip': 'Episode is no longer available', }, { 'url': 'https://www.svtplay.se/video/emBxBQj', 'md5': '2382036fd6f8c994856c323fe51c426e', 'info_dict': { 'id': 'eyBd9aj', 'ext': 'mp4', 'title': '1. Farlig kryssning', 'timestamp': 1491019200, 'description': 'md5:8f350bc605677a5ead36a19a62fd9a34', 'upload_date': '20170401', 'duration': 2566, 'thumbnail': r're:^https?://(?:.*[\.-]jpg|www.svtstatic.se/image/.*)$', 'age_limit': 0, 'episode': '1. 
Farlig kryssning', 'series': 'Rederiet', 'subtitles': { 'sv': 'count:3', }, }, 'params': { 'skip_download': 'm3u8', }, 'expected_warnings': [r'Failed to download (?:MPD|m3u8)'], }, { 'url': 'https://www.svtplay.se/video/jz2rYz7/anders-hansen-moter/james-fallon?info=visa', 'info_dict': { 'id': 'jvXAGVb', 'ext': 'mp4', 'title': 'James Fallon', 'description': r're:James Fallon är hjärnforskaren .{532} att upptäcka psykopati tidigt\?$', 'timestamp': 1743379200, 'upload_date': '20250331', 'duration': 1081, 'thumbnail': r're:^https?://(?:.*[\.-]jpg|www.svtstatic.se/image/.*)$', 'age_limit': 0, 'episode': 'James Fallon', 'series': 'Anders Hansen möter', }, 'params': { 'skip_download': 'dash', }, }, { 'url': 'https://www.svtplay.se/video/30479064/husdrommar/husdrommar-sasong-8-designdrommar-i-stenungsund?modalId=8zVbDPA', 'only_matching': True, }, { 'url': 'https://www.svtplay.se/video/30684086/rapport/rapport-24-apr-18-00-7?id=e72gVpa', 'only_matching': True, }, { # geo restricted to Sweden 'url': 'http://www.oppetarkiv.se/video/5219710/trollflojten', 'only_matching': True, }, { 'url': 'http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg', 'only_matching': True, }, { 'url': 'https://www.svtplay.se/kanaler/svt1', 'only_matching': True, }, { 'url': 'svt:1376446-003A', 'only_matching': True, }, { 'url': 'svt:14278044', 'only_matching': True, }, { 'url': 'https://www.svt.se/barnkanalen/barnplay/kar/eWv5MLX/', 'only_matching': True, }, { 'url': 'svt:eWv5MLX', 'only_matching': True, }] def _extract_by_video_id(self, video_id): data = self._download_json( f'https://api.svt.se/videoplayer-api/video/{video_id}', video_id, headers=self.geo_verification_headers()) info_dict = self._extract_video(data, video_id) if not info_dict.get('title'): info_dict['title'] = traverse_obj(info_dict, 'episode', 'series') return info_dict def _real_extract(self, url): mobj = self._match_valid_url(url) video_id = mobj.group('id') svt_id = mobj.group('svt_id') or mobj.group('modal_id') if 
svt_id: return self._extract_by_video_id(svt_id) webpage = self._download_webpage(url, video_id) data = traverse_obj(self._search_nextjs_data(webpage, video_id), ( 'props', 'urqlState', ..., 'data', {json.loads}, 'detailsPageByPath', {dict}, any, {require('video data')})) details = traverse_obj(data, ( 'modules', lambda _, v: v['details']['smartStart']['item']['videos'], 'details', any)) svt_id = traverse_obj(details, ( 'smartStart', 'item', 'videos', # There can be 'AudioDescribed' and 'SignInterpreted' variants; try 'Default' or else get first (lambda _, v: v['accessibility'] == 'Default', 0), 'svtId', {str}, any)) if not svt_id: svt_id = traverse_obj(data, ('video', 'svtId', {str}, {require('SVT ID')})) info_dict = self._extract_by_video_id(svt_id) if not info_dict.get('title'): info_dict['title'] = re.sub(r'\s*\|\s*.+?$', '', self._og_search_title(webpage)) if not info_dict.get('thumbnail'): info_dict['thumbnail'] = self._og_search_thumbnail(webpage) if not info_dict.get('description'): info_dict['description'] = traverse_obj(details, ('description', {str})) return info_dict class SVTSeriesIE(SVTBaseIE): IE_NAME = 'svt:play:series' _VALID_URL = r'https?://(?:www\.)?svtplay\.se/(?P<id>[^/?&#]+)(?:.+?\btab=(?P<season_slug>[^&#]+))?' 
_TESTS = [{ 'url': 'https://www.svtplay.se/rederiet', 'info_dict': { 'id': 'jpmQYgn', 'title': 'Rederiet', 'description': 'md5:f71122f7cf2e52b643e75915e04cb83d', }, 'playlist_mincount': 318, }, { 'url': 'https://www.svtplay.se/rederiet?tab=season-2-jpmQYgn', 'info_dict': { 'id': 'season-2-jpmQYgn', 'title': 'Rederiet - Säsong 2', 'description': 'md5:f71122f7cf2e52b643e75915e04cb83d', }, 'playlist_mincount': 12, }] @classmethod def suitable(cls, url): return False if SVTPlayIE.suitable(url) else super().suitable(url) def _real_extract(self, url): series_slug, season_id = self._match_valid_url(url).groups() series = self._download_json( 'https://api.svt.se/contento/graphql', series_slug, 'Downloading series page', query={ 'query': '''{ listablesBySlug(slugs: ["%s"]) { associatedContent(include: [productionPeriod, season]) { items { item { ... on Episode { videoSvtId } } } id name } id longDescription name shortDescription } }''' % series_slug, # noqa: UP031 })['data']['listablesBySlug'][0] season_name = None entries = [] for season in series['associatedContent']: if not isinstance(season, dict): continue if season_id: if season.get('id') != season_id: continue season_name = season.get('name') items = season.get('items') if not isinstance(items, list): continue for item in items: video = item.get('item') or {} content_id = video.get('videoSvtId') if not content_id or not isinstance(content_id, str): continue entries.append(self.url_result( 'svt:' + content_id, SVTPlayIE.ie_key(), content_id)) title = series.get('name') season_name = season_name or season_id if title and season_name: title = f'{title} - {season_name}' elif season_id: title = season_id return self.playlist_result( entries, season_id or series.get('id'), title, dict_get(series, ('longDescription', 'shortDescription'))) class SVTPageIE(SVTBaseIE): IE_NAME = 'svt:page' _VALID_URL = r'https?://(?:www\.)?svt\.se/(?:[^/?#]+/)*(?P<id>[^/?&#]+)' _TESTS = [{ 'url': 
'https://www.svt.se/nyheter/lokalt/skane/viktor-18-forlorade-armar-och-ben-i-sepsis-vill-ateruppta-karaten-och-bli-svetsare', 'info_dict': { 'title': 'Viktor, 18, förlorade armar och ben i sepsis – vill återuppta karaten och bli svetsare', 'id': 'viktor-18-forlorade-armar-och-ben-i-sepsis-vill-ateruppta-karaten-och-bli-svetsare', }, 'playlist_count': 2, }, { 'url': 'https://www.svt.se/nyheter/lokalt/skane/forsvarsmakten-om-trafikkaoset-pa-e22-kunde-inte-varit-dar-snabbare', 'info_dict': { 'id': 'jXvk42E', 'title': 'Försvarsmakten om trafikkaoset på E22: Kunde inte varit där snabbare', 'ext': 'mp4', 'duration': 80, 'age_limit': 0, 'timestamp': 1704370009, 'episode': 'Försvarsmakten om trafikkaoset på E22: Kunde inte varit där snabbare', 'series': 'Lokala Nyheter Skåne', 'upload_date': '20240104', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.svt.se/nyheter/svtforum/2023-tungt-ar-for-svensk-media', 'info_dict': { 'title': '2023 tungt år för svensk media', 'id': 'ewqAZv4', 'ext': 'mp4', 'duration': 3074, 'age_limit': 0, 'series': '', 'timestamp': 1702980479, 'upload_date': '20231219', 'episode': 'Mediestudier', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.svt.se/sport/ishockey/bakom-masken-lehners-kamp-mot-mental-ohalsa', 'info_dict': { 'id': '25298267', 'title': 'Bakom masken – Lehners kamp mot mental ohälsa', }, 'playlist_count': 4, 'skip': 'Video is gone', }, { 'url': 'https://www.svt.se/nyheter/utrikes/svenska-andrea-ar-en-mil-fran-branderna-i-kalifornien', 'info_dict': { 'id': '24243746', 'title': 'Svenska Andrea redo att fly sitt hem i Kalifornien', }, 'playlist_count': 2, 'skip': 'Video is gone', }, { # only programTitle 'url': 'http://www.svt.se/sport/ishockey/jagr-tacklar-giroux-under-intervjun', 'info_dict': { 'id': '8439V2K', 'ext': 'mp4', 'title': 'Stjärnorna skojar till det - under SVT-intervjun', 'duration': 27, 'age_limit': 0, }, 'skip': 'Video is gone', }, { 'url': 
'https://www.svt.se/nyheter/lokalt/vast/svt-testar-tar-nagon-upp-skrapet-1', 'only_matching': True, }, { 'url': 'https://www.svt.se/vader/manadskronikor/maj2018', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if SVTPlayIE.suitable(url) else super().suitable(url) def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) title = self._og_search_title(webpage) urql_state = self._search_json(r'urqlState\s*[=:]', webpage, 'json data', display_id) data = traverse_obj(urql_state, (..., 'data', {str}, {json.loads}), get_all=False) or {} def entries(): for video_id in set(traverse_obj(data, ( 'page', (('topMedia', 'svtId'), ('body', ..., 'video', 'svtId')), {str}, ))): info = self._extract_video( self._download_json(f'https://api.svt.se/video/{video_id}', video_id), video_id) info['title'] = title yield info return self.playlist_result(entries(), display_id, title)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/olympics.py
yt_dlp/extractor/olympics.py
from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, int_or_none, parse_iso8601, parse_qs, try_get, update_url, url_or_none, ) from ..utils.traversal import traverse_obj class OlympicsReplayIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?olympics\.com/[a-z]{2}/(?:paris-2024/)?(?:replay|videos?|original-series/episode)/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://olympics.com/fr/video/men-s-109kg-group-a-weightlifting-tokyo-2020-replays', 'info_dict': { 'id': 'f6a0753c-8e6f-4b7d-a435-027054a4f8e9', 'ext': 'mp4', 'title': '+109kg (H) Groupe A - Haltérophilie | Replay de Tokyo 2020', 'upload_date': '20210801', 'timestamp': 1627797600, 'description': 'md5:c66af4a5bc7429dbcc43d15845ff03b3', 'thumbnail': 'https://img.olympics.com/images/image/private/t_1-1_1280/primary/nua4o7zwyaznoaejpbk2', 'duration': 7017.0, }, }, { 'url': 'https://olympics.com/en/original-series/episode/b-boys-and-b-girls-take-the-spotlight-breaking-life-road-to-paris-2024', 'info_dict': { 'id': '32633650-c5ee-4280-8b94-fb6defb6a9b5', 'ext': 'mp4', 'title': 'B-girl Nicka - Breaking Life, Road to Paris 2024 | Episode 1', 'upload_date': '20240517', 'timestamp': 1715948200, 'description': 'md5:f63d728a41270ec628f6ac33ce471bb1', 'thumbnail': 'https://img.olympics.com/images/image/private/t_1-1_1280/primary/a3j96l7j6so3vyfijby1', 'duration': 1321.0, }, }, { 'url': 'https://olympics.com/en/paris-2024/videos/men-s-preliminaries-gbr-esp-ned-rsa-hockey-olympic-games-paris-2024', 'info_dict': { 'id': '3d96db23-8eee-4b7c-8ef5-488a0361026c', 'ext': 'mp4', 'title': 'Men\'s Preliminaries GBR-ESP & NED-RSA | Hockey | Olympic Games Paris 2024', 'upload_date': '20240727', 'timestamp': 1722066600, }, 'skip': 'Geo-restricted to RU, BR, BT, NP, TM, BD, TL', }, { 'url': 'https://olympics.com/en/paris-2024/videos/dnp-suni-lee-i-have-goals-and-i-have-expectations-for-myself-but-i-also-am-trying-to-give-myself-grace', 'info_dict': { 'id': 
'a42f37ab-8a74-41d0-a7d9-af27b7b02a90', 'ext': 'mp4', 'title': 'md5:c7cfbc9918636a98e66400a812e4d407', 'upload_date': '20240729', 'timestamp': 1722288600, }, }] _GEO_BYPASS = False def _extract_from_nextjs_data(self, webpage, video_id): data = traverse_obj(self._search_nextjs_data(webpage, video_id, default={}), ( 'props', 'pageProps', 'page', 'items', lambda _, v: v['name'] == 'videoPlaylist', 'data', 'currentVideo', {dict}, any)) if not data: return None geo_countries = traverse_obj(data, ('countries', ..., {str})) if traverse_obj(data, ('geoRestrictedVideo', {bool})): self.raise_geo_restricted(countries=geo_countries) is_live = traverse_obj(data, ('streamingStatus', {str})) == 'LIVE' m3u8_url = traverse_obj(data, ('videoUrl', {url_or_none})) or data['streamUrl'] tokenized_url = self._tokenize_url(m3u8_url, data['jwtToken'], is_live, video_id) try: formats, subtitles = self._extract_m3u8_formats_and_subtitles( tokenized_url, video_id, 'mp4', m3u8_id='hls') except ExtractorError as e: if isinstance(e.cause, HTTPError) and 'georestricted' in e.cause.msg: self.raise_geo_restricted(countries=geo_countries) raise return { 'formats': formats, 'subtitles': subtitles, 'is_live': is_live, **traverse_obj(data, { 'id': ('videoID', {str}), 'title': ('title', {str}), 'timestamp': ('contentDate', {parse_iso8601}), }), } def _tokenize_url(self, url, token, is_live, video_id): return self._download_json( 'https://metering.olympics.com/tokengenerator', video_id, 'Downloading tokenized m3u8 url', query={ **parse_qs(url), 'url': update_url(url, query=None), 'service-id': 'live' if is_live else 'vod', 'user-auth': token, })['data']['url'] def _legacy_tokenize_url(self, url, video_id): return self._download_json( 'https://olympics.com/tokenGenerator', video_id, 'Downloading legacy tokenized m3u8 url', query={'url': url}) def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) if info := self._extract_from_nextjs_data(webpage, 
video_id): return info title = self._html_search_meta(('title', 'og:title', 'twitter:title'), webpage) video_uuid = self._html_search_meta('episode_uid', webpage) m3u8_url = self._html_search_meta('video_url', webpage) json_ld = self._search_json_ld(webpage, video_uuid) thumbnails_list = json_ld.get('image') if not thumbnails_list: thumbnails_list = self._html_search_regex( r'["\']image["\']:\s*["\']([^"\']+)["\']', webpage, 'images', default='') thumbnails_list = thumbnails_list.replace('[', '').replace(']', '').split(',') thumbnails_list = [thumbnail.strip() for thumbnail in thumbnails_list] thumbnails = [] for thumbnail in thumbnails_list: width_a, height_a, width = self._search_regex( r'/images/image/private/t_(?P<width_a>\d+)-(?P<height_a>\d+)_(?P<width>\d+)/primary/[\W\w\d]+', thumbnail, 'thumb', group=(1, 2, 3), default=(None, None, None)) width_a, height_a, width = int_or_none(width_a), int_or_none(height_a), int_or_none(width) thumbnails.append({ 'url': thumbnail, 'width': width, 'height': int_or_none(try_get(width, lambda x: x * height_a / width_a)), }) formats, subtitles = self._extract_m3u8_formats_and_subtitles( self._legacy_tokenize_url(m3u8_url, video_uuid), video_uuid, 'mp4', m3u8_id='hls') return { 'id': video_uuid, 'title': title, 'thumbnails': thumbnails, 'formats': formats, 'subtitles': subtitles, **json_ld, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/drbonanza.py
yt_dlp/extractor/drbonanza.py
from .common import InfoExtractor
from ..utils import (
    js_to_json,
    parse_duration,
    unescapeHTML,
)


class DRBonanzaIE(InfoExtractor):
    """Extractor for archive videos on DR Bonanza (dr.dk/bonanza)."""

    _VALID_URL = r'https?://(?:www\.)?dr\.dk/bonanza/[^/]+/\d+/[^/]+/(?P<id>\d+)/(?P<display_id>[^/?#&]+)'
    _TEST = {
        'url': 'http://www.dr.dk/bonanza/serie/154/matador/40312/matador---0824-komme-fremmede-',
        'info_dict': {
            'id': '40312',
            'display_id': 'matador---0824-komme-fremmede-',
            'ext': 'mp4',
            'title': 'MATADOR - 08:24. "Komme fremmede".',
            'description': 'md5:77b4c1ac4d4c1b9d610ab4395212ff84',
            'thumbnail': r're:^https?://.*\.(?:gif|jpg)$',
            'duration': 4613,
        },
    }

    def _real_extract(self, url):
        url_match = self._match_valid_url(url)
        video_id = url_match.group('id')
        display_id = url_match.group('display_id')

        webpage = self._download_webpage(url, display_id)

        # Formats come straight from the HTML5 media tags on the page.
        info = self._parse_html5_media_entries(
            url, webpage, display_id, m3u8_id='hls',
            m3u8_entry_protocol='m3u8_native')[0]

        # The page embeds a JS object literal with the asset metadata.
        asset = self._parse_json(
            self._search_regex(
                r'(?s)currentAsset\s*=\s*({.+?})\s*</script', webpage, 'asset'),
            display_id, transform_source=js_to_json)

        def field_value(field):
            # Pull a labelled value out of the programme-details markup.
            return self._search_regex(
                rf'<div[^>]+>\s*<p>{field}:<p>\s*</div>\s*<div[^>]+>\s*<p>([^<]+)</p>',
                webpage, field, default=None)

        info.update({
            'id': asset.get('AssetId') or video_id,
            'display_id': display_id,
            'title': unescapeHTML(asset['AssetTitle']).strip(),
            'description': field_value('Programinfo'),
            'duration': parse_duration(field_value('Tid')),
            'thumbnail': asset.get('AssetImageUrl'),
        })
        return info
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/washingtonpost.py
yt_dlp/extractor/washingtonpost.py
import re

from .common import InfoExtractor
from ..utils import traverse_obj


class WashingtonPostIE(InfoExtractor):
    """Extract a single Washington Post video identified by its UUID.

    Extraction itself is delegated to the ArcPublishing extractor via an
    ``arcpublishing:wapo:<uuid>`` URL (see ``_real_extract``).
    """
    IE_NAME = 'washingtonpost'
    # Matches /video/ and legacy /posttv/ page URLs as well as the internal
    # washingtonpost:<uuid> scheme; the id is a dashed lowercase-hex UUID.
    _VALID_URL = r'(?:washingtonpost:|https?://(?:www\.)?washingtonpost\.com/(?:video|posttv)/(?:[^/]+/)*)(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
    _EMBED_REGEX = [r'<iframe[^>]+\bsrc=["\'](?P<url>https?://(?:www\.)?washingtonpost\.com/video/c/embed/[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})']
    _TESTS = [{
        'url': 'https://www.washingtonpost.com/video/c/video/480ba4ee-1ec7-11e6-82c2-a7dcb313287d',
        'md5': '6f537e1334b714eb15f9563bd4b9cdfa',
        'info_dict': {
            'id': '480ba4ee-1ec7-11e6-82c2-a7dcb313287d',
            'ext': 'mp4',
            'title': 'Egypt finds belongings, debris from plane crash',
            'description': 'md5:a17ceee432f215a5371388c1f680bd86',
            'upload_date': '20160520',
            'timestamp': 1463775187,
        },
    }, {
        'url': 'https://www.washingtonpost.com/video/world/egypt-finds-belongings-debris-from-plane-crash/2016/05/20/480ba4ee-1ec7-11e6-82c2-a7dcb313287d_video.html',
        'only_matching': True,
    }, {
        'url': 'https://www.washingtonpost.com/posttv/world/iraq-to-track-down-antiquities-after-islamic-state-museum-rampage/2015/02/28/7c57e916-bf86-11e4-9dfb-03366e719af8_video.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Hand off to the ArcPublishing extractor, which hosts WaPo video.
        return self.url_result(
            'arcpublishing:wapo:' + video_id, 'ArcPublishing', video_id)


class WashingtonPostArticleIE(InfoExtractor):
    """Extract every video embedded in a Washington Post article as a playlist."""
    IE_NAME = 'washingtonpost:article'
    _VALID_URL = r'https?://(?:www\.)?washingtonpost\.com/(?:[^/]+/)*(?P<id>[^/?#]+)'
    _TESTS = [{
        'url': 'http://www.washingtonpost.com/sf/national/2014/03/22/sinkhole-of-bureaucracy/',
        'info_dict': {
            'id': 'sinkhole-of-bureaucracy',
            'title': 'Sinkhole of bureaucracy',
        },
        'playlist': [{
            'md5': '7ccf53ea8cbb77de5f570242b3b21a59',
            'info_dict': {
                'id': 'fc433c38-b146-11e3-b8b3-44b1d1cd4c1f',
                'ext': 'mp4',
                'title': 'Breaking Points: The Paper Mine',
                'duration': 1290,
                'description': 'Overly complicated paper pushing is nothing new to government bureaucracy. But the way federal retirement applications are filed may be the most outdated. David Fahrenthold explains.',
                'timestamp': 1395440416,
                'upload_date': '20140321',
                'thumbnail': r're:https://[^\.]+.cloudfront\.net/PAPERMINESplash\.jpg',
            },
        }, {
            'md5': '7ccf53ea8cbb77de5f570242b3b21a59',
            'info_dict': {
                'id': '41255e28-b14a-11e3-b8b3-44b1d1cd4c1f',
                'ext': 'mp4',
                'title': 'The town bureaucracy sustains',
                'description': 'Underneath the friendly town of Boyers is a sea of government paperwork. In a disused limestone mine, hundreds of locals now track, file and process retirement applications for the federal government. We set out to find out what it\'s like to do paperwork 230 feet underground.',
                'duration': 2220,
                'timestamp': 1395441819,
                'upload_date': '20140321',
                'thumbnail': r're:https://[^\.]+.cloudfront\.net/BoyersSplash\.jpeg',
            },
        }],
    }, {
        'url': 'http://www.washingtonpost.com/blogs/wonkblog/wp/2014/12/31/one-airline-figured-out-how-to-make-sure-its-airplanes-never-disappear/',
        'info_dict': {
            'id': 'one-airline-figured-out-how-to-make-sure-its-airplanes-never-disappear',
            'title': 'One airline figured out how to make sure its airplanes never disappear',
        },
        'playlist': [{
            'md5': 'a7c1b5634ba5e57a6a82cdffa5b1e0d0',
            'info_dict': {
                'id': '0e4bb54c-9065-11e4-a66f-0ca5037a597d',
                'ext': 'mp4',
                'description': 'Washington Post transportation reporter Ashley Halsey III explains why a plane\'s black box needs to be recovered from a crash site instead of having its information streamed in real time throughout the flight.',
                'upload_date': '20141230',
                'timestamp': 1419972442,
                'title': 'Why black boxes don’t transmit data in real time',
            },
        }],
        'skip': 'Doesnt have a video anymore',
    }, {
        'url': 'https://www.washingtonpost.com/nation/2021/08/05/dixie-river-fire-california-climate/',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Defer to the single-video extractor when the URL is a direct video link.
        return False if WashingtonPostIE.suitable(url) else super().suitable(url)

    def _real_extract(self, url):
        page_id = self._match_id(url)
        webpage = self._download_webpage(url, page_id)
        title = self._og_search_title(webpage)

        # Legacy markup: UUIDs carried in data-uuid / data-video-uuid attributes.
        uuids = re.findall(r'''(?x)
            (?:
                <div\s+class="posttv-video-embed[^>]*?data-uuid=|
                data-video-uuid=
            )"([^"]+)"''', webpage)

        if not uuids:
            # Newer pages: fall back to the Next.js payload; content_elements
            # presumably lists the article's media blocks — only 'video' ones
            # carry a usable UUID in '_id'.
            json_data = self._search_nextjs_data(webpage, page_id)
            for content_element in traverse_obj(json_data, ('props', 'pageProps', 'globalContent', 'content_elements')):
                if content_element.get('type') == 'video':
                    uuids.append(content_element.get('_id'))

        # Each UUID is re-routed through WashingtonPostIE.
        entries = [self.url_result(f'washingtonpost:{uuid}', 'WashingtonPost', uuid) for uuid in uuids]

        return {
            '_type': 'playlist',
            'entries': entries,
            'id': page_id,
            'title': title,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ivoox.py
yt_dlp/extractor/ivoox.py
from .common import InfoExtractor
from ..utils import int_or_none, parse_iso8601, url_or_none, urljoin
from ..utils.traversal import traverse_obj


class IvooxIE(InfoExtractor):
    """Extractor for iVoox podcast episodes (ivoox.com / go.ivoox.com)."""

    _VALID_URL = (
        r'https?://(?:www\.)?ivoox\.com/(?:\w{2}/)?[^/?#]+_rf_(?P<id>[0-9]+)_1\.html',
        r'https?://go\.ivoox\.com/rf/(?P<id>[0-9]+)',
    )
    _TESTS = [{
        'url': 'https://www.ivoox.com/dex-08x30-rostros-del-mal-los-asesinos-en-audios-mp3_rf_143594959_1.html',
        'md5': '993f712de5b7d552459fc66aa3726885',
        'info_dict': {
            'id': '143594959',
            'ext': 'mp3',
            'timestamp': 1742731200,
            'channel': 'DIAS EXTRAÑOS con Santiago Camacho',
            'title': 'DEx 08x30 Rostros del mal: Los asesinos en serie que aterrorizaron España',
            'description': 'md5:eae8b4b9740d0216d3871390b056bb08',
            'uploader': 'Santiago Camacho',
            'thumbnail': 'https://static-1.ivoox.com/audios/c/d/5/2/cd52f46783fe735000c33a803dce2554_XXL.jpg',
            'upload_date': '20250323',
            'episode': 'DEx 08x30 Rostros del mal: Los asesinos en serie que aterrorizaron España',
            'duration': 11837,
            'tags': ['españa', 'asesinos en serie', 'arropiero', 'historia criminal', 'mataviejas'],
        },
    }, {
        'url': 'https://go.ivoox.com/rf/143594959',
        'only_matching': True,
    }, {
        'url': 'https://www.ivoox.com/en/campodelgas-28-03-2025-audios-mp3_rf_144036942_1.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        media_id = self._match_id(url)
        webpage = self._download_webpage(url, media_id, fatal=False)

        # Episode metadata lives in the embedded Nuxt state (may be absent).
        data = self._search_nuxt_data(
            webpage, media_id, fatal=False, traverse=('data', 0, 'data', 'audio'))

        direct_download = self._download_json(
            f'https://vcore-web.ivoox.com/v1/public/audios/{media_id}/download-url',
            media_id, fatal=False, note='Fetching direct download link', headers={'Referer': url})

        # Merge every distinct candidate media URL/path from both the
        # download-url API response and the page data.
        candidate_paths = {
            *traverse_obj(direct_download, ('data', 'downloadUrl', {str}, filter, all)),
            *traverse_obj(data, (('downloadUrl', 'mediaUrl'), {str}, filter)),
        }
        formats = [{
            'url': urljoin('https://ivoox.com', dl_path),
            'http_headers': {'Referer': url},
        } for dl_path in candidate_paths]

        # Later entries take precedence: data-prm-* attributes and OpenGraph
        # tags are overridden by JSON-LD, which is overridden by Nuxt data.
        return {
            'id': media_id,
            'formats': formats,
            'uploader': self._html_search_regex(r'data-prm-author="([^"]+)"', webpage, 'author', default=None),
            'timestamp': parse_iso8601(
                self._html_search_regex(r'data-prm-pubdate="([^"]+)"', webpage, 'timestamp', default=None)),
            'channel': self._html_search_regex(r'data-prm-podname="([^"]+)"', webpage, 'channel', default=None),
            'title': self._html_search_regex(r'data-prm-title="([^"]+)"', webpage, 'title', default=None),
            'thumbnail': self._og_search_thumbnail(webpage, default=None),
            'description': self._og_search_description(webpage, default=None),
            **self._search_json_ld(webpage, media_id, default={}),
            **traverse_obj(data, {
                'title': ('title', {str}),
                'description': ('description', {str}),
                'thumbnail': ('image', {url_or_none}),
                'timestamp': ('uploadDate', {parse_iso8601(delimiter=' ')}),
                'duration': ('duration', {int_or_none}),
                'tags': ('tags', ..., 'name', {str}),
            }),
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/brightcove.py
yt_dlp/extractor/brightcove.py
import base64 import re import struct import urllib.parse import xml.etree.ElementTree from .adobepass import AdobePassIE from .common import InfoExtractor from ..compat import compat_etree_fromstring from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, UnsupportedError, clean_html, dict_get, extract_attributes, find_xpath_attr, fix_xml_ampersands, float_or_none, int_or_none, join_nonempty, js_to_json, mimetype2ext, parse_iso8601, parse_qs, smuggle_url, str_or_none, try_get, unescapeHTML, unsmuggle_url, update_url_query, url_or_none, ) from ..utils.traversal import traverse_obj class BrightcoveLegacyIE(InfoExtractor): IE_NAME = 'brightcove:legacy' _VALID_URL = r'(?:https?://.*brightcove\.com/(services|viewer).*?\?|brightcove:)(?P<query>.*)' _TESTS = [ { # From http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/ 'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001', 'md5': '5423e113865d26e40624dce2e4b45d95', 'note': 'Test Brightcove downloads and detection in GenericIE', 'info_dict': { 'id': '2371591881001', 'ext': 'mp4', 'title': 'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”', 'uploader': '8TV', 'description': 'md5:a950cc4285c43e44d763d036710cd9cd', 'timestamp': 1368213670, 'upload_date': '20130510', 'uploader_id': '1589608506001', }, 'skip': 'The player has been deactivated by the content owner', }, { # From http://medianetwork.oracle.com/video/player/1785452137001 'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001', 'info_dict': { 'id': '1785452137001', 'ext': 'flv', 'title': 'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges', 'description': 'John Rose speaks at the JVM Language Summit, August 1, 2012.', 'uploader': 'Oracle', 'timestamp': 1344975024, 'upload_date': '20120814', 'uploader_id': 
'1460825906', }, 'skip': 'video not playable', }, { # From http://mashable.com/2013/10/26/thermoelectric-bracelet-lets-you-control-your-body-temperature/ 'url': 'http://c.brightcove.com/services/viewer/federated_f9?&playerID=1265504713001&publisherID=AQ%7E%7E%2CAAABBzUwv1E%7E%2CxP-xFHVUstiMFlNYfvF4G9yFnNaqCw_9&videoID=2750934548001', 'info_dict': { 'id': '2750934548001', 'ext': 'mp4', 'title': 'This Bracelet Acts as a Personal Thermostat', 'description': 'md5:547b78c64f4112766ccf4e151c20b6a0', # 'uploader': 'Mashable', 'timestamp': 1382041798, 'upload_date': '20131017', 'uploader_id': '1130468786001', }, }, { # test that the default referer works # from http://national.ballet.ca/interact/video/Lost_in_Motion_II/ 'url': 'http://link.brightcove.com/services/player/bcpid756015033001?bckey=AQ~~,AAAApYJi_Ck~,GxhXCegT1Dp39ilhXuxMJxasUhVNZiil&bctid=2878862109001', 'info_dict': { 'id': '2878862109001', 'ext': 'mp4', 'title': 'Lost in Motion II', 'description': 'md5:363109c02998fee92ec02211bd8000df', 'uploader': 'National Ballet of Canada', }, 'skip': 'Video gone', }, { # test flv videos served by akamaihd.net # From http://www.redbull.com/en/bike/stories/1331655643987/replay-uci-dh-world-cup-2014-from-fort-william 'url': 'http://c.brightcove.com/services/viewer/htmlFederated?%40videoPlayer=ref%3Aevent-stream-356&linkBaseURL=http%3A%2F%2Fwww.redbull.com%2Fen%2Fbike%2Fvideos%2F1331655630249%2Freplay-uci-fort-william-2014-dh&playerKey=AQ%7E%7E%2CAAAApYJ7UqE%7E%2Cxqr_zXk0I-zzNndy8NlHogrCb5QdyZRf&playerID=1398061561001#__youtubedl_smuggle=%7B%22Referer%22%3A+%22http%3A%2F%2Fwww.redbull.com%2Fen%2Fbike%2Fstories%2F1331655643987%2Freplay-uci-dh-world-cup-2014-from-fort-william%22%7D', # The md5 checksum changes on each download 'info_dict': { 'id': '3750436379001', 'ext': 'flv', 'title': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals', 'uploader': 'RBTV Old (do not use)', 'description': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals', 'timestamp': 
1409122195, 'upload_date': '20140827', 'uploader_id': '710858724001', }, 'skip': 'Video gone', }, { # playlist with 'videoList' # from http://support.brightcove.com/en/video-cloud/docs/playlist-support-single-video-players 'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=3550052898001&playerKey=AQ%7E%7E%2CAAABmA9XpXk%7E%2C-Kp7jNgisre1fG5OdqpAFUTcs0lP_ZoL', 'info_dict': { 'title': 'Sealife', 'id': '3550319591001', }, 'playlist_mincount': 7, 'skip': 'Unsupported URL', }, { # playlist with 'playlistTab' (https://github.com/ytdl-org/youtube-dl/issues/9965) 'url': 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=AQ%7E%7E,AAABXlLMdok%7E,NJ4EoMlZ4rZdx9eU1rkMVd8EaYPBBUlg', 'info_dict': { 'id': '1522758701001', 'title': 'Lesson 08', }, 'playlist_mincount': 10, 'skip': 'Unsupported URL', }, { # playerID inferred from bcpid # from http://www.un.org/chinese/News/story.asp?NewsID=27724 'url': 'https://link.brightcove.com/services/player/bcpid1722935254001/?bctid=5360463607001&autoStart=false&secureConnections=true&width=650&height=350', 'only_matching': True, # Tested in GenericIE }, ] _WEBPAGE_TESTS = [{ # embedded brightcove video # it also tests brightcove videos that need to set the 'Referer' # in the http requests 'url': 'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/', 'info_dict': { 'id': '2765128793001', 'ext': 'mp4', 'title': 'Le cours de bourse : l’analyse technique', 'description': 'md5:7e9ad046e968cb2d1114004aba466fd9', 'uploader': 'BFM BUSINESS', }, 'params': { 'skip_download': True, }, 'skip': '404 Not Found', }, { # embedded with itemprop embedURL and video id spelled as `idVideo` 'url': 'http://bfmbusiness.bfmtv.com/mediaplayer/chroniques/olivier-delamarche/', 'info_dict': { 'id': '5255628253001', 'ext': 'mp4', 'title': 'md5:37c519b1128915607601e75a87995fc0', 'description': 'md5:37f7f888b434bb8f8cc8dbd4f7a4cf26', 'uploader': 'BFM 
BUSINESS', 'uploader_id': '876450612001', 'timestamp': 1482255315, 'upload_date': '20161220', }, 'params': { 'skip_download': True, }, 'skip': 'Redirects, page gone', }, { # https://github.com/ytdl-org/youtube-dl/issues/2253 'url': 'http://bcove.me/i6nfkrc3', 'md5': '0ba9446db037002366bab3b3eb30c88c', 'info_dict': { 'id': '3101154703001', 'ext': 'mp4', 'title': 'Still no power', 'uploader': 'thestar.com', 'description': 'Mississauga resident David Farmer is still out of power as a result of the ice storm a month ago. To keep the house warm, Farmer cuts wood from his property for a wood burning stove downstairs.', }, 'skip': 'video gone', }, { # https://github.com/ytdl-org/youtube-dl/issues/3541 'url': 'http://www.kijk.nl/sbs6/leermijvrouwenkennen/videos/jqMiXKAYan2S/aflevering-1', 'info_dict': { 'id': '3866516442001', 'ext': 'mp4', 'title': 'Leer mij vrouwen kennen: Aflevering 1', 'description': 'Leer mij vrouwen kennen: Aflevering 1', 'uploader': 'SBS Broadcasting', }, 'skip': 'Restricted to Netherlands, 404 Not Found', 'params': { 'skip_download': True, # m3u8 download }, }, { # Brightcove video in <iframe> 'url': 'http://www.un.org/chinese/News/story.asp?NewsID=27724', 'md5': '36d74ef5e37c8b4a2ce92880d208b968', 'info_dict': { 'id': '5360463607001', 'ext': 'mp4', 'title': '叙利亚失明儿童在废墟上演唱《心跳》 呼吁获得正常童年生活', 'description': '联合国儿童基金会中东和北非区域大使、作曲家扎德·迪拉尼(Zade Dirani)在3月15日叙利亚冲突爆发7周年纪念日之际发布了为叙利亚谱写的歌曲《心跳》(HEARTBEAT),为受到六年冲突影响的叙利亚儿童发出强烈呐喊,呼吁世界做出共同努力,使叙利亚儿童重新获得享有正常童年生活的权利。', 'uploader': 'United Nations', 'uploader_id': '1362235914001', 'timestamp': 1489593889, 'upload_date': '20170315', }, 'skip': '404 Not Found', }, { # Brightcove with UUID in videoPlayer 'url': 'http://www8.hp.com/cn/zh/home.html', 'info_dict': { 'id': '5255815316001', 'ext': 'mp4', 'title': 'Sprocket Video - China', 'description': 'Sprocket Video - China', 'uploader': 'HP-Video Gallery', 'timestamp': 1482263210, 'upload_date': '20161220', 'uploader_id': '1107601872001', }, 'params': { 'skip_download': 
True, # m3u8 download }, 'skip': 'video rotates...weekly?', }, { # Multiple brightcove videos # https://github.com/ytdl-org/youtube-dl/issues/2283 'url': 'http://www.newyorker.com/online/blogs/newsdesk/2014/01/always-never-nuclear-command-and-control.html', 'info_dict': { 'id': 'always-never', 'title': 'Always / Never - The New Yorker', }, 'playlist_count': 3, 'params': { 'extract_flat': False, 'skip_download': True, }, 'skip': 'Redirects, page gone', }, { # BrightcoveInPageEmbed embed 'url': 'http://www.geekandsundry.com/tabletop-bonus-wils-final-thoughts-on-dread/', 'info_dict': { 'id': '4238694884001', 'ext': 'flv', 'title': 'Tabletop: Dread, Last Thoughts', 'description': 'Tabletop: Dread, Last Thoughts', 'duration': 51690, }, 'skip': 'Redirects, page gone', }, { # Brightcove embed, with no valid 'renditions' but valid 'IOSRenditions' # This video can't be played in browsers if Flash disabled and UA set to iPhone, which is actually a false alarm 'url': 'https://dl.dropboxusercontent.com/u/29092637/interview.html', 'info_dict': { 'id': '4785848093001', 'ext': 'mp4', 'title': 'The Cardinal Pell Interview', 'description': 'Sky News Contributor Andrew Bolt interviews George Pell in Rome, following the Cardinal\'s evidence before the Royal Commission into Child Abuse. 
', 'uploader': 'GlobeCast Australia - GlobeStream', 'uploader_id': '2733773828001', 'upload_date': '20160304', 'timestamp': 1457083087, }, 'params': { # m3u8 downloads 'skip_download': True, }, 'skip': '404 Not Found', }, { # Brightcove embed with whitespace around attribute names 'url': 'http://www.stack.com/video/3167554373001/learn-to-hit-open-three-pointers-with-damian-lillard-s-baseline-drift-drill', 'info_dict': { 'id': '3167554373001', 'ext': 'mp4', 'title': "Learn to Hit Open Three-Pointers With Damian Lillard's Baseline Drift Drill", 'description': 'md5:57bacb0e0f29349de4972bfda3191713', 'uploader_id': '1079349493', 'upload_date': '20140207', 'timestamp': 1391810548, }, 'params': { 'skip_download': True, }, 'skip': '410 Gone', }] @classmethod def _build_brightcove_url(cls, object_str): """ Build a Brightcove url from a xml string containing <object class="BrightcoveExperience">{params}</object> """ # Fix up some stupid HTML, see https://github.com/ytdl-org/youtube-dl/issues/1553 object_str = re.sub(r'(<param(?:\s+[a-zA-Z0-9_]+="[^"]*")*)>', lambda m: m.group(1) + '/>', object_str) # Fix up some stupid XML, see https://github.com/ytdl-org/youtube-dl/issues/1608 object_str = object_str.replace('<--', '<!--') # remove namespace to simplify extraction object_str = re.sub(r'(<object[^>]*)(xmlns=".*?")', r'\1', object_str) object_str = fix_xml_ampersands(object_str) try: object_doc = compat_etree_fromstring(object_str.encode()) except xml.etree.ElementTree.ParseError: return fv_el = find_xpath_attr(object_doc, './param', 'name', 'flashVars') if fv_el is not None: flashvars = dict( (k, v[0]) for k, v in urllib.parse.parse_qs(fv_el.attrib['value']).items()) else: flashvars = {} data_url = object_doc.attrib.get('data', '') data_url_params = parse_qs(data_url) def find_param(name): if name in flashvars: return flashvars[name] node = find_xpath_attr(object_doc, './param', 'name', name) if node is not None: return node.attrib['value'] return data_url_params.get(name) 
params = {} player_id = find_param('playerID') or find_param('playerId') if player_id is None: raise ExtractorError('Cannot find player ID') params['playerID'] = player_id player_key = find_param('playerKey') # Not all pages define this value if player_key is not None: params['playerKey'] = player_key # These fields hold the id of the video video_player = find_param('@videoPlayer') or find_param('videoId') or find_param('videoID') or find_param('@videoList') if video_player is not None: if isinstance(video_player, list): video_player = video_player[0] video_player = video_player.strip() # UUID is also possible for videoPlayer (e.g. # http://www.popcornflix.com/hoodies-vs-hooligans/7f2d2b87-bbf2-4623-acfb-ea942b4f01dd # or http://www8.hp.com/cn/zh/home.html) if not (re.match( r'^(?:\d+|[\da-fA-F]{8}-?[\da-fA-F]{4}-?[\da-fA-F]{4}-?[\da-fA-F]{4}-?[\da-fA-F]{12})$', video_player) or video_player.startswith('ref:')): return None params['@videoPlayer'] = video_player link_base = find_param('linkBaseURL') if link_base is not None: params['linkBaseURL'] = link_base return cls._make_brightcove_url(params) @classmethod def _build_brightcove_url_from_js(cls, object_js): # The layout of JS is as follows: # customBC.createVideo = function (width, height, playerID, playerKey, videoPlayer, VideoRandomID) { # // build Brightcove <object /> XML # } m = re.search( r'''(?x)customBC\.createVideo\( .*? 
# skipping width and height ["\'](?P<playerID>\d+)["\']\s*,\s* # playerID ["\'](?P<playerKey>AQ[^"\']{48})[^"\']*["\']\s*,\s* # playerKey begins with AQ and is 50 characters # in length, however it's appended to itself # in places, so truncate ["\'](?P<videoID>\d+)["\'] # @videoPlayer ''', object_js) if m: return cls._make_brightcove_url(m.groupdict()) @classmethod def _make_brightcove_url(cls, params): return update_url_query( 'https://c.brightcove.com/services/viewer/htmlFederated', params) @classmethod def _extract_brightcove_url(cls, webpage): """Try to extract the brightcove url from the webpage, returns None if it can't be found """ urls = cls._extract_brightcove_urls(webpage) return urls[0] if urls else None @classmethod def _extract_brightcove_urls(cls, webpage): """Return a list of all Brightcove URLs from the webpage """ url_m = re.search( r'''(?x) <meta\s+ (?:property|itemprop)=([\'"])(?:og:video|embedURL)\1[^>]+ content=([\'"])(?P<url>https?://(?:secure|c)\.brightcove.com/(?:(?!\2).)+)\2 ''', webpage) if url_m: url = unescapeHTML(url_m.group('url')) # Some sites don't add it, we can't download with this url, for example: # http://www.ktvu.com/videos/news/raw-video-caltrain-releases-video-of-man-almost/vCTZdY/ if 'playerKey' in url or 'videoId' in url or 'idVideo' in url: return [url] matches = re.findall( r'''(?sx)<object (?: [^>]+?class=[\'"][^>]*?BrightcoveExperience.*?[\'"] | [^>]*?>\s*<param\s+name="movie"\s+value="https?://[^/]*brightcove\.com/ ).+?>\s*</object>''', webpage) if matches: return list(filter(None, [cls._build_brightcove_url(m) for m in matches])) matches = re.findall(r'(customBC\.createVideo\(.+?\);)', webpage) if matches: return list(filter(None, [ cls._build_brightcove_url_from_js(custom_bc) for custom_bc in matches])) return [src for _, src in re.findall( r'<iframe[^>]+src=([\'"])((?:https?:)?//link\.brightcove\.com/services/player/(?!\1).+)\1', webpage)] def _extract_from_webpage(self, url, webpage): bc_urls = 
self._extract_brightcove_urls(webpage) for bc_url in bc_urls: yield self.url_result(smuggle_url(bc_url, {'Referer': url}), BrightcoveLegacyIE) def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) # Change the 'videoId' and others field to '@videoPlayer' url = re.sub(r'(?<=[?&])(videoI(d|D)|idVideo|bctid)', '%40videoPlayer', url) # Change bckey (used by bcove.me urls) to playerKey url = re.sub(r'(?<=[?&])bckey', 'playerKey', url) mobj = self._match_valid_url(url) query_str = mobj.group('query') query = urllib.parse.parse_qs(query_str) video_player = query.get('@videoPlayer') if video_player: # We set the original url as the default 'Referer' header referer = query.get('linkBaseURL', [None])[0] or smuggled_data.get('Referer', url) video_id = video_player[0] if 'playerID' not in query: mobj = re.search(r'/bcpid(\d+)', url) if mobj is not None: query['playerID'] = [mobj.group(1)] publisher_id = query.get('publisherId') if publisher_id and publisher_id[0].isdigit(): publisher_id = publisher_id[0] if not publisher_id: player_key = query.get('playerKey') if player_key and ',' in player_key[0]: player_key = player_key[0] else: player_id = query.get('playerID') if player_id and player_id[0].isdigit(): headers = {} if referer: headers['Referer'] = referer player_page = self._download_webpage( 'https://link.brightcove.com/services/player/bcpid' + player_id[0], video_id, headers=headers, fatal=False) if player_page: player_key = self._search_regex( r'<param\s+name="playerKey"\s+value="([\w~,-]+)"', player_page, 'player key', fatal=False) if player_key: enc_pub_id = player_key.split(',')[1].replace('~', '=') publisher_id = struct.unpack('>Q', base64.urlsafe_b64decode(enc_pub_id))[0] if publisher_id: brightcove_new_url = f'https://players.brightcove.net/{publisher_id}/default_default/index.html?videoId={video_id}' if referer: brightcove_new_url = smuggle_url(brightcove_new_url, {'referrer': referer}) return self.url_result(brightcove_new_url, 
BrightcoveNewIE.ie_key(), video_id) # TODO: figure out if it's possible to extract playlistId from playerKey # elif 'playerKey' in query: # player_key = query['playerKey'] # return self._get_playlist_info(player_key[0]) raise UnsupportedError(url) class BrightcoveNewBaseIE(AdobePassIE): def _parse_brightcove_metadata(self, json_data, video_id, headers={}): formats, subtitles = [], {} sources = json_data.get('sources') or [] for source in sources: container = source.get('container') ext = mimetype2ext(source.get('type')) src = source.get('src') if ext == 'm3u8' or container == 'M2TS': if not src: continue fmts, subs = self._extract_m3u8_formats_and_subtitles( src, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) subtitles = self._merge_subtitles(subtitles, subs) elif ext == 'mpd': if not src: continue fmts, subs = self._extract_mpd_formats_and_subtitles(src, video_id, 'dash', fatal=False) subtitles = self._merge_subtitles(subtitles, subs) else: streaming_src = source.get('streaming_src') stream_name, app_name = source.get('stream_name'), source.get('app_name') if not src and not streaming_src and (not stream_name or not app_name): continue tbr = float_or_none(source.get('avg_bitrate'), 1000) height = int_or_none(source.get('height')) width = int_or_none(source.get('width')) f = { 'tbr': tbr, 'filesize': int_or_none(source.get('size')), 'container': container, 'ext': ext or container.lower(), } if width == 0 and height == 0: f.update({ 'vcodec': 'none', }) else: f.update({ 'width': width, 'height': height, 'vcodec': source.get('codec'), }) def build_format_id(kind): return join_nonempty(kind, tbr and f'{int(tbr)}k', height and f'{height}p') if src or streaming_src: f.update({ 'url': src or streaming_src, 'format_id': build_format_id('http' if src else 'http-streaming'), 'source_preference': 0 if src else -1, }) else: f.update({ 'url': app_name, 'play_path': stream_name, 'format_id': build_format_id('rtmp'), }) fmts = [f] # 
https://support.brightcove.com/playback-api-video-fields-reference#key_systems_object if container == 'WVM' or source.get('key_systems') or ext == 'ism': for f in fmts: f['has_drm'] = True formats.extend(fmts) if not formats: errors = json_data.get('errors') if errors: error = errors[0] self.raise_no_formats( error.get('message') or error.get('error_subcode') or error['error_code'], expected=True) headers.pop('Authorization', None) # or else http formats will give error 400 for f in formats: f.setdefault('http_headers', {}).update(headers) for text_track in json_data.get('text_tracks', []): if text_track.get('kind') != 'captions': continue text_track_url = url_or_none(text_track.get('src')) if not text_track_url: continue lang = (str_or_none(text_track.get('srclang')) or str_or_none(text_track.get('label')) or 'en').lower() subtitles.setdefault(lang, []).append({ 'url': text_track_url, }) is_live = False duration = float_or_none(json_data.get('duration'), 1000) if duration is not None and duration <= 0: is_live = True common_res = [(160, 90), (320, 180), (480, 720), (640, 360), (768, 432), (1024, 576), (1280, 720), (1366, 768), (1920, 1080)] thumb_base_url = dict_get(json_data, ('poster', 'thumbnail')) thumbnails = [{ 'url': re.sub(r'\d+x\d+', f'{w}x{h}', thumb_base_url), 'width': w, 'height': h, } for w, h in common_res] if thumb_base_url else None return { 'id': video_id, 'thumbnails': thumbnails, 'duration': duration, 'formats': formats, 'subtitles': subtitles, 'is_live': is_live, **traverse_obj(json_data, { 'title': ('name', {clean_html}), 'description': ('description', {clean_html}), 'tags': ('tags', ..., {str}, filter, all, filter), 'timestamp': ('published_at', {parse_iso8601}), 'uploader_id': ('account_id', {str}), }), } class BrightcoveNewIE(BrightcoveNewBaseIE): IE_NAME = 'brightcove:new' _VALID_URL = 
r'https?://players\.brightcove\.net/(?P<account_id>\d+)/(?P<player_id>[^/]+)_(?P<embed>[^/]+)/index\.html\?.*(?P<content_type>video|playlist)Id=(?P<video_id>\d+|ref:[^&]+)' _TESTS = [{ 'url': 'http://players.brightcove.net/929656772001/e41d32dc-ec74-459e-a845-6c69f7b724ea_default/index.html?videoId=4463358922001', 'md5': 'c8100925723840d4b0d243f7025703be', 'info_dict': { 'id': '4463358922001', 'ext': 'mp4', 'title': 'Meet the man behind Popcorn Time', 'description': 'md5:eac376a4fe366edc70279bfb681aea16', 'duration': 165.768, 'timestamp': 1441391203, 'upload_date': '20150904', 'uploader_id': '929656772001', 'formats': 'mincount:20', }, 'skip': '404 Not Found', }, { # with rtmp streams 'url': 'http://players.brightcove.net/4036320279001/5d112ed9-283f-485f-a7f9-33f42e8bc042_default/index.html?videoId=4279049078001', 'info_dict': { 'id': '4279049078001', 'ext': 'mp4', 'title': 'Titansgrave: Chapter 0', 'description': 'Titansgrave: Chapter 0', 'duration': 1242.058, 'timestamp': 1433556729, 'upload_date': '20150606', 'uploader_id': '4036320279001', 'formats': 'mincount:39', }, 'skip': '404 Not Found', }, { # playlist stream 'url': 'https://players.brightcove.net/1752604059001/S13cJdUBz_default/index.html?playlistId=5718313430001', 'info_dict': { 'id': '5718313430001', 'title': 'No Audio Playlist', }, 'playlist_count': 7, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://players.brightcove.net/5690807595001/HyZNerRl7_default/index.html?playlistId=5743160747001', 'only_matching': True, }, { # ref: prefixed video id 'url': 'http://players.brightcove.net/3910869709001/21519b5c-4b3b-4363-accb-bdc8f358f823_default/index.html?videoId=ref:7069442', 'only_matching': True, }, { # non numeric ref: prefixed video id 'url': 'http://players.brightcove.net/710858724001/default_default/index.html?videoId=ref:event-stream-356', 'only_matching': True, }, { # unavailable video without message but with error_code 'url': 
'http://players.brightcove.net/1305187701/c832abfb-641b-44eb-9da0-2fe76786505f_default/index.html?videoId=4377407326001', 'only_matching': True, }] _WEBPAGE_TESTS = [{ # brightcove player url embed 'url': 'https://nbc-2.com/weather/forecast/2022/11/16/forecast-warmest-day-of-the-week/', 'md5': '2934d5372b354d27083ccf8575dbfee2', 'info_dict': { 'id': '6315650313112', 'title': 'First Alert Forecast: November 15, 2022', 'ext': 'mp4', 'tags': ['nbc2', 'forecast'], 'uploader_id': '6146886170001', 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1668574571, 'duration': 233.375, 'upload_date': '20221116', }, }, { # embedded with video tag only 'url': 'https://www.gooddishtv.com/tiktok-rapping-chef-mr-pyrex', 'info_dict': { 'id': 'tiktok-rapping-chef-mr-pyrex', 'title': 'TikTok\'s Rapping Chef Makes Jambalaya for the Hosts', 'thumbnail': r're:^https?://.*\.jpg$', 'age_limit': 0, 'description': 'Just in time for Mardi Gras', }, 'playlist': [{ 'info_dict': { 'id': '6299189544001', 'ext': 'mp4', 'title': 'TGD_01-032_5', 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1646078943, 'uploader_id': '1569565978001', 'upload_date': '20220228', 'duration': 217.195, }, }, { 'info_dict': { 'id': '6305565995112', 'ext': 'mp4', 'title': 'TGD 01-087 (Airs 05.25.22)_Segment 5', 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1651604591, 'uploader_id': '1569565978001', 'upload_date': '20220503', 'duration': 310.421, }, }], }, { # Brightcove:new type [2]. 'url': 'http://www.delawaresportszone.com/video-st-thomas-more-earns-first-trip-to-basketball-semis', 'md5': '2b35148fcf48da41c9fb4591650784f3', 'info_dict': { 'id': '5348741021001', 'ext': 'mp4', 'upload_date': '20170306', 'uploader_id': '4191638492001', 'timestamp': 1488769918, 'title': 'VIDEO: St. Thomas More earns first trip to basketball semis', }, 'skip': '404 Not Found', }, { # Alternative brightcove <video> attributes
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
true
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/learningonscreen.py
yt_dlp/extractor/learningonscreen.py
import functools import re from .common import InfoExtractor from ..utils import ( ExtractorError, clean_html, extract_attributes, join_nonempty, parse_duration, unified_timestamp, ) from ..utils.traversal import find_element, traverse_obj class LearningOnScreenIE(InfoExtractor): _VALID_URL = r'https?://learningonscreen\.ac\.uk/ondemand/index\.php/prog/(?P<id>\w+)' _TESTS = [{ 'url': 'https://learningonscreen.ac.uk/ondemand/index.php/prog/005D81B2?bcast=22757013', 'info_dict': { 'id': '005D81B2', 'ext': 'mp4', 'title': 'Planet Earth', 'duration': 3600.0, 'timestamp': 1164567600.0, 'upload_date': '20061126', 'thumbnail': 'https://stream.learningonscreen.ac.uk/trilt-cover-images/005D81B2-Planet-Earth-2006-11-26T190000Z-BBC4.jpg', }, }] def _real_initialize(self): if not self._get_cookies('https://learningonscreen.ac.uk/').get('PHPSESSID-BOB-LIVE'): self.raise_login_required(method='session_cookies') def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) details = traverse_obj(webpage, ( {find_element(id='programme-details', html=True)}, { 'title': ({find_element(tag='h2')}, {clean_html}), 'timestamp': ( {find_element(cls='broadcast-date')}, {functools.partial(re.match, r'([^<]+)')}, 1, {unified_timestamp}), 'duration': ( {find_element(cls='prog-running-time')}, {clean_html}, {parse_duration}), })) title = details.pop('title', None) or traverse_obj(webpage, ( {find_element(id='add-to-existing-playlist', html=True)}, {extract_attributes}, 'data-record-title', {clean_html})) entries = self._parse_html5_media_entries( 'https://stream.learningonscreen.ac.uk', webpage, video_id, m3u8_id='hls', mpd_id='dash', _headers={'Origin': 'https://learningonscreen.ac.uk', 'Referer': 'https://learningonscreen.ac.uk/'}) if not entries: raise ExtractorError('No video found') if len(entries) > 1: duration = details.pop('duration', None) for idx, entry in enumerate(entries, start=1): entry.update(details) entry['id'] = 
join_nonempty(video_id, idx) entry['title'] = join_nonempty(title, idx) return self.playlist_result(entries, video_id, title, duration=duration) return { **entries[0], **details, 'id': video_id, 'title': title, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/packtpub.py
yt_dlp/extractor/packtpub.py
import json from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, clean_html, # remove_end, str_or_none, strip_or_none, unified_timestamp, ) class PacktPubBaseIE(InfoExtractor): # _PACKT_BASE = 'https://www.packtpub.com' _STATIC_PRODUCTS_BASE = 'https://static.packt-cdn.com/products/' class PacktPubIE(PacktPubBaseIE): _VALID_URL = r'https?://(?:(?:www\.)?packtpub\.com/mapt|subscription\.packtpub\.com)/video/[^/]+/(?P<course_id>\d+)/(?P<chapter_id>[^/]+)/(?P<id>[^/]+)(?:/(?P<display_id>[^/?&#]+))?' _TESTS = [{ 'url': 'https://www.packtpub.com/mapt/video/web-development/9781787122215/20528/20530/Project+Intro', 'md5': '1e74bd6cfd45d7d07666f4684ef58f70', 'info_dict': { 'id': '20530', 'ext': 'mp4', 'title': 'Project Intro', 'thumbnail': r're:(?i)^https?://.*\.jpg', 'timestamp': 1490918400, 'upload_date': '20170331', }, }, { 'url': 'https://subscription.packtpub.com/video/web_development/9781787122215/20528/20530/project-intro', 'only_matching': True, }, { 'url': 'https://subscription.packtpub.com/video/programming/9781838988906/p1/video1_1/business-card-project', 'only_matching': True, }] _NETRC_MACHINE = 'packtpub' _TOKEN = None def _perform_login(self, username, password): try: self._TOKEN = self._download_json( 'https://services.packtpub.com/auth-v1/users/tokens', None, 'Downloading Authorization Token', data=json.dumps({ 'username': username, 'password': password, }).encode())['data']['access'] except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status in (400, 401, 404): message = self._parse_json(e.cause.response.read().decode(), None)['message'] raise ExtractorError(message, expected=True) raise def _real_extract(self, url): course_id, chapter_id, video_id, display_id = self._match_valid_url(url).groups() headers = {} if self._TOKEN: headers['Authorization'] = 'Bearer ' + self._TOKEN try: video_url = self._download_json( 
f'https://services.packtpub.com/products-v1/products/{course_id}/{chapter_id}/{video_id}', video_id, 'Downloading JSON video', headers=headers)['data'] except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 400: self.raise_login_required('This video is locked') raise # TODO: find a better way to avoid duplicating course requests # metadata = self._download_json( # '%s/products/%s/chapters/%s/sections/%s/metadata' # % (self._MAPT_REST, course_id, chapter_id, video_id), # video_id)['data'] # title = metadata['pageTitle'] # course_title = metadata.get('title') # if course_title: # title = remove_end(title, ' - %s' % course_title) # timestamp = unified_timestamp(metadata.get('publicationDate')) # thumbnail = urljoin(self._PACKT_BASE, metadata.get('filepath')) return { 'id': video_id, 'url': video_url, 'title': display_id or video_id, # title, # 'thumbnail': thumbnail, # 'timestamp': timestamp, } class PacktPubCourseIE(PacktPubBaseIE): _VALID_URL = r'(?P<url>https?://(?:(?:www\.)?packtpub\.com/mapt|subscription\.packtpub\.com)/video/[^/]+/(?P<id>\d+))' _TESTS = [{ 'url': 'https://www.packtpub.com/mapt/video/web-development/9781787122215', 'info_dict': { 'id': '9781787122215', 'title': 'Learn Nodejs by building 12 projects [Video]', 'description': 'md5:489da8d953f416e51927b60a1c7db0aa', }, 'playlist_count': 90, }, { 'url': 'https://subscription.packtpub.com/video/web_development/9781787122215', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if PacktPubIE.suitable(url) else super().suitable(url) def _real_extract(self, url): mobj = self._match_valid_url(url) url, course_id = mobj.group('url', 'id') course = self._download_json( self._STATIC_PRODUCTS_BASE + f'{course_id}/toc', course_id) metadata = self._download_json( self._STATIC_PRODUCTS_BASE + f'{course_id}/summary', course_id, fatal=False) or {} entries = [] for chapter_num, chapter in enumerate(course['chapters'], 1): chapter_id = 
str_or_none(chapter.get('id')) sections = chapter.get('sections') if not chapter_id or not isinstance(sections, list): continue chapter_info = { 'chapter': chapter.get('title'), 'chapter_number': chapter_num, 'chapter_id': chapter_id, } for section in sections: section_id = str_or_none(section.get('id')) if not section_id or section.get('contentType') != 'video': continue entry = { '_type': 'url_transparent', 'url': '/'.join([url, chapter_id, section_id]), 'title': strip_or_none(section.get('title')), 'description': clean_html(section.get('summary')), 'thumbnail': metadata.get('coverImage'), 'timestamp': unified_timestamp(metadata.get('publicationDate')), 'ie_key': PacktPubIE.ie_key(), } entry.update(chapter_info) entries.append(entry) return self.playlist_result( entries, course_id, metadata.get('title'), clean_html(metadata.get('about')))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/weyyak.py
yt_dlp/extractor/weyyak.py
from .common import InfoExtractor from ..utils import ( float_or_none, int_or_none, parse_age_limit, traverse_obj, unified_timestamp, url_or_none, ) class WeyyakIE(InfoExtractor): _VALID_URL = r'https?://weyyak\.com/(?P<lang>\w+)/(?:player/)?(?P<type>episode|movie)/(?P<id>\d+)' _TESTS = [ { 'url': 'https://weyyak.com/en/player/episode/1341952/Ribat-Al-Hob-Episode49', 'md5': '0caf55c1a615531c8fe60f146ae46849', 'info_dict': { 'id': '1341952', 'ext': 'mp4', 'title': 'Ribat Al Hob', 'duration': 2771, 'alt_title': 'رباط الحب', 'season': 'Season 1', 'season_number': 1, 'episode': 'Episode 49', 'episode_number': 49, 'timestamp': 1485907200, 'upload_date': '20170201', 'thumbnail': r're:^https://content\.weyyak\.com/.+/poster-image', 'categories': ['Drama', 'Thrillers', 'Romance'], 'tags': 'count:8', }, }, { 'url': 'https://weyyak.com/en/movie/233255/8-Seconds', 'md5': 'fe740ae0f63e4d1c8a7fc147a410c564', 'info_dict': { 'id': '233255', 'ext': 'mp4', 'title': '8 Seconds', 'duration': 6490, 'alt_title': '8 ثواني', 'description': 'md5:45b83a155c30b49950624c7e99600b9d', 'age_limit': 15, 'release_year': 2015, 'timestamp': 1683106031, 'upload_date': '20230503', 'thumbnail': r're:^https://content\.weyyak\.com/.+/poster-image', 'categories': ['Drama', 'Social'], 'cast': ['Ceylin Adiyaman', 'Esra Inal'], }, }, ] def _real_extract(self, url): video_id, lang, type_ = self._match_valid_url(url).group('id', 'lang', 'type') path = 'episode/' if type_ == 'episode' else 'contents/moviedetails?contentkey=' data = self._download_json( f'https://msapifo-prod-me.weyyak.z5.com/v1/{lang}/{path}{video_id}', video_id)['data'] m3u8_url = self._download_json( f'https://api-weyyak.akamaized.net/get_info/{data["video_id"]}', video_id, 'Extracting video details')['url_video'] formats, subtitles = self._extract_m3u8_formats_and_subtitles(m3u8_url, video_id) return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, **traverse_obj(data, { 'title': ('title', {str}), 'alt_title': 
('translated_title', {str}), 'description': ('synopsis', {str}), 'duration': ('length', {float_or_none}), 'age_limit': ('age_rating', {parse_age_limit}), 'season_number': ('season_number', {int_or_none}), 'episode_number': ('episode_number', {int_or_none}), 'thumbnail': ('imagery', 'thumbnail', {url_or_none}), 'categories': ('genres', ..., {str}), 'tags': ('tags', ..., {str}), 'cast': (('main_actor', 'main_actress'), {str}), 'timestamp': ('insertedAt', {unified_timestamp}), 'release_year': ('production_year', {int_or_none}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/fourtube.py
yt_dlp/extractor/fourtube.py
import base64 import re import urllib.parse from .common import InfoExtractor from ..utils import ( int_or_none, parse_duration, parse_iso8601, str_or_none, str_to_int, try_get, unified_timestamp, url_or_none, ) class FourTubeBaseIE(InfoExtractor): def _extract_formats(self, url, video_id, media_id, sources): token_url = 'https://{}/{}/desktop/{}'.format( self._TKN_HOST, media_id, '+'.join(sources)) parsed_url = urllib.parse.urlparse(url) tokens = self._download_json(token_url, video_id, data=b'', headers={ 'Origin': f'{parsed_url.scheme}://{parsed_url.hostname}', 'Referer': url, }) return [{ 'url': tokens[res]['token'], 'format_id': res + 'p', 'resolution': res + 'p', 'quality': int(res), } for res in sources] def _real_extract(self, url): mobj = self._match_valid_url(url) kind, video_id, display_id = mobj.group('kind', 'id', 'display_id') if kind == 'm' or not display_id: url = self._URL_TEMPLATE % video_id webpage = self._download_webpage(url, video_id) title = self._html_search_meta('name', webpage) timestamp = parse_iso8601(self._html_search_meta( 'uploadDate', webpage)) thumbnail = self._html_search_meta('thumbnailUrl', webpage) uploader_id = self._html_search_regex( r'<a class="item-to-subscribe" href="[^"]+/(?:channel|user)s?/([^/"]+)" title="Go to [^"]+ page">', webpage, 'uploader id', fatal=False) uploader = self._html_search_regex( r'<a class="item-to-subscribe" href="[^"]+/(?:channel|user)s?/[^/"]+" title="Go to ([^"]+) page">', webpage, 'uploader', fatal=False) categories_html = self._search_regex( r'(?s)><i class="icon icon-tag"></i>\s*Categories / Tags\s*.*?<ul class="[^"]*?list[^"]*?">(.*?)</ul>', webpage, 'categories', fatal=False) categories = None if categories_html: categories = [ c.strip() for c in re.findall( r'(?s)<li><a.*?>(.*?)</a>', categories_html)] view_count = str_to_int(self._search_regex( r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserPlays:([0-9,]+)">', webpage, 'view count', default=None)) like_count = 
str_to_int(self._search_regex( r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserLikes:([0-9,]+)">', webpage, 'like count', default=None)) duration = parse_duration(self._html_search_meta('duration', webpage)) media_id = self._search_regex( r'<button[^>]+data-id=(["\'])(?P<id>\d+)\1[^>]+data-quality=', webpage, 'media id', default=None, group='id') sources = [ quality for _, quality in re.findall(r'<button[^>]+data-quality=(["\'])(.+?)\1', webpage)] if not (media_id and sources): player_js = self._download_webpage( self._search_regex( r'<script[^>]id=(["\'])playerembed\1[^>]+src=(["\'])(?P<url>.+?)\2', webpage, 'player JS', group='url'), video_id, 'Downloading player JS') params_js = self._search_regex( r'\$\.ajax\(url,\ opts\);\s*\}\s*\}\)\(([0-9,\[\] ]+)\)', player_js, 'initialization parameters') params = self._parse_json(f'[{params_js}]', video_id) media_id = params[0] sources = [f'{p}' for p in params[2]] formats = self._extract_formats(url, video_id, media_id, sources) return { 'id': video_id, 'title': title, 'formats': formats, 'categories': categories, 'thumbnail': thumbnail, 'uploader': uploader, 'uploader_id': uploader_id, 'timestamp': timestamp, 'like_count': like_count, 'view_count': view_count, 'duration': duration, 'age_limit': 18, } class FourTubeIE(FourTubeBaseIE): IE_NAME = '4tube' _VALID_URL = r'https?://(?:(?P<kind>www|m)\.)?4tube\.com/(?:videos|embed)/(?P<id>\d+)(?:/(?P<display_id>[^/?#&]+))?' 
_URL_TEMPLATE = 'https://www.4tube.com/videos/%s/video' _TKN_HOST = 'token.4tube.com' _TESTS = [{ 'url': 'http://www.4tube.com/videos/209733/hot-babe-holly-michaels-gets-her-ass-stuffed-by-black', 'md5': '6516c8ac63b03de06bc8eac14362db4f', 'info_dict': { 'id': '209733', 'ext': 'mp4', 'title': 'Hot Babe Holly Michaels gets her ass stuffed by black', 'uploader': 'WCP Club', 'uploader_id': 'wcp-club', 'upload_date': '20131031', 'timestamp': 1383263892, 'duration': 583, 'view_count': int, 'like_count': int, 'categories': list, 'age_limit': 18, }, }, { 'url': 'http://www.4tube.com/embed/209733', 'only_matching': True, }, { 'url': 'http://m.4tube.com/videos/209733/hot-babe-holly-michaels-gets-her-ass-stuffed-by-black', 'only_matching': True, }] class FuxIE(FourTubeBaseIE): _VALID_URL = r'https?://(?:(?P<kind>www|m)\.)?fux\.com/(?:video|embed)/(?P<id>\d+)(?:/(?P<display_id>[^/?#&]+))?' _URL_TEMPLATE = 'https://www.fux.com/video/%s/video' _TKN_HOST = 'token.fux.com' _TESTS = [{ 'url': 'https://www.fux.com/video/195359/awesome-fucking-kitchen-ends-cum-swallow', 'info_dict': { 'id': '195359', 'ext': 'mp4', 'title': 'Awesome fucking in the kitchen ends with cum swallow', 'uploader': 'alenci2342', 'uploader_id': 'alenci2342', 'upload_date': '20131230', 'timestamp': 1388361660, 'duration': 289, 'view_count': int, 'like_count': int, 'categories': list, 'age_limit': 18, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.fux.com/embed/195359', 'only_matching': True, }, { 'url': 'https://www.fux.com/video/195359/awesome-fucking-kitchen-ends-cum-swallow', 'only_matching': True, }] class PornTubeIE(FourTubeBaseIE): _VALID_URL = r'https?://(?:(?P<kind>www|m)\.)?porntube\.com/(?:videos/(?P<display_id>[^/]+)_|embed/)(?P<id>\d+)' _URL_TEMPLATE = 'https://www.porntube.com/videos/video_%s' _TKN_HOST = 'tkn.porntube.com' _TESTS = [{ 'url': 'https://www.porntube.com/videos/teen-couple-doing-anal_7089759', 'info_dict': { 'id': '7089759', 'ext': 'mp4', 'title': 'Teen couple 
doing anal', 'uploader': 'Alexy', 'uploader_id': '91488', 'upload_date': '20150606', 'timestamp': 1433595647, 'duration': 5052, 'view_count': int, 'like_count': int, 'age_limit': 18, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.porntube.com/videos/squirting-teen-ballerina-ecg_1331406', 'info_dict': { 'id': '1331406', 'ext': 'mp4', 'title': 'Squirting Teen Ballerina on ECG', 'uploader': 'Exploited College Girls', 'uploader_id': '665', 'channel': 'Exploited College Girls', 'channel_id': '665', 'upload_date': '20130920', 'timestamp': 1379685485, 'duration': 851, 'view_count': int, 'like_count': int, 'age_limit': 18, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.porntube.com/embed/7089759', 'only_matching': True, }, { 'url': 'https://m.porntube.com/videos/teen-couple-doing-anal_7089759', 'only_matching': True, }] def _real_extract(self, url): mobj = self._match_valid_url(url) video_id, display_id = mobj.group('id', 'display_id') webpage = self._download_webpage(url, display_id) video = self._parse_json( self._search_regex( r'INITIALSTATE\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage, 'data', group='value'), video_id, transform_source=lambda x: urllib.parse.unquote( base64.b64decode(x).decode('utf-8')))['page']['video'] title = video['title'] media_id = video['mediaId'] sources = [str(e['height']) for e in video['encodings'] if e.get('height')] formats = self._extract_formats(url, video_id, media_id, sources) thumbnail = url_or_none(video.get('masterThumb')) uploader = try_get(video, lambda x: x['user']['username'], str) uploader_id = str_or_none(try_get( video, lambda x: x['user']['id'], int)) channel = try_get(video, lambda x: x['channel']['name'], str) channel_id = str_or_none(try_get( video, lambda x: x['channel']['id'], int)) like_count = int_or_none(video.get('likes')) dislike_count = int_or_none(video.get('dislikes')) view_count = int_or_none(video.get('playsQty')) duration = int_or_none(video.get('durationInSeconds')) 
timestamp = unified_timestamp(video.get('publishedAt')) return { 'id': video_id, 'title': title, 'formats': formats, 'thumbnail': thumbnail, 'uploader': uploader or channel, 'uploader_id': uploader_id or channel_id, 'channel': channel, 'channel_id': channel_id, 'timestamp': timestamp, 'like_count': like_count, 'dislike_count': dislike_count, 'view_count': view_count, 'duration': duration, 'age_limit': 18, } class PornerBrosIE(FourTubeBaseIE): _VALID_URL = r'https?://(?:(?P<kind>www|m)\.)?pornerbros\.com/(?:videos/(?P<display_id>[^/]+)_|embed/)(?P<id>\d+)' _URL_TEMPLATE = 'https://www.pornerbros.com/videos/video_%s' _TKN_HOST = 'token.pornerbros.com' _TESTS = [{ 'url': 'https://www.pornerbros.com/videos/skinny-brunette-takes-big-cock-down-her-anal-hole_181369', 'md5': '6516c8ac63b03de06bc8eac14362db4f', 'info_dict': { 'id': '181369', 'ext': 'mp4', 'title': 'Skinny brunette takes big cock down her anal hole', 'uploader': 'PornerBros HD', 'uploader_id': 'pornerbros-hd', 'upload_date': '20130130', 'timestamp': 1359527401, 'duration': 1224, 'view_count': int, 'categories': list, 'age_limit': 18, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.pornerbros.com/embed/181369', 'only_matching': True, }, { 'url': 'https://m.pornerbros.com/videos/skinny-brunette-takes-big-cock-down-her-anal-hole_181369', 'only_matching': True, }]
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/xminus.py
yt_dlp/extractor/xminus.py
import re import time from .common import InfoExtractor from ..compat import ( compat_ord, ) from ..utils import ( int_or_none, parse_duration, ) class XMinusIE(InfoExtractor): _WORKING = False _VALID_URL = r'https?://(?:www\.)?x-minus\.org/track/(?P<id>[0-9]+)' _TEST = { 'url': 'http://x-minus.org/track/4542/%D0%BF%D0%B5%D1%81%D0%B5%D0%BD%D0%BA%D0%B0-%D1%88%D0%BE%D1%84%D0%B5%D1%80%D0%B0.html', 'md5': '401a15f2d2dcf6d592cb95528d72a2a8', 'info_dict': { 'id': '4542', 'ext': 'mp3', 'title': 'Леонид Агутин-Песенка шофёра', 'duration': 156, 'tbr': 320, 'filesize_approx': 5900000, 'view_count': int, 'description': 'md5:03238c5b663810bc79cf42ef3c03e371', }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) artist = self._html_search_regex( r'<a[^>]+href="/artist/\d+">([^<]+)</a>', webpage, 'artist') title = artist + '-' + self._html_search_regex( r'<span[^>]+class="minustrack-full-title(?:\s+[^"]+)?"[^>]*>([^<]+)', webpage, 'title') duration = parse_duration(self._html_search_regex( r'<span[^>]+class="player-duration(?:\s+[^"]+)?"[^>]*>([^<]+)', webpage, 'duration', fatal=False)) mobj = re.search( r'<div[^>]+class="dw-info(?:\s+[^"]+)?"[^>]*>(?P<tbr>\d+)\s*кбит/c\s+(?P<filesize>[0-9.]+)\s*мб</div>', webpage) tbr = filesize_approx = None if mobj: filesize_approx = float(mobj.group('filesize')) * 1000000 tbr = float(mobj.group('tbr')) view_count = int_or_none(self._html_search_regex( r'<span><[^>]+class="icon-chart-bar".*?>(\d+)</span>', webpage, 'view count', fatal=False)) description = self._html_search_regex( r'(?s)<pre[^>]+id="lyrics-original"[^>]*>(.*?)</pre>', webpage, 'song lyrics', fatal=False) if description: description = re.sub(' *\r *', '\n', description) k = self._search_regex( r'<div[^>]+id="player-bottom"[^>]+data-k="([^"]+)">', webpage, 'encoded data') h = time.time() / 3600 a = sum(map(int, [compat_ord(c) for c in k])) + int(video_id) + h video_url = 'http://x-minus.me/dl/minus?id=%s&tkn2=%df%d' % 
(video_id, a, h) return { 'id': video_id, 'title': title, 'url': video_url, # The extension is unknown until actual downloading 'ext': 'mp3', 'duration': duration, 'filesize_approx': filesize_approx, 'tbr': tbr, 'view_count': view_count, 'description': description, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/openrec.py
yt_dlp/extractor/openrec.py
from .common import InfoExtractor from ..utils import ( ExtractorError, get_first, int_or_none, traverse_obj, try_get, unified_strdate, unified_timestamp, ) class OpenRecBaseIE(InfoExtractor): _M3U8_HEADERS = {'Referer': 'https://www.openrec.tv/'} def _extract_pagestore(self, webpage, video_id): return self._parse_json( self._search_regex(r'(?m)window\.pageStore\s*=\s*(\{.+?\});$', webpage, 'window.pageStore'), video_id) def _expand_media(self, video_id, media): for name, m3u8_url in (media or {}).items(): if not m3u8_url: continue yield from self._extract_m3u8_formats( m3u8_url, video_id, ext='mp4', m3u8_id=name, headers=self._M3U8_HEADERS) def _extract_movie(self, webpage, video_id, name, is_live): window_stores = self._extract_pagestore(webpage, video_id) movie_stores = [ # extract all three important data (most of data are duplicated each other, but slightly different!) traverse_obj(window_stores, ('v8', 'state', 'movie'), expected_type=dict), traverse_obj(window_stores, ('v8', 'movie'), expected_type=dict), traverse_obj(window_stores, 'movieStore', expected_type=dict), ] if not any(movie_stores): raise ExtractorError(f'Failed to extract {name} info') formats = list(self._expand_media(video_id, get_first(movie_stores, 'media'))) if not formats: # archived livestreams or subscriber-only videos cookies = self._get_cookies('https://www.openrec.tv/') detail = self._download_json( f'https://apiv5.openrec.tv/api/v5/movies/{video_id}/detail', video_id, headers={ 'Origin': 'https://www.openrec.tv', 'Referer': 'https://www.openrec.tv/', 'access-token': try_get(cookies, lambda x: x.get('access_token').value), 'uuid': try_get(cookies, lambda x: x.get('uuid').value), }) new_media = traverse_obj(detail, ('data', 'items', ..., 'media'), get_all=False) formats = list(self._expand_media(video_id, new_media)) is_live = False return { 'id': video_id, 'title': get_first(movie_stores, 'title'), 'description': get_first(movie_stores, 'introduction'), 'thumbnail': 
get_first(movie_stores, 'thumbnailUrl'), 'formats': formats, 'uploader': get_first(movie_stores, ('channel', 'user', 'name')), 'uploader_id': get_first(movie_stores, ('channel', 'user', 'id')), 'timestamp': int_or_none(get_first(movie_stores, ['publishedAt', 'time']), scale=1000) or unified_timestamp(get_first(movie_stores, 'publishedAt')), 'is_live': is_live, 'http_headers': self._M3U8_HEADERS, } class OpenRecIE(OpenRecBaseIE): IE_NAME = 'openrec' _VALID_URL = r'https?://(?:www\.)?openrec\.tv/live/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://www.openrec.tv/live/2p8v31qe4zy', 'only_matching': True, }, { 'url': 'https://www.openrec.tv/live/wez93eqvjzl', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(f'https://www.openrec.tv/live/{video_id}', video_id) return self._extract_movie(webpage, video_id, 'live', True) class OpenRecCaptureIE(OpenRecBaseIE): IE_NAME = 'openrec:capture' _VALID_URL = r'https?://(?:www\.)?openrec\.tv/capture/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://www.openrec.tv/capture/l9nk2x4gn14', 'only_matching': True, }, { 'url': 'https://www.openrec.tv/capture/mldjr82p7qk', 'info_dict': { 'id': 'mldjr82p7qk', 'title': 'たいじの恥ずかしい英語力', 'uploader': 'たいちゃんねる', 'uploader_id': 'Yaritaiji', 'upload_date': '20210803', }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(f'https://www.openrec.tv/capture/{video_id}', video_id) window_stores = self._extract_pagestore(webpage, video_id) movie_store = window_stores.get('movie') capture_data = window_stores.get('capture') if not capture_data: raise ExtractorError('Cannot extract title') formats = self._extract_m3u8_formats( capture_data.get('source'), video_id, ext='mp4', headers=self._M3U8_HEADERS) return { 'id': video_id, 'title': capture_data.get('title'), 'thumbnail': capture_data.get('thumbnailUrl'), 'formats': formats, 'timestamp': unified_timestamp(traverse_obj(movie_store, 'createdAt', 
expected_type=str)), 'uploader': traverse_obj(movie_store, ('channel', 'name'), expected_type=str), 'uploader_id': traverse_obj(movie_store, ('channel', 'id'), expected_type=str), 'upload_date': unified_strdate(capture_data.get('createdAt')), 'http_headers': self._M3U8_HEADERS, } class OpenRecMovieIE(OpenRecBaseIE): IE_NAME = 'openrec:movie' _VALID_URL = r'https?://(?:www\.)?openrec\.tv/movie/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://www.openrec.tv/movie/nqz5xl5km8v', 'info_dict': { 'id': 'nqz5xl5km8v', 'title': '限定コミュニティ(Discord)参加方法ご説明動画', 'description': 'md5:ebd563e5f5b060cda2f02bf26b14d87f', 'thumbnail': r're:https://.+', 'uploader': 'タイキとカズヒロ', 'uploader_id': 'taiki_to_kazuhiro', 'timestamp': 1638856800, }, }, { 'url': 'https://www.openrec.tv/movie/2p8vvex548y?playlist_id=98brq96vvsgn2nd', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(f'https://www.openrec.tv/movie/{video_id}', video_id) return self._extract_movie(webpage, video_id, 'movie', False)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/hypergryph.py
yt_dlp/extractor/hypergryph.py
from .common import InfoExtractor from ..utils import ( ExtractorError, clean_html, url_or_none, ) from ..utils.traversal import subs_list_to_dict, traverse_obj class MonsterSirenHypergryphMusicIE(InfoExtractor): IE_NAME = 'monstersiren' IE_DESC = '塞壬唱片' _API_BASE = 'https://monster-siren.hypergryph.com/api' _VALID_URL = r'https?://monster-siren\.hypergryph\.com/music/(?P<id>\d+)' _TESTS = [{ 'url': 'https://monster-siren.hypergryph.com/music/514562', 'info_dict': { 'id': '514562', 'ext': 'wav', 'title': 'Flame Shadow', 'album': 'Flame Shadow', 'artists': ['塞壬唱片-MSR'], 'description': 'md5:19e2acfcd1b65b41b29e8079ab948053', 'thumbnail': r're:https?://web\.hycdn\.cn/siren/pic/.+\.jpg', }, }, { 'url': 'https://monster-siren.hypergryph.com/music/514518', 'info_dict': { 'id': '514518', 'ext': 'wav', 'title': 'Heavenly Me (Instrumental)', 'album': 'Heavenly Me', 'artists': ['塞壬唱片-MSR', 'AIYUE blessed : 理名'], 'description': 'md5:ce790b41c932d1ad72eb791d1d8ae598', 'thumbnail': r're:https?://web\.hycdn\.cn/siren/pic/.+\.jpg', }, }] def _real_extract(self, url): audio_id = self._match_id(url) song = self._download_json(f'{self._API_BASE}/song/{audio_id}', audio_id) if traverse_obj(song, 'code') != 0: msg = traverse_obj(song, ('msg', {str}, filter)) raise ExtractorError( msg or 'API returned an error response', expected=bool(msg)) album = None if album_id := traverse_obj(song, ('data', 'albumCid', {str})): album = self._download_json( f'{self._API_BASE}/album/{album_id}/detail', album_id, fatal=False) return { 'id': audio_id, 'vcodec': 'none', **traverse_obj(song, ('data', { 'title': ('name', {str}), 'artists': ('artists', ..., {str}), 'subtitles': ({'url': 'lyricUrl'}, all, {subs_list_to_dict(lang='en')}), 'url': ('sourceUrl', {url_or_none}), })), **traverse_obj(album, ('data', { 'album': ('name', {str}), 'description': ('intro', {clean_html}), 'thumbnail': ('coverUrl', {url_or_none}), })), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/lemonde.py
yt_dlp/extractor/lemonde.py
from .common import InfoExtractor class LemondeIE(InfoExtractor): _VALID_URL = r'https?://(?:.+?\.)?lemonde\.fr/(?:[^/]+/)*(?P<id>[^/]+)\.html' _TESTS = [{ 'url': 'http://www.lemonde.fr/police-justice/video/2016/01/19/comprendre-l-affaire-bygmalion-en-cinq-minutes_4849702_1653578.html', 'md5': 'da120c8722d8632eec6ced937536cc98', 'info_dict': { 'id': 'lqm3kl', 'ext': 'mp4', 'title': "Comprendre l'affaire Bygmalion en 5 minutes", 'thumbnail': r're:^https?://.*\.jpg', 'duration': 309, 'upload_date': '20160119', 'timestamp': 1453194778, 'uploader_id': '3pmkp', }, }, { # standard iframe embed 'url': 'http://www.lemonde.fr/les-decodeurs/article/2016/10/18/tout-comprendre-du-ceta-le-petit-cousin-du-traite-transatlantique_5015920_4355770.html', 'info_dict': { 'id': 'uzsxms', 'ext': 'mp4', 'title': "CETA : quelles suites pour l'accord commercial entre l'Europe et le Canada ?", 'thumbnail': r're:^https?://.*\.jpg', 'duration': 325, 'upload_date': '20161021', 'timestamp': 1477044540, 'uploader_id': '3pmkp', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://redaction.actu.lemonde.fr/societe/video/2016/01/18/calais-debut-des-travaux-de-defrichement-dans-la-jungle_4849233_3224.html', 'only_matching': True, }, { # YouTube embeds 'url': 'http://www.lemonde.fr/pixels/article/2016/12/09/pourquoi-pewdiepie-superstar-de-youtube-a-menace-de-fermer-sa-chaine_5046649_4408996.html', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) digiteka_url = self._proto_relative_url(self._search_regex( r'url\s*:\s*(["\'])(?P<url>(?:https?://)?//(?:www\.)?(?:digiteka\.net|ultimedia\.com)/deliver/.+?)\1', webpage, 'digiteka url', group='url', default=None)) if digiteka_url: return self.url_result(digiteka_url, 'Digiteka') return self.url_result(url, 'Generic')
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/stageplus.py
yt_dlp/extractor/stageplus.py
import json import uuid from .common import InfoExtractor from ..utils import ( float_or_none, traverse_obj, try_call, unified_timestamp, url_or_none, ) class StagePlusVODConcertIE(InfoExtractor): _NETRC_MACHINE = 'stageplus' _VALID_URL = r'https?://(?:www\.)?stage-plus\.com/video/(?P<id>vod_concert_\w+)' _TESTS = [{ 'url': 'https://www.stage-plus.com/video/vod_concert_APNM8GRFDPHMASJKBSPJACG', 'playlist_count': 6, 'info_dict': { 'id': 'vod_concert_APNM8GRFDPHMASJKBSPJACG', 'title': 'Yuja Wang plays Rachmaninoff\'s Piano Concerto No. 2 – from Odeonsplatz', 'description': 'md5:50f78ec180518c9bdb876bac550996fc', 'artists': ['Yuja Wang', 'Lorenzo Viotti'], 'upload_date': '20230331', 'timestamp': 1680249600, 'release_date': '20210709', 'release_timestamp': 1625788800, 'thumbnails': 'count:3', }, 'playlist': [{ 'info_dict': { 'id': 'performance_work_A1IN4PJFE9MM2RJ3CLBMUSJBBSOJAD9O', 'ext': 'mp4', 'title': 'Piano Concerto No. 2 in C Minor, Op. 18', 'description': 'md5:50f78ec180518c9bdb876bac550996fc', 'upload_date': '20230331', 'timestamp': 1680249600, 'release_date': '20210709', 'release_timestamp': 1625788800, 'duration': 2207, 'chapters': 'count:5', 'artists': ['Yuja Wang'], 'composers': ['Sergei Rachmaninoff'], 'album': 'Yuja Wang plays Rachmaninoff\'s Piano Concerto No. 2 – from Odeonsplatz', 'album_artists': ['Yuja Wang', 'Lorenzo Viotti'], 'track': 'Piano Concerto No. 2 in C Minor, Op. 18', 'track_number': 1, 'genre': 'Instrumental Concerto', }, }], 'params': {'skip_download': 'm3u8'}, }] # TODO: Prune this after livestream and/or album extractors are added _GRAPHQL_QUERY = '''query videoDetailPage($videoId: ID!, $sliderItemsFirst: Int = 24) { node(id: $videoId) { __typename ...LiveConcertFields ... 
on LiveConcert { artists { edges { role { ...RoleFields } node { id name sortName } } } isAtmos maxResolution groups { id name typeDisplayName } shortDescription performanceWorks { ...livePerformanceWorkFields } totalDuration sliders { ...contentContainerFields } vodConcert { __typename id } } ...VideoFields ... on Video { artists { edges { role { ...RoleFields } node { id name sortName } } } isAtmos maxResolution isLossless description productionDate takedownDate sliders { ...contentContainerFields } } ...VodConcertFields ... on VodConcert { artists { edges { role { ...RoleFields } node { id name sortName } } } isAtmos maxResolution groups { id name typeDisplayName } performanceWorks { ...PerformanceWorkFields } shortDescription productionDate takedownDate sliders { ...contentContainerFields } } } } fragment LiveConcertFields on LiveConcert { endTime id pictures { ...PictureFields } reruns { ...liveConcertRerunFields } publicationLevel startTime streamStartTime subtitle title typeDisplayName stream { ...liveStreamFields } trailerStream { ...streamFields } geoAccessCountries geoAccessMode } fragment PictureFields on Picture { id url type } fragment liveConcertRerunFields on LiveConcertRerun { streamStartTime endTime startTime stream { ...rerunStreamFields } } fragment rerunStreamFields on RerunStream { publicationLevel streamType url } fragment liveStreamFields on LiveStream { publicationLevel streamType url } fragment streamFields on Stream { publicationLevel streamType url } fragment RoleFields on Role { __typename id type displayName } fragment livePerformanceWorkFields on LivePerformanceWork { __typename id artists { ...artistWithRoleFields } groups { edges { node { id name typeDisplayName } } } work { ...workFields } } fragment artistWithRoleFields on ArtistWithRoleConnection { edges { role { ...RoleFields } node { id name sortName } } } fragment workFields on Work { id title movements { id title } composers { id name } genre { id title } } fragment 
contentContainerFields on CuratedContentContainer { __typename ...SliderFields ...BannerFields } fragment SliderFields on Slider { id headline items(first: $sliderItemsFirst) { edges { node { id __typename ...AlbumFields ...ArtistFields ...EpochFields ...GenreFields ...GroupFields ...LiveConcertFields ...PartnerFields ...PerformanceWorkFields ...VideoFields ...VodConcertFields } } } } fragment AlbumFields on Album { artistAndGroupDisplayInfo id pictures { ...PictureFields } title } fragment ArtistFields on Artist { id name roles { ...RoleFields } pictures { ...PictureFields } } fragment EpochFields on Epoch { id endYear pictures { ...PictureFields } startYear title } fragment GenreFields on Genre { id pictures { ...PictureFields } title } fragment GroupFields on Group { id name typeDisplayName pictures { ...PictureFields } } fragment PartnerFields on Partner { id name typeDisplayName subtypeDisplayName pictures { ...PictureFields } } fragment PerformanceWorkFields on PerformanceWork { __typename id artists { ...artistWithRoleFields } groups { edges { node { id name typeDisplayName } } } work { ...workFields } stream { ...streamFields } vodConcert { __typename id } duration cuePoints { mark title } } fragment VideoFields on Video { id archiveReleaseDate title subtitle pictures { ...PictureFields } stream { ...streamFields } trailerStream { ...streamFields } duration typeDisplayName duration geoAccessCountries geoAccessMode publicationLevel takedownDate } fragment VodConcertFields on VodConcert { id archiveReleaseDate pictures { ...PictureFields } subtitle title typeDisplayName totalDuration geoAccessCountries geoAccessMode trailerStream { ...streamFields } publicationLevel takedownDate } fragment BannerFields on Banner { description link pictures { ...PictureFields } title }''' _TOKEN = None def _perform_login(self, username, password): auth = self._download_json('https://audience.api.stageplus.io/oauth/token', None, headers={ 'Content-Type': 'application/json', 
'Origin': 'https://www.stage-plus.com', }, data=json.dumps({ 'grant_type': 'password', 'username': username, 'password': password, 'device_info': 'Chrome (Windows)', 'client_device_id': str(uuid.uuid4()), }, separators=(',', ':')).encode(), note='Logging in') if auth.get('access_token'): self._TOKEN = auth['access_token'] def _real_initialize(self): if self._TOKEN: return self._TOKEN = try_call( lambda: self._get_cookies('https://www.stage-plus.com/')['dgplus_access_token'].value) if not self._TOKEN: self.raise_login_required() def _real_extract(self, url): concert_id = self._match_id(url) data = self._download_json('https://audience.api.stageplus.io/graphql', concert_id, headers={ 'authorization': f'Bearer {self._TOKEN}', 'content-type': 'application/json', 'Origin': 'https://www.stage-plus.com', }, data=json.dumps({ 'query': self._GRAPHQL_QUERY, 'variables': {'videoId': concert_id}, 'operationName': 'videoDetailPage', }, separators=(',', ':')).encode())['data']['node'] metadata = traverse_obj(data, { 'title': 'title', 'description': ('shortDescription', {str}), 'artists': ('artists', 'edges', ..., 'node', 'name'), 'timestamp': ('archiveReleaseDate', {unified_timestamp}), 'release_timestamp': ('productionDate', {unified_timestamp}), }) thumbnails = traverse_obj(data, ('pictures', lambda _, v: url_or_none(v['url']), { 'id': 'name', 'url': 'url', })) or None entries = [] for idx, video in enumerate(traverse_obj(data, ( 'performanceWorks', lambda _, v: v['id'] and url_or_none(v['stream']['url']))), 1): formats, subtitles = self._extract_m3u8_formats_and_subtitles( video['stream']['url'], video['id'], 'mp4', m3u8_id='hls', query={'token': self._TOKEN}) entries.append({ 'id': video['id'], 'formats': formats, 'subtitles': subtitles, 'album': metadata.get('title'), 'album_artists': metadata.get('artist'), 'track_number': idx, **metadata, **traverse_obj(video, { 'title': ('work', 'title'), 'track': ('work', 'title'), 'duration': ('duration', {float_or_none}), 'chapters': 
( 'cuePoints', lambda _, v: float_or_none(v['mark']) is not None, { 'title': 'title', 'start_time': ('mark', {float_or_none}), }), 'artists': ('artists', 'edges', ..., 'node', 'name'), 'composers': ('work', 'composers', ..., 'name'), 'genre': ('work', 'genre', 'title'), }), }) return self.playlist_result(entries, concert_id, thumbnails=thumbnails, **metadata)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/pinterest.py
yt_dlp/extractor/pinterest.py
import json from .common import InfoExtractor from ..utils import ( determine_ext, float_or_none, int_or_none, str_or_none, strip_or_none, traverse_obj, unified_timestamp, url_or_none, ) class PinterestBaseIE(InfoExtractor): _VALID_URL_BASE = r'''(?x) https?://(?:[^/]+\.)?pinterest\.(?: com|fr|de|ch|jp|cl|ca|it|co\.uk|nz|ru|com\.au|at|pt|co\.kr|es|com\.mx| dk|ph|th|com\.uy|co|nl|info|kr|ie|vn|com\.vn|ec|mx|in|pe|co\.at|hu| co\.in|co\.nz|id|com\.ec|com\.py|tw|be|uk|com\.bo|com\.pe)''' def _call_api(self, resource, video_id, options): return self._download_json( f'https://www.pinterest.com/resource/{resource}Resource/get/', video_id, f'Download {resource} JSON metadata', query={'data': json.dumps({'options': options})}, headers={'X-Pinterest-PWS-Handler': 'www/[username].js'})['resource_response'] def _extract_video(self, data, extract_formats=True): video_id = data['id'] thumbnails = [] images = data.get('images') if isinstance(images, dict): for thumbnail in images.values(): if not isinstance(thumbnail, dict): continue thumbnail_url = url_or_none(thumbnail.get('url')) if not thumbnail_url: continue thumbnails.append({ 'url': thumbnail_url, 'width': int_or_none(thumbnail.get('width')), 'height': int_or_none(thumbnail.get('height')), }) info = { 'title': strip_or_none(traverse_obj(data, 'title', 'grid_title', default='')), 'description': traverse_obj(data, 'seo_description', 'description'), 'timestamp': unified_timestamp(data.get('created_at')), 'thumbnails': thumbnails, 'uploader': traverse_obj(data, ('closeup_attribution', 'full_name')), 'uploader_id': str_or_none(traverse_obj(data, ('closeup_attribution', 'id'))), 'repost_count': int_or_none(data.get('repin_count')), 'comment_count': int_or_none(data.get('comment_count')), 'categories': traverse_obj(data, ('pin_join', 'visual_annotation'), expected_type=list), 'tags': traverse_obj(data, 'hashtags', expected_type=list), } urls = [] formats = [] duration = None domain = data.get('domain', '') if domain.lower() != 
'uploaded by user' and traverse_obj(data, ('embed', 'src')): if not info['title']: info['title'] = None return { '_type': 'url_transparent', 'url': data['embed']['src'], **info, } elif extract_formats: video_list = traverse_obj( data, ('videos', 'video_list'), ('story_pin_data', 'pages', ..., 'blocks', ..., 'video', 'video_list'), expected_type=dict, get_all=False, default={}) for format_id, format_dict in video_list.items(): if not isinstance(format_dict, dict): continue format_url = url_or_none(format_dict.get('url')) if not format_url or format_url in urls: continue urls.append(format_url) duration = float_or_none(format_dict.get('duration'), scale=1000) ext = determine_ext(format_url) if 'hls' in format_id.lower() or ext == 'm3u8': formats.extend(self._extract_m3u8_formats( format_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id=format_id, fatal=False)) else: formats.append({ 'url': format_url, 'format_id': format_id, 'width': int_or_none(format_dict.get('width')), 'height': int_or_none(format_dict.get('height')), 'duration': duration, }) return { 'id': video_id, 'formats': formats, 'duration': duration, 'webpage_url': f'https://www.pinterest.com/pin/{video_id}/', 'extractor_key': PinterestIE.ie_key(), 'extractor': PinterestIE.IE_NAME, **info, } class PinterestIE(PinterestBaseIE): _VALID_URL = rf'{PinterestBaseIE._VALID_URL_BASE}/pin/(?:[\w-]+--)?(?P<id>\d+)' _TESTS = [{ # formats found in data['videos'] 'url': 'https://www.pinterest.com/pin/664281013778109217/', 'md5': '6550c2af85d6d9f3fe3b88954d1577fc', 'info_dict': { 'id': '664281013778109217', 'ext': 'mp4', 'title': 'Origami', 'description': 'md5:e29801cab7d741ea8c741bc50c8d00ab', 'duration': 57.7, 'timestamp': 1593073622, 'upload_date': '20200625', 'repost_count': int, 'comment_count': int, 'categories': list, 'tags': list, 'thumbnail': r're:^https?://.*\.(?:jpg|png)$', }, }, { # formats found in data['story_pin_data'] 'url': 'https://www.pinterest.com/pin/1084663891475263837/', 'md5': 
'069ac19919ab9e1e13fa60de46290b03', 'info_dict': { 'id': '1084663891475263837', 'ext': 'mp4', 'title': 'Gadget, Cool products, Amazon product, technology, Kitchen gadgets', 'description': 'md5:d0a4b6ae996ff0c6eed83bc869598d13', 'uploader': 'CoolCrazyGadgets', 'uploader_id': '1084664028912989237', 'upload_date': '20211003', 'timestamp': 1633246654.0, 'duration': 14.9, 'comment_count': int, 'repost_count': int, 'categories': 'count:9', 'tags': list, 'thumbnail': r're:^https?://.*\.(?:jpg|png)$', }, }, { # vimeo.com embed 'url': 'https://www.pinterest.ca/pin/441282463481903715/', 'info_dict': { 'id': '111691128', 'ext': 'mp4', 'title': 'Tonite Let\'s All Make Love In London (1967)', 'description': 'md5:8190f37b3926807809ec57ec21aa77b2', 'uploader': 'Vimeo', 'uploader_id': '473792960706651251', 'upload_date': '20180120', 'timestamp': 1516409040, 'duration': 3404, 'comment_count': int, 'repost_count': int, 'categories': 'count:9', 'tags': [], 'thumbnail': r're:^https?://.*\.(?:jpg|png)$', 'uploader_url': 'https://vimeo.com/willardandrade', }, 'params': { 'skip_download': 'm3u8', }, }, { 'url': 'https://co.pinterest.com/pin/824721750502199491/', 'only_matching': True, }, { 'url': 'https://pinterest.com/pin/dive-into-serenity-blue-lagoon-pedi-nails-for-a-tranquil-and-refreshing-spa-experience-video-in-2024--2885187256207927', 'info_dict': { 'id': '2885187256207927', 'ext': 'mp4', 'title': 'Dive into Serenity: Blue Lagoon Pedi Nails for a Tranquil and Refreshing Spa Experience! 
💙💅', 'description': 'md5:5da41c767d2317e42e49b663b0b2150f', 'uploader': 'Glamour Artistry |Everyday Outfits, Luxury Fashion & Nail Designs', 'uploader_id': '1142999717836434688', 'upload_date': '20240702', 'timestamp': 1719939156, 'duration': 7.967, 'comment_count': int, 'repost_count': int, 'categories': 'count:9', 'tags': ['#BlueLagoonPediNails', '#SpaExperience'], 'thumbnail': r're:^https?://.*\.(?:jpg|png)$', }, }] def _real_extract(self, url): video_id = self._match_id(url) data = self._call_api( 'Pin', video_id, { 'field_set_key': 'unauth_react_main_pin', 'id': video_id, })['data'] return self._extract_video(data) class PinterestCollectionIE(PinterestBaseIE): _VALID_URL = rf'{PinterestBaseIE._VALID_URL_BASE}/(?P<username>[^/]+)/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://www.pinterest.ca/mashal0407/cool-diys/', 'info_dict': { 'id': '585890301462791043', 'title': 'cool diys', }, 'playlist_count': 8, }, { 'url': 'https://www.pinterest.ca/fudohub/videos/', 'info_dict': { 'id': '682858430939307450', 'title': 'VIDEOS', }, 'playlist_mincount': 365, 'skip': 'Test with extract_formats=False', }] @classmethod def suitable(cls, url): return False if PinterestIE.suitable(url) else super().suitable(url) def _real_extract(self, url): username, slug = self._match_valid_url(url).groups() board = self._call_api( 'Board', slug, { 'slug': slug, 'username': username, })['data'] board_id = board['id'] options = { 'board_id': board_id, 'page_size': 250, } bookmark = None entries = [] while True: if bookmark: options['bookmarks'] = [bookmark] board_feed = self._call_api('BoardFeed', board_id, options) for item in (board_feed.get('data') or []): if not isinstance(item, dict) or item.get('type') != 'pin': continue video_id = item.get('id') if video_id: # Some pins may not be available anonymously via pin URL # video = self._extract_video(item, extract_formats=False) # video.update({ # '_type': 'url_transparent', # 'url': 'https://www.pinterest.com/pin/%s/' % video_id, # }) # 
entries.append(video) entries.append(self._extract_video(item)) bookmark = board_feed.get('bookmark') if not bookmark: break return self.playlist_result( entries, playlist_id=board_id, playlist_title=board.get('name'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/skeb.py
yt_dlp/extractor/skeb.py
from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, clean_html, int_or_none, str_or_none, url_or_none, ) from ..utils.traversal import traverse_obj class SkebIE(InfoExtractor): _VALID_URL = r'https?://skeb\.jp/@(?P<uploader_id>[^/?#]+)/works/(?P<id>\d+)' _TESTS = [{ 'url': 'https://skeb.jp/@riiru_wm/works/10', 'info_dict': { 'id': '466853', 'ext': 'mp4', 'title': '10-1', 'description': 'md5:1ec50901efc3437cfbfe3790468d532d', 'duration': 313, 'genres': ['video'], 'thumbnail': r're:https?://.+', 'uploader': '姫ノ森りぃる@ひとづま', 'uploader_id': 'riiru_wm', }, }, { 'url': 'https://skeb.jp/@furukawa_nob/works/3', 'info_dict': { 'id': '489408', 'ext': 'mp3', 'title': '3-1', 'description': 'md5:6de1f8f876426a6ac321c123848176a8', 'duration': 98, 'genres': ['voice'], 'tags': 'count:11', 'thumbnail': r're:https?://.+', 'uploader': '古川ノブ@宮城の動画勢Vtuber', 'uploader_id': 'furukawa_nob', }, }, { 'url': 'https://skeb.jp/@Rizu_panda_cube/works/626', 'info_dict': { 'id': '626', 'description': 'md5:834557b39ca56960c5f77dd6ddabe775', 'uploader': 'りづ100億%', 'uploader_id': 'Rizu_panda_cube', 'tags': 'count:57', 'genres': ['video'], }, 'playlist_count': 2, 'expected_warnings': ['Skipping unsupported extension'], }, { 'url': 'https://skeb.jp/@Yossshy_Music/works/13', 'info_dict': { 'ext': 'wav', 'id': '5566495', 'title': '13-1', 'description': 'md5:1026b8b9ae38c67c2d995970ec196550', 'uploader': 'Yossshy', 'uploader_id': 'Yossshy_Music', 'duration': 336, 'thumbnail': r're:https?://.+', 'tags': 'count:59', 'genres': ['music'], }, }] def _call_api(self, uploader_id, work_id): return self._download_json( f'https://skeb.jp/api/users/{uploader_id}/works/{work_id}', work_id, headers={ 'Accept': 'application/json', 'Authorization': 'Bearer null', }) def _real_extract(self, url): uploader_id, work_id = self._match_valid_url(url).group('uploader_id', 'id') try: works = self._call_api(uploader_id, work_id) except ExtractorError as e: if not 
isinstance(e.cause, HTTPError) or e.cause.status != 429: raise webpage = e.cause.response.read().decode() value = self._search_regex( r'document\.cookie\s*=\s*["\']request_key=([^;"\']+)', webpage, 'request key') self._set_cookie('skeb.jp', 'request_key', value) works = self._call_api(uploader_id, work_id) info = { 'uploader_id': uploader_id, **traverse_obj(works, { 'age_limit': ('nsfw', {bool}, {lambda x: 18 if x else None}), 'description': (('source_body', 'body'), {clean_html}, filter, any), 'genres': ('genre', {str}, filter, all, filter), 'tags': ('tag_list', ..., {str}, filter, all, filter), 'uploader': ('creator', 'name', {str}), }), } entries = [] for idx, preview in enumerate(traverse_obj(works, ('previews', lambda _, v: url_or_none(v['url']))), 1): ext = traverse_obj(preview, ('information', 'extension', {str})) if ext not in ('mp3', 'mp4', 'wav'): self.report_warning(f'Skipping unsupported extension "{ext}"') continue entries.append({ 'ext': ext, 'title': f'{work_id}-{idx}', 'subtitles': { 'ja': [{ 'ext': 'vtt', 'url': preview['vtt_url'], }], } if url_or_none(preview.get('vtt_url')) else None, 'vcodec': 'none' if ext in ('mp3', 'wav') else None, **info, **traverse_obj(preview, { 'id': ('id', {str_or_none}), 'thumbnail': ('poster_url', {url_or_none}), 'url': ('url', {url_or_none}), }), **traverse_obj(preview, ('information', { 'duration': ('duration', {int_or_none}), 'fps': ('frame_rate', {int_or_none}), 'height': ('height', {int_or_none}), 'width': ('width', {int_or_none}), })), }) return self.playlist_result(entries, work_id, **info)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/hytale.py
yt_dlp/extractor/hytale.py
import re from .cloudflarestream import CloudflareStreamIE from .common import InfoExtractor from ..utils.traversal import traverse_obj class HytaleIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?hytale\.com/news/\d+/\d+/(?P<id>[a-z0-9-]+)' _TESTS = [{ 'url': 'https://hytale.com/news/2021/07/summer-2021-development-update', 'info_dict': { 'id': 'summer-2021-development-update', 'title': 'Summer 2021 Development Update', }, 'playlist_count': 4, 'playlist': [{ 'md5': '0854ebe347d233ee19b86ab7b2ead610', 'info_dict': { 'id': 'ed51a2609d21bad6e14145c37c334999', 'ext': 'mp4', 'title': 'Avatar Personalization', 'thumbnail': r're:https://videodelivery\.net/\w+/thumbnails/thumbnail\.jpg', }, }], }, { 'url': 'https://www.hytale.com/news/2019/11/hytale-graphics-update', 'info_dict': { 'id': 'hytale-graphics-update', 'title': 'Hytale graphics update', }, 'playlist_count': 2, }] def _real_initialize(self): media_webpage = self._download_webpage( 'https://hytale.com/media', None, note='Downloading list of media', fatal=False) or '' clips_json = traverse_obj( self._search_json( r'window\.__INITIAL_COMPONENTS_STATE__\s*=\s*\[', media_webpage, 'clips json', None), ('media', 'clips')) or [] self._titles = {clip.get('src'): clip.get('caption') for clip in clips_json} def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) entries = [ self.url_result( f'https://cloudflarestream.com/{video_hash}/manifest/video.mpd?parentOrigin=https%3A%2F%2Fhytale.com', CloudflareStreamIE, title=self._titles.get(video_hash), url_transparent=True) for video_hash in re.findall( r'<stream\s+class\s*=\s*"ql-video\s+cf-stream"\s+src\s*=\s*"([a-f0-9]{32})"', webpage) ] return self.playlist_result(entries, playlist_id, self._og_search_title(webpage))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/mave.py
yt_dlp/extractor/mave.py
import functools import math from .common import InfoExtractor from ..utils import ( InAdvancePagedList, clean_html, int_or_none, parse_iso8601, urljoin, ) from ..utils.traversal import require, traverse_obj class MaveBaseIE(InfoExtractor): _API_BASE_URL = 'https://api.mave.digital/v1/website' _API_BASE_STORAGE_URL = 'https://store.cloud.mts.ru/mave/' def _load_channel_meta(self, channel_id, display_id): return traverse_obj(self._download_json( f'{self._API_BASE_URL}/{channel_id}/', display_id, note='Downloading channel metadata'), 'podcast') def _load_episode_meta(self, channel_id, episode_code, display_id): return self._download_json( f'{self._API_BASE_URL}/{channel_id}/episodes/{episode_code}', display_id, note='Downloading episode metadata') def _create_entry(self, channel_id, channel_meta, episode_meta): episode_code = traverse_obj(episode_meta, ('code', {int}, {require('episode code')})) return { 'display_id': f'{channel_id}-{episode_code}', 'extractor_key': MaveIE.ie_key(), 'extractor': MaveIE.IE_NAME, 'webpage_url': f'https://{channel_id}.mave.digital/ep-{episode_code}', 'channel_id': channel_id, 'channel_url': f'https://{channel_id}.mave.digital/', 'vcodec': 'none', **traverse_obj(episode_meta, { 'id': ('id', {str}), 'url': ('audio', {urljoin(self._API_BASE_STORAGE_URL)}), 'title': ('title', {str}), 'description': ('description', {clean_html}), 'thumbnail': ('image', {urljoin(self._API_BASE_STORAGE_URL)}), 'duration': ('duration', {int_or_none}), 'season_number': ('season', {int_or_none}), 'episode_number': ('number', {int_or_none}), 'view_count': ('listenings', {int_or_none}), 'like_count': ('reactions', lambda _, v: v['type'] == 'like', 'count', {int_or_none}, any), 'dislike_count': ('reactions', lambda _, v: v['type'] == 'dislike', 'count', {int_or_none}, any), 'age_limit': ('is_explicit', {bool}, {lambda x: 18 if x else None}), 'timestamp': ('publish_date', {parse_iso8601}), }), **traverse_obj(channel_meta, { 'series_id': ('id', {str}), 'series': 
('title', {str}), 'channel': ('title', {str}), 'uploader': ('author', {str}), }), } class MaveIE(MaveBaseIE): IE_NAME = 'mave' _VALID_URL = r'https?://(?P<channel_id>[\w-]+)\.mave\.digital/ep-(?P<episode_code>\d+)' _TESTS = [{ 'url': 'https://ochenlichnoe.mave.digital/ep-25', 'md5': 'aa3e513ef588b4366df1520657cbc10c', 'info_dict': { 'id': '4035f587-914b-44b6-aa5a-d76685ad9bc2', 'ext': 'mp3', 'display_id': 'ochenlichnoe-25', 'title': 'Между мной и миром: психология самооценки', 'description': 'md5:4b7463baaccb6982f326bce5c700382a', 'uploader': 'Самарский университет', 'channel': 'Очень личное', 'channel_id': 'ochenlichnoe', 'channel_url': 'https://ochenlichnoe.mave.digital/', 'view_count': int, 'like_count': int, 'dislike_count': int, 'duration': 3744, 'thumbnail': r're:https://.+/storage/podcasts/.+\.jpg', 'series': 'Очень личное', 'series_id': '2e0c3749-6df2-4946-82f4-50691419c065', 'season': 'Season 3', 'season_number': 3, 'episode': 'Episode 3', 'episode_number': 3, 'timestamp': 1747817300, 'upload_date': '20250521', }, }, { 'url': 'https://budem.mave.digital/ep-12', 'md5': 'e1ce2780fcdb6f17821aa3ca3e8c919f', 'info_dict': { 'id': '41898bb5-ff57-4797-9236-37a8e537aa21', 'ext': 'mp3', 'display_id': 'budem-12', 'title': 'Екатерина Михайлова: "Горе от ума" не про женщин написана', 'description': 'md5:fa3bdd59ee829dfaf16e3efcb13f1d19', 'uploader': 'Полина Цветкова+Евгения Акопова', 'channel': 'Все там будем', 'channel_id': 'budem', 'channel_url': 'https://budem.mave.digital/', 'view_count': int, 'like_count': int, 'dislike_count': int, 'age_limit': 18, 'duration': 3664, 'thumbnail': r're:https://.+/storage/podcasts/.+\.jpg', 'series': 'Все там будем', 'series_id': 'fe9347bf-c009-4ebd-87e8-b06f2f324746', 'season': 'Season 2', 'season_number': 2, 'episode': 'Episode 5', 'episode_number': 5, 'timestamp': 1735538400, 'upload_date': '20241230', }, }] def _real_extract(self, url): channel_id, episode_code = self._match_valid_url(url).group( 'channel_id', 'episode_code') 
display_id = f'{channel_id}-{episode_code}' channel_meta = self._load_channel_meta(channel_id, display_id) episode_meta = self._load_episode_meta(channel_id, episode_code, display_id) return self._create_entry(channel_id, channel_meta, episode_meta) class MaveChannelIE(MaveBaseIE): IE_NAME = 'mave:channel' _VALID_URL = r'https?://(?P<id>[\w-]+)\.mave\.digital/?(?:$|[?#])' _TESTS = [{ 'url': 'https://budem.mave.digital/', 'info_dict': { 'id': 'budem', 'title': 'Все там будем', 'description': 'md5:f04ae12a42be0f1d765c5e326b41987a', }, 'playlist_mincount': 15, }, { 'url': 'https://ochenlichnoe.mave.digital/', 'info_dict': { 'id': 'ochenlichnoe', 'title': 'Очень личное', 'description': 'md5:ee36a6a52546b91b487fe08c552fdbb2', }, 'playlist_mincount': 20, }, { 'url': 'https://geekcity.mave.digital/', 'info_dict': { 'id': 'geekcity', 'title': 'Мужчины в трико', 'description': 'md5:4164d425d60a0d97abdce9d1f6f8e049', }, 'playlist_mincount': 80, }] _PAGE_SIZE = 50 def _entries(self, channel_id, channel_meta, page_num): page_data = self._download_json( f'{self._API_BASE_URL}/{channel_id}/episodes', channel_id, query={ 'view': 'all', 'page': page_num + 1, 'sort': 'newest', 'format': 'all', }, note=f'Downloading page {page_num + 1}') for ep in traverse_obj(page_data, ('episodes', lambda _, v: v['audio'] and v['id'])): yield self._create_entry(channel_id, channel_meta, ep) def _real_extract(self, url): channel_id = self._match_id(url) channel_meta = self._load_channel_meta(channel_id, channel_id) return { '_type': 'playlist', 'id': channel_id, **traverse_obj(channel_meta, { 'title': ('title', {str}), 'description': ('description', {str}), }), 'entries': InAdvancePagedList( functools.partial(self._entries, channel_id, channel_meta), math.ceil(channel_meta['episodes_count'] / self._PAGE_SIZE), self._PAGE_SIZE), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tviplayer.py
yt_dlp/extractor/tviplayer.py
from .common import InfoExtractor from ..utils import traverse_obj class TVIPlayerIE(InfoExtractor): _VALID_URL = r'https?://tviplayer\.iol\.pt(/programa/[\w-]+/[a-f0-9]+)?/\w+/(?P<id>\w+)' _TESTS = [{ 'url': 'https://tviplayer.iol.pt/programa/jornal-das-8/53c6b3903004dc006243d0cf/video/61c8e8b90cf2c7ea0f0f71a9', 'info_dict': { 'id': '61c8e8b90cf2c7ea0f0f71a9', 'ext': 'mp4', 'duration': 4167, 'title': 'Jornal das 8 - 26 de dezembro de 2021', 'thumbnail': 'https://www.iol.pt/multimedia/oratvi/multimedia/imagem/id/61c8ee630cf2cc58e7d98d9f/', 'season_number': 8, 'season': 'Season 8', }, }, { 'url': 'https://tviplayer.iol.pt/programa/isabel/62b471090cf26256cd2a8594/video/62be445f0cf2ea4f0a5218e5', 'info_dict': { 'id': '62be445f0cf2ea4f0a5218e5', 'ext': 'mp4', 'duration': 3255, 'season': 'Season 1', 'title': 'Isabel - Episódio 1', 'thumbnail': 'https://www.iol.pt/multimedia/oratvi/multimedia/imagem/id/62beac200cf2f9a86eab856b/', 'season_number': 1, }, }, { # no /programa/ 'url': 'https://tviplayer.iol.pt/video/62c4131c0cf2f9a86eac06bb', 'info_dict': { 'id': '62c4131c0cf2f9a86eac06bb', 'ext': 'mp4', 'title': 'David e Mickael Carreira respondem: «Qual é o próximo a ser pai?»', 'thumbnail': 'https://www.iol.pt/multimedia/oratvi/multimedia/imagem/id/62c416490cf2ea367d4433fd/', 'season': 'Season 2', 'duration': 148, 'season_number': 2, }, }, { # episodio url 'url': 'https://tviplayer.iol.pt/programa/para-sempre/61716c360cf2365a5ed894c4/episodio/t1e187', 'info_dict': { 'id': 't1e187', 'ext': 'mp4', 'season': 'Season 1', 'title': 'Quem denunciou Pedro?', 'thumbnail': 'https://www.iol.pt/multimedia/oratvi/multimedia/imagem/id/62eda30b0cf2ea367d48973b/', 'duration': 1250, 'season_number': 1, }, }] def _real_initialize(self): self.wms_auth_sign_token = self._download_webpage( 'https://services.iol.pt/matrix?userId=', 'wmsAuthSign', note='Trying to get wmsAuthSign token') def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) 
json_data = self._search_json( r'<script>\s*jsonData\s*=', webpage, 'json_data', video_id) formats, subtitles = self._extract_m3u8_formats_and_subtitles( f'{json_data["videoUrl"]}?wmsAuthSign={self.wms_auth_sign_token}', video_id, ext='mp4') return { 'id': video_id, 'title': json_data.get('title') or self._og_search_title(webpage), 'thumbnail': json_data.get('cover') or self._og_search_thumbnail(webpage), 'duration': json_data.get('duration'), 'formats': formats, 'subtitles': subtitles, 'season_number': traverse_obj(json_data, ('program', 'seasonNum')), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/lefigaro.py
yt_dlp/extractor/lefigaro.py
import json
import math

from .common import InfoExtractor
from ..utils import (
    InAdvancePagedList,
    traverse_obj,
)


class LeFigaroVideoEmbedIE(InfoExtractor):
    """Extractor for single Le Figaro video embed pages.

    The page is a Next.js app; the media itself is hosted on JW Platform,
    so extraction is delegated via a `jwplatform:` URL result.
    """
    _VALID_URL = r'https?://video\.lefigaro\.fr/embed/[^?#]+/(?P<id>[\w-]+)'
    _TESTS = [{
        'url': 'https://video.lefigaro.fr/embed/figaro/video/les-francais-ne-veulent-ils-plus-travailler-suivez-en-direct-le-club-le-figaro-idees/',
        'md5': 'a0c3069b7e4c4526abf0053a7713f56f',
        'info_dict': {
            'id': 'g9j7Eovo',
            'title': 'Les Français ne veulent-ils plus travailler ? Retrouvez Le Club Le Figaro Idées',
            'description': 'md5:862b8813148ba4bf10763a65a69dfe41',
            'upload_date': '20230216',
            'timestamp': 1676581615,
            'duration': 3076,
            'thumbnail': r're:^https?://[^?#]+\.(?:jpeg|jpg)',
            'ext': 'mp4',
        },
    }, {
        'url': 'https://video.lefigaro.fr/embed/figaro/video/intelligence-artificielle-faut-il-sen-mefier/',
        'md5': '319c662943dd777bab835cae1e2d73a5',
        'info_dict': {
            'id': 'LeAgybyc',
            'title': 'Intelligence artificielle : faut-il s’en méfier ?',
            'description': 'md5:249d136e3e5934a67c8cb704f8abf4d2',
            'upload_date': '20230124',
            'timestamp': 1674584477,
            'duration': 860,
            'thumbnail': r're:^https?://[^?#]+\.(?:jpeg|jpg)',
            'ext': 'mp4',
        },
    }]
    _WEBPAGE_TESTS = [{
        'url': 'https://video.lefigaro.fr/figaro/video/suivez-en-direct-le-club-le-figaro-international-avec-philippe-gelie-9/',
        'md5': '6289f9489efb969e38245f31721596fe',
        'info_dict': {
            'id': 'QChnbPYA',
            'title': 'Où en est le couple franco-allemand ? Retrouvez Le Club Le Figaro International',
            'description': 'md5:6f47235b7e7c93b366fd8ebfa10572ac',
            'upload_date': '20230123',
            'timestamp': 1674503575,
            'duration': 3153,
            'thumbnail': r're:^https?://[^?#]+\.(?:jpeg|jpg)',
            'age_limit': 0,
            'ext': 'mp4',
        },
    }, {
        'url': 'https://video.lefigaro.fr/figaro/video/la-philosophe-nathalie-sarthou-lajus-est-linvitee-du-figaro-live/',
        'md5': 'f6df814cae53e85937621599d2967520',
        'info_dict': {
            'id': 'QJzqoNbf',
            'title': 'La philosophe Nathalie Sarthou-Lajus est l’invitée du Figaro Live',
            'description': 'md5:c586793bb72e726c83aa257f99a8c8c4',
            'upload_date': '20230217',
            'timestamp': 1676661986,
            'duration': 1558,
            'thumbnail': r're:^https?://[^?#]+\.(?:jpeg|jpg)',
            'age_limit': 0,
            'ext': 'mp4',
        },
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # Player metadata (JW Platform video id, title, poster, ...) lives in
        # the Next.js __NEXT_DATA__ payload.
        player_data = self._search_nextjs_data(
            webpage, display_id)['props']['pageProps']['initialProps']['pageData']['playerData']

        return self.url_result(
            f'jwplatform:{player_data["videoId"]}', title=player_data.get('title'),
            description=player_data.get('description'), thumbnail=player_data.get('poster'))


class LeFigaroVideoSectionIE(InfoExtractor):
    """Extractor for Le Figaro video section pages (paginated playlists).

    Section contents are fetched page-by-page from the site's GraphQL API;
    each video entry is delegated to LeFigaroVideoEmbedIE.
    """
    _VALID_URL = r'https?://video\.lefigaro\.fr/figaro/(?P<id>[\w-]+)/?(?:[#?]|$)'

    _TESTS = [{
        'url': 'https://video.lefigaro.fr/figaro/le-club-le-figaro-idees/',
        'info_dict': {
            'id': 'le-club-le-figaro-idees',
            'title': 'Le Club Le Figaro Idées',
        },
        'playlist_mincount': 14,
    }, {
        'url': 'https://video.lefigaro.fr/figaro/factu/',
        'info_dict': {
            'id': 'factu',
            'title': 'Factu',
        },
        'playlist_mincount': 519,
    }]

    # Number of videos requested per GraphQL page.
    _PAGE_SIZE = 20

    def _get_api_response(self, display_id, page_num, note=None):
        # Persisted GraphQL query; the hash in `id` identifies the query on
        # the server side, variables are passed as a JSON blob.
        return self._download_json(
            'https://api-graphql.lefigaro.fr/graphql', display_id, note=note,
            query={
                'id': 'flive-website_UpdateListPage_1fb260f996bca2d78960805ac382544186b3225f5bedb43ad08b9b8abef79af6',
                'variables': json.dumps({
                    'slug': display_id,
                    'videosLimit': self._PAGE_SIZE,
                    'sort': 'DESC',
                    'order': 'PUBLISHED_AT',
                    'page': page_num,
                }).encode(),
            })

    def _real_extract(self, url):
        display_id = self._match_id(url)
        # First page is fetched eagerly to learn the total video count.
        initial_response = self._get_api_response(display_id, page_num=1)['data']['playlist']

        def page_func(page_num):
            api_response = self._get_api_response(display_id, page_num + 1, note=f'Downloading page {page_num + 1}')

            return [self.url_result(
                video['embedUrl'], LeFigaroVideoEmbedIE, **traverse_obj(video, {
                    'title': 'name',
                    'description': 'description',
                    'thumbnail': 'thumbnailUrl',
                })) for video in api_response['data']['playlist']['jsonLd'][0]['itemListElement']]

        entries = InAdvancePagedList(
            page_func, math.ceil(initial_response['videoCount'] / self._PAGE_SIZE), self._PAGE_SIZE)

        return self.playlist_result(entries, playlist_id=display_id, playlist_title=initial_response.get('title'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/vidlii.py
yt_dlp/extractor/vidlii.py
import re

from .common import InfoExtractor
from ..networking import HEADRequest
from ..utils import (
    float_or_none,
    format_field,
    get_element_by_id,
    int_or_none,
    str_to_int,
    strip_or_none,
    unified_strdate,
    urljoin,
)


class VidLiiIE(InfoExtractor):
    """Extractor for vidlii.com watch/embed pages.

    All metadata is scraped from the watch page HTML; available qualities are
    discovered from the player config and probed with HEAD requests.
    """
    _VALID_URL = r'https?://(?:www\.)?vidlii\.com/(?:watch|embed)\?.*?\bv=(?P<id>[0-9A-Za-z_-]{11})'
    _TESTS = [{
        'url': 'https://www.vidlii.com/watch?v=tJluaH4BJ3v',
        'md5': '9bf7d1e005dfa909b6efb0a1ff5175e2',
        'info_dict': {
            'id': 'tJluaH4BJ3v',
            'ext': 'mp4',
            'title': 'Vidlii is against me',
            'description': 'md5:fa3f119287a2bfb922623b52b1856145',
            'thumbnail': 're:https://.*.jpg',
            'uploader': 'APPle5auc31995',
            'uploader_url': 'https://www.vidlii.com/user/APPle5auc31995',
            'upload_date': '20171107',
            'duration': 212,
            'view_count': int,
            'comment_count': int,
            'average_rating': float,
            'categories': ['News & Politics'],
            'tags': ['Vidlii', 'Jan', 'Videogames'],
        },
    }, {
        'url': 'https://www.vidlii.com/watch?v=zTAtaAgOLKt',
        'md5': '5778f7366aa4c569b77002f8bf6b614f',
        'info_dict': {
            'id': 'zTAtaAgOLKt',
            'ext': 'mp4',
            'title': 'FULPTUBE SUCKS.',
            'description': 'md5:087b2ca355d4c8f8f77e97c43e72d711',
            'thumbnail': 'https://www.vidlii.com/usfi/thmp/zTAtaAgOLKt.jpg',
            'uploader': 'Homicide',
            'uploader_url': 'https://www.vidlii.com/user/Homicide',
            'upload_date': '20210612',
            'duration': 89,
            'view_count': int,
            'comment_count': int,
            'average_rating': float,
            'categories': ['News & Politics'],
            'tags': ['fulp', 'tube', 'sucks', 'bad', 'fulptube'],
        },
    }, {
        'url': 'https://www.vidlii.com/embed?v=tJluaH4BJ3v&a=0',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Always extract from the watch page, even for /embed URLs.
        webpage = self._download_webpage(
            f'https://www.vidlii.com/watch?v={video_id}', video_id)

        formats = []
        # The player config contains one `src: "..."` entry per quality.
        sources = [source[1] for source in re.findall(
            r'src\s*:\s*(["\'])(?P<url>(?:https?://)?(?:(?!\1).)+)\1',
            webpage) or []]
        for source in sources:
            source = urljoin(url, source)
            # Height is encoded in the file name (e.g. ".../360.mp4");
            # the dot is escaped so only a literal ".mp4" suffix matches.
            height = int(self._search_regex(r'(\d+)\.mp4', source, 'height', default=360))
            # Probe with a HEAD request so dead variants are skipped quietly
            # (errnote=False suppresses the warning on failure).
            if self._request_webpage(
                    HEADRequest(source), video_id, f'Checking {height}p url', errnote=False):
                formats.append({
                    'url': source,
                    'format_id': f'{height}p',
                    'height': height,
                })

        title = self._search_regex(
            (r'<h1>([^<]+)</h1>', r'<title>([^<]+) - VidLii<'), webpage,
            'title')

        description = self._html_search_meta(
            ('description', 'twitter:description'), webpage,
            default=None) or strip_or_none(
            get_element_by_id('des_text', webpage))

        thumbnail = self._html_search_meta(
            'twitter:image', webpage, default=None)
        if not thumbnail:
            # Fall back to the player's `img:` config value (may be relative).
            thumbnail_path = self._search_regex(
                r'img\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage,
                'thumbnail', fatal=False, group='url')
            if thumbnail_path:
                thumbnail = urljoin(url, thumbnail_path)

        uploader = self._search_regex(
            r'<div[^>]+class=["\']wt_person[^>]+>\s*<a[^>]+\bhref=["\']/user/[^>]+>([^<]+)',
            webpage, 'uploader', fatal=False)
        uploader_url = format_field(uploader, None, 'https://www.vidlii.com/user/%s')

        upload_date = unified_strdate(self._html_search_meta(
            'datePublished', webpage, default=None) or self._search_regex(
            r'<date>([^<]+)', webpage, 'upload date', fatal=False))

        duration = int_or_none(self._html_search_meta(
            'video:duration', webpage, 'duration',
            default=None) or self._search_regex(
            r'duration\s*:\s*(\d+)', webpage, 'duration', fatal=False))

        view_count = str_to_int(self._search_regex(
            (r'<strong>([,0-9]+)</strong> views',
             r'Views\s*:\s*<strong>([,0-9]+)</strong>'),
            webpage, 'view count', fatal=False))

        comment_count = int_or_none(self._search_regex(
            (r'<span[^>]+id=["\']cmt_num[^>]+>(\d+)',
             r'Comments\s*:\s*<strong>(\d+)'),
            webpage, 'comment count', fatal=False))

        average_rating = float_or_none(self._search_regex(
            r'rating\s*:\s*([\d.]+)', webpage, 'average rating', fatal=False))

        category = self._html_search_regex(
            r'<div>Category\s*:\s*</div>\s*<div>\s*<a[^>]+>([^<]+)', webpage,
            'category', fatal=False)
        categories = [category] if category else None

        # Strip each tag once (walrus) instead of calling strip_or_none twice,
        # and drop tags that are empty after stripping.
        tags = [
            stripped for tag in re.findall(
                r'<a[^>]+\bhref=["\']/results\?.*?q=[^>]*>([^<]+)', webpage)
            if (stripped := strip_or_none(tag))
        ] or None

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'formats': formats,
            'uploader_url': uploader_url,
            'upload_date': upload_date,
            'duration': duration,
            'view_count': view_count,
            'comment_count': comment_count,
            'average_rating': average_rating,
            'categories': categories,
            'tags': tags,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/yle_areena.py
yt_dlp/extractor/yle_areena.py
from .common import InfoExtractor
from .kaltura import KalturaIE
from ..utils import (
    ExtractorError,
    int_or_none,
    parse_iso8601,
    smuggle_url,
    url_or_none,
)
from ..utils.traversal import traverse_obj


class YleAreenaIE(InfoExtractor):
    """Extractor for Yle Areena (areena.yle.fi) videos, livestreams and podcasts.

    Metadata comes from two sources: the page's JSON-LD (title with embedded
    season/episode markers) and the player preview API (media URLs, subtitles,
    age limit, timestamps). The preview data decides the delivery path:
    direct podcast media URL, live event manifest, on-demand HLS manifest,
    or a legacy Kaltura-hosted entry.
    """
    _VALID_URL = r'https?://areena\.yle\.fi/(?P<podcast>podcastit/)?(?P<id>[\d-]+)'
    _GEO_COUNTRIES = ['FI']
    _TESTS = [
        {
            'url': 'https://areena.yle.fi/1-4371942',
            'md5': 'd87e9a1e74e67e009990ddd413e426b4',
            'info_dict': {
                'id': '1-4371942',
                'ext': 'mp4',
                'title': 'Pouchit',
                'description': 'md5:01071d7056ceec375f63960f90c35366',
                'series': 'Modernit miehet',
                'season': 'Season 1',
                'season_number': 1,
                'episode': 'Episode 2',
                'episode_number': 2,
                'thumbnail': r're:https://images\.cdn\.yle\.fi/image/upload/.+\.jpg',
                'age_limit': 7,
                'release_date': '20190105',
                'release_timestamp': 1546725660,
                'duration': 1435,
            },
        },
        {
            'url': 'https://areena.yle.fi/1-2158940',
            'md5': '6369ddc5e07b5fdaeda27a495184143c',
            'info_dict': {
                'id': '1-2158940',
                'ext': 'mp4',
                'title': 'Albi haluaa vessan',
                'description': 'Albi haluaa vessan.',
                'series': 'Albi Lumiukko',
                'thumbnail': r're:https://images\.cdn\.yle\.fi/image/upload/.+\.jpg',
                'age_limit': 0,
                'release_date': '20211215',
                'release_timestamp': 1639555200,
                'duration': 319,
            },
        },
        {
            'url': 'https://areena.yle.fi/1-64829589',
            'info_dict': {
                'id': '1-64829589',
                'ext': 'mp4',
                'title': 'HKO & Mälkki & Tanner',
                'description': 'md5:b4f1b1af2c6569b33f75179a86eea156',
                'series': 'Helsingin kaupunginorkesterin konsertteja',
                'thumbnail': r're:https://images\.cdn\.yle\.fi/image/upload/.+\.jpg',
                'release_date': '20230120',
                'release_timestamp': 1674242079,
                'duration': 8004,
            },
            'params': {
                'skip_download': 'm3u8',
            },
        },
        {
            'url': 'https://areena.yle.fi/1-72251830',
            'info_dict': {
                'id': '1-72251830',
                'ext': 'mp4',
                'title': r're:Pentulive 2024 | Pentulive \d{4}-\d{2}-\d{2} \d{2}:\d{2}',
                'description': 'md5:1f118707d9093bf894a34fbbc865397b',
                'series': 'Pentulive',
                'thumbnail': r're:https://images\.cdn\.yle\.fi/image/upload/.+\.jpg',
                'live_status': 'is_live',
                'release_date': '20241025',
                'release_timestamp': 1729875600,
            },
            'params': {
                'skip_download': 'livestream',
            },
        },
        {
            'url': 'https://areena.yle.fi/podcastit/1-71022852',
            'info_dict': {
                'id': '1-71022852',
                'ext': 'mp3',
                'title': 'Värityspäivä',
                'description': 'md5:c3a02b0455ec71d32cbe09d32ec161e2',
                'series': 'Murun ja Paukun ikioma kaupunki',
                'episode': 'Episode 1',
                'episode_number': 1,
                'release_date': '20240607',
                'release_timestamp': 1717736400,
                'duration': 442,
            },
        },
    ]

    def _real_extract(self, url):
        video_id, is_podcast = self._match_valid_url(url).group('id', 'podcast')
        # JSON-LD supplies the combined title and thumbnails; missing data is
        # tolerated (default={}).
        json_ld = self._search_json_ld(self._download_webpage(url, video_id), video_id, default={})
        # Player preview API: media/manifest URLs, subtitles, timestamps, etc.
        video_data = self._download_json(
            f'https://player.api.yle.fi/v1/preview/{video_id}.json?app_id=player_static_prod&app_key=8930d72170e48303cf5f3867780d549b',
            video_id, headers={
                'origin': 'https://areena.yle.fi',
                'referer': 'https://areena.yle.fi/',
                'content-type': 'application/json',
            })['data']

        # Example title: 'K1, J2: Pouchit | Modernit miehet'
        # (K = season, J = episode; everything after '|' is the series name)
        season_number, episode_number, episode, series = self._search_regex(
            r'K(?P<season_no>\d+),\s*J(?P<episode_no>\d+):?\s*\b(?P<episode>[^|]+)\s*|\s*(?P<series>.+)',
            json_ld.get('title') or '', 'episode metadata', group=('season_no', 'episode_no', 'episode', 'series'),
            default=(None, None, None, None))
        description = traverse_obj(video_data, ('ongoing_ondemand', 'description', 'fin', {str}))

        subtitles = {}
        # Only subtitle entries with a valid URI are kept; unknown language
        # codes are grouped under 'und'.
        for sub in traverse_obj(video_data, ('ongoing_ondemand', 'subtitles', lambda _, v: url_or_none(v['uri']))):
            subtitles.setdefault(sub.get('language') or 'und', []).append({
                'url': sub['uri'],
                'ext': 'srt',
                'name': sub.get('kind'),
            })

        # Delivery-path fallback chain; order matters (podcast direct URL
        # first, then live event, then on-demand manifest, then Kaltura).
        info_dict, metadata = {}, {}
        if is_podcast and traverse_obj(video_data, ('ongoing_ondemand', 'media_url', {url_or_none})):
            metadata = video_data['ongoing_ondemand']
            info_dict['url'] = metadata['media_url']
        elif traverse_obj(video_data, ('ongoing_event', 'manifest_url', {url_or_none})):
            metadata = video_data['ongoing_event']
            metadata.pop('duration', None)  # Duration is not accurate for livestreams
            info_dict['live_status'] = 'is_live'
        elif traverse_obj(video_data, ('ongoing_ondemand', 'manifest_url', {url_or_none})):
            metadata = video_data['ongoing_ondemand']
        # XXX: Has all externally-hosted Kaltura content been moved to native hosting?
        elif kaltura_id := traverse_obj(video_data, ('ongoing_ondemand', 'kaltura', 'id', {str})):
            metadata = video_data['ongoing_ondemand']
            info_dict.update({
                '_type': 'url_transparent',
                'url': smuggle_url(f'kaltura:1955031:{kaltura_id}', {'source_url': url}),
                'ie_key': KalturaIE.ie_key(),
            })
        elif traverse_obj(video_data, ('gone', {dict})):
            self.raise_no_formats('The content is no longer available', expected=True, video_id=video_id)
            metadata = video_data['gone']
        else:
            raise ExtractorError('Unable to extract content')

        if not info_dict.get('url') and metadata.get('manifest_url'):
            info_dict['formats'], subs = self._extract_m3u8_formats_and_subtitles(
                metadata['manifest_url'], video_id, 'mp4', m3u8_id='hls')
            self._merge_subtitles(subs, target=subtitles)

        # Precedence (later keys win): JSON-LD < parsed title parts <
        # preview-API metadata < delivery-path info_dict.
        return {
            **traverse_obj(json_ld, {
                'title': 'title',
                'thumbnails': ('thumbnails', ..., {'url': 'url'}),
            }),
            'id': video_id,
            'title': episode,
            'description': description,
            'series': series,
            'season_number': (int_or_none(self._search_regex(r'Kausi (\d+)', description, 'season number', default=None))
                              or int_or_none(season_number)),
            'episode_number': int_or_none(episode_number),
            'subtitles': subtitles or None,
            **traverse_obj(metadata, {
                'title': ('title', 'fin', {str}),
                'description': ('description', 'fin', {str}),
                'series': ('series', 'title', 'fin', {str}),
                'episode_number': ('episode_number', {int_or_none}),
                'age_limit': ('content_rating', 'age_restriction', {int_or_none}),
                'release_timestamp': ('start_time', {parse_iso8601}),
                'duration': ('duration', 'duration_in_seconds', {int_or_none}),
            }),
            **info_dict,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/daum.py
yt_dlp/extractor/daum.py
import itertools
import urllib.parse

from .common import InfoExtractor
from ..utils import parse_qs


class DaumBaseIE(InfoExtractor):
    # Daum tvpot content is served by Kakao TV; extraction is delegated to
    # KakaoIE via this embed URL prefix.
    _KAKAO_EMBED_BASE = 'http://tv.kakao.com/embed/player/cliplink/'


class DaumIE(DaumBaseIE):
    """Extractor for single tvpot.daum.net / videofarm.daum.net videos."""
    _VALID_URL = r'https?://(?:(?:m\.)?tvpot\.daum\.net/v/|videofarm\.daum\.net/controller/player/VodPlayer\.swf\?vid=)(?P<id>[^?#&]+)'
    IE_NAME = 'daum.net'

    _TESTS = [{
        'url': 'http://tvpot.daum.net/v/vab4dyeDBysyBssyukBUjBz',
        'info_dict': {
            'id': 'vab4dyeDBysyBssyukBUjBz',
            'ext': 'mp4',
            'title': '마크 헌트 vs 안토니오 실바',
            'description': 'Mark Hunt vs Antonio Silva',
            'upload_date': '20131217',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'duration': 2117,
            'view_count': int,
            'comment_count': int,
            'uploader_id': '186139',
            'uploader': '콘간지',
            'timestamp': 1387310323,
        },
    }, {
        'url': 'http://m.tvpot.daum.net/v/65139429',
        'info_dict': {
            'id': '65139429',
            'ext': 'mp4',
            'title': '1297회, \'아빠 아들로 태어나길 잘 했어\' 민수, 감동의 눈물[아빠 어디가] 20150118',
            'description': 'md5:79794514261164ff27e36a21ad229fc5',
            'upload_date': '20150118',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'duration': 154,
            'view_count': int,
            'comment_count': int,
            'uploader': 'MBC 예능',
            'uploader_id': '132251',
            'timestamp': 1421604228,
        },
    }, {
        'url': 'http://tvpot.daum.net/v/07dXWRka62Y%24',
        'only_matching': True,
    }, {
        'url': 'http://videofarm.daum.net/controller/player/VodPlayer.swf?vid=vwIpVpCQsT8%24&ref=',
        'info_dict': {
            'id': 'vwIpVpCQsT8$',
            'ext': 'flv',
            'title': '01-Korean War ( Trouble on the horizon )',
            'description': 'Korean War 01\r\nTrouble on the horizon\r\n전쟁의 먹구름',
            'upload_date': '20080223',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'duration': 249,
            'view_count': int,
            'comment_count': int,
            'uploader': '까칠한 墮落始祖 황비홍님의',
            'uploader_id': '560824',
            'timestamp': 1203770745,
        },
    }, {
        # Requires dte_type=WEB (#9972)
        'url': 'http://tvpot.daum.net/v/s3794Uf1NZeZ1qMpGpeqeRU',
        'md5': 'a8917742069a4dd442516b86e7d66529',
        'info_dict': {
            'id': 's3794Uf1NZeZ1qMpGpeqeRU',
            'ext': 'mp4',
            'title': '러블리즈 - Destiny (나의 지구) (Lovelyz - Destiny)',
            'description': '러블리즈 - Destiny (나의 지구) (Lovelyz - Destiny)\r\n\r\n[쇼! 음악중심] 20160611, 507회',
            'upload_date': '20170129',
            'uploader': '쇼! 음악중심',
            'uploader_id': '2653210',
            'timestamp': 1485684628,
        },
    }]

    def _real_extract(self, url):
        video_id = urllib.parse.unquote(self._match_id(url))
        # Non-numeric ids need the '@my' suffix for the Kakao embed player.
        if not video_id.isdigit():
            video_id += '@my'
        return self.url_result(
            self._KAKAO_EMBED_BASE + video_id, 'Kakao', video_id)


class DaumClipIE(DaumBaseIE):
    """Extractor for tvpot.daum.net clip view pages (clipid=...)."""
    _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/(?:clip/ClipView.(?:do|tv)|mypot/View.do)\?.*?clipid=(?P<id>\d+)'
    IE_NAME = 'daum.net:clip'
    _URL_TEMPLATE = 'http://tvpot.daum.net/clip/ClipView.do?clipid=%s'

    _TESTS = [{
        'url': 'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690',
        'info_dict': {
            'id': '52554690',
            'ext': 'mp4',
            'title': 'DOTA 2GETHER 시즌2 6회 - 2부',
            'description': 'DOTA 2GETHER 시즌2 6회 - 2부',
            'upload_date': '20130831',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'duration': 3868,
            'view_count': int,
            'uploader': 'GOMeXP',
            'uploader_id': '6667',
            'timestamp': 1377911092,
        },
    }, {
        'url': 'http://m.tvpot.daum.net/clip/ClipView.tv?clipid=54999425',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Defer to the playlist/user extractors when their URL patterns also
        # match (they handle the clipid themselves via _check_clip).
        return False if DaumPlaylistIE.suitable(url) or DaumUserIE.suitable(url) else super().suitable(url)

    def _real_extract(self, url):
        video_id = self._match_id(url)
        return self.url_result(
            self._KAKAO_EMBED_BASE + video_id, 'Kakao', video_id)


class DaumListIE(InfoExtractor):  # XXX: Conventionally, base classes should end with BaseIE/InfoExtractor
    def _get_entries(self, list_id, list_id_type):
        """Page through the GetClipInfo API, returning (list name, entries).

        list_id_type is the query parameter name ('playlistid' or 'ownerid').
        """
        name = None
        entries = []
        for pagenum in itertools.count(1):
            list_info = self._download_json(
                f'http://tvpot.daum.net/mypot/json/GetClipInfo.do?size=48&init=true&order=date&page={pagenum}&{list_id_type}={list_id}',
                list_id, f'Downloading list info - {pagenum}')

            entries.extend([
                self.url_result(
                    'http://tvpot.daum.net/v/{}'.format(clip['vid']))
                for clip in list_info['clip_list']
            ])

            if not name:
                # Playlist responses carry 'playlist_bean', user ("pot")
                # responses carry 'potInfo'.
                name = list_info.get('playlist_bean', {}).get('name') or \
                    list_info.get('potInfo', {}).get('name')

            if not list_info.get('has_more'):
                break

        return name, entries

    def _check_clip(self, url, list_id):
        # If the URL also carries a clipid and --no-playlist is in effect,
        # extract just that single clip instead of the whole list.
        query_dict = parse_qs(url)
        if 'clipid' in query_dict:
            clip_id = query_dict['clipid'][0]
            if not self._yes_playlist(list_id, clip_id):
                return self.url_result(DaumClipIE._URL_TEMPLATE % clip_id, 'DaumClip')


class DaumPlaylistIE(DaumListIE):
    """Extractor for tvpot.daum.net playlists (playlistid=...)."""
    _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/mypot/(?:View\.do|Top\.tv)\?.*?playlistid=(?P<id>[0-9]+)'
    IE_NAME = 'daum.net:playlist'
    _URL_TEMPLATE = 'http://tvpot.daum.net/mypot/View.do?playlistid=%s'

    _TESTS = [{
        'note': 'Playlist url with clipid',
        'url': 'http://tvpot.daum.net/mypot/View.do?playlistid=6213966&clipid=73806844',
        'info_dict': {
            'id': '6213966',
            'title': 'Woorissica Official',
        },
        'playlist_mincount': 181,
    }, {
        'note': 'Playlist url with clipid - noplaylist',
        'url': 'http://tvpot.daum.net/mypot/View.do?playlistid=6213966&clipid=73806844',
        'info_dict': {
            'id': '73806844',
            'ext': 'mp4',
            'title': '151017 Airport',
            'upload_date': '20160117',
        },
        'params': {
            'noplaylist': True,
            'skip_download': True,
        },
    }]

    @classmethod
    def suitable(cls, url):
        # User URLs may also contain playlistid; DaumUserIE takes precedence.
        return False if DaumUserIE.suitable(url) else super().suitable(url)

    def _real_extract(self, url):
        list_id = self._match_id(url)

        clip_result = self._check_clip(url, list_id)
        if clip_result:
            return clip_result

        name, entries = self._get_entries(list_id, 'playlistid')

        return self.playlist_result(entries, list_id, name)


class DaumUserIE(DaumListIE):
    """Extractor for tvpot.daum.net user ("pot") pages (ownerid=...)."""
    _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/mypot/(?:View|Top)\.(?:do|tv)\?.*?ownerid=(?P<id>[0-9a-zA-Z]+)'
    IE_NAME = 'daum.net:user'

    _TESTS = [{
        'url': 'http://tvpot.daum.net/mypot/View.do?ownerid=o2scDLIVbHc0',
        'info_dict': {
            'id': 'o2scDLIVbHc0',
            'title': '마이 리틀 텔레비전',
        },
        'playlist_mincount': 213,
    }, {
        'url': 'http://tvpot.daum.net/mypot/View.do?ownerid=o2scDLIVbHc0&clipid=73801156',
        'info_dict': {
            'id': '73801156',
            'ext': 'mp4',
            'title': '[미공개] 김구라, 오만석이 부릅니다 \'오케피\' - 마이 리틀 텔레비전 20160116',
            'upload_date': '20160117',
            'description': 'md5:5e91d2d6747f53575badd24bd62b9f36',
        },
        'params': {
            'noplaylist': True,
            'skip_download': True,
        },
    }, {
        'note': 'Playlist url has ownerid and playlistid, playlistid takes precedence',
        'url': 'http://tvpot.daum.net/mypot/View.do?ownerid=o2scDLIVbHc0&playlistid=6196631',
        'info_dict': {
            'id': '6196631',
            'title': '마이 리틀 텔레비전 - 20160109',
        },
        'playlist_count': 11,
    }, {
        'url': 'http://tvpot.daum.net/mypot/Top.do?ownerid=o2scDLIVbHc0',
        'only_matching': True,
    }, {
        'url': 'http://m.tvpot.daum.net/mypot/Top.tv?ownerid=45x1okb1If50&playlistid=3569733',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        list_id = self._match_id(url)

        clip_result = self._check_clip(url, list_id)
        if clip_result:
            return clip_result

        # A playlistid in the query wins over the owner id.
        query_dict = parse_qs(url)
        if 'playlistid' in query_dict:
            playlist_id = query_dict['playlistid'][0]
            return self.url_result(DaumPlaylistIE._URL_TEMPLATE % playlist_id, 'DaumPlaylist')

        name, entries = self._get_entries(list_id, 'ownerid')

        return self.playlist_result(entries, list_id, name)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/teachingchannel.py
yt_dlp/extractor/teachingchannel.py
from .common import InfoExtractor


class TeachingChannelIE(InfoExtractor):
    """Teaching Channel video pages; the media itself is hosted on JW Platform."""
    _WORKING = False
    _VALID_URL = r'https?://(?:www\.)?teachingchannel\.org/videos?/(?P<id>[^/?&#]+)'
    _TEST = {
        'url': 'https://www.teachingchannel.org/videos/teacher-teaming-evolution',
        'info_dict': {
            'id': '3swwlzkT',
            'ext': 'mp4',
            'title': 'A History of Teaming',
            'description': 'md5:2a9033db8da81f2edffa4c99888140b3',
            'duration': 422,
            'upload_date': '20170316',
            'timestamp': 1489691297,
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': ['JWPlatform'],
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # The JW Platform media id shows up either as a data attribute or as
        # part of the player element's DOM id.
        media_id = self._search_regex(
            r'(?:data-mid=["\']|id=["\']jw-video-player-)([a-zA-Z0-9]{8})',
            webpage, 'media id')
        return self.url_result(f'jwplatform:{media_id}', 'JWPlatform', media_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/gamestar.py
yt_dlp/extractor/gamestar.py
from .common import InfoExtractor
from ..utils import (
    int_or_none,
    remove_end,
)


class GameStarIE(InfoExtractor):
    """Extractor for gamestar.de / gamepro.de video pages.

    Metadata comes from the page's JSON-LD VideoObject; the actual media URL
    is a fixed getVideoUrl.cfm endpoint parameterized by the video id.
    """
    _VALID_URL = r'https?://(?:www\.)?game(?P<site>pro|star)\.de/videos/.*,(?P<id>[0-9]+)\.html'
    _TESTS = [{
        'url': 'http://www.gamestar.de/videos/trailer,3/hobbit-3-die-schlacht-der-fuenf-heere,76110.html',
        'md5': 'ee782f1f8050448c95c5cacd63bc851c',
        'info_dict': {
            'id': '76110',
            'ext': 'mp4',
            'title': 'Hobbit 3: Die Schlacht der Fünf Heere - Teaser-Trailer zum dritten Teil',
            'description': 'Der Teaser-Trailer zu Hobbit 3: Die Schlacht der Fünf Heere zeigt einige Szenen aus dem dritten Teil der Saga und kündigt den...',
            'thumbnail': r're:^https?://.*\.jpg$',
            'timestamp': 1406542380,
            'upload_date': '20140728',
            'duration': 17,
        },
    }, {
        'url': 'http://www.gamepro.de/videos/top-10-indie-spiele-fuer-nintendo-switch-video-tolle-nindies-games-zum-download,95316.html',
        'only_matching': True,
    }, {
        'url': 'http://www.gamestar.de/videos/top-10-indie-spiele-fuer-nintendo-switch-video-tolle-nindies-games-zum-download,95316.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        # 'site' distinguishes gamepro vs gamestar (used to strip the title suffix).
        site = mobj.group('site')
        video_id = mobj.group('id')

        webpage = self._download_webpage(url, video_id)

        # TODO: there are multiple ld+json objects in the webpage,
        # while _search_json_ld finds only the first one
        json_ld = self._parse_json(self._search_regex(
            r'(?s)<script[^>]+type=(["\'])application/ld\+json\1[^>]*>(?P<json_ld>[^<]+VideoObject[^<]+)</script>',
            webpage, 'JSON-LD', group='json_ld'), video_id)
        info_dict = self._json_ld(json_ld, video_id)
        # Titles end in ' - GamePro' / ' - GameStar'; strip that suffix.
        info_dict['title'] = remove_end(
            info_dict['title'], f' - Game{site.title()}')

        view_count = int_or_none(json_ld.get('interactionCount'))
        comment_count = int_or_none(self._html_search_regex(
            r'<span>Kommentare</span>\s*<span[^>]+class=["\']count[^>]+>\s*\(\s*([0-9]+)',
            webpage, 'comment count', fatal=False))

        info_dict.update({
            'id': video_id,
            'url': 'http://gamestar.de/_misc/videos/portal/getVideoUrl.cfm?premium=0&videoId=' + video_id,
            'ext': 'mp4',
            'view_count': view_count,
            'comment_count': comment_count,
        })

        return info_dict
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ndr.py
yt_dlp/extractor/ndr.py
import re import urllib.parse from .common import InfoExtractor from ..utils import ( ExtractorError, determine_ext, int_or_none, merge_dicts, parse_iso8601, qualities, try_get, urljoin, ) class NDRBaseIE(InfoExtractor): def _real_extract(self, url): mobj = self._match_valid_url(url) display_id = next(group for group in mobj.groups() if group) webpage = self._download_webpage(url, display_id) return self._extract_embed(webpage, display_id, url) class NDRIE(NDRBaseIE): IE_NAME = 'ndr' IE_DESC = 'NDR.de - Norddeutscher Rundfunk' _VALID_URL = r'https?://(?:\w+\.)*ndr\.de/(?:[^/]+/)*(?P<id>[^/?#]+),[\da-z]+\.html' _TESTS = [{ # httpVideo, same content id 'url': 'http://www.ndr.de/fernsehen/Party-Poette-und-Parade,hafengeburtstag988.html', 'md5': '6515bc255dc5c5f8c85bbc38e035a659', 'info_dict': { 'id': 'hafengeburtstag988', 'display_id': 'Party-Poette-und-Parade', 'ext': 'mp4', 'title': 'Party, Pötte und Parade', 'description': 'md5:ad14f9d2f91d3040b6930c697e5f6b4c', 'uploader': 'ndrtv', 'timestamp': 1431255671, 'upload_date': '20150510', 'duration': 3498, }, 'params': { 'skip_download': True, }, 'expected_warnings': ['Unable to download f4m manifest'], }, { # httpVideo, different content id 'url': 'http://www.ndr.de/sport/fussball/40-Osnabrueck-spielt-sich-in-einen-Rausch,osna270.html', 'md5': '1043ff203eab307f0c51702ec49e9a71', 'info_dict': { 'id': 'osna272', 'display_id': '40-Osnabrueck-spielt-sich-in-einen-Rausch', 'ext': 'mp4', 'title': 'Osnabrück - Wehen Wiesbaden: Die Highlights', 'description': 'md5:32e9b800b3d2d4008103752682d5dc01', 'uploader': 'ndrtv', 'timestamp': 1442059200, 'upload_date': '20150912', 'duration': 510, }, 'params': { 'skip_download': True, }, 'skip': 'No longer available', }, { # httpAudio, same content id 'url': 'http://www.ndr.de/info/La-Valette-entgeht-der-Hinrichtung,audio51535.html', 'md5': 'bb3cd38e24fbcc866d13b50ca59307b8', 'info_dict': { 'id': 'audio51535', 'display_id': 'La-Valette-entgeht-der-Hinrichtung', 'ext': 'mp3', 'title': 'La 
Valette entgeht der Hinrichtung', 'description': 'md5:22f9541913a40fe50091d5cdd7c9f536', 'uploader': 'ndrinfo', 'timestamp': 1631711863, 'upload_date': '20210915', 'duration': 884, }, 'params': { 'skip_download': True, }, }, { # with subtitles 'url': 'https://www.ndr.de/fernsehen/sendungen/extra_3/extra-3-Satiremagazin-mit-Christian-Ehring,sendung1091858.html', 'info_dict': { 'id': 'extra18674', 'display_id': 'extra-3-Satiremagazin-mit-Christian-Ehring', 'ext': 'mp4', 'title': 'Extra 3 vom 11.11.2020 mit Christian Ehring', 'description': 'md5:700f6de264010585012a72f97b0ac0c9', 'uploader': 'ndrtv', 'upload_date': '20201207', 'timestamp': 1614349457, 'duration': 1749, 'subtitles': { 'de': [{ 'ext': 'ttml', 'url': r're:^https://www\.ndr\.de.+', }], }, }, 'params': { 'skip_download': True, }, 'expected_warnings': ['Unable to download f4m manifest'], }, { 'url': 'https://www.ndr.de/Fettes-Brot-Ferris-MC-und-Thees-Uhlmann-live-on-stage,festivalsommer116.html', 'only_matching': True, }] def _extract_embed(self, webpage, display_id, url): embed_url = ( self._html_search_meta( 'embedURL', webpage, 'embed URL', default=None) or self._search_regex( r'\bembedUrl["\']\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 'embed URL', group='url', default=None) or self._search_regex( r'\bvar\s*sophoraID\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 'embed URL', group='url', default='')) # some more work needed if we only found sophoraID if re.match(r'^[a-z]+\d+$', embed_url): # get the initial part of the url path,. eg /panorama/archiv/2022/ parsed_url = urllib.parse.urlparse(url) path = self._search_regex(rf'(.+/){display_id}', parsed_url.path or '', 'embed URL', default='') # find tell-tale image with the actual ID ndr_id = self._search_regex(rf'{path}([a-z]+\d+)(?!\.)\b', webpage, 'embed URL', default=None) # or try to use special knowledge! 
NDR_INFO_URL_TPL = 'https://www.ndr.de/info/%s-player.html' embed_url = f'ndr:{ndr_id}' if ndr_id else NDR_INFO_URL_TPL % (embed_url, ) if not embed_url: raise ExtractorError('Unable to extract embedUrl') description = self._search_regex( r'<p[^>]+itemprop="description">([^<]+)</p>', webpage, 'description', default=None) or self._og_search_description(webpage) timestamp = parse_iso8601( self._search_regex( (r'<span[^>]+itemprop="(?:datePublished|uploadDate)"[^>]+content="(?P<cont>[^"]+)"', r'\bvar\s*pdt\s*=\s*(?P<q>["\'])(?P<cont>(?:(?!(?P=q)).)+)(?P=q)'), webpage, 'upload date', group='cont', default=None)) info = self._search_json_ld(webpage, display_id, default={}) return merge_dicts({ '_type': 'url_transparent', 'url': embed_url, 'display_id': display_id, 'description': description, 'timestamp': timestamp, }, info) class NJoyIE(NDRBaseIE): IE_NAME = 'njoy' IE_DESC = 'N-JOY' _VALID_URL = r'https?://(?:www\.)?n-joy\.de/(?:[^/]+/)*(?:(?P<display_id>[^/?#]+),)?(?P<id>[\da-z]+)\.html' _TESTS = [{ # httpVideo, same content id 'url': 'http://www.n-joy.de/entertainment/comedy/comedy_contest/Benaissa-beim-NDR-Comedy-Contest,comedycontest2480.html', 'md5': 'cb63be60cd6f9dd75218803146d8dc67', 'info_dict': { 'id': 'comedycontest2480', 'display_id': 'Benaissa-beim-NDR-Comedy-Contest', 'ext': 'mp4', 'title': 'Benaissa beim NDR Comedy Contest', 'description': 'md5:f057a6c4e1c728b10d33b5ffd36ddc39', 'uploader': 'ndrtv', 'upload_date': '20141129', 'duration': 654, }, 'params': { 'skip_download': True, }, 'skip': 'No longer available', }, { # httpVideo, different content id 'url': 'http://www.n-joy.de/musik/Das-frueheste-DJ-Set-des-Nordens-live-mit-Felix-Jaehn-,felixjaehn168.html', 'md5': '417660fffa90e6df2fda19f1b40a64d8', 'info_dict': { 'id': 'livestream283', 'display_id': 'Das-frueheste-DJ-Set-des-Nordens-live-mit-Felix-Jaehn-', 'ext': 'mp3', 'title': 'Das frueheste DJ Set des Nordens live mit Felix Jaehn', 'description': 'md5:681698f527b8601e511e7b79edde7d2c', 'uploader': 
'njoy', 'upload_date': '20210830', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.n-joy.de/radio/webradio/morningshow209.html', 'only_matching': True, }] def _extract_embed(self, webpage, display_id, url=None): # find tell-tale URL with the actual ID, or ... video_id = self._search_regex( (r'''\bsrc\s*=\s*["']?(?:/\w+)+/([a-z]+\d+)(?!\.)\b''', r'<iframe[^>]+id="pp_([\da-z]+)"'), webpage, 'NDR id', default=None) description = ( self._html_search_meta('description', webpage) or self._search_regex( r'<div[^>]+class="subline"[^>]*>[^<]+</div>\s*<p>([^<]+)</p>', webpage, 'description', fatal=False)) return { '_type': 'url_transparent', 'ie_key': 'NDREmbedBase', 'url': f'ndr:{video_id}', 'display_id': display_id, 'description': description, 'title': display_id.replace('-', ' ').strip(), } class NDREmbedBaseIE(InfoExtractor): # XXX: Conventionally, Concrete class names do not end in BaseIE IE_NAME = 'ndr:embed:base' _VALID_URL = r'(?:ndr:(?P<id_s>[\da-z]+)|https?://www\.ndr\.de/(?P<id>[\da-z]+)-ppjson\.json)' _TESTS = [{ 'url': 'ndr:soundcheck3366', 'only_matching': True, }, { 'url': 'http://www.ndr.de/soundcheck3366-ppjson.json', 'only_matching': True, }] def _real_extract(self, url): mobj = self._match_valid_url(url) video_id = mobj.group('id') or mobj.group('id_s') ppjson = self._download_json( f'http://www.ndr.de/{video_id}-ppjson.json', video_id) playlist = ppjson['playlist'] formats = [] quality_key = qualities(('xs', 's', 'm', 'l', 'xl')) for format_id, f in playlist.items(): src = f.get('src') if not src: continue ext = determine_ext(src, None) if ext == 'f4m': formats.extend(self._extract_f4m_formats( src + '?hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id, f4m_id='hds', fatal=False)) elif ext == 'm3u8': formats.extend(self._extract_m3u8_formats( src, video_id, 'mp4', m3u8_id='hls', entry_protocol='m3u8_native', fatal=False)) else: quality = f.get('quality') ff = { 'url': src, 'format_id': quality or format_id, 'quality': quality_key(quality), } 
type_ = f.get('type') if type_ and type_.split('/')[0] == 'audio': ff['vcodec'] = 'none' ff['ext'] = ext or 'mp3' formats.append(ff) config = playlist['config'] live = playlist.get('config', {}).get('streamType') in ['httpVideoLive', 'httpAudioLive'] title = config['title'] uploader = ppjson.get('config', {}).get('branding') upload_date = ppjson.get('config', {}).get('publicationDate') duration = int_or_none(config.get('duration')) thumbnails = [] poster = try_get(config, lambda x: x['poster'], dict) or {} for thumbnail_id, thumbnail in poster.items(): thumbnail_url = urljoin(url, thumbnail.get('src')) if not thumbnail_url: continue thumbnails.append({ 'id': thumbnail.get('quality') or thumbnail_id, 'url': thumbnail_url, 'preference': quality_key(thumbnail.get('quality')), }) subtitles = {} tracks = config.get('tracks') if tracks and isinstance(tracks, list): for track in tracks: if not isinstance(track, dict): continue track_url = urljoin(url, track.get('src')) if not track_url: continue subtitles.setdefault(track.get('srclang') or 'de', []).append({ 'url': track_url, 'ext': 'ttml', }) return { 'id': video_id, 'title': title, 'is_live': live, 'uploader': uploader if uploader != '-' else None, 'upload_date': upload_date[0:8] if upload_date else None, 'duration': duration, 'thumbnails': thumbnails, 'formats': formats, 'subtitles': subtitles, } class NDREmbedIE(NDREmbedBaseIE): # XXX: Do not subclass from concrete IE IE_NAME = 'ndr:embed' _VALID_URL = r'https?://(?:\w+\.)*ndr\.de/(?:[^/]+/)*(?P<id>[\da-z]+)-(?:(?:ard)?player|externalPlayer)\.html' _TESTS = [{ 'url': 'http://www.ndr.de/fernsehen/sendungen/ndr_aktuell/ndraktuell28488-player.html', 'md5': '8b9306142fe65bbdefb5ce24edb6b0a9', 'info_dict': { 'id': 'ndraktuell28488', 'ext': 'mp4', 'title': 'Norddeutschland begrüßt Flüchtlinge', 'is_live': False, 'uploader': 'ndrtv', 'upload_date': '20150907', 'duration': 132, }, 'skip': 'No longer available', }, { 'url': 
'http://www.ndr.de/ndr2/events/soundcheck/soundcheck3366-player.html', 'md5': '002085c44bae38802d94ae5802a36e78', 'info_dict': { 'id': 'soundcheck3366', 'ext': 'mp4', 'title': 'Ella Henderson braucht Vergleiche nicht zu scheuen', 'is_live': False, 'uploader': 'ndr2', 'upload_date': '20150912', 'duration': 3554, }, 'params': { 'skip_download': True, }, 'skip': 'No longer available', }, { 'url': 'http://www.ndr.de/info/audio51535-player.html', 'md5': 'bb3cd38e24fbcc866d13b50ca59307b8', 'info_dict': { 'id': 'audio51535', 'ext': 'mp3', 'title': 'La Valette entgeht der Hinrichtung', 'is_live': False, 'uploader': 'ndrinfo', 'upload_date': '20210915', 'duration': 884, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.ndr.de/fernsehen/sendungen/visite/visite11010-externalPlayer.html', 'md5': 'ae57f80511c1e1f2fd0d0d3d31aeae7c', 'info_dict': { 'id': 'visite11010', 'ext': 'mp4', 'title': 'Visite - die ganze Sendung', 'is_live': False, 'uploader': 'ndrtv', 'upload_date': '20150902', 'duration': 3525, }, 'params': { 'skip_download': True, }, 'skip': 'No longer available', }, { # httpVideoLive 'url': 'http://www.ndr.de/fernsehen/livestream/livestream217-externalPlayer.html', 'info_dict': { 'id': 'livestream217', 'ext': 'mp4', 'title': r're:^NDR Fernsehen Niedersachsen \d{4}-\d{2}-\d{2} \d{2}:\d{2}$', 'is_live': True, 'upload_date': '20210409', 'uploader': 'ndrtv', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.ndr.de/ndrkultur/audio255020-player.html', 'only_matching': True, }, { 'url': 'http://www.ndr.de/fernsehen/sendungen/nordtour/nordtour7124-player.html', 'only_matching': True, }, { 'url': 'http://www.ndr.de/kultur/film/videos/videoimport10424-player.html', 'only_matching': True, }, { 'url': 'http://www.ndr.de/fernsehen/sendungen/hamburg_journal/hamj43006-player.html', 'only_matching': True, }, { 'url': 'http://www.ndr.de/fernsehen/sendungen/weltbilder/weltbilder4518-player.html', 'only_matching': True, }, { 'url': 
'http://www.ndr.de/fernsehen/doku952-player.html', 'only_matching': True, }] class NJoyEmbedIE(NDREmbedBaseIE): # XXX: Do not subclass from concrete IE IE_NAME = 'njoy:embed' _VALID_URL = r'https?://(?:www\.)?n-joy\.de/(?:[^/]+/)*(?P<id>[\da-z]+)-(?:player|externalPlayer)_[^/]+\.html' _TESTS = [{ # httpVideo 'url': 'http://www.n-joy.de/events/reeperbahnfestival/doku948-player_image-bc168e87-5263-4d6d-bd27-bb643005a6de_theme-n-joy.html', 'md5': '8483cbfe2320bd4d28a349d62d88bd74', 'info_dict': { 'id': 'doku948', 'ext': 'mp4', 'title': 'Zehn Jahre Reeperbahn Festival - die Doku', 'is_live': False, 'upload_date': '20200826', 'duration': 1011, }, 'expected_warnings': ['Unable to download f4m manifest'], }, { # httpAudio 'url': 'http://www.n-joy.de/news_wissen/stefanrichter100-player_image-d5e938b1-f21a-4b9a-86b8-aaba8bca3a13_theme-n-joy.html', 'md5': 'd989f80f28ac954430f7b8a48197188a', 'info_dict': { 'id': 'stefanrichter100', 'ext': 'mp3', 'title': 'Interview mit einem Augenzeugen', 'is_live': False, 'uploader': 'njoy', 'upload_date': '20150909', 'duration': 140, }, 'params': { 'skip_download': True, }, 'skip': 'No longer available', }, { # httpAudioLive, no explicit ext 'url': 'http://www.n-joy.de/news_wissen/webradioweltweit100-player_image-3fec0484-2244-4565-8fb8-ed25fd28b173_theme-n-joy.html', 'info_dict': { 'id': 'webradioweltweit100', 'ext': 'mp3', 'title': r're:^N-JOY Weltweit \d{4}-\d{2}-\d{2} \d{2}:\d{2}$', 'is_live': True, 'uploader': 'njoy', 'upload_date': '20210830', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.n-joy.de/musik/dockville882-player_image-3905259e-0803-4764-ac72-8b7de077d80a_theme-n-joy.html', 'only_matching': True, }, { 'url': 'http://www.n-joy.de/radio/sendungen/morningshow/urlaubsfotos190-player_image-066a5df1-5c95-49ec-a323-941d848718db_theme-n-joy.html', 'only_matching': True, }, { 'url': 'http://www.n-joy.de/entertainment/comedy/krudetv290-player_image-ab261bfe-51bf-4bf3-87ba-c5122ee35b3d_theme-n-joy.html', 
'only_matching': True, }]
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/agalega.py
yt_dlp/extractor/agalega.py
import json import time from .common import InfoExtractor from ..utils import jwt_decode_hs256, url_or_none from ..utils.traversal import traverse_obj class AGalegaBaseIE(InfoExtractor): _access_token = None @staticmethod def _jwt_is_expired(token): return jwt_decode_hs256(token)['exp'] - time.time() < 120 def _refresh_access_token(self, video_id): AGalegaBaseIE._access_token = self._download_json( 'https://www.agalega.gal/api/fetch-api/jwt/token', video_id, note='Downloading access token', data=json.dumps({ 'username': None, 'password': None, 'client': 'crtvg', 'checkExistsCookies': False, }).encode())['access'] def _call_api(self, endpoint, display_id, note, fatal=True, query=None): if not AGalegaBaseIE._access_token or self._jwt_is_expired(AGalegaBaseIE._access_token): self._refresh_access_token(endpoint) return self._download_json( f'https://api-agalega.interactvty.com/api/2.0/contents/{endpoint}', display_id, note=note, fatal=fatal, query=query, headers={'Authorization': f'jwtok {AGalegaBaseIE._access_token}'}) class AGalegaIE(AGalegaBaseIE): IE_NAME = 'agalega:videos' _VALID_URL = r'https?://(?:www\.)?agalega\.gal/videos/(?:detail/)?(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://www.agalega.gal/videos/288664-lr-ninguencheconta', 'md5': '04533a66c5f863d08dd9724b11d1c223', 'info_dict': { 'id': '288664', 'title': 'Roberto e Ángel Martín atenden consultas dos espectadores', 'description': 'O cómico ademais fai un repaso dalgúns momentos da súa traxectoria profesional', 'thumbnail': 'https://crtvg-bucket.flumotion.cloud/content_cards/2ef32c3b9f6249d9868fd8f11d389d3d.png', 'ext': 'mp4', }, }, { 'url': 'https://www.agalega.gal/videos/detail/296152-pulso-activo-7', 'md5': '26df7fdcf859f38ad92d837279d6b56d', 'info_dict': { 'id': '296152', 'title': 'Pulso activo | 18-11-2025', 'description': 'Anxo, Noemí, Silvia e Estrella comparten as sensacións da clase de Eddy.', 'thumbnail': 'https://crtvg-bucket.flumotion.cloud/content_cards/a6bb7da6c8994b82bf961ac6cad1707b.png', 
'ext': 'mp4', }, }] def _real_extract(self, url): video_id = self._match_id(url) content_data = self._call_api( f'content/{video_id}/', video_id, note='Downloading content data', fatal=False, query={ 'optional_fields': 'image,is_premium,short_description,has_subtitle', }) resource_data = self._call_api( f'content_resources/{video_id}/', video_id, note='Downloading resource data', query={ 'optional_fields': 'media_url', }) formats = [] subtitles = {} for m3u8_url in traverse_obj(resource_data, ('results', ..., 'media_url', {url_or_none})): fmts, subs = self._extract_m3u8_formats_and_subtitles( m3u8_url, video_id, ext='mp4', m3u8_id='hls') formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, **traverse_obj(content_data, { 'title': ('name', {str}), 'description': (('description', 'short_description'), {str}, any), 'thumbnail': ('image', {url_or_none}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/bild.py
yt_dlp/extractor/bild.py
from .common import InfoExtractor from ..utils import ( int_or_none, traverse_obj, unescapeHTML, ) class BildIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?bild\.de/(?:[^/]+/)+(?P<display_id>[^/]+)-(?P<id>\d+)(?:,auto=true)?\.bild\.html' IE_DESC = 'Bild.de' _TESTS = [{ 'note': 'static MP4 only', 'url': 'http://www.bild.de/video/clip/apple-ipad-air/das-koennen-die-neuen-ipads-38184146.bild.html', 'md5': 'dd495cbd99f2413502a1713a1156ac8a', 'info_dict': { 'id': '38184146', 'ext': 'mp4', 'title': 'Das können die neuen iPads', 'description': 'md5:a4058c4fa2a804ab59c00d7244bbf62f', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 196, }, }, { 'note': 'static MP4 and HLS', 'url': 'https://www.bild.de/video/clip/news-ausland/deftiger-abgang-vom-10m-turm-bademeister-sorgt-fuer-skandal-85158620.bild.html', 'md5': 'fb0ed4f09c495d4ba7ce2eee0bb90de1', 'info_dict': { 'id': '85158620', 'ext': 'mp4', 'title': 'Der Sprungturm-Skandal', 'description': 'md5:709b543c24dc31bbbffee73bccda34ad', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 69, }, }] def _real_extract(self, url): video_id = self._match_id(url) video_data = self._download_json( url.split('.bild.html')[0] + ',view=json.bild.html', video_id) formats = [] for src in traverse_obj(video_data, ('clipList', 0, 'srces', lambda _, v: v['src'])): src_type = src.get('type') if src_type == 'application/x-mpegURL': formats.extend( self._extract_m3u8_formats( src['src'], video_id, 'mp4', m3u8_id='hls', fatal=False)) elif src_type == 'video/mp4': formats.append({'url': src['src'], 'format_id': 'http-mp4'}) else: self.report_warning(f'Skipping unsupported format type: "{src_type}"') return { 'id': video_id, 'title': unescapeHTML(video_data['title']).strip(), 'description': unescapeHTML(video_data.get('description')), 'formats': formats, 'thumbnail': video_data.get('poster'), 'duration': int_or_none(video_data.get('durationSec')), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/eyedotv.py
yt_dlp/extractor/eyedotv.py
from .common import InfoExtractor from ..utils import ( ExtractorError, parse_duration, xpath_text, ) class EyedoTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?eyedo\.tv/[^/]+/(?:#!/)?Live/Detail/(?P<id>[0-9]+)' _TEST = { 'url': 'https://www.eyedo.tv/en-US/#!/Live/Detail/16301', 'md5': 'ba14f17995cdfc20c36ba40e21bf73f7', 'info_dict': { 'id': '16301', 'ext': 'mp4', 'title': 'Journée du conseil scientifique de l\'Afnic 2015', 'description': 'md5:4abe07293b2f73efc6e1c37028d58c98', 'uploader': 'Afnic Live', 'uploader_id': '8023', }, } _ROOT_URL = 'http://live.eyedo.net:1935/' def _real_extract(self, url): video_id = self._match_id(url) video_data = self._download_xml(f'http://eyedo.tv/api/live/GetLive/{video_id}', video_id) def _add_ns(path): return self._xpath_ns(path, 'http://schemas.datacontract.org/2004/07/EyeDo.Core.Implementation.Web.ViewModels.Api') title = xpath_text(video_data, _add_ns('Titre'), 'title', True) state_live_code = xpath_text(video_data, _add_ns('StateLiveCode'), 'title', True) if state_live_code == 'avenir': raise ExtractorError( f'{self.IE_NAME} said: We\'re sorry, but this video is not yet available.', expected=True) is_live = state_live_code == 'live' m3u8_url = None # http://eyedo.tv/Content/Html5/Scripts/html5view.js if is_live: if xpath_text(video_data, 'Cdn') == 'true': m3u8_url = f'http://rrr.sz.xlcdn.com/?account=eyedo&file=A{video_id}&type=live&service=wowza&protocol=http&output=playlist.m3u8' else: m3u8_url = self._ROOT_URL + f'w/{video_id}/eyedo_720p/playlist.m3u8' else: m3u8_url = self._ROOT_URL + f'replay-w/{video_id}/mp4:{video_id}.mp4/playlist.m3u8' return { 'id': video_id, 'title': title, 'formats': self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', 'm3u8_native'), 'description': xpath_text(video_data, _add_ns('Description')), 'duration': parse_duration(xpath_text(video_data, _add_ns('Duration'))), 'uploader': xpath_text(video_data, _add_ns('Createur')), 'uploader_id': xpath_text(video_data, _add_ns('CreateurId')), 
'chapter': xpath_text(video_data, _add_ns('ChapitreTitre')), 'chapter_id': xpath_text(video_data, _add_ns('ChapitreId')), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/streamcz.py
yt_dlp/extractor/streamcz.py
import json from .common import InfoExtractor from ..utils import ( float_or_none, int_or_none, parse_codecs, traverse_obj, urljoin, ) class StreamCZIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?(?:stream|televizeseznam)\.cz/[^?#]+/(?P<display_id>[^?#]+)-(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://www.televizeseznam.cz/video/lajna/buh-57953890', 'md5': '40c41ade1464a390a0b447e333df4239', 'info_dict': { 'id': '57953890', 'ext': 'mp4', 'title': 'Bůh', 'display_id': 'buh', 'description': 'md5:8f5f09b9b7bc67df910486cdd88f7165', 'duration': 1369.6, 'view_count': int, }, }, { 'url': 'https://www.stream.cz/kdo-to-mluvi/kdo-to-mluvi-velke-odhaleni-prinasi-novy-porad-uz-od-25-srpna-64087937', 'md5': '41fd358000086a1ccdb068c77809b158', 'info_dict': { 'id': '64087937', 'ext': 'mp4', 'title': 'Kdo to mluví? Velké odhalení přináší nový pořad už od 25. srpna', 'display_id': 'kdo-to-mluvi-velke-odhaleni-prinasi-novy-porad-uz-od-25-srpna', 'description': 'md5:97a811000a6460266029d6c1c2ebcd59', 'duration': 50.2, 'view_count': int, }, }, { 'url': 'https://www.stream.cz/tajemno/znicehonic-jim-skrz-strechu-prolitnul-zahadny-predmet-badatele-vse-objasnili-64147267', 'md5': '3ee4d0be040e8f4a543e67e509d55e3f', 'info_dict': { 'id': '64147267', 'ext': 'mp4', 'title': 'Zničehonic jim skrz střechu prolítnul záhadný předmět. 
Badatelé vše objasnili', 'display_id': 'znicehonic-jim-skrz-strechu-prolitnul-zahadny-predmet-badatele-vse-objasnili', 'description': 'md5:4b8ada6718d34bb011c4e04ca4bc19bf', 'duration': 442.84, 'view_count': int, }, }] def _extract_formats(self, spl_url, video): for ext, pref, streams in ( ('ts', -1, traverse_obj(video, ('http_stream', 'qualities')) or {}), ('mp4', 1, video.get('mp4') or {})): for format_id, stream in streams.items(): if not stream.get('url'): continue yield { 'format_id': f'{format_id}-{ext}', 'ext': ext, 'source_preference': pref, 'url': urljoin(spl_url, stream['url']), 'tbr': float_or_none(stream.get('bandwidth'), scale=1000), 'duration': float_or_none(stream.get('duration'), scale=1000), 'width': traverse_obj(stream, ('resolution', 0)), 'height': traverse_obj(stream, ('resolution', 1)) or int_or_none(format_id.replace('p', '')), **parse_codecs(stream.get('codec')), } def _real_extract(self, url): display_id, video_id = self._match_valid_url(url).groups() data = self._download_json( 'https://www.televizeseznam.cz/api/graphql', video_id, 'Downloading GraphQL result', data=json.dumps({ 'variables': {'urlName': video_id}, 'query': ''' query LoadEpisode($urlName : String){ episode(urlName: $urlName){ ...VideoDetailFragmentOnEpisode } } fragment VideoDetailFragmentOnEpisode on Episode { id spl urlName name perex duration views }''', }).encode(), headers={'Content-Type': 'application/json;charset=UTF-8'}, )['data']['episode'] spl_url = data['spl'] + 'spl2,3' metadata = self._download_json(spl_url, video_id, 'Downloading playlist') if 'Location' in metadata and 'data' not in metadata: spl_url = metadata['Location'] metadata = self._download_json(spl_url, video_id, 'Downloading redirected playlist') video = metadata['data'] subtitles = {} for subs in video.get('subtitles', {}).values(): if not subs.get('language'): continue for ext, sub_url in subs.get('urls').items(): subtitles.setdefault(subs['language'], []).append({ 'ext': ext, 'url': 
urljoin(spl_url, sub_url), }) formats = list(self._extract_formats(spl_url, video)) return { 'id': video_id, 'display_id': display_id, 'title': data.get('name'), 'description': data.get('perex'), 'duration': float_or_none(data.get('duration')), 'view_count': int_or_none(data.get('views')), 'formats': formats, 'subtitles': subtitles, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/slutload.py
yt_dlp/extractor/slutload.py
from .common import InfoExtractor class SlutloadIE(InfoExtractor): _VALID_URL = r'https?://(?:\w+\.)?slutload\.com/(?:video/[^/]+|embed_player|watch)/(?P<id>[^/]+)' _TESTS = [{ 'url': 'http://www.slutload.com/video/virginie-baisee-en-cam/TD73btpBqSxc/', 'md5': '868309628ba00fd488cf516a113fd717', 'info_dict': { 'id': 'TD73btpBqSxc', 'ext': 'mp4', 'title': 'virginie baisee en cam', 'age_limit': 18, 'thumbnail': r're:https?://.*?\.jpg', }, }, { # mobile site 'url': 'http://mobile.slutload.com/video/masturbation-solo/fviFLmc6kzJ/', 'only_matching': True, }, { 'url': 'http://www.slutload.com/embed_player/TD73btpBqSxc/', 'only_matching': True, }, { 'url': 'http://www.slutload.com/watch/TD73btpBqSxc/Virginie-Baisee-En-Cam.html', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) embed_page = self._download_webpage( f'http://www.slutload.com/embed_player/{video_id}', video_id, 'Downloading embed page', fatal=False) if embed_page: def extract(what): return self._html_search_regex( rf'data-video-{what}=(["\'])(?P<url>(?:(?!\1).)+)\1', embed_page, f'video {what}', default=None, group='url') video_url = extract('url') if video_url: title = self._html_search_regex( r'<title>([^<]+)', embed_page, 'title', default=video_id) return { 'id': video_id, 'url': video_url, 'title': title, 'thumbnail': extract('preview'), 'age_limit': 18, } webpage = self._download_webpage( f'http://www.slutload.com/video/_/{video_id}/', video_id) title = self._html_search_regex( r'<h1><strong>([^<]+)</strong>', webpage, 'title').strip() info = self._parse_html5_media_entries(url, webpage, video_id)[0] info.update({ 'id': video_id, 'title': title, 'age_limit': 18, }) return info
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/israelnationalnews.py
yt_dlp/extractor/israelnationalnews.py
from .common import InfoExtractor from ..utils import ExtractorError, traverse_obj class IsraelNationalNewsIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?israelnationalnews\.com/news/(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.israelnationalnews.com/news/354520', 'info_dict': { 'id': '354520', }, 'playlist': [{ 'info_dict': { 'id': 'jA84wQhVvg8', 'title': 'Even CNN Host Is Shocked by How Bad Biden\'s Approval Ratings Have Gotten | DM CLIPS | Rubin Report', 'ext': 'mp4', 'description': 'md5:b7325a3d00c7596337dc3ae37e32d35c', 'channel': 'The Rubin Report', 'channel_follower_count': int, 'comment_count': int, 'categories': ['News & Politics'], 'like_count': int, 'uploader_url': 'http://www.youtube.com/user/RubinReport', 'uploader_id': 'RubinReport', 'availability': 'public', 'view_count': int, 'duration': 240, 'thumbnail': 'https://i.ytimg.com/vi_webp/jA84wQhVvg8/maxresdefault.webp', 'live_status': 'not_live', 'playable_in_embed': True, 'age_limit': 0, 'tags': 'count:29', 'channel_id': 'UCJdKr0Bgd_5saZYqLCa9mng', 'channel_url': 'https://www.youtube.com/channel/UCJdKr0Bgd_5saZYqLCa9mng', 'upload_date': '20220606', 'uploader': 'The Rubin Report', }, }], }] def _real_extract(self, url): news_article_id = self._match_id(url) article_json = self._download_json( f'https://www.israelnationalnews.com/Generic/NewAPI/Item?type=0&Item={news_article_id}', news_article_id) urls = traverse_obj(article_json, ('Content2', ..., 'content', ..., 'attrs', 'src')) if not urls: raise ExtractorError('This article does not have any videos', expected=True) return self.playlist_from_matches(urls, news_article_id, ie='Youtube')
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/unitednations.py
yt_dlp/extractor/unitednations.py
from .common import InfoExtractor from .kaltura import KalturaIE class UnitedNationsWebTvIE(InfoExtractor): _VALID_URL = r'https?://webtv\.un\.org/(?:ar|zh|en|fr|ru|es)/asset/\w+/(?P<id>\w+)' _TESTS = [{ 'url': 'https://webtv.un.org/en/asset/k1o/k1o7stmi6p', 'md5': 'b2f8b3030063298ae841b4b7ddc01477', 'info_dict': { 'id': '1_o7stmi6p', 'ext': 'mp4', 'title': 'António Guterres (Secretary-General) on Israel and Iran - Security Council, 9939th meeting', 'thumbnail': 'http://cfvod.kaltura.com/p/2503451/sp/250345100/thumbnail/entry_id/1_o7stmi6p/version/100021', 'uploader_id': 'evgeniia.alisova@un.org', 'upload_date': '20250620', 'timestamp': 1750430976, 'duration': 234, 'view_count': int, }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) partner_id = self._html_search_regex( r'partnerId:\s*(\d+)', webpage, 'partner_id') entry_id = self._html_search_regex( r'const\s+kentryID\s*=\s*["\'](\w+)["\']', webpage, 'kentry_id') return self.url_result(f'kaltura:{partner_id}:{entry_id}', KalturaIE)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/teletask.py
yt_dlp/extractor/teletask.py
import re

from .common import InfoExtractor
from ..utils import unified_strdate


class TeleTaskIE(InfoExtractor):
    """Extractor for tele-task.de lecture recordings.

    A lecture page carries several parallel <video> streams (e.g. speaker
    camera and slides); each one becomes an entry of the returned playlist.
    """
    _WORKING = False
    _VALID_URL = r'https?://(?:www\.)?tele-task\.de/archive/video/html5/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.tele-task.de/archive/video/html5/26168/',
        'info_dict': {
            'id': '26168',
            'title': 'Duplicate Detection',
        },
        'playlist': [{
            'md5': '290ef69fb2792e481169c3958dbfbd57',
            'info_dict': {
                'id': '26168-speaker',
                'ext': 'mp4',
                'title': 'Duplicate Detection',
                'upload_date': '20141218',
            },
        }, {
            'md5': 'e1e7218c5f0e4790015a437fcf6c71b4',
            'info_dict': {
                'id': '26168-slides',
                'ext': 'mp4',
                'title': 'Duplicate Detection',
                'upload_date': '20141218',
            },
        }],
    }

    def _real_extract(self, url):
        lecture_id = self._match_id(url)
        page = self._download_webpage(url, lecture_id)

        title = self._html_search_regex(
            r'itemprop="name">([^<]+)</a>', page, 'title')
        upload_date = unified_strdate(self._html_search_regex(
            r'Date:</td><td>([^<]+)</td>', page, 'date', fatal=False))

        # The CSS class of each <video> tag names the stream it carries.
        entries = []
        for stream_name, stream_url in re.findall(
                r'<video class="([^"]+)"[^>]*>\s*<source src="([^"]+)"', page):
            entries.append({
                'id': f'{lecture_id}-{stream_name}',
                'url': stream_url,
                'title': title,
                'upload_date': upload_date,
            })

        return self.playlist_result(entries, lecture_id, title)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/cultureunplugged.py
yt_dlp/extractor/cultureunplugged.py
from .common import InfoExtractor
from ..utils import int_or_none


class CultureUnpluggedIE(InfoExtractor):
    """Extractor for cultureunplugged.com documentary pages.

    Metadata and the direct video URL come from a per-movie JSON file
    (movie-data/cu-<id>.json) served by the site.
    """
    _VALID_URL = r'https?://(?:www\.)?cultureunplugged\.com/(?:documentary/watch-online/)?play/(?P<id>\d+)(?:/(?P<display_id>[^/#?]+))?'
    _TESTS = [{
        'url': 'http://www.cultureunplugged.com/documentary/watch-online/play/53662/The-Next--Best-West',
        'md5': 'ac6c093b089f7d05e79934dcb3d228fc',
        'info_dict': {
            'id': '53662',
            'display_id': 'The-Next--Best-West',
            'ext': 'mp4',
            'title': 'The Next, Best West',
            'description': 'md5:770033a3b7c2946a3bcfb7f1c6fb7045',
            'thumbnail': r're:^https?://.*\.jpg$',
            'creators': ['Coldstream Creative'],
            'duration': 2203,
            'view_count': int,
        },
    }, {
        'url': 'https://www.cultureunplugged.com/play/2833/Koi-Sunta-Hai--Journeys-with-Kumar---Kabir--Someone-is-Listening-',
        'md5': 'dc2014bc470dfccba389a1c934fa29fa',
        'info_dict': {
            'id': '2833',
            'display_id': 'Koi-Sunta-Hai--Journeys-with-Kumar---Kabir--Someone-is-Listening-',
            'ext': 'mp4',
            'title': 'Koi Sunta Hai: Journeys with Kumar & Kabir (Someone is Listening)',
            'description': 'md5:fa94ac934927c98660362b8285b2cda5',
            'view_count': int,
            'thumbnail': 'https://s3.amazonaws.com/cdn.cultureunplugged.com/thumbnails_16_9/lg/2833.jpg',
            'creators': ['Srishti'],
        },
    }, {
        'url': 'http://www.cultureunplugged.com/documentary/watch-online/play/53662',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        match = self._match_valid_url(url)
        video_id = match.group('id')
        display_id = match.group('display_id') or video_id

        movie = self._download_json(
            f'http://www.cultureunplugged.com/movie-data/cu-{video_id}.json', display_id)

        # Prefer the large thumbnail; 'preference' mirrors enumeration order.
        thumbnails = []
        for preference, size in enumerate(('small', 'large')):
            thumb_url = movie.get(f'{size}_thumb')
            if thumb_url:
                thumbnails.append({
                    'url': thumb_url,
                    'id': size,
                    'preference': preference,
                })

        return {
            'id': video_id,
            'display_id': display_id,
            'url': movie['url'],
            'title': movie['title'],
            'description': movie.get('synopsis'),
            'creator': movie.get('producer'),
            'duration': int_or_none(movie.get('duration')),
            'view_count': int_or_none(movie.get('views')),
            'thumbnails': thumbnails,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/comedycentral.py
yt_dlp/extractor/comedycentral.py
from .mtv import MTVServicesBaseIE


class ComedyCentralIE(MTVServicesBaseIE):
    """Extractor for cc.com video clips.

    All extraction logic is inherited from MTVServicesBaseIE; this subclass
    only narrows the URL pattern to Comedy Central clip pages.
    """

    # Clip pages are addressed by a six-character lowercase-alphanumeric slug.
    _VALID_URL = r'https?://(?:www\.)?cc\.com/video-clips/(?P<id>[\da-z]{6})'
    _TESTS = [{
        'url': 'https://www.cc.com/video-clips/wl12cx',
        'info_dict': {
            'id': 'dec6953e-80c8-43b3-96cd-05e9230e704d',
            'ext': 'mp4',
            'display_id': 'wl12cx',
            'title': 'Alison Brie and Dave Franco -"Together"- Extended Interview',
            'description': 'md5:ec68e38d3282f863de9cde0ce5cd231c',
            'duration': 516.76,
            'thumbnail': r're:https://images\.paramount\.tech/uri/mgid:arc:imageassetref:',
            'series': 'The Daily Show',
            'season': 'Season 30',
            'season_number': 30,
            'episode': 'Episode 0',
            'episode_number': 0,
            'timestamp': 1753973314,
            'upload_date': '20250731',
            'release_timestamp': 1753977914,
            'release_date': '20250731',
        },
        'params': {'skip_download': 'm3u8'},
    }]
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ted.py
yt_dlp/extractor/ted.py
import itertools
import re

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    parse_duration,
    str_to_int,
    try_get,
    unified_strdate,
    url_or_none,
)


class TedBaseIE(InfoExtractor):
    """Shared helpers for the ted.com extractors."""

    # {type} is filled in by subclasses (talks / series / playlists).
    _VALID_URL_BASE = r'https?://www\.ted\.com/(?:{type})(?:/lang/[^/#?]+)?/(?P<id>[\w-]+)'

    def _parse_playlist(self, playlist):
        """Yield url_result entries for every Video node in a playlist JSON object."""
        for entry in try_get(playlist, lambda x: x['videos']['nodes'], list):
            if entry.get('__typename') == 'Video' and entry.get('canonicalUrl'):
                yield self.url_result(entry['canonicalUrl'], TedTalkIE.ie_key())


class TedTalkIE(TedBaseIE):
    """Extractor for individual TED talks."""

    _VALID_URL = TedBaseIE._VALID_URL_BASE.format(type='talks')
    _TESTS = [{
        'url': 'https://www.ted.com/talks/candace_parker_how_to_break_down_barriers_and_not_accept_limits',
        'md5': '47e82c666d9c3261d4fe74748a90aada',
        'info_dict': {
            'id': '86532',
            'ext': 'mp4',
            'title': 'How to break down barriers and not accept limits',
            'description': 'md5:000707cece219d1e165b11550d612331',
            'view_count': int,
            'tags': 'count:6',
            'uploader': 'Candace Parker',
            'duration': 679,
            'thumbnail': r're:https?://pi\.tedcdn\.com/.+\.jpg',
            'upload_date': '20220114',
            'release_date': '20211201',
        },
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # Talk metadata is embedded in the page's Next.js data blob.
        talk_info = self._search_nextjs_data(webpage, display_id)['props']['pageProps']['videoData']
        video_id = talk_info['id']
        # playerData is itself a JSON string inside the blob.
        player_data = self._parse_json(talk_info.get('playerData'), video_id)

        http_url = None  # remembered h264 URL, used below to synthesize http variants
        formats, subtitles = [], {}
        for format_id, resources in (player_data.get('resources') or {}).items():
            if format_id == 'hls':
                stream_url = url_or_none(try_get(resources, lambda x: x['stream']))
                if not stream_url:
                    continue
                m3u8_formats, m3u8_subs = self._extract_m3u8_formats_and_subtitles(
                    stream_url, video_id, 'mp4', m3u8_id=format_id, fatal=False)
                formats.extend(m3u8_formats)
                subtitles = self._merge_subtitles(subtitles, m3u8_subs)
                continue

            # Remaining resource kinds are lists of per-bitrate entries.
            if not isinstance(resources, list):
                continue

            if format_id == 'h264':
                for resource in resources:
                    h264_url = resource.get('file')
                    if not h264_url:
                        continue
                    bitrate = int_or_none(resource.get('bitrate'))
                    formats.append({
                        'url': h264_url,
                        'format_id': f'{format_id}-{bitrate}k',
                        'tbr': bitrate,
                    })
                    # Keep one URL containing a "<N>k" bitrate token so it can
                    # be rewritten into other bitrates later.
                    if re.search(r'\d+k', h264_url):
                        http_url = h264_url
            elif format_id == 'rtmp':
                streamer = talk_info.get('streamer')
                if not streamer:
                    continue
                formats.extend({
                    'format_id': '{}-{}'.format(format_id, resource.get('name')),
                    'url': streamer,
                    'play_path': resource['file'],
                    'ext': 'flv',
                    'width': int_or_none(resource.get('width')),
                    'height': int_or_none(resource.get('height')),
                    'tbr': int_or_none(resource.get('bitrate')),
                } for resource in resources if resource.get('file'))

        # Derive plain-http formats from the HLS variants by substituting each
        # variant's bitrate token into the known h264 download URL.
        if http_url:
            m3u8_formats = [f for f in formats if f.get('protocol') == 'm3u8' and f.get('vcodec') != 'none']
            for m3u8_format in m3u8_formats:
                bitrate = self._search_regex(r'(\d+k)', m3u8_format['url'], 'bitrate', default=None)
                if not bitrate:
                    continue
                bitrate_url = re.sub(r'\d+k', bitrate, http_url)
                if not self._is_valid_url(
                        bitrate_url, video_id, f'{bitrate} bitrate'):
                    continue
                f = m3u8_format.copy()
                f.update({
                    'url': bitrate_url,
                    'format_id': m3u8_format['format_id'].replace('hls', 'http'),
                    'protocol': 'http',
                })
                if f.get('acodec') == 'none':
                    del f['acodec']
                formats.append(f)

        audio_download = talk_info.get('audioDownload')
        if audio_download:
            formats.append({
                'url': audio_download,
                'format_id': 'audio',
                'vcodec': 'none',
            })

        # No native formats: the talk is hosted externally (e.g. on YouTube).
        if not formats:
            external = player_data.get('external') or {}
            service = external.get('service') or ''
            ext_url = external.get('code') if service.lower() == 'youtube' else None
            return self.url_result(ext_url or external['uri'])

        thumbnail = player_data.get('thumb') or self._og_search_property('image', webpage)
        if thumbnail:
            # trim thumbnail resize parameters
            thumbnail = thumbnail.split('?')[0]

        return {
            'id': video_id,
            'title': talk_info.get('title') or self._og_search_title(webpage),
            'uploader': talk_info.get('presenterDisplayName'),
            'thumbnail': thumbnail,
            'description': talk_info.get('description') or self._og_search_description(webpage),
            'subtitles': subtitles,
            'formats': formats,
            'duration': talk_info.get('duration') or parse_duration(self._og_search_property('video:duration', webpage)),
            'view_count': str_to_int(talk_info.get('viewedCount')),
            'upload_date': unified_strdate(talk_info.get('publishedAt')),
            'release_date': unified_strdate(talk_info.get('recordedOn')),
            'tags': try_get(player_data, lambda x: x['targeting']['tag'].split(',')),
        }


class TedSeriesIE(TedBaseIE):
    """Extractor for TED original series; an optional #season_N fragment selects one season."""

    _VALID_URL = fr'{TedBaseIE._VALID_URL_BASE.format(type=r"series")}(?:#season_(?P<season>\d+))?'
    _TESTS = [{
        'url': 'https://www.ted.com/series/small_thing_big_idea',
        'info_dict': {
            'id': '3',
            'title': 'Small Thing Big Idea',
            'series': 'Small Thing Big Idea',
            'description': 'md5:6869ca52cec661aef72b3e9f7441c55c',
        },
        'playlist_mincount': 16,
    }, {
        'url': 'https://www.ted.com/series/the_way_we_work#season_2',
        'info_dict': {
            'id': '8_2',
            'title': 'The Way We Work Season 2',
            'series': 'The Way We Work',
            'description': 'md5:36678fe045f6ad7f39da80ea9370cbcd',
            'season_number': 2,
        },
        'playlist_mincount': 8,
    }]

    def _real_extract(self, url):
        display_id, season = self._match_valid_url(url).group('id', 'season')
        webpage = self._download_webpage(url, display_id, 'Downloading series webpage')
        info = self._search_nextjs_data(webpage, display_id)['props']['pageProps']

        # season is None when no #season_N fragment was given -> take all seasons.
        entries = itertools.chain.from_iterable(
            self._parse_playlist(s) for s in info['seasons']
            if season in [None, s.get('seasonNumber')])

        series_id = try_get(info, lambda x: x['series']['id'])
        series_name = try_get(info, lambda x: x['series']['name']) or self._og_search_title(webpage, fatal=False)

        return self.playlist_result(
            entries,
            f'{series_id}_{season}' if season and series_id else series_id,
            f'{series_name} Season {season}' if season else series_name,
            self._og_search_description(webpage),
            series=series_name, season_number=int_or_none(season))


class TedPlaylistIE(TedBaseIE):
    """Extractor for curated TED playlists."""

    _VALID_URL = TedBaseIE._VALID_URL_BASE.format(type=r'playlists(?:/\d+)?')
    _TESTS = [{
        'url': 'https://www.ted.com/playlists/171/the_most_popular_talks_of_all',
        'info_dict': {
            'id': '171',
            'title': 'The most popular talks of all time',
            'description': 'md5:d2f22831dc86c7040e733a3cb3993d78',
        },
        'playlist_mincount': 25,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        playlist = self._search_nextjs_data(
            webpage, display_id)['props']['pageProps']['playlist']

        return self.playlist_result(
            self._parse_playlist(playlist), playlist.get('id'),
            # og:title carries a " | TED Talks" suffix that is stripped here.
            playlist.get('title') or self._og_search_title(webpage, default='').replace(' | TED Talks', '') or None,
            self._og_search_description(webpage))


class TedEmbedIE(InfoExtractor):
    """Extractor for embed(-ssl).ted.com iframes; redirects to the www.ted.com talk page."""

    _VALID_URL = r'https?://embed(?:-ssl)?\.ted\.com/'
    _EMBED_REGEX = [rf'<iframe[^>]+?src=(["\'])(?P<url>{_VALID_URL}.+?)\1']

    _TESTS = [{
        'url': 'https://embed.ted.com/talks/janet_stovall_how_to_get_serious_about_diversity_and_inclusion_in_the_workplace',
        'info_dict': {
            'id': '21802',
            'ext': 'mp4',
            'title': 'How to get serious about diversity and inclusion in the workplace',
            'description': 'md5:0978aafe396e05341f8ecc795d22189d',
            'view_count': int,
            'uploader': 'Janet Stovall',
            'duration': 654,
            'tags': 'count:10',
            'thumbnail': r're:https?://pi\.tedcdn\.com/.+\.jpg',
            'upload_date': '20180822',
            'release_date': '20180719',
        },
    }]
    _WEBPAGE_TESTS = [{
        'url': 'https://ideas.ted.com/6-ways-to-give-that-arent-about-money/',
        'info_dict': {
            'id': '123235',
            'ext': 'mp4',
            'title': 'It\'s time for infectious generosity. Here\'s how',
            'description': 'md5:0f972eb2b53ad7d1385fb65f519657b4',
            'duration': 1172,
            'release_date': '20231128',
            'tags': 'count:9',
            'thumbnail': r're:https?://pi\.tedcdn\.com/.+\.jpg',
            'upload_date': '20240109',
            'uploader': 'Chris Anderson',
            'view_count': int,
        },
        'params': {'skip_download': 'm3u8'},
    }]

    def _real_extract(self, url):
        # Rewrite the embed host to www and let TedTalkIE do the work.
        return self.url_result(re.sub(r'://embed(-ssl)?', '://www', url), TedTalkIE.ie_key())
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/platzi.py
yt_dlp/extractor/platzi.py
import base64

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    clean_html,
    int_or_none,
    str_or_none,
    try_get,
    url_or_none,
    urlencode_postdata,
    urljoin,
)


class PlatziBaseIE(InfoExtractor):
    """Shared login handling for the Platzi extractors."""

    _LOGIN_URL = 'https://platzi.com/login/'
    _NETRC_MACHINE = 'platzi'

    def _perform_login(self, username, password):
        """Log in via the HTML form; raises ExtractorError with the site's message on failure."""
        login_page = self._download_webpage(
            self._LOGIN_URL, None, 'Downloading login page')
        login_form = self._hidden_inputs(login_page)
        login_form.update({
            'email': username,
            'password': password,
        })
        urlh = self._request_webpage(
            self._LOGIN_URL, None, 'Logging in',
            data=urlencode_postdata(login_form),
            headers={'Referer': self._LOGIN_URL})

        # login succeeded: a successful login redirects away from the login page
        if 'platzi.com/login' not in urlh.url:
            return

        login_error = self._webpage_read_content(
            urlh, self._LOGIN_URL, None, 'Downloading login error page')

        # The error page embeds a JS "login" object with per-field error texts.
        login = self._parse_json(
            self._search_regex(
                r'login\s*=\s*({.+?})(?:\s*;|\s*</script)', login_error, 'login'),
            None)

        for kind in ('error', 'password', 'nonFields'):
            error = str_or_none(login.get(f'{kind}Error'))
            if error:
                raise ExtractorError(
                    f'Unable to login: {error}', expected=True)
        raise ExtractorError('Unable to log in')


class PlatziIE(PlatziBaseIE):
    """Extractor for a single Platzi lecture (requires an account)."""

    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            platzi\.com/clases|           # es version
                            courses\.platzi\.com/classes  # en version
                        )/[^/]+/(?P<id>\d+)-[^/?\#&]+
                    '''
    _TESTS = [{
        'url': 'https://platzi.com/clases/1311-next-js/12074-creando-nuestra-primera-pagina/',
        'md5': '8f56448241005b561c10f11a595b37e3',
        'info_dict': {
            'id': '12074',
            'ext': 'mp4',
            'title': 'Creando nuestra primera página',
            'description': 'md5:4c866e45034fc76412fbf6e60ae008bc',
            'duration': 420,
        },
        'skip': 'Requires platzi account credentials',
    }, {
        'url': 'https://courses.platzi.com/classes/1367-communication-codestream/13430-background/',
        'info_dict': {
            'id': '13430',
            'ext': 'mp4',
            'title': 'Background',
            'description': 'md5:49c83c09404b15e6e71defaf87f6b305',
            'duration': 360,
        },
        'skip': 'Requires platzi account credentials',
        'params': {
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
        lecture_id = self._match_id(url)

        webpage = self._download_webpage(url, lecture_id)

        data = self._parse_json(
            self._search_regex(
                # client_data may contain "};" so that we have to try more
                # strict regex first
                (r'client_data\s*=\s*({.+?})\s*;\s*\n',
                 r'client_data\s*=\s*({.+?})\s*;'),
                webpage, 'client data'),
            lecture_id)

        material = data['initialState']['material']
        desc = material['description']
        title = desc['title']

        formats = []
        # One entry per CDN server, each offering HLS and/or DASH manifests.
        for server_id, server in material['videos'].items():
            if not isinstance(server, dict):
                continue
            for format_id in ('hls', 'dash'):
                format_url = url_or_none(server.get(format_id))
                if not format_url:
                    continue
                if format_id == 'hls':
                    formats.extend(self._extract_m3u8_formats(
                        format_url, lecture_id, 'mp4',
                        entry_protocol='m3u8_native', m3u8_id=format_id,
                        note=f'Downloading {server_id} m3u8 information',
                        fatal=False))
                elif format_id == 'dash':
                    formats.extend(self._extract_mpd_formats(
                        format_url, lecture_id, mpd_id=format_id,
                        note=f'Downloading {server_id} MPD manifest',
                        fatal=False))

        # The description HTML is delivered base64-encoded.
        content = str_or_none(desc.get('content'))
        description = (clean_html(base64.b64decode(content).decode('utf-8'))
                       if content else None)
        # duration is given in minutes; invscale converts it to seconds.
        duration = int_or_none(material.get('duration'), invscale=60)

        return {
            'id': lecture_id,
            'title': title,
            'description': description,
            'duration': duration,
            'formats': formats,
        }


class PlatziCourseIE(PlatziBaseIE):
    """Extractor for a whole Platzi course; yields one entry per video lecture."""

    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            platzi\.com/clases|           # es version
                            courses\.platzi\.com/classes  # en version
                        )/(?P<id>[^/?\#&]+)
                    '''
    _TESTS = [{
        'url': 'https://platzi.com/clases/next-js/',
        'info_dict': {
            'id': '1311',
            'title': 'Curso de Next.js',
        },
        'playlist_count': 22,
    }, {
        'url': 'https://courses.platzi.com/classes/communication-codestream/',
        'info_dict': {
            'id': '1367',
            'title': 'Codestream Course',
        },
        'playlist_count': 14,
    }]

    @classmethod
    def suitable(cls, url):
        # Defer single-lecture URLs to PlatziIE.
        return False if PlatziIE.suitable(url) else super().suitable(url)

    def _real_extract(self, url):
        course_name = self._match_id(url)

        webpage = self._download_webpage(url, course_name)

        props = self._parse_json(
            self._search_regex(r'data\s*=\s*({.+?})\s*;', webpage, 'data'),
            course_name)['initialProps']

        entries = []
        # Courses are organized as chapters ("concepts"), each holding a list
        # of materials of which only the videos are extracted.
        for chapter_num, chapter in enumerate(props['concepts'], 1):
            if not isinstance(chapter, dict):
                continue
            materials = chapter.get('materials')
            if not materials or not isinstance(materials, list):
                continue
            chapter_title = chapter.get('title')
            chapter_id = str_or_none(chapter.get('id'))
            for material in materials:
                if not isinstance(material, dict):
                    continue
                if material.get('material_type') != 'video':
                    continue
                video_url = urljoin(url, material.get('url'))
                if not video_url:
                    continue
                entries.append({
                    '_type': 'url_transparent',
                    'url': video_url,
                    'title': str_or_none(material.get('name')),
                    'id': str_or_none(material.get('id')),
                    'ie_key': PlatziIE.ie_key(),
                    'chapter': chapter_title,
                    'chapter_number': chapter_num,
                    'chapter_id': chapter_id,
                })

        course_id = str(try_get(props, lambda x: x['course']['id']))
        course_title = try_get(props, lambda x: x['course']['name'], str)

        return self.playlist_result(entries, course_id, course_title)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/glomex.py
yt_dlp/extractor/glomex.py
import re
import urllib.parse

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    determine_ext,
    extract_attributes,
    int_or_none,
    parse_qs,
    smuggle_url,
    unescapeHTML,
    unsmuggle_url,
)


class GlomexBaseIE(InfoExtractor):
    """Shared API access for the Glomex extractors.

    The integration API requires both a playlist/video id and the integration
    id of the embedding site; the page the embed was found on ("origin") is
    smuggled through the URL so it can be sent as current_url.
    """

    _DEFAULT_ORIGIN_URL = 'https://player.glomex.com/'
    _API_URL = 'https://integration-cloudfront-eu-west-1.mes.glomex.cloud/'

    @staticmethod
    def _smuggle_origin_url(url, origin_url):
        """Attach the embedding page's URL to *url*; pass-through when unknown."""
        if origin_url is None:
            return url
        return smuggle_url(url, {'origin': origin_url})

    @classmethod
    def _unsmuggle_origin_url(cls, url, fallback_origin_url=None):
        """Return (clean_url, origin_url), defaulting the origin to the player host."""
        defaults = {'origin': fallback_origin_url or cls._DEFAULT_ORIGIN_URL}
        unsmuggled_url, data = unsmuggle_url(url, default=defaults)
        return unsmuggled_url, data['origin']

    def _get_videoid_type(self, video_id):
        """Map the id prefix (v-/pl-/rl-/cl-) to a human-readable type for log messages."""
        _VIDEOID_TYPES = {
            'v': 'video',
            'pl': 'playlist',
            'rl': 'related videos playlist',
            'cl': 'curated playlist',
        }
        prefix = video_id.split('-')[0]
        return _VIDEOID_TYPES.get(prefix, 'unknown type')

    def _download_api_data(self, video_id, integration, current_url=None):
        """Fetch the raw integration-API JSON for *video_id*."""
        query = {
            'integration_id': integration,
            'playlist_id': video_id,
            'current_url': current_url or self._DEFAULT_ORIGIN_URL,
        }
        video_id_type = self._get_videoid_type(video_id)
        return self._download_json(
            self._API_URL, video_id,
            f'Downloading {video_id_type} JSON',
            f'Unable to download {video_id_type} JSON',
            query=query)

    def _download_and_extract_api_data(self, video_id, integration, current_url):
        """Extract all videos for *video_id*; a single video is returned unwrapped."""
        api_data = self._download_api_data(video_id, integration, current_url)
        videos = api_data['videos']
        if not videos:
            raise ExtractorError(f'no videos found for {video_id}')
        videos = [self._extract_api_data(video, video_id) for video in videos]
        return videos[0] if len(videos) == 1 else self.playlist_result(videos, video_id)

    def _extract_api_data(self, video, video_id):
        """Turn one API video object into an info dict."""
        if video.get('error_code') == 'contentGeoblocked':
            self.raise_geo_restricted(countries=video['geo_locations'])

        formats, subs = [], {}
        for format_id, format_url in video['source'].items():
            ext = determine_ext(format_url)
            if ext == 'm3u8':
                formats_, subs_ = self._extract_m3u8_formats_and_subtitles(
                    format_url, video_id, 'mp4', m3u8_id=format_id, fatal=False)
                formats.extend(formats_)
                self._merge_subtitles(subs_, target=subs)
            else:
                formats.append({
                    'url': format_url,
                    'format_id': format_id,
                })
        if video.get('language'):
            for fmt in formats:
                fmt['language'] = video['language']

        images = (video.get('images') or []) + [video.get('image') or {}]
        thumbnails = [{
            'id': image.get('id'),
            # request a fixed player-sized rendition
            'url': f'{image["url"]}/profile:player-960x540',
            'width': 960,
            'height': 540,
        } for image in images if image.get('url')]
        self._remove_duplicate_formats(thumbnails)

        return {
            'id': video.get('clip_id') or video_id,
            'title': video.get('title'),
            'description': video.get('description'),
            'thumbnails': thumbnails,
            'duration': int_or_none(video.get('clip_duration')),
            'timestamp': video.get('created_at'),
            'formats': formats,
            'subtitles': subs,
        }


class GlomexIE(GlomexBaseIE):
    IE_NAME = 'glomex'
    IE_DESC = 'Glomex videos'
    _VALID_URL = r'https?://video\.glomex\.com/[^/]+/(?P<id>v-[^-]+)'
    # Hard-coded integration id used by video.glomex.com itself.
    _INTEGRATION_ID = '19syy24xjn1oqlpc'

    _TESTS = [{
        'url': 'https://video.glomex.com/sport/v-cb24uwg77hgh-nach-2-0-sieg-guardiola-mit-mancity-vor-naechstem-titel',
        'info_dict': {
            'id': 'v-cb24uwg77hgh',
            'ext': 'mp4',
            'title': 'Nach 2:0-Sieg: Guardiola mit ManCity vor nächstem Titel',
            'description': 'md5:1ea6b6caff1443fcbbba159e432eedb8',
            'duration': 29600,
            'thumbnail': r're:https?://i[a-z0-9]thumbs\.glomex\.com/.+',
            'timestamp': 1619895017,
            'upload_date': '20210501',
        },
        'params': {'skip_download': 'm3u8'},
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Delegate to the embed extractor via a synthesized player URL.
        return self.url_result(
            GlomexEmbedIE.build_player_url(video_id, self._INTEGRATION_ID, url),
            GlomexEmbedIE.ie_key(), video_id)


class GlomexEmbedIE(GlomexBaseIE):
    IE_NAME = 'glomex:embed'
    IE_DESC = 'Glomex embedded videos'
    _BASE_PLAYER_URL = '//player.glomex.com/integration/1/iframe-player.html'
    _BASE_PLAYER_URL_RE = re.escape(_BASE_PLAYER_URL).replace('/1/', r'/[^/]/')
    _VALID_URL = rf'https?:{_BASE_PLAYER_URL_RE}\?([^#]+&)?playlistId=(?P<id>[^#&]+)'

    _TESTS = [{
        'url': 'https://player.glomex.com/integration/1/iframe-player.html?integrationId=4059a013k56vb2yd&playlistId=v-cfa6lye0dkdd-sf',
        'info_dict': {
            'id': 'v-cfa6lye0dkdd-sf',
            'ext': 'mp4',
            'title': 'Φώφη Γεννηματά: Ο επικήδειος λόγος του 17χρονου γιου της, Γιώργου',
            'thumbnail': r're:https?://i[a-z0-9]thumbs\.glomex\.com/.+',
            'timestamp': 1635337199,
            'duration': 133080,
            'upload_date': '20211027',
            'description': 'md5:e741185fc309310ff5d0c789b437be66',
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://player.glomex.com/integration/1/iframe-player.html?origin=fullpage&integrationId=19syy24xjn1oqlpc&playlistId=rl-vcb49w1fb592p&playlistIndex=0',
        'info_dict': {
            'id': 'rl-vcb49w1fb592p',
        },
        'playlist_count': 100,
    }, {
        # Geo-restricted
        'url': 'https://player.glomex.com/integration/1/iframe-player.html?playlistId=cl-bgqaata6aw8x&integrationId=19syy24xjn1oqlpc',
        'info_dict': {
            'id': 'cl-bgqaata6aw8x',
        },
        'playlist_mincount': 2,
    }]
    _WEBPAGE_TESTS = [{
        'url': 'https://www.skai.gr/news/world/iatrikos-syllogos-tourkias-to-turkovac-aplo-dialyma-erntogan-eiste-apateones-kai-pseytes',
        'info_dict': {
            'id': 'v-ch2nkhcirwc9-sf',
            'ext': 'mp4',
            'title': 'Ιατρικός Σύλλογος Τουρκίας: Το Turkovac είναι ένα απλό διάλυμα –Ερντογάν: Είστε απατεώνες και ψεύτες',
            'description': 'md5:8b517a61d577efe7e36fde72fd535995',
            'duration': 460000,
            'thumbnail': r're:https?://i[a-z0-9]thumbs\.glomex\.com/.+',
            'timestamp': 1641885019,
            'upload_date': '20220111',
        },
        'params': {'skip_download': 'm3u8'},
    }]

    @classmethod
    def build_player_url(cls, video_id, integration, origin_url=None):
        """Build an iframe-player URL for *video_id*, smuggling the origin page."""
        query_string = urllib.parse.urlencode({
            'playlistId': video_id,
            'integrationId': integration,
        })
        return cls._smuggle_origin_url(f'https:{cls._BASE_PLAYER_URL}?{query_string}', origin_url)

    @classmethod
    def _extract_embed_urls(cls, url, webpage):
        # https://docs.glomex.com/publisher/video-player-integration/javascript-api/
        quot_re = r'["\']'

        regex = fr'''(?x)
            <iframe[^>]+?src=(?P<q>{quot_re})(?P<url>
                (?:https?:)?{cls._BASE_PLAYER_URL_RE}\?(?:(?!(?P=q)).)+
            )(?P=q)'''
        for mobj in re.finditer(regex, webpage):
            embed_url = unescapeHTML(mobj.group('url'))
            if cls.suitable(embed_url):
                yield cls._smuggle_origin_url(embed_url, url)

        regex = fr'''(?x)
            <glomex-player [^>]+?>|
            <div[^>]* data-glomex-player=(?P<q>{quot_re})true(?P=q)[^>]*>'''
        for mobj in re.finditer(regex, webpage):
            attrs = extract_attributes(mobj.group(0))
            if attrs.get('data-integration-id') and attrs.get('data-playlist-id'):
                yield cls.build_player_url(attrs['data-playlist-id'], attrs['data-integration-id'], url)

        # naive parsing of inline scripts for hard-coded integration parameters
        regex = fr'''(?x)
            (?P<is_js>dataset\.)?%s\s*(?(is_js)=|:)\s*
            (?P<q>{quot_re})(?P<id>(?:(?!(?P=q)).)+)(?P=q)\s'''
        for mobj in re.finditer(r'(?x)<script[^<]*>.+?</script>', webpage):
            script = mobj.group(0)
            integration_id = re.search(regex % 'integrationId', script)
            if not integration_id:
                continue
            playlist_id = re.search(regex % 'playlistId', script)
            if playlist_id:
                # FIX: pass the captured id strings, not the re.Match objects;
                # urlencode() would otherwise stringify the matches into
                # "<re.Match object ...>" and build a broken player URL.
                yield cls.build_player_url(
                    playlist_id.group('id'), integration_id.group('id'), url)

    def _real_extract(self, url):
        url, origin_url = self._unsmuggle_origin_url(url)
        playlist_id = self._match_id(url)
        integration = parse_qs(url).get('integrationId', [None])[0]
        if not integration:
            raise ExtractorError('No integrationId in URL', expected=True)
        return self._download_and_extract_api_data(playlist_id, integration, origin_url)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/yandexvideo.py
yt_dlp/extractor/yandexvideo.py
import itertools from .common import InfoExtractor from ..utils import ( bug_reports_message, determine_ext, int_or_none, lowercase_escape, parse_qs, qualities, try_get, update_url_query, url_or_none, ) from ..utils.traversal import traverse_obj class YandexVideoIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// (?: yandex\.ru(?:/(?:portal/(?:video|efir)|efir))?/?\?.*?stream_id=| frontend\.vh\.yandex\.ru/player/ ) (?P<id>(?:[\da-f]{32}|[\w-]{12})) ''' _TESTS = [{ 'url': 'https://yandex.ru/portal/video?stream_id=4dbb36ec4e0526d58f9f2dc8f0ecf374', 'info_dict': { 'id': '4dbb36ec4e0526d58f9f2dc8f0ecf374', 'ext': 'mp4', 'title': 'Русский Вудсток - главный рок-фест в истории СССР / вДудь', 'description': 'md5:7d6b8d4bc4a3b9a56499916c1ea5b5fa', 'thumbnail': r're:^https?://', 'timestamp': 1549972939, 'duration': 5575, 'age_limit': 18, 'upload_date': '20190212', 'view_count': int, 'like_count': int, 'dislike_count': int, }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://yandex.ru/portal/efir?stream_id=4dbb262b4fe5cf15a215de4f34eee34d&from=morda', 'only_matching': True, }, { 'url': 'https://yandex.ru/?stream_id=4dbb262b4fe5cf15a215de4f34eee34d', 'only_matching': True, }, { 'url': 'https://frontend.vh.yandex.ru/player/4dbb262b4fe5cf15a215de4f34eee34d?from=morda', 'only_matching': True, }, { # vod-episode, series episode 'url': 'https://yandex.ru/portal/video?stream_id=45b11db6e4b68797919c93751a938cee', 'only_matching': True, }, { # episode, sports 'url': 'https://yandex.ru/?stream_channel=1538487871&stream_id=4132a07f71fb0396be93d74b3477131d', 'only_matching': True, }, { # DASH with DRM 'url': 'https://yandex.ru/portal/video?from=morda&stream_id=485a92d94518d73a9d0ff778e13505f8', 'only_matching': True, }, { 'url': 'https://yandex.ru/efir?stream_active=watching&stream_id=v7a2dZ-v5mSI&from_block=efir_newtab', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) player = try_get((self._download_json( 
'https://frontend.vh.yandex.ru/graphql', video_id, data=('''{ player(content_id: "%s") { computed_title content_url description dislikes duration likes program_title release_date release_date_ut release_year restriction_age season start_time streams thumbnail title views_count } }''' % video_id).encode(), fatal=False)), lambda x: x['player']['content']) # noqa: UP031 if not player or player.get('error'): player = self._download_json( f'https://frontend.vh.yandex.ru/v23/player/{video_id}.json', video_id, query={ 'stream_options': 'hires', 'disable_trackings': 1, }) content = player['content'] title = content.get('title') or content['computed_title'] formats = [] streams = content.get('streams') or [] streams.append({'url': content.get('content_url')}) for stream in streams: content_url = url_or_none(stream.get('url')) if not content_url: continue ext = determine_ext(content_url) if ext == 'ismc': continue elif ext == 'm3u8': formats.extend(self._extract_m3u8_formats( content_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) elif ext == 'mpd': formats.extend(self._extract_mpd_formats( content_url, video_id, mpd_id='dash', fatal=False)) else: formats.append({'url': content_url}) timestamp = (int_or_none(content.get('release_date')) or int_or_none(content.get('release_date_ut')) or int_or_none(content.get('start_time'))) season = content.get('season') or {} return { 'id': video_id, 'title': title, 'description': content.get('description'), 'thumbnail': content.get('thumbnail'), 'timestamp': timestamp, 'duration': int_or_none(content.get('duration')), 'series': content.get('program_title'), 'age_limit': int_or_none(content.get('restriction_age')), 'view_count': int_or_none(content.get('views_count')), 'like_count': int_or_none(content.get('likes')), 'dislike_count': int_or_none(content.get('dislikes')), 'season_number': int_or_none(season.get('season_number')), 'season_id': season.get('id'), 'release_year': int_or_none(content.get('release_year')), 
'formats': formats, } class YandexVideoPreviewIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?yandex\.\w{2,3}(?:\.(?:am|ge|il|tr))?/video/preview(?:/?\?.*?filmId=|/)(?P<id>\d+)' _TESTS = [{ # Odnoklassniki 'url': 'https://yandex.ru/video/preview/?filmId=10682852472978372885&text=summer', 'info_dict': { 'id': '1352565459459', 'ext': 'mp4', 'like_count': int, 'upload_date': '20191202', 'age_limit': 0, 'duration': 196, 'thumbnail': 'https://i.mycdn.me/videoPreview?id=544866765315&type=37&idx=13&tkn=TY5qjLYZHxpmcnK8U2LgzYkgmaU&fn=external_8', 'uploader_id': '481054701571', 'title': 'LOFT - summer, summer, summer HD', 'uploader': 'АРТЁМ КУДРОВ', }, }, { # youtube 'url': 'https://yandex.ru/video/preview/?filmId=4479424425337895262&source=main_redirect&text=видео&utm_source=main_stripe_big', 'only_matching': True, }, { # YandexVideo 'url': 'https://yandex.ru/video/preview/5275069442094787341', 'only_matching': True, }, { # youtube 'url': 'https://yandex.ru/video/preview/?filmId=16658118429797832897&from=tabbar&p=1&text=%D0%BF%D1%80%D0%BE%D1%81%D0%BC%D0%BE%D1%82%D1%80+%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82%D0%B0+%D0%BC%D0%B0%D0%BB%D0%B5%D0%BD%D1%8C%D0%BA%D0%B8%D0%B9+%D0%BF%D1%80%D0%B8%D0%BD%D1%86+%D0%BC%D1%8B+%D0%B2+%D0%BE%D1%82%D0%B2%D0%B5%D1%82%D0%B5+%D0%B7%D0%B0+%D1%82%D0%B5%D1%85+%D0%BA%D0%BE%D0%B3%D0%BE+%D0%BF%D1%80%D0%B8%D1%80%D1%83%D1%87%D0%B8%D0%BB%D0%B8', 'only_matching': True, }, { # Odnoklassniki 'url': 'https://yandex.ru/video/preview/?text=Francis%20Lai%20-%20Le%20Bon%20Et%20Les%20MC)chants&path=wizard&parent-reqid=1643208087979310-1481782809207673478-sas3-0931-2f9-sas-l7-balancer-8080-BAL-9380&wiz_type=vital&filmId=12508152936505397283', 'only_matching': True, }, { # Odnoklassniki 'url': 'https://yandex.com/video/preview/?text=dossier%2051%20film%201978&path=yandex_search&parent-reqid=1664361087754492-8727541069609384458-sas2-0340-sas-l7-balancer-8080-BAL-8045&noreask=1&from_type=vast&filmId=5794987234584444632', 'only_matching': True, }] def 
_real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) data_raw = self._search_regex(r'window.Ya.__inline_params__\s*=\s*JSON.parse\(\'([^"]+?\\u0022video\\u0022:[^"]+?})\'\);', webpage, 'data_raw') data_json = self._parse_json(data_raw, video_id, transform_source=lowercase_escape) return self.url_result(data_json['video']['url']) class ZenYandexBaseIE(InfoExtractor): def _fetch_ssr_data(self, url, video_id): webpage = self._download_webpage(url, video_id) redirect = self._search_json( r'(?:var|let|const)\s+it\s*=', webpage, 'redirect', video_id, default={}).get('retpath') if redirect: video_id = self._match_id(redirect) webpage = self._download_webpage(redirect, video_id, note='Redirecting') return video_id, self._search_json( r'(?:var|let|const)\s+_params\s*=\s*\(', webpage, 'metadata', video_id, contains_pattern=r'{["\']ssrData.+}')['ssrData'] class ZenYandexIE(ZenYandexBaseIE): IE_NAME = 'dzen.ru' IE_DESC = 'Дзен (dzen) formerly Яндекс.Дзен (Yandex Zen)' _VALID_URL = r'https?://(zen\.yandex|dzen)\.ru(?:/video)?/(media|watch)/(?:(?:id/[^/]+/|[^/]+/)(?:[a-z0-9-]+)-)?(?P<id>[a-z0-9-]+)' _TESTS = [{ 'url': 'https://zen.yandex.ru/media/id/606fd806cc13cb3c58c05cf5/vot-eto-focus-dedy-morozy-na-gidrociklah-60c7c443da18892ebfe85ed7', 'info_dict': { 'id': '60c7c443da18892ebfe85ed7', 'ext': 'mp4', 'title': 'ВОТ ЭТО Focus. Деды Морозы на гидроциклах', 'description': 'md5:8684912f6086f298f8078d4af0e8a600', 'thumbnail': 're:^https://avatars.dzeninfra.ru/', 'uploader': 'AcademeG DailyStream', }, 'params': { 'skip_download': 'm3u8', 'format': 'bestvideo', }, 'skip': 'The page does not exist', }, { 'url': 'https://dzen.ru/media/id/606fd806cc13cb3c58c05cf5/vot-eto-focus-dedy-morozy-na-gidrociklah-60c7c443da18892ebfe85ed7', 'info_dict': { 'id': '60c7c443da18892ebfe85ed7', 'ext': 'mp4', 'title': 'ВОТ ЭТО Focus. 
Деды Морозы на гидроциклах', 'description': 'md5:8684912f6086f298f8078d4af0e8a600', 'thumbnail': r're:^https://avatars\.dzeninfra\.ru/', 'uploader': 'AcademeG DailyStream', 'upload_date': '20191111', 'timestamp': 1573465585, }, 'params': {'skip_download': 'm3u8'}, 'skip': 'The page does not exist', }, { 'url': 'https://zen.yandex.ru/video/watch/6002240ff8b1af50bb2da5e3', 'info_dict': { 'id': '6002240ff8b1af50bb2da5e3', 'ext': 'mp4', 'title': 'Извержение вулкана из спичек: зрелищный опыт', 'description': 'md5:053ad3c61b5596d510c9a199dc8ee633', 'thumbnail': r're:^https://avatars\.dzeninfra\.ru/', 'uploader': 'TechInsider', 'timestamp': 1611378221, 'upload_date': '20210123', 'view_count': int, 'duration': 243, 'tags': ['опыт', 'эксперимент', 'огонь'], }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://dzen.ru/video/watch/6002240ff8b1af50bb2da5e3', 'info_dict': { 'id': '6002240ff8b1af50bb2da5e3', 'ext': 'mp4', 'title': 'Извержение вулкана из спичек: зрелищный опыт', 'description': 'md5:053ad3c61b5596d510c9a199dc8ee633', 'thumbnail': 're:^https://avatars.dzeninfra.ru/', 'uploader': 'TechInsider', 'upload_date': '20210123', 'timestamp': 1611378221, 'view_count': int, 'duration': 243, 'tags': ['опыт', 'эксперимент', 'огонь'], }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://zen.yandex.ru/media/id/606fd806cc13cb3c58c05cf5/novyi-samsung-fold-3-moskvich-barahlit-612f93b7f8d48e7e945792a2?from=channel&rid=2286618386.482.1630817595976.42360', 'only_matching': True, }, { 'url': 'https://dzen.ru/media/id/606fd806cc13cb3c58c05cf5/novyi-samsung-fold-3-moskvich-barahlit-612f93b7f8d48e7e945792a2?from=channel&rid=2286618386.482.1630817595976.42360', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) video_id, ssr_data = self._fetch_ssr_data(url, video_id) video_data = ssr_data['videoMetaResponse'] formats, subtitles = [], {} quality = qualities(('4', '0', '1', '2', '3', '5', '6', '7')) # Deduplicate stream URLs. 
The "dzen_dash" query parameter is present in some URLs but can be omitted stream_urls = set(traverse_obj(video_data, ( 'video', ('id', ('streams', ...), ('mp4Streams', ..., 'url'), ('oneVideoStreams', ..., 'url')), {url_or_none}, {update_url_query(query={'dzen_dash': []})}))) for s_url in stream_urls: ext = determine_ext(s_url) content_type = traverse_obj(parse_qs(s_url), ('ct', 0)) if ext == 'mpd' or content_type == '6': fmts, subs = self._extract_mpd_formats_and_subtitles(s_url, video_id, mpd_id='dash', fatal=False) elif ext == 'm3u8' or content_type == '8': fmts, subs = self._extract_m3u8_formats_and_subtitles(s_url, video_id, 'mp4', m3u8_id='hls', fatal=False) elif content_type == '0': format_type = traverse_obj(parse_qs(s_url), ('type', 0)) formats.append({ 'url': s_url, 'format_id': format_type, 'ext': 'mp4', 'quality': quality(format_type), }) continue else: self.report_warning(f'Unsupported stream URL: {s_url}{bug_reports_message()}') continue formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, **traverse_obj(video_data, { 'title': ('title', {str}), 'description': ('description', {str}), 'thumbnail': ('image', {url_or_none}), 'duration': ('video', 'duration', {int_or_none}), 'view_count': ('video', 'views', {int_or_none}), 'timestamp': ('publicationDate', {int_or_none}), 'tags': ('tags', ..., {str}), 'uploader': ('source', 'title', {str}), }), } class ZenYandexChannelIE(ZenYandexBaseIE): IE_NAME = 'dzen.ru:channel' _VALID_URL = r'https?://(zen\.yandex|dzen)\.ru/(?!media|video)(?:id/)?(?P<id>[a-z0-9-_]+)' _TESTS = [{ 'url': 'https://zen.yandex.ru/tok_media', 'info_dict': { 'id': 'tok_media', 'title': 'СПЕКТР', 'description': 'md5:a9e5b3c247b7fe29fd21371a428bcf56', }, 'playlist_mincount': 169, 'skip': 'The page does not exist', }, { 'url': 'https://dzen.ru/tok_media', 'info_dict': { 'id': 'tok_media', 'title': 'СПЕКТР', 'description': 'md5:a9e5b3c247b7fe29fd21371a428bcf56', 
}, 'playlist_mincount': 169, 'skip': 'The page does not exist', }, { 'url': 'https://zen.yandex.ru/id/606fd806cc13cb3c58c05cf5', 'info_dict': { 'id': '606fd806cc13cb3c58c05cf5', 'description': 'md5:517b7c97d8ca92e940f5af65448fd928', 'title': 'AcademeG DailyStream', }, 'playlist_mincount': 657, }, { # Test that the playlist extractor finishes extracting when the # channel has less than one page 'url': 'https://zen.yandex.ru/jony_me', 'info_dict': { 'id': 'jony_me', 'description': 'md5:7c30d11dc005faba8826feae99da3113', 'title': 'JONY', }, 'playlist_count': 18, }, { # Test that the playlist extractor finishes extracting when the # channel has more than one page of entries 'url': 'https://zen.yandex.ru/tatyanareva', 'info_dict': { 'id': 'tatyanareva', 'description': 'md5:92e56fa730a932ca2483ba5c2186ad96', 'title': 'Татьяна Рева', }, 'playlist_mincount': 46, }, { 'url': 'https://dzen.ru/id/606fd806cc13cb3c58c05cf5', 'info_dict': { 'id': '606fd806cc13cb3c58c05cf5', 'title': 'AcademeG DailyStream', 'description': 'md5:517b7c97d8ca92e940f5af65448fd928', }, 'playlist_mincount': 657, }] def _entries(self, feed_data, channel_id): next_page_id = None for page in itertools.count(1): for item in traverse_obj(feed_data, ( (None, ('items', lambda _, v: v['tab'] in ('shorts', 'longs'))), 'items', lambda _, v: url_or_none(v['link']), )): yield self.url_result(item['link'], ZenYandexIE, item.get('id'), title=item.get('title')) more = traverse_obj(feed_data, ('more', 'link', {url_or_none})) current_page_id = next_page_id next_page_id = traverse_obj(parse_qs(more), ('next_page_id', -1)) if not all((more, next_page_id, next_page_id != current_page_id)): break feed_data = self._download_json(more, channel_id, note=f'Downloading Page {page}') def _real_extract(self, url): channel_id = self._match_id(url) channel_id, ssr_data = self._fetch_ssr_data(url, channel_id) channel_data = ssr_data['exportResponse'] return self.playlist_result( self._entries(channel_data['feedData'], channel_id), 
channel_id, **traverse_obj(channel_data, ('channel', 'source', { 'title': ('title', {str}), 'description': ('description', {str}), })))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/faz.py
yt_dlp/extractor/faz.py
import re from .common import InfoExtractor from ..compat import compat_etree_fromstring from ..utils import ( int_or_none, xpath_element, xpath_text, ) class FazIE(InfoExtractor): IE_NAME = 'faz.net' _VALID_URL = r'https?://(?:www\.)?faz\.net/(?:[^/]+/)*.*?-(?P<id>\d+)\.html' _TESTS = [{ 'url': 'http://www.faz.net/multimedia/videos/stockholm-chemie-nobelpreis-fuer-drei-amerikanische-forscher-12610585.html', 'info_dict': { 'id': '12610585', 'ext': 'mp4', 'title': 'Stockholm: Chemie-Nobelpreis für drei amerikanische Forscher', 'description': 'md5:1453fbf9a0d041d985a47306192ea253', }, }, { 'url': 'http://www.faz.net/aktuell/politik/berlin-gabriel-besteht-zerreissprobe-ueber-datenspeicherung-13659345.html', 'only_matching': True, }, { 'url': 'http://www.faz.net/berlin-gabriel-besteht-zerreissprobe-ueber-datenspeicherung-13659345.html', 'only_matching': True, }, { 'url': 'http://www.faz.net/-13659345.html', 'only_matching': True, }, { 'url': 'http://www.faz.net/aktuell/politik/-13659345.html', 'only_matching': True, }, { 'url': 'http://www.faz.net/foobarblafasel-13659345.html', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) description = self._og_search_description(webpage) media = self._html_search_regex( r"data-videojs-media='([^']+)", webpage, 'media') if media == 'extern': perform_url = self._search_regex( r"<iframe[^>]+?src='((?:http:)?//player\.performgroup\.com/eplayer/eplayer\.html#/?[0-9a-f]{26}\.[0-9a-z]{26})", webpage, 'perform url') return self.url_result(perform_url) config = compat_etree_fromstring(media) encodings = xpath_element(config, 'ENCODINGS', 'encodings', True) formats = [] for pref, code in enumerate(['LOW', 'HIGH', 'HQ']): encoding = xpath_element(encodings, code) if encoding is not None: encoding_url = xpath_text(encoding, 'FILENAME') if encoding_url: tbr = xpath_text(encoding, 'AVERAGEBITRATE', 1000) if tbr: tbr = int_or_none(tbr.replace(',', '.')) f = { 
'url': encoding_url, 'format_id': code.lower(), 'quality': pref, 'tbr': tbr, 'vcodec': xpath_text(encoding, 'CODEC'), } mobj = re.search(r'(\d+)x(\d+)_(\d+)\.mp4', encoding_url) if mobj: f.update({ 'width': int(mobj.group(1)), 'height': int(mobj.group(2)), 'tbr': tbr or int(mobj.group(3)), }) formats.append(f) return { 'id': video_id, 'title': self._og_search_title(webpage), 'formats': formats, 'description': description.strip() if description else None, 'thumbnail': xpath_text(config, 'STILL/STILL_BIG'), 'duration': int_or_none(xpath_text(config, 'DURATION')), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/threeqsdn.py
yt_dlp/extractor/threeqsdn.py
from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, determine_ext, float_or_none, int_or_none, join_nonempty, parse_iso8601, ) class ThreeQSDNIE(InfoExtractor): IE_NAME = '3qsdn' IE_DESC = '3Q SDN' _VALID_URL = r'https?://playout\.3qsdn\.com/(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})' _EMBED_REGEX = [rf'<iframe[^>]+\b(?:data-)?src=(["\'])(?P<url>{_VALID_URL}.*?)\1'] _TESTS = [{ # https://player.3qsdn.com/demo.html 'url': 'https://playout.3qsdn.com/7201c779-6b3c-11e7-a40e-002590c750be', 'md5': '64a57396b16fa011b15e0ea60edce918', 'info_dict': { 'id': '7201c779-6b3c-11e7-a40e-002590c750be', 'ext': 'mp4', 'title': 'Video Ads', 'is_live': False, 'description': 'Video Ads Demo', 'timestamp': 1500334803, 'upload_date': '20170717', 'duration': 888.032, 'subtitles': { 'eng': 'count:1', }, }, 'expected_warnings': ['Unknown MIME type application/mp4 in DASH manifest'], }, { # live video stream 'url': 'https://playout.3qsdn.com/66e68995-11ca-11e8-9273-002590c750be', 'info_dict': { 'id': '66e68995-11ca-11e8-9273-002590c750be', 'ext': 'mp4', 'title': 're:^66e68995-11ca-11e8-9273-002590c750be [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'is_live': True, }, 'params': { 'skip_download': True, # m3u8 downloads }, }, { # live audio stream 'url': 'http://playout.3qsdn.com/9edf36e0-6bf2-11e2-a16a-9acf09e2db48', 'only_matching': True, }, { # live audio stream with some 404 URLs 'url': 'http://playout.3qsdn.com/ac5c3186-777a-11e2-9c30-9acf09e2db48', 'only_matching': True, }, { # geo restricted with 'This content is not available in your country' 'url': 'http://playout.3qsdn.com/d63a3ffe-75e8-11e2-9c30-9acf09e2db48', 'only_matching': True, }, { # geo restricted with 'playout.3qsdn.com/forbidden' 'url': 'http://playout.3qsdn.com/8e330f26-6ae2-11e2-a16a-9acf09e2db48', 'only_matching': True, }, { # live video with rtmp link 'url': 'https://playout.3qsdn.com/6092bb9e-8f72-11e4-a173-002590c750be', 
'only_matching': True, }, { # ondemand from http://www.philharmonie.tv/veranstaltung/26/ 'url': 'http://playout.3qsdn.com/0280d6b9-1215-11e6-b427-0cc47a188158?protocol=http', 'only_matching': True, }, { # live video stream 'url': 'https://playout.3qsdn.com/d755d94b-4ab9-11e3-9162-0025907ad44f?js=true', 'only_matching': True, }] def _extract_from_webpage(self, url, webpage): for res in super()._extract_from_webpage(url, webpage): yield { **res, '_type': 'url_transparent', 'uploader': self._search_regex(r'^(?:https?://)?([^/]*)/.*', url, 'video uploader'), } def _real_extract(self, url): video_id = self._match_id(url) try: config = self._download_json( url.replace('://playout.3qsdn.com/', '://playout.3qsdn.com/config/'), video_id) except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 401: self.raise_geo_restricted() raise live = config.get('streamContent') == 'live' aspect = float_or_none(config.get('aspect')) formats = [] subtitles = {} for source_type, source in (config.get('sources') or {}).items(): if not source: continue if source_type == 'dash': fmts, subs = self._extract_mpd_formats_and_subtitles( source, video_id, mpd_id='mpd', fatal=False) formats.extend(fmts) subtitles = self._merge_subtitles(subtitles, subs) elif source_type == 'hls': fmts, subs = self._extract_m3u8_formats_and_subtitles( source, video_id, 'mp4', live=live, m3u8_id='hls', fatal=False) formats.extend(fmts) subtitles = self._merge_subtitles(subtitles, subs) elif source_type == 'progressive': for s in source: src = s.get('src') if not (src and self._is_valid_url(src, video_id)): continue ext = determine_ext(src) height = int_or_none(s.get('height')) formats.append({ 'ext': ext, 'format_id': join_nonempty('http', ext, height and f'{height}p'), 'height': height, 'source_preference': 0, 'url': src, 'vcodec': 'none' if height == 0 else None, 'width': int(height * aspect) if height and aspect else None, }) for subtitle in (config.get('subtitles') or []): src = 
subtitle.get('src') if not src: continue subtitles.setdefault(subtitle.get('label') or 'eng', []).append({ 'url': src, }) title = config.get('title') or video_id return { 'id': video_id, 'title': title, 'thumbnail': config.get('poster') or None, 'description': config.get('description') or None, 'timestamp': parse_iso8601(config.get('upload_date')), 'duration': float_or_none(config.get('vlength')) or None, 'is_live': live, 'formats': formats, 'subtitles': subtitles, # It seems like this would be correctly handled by default # However, unless someone can confirm this, the old # behaviour is being kept as-is '_format_sort_fields': ('res', 'source_preference'), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nuvid.py
yt_dlp/extractor/nuvid.py
import re from .common import InfoExtractor from ..utils import ( int_or_none, parse_duration, strip_or_none, traverse_obj, url_or_none, ) class NuvidIE(InfoExtractor): _VALID_URL = r'https?://(?:www|m)\.nuvid\.com/video/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://www.nuvid.com/video/6513023/italian-babe', 'md5': '772d2f8288f3d3c5c45f7a41761c7844', 'info_dict': { 'id': '6513023', 'ext': 'mp4', 'title': 'italian babe', 'duration': 321.0, 'age_limit': 18, 'thumbnail': r're:https?://.+\.jpg', }, }, { 'url': 'https://m.nuvid.com/video/6523263', 'md5': 'ebd22ce8e47e1d9a4d0756a15c67da52', 'info_dict': { 'id': '6523263', 'ext': 'mp4', 'title': 'Slut brunette college student anal dorm', 'duration': 421.0, 'age_limit': 18, 'thumbnail': r're:https?://.+\.jpg', 'thumbnails': list, }, }, { 'url': 'http://m.nuvid.com/video/6415801/', 'md5': '638d5ececb138d5753593f751ae3f697', 'info_dict': { 'id': '6415801', 'ext': 'mp4', 'title': 'My best friend wanted to fuck my wife for a long time', 'duration': 1882, 'age_limit': 18, 'thumbnail': r're:https?://.+\.jpg', }, }] def _real_extract(self, url): video_id = self._match_id(url) qualities = { 'lq': '360p', 'hq': '720p', } json_url = f'https://www.nuvid.com/player_config_json/?vid={video_id}&aid=0&domain_id=0&embed=0&check_speed=0' video_data = self._download_json( json_url, video_id, headers={ 'Accept': 'application/json, text/javascript, */*; q = 0.01', 'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8', }) webpage = self._download_webpage( f'http://m.nuvid.com/video/{video_id}', video_id, 'Downloading video page', fatal=False) or '' title = strip_or_none(video_data.get('title') or self._html_search_regex( (r'''<span\s[^>]*?\btitle\s*=\s*(?P<q>"|'|\b)(?P<title>[^"]+)(?P=q)\s*>''', r'''<div\s[^>]*?\bclass\s*=\s*(?P<q>"|'|\b)thumb-holder video(?P=q)>\s*<h5\b[^>]*>(?P<title>[^<]+)</h5''', r'''<span\s[^>]*?\bclass\s*=\s*(?P<q>"|'|\b)title_thumb(?P=q)>(?P<title>[^<]+)</span'''), webpage, 'title', group='title')) formats = 
[{ 'url': source, 'format_id': qualities.get(quality), 'height': int_or_none(qualities.get(quality)[:-1]), } for quality, source in video_data.get('files').items() if source] self._check_formats(formats, video_id) duration = parse_duration(traverse_obj(video_data, 'duration', 'duration_format')) thumbnails = [ {'url': thumb_url} for thumb_url in re.findall( r'<div\s+class\s*=\s*"video-tmb-wrap"\s*>\s*<img\s+src\s*=\s*"([^"]+)"\s*/>', webpage) if url_or_none(thumb_url)] if url_or_none(video_data.get('poster')): thumbnails.append({'url': video_data['poster'], 'preference': 1}) return { 'id': video_id, 'formats': formats, 'title': title, 'thumbnails': thumbnails, 'duration': duration, 'age_limit': 18, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/piapro.py
yt_dlp/extractor/piapro.py
import urllib.parse from .common import InfoExtractor from ..utils import ( ExtractorError, clean_html, get_element_by_class, parse_duration, parse_filesize, str_to_int, unified_timestamp, urlencode_postdata, ) class PiaproIE(InfoExtractor): _NETRC_MACHINE = 'piapro' _VALID_URL = r'https?://piapro\.jp/(?:t|content)/(?P<id>[\w-]+)/?' _TESTS = [{ 'url': 'https://piapro.jp/t/NXYR', 'md5': 'f7c0f760913fb1d44a1c45a4af793909', 'info_dict': { 'id': 'NXYR', 'ext': 'mp3', 'uploader': 'wowaka', 'uploader_id': 'wowaka', 'title': '裏表ラバーズ', 'description': 'http://www.nicovideo.jp/watch/sm8082467', 'duration': 189.0, 'timestamp': 1251785475, 'thumbnail': r're:^https?://.*\.(?:png|jpg)$', 'upload_date': '20090901', 'view_count': int, }, }, { 'note': 'There are break lines in description, mandating (?s) flag', 'url': 'https://piapro.jp/t/9cSd', 'md5': '952bb6d1e8de95050206408a87790676', 'info_dict': { 'id': '9cSd', 'ext': 'mp3', 'title': '青に溶けた風船 / 初音ミク', 'description': 'md5:d395a9bd151447631a5a1460bc7f9132', 'uploader': 'シアン・キノ', 'duration': 229.0, 'timestamp': 1644030039, 'upload_date': '20220205', 'view_count': int, 'thumbnail': r're:^https?://.*\.(?:png|jpg)$', 'uploader_id': 'cyankino', }, }, { 'url': 'https://piapro.jp/content/hcw0z3a169wtemz6', 'only_matching': True, }, { 'url': 'https://piapro.jp/t/-SO-', 'only_matching': True, }] _login_status = False def _perform_login(self, username, password): login_ok = True login_form_strs = { '_username': username, '_password': password, '_remember_me': 'on', 'login': 'ログイン', } self._request_webpage('https://piapro.jp/login/', None) urlh = self._request_webpage( 'https://piapro.jp/login/exe', None, note='Logging in', errnote='Unable to log in', data=urlencode_postdata(login_form_strs)) if urlh is False: login_ok = False else: parts = urllib.parse.urlparse(urlh.url) if parts.path != '/': login_ok = False if not login_ok: self.report_warning( 'unable to log in: bad username or password') self._login_status = login_ok def 
_real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) category_id = self._search_regex(r'categoryId=(.+)">', webpage, 'category ID') if category_id not in ('1', '2', '21', '22', '23', '24', '25'): raise ExtractorError('The URL does not contain audio.', expected=True) def extract_info(name, description): return self._search_regex(rf'{name}[::]\s*([\d\s,:/]+)\s*</p>', webpage, description, default=None) return { 'id': video_id, 'title': clean_html(get_element_by_class('contents_title', webpage)), 'description': clean_html(get_element_by_class('contents_description', webpage)), 'uploader': clean_html(get_element_by_class('contents_creator_txt', webpage)), 'uploader_id': self._search_regex( r'<a\s+href="/([^"]+)"', get_element_by_class('contents_creator', webpage), 'uploader id', default=None), 'timestamp': unified_timestamp(extract_info('投稿日', 'timestamp'), False), 'duration': parse_duration(extract_info('長さ', 'duration')), 'view_count': str_to_int(extract_info('閲覧数', 'view count')), 'thumbnail': self._html_search_meta('twitter:image', webpage), 'filesize_approx': parse_filesize((extract_info('サイズ', 'size') or '').replace(',', '')), 'url': self._search_regex(r'\"url\":\s*\"(.*?)\"', webpage, 'url'), 'ext': 'mp3', 'vcodec': 'none', }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/dailymotion.py
yt_dlp/extractor/dailymotion.py
import functools import json import re import urllib.parse from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, OnDemandPagedList, age_restricted, clean_html, extract_attributes, int_or_none, traverse_obj, try_get, unescapeHTML, unsmuggle_url, update_url, url_or_none, urlencode_postdata, ) class DailymotionBaseInfoExtractor(InfoExtractor): _FAMILY_FILTER = None _HEADERS = { 'Content-Type': 'application/json', 'Origin': 'https://www.dailymotion.com', } _NETRC_MACHINE = 'dailymotion' def _get_dailymotion_cookies(self): return self._get_cookies('https://www.dailymotion.com/') @staticmethod def _get_cookie_value(cookies, name): cookie = cookies.get(name) if cookie: return cookie.value def _set_dailymotion_cookie(self, name, value): self._set_cookie('www.dailymotion.com', name, value) def _real_initialize(self): cookies = self._get_dailymotion_cookies() ff = self._get_cookie_value(cookies, 'ff') self._FAMILY_FILTER = ff == 'on' if ff else age_restricted(18, self.get_param('age_limit')) self._set_dailymotion_cookie('ff', 'on' if self._FAMILY_FILTER else 'off') def _get_token(self, xid): cookies = self._get_dailymotion_cookies() token = self._get_cookie_value(cookies, 'access_token') or self._get_cookie_value(cookies, 'client_token') if token: return token data = { 'client_id': 'f1a362d288c1b98099c7', 'client_secret': 'eea605b96e01c796ff369935357eca920c5da4c5', } username, password = self._get_login_info() if username: data.update({ 'grant_type': 'password', 'password': password, 'username': username, }) else: data['grant_type'] = 'client_credentials' try: token = self._download_json( 'https://graphql.api.dailymotion.com/oauth/token', None, 'Downloading Access Token', data=urlencode_postdata(data))['access_token'] except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 400: raise ExtractorError(self._parse_json( e.cause.response.read().decode(), xid)['error_description'], 
expected=True) raise self._set_dailymotion_cookie('access_token' if username else 'client_token', token) return token def _call_api(self, object_type, xid, object_fields, note, filter_extra=None): if not self._HEADERS.get('Authorization'): self._HEADERS['Authorization'] = f'Bearer {self._get_token(xid)}' resp = self._download_json( 'https://graphql.api.dailymotion.com/', xid, note, data=json.dumps({ 'query': '''{ %s(xid: "%s"%s) { %s } }''' % (object_type, xid, ', ' + filter_extra if filter_extra else '', object_fields), # noqa: UP031 }).encode(), headers=self._HEADERS) obj = resp['data'][object_type] if not obj: raise ExtractorError(resp['errors'][0]['message'], expected=True) return obj class DailymotionIE(DailymotionBaseInfoExtractor): _VALID_URL = r'''(?ix) (?:https?:)?// (?: dai\.ly/| (?: (?:(?:www|touch|geo)\.)?dailymotion\.[a-z]{2,3}| (?:www\.)?lequipe\.fr )/ (?: swf/(?!video)| (?:(?:crawler|embed|swf)/)?video/| player(?:/[\da-z]+)?\.html\?(?:video|(?P<is_playlist>playlist))= ) ) (?P<id>[^/?_&#]+)(?:[\w-]*\?playlist=(?P<playlist_id>x[0-9a-z]+))? ''' IE_NAME = 'dailymotion' _EMBED_REGEX = [rf'(?ix)<(?:(?:embed|iframe)[^>]+?src=|input[^>]+id=[\'"]dmcloudUrlEmissionSelect[\'"][^>]+value=)["\'](?P<url>{_VALID_URL[5:]})'] _TESTS = [{ 'url': 'http://www.dailymotion.com/video/x5kesuj_office-christmas-party-review-jason-bateman-olivia-munn-t-j-miller_news', 'info_dict': { 'id': 'x5kesuj', 'ext': 'mp4', 'title': 'Office Christmas Party Review – Jason Bateman, Olivia Munn, T.J. Miller', 'description': 'Office Christmas Party Review - Jason Bateman, Olivia Munn, T.J. 
Miller', 'duration': 187, 'tags': 'count:5', 'thumbnail': r're:https?://s[12]\.dmcdn\.net/v/.+', 'timestamp': 1493651285, 'upload_date': '20170501', 'uploader': 'Deadline', 'uploader_id': 'x1xm8ri', 'age_limit': 0, 'view_count': int, 'like_count': int, }, }, { 'url': 'https://geo.dailymotion.com/player.html?video=x89eyek&mute=true', 'info_dict': { 'id': 'x89eyek', 'ext': 'mp4', 'title': 'En quête d\'esprit du 27/03/2022', 'description': 'md5:66542b9f4df2eb23f314fc097488e553', 'duration': 2756, 'tags': 'count:1', 'thumbnail': r're:https?://s[12]\.dmcdn\.net/v/.+', 'timestamp': 1648383669, 'upload_date': '20220327', 'uploader': 'CNEWS', 'uploader_id': 'x24vth', 'age_limit': 0, 'view_count': int, 'like_count': int, }, }, { 'url': 'https://www.dailymotion.com/video/x2iuewm_steam-machine-models-pricing-listed-on-steam-store-ign-news_videogames', 'md5': '2137c41a8e78554bb09225b8eb322406', 'info_dict': { 'id': 'x2iuewm', 'ext': 'mp4', 'title': 'Steam Machine Models, Pricing Listed on Steam Store - IGN News', 'description': 'Several come bundled with the Steam Controller.', 'duration': 74, 'thumbnail': r're:https?://s[12]\.dmcdn\.net/v/.+', 'timestamp': 1425657362, 'upload_date': '20150306', 'uploader': 'IGN', 'uploader_id': 'xijv66', 'age_limit': 0, 'view_count': int, }, 'skip': 'video gone', }, { # age-restricted video 'url': 'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband', 'md5': '0d667a7b9cebecc3c89ee93099c4159d', 'info_dict': { 'id': 'xyh2zz', 'ext': 'mp4', 'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]', 'uploader': 'HotWaves1012', 'age_limit': 18, }, 'skip': 'video gone', }, { # geo-restricted, player v5 'url': 'http://www.dailymotion.com/video/xhza0o', 'only_matching': True, }, { # with subtitles 'url': 'http://www.dailymotion.com/video/x20su5f_the-power-of-nightmares-1-the-rise-of-the-politics-of-fear-bbc-2004_news', 'only_matching': True, }, { 'url': 
'http://www.dailymotion.com/swf/video/x3n92nf', 'only_matching': True, }, { 'url': 'http://www.dailymotion.com/swf/x3ss1m_funny-magic-trick-barry-and-stuart_fun', 'only_matching': True, }, { 'url': 'https://www.lequipe.fr/video/x791mem', 'only_matching': True, }, { 'url': 'https://www.lequipe.fr/video/k7MtHciueyTcrFtFKA2', 'only_matching': True, }, { 'url': 'https://www.dailymotion.com/video/x3z49k?playlist=xv4bw', 'only_matching': True, }, { 'url': 'https://geo.dailymotion.com/player/x86gw.html?video=k46oCapRs4iikoz9DWy', 'only_matching': True, }, { 'url': 'https://geo.dailymotion.com/player/xakln.html?video=x8mjju4&customConfig%5BcustomParams%5D=%2Ffr-fr%2Ftennis%2Fwimbledon-mens-singles%2Farticles-video', 'only_matching': True, }, { # playlist-only 'url': 'https://geo.dailymotion.com/player/xf7zn.html?playlist=x7wdsj', 'only_matching': True, }, { 'url': 'https://geo.dailymotion.com/player/xmyye.html?video=x93blhi', 'only_matching': True, }, { 'url': 'https://www.dailymotion.com/crawler/video/x8u4owg', 'only_matching': True, }, { 'url': 'https://www.dailymotion.com/embed/video/x8u4owg', 'only_matching': True, }, { 'url': 'https://dai.ly/x94cnnk', 'only_matching': True, }] _WEBPAGE_TESTS = [{ # https://geo.dailymotion.com/player/xmyye.html?video=x93blhi 'url': 'https://www.financialounge.com/video/2024/08/01/borse-europee-in-rosso-dopo-la-fed-a-milano-volano-mediobanca-e-tim-edizione-del-1-agosto/', 'info_dict': { 'id': 'x93blhi', 'ext': 'mp4', 'title': 'OnAir - 01/08/24', 'description': '', 'duration': 217, 'timestamp': 1722505658, 'upload_date': '20240801', 'uploader': 'Financialounge', 'uploader_id': 'x2vtgmm', 'age_limit': 0, 'tags': [], 'thumbnail': r're:https?://s[12]\.dmcdn\.net/v/.+', 'view_count': int, 'like_count': int, }, }, { # https://geo.dailymotion.com/player/xf7zn.html?playlist=x7wdsj 'url': 'https://www.cycleworld.com/blogs/ask-kevin/ducati-continues-to-evolve-with-v4/', 'info_dict': { 'id': 'x7wdsj', }, 'playlist_mincount': 50, }, { # 
https://www.dailymotion.com/crawler/video/x8u4owg 'url': 'https://www.leparisien.fr/environnement/video-le-veloto-la-voiture-a-pedales-qui-aimerait-se-faire-une-place-sur-les-routes-09-03-2024-KCYMCPM4WFHJXMSKBUI66UNFPU.php', 'info_dict': { 'id': 'x8u4owg', 'ext': 'mp4', 'description': 'À bord du « véloto », l’alternative à la voiture pour la campagne', 'like_count': int, 'uploader': 'Le Parisien', 'upload_date': '20240309', 'view_count': int, 'tags': 'count:7', 'thumbnail': r're:https?://www\.leparisien\.fr/.+\.jpg', 'timestamp': 1709997866, 'age_limit': 0, 'uploader_id': 'x32f7b', 'title': 'VIDÉO. Le «\xa0véloto\xa0», la voiture à pédales qui aimerait se faire une place sur les routes', 'duration': 428.0, }, }, { # https://geo.dailymotion.com/player/xry80.html?video=x8vu47w 'url': 'https://www.metatube.com/en/videos/546765/This-frogs-decorates-Christmas-tree/', 'info_dict': { 'id': 'x8vu47w', 'ext': 'mp4', 'like_count': int, 'uploader': 'Metatube', 'upload_date': '20240326', 'view_count': int, 'thumbnail': r're:https?://s[12]\.dmcdn\.net/v/.+', 'timestamp': 1711496732, 'age_limit': 0, 'uploader_id': 'x2xpy74', 'title': 'Está lindas ranitas ponen su arbolito', 'duration': 28, 'description': 'Que lindura', 'tags': [], }, 'skip': 'Invalid URL', }, { # //geo.dailymotion.com/player/xysxq.html?video=k2Y4Mjp7krAF9iCuINM 'url': 'https://lcp.fr/programmes/avant-la-catastrophe-la-naissance-de-la-dictature-nazie-1933-1936-346819', 'info_dict': { 'id': 'k2Y4Mjp7krAF9iCuINM', 'ext': 'mp4', 'title': 'Avant la catastrophe la naissance de la dictature nazie 1933 -1936', 'description': 'md5:7b620d5e26edbe45f27bbddc1c0257c1', 'uploader': 'LCP Assemblée nationale', 'uploader_id': 'xbz33d', 'view_count': int, 'like_count': int, 'age_limit': 0, 'duration': 3220, 'tags': [], 'thumbnail': r're:https?://s[12]\.dmcdn\.net/v/.+', 'timestamp': 1739919947, 'upload_date': '20250218', }, 'skip': 'Invalid URL', }, { 'url': 
'https://forum.ionicframework.com/t/ionic-2-jw-player-dailymotion-player/83248', 'info_dict': { 'id': 'xwr14q', 'ext': 'mp4', 'title': 'Macklemore & Ryan Lewis - Thrift Shop (feat. Wanz)', 'age_limit': 0, 'description': 'md5:47fbe168b5a6ddc4a205e20dd6c841b2', 'duration': 234, 'like_count': int, 'tags': 'count:5', 'thumbnail': r're:https?://s[12]\.dmcdn\.net/v/.+', 'timestamp': 1358177670, 'upload_date': '20130114', 'uploader': 'Macklemore Official', 'uploader_id': 'x19qlwr', 'view_count': int, }, }] _GEO_BYPASS = False _COMMON_MEDIA_FIELDS = '''description geoblockedCountries { allowed } xid''' @classmethod def _extract_embed_urls(cls, url, webpage): # https://developer.dailymotion.com/player#player-parameters yield from super()._extract_embed_urls(url, webpage) for mobj in re.finditer( r'(?s)DM\.player\([^,]+,\s*{.*?video[\'"]?\s*:\s*["\']?(?P<id>[0-9a-zA-Z]+).+?}\s*\);', webpage): yield 'https://www.dailymotion.com/embed/video/' + mobj.group('id') for mobj in re.finditer( r'(?s)<script [^>]*\bsrc=(["\'])(?:https?:)?//[\w-]+\.dailymotion\.com/player/(?:(?!\1).)+\1[^>]*>', webpage): attrs = extract_attributes(mobj.group(0)) player_url = url_or_none(attrs.get('src')) if not player_url: continue player_url = player_url.replace('.js', '.html') if player_url.startswith('//'): player_url = f'https:{player_url}' if video_id := attrs.get('data-video'): query_string = f'video={video_id}' elif playlist_id := attrs.get('data-playlist'): query_string = f'playlist={playlist_id}' else: continue yield update_url(player_url, query=query_string) def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url) video_id, is_playlist, playlist_id = self._match_valid_url(url).group('id', 'is_playlist', 'playlist_id') if is_playlist: # We matched the playlist query param as video_id playlist_id = video_id video_id = None if self._yes_playlist(playlist_id, video_id): return self.url_result( f'http://www.dailymotion.com/playlist/{playlist_id}', 'DailymotionPlaylist', playlist_id) 
password = self.get_param('videopassword') media = self._call_api( 'media', video_id, '''... on Video { %s stats { likes { total } views { total } } } ... on Live { %s audienceCount isOnAir }''' % (self._COMMON_MEDIA_FIELDS, self._COMMON_MEDIA_FIELDS), 'Downloading media JSON metadata', # noqa: UP031 'password: "{}"'.format(self.get_param('videopassword')) if password else None) xid = media['xid'] metadata = self._download_json( 'https://www.dailymotion.com/player/metadata/video/' + xid, xid, 'Downloading metadata JSON', query=traverse_obj(smuggled_data, 'query') or {'app': 'com.dailymotion.neon'}) error = metadata.get('error') if error: title = error.get('title') or error['raw_message'] # See https://developer.dailymotion.com/api#access-error if error.get('code') == 'DM007': allowed_countries = try_get(media, lambda x: x['geoblockedCountries']['allowed'], list) self.raise_geo_restricted(msg=title, countries=allowed_countries) raise ExtractorError( f'{self.IE_NAME} said: {title}', expected=True) title = metadata['title'] is_live = media.get('isOnAir') formats = [] subtitles = {} for quality, media_list in metadata['qualities'].items(): for m in media_list: media_url = m.get('url') media_type = m.get('type') if not media_url or media_type == 'application/vnd.lumberjack.manifest': continue if media_type == 'application/x-mpegURL': fmt, subs = self._extract_m3u8_formats_and_subtitles( media_url, video_id, 'mp4', live=is_live, m3u8_id='hls', fatal=False) formats.extend(fmt) self._merge_subtitles(subs, target=subtitles) else: f = { 'url': media_url, 'format_id': 'http-' + quality, } m = re.search(r'/H264-(\d+)x(\d+)(?:-(60)/)?', media_url) if m: width, height, fps = map(int_or_none, m.groups()) f.update({ 'fps': fps, 'height': height, 'width': width, }) formats.append(f) for f in formats: f['url'] = f['url'].split('#')[0] if not f.get('fps') and f['format_id'].endswith('@60'): f['fps'] = 60 subtitles_data = try_get(metadata, lambda x: x['subtitles']['data'], dict) or {} 
for subtitle_lang, subtitle in subtitles_data.items(): subtitles[subtitle_lang] = [{ 'url': subtitle_url, } for subtitle_url in subtitle.get('urls', [])] thumbnails = traverse_obj(metadata, ( ('posters', 'thumbnails'), {dict.items}, lambda _, v: url_or_none(v[1]), { 'height': (0, {int_or_none}), 'id': (0, {str}), 'url': 1, })) owner = metadata.get('owner') or {} stats = media.get('stats') or {} get_count = lambda x: int_or_none(try_get(stats, lambda y: y[x + 's']['total'])) return { 'id': video_id, 'title': title, 'description': clean_html(media.get('description')), 'thumbnails': thumbnails, 'duration': int_or_none(metadata.get('duration')) or None, 'timestamp': int_or_none(metadata.get('created_time')), 'uploader': owner.get('screenname'), 'uploader_id': owner.get('id') or metadata.get('screenname'), 'age_limit': 18 if metadata.get('explicit') else 0, 'tags': metadata.get('tags'), 'view_count': get_count('view') or int_or_none(media.get('audienceCount')), 'like_count': get_count('like'), 'formats': formats, 'subtitles': subtitles, 'is_live': is_live, } class DailymotionPlaylistBaseIE(DailymotionBaseInfoExtractor): _PAGE_SIZE = 100 def _fetch_page(self, playlist_id, page): page += 1 videos = self._call_api( self._OBJECT_TYPE, playlist_id, '''videos(allowExplicit: %s, first: %d, page: %d) { edges { node { xid url } } }''' % ('false' if self._FAMILY_FILTER else 'true', self._PAGE_SIZE, page), f'Downloading page {page}')['videos'] for edge in videos['edges']: node = edge['node'] yield self.url_result( node['url'], DailymotionIE.ie_key(), node['xid']) def _real_extract(self, url): playlist_id = self._match_id(url) entries = OnDemandPagedList(functools.partial( self._fetch_page, playlist_id), self._PAGE_SIZE) return self.playlist_result( entries, playlist_id) class DailymotionPlaylistIE(DailymotionPlaylistBaseIE): IE_NAME = 'dailymotion:playlist' _VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>x[0-9a-z]+)' _TESTS = [{ 'url': 
'http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q', 'info_dict': { 'id': 'xv4bw', }, 'playlist_mincount': 20, }] _OBJECT_TYPE = 'collection' @classmethod def _extract_embed_urls(cls, url, webpage): # Look for embedded Dailymotion playlist player (#3822) for mobj in re.finditer( r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.[a-z]{2,3}/widget/jukebox\?.+?)\1', webpage): for p in re.findall(r'list\[\]=/playlist/([^/]+)/', unescapeHTML(mobj.group('url'))): yield f'//dailymotion.com/playlist/{p}' class DailymotionSearchIE(DailymotionPlaylistBaseIE): IE_NAME = 'dailymotion:search' _VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/search/(?P<id>[^/?#]+)/videos' _PAGE_SIZE = 20 _TESTS = [{ 'url': 'http://www.dailymotion.com/search/king of turtles/videos', 'info_dict': { 'id': 'king of turtles', 'title': 'king of turtles', }, 'playlist_mincount': 0, }] _SEARCH_QUERY = 'query SEARCH_QUERY( $query: String! $page: Int $limit: Int ) { search { videos( query: $query first: $limit page: $page ) { edges { node { xid } } } } } ' def _call_search_api(self, term, page, note): if not self._HEADERS.get('Authorization'): self._HEADERS['Authorization'] = f'Bearer {self._get_token(term)}' resp = self._download_json( 'https://graphql.api.dailymotion.com/', None, note, data=json.dumps({ 'operationName': 'SEARCH_QUERY', 'query': self._SEARCH_QUERY, 'variables': { 'limit': 20, 'page': page, 'query': term, }, }).encode(), headers=self._HEADERS) obj = traverse_obj(resp, ('data', 'search', {dict})) if not obj: raise ExtractorError( traverse_obj(resp, ('errors', 0, 'message', {str})) or 'Could not fetch search data') return obj def _fetch_page(self, term, page): page += 1 response = self._call_search_api(term, page, f'Searching "{term}" page {page}') for xid in traverse_obj(response, ('videos', 'edges', ..., 'node', 'xid')): yield self.url_result(f'https://www.dailymotion.com/video/{xid}', DailymotionIE, xid) def _real_extract(self, url): term = 
urllib.parse.unquote_plus(self._match_id(url)) return self.playlist_result( OnDemandPagedList(functools.partial(self._fetch_page, term), self._PAGE_SIZE), term, term) class DailymotionUserIE(DailymotionPlaylistBaseIE): IE_NAME = 'dailymotion:user' _VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?!(?:embed|swf|#|video|playlist|search|crawler)/)(?:(?:old/)?user/)?(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://www.dailymotion.com/user/nqtv', 'info_dict': { 'id': 'nqtv', }, 'playlist_mincount': 148, }, { 'url': 'http://www.dailymotion.com/user/UnderProject', 'info_dict': { 'id': 'UnderProject', }, 'playlist_mincount': 1000, 'skip': 'Takes too long time', }, { 'url': 'https://www.dailymotion.com/user/nqtv', 'info_dict': { 'id': 'nqtv', }, 'playlist_mincount': 148, 'params': { 'age_limit': 0, }, }] _OBJECT_TYPE = 'channel'
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/islamchannel.py
yt_dlp/extractor/islamchannel.py
import re from .common import InfoExtractor from ..utils import traverse_obj, urljoin class IslamChannelIE(InfoExtractor): _VALID_URL = r'https?://watch\.islamchannel\.tv/watch/(?P<id>\d+)' _TESTS = [{ 'url': 'https://watch.islamchannel.tv/watch/38604310', 'info_dict': { 'id': '38604310', 'title': 'Omar - Young Omar', 'description': 'md5:5cc7ddecef064ea7afe52eb5e0e33b55', 'thumbnail': r're:https?://.+', 'ext': 'mp4', }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) thumbnail = self._search_regex( r'data-poster="([^"]+)"', webpage, 'data poster', fatal=False) or \ self._html_search_meta(('og:image', 'twitter:image'), webpage) headers = { 'Token': self._search_regex(r'data-token="([^"]+)"', webpage, 'data token'), 'Token-Expiry': self._search_regex(r'data-expiry="([^"]+)"', webpage, 'data expiry'), 'Uvid': video_id, } show_stream = self._download_json( f'https://v2-streams-elb.simplestreamcdn.com/api/show/stream/{video_id}', video_id, query={ 'key': self._search_regex(r'data-key="([^"]+)"', webpage, 'data key'), 'platform': 'chrome', }, headers=headers) # TODO: show_stream['stream'] and show_stream['drm'] may contain something interesting streams = self._download_json( traverse_obj(show_stream, ('response', 'tokenization', 'url')), video_id, headers=headers) formats, subs = self._extract_m3u8_formats_and_subtitles(traverse_obj(streams, ('Streams', 'Adaptive')), video_id, 'mp4') return { 'id': video_id, 'title': self._html_search_meta(('og:title', 'twitter:title'), webpage), 'description': self._html_search_meta(('og:description', 'twitter:description', 'description'), webpage), 'formats': formats, 'subtitles': subs, 'thumbnails': [{ 'id': 'unscaled', 'url': thumbnail.split('?')[0], 'ext': 'jpg', 'preference': 2, }, { 'id': 'orig', 'url': thumbnail, 'ext': 'jpg', 'preference': 1, }] if thumbnail else None, } class IslamChannelSeriesIE(InfoExtractor): _VALID_URL = 
r'https?://watch\.islamchannel\.tv/series/(?P<id>[a-f\d-]+)' _TESTS = [{ 'url': 'https://watch.islamchannel.tv/series/a6cccef3-3ef1-11eb-bc19-06b69c2357cd', 'info_dict': { 'id': 'a6cccef3-3ef1-11eb-bc19-06b69c2357cd', }, 'playlist_mincount': 31, }] def _real_extract(self, url): pl_id = self._match_id(url) webpage = self._download_webpage(url, pl_id) return self.playlist_from_matches( re.finditer(r'<a\s+href="(/watch/\d+)"[^>]+?data-video-type="show">', webpage), pl_id, getter=lambda x: urljoin(url, x.group(1)), ie=IslamChannelIE)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tvanouvelles.py
yt_dlp/extractor/tvanouvelles.py
import re from .brightcove import BrightcoveNewIE from .common import InfoExtractor class TVANouvellesIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tvanouvelles\.ca/videos/(?P<id>\d+)' _TEST = { 'url': 'http://www.tvanouvelles.ca/videos/5117035533001', 'info_dict': { 'id': '5117035533001', 'ext': 'mp4', 'title': 'L’industrie du taxi dénonce l’entente entre Québec et Uber: explications', 'description': 'md5:479653b7c8cf115747bf5118066bd8b3', 'uploader_id': '1741764581', 'timestamp': 1473352030, 'upload_date': '20160908', }, 'add_ie': ['BrightcoveNew'], } BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1741764581/default_default/index.html?videoId=%s' def _real_extract(self, url): brightcove_id = self._match_id(url) return self.url_result( self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, BrightcoveNewIE.ie_key(), brightcove_id) class TVANouvellesArticleIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tvanouvelles\.ca/(?:[^/]+/)+(?P<id>[^/?#&]+)' _TEST = { 'url': 'http://www.tvanouvelles.ca/2016/11/17/des-policiers-qui-ont-la-meche-un-peu-courte', 'info_dict': { 'id': 'des-policiers-qui-ont-la-meche-un-peu-courte', 'title': 'Des policiers qui ont «la mèche un peu courte»?', 'description': 'md5:92d363c8eb0f0f030de9a4a84a90a3a0', }, 'playlist_mincount': 4, } @classmethod def suitable(cls, url): return False if TVANouvellesIE.suitable(url) else super().suitable(url) def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) entries = [ self.url_result( 'http://www.tvanouvelles.ca/videos/{}'.format(mobj.group('id')), ie=TVANouvellesIE.ie_key(), video_id=mobj.group('id')) for mobj in re.finditer( r'data-video-id=(["\'])?(?P<id>\d+)', webpage)] title = self._og_search_title(webpage, fatal=False) description = self._og_search_description(webpage) return self.playlist_result(entries, display_id, title, description)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/huffpost.py
yt_dlp/extractor/huffpost.py
import re from .common import InfoExtractor from ..utils import ( determine_ext, parse_duration, unified_strdate, ) class HuffPostIE(InfoExtractor): IE_DESC = 'Huffington Post' _VALID_URL = r'''(?x) https?://(embed\.)?live\.huffingtonpost\.com/ (?: r/segment/[^/]+/| HPLEmbedPlayer/\?segmentId= ) (?P<id>[0-9a-f]+)''' _EMBED_REGEX = [r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed\.live\.huffingtonpost\.com/.+?)\1'] _TEST = { 'url': 'http://live.huffingtonpost.com/r/segment/legalese-it/52dd3e4b02a7602131000677', 'md5': '55f5e8981c1c80a64706a44b74833de8', 'info_dict': { 'id': '52dd3e4b02a7602131000677', 'ext': 'mp4', 'title': 'Legalese It! with @MikeSacksHP', 'description': 'This week on Legalese It, Mike talks to David Bosco about his new book on the ICC, "Rough Justice," he also discusses the Virginia AG\'s historic stance on gay marriage, the execution of Edgar Tamayo, the ICC\'s delay of Kenya\'s President and more. ', 'duration': 1549, 'upload_date': '20140124', }, 'params': { # m3u8 download 'skip_download': True, }, 'expected_warnings': ['HTTP Error 404: Not Found'], } def _real_extract(self, url): video_id = self._match_id(url) api_url = f'http://embed.live.huffingtonpost.com/api/segments/{video_id}.json' data = self._download_json(api_url, video_id)['data'] video_title = data['title'] duration = parse_duration(data.get('running_time')) upload_date = unified_strdate( data.get('schedule', {}).get('starts_at') or data.get('segment_start_date_time')) description = data.get('description') thumbnails = [] for url in filter(None, data['images'].values()): m = re.match(r'.*-([0-9]+x[0-9]+)\.', url) if not m: continue thumbnails.append({ 'url': url, 'resolution': m.group(1), }) formats = [] sources = data.get('sources', {}) live_sources = list(sources.get('live', {}).items()) + list(sources.get('live_again', {}).items()) for key, url in live_sources: ext = determine_ext(url) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( url, video_id, ext='mp4', 
m3u8_id='hls', fatal=False)) elif ext == 'f4m': formats.extend(self._extract_f4m_formats( url + '?hdcore=2.9.5', video_id, f4m_id='hds', fatal=False)) else: formats.append({ 'format': key, 'format_id': key.replace('/', '.'), 'ext': 'mp4', 'url': url, 'vcodec': 'none' if key.startswith('audio/') else None, }) return { 'id': video_id, 'title': video_title, 'description': description, 'formats': formats, 'duration': duration, 'upload_date': upload_date, 'thumbnails': thumbnails, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/lsm.py
yt_dlp/extractor/lsm.py
import re
import urllib.parse

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    determine_ext,
    int_or_none,
    js_to_json,
    parse_iso8601,
    parse_qs,
    str_or_none,
    url_or_none,
    urljoin,
)
from ..utils.traversal import traverse_obj


class LSMLREmbedIE(InfoExtractor):
    """Embedded audio/video player of Latvian Radio (latvijasradio.lsm.lv
    and the station subdomains listed in ``_VALID_URL``).

    The media id is taken from the ``show`` or ``id`` query parameter.
    """

    _VALID_URL = r'''(?x)
        https?://(?:
            (?:latvijasradio|lr1|lr2|klasika|lr4|naba|radioteatris)\.lsm|
            pieci
        )\.lv/[^/?#]+/(?:
            pleijeris|embed
        )/?\?(?:[^#]+&)?(?:show|id)=(?P<id>\d+)'''
    _TESTS = [{
        'url': 'https://latvijasradio.lsm.lv/lv/embed/?theme=black&size=16x9&showCaptions=0&id=183522',
        'md5': '719b33875cd1429846eeeaeec6df2830',
        'info_dict': {
            'id': 'a342781',
            'ext': 'mp3',
            'duration': 1823,
            'title': '#138 Nepilnīgā kompensējamo zāļu sistēma pat mēnešiem dzenā pacientus pa aptiekām',
            'thumbnail': 'https://pic.latvijasradio.lv/public/assets/media/9/d/gallery_fd4675ac.jpg',
        },
    }, {
        'url': 'https://radioteatris.lsm.lv/lv/embed/?id=&show=1270&theme=white&size=16x9',
        'info_dict': {
            'id': '1270',
        },
        'playlist_count': 3,
        'playlist': [{
            'md5': '2e61b6eceff00d14d57fdbbe6ab24cac',
            'info_dict': {
                'id': 'a297397',
                'ext': 'mp3',
                'title': 'Eriks Emanuels Šmits "Pilāta evaņģēlijs". 1. daļa',
                'thumbnail': 'https://radioteatris.lsm.lv/public/assets/shows/62f131ae81e3c.jpg',
                'duration': 3300,
            },
        }],
    }, {
        'url': 'https://radioteatris.lsm.lv/lv/embed/?id=&show=1269&theme=white&size=16x9',
        'md5': '24810d4a961da2295d9860afdcaf4f5a',
        'info_dict': {
            'id': 'a230690',
            'ext': 'mp3',
            'title': 'Jens Ahlboms "Spārni". Radioizrāde ar Mārtiņa Freimaņa mūziku',
            'thumbnail': 'https://radioteatris.lsm.lv/public/assets/shows/62f13023a457c.jpg',
            'duration': 1788,
        },
    }, {
        'url': 'https://lr1.lsm.lv/lv/embed/?id=166557&show=0&theme=white&size=16x9',
        'info_dict': {
            'id': '166557',
        },
        'playlist_count': 2,
        'playlist': [{
            'md5': '6a8b0927572f443f09c6e50a3ad65f2d',
            'info_dict': {
                'id': 'a303104',
                'ext': 'mp3',
                'thumbnail': 'https://pic.latvijasradio.lv/public/assets/media/c/5/gallery_a83ad2c2.jpg',
                'title': 'Krustpunktā Lielā intervija: Valsts prezidents Egils Levits',
                'duration': 3222,
            },
        }, {
            'md5': '5d5e191e718b7644e5118b7b4e093a6d',
            'info_dict': {
                'id': 'v303104',
                'ext': 'mp4',
                'thumbnail': 'https://pic.latvijasradio.lv/public/assets/media/c/5/gallery_a83ad2c2.jpg',
                'title': 'Krustpunktā Lielā intervija: Valsts prezidents Egils Levits - Video Version',
                'duration': 3222,
            },
        }],
    }, {
        'url': 'https://lr1.lsm.lv/lv/embed/?id=183522&show=0&theme=white&size=16x9',
        'only_matching': True,
    }, {
        'url': 'https://lr2.lsm.lv/lv/embed/?id=182126&show=0&theme=white&size=16x9',
        'only_matching': True,
    }, {
        'url': 'https://klasika.lsm.lv/lv/embed/?id=110806&show=0&theme=white&size=16x9',
        'only_matching': True,
    }, {
        'url': 'https://lr4.lsm.lv/lv/embed/?id=184282&show=0&theme=white&size=16x9',
        'only_matching': True,
    }, {
        'url': 'https://pieci.lv/lv/embed/?id=168896&show=0&theme=white&size=16x9',
        'only_matching': True,
    }, {
        'url': 'https://naba.lsm.lv/lv/embed/?id=182901&show=0&theme=white&size=16x9',
        'only_matching': True,
    }, {
        'url': 'https://radioteatris.lsm.lv/lv/embed/?id=176439&show=0&theme=white&size=16x9',
        'only_matching': True,
    }, {
        'url': 'https://lr1.lsm.lv/lv/pleijeris/?embed=0&id=48205&time=00%3A00&idx=0',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        query = parse_qs(url)
        # Take the first non-empty integer value of `show` or `id` from the
        # query string and normalize it back to a string id.
        video_id = traverse_obj(query, (
            ('show', 'id'), 0, {int_or_none}, filter, {str_or_none}), get_all=False)
        webpage = self._download_webpage(url, video_id)

        # The page instantiates `LR.audio.Player(<player opts>, <media>)`;
        # capture both JS object literals from that call.
        player_data, media_data = self._search_regex(
            r'LR\.audio\.Player\s*\([^{]*(?P<player>\{.*?\}),(?P<media>\{.*\})\);',
            webpage, 'player json', group=('player', 'media'))

        # Player options (e.g. poster) are optional; media metadata is required.
        player_json = self._parse_json(
            player_data, video_id, transform_source=js_to_json, fatal=False) or {}
        media_json = self._parse_json(media_data, video_id, transform_source=js_to_json)

        entries = []
        # A single embed may carry both `audio` and `video` item lists.
        for item in traverse_obj(media_json, (('audio', 'video'), lambda _, v: v['id'])):
            formats = []
            for source_url in traverse_obj(item, ('sources', ..., 'file', {url_or_none})):
                if determine_ext(source_url) == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(source_url, video_id, fatal=False))
                else:
                    formats.append({'url': source_url})

            id_ = item['id']
            title = item.get('title')
            # Untitled video items ("v<num>") borrow the title of the
            # matching audio item ("a<num>") with a " - Video Version" suffix.
            if id_.startswith('v') and not title:
                title = traverse_obj(
                    media_json, ('audio', lambda _, v: v['id'][1:] == id_[1:], 'title',
                                 {lambda x: x and f'{x} - Video Version'}), get_all=False)

            entries.append({
                'formats': formats,
                'thumbnail': urljoin(url, player_json.get('poster')),
                'id': id_,
                'title': title,
                'duration': traverse_obj(item, ('duration', {int_or_none})),
            })

        # Single item: return it directly instead of a one-entry playlist.
        if len(entries) == 1:
            return entries[0]

        return self.playlist_result(entries, video_id)


class LSMLTVEmbedIE(InfoExtractor):
    """Embedded player of Latvian Television (ltv.lsm.lv).

    The ``c`` query parameter is an opaque (encrypted, base64) token used as
    the display id; the page payload tells us whether the actual media lives
    on Backscreen/CloudyCDN or on YouTube, and extraction is delegated there.
    """

    _VALID_URL = r'https?://ltv\.lsm\.lv/embed\?(?:[^#]+&)?c=(?P<id>[^#&]+)'
    _TESTS = [{
        'url': 'https://ltv.lsm.lv/embed?c=eyJpdiI6IjQzbHVUeHAyaDJiamFjcjdSUUFKdnc9PSIsInZhbHVlIjoiMHl3SnJNRmd2TmFIdnZwOGtGUUpzODFzUEZ4SVVsN2xoRjliSW9vckUyMWZIWG8vbWVzaFFkY0lhNmRjbjRpaCIsIm1hYyI6ImMzNjdhMzFhNTFhZmY1ZmE0NWI5YmFjZGI1YmJiNGEyNjgzNDM4MjUzMWEwM2FmMDMyZDMwYWM1MDFjZmM5MGIiLCJ0YWciOiIifQ==',
        'md5': '64f72a360ca530d5ed89c77646c9eee5',
        'info_dict': {
            'id': '46k_d23-6000-105',
            'ext': 'mp4',
            'timestamp': 1700589151,
            'duration': 1442,
            'upload_date': '20231121',
            'title': 'D23-6000-105_cetstud',
            'thumbnail': 'https://store.bstrm.net/tmsp00060/assets/media/660858/placeholder1700589200.jpg',
        },
    }, {
        'url': 'https://ltv.lsm.lv/embed?enablesdkjs=1&c=eyJpdiI6IncwVzZmUFk2MU12enVWK1I3SUcwQ1E9PSIsInZhbHVlIjoid3FhV29vamc3T2sxL1RaRmJ5Rm1GTXozU0o2dVczdUtLK0cwZEZJMDQ2a3ZIRG5DK2pneGlnbktBQy9uazVleHN6VXhxdWIweWNvcHRDSnlISlNYOHlVZ1lpcTUrcWZSTUZPQW14TVdkMW9aOUtRWVNDcFF4eWpHNGcrT0VZbUNFQStKQk91cGpndW9FVjJIa0lpbkh3PT0iLCJtYWMiOiIyZGI1NDJlMWRlM2QyMGNhOGEwYTM2MmNlN2JlOGRhY2QyYjdkMmEzN2RlOTEzYTVkNzI1ODlhZDlhZjU4MjQ2IiwidGFnIjoiIn0=',
        'md5': 'f236cef2fd5953612754e4e66be51e7a',
        'info_dict': {
            'id': 'wUnFArIPDSY',
            'ext': 'mp4',
            'uploader': 'LTV_16plus',
            'release_date': '20220514',
            'channel_url': 'https://www.youtube.com/channel/UCNMrnafwXD2XKeeQOyfkFCw',
            'view_count': int,
            'availability': 'public',
            'thumbnail': 'https://i.ytimg.com/vi/wUnFArIPDSY/maxresdefault.jpg',
            'release_timestamp': 1652544074,
            'title': 'EIROVĪZIJA SALĀTOS',
            'live_status': 'was_live',
            'uploader_id': '@LTV16plus',
            'comment_count': int,
            'channel_id': 'UCNMrnafwXD2XKeeQOyfkFCw',
            'channel_follower_count': int,
            'categories': ['Entertainment'],
            'duration': 5269,
            'upload_date': '20220514',
            'age_limit': 0,
            'channel': 'LTV_16plus',
            'playable_in_embed': True,
            'tags': [],
            'uploader_url': 'https://www.youtube.com/@LTV16plus',
            'like_count': int,
            'description': 'md5:7ff0c42ba971e3c13e4b8a2ff03b70b5',
            'media_type': 'livestream',
            'timestamp': 1652550741,
        },
    }]

    def _real_extract(self, url):
        # The token may be percent-encoded in the URL; decode it for display.
        video_id = urllib.parse.unquote(self._match_id(url))
        webpage = self._download_webpage(url, video_id)
        data = self._search_json(
            r'window\.ltvEmbedPayload\s*=', webpage, 'embed json', video_id)
        embed_type = traverse_obj(data, ('source', 'name', {str}))

        if embed_type in ('backscreen', 'telia'):  # 'telia' only for backwards compat
            ie_key = 'CloudyCDN'
            embed_url = traverse_obj(data, ('source', 'embed_url', {url_or_none}))
        elif embed_type == 'youtube':
            ie_key = 'Youtube'
            # For YouTube the payload's `id` is the YouTube video id/URL.
            embed_url = traverse_obj(data, ('source', 'id', {str}))
        else:
            raise ExtractorError(f'Unsupported embed type {embed_type!r}')

        # Delegate to the target extractor, forwarding what metadata we have.
        return self.url_result(
            embed_url, ie_key, video_id, **traverse_obj(data, {
                'title': ('parentInfo', 'title'),
                'duration': ('parentInfo', 'duration', {int_or_none}),
                'thumbnail': ('source', 'poster', {url_or_none}),
            }))


class LSMReplayIE(InfoExtractor):
    """Replay pages on replay.lsm.lv (both radio recordings and TV embeds).

    Metadata is read from the page's Nuxt ``__REPLAY__`` context; audio items
    are extracted directly from their HLS URL, while embedded items are passed
    through to the embed URL transparently.
    """

    _VALID_URL = r'https?://replay\.lsm\.lv/[^/?#]+/(?:skaties/|klausies/)?(?:ieraksts|statja)/[^/?#]+/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://replay.lsm.lv/lv/skaties/ieraksts/ltv/311130/4-studija-zolitudes-tragedija-un-incupes-stacija',
        'md5': '64f72a360ca530d5ed89c77646c9eee5',
        'info_dict': {
            'id': '46k_d23-6000-105',
            'ext': 'mp4',
            'timestamp': 1700586300,
            'description': 'md5:0f1b14798cc39e1ae578bd0eb268f759',
            'duration': 1442,
            'upload_date': '20231121',
            'title': '4. studija. Zolitūdes traģēdija un Inčupes stacija',
            'thumbnail': 'https://ltv.lsm.lv/storage/media/8/7/large/5/1f9604e1.jpg',
        },
    }, {
        'url': 'https://replay.lsm.lv/lv/klausies/ieraksts/lr/183522/138-nepilniga-kompensejamo-zalu-sistema-pat-menesiem-dzena-pacientus-pa-aptiekam',
        'md5': '84feb80fd7e6ec07744726a9f01cda4d',
        'info_dict': {
            'id': '183522',
            'ext': 'm4a',
            'duration': 1823,
            'title': '#138 Nepilnīgā kompensējamo zāļu sistēma pat mēnešiem dzenā pacientus pa aptiekām',
            'thumbnail': 'https://pic.latvijasradio.lv/public/assets/media/9/d/large_fd4675ac.jpg',
            'upload_date': '20231102',
            'timestamp': 1698913860,
            'description': 'md5:7bac3b2dd41e44325032943251c357b1',
        },
    }, {
        'url': 'https://replay.lsm.lv/ru/skaties/statja/ltv/355067/v-kengaragse-nacalas-ukladka-relsov',
        'only_matching': True,
    }, {
        'url': 'https://replay.lsm.lv/lv/ieraksts/ltv/311130/4-studija-zolitudes-tragedija-un-incupes-stacija',
        'only_matching': True,
    }]

    def _fix_nuxt_data(self, webpage):
        # The Nuxt payload contains `Object.create(null, {...})` expressions
        # that are not valid JSON; unwrap them (or replace bare
        # `Object.create(null)` with `null`) before parsing.
        return re.sub(r'Object\.create\(null(?:,(\{.+\}))?\)', lambda m: m.group(1) or 'null', webpage)

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        data = self._search_nuxt_data(
            self._fix_nuxt_data(webpage), video_id, context_name='__REPLAY__')
        playback_type = data['playback']['type']

        if playback_type == 'playable_audio_lr':
            # Direct Latvian Radio audio: formats come straight from HLS.
            playback_data = {
                'formats': self._extract_m3u8_formats(data['playback']['service']['hls_url'], video_id),
            }
        elif playback_type == 'embed':
            # Embedded player (e.g. LTV): hand off to the embed URL but keep
            # this page's metadata via url_transparent.
            playback_data = {
                '_type': 'url_transparent',
                'url': data['playback']['service']['url'],
            }
        else:
            raise ExtractorError(f'Unsupported playback type "{playback_type}"')

        return {
            'id': video_id,
            **playback_data,
            **traverse_obj(data, {
                'title': ('mediaItem', 'title'),
                'description': ('mediaItem', ('lead', 'body')),
                'duration': ('mediaItem', 'duration', {int_or_none}),
                'timestamp': ('mediaItem', 'aired_at', {parse_iso8601}),
                'thumbnail': ('mediaItem', 'largeThumbnail', {url_or_none}),
            }, get_all=False),
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/glide.py
yt_dlp/extractor/glide.py
from .common import InfoExtractor


class GlideIE(InfoExtractor):
    """Extractor for shared Glide video messages.

    Scrapes the share page markup for the media and thumbnail URLs, falling
    back to the Open Graph metadata when the inline tags are absent.
    """

    IE_DESC = 'Glide mobile video messages (glide.me)'
    _VALID_URL = r'https?://share\.glide\.me/(?P<id>[A-Za-z0-9\-=_+]+)'
    _TEST = {
        'url': 'http://share.glide.me/UZF8zlmuQbe4mr+7dCiQ0w==',
        'md5': '4466372687352851af2d131cfaa8a4c7',
        'info_dict': {
            'id': 'UZF8zlmuQbe4mr+7dCiQ0w==',
            'ext': 'mp4',
            'title': "Damon's Glide message",
            'thumbnail': r're:^https?://.*?\.cloudfront\.net/.*\.jpg$',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        def scrape(pattern, name):
            # Pull a quoted attribute value out of the page markup, if present,
            # normalizing protocol-relative URLs.
            return self._proto_relative_url(self._search_regex(
                pattern, webpage, name, default=None, group='url'))

        media_url = (
            scrape(r'<source[^>]+src=(["\'])(?P<url>.+?)\1', 'video URL')
            or self._og_search_video_url(webpage))
        poster_url = (
            scrape(r'<img[^>]+id=["\']video-thumbnail["\'][^>]+src=(["\'])(?P<url>.+?)\1', 'thumbnail url')
            or self._og_search_thumbnail(webpage))

        return {
            'id': video_id,
            'title': self._generic_title('', webpage),
            'url': media_url,
            'thumbnail': poster_url,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/cbc.py
yt_dlp/extractor/cbc.py
import functools import re import time import urllib.parse from .common import InfoExtractor from ..networking import HEADRequest from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, float_or_none, int_or_none, js_to_json, jwt_decode_hs256, mimetype2ext, orderedSet, parse_age_limit, parse_iso8601, replace_extension, smuggle_url, strip_or_none, try_get, unified_timestamp, update_url, url_basename, url_or_none, urlencode_postdata, ) from ..utils.traversal import require, traverse_obj, trim_str class CBCIE(InfoExtractor): IE_NAME = 'cbc.ca' _VALID_URL = r'https?://(?:www\.)?cbc\.ca/(?!player/|listen/|i/caffeine/syndicate/)(?:[^/?#]+/)+(?P<id>[^/?#]+)' _TESTS = [{ # with mediaId 'url': 'http://www.cbc.ca/22minutes/videos/clips-season-23/don-cherry-play-offs', 'md5': '97e24d09672fc4cf56256d6faa6c25bc', 'info_dict': { 'id': '2682904050', 'ext': 'mp4', 'title': 'Don Cherry – All-Stars', 'description': 'Don Cherry has a bee in his bonnet about AHL player John Scott because that guy’s got heart.', 'timestamp': 1454463000, 'upload_date': '20160203', 'uploader': 'CBCC-NEW', }, 'skip': 'Geo-restricted to Canada', }, { # with clipId, feed available via tpfeed.cbc.ca and feed.theplatform.com 'url': 'http://www.cbc.ca/22minutes/videos/22-minutes-update/22-minutes-update-episode-4', 'md5': '162adfa070274b144f4fdc3c3b8207db', 'info_dict': { 'id': '2414435309', 'ext': 'mp4', 'title': '22 Minutes Update: What Not To Wear Quebec', 'description': "This week's latest Canadian top political story is What Not To Wear Quebec.", 'upload_date': '20131025', 'uploader': 'CBCC-NEW', 'timestamp': 1382717907, }, 'skip': 'No longer available', }, { # with clipId, feed only available via tpfeed.cbc.ca 'url': 'http://www.cbc.ca/archives/entry/1978-robin-williams-freestyles-on-90-minutes-live', 'md5': '0274a90b51a9b4971fe005c63f592f12', 'info_dict': { 'id': '2487345465', 'ext': 'mp4', 'title': 'Robin Williams freestyles on 90 Minutes Live', 'description': 'Wacky 
American comedian Robin Williams shows off his infamous "freestyle" comedic talents while being interviewed on CBC\'s 90 Minutes Live.', 'upload_date': '19780210', 'uploader': 'CBCC-NEW', 'timestamp': 255977160, }, 'skip': '404 Not Found', }, { # multiple iframes 'url': 'http://www.cbc.ca/natureofthings/blog/birds-eye-view-from-vancouvers-burrard-street-bridge-how-we-got-the-shot', 'playlist': [{ 'md5': '377572d0b49c4ce0c9ad77470e0b96b4', 'info_dict': { 'id': '2680832926', 'ext': 'mp4', 'title': 'An Eagle\'s-Eye View Off Burrard Bridge', 'description': 'Hercules the eagle flies from Vancouver\'s Burrard Bridge down to a nearby park with a mini-camera strapped to his back.', 'upload_date': '20160201', 'timestamp': 1454342820, 'uploader': 'CBCC-NEW', }, }, { 'md5': '415a0e3f586113894174dfb31aa5bb1a', 'info_dict': { 'id': '2658915080', 'ext': 'mp4', 'title': 'Fly like an eagle!', 'description': 'Eagle equipped with a mini camera flies from the world\'s tallest tower', 'upload_date': '20150315', 'timestamp': 1426443984, 'uploader': 'CBCC-NEW', }, }], 'skip': 'Geo-restricted to Canada', }, { # multiple CBC.APP.Caffeine.initInstance(...) 
'url': 'http://www.cbc.ca/news/canada/calgary/dog-indoor-exercise-winter-1.3928238', 'info_dict': { 'title': 'Keep Rover active during the deep freeze with doggie pushups and other fun indoor tasks', # FIXME: actual title includes " | CBC News" 'id': 'dog-indoor-exercise-winter-1.3928238', 'description': 'md5:c18552e41726ee95bd75210d1ca9194c', }, 'playlist_mincount': 6, }] def _extract_player_init(self, player_init, display_id): player_info = self._parse_json(player_init, display_id, js_to_json) media_id = player_info.get('mediaId') if not media_id: clip_id = player_info['clipId'] feed = self._download_json( f'http://tpfeed.cbc.ca/f/ExhSPC/vms_5akSXx4Ng_Zn?byCustomValue={{:mpsReleases}}{{{clip_id}}}', clip_id, fatal=False) if feed: media_id = try_get(feed, lambda x: x['entries'][0]['guid'], str) if not media_id: media_id = self._download_json( 'http://feed.theplatform.com/f/h9dtGB/punlNGjMlc1F?fields=id&byContent=byReleases%3DbyId%253D' + clip_id, clip_id)['entries'][0]['id'].split('/')[-1] return self.url_result(f'cbcplayer:{media_id}', 'CBCPlayer', media_id) def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) title = (self._og_search_title(webpage, default=None) or self._html_search_meta('twitter:title', webpage, 'title', default=None) or self._html_extract_title(webpage)) entries = [ self._extract_player_init(player_init, display_id) for player_init in re.findall(r'CBC\.APP\.Caffeine\.initInstance\(({.+?})\);', webpage)] media_ids = [] for media_id_re in ( r'<iframe[^>]+src="[^"]+?mediaId=(\d+)"', r'<div[^>]+\bid=["\']player-(\d+)', r'guid["\']\s*:\s*["\'](\d+)'): media_ids.extend(re.findall(media_id_re, webpage)) entries.extend([ self.url_result(f'cbcplayer:{media_id}', 'CBCPlayer', media_id) for media_id in orderedSet(media_ids)]) return self.playlist_result( entries, display_id, strip_or_none(title), self._og_search_description(webpage)) class CBCPlayerIE(InfoExtractor): IE_NAME = 'cbc.ca:player' 
_VALID_URL = r'(?:cbcplayer:|https?://(?:www\.)?cbc\.ca/(?:player/play/(?:video/)?|i/caffeine/syndicate/\?mediaId=))(?P<id>(?:\d\.)?\d+)' _GEO_COUNTRIES = ['CA'] _TESTS = [{ 'url': 'http://www.cbc.ca/player/play/2683190193', 'md5': '64d25f841ddf4ddb28a235338af32e2c', 'info_dict': { 'id': '2683190193', 'ext': 'mp4', 'title': 'Gerry Runs a Sweat Shop', 'description': 'md5:b457e1c01e8ff408d9d801c1c2cd29b0', 'timestamp': 1455071400, 'upload_date': '20160210', 'uploader': 'CBCC-NEW', }, 'skip': 'Geo-restricted to Canada and no longer available', }, { 'url': 'http://www.cbc.ca/i/caffeine/syndicate/?mediaId=2657631896', 'md5': 'e5e708c34ae6fca156aafe17c43e8b75', 'info_dict': { 'id': '2657631896', 'ext': 'mp3', 'title': 'CBC Montreal is organizing its first ever community hackathon!', 'description': 'md5:dd3b692f0a139b0369943150bd1c46a9', 'timestamp': 1425704400, 'upload_date': '20150307', 'thumbnail': 'https://i.cbc.ca/ais/1.2985700,1717262248558/full/max/0/default.jpg', 'chapters': [], 'duration': 494.811, 'categories': ['All in a Weekend Montreal'], 'tags': 'count:11', 'location': 'Quebec', 'series': 'All in a Weekend Montreal', 'season': 'Season 2015', 'season_number': 2015, 'media_type': 'Excerpt', 'genres': ['Other'], }, }, { 'url': 'http://www.cbc.ca/i/caffeine/syndicate/?mediaId=2164402062', 'info_dict': { 'id': '2164402062', 'ext': 'mp4', 'title': 'Cancer survivor four times over', 'description': 'Tim Mayer has beaten three different forms of cancer four times in five years.', 'timestamp': 1320410746, 'upload_date': '20111104', 'thumbnail': 'https://i.cbc.ca/ais/1.1711287,1717139372111/full/max/0/default.jpg', 'chapters': [], 'duration': 186.867, 'series': 'CBC News: Windsor at 6:00', 'categories': ['Windsor'], 'location': 'Windsor', 'tags': ['Cancer', 'News/Canada/Windsor', 'Windsor'], 'media_type': 'Excerpt', 'genres': ['News'], }, 'params': {'skip_download': 'm3u8'}, }, { # Redirected from 
http://www.cbc.ca/player/AudioMobile/All%20in%20a%20Weekend%20Montreal/ID/2657632011/ 'url': 'https://www.cbc.ca/player/play/1.2985700', 'md5': 'e5e708c34ae6fca156aafe17c43e8b75', 'info_dict': { 'id': '1.2985700', 'ext': 'mp3', 'title': 'CBC Montreal is organizing its first ever community hackathon!', 'description': 'The modern technology we tend to depend on so heavily, is never without it\'s share of hiccups and headaches. Next weekend - CBC Montreal will be getting members of the public for its first Hackathon.', 'timestamp': 1425704400, 'upload_date': '20150307', 'thumbnail': 'https://i.cbc.ca/ais/1.2985700,1717262248558/full/max/0/default.jpg', 'chapters': [], 'duration': 494.811, 'categories': ['All in a Weekend Montreal'], 'tags': 'count:11', 'location': 'Quebec', 'series': 'All in a Weekend Montreal', 'season': 'Season 2015', 'season_number': 2015, 'media_type': 'Excerpt', 'genres': ['Other'], }, }, { 'url': 'https://www.cbc.ca/player/play/1.1711287', 'info_dict': { 'id': '1.1711287', 'ext': 'mp4', 'title': 'Cancer survivor four times over', 'description': 'Tim Mayer has beaten three different forms of cancer four times in five years.', 'timestamp': 1320410746, 'upload_date': '20111104', 'thumbnail': 'https://i.cbc.ca/ais/1.1711287,1717139372111/full/max/0/default.jpg', 'chapters': [], 'duration': 186.867, 'series': 'CBC News: Windsor at 6:00', 'categories': ['Windsor'], 'location': 'Windsor', 'tags': ['Cancer', 'News/Canada/Windsor', 'Windsor'], 'media_type': 'Excerpt', 'genres': ['News'], }, 'params': {'skip_download': 'm3u8'}, }, { # Has subtitles # These broadcasts expire after ~1 month, can find new test URL here: # https://www.cbc.ca/player/news/TV%20Shows/The%20National/Latest%20Broadcast 'url': 'https://www.cbc.ca/player/play/video/9.6424403', 'md5': '8025909eaffcf0adf59922904def9a5e', 'info_dict': { 'id': '9.6424403', 'ext': 'mp4', 'title': 'The National | N.W.T. 
wildfire emergency', 'description': 'md5:ada33d36d1df69347ed575905bfd496c', 'timestamp': 1718589600, 'duration': 2692.833, 'subtitles': { 'en-US': [{ 'name': 'English Captions', 'url': 'https://cbchls.akamaized.net/delivery/news-shows/2024/06/17/NAT_JUN16-00-55-00/NAT_JUN16_cc.vtt', }], }, 'thumbnail': 'https://i.cbc.ca/ais/6272b5c6-5e78-4c05-915d-0e36672e33d1,1714756287822/full/max/0/default.jpg', 'chapters': 'count:5', 'upload_date': '20240617', 'categories': ['News', 'The National', 'The National Latest Broadcasts'], 'series': 'The National - Full Show', 'tags': ['The National'], 'location': 'Canada', 'media_type': 'Full Program', 'genres': ['News'], }, }, { 'url': 'https://www.cbc.ca/player/play/video/1.7194274', 'md5': '188b96cf6bdcb2540e178a6caa957128', 'info_dict': { 'id': '1.7194274', 'ext': 'mp4', 'title': '#TheMoment a rare white spirit moose was spotted in Alberta', 'description': 'md5:18ae269a2d0265c5b0bbe4b2e1ac61a3', 'timestamp': 1714788791, 'duration': 77.678, 'subtitles': {'eng': [{'ext': 'vtt', 'protocol': 'm3u8_native'}]}, 'thumbnail': 'https://i.cbc.ca/ais/1.7194274,1717224990425/full/max/0/default.jpg', 'chapters': [], 'categories': 'count:3', 'series': 'The National', 'tags': 'count:17', 'location': 'Canada', 'media_type': 'Excerpt', 'upload_date': '20240504', 'genres': ['News'], }, }, { 'url': 'https://www.cbc.ca/player/play/video/9.6427282', 'info_dict': { 'id': '9.6427282', 'ext': 'mp4', 'title': 'Men\'s Soccer - Argentina vs Morocco', 'description': 'Argentina faces Morocco on the football pitch at Saint Etienne Stadium.', 'series': 'CBC Sports', 'media_type': 'Event Coverage', 'thumbnail': 'https://i.cbc.ca/ais/a4c5c0c2-99fa-4bd3-8061-5a63879c1b33,1718828053500/full/max/0/default.jpg', 'timestamp': 1721825400.0, 'upload_date': '20240724', 'duration': 10568.0, 'chapters': [], 'genres': [], 'tags': ['2024 Paris Olympic Games'], 'categories': ['Olympics Summer Soccer', 'Summer Olympics Replays', 'Summer Olympics Soccer Replays'], 'location': 
'Canada', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.cbc.ca/player/play/video/9.6459530', 'md5': '6c1bb76693ab321a2e99c347a1d5ecbc', 'info_dict': { 'id': '9.6459530', 'ext': 'mp4', 'title': 'Parts of Jasper incinerated as wildfire rages', 'description': 'md5:6f1caa8d128ad3f629257ef5fecf0962', 'series': 'The National', 'media_type': 'Excerpt', 'thumbnail': 'https://i.cbc.ca/ais/507c0086-31a2-494d-96e4-bffb1048d045,1721953984375/full/max/0/default.jpg', 'timestamp': 1721964091.012, 'upload_date': '20240726', 'duration': 952.285, 'chapters': [], 'genres': [], 'tags': 'count:23', 'categories': ['News (FAST)', 'News', 'The National', 'TV News Shows', 'The National '], }, }, { 'url': 'https://www.cbc.ca/player/play/video/9.6420651', 'md5': '71a850c2c6ee5e912de169f5311bb533', 'info_dict': { 'id': '9.6420651', 'ext': 'mp4', 'title': 'Is it a breath of fresh air? Measuring air quality in Edmonton', 'description': 'md5:3922b92cc8b69212d739bd9dd095b1c3', 'series': 'CBC News Edmonton', 'media_type': 'Excerpt', 'thumbnail': 'https://i.cbc.ca/ais/73c4ab9c-7ad4-46ee-bb9b-020fdc01c745,1718214547576/full/max/0/default.jpg', 'timestamp': 1718220065.768, 'upload_date': '20240612', 'duration': 286.086, 'chapters': [], 'genres': ['News'], 'categories': ['News', 'Edmonton'], 'tags': 'count:7', 'location': 'Edmonton', }, }, { 'url': 'cbcplayer:1.7159484', 'only_matching': True, }, { 'url': 'cbcplayer:2164402062', 'only_matching': True, }, { 'url': 'http://www.cbc.ca/player/play/2657631896', 'only_matching': True, }] def _parse_param(self, asset_data, name): return traverse_obj(asset_data, ('params', lambda _, v: v['name'] == name, 'value', {str}, any)) def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(f'https://www.cbc.ca/player/play/{video_id}', video_id) data = self._search_json( r'window\.__INITIAL_STATE__\s*=', webpage, 'initial state', video_id)['video']['currentClip'] assets = traverse_obj( data, ('media', 'assets', 
lambda _, v: url_or_none(v['key']) and v['type'])) if not assets and (media_id := traverse_obj(data, ('mediaId', {str}))): # XXX: Deprecated; CBC is migrating off of ThePlatform return { '_type': 'url_transparent', 'ie_key': 'ThePlatform', 'url': smuggle_url( f'http://link.theplatform.com/s/ExhSPC/media/guid/2655402169/{media_id}?mbr=true&formats=MPEG4,FLV,MP3', { 'force_smil_url': True, }), 'id': media_id, '_format_sort_fields': ('res', 'proto'), # Prioritize direct http formats over HLS } is_live = traverse_obj(data, ('media', 'streamType', {str})) == 'Live' formats, subtitles = [], {} for sub in traverse_obj(data, ('media', 'textTracks', lambda _, v: url_or_none(v['src']))): subtitles.setdefault(sub.get('language') or 'und', []).append({ 'url': sub['src'], 'name': sub.get('label'), }) for asset in assets: asset_key = asset['key'] asset_type = asset['type'] if asset_type != 'medianet': self.report_warning(f'Skipping unsupported asset type "{asset_type}": {asset_key}') continue asset_data = self._download_json(asset_key, video_id, f'Downloading {asset_type} JSON') ext = mimetype2ext(self._parse_param(asset_data, 'contentType')) if ext == 'm3u8': fmts, subs = self._extract_m3u8_formats_and_subtitles( asset_data['url'], video_id, 'mp4', m3u8_id='hls', live=is_live) formats.extend(fmts) # Avoid slow/error-prone webvtt-over-m3u8 if direct https vtt is available if not subtitles: self._merge_subtitles(subs, target=subtitles) if is_live or not fmts: continue # Check for direct https mp4 format best_video_fmt = traverse_obj(fmts, ( lambda _, v: v.get('vcodec') != 'none' and v['tbr'], all, {functools.partial(sorted, key=lambda x: x['tbr'])}, -1, {dict})) or {} base_url = self._search_regex( r'(https?://[^?#]+?/)hdntl=', best_video_fmt.get('url'), 'base url', default=None) if not base_url or '/live/' in base_url: continue mp4_url = base_url + replace_extension(url_basename(best_video_fmt['url']), 'mp4') if self._request_webpage( HEADRequest(mp4_url), video_id, 'Checking 
for https format', errnote=False, fatal=False): formats.append({ **best_video_fmt, 'url': mp4_url, 'format_id': 'https-mp4', 'protocol': 'https', 'manifest_url': None, 'acodec': None, }) else: formats.append({ 'url': asset_data['url'], 'ext': ext, 'vcodec': 'none' if self._parse_param(asset_data, 'mediaType') == 'audio' else None, }) chapters = traverse_obj(data, ( 'media', 'chapters', lambda _, v: float(v['startTime']) is not None, { 'start_time': ('startTime', {float_or_none(scale=1000)}), 'end_time': ('endTime', {float_or_none(scale=1000)}), 'title': ('name', {str}), })) # Filter out pointless single chapters with start_time==0 and no end_time if len(chapters) == 1 and not (chapters[0].get('start_time') or chapters[0].get('end_time')): chapters = [] return { **traverse_obj(data, { 'title': ('title', {str}), 'description': ('description', {str.strip}), 'thumbnail': ('image', 'url', {url_or_none}, {update_url(query=None)}), 'timestamp': ('publishedAt', {float_or_none(scale=1000)}), 'media_type': ('media', 'clipType', {str}), 'series': ('showName', {str}), 'season_number': ('media', 'season', {int_or_none}), 'duration': ('media', 'duration', {float_or_none}, {lambda x: None if is_live else x}), 'location': ('media', 'region', {str}), 'tags': ('tags', ..., 'name', {str}), 'genres': ('media', 'genre', all), 'categories': ('categories', ..., 'name', {str}), }), 'id': video_id, 'formats': formats, 'subtitles': subtitles, 'chapters': chapters, 'is_live': is_live, } class CBCPlayerPlaylistIE(InfoExtractor): IE_NAME = 'cbc.ca:player:playlist' _VALID_URL = r'https?://(?:www\.)?cbc\.ca/(?:player/)(?!play/)(?P<id>[^?#]+)' _TESTS = [{ 'url': 'https://www.cbc.ca/player/news/TV%20Shows/The%20National/Latest%20Broadcast', 'playlist_mincount': 25, 'info_dict': { 'id': 'news/tv shows/the national/latest broadcast', }, }, { 'url': 'https://www.cbc.ca/player/news/Canada/North', 'playlist_mincount': 25, 'info_dict': { 'id': 'news/canada/north', }, }] def _real_extract(self, url): 
playlist_id = urllib.parse.unquote(self._match_id(url)).lower() webpage = self._download_webpage(url, playlist_id) json_content = self._search_json( r'window\.__INITIAL_STATE__\s*=', webpage, 'initial state', playlist_id) def entries(): for video_id in traverse_obj(json_content, ( 'video', 'clipsByCategory', lambda k, _: k.lower() == playlist_id, 'items', ..., 'id', )): yield self.url_result(f'https://www.cbc.ca/player/play/{video_id}', CBCPlayerIE) return self.playlist_result(entries(), playlist_id) class CBCGemBaseIE(InfoExtractor): _NETRC_MACHINE = 'cbcgem' _GEO_COUNTRIES = ['CA'] def _call_show_api(self, item_id, display_id=None): return self._download_json( f'https://services.radio-canada.ca/ott/catalog/v2/gem/show/{item_id}', display_id or item_id, query={'device': 'web'}) def _extract_item_info(self, item_info): episode_number = None title = traverse_obj(item_info, ('title', {str})) if title and (mobj := re.match(r'(?P<episode>\d+)\. (?P<title>.+)', title)): episode_number = int_or_none(mobj.group('episode')) title = mobj.group('title') return { 'episode_number': episode_number, **traverse_obj(item_info, { 'id': ('url', {str}), 'episode_id': ('url', {str}), 'description': ('description', {str}), 'thumbnail': ('images', 'card', 'url', {url_or_none}, {update_url(query=None)}), 'episode_number': ('episodeNumber', {int_or_none}), 'duration': ('metadata', 'duration', {int_or_none}), 'release_timestamp': ('metadata', 'airDate', {unified_timestamp}), 'timestamp': ('metadata', 'availabilityDate', {unified_timestamp}), 'age_limit': ('metadata', 'rating', {trim_str(start='C')}, {parse_age_limit}), }), 'episode': title, 'title': title, } class CBCGemIE(CBCGemBaseIE): IE_NAME = 'gem.cbc.ca' _VALID_URL = r'https?://gem\.cbc\.ca/(?:media/)?(?P<id>[0-9a-z-]+/s(?P<season>[0-9]+)[a-z][0-9]+)' _TESTS = [{ # This is a normal, public, TV show video 'url': 'https://gem.cbc.ca/media/schitts-creek/s06e01', 'info_dict': { 'id': 'schitts-creek/s06e01', 'ext': 'mp4', 'title': 'Smoke 
Signals', 'description': 'md5:929868d20021c924020641769eb3e7f1', 'thumbnail': r're:https://images\.radio-canada\.ca/[^#?]+/cbc_schitts_creek_season_06e01_thumbnail_v01\.jpg', 'duration': 1324, 'genres': ['Comédie et humour'], 'series': 'Schitt\'s Creek', 'season': 'Season 6', 'season_number': 6, 'episode': 'Smoke Signals', 'episode_number': 1, 'episode_id': 'schitts-creek/s06e01', 'upload_date': '20210618', 'timestamp': 1623974400, 'release_date': '20200107', 'release_timestamp': 1578355200, 'age_limit': 14, }, 'params': {'format': 'bv'}, }, { # This video requires an account in the browser, but works fine in yt-dlp 'url': 'https://gem.cbc.ca/media/schitts-creek/s01e01', 'info_dict': { 'id': 'schitts-creek/s01e01', 'ext': 'mp4', 'title': 'The Cup Runneth Over', 'description': 'md5:9bca14ea49ab808097530eb05a29e797', 'thumbnail': r're:https://images\.radio-canada\.ca/[^#?]+/cbc_schitts_creek_season_01e01_thumbnail_v01\.jpg', 'series': 'Schitt\'s Creek', 'season_number': 1, 'season': 'Season 1', 'episode_number': 1, 'episode': 'The Cup Runneth Over', 'episode_id': 'schitts-creek/s01e01', 'duration': 1308, 'genres': ['Comédie et humour'], 'upload_date': '20210617', 'timestamp': 1623888000, 'release_date': '20151123', 'release_timestamp': 1448236800, 'age_limit': 14, }, 'params': {'format': 'bv'}, }, { 'url': 'https://gem.cbc.ca/nadiyas-family-favourites/s01e01', 'only_matching': True, }] _CLIENT_ID = 'fc05b0ee-3865-4400-a3cc-3da82c330c23' _refresh_token = None _access_token = None _claims_token = None @functools.cached_property def _ropc_settings(self): return self._download_json( 'https://services.radio-canada.ca/ott/catalog/v1/gem/settings', None, 'Downloading site settings', query={'device': 'web'})['identityManagement']['ropc'] def _is_jwt_expired(self, token): return jwt_decode_hs256(token)['exp'] - time.time() < 300 def _call_oauth_api(self, oauth_data, note='Refreshing access token'): response = self._download_json( self._ropc_settings['url'], None, note, 
data=urlencode_postdata({ 'client_id': self._CLIENT_ID, **oauth_data, 'scope': self._ropc_settings['scopes'], })) self._refresh_token = response['refresh_token'] self._access_token = response['access_token'] self.cache.store(self._NETRC_MACHINE, 'token_data', [self._refresh_token, self._access_token]) def _perform_login(self, username, password): if not self._refresh_token: self._refresh_token, self._access_token = self.cache.load( self._NETRC_MACHINE, 'token_data', default=[None, None]) if self._refresh_token and self._access_token: self.write_debug('Using cached refresh token') if not self._claims_token: self._claims_token = self.cache.load(self._NETRC_MACHINE, 'claims_token') return try: self._call_oauth_api({ 'grant_type': 'password', 'username': username, 'password': password, }, note='Logging in') except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 400: raise ExtractorError('Invalid username and/or password', expected=True) raise def _fetch_access_token(self): if self._is_jwt_expired(self._access_token): try: self._call_oauth_api({ 'grant_type': 'refresh_token', 'refresh_token': self._refresh_token, }) except ExtractorError: self._refresh_token, self._access_token = None, None self.cache.store(self._NETRC_MACHINE, 'token_data', [None, None]) self.report_warning('Refresh token has been invalidated; retrying with credentials') self._perform_login(*self._get_login_info()) return self._access_token def _fetch_claims_token(self): if not self._get_login_info()[0]: return None if not self._claims_token or self._is_jwt_expired(self._claims_token): self._claims_token = self._download_json( 'https://services.radio-canada.ca/ott/subscription/v2/gem/Subscriber/profile', None, 'Downloading claims token', query={'device': 'web'}, headers={'Authorization': f'Bearer {self._fetch_access_token()}'})['claimsToken'] self.cache.store(self._NETRC_MACHINE, 'claims_token', self._claims_token) else: self.write_debug('Using cached claims token') return 
self._claims_token def _real_extract(self, url): video_id, season_number = self._match_valid_url(url).group('id', 'season') video_info = self._call_show_api(video_id) item_info = traverse_obj(video_info, ( 'content', ..., 'lineups', ..., 'items', lambda _, v: v['url'] == video_id, any, {require('item info')})) headers = {} if claims_token := self._fetch_claims_token(): headers['x-claims-token'] = claims_token m3u8_info = self._download_json( 'https://services.radio-canada.ca/media/validation/v2/', video_id, headers=headers, query={ 'appCode': 'gem', 'connectionType': 'hd', 'deviceType': 'ipad', 'multibitrate': 'true', 'output': 'json', 'tech': 'hls', 'manifestVersion': '2', 'manifestType': 'desktop', 'idMedia': item_info['idMedia'], }) if m3u8_info.get('errorCode') == 1: self.raise_geo_restricted(countries=['CA']) elif m3u8_info.get('errorCode') == 35: self.raise_login_required(method='password') elif m3u8_info.get('errorCode') != 0: raise ExtractorError(f'{self.IE_NAME} said: {m3u8_info.get("errorCode")} - {m3u8_info.get("message")}') formats = self._extract_m3u8_formats( m3u8_info['url'], video_id, 'mp4', m3u8_id='hls', query={'manifestType': ''}) self._remove_duplicate_formats(formats) for fmt in formats: if fmt.get('vcodec') == 'none': if fmt.get('ext') is None: fmt['ext'] = 'm4a' if fmt.get('acodec') is None: fmt['acodec'] = 'mp4a.40.2' # Put described audio at the beginning of the list, so that it # isn't chosen by default, as most people won't want it. 
if 'descriptive' in fmt['format_id'].lower(): fmt['preference'] = -2 return { 'season_number': int_or_none(season_number), **traverse_obj(video_info, { 'series': ('title', {str}), 'season_number': ('structuredMetadata', 'partofSeason', 'seasonNumber', {int_or_none}), 'genres': ('structuredMetadata', 'genre', ..., {str}), }), **self._extract_item_info(item_info), 'id': video_id, 'episode_id': video_id, 'formats': formats, } class CBCGemPlaylistIE(CBCGemBaseIE): IE_NAME = 'gem.cbc.ca:playlist' _VALID_URL = r'https?://gem\.cbc\.ca/(?:media/)?(?P<id>(?P<show>[0-9a-z-]+)/s(?P<season>[0-9]+))/?(?:[?#]|$)'
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
true
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/corus.py
yt_dlp/extractor/corus.py
from .theplatform import ThePlatformFeedIE
from ..utils import (
    ExtractorError,
    dict_get,
    float_or_none,
    int_or_none,
)


class CorusIE(ThePlatformFeedIE):  # XXX: Do not subclass from concrete IE
    """Extractor for video pages on Corus Entertainment brand sites
    (Global TV, HGTV Canada, Food Network Canada, Showcase, etc.)."""
    _VALID_URL = r'''(?x)
                    https?://
                        (?:www\.)?
                        (?P<domain>
                            (?:
                                globaltv|
                                etcanada|
                                seriesplus|
                                wnetwork|
                                ytv
                            )\.com|
                            (?:
                                hgtv|
                                foodnetwork|
                                slice|
                                history|
                                showcase|
                                bigbrothercanada|
                                abcspark|
                                disney(?:channel|lachaine)
                            )\.ca
                        )
                        /(?:[^/]+/)*
                        (?:
                            video\.html\?.*?\bv=|
                            videos?/(?:[^/]+/)*(?:[a-z0-9-]+-)?
                        )
                        (?P<id>
                            [\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}|
                            (?:[A-Z]{4})?\d{12,20}
                        )
                    '''
    _TESTS = [{
        'url': 'https://www.hgtv.ca/video/bryan-inc/movie-night-popcorn-with-bryan/870923331648/',
        'info_dict': {
            'id': '870923331648',
            'ext': 'mp4',
            'title': 'Movie Night Popcorn with Bryan',
            'description': 'Bryan whips up homemade popcorn, the old fashion way for Jojo and Lincoln.',
            'upload_date': '20170206',
            'timestamp': 1486392197,
        },
        'params': {
            'skip_download': True,
        },
        'expected_warnings': ['Failed to parse JSON'],
        # FIXME: yt-dlp wrongly raises for geo restriction
    }, {
        'url': 'http://www.foodnetwork.ca/shows/chopped/video/episode/chocolate-obsession/video.html?v=872683587753',
        'only_matching': True,
    }, {
        'url': 'http://etcanada.com/video/873675331955/meet-the-survivor-game-changers-castaways-part-2/',
        'only_matching': True,
    }, {
        'url': 'http://www.history.ca/the-world-without-canada/video/full-episodes/natural-resources/video.html?v=955054659646#video',
        'only_matching': True,
    }, {
        'url': 'http://www.showcase.ca/eyewitness/video/eyewitness++106/video.html?v=955070531919&p=1&s=da#video',
        'only_matching': True,
    }, {
        'url': 'http://www.bigbrothercanada.ca/video/1457812035894/',
        'only_matching': True,
    }, {
        'url': 'https://www.bigbrothercanada.ca/video/big-brother-canada-704/1457812035894/',
        'only_matching': True,
    }, {
        'url': 'https://www.seriesplus.com/emissions/dre-mary-mort-sur-ordonnance/videos/deux-coeurs-battant/SERP0055626330000200/',
        'only_matching': True,
    }, {
        'url': 'https://www.disneychannel.ca/shows/gabby-duran-the-unsittables/video/crybaby-duran-clip/2f557eec-0588-11ea-ae2b-e2c6776b770e/',
        'only_matching': True,
    }]
    _GEO_BYPASS = False
    # Maps the site's subdomain to the path segment used by the content API.
    _SITE_MAP = {
        'globaltv': 'series',
        'etcanada': 'series',
        'foodnetwork': 'food',
        'bigbrothercanada': 'series',
        'disneychannel': 'disneyen',
        'disneylachaine': 'disneyfr',
    }

    def _real_extract(self, url):
        domain, video_id = self._match_valid_url(url).groups()
        site = domain.split('.')[0]
        api_path = self._SITE_MAP.get(site, site)
        if api_path != 'series':
            api_path = 'migration/' + api_path
        video = self._download_json(
            f'https://globalcontent.corusappservices.com/templates/{api_path}/playlist/',
            video_id, query={'byId': video_id},
            headers={'Accept': 'application/json'})[0]
        title = video['title']

        formats = []
        for source in video.get('sources', []):
            smil_url = source.get('file')
            if not smil_url:
                continue
            source_type = source.get('type')
            note = f'Downloading {source_type} smil file' if source_type else 'Downloading smil file'
            smil_doc = self._download_webpage(
                smil_url, video_id, note, fatal=False,
                headers=self.geo_verification_headers())
            if not smil_doc:
                continue
            # The endpoint returns a JSON error object instead of SMIL on failure.
            error = self._parse_json(smil_doc, video_id, fatal=False)
            if error:
                if error.get('exception') == 'GeoLocationBlocked':
                    self.raise_geo_restricted(countries=['CA'])
                raise ExtractorError(error['description'])
            smil = self._parse_xml(smil_doc, video_id, fatal=False)
            if smil is None:
                continue
            namespace = self._parse_smil_namespace(smil)
            formats.extend(self._parse_smil_formats(
                smil, smil_url, video_id, namespace))
        if not formats and video.get('drm'):
            self.report_drm(video_id)

        subtitles = {}
        for track in video.get('tracks', []):
            track_url = track.get('file')
            if not track_url:
                continue
            # French-language sites get 'fr' subtitle tracks; everything else 'en'.
            lang = 'fr' if site in ('disneylachaine', 'seriesplus') else 'en'
            subtitles.setdefault(lang, []).append({'url': track_url})

        metadata = video.get('metadata') or {}

        def get_number(key):
            # Season/episode numbers live either as 'pl1$<key>' on the video
            # or as '<key>Number' inside the metadata object.
            return int_or_none(video.get('pl1$' + key) or metadata.get(key + 'Number'))

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': dict_get(video, ('defaultThumbnailUrl', 'thumbnail', 'image')),
            'description': video.get('description'),
            'timestamp': int_or_none(video.get('availableDate'), 1000),
            'subtitles': subtitles,
            'duration': float_or_none(metadata.get('duration')),
            'series': dict_get(video, ('show', 'pl1$show')),
            'season_number': get_number('season'),
            'episode_number': get_number('episode'),
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/likee.py
yt_dlp/extractor/likee.py
import json from .common import InfoExtractor from ..utils import ( int_or_none, js_to_json, parse_iso8601, str_or_none, traverse_obj, ) class LikeeIE(InfoExtractor): IE_NAME = 'likee' _VALID_URL = r'(?x)https?://(www\.)?likee\.video/(?:(?P<channel_name>[^/]+)/video/|v/)(?P<id>\w+)' _TESTS = [{ 'url': 'https://likee.video/@huynh_hong_quan_/video/7093444807096327263', 'info_dict': { 'id': '7093444807096327263', 'ext': 'mp4', 'title': '🤴🤴🤴', 'description': 'md5:9a7ebe816f0e78722ee5ed76f75983b4', 'thumbnail': r're:^https?://.+\.jpg', 'uploader': 'Huỳnh Hồng Qu&acirc;n ', 'artist': 'Huỳnh Hồng Qu&acirc;n ', 'timestamp': 1651571320, 'upload_date': '20220503', 'view_count': int, 'uploader_id': 'huynh_hong_quan_', 'duration': 12374, 'comment_count': int, 'like_count': int, }, }, { 'url': 'https://likee.video/@649222262/video/7093167848050058862', 'info_dict': { 'id': '7093167848050058862', 'ext': 'mp4', 'title': 'likee video #7093167848050058862', 'description': 'md5:3f971c8c6ee8a216f2b1a9094c5de99f', 'thumbnail': r're:^https?://.+\.jpg', 'comment_count': int, 'like_count': int, 'uploader': 'Vương Phước Nhi', 'timestamp': 1651506835, 'upload_date': '20220502', 'duration': 60024, 'artist': 'Vương Phước Nhi', 'uploader_id': '649222262', 'view_count': int, }, }, { 'url': 'https://likee.video/@fernanda_rivasg/video/6932224568407629502', 'info_dict': { 'id': '6932224568407629502', 'ext': 'mp4', 'title': 'Un trend viejito🔥 #LIKEE #Ferlovers #trend ', 'description': 'md5:c42b903a72a99d6d8b73e3d1126fbcef', 'thumbnail': r're:^https?://.+\.jpg', 'comment_count': int, 'duration': 9684, 'uploader_id': 'fernanda_rivasg', 'view_count': int, 'artist': 'La Cami La✨', 'like_count': int, 'uploader': 'Fernanda Rivas🎶', 'timestamp': 1614034308, 'upload_date': '20210222', }, }, { 'url': 'https://likee.video/v/k6QcOp', 'info_dict': { 'id': 'k6QcOp', 'ext': 'mp4', 'title': '#AguaChallenge t&uacute; ya lo intentaste?😱🤩', 'description': 'md5:b0cc462689d4ff2b624daa4dba7640d9', 'thumbnail': 
r're:^https?://.+\.jpg', 'comment_count': int, 'duration': 18014, 'view_count': int, 'timestamp': 1611694774, 'like_count': int, 'uploader': 'Fernanda Rivas🎶', 'uploader_id': 'fernanda_rivasg', 'artist': 'ʟᴇʀɪᴋ_ᴜɴɪᴄᴏʀɴ♡︎', 'upload_date': '20210126', }, }, { 'url': 'https://www.likee.video/@649222262/video/7093167848050058862', 'only_matching': True, }, { 'url': 'https://www.likee.video/v/k6QcOp', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) info = self._parse_json( self._search_regex(r'window\.data\s=\s({.+?});', webpage, 'video info'), video_id, transform_source=js_to_json) video_url = traverse_obj(info, 'video_url', ('originVideoInfo', 'video_url')) if not video_url: self.raise_no_formats('Video was deleted', expected=True) formats = [{ 'format_id': 'mp4-with-watermark', 'url': video_url, 'height': info.get('video_height'), 'width': info.get('video_width'), }, { 'format_id': 'mp4-without-watermark', 'url': video_url.replace('_4', ''), 'height': info.get('video_height'), 'width': info.get('video_width'), 'quality': 1, }] return { 'id': video_id, 'title': info.get('msgText'), 'description': info.get('share_desc'), 'view_count': int_or_none(info.get('video_count')), 'like_count': int_or_none(info.get('likeCount')), 'comment_count': int_or_none(info.get('comment_count')), 'uploader': str_or_none(info.get('nick_name')), 'uploader_id': str_or_none(info.get('likeeId')), 'artist': str_or_none(traverse_obj(info, ('sound', 'owner_name'))), 'timestamp': parse_iso8601(info.get('uploadDate')), 'thumbnail': info.get('coverUrl'), 'duration': int_or_none(traverse_obj(info, ('option_data', 'dur'))), 'formats': formats, } class LikeeUserIE(InfoExtractor): IE_NAME = 'likee:user' _VALID_URL = r'https?://(www\.)?likee\.video/(?P<id>[^/]+)/?$' _TESTS = [{ 'url': 'https://likee.video/@fernanda_rivasg', 'info_dict': { 'id': '925638334', 'title': 'fernanda_rivasg', }, 'playlist_mincount': 500, }, { 
'url': 'https://likee.video/@may_hmoob', 'info_dict': { 'id': '2943949041', 'title': 'may_hmoob', }, 'playlist_mincount': 80, }] _PAGE_SIZE = 50 _API_GET_USER_VIDEO = 'https://api.like-video.com/likee-activity-flow-micro/videoApi/getUserVideo' def _entries(self, user_name, user_id): last_post_id = '' while True: user_videos = self._download_json( self._API_GET_USER_VIDEO, user_name, data=json.dumps({ 'uid': user_id, 'count': self._PAGE_SIZE, 'lastPostId': last_post_id, 'tabType': 0, }).encode(), headers={'content-type': 'application/json'}, note=f'Get user info with lastPostId #{last_post_id}') items = traverse_obj(user_videos, ('data', 'videoList')) if not items: break for item in items: last_post_id = item['postId'] yield self.url_result(f'https://likee.video/{user_name}/video/{last_post_id}') def _real_extract(self, url): user_name = self._match_id(url) webpage = self._download_webpage(url, user_name) info = self._parse_json( self._search_regex(r'window\.data\s*=\s*({.+?});', webpage, 'user info'), user_name, transform_source=js_to_json) user_id = traverse_obj(info, ('userinfo', 'uid')) return self.playlist_result(self._entries(user_name, user_id), user_id, traverse_obj(info, ('userinfo', 'user_name')))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/eporner.py
yt_dlp/extractor/eporner.py
from .common import InfoExtractor from ..utils import ( ExtractorError, encode_base_n, get_elements_by_class, int_or_none, join_nonempty, merge_dicts, parse_duration, str_to_int, url_or_none, ) class EpornerIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?eporner\.com/(?:(?:hd-porn|embed)/|video-)(?P<id>\w+)(?:/(?P<display_id>[\w-]+))?' _TESTS = [{ 'url': 'http://www.eporner.com/hd-porn/95008/Infamous-Tiffany-Teen-Strip-Tease-Video/', 'md5': '39d486f046212d8e1b911c52ab4691f8', 'info_dict': { 'id': 'qlDUmNsj6VS', 'display_id': 'Infamous-Tiffany-Teen-Strip-Tease-Video', 'ext': 'mp4', 'title': 'Infamous Tiffany Teen Strip Tease Video', 'description': 'md5:764f39abf932daafa37485eb46efa152', 'timestamp': 1232520922, 'upload_date': '20090121', 'duration': 1838, 'view_count': int, 'age_limit': 18, }, }, { # New (May 2016) URL layout 'url': 'http://www.eporner.com/hd-porn/3YRUtzMcWn0/Star-Wars-XXX-Parody/', 'only_matching': True, }, { 'url': 'http://www.eporner.com/hd-porn/3YRUtzMcWn0', 'only_matching': True, }, { 'url': 'http://www.eporner.com/embed/3YRUtzMcWn0', 'only_matching': True, }, { 'url': 'https://www.eporner.com/video-FJsA19J3Y3H/one-of-the-greats/', 'only_matching': True, }] def _real_extract(self, url): mobj = self._match_valid_url(url) video_id = mobj.group('id') display_id = mobj.group('display_id') or video_id webpage, urlh = self._download_webpage_handle(url, display_id) video_id = self._match_id(urlh.url) vid_hash = self._search_regex( r'hash\s*[:=]\s*["\']([\da-f]{32})', webpage, 'hash') title = self._og_search_title(webpage, default=None) or self._html_search_regex( r'<title>(.+?) 
- EPORNER', webpage, 'title') # Reverse engineered from vjs.js def calc_hash(s): return ''.join(encode_base_n(int(s[lb:lb + 8], 16), 36) for lb in range(0, 32, 8)) video = self._download_json( f'http://www.eporner.com/xhr/video/{video_id}', display_id, note='Downloading video JSON', query={ 'hash': calc_hash(vid_hash), 'device': 'generic', 'domain': 'www.eporner.com', 'fallback': 'false', }) if video.get('available') is False: raise ExtractorError( '{} said: {}'.format(self.IE_NAME, video['message']), expected=True) sources = video['sources'] formats = [] has_av1 = bool(get_elements_by_class('download-av1', webpage)) for kind, formats_dict in sources.items(): if not isinstance(formats_dict, dict): continue for format_id, format_dict in formats_dict.items(): if not isinstance(format_dict, dict): continue src = url_or_none(format_dict.get('src')) if not src or not src.startswith('http'): continue if kind == 'hls': formats.extend(self._extract_m3u8_formats( src, display_id, 'mp4', entry_protocol='m3u8_native', m3u8_id=kind, fatal=False)) else: height = int_or_none(self._search_regex( r'(\d+)[pP]', format_id, 'height', default=None)) fps = int_or_none(self._search_regex( r'(\d+)fps', format_id, 'fps', default=None)) formats.append({ 'url': src, 'format_id': format_id, 'height': height, 'fps': fps, }) if has_av1: formats.append({ 'url': src.replace('.mp4', '-av1.mp4'), 'format_id': join_nonempty('av1', format_id), 'height': height, 'fps': fps, 'vcodec': 'av1', }) json_ld = self._search_json_ld(webpage, display_id, default={}) duration = parse_duration(self._html_search_meta( 'duration', webpage, default=None)) view_count = str_to_int(self._search_regex( r'id=["\']cinemaviews1["\'][^>]*>\s*([0-9,]+)', webpage, 'view count', default=None)) return merge_dicts(json_ld, { 'id': video_id, 'display_id': display_id, 'title': title, 'duration': duration, 'view_count': view_count, 'formats': formats, 'age_limit': 18, })
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/audioboom.py
yt_dlp/extractor/audioboom.py
from .common import InfoExtractor from ..utils import clean_html, float_or_none, traverse_obj, unescapeHTML class AudioBoomIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?audioboom\.com/(?:boos|posts)/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://audioboom.com/posts/7398103-asim-chaudhry', 'md5': '4d68be11c9f9daf3dab0778ad1e010c3', 'info_dict': { 'id': '7398103', 'ext': 'mp3', 'title': 'Asim Chaudhry', 'description': 'md5:0ed714ae0e81e5d9119cac2f618ad679', 'duration': 4000.99, 'uploader': 'Sue Perkins: An hour or so with...', 'uploader_url': r're:https?://(?:www\.)?audioboom\.com/channel/perkins', }, }, { # Direct mp3-file link 'url': 'https://audioboom.com/posts/8128496.mp3', 'md5': 'e329edf304d450def95c7f86a9165ee1', 'info_dict': { 'id': '8128496', 'ext': 'mp3', 'title': 'TCRNo8 / DAILY 03 - In Control', 'description': 'md5:44665f142db74858dfa21c5b34787948', 'duration': 1689.7, 'uploader': 'Lost Dot Podcast: The Trans Pyrenees and Transcontinental Race', 'uploader_url': r're:https?://(?:www\.)?audioboom\.com/channels/5003904', }, }, { 'url': 'https://audioboom.com/posts/4279833-3-09-2016-czaban-hour-3?t=0', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(f'https://audioboom.com/posts/{video_id}', video_id) clip_store = self._search_json( r'data-react-class="V5DetailPagePlayer"\s*data-react-props=["\']', webpage, 'clip store', video_id, fatal=False, transform_source=unescapeHTML) clip = traverse_obj(clip_store, ('clips', 0), expected_type=dict) or {} return { 'id': video_id, 'url': clip.get('clipURLPriorToLoading') or self._og_search_property('audio', webpage, 'audio url'), 'title': clip.get('title') or self._html_search_meta(['og:title', 'og:audio:title', 'audio_title'], webpage), 'description': (clip.get('description') or clean_html(clip.get('formattedDescription')) or self._og_search_description(webpage)), 'duration': float_or_none(clip.get('duration') or 
self._html_search_meta('weibo:audio:duration', webpage)), 'uploader': clip.get('author') or self._html_search_meta( ['og:audio:artist', 'twitter:audio:artist_name', 'audio_artist'], webpage, 'uploader'), 'uploader_url': clip.get('author_url') or self._html_search_regex( r'<div class="avatar flex-shrink-0">\s*<a href="(?P<uploader_url>http[^"]+)"', webpage, 'uploader url', fatal=False), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ebaumsworld.py
yt_dlp/extractor/ebaumsworld.py
from .common import InfoExtractor class EbaumsWorldIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?ebaumsworld\.com/videos/[^/]+/(?P<id>\d+)' _TEST = { 'url': 'http://www.ebaumsworld.com/videos/a-giant-python-opens-the-door/83367677/', 'info_dict': { 'id': '83367677', 'ext': 'mp4', 'title': 'A Giant Python Opens The Door', 'description': 'This is how nightmares start...', 'uploader': 'jihadpizza', }, } def _real_extract(self, url): video_id = self._match_id(url) config = self._download_xml( f'http://www.ebaumsworld.com/video/player/{video_id}', video_id) video_url = config.find('file').text return { 'id': video_id, 'title': config.find('title').text, 'url': video_url, 'description': config.find('description').text, 'thumbnail': config.find('image').text, 'uploader': config.find('username').text, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/weverse.py
yt_dlp/extractor/weverse.py
import base64 import functools import hashlib import hmac import itertools import json import re import time import urllib.parse import uuid from .common import InfoExtractor from .naver import NaverBaseIE from .youtube import YoutubeIE from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, UserNotLive, float_or_none, int_or_none, join_nonempty, jwt_decode_hs256, str_or_none, try_call, update_url_query, url_or_none, ) from ..utils.traversal import require, traverse_obj class WeverseBaseIE(InfoExtractor): _NETRC_MACHINE = 'weverse' _ACCOUNT_API_BASE = 'https://accountapi.weverse.io' _CLIENT_PLATFORM = 'WEB' _SIGNING_KEY = b'1b9cb6378d959b45714bec49971ade22e6e24e42' _ACCESS_TOKEN_KEY = 'we2_access_token' _REFRESH_TOKEN_KEY = 'we2_refresh_token' _DEVICE_ID_KEY = 'we2_device_id' _API_HEADERS = { 'Accept': 'application/json', 'Origin': 'https://weverse.io', 'Referer': 'https://weverse.io/', } _LOGIN_HINT_TMPL = ( 'You can log in using your refresh token with --username "{}" --password "REFRESH_TOKEN" ' '(replace REFRESH_TOKEN with the actual value of the "{}" cookie found in your web browser). ' 'You can add an optional username suffix, e.g. --username "{}" , ' 'if you need to manage multiple accounts. ') _LOGIN_ERRORS_MAP = { 'login_required': 'This content is only available for logged-in users. ', 'invalid_username': '"{}" is not valid login username for this extractor. ', 'invalid_password': ( 'Your password is not a valid refresh token. Make sure that ' 'you are passing the refresh token, and NOT the access token. '), 'no_refresh_token': ( 'Your access token has expired and there is no refresh token available. ' 'Refresh your session/cookies in the web browser and try again. '), 'expired_refresh_token': ( 'Your refresh token has expired. Log in to the site again using ' 'your web browser to get a new refresh token or export fresh cookies. 
'), } _OAUTH_PREFIX = 'oauth' _oauth_tokens = {} _device_id = None @property def _oauth_headers(self): return { **self._API_HEADERS, 'X-ACC-APP-SECRET': '5419526f1c624b38b10787e5c10b2a7a', 'X-ACC-SERVICE-ID': 'weverse', 'X-ACC-TRACE-ID': str(uuid.uuid4()), } @functools.cached_property def _oauth_cache_key(self): username = self._get_login_info()[0] if not username: return 'cookies' return join_nonempty(self._OAUTH_PREFIX, username.partition('+')[2]) @property def _is_logged_in(self): return bool(self._oauth_tokens.get(self._ACCESS_TOKEN_KEY)) def _access_token_is_valid(self): response = self._download_json( f'{self._ACCOUNT_API_BASE}/api/v1/token/validate', None, 'Validating access token', 'Unable to valid access token', expected_status=401, headers={ **self._oauth_headers, 'Authorization': f'Bearer {self._oauth_tokens[self._ACCESS_TOKEN_KEY]}', }) return traverse_obj(response, ('expiresIn', {int}), default=0) > 60 def _token_is_expired(self, key): is_expired = jwt_decode_hs256(self._oauth_tokens[key])['exp'] - time.time() < 3600 if key == self._REFRESH_TOKEN_KEY or not is_expired: return is_expired return not self._access_token_is_valid() def _refresh_access_token(self): if not self._oauth_tokens.get(self._REFRESH_TOKEN_KEY): self._report_login_error('no_refresh_token') if self._token_is_expired(self._REFRESH_TOKEN_KEY): self._report_login_error('expired_refresh_token') headers = {'Content-Type': 'application/json'} if self._is_logged_in: headers['Authorization'] = f'Bearer {self._oauth_tokens[self._ACCESS_TOKEN_KEY]}' try: response = self._download_json( f'{self._ACCOUNT_API_BASE}/api/v1/token/refresh', None, 'Refreshing access token', 'Unable to refresh access token', headers={**self._oauth_headers, **headers}, data=json.dumps({ 'refreshToken': self._oauth_tokens[self._REFRESH_TOKEN_KEY], }, separators=(',', ':')).encode()) except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 401: self._oauth_tokens.clear() if self._oauth_cache_key 
== 'cookies': self.cookiejar.clear(domain='.weverse.io', path='/', name=self._ACCESS_TOKEN_KEY) self.cookiejar.clear(domain='.weverse.io', path='/', name=self._REFRESH_TOKEN_KEY) else: self.cache.store(self._NETRC_MACHINE, self._oauth_cache_key, self._oauth_tokens) self._report_login_error('expired_refresh_token') raise self._oauth_tokens.update(traverse_obj(response, { self._ACCESS_TOKEN_KEY: ('accessToken', {str}, {require('access token')}), self._REFRESH_TOKEN_KEY: ('refreshToken', {str}, {require('refresh token')}), })) if self._oauth_cache_key == 'cookies': self._set_cookie('.weverse.io', self._ACCESS_TOKEN_KEY, self._oauth_tokens[self._ACCESS_TOKEN_KEY]) self._set_cookie('.weverse.io', self._REFRESH_TOKEN_KEY, self._oauth_tokens[self._REFRESH_TOKEN_KEY]) else: self.cache.store(self._NETRC_MACHINE, self._oauth_cache_key, self._oauth_tokens) def _get_authorization_header(self): if not self._is_logged_in: return {} if self._token_is_expired(self._ACCESS_TOKEN_KEY): self._refresh_access_token() return {'Authorization': f'Bearer {self._oauth_tokens[self._ACCESS_TOKEN_KEY]}'} def _report_login_error(self, error_id): error_msg = self._LOGIN_ERRORS_MAP[error_id] username = self._get_login_info()[0] if error_id == 'invalid_username': error_msg = error_msg.format(username) username = f'{self._OAUTH_PREFIX}+{username}' elif not username: username = f'{self._OAUTH_PREFIX}+USERNAME' raise ExtractorError(join_nonempty( error_msg, self._LOGIN_HINT_TMPL.format(self._OAUTH_PREFIX, self._REFRESH_TOKEN_KEY, username), 'Or else you can u', self._login_hint(method='session_cookies')[1:], delim=''), expected=True) def _perform_login(self, username, password): if self._is_logged_in: return if username.partition('+')[0] != self._OAUTH_PREFIX: self._report_login_error('invalid_username') self._oauth_tokens.update(self.cache.load(self._NETRC_MACHINE, self._oauth_cache_key, default={})) if self._is_logged_in and self._access_token_is_valid(): return rt_key = self._REFRESH_TOKEN_KEY if 
not self._oauth_tokens.get(rt_key) or self._token_is_expired(rt_key): if try_call(lambda: jwt_decode_hs256(password)['scope']) != 'refresh': self._report_login_error('invalid_password') self._oauth_tokens[rt_key] = password self._refresh_access_token() def _real_initialize(self): cookies = self._get_cookies('https://weverse.io/') if not self._device_id: self._device_id = traverse_obj(cookies, (self._DEVICE_ID_KEY, 'value')) or str(uuid.uuid4()) if self._is_logged_in: return self._oauth_tokens.update(traverse_obj(cookies, { self._ACCESS_TOKEN_KEY: (self._ACCESS_TOKEN_KEY, 'value'), self._REFRESH_TOKEN_KEY: (self._REFRESH_TOKEN_KEY, 'value'), })) if self._is_logged_in and not self._access_token_is_valid(): self._refresh_access_token() def _call_api(self, ep, video_id, data=None, note='Downloading API JSON'): # Ref: https://ssl.pstatic.net/static/wevweb/2_3_2_11101725/public/static/js/2488.a09b41ff.chunk.js # From https://ssl.pstatic.net/static/wevweb/2_3_2_11101725/public/static/js/main.e206f7c1.js: api_path = update_url_query(ep, { # 'gcc': 'US', 'appId': 'be4d79eb8fc7bd008ee82c8ec4ff6fd4', 'language': 'en', 'os': self._CLIENT_PLATFORM, 'platform': self._CLIENT_PLATFORM, 'wpf': 'pc', }) for is_retry in (False, True): wmsgpad = int(time.time() * 1000) wmd = base64.b64encode(hmac.HMAC( self._SIGNING_KEY, f'{api_path[:255]}{wmsgpad}'.encode(), digestmod=hashlib.sha1).digest()).decode() try: return self._download_json( f'https://global.apis.naver.com/weverse/wevweb{api_path}', video_id, note=note, data=data, headers={ **self._API_HEADERS, **self._get_authorization_header(), **({'Content-Type': 'application/json'} if data else {}), 'WEV-device-Id': self._device_id, }, query={ 'wmsgpad': wmsgpad, 'wmd': wmd, }) except ExtractorError as e: if is_retry or not isinstance(e.cause, HTTPError): raise elif self._is_logged_in and e.cause.status == 401: self._refresh_access_token() continue elif e.cause.status == 403: if self._is_logged_in: raise ExtractorError( 'Your account does 
not have access to this content', expected=True) self._report_login_error('login_required') raise def _call_post_api(self, video_id): path = '' if self._is_logged_in else '/preview' return self._call_api(f'/post/v1.0/post-{video_id}{path}?fieldSet=postV1', video_id) def _get_community_id(self, channel): return str(self._call_api( f'/community/v1.0/communityIdUrlPathByUrlPathArtistCode?keyword={channel}', channel, note='Fetching community ID')['communityId']) def _get_formats(self, data, video_id): formats = traverse_obj(data, ('videos', 'list', lambda _, v: url_or_none(v['source']), { 'url': 'source', 'width': ('encodingOption', 'width', {int_or_none}), 'height': ('encodingOption', 'height', {int_or_none}), 'vcodec': 'type', 'vbr': ('bitrate', 'video', {int_or_none}), 'abr': ('bitrate', 'audio', {int_or_none}), 'filesize': ('size', {int_or_none}), 'format_id': ('encodingOption', 'id', {str_or_none}), })) for stream in traverse_obj(data, ('streams', lambda _, v: v['type'] == 'HLS' and url_or_none(v['source']))): query = {} for param in traverse_obj(stream, ('keys', lambda _, v: v['type'] == 'param' and v['name'])): query[param['name']] = param.get('value', '') fmts = self._extract_m3u8_formats( stream['source'], video_id, 'mp4', m3u8_id='hls', fatal=False, query=query) if query: for fmt in fmts: fmt['url'] = update_url_query(fmt['url'], query) fmt['extra_param_to_segment_url'] = urllib.parse.urlencode(query) formats.extend(fmts) return formats def _get_subs(self, caption_url): subs_ext_re = r'\.(?:ttml|vtt)' replace_ext = lambda x, y: re.sub(subs_ext_re, y, x) if re.search(subs_ext_re, caption_url): return [replace_ext(caption_url, '.ttml'), replace_ext(caption_url, '.vtt')] return [caption_url] def _parse_post_meta(self, metadata): return traverse_obj(metadata, { 'title': ((('extension', 'mediaInfo', 'title'), 'title'), {str}), 'description': ((('extension', 'mediaInfo', 'body'), 'body'), {str}), 'uploader': ('author', 'profileName', {str}), 'uploader_id': 
('author', 'memberId', {str}), 'creators': ('community', 'communityName', {str}, all), 'channel_id': (('community', 'author'), 'communityId', {str_or_none}), 'duration': ('extension', 'video', 'playTime', {float_or_none}), 'timestamp': ('publishedAt', {int_or_none(scale=1000)}), 'release_timestamp': ('extension', 'video', 'onAirStartAt', {int_or_none(scale=1000)}), 'thumbnail': ('extension', (('mediaInfo', 'thumbnail', 'url'), ('video', 'thumb')), {url_or_none}), 'view_count': ('extension', 'video', 'playCount', {int_or_none}), 'like_count': ('extension', 'video', 'likeCount', {int_or_none}), 'comment_count': ('commentCount', {int_or_none}), }, get_all=False) def _extract_availability(self, data): return self._availability(**traverse_obj(data, ((('extension', 'video'), None), { 'needs_premium': 'paid', 'needs_subscription': 'membershipOnly', }), get_all=False, expected_type=bool), needs_auth=True) def _extract_live_status(self, data): data = traverse_obj(data, ('extension', 'video', {dict})) or {} if data.get('type') == 'LIVE': return traverse_obj({ 'ONAIR': 'is_live', 'DONE': 'post_live', 'STANDBY': 'is_upcoming', 'DELAY': 'is_upcoming', }, (data.get('status'), {str})) or 'is_live' return 'was_live' if data.get('liveToVod') else 'not_live' class WeverseIE(WeverseBaseIE): _VALID_URL = r'https?://(?:www\.|m\.)?weverse\.io/(?P<artist>[^/?#]+)/live/(?P<id>[\d-]+)' _TESTS = [{ 'url': 'https://weverse.io/billlie/live/0-107323480', 'md5': '1fa849f00181eef9100d3c8254c47979', 'info_dict': { 'id': '0-107323480', 'ext': 'mp4', 'title': '행복한 평이루💜', 'description': '', 'uploader': 'Billlie', 'uploader_id': '5ae14aed7b7cdc65fa87c41fe06cc936', 'channel': 'billlie', 'channel_id': '72', 'channel_url': 'https://weverse.io/billlie', 'creators': ['Billlie'], 'timestamp': 1666262062, 'upload_date': '20221020', 'release_timestamp': 1666262058, 'release_date': '20221020', 'duration': 3102, 'thumbnail': r're:^https?://.*\.jpe?g$', 'view_count': int, 'like_count': int, 'comment_count': 
int, 'availability': 'needs_auth', 'live_status': 'was_live', }, }, { 'url': 'https://weverse.io/lesserafim/live/2-102331763', 'md5': 'e46125c08b13a6c8c1f4565035cca987', 'info_dict': { 'id': '2-102331763', 'ext': 'mp4', 'title': '🎂김채원 생신🎂', 'description': '🎂김채원 생신🎂', 'uploader': 'LE SSERAFIM ', 'uploader_id': 'd26ddc1e258488a0a2b795218d14d59d', 'channel': 'lesserafim', 'channel_id': '47', 'channel_url': 'https://weverse.io/lesserafim', 'creators': ['LE SSERAFIM'], 'timestamp': 1659353400, 'upload_date': '20220801', 'release_timestamp': 1659353400, 'release_date': '20220801', 'duration': 3006, 'thumbnail': r're:^https?://.*\.jpe?g$', 'view_count': int, 'like_count': int, 'comment_count': int, 'availability': 'needs_auth', 'live_status': 'was_live', 'subtitles': { 'id_ID': 'count:2', 'en_US': 'count:2', 'es_ES': 'count:2', 'vi_VN': 'count:2', 'th_TH': 'count:2', 'zh_CN': 'count:2', 'zh_TW': 'count:2', 'ja_JP': 'count:2', 'ko_KR': 'count:2', }, }, }, { 'url': 'https://weverse.io/treasure/live/2-117230416', 'info_dict': { 'id': '2-117230416', 'ext': 'mp4', 'title': r're:스껄도려님 첫 스무살 생파🦋', 'description': '', 'uploader': 'TREASURE', 'uploader_id': '77eabbc449ca37f7970054a136f60082', 'channel': 'treasure', 'channel_id': '20', 'channel_url': 'https://weverse.io/treasure', 'creator': 'TREASURE', 'timestamp': 1680667651, 'upload_date': '20230405', 'release_timestamp': 1680667639, 'release_date': '20230405', 'thumbnail': r're:^https?://.*\.jpe?g$', 'view_count': int, 'like_count': int, 'comment_count': int, 'availability': 'needs_auth', 'live_status': 'is_live', }, 'skip': 'Livestream has ended', }] def _real_extract(self, url): channel, video_id = self._match_valid_url(url).group('artist', 'id') post = self._call_post_api(video_id) api_video_id = post['extension']['video']['videoId'] availability = self._extract_availability(post) live_status = self._extract_live_status(post) video_info, formats = {}, [] if live_status == 'is_upcoming': self.raise_no_formats('Livestream has 
not yet started', expected=True) elif live_status == 'is_live': video_info = self._call_api( f'/video/v1.3/lives/{api_video_id}/playInfo?preview.format=json&preview.version=v2', video_id, note='Downloading live JSON') playback = self._parse_json(video_info['lipPlayback'], video_id) m3u8_url = traverse_obj(playback, ( 'media', lambda _, v: v['protocol'] == 'HLS', 'path', {url_or_none}), get_all=False) # Live subtitles are not downloadable, but extract to silence "ignoring subs" warning formats, _ = self._extract_m3u8_formats_and_subtitles( m3u8_url, video_id, 'mp4', m3u8_id='hls', live=True) elif live_status == 'post_live': if availability in ('premium_only', 'subscriber_only'): self.report_drm(video_id) self.raise_no_formats( 'Livestream has ended and downloadable VOD is not available', expected=True) else: infra_video_id = post['extension']['video']['infraVideoId'] in_key = self._call_api( f'/video/v1.1/vod/{api_video_id}/inKey?preview=false', video_id, data=b'{}', note='Downloading VOD API key')['inKey'] video_info = self._download_json( f'https://global.apis.naver.com/rmcnmv/rmcnmv/vod/play/v2.0/{infra_video_id}', video_id, note='Downloading VOD JSON', query={ 'key': in_key, 'sid': traverse_obj(post, ('extension', 'video', 'serviceId')) or '2070', 'pid': str(uuid.uuid4()), 'nonce': int(time.time() * 1000), 'devt': 'html5_pc', 'prv': 'Y' if post.get('membershipOnly') else 'N', 'aup': 'N', 'stpb': 'N', 'cpl': 'en', 'env': 'prod', 'lc': 'en', 'adi': '[{"adSystem":"null"}]', 'adu': '/', }) formats = self._get_formats(video_info, video_id) has_drm = traverse_obj(video_info, ('meta', 'provider', 'name', {str.lower})) == 'drm' if has_drm and formats: self.report_warning( 'Requested content is DRM-protected, only a 30-second preview is available', video_id) elif has_drm and not formats: self.report_drm(video_id) return { 'id': video_id, 'channel': channel, 'channel_url': f'https://weverse.io/{channel}', 'formats': formats, 'availability': availability, 'live_status': 
live_status, **self._parse_post_meta(post), **NaverBaseIE.process_subtitles(video_info, self._get_subs), } class WeverseMediaIE(WeverseBaseIE): _VALID_URL = r'https?://(?:www\.|m\.)?weverse\.io/(?P<artist>[^/?#]+)/media/(?P<id>[\d-]+)' _TESTS = [{ 'url': 'https://weverse.io/billlie/media/4-116372884', 'info_dict': { 'id': 'e-C9wLSQs6o', 'ext': 'mp4', 'title': 'Billlie | \'EUNOIA\' Performance Video (heartbeat ver.)', 'description': 'md5:6181caaf2a2397bca913ffe368c104e5', 'channel': 'Billlie', 'channel_id': 'UCyc9sUCxELTDK9vELO5Fzeg', 'channel_url': 'https://www.youtube.com/channel/UCyc9sUCxELTDK9vELO5Fzeg', 'uploader': 'Billlie', 'uploader_id': '@Billlie', 'uploader_url': 'https://www.youtube.com/@Billlie', 'upload_date': '20230403', 'timestamp': 1680533992, 'duration': 211, 'age_limit': 0, 'playable_in_embed': True, 'live_status': 'not_live', 'availability': 'public', 'view_count': int, 'comment_count': int, 'like_count': int, 'channel_follower_count': int, 'thumbnail': 'https://i.ytimg.com/vi/e-C9wLSQs6o/maxresdefault.jpg', 'categories': ['Entertainment'], 'tags': 'count:7', 'channel_is_verified': True, 'heatmap': 'count:100', }, }, { 'url': 'https://weverse.io/billlie/media/3-102914520', 'md5': '031551fcbd716bc4f080cb6174a43d8a', 'info_dict': { 'id': '3-102914520', 'ext': 'mp4', 'title': 'From. 
SUHYEON🌸', 'description': 'Billlie 멤버별 독점 영상 공개💙💜', 'uploader': 'Billlie_official', 'uploader_id': 'f569c6e92f7eaffef0a395037dcaa54f', 'channel': 'billlie', 'channel_id': '72', 'channel_url': 'https://weverse.io/billlie', 'creators': ['Billlie'], 'timestamp': 1662174000, 'upload_date': '20220903', 'release_timestamp': 1662174000, 'release_date': '20220903', 'duration': 17.0, 'thumbnail': r're:^https?://.*\.jpe?g$', 'view_count': int, 'like_count': int, 'comment_count': int, 'availability': 'needs_auth', 'live_status': 'not_live', }, }] def _real_extract(self, url): channel, video_id = self._match_valid_url(url).group('artist', 'id') post = self._call_post_api(video_id) media_type = traverse_obj(post, ('extension', 'mediaInfo', 'mediaType', {str.lower})) youtube_id = traverse_obj(post, ('extension', 'youtube', 'youtubeVideoId', {str})) if media_type == 'vod': return self.url_result(f'https://weverse.io/{channel}/live/{video_id}', WeverseIE) elif media_type == 'youtube' and youtube_id: return self.url_result(youtube_id, YoutubeIE) elif media_type == 'image': self.raise_no_formats('No video content found in webpage', expected=True) elif media_type: raise ExtractorError(f'Unsupported media type "{media_type}"') self.raise_no_formats('No video content found in webpage') class WeverseMomentIE(WeverseBaseIE): _VALID_URL = r'https?://(?:www\.|m\.)?weverse\.io/(?P<artist>[^/?#]+)/moment/(?P<uid>[\da-f]+)/post/(?P<id>[\d-]+)' _TESTS = [{ 'url': 'https://weverse.io/secretnumber/moment/66a07e164b56a696ee71c99315ffe27b/post/1-117229444', 'md5': '87733ac19a54081b7dfc2442036d282b', 'info_dict': { 'id': '1-117229444', 'ext': 'mp4', 'title': '今日もめっちゃいい天気☀️🌤️', 'uploader': '레아', 'uploader_id': '66a07e164b56a696ee71c99315ffe27b', 'channel': 'secretnumber', 'channel_id': '56', 'creators': ['SECRET NUMBER'], 'duration': 10, 'upload_date': '20230405', 'timestamp': 1680653968, 'thumbnail': r're:^https?://.*\.jpe?g$', 'like_count': int, 'comment_count': int, 'availability': 'needs_auth', 
}, }] def _real_extract(self, url): channel, uploader_id, video_id = self._match_valid_url(url).group('artist', 'uid', 'id') post = self._call_post_api(video_id) api_video_id = post['extension']['moment']['video']['videoId'] video_info = self._call_api( f'/cvideo/v1.0/cvideo-{api_video_id}/playInfo?videoId={api_video_id}', video_id, note='Downloading moment JSON')['playInfo'] return { 'id': video_id, 'channel': channel, 'uploader_id': uploader_id, 'formats': self._get_formats(video_info, video_id), 'availability': self._extract_availability(post), **traverse_obj(post, { 'title': ((('extension', 'moment', 'body'), 'body'), {str}), 'uploader': ('author', 'profileName', {str}), 'creator': (('community', 'author'), 'communityName', {str}), 'channel_id': (('community', 'author'), 'communityId', {str_or_none}), 'duration': ('extension', 'moment', 'video', 'uploadInfo', 'playTime', {float_or_none}), 'timestamp': ('publishedAt', {int_or_none(scale=1000)}), 'thumbnail': ('extension', 'moment', 'video', 'uploadInfo', 'imageUrl', {url_or_none}), 'like_count': ('emotionCount', {int_or_none}), 'comment_count': ('commentCount', {int_or_none}), }, get_all=False), **NaverBaseIE.process_subtitles(video_info, self._get_subs), } class WeverseTabBaseIE(WeverseBaseIE): _ENDPOINT = None _PATH = None _QUERY = {} _RESULT_IE = None def _entries(self, channel_id, channel, first_page): query = self._QUERY.copy() for page in itertools.count(1): posts = first_page if page == 1 else self._call_api( update_url_query(self._ENDPOINT % channel_id, query), channel, note=f'Downloading {self._PATH} tab page {page}') for post in traverse_obj(posts, ('data', lambda _, v: v['postId'])): yield self.url_result( f'https://weverse.io/{channel}/{self._PATH}/{post["postId"]}', self._RESULT_IE, post['postId'], **self._parse_post_meta(post), channel=channel, channel_url=f'https://weverse.io/{channel}', availability=self._extract_availability(post), live_status=self._extract_live_status(post)) query['after'] = 
traverse_obj(posts, ('paging', 'nextParams', 'after', {str})) if not query['after']: break def _real_extract(self, url): channel = self._match_id(url) channel_id = self._get_community_id(channel) first_page = self._call_api( update_url_query(self._ENDPOINT % channel_id, self._QUERY), channel, note=f'Downloading {self._PATH} tab page 1') return self.playlist_result( self._entries(channel_id, channel, first_page), f'{channel}-{self._PATH}', **traverse_obj(first_page, ('data', ..., { 'playlist_title': ('community', 'communityName', {str}), 'thumbnail': ('author', 'profileImageUrl', {url_or_none}), }), get_all=False)) class WeverseLiveTabIE(WeverseTabBaseIE): _VALID_URL = r'https?://(?:www\.|m\.)?weverse\.io/(?P<id>[^/?#]+)/live/?(?:[?#]|$)' _TESTS = [{ 'url': 'https://weverse.io/billlie/live/', 'playlist_mincount': 55, 'info_dict': { 'id': 'billlie-live', 'title': 'Billlie', 'thumbnail': r're:^https?://.*\.jpe?g$', }, }] _ENDPOINT = '/post/v1.0/community-%s/liveTabPosts' _PATH = 'live' _QUERY = {'fieldSet': 'postsV1'} _RESULT_IE = WeverseIE class WeverseMediaTabIE(WeverseTabBaseIE): _VALID_URL = r'https?://(?:www\.|m\.)?weverse\.io/(?P<id>[^/?#]+)/media(?:/|/all|/new)?(?:[?#]|$)' _TESTS = [{ 'url': 'https://weverse.io/billlie/media/', 'playlist_mincount': 231, 'info_dict': { 'id': 'billlie-media', 'title': 'Billlie', 'thumbnail': r're:^https?://.*\.jpe?g$', }, }, { 'url': 'https://weverse.io/lesserafim/media/all', 'only_matching': True, }, { 'url': 'https://weverse.io/lesserafim/media/new', 'only_matching': True, }] _ENDPOINT = '/media/v1.0/community-%s/more' _PATH = 'media' _QUERY = {'fieldSet': 'postsV1', 'filterType': 'RECENT'} _RESULT_IE = WeverseMediaIE class WeverseLiveIE(WeverseBaseIE): _VALID_URL = r'https?://(?:www\.|m\.)?weverse\.io/(?P<id>[^/?#]+)/?(?:[?#]|$)' _TESTS = [{ 'url': 'https://weverse.io/purplekiss', 'info_dict': { 'id': '3-116560493', 'ext': 'mp4', 'title': r're:모하냥🫶🏻', 'description': '내일은 금요일~><', 'uploader': '채인', 'uploader_id': 
'1ffb1d9d904d6b3db2783f876eb9229d', 'channel': 'purplekiss', 'channel_id': '35', 'channel_url': 'https://weverse.io/purplekiss', 'creators': ['PURPLE KISS'], 'timestamp': 1680780892, 'upload_date': '20230406', 'release_timestamp': 1680780883, 'release_date': '20230406', 'thumbnail': 'https://weverse-live.pstatic.net/v1.0/live/62044/thumb', 'view_count': int, 'like_count': int, 'comment_count': int, 'availability': 'needs_auth', 'live_status': 'is_live', }, 'skip': 'Livestream has ended', }, { 'url': 'https://weverse.io/lesserafim', 'info_dict': { 'id': '4-181521628', 'ext': 'mp4', 'title': r're:심심해서요', 'description': '', 'uploader': '채채🤎', 'uploader_id': 'd49b8b06f3cc1d92d655b25ab27ac2e7', 'channel': 'lesserafim', 'channel_id': '47', 'creators': ['LE SSERAFIM'], 'channel_url': 'https://weverse.io/lesserafim', 'timestamp': 1728570273, 'upload_date': '20241010', 'release_timestamp': 1728570264, 'release_date': '20241010', 'thumbnail': r're:https://phinf\.wevpstatic\.net/.+\.png', 'view_count': int, 'like_count': int, 'comment_count': int, 'availability': 'needs_auth', 'live_status': 'is_live', }, 'skip': 'Livestream has ended', }, { 'url': 'https://weverse.io/billlie/', 'only_matching': True, }] def _real_extract(self, url): channel = self._match_id(url) channel_id = self._get_community_id(channel) video_id = traverse_obj( self._call_api(update_url_query(f'/post/v1.0/community-{channel_id}/liveTab', { 'debugMessage': 'true', 'fields': 'onAirLivePosts.fieldSet(postsV1).limit(10),reservedLivePosts.fieldSet(postsV1).limit(10)', }), channel, note='Downloading live JSON'), ( ('onAirLivePosts', 'reservedLivePosts'), 'data', lambda _, v: self._extract_live_status(v) in ('is_live', 'is_upcoming'), 'postId', {str}), get_all=False) if not video_id: raise UserNotLive(video_id=channel) return self.url_result(f'https://weverse.io/{channel}/live/{video_id}', WeverseIE)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/drooble.py
yt_dlp/extractor/drooble.py
import json from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, try_get, ) class DroobleIE(InfoExtractor): _VALID_URL = r'''(?x)https?://drooble\.com/(?: (?:(?P<user>[^/]+)/)?(?P<kind>song|videos|music/albums)/(?P<id>\d+)| (?P<user_2>[^/]+)/(?P<kind_2>videos|music)) ''' _TESTS = [{ 'url': 'https://drooble.com/song/2858030', 'md5': '5ffda90f61c7c318dc0c3df4179eb064', 'info_dict': { 'id': '2858030', 'ext': 'mp3', 'title': 'Skankocillin', 'upload_date': '20200801', 'timestamp': 1596241390, 'uploader_id': '95894', 'uploader': 'Bluebeat Shelter', }, }, { 'url': 'https://drooble.com/karl340758/videos/2859183', 'info_dict': { 'id': 'J6QCQY_I5Tk', 'ext': 'mp4', 'title': 'Skankocillin', 'uploader_id': 'UCrSRoI5vVyeYihtWEYua7rg', 'description': 'md5:ffc0bd8ba383db5341a86a6cd7d9bcca', 'upload_date': '20200731', 'uploader': 'Bluebeat Shelter', }, }, { 'url': 'https://drooble.com/karl340758/music/albums/2858031', 'info_dict': { 'id': '2858031', }, 'playlist_mincount': 8, }, { 'url': 'https://drooble.com/karl340758/music', 'info_dict': { 'id': 'karl340758', }, 'playlist_mincount': 8, }, { 'url': 'https://drooble.com/karl340758/videos', 'info_dict': { 'id': 'karl340758', }, 'playlist_mincount': 8, }] def _call_api(self, method, video_id, data=None): response = self._download_json( f'https://drooble.com/api/dt/{method}', video_id, data=json.dumps(data).encode()) if not response[0]: raise ExtractorError('Unable to download JSON metadata') return response[1] def _real_extract(self, url): mobj = self._match_valid_url(url) user = mobj.group('user') or mobj.group('user_2') kind = mobj.group('kind') or mobj.group('kind_2') display_id = mobj.group('id') or user if mobj.group('kind_2') == 'videos': data = {'from_user': display_id, 'album': -1, 'limit': 18, 'offset': 0, 'order': 'new2old', 'type': 'video'} elif kind in ('music/albums', 'music'): data = {'user': user, 'public_only': True, 'individual_limit': {'singles': 1, 'albums': 1, 'playlists': 1}} else: 
data = {'url_slug': display_id, 'children': 10, 'order': 'old2new'} method = 'getMusicOverview' if kind in ('music/albums', 'music') else 'getElements' json_data = self._call_api(method, display_id, data=data) if kind in ('music/albums', 'music'): json_data = json_data['singles']['list'] entites = [] for media in json_data: url = media.get('external_media_url') or media.get('link') if url.startswith('https://www.youtube.com'): entites.append({ '_type': 'url', 'url': url, 'ie_key': 'Youtube', }) continue is_audio = (media.get('type') or '').lower() == 'audio' entites.append({ 'url': url, 'id': media['id'], 'title': media['title'], 'duration': int_or_none(media.get('duration')), 'timestamp': int_or_none(media.get('timestamp')), 'album': try_get(media, lambda x: x['album']['title']), 'uploader': try_get(media, lambda x: x['creator']['display_name']), 'uploader_id': try_get(media, lambda x: x['creator']['id']), 'thumbnail': media.get('image_comment'), 'like_count': int_or_none(media.get('likes')), 'vcodec': 'none' if is_audio else None, 'ext': 'mp3' if is_audio else None, }) if len(entites) > 1: return self.playlist_result(entites, display_id) return entites[0]
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/musescore.py
yt_dlp/extractor/musescore.py
import hashlib from .common import InfoExtractor class MuseScoreIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?musescore\.com/(?:user/\d+|[^/]+)(?:/scores)?/(?P<id>[^#&?]+)' _TESTS = [{ 'url': 'https://musescore.com/user/73797/scores/142975', 'info_dict': { 'id': '142975', 'ext': 'mp3', 'title': 'WA Mozart Marche Turque (Turkish March fingered)', 'description': 'md5:0ca4cf6b79d7f5868a1fee74097394ab', 'thumbnail': r're:https?://cdn\.ustatik\.com/musescore/.*\.jpg', 'uploader': 'PapyPiano', 'creators': ['Wolfgang Amadeus Mozart'], }, }, { 'url': 'https://musescore.com/user/36164500/scores/6837638', 'info_dict': { 'id': '6837638', 'ext': 'mp3', 'title': 'Sweet Child O\' Mine – Guns N\' Roses sweet child', 'description': 'md5:2cd49bd6b4e48a75a3c469d4775d5079', 'thumbnail': r're:https?://cdn\.ustatik\.com/musescore/.*\.png', 'uploader': 'roxbelviolin', 'creators': ['Guns N´Roses Arr. Roxbel Violin'], }, }, { 'url': 'https://musescore.com/classicman/fur-elise', 'info_dict': { 'id': '33816', 'ext': 'mp3', 'title': 'Für Elise – Beethoven', 'description': 'md5:e37b241c0280b33e9ac25651b815d06e', 'thumbnail': r're:https?://cdn\.ustatik\.com/musescore/.*\.jpg', 'uploader': 'ClassicMan', 'creators': ['Ludwig van Beethoven (1770–1827)'], }, }, { 'url': 'https://musescore.com/minh_cuteee/scores/6555384', 'only_matching': True, }] @staticmethod def _generate_auth_token(video_id): return hashlib.md5((video_id + 'mp30gs').encode()).hexdigest()[:4] def _real_extract(self, url): webpage = self._download_webpage(url, None) url = self._og_search_url(webpage) or url video_id = self._match_id(url) mp3_url = self._download_json( 'https://musescore.com/api/jmuse', video_id, headers={'authorization': self._generate_auth_token(video_id)}, query={'id': video_id, 'index': '0', 'type': 'mp3'})['info']['url'] formats = [{ 'url': mp3_url, 'ext': 'mp3', 'vcodec': 'none', }] return { 'id': video_id, 'formats': formats, 'title': self._og_search_title(webpage), 'description': 
self._html_search_meta('description', webpage, 'description'), 'thumbnail': self._og_search_thumbnail(webpage), 'uploader': self._html_search_meta('musescore:author', webpage, 'uploader'), 'creator': self._html_search_meta('musescore:composer', webpage, 'composer'), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/boosty.py
yt_dlp/extractor/boosty.py
import json
import urllib.parse

from .common import InfoExtractor
from .youtube import YoutubeIE
from ..utils import (
    ExtractorError,
    bug_reports_message,
    int_or_none,
    qualities,
    str_or_none,
    url_or_none,
)
from ..utils.traversal import traverse_obj


class BoostyIE(InfoExtractor):
    """Extract videos from boosty.to posts (native ok_video and embedded YouTube)."""

    _VALID_URL = r'https?://(?:www\.)?boosty\.to/(?P<user>[^/#?]+)/posts/(?P<post_id>[^/#?]+)'
    _TESTS = [{
        # single ok_video
        'url': 'https://boosty.to/kuplinov/posts/e55d050c-e3bb-4873-a7db-ac7a49b40c38',
        'info_dict': {
            'id': 'd7473824-352e-48e2-ae53-d4aa39459968',
            'title': 'phasma_3',
            'channel': 'Kuplinov',
            'channel_id': '7958701',
            'timestamp': 1655031975,
            'upload_date': '20220612',
            'release_timestamp': 1655049000,
            'release_date': '20220612',
            'modified_timestamp': 1668680993,
            'modified_date': '20221117',
            'tags': ['куплинов', 'phasmophobia'],
            'like_count': int,
            'ext': 'mp4',
            'duration': 105,
            'view_count': int,
            'thumbnail': r're:^https://i\.mycdn\.me/videoPreview\?',
        },
    }, {
        # multiple ok_video
        'url': 'https://boosty.to/maddyson/posts/0c652798-3b35-471f-8b48-a76a0b28736f',
        'info_dict': {
            'id': '0c652798-3b35-471f-8b48-a76a0b28736f',
            'title': 'то что не пропустил юта6',
            'channel': 'Илья Давыдов',
            'channel_id': '6808257',
            'timestamp': 1694017040,
            'upload_date': '20230906',
            'release_timestamp': 1694017040,
            'release_date': '20230906',
            'modified_timestamp': 1694071178,
            'modified_date': '20230907',
            'like_count': int,
        },
        'playlist_count': 3,
        'playlist': [{
            'info_dict': {
                'id': 'cc325a9f-a563-41c6-bf47-516c1b506c9a',
                'title': 'то что не пропустил юта6',
                'channel': 'Илья Давыдов',
                'channel_id': '6808257',
                'timestamp': 1694017040,
                'upload_date': '20230906',
                'release_timestamp': 1694017040,
                'release_date': '20230906',
                'modified_timestamp': 1694071178,
                'modified_date': '20230907',
                'like_count': int,
                'ext': 'mp4',
                'duration': 31204,
                'view_count': int,
                'thumbnail': r're:^https://i\.mycdn\.me/videoPreview\?',
            },
        }, {
            'info_dict': {
                'id': 'd07b0a72-9493-4512-b54e-55ce468fd4b7',
                'title': 'то что не пропустил юта6',
                'channel': 'Илья Давыдов',
                'channel_id': '6808257',
                'timestamp': 1694017040,
                'upload_date': '20230906',
                'release_timestamp': 1694017040,
                'release_date': '20230906',
                'modified_timestamp': 1694071178,
                'modified_date': '20230907',
                'like_count': int,
                'ext': 'mp4',
                'duration': 25704,
                'view_count': int,
                'thumbnail': r're:^https://i\.mycdn\.me/videoPreview\?',
            },
        }, {
            'info_dict': {
                'id': '4a3bba32-78c8-422a-9432-2791aff60b42',
                'title': 'то что не пропустил юта6',
                'channel': 'Илья Давыдов',
                'channel_id': '6808257',
                'timestamp': 1694017040,
                'upload_date': '20230906',
                'release_timestamp': 1694017040,
                'release_date': '20230906',
                'modified_timestamp': 1694071178,
                'modified_date': '20230907',
                'like_count': int,
                'ext': 'mp4',
                'duration': 31867,
                'view_count': int,
                'thumbnail': r're:^https://i\.mycdn\.me/videoPreview\?',
            },
        }],
    }, {
        # single external video (youtube)
        'url': 'https://boosty.to/denischuzhoy/posts/6094a487-bcec-4cf8-a453-43313b463c38',
        'info_dict': {
            'id': 'EXelTnve5lY',
            'title': 'Послание Президента Федеральному Собранию | Класс народа',
            'upload_date': '20210425',
            'channel': 'Денис Чужой',
            'tags': 'count:10',
            'like_count': int,
            'ext': 'mp4',
            'duration': 816,
            'view_count': int,
            'thumbnail': r're:^https://i\.ytimg\.com/',
            'age_limit': 0,
            'availability': 'public',
            'categories': list,
            'channel_follower_count': int,
            'channel_id': 'UCCzVNbWZfYpBfyofCCUD_0w',
            'channel_is_verified': bool,
            'channel_url': r're:^https://www\.youtube\.com/',
            'comment_count': int,
            'description': str,
            'heatmap': 'count:100',
            'live_status': str,
            'playable_in_embed': bool,
            'uploader': str,
            'uploader_id': str,
            'uploader_url': r're:^https://www\.youtube\.com/',
        },
    }]

    # Progressive MP4 quality labels, worst to best (used for format ranking)
    _MP4_TYPES = ('tiny', 'lowest', 'low', 'medium', 'high', 'full_hd', 'quad_hd', 'ultra_hd')

    def _extract_formats(self, player_urls, video_id):
        formats = []
        rank = qualities(self._MP4_TYPES)
        # Only consider entries that actually carry a valid URL
        for source in traverse_obj(player_urls, lambda _, v: url_or_none(v['url'])):
            src_url = source['url']
            src_type = source.get('type')
            if src_type in ('hls', 'hls_live', 'live_ondemand_hls', 'live_playback_hls'):
                formats.extend(self._extract_m3u8_formats(src_url, video_id, m3u8_id='hls', fatal=False))
            elif src_type in ('dash', 'dash_live', 'live_playback_dash'):
                formats.extend(self._extract_mpd_formats(src_url, video_id, mpd_id='dash', fatal=False))
            elif src_type in self._MP4_TYPES:
                formats.append({
                    'url': src_url,
                    'ext': 'mp4',
                    'format_id': src_type,
                    'quality': rank(src_type),
                })
            else:
                self.report_warning(f'Unknown format type: {src_type!r}')
        return formats

    def _real_extract(self, url):
        user, post_id = self._match_valid_url(url).group('user', 'post_id')

        # The 'auth' cookie holds a JSON blob with a bearer token; pass it
        # along so subscriber-only posts resolve when the user is logged in.
        auth_headers = {}
        cookie = self._get_cookies('https://boosty.to/').get('auth')
        if cookie is not None:
            try:
                token_data = json.loads(urllib.parse.unquote(cookie.value))
                auth_headers['Authorization'] = f'Bearer {token_data["accessToken"]}'
            except (json.JSONDecodeError, KeyError):
                self.report_warning(f'Failed to extract token from auth cookie{bug_reports_message()}')

        post = self._download_json(
            f'https://api.boosty.to/v1/blog/{user}/post/{post_id}', post_id,
            note='Downloading post data', errnote='Unable to download post data', headers=auth_headers)

        post_title = post.get('title')
        if not post_title:
            self.report_warning('Unable to extract post title. Falling back to parsing html page')
            webpage = self._download_webpage(url, video_id=post_id)
            post_title = self._og_search_title(webpage, default=None) or self._html_extract_title(webpage)

        # Metadata shared by the post itself and every video inside it
        common_metadata = {
            'title': post_title,
            **traverse_obj(post, {
                'channel': ('user', 'name', {str}),
                'channel_id': ('user', 'id', {str_or_none}),
                'timestamp': ('createdAt', {int_or_none}),
                'release_timestamp': ('publishTime', {int_or_none}),
                'modified_timestamp': ('updatedAt', {int_or_none}),
                'tags': ('tags', ..., 'title', {str}),
                'like_count': ('count', 'likes', {int_or_none}),
            }),
        }

        entries = []
        for content_item in traverse_obj(post, ('data', ..., {dict})):
            content_type = content_item.get('type')
            if content_type == 'video' and url_or_none(content_item.get('url')):
                # External embed — delegate to the YouTube extractor
                entries.append(self.url_result(content_item['url'], YoutubeIE))
            elif content_type == 'ok_video':
                video_id = content_item.get('id') or post_id
                entries.append({
                    'id': video_id,
                    'formats': self._extract_formats(content_item.get('playerUrls'), video_id),
                    **common_metadata,
                    **traverse_obj(content_item, {
                        'title': ('title', {str}),
                        'duration': ('duration', {int_or_none}),
                        'view_count': ('viewsCounter', {int_or_none}),
                        'thumbnail': (('previewUrl', 'defaultPreview'), {url_or_none}),
                    }, get_all=False)})

        if not entries and not post.get('hasAccess'):
            self.raise_login_required('This post requires a subscription', metadata_available=True)
        elif not entries:
            raise ExtractorError('No videos found', expected=True)

        if len(entries) == 1:
            return entries[0]
        return self.playlist_result(entries, post_id, post_title, **common_metadata)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/pgatour.py
yt_dlp/extractor/pgatour.py
from .brightcove import BrightcoveNewIE from .common import InfoExtractor class PGATourIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?pgatour\.com/video/[\w-]+/(?P<tc>T)?(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.pgatour.com/video/competition/T6322447785112/adam-hadwin-2023-the-players-round-4-18th-hole-shot-1', 'info_dict': { 'id': '6322447785112', 'ext': 'mp4', 'title': 'Adam Hadwin | 2023 THE PLAYERS | Round 4 | 18th hole | Shot 1', 'uploader_id': '6116716431001', 'upload_date': '20230312', 'timestamp': 1678653136, 'duration': 20.011, 'thumbnail': r're:^https://.+\.jpg', 'tags': 'count:7', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.pgatour.com/video/features/6322506425112/follow-the-players-trophy-on-championship-sunday', 'info_dict': { 'id': '6322506425112', 'ext': 'mp4', 'title': 'Follow THE PLAYERS trophy on Championship Sunday', 'description': 'md5:4d29e4bdfa03694a0ebfd08950398568', 'uploader_id': '6082840763001', 'upload_date': '20230313', 'timestamp': 1678739835, 'duration': 123.435, 'thumbnail': r're:^https://.+\.jpg', 'tags': 'count:8', }, 'params': {'skip_download': 'm3u8'}, }] def _real_extract(self, url): video_id, is_tourcast = self._match_valid_url(url).group('id', 'tc') # From https://www.pgatour.com/_next/static/chunks/pages/_app-8bcf849560daf38d.js account_id = '6116716431001' if is_tourcast else '6082840763001' player_id = 'Vsd5Umu8r' if is_tourcast else 'FWIBYMBPj' return self.url_result( f'https://players.brightcove.net/{account_id}/{player_id}_default/index.html?videoId={video_id}', BrightcoveNewIE)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/txxx.py
yt_dlp/extractor/txxx.py
import base64
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    js_to_json,
    merge_dicts,
    parse_duration,
    traverse_obj,
    try_call,
    url_or_none,
    urljoin,
    variadic,
)

# Table undoing the site's base64 obfuscation: Cyrillic homoglyphs stand in for
# their Latin lookalikes, and ',' '.' '~' replace the base64 chars '/' '+' '='.
_B64_TRANS = str.maketrans({
    '\u0405': 'S', '\u0406': 'I', '\u0408': 'J',
    '\u0410': 'A', '\u0412': 'B', '\u0415': 'E', '\u041a': 'K',
    '\u041c': 'M', '\u041d': 'H', '\u041e': 'O', '\u0420': 'P',
    '\u0421': 'C', '\u0425': 'X', ',': '/', '.': '+', '~': '=',
})


def decode_base64(text):
    """Decode the site's homoglyph-obfuscated base64 string to str."""
    return base64.b64decode(text.translate(_B64_TRANS)).decode()


def get_formats(host, video_file):
    """Build format dicts from the videofile.php payload; list order sets quality."""
    formats = []
    for index, source in enumerate(video_file):
        if not source.get('video_url'):
            continue
        formats.append({
            'url': urljoin(f'https://{host}', decode_base64(source['video_url'])),
            'format_id': try_call(lambda: variadic(source['format'])[0].lstrip('_')),
            'quality': index,
        })
    return formats


class TxxxIE(InfoExtractor):
    """Extractor for the Txxx family of sites (shared API across all domains)."""

    _DOMAINS = (
        'hclips.com',
        'hdzog.com',
        'hdzog.tube',
        'hotmovs.com',
        'hotmovs.tube',
        'inporn.com',
        'privatehomeclips.com',
        'tubepornclassic.com',
        'txxx.com',
        'txxx.tube',
        'upornia.com',
        'upornia.tube',
        'vjav.com',
        'vjav.tube',
        'vxxx.com',
        'voyeurhit.com',
        'voyeurhit.tube',
    )
    _VALID_URL = rf'''(?x)
        https?://(?:www\.)?(?P<host>{"|".join(map(re.escape, _DOMAINS))})/
        (?:videos?[/-]|embed/)(?P<id>\d+)(?:/(?P<display_id>[^/?#]+))?
    '''
    _EMBED_REGEX = [rf'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?(?:{"|".join(map(re.escape, _DOMAINS))})/embed/[^"\']*)\1']
    _TESTS = [{
        'url': 'https://txxx.com/videos/16574965/digital-desire-malena-morgan/',
        'md5': 'c54e4ace54320aaf8e2a72df87859391',
        'info_dict': {
            'id': '16574965',
            'display_id': 'digital-desire-malena-morgan',
            'ext': 'mp4',
            'title': 'Digital Desire - Malena Morgan',
            'uploader': 'Lois Argentum',
            'duration': 694,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'age_limit': 18,
            'thumbnail': 'https://tn.txxx.tube/contents/videos_sources/16574000/16574965/screenshots/1.jpg',
        },
    }, {
        'url': 'https://txxx.tube/videos/16574965/digital-desire-malena-morgan/',
        'md5': 'c54e4ace54320aaf8e2a72df87859391',
        'info_dict': {
            'id': '16574965',
            'display_id': 'digital-desire-malena-morgan',
            'ext': 'mp4',
            'title': 'Digital Desire - Malena Morgan',
            'uploader': 'Lois Argentum',
            'duration': 694,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'age_limit': 18,
            'thumbnail': 'https://tn.txxx.tube/contents/videos_sources/16574000/16574965/screenshots/1.jpg',
        },
    }, {
        'url': 'https://vxxx.com/video-68925/',
        'md5': '1fcff3748b0c5b41fe41d0afa22409e1',
        'info_dict': {
            'id': '68925',
            'display_id': '68925',
            'ext': 'mp4',
            'title': 'Malena Morgan',
            'uploader': 'Huge Hughes',
            'duration': 694,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'age_limit': 18,
            'thumbnail': 'https://tn.vxxx.com/contents/videos_sources/68000/68925/screenshots/1.jpg',
        },
    }, {
        'url': 'https://hclips.com/videos/6291073/malena-morgan-masturbates-her-sweet/',
        'md5': 'a5dd4f83363972ee043313cff85e7e26',
        'info_dict': {
            'id': '6291073',
            'display_id': 'malena-morgan-masturbates-her-sweet',
            'ext': 'mp4',
            'title': 'Malena Morgan masturbates her sweet',
            'uploader': 'John Salt',
            'duration': 426,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'age_limit': 18,
            'thumbnail': 'https://hctn.nv7s.com/contents/videos_sources/6291000/6291073/screenshots/1.jpg',
        },
    }, {
        'url': 'https://hdzog.com/videos/67063/gorgeous-malena-morgan-will-seduce-you-at-the-first-glance/',
        'md5': 'f8bdedafd45d1ec2875c43fe33a846d3',
        'info_dict': {
            'id': '67063',
            'display_id': 'gorgeous-malena-morgan-will-seduce-you-at-the-first-glance',
            'ext': 'mp4',
            'title': 'Gorgeous Malena Morgan will seduce you at the first glance',
            'uploader': 'momlesson',
            'duration': 601,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'age_limit': 18,
            'thumbnail': 'https://tn.hdzog.com/contents/videos_sources/67000/67063/screenshots/1.jpg',
        },
    }, {
        'url': 'https://hdzog.tube/videos/67063/gorgeous-malena-morgan-will-seduce-you-at-the-first-glance/',
        'md5': 'f8bdedafd45d1ec2875c43fe33a846d3',
        'info_dict': {
            'id': '67063',
            'display_id': 'gorgeous-malena-morgan-will-seduce-you-at-the-first-glance',
            'ext': 'mp4',
            'title': 'Gorgeous Malena Morgan will seduce you at the first glance',
            'uploader': 'momlesson',
            'duration': 601,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'age_limit': 18,
            'thumbnail': 'https://tn.hdzog.com/contents/videos_sources/67000/67063/screenshots/1.jpg',
        },
    }, {
        'url': 'https://hotmovs.com/videos/8789287/unbelievable-malena-morgan-performing-in-incredible-masturantion/',
        'md5': '71d32c51584876472db87e561171a386',
        'info_dict': {
            'id': '8789287',
            'display_id': 'unbelievable-malena-morgan-performing-in-incredible-masturantion',
            'ext': 'mp4',
            'title': 'Unbelievable Malena Morgan performing in incredible masturantion',
            'uploader': 'Davit Sanchez',
            'duration': 940,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'age_limit': 18,
            'thumbnail': 'https://tn.hotmovs.com/contents/videos_sources/8789000/8789287/screenshots/10.jpg',
        },
    }, {
        'url': 'https://hotmovs.tube/videos/8789287/unbelievable-malena-morgan-performing-in-incredible-masturantion/',
        'md5': '71d32c51584876472db87e561171a386',
        'info_dict': {
            'id': '8789287',
            'display_id': 'unbelievable-malena-morgan-performing-in-incredible-masturantion',
            'ext': 'mp4',
            'title': 'Unbelievable Malena Morgan performing in incredible masturantion',
            'uploader': 'Davit Sanchez',
            'duration': 940,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'age_limit': 18,
            'thumbnail': 'https://tn.hotmovs.com/contents/videos_sources/8789000/8789287/screenshots/10.jpg',
        },
    }, {
        'url': 'https://inporn.com/video/517897/malena-morgan-solo/',
        'md5': '344db467481edf78f193cdf5820a7cfb',
        'info_dict': {
            'id': '517897',
            'display_id': 'malena-morgan-solo',
            'ext': 'mp4',
            'title': 'Malena Morgan - Solo',
            'uploader': 'Ashley Oxy',
            'duration': 480,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'age_limit': 18,
            'thumbnail': 'https://iptn.m3pd.com/media/tn/sources/517897_1.jpg',
        },
    }, {
        'url': 'https://privatehomeclips.com/videos/3630599/malena-morgan-cam-show/',
        'md5': 'ea657273e352493c5fb6357fbfa4f126',
        'info_dict': {
            'id': '3630599',
            'display_id': 'malena-morgan-cam-show',
            'ext': 'mp4',
            'title': 'malena morgan cam show',
            'uploader': 'Member9915',
            'duration': 290,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'age_limit': 18,
            'thumbnail': 'https://hctn.nv7s.com/contents/videos_sources/3630000/3630599/screenshots/15.jpg',
        },
    }, {
        'url': 'https://tubepornclassic.com/videos/1015455/mimi-rogers-full-body-massage-nude-compilation/',
        'md5': '2e9a6cf610c9862e86e0ce24f08f4427',
        'info_dict': {
            'id': '1015455',
            'display_id': 'mimi-rogers-full-body-massage-nude-compilation',
            'ext': 'mp4',
            'title': 'Mimi Rogers - Full Body Massage (Nude) compilation',
            'uploader': '88bhuto',
            'duration': 286,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'age_limit': 18,
            'thumbnail': 'https://tn.tubepornclassic.com/contents/videos_sources/1015000/1015455/screenshots/6.jpg',
        },
    }, {
        'url': 'https://upornia.com/videos/1498858/twistys-malena-morgan-starring-at-dr-morgan-baller/',
        'md5': '7ff7033340bc88a173198b7c22600e4f',
        'info_dict': {
            'id': '1498858',
            'display_id': 'twistys-malena-morgan-starring-at-dr-morgan-baller',
            'ext': 'mp4',
            'title': 'Twistys - Malena Morgan starring at Dr. Morgan-Baller',
            'uploader': 'mindgeek',
            'duration': 480,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'age_limit': 18,
            'thumbnail': 'https://tn.upornia.com/contents/videos_sources/1498000/1498858/screenshots/1.jpg',
        },
    }, {
        'url': 'https://upornia.tube/videos/1498858/twistys-malena-morgan-starring-at-dr-morgan-baller/',
        'md5': '7ff7033340bc88a173198b7c22600e4f',
        'info_dict': {
            'id': '1498858',
            'display_id': 'twistys-malena-morgan-starring-at-dr-morgan-baller',
            'ext': 'mp4',
            'title': 'Twistys - Malena Morgan starring at Dr. Morgan-Baller',
            'uploader': 'mindgeek',
            'duration': 480,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'age_limit': 18,
            'thumbnail': 'https://tn.upornia.com/contents/videos_sources/1498000/1498858/screenshots/1.jpg',
        },
    }, {
        'url': 'https://vjav.com/videos/11761/yui-hatano-in-if-yui-was-my-girlfriend2/',
        'md5': '6de5bc1f13bdfc3491a77f23edb1676f',
        'info_dict': {
            'id': '11761',
            'display_id': 'yui-hatano-in-if-yui-was-my-girlfriend2',
            'ext': 'mp4',
            'title': 'Yui Hatano in If Yui Was My Girlfriend',
            'uploader': 'Matheus69',
            'duration': 3310,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'age_limit': 18,
            'thumbnail': 'https://tn.vjav.com/contents/videos_sources/11000/11761/screenshots/23.jpg',
        },
    }, {
        'url': 'https://vjav.tube/videos/11761/yui-hatano-in-if-yui-was-my-girlfriend2/',
        'md5': '6de5bc1f13bdfc3491a77f23edb1676f',
        'info_dict': {
            'id': '11761',
            'display_id': 'yui-hatano-in-if-yui-was-my-girlfriend2',
            'ext': 'mp4',
            'title': 'Yui Hatano in If Yui Was My Girlfriend',
            'uploader': 'Matheus69',
            'duration': 3310,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'age_limit': 18,
            'thumbnail': 'https://tn.vjav.com/contents/videos_sources/11000/11761/screenshots/23.jpg',
        },
    }, {
        'url': 'https://voyeurhit.com/videos/332875/charlotte-stokely-elle-alexandra-malena-morgan-lingerie/',
        'md5': '12b4666e9c3e60dafe9182e5d12aae33',
        'info_dict': {
            'id': '332875',
            'display_id': 'charlotte-stokely-elle-alexandra-malena-morgan-lingerie',
            'ext': 'mp4',
            'title': 'Charlotte Stokely, Elle Alexandra, Malena Morgan-Lingerie',
            'uploader': 'Kyle Roberts',
            'duration': 655,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'age_limit': 18,
            'thumbnail': 'https://tn.voyeurhit.com/contents/videos_sources/332000/332875/screenshots/1.jpg',
        },
    }, {
        'url': 'https://voyeurhit.tube/videos/332875/charlotte-stokely-elle-alexandra-malena-morgan-lingerie/',
        'md5': '12b4666e9c3e60dafe9182e5d12aae33',
        'info_dict': {
            'id': '332875',
            'display_id': 'charlotte-stokely-elle-alexandra-malena-morgan-lingerie',
            'ext': 'mp4',
            'title': 'Charlotte Stokely, Elle Alexandra, Malena Morgan-Lingerie',
            'uploader': 'Kyle Roberts',
            'duration': 655,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'age_limit': 18,
            'thumbnail': 'https://tn.voyeurhit.com/contents/videos_sources/332000/332875/screenshots/1.jpg',
        },
    }]

    def _call_api(self, url, video_id, fatal=False, **kwargs):
        # The API signals errors in-band via an 'error' key
        content = self._download_json(url, video_id, fatal=fatal, **kwargs)
        if traverse_obj(content, 'error'):
            raise self._error_or_warning(ExtractorError(
                f'Txxx said: {content["error"]}', expected=True), fatal=fatal)
        return content or {}

    def _real_extract(self, url):
        video_id, host, display_id = self._match_valid_url(url).group('id', 'host', 'display_id')
        headers = {'Referer': url, 'X-Requested-With': 'XMLHttpRequest'}

        video_file = self._call_api(
            f'https://{host}/api/videofile.php?video_id={video_id}&lifetime=8640000',
            video_id, fatal=True, note='Downloading video file info', headers=headers)

        # Metadata JSON is sharded by millions/thousands of the numeric id
        slug = f'{int(1E6 * (int(video_id) // 1E6))}/{1000 * (int(video_id) // 1000)}'
        video_info = self._call_api(
            f'https://{host}/api/json/video/86400/{slug}/{video_id}.json',
            video_id, note='Downloading video info', headers=headers)

        return {
            'id': video_id,
            'display_id': display_id,
            'title': traverse_obj(video_info, ('video', 'title')),
            'uploader': traverse_obj(video_info, ('video', 'user', 'username')),
            'duration': parse_duration(traverse_obj(video_info, ('video', 'duration'))),
            'view_count': int_or_none(traverse_obj(video_info, ('video', 'statistics', 'viewed'))),
            'like_count': int_or_none(traverse_obj(video_info, ('video', 'statistics', 'likes'))),
            'dislike_count': int_or_none(traverse_obj(video_info, ('video', 'statistics', 'dislikes'))),
            'age_limit': 18,
            'thumbnail': traverse_obj(video_info, ('video', 'thumbsrc', {url_or_none})),
            'formats': get_formats(host, video_file),
        }


class PornTopIE(InfoExtractor):
    """porntop.com uses the same obfuscated-base64 payload as the Txxx family."""

    _VALID_URL = r'https?://(?P<host>(?:www\.)?porntop\.com)/video/(?P<id>\d+)(?:/(?P<display_id>[^/?]+))?'
    _TESTS = [{
        'url': 'https://porntop.com/video/101569/triple-threat-with-lia-lor-malena-morgan-and-dani-daniels/',
        'md5': '612ba7b3cb99455b382972948e200b08',
        'info_dict': {
            'id': '101569',
            'display_id': 'triple-threat-with-lia-lor-malena-morgan-and-dani-daniels',
            'ext': 'mp4',
            'title': 'Triple Threat With Lia Lor, Malena Morgan And Dani Daniels',
            'description': 'md5:285357d9d3a00ce5acb29f39f826dbf6',
            'uploader': 'PatrickBush',
            'duration': 480,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'age_limit': 18,
            'timestamp': 1609455029,
            'upload_date': '20201231',
            'thumbnail': 'https://tn.porntop.com/media/tn/sources/101569_1.jpg',
        },
    }]

    def _real_extract(self, url):
        video_id, host, display_id = self._match_valid_url(url).group('id', 'host', 'display_id')
        webpage = self._download_webpage(url, video_id)

        # Most metadata comes from the page's embedded schema.org VideoObject
        json_ld = self._json_ld(self._search_json(
            r'\bschemaJson\s*=', webpage, 'JSON-LD', video_id, transform_source=js_to_json,
            contains_pattern='{[^<]+?VideoObject[^<]+};'), video_id, fatal=True)

        # Source URLs are passed to the player as an obfuscated base64 JSON blob
        video_file = self._parse_json(decode_base64(self._search_regex(
            r"window\.initPlayer\(.*}}},\s*'(?P<json_b64c>[^']+)'",
            webpage, 'json_urls', group='json_b64c')), video_id)

        return merge_dicts({
            'id': video_id,
            'display_id': display_id,
            'age_limit': 18,
            'formats': get_formats(host, video_file),
        }, json_ld)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ndtv.py
yt_dlp/extractor/ndtv.py
import urllib.parse

from .common import InfoExtractor
from ..utils import parse_duration, remove_end, unified_strdate, urljoin


class NDTVIE(InfoExtractor):
    """Extractor for ndtv.com and its sub-sites (khabar, movies, sports, ...)."""

    _WORKING = False
    _VALID_URL = r'https?://(?:[^/]+\.)?ndtv\.com/(?:[^/]+/)*videos?/?(?:[^/]+/)*[^/?^&]+-(?P<id>\d+)'
    _TESTS = [
        {
            'url': 'https://khabar.ndtv.com/video/show/prime-time/prime-time-ill-system-and-poor-education-468818',
            'md5': '78efcf3880ef3fd9b83d405ca94a38eb',
            'info_dict': {
                'id': '468818',
                'ext': 'mp4',
                'title': 'प्राइम टाइम: सिस्टम बीमार, स्कूल बदहाल',
                'description': 'md5:f410512f1b49672e5695dea16ef2731d',
                'upload_date': '20170928',
                'duration': 2218,
                'thumbnail': r're:https?://.*\.jpg',
            },
        },
        {
            # __filename is url
            'url': 'http://movies.ndtv.com/videos/cracker-free-diwali-wishes-from-karan-johar-kriti-sanon-other-stars-470304',
            'md5': 'f1d709352305b44443515ac56b45aa46',
            'info_dict': {
                'id': '470304',
                'ext': 'mp4',
                'title': 'Cracker-Free Diwali Wishes From Karan Johar, Kriti Sanon & Other Stars',
                'description': 'md5:f115bba1adf2f6433fa7c1ade5feb465',
                'upload_date': '20171019',
                'duration': 137,
                'thumbnail': r're:https?://.*\.jpg',
            },
        },
        {
            'url': 'https://www.ndtv.com/video/news/news/delhi-s-air-quality-status-report-after-diwali-is-very-poor-470372',
            'only_matching': True,
        },
        {
            'url': 'https://auto.ndtv.com/videos/the-cnb-daily-october-13-2017-469935',
            'only_matching': True,
        },
        {
            'url': 'https://sports.ndtv.com/cricket/videos/2nd-t20i-rock-thrown-at-australia-cricket-team-bus-after-win-over-india-469764',
            'only_matching': True,
        },
        {
            'url': 'http://gadgets.ndtv.com/videos/uncharted-the-lost-legacy-review-465568',
            'only_matching': True,
        },
        {
            'url': 'http://profit.ndtv.com/videos/news/video-indian-economy-on-very-solid-track-international-monetary-fund-chief-470040',
            'only_matching': True,
        },
        {
            'url': 'http://food.ndtv.com/video-basil-seeds-coconut-porridge-419083',
            'only_matching': True,
        },
        {
            'url': 'https://doctor.ndtv.com/videos/top-health-stories-of-the-week-467396',
            'only_matching': True,
        },
        {
            'url': 'https://swirlster.ndtv.com/video/how-to-make-friends-at-work-469324',
            'only_matching': True,
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # '__title' does not contain extra words such as sub-site name, "Video" etc.
        page_title = urllib.parse.unquote_plus(
            self._search_regex(r"__title\s*=\s*'([^']+)'", webpage, 'title', default=None)
            or self._og_search_title(webpage))

        raw_filename = self._search_regex(
            r"(?:__)?filename\s*[:=]\s*'([^']+)'", webpage, 'video filename')
        # in "movies" sub-site pages, filename is URL
        video_url = urljoin(
            'https://ndtvod.bc-ssl.cdn.bitgravity.com/23372/ndtv/', raw_filename.lstrip('/'))

        # "doctor" sub-site has MM:SS format
        duration = parse_duration(self._search_regex(
            r"(?:__)?duration\s*[:=]\s*'([^']+)'", webpage, 'duration', fatal=False))

        # "sports", "doctor", "swirlster" sub-sites don't have 'publish-date'
        upload_date = unified_strdate(
            self._html_search_meta('publish-date', webpage, 'upload date', default=None)
            or self._html_search_meta('uploadDate', webpage, 'upload date', default=None)
            or self._search_regex(
                r'datePublished"\s*:\s*"([^"]+)"', webpage, 'upload date', fatal=False))

        return {
            'id': video_id,
            'url': video_url,
            'title': page_title,
            'description': remove_end(self._og_search_description(webpage), ' (Read more)'),
            'thumbnail': self._og_search_thumbnail(webpage),
            'duration': duration,
            'upload_date': upload_date,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/sportdeutschland.py
yt_dlp/extractor/sportdeutschland.py
from .common import InfoExtractor
from ..utils import (
    join_nonempty,
    strip_or_none,
    traverse_obj,
    unified_timestamp,
)


class SportDeutschlandIE(InfoExtractor):
    """Extractor for sporteurope.tv (formerly Sportdeutschland.TV).

    Assets can be a single video, a multi-part recording, or a livestream;
    playback goes through Mux with a per-asset token.
    """

    IE_NAME = 'sporteurope'
    _VALID_URL = r'https?://(?:player\.)?sporteurope\.tv/(?P<id>(?:[^/?#]+/)?[^?#/&]+)'
    _TESTS = [{
        # Single-part video, direct link
        'url': 'https://sporteurope.tv/rostock-griffins/gfl2-rostock-griffins-vs-elmshorn-fighting-pirates',
        'md5': '35c11a19395c938cdd076b93bda54cde',
        'info_dict': {
            'id': '9f27a97d-1544-4d0b-aa03-48d92d17a03a',
            'ext': 'mp4',
            'title': 'GFL2: Rostock Griffins vs. Elmshorn Fighting Pirates',
            'display_id': 'rostock-griffins/gfl2-rostock-griffins-vs-elmshorn-fighting-pirates',
            'channel': 'Rostock Griffins',
            'channel_url': 'https://sporteurope.tv/rostock-griffins',
            'live_status': 'was_live',
            'description': r're:Video-Livestream des Spiels Rostock Griffins vs\. Elmshorn Fighting Pirates.+',
            'channel_id': '9635f21c-3f67-4584-9ce4-796e9a47276b',
            'timestamp': 1749913117,
            'upload_date': '20250614',
            'duration': 12287.0,
        },
    }, {
        # Single-part video, embedded player link
        'url': 'https://player.sporteurope.tv/9e9619c4-7d77-43c4-926d-49fb57dc06dc',
        'info_dict': {
            'id': '9f27a97d-1544-4d0b-aa03-48d92d17a03a',
            'ext': 'mp4',
            'title': 'GFL2: Rostock Griffins vs. Elmshorn Fighting Pirates',
            'display_id': '9e9619c4-7d77-43c4-926d-49fb57dc06dc',
            'channel': 'Rostock Griffins',
            'channel_url': 'https://sporteurope.tv/rostock-griffins',
            'live_status': 'was_live',
            'description': r're:Video-Livestream des Spiels Rostock Griffins vs\. Elmshorn Fighting Pirates.+',
            'channel_id': '9635f21c-3f67-4584-9ce4-796e9a47276b',
            'timestamp': 1749913117,
            'upload_date': '20250614',
            'duration': 12287.0,
        },
        'params': {'skip_download': True},
    }, {
        # Multi-part video
        'url': 'https://sporteurope.tv/rhine-ruhr-2025-fisu-world-university-games/volleyball-w-japan-vs-brasilien-halbfinale-2',
        'info_dict': {
            'id': '9f63d737-2444-4e3a-a1ea-840df73fd481',
            'display_id': 'rhine-ruhr-2025-fisu-world-university-games/volleyball-w-japan-vs-brasilien-halbfinale-2',
            'title': 'Volleyball w: Japan vs. Braslien - Halbfinale 2',
            'description': 'md5:0a17da15e48a687e6019639c3452572b',
            'channel': 'Rhine-Ruhr 2025 FISU World University Games',
            'channel_id': '9f5216be-a49d-470b-9a30-4fe9df993334',
            'channel_url': 'https://sporteurope.tv/rhine-ruhr-2025-fisu-world-university-games',
            'live_status': 'was_live',
        },
        'playlist_count': 2,
        'playlist': [{
            'info_dict': {
                'id': '9f725a94-d43e-40ff-859d-13da3081bb04',
                'ext': 'mp4',
                'title': 'Volleyball w: Japan vs. Braslien - Halbfinale 2 Part 1',
                'channel': 'Rhine-Ruhr 2025 FISU World University Games',
                'channel_id': '9f5216be-a49d-470b-9a30-4fe9df993334',
                'channel_url': 'https://sporteurope.tv/rhine-ruhr-2025-fisu-world-university-games',
                'duration': 14773.0,
                'timestamp': 1753085197,
                'upload_date': '20250721',
                'live_status': 'was_live',
            },
        }, {
            'info_dict': {
                'id': '9f725a94-370e-4477-89ac-1751098e3217',
                'ext': 'mp4',
                'title': 'Volleyball w: Japan vs. Braslien - Halbfinale 2 Part 2',
                'channel': 'Rhine-Ruhr 2025 FISU World University Games',
                'channel_id': '9f5216be-a49d-470b-9a30-4fe9df993334',
                'channel_url': 'https://sporteurope.tv/rhine-ruhr-2025-fisu-world-university-games',
                'duration': 14773.0,
                'timestamp': 1753128421,
                'upload_date': '20250721',
                'live_status': 'was_live',
            },
        }],
        'skip': '404 Not Found',
    }, {
        # Livestream
        'url': 'https://sporteurope.tv/dtb/gymnastik-international-tag-1',
        'info_dict': {
            'id': '95d71b8a-370a-4b87-ad16-94680da18528',
            'ext': 'mp4',
            'title': r're:Gymnastik International - Tag 1 .+',
            'display_id': 'dtb/gymnastik-international-tag-1',
            'channel_id': '936ecef1-2f4a-4e08-be2f-68073cb7ecab',
            'channel': 'Deutscher Turner-Bund',
            'channel_url': 'https://sporteurope.tv/dtb',
            'description': 'md5:07a885dde5838a6f0796ee21dc3b0c52',
            'live_status': 'is_live',
        },
        'skip': 'live',
    }]

    def _process_video(self, asset_id, video):
        """Fetch a playback token for one video part and return its info dict."""
        is_live = video['type'] == 'mux_live'
        token = self._download_json(
            f'https://api.sporteurope.tv/api/web/personal/asset-token/{asset_id}',
            video['id'], query={'type': video['type'], 'playback_id': video['src']},
            headers={'Referer': 'https://sporteurope.tv/'})['token']
        formats, subtitles = self._extract_m3u8_formats_and_subtitles(
            f'https://stream.mux.com/{video["src"]}.m3u8?token={token}',
            video['id'], live=is_live)

        return {
            'is_live': is_live,
            'formats': formats,
            'subtitles': subtitles,
            **traverse_obj(video, {
                'id': 'id',
                # drop zero/negative durations (falsy result is discarded)
                'duration': ('duration', {lambda x: float(x) > 0 and float(x)}),
                'timestamp': ('created_at', {unified_timestamp}),
            }),
        }

    def _real_extract(self, url):
        display_id = self._match_id(url)
        asset = self._download_json(
            f'https://api.sporteurope.tv/api/stateless/frontend/assets/{display_id}',
            display_id, query={'access_token': 'true'})

        info = {
            'display_id': display_id,
            **traverse_obj(asset, {
                'id': (('id', 'uuid'), ),
                'title': (('title', 'name'), {strip_or_none}),
                'description': 'description',
                'channel': ('profile', 'name'),
                'channel_id': ('profile', 'id'),
                'is_live': 'currently_live',
                'was_live': 'was_live',
                'channel_url': ('profile', 'slug', {lambda x: f'https://sporteurope.tv/{x}'}),
            }, get_all=False),
        }

        # A livestream asset has a single 'livestream' entry; VOD assets carry
        # one or more parts under 'videos'
        videos = traverse_obj(asset, (('livestream', ('videos', ...)), ))

        entries = []
        for part_number, video in enumerate(videos, 1):
            entries.append({
                'title': join_nonempty(info.get('title'), f'Part {part_number}', delim=' '),
                **traverse_obj(info, {
                    'channel': 'channel',
                    'channel_id': 'channel_id',
                    'channel_url': 'channel_url',
                    'was_live': 'was_live',
                }),
                **self._process_video(info['id'], video),
            })

        if len(entries) > 1:
            return {
                '_type': 'multi_video',
                **info,
                'entries': entries,
            }
        return {
            **info,
            **entries[0],
            'title': info.get('title'),  # single part keeps the plain asset title
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/wordpress.py
yt_dlp/extractor/wordpress.py
import re from .common import InfoExtractor from ..utils import ( extract_attributes, get_elements_by_class, get_elements_text_and_html_by_attribute, int_or_none, parse_duration, traverse_obj, ) # https://codex.wordpress.org/Playlist_Shortcode class WordpressPlaylistEmbedIE(InfoExtractor): _VALID_URL = False IE_NAME = 'wordpress:playlist' _WEBPAGE_TESTS = [{ # 5 WordPress playlists. This is using wpse-playlist, which is similar. # See: https://github.com/birgire/wpse-playlist 'url': 'https://xlino.com/wordpress-playlist-shortcode-with-external-audio-or-video-files/', 'info_dict': { 'id': 'wordpress-playlist-shortcode-with-external-audio-or-video-files', 'title': 'WordPress: Playlist shortcode with external audio or video files – Birgir Erlendsson (birgire)', 'age_limit': 0, }, 'playlist_count': 5, }, { 'url': 'https://pianoadventures.com/products/piano-adventures-level-1-lesson-book-enhanced-cd/', 'info_dict': { 'id': 'piano-adventures-level-1-lesson-book-enhanced-cd-wp-playlist-1', 'title': 'Wordpress Playlist', 'thumbnail': 'https://pianoadventures.com/wp-content/uploads/sites/13/2022/01/CD1002cover.jpg', 'age_limit': 0, }, 'playlist': [{ 'info_dict': { 'id': 'CD1002-21', 'ext': 'mp3', 'title': '21 Half-Time Show', 'thumbnail': 'https://pianoadventures.com/wp-content/plugins/media-library-assistant/images/crystal/audio.png', 'album': 'Piano Adventures Level 1 Lesson Book (2nd Edition)', 'genre': 'Classical', 'duration': 49.0, 'artist': 'Nancy and Randall Faber', 'description': 'md5:a9f8e9aeabbd2912bc13cc0fab1a4ce8', }, }], 'playlist_count': 6, 'params': {'skip_download': True}, }] def _extract_from_webpage(self, url, webpage): # class should always be "wp-playlist-script" # See: https://core.trac.wordpress.org/browser/trunk/src/wp-includes/media.php#L2930 for i, j in enumerate(get_elements_by_class('wp-playlist-script', webpage)): playlist_json = self._parse_json(j, self._generic_id(url), fatal=False, ignore_extra=True, errnote='') or {} if not playlist_json: 
continue entries = [{ 'id': self._generic_id(track['src']), 'title': track.get('title'), 'url': track.get('src'), 'thumbnail': traverse_obj(track, ('thumb', 'src')), 'album': traverse_obj(track, ('meta', 'album')), 'artist': traverse_obj(track, ('meta', 'artist')), 'genre': traverse_obj(track, ('meta', 'genre')), 'duration': parse_duration(traverse_obj(track, ('meta', 'length_formatted'))), 'description': track.get('description'), 'height': int_or_none(traverse_obj(track, ('dimensions', 'original', 'height'))), 'width': int_or_none(traverse_obj(track, ('dimensions', 'original', 'width'))), } for track in traverse_obj(playlist_json, ('tracks', ...), expected_type=dict)] yield self.playlist_result(entries, self._generic_id(url) + f'-wp-playlist-{i + 1}', 'Wordpress Playlist') class WordpressMiniAudioPlayerEmbedIE(InfoExtractor): # WordPress MB Mini Player Plugin # https://wordpress.org/plugins/wp-miniaudioplayer/ # Note: This is for the WordPress plugin version only. _VALID_URL = False IE_NAME = 'wordpress:mb.miniAudioPlayer' _WEBPAGE_TESTS = [{ # Version 1.8.10: https://plugins.trac.wordpress.org/browser/wp-miniaudioplayer/tags/1.8.10 'url': 'https://news.samsung.com/global/over-the-horizon-the-evolution-of-the-samsung-galaxy-brand-sound', 'info_dict': { 'id': 'over-the-horizon-the-evolution-of-the-samsung-galaxy-brand-sound', 'title': 'Over the Horizon: The Evolution of the Samsung Galaxy Brand Sound', 'age_limit': 0, 'thumbnail': 'https://img.global.news.samsung.com/global/wp-content/uploads/2015/04/OTH_Main_Title-e1429612467870.jpg', 'description': 'md5:bc3dd738d1f11d9232e94e6629983bf7', }, 'playlist': [{ 'info_dict': { 'id': 'over_the_horizon_2013', 'ext': 'mp3', 'title': 'Over the Horizon 2013', 'url': 'http://news.samsung.com/global/wp-content/uploads/ringtones/over_the_horizon_2013.mp3', }, }], 'playlist_count': 6, 'params': {'skip_download': True}, }, { # Version 1.9.3: https://plugins.trac.wordpress.org/browser/wp-miniaudioplayer/tags/1.9.3 'url': 
'https://www.booksontape.com/collections/audiobooks-with-teacher-guides/', 'info_dict': { 'id': 'audiobooks-with-teacher-guides', 'title': 'Audiobooks with Teacher Guides | Books on Tape', 'age_limit': 0, 'thumbnail': 'https://www.booksontape.com/wp-content/uploads/2016/09/bot-logo-1200x630.jpg', }, 'playlist_mincount': 12, }, { # Version 1.9.7: https://plugins.trac.wordpress.org/browser/wp-miniaudioplayer/tags/1.9.7 # But has spaces around href filter 'url': 'https://www.estudiords.com.br/temas/', 'info_dict': { 'id': 'temas', 'title': 'Temas Variados', 'age_limit': 0, 'timestamp': float, 'upload_date': str, 'thumbnail': 'https://www.estudiords.com.br/wp-content/uploads/2021/03/LOGO-TEMAS.png', 'description': 'md5:ab24d6a7ed0312ad2d466e721679f5a0', }, 'playlist_mincount': 30, }] def _extract_from_webpage(self, url, webpage): # Common function for the WordPress plugin version only. mb_player_params = self._search_regex( r'function\s*initializeMiniAudioPlayer\(\){[^}]+jQuery([^;]+)\.mb_miniPlayer', webpage, 'mb player params', default=None) if not mb_player_params: return # v1.55 - 1.9.3 has "a[href*='.mp3'] ,a[href*='.m4a']" # v1.9.4+ has "a[href*='.mp3']" only file_exts = re.findall(r'a\[href\s*\*=\s*\'\.([a-zA-Z\d]+)\'', mb_player_params) if not file_exts: return candidates = get_elements_text_and_html_by_attribute( 'href', rf'(?:[^\"\']+\.(?:{"|".join(file_exts)}))', webpage, escape_value=False, tag='a') for title, html in candidates: attrs = extract_attributes(html) # XXX: not tested - have not found any example of it being used if any(c in (attrs.get('class') or '') for c in re.findall(r'\.not\("\.([^"]+)', mb_player_params)): continue href = attrs['href'] yield { 'id': self._generic_id(href), 'title': title or self._generic_title(href), 'url': href, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/dctp.py
yt_dlp/extractor/dctp.py
from .common import InfoExtractor from ..utils import ( float_or_none, int_or_none, unified_timestamp, url_or_none, ) class DctpTvIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?dctp\.tv/(?:#/)?filme/(?P<id>[^/?#&]+)' _TESTS = [{ # 4x3 'url': 'http://www.dctp.tv/filme/videoinstallation-fuer-eine-kaufhausfassade/', 'md5': '3ffbd1556c3fe210724d7088fad723e3', 'info_dict': { 'id': '95eaa4f33dad413aa17b4ee613cccc6c', 'display_id': 'videoinstallation-fuer-eine-kaufhausfassade', 'ext': 'm4v', 'title': 'Videoinstallation für eine Kaufhausfassade', 'description': 'Kurzfilm', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 71.24, 'timestamp': 1302172322, 'upload_date': '20110407', }, }, { # 16x9 'url': 'http://www.dctp.tv/filme/sind-youtuber-die-besseren-lehrer/', 'only_matching': True, }] _BASE_URL = 'http://dctp-ivms2-restapi.s3.amazonaws.com' def _real_extract(self, url): display_id = self._match_id(url) version = self._download_json( f'{self._BASE_URL}/version.json', display_id, 'Downloading version JSON') restapi_base = '{}/{}/restapi'.format( self._BASE_URL, version['version_name']) info = self._download_json( f'{restapi_base}/slugs/{display_id}.json', display_id, 'Downloading video info JSON') media = self._download_json( '{}/media/{}.json'.format(restapi_base, str(info['object_id'])), display_id, 'Downloading media JSON') uuid = media['uuid'] title = media['title'] is_wide = media.get('is_wide') formats = [] def add_formats(suffix): templ = f'https://%s/{uuid}_dctp_{suffix}.m4v' formats.extend([{ 'format_id': 'hls-' + suffix, 'url': templ % 'cdn-segments.dctp.tv' + '/playlist.m3u8', 'protocol': 'm3u8_native', }, { 'format_id': 's3-' + suffix, 'url': templ % 'completed-media.s3.amazonaws.com', }, { 'format_id': 'http-' + suffix, 'url': templ % 'cdn-media.dctp.tv', }]) add_formats('0500_' + ('16x9' if is_wide else '4x3')) if is_wide: add_formats('720p') thumbnails = [] images = media.get('images') if isinstance(images, list): for image in images: if not 
isinstance(image, dict): continue image_url = url_or_none(image.get('url')) if not image_url: continue thumbnails.append({ 'url': image_url, 'width': int_or_none(image.get('width')), 'height': int_or_none(image.get('height')), }) return { 'id': uuid, 'display_id': display_id, 'title': title, 'alt_title': media.get('subtitle'), 'description': media.get('description') or media.get('teaser'), 'timestamp': unified_timestamp(media.get('created')), 'duration': float_or_none(media.get('duration_in_ms'), scale=1000), 'thumbnails': thumbnails, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/vidyard.py
yt_dlp/extractor/vidyard.py
import re from .common import InfoExtractor from ..utils import ( extract_attributes, float_or_none, int_or_none, join_nonempty, mimetype2ext, parse_resolution, str_or_none, unescapeHTML, url_or_none, ) from ..utils.traversal import traverse_obj class VidyardBaseIE(InfoExtractor): _HEADERS = {'Referer': 'https://play.vidyard.com/'} def _get_formats_and_subtitles(self, sources, video_id): formats, subtitles = [], {} def add_hls_fmts_and_subs(m3u8_url): fmts, subs = self._extract_m3u8_formats_and_subtitles( m3u8_url, video_id, 'mp4', m3u8_id='hls', headers=self._HEADERS, fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) hls_list = isinstance(sources, dict) and sources.pop('hls', None) if master_m3u8_url := traverse_obj( hls_list, (lambda _, v: v['profile'] == 'auto', 'url', {url_or_none}, any)): add_hls_fmts_and_subs(master_m3u8_url) if not formats: # These are duplicate and unnecesary requests if we got 'auto' hls fmts for variant_m3u8_url in traverse_obj(hls_list, (..., 'url', {url_or_none})): add_hls_fmts_and_subs(variant_m3u8_url) for source_type, source_list in traverse_obj(sources, ({dict.items}, ...)): for source in traverse_obj(source_list, lambda _, v: url_or_none(v['url'])): profile = source.get('profile') formats.append({ 'url': source['url'], 'ext': mimetype2ext(source.get('mimeType'), default=None), 'format_id': join_nonempty('http', source_type, profile), **parse_resolution(profile), }) self._remove_duplicate_formats(formats) return formats, subtitles def _get_direct_subtitles(self, caption_json): subs = {} for caption in traverse_obj(caption_json, lambda _, v: url_or_none(v['vttUrl'])): subs.setdefault(caption.get('language') or 'und', []).append({ 'url': caption['vttUrl'], 'name': caption.get('name'), }) return subs def _get_additional_metadata(self, video_id): additional_metadata = self._download_json( f'https://play.vidyard.com/video/{video_id}', video_id, note='Downloading additional metadata', fatal=False) return 
traverse_obj(additional_metadata, { 'title': ('name', {str}), 'duration': ('seconds', {int_or_none}), 'thumbnails': ('thumbnailUrl', {'url': {url_or_none}}, all), 'chapters': ('videoSections', lambda _, v: float_or_none(v['milliseconds']) is not None, { 'title': ('title', {str}), 'start_time': ('milliseconds', {float_or_none(scale=1000)}), }), }) def _fetch_video_json(self, video_id): return self._download_json( f'https://play.vidyard.com/player/{video_id}.json', video_id)['payload'] def _process_video_json(self, json_data, video_id): formats, subtitles = self._get_formats_and_subtitles(json_data['sources'], video_id) self._merge_subtitles(self._get_direct_subtitles(json_data.get('captions')), target=subtitles) return { **self._get_additional_metadata(json_data['facadeUuid']), **traverse_obj(json_data, { 'id': ('facadeUuid', {str}), 'display_id': ('videoId', {int}, {str_or_none}), 'title': ('name', {str}), 'description': ('description', {str}, {unescapeHTML}, filter), 'duration': (( ('milliseconds', {float_or_none(scale=1000)}), ('seconds', {int_or_none})), any), 'thumbnails': ('thumbnailUrls', ('small', 'normal'), {'url': {url_or_none}}), 'tags': ('tags', ..., 'name', {str}), }), 'formats': formats, 'subtitles': subtitles, 'http_headers': self._HEADERS, } class VidyardIE(VidyardBaseIE): _VALID_URL = [ r'https?://[\w-]+(?:\.hubs)?\.vidyard\.com/watch/(?P<id>[\w-]+)', r'https?://(?:embed|share)\.vidyard\.com/share/(?P<id>[\w-]+)', r'https?://play\.vidyard\.com/(?:player/)?(?P<id>[\w-]+)', ] _EMBED_REGEX = [r'<iframe[^>]* src=["\'](?P<url>(?:https?:)?//play\.vidyard\.com/[\w-]+)'] _TESTS = [{ 'url': 'https://vyexample03.hubs.vidyard.com/watch/oTDMPlUv--51Th455G5u7Q', 'info_dict': { 'id': 'oTDMPlUv--51Th455G5u7Q', 'display_id': '50347', 'ext': 'mp4', 'title': 'Homepage Video', 'description': 'Look I changed the description.', 'thumbnail': 'https://cdn.vidyard.com/thumbnails/50347/OUPa5LTKV46849sLYngMqQ_small.jpg', 'duration': 99, 'tags': ['these', 'are', 'all', 
'tags'], }, }, { 'url': 'https://share.vidyard.com/watch/PaQzDAT1h8JqB8ivEu2j6Y?', 'info_dict': { 'id': 'PaQzDAT1h8JqB8ivEu2j6Y', 'display_id': '9281024', 'ext': 'mp4', 'title': 'Inline Embed', 'thumbnail': 'https://cdn.vidyard.com/thumbnails/spacer.gif', 'duration': 41.186, }, }, { 'url': 'https://share.vidyard.com/watch/wL237MtNgZUHo6e8WPiJbF', 'info_dict': { 'id': 'wL237MtNgZUHo6e8WPiJbF', 'display_id': '25926870', 'ext': 'mp4', 'title': 'Adding & Editing Video Chapters', 'thumbnail': 'https://cdn.vidyard.com/thumbnails/25926870/bvSEZS3dGY7DByQ_bzB57avIZ_hsvhr4_small.jpg', 'duration': 135.46, 'chapters': [{ 'title': 'Adding new chapters', 'start_time': 0, }, { 'title': 'Previewing your video', 'start_time': 74, }, { 'title': 'Editing your chapters', 'start_time': 91, }, { 'title': 'Share a link to a specific chapter', 'start_time': 105, }], }, }, { 'url': 'https://embed.vidyard.com/share/oTDMPlUv--51Th455G5u7Q', 'info_dict': { 'id': 'oTDMPlUv--51Th455G5u7Q', 'display_id': '50347', 'ext': 'mp4', 'title': 'Homepage Video', 'description': 'Look I changed the description.', 'thumbnail': 'https://cdn.vidyard.com/thumbnails/50347/OUPa5LTKV46849sLYngMqQ_small.jpg', 'duration': 99, 'tags': ['these', 'are', 'all', 'tags'], }, }, { # First video from playlist below 'url': 'https://embed.vidyard.com/share/SyStyHtYujcBHe5PkZc5DL', 'info_dict': { 'id': 'SyStyHtYujcBHe5PkZc5DL', 'display_id': '41974005', 'ext': 'mp4', 'title': 'Install Palm Beach Shutters with a Bi-Fold Track System (Video 1 of 6)', 'description': r're:In this video, you will learn the first step.+', 'thumbnail': 'https://cdn.vidyard.com/thumbnails/41974005/IJw7oCaJcF1h7WWu3OVZ8A_small.png', 'duration': 258.666, }, }, { # Playlist 'url': 'https://thelink.hubs.vidyard.com/watch/pwu7pCYWSwAnPxs8nDoFrE', 'info_dict': { 'id': 'pwu7pCYWSwAnPxs8nDoFrE', 'title': 'PLAYLIST - Palm Beach Shutters- Bi-Fold Track System Installation', 'entries': [{ 'id': 'SyStyHtYujcBHe5PkZc5DL', 'display_id': '41974005', 'ext': 'mp4', 
'title': 'Install Palm Beach Shutters with a Bi-Fold Track System (Video 1 of 6)', 'thumbnail': 'https://cdn.vidyard.com/thumbnails/41974005/IJw7oCaJcF1h7WWu3OVZ8A_small.png', 'duration': 258.666, }, { 'id': '1Fw4B84jZTXLXWqkE71RiM', 'display_id': '5861113', 'ext': 'mp4', 'title': 'Install Palm Beach Shutters with a Bi-Fold Track System (Video 2 of 6)', 'thumbnail': 'https://cdn.vidyard.com/thumbnails/5861113/29CJ54s5g1_aP38zkKLHew_small.jpg', 'duration': 167.858, }, { 'id': 'DqP3wBvLXSpxrcqpT5kEeo', 'display_id': '41976334', 'ext': 'mp4', 'title': 'Install Palm Beach Shutters with a Bi-Fold Track System (Video 3 of 6)', 'thumbnail': 'https://cdn.vidyard.com/thumbnails/5861090/RwG2VaTylUa6KhSTED1r1Q_small.png', 'duration': 94.229, }, { 'id': 'opfybfxpzQArxqtQYB6oBU', 'display_id': '41976364', 'ext': 'mp4', 'title': 'Install Palm Beach Shutters with a Bi-Fold Track System (Video 4 of 6)', 'thumbnail': 'https://cdn.vidyard.com/thumbnails/5860926/JIOaJR08dM4QgXi_iQ2zGA_small.png', 'duration': 191.467, }, { 'id': 'rWrXvkbTNNaNqD6189HJya', 'display_id': '41976382', 'ext': 'mp4', 'title': 'Install Palm Beach Shutters with a Bi-Fold Track System (Video 5 of 6)', 'thumbnail': 'https://cdn.vidyard.com/thumbnails/5860687/CwHxBv4UudAhOh43FVB4tw_small.png', 'duration': 138.155, }, { 'id': 'eYPTB521MZ9TPEArSethQ5', 'display_id': '41976409', 'ext': 'mp4', 'title': 'Install Palm Beach Shutters with a Bi-Fold Track System (Video 6 of 6)', 'thumbnail': 'https://cdn.vidyard.com/thumbnails/5861425/0y68qlMU4O5VKU7bJ8i_AA_small.png', 'duration': 148.224, }], }, 'playlist_count': 6, }, { # Non hubs.vidyard.com playlist 'url': 'https://salesforce.vidyard.com/watch/d4vqPjs7Q5EzVEis5QT3jd', 'skip': 'URL now 404s. 
Alternative non hubs.vidyard.com playlist not yet available', 'info_dict': { 'id': 'd4vqPjs7Q5EzVEis5QT3jd', 'title': 'How To: Service Cloud: Import External Content in Lightning Knowledge', 'entries': [{ 'id': 'mcjDpSZir2iSttbvFkx6Rv', 'display_id': '29479036', 'ext': 'mp4', 'title': 'Welcome to this Expert Coaching Series', 'thumbnail': 'https://cdn.vidyard.com/thumbnails/ouyQi9WuwyiOupChUWNmjQ/7170d3485ba602e012df05_small.jpg', 'duration': 38.205, }, { 'id': '84bPYwpg243G6xYEfJdYw9', 'display_id': '21820704', 'ext': 'mp4', 'title': 'Chapter 1 - Title + Agenda', 'thumbnail': 'https://cdn.vidyard.com/thumbnails/HFPN0ZgQq4Ow8BghGcQSow/bfaa30123c8f6601e7d7f2_small.jpg', 'duration': 98.016, }, { 'id': 'nP17fMuvA66buVHUrzqjTi', 'display_id': '21820707', 'ext': 'mp4', 'title': 'Chapter 2 - Import Options', 'thumbnail': 'https://cdn.vidyard.com/thumbnails/rGRIF5nFjPI9OOA2qJ_Dbg/86a8d02bfec9a566845dd4_small.jpg', 'duration': 199.136, }, { 'id': 'm54EcwXdpA5gDBH5rgCYoV', 'display_id': '21820710', 'ext': 'mp4', 'title': 'Chapter 3 - Importing Article Translations', 'thumbnail': 'https://cdn.vidyard.com/thumbnails/IVX4XR8zpSsiNIHx45kz-A/1ccbf8a29a33856d06b3ed_small.jpg', 'duration': 184.352, }, { 'id': 'j4nzS42oq4hE9oRV73w3eQ', 'display_id': '21820716', 'ext': 'mp4', 'title': 'Chapter 4 - Best Practices', 'thumbnail': 'https://cdn.vidyard.com/thumbnails/BtrRrQpRDLbA4AT95YQyog/1f1e6b8e7fdc3fa95ec8d3_small.jpg', 'duration': 296.960, }, { 'id': 'y28PYfW5pftvers9PXzisC', 'display_id': '21820727', 'ext': 'mp4', 'title': 'Chapter 5 - Migration Steps', 'thumbnail': 'https://cdn.vidyard.com/thumbnails/K2CdQOXDfLcrVTF60r0bdw/a09239ada28b6ffce12b1f_small.jpg', 'duration': 620.640, }, { 'id': 'YWU1eQxYvhj29SjYoPw5jH', 'display_id': '21820733', 'ext': 'mp4', 'title': 'Chapter 6 - Demo', 'thumbnail': 'https://cdn.vidyard.com/thumbnails/rsmhP-cO8dAa8ilvFGCX0g/7911ef415167cd14032068_small.jpg', 'duration': 631.456, }, { 'id': 'nmEvVqpwdJUgb74zKsLGxn', 'display_id': '29479037', 'ext': 
'mp4', 'title': 'Schedule Your Follow-Up', 'thumbnail': 'https://cdn.vidyard.com/thumbnails/Rtwc7X4PEkF4Ae5kHi-Jvw/174ebed3f34227b1ffa1d0_small.jpg', 'duration': 33.608, }], }, 'playlist_count': 8, }, { # URL of iframe embed src 'url': 'https://play.vidyard.com/iDqTwWGrd36vaLuaCY3nTs.html', 'info_dict': { 'id': 'iDqTwWGrd36vaLuaCY3nTs', 'display_id': '9281009', 'ext': 'mp4', 'title': 'Lightbox Embed', 'thumbnail': 'https://cdn.vidyard.com/thumbnails/spacer.gif', 'duration': 39.035, }, }, { # Player JSON URL 'url': 'https://play.vidyard.com/player/7GAApnNNbcZZ46k6JqJQSh.json?disable_analytics=0', 'info_dict': { 'id': '7GAApnNNbcZZ46k6JqJQSh', 'display_id': '820026', 'ext': 'mp4', 'title': 'The Art of Storytelling: How to Deliver Your Brand Story with Content & Social', 'thumbnail': 'https://cdn.vidyard.com/thumbnails/MhbE-5sEFQu4x3fI6FkNlA/41eb5717c557cd19456910_small.jpg', 'duration': 2153.013, 'tags': ['Summit2017'], }, }, { 'url': 'http://share.vidyard.com/share/diYeo6YR2yiGgL8odvS8Ri', 'only_matching': True, }, { 'url': 'https://play.vidyard.com/FFlz3ZpxhIfKQ1fd9DAryA', 'only_matching': True, }, { 'url': 'https://play.vidyard.com/qhMAu5A76GZVrFzOPgSf9A/type/standalone', 'only_matching': True, }] _WEBPAGE_TESTS = [{ # URL containing inline/lightbox embedded video 'url': 'https://resources.altium.com/p/2-the-extreme-importance-of-pc-board-stack-up', 'info_dict': { 'id': 'GDx1oXrFWj4XHbipfoXaMn', 'display_id': '3225198', 'ext': 'mp4', 'title': 'The Extreme Importance of PC Board Stack Up', 'thumbnail': 'https://cdn.vidyard.com/thumbnails/73_Q3_hBexWX7Og1sae6cg/9998fa4faec921439e2c04_small.jpg', 'duration': 3422.742, }, }, { # <script ... id="vidyard_embed_code_DXx2sW4WaLA6hTdGFz7ja8" src="//play.vidyard.com/DXx2sW4WaLA6hTdGFz7ja8.js? 'url': 'http://videos.vivint.com/watch/DXx2sW4WaLA6hTdGFz7ja8', 'skip': 'URL certificate expired 2025-09-10. 
Alternative script embed test case not yet available', 'info_dict': { 'id': 'DXx2sW4WaLA6hTdGFz7ja8', 'display_id': '2746529', 'ext': 'mp4', 'title': 'How To Powercycle the Smart Hub Panel', 'duration': 30.613, 'thumbnail': 'https://cdn.vidyard.com/thumbnails/_-6cw8xQUJ3qiCs_JENc_A/b21d7a5e47967f49399d30_small.jpg', }, }, { # <script id="vidyard_embed_code_MIBHhiLVTxga7wqLsuoDjQ" src="//embed.vidyard.com/embed/MIBHhiLVTxga7wqLsuoDjQ/inline?v=2.1"> 'url': 'https://www.babypips.com/learn/forex/introduction-to-metatrader4', 'info_dict': { 'id': 'MIBHhiLVTxga7wqLsuoDjQ', 'display_id': '20291', 'ext': 'mp4', 'title': 'Lesson 1 - Opening an MT4 Account', 'description': 'Never heard of MetaTrader4? Here\'s the 411 on the popular trading platform!', 'duration': 168.16, 'thumbnail': 'https://cdn.vidyard.com/thumbnails/20291/IM-G2WXQR9VBLl2Cmzvftg_small.jpg', }, }, { # <iframe ... src="//play.vidyard.com/d61w8EQoZv1LDuPxDkQP2Q/type/background?preview=1" 'skip': 'URL changed embed method to \'class="vidyard-player-embed"\'. 
An alternative iframe embed test case is not yet available', 'url': 'https://www.avaya.com/en/', 'info_dict': { # These values come from the generic extractor and don't matter 'id': str, 'title': str, 'age_limit': 0, 'upload_date': str, 'description': str, 'thumbnail': str, 'timestamp': float, }, 'playlist': [{ 'info_dict': { 'id': 'd61w8EQoZv1LDuPxDkQP2Q', 'display_id': '42456529', 'ext': 'mp4', 'title': 'GettyImages-1027', 'duration': 6.0, 'thumbnail': 'https://cdn.vidyard.com/thumbnails/42061563/p6bY08d2N4e4IDz-7J4_wkgsPq3-qgcx_small.jpg', }, }, { 'info_dict': { 'id': 'VAsYDi7eiqZRbHodUA2meC', 'display_id': '42456569', 'ext': 'mp4', 'title': 'GettyImages-1325598833', 'duration': 6.083, 'thumbnail': 'https://cdn.vidyard.com/thumbnails/42052358/y3qrbDpn_2quWr_5XBi7yzS3UvEI__ZM_small.jpg', }, }], 'playlist_count': 2, }, { # <div class="vidyard-player-embed" data-uuid="pMk8eNCYzukzJaEPoo1Hgn" # URL previously used iframe embeds and was used for that test case 'url': 'https://www.avaya.com/en/', 'info_dict': { 'id': 'pMk8eNCYzukzJaEPoo1Hgn', 'display_id': '47074153', 'ext': 'mp4', 'title': 'Avaya Infinity Helps Redefine the Contact Center as Your Connection Center', 'description': r're:Our mission is to help you turn single engagements.+', 'duration': 81.55, 'thumbnail': 'https://cdn.vidyard.com/thumbnails/47074153/MZOLKhXdbiUWwp2ROnT5HaXL0oau6JtR_small.jpg', }, }] @classmethod def _extract_embed_urls(cls, url, webpage): # Handle protocol-less embed URLs for embed_url in super()._extract_embed_urls(url, webpage): if embed_url.startswith('//'): embed_url = f'https:{embed_url}' yield embed_url # Extract inline/lightbox embeds for embed_element in re.findall( r'(<(?:img|div)[^>]* class=(["\'])(?:[^>"\']* )?vidyard-player-embed(?: [^>"\']*)?\2[^>]+>)', webpage): if video_id := extract_attributes(embed_element[0]).get('data-uuid'): yield f'https://play.vidyard.com/{video_id}' for embed_id in re.findall(r'<script[^>]* id=["\']vidyard_embed_code_([\w-]+)["\']', webpage): 
yield f'https://play.vidyard.com/{embed_id}' def _real_extract(self, url): video_id = self._match_id(url) video_json = self._fetch_video_json(video_id) if len(video_json['chapters']) == 1: return self._process_video_json(video_json['chapters'][0], video_id) return self.playlist_result( (self._process_video_json(chapter, video_id) for chapter in video_json['chapters']), str(video_json['playerUuid']), video_json.get('name'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/vodplatform.py
yt_dlp/extractor/vodplatform.py
from .common import InfoExtractor from ..utils import unescapeHTML class VODPlatformIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:www\.)?vod-platform\.net|embed\.kwikmotion\.com)/[eE]mbed/(?P<id>[^/?#]+)' _EMBED_REGEX = [r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:(?:www\.)?vod-platform\.net|embed\.kwikmotion\.com)/[eE]mbed/.+?)\1'] _TESTS = [{ # from http://www.lbcgroup.tv/watch/chapter/29143/52844/%D8%A7%D9%84%D9%86%D8%B5%D8%B1%D8%A9-%D9%81%D9%8A-%D8%B6%D9%8A%D8%A7%D9%81%D8%A9-%D8%A7%D9%84%D9%80-cnn/ar 'url': 'http://vod-platform.net/embed/RufMcytHDolTH1MuKHY9Fw', 'md5': '1db2b7249ce383d6be96499006e951fc', 'info_dict': { 'id': 'RufMcytHDolTH1MuKHY9Fw', 'ext': 'mp4', 'title': 'LBCi News_ النصرة في ضيافة الـ "سي.أن.أن"', }, }, { 'url': 'http://embed.kwikmotion.com/embed/RufMcytHDolTH1MuKHY9Fw', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = unescapeHTML(self._og_search_title(webpage)) hidden_inputs = self._hidden_inputs(webpage) formats = self._extract_wowza_formats( hidden_inputs.get('HiddenmyhHlsLink') or hidden_inputs['HiddenmyDashLink'], video_id, skip_protocols=['f4m', 'smil']) return { 'id': video_id, 'title': title, 'thumbnail': hidden_inputs.get('HiddenThumbnail') or self._og_search_thumbnail(webpage), 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/vimm.py
yt_dlp/extractor/vimm.py
from .common import InfoExtractor class VimmIE(InfoExtractor): IE_NAME = 'Vimm:stream' _VALID_URL = r'https?://(?:www\.)?vimm\.tv/(?:c/)?(?P<id>[0-9a-z-]+)$' _TESTS = [{ 'url': 'https://www.vimm.tv/c/calimeatwagon', 'info_dict': { 'id': 'calimeatwagon', 'ext': 'mp4', 'title': 're:^calimeatwagon [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'live_status': 'is_live', }, 'skip': 'Live', }, { 'url': 'https://www.vimm.tv/octaafradio', 'only_matching': True, }] def _real_extract(self, url): channel_id = self._match_id(url) formats, subs = self._extract_m3u8_formats_and_subtitles( f'https://www.vimm.tv/hls/{channel_id}.m3u8', channel_id, 'mp4', m3u8_id='hls', live=True) return { 'id': channel_id, 'title': channel_id, 'is_live': True, 'formats': formats, 'subtitles': subs, } class VimmRecordingIE(InfoExtractor): IE_NAME = 'Vimm:recording' _VALID_URL = r'https?://(?:www\.)?vimm\.tv/c/(?P<channel_id>[0-9a-z-]+)\?v=(?P<video_id>[0-9A-Za-z]+)' _TESTS = [{ 'url': 'https://www.vimm.tv/c/kaldewei?v=2JZsrPTFxsSz', 'md5': '15122ee95baa32a548e4a3e120b598f1', 'info_dict': { 'id': '2JZsrPTFxsSz', 'ext': 'mp4', 'title': 'VIMM - [DE/GER] Kaldewei Live - In Farbe und Bunt', 'uploader_id': 'kaldewei', }, }] def _real_extract(self, url): channel_id, video_id = self._match_valid_url(url).groups() webpage = self._download_webpage(url, video_id) title = self._og_search_title(webpage) formats, subs = self._extract_m3u8_formats_and_subtitles( f'https://d211qfrkztakg3.cloudfront.net/{channel_id}/{video_id}/index.m3u8', video_id, 'mp4', m3u8_id='hls', live=False) return { 'id': video_id, 'title': title, 'is_live': False, 'uploader_id': channel_id, 'formats': formats, 'subtitles': subs, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/__init__.py
yt_dlp/extractor/__init__.py
from ..compat.compat_utils import passthrough_module from ..globals import extractors as _extractors_context from ..globals import plugin_ies as _plugin_ies_context from ..plugins import PluginSpec, register_plugin_spec passthrough_module(__name__, '.extractors') del passthrough_module register_plugin_spec(PluginSpec( module_name='extractor', suffix='IE', destination=_extractors_context, plugin_destination=_plugin_ies_context, )) def gen_extractor_classes(): """ Return a list of supported extractors. The order does matter; the first extractor matched is the one handling the URL. """ import_extractors() return list(_extractors_context.value.values()) def gen_extractors(): """ Return a list of an instance of every supported extractor. The order does matter; the first extractor matched is the one handling the URL. """ return [klass() for klass in gen_extractor_classes()] def list_extractor_classes(age_limit=None): """Return a list of extractors that are suitable for the given age, sorted by extractor name""" from .generic import GenericIE yield from sorted(filter( lambda ie: ie.is_suitable(age_limit) and ie != GenericIE, gen_extractor_classes()), key=lambda ie: ie.IE_NAME.lower()) yield GenericIE def list_extractors(age_limit=None): """Return a list of extractor instances that are suitable for the given age, sorted by extractor name""" return [ie() for ie in list_extractor_classes(age_limit)] def get_info_extractor(ie_name): """Returns the info extractor class with the given ie_name""" import_extractors() return _extractors_context.value[f'{ie_name}IE'] def import_extractors(): from . import extractors # noqa: F401
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/hotstar.py
yt_dlp/extractor/hotstar.py
import functools import hashlib import hmac import json import re import time import uuid from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, OnDemandPagedList, determine_ext, filter_dict, int_or_none, join_nonempty, jwt_decode_hs256, parse_iso8601, str_or_none, url_or_none, ) from ..utils.traversal import require, traverse_obj class HotStarBaseIE(InfoExtractor): _TOKEN_NAME = 'userUP' _BASE_URL = 'https://www.hotstar.com' _API_URL = 'https://api.hotstar.com' _API_URL_V2 = 'https://www.hotstar.com/api/internal/bff/v2' _AKAMAI_ENCRYPTION_KEY = b'\x05\xfc\x1a\x01\xca\xc9\x4b\xc4\x12\xfc\x53\x12\x07\x75\xf9\xee' _FREE_HEADERS = { 'user-agent': 'Hotstar;in.startv.hotstar/25.06.30.0.11580 (Android/12)', 'x-hs-client': 'platform:android;app_id:in.startv.hotstar;app_version:25.06.30.0;os:Android;os_version:12;schema_version:0.0.1523', 'x-hs-platform': 'android', } _SUB_HEADERS = { 'user-agent': 'Disney+;in.startv.hotstar.dplus.tv/23.08.14.4.2915 (Android/13)', 'x-hs-client': 'platform:androidtv;app_id:in.startv.hotstar.dplus.tv;app_version:23.08.14.4;os:Android;os_version:13;schema_version:0.0.970', 'x-hs-platform': 'androidtv', } def _has_active_subscription(self, cookies, server_time): server_time = int_or_none(server_time) or int(time.time()) expiry = traverse_obj(cookies, ( self._TOKEN_NAME, 'value', {jwt_decode_hs256}, 'sub', {json.loads}, 'subscriptions', 'in', ..., 'expiry', {parse_iso8601}, all, {max})) or 0 return expiry > server_time def _call_api_v1(self, path, *args, **kwargs): return self._download_json( f'{self._API_URL}/o/v1/{path}', *args, **kwargs, headers={'x-country-code': 'IN', 'x-platform-code': 'PCTV'}) def _call_api_impl(self, path, video_id, query, cookies=None, st=None): st = int_or_none(st) or int(time.time()) exp = st + 6000 auth = f'st={st}~exp={exp}~acl=/*' auth += '~hmac=' + hmac.new(self._AKAMAI_ENCRYPTION_KEY, auth.encode(), hashlib.sha256).hexdigest() response = 
self._download_json( f'{self._API_URL_V2}/{path}', video_id, query=query, headers=filter_dict({ **(self._SUB_HEADERS if self._has_active_subscription(cookies, st) else self._FREE_HEADERS), 'hotstarauth': auth, 'x-hs-usertoken': traverse_obj(cookies, (self._TOKEN_NAME, 'value')), 'x-hs-device-id': traverse_obj(cookies, ('deviceId', 'value')) or str(uuid.uuid4()), 'content-type': 'application/json', })) if not traverse_obj(response, ('success', {dict})): raise ExtractorError('API call was unsuccessful') return response['success'] def _call_api_v2(self, path, video_id, content_type, cookies=None, st=None): return self._call_api_impl(f'{path}', video_id, query={ 'content_id': video_id, 'filters': f'content_type={content_type}', 'client_capabilities': json.dumps({ 'package': ['dash', 'hls'], 'container': ['fmp4', 'fmp4br', 'ts'], 'ads': ['non_ssai', 'ssai'], 'audio_channel': ['stereo', 'dolby51', 'atmos'], 'encryption': ['plain', 'widevine'], # wv only so we can raise appropriate error 'video_codec': ['h264', 'h265'], 'video_codec_non_secure': ['h264', 'h265', 'vp9'], 'ladder': ['phone', 'tv', 'full'], 'resolution': ['hd', '4k'], 'true_resolution': ['hd', '4k'], 'dynamic_range': ['sdr', 'hdr'], }, separators=(',', ':')), 'drm_parameters': json.dumps({ 'widevine_security_level': ['SW_SECURE_DECODE', 'SW_SECURE_CRYPTO'], 'hdcp_version': ['HDCP_V2_2', 'HDCP_V2_1', 'HDCP_V2', 'HDCP_V1'], }, separators=(',', ':')), }, cookies=cookies, st=st) @staticmethod def _parse_metadata_v1(video_data): return traverse_obj(video_data, { 'id': ('contentId', {str}), 'title': ('title', {str}), 'description': ('description', {str}), 'duration': ('duration', {int_or_none}), 'timestamp': (('broadcastDate', 'startDate'), {int_or_none}, any), 'release_year': ('year', {int_or_none}), 'channel': ('channelName', {str}), 'channel_id': ('channelId', {int}, {str_or_none}), 'series': ('showName', {str}), 'season': ('seasonName', {str}), 'season_number': ('seasonNo', {int_or_none}), 'season_id': 
('seasonId', {int}, {str_or_none}), 'episode': ('title', {str}), 'episode_number': ('episodeNo', {int_or_none}), }) def _fetch_page(self, path, item_id, name, query, root, page): results = self._call_api_v1( path, item_id, note=f'Downloading {name} page {page + 1} JSON', query={ **query, 'tao': page * self._PAGE_SIZE, 'tas': self._PAGE_SIZE, })['body']['results'] for video in traverse_obj(results, (('assets', None), 'items', lambda _, v: v['contentId'])): yield self.url_result( HotStarIE._video_url(video['contentId'], root=root), HotStarIE, **self._parse_metadata_v1(video)) class HotStarIE(HotStarBaseIE): IE_NAME = 'hotstar' IE_DESC = 'JioHotstar' _VALID_URL = r'''(?x) https?://(?:www\.)?hotstar\.com(?:/in)?/(?!in/) (?: (?P<type>movies|sports|clips|episode|(?P<tv>tv|shows))/ (?(tv)(?:[^/?#]+/){2}|[^?#]*) )? [^/?#]+/ (?P<id>\d{10}) ''' _TESTS = [{ 'url': 'https://www.hotstar.com/can-you-not-spread-rumours/1000076273', 'info_dict': { 'id': '1000076273', 'ext': 'mp4', 'title': 'Can You Not Spread Rumours?', 'description': 'md5:c957d8868e9bc793ccb813691cc4c434', 'timestamp': 1447248600, 'upload_date': '20151111', 'duration': 381, 'episode': 'Can You Not Spread Rumours?', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.hotstar.com/tv/ek-bhram-sarvagun-sampanna/s-2116/janhvi-targets-suman/1000234847', 'info_dict': { 'id': '1000234847', 'ext': 'mp4', 'title': 'Janhvi Targets Suman', 'description': 'md5:78a85509348910bd1ca31be898c5796b', 'timestamp': 1556670600, 'upload_date': '20190501', 'duration': 1219, 'channel': 'StarPlus', 'channel_id': '821', 'series': 'Ek Bhram - Sarvagun Sampanna', 'season': 'Chapter 1', 'season_number': 1, 'season_id': '1260004607', 'episode': 'Janhvi Targets Suman', 'episode_number': 8, }, 'params': {'skip_download': 'm3u8'}, }, { # Metadata call gets HTTP Error 504 with tas=10000 'url': 'https://www.hotstar.com/in/shows/anupama/1260022017/anupama-anuj-share-a-moment/1000282843', 'info_dict': { 'id': '1000282843', 'ext': 'mp4', 
'title': 'Anupama, Anuj Share a Moment', 'season': 'Chapter 1', 'description': 'md5:8d74ed2248423b8b06d5c8add4d7a0c0', 'timestamp': 1678149000, 'channel': 'StarPlus', 'series': 'Anupama', 'season_number': 1, 'season_id': '1260022018', 'upload_date': '20230307', 'episode': 'Anupama, Anuj Share a Moment', 'episode_number': 853, 'duration': 1266, 'channel_id': '821', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.hotstar.com/in/shows/kana-kaanum-kaalangal/1260097087/back-to-school/1260097320', 'info_dict': { 'id': '1260097320', 'ext': 'mp4', 'title': 'Back To School', 'season': 'Chapter 1', 'description': 'md5:b0d6a4c8a650681491e7405496fc7e13', 'timestamp': 1650564000, 'channel': 'Hotstar Specials', 'series': 'Kana Kaanum Kaalangal', 'season_number': 1, 'season_id': '1260097089', 'upload_date': '20220421', 'episode': 'Back To School', 'episode_number': 1, 'duration': 1810, 'channel_id': '1260003991', }, 'params': {'skip_download': 'm3u8'}, }, { # Metadata call gets HTTP Error 504 with tas=10000 'url': 'https://www.hotstar.com/in/clips/e3-sairat-kahani-pyaar-ki/1000262286', 'info_dict': { 'id': '1000262286', 'ext': 'mp4', 'title': 'E3 - SaiRat, Kahani Pyaar Ki', 'description': 'md5:e3b4b3203bc0c5396fe7d0e4948a6385', 'episode': 'E3 - SaiRat, Kahani Pyaar Ki', 'upload_date': '20210606', 'timestamp': 1622943900, 'duration': 5395, }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.hotstar.com/in/movies/premam/1000091195', 'info_dict': { 'id': '1000091195', 'ext': 'mp4', 'title': 'Premam', 'release_year': 2015, 'description': 'md5:096cd8aaae8dab56524823dc19dfa9f7', 'timestamp': 1462149000, 'upload_date': '20160502', 'episode': 'Premam', 'duration': 8994, }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.hotstar.com/movies/radha-gopalam/1000057157', 'only_matching': True, }, { 'url': 'https://www.hotstar.com/in/sports/cricket/follow-the-blues-2021/recap-eng-fight-back-on-day-2/1260066104', 'only_matching': True, }, { 'url': 
'https://www.hotstar.com/in/sports/football/most-costly-pl-transfers-ft-grealish/1260065956', 'only_matching': True, }] _GEO_BYPASS = False _TYPE = { 'movies': 'movie', 'sports': 'match', 'episode': 'episode', 'tv': 'episode', 'shows': 'episode', 'clips': 'content', None: 'content', } _CONTENT_TYPE = { 'movie': 'MOVIE', 'episode': 'EPISODE', 'match': 'SPORT', 'content': 'CLIPS', } _IGNORE_MAP = { 'res': 'resolution', 'vcodec': 'video_codec', 'dr': 'dynamic_range', } _TAG_FIELDS = { 'language': 'language', 'acodec': 'audio_codec', 'vcodec': 'video_codec', } @classmethod def _video_url(cls, video_id, video_type=None, *, slug='ignore_me', root=None): assert None in (video_type, root) if not root: root = join_nonempty(cls._BASE_URL, video_type, delim='/') return f'{root}/{slug}/{video_id}' def _real_extract(self, url): video_id, video_type = self._match_valid_url(url).group('id', 'type') video_type = self._TYPE[video_type] cookies = self._get_cookies(url) # Cookies before any request if not cookies or not cookies.get(self._TOKEN_NAME): self.raise_login_required() video_data = traverse_obj( self._call_api_v1(f'{video_type}/detail', video_id, fatal=False, query={ 'tas': 5, # See https://github.com/yt-dlp/yt-dlp/issues/7946 'contentId': video_id, }), ('body', 'results', 'item', {dict})) or {} if video_data.get('drmProtected'): self.report_drm(video_id) geo_restricted = False formats, subs, has_drm = [], {}, False headers = {'Referer': f'{self._BASE_URL}/in'} content_type = traverse_obj(video_data, ('contentType', {str})) or self._CONTENT_TYPE[video_type] # See https://github.com/yt-dlp/yt-dlp/issues/396 st = self._request_webpage( f'{self._BASE_URL}/in', video_id, 'Fetching server time').get_header('x-origin-date') watch = self._call_api_v2('pages/watch', video_id, content_type, cookies, st) player_config = traverse_obj(watch, ( 'page', 'spaces', 'player', 'widget_wrappers', lambda _, v: v['template'] == 'PlayerWidget', 'widget', 'data', 'player_config', {dict}, any, 
{require('player config')})) for playback_set in traverse_obj(player_config, ( ('media_asset', 'media_asset_v2'), ('primary', 'fallback'), all, lambda _, v: url_or_none(v['content_url']), )): tags = str_or_none(playback_set.get('playback_tags')) or '' if any(f'{prefix}:{ignore}' in tags for key, prefix in self._IGNORE_MAP.items() for ignore in self._configuration_arg(key)): continue tag_dict = dict((*t.split(':', 1), None)[:2] for t in tags.split(';')) if tag_dict.get('encryption') not in ('plain', None): has_drm = True continue format_url = re.sub(r'(?<=//staragvod)(\d)', r'web\1', playback_set['content_url']) ext = determine_ext(format_url) current_formats, current_subs = [], {} try: if 'package:hls' in tags or ext == 'm3u8': current_formats, current_subs = self._extract_m3u8_formats_and_subtitles( format_url, video_id, ext='mp4', headers=headers) elif 'package:dash' in tags or ext == 'mpd': current_formats, current_subs = self._extract_mpd_formats_and_subtitles( format_url, video_id, headers=headers) elif ext == 'f4m': pass # XXX: produce broken files else: current_formats = [{ 'url': format_url, 'width': int_or_none(playback_set.get('width')), 'height': int_or_none(playback_set.get('height')), }] except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status in (403, 474): geo_restricted = True else: self.write_debug(e) continue for f in current_formats: for k, v in self._TAG_FIELDS.items(): if not f.get(k): f[k] = tag_dict.get(v) if f.get('vcodec') != 'none' and not f.get('dynamic_range'): f['dynamic_range'] = tag_dict.get('dynamic_range') if f.get('acodec') != 'none' and not f.get('audio_channels'): f['audio_channels'] = { 'stereo': 2, 'dolby51': 6, }.get(tag_dict.get('audio_channel')) if ( 'Audio_Description' in f['format_id'] or 'Audio Description' in (f.get('format_note') or '') ): f['source_preference'] = -99 + (f.get('source_preference') or -1) f['format_note'] = join_nonempty( tag_dict.get('ladder'), tag_dict.get('audio_channel') if 
f.get('acodec') != 'none' else None, f.get('format_note'), delim=', ') formats.extend(current_formats) subs = self._merge_subtitles(subs, current_subs) if not formats: if geo_restricted: self.raise_geo_restricted(countries=['IN'], metadata_available=True) elif has_drm: self.report_drm(video_id) elif not self._has_active_subscription(cookies, st): self.raise_no_formats('Your account does not have access to this content', expected=True) self._remove_duplicate_formats(formats) for f in formats: f.setdefault('http_headers', {}).update(headers) return { **self._parse_metadata_v1(video_data), 'id': video_id, 'formats': formats, 'subtitles': subs, } class HotStarPrefixIE(InfoExtractor): """ The "hotstar:" prefix is no longer in use, but this is kept for backward compatibility """ IE_DESC = False _VALID_URL = r'hotstar:(?:(?P<type>\w+):)?(?P<id>\d+)$' _TESTS = [{ 'url': 'hotstar:1000076273', 'only_matching': True, }, { 'url': 'hotstar:movies:1260009879', 'info_dict': { 'id': '1260009879', 'ext': 'mp4', 'title': 'Nuvvu Naaku Nachav', 'description': 'md5:d43701b1314e6f8233ce33523c043b7d', 'timestamp': 1567525674, 'upload_date': '20190903', 'duration': 10787, 'episode': 'Nuvvu Naaku Nachav', }, }, { 'url': 'hotstar:episode:1000234847', 'only_matching': True, }, { # contentData 'url': 'hotstar:sports:1260065956', 'only_matching': True, }, { # contentData 'url': 'hotstar:sports:1260066104', 'only_matching': True, }] def _real_extract(self, url): video_id, video_type = self._match_valid_url(url).group('id', 'type') return self.url_result(HotStarIE._video_url(video_id, video_type), HotStarIE, video_id) class HotStarSeriesIE(HotStarBaseIE): IE_NAME = 'hotstar:series' _VALID_URL = r'(?P<url>https?://(?:www\.)?hotstar\.com(?:/in)?/(?:tv|shows)/[^/]+/(?P<id>\d+))/?(?:[#?]|$)' _TESTS = [{ 'url': 'https://www.hotstar.com/in/tv/radhakrishn/1260000646', 'info_dict': { 'id': '1260000646', }, 'playlist_mincount': 690, }, { 'url': 'https://www.hotstar.com/tv/dancee-/1260050431', 
'info_dict': { 'id': '1260050431', }, 'playlist_mincount': 42, }, { 'url': 'https://www.hotstar.com/in/tv/mahabharat/435/', 'info_dict': { 'id': '435', }, 'playlist_mincount': 267, }, { # HTTP Error 504 with tas=10000 (possibly because total size is over 1000 items?) 'url': 'https://www.hotstar.com/in/shows/anupama/1260022017/', 'info_dict': { 'id': '1260022017', }, 'playlist_mincount': 1601, }] _PAGE_SIZE = 100 def _real_extract(self, url): url, series_id = self._match_valid_url(url).group('url', 'id') eid = self._call_api_v1( 'show/detail', series_id, query={'contentId': series_id})['body']['results']['item']['id'] entries = OnDemandPagedList(functools.partial( self._fetch_page, 'tray/g/1/items', series_id, 'series', {'etid': 0, 'eid': eid}, url), self._PAGE_SIZE) return self.playlist_result(entries, series_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/dvtv.py
yt_dlp/extractor/dvtv.py
import re from .common import InfoExtractor from ..utils import ( ExtractorError, determine_ext, int_or_none, join_nonempty, js_to_json, mimetype2ext, parse_iso8601, try_get, unescapeHTML, ) class DVTVIE(InfoExtractor): IE_NAME = 'dvtv' IE_DESC = 'http://video.aktualne.cz/' _VALID_URL = r'https?://video\.aktualne\.cz/(?:[^/]+/)+r~(?P<id>[0-9a-f]{32})' _TESTS = [{ 'url': 'http://video.aktualne.cz/dvtv/vondra-o-ceskem-stoleti-pri-pohledu-na-havla-mi-bylo-trapne/r~e5efe9ca855511e4833a0025900fea04/', 'md5': '67cb83e4a955d36e1b5d31993134a0c2', 'info_dict': { 'id': 'dc0768de855511e49e4b0025900fea04', 'ext': 'mp4', 'title': 'Vondra o Českém století: Při pohledu na Havla mi bylo trapně', 'duration': 1484, 'upload_date': '20141217', 'timestamp': 1418792400, }, }, { 'url': 'http://video.aktualne.cz/dvtv/dvtv-16-12-2014-utok-talibanu-boj-o-kliniku-uprchlici/r~973eb3bc854e11e498be002590604f2e/', 'info_dict': { 'title': r'DVTV 16. 12. 2014: útok Talibanu, boj o kliniku, uprchlíci', 'id': '973eb3bc854e11e498be002590604f2e', }, 'playlist': [{ 'md5': 'da7ca6be4935532241fa9520b3ad91e4', 'info_dict': { 'id': 'b0b40906854d11e4bdad0025900fea04', 'ext': 'mp4', 'title': 'Drtinová Veselovský TV 16. 12. 
2014: Témata dne', 'description': 'md5:0916925dea8e30fe84222582280b47a0', 'timestamp': 1418760010, 'upload_date': '20141216', }, }, { 'md5': '5f7652a08b05009c1292317b449ffea2', 'info_dict': { 'id': '420ad9ec854a11e4bdad0025900fea04', 'ext': 'mp4', 'title': 'Školní masakr možná změní boj s Talibanem, říká novinářka', 'description': 'md5:ff2f9f6de73c73d7cef4f756c1c1af42', 'timestamp': 1418760010, 'upload_date': '20141216', }, }, { 'md5': '498eb9dfa97169f409126c617e2a3d64', 'info_dict': { 'id': '95d35580846a11e4b6d20025900fea04', 'ext': 'mp4', 'title': 'Boj o kliniku: Veřejný zájem, nebo právo na majetek?', 'description': 'md5:889fe610a70fee5511dc3326a089188e', 'timestamp': 1418760010, 'upload_date': '20141216', }, }, { 'md5': 'b8dc6b744844032dab6ba3781a7274b9', 'info_dict': { 'id': '6fe14d66853511e4833a0025900fea04', 'ext': 'mp4', 'title': 'Pánek: Odmítání syrských uprchlíků je ostudou české vlády', 'description': 'md5:544f86de6d20c4815bea11bf2ac3004f', 'timestamp': 1418760010, 'upload_date': '20141216', }, }], }, { 'url': 'https://video.aktualne.cz/dvtv/zeman-si-jen-leci-mindraky-sobotku-nenavidi-a-babis-se-mu-te/r~960cdb3a365a11e7a83b0025900fea04/', 'md5': 'f8efe9656017da948369aa099788c8ea', 'info_dict': { 'id': '3c496fec365911e7a6500025900fea04', 'ext': 'mp4', 'title': 'Zeman si jen léčí mindráky, Sobotku nenávidí a Babiš se mu teď hodí, tvrdí Kmenta', 'duration': 1103, 'upload_date': '20170511', 'timestamp': 1494514200, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://video.aktualne.cz/v-cechach-poprve-zazni-zelenkova-zrestaurovana-mse/r~45b4b00483ec11e4883b002590604f2e/', 'only_matching': True, }, { # Test live stream video (liveStarter) parsing 'url': 'https://video.aktualne.cz/dvtv/zive-mistryne-sveta-eva-samkova-po-navratu-ze-sampionatu/r~182654c2288811e990fd0cc47ab5f122/', 'md5': '2e552e483f2414851ca50467054f9d5d', 'info_dict': { 'id': '8d116360288011e98c840cc47ab5f122', 'ext': 'mp4', 'title': 'Živě: Mistryně světa Eva Samková po návratu ze 
šampionátu', 'upload_date': '20190204', 'timestamp': 1549289591, }, 'params': { # Video content is no longer available 'skip_download': True, }, }] def _parse_video_metadata(self, js, video_id, timestamp): data = self._parse_json(js, video_id, transform_source=js_to_json) title = unescapeHTML(data['title']) live_starter = try_get(data, lambda x: x['plugins']['liveStarter'], dict) if live_starter: data.update(live_starter) formats = [] for tracks in data.get('tracks', {}).values(): for video in tracks: video_url = video.get('src') if not video_url: continue video_type = video.get('type') ext = determine_ext(video_url, mimetype2ext(video_type)) if video_type == 'application/vnd.apple.mpegurl' or ext == 'm3u8': formats.extend(self._extract_m3u8_formats( video_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) elif video_type == 'application/dash+xml' or ext == 'mpd': formats.extend(self._extract_mpd_formats( video_url, video_id, mpd_id='dash', fatal=False)) else: label = video.get('label') height = self._search_regex( r'^(\d+)[pP]', label or '', 'height', default=None) formats.append({ 'url': video_url, 'format_id': join_nonempty('http', ext, label), 'height': int_or_none(height), }) return { 'id': data.get('mediaid') or video_id, 'title': title, 'description': data.get('description'), 'thumbnail': data.get('image'), 'duration': int_or_none(data.get('duration')), 'timestamp': int_or_none(timestamp), 'formats': formats, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) timestamp = parse_iso8601(self._html_search_meta( 'article:published_time', webpage, 'published time', default=None)) items = re.findall(r'(?s)playlist\.push\(({.+?})\);', webpage) if items: return self.playlist_result( (self._parse_video_metadata(i, video_id, timestamp) for i in items), video_id, self._html_search_meta('twitter:title', webpage)) item = self._search_regex( r'(?s)BBXPlayer\.setup\((.+?)\);', 
webpage, 'video', default=None) if item: # remove function calls (ex. htmldeentitize) # TODO: this should be fixed in a general way in the js_to_json item = re.sub(r'\w+?\((.+)\)', r'\1', item) return self._parse_video_metadata(item, video_id, timestamp) raise ExtractorError('Could not find neither video nor playlist')
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/livejournal.py
yt_dlp/extractor/livejournal.py
from .common import InfoExtractor from ..utils import int_or_none class LiveJournalIE(InfoExtractor): _WORKING = False _VALID_URL = r'https?://(?:[^.]+\.)?livejournal\.com/video/album/\d+.+?\bid=(?P<id>\d+)' _TEST = { 'url': 'https://andrei-bt.livejournal.com/video/album/407/?mode=view&id=51272', 'md5': 'adaf018388572ced8a6f301ace49d4b2', 'info_dict': { 'id': '1263729', 'ext': 'mp4', 'title': 'Истребители против БПЛА', 'upload_date': '20190624', 'timestamp': 1561406715, }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) record = self._parse_json(self._search_regex( r'Site\.page\s*=\s*({.+?});', webpage, 'page data'), video_id)['video']['record'] storage_id = str(record['storageid']) title = record.get('name') if title: # remove filename extension(.mp4, .mov, etc...) title = title.rsplit('.', 1)[0] return { '_type': 'url_transparent', 'id': video_id, 'title': title, 'thumbnail': record.get('thumbnail'), 'timestamp': int_or_none(record.get('timecreate')), 'url': 'eagleplatform:vc.videos.livejournal.com:' + storage_id, 'ie_key': 'EaglePlatform', }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/gaskrank.py
yt_dlp/extractor/gaskrank.py
import re from .common import InfoExtractor from ..utils import ( float_or_none, int_or_none, unified_strdate, ) class GaskrankIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?gaskrank\.tv/tv/(?P<categories>[^/]+)/(?P<id>[^/]+)\.htm' _TESTS = [{ 'url': 'http://www.gaskrank.tv/tv/motorrad-fun/strike-einparken-durch-anfaenger-crash-mit-groesserem-flurschaden.htm', 'md5': '1ae88dbac97887d85ebd1157a95fc4f9', 'info_dict': { 'id': '201601/26955', 'ext': 'mp4', 'title': 'Strike! Einparken können nur Männer - Flurschaden hält sich in Grenzen *lol*', 'thumbnail': r're:^https?://.*\.jpg$', 'categories': ['motorrad-fun'], 'display_id': 'strike-einparken-durch-anfaenger-crash-mit-groesserem-flurschaden', 'uploader_id': 'Bikefun', 'upload_date': '20170110', }, }, { 'url': 'http://www.gaskrank.tv/tv/racing/isle-of-man-tt-2011-michael-du-15920.htm', 'md5': 'c33ee32c711bc6c8224bfcbe62b23095', 'info_dict': { 'id': '201106/15920', 'ext': 'mp4', 'title': 'Isle of Man - Michael Dunlop vs Guy Martin - schwindelig kucken', 'thumbnail': r're:^https?://.*\.jpg$', 'categories': ['racing'], 'display_id': 'isle-of-man-tt-2011-michael-du-15920', 'uploader_id': 'IOM', 'upload_date': '20170523', 'uploader_url': 'www.iomtt.com', }, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) title = self._og_search_title( webpage, default=None) or self._html_search_meta( 'title', webpage, fatal=True) categories = [self._match_valid_url(url).group('categories')] mobj = re.search( r'Video von:\s*(?P<uploader_id>[^|]*?)\s*\|\s*vom:\s*(?P<upload_date>[0-9][0-9]\.[0-9][0-9]\.[0-9][0-9][0-9][0-9])', webpage) if mobj is not None: uploader_id = mobj.groupdict().get('uploader_id') upload_date = unified_strdate(mobj.groupdict().get('upload_date')) uploader_url = self._search_regex( r'Homepage:\s*<[^>]*>(?P<uploader_url>[^<]*)', webpage, 'uploader_url', default=None) tags = re.findall( r'/tv/tags/[^/]+/"\s*>(?P<tag>[^<]*?)<', webpage) view_count = 
self._search_regex( r'class\s*=\s*"gkRight"(?:[^>]*>\s*<[^>]*)*icon-eye-open(?:[^>]*>\s*<[^>]*)*>\s*(?P<view_count>[0-9\.]*)', webpage, 'view_count', default=None) if view_count: view_count = int_or_none(view_count.replace('.', '')) average_rating = self._search_regex( r'itemprop\s*=\s*"ratingValue"[^>]*>\s*(?P<average_rating>[0-9,]+)', webpage, 'average_rating') if average_rating: average_rating = float_or_none(average_rating.replace(',', '.')) video_id = self._search_regex( r'https?://movies\.gaskrank\.tv/([^-]*?)(-[^\.]*)?\.mp4', webpage, 'video id', default=display_id) entry = self._parse_html5_media_entries(url, webpage, video_id)[0] entry.update({ 'id': video_id, 'title': title, 'categories': categories, 'display_id': display_id, 'uploader_id': uploader_id, 'upload_date': upload_date, 'uploader_url': uploader_url, 'tags': tags, 'view_count': view_count, 'average_rating': average_rating, }) return entry
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ustream.py
yt_dlp/extractor/ustream.py
import random import re import urllib.parse from .common import InfoExtractor from ..utils import ( ExtractorError, encode_data_uri, float_or_none, int_or_none, join_nonempty, mimetype2ext, str_or_none, ) class UstreamIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?(?:ustream\.tv|video\.ibm\.com)/(?P<type>recorded|embed|embed/recorded)/(?P<id>\d+)' IE_NAME = 'ustream' _EMBED_REGEX = [r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:www\.)?(?:ustream\.tv|video\.ibm\.com)/embed/.+?)\1'] _TESTS = [{ 'url': 'http://www.ustream.tv/recorded/20274954', 'md5': '088f151799e8f572f84eb62f17d73e5c', 'info_dict': { 'id': '20274954', 'ext': 'flv', 'title': 'Young Americans for Liberty February 7, 2012 2:28 AM', 'description': 'Young Americans for Liberty February 7, 2012 2:28 AM', 'timestamp': 1328577035, 'upload_date': '20120207', 'uploader': 'yaliberty', 'uploader_id': '6780869', }, }, { # From http://sportscanada.tv/canadagames/index.php/week2/figure-skating/444 # Title and uploader available only from params JSON 'url': 'http://www.ustream.tv/embed/recorded/59307601?ub=ff0000&lc=ff0000&oc=ffffff&uc=ffffff&v=3&wmode=direct', 'md5': '5a2abf40babeac9812ed20ae12d34e10', 'info_dict': { 'id': '59307601', 'ext': 'flv', 'title': '-CG11- Canada Games Figure Skating', 'uploader': 'sportscanadatv', }, 'skip': 'This Pro Broadcaster has chosen to remove this video from the ustream.tv site.', }, { 'url': 'http://www.ustream.tv/embed/10299409', 'info_dict': { 'id': '10299409', }, 'playlist_count': 3, }, { 'url': 'http://www.ustream.tv/recorded/91343263', 'info_dict': { 'id': '91343263', 'ext': 'mp4', 'title': 'GitHub Universe - General Session - Day 1', 'upload_date': '20160914', 'description': 'GitHub Universe - General Session - Day 1', 'timestamp': 1473872730, 'uploader': 'wa0dnskeqkr', 'uploader_id': '38977840', }, 'params': { 'skip_download': True, # m3u8 download }, }, { 'url': 'https://video.ibm.com/embed/recorded/128240221?&autoplay=true&controls=true&volume=100', 
'only_matching': True, }] def _get_stream_info(self, url, video_id, app_id_ver, extra_note=None): def num_to_hex(n): return hex(n)[2:] rnd = lambda x: random.randrange(int(x)) if not extra_note: extra_note = '' conn_info = self._download_json( f'http://r{rnd(1e8)}-1-{video_id}-recorded-lp-live.ums.ustream.tv/1/ustream', video_id, note='Downloading connection info' + extra_note, query={ 'type': 'viewer', 'appId': app_id_ver[0], 'appVersion': app_id_ver[1], 'rsid': f'{num_to_hex(rnd(1e8))}:{num_to_hex(rnd(1e8))}', 'rpin': f'_rpin.{rnd(1e15)}', 'referrer': url, 'media': video_id, 'application': 'recorded', }) host = conn_info[0]['args'][0]['host'] connection_id = conn_info[0]['args'][0]['connectionId'] return self._download_json( f'http://{host}/1/ustream?connectionId={connection_id}', video_id, note='Downloading stream info' + extra_note) def _get_streams(self, url, video_id, app_id_ver): # Sometimes the return dict does not have 'stream' for trial_count in range(3): stream_info = self._get_stream_info( url, video_id, app_id_ver, extra_note=f' (try {trial_count + 1})' if trial_count > 0 else '') if 'stream' in stream_info[0]['args'][0]: return stream_info[0]['args'][0]['stream'] return [] def _parse_segmented_mp4(self, dash_stream_info): def resolve_dash_template(template, idx, chunk_hash): return template.replace('%', str(idx), 1).replace('%', chunk_hash) formats = [] for stream in dash_stream_info['streams']: # Use only one provider to avoid too many formats provider = dash_stream_info['providers'][0] fragments = [{ 'url': resolve_dash_template( provider['url'] + stream['initUrl'], 0, dash_stream_info['hashes']['0']), }] for idx in range(dash_stream_info['videoLength'] // dash_stream_info['chunkTime']): fragments.append({ 'url': resolve_dash_template( provider['url'] + stream['segmentUrl'], idx, dash_stream_info['hashes'][str(idx // 10 * 10)]), }) content_type = stream['contentType'] kind = content_type.split('/')[0] f = { 'format_id': join_nonempty( 'dash', kind, 
str_or_none(stream.get('bitrate'))), 'protocol': 'http_dash_segments', # TODO: generate a MPD doc for external players? 'url': encode_data_uri(b'<MPD/>', 'text/xml'), 'ext': mimetype2ext(content_type), 'height': stream.get('height'), 'width': stream.get('width'), 'fragments': fragments, } if kind == 'video': f.update({ 'vcodec': stream.get('codec'), 'acodec': 'none', 'vbr': stream.get('bitrate'), }) else: f.update({ 'vcodec': 'none', 'acodec': stream.get('codec'), 'abr': stream.get('bitrate'), }) formats.append(f) return formats def _real_extract(self, url): m = self._match_valid_url(url) video_id = m.group('id') # some sites use this embed format (see: https://github.com/ytdl-org/youtube-dl/issues/2990) if m.group('type') == 'embed/recorded': video_id = m.group('id') desktop_url = 'http://www.ustream.tv/recorded/' + video_id return self.url_result(desktop_url, 'Ustream') if m.group('type') == 'embed': video_id = m.group('id') webpage = self._download_webpage(url, video_id) content_video_ids = self._parse_json(self._search_regex( r'ustream\.vars\.offAirContentVideoIds=([^;]+);', webpage, 'content video IDs'), video_id) return self.playlist_result( (self.url_result('http://www.ustream.tv/recorded/' + u, 'Ustream') for u in content_video_ids), video_id) params = self._download_json( f'https://api.ustream.tv/videos/{video_id}.json', video_id) error = params.get('error') if error: raise ExtractorError( f'{self.IE_NAME} returned error: {error}', expected=True) video = params['video'] title = video['title'] filesize = float_or_none(video.get('file_size')) formats = [{ 'id': video_id, 'url': video_url, 'ext': format_id, 'filesize': filesize, } for format_id, video_url in video['media_urls'].items() if video_url] if not formats: hls_streams = self._get_streams(url, video_id, app_id_ver=(11, 2)) if hls_streams: # m3u8_native leads to intermittent ContentTooShortError formats.extend(self._extract_m3u8_formats( hls_streams[0]['url'], video_id, ext='mp4', m3u8_id='hls')) ''' # 
DASH streams handling is incomplete as 'url' is missing dash_streams = self._get_streams(url, video_id, app_id_ver=(3, 1)) if dash_streams: formats.extend(self._parse_segmented_mp4(dash_streams)) ''' description = video.get('description') timestamp = int_or_none(video.get('created_at')) duration = float_or_none(video.get('length')) view_count = int_or_none(video.get('views')) uploader = video.get('owner', {}).get('username') uploader_id = video.get('owner', {}).get('id') thumbnails = [{ 'id': thumbnail_id, 'url': thumbnail_url, } for thumbnail_id, thumbnail_url in video.get('thumbnail', {}).items()] return { 'id': video_id, 'title': title, 'description': description, 'thumbnails': thumbnails, 'timestamp': timestamp, 'duration': duration, 'view_count': view_count, 'uploader': uploader, 'uploader_id': uploader_id, 'formats': formats, } class UstreamChannelIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?ustream\.tv/channel/(?P<slug>.+)' IE_NAME = 'ustream:channel' _TEST = { 'url': 'http://www.ustream.tv/channel/channeljapan', 'info_dict': { 'id': '10874166', }, 'playlist_mincount': 17, } def _real_extract(self, url): m = self._match_valid_url(url) display_id = m.group('slug') webpage = self._download_webpage(url, display_id) channel_id = self._html_search_meta('ustream:channel_id', webpage) BASE = 'http://www.ustream.tv' next_url = f'/ajax/socialstream/videos/{channel_id}/1.json' video_ids = [] while next_url: reply = self._download_json( urllib.parse.urljoin(BASE, next_url), display_id, note=f'Downloading video information (next: {len(video_ids) + 1})') video_ids.extend(re.findall(r'data-content-id="(\d.*)"', reply['data'])) next_url = reply['nextUrl'] entries = [ self.url_result('http://www.ustream.tv/recorded/' + vid, 'Ustream') for vid in video_ids] return { '_type': 'playlist', 'id': channel_id, 'display_id': display_id, 'entries': entries, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/mlssoccer.py
yt_dlp/extractor/mlssoccer.py
from .common import InfoExtractor class MLSSoccerIE(InfoExtractor): _VALID_DOMAINS = r'(?:(?:cfmontreal|intermiamicf|lagalaxy|lafc|houstondynamofc|dcunited|atlutd|mlssoccer|fcdallas|columbuscrew|coloradorapids|fccincinnati|chicagofirefc|austinfc|nashvillesc|whitecapsfc|sportingkc|soundersfc|sjearthquakes|rsl|timbers|philadelphiaunion|orlandocitysc|newyorkredbulls|nycfc)\.com|(?:torontofc)\.ca|(?:revolutionsoccer)\.net)' _VALID_URL = rf'https?://(?:www\.)?{_VALID_DOMAINS}/video/#?(?P<id>[^/&$#?]+)' _TESTS = [{ 'url': 'https://www.mlssoccer.com/video/the-octagon-can-alphonso-davies-lead-canada-to-first-world-cup-since-1986#the-octagon-can-alphonso-davies-lead-canada-to-first-world-cup-since-1986', 'info_dict': { 'id': '6276033198001', 'ext': 'mp4', 'title': 'The Octagon | Can Alphonso Davies lead Canada to first World Cup since 1986?', 'description': 'md5:f0a883ee33592a0221798f451a98be8f', 'thumbnail': 'https://cf-images.us-east-1.prod.boltdns.net/v1/static/5530036772001/1bbc44f6-c63c-4981-82fa-46b0c1f891e0/5c1ca44a-a033-4e98-b531-ff24c4947608/160x90/match/image.jpg', 'duration': 350.165, 'timestamp': 1633627291, 'uploader_id': '5530036772001', 'tags': ['club/canada'], 'is_live': False, 'upload_date': '20211007', 'filesize_approx': 255193528.83200002, }, 'params': {'skip_download': True}, }, { 'url': 'https://www.whitecapsfc.com/video/highlights-san-jose-earthquakes-vs-vancouver-whitecaps-fc-october-23-2021#highlights-san-jose-earthquakes-vs-vancouver-whitecaps-fc-october-23-2021', 'only_matching': True, }, { 'url': 'https://www.torontofc.ca/video/highlights-toronto-fc-vs-cf-montreal-october-23-2021-x6733#highlights-toronto-fc-vs-cf-montreal-october-23-2021-x6733', 'only_matching': True, }, { 'url': 'https://www.sportingkc.com/video/post-match-press-conference-john-pulskamp-oct-27-2021#post-match-press-conference-john-pulskamp-oct-27-2021', 'only_matching': True, }, { 'url': 
'https://www.soundersfc.com/video/highlights-seattle-sounders-fc-vs-sporting-kansas-city-october-23-2021', 'only_matching': True, }, { 'url': 'https://www.sjearthquakes.com/video/#highlights-austin-fc-vs-san-jose-earthquakes-june-19-2021', 'only_matching': True, }, { 'url': 'https://www.rsl.com/video/2021-u-of-u-health-mic-d-up-vs-colorado-10-16-21#2021-u-of-u-health-mic-d-up-vs-colorado-10-16-21', 'only_matching': True, }, { 'url': 'https://www.timbers.com/video/highlights-d-chara-asprilla-with-goals-in-portland-timbers-2-0-win-over-san-jose#highlights-d-chara-asprilla-with-goals-in-portland-timbers-2-0-win-over-san-jose', 'only_matching': True, }, { 'url': 'https://www.philadelphiaunion.com/video/highlights-torvphi', 'only_matching': True, }, { 'url': 'https://www.orlandocitysc.com/video/highlight-columbus-crew-vs-orlando-city-sc', 'only_matching': True, }, { 'url': 'https://www.newyorkredbulls.com/video/all-access-matchday-double-derby-week#all-access-matchday-double-derby-week', 'only_matching': True, }, { 'url': 'https://www.nycfc.com/video/highlights-nycfc-1-0-chicago-fire-fc#highlights-nycfc-1-0-chicago-fire-fc', 'only_matching': True, }, { 'url': 'https://www.revolutionsoccer.net/video/two-minute-highlights-revs-1-rapids-0-october-27-2021#two-minute-highlights-revs-1-rapids-0-october-27-2021', 'only_matching': True, }, { 'url': 'https://www.nashvillesc.com/video/goal-c-j-sapong-nashville-sc-92nd-minute', 'only_matching': True, }, { 'url': 'https://www.cfmontreal.com/video/faits-saillants-tor-v-mtl#faits-saillants-orl-v-mtl-x5645', 'only_matching': True, }, { 'url': 'https://www.intermiamicf.com/video/all-access-victory-vs-nashville-sc-by-ukg#all-access-victory-vs-nashville-sc-by-ukg', 'only_matching': True, }, { 'url': 'https://www.lagalaxy.com/video/#moment-of-the-month-presented-by-san-manuel-casino-rayan-raveloson-scores-his-se', 'only_matching': True, }, { 'url': 
'https://www.lafc.com/video/breaking-down-lafc-s-final-6-matches-of-the-2021-mls-regular-season#breaking-down-lafc-s-final-6-matches-of-the-2021-mls-regular-season', 'only_matching': True, }, { 'url': 'https://www.houstondynamofc.com/video/postgame-press-conference-michael-nelson-presented-by-coushatta-casino-res-x9660#postgame-press-conference-michael-nelson-presented-by-coushatta-casino-res-x9660', 'only_matching': True, }, { 'url': 'https://www.dcunited.com/video/tony-alfaro-my-family-pushed-me-to-believe-everything-was-possible', 'only_matching': True, }, { 'url': 'https://www.fcdallas.com/video/highlights-fc-dallas-vs-minnesota-united-fc-october-02-2021#highlights-fc-dallas-vs-minnesota-united-fc-october-02-2021', 'only_matching': True, }, { 'url': 'https://www.columbuscrew.com/video/match-rewind-columbus-crew-vs-new-york-red-bulls-october-23-2021', 'only_matching': True, }, { 'url': 'https://www.coloradorapids.com/video/postgame-reaction-robin-fraser-october-27#postgame-reaction-robin-fraser-october-27', 'only_matching': True, }, { 'url': 'https://www.fccincinnati.com/video/#keeping-cincy-chill-presented-by-coors-lite', 'only_matching': True, }, { 'url': 'https://www.chicagofirefc.com/video/all-access-fire-score-dramatic-road-win-in-cincy#all-access-fire-score-dramatic-road-win-in-cincy', 'only_matching': True, }, { 'url': 'https://www.austinfc.com/video/highlights-colorado-rapids-vs-austin-fc-september-29-2021#highlights-colorado-rapids-vs-austin-fc-september-29-2021', 'only_matching': True, }, { 'url': 'https://www.atlutd.com/video/goal-josef-martinez-scores-in-the-73rd-minute#goal-josef-martinez-scores-in-the-73rd-minute', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) data_json = self._parse_json( self._html_search_regex(r'data-options\=\"([^\"]+)\"', webpage, 'json'), video_id)['videoList'][0] return { 'id': video_id, '_type': 'url', 'url': 
'https://players.brightcove.net/{}/default_default/index.html?videoId={}'.format(data_json['accountId'], data_json['videoId']), 'ie_key': 'BrightcoveNew', }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/extractors.py
yt_dlp/extractor/extractors.py
import inspect import os from ..globals import LAZY_EXTRACTORS from ..globals import extractors as _extractors_context _CLASS_LOOKUP = None if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'): LAZY_EXTRACTORS.value = False else: try: from .lazy_extractors import _CLASS_LOOKUP LAZY_EXTRACTORS.value = True except ImportError: LAZY_EXTRACTORS.value = None if not _CLASS_LOOKUP: from . import _extractors _CLASS_LOOKUP = { name: value for name, value in inspect.getmembers(_extractors) if name.endswith('IE') and name != 'GenericIE' } _CLASS_LOOKUP['GenericIE'] = _extractors.GenericIE # We want to append to the main lookup _current = _extractors_context.value for name, ie in _CLASS_LOOKUP.items(): _current.setdefault(name, ie) def __getattr__(name): value = _CLASS_LOOKUP.get(name) if not value: raise AttributeError(f'module {__name__} has no attribute {name}') return value
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/plutotv.py
yt_dlp/extractor/plutotv.py
import re import urllib.parse import uuid from .common import InfoExtractor from ..utils import ( ExtractorError, float_or_none, int_or_none, try_get, url_or_none, ) class PlutoTVIE(InfoExtractor): _WORKING = False _VALID_URL = r'''(?x) https?://(?:www\.)?pluto\.tv(?:/[^/]+)?/on-demand /(?P<video_type>movies|series) /(?P<series_or_movie_slug>[^/]+) (?: (?:/seasons?/(?P<season_no>\d+))? (?:/episode/(?P<episode_slug>[^/]+))? )? /?(?:$|[#?])''' _INFO_URL = 'https://service-vod.clusters.pluto.tv/v3/vod/slugs/' _INFO_QUERY_PARAMS = { 'appName': 'web', 'appVersion': 'na', 'clientID': str(uuid.uuid1()), 'clientModelNumber': 'na', 'serverSideAds': 'false', 'deviceMake': 'unknown', 'deviceModel': 'web', 'deviceType': 'web', 'deviceVersion': 'unknown', 'sid': str(uuid.uuid1()), } _TESTS = [ { 'url': 'https://pluto.tv/on-demand/series/i-love-money/season/2/episode/its-in-the-cards-2009-2-3', 'md5': 'ebcdd8ed89aaace9df37924f722fd9bd', 'info_dict': { 'id': '5de6c598e9379ae4912df0a8', 'ext': 'mp4', 'title': 'It\'s In The Cards', 'episode': 'It\'s In The Cards', 'description': 'The teams face off against each other in a 3-on-2 soccer showdown. 
Strategy comes into play, though, as each team gets to select their opposing teams’ two defenders.', 'series': 'I Love Money', 'season_number': 2, 'episode_number': 3, 'duration': 3600, }, }, { 'url': 'https://pluto.tv/on-demand/series/i-love-money/season/1/', 'playlist_count': 11, 'info_dict': { 'id': '5de6c582e9379ae4912dedbd', 'title': 'I Love Money - Season 1', }, }, { 'url': 'https://pluto.tv/on-demand/series/i-love-money/', 'playlist_count': 26, 'info_dict': { 'id': '5de6c582e9379ae4912dedbd', 'title': 'I Love Money', }, }, { 'url': 'https://pluto.tv/on-demand/movies/arrival-2015-1-1', 'md5': '3cead001d317a018bf856a896dee1762', 'info_dict': { 'id': '5e83ac701fa6a9001bb9df24', 'ext': 'mp4', 'title': 'Arrival', 'description': 'When mysterious spacecraft touch down across the globe, an elite team - led by expert translator Louise Banks (Academy Award® nominee Amy Adams) – races against time to decipher their intent.', 'duration': 9000, }, }, { 'url': 'https://pluto.tv/en/on-demand/series/manhunters-fugitive-task-force/seasons/1/episode/third-times-the-charm-1-1', 'only_matching': True, }, { 'url': 'https://pluto.tv/it/on-demand/series/csi-vegas/episode/legacy-2021-1-1', 'only_matching': True, }, { 'url': 'https://pluto.tv/en/on-demand/movies/attack-of-the-killer-tomatoes-1977-1-1-ptv1', 'md5': '7db56369c0da626a32d505ec6eb3f89f', 'info_dict': { 'id': '5b190c7bb0875c36c90c29c4', 'ext': 'mp4', 'title': 'Attack of the Killer Tomatoes', 'description': 'A group of scientists band together to save the world from mutated tomatoes that KILL! 
(1978)', 'duration': 5700, }, }, ] def _to_ad_free_formats(self, video_id, formats, subtitles): ad_free_formats, ad_free_subtitles, m3u8_urls = [], {}, set() for fmt in formats: res = self._download_webpage( fmt.get('url'), video_id, note='Downloading m3u8 playlist', fatal=False) if not res: continue first_segment_url = re.search( r'^(https?://.*/)0\-(end|[0-9]+)/[^/]+\.ts$', res, re.MULTILINE) if first_segment_url: m3u8_urls.add( urllib.parse.urljoin(first_segment_url.group(1), '0-end/master.m3u8')) continue first_segment_url = re.search( r'^(https?://.*/).+\-0+[0-1]0\.ts$', res, re.MULTILINE) if first_segment_url: m3u8_urls.add( urllib.parse.urljoin(first_segment_url.group(1), 'master.m3u8')) continue for m3u8_url in m3u8_urls: fmts, subs = self._extract_m3u8_formats_and_subtitles( m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) ad_free_formats.extend(fmts) ad_free_subtitles = self._merge_subtitles(ad_free_subtitles, subs) if ad_free_formats: formats, subtitles = ad_free_formats, ad_free_subtitles else: self.report_warning('Unable to find ad-free formats') return formats, subtitles def _get_video_info(self, video_json, slug, series_name=None): video_id = video_json.get('_id', slug) formats, subtitles = [], {} for video_url in try_get(video_json, lambda x: x['stitched']['urls'], list) or []: if video_url.get('type') != 'hls': continue url = url_or_none(video_url.get('url')) fmts, subs = self._extract_m3u8_formats_and_subtitles( url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) formats.extend(fmts) subtitles = self._merge_subtitles(subtitles, subs) formats, subtitles = self._to_ad_free_formats(video_id, formats, subtitles) info = { 'id': video_id, 'formats': formats, 'subtitles': subtitles, 'title': video_json.get('name'), 'description': video_json.get('description'), 'duration': float_or_none(video_json.get('duration'), scale=1000), } if series_name: info.update({ 'series': series_name, 'episode': video_json.get('name'), 
'season_number': int_or_none(video_json.get('season')), 'episode_number': int_or_none(video_json.get('number')), }) return info def _real_extract(self, url): mobj = self._match_valid_url(url).groupdict() info_slug = mobj['series_or_movie_slug'] video_json = self._download_json(self._INFO_URL + info_slug, info_slug, query=self._INFO_QUERY_PARAMS) if mobj['video_type'] == 'series': series_name = video_json.get('name', info_slug) season_number, episode_slug = mobj.get('season_number'), mobj.get('episode_slug') videos = [] for season in video_json['seasons']: if season_number is not None and season_number != int_or_none(season.get('number')): continue for episode in season['episodes']: if episode_slug is not None and episode_slug != episode.get('slug'): continue videos.append(self._get_video_info(episode, episode_slug, series_name)) if not videos: raise ExtractorError('Failed to find any videos to extract') if episode_slug is not None and len(videos) == 1: return videos[0] playlist_title = series_name if season_number is not None: playlist_title += ' - Season %d' % season_number return self.playlist_result(videos, playlist_id=video_json.get('_id', info_slug), playlist_title=playlist_title) return self._get_video_info(video_json, info_slug)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/commonmistakes.py
yt_dlp/extractor/commonmistakes.py
from .common import InfoExtractor from ..utils import ExtractorError class CommonMistakesIE(InfoExtractor): IE_DESC = False # Do not list _VALID_URL = r'(?:url|URL|yt-dlp)$' _TESTS = [{ 'url': 'url', 'only_matching': True, }, { 'url': 'URL', 'only_matching': True, }] def _real_extract(self, url): msg = ( f'You\'ve asked yt-dlp to download the URL "{url}". ' 'That doesn\'t make any sense. ' 'Simply remove the parameter in your command or configuration.' ) if not self.get_param('verbose'): msg += ' Add -v to the command line to see what arguments and configuration yt-dlp has' raise ExtractorError(msg, expected=True) class UnicodeBOMIE(InfoExtractor): IE_DESC = False _VALID_URL = r'(?P<bom>\ufeff)(?P<id>.*)$' _TESTS = [{ 'url': '\ufeffhttp://www.youtube.com/watch?v=BaW_jenozKc', 'only_matching': True, }] def _real_extract(self, url): real_url = self._match_id(url) self.report_warning( 'Your URL starts with a Byte Order Mark (BOM). ' f'Removing the BOM and looking for "{real_url}" ...') return self.url_result(real_url) class BlobIE(InfoExtractor): IE_DESC = False _VALID_URL = r'blob:' _TESTS = [{ 'url': 'blob:https://www.youtube.com/4eb3d090-a761-46e6-8083-c32016a36e3b', 'only_matching': True, }] def _real_extract(self, url): raise ExtractorError( 'You\'ve asked yt-dlp to download a blob URL. ' 'A blob URL exists only locally in your browser. ' 'It is not possible for yt-dlp to access it.', expected=True)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/dfb.py
yt_dlp/extractor/dfb.py
from .common import InfoExtractor from ..utils import unified_strdate class DFBIE(InfoExtractor): IE_NAME = 'tv.dfb.de' _VALID_URL = r'https?://tv\.dfb\.de/video/(?P<display_id>[^/]+)/(?P<id>\d+)' _TEST = { 'url': 'http://tv.dfb.de/video/u-19-em-stimmen-zum-spiel-gegen-russland/11633/', 'md5': 'ac0f98a52a330f700b4b3034ad240649', 'info_dict': { 'id': '11633', 'display_id': 'u-19-em-stimmen-zum-spiel-gegen-russland', 'ext': 'mp4', 'title': 'U 19-EM: Stimmen zum Spiel gegen Russland', 'upload_date': '20150714', }, } def _real_extract(self, url): display_id, video_id = self._match_valid_url(url).groups() player_info = self._download_xml( f'http://tv.dfb.de/server/hd_video.php?play={video_id}', display_id) video_info = player_info.find('video') stream_access_url = self._proto_relative_url(video_info.find('url').text.strip()) formats = [] # see http://tv.dfb.de/player/js/ajax.js for the method to extract m3u8 formats for sa_url in (stream_access_url, stream_access_url + '&area=&format=iphone'): stream_access_info = self._download_xml(sa_url, display_id) token_el = stream_access_info.find('token') manifest_url = token_el.attrib['url'] + '?' + 'hdnea=' + token_el.attrib['auth'] if '.f4m' in manifest_url: formats.extend(self._extract_f4m_formats( manifest_url + '&hdcore=3.2.0', display_id, f4m_id='hds', fatal=False)) else: formats.extend(self._extract_m3u8_formats( manifest_url, display_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) return { 'id': video_id, 'display_id': display_id, 'title': video_info.find('title').text, 'thumbnail': f'http://tv.dfb.de/images/{video_id}_640x360.jpg', 'upload_date': unified_strdate(video_info.find('time_date').text), 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/sexu.py
yt_dlp/extractor/sexu.py
from .common import InfoExtractor class SexuIE(InfoExtractor): _WORKING = False _VALID_URL = r'https?://(?:www\.)?sexu\.com/(?P<id>\d+)' _TEST = { 'url': 'http://sexu.com/961791/', 'md5': 'ff615aca9691053c94f8f10d96cd7884', 'info_dict': { 'id': '961791', 'ext': 'mp4', 'title': 'md5:4d05a19a5fc049a63dbbaf05fb71d91b', 'description': 'md5:2b75327061310a3afb3fbd7d09e2e403', 'categories': list, # NSFW 'thumbnail': r're:https?://.*\.jpg$', 'age_limit': 18, }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) jwvideo = self._parse_json( self._search_regex(r'\.setup\(\s*({.+?})\s*\);', webpage, 'jwvideo'), video_id) sources = jwvideo['sources'] formats = [{ 'url': source['file'].replace('\\', ''), 'format_id': source.get('label'), 'height': int(self._search_regex( r'^(\d+)[pP]', source.get('label', ''), 'height', default=None)), } for source in sources if source.get('file')] title = self._html_search_regex( r'<title>([^<]+)\s*-\s*Sexu\.Com</title>', webpage, 'title') description = self._html_search_meta( 'description', webpage, 'description') thumbnail = jwvideo.get('image') categories_str = self._html_search_meta( 'keywords', webpage, 'categories') categories = ( None if categories_str is None else categories_str.split(',')) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'categories': categories, 'formats': formats, 'age_limit': 18, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/shemaroome.py
yt_dlp/extractor/shemaroome.py
import base64 from .common import InfoExtractor from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7 from ..utils import ( ExtractorError, unified_strdate, ) class ShemarooMeIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?shemaroome\.com/(?:movies|shows)/(?P<id>[^?#]+)' _TESTS = [{ 'url': 'https://www.shemaroome.com/movies/dil-hai-tumhaara', 'info_dict': { 'id': 'dil-hai-tumhaara', 'ext': 'mp4', 'title': 'Dil Hai Tumhaara', 'release_date': '20020906', 'thumbnail': r're:^https?://.*\.jpg$', 'description': 'md5:2782c4127807103cf5a6ae2ca33645ce', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.shemaroome.com/shows/jurm-aur-jazbaat/laalach', 'info_dict': { 'id': 'jurm-aur-jazbaat_laalach', 'ext': 'mp4', 'title': 'Laalach', 'description': 'md5:92b79c2dcb539b0ab53f9fa5a048f53c', 'thumbnail': r're:^https?://.*\.jpg$', 'release_date': '20210507', }, 'params': { 'skip_download': True, }, 'skip': 'Premium videos cannot be downloaded yet.', }, { 'url': 'https://www.shemaroome.com/shows/jai-jai-jai-bajrang-bali/jai-jai-jai-bajrang-bali-episode-99', 'info_dict': { 'id': 'jai-jai-jai-bajrang-bali_jai-jai-jai-bajrang-bali-episode-99', 'ext': 'mp4', 'title': 'Jai Jai Jai Bajrang Bali Episode 99', 'description': 'md5:850d127a18ee3f9529d7fbde2f49910d', 'thumbnail': r're:^https?://.*\.jpg$', 'release_date': '20110101', }, 'params': { 'skip_download': True, }, }] def _real_extract(self, url): video_id = self._match_id(url).replace('/', '_') webpage = self._download_webpage(url, video_id) title = self._search_regex(r'id=\"ma_title\" value=\"([^\"]+)', webpage, 'title') thumbnail = self._og_search_thumbnail(webpage) content_def = self._search_regex(r'id=\"content_definition\" value=\"([^\"]+)', webpage, 'content_def') catalog_id = self._search_regex(r'id=\"catalog_id\" value=\"([^\"]+)', webpage, 'catalog_id') item_category = self._search_regex(r'id=\"item_category\" value=\"([^\"]+)', webpage, 'item_category') content_id = self._search_regex(r'id=\"content_id\" 
value=\"([^\"]+)', webpage, 'content_id') data = f'catalog_id={catalog_id}&content_id={content_id}&category={item_category}&content_def={content_def}' data_json = self._download_json('https://www.shemaroome.com/users/user_all_lists', video_id, data=data.encode()) if not data_json.get('status'): raise ExtractorError('Premium videos cannot be downloaded yet.', expected=True) url_data = base64.b64decode(data_json['new_play_url']) key = base64.b64decode(data_json['key']) iv = bytes(16) m3u8_url = unpad_pkcs7(aes_cbc_decrypt_bytes(url_data, key, iv)).decode('ascii') headers = {'stream_key': data_json['stream_key']} formats, m3u8_subs = self._extract_m3u8_formats_and_subtitles(m3u8_url, video_id, fatal=False, headers=headers) for fmt in formats: fmt['http_headers'] = headers release_date = self._html_search_regex( (r'itemprop="uploadDate">\s*([\d-]+)', r'id="release_date" value="([\d-]+)'), webpage, 'release date', fatal=False) subtitles = {} sub_url = data_json.get('subtitle') if sub_url: subtitles.setdefault('EN', []).append({ 'url': self._proto_relative_url(sub_url), }) subtitles = self._merge_subtitles(subtitles, m3u8_subs) description = self._html_search_regex(r'(?s)>Synopsis(</.+?)</', webpage, 'description', fatal=False) return { 'id': video_id, 'formats': formats, 'title': title, 'thumbnail': thumbnail, 'release_date': unified_strdate(release_date), 'description': description, 'subtitles': subtitles, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false