repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/servus.py
yt_dlp/extractor/servus.py
from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    float_or_none,
    format_field,
    int_or_none,
    join_nonempty,
    traverse_obj,
    unescapeHTML,
    unified_timestamp,
)


class ServusIE(InfoExtractor):
    """Extractor for ServusTV / PM Wissen videos (Red Bull player API).

    Metadata comes from two places: the Red Bull player API response and
    the page's Next.js data blob (the latter takes precedence where both
    provide a field — see the ``**traverse_obj`` merge at the end of
    ``_real_extract``).
    """

    # Accepts current (v3) URLs on servustv.com / pm-wissen.com as well as
    # the legacy v1/v2 layouts on servus.com.
    _VALID_URL = r'''(?x)
                    https?://
                        (?:www\.)?
                        (?:
                            servus\.com/(?:(?:at|de)/p/[^/]+|tv/videos)|
                            (?:servustv|pm-wissen)\.com/(?:[^/]+/)?v(?:ideos)?
                        )
                        /(?P<id>[aA]{2}-?\w+|\d+-\d+)
                    '''
    _TESTS = [{
        # URL schema v3
        'url': 'https://www.servustv.com/natur/v/aa-28bycqnh92111/',
        'info_dict': {
            'id': 'AA-28BYCQNH92111',
            'ext': 'mp4',
            'title': 'Vie Ferrate - Klettersteige in den Alpen',
            'description': 'md5:25e47ddd83a009a0f9789ba18f2850ce',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 2823,
            'timestamp': 1655752333,
            'upload_date': '20220620',
            'series': 'Bergwelten',
            'season': 'Season 11',
            'season_number': 11,
            'episode': 'Episode 8 - Vie Ferrate – Klettersteige in den Alpen',
            'episode_number': 8,
            'categories': ['Bergwelten'],
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://www.servustv.com/natur/v/aa-1xg5xwmgw2112/',
        'only_matching': True,
    }, {
        'url': 'https://www.servustv.com/natur/v/aansszcx3yi9jmlmhdc1/',
        'only_matching': True,
    }, {
        # URL schema v2
        'url': 'https://www.servustv.com/videos/aa-1t6vbu5pw1w12/',
        'only_matching': True,
    }, {
        # URL schema v1
        'url': 'https://www.servus.com/de/p/Die-Gr%C3%BCnen-aus-Sicht-des-Volkes/AA-1T6VBU5PW1W12/',
        'only_matching': True,
    }, {
        'url': 'https://www.servus.com/at/p/Wie-das-Leben-beginnt/1309984137314-381415152/',
        'only_matching': True,
    }, {
        'url': 'https://www.servus.com/tv/videos/aa-1t6vbu5pw1w12/',
        'only_matching': True,
    }, {
        'url': 'https://www.servus.com/tv/videos/1380889096408-1235196658/',
        'only_matching': True,
    }, {
        'url': 'https://www.pm-wissen.com/videos/aa-24mus4g2w2112/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # IDs are case-insensitive in URLs; the API is queried with upper case.
        video_id = self._match_id(url).upper()
        webpage = self._download_webpage(url, video_id)
        next_data = self._search_nextjs_data(webpage, video_id, fatal=False)
        video = self._download_json(
            'https://api-player.redbull.com/stv/servus-tv-playnet',
            video_id, 'Downloading video JSON', query={'videoId': video_id})

        if not video.get('videoUrl'):
            # Raises with a human-readable reason (FSK block, not yet
            # available, ...) extracted from the API error payload.
            self._report_errors(video)
        formats, subtitles = self._extract_m3u8_formats_and_subtitles(
            video['videoUrl'], video_id, 'mp4', m3u8_id='hls')

        # Season/episode numbers are only present inside the display strings
        # (e.g. 'Season 11', 'Episode 8 - ...'), so they are regex-parsed out.
        season = video.get('season')
        season_number = int_or_none(self._search_regex(
            r'Season (\d+)', season or '', 'season number', default=None))
        episode = video.get('chapter')
        episode_number = int_or_none(self._search_regex(
            r'Episode (\d+)', episode or '', 'episode number', default=None))

        return {
            'id': video_id,
            'title': video.get('title'),
            'description': self._get_description(next_data) or video.get('description'),
            'thumbnail': video.get('poster'),
            'duration': float_or_none(video.get('duration')),
            'timestamp': unified_timestamp(video.get('currentSunrise')),
            'series': video.get('label'),
            'season': season,
            'season_number': season_number,
            'episode': episode,
            'episode_number': episode_number,
            'formats': formats,
            'subtitles': subtitles,
            # NOTE: merged last on purpose — Next.js data overrides the
            # API-provided title/timestamp/duration when available.
            **traverse_obj(next_data, ('props', 'pageProps', 'data', {
                'title': ('title', 'rendered', {str}),
                'timestamp': ('stv_date', 'raw', {int}),
                'duration': ('stv_duration', {float_or_none}),
                'categories': ('category_names', ..., {str}),
            })),
        }

    def _get_description(self, next_data):
        # Joins short + long description from the Next.js payload, collapsing
        # double newlines within each part and separating the parts by a
        # blank line.
        return join_nonempty(*traverse_obj(next_data, (
            'props', 'pageProps', 'data',
            ('stv_short_description', 'stv_long_description'), {str},
            {lambda x: x.replace('\n\n', '\n')}, {unescapeHTML})), delim='\n\n')

    def _report_errors(self, video):
        """Raise an ExtractorError describing why no videoUrl was returned."""
        playability_errors = traverse_obj(video, ('playabilityErrors', ...))
        if not playability_errors:
            raise ExtractorError('No videoUrl and no information about errors')

        elif 'FSK_BLOCKED' in playability_errors:
            # Age-restriction block: build an availability window message
            # from whichever detail fields are present.
            details = traverse_obj(video, ('playabilityErrorDetails', 'FSK_BLOCKED'), expected_type=dict)
            message = format_field(''.join((
                format_field(details, 'minEveningHour', ' from %02d:00'),
                format_field(details, 'maxMorningHour', ' to %02d:00'),
                format_field(details, 'minAge', ' (Minimum age %d)'),
            )), None, 'Only available%s') or 'Blocked by FSK with unknown availability'

        elif 'NOT_YET_AVAILABLE' in playability_errors:
            message = format_field(
                video, (('playabilityErrorDetails', 'NOT_YET_AVAILABLE', 'availableFrom'), 'currentSunrise'),
                'Only available from %s') or 'Video not yet available with unknown availability'

        else:
            message = f'Video unavailable: {", ".join(playability_errors)}'

        raise ExtractorError(message, expected=True)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tweakers.py
yt_dlp/extractor/tweakers.py
from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, mimetype2ext, ) class TweakersIE(InfoExtractor): _VALID_URL = r'https?://tweakers\.net/video/(?P<id>\d+)' _TEST = { 'url': 'https://tweakers.net/video/9926/new-nintendo-3ds-xl-op-alle-fronten-beter.html', 'md5': 'fe73e417c093a788e0160c4025f88b15', 'info_dict': { 'id': '9926', 'ext': 'mp4', 'title': 'New Nintendo 3DS XL - Op alle fronten beter', 'description': 'md5:3789b21fed9c0219e9bcaacd43fab280', 'thumbnail': r're:^https?://.*\.jpe?g$', 'duration': 386, 'uploader_id': 's7JeEm', }, } def _real_extract(self, url): video_id = self._match_id(url) video_data = self._download_json( f'https://tweakers.net/video/s1playlist/{video_id}/1920/1080/playlist.json', video_id)['items'][0] title = video_data['title'] formats = [] for location in video_data.get('locations', {}).get('progressive', []): format_id = location.get('label') width = int_or_none(location.get('width')) height = int_or_none(location.get('height')) for source in location.get('sources', []): source_url = source.get('src') if not source_url: continue ext = mimetype2ext(source.get('type')) or determine_ext(source_url) formats.append({ 'format_id': format_id, 'url': source_url, 'width': width, 'height': height, 'ext': ext, }) return { 'id': video_id, 'title': title, 'description': video_data.get('description'), 'thumbnail': video_data.get('poster'), 'duration': int_or_none(video_data.get('duration')), 'uploader_id': video_data.get('account'), 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/udemy.py
yt_dlp/extractor/udemy.py
import re
import urllib.parse

from .common import InfoExtractor
from ..networking import Request
from ..networking.exceptions import HTTPError
from ..utils import (
    ExtractorError,
    determine_ext,
    extract_attributes,
    float_or_none,
    int_or_none,
    js_to_json,
    smuggle_url,
    try_get,
    unescapeHTML,
    unsmuggle_url,
    url_or_none,
    urlencode_postdata,
)


class UdemyIE(InfoExtractor):
    """Extractor for single Udemy lectures.

    Requires either an already-enrolled account (cookies / netrc login) or a
    free course, in which case the extractor enrolls automatically on a 403.
    """

    IE_NAME = 'udemy'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:[^/]+\.)?udemy\.com/
                        (?:
                            [^#]+\#/lecture/|
                            lecture/view/?\?lectureId=|
                            [^/]+/learn/v4/t/lecture/
                        )
                        (?P<id>\d+)
                    '''
    _LOGIN_URL = 'https://www.udemy.com/join/login-popup/?displayType=ajax&showSkipButton=1'
    _ORIGIN_URL = 'https://www.udemy.com'
    _NETRC_MACHINE = 'udemy'
    _TESTS = [{
        'url': 'https://www.udemy.com/java-tutorial/#/lecture/172757',
        'md5': '98eda5b657e752cf945d8445e261b5c5',
        'info_dict': {
            'id': '160614',
            'ext': 'mp4',
            'title': 'Introduction and Installation',
            'description': 'md5:c0d51f6f21ef4ec65f091055a5eef876',
            'duration': 579.29,
        },
        'skip': 'Requires udemy account credentials',
    }, {
        # new URL schema
        'url': 'https://www.udemy.com/electric-bass-right-from-the-start/learn/v4/t/lecture/4580906',
        'only_matching': True,
    }, {
        # no url in outputs format entry
        'url': 'https://www.udemy.com/learn-web-development-complete-step-by-step-guide-to-success/learn/v4/t/lecture/4125812',
        'only_matching': True,
    }, {
        # only outputs rendition
        'url': 'https://www.udemy.com/how-you-can-help-your-local-community-5-amazing-examples/learn/v4/t/lecture/3225750?start=0',
        'only_matching': True,
    }, {
        'url': 'https://wipro.udemy.com/java-tutorial/#/lecture/172757',
        'only_matching': True,
    }]

    def _extract_course_info(self, webpage, video_id):
        """Return (course_id, course_title) parsed from a course/lecture page."""
        course = self._parse_json(
            unescapeHTML(self._search_regex(
                r'ng-init=["\'].*\bcourse=({.+?})[;"\']',
                webpage, 'course', default='{}')),
            video_id, fatal=False) or {}
        # Fall back to other embedding styles when the ng-init blob is absent.
        course_id = course.get('id') or self._search_regex(
            [
                r'data-course-id=["\'](\d+)',
                r'&quot;courseId&quot;\s*:\s*(\d+)',
            ], webpage, 'course id')
        return course_id, course.get('title')

    def _enroll_course(self, base_url, webpage, course_id):
        """Enroll in a free course; raise for paid ones (checkout link present)."""
        def combine_url(base_url, url):
            return urllib.parse.urljoin(base_url, url) if not url.startswith('http') else url

        checkout_url = unescapeHTML(self._search_regex(
            r'href=(["\'])(?P<url>(?:https?://(?:www\.)?udemy\.com)?/(?:payment|cart)/checkout/.+?)\1',
            webpage, 'checkout url', group='url', default=None))
        if checkout_url:
            raise ExtractorError(
                f'Course {course_id} is not free. You have to pay for it before you can download. '
                f'Use this URL to confirm purchase: {combine_url(base_url, checkout_url)}',
                expected=True)

        enroll_url = unescapeHTML(self._search_regex(
            r'href=(["\'])(?P<url>(?:https?://(?:www\.)?udemy\.com)?/course/subscribe/.+?)\1',
            webpage, 'enroll url', group='url', default=None))
        if enroll_url:
            webpage = self._download_webpage(
                combine_url(base_url, enroll_url), course_id, 'Enrolling in the course',
                headers={'Referer': base_url})
            if '>You have enrolled in' in webpage:
                self.to_screen(f'{course_id}: Successfully enrolled in the course')

    def _download_lecture(self, course_id, lecture_id):
        """Fetch the lecture JSON (title, description, view_html, asset)."""
        return self._download_json(
            f'https://www.udemy.com/api-2.0/users/me/subscribed-courses/{course_id}/lectures/{lecture_id}?',
            lecture_id, 'Downloading lecture JSON', query={
                'fields[lecture]': 'title,description,view_html,asset',
                'fields[asset]': 'asset_type,stream_url,thumbnail_url,download_urls,stream_urls,captions,data,course_is_drmed',
            })

    def _handle_error(self, response):
        """Raise an ExtractorError if an API response carries an error object."""
        if not isinstance(response, dict):
            return
        error = response.get('error')
        if error:
            error_str = 'Udemy returned error #{}: {}'.format(error.get('code'), error.get('message'))
            error_data = error.get('data')
            if error_data:
                error_str += ' - {}'.format(error_data.get('formErrors'))
            raise ExtractorError(error_str, expected=True)

    def _download_webpage_handle(self, *args, **kwargs):
        # Overridden to force a desktop UA and to detect PerimeterX
        # bot-protection pages, which require browser cookies to bypass.
        headers = kwargs.get('headers', {}).copy()
        headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36'
        kwargs['headers'] = headers
        ret = super()._download_webpage_handle(*args, **kwargs)
        if not ret:
            return ret
        webpage, _ = ret
        if any(p in webpage for p in (
                '>Please verify you are a human',
                'Access to this page has been denied because we believe you are using automation tools to browse the website',
                '"_pxCaptcha"')):
            raise ExtractorError(
                'Udemy asks you to solve a CAPTCHA. Login with browser, '
                'solve CAPTCHA, then export cookies and pass cookie file to '
                'yt-dlp with --cookies.', expected=True)
        return ret

    def _download_json(self, url_or_request, *args, **kwargs):
        # Overridden to attach Udemy API auth headers (derived from session
        # cookies) and to funnel every response through _handle_error.
        headers = {
            'X-Udemy-Snail-Case': 'true',
            'X-Requested-With': 'XMLHttpRequest',
        }
        for cookie in self.cookiejar:
            if cookie.name == 'client_id':
                headers['X-Udemy-Client-Id'] = cookie.value
            elif cookie.name == 'access_token':
                headers['X-Udemy-Bearer-Token'] = cookie.value
                headers['X-Udemy-Authorization'] = f'Bearer {cookie.value}'

        if isinstance(url_or_request, Request):
            url_or_request.headers.update(headers)
        else:
            url_or_request = Request(url_or_request, headers=headers)

        response = super()._download_json(url_or_request, *args, **kwargs)
        self._handle_error(response)
        return response

    def _perform_login(self, username, password):
        """Log in via the login popup form unless already authenticated."""
        login_popup = self._download_webpage(
            self._LOGIN_URL, None, 'Downloading login popup')

        def is_logged(webpage):
            return any(re.search(p, webpage) for p in (
                r'href=["\'](?:https://www\.udemy\.com)?/user/logout/',
                r'>Logout<'))

        # already logged in
        if is_logged(login_popup):
            return

        login_form = self._form_hidden_inputs('login-form', login_popup)

        login_form.update({
            'email': username,
            'password': password,
        })

        response = self._download_webpage(
            self._LOGIN_URL, None, 'Logging in',
            data=urlencode_postdata(login_form),
            headers={
                'Referer': self._ORIGIN_URL,
                'Origin': self._ORIGIN_URL,
            })

        if not is_logged(response):
            error = self._html_search_regex(
                r'(?s)<div[^>]+class="form-errors[^"]*">(.+?)</div>',
                response, 'error message', default=None)
            if error:
                raise ExtractorError(f'Unable to login: {error}', expected=True)
            raise ExtractorError('Unable to log in')

    def _real_extract(self, url):
        lecture_id = self._match_id(url)
        # course_id may be smuggled in by UdemyCourseIE to skip a page fetch.
        course_id = unsmuggle_url(url, {})[1].get('course_id')

        webpage = None
        if not course_id:
            webpage = self._download_webpage(url, lecture_id)
            course_id, _ = self._extract_course_info(webpage, lecture_id)

        try:
            lecture = self._download_lecture(course_id, lecture_id)
        except ExtractorError as e:
            # Error could possibly mean we are not enrolled in the course
            if isinstance(e.cause, HTTPError) and e.cause.status == 403:
                webpage = webpage or self._download_webpage(url, lecture_id)
                self._enroll_course(url, webpage, course_id)
                lecture = self._download_lecture(course_id, lecture_id)
            else:
                raise

        title = lecture['title']
        description = lecture.get('description')

        asset = lecture['asset']

        asset_type = asset.get('asset_type') or asset.get('assetType')
        if asset_type != 'Video':
            raise ExtractorError(
                f'Lecture {lecture_id} is not a video', expected=True)

        # Some lectures are plain YouTube embeds; hand those off.
        stream_url = asset.get('stream_url') or asset.get('streamUrl')
        if stream_url:
            youtube_url = self._search_regex(
                r'(https?://www\.youtube\.com/watch\?v=.*)', stream_url, 'youtube URL', default=None)
            if youtube_url:
                return self.url_result(youtube_url, 'Youtube')

        video_id = str(asset['id'])
        thumbnail = asset.get('thumbnail_url') or asset.get('thumbnailUrl')
        duration = float_or_none(asset.get('data', {}).get('duration'))

        subtitles = {}
        automatic_captions = {}

        formats = []

        def extract_output_format(src, f_id):
            # Map one 'outputs' rendition dict onto a yt-dlp format dict.
            return {
                'url': src.get('url'),
                'format_id': '%sp' % (src.get('height') or f_id),
                'width': int_or_none(src.get('width')),
                'height': int_or_none(src.get('height')),
                'vbr': int_or_none(src.get('video_bitrate_in_kbps')),
                'vcodec': src.get('video_codec'),
                'fps': int_or_none(src.get('frame_rate')),
                'abr': int_or_none(src.get('audio_bitrate_in_kbps')),
                'acodec': src.get('audio_codec'),
                'asr': int_or_none(src.get('audio_sample_rate')),
                'tbr': int_or_none(src.get('total_bitrate_in_kbps')),
                'filesize': int_or_none(src.get('file_size_in_bytes')),
            }

        outputs = asset.get('data', {}).get('outputs')
        if not isinstance(outputs, dict):
            outputs = {}

        def add_output_format_meta(f, key):
            # Enrich a format with metadata from the matching 'outputs' entry,
            # keeping f's own fields on conflict.
            output = outputs.get(key)
            if isinstance(output, dict):
                output_format = extract_output_format(output, key)
                output_format.update(f)
                return output_format
            return f

        def extract_formats(source_list):
            # Collect formats from a player 'sources' list (m3u8 or direct).
            if not isinstance(source_list, list):
                return
            for source in source_list:
                video_url = url_or_none(source.get('file') or source.get('src'))
                if not video_url:
                    continue
                if source.get('type') == 'application/x-mpegURL' or determine_ext(video_url) == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        video_url, video_id, 'mp4', entry_protocol='m3u8_native',
                        m3u8_id='hls', fatal=False))
                    continue
                format_id = source.get('label')
                f = {
                    'url': video_url,
                    'format_id': f'{format_id}p',
                    'height': int_or_none(format_id),
                }
                if format_id:
                    # Some videos contain additional metadata (e.g.
                    # https://www.udemy.com/ios9-swift/learn/#/lecture/3383208)
                    f = add_output_format_meta(f, format_id)
                formats.append(f)

        def extract_subtitles(track_list):
            # Collect <track>-style caption entries into subtitles /
            # automatic_captions depending on the 'autogenerated' flag.
            if not isinstance(track_list, list):
                return
            for track in track_list:
                if not isinstance(track, dict):
                    continue
                if track.get('kind') != 'captions':
                    continue
                src = url_or_none(track.get('src'))
                if not src:
                    continue
                lang = track.get('language') or track.get(
                    'srclang') or track.get('label')
                sub_dict = automatic_captions if track.get(
                    'autogenerated') is True else subtitles
                sub_dict.setdefault(lang, []).append({
                    'url': src,
                })

        for url_kind in ('download', 'stream'):
            urls = asset.get(f'{url_kind}_urls')
            if isinstance(urls, dict):
                extract_formats(urls.get('Video'))

        captions = asset.get('captions')
        if isinstance(captions, list):
            for cc in captions:
                if not isinstance(cc, dict):
                    continue
                cc_url = url_or_none(cc.get('url'))
                if not cc_url:
                    continue
                lang = try_get(cc, lambda x: x['locale']['locale'], str)
                sub_dict = (automatic_captions if cc.get('source') == 'auto'
                            else subtitles)
                sub_dict.setdefault(lang or 'en', []).append({
                    'url': cc_url,
                })

        view_html = lecture.get('view_html')
        if view_html:
            # Fallback: scrape <source> tags out of the embedded player HTML,
            # de-duplicating URLs already seen.
            view_html_urls = set()
            for source in re.findall(r'<source[^>]+>', view_html):
                attributes = extract_attributes(source)
                src = attributes.get('src')
                if not src:
                    continue
                res = attributes.get('data-res')
                height = int_or_none(res)
                if src in view_html_urls:
                    continue
                view_html_urls.add(src)
                if attributes.get('type') == 'application/x-mpegURL' or determine_ext(src) == 'm3u8':
                    m3u8_formats = self._extract_m3u8_formats(
                        src, video_id, 'mp4', entry_protocol='m3u8_native',
                        m3u8_id='hls', fatal=False)
                    # Recover height/tbr from the variant URL path when the
                    # playlist itself does not declare them.
                    for f in m3u8_formats:
                        m = re.search(r'/hls_(?P<height>\d{3,4})_(?P<tbr>\d{2,})/', f['url'])
                        if m:
                            if not f.get('height'):
                                f['height'] = int(m.group('height'))
                            if not f.get('tbr'):
                                f['tbr'] = int(m.group('tbr'))
                    formats.extend(m3u8_formats)
                else:
                    formats.append(add_output_format_meta({
                        'url': src,
                        'format_id': f'{height}p' if height else None,
                        'height': height,
                    }, res))

            # react rendition since 2017.04.15 (see
            # https://github.com/ytdl-org/youtube-dl/issues/12744)
            data = self._parse_json(
                self._search_regex(
                    r'videojs-setup-data=(["\'])(?P<data>{.+?})\1', view_html,
                    'setup data', default='{}', group='data'), video_id,
                transform_source=unescapeHTML, fatal=False)
            if data and isinstance(data, dict):
                extract_formats(data.get('sources'))
                if not duration:
                    duration = int_or_none(data.get('duration'))
                extract_subtitles(data.get('tracks'))

            if not subtitles and not automatic_captions:
                text_tracks = self._parse_json(
                    self._search_regex(
                        r'text-tracks=(["\'])(?P<data>\[.+?\])\1', view_html,
                        'text tracks', default='{}', group='data'), video_id,
                    transform_source=lambda s: js_to_json(unescapeHTML(s)),
                    fatal=False)
                extract_subtitles(text_tracks)

        # Last resort: build formats straight from the 'outputs' renditions.
        if not formats and outputs:
            for format_id, output in outputs.items():
                f = extract_output_format(output, format_id)
                if f.get('url'):
                    formats.append(f)

        if not formats and asset.get('course_is_drmed'):
            self.report_drm(video_id)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
            'subtitles': subtitles,
            'automatic_captions': automatic_captions,
        }


class UdemyCourseIE(UdemyIE):  # XXX: Do not subclass from concrete IE
    """Playlist extractor: expands a Udemy course into its video lectures."""

    IE_NAME = 'udemy:course'
    _VALID_URL = r'https?://(?:[^/]+\.)?udemy\.com/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://www.udemy.com/java-tutorial/',
        'only_matching': True,
    }, {
        'url': 'https://wipro.udemy.com/java-tutorial/',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Defer to UdemyIE for anything that looks like a single lecture URL.
        return False if UdemyIE.suitable(url) else super().suitable(url)

    def _real_extract(self, url):
        course_path = self._match_id(url)
        webpage = self._download_webpage(url, course_path)
        course_id, title = self._extract_course_info(webpage, course_path)
        self._enroll_course(url, webpage, course_id)
        response = self._download_json(
            f'https://www.udemy.com/api-2.0/courses/{course_id}/cached-subscriber-curriculum-items',
            course_id, 'Downloading course curriculum', query={
                'fields[chapter]': 'title,object_index',
                'fields[lecture]': 'title,asset',
                'page_size': '1000',
            })
        entries = []
        # Chapters and lectures arrive interleaved; chapter entries set the
        # context applied to subsequent lecture entries.
        chapter, chapter_number = [None] * 2
        for entry in response['results']:
            clazz = entry.get('_class')
            if clazz == 'lecture':
                asset = entry.get('asset')
                if isinstance(asset, dict):
                    asset_type = asset.get('asset_type') or asset.get('assetType')
                    if asset_type != 'Video':
                        continue
                lecture_id = entry.get('id')
                if lecture_id:
                    entry = {
                        '_type': 'url_transparent',
                        'url': smuggle_url(
                            # Smuggle course_id so UdemyIE skips re-resolving it.
                            f'https://www.udemy.com/{course_path}/learn/v4/t/lecture/{entry["id"]}',
                            {'course_id': course_id}),
                        'title': entry.get('title'),
                        'ie_key': UdemyIE.ie_key(),
                    }
                    if chapter_number:
                        entry['chapter_number'] = chapter_number
                    if chapter:
                        entry['chapter'] = chapter
                    entries.append(entry)
            elif clazz == 'chapter':
                chapter_number = entry.get('object_index')
                chapter = entry.get('title')
        return self.playlist_result(entries, course_id, title)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/minds.py
yt_dlp/extractor/minds.py
from .common import InfoExtractor
from ..utils import (
    clean_html,
    format_field,
    int_or_none,
    str_or_none,
    strip_or_none,
)


class MindsBaseIE(InfoExtractor):
    """Shared API plumbing for minds.com extractors."""

    _VALID_URL_BASE = r'https?://(?:www\.)?minds\.com/'

    def _call_api(self, path, video_id, resource, query=None):
        # Forwards the XSRF token cookie back as a header, as the site does.
        api_url = 'https://www.minds.com/api/' + path
        token = self._get_cookies(api_url).get('XSRF-TOKEN')
        return self._download_json(
            api_url, video_id, f'Downloading {resource} JSON metadata', headers={
                'Referer': 'https://www.minds.com/',
                'X-XSRF-TOKEN': token.value if token else '',
            }, query=query)


class MindsIE(MindsBaseIE):
    """Extractor for individual minds.com media/newsfeed/archive entities."""

    IE_NAME = 'minds'
    _VALID_URL = MindsBaseIE._VALID_URL_BASE + r'(?:media|newsfeed|archive/view)/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'https://www.minds.com/media/100000000000086822',
        'md5': '215a658184a419764852239d4970b045',
        'info_dict': {
            'id': '100000000000086822',
            'ext': 'mp4',
            'title': 'Minds intro sequence',
            'thumbnail': r're:https?://.+\.png',
            'uploader_id': 'ottman',
            'upload_date': '20130524',
            'timestamp': 1369404826,
            'uploader': 'Bill Ottman',
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'tags': ['animation'],
            'comment_count': int,
            'license': 'attribution-cc',
        },
    }, {
        # entity.type == 'activity' and empty title
        'url': 'https://www.minds.com/newsfeed/798025111988506624',
        'md5': 'b2733a74af78d7fd3f541c4cbbaa5950',
        'info_dict': {
            'id': '798022190320226304',
            'ext': 'mp4',
            'title': '798022190320226304',
            'uploader': 'ColinFlaherty',
            'upload_date': '20180111',
            'timestamp': 1515639316,
            'uploader_id': 'ColinFlaherty',
        },
    }, {
        'url': 'https://www.minds.com/archive/view/715172106794442752',
        'only_matching': True,
    }, {
        # youtube perma_url
        'url': 'https://www.minds.com/newsfeed/1197131838022602752',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        entity_id = self._match_id(url)
        entity = self._call_api(
            'v1/entities/entity/' + entity_id, entity_id, 'entity')['entity']
        if entity.get('type') == 'activity':
            if entity.get('custom_type') == 'video':
                # Activities wrap the actual video entity.
                video_id = entity['entity_guid']
            else:
                # Non-video activity (e.g. an embedded external link):
                # delegate to whatever extractor matches the permalink.
                return self.url_result(entity['perma_url'])
        else:
            assert entity['subtype'] == 'video'
            video_id = entity_id
        # 1080p and webm formats available only on the sources array
        video = self._call_api(
            'v2/media/video/' + video_id, video_id, 'video')
        formats = []
        for source in (video.get('sources') or []):
            src = source.get('src')
            if not src:
                continue
            formats.append({
                'format_id': source.get('label'),
                'height': int_or_none(source.get('size')),
                'url': src,
            })
        # Prefer the entity embedded in the video response, falling back to
        # the one fetched first.
        entity = video.get('entity') or entity
        owner = entity.get('ownerObj') or {}
        uploader_id = owner.get('username')
        tags = entity.get('tags')
        if tags and isinstance(tags, str):
            tags = [tags]
        thumbnail = None
        poster = video.get('poster') or entity.get('thumbnail_src')
        if poster:
            # Resolve redirects so the final thumbnail URL is recorded.
            urlh = self._request_webpage(poster, video_id, fatal=False)
            if urlh:
                thumbnail = urlh.url
        return {
            'id': video_id,
            'title': entity.get('title') or video_id,
            'formats': formats,
            'description': clean_html(entity.get('description')) or None,
            'license': str_or_none(entity.get('license')),
            'timestamp': int_or_none(entity.get('time_created')),
            'uploader': strip_or_none(owner.get('name')),
            'uploader_id': uploader_id,
            'uploader_url': format_field(uploader_id, None, 'https://www.minds.com/%s'),
            'view_count': int_or_none(entity.get('play:count')),
            'like_count': int_or_none(entity.get('thumbs:up:count')),
            'dislike_count': int_or_none(entity.get('thumbs:down:count')),
            'tags': tags,
            'comment_count': int_or_none(entity.get('comments:count')),
            'thumbnail': thumbnail,
        }


class MindsFeedBaseIE(MindsBaseIE):
    """Base for feed (channel/group) playlist extractors."""

    _PAGE_SIZE = 150

    def _entries(self, feed_id):
        # Cursor-paginated: each response's 'load-next' timestamp seeds the
        # next request; a short page or empty cursor ends iteration.
        query = {'limit': self._PAGE_SIZE, 'sync': 1}
        i = 1
        while True:
            data = self._call_api(
                f'v2/feeds/container/{feed_id}/videos',
                feed_id, f'page {i}', query)
            entities = data.get('entities') or []
            for entity in entities:
                guid = entity.get('guid')
                if not guid:
                    continue
                yield self.url_result(
                    'https://www.minds.com/newsfeed/' + guid,
                    MindsIE.ie_key(), guid)
            query['from_timestamp'] = data['load-next']
            if not (query['from_timestamp'] and len(entities) == self._PAGE_SIZE):
                break
            i += 1

    def _real_extract(self, url):
        feed_id = self._match_id(url)
        feed = self._call_api(
            f'v1/{self._FEED_PATH}/{feed_id}',
            feed_id, self._FEED_TYPE)[self._FEED_TYPE]
        return self.playlist_result(
            self._entries(feed['guid']), feed_id,
            strip_or_none(feed.get('name')),
            feed.get('briefdescription'))


class MindsChannelIE(MindsFeedBaseIE):
    _FEED_TYPE = 'channel'
    IE_NAME = 'minds:' + _FEED_TYPE
    # Negative lookahead keeps this from shadowing the other minds.com routes.
    _VALID_URL = MindsBaseIE._VALID_URL_BASE + r'(?!(?:newsfeed|media|api|archive|groups)/)(?P<id>[^/?&#]+)'
    _FEED_PATH = 'channel'
    _TEST = {
        'url': 'https://www.minds.com/ottman',
        'info_dict': {
            'id': 'ottman',
            'title': 'Bill Ottman',
            'description': 'Co-creator & CEO @minds',
        },
        'playlist_mincount': 54,
    }


class MindsGroupIE(MindsFeedBaseIE):
    _FEED_TYPE = 'group'
    IE_NAME = 'minds:' + _FEED_TYPE
    _VALID_URL = MindsBaseIE._VALID_URL_BASE + r'groups/profile/(?P<id>[0-9]+)'
    _FEED_PATH = 'groups/group'
    _TEST = {
        'url': 'https://www.minds.com/groups/profile/785582576369672204/feed/videos',
        'info_dict': {
            'id': '785582576369672204',
            'title': 'Cooking Videos',
        },
        'playlist_mincount': 1,
    }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/sztvhu.py
yt_dlp/extractor/sztvhu.py
from .common import InfoExtractor class SztvHuIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:www\.)?sztv\.hu|www\.tvszombathely\.hu)/(?:[^/]+)/.+-(?P<id>[0-9]+)' _TEST = { 'url': 'http://sztv.hu/hirek/cserkeszek-nepszerusitettek-a-kornyezettudatos-eletmodot-a-savaria-teren-20130909', 'md5': 'a6df607b11fb07d0e9f2ad94613375cb', 'info_dict': { 'id': '20130909', 'ext': 'mp4', 'title': 'Cserkészek népszerűsítették a környezettudatos életmódot a Savaria téren', 'description': 'A zöld nap játékos ismeretterjesztő programjait a Magyar Cserkész Szövetség szervezte, akik az ország nyolc városában adják át tudásukat az érdeklődőknek. A PET...', }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) video_file = self._search_regex( r'file: "...:(.*?)",', webpage, 'video file') title = self._html_search_regex( r'<meta name="title" content="([^"]*?) - [^-]*? - [^-]*?"', webpage, 'video title') description = self._html_search_regex( r'<meta name="description" content="([^"]*)"/>', webpage, 'video description', fatal=False) thumbnail = self._og_search_thumbnail(webpage) video_url = 'http://media.sztv.hu/vod/' + video_file return { 'id': video_id, 'url': video_url, 'title': title, 'description': description, 'thumbnail': thumbnail, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/gmanetwork.py
yt_dlp/extractor/gmanetwork.py
from .common import InfoExtractor
from .dailymotion import DailymotionIE
from .youtube import YoutubeIE


class GMANetworkVideoIE(InfoExtractor):
    """Extractor for gmanetwork.com full-episode pages.

    All playback is hosted off-site: the page either embeds a YouTube ID
    directly, or an API lookup yields a YouTube or Dailymotion URL, so every
    path ends in a url_result hand-off.
    """

    _VALID_URL = r'https?://(?:www)\.gmanetwork\.com/(?:\w+/){3}(?P<id>\d+)/(?P<display_id>[\w-]+)/video'
    _TESTS = [{
        'url': 'https://www.gmanetwork.com/fullepisodes/home/running_man_philippines/168677/running-man-philippines-catch-the-thief-full-chapter-2/video?section=home',
        'info_dict': {
            'id': '28BqW0AXPe0',
            'ext': 'mp4',
            'upload_date': '20220919',
            'uploader_url': 'http://www.youtube.com/channel/UChsoPNR5x-wdSO2GrOSIWqQ',
            'like_count': int,
            'view_count': int,
            'uploader': 'YoüLOL',
            'channel_id': 'UChsoPNR5x-wdSO2GrOSIWqQ',
            'duration': 5313,
            'comment_count': int,
            'tags': 'count:22',
            'uploader_id': 'UChsoPNR5x-wdSO2GrOSIWqQ',
            'title': 'Running Man Philippines: Catch the Thief (FULL CHAPTER 2)',
            'channel_url': 'https://www.youtube.com/channel/UChsoPNR5x-wdSO2GrOSIWqQ',
            'thumbnail': 'https://i.ytimg.com/vi/28BqW0AXPe0/maxresdefault.jpg',
            'release_timestamp': 1663594212,
            'age_limit': 0,
            'channel_follower_count': int,
            'categories': ['Entertainment'],
            'description': 'md5:811bdcea74f9c48051824e494756e926',
            'live_status': 'not_live',
            'playable_in_embed': True,
            'channel': 'YoüLOL',
            'availability': 'public',
            'release_date': '20220919',
        },
    }, {
        'url': 'https://www.gmanetwork.com/fullepisodes/home/more_than_words/87059/more-than-words-full-episode-80/video?section=home',
        'info_dict': {
            'id': 'yiDOExw2aSA',
            'ext': 'mp4',
            'live_status': 'not_live',
            'channel': 'GMANetwork',
            'like_count': int,
            'channel_follower_count': int,
            'description': 'md5:6d00cd658394fa1a5071200d3ed4be05',
            'duration': 1419,
            'age_limit': 0,
            'comment_count': int,
            'upload_date': '20181003',
            'thumbnail': 'https://i.ytimg.com/vi_webp/yiDOExw2aSA/maxresdefault.webp',
            'availability': 'public',
            'playable_in_embed': True,
            'channel_id': 'UCKL5hAuzgFQsyrsQKgU0Qng',
            'title': 'More Than Words: Full Episode 80 (Finale)',
            'uploader_id': 'GMANETWORK',
            'categories': ['Entertainment'],
            'uploader': 'GMANetwork',
            'channel_url': 'https://www.youtube.com/channel/UCKL5hAuzgFQsyrsQKgU0Qng',
            'tags': 'count:29',
            'view_count': int,
            'uploader_url': 'http://www.youtube.com/user/GMANETWORK',
        },
    }]

    def _real_extract(self, url):
        content_id, display_id = self._match_valid_url(url).group('id', 'display_id')
        webpage = self._download_webpage(url, display_id)
        # webpage route
        # Fast path: the page itself declares a YouTube video ID.
        youtube_id = self._search_regex(
            r'var\s*YOUTUBE_VIDEO\s*=\s*[\'"]+(?P<yt_id>[\w-]+)', webpage, 'youtube_id', fatal=False)
        if youtube_id:
            return self.url_result(youtube_id, YoutubeIE, youtube_id)

        # api call route
        # more info at https://aphrodite.gmanetwork.com/fullepisodes/assets/fullepisodes/js/dist/fullepisodes_video.js?v=1.1.11
        network_url = self._search_regex(
            r'NETWORK_URL\s*=\s*[\'"](?P<url>[^\'"]+)', webpage, 'network_url')
        json_data = self._download_json(f'{network_url}api/data/content/video/{content_id}', display_id)
        # 'video_file' entries are YouTube URLs; otherwise fall back to the
        # Dailymotion mirror.
        if json_data.get('video_file'):
            return self.url_result(json_data['video_file'], YoutubeIE, json_data['video_file'])
        else:
            return self.url_result(json_data['dailymotion_file'], DailymotionIE, json_data['dailymotion_file'])
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/hrefli.py
yt_dlp/extractor/hrefli.py
from .common import InfoExtractor class HrefLiRedirectIE(InfoExtractor): IE_NAME = 'href.li' IE_DESC = False # Do not list _VALID_URL = r'https?://href\.li/\?(?P<url>.+)' _TESTS = [{ 'url': 'https://href.li/?https://www.reddit.com/r/cats/comments/12bluel/my_cat_helps_me_with_water/?utm_source=share&utm_medium=android_app&utm_name=androidcss&utm_term=1&utm_content=share_button', 'only_matching': True, }] def _real_extract(self, url): return self.url_result(self._match_valid_url(url).group('url'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/stacommu.py
yt_dlp/extractor/stacommu.py
import time from .wrestleuniverse import WrestleUniverseBaseIE from ..utils import ( int_or_none, traverse_obj, url_basename, url_or_none, ) class StacommuBaseIE(WrestleUniverseBaseIE): _NETRC_MACHINE = 'stacommu' _API_HOST = 'api.stacommu.jp' _LOGIN_QUERY = {'key': 'AIzaSyCR9czxhH2eWuijEhTNWBZ5MCcOYEUTAhg'} _LOGIN_HEADERS = { 'Accept': '*/*', 'Content-Type': 'application/json', 'X-Client-Version': 'Chrome/JsCore/9.9.4/FirebaseCore-web', 'Referer': 'https://www.stacommu.jp/', 'Origin': 'https://www.stacommu.jp', } @WrestleUniverseBaseIE._TOKEN.getter def _TOKEN(self): if self._REAL_TOKEN and self._TOKEN_EXPIRY <= int(time.time()): self._refresh_token() return self._REAL_TOKEN def _get_formats(self, data, path, video_id=None): if not traverse_obj(data, path) and not data.get('canWatch') and not self._TOKEN: self.raise_login_required(method='password') return super()._get_formats(data, path, video_id) def _extract_hls_key(self, data, path, decrypt): encryption_data = traverse_obj(data, path) if traverse_obj(encryption_data, ('encryptType', {int})) == 0: return None return traverse_obj(encryption_data, {'key': ('key', {decrypt}), 'iv': ('iv', {decrypt})}) def _extract_vod(self, url): video_id = self._match_id(url) video_info = self._download_metadata( url, video_id, 'ja', ('dehydratedState', 'queries', 0, 'state', 'data')) hls_info, decrypt = self._call_encrypted_api( video_id, ':watch', 'stream information', data={'method': 1}) return { 'id': video_id, 'formats': self._get_formats(hls_info, ('protocolHls', 'url', {url_or_none}), video_id), 'hls_aes': self._extract_hls_key(hls_info, 'protocolHls', decrypt), **traverse_obj(video_info, { 'title': ('displayName', {str}), 'description': ('description', {str}), 'timestamp': ('watchStartTime', {int_or_none}), 'thumbnail': ('keyVisualUrl', {url_or_none}), 'cast': ('casts', ..., 'displayName', {str}), 'duration': ('duration', {int}), }), } def _extract_ppv(self, url): video_id = self._match_id(url) video_info = 
self._call_api(video_id, msg='video information', query={'al': 'ja'}, auth=False) hls_info, decrypt = self._call_encrypted_api( video_id, ':watchArchive', 'stream information', data={'method': 1}) formats = self._get_formats(hls_info, ('hls', 'urls', ..., {url_or_none}), video_id) for f in formats: # bitrates are exaggerated in PPV playlists, so avoid wrong/huge filesize_approx values if f.get('tbr'): f['tbr'] = int(f['tbr'] / 2.5) # prefer variants with the same basename as the master playlist to avoid partial streams f['format_id'] = url_basename(f['url']).partition('.')[0] if not f['format_id'].startswith(url_basename(f['manifest_url']).partition('.')[0]): f['preference'] = -10 return { 'id': video_id, 'formats': formats, 'hls_aes': self._extract_hls_key(hls_info, 'hls', decrypt), **traverse_obj(video_info, { 'title': ('displayName', {str}), 'timestamp': ('startTime', {int_or_none}), 'thumbnail': ('keyVisualUrl', {url_or_none}), 'duration': ('duration', {int_or_none}), }), } class StacommuVODIE(StacommuBaseIE): _VALID_URL = r'https?://www\.stacommu\.jp/(?:en/)?videos/episodes/(?P<id>[\da-zA-Z]+)' _TESTS = [{ # not encrypted 'url': 'https://www.stacommu.jp/videos/episodes/aXcVKjHyAENEjard61soZZ', 'info_dict': { 'id': 'aXcVKjHyAENEjard61soZZ', 'ext': 'mp4', 'title': 'スタコミュAWARDの裏側、ほぼ全部見せます!〜晴れ舞台の直前ドキドキ編〜', 'description': 'md5:6400275c57ae75c06da36b06f96beb1c', 'timestamp': 1679652000, 'upload_date': '20230324', 'thumbnail': 'https://image.stacommu.jp/6eLobQan8PFtBoU4RL4uGg/6eLobQan8PFtBoU4RL4uGg', 'cast': 'count:11', 'duration': 250, }, 'params': { 'skip_download': 'm3u8', }, }, { # encrypted; requires a premium account 'url': 'https://www.stacommu.jp/videos/episodes/3hybMByUvzMEqndSeu5LpD', 'info_dict': { 'id': '3hybMByUvzMEqndSeu5LpD', 'ext': 'mp4', 'title': 'スタプラフェス2023〜裏側ほぼ全部見せます〜#10', 'description': 'md5:85494488ccf1dfa1934accdeadd7b340', 'timestamp': 1682506800, 'upload_date': '20230426', 'thumbnail': 
'https://image.stacommu.jp/eMdXtEefR4kEyJJMpAFi7x/eMdXtEefR4kEyJJMpAFi7x', 'cast': 'count:55', 'duration': 312, 'hls_aes': { 'key': '6bbaf241b8e1fd9f59ecf546a70e4ae7', 'iv': '1fc9002a23166c3bb1d240b953d09de9', }, }, 'params': { 'skip_download': 'm3u8', }, }, { 'url': 'https://www.stacommu.jp/en/videos/episodes/aXcVKjHyAENEjard61soZZ', 'only_matching': True, }] _API_PATH = 'videoEpisodes' def _real_extract(self, url): return self._extract_vod(url) class StacommuLiveIE(StacommuBaseIE): _VALID_URL = r'https?://www\.stacommu\.jp/(?:en/)?live/(?P<id>[\da-zA-Z]+)' _TESTS = [{ 'url': 'https://www.stacommu.jp/live/d2FJ3zLnndegZJCAEzGM3m', 'info_dict': { 'id': 'd2FJ3zLnndegZJCAEzGM3m', 'ext': 'mp4', 'title': '仲村悠菜 2023/05/04', 'timestamp': 1683195647, 'upload_date': '20230504', 'thumbnail': 'https://image.stacommu.jp/pHGF57SPEHE2ke83FS92FN/pHGF57SPEHE2ke83FS92FN', 'duration': 5322, 'hls_aes': { 'key': 'efbb3ec0b8246f61adf1764c5a51213a', 'iv': '80621d19a1f19167b64cedb415b05d1c', }, }, 'params': { 'skip_download': 'm3u8', }, }, { 'url': 'https://www.stacommu.jp/en/live/d2FJ3zLnndegZJCAEzGM3m', 'only_matching': True, }] _API_PATH = 'events' def _real_extract(self, url): return self._extract_ppv(url) class TheaterComplexTownBaseIE(StacommuBaseIE): _NETRC_MACHINE = 'theatercomplextown' _API_HOST = 'api.theater-complex.town' _LOGIN_QUERY = {'key': 'AIzaSyAgNCqToaIz4a062EeIrkhI_xetVfAOrfc'} _LOGIN_HEADERS = { 'Accept': '*/*', 'Content-Type': 'application/json', 'X-Client-Version': 'Chrome/JsCore/9.23.0/FirebaseCore-web', 'Referer': 'https://www.theater-complex.town/', 'Origin': 'https://www.theater-complex.town', } class TheaterComplexTownVODIE(TheaterComplexTownBaseIE): _VALID_URL = r'https?://(?:www\.)?theater-complex\.town/(?:(?:en|ja)/)?videos/episodes/(?P<id>\w+)' IE_NAME = 'theatercomplextown:vod' _TESTS = [{ 'url': 'https://www.theater-complex.town/videos/episodes/hoxqidYNoAn7bP92DN6p78', 'info_dict': { 'id': 'hoxqidYNoAn7bP92DN6p78', 'ext': 'mp4', 'title': '演劇ドラフトグランプリ2023 
劇団『恋のぼり』〜劇団名決定秘話ラジオ', 'description': 'md5:a7e2e9cf570379ea67fb630f345ff65d', 'cast': ['玉城 裕規', '石川 凌雅'], 'thumbnail': 'https://image.theater-complex.town/5URnXX6KCeDysuFrPkP38o/5URnXX6KCeDysuFrPkP38o', 'upload_date': '20231103', 'timestamp': 1699016400, 'duration': 868, }, 'params': { 'skip_download': 'm3u8', }, }, { 'url': 'https://www.theater-complex.town/en/videos/episodes/6QT7XYwM9dJz5Gf9VB6K5y', 'only_matching': True, }, { 'url': 'https://www.theater-complex.town/ja/videos/episodes/hoxqidYNoAn7bP92DN6p78', 'only_matching': True, }] _API_PATH = 'videoEpisodes' def _real_extract(self, url): return self._extract_vod(url) class TheaterComplexTownPPVIE(TheaterComplexTownBaseIE): _VALID_URL = r'https?://(?:www\.)?theater-complex\.town/(?:(?:en|ja)/)?(?:ppv|live)/(?P<id>\w+)' IE_NAME = 'theatercomplextown:ppv' _TESTS = [{ 'url': 'https://www.theater-complex.town/ppv/wytW3X7khrjJBUpKuV3jen', 'info_dict': { 'id': 'wytW3X7khrjJBUpKuV3jen', 'ext': 'mp4', 'title': 'BREAK FREE STARS 11月5日(日)12:30千秋楽公演', 'thumbnail': 'https://image.theater-complex.town/5GWEB31JcTUfjtgdeV5t6o/5GWEB31JcTUfjtgdeV5t6o', 'upload_date': '20231105', 'timestamp': 1699155000, 'duration': 8378, }, 'params': { 'skip_download': 'm3u8', }, }, { 'url': 'https://www.theater-complex.town/en/ppv/wytW3X7khrjJBUpKuV3jen', 'only_matching': True, }, { 'url': 'https://www.theater-complex.town/ja/ppv/qwUVmLmGEiZ3ZW6it9uGys', 'only_matching': True, }, { 'url': 'https://www.theater-complex.town/en/live/79akNM7bJeD5Fi9EP39aDp', 'only_matching': True, }] _API_PATH = 'events' def _real_extract(self, url): return self._extract_ppv(url)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/parlview.py
yt_dlp/extractor/parlview.py
import re from .common import InfoExtractor from ..utils import parse_duration, parse_iso8601, url_or_none from ..utils.traversal import traverse_obj class ParlviewIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?aph\.gov\.au/News_and_Events/Watch_Read_Listen/ParlView/video/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://www.aph.gov.au/News_and_Events/Watch_Read_Listen/ParlView/video/3406614', 'info_dict': { 'id': '3406614', 'ext': 'mp4', 'title': 'Senate Chamber', 'description': 'Official Recording of Senate Proceedings from the Australian Parliament', 'thumbnail': 'https://aphbroadcasting-prod.z01.azurefd.net/vod-storage/vod-logos/SenateParlview06.jpg', 'upload_date': '20250325', 'duration': 17999, 'timestamp': 1742939400, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.aph.gov.au/News_and_Events/Watch_Read_Listen/ParlView/video/SV1394.dv', 'info_dict': { 'id': 'SV1394.dv', 'ext': 'mp4', 'title': 'Senate Select Committee on Uranium Mining and Milling [Part 1]', 'description': 'Official Recording of Senate Committee Proceedings from the Australian Parliament', 'thumbnail': 'https://aphbroadcasting-prod.z01.azurefd.net/vod-storage/vod-logos/CommitteeThumbnail06.jpg', 'upload_date': '19960822', 'duration': 14765, 'timestamp': 840754200, }, 'params': { 'skip_download': True, }, }] def _real_extract(self, url): video_id = self._match_id(url) video_details = self._download_json( f'https://vodapi.aph.gov.au/api/search/parlview/{video_id}', video_id)['videoDetails'] formats, subtitles = self._extract_m3u8_formats_and_subtitles( video_details['files']['file']['url'], video_id, 'mp4') DURATION_RE = re.compile(r'(?P<duration>\d+:\d+:\d+):\d+') return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, **traverse_obj(video_details, { 'title': (('parlViewTitle', 'title'), {str}, any), 'description': ('parlViewDescription', {str}), 'duration': ('files', 'file', 'duration', {DURATION_RE.fullmatch}, 'duration', {parse_duration}), 'timestamp': 
('recordingFrom', {parse_iso8601}), 'thumbnail': ('thumbUrl', {url_or_none}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/vodpl.py
yt_dlp/extractor/vodpl.py
from .onet import OnetBaseIE class VODPlIE(OnetBaseIE): _VALID_URL = r'https?://vod\.pl/(?:[^/]+/)+(?P<id>[0-9a-zA-Z]+)' _TESTS = [{ 'url': 'https://vod.pl/filmy/chlopaki-nie-placza/3ep3jns', 'md5': 'a7dc3b2f7faa2421aefb0ecaabf7ec74', 'info_dict': { 'id': '3ep3jns', 'ext': 'mp4', 'title': 'Chłopaki nie płaczą', 'description': 'md5:f5f03b84712e55f5ac9f0a3f94445224', 'timestamp': 1463415154, 'duration': 5765, 'upload_date': '20160516', }, }, { 'url': 'https://vod.pl/seriale/belfer-na-planie-praca-kamery-online/2c10heh', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) info_dict = self._extract_from_id(self._search_mvp_id(webpage), webpage) info_dict['id'] = video_id return info_dict
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/aliexpress.py
yt_dlp/extractor/aliexpress.py
from .common import InfoExtractor from ..utils import ( float_or_none, try_get, ) class AliExpressLiveIE(InfoExtractor): _VALID_URL = r'https?://live\.aliexpress\.com/live/(?P<id>\d+)' _TEST = { 'url': 'https://live.aliexpress.com/live/2800002704436634', 'md5': 'e729e25d47c5e557f2630eaf99b740a5', 'info_dict': { 'id': '2800002704436634', 'ext': 'mp4', 'title': 'CASIMA7.22', 'thumbnail': r're:https?://.*\.jpg', 'uploader': 'CASIMA Official Store', 'timestamp': 1500717600, 'upload_date': '20170722', }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) data = self._parse_json( self._search_regex( r'(?s)runParams\s*=\s*({.+?})\s*;?\s*var', webpage, 'runParams'), video_id) title = data['title'] formats = self._extract_m3u8_formats( data['replyStreamUrl'], video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls') return { 'id': video_id, 'title': title, 'thumbnail': data.get('coverUrl'), 'uploader': try_get( data, lambda x: x['followBar']['name'], str), 'timestamp': float_or_none(data.get('startTimeLong'), scale=1000), 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/hearthisat.py
yt_dlp/extractor/hearthisat.py
from .common import InfoExtractor from ..utils import ( KNOWN_EXTENSIONS, determine_ext, str_to_int, ) class HearThisAtIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?hearthis\.at/(?P<artist>[^/?#]+)/(?P<title>[\w.-]+)' _PLAYLIST_URL = 'https://hearthis.at/playlist.php' _TESTS = [{ 'url': 'https://hearthis.at/moofi/dr-kreep', 'md5': 'ab6ec33c8fed6556029337c7885eb4e0', 'info_dict': { 'id': '150939', 'display_id': 'moofi - dr-kreep', 'ext': 'wav', 'title': 'Moofi - Dr. Kreep', 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1421564134, 'description': 'md5:1adb0667b01499f9d27e97ddfd53852a', 'upload_date': '20150118', 'view_count': int, 'duration': 70, 'genres': ['Experimental'], }, }, { # 'download' link redirects to the original webpage 'url': 'https://hearthis.at/twitchsf/dj-jim-hopkins-totally-bitchin-80s-dance-mix/', 'md5': '5980ceb7c461605d30f1f039df160c6e', 'info_dict': { 'id': '811296', 'display_id': 'twitchsf - dj-jim-hopkins-totally-bitchin-80s-dance-mix', 'ext': 'mp3', 'title': 'TwitchSF - DJ Jim Hopkins - Totally Bitchin\' 80\'s Dance Mix!', 'description': 'md5:ef26815ca8f483272a87b137ff175be2', 'upload_date': '20160328', 'timestamp': 1459186146, 'thumbnail': r're:^https?://.*\.jpg$', 'view_count': int, 'duration': 4360, 'genres': ['Dance'], }, }, { 'url': 'https://hearthis.at/tindalos/0001-tindalos-gnrique/eQd/', 'md5': 'cd08e51911f147f6da2d9678905b0bd9', 'info_dict': { 'id': '2685222', 'ext': 'mp3', 'duration': 86, 'view_count': int, 'timestamp': 1545471670, 'display_id': 'tindalos - 0001-tindalos-gnrique', 'thumbnail': r're:^https?://.*\.jpg$', 'genres': ['Other'], 'title': 'Tindalos - Tindalos - générique n°1', 'description': '', 'upload_date': '20181222', }, }, { 'url': 'https://hearthis.at/sithi2/biochip-c-classics-set-wolle-xdp-tresor.core-special-tresor-globus-berlin-13.07.20011/', 'md5': 'b45ac60f0c8111eef6ddc10ec232e312', 'info_dict': { 'id': '7145959', 'ext': 'mp3', 'description': 'md5:d7ae36a453d78903f6b7ed6eb2fce1f2', 'duration': 8986, 
'thumbnail': r're:^https?://.*\.jpg$', 'title': 'md5:62669ce5b1b67f45c6f846033f37d3b9', 'timestamp': 1588699409, 'display_id': 'sithi2 - biochip-c-classics-set-wolle-xdp-tresor.core-special-tresor-globus-berlin-13.07.20011', 'view_count': int, 'upload_date': '20200505', 'genres': ['Other'], }, }] def _real_extract(self, url): m = self._match_valid_url(url) display_id = '{artist:s} - {title:s}'.format(**m.groupdict()) api_url = url.replace('www.', '').replace('hearthis.at', 'api-v2.hearthis.at') data_json = self._download_json(api_url, display_id) track_id = data_json.get('id') artist_json = data_json.get('user') title = '{} - {}'.format(artist_json.get('username'), data_json.get('title')) genre = data_json.get('genre') description = data_json.get('description') thumbnail = data_json.get('artwork_url') or data_json.get('thumb') view_count = str_to_int(data_json.get('playback_count')) duration = str_to_int(data_json.get('duration')) timestamp = data_json.get('release_timestamp') formats = [] mp3_url = data_json.get('stream_url') if mp3_url: formats.append({ 'format_id': 'mp3', 'vcodec': 'none', 'acodec': 'mp3', 'url': mp3_url, 'ext': 'mp3', }) if data_json.get('download_url'): download_url = data_json['download_url'] ext = determine_ext(data_json['download_filename']) if ext in KNOWN_EXTENSIONS: formats.append({ 'format_id': ext, 'vcodec': 'none', 'ext': ext, 'url': download_url, 'acodec': ext, 'quality': 2, # Usually better quality }) return { 'id': track_id, 'display_id': display_id, 'title': title, 'formats': formats, 'thumbnail': thumbnail, 'description': description, 'duration': duration, 'timestamp': timestamp, 'view_count': view_count, 'genre': genre, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/heise.py
yt_dlp/extractor/heise.py
import urllib.parse from .common import InfoExtractor from .kaltura import KalturaIE from .youtube import YoutubeIE from ..utils import ( NO_DEFAULT, determine_ext, int_or_none, parse_iso8601, smuggle_url, xpath_text, ) class HeiseIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?heise\.de/(?:[^/]+/)+[^/]+-(?P<id>[0-9]+)\.html' _TESTS = [{ # kaltura embed 'url': 'http://www.heise.de/video/artikel/Podcast-c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2404147.html', 'info_dict': { 'id': '1_kkrq94sm', 'ext': 'mp4', 'title': "Podcast: c't uplink 3.3 – Owncloud / Tastaturen / Peilsender Smartphone", 'timestamp': 1512734959, 'upload_date': '20171208', 'description': 'md5:c934cbfb326c669c2bcabcbe3d3fcd20', 'thumbnail': 're:^https?://.*/thumbnail/.*', 'duration': 2845, 'view_count': int, }, 'params': { 'skip_download': True, }, }, { # YouTube embed 'url': 'http://www.heise.de/newsticker/meldung/Netflix-In-20-Jahren-vom-Videoverleih-zum-TV-Revolutionaer-3814130.html', 'md5': 'e403d2b43fea8e405e88e3f8623909f1', 'info_dict': { 'id': '6kmWbXleKW4', 'ext': 'mp4', 'title': 'Neu im September 2017 | Netflix', 'description': 'md5:d6852d1f96bb80760608eed3b907437c', 'upload_date': '20170830', 'uploader': 'Netflix Deutschland, Österreich und Schweiz', 'uploader_id': 'netflixdach', 'categories': ['Entertainment'], 'tags': 'count:27', 'age_limit': 0, 'availability': 'public', 'comment_count': int, 'channel_id': 'UCZqgRlLcvO3Fnx_npQJygcQ', 'thumbnail': 'https://i.ytimg.com/vi_webp/6kmWbXleKW4/maxresdefault.webp', 'uploader_url': 'http://www.youtube.com/user/netflixdach', 'playable_in_embed': True, 'live_status': 'not_live', 'channel_url': 'https://www.youtube.com/channel/UCZqgRlLcvO3Fnx_npQJygcQ', 'view_count': int, 'channel': 'Netflix Deutschland, Österreich und Schweiz', 'channel_follower_count': int, 'like_count': int, 'duration': 67, }, 'params': { 'skip_download': True, }, }, { 'url': 
'https://www.heise.de/video/artikel/nachgehakt-Wie-sichert-das-c-t-Tool-Restric-tor-Windows-10-ab-3700244.html', 'info_dict': { 'id': '1_ntrmio2s', 'ext': 'mp4', 'title': "nachgehakt: Wie sichert das c't-Tool Restric'tor Windows 10 ab?", 'description': 'md5:47e8ffb6c46d85c92c310a512d6db271', 'timestamp': 1512470717, 'upload_date': '20171205', 'duration': 786, 'view_count': int, 'thumbnail': 're:^https?://.*/thumbnail/.*', }, 'params': { 'skip_download': True, }, }, { # FIXME: Video m3u8 fails to download; issue with Kaltura extractor 'url': 'https://www.heise.de/ct/artikel/c-t-uplink-20-8-Staubsaugerroboter-Xiaomi-Vacuum-2-AR-Brille-Meta-2-und-Android-rooten-3959893.html', 'info_dict': { 'id': '1_59mk80sf', 'ext': 'mp4', 'title': "c't uplink 20.8: Staubsaugerroboter Xiaomi Vacuum 2, AR-Brille Meta 2 und Android rooten", 'description': 'md5:f50fe044d3371ec73a8f79fcebd74afc', 'timestamp': 1517567237, 'upload_date': '20180202', }, 'params': { 'skip_download': True, }, }, { # videout 'url': 'https://www.heise.de/ct/artikel/c-t-uplink-3-8-Anonyme-SIM-Karten-G-Sync-Monitore-Citizenfour-2440327.html', 'info_dict': { 'id': '2440327', 'ext': 'mp4', 'title': 'c\'t uplink 3.8: Anonyme SIM-Karten, G-Sync-Monitore, Citizenfour', 'thumbnail': 'http://www.heise.de/imagine/yxM2qmol0xV3iFB7qFb70dGvXjc/gallery/', 'description': 'md5:fa164d8c8707dff124a9626d39205f5d', 'timestamp': 1414825200, 'upload_date': '20141101', }, }, { 'url': 'http://www.heise.de/ct/artikel/c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2403911.html', 'only_matching': True, }, { 'url': 'http://www.heise.de/newsticker/meldung/c-t-uplink-Owncloud-Tastaturen-Peilsender-Smartphone-2404251.html?wt_mc=rss.ho.beitrag.atom', 'only_matching': True, }, { 'url': 'http://www.heise.de/ct/ausgabe/2016-12-Spiele-3214137.html', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) def extract_title(default=NO_DEFAULT): title = 
self._html_search_meta( ('fulltitle', 'title'), webpage, default=None) if not title or title == "c't": title = self._search_regex( r'<div[^>]+class="videoplayerjw"[^>]+data-title="([^"]+)"', webpage, 'title', default=None) if not title: title = self._html_search_regex( r'<h1[^>]+\bclass=["\']article_page_title[^>]+>(.+?)<', webpage, 'title', default=default) return title title = extract_title(default=None) description = self._og_search_description( webpage, default=None) or self._html_search_meta( 'description', webpage) def _make_kaltura_result(kaltura_url): return { '_type': 'url_transparent', 'url': smuggle_url(kaltura_url, {'source_url': url}), 'ie_key': KalturaIE.ie_key(), 'title': title, 'description': description, } kaltura_url = KalturaIE._extract_url(webpage) if kaltura_url: return _make_kaltura_result(kaltura_url) kaltura_id = self._search_regex( r'entry-id=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 'kaltura id', default=None, group='id') if kaltura_id: return _make_kaltura_result(f'kaltura:2238431:{kaltura_id}') yt_urls = tuple(YoutubeIE._extract_embed_urls(url, webpage)) if yt_urls: return self.playlist_from_matches( yt_urls, video_id, title, ie=YoutubeIE.ie_key()) title = extract_title() api_params = urllib.parse.parse_qs( self._search_regex(r'/videout/feed\.json\?([^\']+)', webpage, 'feed params', default=None) or '') if not api_params or 'container' not in api_params or 'sequenz' not in api_params: container_id = self._search_regex( r'<div class="videoplayerjw"[^>]+data-container="([0-9]+)"', webpage, 'container ID') sequenz_id = self._search_regex( r'<div class="videoplayerjw"[^>]+data-sequenz="([0-9]+)"', webpage, 'sequenz ID') api_params = { 'container': container_id, 'sequenz': sequenz_id, } doc = self._download_xml( 'http://www.heise.de/videout/feed', video_id, query=api_params) formats = [] for source_node in doc.findall('.//{http://rss.jwpcdn.com/}source'): label = source_node.attrib['label'] height = int_or_none(self._search_regex( 
r'^(.*?_)?([0-9]+)p$', label, 'height', default=None)) video_url = source_node.attrib['file'] ext = determine_ext(video_url, '') formats.append({ 'url': video_url, 'format_note': label, 'format_id': f'{ext}_{label}', 'height': height, }) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': (xpath_text(doc, './/{http://rss.jwpcdn.com/}image') or self._og_search_thumbnail(webpage)), 'timestamp': parse_iso8601( self._html_search_meta('date', webpage)), 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/altcensored.py
yt_dlp/extractor/altcensored.py
import re from .archiveorg import ArchiveOrgIE from .common import InfoExtractor from ..utils import ( InAdvancePagedList, clean_html, int_or_none, orderedSet, str_to_int, urljoin, ) class AltCensoredIE(InfoExtractor): IE_NAME = 'altcensored' _VALID_URL = r'https?://(?:www\.)?altcensored\.com/(?:watch\?v=|embed/)(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://www.altcensored.com/watch?v=k0srjLSkga8', 'info_dict': { 'id': 'youtube-k0srjLSkga8', 'ext': 'webm', 'title': "QUELLES SONT LES CONSÉQUENCES DE L'HYPERSEXUALISATION DE LA SOCIÉTÉ ?", 'display_id': 'k0srjLSkga8.webm', 'release_date': '20180403', 'creators': ['Virginie Vota'], 'release_year': 2018, 'upload_date': '20230318', 'uploader': 'admin@altcensored.com', 'description': 'md5:0b38a8fc04103579d5c1db10a247dc30', 'timestamp': 1679161343, 'track': 'k0srjLSkga8', 'duration': 926.09, 'thumbnail': 'https://archive.org/download/youtube-k0srjLSkga8/youtube-k0srjLSkga8.thumbs/k0srjLSkga8_000925.jpg', 'view_count': int, 'categories': ['News & Politics'], }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) category = clean_html(self._html_search_regex( r'<a href="/category/\d+">([^<]+)</a>', webpage, 'category', default=None)) return { '_type': 'url_transparent', 'url': f'https://archive.org/details/youtube-{video_id}', 'ie_key': ArchiveOrgIE.ie_key(), 'view_count': str_to_int(self._html_search_regex( r'YouTube Views:(?:\s|&nbsp;)*([\d,]+)', webpage, 'view count', default=None)), 'categories': [category] if category else None, } class AltCensoredChannelIE(InfoExtractor): IE_NAME = 'altcensored:channel' _VALID_URL = r'https?://(?:www\.)?altcensored\.com/channel/(?!page|table)(?P<id>[^/?#]+)' _PAGE_SIZE = 24 _TESTS = [{ 'url': 'https://www.altcensored.com/channel/UCFPTO55xxHqFqkzRZHu4kcw', 'info_dict': { 'title': 'Virginie Vota', 'id': 'UCFPTO55xxHqFqkzRZHu4kcw', }, 'playlist_count': 85, }, { 'url': 'https://altcensored.com/channel/UC9CcJ96HKMWn0LZlcxlpFTw', 
'info_dict': { 'title': 'yukikaze775', 'id': 'UC9CcJ96HKMWn0LZlcxlpFTw', }, 'playlist_count': 4, }, { 'url': 'https://altcensored.com/channel/UCfYbb7nga6-icsFWWgS-kWw', 'info_dict': { 'title': 'Mister Metokur', 'id': 'UCfYbb7nga6-icsFWWgS-kWw', }, 'playlist_count': 121, }] def _real_extract(self, url): channel_id = self._match_id(url) webpage = self._download_webpage( url, channel_id, 'Download channel webpage', 'Unable to get channel webpage') title = self._html_search_meta('altcen_title', webpage, 'title', fatal=False) page_count = int_or_none(self._html_search_regex( r'<a[^>]+href="/channel/[\w-]+/page/(\d+)">(?:\1)</a>', webpage, 'page count', default='1')) def page_func(page_num): page_num += 1 webpage = self._download_webpage( f'https://altcensored.com/channel/{channel_id}/page/{page_num}', channel_id, note=f'Downloading page {page_num}') items = re.findall(r'<a[^>]+href="(/watch\?v=[^"]+)', webpage) return [self.url_result(urljoin('https://www.altcensored.com', path), AltCensoredIE) for path in orderedSet(items)] return self.playlist_result( InAdvancePagedList(page_func, page_count, self._PAGE_SIZE), playlist_id=channel_id, playlist_title=title)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/btvplus.py
yt_dlp/extractor/btvplus.py
from .common import InfoExtractor from ..utils import ( bug_reports_message, clean_html, get_element_by_class, js_to_json, mimetype2ext, strip_or_none, url_or_none, urljoin, ) from ..utils.traversal import traverse_obj class BTVPlusIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?btvplus\.bg/produkt/(?:predavaniya|seriali|novini)/(?P<id>\d+)' _TESTS = [{ 'url': 'https://btvplus.bg/produkt/predavaniya/67271/btv-reporterite/btv-reporterite-12-07-2025-g', 'info_dict': { 'ext': 'mp4', 'id': '67271', 'title': 'bTV Репортерите - 12.07.2025 г.', 'thumbnail': 'https://cdn.btv.bg/media/images/940x529/Jul2025/2113606319.jpg', }, }, { 'url': 'https://btvplus.bg/produkt/seriali/66942/sezon-2/plen-sezon-2-epizod-55', 'info_dict': { 'ext': 'mp4', 'id': '66942', 'title': 'Плен - сезон 2, епизод 55', 'thumbnail': 'https://cdn.btv.bg/media/images/940x529/Jun2025/2113595104.jpg', }, }, { 'url': 'https://btvplus.bg/produkt/novini/67270/btv-novinite-centralna-emisija-12-07-2025', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) player_url = self._search_regex( r'var\s+videoUrl\s*=\s*[\'"]([^\'"]+)[\'"]', webpage, 'player URL') player_config = self._download_json( urljoin('https://btvplus.bg', player_url), video_id)['config'] videojs_data = self._search_json( r'videojs\(["\'][^"\']+["\'],', player_config, 'videojs data', video_id, transform_source=js_to_json) formats = [] subtitles = {} for src in traverse_obj(videojs_data, ('sources', lambda _, v: url_or_none(v['src']))): ext = mimetype2ext(src.get('type')) if ext == 'm3u8': fmts, subs = self._extract_m3u8_formats_and_subtitles( src['src'], video_id, 'mp4', m3u8_id='hls', fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) else: self.report_warning(f'Unknown format type {ext}{bug_reports_message()}') return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, 'title': ( 
strip_or_none(self._og_search_title(webpage, default=None)) or clean_html(get_element_by_class('product-title', webpage))), 'thumbnail': self._og_search_thumbnail(webpage, default=None), 'description': self._og_search_description(webpage, default=None), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/xstream.py
yt_dlp/extractor/xstream.py
import re from .common import InfoExtractor from ..utils import ( find_xpath_attr, int_or_none, parse_iso8601, xpath_text, xpath_with_ns, ) class XstreamIE(InfoExtractor): _VALID_URL = r'''(?x) (?: xstream:| https?://frontend\.xstream\.(?:dk|net)/ ) (?P<partner_id>[^/]+) (?: :| /feed/video/\?.*?\bid= ) (?P<id>\d+) ''' _TESTS = [{ 'url': 'http://frontend.xstream.dk/btno/feed/video/?platform=web&id=86588', 'md5': 'd7d17e3337dc80de6d3a540aefbe441b', 'info_dict': { 'id': '86588', 'ext': 'mov', 'title': 'Otto Wollertsen', 'description': 'Vestlendingen Otto Fredrik Wollertsen', 'timestamp': 1430473209, 'upload_date': '20150501', }, }, { 'url': 'http://frontend.xstream.dk/ap/feed/video/?platform=web&id=21039', 'only_matching': True, }] def _extract_video_info(self, partner_id, video_id): data = self._download_xml( f'http://frontend.xstream.dk/{partner_id}/feed/video/?platform=web&id={video_id}', video_id) NS_MAP = { 'atom': 'http://www.w3.org/2005/Atom', 'xt': 'http://xstream.dk/', 'media': 'http://search.yahoo.com/mrss/', } entry = data.find(xpath_with_ns('./atom:entry', NS_MAP)) title = xpath_text( entry, xpath_with_ns('./atom:title', NS_MAP), 'title') description = xpath_text( entry, xpath_with_ns('./atom:summary', NS_MAP), 'description') timestamp = parse_iso8601(xpath_text( entry, xpath_with_ns('./atom:published', NS_MAP), 'upload date')) formats = [] media_group = entry.find(xpath_with_ns('./media:group', NS_MAP)) for media_content in media_group.findall(xpath_with_ns('./media:content', NS_MAP)): media_url = media_content.get('url') if not media_url: continue tbr = int_or_none(media_content.get('bitrate')) mobj = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>[^/]+))/(?P<playpath>.+)$', media_url) if mobj: formats.append({ 'url': mobj.group('url'), 'play_path': 'mp4:{}'.format(mobj.group('playpath')), 'app': mobj.group('app'), 'ext': 'flv', 'tbr': tbr, 'format_id': 'rtmp-%d' % tbr, }) else: formats.append({ 'url': media_url, 'tbr': tbr, }) link = find_xpath_attr( entry, 
xpath_with_ns('./atom:link', NS_MAP), 'rel', 'original') if link is not None: formats.append({ 'url': link.get('href'), 'format_id': link.get('rel'), 'quality': 1, }) thumbnails = [{ 'url': splash.get('url'), 'width': int_or_none(splash.get('width')), 'height': int_or_none(splash.get('height')), } for splash in media_group.findall(xpath_with_ns('./xt:splash', NS_MAP))] return { 'id': video_id, 'title': title, 'description': description, 'timestamp': timestamp, 'formats': formats, 'thumbnails': thumbnails, } def _real_extract(self, url): mobj = self._match_valid_url(url) partner_id = mobj.group('partner_id') video_id = mobj.group('id') return self._extract_video_info(partner_id, video_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/bitchute.py
yt_dlp/extractor/bitchute.py
import functools import json import re from .common import InfoExtractor from ..networking import HEADRequest from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, OnDemandPagedList, clean_html, determine_ext, format_field, get_element_by_class, get_elements_html_by_class, int_or_none, orderedSet, parse_count, parse_duration, parse_iso8601, url_or_none, urlencode_postdata, urljoin, ) from ..utils.traversal import traverse_obj class BitChuteIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:www|old)\.)?bitchute\.com/(?:video|embed|torrent/[^/?#]+)/(?P<id>[^/?#&]+)' _EMBED_REGEX = [rf'<(?:script|iframe)[^>]+\bsrc=(["\'])(?P<url>{_VALID_URL})'] _TESTS = [{ 'url': 'https://www.bitchute.com/video/UGlrF9o9b-Q/', 'md5': '7e427d7ed7af5a75b5855705ec750e2b', 'info_dict': { 'id': 'UGlrF9o9b-Q', 'ext': 'mp4', 'title': 'This is the first video on #BitChute !', 'description': 'md5:a0337e7b1fe39e32336974af8173a034', 'thumbnail': r're:https?://.+/.+\.jpg$', 'uploader': 'BitChute', 'upload_date': '20170103', 'uploader_url': 'https://www.bitchute.com/profile/I5NgtHZn9vPj/', 'channel': 'BitChute', 'channel_url': 'https://www.bitchute.com/channel/bitchute/', 'uploader_id': 'I5NgtHZn9vPj', 'channel_id': '1VBwRfyNcKdX', 'view_count': int, 'duration': 16.0, 'timestamp': 1483425443, }, }, { # test case: video with different channel and uploader 'url': 'https://www.bitchute.com/video/Yti_j9A-UZ4/', 'md5': 'f10e6a8e787766235946d0868703f1d0', 'info_dict': { 'id': 'Yti_j9A-UZ4', 'ext': 'mp4', 'title': 'Israel at War | Full Measure', 'description': 'md5:e60198b89971966d6030d22b3268f08f', 'thumbnail': r're:https?://.+/.+\.jpg$', 'uploader': 'sharylattkisson', 'upload_date': '20231106', 'uploader_url': 'https://www.bitchute.com/profile/9K0kUWA9zmd9/', 'channel': 'Full Measure with Sharyl Attkisson', 'channel_url': 'https://www.bitchute.com/channel/sharylattkisson/', 'uploader_id': '9K0kUWA9zmd9', 'channel_id': 'NpdxoCRv3ZLb', 'view_count': int, 'duration': 554.0, 
'timestamp': 1699296106, }, }, { # video not downloadable in browser, but we can recover it 'url': 'https://www.bitchute.com/video/2s6B3nZjAk7R/', 'md5': '05c12397d5354bf24494885b08d24ed1', 'info_dict': { 'id': '2s6B3nZjAk7R', 'ext': 'mp4', 'filesize': 71537926, 'title': 'STYXHEXENHAMMER666 - Election Fraud, Clinton 2020, EU Armies, and Gun Control', 'description': 'md5:2029c7c212ccd4b040f52bb2d036ef4e', 'thumbnail': r're:https?://.+/.+\.jpg$', 'uploader': 'BitChute', 'upload_date': '20181113', 'uploader_url': 'https://www.bitchute.com/profile/I5NgtHZn9vPj/', 'channel': 'BitChute', 'channel_url': 'https://www.bitchute.com/channel/bitchute/', 'uploader_id': 'I5NgtHZn9vPj', 'channel_id': '1VBwRfyNcKdX', 'view_count': int, 'duration': 1701.0, 'tags': ['bitchute'], 'timestamp': 1542130287, }, 'params': {'check_formats': None}, }, { 'url': 'https://www.bitchute.com/embed/lbb5G1hjPhw/', 'only_matching': True, }, { 'url': 'https://www.bitchute.com/torrent/Zee5BE49045h/szoMrox2JEI.webtorrent', 'only_matching': True, }, { 'url': 'https://old.bitchute.com/video/UGlrF9o9b-Q/', 'only_matching': True, }] _GEO_BYPASS = False _UPLOADER_URL_TMPL = 'https://www.bitchute.com/profile/%s/' _CHANNEL_URL_TMPL = 'https://www.bitchute.com/channel/%s/' def _check_format(self, video_url, video_id): urls = orderedSet( re.sub(r'(^https?://)(seed\d+)(?=\.bitchute\.com)', fr'\g<1>{host}', video_url) for host in (r'\g<2>', 'seed122', 'seed125', 'seed126', 'seed128', 'seed132', 'seed150', 'seed151', 'seed152', 'seed153', 'seed167', 'seed171', 'seed177', 'seed305', 'seed307', 'seedp29xb', 'zb10-7gsop1v78')) for url in urls: try: response = self._request_webpage( HEADRequest(url), video_id=video_id, note=f'Checking {url}') except ExtractorError as e: self.to_screen(f'{video_id}: URL is invalid, skipping: {e.cause}') continue return { 'url': url, 'filesize': int_or_none(response.headers.get('Content-Length')), } def _call_api(self, endpoint, data, display_id, fatal=True): note = 
endpoint.rpartition('/')[2] try: return self._download_json( f'https://api.bitchute.com/api/beta/{endpoint}', display_id, f'Downloading {note} API JSON', f'Unable to download {note} API JSON', data=json.dumps(data).encode(), headers={ 'Accept': 'application/json', 'Content-Type': 'application/json', }) except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 403: errors = '. '.join(traverse_obj(e.cause.response.read().decode(), ( {json.loads}, 'errors', lambda _, v: v['context'] == 'reason', 'message', {str}))) if errors and 'location' in errors: # Can always be fatal since the video/media call will reach this code first self.raise_geo_restricted(errors) if fatal: raise self.report_warning(e.msg) def _real_extract(self, url): video_id = self._match_id(url) data = {'video_id': video_id} media_url = self._call_api('video/media', data, video_id)['media_url'] formats = [] if determine_ext(media_url) == 'm3u8': formats.extend( self._extract_m3u8_formats(media_url, video_id, 'mp4', m3u8_id='hls', live=True)) else: if self.get_param('check_formats') is not False: if fmt := self._check_format(media_url, video_id): formats.append(fmt) else: formats.append({'url': media_url}) if not formats: self.raise_no_formats( 'Video is unavailable. 
Please make sure this video is playable in the browser ' 'before reporting this issue.', expected=True, video_id=video_id) video = self._call_api('video', data, video_id, fatal=False) channel = None if channel_id := traverse_obj(video, ('channel', 'channel_id', {str})): channel = self._call_api('channel', {'channel_id': channel_id}, video_id, fatal=False) return { **traverse_obj(video, { 'title': ('video_name', {str}), 'description': ('description', {str}), 'thumbnail': ('thumbnail_url', {url_or_none}), 'channel': ('channel', 'channel_name', {str}), 'channel_id': ('channel', 'channel_id', {str}), 'channel_url': ('channel', 'channel_url', {urljoin('https://www.bitchute.com/')}), 'uploader_id': ('profile_id', {str}), 'uploader_url': ('profile_id', {format_field(template=self._UPLOADER_URL_TMPL)}, filter), 'timestamp': ('date_published', {parse_iso8601}), 'duration': ('duration', {parse_duration}), 'tags': ('hashtags', ..., {str}, filter, all, filter), 'view_count': ('view_count', {int_or_none}), 'is_live': ('state_id', {lambda x: x == 'live'}), }), **traverse_obj(channel, { 'channel': ('channel_name', {str}), 'channel_id': ('channel_id', {str}), 'channel_url': ('url_slug', {format_field(template=self._CHANNEL_URL_TMPL)}, filter), 'uploader': ('profile_name', {str}), 'uploader_id': ('profile_id', {str}), 'uploader_url': ('profile_id', {format_field(template=self._UPLOADER_URL_TMPL)}, filter), }), 'id': video_id, 'formats': formats, } class BitChuteChannelIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:www|old)\.)?bitchute\.com/(?P<type>channel|playlist)/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://www.bitchute.com/channel/bitchute/', 'info_dict': { 'id': 'bitchute', 'title': 'BitChute', 'description': 'md5:2134c37d64fc3a4846787c402956adac', }, 'playlist': [ { 'md5': '7e427d7ed7af5a75b5855705ec750e2b', 'info_dict': { 'id': 'UGlrF9o9b-Q', 'ext': 'mp4', 'title': 'This is the first video on #BitChute !', 'description': 'md5:a0337e7b1fe39e32336974af8173a034', 
'thumbnail': r're:https?://.+/.+\.jpg$', 'uploader': 'BitChute', 'upload_date': '20170103', 'uploader_url': 'https://www.bitchute.com/profile/I5NgtHZn9vPj/', 'channel': 'BitChute', 'channel_url': 'https://www.bitchute.com/channel/bitchute/', 'duration': 16, 'view_count': int, 'uploader_id': 'I5NgtHZn9vPj', 'channel_id': '1VBwRfyNcKdX', 'timestamp': 1483425443, }, }, ], 'params': { 'skip_download': True, 'playlist_items': '-1', }, }, { 'url': 'https://www.bitchute.com/playlist/wV9Imujxasw9/', 'playlist_mincount': 20, 'info_dict': { 'id': 'wV9Imujxasw9', 'title': 'Bruce MacDonald and "The Light of Darkness"', 'description': 'md5:747724ef404eebdfc04277714f81863e', }, 'skip': '404 Not Found', }, { 'url': 'https://old.bitchute.com/playlist/wV9Imujxasw9/', 'only_matching': True, }] _TOKEN = 'zyG6tQcGPE5swyAEFLqKUwMuMMuF6IO2DZ6ZDQjGfsL0e4dcTLwqkTTul05Jdve7' PAGE_SIZE = 25 HTML_CLASS_NAMES = { 'channel': { 'container': 'channel-videos-container', 'title': 'channel-videos-title', 'description': 'channel-videos-text', }, 'playlist': { 'container': 'playlist-video', 'title': 'title', 'description': 'description', }, } @staticmethod def _make_url(playlist_id, playlist_type): return f'https://old.bitchute.com/{playlist_type}/{playlist_id}/' def _fetch_page(self, playlist_id, playlist_type, page_num): playlist_url = self._make_url(playlist_id, playlist_type) data = self._download_json( f'{playlist_url}extend/', playlist_id, f'Downloading page {page_num}', data=urlencode_postdata({ 'csrfmiddlewaretoken': self._TOKEN, 'name': '', 'offset': page_num * self.PAGE_SIZE, }), headers={ 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', 'Referer': playlist_url, 'X-Requested-With': 'XMLHttpRequest', 'Cookie': f'csrftoken={self._TOKEN}', }) if not data.get('success'): return classes = self.HTML_CLASS_NAMES[playlist_type] for video_html in get_elements_html_by_class(classes['container'], data.get('html')): video_id = self._search_regex( 
r'<a\s[^>]*\bhref=["\']/video/([^"\'/]+)', video_html, 'video id', default=None) if not video_id: continue yield self.url_result( f'https://www.bitchute.com/video/{video_id}', BitChuteIE, video_id, url_transparent=True, title=clean_html(get_element_by_class(classes['title'], video_html)), description=clean_html(get_element_by_class(classes['description'], video_html)), duration=parse_duration(get_element_by_class('video-duration', video_html)), view_count=parse_count(clean_html(get_element_by_class('video-views', video_html)))) def _real_extract(self, url): playlist_type, playlist_id = self._match_valid_url(url).group('type', 'id') webpage = self._download_webpage(self._make_url(playlist_id, playlist_type), playlist_id) page_func = functools.partial(self._fetch_page, playlist_id, playlist_type) return self.playlist_result( OnDemandPagedList(page_func, self.PAGE_SIZE), playlist_id, title=self._html_extract_title(webpage, default=None), description=self._html_search_meta( ('description', 'og:description', 'twitter:description'), webpage, default=None), playlist_count=int_or_none(self._html_search_regex( r'<span>(\d+)\s+videos?</span>', webpage, 'playlist count', default=None)))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/plyr.py
yt_dlp/extractor/plyr.py
import re from .common import InfoExtractor from .vimeo import VimeoIE class PlyrEmbedIE(InfoExtractor): _VALID_URL = False _WEBPAGE_TESTS = [{ # data-plyr-embed-id="https://player.vimeo.com/video/522319456/90e5c96063?dnt=1" 'url': 'https://www.dhm.de/zeughauskino/filmreihen/online-filmreihen/filme-des-marshall-plans/200000000-mouths/', 'info_dict': { 'id': '522319456', 'ext': 'mp4', 'title': '200.000.000 Mouths (1950–51)', 'uploader': 'Zeughauskino', 'uploader_url': '', 'comment_count': int, 'like_count': int, 'duration': 963, 'thumbnail': 'https://i.vimeocdn.com/video/1081797161-9f09ddb4b7faa86e834e006b8e4b9c2cbaa0baa7da493211bf0796ae133a5ab8-d', 'timestamp': 1615467405, 'upload_date': '20210311', 'release_timestamp': 1615467405, 'release_date': '20210311', }, 'params': {'skip_download': 'm3u8'}, 'expected_warnings': ['Failed to parse XML: not well-formed'], }, { # data-plyr-provider="vimeo" data-plyr-embed-id="803435276" 'url': 'https://www.inarcassa.it/', 'info_dict': { 'id': '803435276', 'ext': 'mp4', 'title': 'HOME_Moto_Perpetuo', 'uploader': 'Inarcassa', 'uploader_url': '', 'duration': 38, 'thumbnail': 'https://i.vimeocdn.com/video/1663734769-945ad7ffabb16dbca009c023fd1d7b36bdb426a3dbae8345ed758136fe28f89a-d', }, 'params': {'skip_download': 'm3u8'}, 'expected_warnings': ['Failed to parse XML: not well-formed'], }, { # data-plyr-embed-id="https://youtu.be/GF-BjYKoAqI" 'url': 'https://www.profile.nl', 'info_dict': { 'id': 'GF-BjYKoAqI', 'ext': 'mp4', 'title': 'PROFILE: Recruitment Profile', 'description': '', 'media_type': 'video', 'uploader': 'Profile Nederland', 'uploader_id': '@profilenederland', 'uploader_url': 'https://www.youtube.com/@profilenederland', 'channel': 'Profile Nederland', 'channel_id': 'UC9AUkB0Tv39-TBYjs05n3vg', 'channel_url': 'https://www.youtube.com/channel/UC9AUkB0Tv39-TBYjs05n3vg', 'channel_follower_count': int, 'view_count': int, 'like_count': int, 'age_limit': 0, 'duration': 39, 'thumbnail': 
'https://i.ytimg.com/vi/GF-BjYKoAqI/maxresdefault.jpg', 'categories': ['Autos & Vehicles'], 'tags': [], 'timestamp': 1675692990, 'upload_date': '20230206', 'playable_in_embed': True, 'availability': 'public', 'live_status': 'not_live', }, }, { # data-plyr-embed-id="B1TZV8rNZoc" data-plyr-provider="youtube" 'url': 'https://www.vnis.edu.vn', 'info_dict': { 'id': 'vnis.edu', 'title': 'VNIS Education - Master Agent các Trường hàng đầu Bắc Mỹ', 'description': 'md5:4dafcf7335bb018780e4426da8ab8e4e', 'age_limit': 0, 'thumbnail': 'https://vnis.edu.vn/wp-content/uploads/2021/05/ve-welcome-en.png', 'timestamp': 1753233356, 'upload_date': '20250723', }, 'playlist_count': 3, }] @classmethod def _extract_embed_urls(cls, url, webpage): plyr_embeds = re.finditer(r'''(?x) <div[^>]+(?: data-plyr-embed-id="(?P<id1>[^"]+)"[^>]+data-plyr-provider="(?P<provider1>[^"]+)"| data-plyr-provider="(?P<provider2>[^"]+)"[^>]+data-plyr-embed-id="(?P<id2>[^"]+)" )[^>]*>''', webpage) for mobj in plyr_embeds: embed_id = mobj.group('id1') or mobj.group('id2') provider = mobj.group('provider1') or mobj.group('provider2') if provider == 'vimeo': if not re.match(r'https?://', embed_id): embed_id = f'https://player.vimeo.com/video/{embed_id}' yield VimeoIE._smuggle_referrer(embed_id, url) elif provider == 'youtube': if not re.match(r'https?://', embed_id): embed_id = f'https://youtube.com/watch?v={embed_id}' yield embed_id
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/motorsport.py
yt_dlp/extractor/motorsport.py
import urllib.parse from .common import InfoExtractor class MotorsportIE(InfoExtractor): _WORKING = False IE_DESC = 'motorsport.com' _VALID_URL = r'https?://(?:www\.)?motorsport\.com/[^/?#]+/video/(?:[^/?#]+/)(?P<id>[^/]+)/?(?:$|[?#])' _TEST = { 'url': 'http://www.motorsport.com/f1/video/main-gallery/red-bull-racing-2014-rules-explained/', 'info_dict': { 'id': '2-T3WuR-KMM', 'ext': 'mp4', 'title': 'Red Bull Racing: 2014 Rules Explained', 'duration': 208, 'description': 'A new clip from Red Bull sees Daniel Ricciardo and Sebastian Vettel explain the 2014 Formula One regulations – which are arguably the most complex the sport has ever seen.', 'uploader': 'mcomstaff', 'uploader_id': 'UC334JIYKkVnyFoNCclfZtHQ', 'upload_date': '20140903', 'thumbnail': r're:^https?://.+\.jpg$', }, 'add_ie': ['Youtube'], 'params': { 'skip_download': True, }, } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) iframe_path = self._html_search_regex( r'<iframe id="player_iframe"[^>]+src="([^"]+)"', webpage, 'iframe path', default=None) if iframe_path is None: iframe_path = self._html_search_regex( r'<iframe [^>]*\bsrc="(https://motorsport\.tv/embed/[^"]+)', webpage, 'embed iframe path') return self.url_result(iframe_path) iframe = self._download_webpage( urllib.parse.urljoin(url, iframe_path), display_id, 'Downloading iframe') youtube_id = self._search_regex( r'www.youtube.com/embed/(.{11})', iframe, 'youtube id') return { '_type': 'url_transparent', 'display_id': display_id, 'url': f'https://youtube.com/watch?v={youtube_id}', }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/bbc.py
yt_dlp/extractor/bbc.py
import functools import itertools import json import re import urllib.parse import xml.etree.ElementTree from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, OnDemandPagedList, clean_html, dict_get, float_or_none, get_element_by_class, int_or_none, join_nonempty, js_to_json, parse_duration, parse_iso8601, parse_qs, strip_or_none, traverse_obj, try_get, unescapeHTML, unified_timestamp, url_or_none, urlencode_postdata, urljoin, ) class BBCCoUkIE(InfoExtractor): IE_NAME = 'bbc.co.uk' IE_DESC = 'BBC iPlayer' _ID_REGEX = r'(?:[pbml][\da-z]{7}|w[\da-z]{7,14})' _VALID_URL = rf'''(?x) https?:// (?:www\.)?bbc\.co\.uk/ (?: programmes/(?!articles/)| iplayer(?:/[^/]+)?/(?:episode/|playlist/)| music/(?:clips|audiovideo/popular)[/#]| radio/player/| events/[^/]+/play/[^/]+/ ) (?P<id>{_ID_REGEX})(?!/(?:episodes|broadcasts|clips)) ''' _EMBED_REGEX = [r'setPlaylist\("(?P<url>https?://www\.bbc\.co\.uk/iplayer/[^/]+/[\da-z]{8})"\)'] _LOGIN_URL = 'https://account.bbc.com/signin' _NETRC_MACHINE = 'bbc' _MEDIA_SELECTOR_URL_TEMPL = 'https://open.live.bbc.co.uk/mediaselector/6/select/version/2.0/mediaset/%s/vpid/%s' _MEDIA_SETS = [ # Provides HQ HLS streams with even better quality that pc mediaset but fails # with geolocation in some cases when it's even not geo restricted at all (e.g. # http://www.bbc.co.uk/programmes/b06bp7lf). Also may fail with selectionunavailable. 
'iptv-all', 'pc', ] _EMP_PLAYLIST_NS = 'http://bbc.co.uk/2008/emp/playlist' _TESTS = [ { 'url': 'http://www.bbc.co.uk/programmes/b039g8p7', 'info_dict': { 'id': 'b039d07m', 'ext': 'flv', 'title': 'Kaleidoscope, Leonard Cohen', 'description': 'The Canadian poet and songwriter reflects on his musical career.', }, 'params': { # rtmp download 'skip_download': True, }, }, { 'url': 'http://www.bbc.co.uk/iplayer/episode/b00yng5w/The_Man_in_Black_Series_3_The_Printed_Name/', 'info_dict': { 'id': 'b00yng1d', 'ext': 'flv', 'title': 'The Man in Black: Series 3: The Printed Name', 'description': "Mark Gatiss introduces Nicholas Pierpan's chilling tale of a writer's devilish pact with a mysterious man. Stars Ewan Bailey.", 'duration': 1800, }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'Episode is no longer available on BBC iPlayer Radio', }, { 'url': 'http://www.bbc.co.uk/iplayer/episode/b03vhd1f/The_Voice_UK_Series_3_Blind_Auditions_5/', 'info_dict': { 'id': 'b00yng1d', 'ext': 'flv', 'title': 'The Voice UK: Series 3: Blind Auditions 5', 'description': 'Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.', 'duration': 5100, }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only', }, { 'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion', 'info_dict': { 'id': 'b03k3pb7', 'ext': 'flv', 'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction", 'description': '2. 
Invasion', 'duration': 3600, }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only', }, { 'url': 'http://www.bbc.co.uk/programmes/b04v20dw', 'info_dict': { 'id': 'b04v209v', 'ext': 'flv', 'title': 'Pete Tong, The Essential New Tune Special', 'description': "Pete has a very special mix - all of 2014's Essential New Tunes!", 'duration': 10800, }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'Episode is no longer available on BBC iPlayer Radio', }, { 'url': 'http://www.bbc.co.uk/music/clips/p022h44b', 'note': 'Audio', 'info_dict': { 'id': 'p022h44j', 'ext': 'flv', 'title': 'BBC Proms Music Guides, Rachmaninov: Symphonic Dances', 'description': "In this Proms Music Guide, Andrew McGregor looks at Rachmaninov's Symphonic Dances.", 'duration': 227, }, 'params': { # rtmp download 'skip_download': True, }, }, { 'url': 'http://www.bbc.co.uk/music/clips/p025c0zz', 'note': 'Video', 'info_dict': { 'id': 'p025c103', 'ext': 'flv', 'title': 'Reading and Leeds Festival, 2014, Rae Morris - Closer (Live on BBC Three)', 'description': 'Rae Morris performs Closer for BBC Three at Reading 2014', 'duration': 226, }, 'params': { # rtmp download 'skip_download': True, }, }, { 'url': 'http://www.bbc.co.uk/iplayer/episode/b054fn09/ad/natural-world-20152016-2-super-powered-owls', 'info_dict': { 'id': 'p02n76xf', 'ext': 'flv', 'title': 'Natural World, 2015-2016: 2. 
Super Powered Owls', 'description': 'md5:e4db5c937d0e95a7c6b5e654d429183d', 'duration': 3540, }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'geolocation', }, { 'url': 'http://www.bbc.co.uk/iplayer/episode/b05zmgwn/royal-academy-summer-exhibition', 'info_dict': { 'id': 'b05zmgw1', 'ext': 'flv', 'description': 'Kirsty Wark and Morgan Quaintance visit the Royal Academy as it prepares for its annual artistic extravaganza, meeting people who have come together to make the show unique.', 'title': 'Royal Academy Summer Exhibition', 'duration': 3540, }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'geolocation', }, { # iptv-all mediaset fails with geolocation however there is no geo restriction # for this programme at all 'url': 'http://www.bbc.co.uk/programmes/b06rkn85', 'info_dict': { 'id': 'b06rkms3', 'ext': 'flv', 'title': "Best of the Mini-Mixes 2015: Part 3, Annie Mac's Friday Night - BBC Radio 1", 'description': "Annie has part three in the Best of the Mini-Mixes 2015, plus the year's Most Played!", }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'Now it\'s really geo-restricted', }, { # compact player (https://github.com/ytdl-org/youtube-dl/issues/8147) 'url': 'http://www.bbc.co.uk/programmes/p028bfkf/player', 'info_dict': { 'id': 'p028bfkj', 'ext': 'flv', 'title': 'Extract from BBC documentary Look Stranger - Giant Leeks and Magic Brews', 'description': 'Extract from BBC documentary Look Stranger - Giant Leeks and Magic Brews', }, 'params': { # rtmp download 'skip_download': True, }, }, { 'url': 'http://www.bbc.co.uk/iplayer/playlist/p01dvks4', 'only_matching': True, }, { 'url': 'http://www.bbc.co.uk/music/clips#p02frcc3', 'only_matching': True, }, { 'url': 'http://www.bbc.co.uk/iplayer/cbeebies/episode/b0480276/bing-14-atchoo', 'only_matching': True, }, { 'url': 'http://www.bbc.co.uk/radio/player/p03cchwf', 'only_matching': True, }, { 'url': 'https://www.bbc.co.uk/music/audiovideo/popular#p055bc55', 
'only_matching': True, }, { 'url': 'http://www.bbc.co.uk/programmes/w3csv1y9', 'only_matching': True, }, { 'url': 'https://www.bbc.co.uk/programmes/m00005xn', 'only_matching': True, }, { 'url': 'https://www.bbc.co.uk/programmes/w172w4dww1jqt5s', 'only_matching': True, }] def _perform_login(self, username, password): login_page = self._download_webpage( self._LOGIN_URL, None, 'Downloading signin page') login_form = self._hidden_inputs(login_page) login_form.update({ 'username': username, 'password': password, }) post_url = urljoin(self._LOGIN_URL, self._search_regex( r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page, 'post url', default=self._LOGIN_URL, group='url')) response, urlh = self._download_webpage_handle( post_url, None, 'Logging in', data=urlencode_postdata(login_form), headers={'Referer': self._LOGIN_URL}) if self._LOGIN_URL in urlh.url: error = clean_html(get_element_by_class('form-message', response)) if error: raise ExtractorError( f'Unable to login: {error}', expected=True) raise ExtractorError('Unable to log in') class MediaSelectionError(Exception): def __init__(self, error_id): self.id = error_id def _extract_asx_playlist(self, connection, programme_id): asx = self._download_xml(connection.get('href'), programme_id, 'Downloading ASX playlist') return [ref.get('href') for ref in asx.findall('./Entry/ref')] def _extract_items(self, playlist): return playlist.findall(f'./{{{self._EMP_PLAYLIST_NS}}}item') def _extract_medias(self, media_selection): error = media_selection.get('result') if error: raise BBCCoUkIE.MediaSelectionError(error) return media_selection.get('media') or [] def _extract_connections(self, media): return media.get('connection') or [] def _get_subtitles(self, media, programme_id): subtitles = {} for connection in self._extract_connections(media): cc_url = url_or_none(connection.get('href')) if not cc_url: continue captions = self._download_xml( cc_url, programme_id, 'Downloading captions', fatal=False) if not isinstance(captions, 
xml.etree.ElementTree.Element): continue subtitles['en'] = [ { 'url': connection.get('href'), 'ext': 'ttml', }, ] break return subtitles def _raise_extractor_error(self, media_selection_error): raise ExtractorError( f'{self.IE_NAME} returned error: {media_selection_error.id}', expected=True) def _download_media_selector(self, programme_id): last_exception = None formats, subtitles = [], {} for media_set in self._MEDIA_SETS: try: fmts, subs = self._download_media_selector_url( self._MEDIA_SELECTOR_URL_TEMPL % (media_set, programme_id), programme_id) formats.extend(fmts) if subs: self._merge_subtitles(subs, target=subtitles) except BBCCoUkIE.MediaSelectionError as e: if e.id in ('notukerror', 'geolocation', 'selectionunavailable'): last_exception = e continue self._raise_extractor_error(e) if last_exception: if formats or subtitles: self.report_warning(f'{self.IE_NAME} returned error: {last_exception.id}') else: self._raise_extractor_error(last_exception) return formats, subtitles def _download_media_selector_url(self, url, programme_id=None): media_selection = self._download_json( url, programme_id, 'Downloading media selection JSON', expected_status=(403, 404)) return self._process_media_selector(media_selection, programme_id) def _process_media_selector(self, media_selection, programme_id): formats = [] subtitles = None urls = [] for media in self._extract_medias(media_selection): kind = media.get('kind') if kind in ('video', 'audio'): bitrate = int_or_none(media.get('bitrate')) encoding = media.get('encoding') width = int_or_none(media.get('width')) height = int_or_none(media.get('height')) file_size = int_or_none(media.get('media_file_size')) for connection in self._extract_connections(media): href = connection.get('href') if href in urls: continue if href: urls.append(href) conn_kind = connection.get('kind') protocol = connection.get('protocol') supplier = connection.get('supplier') transfer_format = connection.get('transferFormat') format_id = supplier or 
conn_kind or protocol # ASX playlist if supplier == 'asx': for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)): formats.append({ 'url': ref, 'format_id': f'ref{i}_{format_id}', }) elif transfer_format == 'dash': formats.extend(self._extract_mpd_formats( href, programme_id, mpd_id=format_id, fatal=False)) elif transfer_format == 'hls': # TODO: let expected_status be passed into _extract_xxx_formats() instead try: fmts = self._extract_m3u8_formats( href, programme_id, ext='mp4', entry_protocol='m3u8_native', m3u8_id=format_id, fatal=False) except ExtractorError as e: if not (isinstance(e.exc_info[1], HTTPError) and e.exc_info[1].status in (403, 404)): raise fmts = [] formats.extend(fmts) elif transfer_format == 'hds': formats.extend(self._extract_f4m_formats( href, programme_id, f4m_id=format_id, fatal=False)) else: if not supplier and bitrate: format_id += f'-{bitrate}' fmt = { 'format_id': format_id, 'filesize': file_size, } if kind == 'video': fmt.update({ 'width': width, 'height': height, 'tbr': bitrate, 'vcodec': encoding, }) else: fmt.update({ 'abr': bitrate, 'acodec': encoding, 'vcodec': 'none', }) if protocol in ('http', 'https'): # Direct link fmt.update({ 'url': href, }) elif protocol == 'rtmp': application = connection.get('application', 'ondemand') auth_string = connection.get('authString') identifier = connection.get('identifier') server = connection.get('server') fmt.update({ 'url': f'{protocol}://{server}/{application}?{auth_string}', 'play_path': identifier, 'app': f'{application}?{auth_string}', 'page_url': 'http://www.bbc.co.uk', 'player_url': 'http://www.bbc.co.uk/emp/releases/iplayer/revisions/617463_618125_4/617463_618125_4_emp.swf', 'rtmp_live': False, 'ext': 'flv', }) else: continue formats.append(fmt) elif kind == 'captions': subtitles = self.extract_subtitles(media, programme_id) return formats, subtitles def _download_playlist(self, playlist_id): try: playlist = self._download_json( 
f'http://www.bbc.co.uk/programmes/{playlist_id}/playlist.json', playlist_id, 'Downloading playlist JSON') formats = [] subtitles = {} for version in playlist.get('allAvailableVersions', []): smp_config = version['smpConfig'] title = smp_config['title'] description = smp_config['summary'] for item in smp_config['items']: kind = item['kind'] if kind not in ('programme', 'radioProgramme'): continue programme_id = item.get('vpid') duration = int_or_none(item.get('duration')) version_formats, version_subtitles = self._download_media_selector(programme_id) types = version['types'] for f in version_formats: f['format_note'] = ', '.join(types) if any('AudioDescribed' in x for x in types): f['language_preference'] = -10 formats += version_formats for tag, subformats in (version_subtitles or {}).items(): subtitles.setdefault(tag, []).extend(subformats) return programme_id, title, description, duration, formats, subtitles except ExtractorError as ee: if not (isinstance(ee.cause, HTTPError) and ee.cause.status == 404): raise # fallback to legacy playlist return self._process_legacy_playlist(playlist_id) def _process_legacy_playlist_url(self, url, display_id): playlist = self._download_legacy_playlist_url(url, display_id) return self._extract_from_legacy_playlist(playlist, display_id) def _process_legacy_playlist(self, playlist_id): return self._process_legacy_playlist_url( f'http://www.bbc.co.uk/iplayer/playlist/{playlist_id}', playlist_id) def _download_legacy_playlist_url(self, url, playlist_id=None): return self._download_xml( url, playlist_id, 'Downloading legacy playlist XML') def _extract_from_legacy_playlist(self, playlist, playlist_id): no_items = playlist.find(f'./{{{self._EMP_PLAYLIST_NS}}}noItems') if no_items is not None: reason = no_items.get('reason') if reason == 'preAvailability': msg = f'Episode {playlist_id} is not yet available' elif reason == 'postAvailability': msg = f'Episode {playlist_id} is no longer available' elif reason == 'noMedia': msg = f'Episode 
{playlist_id} is not currently available' else: msg = f'Episode {playlist_id} is not available: {reason}' raise ExtractorError(msg, expected=True) for item in self._extract_items(playlist): kind = item.get('kind') if kind not in ('programme', 'radioProgramme'): continue title = playlist.find(f'./{{{self._EMP_PLAYLIST_NS}}}title').text description_el = playlist.find(f'./{{{self._EMP_PLAYLIST_NS}}}summary') description = description_el.text if description_el is not None else None def get_programme_id(item): def get_from_attributes(item): for p in ('identifier', 'group'): value = item.get(p) if value and re.match(r'^[pb][\da-z]{7}$', value): return value get_from_attributes(item) mediator = item.find(f'./{{{self._EMP_PLAYLIST_NS}}}mediator') if mediator is not None: return get_from_attributes(mediator) programme_id = get_programme_id(item) duration = int_or_none(item.get('duration')) if programme_id: formats, subtitles = self._download_media_selector(programme_id) else: formats, subtitles = self._process_media_selector(item, playlist_id) programme_id = playlist_id return programme_id, title, description, duration, formats, subtitles def _real_extract(self, url): group_id = self._match_id(url) webpage = self._download_webpage(url, group_id, 'Downloading video page') error = self._search_regex( r'<div\b[^>]+\bclass=["\'](?:smp|playout)__message delta["\'][^>]*>\s*([^<]+?)\s*<', webpage, 'error', default=None) if error: raise ExtractorError(error, expected=True) programme_id = None duration = None tviplayer = self._search_regex( r'mediator\.bind\(({.+?})\s*,\s*document\.getElementById', webpage, 'player', default=None) if tviplayer: player = self._parse_json(tviplayer, group_id).get('player', {}) duration = int_or_none(player.get('duration')) programme_id = player.get('vpid') if not programme_id: programme_id = self._search_regex( rf'"vpid"\s*:\s*"({self._ID_REGEX})"', webpage, 'vpid', fatal=False, default=None) if programme_id: formats, subtitles = 
self._download_media_selector(programme_id) title = self._og_search_title(webpage, default=None) or self._html_search_regex( (r'<h2[^>]+id="parent-title"[^>]*>(.+?)</h2>', r'<div[^>]+class="info"[^>]*>\s*<h1>(.+?)</h1>'), webpage, 'title') description = self._search_regex( (r'<p class="[^"]*medium-description[^"]*">([^<]+)</p>', r'<div[^>]+class="info_+synopsis"[^>]*>([^<]+)</div>'), webpage, 'description', default=None) if not description: description = self._html_search_meta('description', webpage) else: programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id) return { 'id': programme_id, 'title': title, 'description': description, 'thumbnail': self._og_search_thumbnail(webpage, default=None), 'duration': duration, 'formats': formats, 'subtitles': subtitles, } class BBCIE(BBCCoUkIE): # XXX: Do not subclass from concrete IE IE_NAME = 'bbc' IE_DESC = 'BBC' _VALID_URL = r'''(?x) https?://(?:www\.)?(?: bbc\.(?:com|co\.uk)| bbcnewsd73hkzno2ini43t4gblxvycyac5aw4gnv7t2rccijh7745uqd\.onion| bbcweb3hytmzhn5d532owbu6oqadra5z3ar726vq5kgwwn6aucdccrad\.onion )/(?:[^/]+/)+(?P<id>[^/#?]+)''' _MEDIA_SETS = [ 'pc', 'mobile-tablet-main', ] _TESTS = [{ # article with multiple videos embedded with data-playable containing vpids 'url': 'http://www.bbc.com/news/world-europe-32668511', 'info_dict': { 'id': 'world-europe-32668511', 'title': 'Russia stages massive WW2 parade despite Western boycott', 'description': 'md5:00ff61976f6081841f759a08bf78cc9c', }, 'playlist_count': 2, }, { # article with multiple videos embedded with data-playable (more videos) 'url': 'http://www.bbc.com/news/business-28299555', 'info_dict': { 'id': 'business-28299555', 'title': 'Farnborough Airshow: Video highlights', 'description': 'BBC reports and video highlights at the Farnborough Airshow.', }, 'playlist_count': 9, 'skip': 'Save time', }, { # article with multiple videos embedded with `new SMP()` # broken 'url': 
'http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460', 'info_dict': { 'id': '3662a707-0af9-3149-963f-47bea720b460', 'title': 'BUGGER', 'description': r're:BUGGER The recent revelations by the whistleblower Edward Snowden were fascinating. .{211}\.{3}$', }, 'playlist_count': 18, }, { # single video embedded with data-playable containing vpid 'url': 'http://www.bbc.com/news/world-europe-32041533', 'info_dict': { 'id': 'p02mprgb', 'ext': 'mp4', 'title': 'Germanwings crash site aerial video', 'description': r're:(?s)Aerial video showed the site where the Germanwings flight 4U 9525, .{156} BFM TV\.$', 'duration': 47, 'timestamp': 1427219242, 'upload_date': '20150324', 'thumbnail': 'https://ichef.bbci.co.uk/news/1024/media/images/81879000/jpg/_81879090_81879089.jpg', }, 'params': { 'skip_download': True, }, }, { # article with single video embedded with data-playable containing XML playlist # with direct video links as progressiveDownloadUrl (for now these are extracted) # and playlist with f4m and m3u8 as streamingUrl 'url': 'http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu', 'info_dict': { 'id': '150615_telabyad_kentin_cogu', 'ext': 'mp4', 'title': "YPG: Tel Abyad'ın tamamı kontrolümüzde", 'description': 'md5:33a4805a855c9baf7115fcbde57e7025', 'timestamp': 1434397334, 'upload_date': '20150615', }, 'params': { 'skip_download': True, }, 'skip': 'now SIMORGH_DATA with no video', }, { # single video embedded with data-playable containing XML playlists (regional section) 'url': 'http://www.bbc.com/mundo/video_fotos/2015/06/150619_video_honduras_militares_hospitales_corrupcion_aw', 'info_dict': { 'id': '39275083', 'display_id': '150619_video_honduras_militares_hospitales_corrupcion_aw', 'ext': 'mp4', 'title': 'Honduras militariza sus hospitales por nuevo escándalo de corrupción', 'description': 'Honduras militariza sus hospitales por nuevo escándalo de corrupción', 'timestamp': 1434713142, 'upload_date': '20150619', 
'thumbnail': 'https://a.files.bbci.co.uk/worldservice/live/assets/images/2015/06/19/150619132146_honduras_hsopitales_militares_640x360_aptn_nocredit.jpg', }, 'params': { 'skip_download': True, }, }, { # single video from video playlist embedded with vxp-playlist-data JSON 'url': 'http://www.bbc.com/news/video_and_audio/must_see/33376376', 'info_dict': { 'id': 'p02w6qjc', 'ext': 'mp4', 'title': '''Judge Mindy Glazer: "I'm sorry to see you here... I always wondered what happened to you"''', 'duration': 56, 'description': '''Judge Mindy Glazer: "I'm sorry to see you here... I always wondered what happened to you"''', }, 'params': { 'skip_download': True, }, 'skip': '404 Not Found', }, { # single video story with __PWA_PRELOADED_STATE__ 'url': 'http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret', 'info_dict': { 'id': 'p02q6gc4', 'ext': 'mp4', 'title': 'Tasting the spice of life in Jaffna', 'description': r're:(?s)BBC Travel Show’s Henry Golding explores the city of Jaffna .{151} aftertaste\.$', 'timestamp': 1646058397, 'upload_date': '20220228', 'duration': 255, 'thumbnail': 'https://ichef.bbci.co.uk/images/ic/1920xn/p02vxvkn.jpg', }, }, { # single video story without digitalData 'url': 'http://www.bbc.com/autos/story/20130513-hyundais-rock-star', 'info_dict': { 'id': 'p018zqqg', 'ext': 'mp4', 'title': 'Hyundai Santa Fe Sport: Rock star', 'description': 'md5:b042a26142c4154a6e472933cf20793d', 'timestamp': 1415867444, 'upload_date': '20141113', }, 'skip': 'redirects to TopGear home page', }, { # single video embedded with Morph # TODO: replacement test page 'url': 'http://www.bbc.co.uk/sport/live/olympics/36895975', 'info_dict': { 'id': 'p041vhd0', 'ext': 'mp4', 'title': "Nigeria v Japan - Men's First Round", 'description': 'Live coverage of the first round from Group B at the Amazonia Arena.', 'duration': 7980, 'uploader': 'BBC Sport', 'uploader_id': 'bbc_sport', }, 'skip': 'Video no longer in page', }, { # single video in __INITIAL_DATA__ 'url': 
'http://www.bbc.com/sport/0/football/33653409', 'info_dict': { 'id': 'p02xycnp', 'ext': 'mp4', 'title': 'Ronaldo to Man Utd, Arsenal to spend?', 'description': r're:(?s)BBC Sport\'s David Ornstein rounds up the latest transfer reports, .{359} here\.$', 'timestamp': 1437750175, 'upload_date': '20150724', 'thumbnail': r're:https?://.+/.+media/images/69320000/png/_69320754_mmgossipcolumnextraaugust18.png', 'duration': 140, }, }, { # article with multiple videos embedded with Morph.setPayload 'url': 'http://www.bbc.com/sport/0/football/34475836', 'info_dict': { 'id': '34475836', 'title': 'Jurgen Klopp: Furious football from a witty and winning coach', 'description': 'Fast-paced football, wit, wisdom and a ready smile - why Liverpool fans should come to love new boss Jurgen Klopp.', }, 'playlist_count': 3, }, { # Testing noplaylist 'url': 'http://www.bbc.com/sport/0/football/34475836', 'info_dict': { 'id': 'p034ppnv', 'ext': 'mp4', 'title': 'All you need to know about Jurgen Klopp', 'timestamp': 1444335081, 'upload_date': '20151008',
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
true
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/volejtv.py
yt_dlp/extractor/volejtv.py
from .common import InfoExtractor class VolejTVIE(InfoExtractor): _VALID_URL = r'https?://volej\.tv/video/(?P<id>\d+)' _TESTS = [{ 'url': 'https://volej.tv/video/725742/', 'info_dict': { 'id': '725742', 'ext': 'mp4', 'description': 'Zápas VK Královo Pole vs VK Prostějov 10.12.2022 v 19:00 na Volej.TV', 'thumbnail': 'https://volej.tv/images/og/16/17186/og.png', 'title': 'VK Královo Pole vs VK Prostějov', }, }, { 'url': 'https://volej.tv/video/725605/', 'info_dict': { 'id': '725605', 'ext': 'mp4', 'thumbnail': 'https://volej.tv/images/og/15/17185/og.png', 'title': 'VK Lvi Praha vs VK Euro Sitex Příbram', 'description': 'Zápas VK Lvi Praha vs VK Euro Sitex Příbram 11.12.2022 v 19:00 na Volej.TV', }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) json_data = self._search_json( r'<\s*!\[CDATA[^=]+=', webpage, 'CDATA', video_id) formats, subtitle = self._extract_m3u8_formats_and_subtitles( json_data['urls']['hls'], video_id) return { 'id': video_id, 'title': self._html_search_meta(['og:title', 'twitter:title'], webpage), 'thumbnail': self._html_search_meta(['og:image', 'twitter:image'], webpage), 'description': self._html_search_meta(['description', 'og:description', 'twitter:description'], webpage), 'formats': formats, 'subtitles': subtitle, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/xhamster.py
yt_dlp/extractor/xhamster.py
import itertools import re import urllib.parse from .common import InfoExtractor from ..utils import ( ExtractorError, clean_html, determine_ext, dict_get, extract_attributes, float_or_none, int_or_none, join_nonempty, parse_duration, str_or_none, try_get, unified_strdate, url_or_none, ) def to_signed_32(n): return n % ((-1 if n < 0 else 1) * 2**32) class _ByteGenerator: def __init__(self, algo_id, seed): try: self._algorithm = getattr(self, f'_algo{algo_id}') except AttributeError: raise ExtractorError(f'Unknown algorithm ID "{algo_id}"') self._s = to_signed_32(seed) def _algo1(self, s): # LCG (a=1664525, c=1013904223, m=2^32) # Ref: https://en.wikipedia.org/wiki/Linear_congruential_generator s = self._s = to_signed_32(s * 1664525 + 1013904223) return s def _algo2(self, s): # xorshift32 # Ref: https://en.wikipedia.org/wiki/Xorshift s = to_signed_32(s ^ (s << 13)) s = to_signed_32(s ^ ((s & 0xFFFFFFFF) >> 17)) s = self._s = to_signed_32(s ^ (s << 5)) return s def _algo3(self, s): # Weyl Sequence (k≈2^32*φ, m=2^32) + MurmurHash3 (fmix32) # Ref: https://en.wikipedia.org/wiki/Weyl_sequence # https://commons.apache.org/proper/commons-codec/jacoco/org.apache.commons.codec.digest/MurmurHash3.java.html s = self._s = to_signed_32(s + 0x9e3779b9) s = to_signed_32(s ^ ((s & 0xFFFFFFFF) >> 16)) s = to_signed_32(s * to_signed_32(0x85ebca77)) s = to_signed_32(s ^ ((s & 0xFFFFFFFF) >> 13)) s = to_signed_32(s * to_signed_32(0xc2b2ae3d)) return to_signed_32(s ^ ((s & 0xFFFFFFFF) >> 16)) def _algo4(self, s): # Custom scrambling function involving a left rotation (ROL) s = self._s = to_signed_32(s + 0x6d2b79f5) s = to_signed_32((s << 7) | ((s & 0xFFFFFFFF) >> 25)) # ROL 7 s = to_signed_32(s + 0x9e3779b9) s = to_signed_32(s ^ ((s & 0xFFFFFFFF) >> 11)) return to_signed_32(s * 0x27d4eb2d) def _algo5(self, s): # xorshift variant with a final addition s = to_signed_32(s ^ (s << 7)) s = to_signed_32(s ^ ((s & 0xFFFFFFFF) >> 9)) s = to_signed_32(s ^ (s << 8)) s = self._s = to_signed_32(s + 
0xa5a5a5a5) return s def _algo6(self, s): # LCG (a=0x2c9277b5, c=0xac564b05) with a variable right shift scrambler s = self._s = to_signed_32(s * to_signed_32(0x2c9277b5) + to_signed_32(0xac564b05)) s2 = to_signed_32(s ^ ((s & 0xFFFFFFFF) >> 18)) shift = (s & 0xFFFFFFFF) >> 27 & 31 return to_signed_32((s2 & 0xFFFFFFFF) >> shift) def _algo7(self, s): # Weyl Sequence (k=0x9e3779b9) + custom multiply-xor-shift mixing function s = self._s = to_signed_32(s + to_signed_32(0x9e3779b9)) e = to_signed_32(s ^ (s << 5)) e = to_signed_32(e * to_signed_32(0x7feb352d)) e = to_signed_32(e ^ ((e & 0xFFFFFFFF) >> 15)) return to_signed_32(e * to_signed_32(0x846ca68b)) def __next__(self): return self._algorithm(self._s) & 0xFF class XHamsterIE(InfoExtractor): _DOMAINS = r'(?:xhamster\.(?:com|one|desi)|xhms\.pro|xhamster\d+\.(?:com|desi)|xhday\.com|xhvid\.com)' _VALID_URL = rf'''(?x) https?:// (?:[^/?#]+\.)?{_DOMAINS}/ (?: movies/(?P<id>[\dA-Za-z]+)/(?P<display_id>[^/]*)\.html| videos/(?P<display_id_2>[^/]*)-(?P<id_2>[\dA-Za-z]+) ) ''' _TESTS = [{ 'url': 'https://xhamster.com/videos/femaleagent-shy-beauty-takes-the-bait-1509445', 'md5': 'e009ea6b849b129e3bebaeb9cf0dee51', 'info_dict': { 'id': '1509445', 'display_id': 'femaleagent-shy-beauty-takes-the-bait', 'ext': 'mp4', 'title': 'FemaleAgent Shy beauty takes the bait', 'timestamp': 1350194821, 'upload_date': '20121014', 'uploader': 'Ruseful2011', 'uploader_id': 'ruseful2011', 'duration': 893, 'age_limit': 18, 'thumbnail': r're:https?://.+\.jpg', 'uploader_url': 'https://xhamster.com/users/ruseful2011', 'description': '', 'view_count': int, 'comment_count': int, }, }, { 'url': 'https://xhamster.com/videos/britney-spears-sexy-booty-2221348?hd=', 'info_dict': { 'id': '2221348', 'display_id': 'britney-spears-sexy-booty', 'ext': 'mp4', 'title': 'Britney Spears Sexy Booty', 'timestamp': 1379123460, 'upload_date': '20130914', 'uploader': 'jojo747400', 'duration': 200, 'age_limit': 18, 'description': '', 'view_count': int, 'thumbnail': 
r're:https?://.+\.jpg', 'comment_count': int, }, 'params': { 'extractor_args': {'generic': {'impersonate': ['chrome']}}, 'skip_download': 'm3u8', }, }, { # empty seo, unavailable via new URL schema 'url': 'http://xhamster.com/movies/5667973/.html', 'info_dict': { 'id': '5667973', 'ext': 'mp4', 'title': '....', 'timestamp': 1454948101, 'upload_date': '20160208', 'uploader': 'parejafree', 'uploader_id': 'parejafree', 'duration': 72, 'age_limit': 18, 'comment_count': int, 'uploader_url': 'https://xhamster.com/users/parejafree', 'description': '', 'view_count': int, 'thumbnail': r're:https?://.+\.jpg', }, 'skip': 'Invalid URL', }, { # mobile site 'url': 'https://m.xhamster.com/videos/cute-teen-jacqueline-solo-masturbation-8559111', 'only_matching': True, }, { 'url': 'https://xhamster.com/movies/2272726/amber_slayed_by_the_knight.html', 'only_matching': True, }, { # This video is visible for marcoalfa123456's friends only 'url': 'https://it.xhamster.com/movies/7263980/la_mia_vicina.html', 'only_matching': True, }, { # new URL schema 'url': 'https://pt.xhamster.com/videos/euro-pedal-pumping-7937821', 'only_matching': True, }, { 'url': 'https://xhamster.one/videos/femaleagent-shy-beauty-takes-the-bait-1509445', 'only_matching': True, }, { 'url': 'https://xhamster.desi/videos/femaleagent-shy-beauty-takes-the-bait-1509445', 'only_matching': True, }, { 'url': 'https://xhamster2.com/videos/femaleagent-shy-beauty-takes-the-bait-1509445', 'only_matching': True, }, { 'url': 'https://xhamster11.com/videos/femaleagent-shy-beauty-takes-the-bait-1509445', 'only_matching': True, }, { 'url': 'https://xhamster26.com/videos/femaleagent-shy-beauty-takes-the-bait-1509445', 'only_matching': True, }, { 'url': 'http://xhamster.com/movies/1509445/femaleagent_shy_beauty_takes_the_bait.html', 'only_matching': True, }, { 'url': 'http://xhamster.com/movies/2221348/britney_spears_sexy_booty.html?hd', 'only_matching': True, }, { 'url': 
'http://de.xhamster.com/videos/skinny-girl-fucks-herself-hard-in-the-forest-xhnBJZx', 'only_matching': True, }, { 'url': 'https://xhday.com/videos/strapless-threesome-xhh7yVf', 'only_matching': True, }, { 'url': 'https://xhvid.com/videos/lk-mm-xhc6wn6', 'only_matching': True, }, { 'url': 'https://xhamster20.desi/videos/my-verification-video-scottishmistress23-11937369', 'only_matching': True, }] def _decipher_format_url(self, format_url, format_id): parsed_url = urllib.parse.urlparse(format_url) hex_string, path_remainder = self._search_regex( r'^/(?P<hex>[0-9a-fA-F]{12,})(?P<rem>[/,].+)$', parsed_url.path, 'url components', default=(None, None), group=('hex', 'rem')) if not hex_string: self.report_warning(f'Skipping format "{format_id}": unsupported URL format') return None byte_data = bytes.fromhex(hex_string) seed = int.from_bytes(byte_data[1:5], byteorder='little', signed=True) try: byte_gen = _ByteGenerator(byte_data[0], seed) except ExtractorError as e: self.report_warning(f'Skipping format "{format_id}": {e.msg}') return None deciphered = bytearray(byte ^ next(byte_gen) for byte in byte_data[5:]).decode('latin-1') return parsed_url._replace(path=f'/{deciphered}{path_remainder}').geturl() def _fixup_formats(self, formats): for f in formats: if f.get('vcodec'): continue for vcodec in ('av1', 'h264'): if any(f'.{vcodec}.' 
in f_url for f_url in (f['url'], f.get('manifest_url', ''))): f['vcodec'] = vcodec break return formats def _real_extract(self, url): mobj = self._match_valid_url(url) video_id = mobj.group('id') or mobj.group('id_2') display_id = mobj.group('display_id') or mobj.group('display_id_2') desktop_url = re.sub(r'^(https?://(?:.+?\.)?)m\.', r'\1', url) webpage, urlh = self._download_webpage_handle(desktop_url, video_id) error = self._html_search_regex( r'<div[^>]+id=["\']videoClosed["\'][^>]*>(.+?)</div>', webpage, 'error', default=None) if error: raise ExtractorError(error, expected=True) age_limit = self._rta_search(webpage) def get_height(s): return int_or_none(self._search_regex( r'^(\d+)[pP]', s, 'height', default=None)) initials = self._parse_json( self._search_regex( (r'window\.initials\s*=\s*({.+?})\s*;\s*</script>', r'window\.initials\s*=\s*({.+?})\s*;'), webpage, 'initials', default='{}'), video_id, fatal=False) if initials: video = initials['videoModel'] title = video['title'] formats = [] format_urls = set() format_sizes = {} sources = try_get(video, lambda x: x['sources'], dict) or {} for format_id, formats_dict in sources.items(): if not isinstance(formats_dict, dict): continue download_sources = try_get(sources, lambda x: x['download'], dict) or {} for quality, format_dict in download_sources.items(): if not isinstance(format_dict, dict): continue format_sizes[quality] = float_or_none(format_dict.get('size')) for quality, format_item in formats_dict.items(): if format_id == 'download': # Download link takes some time to be generated, # skipping for now continue format_url = format_item format_url = url_or_none(format_url) if not format_url or format_url in format_urls: continue format_urls.add(format_url) formats.append({ 'format_id': f'{format_id}-{quality}', 'url': format_url, 'ext': determine_ext(format_url, 'mp4'), 'height': get_height(quality), 'filesize': format_sizes.get(quality), 'http_headers': { 'Referer': urlh.url, }, }) xplayer_sources = 
try_get( initials, lambda x: x['xplayerSettings']['sources'], dict) if xplayer_sources: hls_sources = xplayer_sources.get('hls') if isinstance(hls_sources, dict): for hls_format_key in ('url', 'fallback'): hls_url = hls_sources.get(hls_format_key) if not hls_url: continue hls_url = self._decipher_format_url(hls_url, f'hls-{hls_format_key}') if not hls_url or hls_url in format_urls: continue format_urls.add(hls_url) formats.extend(self._extract_m3u8_formats( hls_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) standard_sources = xplayer_sources.get('standard') if isinstance(standard_sources, dict): for identifier, formats_list in standard_sources.items(): if not isinstance(formats_list, list): continue for standard_format in formats_list: if not isinstance(standard_format, dict): continue for standard_format_key in ('url', 'fallback'): standard_url = standard_format.get(standard_format_key) if not standard_url: continue quality = (str_or_none(standard_format.get('quality')) or str_or_none(standard_format.get('label')) or '') format_id = join_nonempty(identifier, quality) standard_url = self._decipher_format_url(standard_url, format_id) if not standard_url or standard_url in format_urls: continue format_urls.add(standard_url) ext = determine_ext(standard_url, 'mp4') if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( standard_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) continue formats.append({ 'format_id': format_id, 'url': standard_url, 'ext': ext, 'height': get_height(quality), 'filesize': format_sizes.get(quality), 'http_headers': { 'Referer': urlh.url, }, # HTTP formats return "Wrong key" error even when deciphered by site JS # TODO: Remove this when resolved on the site's end '__needs_testing': True, }) categories_list = video.get('categories') if isinstance(categories_list, list): categories = [] for c in categories_list: if not isinstance(c, dict): continue c_name = c.get('name') if 
isinstance(c_name, str): categories.append(c_name) else: categories = None uploader_url = url_or_none(try_get(video, lambda x: x['author']['pageURL'])) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': video.get('description'), 'timestamp': int_or_none(video.get('created')), 'uploader': try_get( video, lambda x: x['author']['name'], str), 'uploader_url': uploader_url, 'uploader_id': uploader_url.split('/')[-1] if uploader_url else None, 'thumbnail': video.get('thumbURL'), 'duration': int_or_none(video.get('duration')), 'view_count': int_or_none(video.get('views')), 'like_count': int_or_none(try_get( video, lambda x: x['rating']['likes'], int)), 'dislike_count': int_or_none(try_get( video, lambda x: x['rating']['dislikes'], int)), 'comment_count': int_or_none(video.get('comments')), 'age_limit': age_limit if age_limit is not None else 18, 'categories': categories, 'formats': self._fixup_formats(formats), # TODO: Revert to ('res', 'proto', 'tbr') when HTTP formats problem is resolved '_format_sort_fields': ('res', 'proto:m3u8', 'tbr'), } # Old layout fallback title = self._html_search_regex( [r'<h1[^>]*>([^<]+)</h1>', r'<meta[^>]+itemprop=".*?caption.*?"[^>]+content="(.+?)"', r'<title[^>]*>(.+?)(?:,\s*[^,]*?\s*Porn\s*[^,]*?:\s*xHamster[^<]*| - xHamster\.com)</title>'], webpage, 'title') formats = [] format_urls = set() sources = self._parse_json( self._search_regex( r'sources\s*:\s*({.+?})\s*,?\s*\n', webpage, 'sources', default='{}'), video_id, fatal=False) for format_id, format_url in sources.items(): format_url = url_or_none(format_url) if not format_url: continue if format_url in format_urls: continue format_urls.add(format_url) formats.append({ 'format_id': format_id, 'url': format_url, 'height': get_height(format_id), }) video_url = self._search_regex( [r'''file\s*:\s*(?P<q>["'])(?P<mp4>.+?)(?P=q)''', r'''<a\s+href=(?P<q>["'])(?P<mp4>.+?)(?P=q)\s+class=["']mp4Thumb''', r'''<video[^>]+file=(?P<q>["'])(?P<mp4>.+?)(?P=q)[^>]*>'''], 
webpage, 'video url', group='mp4', default=None) if video_url and video_url not in format_urls: formats.append({ 'url': video_url, }) # Only a few videos have an description mobj = re.search(r'<span>Description: </span>([^<]+)', webpage) description = mobj.group(1) if mobj else None upload_date = unified_strdate(self._search_regex( r'hint=["\'](\d{4}-\d{2}-\d{2}) \d{2}:\d{2}:\d{2} [A-Z]{3,4}', webpage, 'upload date', fatal=False)) uploader = self._html_search_regex( r'<span[^>]+itemprop=["\']author[^>]+><a[^>]+><span[^>]+>([^<]+)', webpage, 'uploader', default='anonymous') thumbnail = self._search_regex( [r'''["']thumbUrl["']\s*:\s*(?P<q>["'])(?P<thumbnail>.+?)(?P=q)''', r'''<video[^>]+"poster"=(?P<q>["'])(?P<thumbnail>.+?)(?P=q)[^>]*>'''], webpage, 'thumbnail', fatal=False, group='thumbnail') duration = parse_duration(self._search_regex( [r'<[^<]+\bitemprop=["\']duration["\'][^<]+\bcontent=["\'](.+?)["\']', r'Runtime:\s*</span>\s*([\d:]+)'], webpage, 'duration', fatal=False)) view_count = int_or_none(self._search_regex( r'content=["\']User(?:View|Play)s:(\d+)', webpage, 'view count', fatal=False)) mobj = re.search(r'hint=[\'"](?P<likecount>\d+) Likes / (?P<dislikecount>\d+) Dislikes', webpage) (like_count, dislike_count) = (mobj.group('likecount'), mobj.group('dislikecount')) if mobj else (None, None) mobj = re.search(r'</label>Comments \((?P<commentcount>\d+)\)</div>', webpage) comment_count = mobj.group('commentcount') if mobj else 0 categories_html = self._search_regex( r'(?s)<table.+?(<span>Categories:.+?)</table>', webpage, 'categories', default=None) categories = [clean_html(category) for category in re.findall( r'<a[^>]+>(.+?)</a>', categories_html)] if categories_html else None return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': description, 'upload_date': upload_date, 'uploader': uploader, 'uploader_id': uploader.lower() if uploader else None, 'thumbnail': thumbnail, 'duration': duration, 'view_count': view_count, 
'like_count': int_or_none(like_count), 'dislike_count': int_or_none(dislike_count), 'comment_count': int_or_none(comment_count), 'age_limit': age_limit, 'categories': categories, 'formats': formats, } class XHamsterEmbedIE(InfoExtractor): _VALID_URL = rf'https?://(?:[^/?#]+\.)?{XHamsterIE._DOMAINS}/xembed\.php\?video=(?P<id>\d+)' _EMBED_REGEX = [r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?xhamster\.com/xembed\.php\?video=\d+)\1'] _TESTS = [{ 'url': 'http://xhamster.com/xembed.php?video=3328539', 'info_dict': { 'id': '3328539', 'ext': 'mp4', 'title': 'Pen Masturbation', 'comment_count': int, 'description': '', 'display_id': 'pen-masturbation', 'timestamp': 1406581861, 'upload_date': '20140728', 'uploader': 'ManyakisArt', 'duration': 5, 'age_limit': 18, 'thumbnail': r're:https?://.+\.jpg', 'uploader_id': 'manyakisart', 'uploader_url': 'https://xhamster.com/users/manyakisart', 'view_count': int, }, }] _WEBPAGE_TESTS = [{ # FIXME: Embed detection 'url': 'https://xhamster.com/awards/2023', 'info_dict': { 'id': 'xh2VnYn', 'ext': 'mp4', 'title': 'xHamster Awards 2023 - The Winners', 'age_limit': 18, 'comment_count': int, 'description': '', 'display_id': 'xhamster-awards-2023-the-winners', 'duration': 292, 'thumbnail': r're:https?://ic-vt-nss\.xhcdn\.com/.+', 'timestamp': 1700122082, 'upload_date': '20231116', 'uploader': 'xHamster', 'uploader_id': 'xhamster', 'uploader_url': 'https://xhamster.com/users/xhamster', 'view_count': int, }, 'params': {'skip_download': 'm3u8'}, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) video_url = self._search_regex( rf'href="(https?://xhamster\.com/(?:movies/{video_id}/[^"]*\.html|videos/[^/]*-{video_id})[^"]*)"', webpage, 'xhamster url', default=None) if not video_url: player_vars = self._parse_json( self._search_regex(r'vars\s*:\s*({.+?})\s*,\s*\n', webpage, 'vars'), video_id) video_url = dict_get(player_vars, ('downloadLink', 'homepageLink', 'commentsLink', 
'shareUrl')) return self.url_result(video_url, 'XHamster') class XHamsterUserIE(InfoExtractor): _VALID_URL = rf'https?://(?:[^/?#]+\.)?{XHamsterIE._DOMAINS}/(?:(?P<user>users)|creators)/(?P<id>[^/?#&]+)' _TESTS = [{ # Paginated user profile 'url': 'https://xhamster.com/users/netvideogirls/videos', 'info_dict': { 'id': 'netvideogirls', }, 'playlist_mincount': 267, }, { # Non-paginated user profile 'url': 'https://xhamster.com/users/firatkaan/videos', 'info_dict': { 'id': 'firatkaan', }, 'playlist_mincount': 0, }, { 'url': 'https://xhamster.com/creators/squirt-orgasm-69', 'info_dict': { 'id': 'squirt-orgasm-69', }, 'playlist_mincount': 46, }, { 'url': 'https://xhday.com/users/mobhunter', 'only_matching': True, }, { 'url': 'https://xhvid.com/users/pelushe21', 'only_matching': True, }] def _entries(self, user_id, is_user): prefix, suffix = ('users', 'videos') if is_user else ('creators', 'exclusive') next_page_url = f'https://xhamster.com/{prefix}/{user_id}/{suffix}/1' for pagenum in itertools.count(1): page = self._download_webpage( next_page_url, user_id, f'Downloading page {pagenum}') for video_tag in re.findall( r'(<a[^>]+class=["\'].*?\bvideo-thumb__image-container[^>]+>)', page): video = extract_attributes(video_tag) video_url = url_or_none(video.get('href')) if not video_url or not XHamsterIE.suitable(video_url): continue video_id = XHamsterIE._match_id(video_url) yield self.url_result( video_url, ie=XHamsterIE.ie_key(), video_id=video_id) mobj = re.search(r'<a[^>]+data-page=["\']next[^>]+>', page) if not mobj: break next_page = extract_attributes(mobj.group(0)) next_page_url = url_or_none(next_page.get('href')) if not next_page_url: break def _real_extract(self, url): user, user_id = self._match_valid_url(url).group('user', 'id') return self.playlist_result(self._entries(user_id, bool(user)), user_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/kaltura.py
yt_dlp/extractor/kaltura.py
import base64 import contextlib import json import re import urllib.parse from .common import InfoExtractor from ..utils import ( ExtractorError, clean_html, format_field, int_or_none, remove_start, smuggle_url, traverse_obj, unsmuggle_url, ) class KalturaIE(InfoExtractor): _VALID_URL = r'''(?x) (?: kaltura:(?P<partner_id>\w+):(?P<id>\w+)(?::(?P<player_type>\w+))?| https?:// (?:(?:www|cdnapi(?:sec)?)\.)?kaltura\.com(?::\d+)?/ (?: (?: # flash player index\.php/(?:kwidget|extwidget/preview)| # html5 player html5/html5lib/[^/]+/mwEmbedFrame\.php ) )(?:/(?P<path>[^?]+))?(?:\?(?P<query>.*))? ) ''' _SERVICE_URL = 'http://cdnapi.kaltura.com' _SERVICE_BASE = '/api_v3/service/multirequest' # See https://github.com/kaltura/server/blob/master/plugins/content/caption/base/lib/model/enums/CaptionType.php _CAPTION_TYPES = { 1: 'srt', 2: 'ttml', 3: 'vtt', } _TESTS = [{ 'url': 'kaltura:269692:1_1jc2y3e4', 'md5': '3adcbdb3dcc02d647539e53f284ba171', 'info_dict': { 'id': '1_1jc2y3e4', 'ext': 'mp4', 'title': 'Straight from the Heart', 'upload_date': '20131219', 'uploader_id': 'mlundberg@wolfgangsvault.com', 'description': 'The Allman Brothers Band, 12/16/1981', 'thumbnail': r're:https?://.+/thumbnail/.+', 'timestamp': int, }, 'skip': 'The access to this service is forbidden since the specified partner is blocked', }, { 'url': 'http://www.kaltura.com/index.php/kwidget/cache_st/1300318621/wid/_269692/uiconf_id/3873291/entry_id/1_1jc2y3e4', 'only_matching': True, }, { 'url': 'https://cdnapisec.kaltura.com/index.php/kwidget/wid/_557781/uiconf_id/22845202/entry_id/1_plr1syf3', 'only_matching': True, }, { 'url': 'https://cdnapisec.kaltura.com/html5/html5lib/v2.30.2/mwEmbedFrame.php/p/1337/uiconf_id/20540612/entry_id/1_sf5ovm7u?wid=_243342', 'only_matching': True, }, { # video with subtitles 'url': 'kaltura:111032:1_cw786r8q', 'only_matching': True, }, { # video with ttml subtitles (no fileExt) 'url': 'kaltura:1926081:0_l5ye1133', 'info_dict': { 'id': '0_l5ye1133', 'ext': 'mp4', 'title': 
'What Can You Do With Python?', 'upload_date': '20160221', 'uploader_id': 'stork', 'thumbnail': r're:https?://.+/thumbnail/.+', 'timestamp': int, 'subtitles': { 'en': [{ 'ext': 'ttml', }], }, }, 'skip': 'Gone. Maybe https://www.safaribooksonline.com/library/tutorials/introduction-to-python-anon/3469/', 'params': {'skip_download': True}, }, { 'url': 'https://www.kaltura.com/index.php/extwidget/preview/partner_id/1770401/uiconf_id/37307382/entry_id/0_58u8kme7/embed/iframe?&flashvars[streamerType]=auto', 'only_matching': True, }, { 'url': 'https://www.kaltura.com:443/index.php/extwidget/preview/partner_id/1770401/uiconf_id/37307382/entry_id/0_58u8kme7/embed/iframe?&flashvars[streamerType]=auto', 'only_matching': True, }, { # unavailable source format 'url': 'kaltura:513551:1_66x4rg7o', 'only_matching': True, }, { # html5lib URL using kwidget player 'url': 'https://cdnapisec.kaltura.com/html5/html5lib/v2.46/mwEmbedFrame.php/p/691292/uiconf_id/20499062/entry_id/0_c076mna6?wid=_691292&iframeembed=true&playerId=kaltura_player_1420508608&entry_id=0_c076mna6&flashvars%5BakamaiHD.loadingPolicy%5D=preInitialize&flashvars%5BakamaiHD.asyncInit%5D=true&flashvars%5BstreamerType%5D=hdnetwork', 'info_dict': { 'id': '0_c076mna6', 'ext': 'mp4', 'title': 'md5:4883e7acbcbf42583a2dddc97dee4855', 'duration': 3608, 'uploader_id': 'commons@swinburne.edu.au', 'timestamp': 1408086874, 'view_count': int, 'upload_date': '20140815', 'thumbnail': r're:https?://cfvod\.kaltura\.com/.+', }, }, { # html5lib playlist URL using kwidget player 'url': 
'https://cdnapisec.kaltura.com/html5/html5lib/v2.89/mwEmbedFrame.php/p/2019031/uiconf_id/40436601?wid=1_4j3m32cv&iframeembed=true&playerId=kaltura_player_&flashvars[playlistAPI.kpl0Id]=1_jovey5nu&flashvars[ks]=&&flashvars[imageDefaultDuration]=30&flashvars[localizationCode]=en&flashvars[leadWithHTML5]=true&flashvars[forceMobileHTML5]=true&flashvars[nextPrevBtn.plugin]=true&flashvars[hotspots.plugin]=true&flashvars[sideBarContainer.plugin]=true&flashvars[sideBarContainer.position]=left&flashvars[sideBarContainer.clickToClose]=true&flashvars[chapters.plugin]=true&flashvars[chapters.layout]=vertical&flashvars[chapters.thumbnailRotator]=false&flashvars[streamSelector.plugin]=true&flashvars[EmbedPlayer.SpinnerTarget]=videoHolder&flashvars[dualScreen.plugin]=true&flashvars[playlistAPI.playlistUrl]=https://canvasgatechtest.kaf.kaltura.com/playlist/details/{playlistAPI.kpl0Id}/categoryid/126428551', 'info_dict': { 'id': '1_jovey5nu', 'title': '00-00 Introduction', }, 'playlist': [ { 'info_dict': { 'id': '1_b1y5hlvx', 'ext': 'mp4', 'title': 'CS7646_00-00 Introductio_Introduction', 'duration': 91, 'thumbnail': r're:https?://cfvod\.kaltura\.com/.+', 'view_count': int, 'timestamp': 1533154447, 'upload_date': '20180801', 'uploader_id': 'djoyner3', }, }, { 'info_dict': { 'id': '1_jfb7mdpn', 'ext': 'mp4', 'title': 'CS7646_00-00 Introductio_Three parts to the course', 'duration': 63, 'thumbnail': r're:https?://cfvod\.kaltura\.com/.+', 'view_count': int, 'timestamp': 1533154489, 'upload_date': '20180801', 'uploader_id': 'djoyner3', }, }, { 'info_dict': { 'id': '1_8xflxdp7', 'ext': 'mp4', 'title': 'CS7646_00-00 Introductio_Textbooks', 'duration': 37, 'thumbnail': r're:https?://cfvod\.kaltura\.com/.+', 'view_count': int, 'timestamp': 1533154512, 'upload_date': '20180801', 'uploader_id': 'djoyner3', }, }, { 'info_dict': { 'id': '1_3hqew8kn', 'ext': 'mp4', 'title': 'CS7646_00-00 Introductio_Prerequisites', 'duration': 49, 'thumbnail': r're:https?://cfvod\.kaltura\.com/.+', 
'view_count': int, 'timestamp': 1533154536, 'upload_date': '20180801', 'uploader_id': 'djoyner3', }, }, ], }] _WEBPAGE_TESTS = [{ 'url': 'https://www.cornell.edu/VIDEO/nima-arkani-hamed-standard-models-of-particle-physics', 'info_dict': { 'id': '1_sgtvehim', 'ext': 'mp4', 'title': 'Our "Standard Models" of particle physics and cosmology', 'duration': 5420, 'thumbnail': r're:https?://cdnsecakmi\.kaltura\.com/.+', 'timestamp': 1321158993, 'upload_date': '20111113', 'uploader_id': 'kps1', 'view_count': int, }, }, { 'url': 'https://www.oreilly.com/ideas/my-cloud-makes-pretty-pictures', 'info_dict': { 'id': '0_utuok90b', 'ext': 'mp4', 'title': '06_matthew_brender_raj_dutt', 'duration': 331, 'thumbnail': r're:https?://cfvod\.kaltura\.com/.+', 'timestamp': 1466638791, 'upload_date': '20160622', 'uploader_id': '', 'view_count': int, }, }, { 'url': 'https://fod.infobase.com/p_ViewPlaylist.aspx?AssignmentID=NUN8ZY', 'info_dict': { 'id': '0_izeg5utt', 'ext': 'mp4', 'title': '35871', 'duration': 3403, 'thumbnail': r're:https?://cfvod\.kaltura\.com/.+', 'timestamp': 1355743100, 'upload_date': '20121217', 'uploader_id': 'cplapp@learn360.com', 'view_count': int, }, }, { 'url': 'https://www.cns.nyu.edu/~eero/math-tools17/Videos/lecture-05sep2017.html', 'info_dict': { 'id': '1_9gzouybz', 'ext': 'mp4', 'title': 'lecture-05sep2017', 'duration': 7219, 'thumbnail': r're:https?://cfvod\.kaltura\.com/.+', 'timestamp': 1505340777, 'upload_date': '20170913', 'uploader_id': 'eps2', 'view_count': int, }, }] @classmethod def _extract_embed_urls(cls, url, webpage): # Embed codes: https://knowledge.kaltura.com/embedding-kaltura-media-players-your-site finditer = ( list(re.finditer( r'''(?xs) kWidget\.(?:thumb)?[Ee]mbed\( \{.*? (?P<q1>['"])wid(?P=q1)\s*:\s* (?P<q2>['"])_?(?P<partner_id>(?:(?!(?P=q2)).)+)(?P=q2),.*? 
(?P<q3>['"])entry_?[Ii]d(?P=q3)\s*:\s* (?P<q4>['"])(?P<id>(?:(?!(?P=q4)).)+)(?P=q4)(?:,|\s*\}) ''', webpage)) or list(re.finditer( r'''(?xs) (?P<q1>["']) (?:https?:)?//cdnapi(?:sec)?\.kaltura\.com(?::\d+)?/(?:(?!(?P=q1)).)*\b(?:p|partner_id)/(?P<partner_id>\d+)(?:(?!(?P=q1)).)* (?P=q1).*? (?: (?: entry_?[Ii]d| (?P<q2>["'])entry_?[Ii]d(?P=q2) )\s*:\s*| \[\s*(?P<q2_1>["'])entry_?[Ii]d(?P=q2_1)\s*\]\s*=\s* ) (?P<q3>["'])(?P<id>(?:(?!(?P=q3)).)+)(?P=q3) ''', webpage)) or list(re.finditer( r'''(?xs) <(?:iframe[^>]+src|meta[^>]+\bcontent)=(?P<q1>["'])\s* (?:https?:)?//(?:(?:www|cdnapi(?:sec)?)\.)?kaltura\.com/(?:(?!(?P=q1)).)*\b(?:p|partner_id)/(?P<partner_id>\d+) (?:(?!(?P=q1)).)* [?&;]entry_id=(?P<id>(?:(?!(?P=q1))[^&])+) (?:(?!(?P=q1)).)* (?P=q1) ''', webpage)) ) urls = [] for mobj in finditer: embed_info = mobj.groupdict() for k, v in embed_info.items(): if v: embed_info[k] = v.strip() embed_url = 'kaltura:{partner_id}:{id}'.format(**embed_info) escaped_pid = re.escape(embed_info['partner_id']) service_mobj = re.search( rf'<script[^>]+src=(["\'])(?P<id>(?:https?:)?//(?:(?!\1).)+)/p/{escaped_pid}/sp/{escaped_pid}00/embedIframeJs', webpage) if service_mobj: embed_url = smuggle_url(embed_url, {'service_url': service_mobj.group('id')}) urls.append(embed_url) return urls def _kaltura_api_call(self, video_id, actions, service_url=None, **kwargs): params = actions[0] params.update(dict(enumerate(actions[1:], start=1))) data = self._download_json( (service_url or self._SERVICE_URL) + self._SERVICE_BASE, video_id, data=json.dumps(params).encode(), headers={ 'Content-Type': 'application/json', 'Accept-Encoding': 'gzip, deflate, br', }, **kwargs) for idx, status in enumerate(data): if not isinstance(status, dict): continue if status.get('objectType') == 'KalturaAPIException': raise ExtractorError( '{} said: {} ({})'.format(self.IE_NAME, status['message'], idx)) data[1] = traverse_obj(data, (1, 'objects', 0)) return data def _get_video_info(self, video_id, partner_id, 
service_url=None, player_type='html5'): assert player_type in ('html5', 'kwidget') if player_type == 'kwidget': return self._get_video_info_kwidget(video_id, partner_id, service_url) return self._get_video_info_html5(video_id, partner_id, service_url) def _get_video_info_html5(self, video_id, partner_id, service_url=None): actions = [ { 'apiVersion': '3.3.0', 'clientTag': 'html5:v3.1.0', 'format': 1, # JSON, 2 = XML, 3 = PHP 'ks': '', 'partnerId': partner_id, }, { 'expiry': 86400, 'service': 'session', 'action': 'startWidgetSession', 'widgetId': self._build_widget_id(partner_id), }, # info { 'action': 'list', 'filter': {'redirectFromEntryId': video_id}, 'service': 'baseentry', 'ks': '{1:result:ks}', 'responseProfile': { 'type': 1, 'fields': 'createdAt,dataUrl,duration,name,plays,thumbnailUrl,userId', }, }, # flavor_assets { 'action': 'getbyentryid', 'entryId': video_id, 'service': 'flavorAsset', 'ks': '{1:result:ks}', }, # captions { 'action': 'list', 'filter:entryIdEqual': video_id, 'service': 'caption_captionasset', 'ks': '{1:result:ks}', }, ] return self._kaltura_api_call( video_id, actions, service_url, note='Downloading video info JSON (Kaltura html5 player)') def _get_video_info_kwidget(self, video_id, partner_id, service_url=None): actions = [ { 'service': 'multirequest', 'apiVersion': '3.1', 'expiry': 86400, 'clientTag': 'kwidget:v2.89', 'format': 1, # JSON, 2 = XML, 3 = PHP 'ignoreNull': 1, 'action': 'null', }, # header { 'expiry': 86400, 'service': 'session', 'action': 'startWidgetSession', 'widgetId': self._build_widget_id(partner_id), }, # (empty) { 'expiry': 86400, 'service': 'session', 'action': 'startwidgetsession', 'widgetId': self._build_widget_id(partner_id), 'format': 9, 'apiVersion': '3.1', 'clientTag': 'kwidget:v2.89', 'ignoreNull': 1, 'ks': '{1:result:ks}', }, # info { 'action': 'list', 'filter': {'redirectFromEntryId': video_id}, 'service': 'baseentry', 'ks': '{1:result:ks}', 'responseProfile': { 'type': 1, 'fields': 
'createdAt,dataUrl,duration,name,plays,thumbnailUrl,userId', }, }, # flavor_assets { 'action': 'getbyentryid', 'entryId': video_id, 'service': 'flavorAsset', 'ks': '{1:result:ks}', }, # captions { 'action': 'list', 'filter:entryIdEqual': video_id, 'service': 'caption_captionasset', 'ks': '{1:result:ks}', }, ] # second object (representing the second start widget session) is None header, _, _info, flavor_assets, captions = self._kaltura_api_call( video_id, actions, service_url, note='Downloading video info JSON (Kaltura kwidget player)') info = _info['objects'][0] return header, info, flavor_assets, captions def _build_widget_id(self, partner_id): return partner_id if '_' in partner_id else f'_{partner_id}' IFRAME_PACKAGE_DATA_REGEX = r'window\.kalturaIframePackageData\s*=' def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) mobj = self._match_valid_url(url) partner_id, entry_id, player_type = mobj.group('partner_id', 'id', 'player_type') ks, captions = None, None if not player_type: player_type = 'kwidget' if 'html5lib/v2' in url else 'html5' if partner_id and entry_id: _, info, flavor_assets, captions = self._get_video_info(entry_id, partner_id, smuggled_data.get('service_url'), player_type=player_type) else: path, query = mobj.group('path', 'query') if not path and not query: raise ExtractorError('Invalid URL', expected=True) params = {} if query: params = urllib.parse.parse_qs(query) if path: splitted_path = path.split('/') params.update(dict(zip(splitted_path[::2], [[v] for v in splitted_path[1::2]]))) # noqa: B905 if 'wid' in params: partner_id = remove_start(params['wid'][0], '_') elif 'p' in params: partner_id = params['p'][0] elif 'partner_id' in params: partner_id = params['partner_id'][0] else: raise ExtractorError('Invalid URL', expected=True) if 'entry_id' in params: entry_id = params['entry_id'][0] _, info, flavor_assets, captions = self._get_video_info(entry_id, partner_id, player_type=player_type) elif 'uiconf_id' in params and 
'flashvars[referenceId]' in params: reference_id = params['flashvars[referenceId]'][0] webpage = self._download_webpage(url, reference_id) entry_data = self._search_json( self.IFRAME_PACKAGE_DATA_REGEX, webpage, 'kalturaIframePackageData', reference_id)['entryResult'] info, flavor_assets = entry_data['meta'], entry_data['contextData']['flavorAssets'] entry_id = info['id'] # Unfortunately, data returned in kalturaIframePackageData lacks # captions so we will try requesting the complete data using # regular approach since we now know the entry_id # Even if this fails we already have everything extracted # apart from captions and can process at least with this with contextlib.suppress(ExtractorError): _, info, flavor_assets, captions = self._get_video_info( entry_id, partner_id, player_type=player_type) elif 'uiconf_id' in params and 'flashvars[playlistAPI.kpl0Id]' in params: playlist_id = params['flashvars[playlistAPI.kpl0Id]'][0] webpage = self._download_webpage(url, playlist_id) playlist_data = self._search_json( self.IFRAME_PACKAGE_DATA_REGEX, webpage, 'kalturaIframePackageData', playlist_id)['playlistResult'] return self.playlist_from_matches( traverse_obj(playlist_data, (playlist_id, 'items', ..., 'id')), playlist_id, traverse_obj(playlist_data, (playlist_id, 'name')), ie=KalturaIE, getter=lambda x: f'kaltura:{partner_id}:{x}:{player_type}') else: raise ExtractorError('Invalid URL', expected=True) ks = params.get('flashvars[ks]', [None])[0] return self._per_video_extract(smuggled_data, entry_id, info, ks, flavor_assets, captions) def _per_video_extract(self, smuggled_data, entry_id, info, ks, flavor_assets, captions): source_url = smuggled_data.get('source_url') if source_url: referrer = base64.b64encode( '://'.join(urllib.parse.urlparse(source_url)[:2]) .encode()).decode('utf-8') else: referrer = None def sign_url(unsigned_url): if ks: unsigned_url += f'/ks/{ks}' if referrer: unsigned_url += f'?referrer={referrer}' return unsigned_url data_url = info['dataUrl'] 
if '/flvclipper/' in data_url: data_url = re.sub(r'/flvclipper/.*', '/serveFlavor', data_url) formats = [] subtitles = {} for f in flavor_assets: # Continue if asset is not ready if f.get('status') != 2: continue # Original format that's not available (e.g. kaltura:1926081:0_c03e1b5g) # skip for now. if f.get('fileExt') == 'chun': continue # DRM-protected video, cannot be decrypted if not self.get_param('allow_unplayable_formats') and f.get('fileExt') == 'wvm': continue if not f.get('fileExt'): # QT indicates QuickTime; some videos have broken fileExt if f.get('containerFormat') == 'qt': f['fileExt'] = 'mov' else: f['fileExt'] = 'mp4' video_url = sign_url( '{}/flavorId/{}'.format(data_url, f['id'])) format_id = '{fileExt}-{bitrate}'.format(**f) # Source format may not be available (e.g. kaltura:513551:1_66x4rg7o) if f.get('isOriginal') is True and not self._is_valid_url( video_url, entry_id, format_id): continue # audio-only has no videoCodecId (e.g. kaltura:1926081:0_c03e1b5g # -f mp4-56) vcodec = 'none' if 'videoCodecId' not in f and f.get( 'frameRate') == 0 else f.get('videoCodecId') formats.append({ 'format_id': format_id, 'ext': f.get('fileExt'), 'tbr': int_or_none(f['bitrate']), 'fps': int_or_none(f.get('frameRate')), 'filesize_approx': int_or_none(f.get('size'), invscale=1024), 'container': f.get('containerFormat'), 'vcodec': vcodec, 'height': int_or_none(f.get('height')), 'width': int_or_none(f.get('width')), 'url': video_url, }) if '/playManifest/' in data_url: m3u8_url = sign_url(data_url.replace( 'format/url', 'format/applehttp')) fmts, subs = self._extract_m3u8_formats_and_subtitles( m3u8_url, entry_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) if captions: for caption in captions.get('objects', []): # Continue if caption is not ready if caption.get('status') != 2: continue if not caption.get('id'): continue caption_format = int_or_none(caption.get('format')) 
subtitles.setdefault(caption.get('languageCode') or caption.get('language'), []).append({ 'url': '{}/api_v3/service/caption_captionasset/action/serve/captionAssetId/{}'.format(self._SERVICE_URL, caption['id']), 'ext': caption.get('fileExt') or self._CAPTION_TYPES.get(caption_format) or 'ttml', }) return { 'id': entry_id, 'title': info['name'], 'formats': formats, 'subtitles': subtitles, 'description': clean_html(info.get('description')), 'thumbnail': info.get('thumbnailUrl'), 'duration': info.get('duration'), 'timestamp': info.get('createdAt'), 'uploader_id': format_field(info, 'userId', ignore=('None', None)), 'view_count': int_or_none(info.get('plays')), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/viqeo.py
yt_dlp/extractor/viqeo.py
from .common import InfoExtractor from ..utils import ( int_or_none, str_or_none, url_or_none, ) class ViqeoIE(InfoExtractor): _WORKING = False _VALID_URL = r'''(?x) (?: viqeo:| https?://cdn\.viqeo\.tv/embed/*\?.*?\bvid=| https?://api\.viqeo\.tv/v\d+/data/startup?.*?\bvideo(?:%5B%5D|\[\])= ) (?P<id>[\da-f]+) ''' _EMBED_REGEX = [r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//cdn\.viqeo\.tv/embed/*\?.*?\bvid=[\da-f]+.*?)\1'] _TESTS = [{ 'url': 'https://cdn.viqeo.tv/embed/?vid=cde96f09d25f39bee837', 'md5': 'a169dd1a6426b350dca4296226f21e76', 'info_dict': { 'id': 'cde96f09d25f39bee837', 'ext': 'mp4', 'title': 'cde96f09d25f39bee837', 'thumbnail': r're:https?://.+\.jpg', 'duration': 76, }, }, { 'url': 'viqeo:cde96f09d25f39bee837', 'only_matching': True, }, { 'url': 'https://api.viqeo.tv/v1/data/startup?video%5B%5D=71bbec412ade45c3216c&profile=112', 'only_matching': True, }] _WEBPAGE_TESTS = [{ 'url': 'https://viqeo.tv/', 'info_dict': { 'id': 'viqeo', 'title': 'Viqeo video platform', 'age_limit': 0, 'description': 'md5:e8e06e20df92ed66febeaef2533a0d5d', 'thumbnail': r're:https?://static\.tildacdn\.com/.+\.png', 'timestamp': 1751479769, 'upload_date': '20250702', }, 'playlist_count': 3, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( f'https://cdn.viqeo.tv/embed/?vid={video_id}', video_id) data = self._parse_json( self._search_regex( r'SLOT_DATA\s*=\s*({.+?})\s*;', webpage, 'slot data'), video_id) formats = [] thumbnails = [] for media_file in data['mediaFiles']: if not isinstance(media_file, dict): continue media_url = url_or_none(media_file.get('url')) if not media_url or not media_url.startswith(('http', '//')): continue media_type = str_or_none(media_file.get('type')) if not media_type: continue media_kind = media_type.split('/')[0].lower() f = { 'url': media_url, 'width': int_or_none(media_file.get('width')), 'height': int_or_none(media_file.get('height')), } format_id = str_or_none(media_file.get('quality')) if 
media_kind == 'image': f['id'] = format_id thumbnails.append(f) elif media_kind in ('video', 'audio'): is_audio = media_kind == 'audio' f.update({ 'format_id': 'audio' if is_audio else format_id, 'fps': int_or_none(media_file.get('fps')), 'vcodec': 'none' if is_audio else None, }) formats.append(f) duration = int_or_none(data.get('duration')) return { 'id': video_id, 'title': video_id, 'duration': duration, 'thumbnails': thumbnails, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nowness.py
yt_dlp/extractor/nowness.py
from .brightcove import ( BrightcoveLegacyIE, BrightcoveNewIE, ) from .common import InfoExtractor from ..networking import Request from ..utils import ExtractorError class NownessBaseIE(InfoExtractor): def _extract_url_result(self, post): if post['type'] == 'video': for media in post['media']: if media['type'] == 'video': video_id = media['content'] source = media['source'] if source == 'brightcove': player_code = self._download_webpage( f'http://www.nowness.com/iframe?id={video_id}', video_id, note='Downloading player JavaScript', errnote='Unable to download player JavaScript') bc_url = BrightcoveLegacyIE._extract_brightcove_url(player_code) if bc_url: return self.url_result(bc_url, BrightcoveLegacyIE.ie_key()) bc_url = BrightcoveNewIE._extract_url(self, player_code) if bc_url: return self.url_result(bc_url, BrightcoveNewIE.ie_key()) raise ExtractorError('Could not find player definition') elif source == 'vimeo': return self.url_result(f'http://vimeo.com/{video_id}', 'Vimeo') elif source == 'youtube': return self.url_result(video_id, 'Youtube') elif source == 'cinematique': # yt-dlp currently doesn't support cinematique # return self.url_result('http://cinematique.com/embed/%s' % video_id, 'Cinematique') pass def _api_request(self, url, request_path): display_id = self._match_id(url) request = Request( 'http://api.nowness.com/api/' + request_path % display_id, headers={ 'X-Nowness-Language': 'zh-cn' if 'cn.nowness.com' in url else 'en-us', }) return display_id, self._download_json(request, display_id) class NownessIE(NownessBaseIE): IE_NAME = 'nowness' _VALID_URL = r'https?://(?:(?:www|cn)\.)?nowness\.com/(?:story|(?:series|category)/[^/]+)/(?P<id>[^/]+?)(?:$|[?#])' _TESTS = [{ 'url': 'https://www.nowness.com/story/candor-the-art-of-gesticulation', 'md5': '068bc0202558c2e391924cb8cc470676', 'info_dict': { 'id': '2520295746001', 'ext': 'mp4', 'title': 'Candor: The Art of Gesticulation', 'description': 'Candor: The Art of Gesticulation', 'thumbnail': 
r're:^https?://.*\.jpg', 'timestamp': 1446745676, 'upload_date': '20151105', 'uploader_id': '2385340575001', }, 'add_ie': ['BrightcoveNew'], }, { 'url': 'https://cn.nowness.com/story/kasper-bjorke-ft-jaakko-eino-kalevi-tnr', 'md5': 'e79cf125e387216f86b2e0a5b5c63aa3', 'info_dict': { 'id': '3716354522001', 'ext': 'mp4', 'title': 'Kasper Bjørke ft. Jaakko Eino Kalevi: TNR', 'description': 'Kasper Bjørke ft. Jaakko Eino Kalevi: TNR', 'thumbnail': r're:^https?://.*\.jpg', 'timestamp': 1407315371, 'upload_date': '20140806', 'uploader_id': '2385340575001', }, 'add_ie': ['BrightcoveNew'], }, { # vimeo 'url': 'https://www.nowness.com/series/nowness-picks/jean-luc-godard-supercut', 'md5': '9a5a6a8edf806407e411296ab6bc2a49', 'info_dict': { 'id': '130020913', 'ext': 'mp4', 'title': 'Bleu, Blanc, Rouge - A Godard Supercut', 'description': 'md5:f0ea5f1857dffca02dbd37875d742cec', 'thumbnail': r're:^https?://.*\.jpg', 'upload_date': '20150607', 'uploader': 'Cinema Sem Lei', 'uploader_id': 'cinemasemlei', }, 'add_ie': ['Vimeo'], }] def _real_extract(self, url): _, post = self._api_request(url, 'post/getBySlug/%s') return self._extract_url_result(post) class NownessPlaylistIE(NownessBaseIE): IE_NAME = 'nowness:playlist' _VALID_URL = r'https?://(?:(?:www|cn)\.)?nowness\.com/playlist/(?P<id>\d+)' _TEST = { 'url': 'https://www.nowness.com/playlist/3286/i-guess-thats-why-they-call-it-the-blues', 'info_dict': { 'id': '3286', }, 'playlist_mincount': 8, } def _real_extract(self, url): playlist_id, playlist = self._api_request(url, 'post?PlaylistId=%s') entries = [self._extract_url_result(item) for item in playlist['items']] return self.playlist_result(entries, playlist_id) class NownessSeriesIE(NownessBaseIE): IE_NAME = 'nowness:series' _VALID_URL = r'https?://(?:(?:www|cn)\.)?nowness\.com/series/(?P<id>[^/]+?)(?:$|[?#])' _TEST = { 'url': 'https://www.nowness.com/series/60-seconds', 'info_dict': { 'id': '60', 'title': '60 Seconds', 'description': 'One-minute wisdom in a new NOWNESS 
series', }, 'playlist_mincount': 4, } def _real_extract(self, url): _, series = self._api_request(url, 'series/getBySlug/%s') entries = [self._extract_url_result(post) for post in series['posts']] series_title = None series_description = None translations = series.get('translations', []) if translations: series_title = translations[0].get('title') or translations[0]['seoTitle'] series_description = translations[0].get('seoDescription') return self.playlist_result( entries, str(series['id']), series_title, series_description)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/mediaset.py
yt_dlp/extractor/mediaset.py
import functools import re from .theplatform import ThePlatformBaseIE from ..utils import ( ExtractorError, GeoRestrictedError, OnDemandPagedList, int_or_none, try_get, update_url_query, urljoin, ) class MediasetIE(ThePlatformBaseIE): _TP_TLD = 'eu' _GUID_RE = r'F[0-9A-Z]{15}' _VALID_URL = rf'''(?x) (?: mediaset:| https?:// (?:\w+\.)+mediaset\.it/ (?: (?:video|on-demand|movie)/(?:[^/]+/)+[^/]+_| player/(?:v\d+/)?index\.html\?\S*?\bprogramGuid= ) )(?P<id>{_GUID_RE}) ''' _EMBED_REGEX = [ rf'<iframe[^>]+src=[\'"](?P<url>(?:https?:)?//(?:\w+\.)+mediaset\.it/player/(?:v\d+/)?index\.html\?\S*?programGuid={_GUID_RE})[\'"&]', ] _TESTS = [{ # full episode 'url': 'https://mediasetinfinity.mediaset.it/video/mrwronglezionidamore/episodio-1_F310575103000102', 'md5': 'a7e75c6384871f322adb781d3bd72c26', 'info_dict': { 'id': 'F310575103000102', 'ext': 'mp4', 'title': 'Episodio 1', 'description': 'md5:e8017b7d7194e9bfb75299c2b8d81e02', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 2682.0, 'upload_date': '20210530', 'series': 'Mr Wrong - Lezioni d\'amore', 'timestamp': 1622413946, 'uploader': 'Canale 5', 'uploader_id': 'C5', 'season': 'Season 1', 'episode': 'Episode 1', 'season_number': 1, 'episode_number': 1, 'chapters': [{'start_time': 0.0, 'end_time': 439.88}, {'start_time': 439.88, 'end_time': 1685.84}, {'start_time': 1685.84, 'end_time': 2682.0}], }, }, { 'url': 'https://mediasetinfinity.mediaset.it/video/matrix/puntata-del-25-maggio_F309013801000501', 'md5': '1276f966ac423d16ba255ce867de073e', 'info_dict': { 'id': 'F309013801000501', 'ext': 'mp4', 'title': 'Puntata del 25 maggio', 'description': 'md5:ee2e456e3eb1dba5e814596655bb5296', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 6565.008, 'upload_date': '20200903', 'series': 'Matrix', 'timestamp': 1599172492, 'uploader': 'Canale 5', 'uploader_id': 'C5', 'season': 'Season 5', 'episode': 'Episode 5', 'season_number': 5, 'episode_number': 5, 'chapters': [{'start_time': 0.0, 'end_time': 3409.08}, {'start_time': 3409.08, 
'end_time': 6565.008}], 'categories': ['Informazione'], }, }, { # DRM 'url': 'https://mediasetinfinity.mediaset.it/movie/selvaggi/selvaggi_F006474501000101', 'info_dict': { 'id': 'F006474501000101', 'ext': 'mp4', 'title': 'Selvaggi', 'description': 'md5:cfdedbbfdd12d4d0e5dcf1fa1b75284f', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 5233.01, 'upload_date': '20210729', 'timestamp': 1627594716, 'uploader': 'Cine34', 'uploader_id': 'B6', 'chapters': [{'start_time': 0.0, 'end_time': 1938.56}, {'start_time': 1938.56, 'end_time': 5233.01}], }, 'params': { 'ignore_no_formats_error': True, }, 'expected_warnings': [ 'None of the available releases match the specified AssetType, ProtectionScheme, and/or Format preferences', 'Content behind paywall and DRM', ], 'skip': True, }, { # old domain 'url': 'https://www.mediasetplay.mediaset.it/video/mrwronglezionidamore/episodio-1_F310575103000102', 'only_matching': True, }, { # iframe 'url': 'https://static3.mediasetplay.mediaset.it/player/index.html?appKey=5ad3966b1de1c4000d5cec48&programGuid=FAFU000000665924&id=665924', 'only_matching': True, }, { 'url': 'mediaset:FAFU000000665924', 'only_matching': True, }] _WEBPAGE_TESTS = [{ # Mediaset embed 'url': 'http://www.tgcom24.mediaset.it/politica/serracchiani-voglio-vivere-in-una-societa-aperta-reazioni-sproporzionate-_3071354-201702a.shtml', 'info_dict': { 'id': 'FD00000000004929', 'ext': 'mp4', 'title': 'Serracchiani: "Voglio vivere in una società aperta, con tutela del patto di fiducia"', 'duration': 67.013, 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'Mediaset Play', 'uploader_id': 'QY', 'upload_date': '20201005', 'timestamp': 1601866168, 'chapters': [], }, 'params': { 'skip_download': True, }, 'skip': 'Dead link', }, { # WittyTV embed 'url': 'https://www.wittytv.it/mauriziocostanzoshow/ultima-puntata-venerdi-25-novembre/', 'info_dict': { 'id': 'F312172801000801', 'ext': 'mp4', 'title': 'Ultima puntata - Venerdì 25 novembre', 'description': 'Una serata all\'insegna 
della musica e del buonumore ma non priva di spunti di riflessione', 'duration': 6203.01, 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'Canale 5', 'uploader_id': 'C5', 'upload_date': '20221126', 'timestamp': 1669428689, 'chapters': list, 'series': 'Maurizio Costanzo Show', 'season': 'Season 12', 'season_number': 12, 'episode': 'Episode 8', 'episode_number': 8, 'categories': ['Intrattenimento'], }, 'params': { 'skip_download': True, }, }] def _parse_smil_formats_and_subtitles( self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None): for video in smil.findall(self._xpath_ns('.//video', namespace)): video.attrib['src'] = re.sub(r'(https?://vod05)t(-mediaset-it\.akamaized\.net/.+?.mpd)\?.+', r'\1\2', video.attrib['src']) return super()._parse_smil_formats_and_subtitles( smil, smil_url, video_id, namespace, f4m_params, transform_rtmp_url) def _check_drm_formats(self, tp_formats, video_id): has_nondrm, drm_manifest = False, '' for f in tp_formats: if '_sampleaes/' in (f.get('manifest_url') or ''): drm_manifest = drm_manifest or f['manifest_url'] f['has_drm'] = True if not f.get('has_drm') and f.get('manifest_url'): has_nondrm = True nodrm_manifest = re.sub(r'_sampleaes/(\w+)_fp_', r'/\1_no_', drm_manifest) if has_nondrm or nodrm_manifest == drm_manifest: return tp_formats.extend(self._extract_m3u8_formats( nodrm_manifest, video_id, m3u8_id='hls', fatal=False) or []) def _real_extract(self, url): guid = self._match_id(url) tp_path = f'PR1GhC/media/guid/2702976343/{guid}' info = self._extract_theplatform_metadata(tp_path, guid) formats = [] subtitles = {} first_e = geo_e = None asset_type = 'geoNo:HD,browser,geoIT|geoNo:HD,geoIT|geoNo:SD,browser,geoIT|geoNo:SD,geoIT|geoNo|HD|SD' # TODO: fixup ISM+none manifest URLs for f in ('MPEG4', 'MPEG-DASH', 'M3U'): try: tp_formats, tp_subtitles = self._extract_theplatform_smil( update_url_query(f'http://link.theplatform.{self._TP_TLD}/s/{tp_path}', { 'mbr': 'true', 'formats': f, 'assetTypes': 
asset_type, }), guid, f'Downloading {f.split("+")[0]} SMIL data') except ExtractorError as e: if e.orig_msg == 'None of the available releases match the specified AssetType, ProtectionScheme, and/or Format preferences': e.orig_msg = 'This video is DRM protected' if not geo_e and isinstance(e, GeoRestrictedError): geo_e = e if not first_e: first_e = e continue self._check_drm_formats(tp_formats, guid) formats.extend(tp_formats) subtitles = self._merge_subtitles(subtitles, tp_subtitles) # check for errors and report them if (first_e or geo_e) and not formats: raise geo_e or first_e feed_data = self._download_json( f'https://feed.entertainment.tv.theplatform.eu/f/PR1GhC/mediaset-prod-all-programs-v2/guid/-/{guid}', guid, fatal=False) if feed_data: publish_info = feed_data.get('mediasetprogram$publishInfo') or {} thumbnails = feed_data.get('thumbnails') or {} thumbnail = None for key, value in thumbnails.items(): if key.startswith('image_keyframe_poster-'): thumbnail = value.get('url') break info.update({ 'description': info.get('description') or feed_data.get('description') or feed_data.get('longDescription'), 'uploader': publish_info.get('description'), 'uploader_id': publish_info.get('channel'), 'view_count': int_or_none(feed_data.get('mediasetprogram$numberOfViews')), 'thumbnail': thumbnail, }) if feed_data.get('programType') == 'episode': info.update({ 'episode_number': int_or_none( feed_data.get('tvSeasonEpisodeNumber')), 'season_number': int_or_none( feed_data.get('tvSeasonNumber')), 'series': feed_data.get('mediasetprogram$brandTitle'), }) info.update({ 'id': guid, 'formats': formats, 'subtitles': subtitles, }) return info class MediasetShowIE(MediasetIE): # XXX: Do not subclass from concrete IE _VALID_URL = r'''(?x) (?: https?:// (\w+\.)+mediaset\.it/ (?: (?:fiction|programmi-tv|serie-tv|kids)/(?:.+?/)? (?:[a-z-]+)_SE(?P<id>\d{12}) (?:,ST(?P<st>\d{12}))? 
(?:,sb(?P<sb>\d{9}))?$ ) ) ''' _TESTS = [{ # TV Show webpage (general webpage) 'url': 'https://mediasetinfinity.mediaset.it/programmi-tv/leiene/leiene_SE000000000061', 'info_dict': { 'id': '000000000061', 'title': 'Le Iene 2022/2023', }, 'playlist_mincount': 6, }, { # TV Show webpage (specific season) 'url': 'https://mediasetinfinity.mediaset.it/programmi-tv/leiene/leiene_SE000000000061,ST000000002763', 'info_dict': { 'id': '000000002763', 'title': 'Le Iene 2021/2022', }, 'playlist_mincount': 7, }, { # TV Show specific playlist (with multiple pages) 'url': 'https://mediasetinfinity.mediaset.it/programmi-tv/leiene/iservizi_SE000000000061,ST000000002763,sb100013375', 'info_dict': { 'id': '100013375', 'title': 'I servizi', }, 'playlist_mincount': 50, }] _BY_SUBBRAND = 'https://feed.entertainment.tv.theplatform.eu/f/PR1GhC/mediaset-prod-all-programs-v2?byCustomValue={subBrandId}{%s}&sort=:publishInfo_lastPublished|desc,tvSeasonEpisodeNumber|desc&range=%d-%d' _PAGE_SIZE = 25 def _fetch_page(self, sb, page): lower_limit = page * self._PAGE_SIZE + 1 upper_limit = lower_limit + self._PAGE_SIZE - 1 content = self._download_json( self._BY_SUBBRAND % (sb, lower_limit, upper_limit), sb) for entry in content.get('entries') or []: yield self.url_result( 'mediaset:' + entry['guid'], playlist_title=entry['mediasetprogram$subBrandDescription']) def _real_extract(self, url): playlist_id, st, sb = self._match_valid_url(url).group('id', 'st', 'sb') if not sb: page = self._download_webpage(url, st or playlist_id) entries = [self.url_result(urljoin('https://mediasetinfinity.mediaset.it', url)) for url in re.findall(r'href="([^<>=]+SE\d{12},ST\d{12},sb\d{9})">[^<]+<', page)] title = self._html_extract_title(page).split('|')[0].strip() return self.playlist_result(entries, st or playlist_id, title) entries = OnDemandPagedList( functools.partial(self._fetch_page, sb), self._PAGE_SIZE) title = try_get(entries, lambda x: x[0]['playlist_title']) return self.playlist_result(entries, sb, title)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/vidflex.py
yt_dlp/extractor/vidflex.py
import base64 import json from .common import InfoExtractor from ..utils import ( int_or_none, join_nonempty, mimetype2ext, url_or_none, ) from ..utils.traversal import traverse_obj class VidflexIE(InfoExtractor): _DOMAINS_RE = [ r'[^.]+\.vidflex\.tv', r'(?:www\.)?acactv\.ca', r'(?:www\.)?albertalacrossetv\.com', r'(?:www\.)?cjfltv\.com', r'(?:www\.)?figureitoutbaseball\.com', r'(?:www\.)?ocaalive\.com', r'(?:www\.)?pegasussports\.tv', r'(?:www\.)?praxisseries\.ca', r'(?:www\.)?silenticetv\.com', r'(?:www\.)?tuffhedemantv\.com', r'(?:www\.)?watchfuntv\.com', r'live\.ofsaa\.on\.ca', r'tv\.procoro\.ca', r'tv\.realcastmedia\.net', r'tv\.fringetheatre\.ca', r'video\.haisla\.ca', r'video\.hockeycanada\.ca', r'video\.huuayaht\.org', r'video\.turningpointensemble\.ca', r'videos\.livingworks\.net', r'videos\.telusworldofscienceedmonton\.ca', r'watch\.binghamtonbulldogs\.com', r'watch\.rekindle\.tv', r'watch\.wpca\.com', ] _VALID_URL = rf'https?://(?:{"|".join(_DOMAINS_RE)})/[a-z]{{2}}(?:-[a-z]{{2}})?/c/[\w-]+\.(?P<id>\d+)' _TESTS = [{ 'url': 'https://video.hockeycanada.ca/en/c/nwt-micd-up-with-jamie-lee-rattray.107486', 'only_matching': True, }, { # m3u8 + https 'url': 'https://video.hockeycanada.ca/en-us/c/nwt-micd-up-with-jamie-lee-rattray.107486', 'info_dict': { 'id': '107486', 'title': 'NWT: Mic’d up with Jamie Lee Rattray', 'ext': 'mp4', 'duration': 115, 'timestamp': 1634310409, 'upload_date': '20211015', 'tags': ['English', '2021', "National Women's Team"], 'description': 'md5:efb1cf6165b48cc3f5555c4262dd5b23', 'thumbnail': r're:^https?://wpmedia01-a\.akamaihd\.net/en/asset/public/image/.+', }, 'params': {'skip_download': True}, }, { 'url': 'https://video.hockeycanada.ca/en/c/mwc-remembering-the-wild-ride-in-riga.112307', 'info_dict': { 'id': '112307', 'title': 'MWC: Remembering the wild ride in Riga', 'ext': 'mp4', 'duration': 322, 'timestamp': 1716235607, 'upload_date': '20240520', 'tags': ['English', '2024', "National Men's Team", 'IIHF World Championship', 
'Fan'], 'description': r're:.+Canada’s National Men’s Team.+', 'thumbnail': r're:^https?://wpmedia01-a\.akamaihd\.net/en/asset/public/image/.+', }, 'params': {'skip_download': True}, }, { # the same video in French 'url': 'https://video.hockeycanada.ca/fr/c/cmm-retour-sur-un-parcours-endiable-a-riga.112304', 'info_dict': { 'id': '112304', 'title': 'CMM : Retour sur un parcours endiablé à Riga', 'ext': 'mp4', 'duration': 322, 'timestamp': 1716235545, 'upload_date': '20240520', 'tags': ['French', '2024', "National Men's Team", 'IIHF World Championship', 'Fan'], 'description': 'md5:cf825222882a3dab1cd62cffcf3b4d1f', 'thumbnail': r're:^https?://wpmedia01-a\.akamaihd\.net/en/asset/public/image/.+', }, 'params': {'skip_download': True}, }, { 'url': 'https://myfbcgreenville.vidflex.tv/en/c/may-12th-2024.658', 'only_matching': True, }, { 'url': 'https://www.figureitoutbaseball.com/en/c/fiob-podcast-14-dan-bertolini-ncaa-d1-head-coach-recorded-11-29-2018.1367', 'only_matching': True, }, { 'url': 'https://videos.telusworldofscienceedmonton.ca/en/c/the-aurora-project-timelapse-4.577', 'only_matching': True, }, { 'url': 'https://www.tuffhedemantv.com/en/c/2022-tuff-hedeman-tour-hobbs-nm-january-22.227', 'only_matching': True, }, { 'url': 'https://www.albertalacrossetv.com/en/c/up-floor-ground-balls-one-more.3449', 'only_matching': True, }, { 'url': 'https://www.silenticetv.com/en/c/jp-unlocked-day-in-the-life-of-langley-ha-15u.5197', 'only_matching': True, }, { 'url': 'https://jphl.vidflex.tv/en/c/jp-unlocked-day-in-the-life-of-langley-ha-15u.5197', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) data_url = self._html_search_regex( r'content_api:\s*(["\'])(?P<url>https?://(?:(?!\1).)+)\1', webpage, 'content api url', group='url') media_config = traverse_obj( self._download_json(data_url, video_id), ('config', {base64.b64decode}, {bytes.decode}, {json.loads}, {dict})) return { 'id': video_id, 
'formats': list(self._yield_formats(media_config, video_id)), **self._search_json_ld( webpage.replace('/*<![CDATA[*/', '').replace('/*]]>*/', ''), video_id), } def _yield_formats(self, media_config, video_id): for media_source in traverse_obj(media_config, ('media', 'source', lambda _, v: url_or_none(v['src']))): media_url = media_source['src'] media_type = mimetype2ext(media_source.get('type')) if media_type == 'm3u8': yield from self._extract_m3u8_formats(media_url, video_id, fatal=False, m3u8_id='hls') elif media_type == 'mp4': bitrate = self._search_regex(r'_(\d+)k\.mp4', media_url, 'bitrate', default=None) yield { 'format_id': join_nonempty('http', bitrate), 'url': media_url, 'ext': 'mp4', 'tbr': int_or_none(bitrate), } else: yield { 'url': media_url, 'ext': media_type, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/screen9.py
yt_dlp/extractor/screen9.py
from .common import InfoExtractor from ..utils import traverse_obj class Screen9IE(InfoExtractor): _VALID_URL = r'https?://(?:\w+\.screen9\.(?:tv|com)|play\.su\.se)/(?:embed|media)/(?P<id>[^?#/]+)' _TESTS = [ { 'url': 'https://api.screen9.com/embed/8kTNEjvoXGM33dmWwF0uDA', 'md5': 'd60d23f8980583b930724b01fa6ddb41', 'info_dict': { 'id': '8kTNEjvoXGM33dmWwF0uDA', 'ext': 'mp4', 'title': 'Östersjön i förändrat klimat', 'thumbnail': r're:^https?://.+\.jpg', }, }, { 'url': 'https://folkhogskolekanalen.screen9.tv/media/gy35PKLHe-5K29RYHga2bw/ett-starkare-samhalle-en-snabbguide-om-sveriges-folkhogskolor', 'md5': 'c9389806e78573ea34fc48b6f94465dc', 'info_dict': { 'id': 'gy35PKLHe-5K29RYHga2bw', 'ext': 'mp4', 'title': 'Ett starkare samhälle - en snabbguide om Sveriges folkhögskolor', 'thumbnail': r're:^https?://.+\.jpg', }, }, { 'url': 'https://play.su.se/media/H1YA0EYNCxiesrSU1kaRBQ/baltic-breakfast', 'md5': '2b817647c3058002526269deff4c0683', 'info_dict': { 'id': 'H1YA0EYNCxiesrSU1kaRBQ', 'ext': 'mp4', 'title': 'Baltic Breakfast', 'thumbnail': r're:^https?://.+\.jpg', }, }, ] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(f'https://api.screen9.com/embed/{video_id}', video_id) config = self._search_json(r'var\s+config\s*=', webpage, 'config', video_id) formats, subtitles = self._extract_m3u8_formats_and_subtitles( traverse_obj(config, ('src', lambda _, v: v['type'] == 'application/x-mpegURL', 'src'), get_all=False), video_id, ext='mp4') formats.append({ 'url': traverse_obj(config, ('src', lambda _, v: v['type'] == 'video/mp4', 'src'), get_all=False), 'format': 'mp4', }) return { 'id': video_id, 'title': traverse_obj( config, ('plugins', (('title', 'title'), ('googleAnalytics', 'title'), ('share', 'mediaTitle'))), get_all=False), 'description': traverse_obj(config, ('plugins', 'title', 'description')), 'thumbnail': traverse_obj(config, ('poster')), 'formats': formats, 'subtitles': subtitles, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nova.py
yt_dlp/extractor/nova.py
import re from .common import InfoExtractor from ..utils import ( clean_html, determine_ext, int_or_none, js_to_json, traverse_obj, unified_strdate, url_or_none, ) class NovaEmbedIE(InfoExtractor): _VALID_URL = r'https?://media(?:tn)?\.cms\.nova\.cz/embed/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://media.cms.nova.cz/embed/8o0n0r?autoplay=1', 'info_dict': { 'id': '8o0n0r', 'title': '2180. díl', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 2578, }, 'params': { 'skip_download': True, 'ignore_no_formats_error': True, }, 'expected_warnings': ['DRM protected', 'Requested format is not available'], }, { 'url': 'https://media.cms.nova.cz/embed/KybpWYvcgOa', 'info_dict': { 'id': 'KybpWYvcgOa', 'ext': 'mp4', 'title': 'Borhyová oslavila 60? Soutěžící z pořadu odboural moderátora Ondřeje Sokola', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 114, }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://mediatn.cms.nova.cz/embed/EU5ELEsmOHt?autoplay=1', 'info_dict': { 'id': 'EU5ELEsmOHt', 'ext': 'mp4', 'title': 'Haptické křeslo, bionická ruka nebo roboti. 
Reportérka se podívala na Týden inovací', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 1780, }, 'params': {'skip_download': 'm3u8'}, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) has_drm = False duration = None formats = [] def process_format_list(format_list, format_id=''): nonlocal formats, has_drm if not isinstance(format_list, list): format_list = [format_list] for format_dict in format_list: if not isinstance(format_dict, dict): continue if (not self.get_param('allow_unplayable_formats') and traverse_obj(format_dict, ('drm', 'keySystem'))): has_drm = True continue format_url = url_or_none(format_dict.get('src')) format_type = format_dict.get('type') ext = determine_ext(format_url) if (format_type == 'application/x-mpegURL' or format_id == 'HLS' or ext == 'm3u8'): formats.extend(self._extract_m3u8_formats( format_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) elif (format_type == 'application/dash+xml' or format_id == 'DASH' or ext == 'mpd'): formats.extend(self._extract_mpd_formats( format_url, video_id, mpd_id='dash', fatal=False)) else: formats.append({ 'url': format_url, }) player = self._search_json( r'player:', webpage, 'player', video_id, fatal=False, end_pattern=r';\s*</script>') if player: for src in traverse_obj(player, ('lib', 'source', 'sources', ...)): process_format_list(src) duration = traverse_obj(player, ('sourceInfo', 'duration', {int_or_none})) if not formats and not has_drm: # older code path, in use before August 2023 player = self._parse_json( self._search_regex( (r'(?:(?:replacePlaceholders|processAdTagModifier).*?:\s*)?(?:replacePlaceholders|processAdTagModifier)\s*\(\s*(?P<json>{.*?})\s*\)(?:\s*\))?\s*,', r'Player\.init\s*\([^,]+,(?P<cndn>\s*\w+\s*\?)?\s*(?P<json>{(?(cndn).+?|.+)})\s*(?(cndn):|,\s*{.+?}\s*\)\s*;)'), webpage, 'player', group='json'), video_id) if player: for format_id, format_list in player['tracks'].items(): 
process_format_list(format_list, format_id) duration = int_or_none(player.get('duration')) if not formats and has_drm: self.report_drm(video_id) title = self._og_search_title( webpage, default=None) or self._search_regex( (r'<value>(?P<title>[^<]+)', r'videoTitle\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1'), webpage, 'title', group='value') thumbnail = self._og_search_thumbnail( webpage, default=None) or self._search_regex( r'poster\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage, 'thumbnail', fatal=False, group='value') duration = int_or_none(self._search_regex( r'videoDuration\s*:\s*(\d+)', webpage, 'duration', default=duration)) return { 'id': video_id, 'title': title, 'thumbnail': thumbnail, 'duration': duration, 'formats': formats, } class NovaIE(InfoExtractor): IE_DESC = 'TN.cz, Prásk.tv, Nova.cz, Novaplus.cz, FANDA.tv, Krásná.cz and Doma.cz' _VALID_URL = r'https?://(?:[^.]+\.)?(?P<site>tv(?:noviny)?|tn|novaplus|vymena|fanda|krasna|doma|prask)\.nova\.cz/(?:[^/]+/)+(?P<id>[^/]+?)(?:\.html|/|$)' _TESTS = [{ 'url': 'http://tn.nova.cz/clanek/tajemstvi-ukryte-v-podzemi-specialni-nemocnice-v-prazske-krci.html#player_13260', 'md5': 'da8f3f1fcdaf9fb0f112a32a165760a3', 'info_dict': { 'id': '8OvQqEvV3MW', 'display_id': '8OvQqEvV3MW', 'ext': 'mp4', 'title': 'Podzemní nemocnice v pražské Krči', 'description': 'md5:f0a42dd239c26f61c28f19e62d20ef53', 'thumbnail': r're:^https?://.*\.(?:jpg)', 'duration': 151, }, }, { 'url': 'http://fanda.nova.cz/clanek/fun-and-games/krvavy-epos-zaklinac-3-divoky-hon-vychazi-vyhrajte-ho-pro-sebe.html', 'info_dict': { 'id': '1753621', 'ext': 'mp4', 'title': 'Zaklínač 3: Divoký hon', 'description': 're:.*Pokud se stejně jako my nemůžete.*', 'thumbnail': r're:https?://.*\.jpg(\?.*)?', 'upload_date': '20150521', }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'gone', }, { # media.cms.nova.cz embed 'url': 'https://novaplus.nova.cz/porad/ulice/epizoda/18760-2180-dil', 'info_dict': { 'id': '8o0n0r', 'ext': 'mp4', 'title': '2180. 
díl', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 2578, }, 'params': { 'skip_download': True, }, 'add_ie': [NovaEmbedIE.ie_key()], 'skip': 'CHYBA 404: STRÁNKA NENALEZENA', }, { 'url': 'http://sport.tn.nova.cz/clanek/sport/hokej/nhl/zivot-jde-dal-hodnotil-po-vyrazeni-z-playoff-jiri-sekac.html', 'only_matching': True, }, { 'url': 'http://fanda.nova.cz/clanek/fun-and-games/krvavy-epos-zaklinac-3-divoky-hon-vychazi-vyhrajte-ho-pro-sebe.html', 'only_matching': True, }, { 'url': 'http://doma.nova.cz/clanek/zdravi/prijdte-se-zapsat-do-registru-kostni-drene-jiz-ve-stredu-3-cervna.html', 'only_matching': True, }, { 'url': 'http://prask.nova.cz/clanek/novinky/co-si-na-sobe-nase-hvezdy-nechaly-pojistit.html', 'only_matching': True, }, { 'url': 'http://tv.nova.cz/clanek/novinky/zivot-je-zivot-bondovsky-trailer.html', 'only_matching': True, }] def _real_extract(self, url): mobj = self._match_valid_url(url) display_id = mobj.group('id') site = mobj.group('site') webpage = self._download_webpage(url, display_id) description = clean_html(self._og_search_description(webpage, default=None)) if site == 'novaplus': upload_date = unified_strdate(self._search_regex( r'(\d{1,2}-\d{1,2}-\d{4})$', display_id, 'upload date', default=None)) elif site == 'fanda': upload_date = unified_strdate(self._search_regex( r'<span class="date_time">(\d{1,2}\.\d{1,2}\.\d{4})', webpage, 'upload date', default=None)) else: upload_date = None # novaplus embed_id = self._search_regex( r'<iframe[^>]+\bsrc=["\'](?:https?:)?//media(?:tn)?\.cms\.nova\.cz/embed/([^/?#&"\']+)', webpage, 'embed url', default=None) if embed_id: return { '_type': 'url_transparent', 'url': f'https://media.cms.nova.cz/embed/{embed_id}', 'ie_key': NovaEmbedIE.ie_key(), 'id': embed_id, 'description': description, 'upload_date': upload_date, } video_id = self._search_regex( [r"(?:media|video_id)\s*:\s*'(\d+)'", r'media=(\d+)', r'id="article_video_(\d+)"', r'id="player_(\d+)"'], webpage, 'video id') config_url = self._search_regex( 
r'src="(https?://(?:tn|api)\.nova\.cz/bin/player/videojs/config\.php\?[^"]+)"', webpage, 'config url', default=None) config_params = {} if not config_url: player = self._parse_json( self._search_regex( r'(?s)Player\s*\(.+?\s*,\s*({.+?\bmedia\b["\']?\s*:\s*["\']?\d+.+?})\s*\)', webpage, 'player', default='{}'), video_id, transform_source=js_to_json, fatal=False) if player: config_url = url_or_none(player.get('configUrl')) params = player.get('configParams') if isinstance(params, dict): config_params = params if not config_url: DEFAULT_SITE_ID = '23000' SITES = { 'tvnoviny': DEFAULT_SITE_ID, 'novaplus': DEFAULT_SITE_ID, 'vymena': DEFAULT_SITE_ID, 'krasna': DEFAULT_SITE_ID, 'fanda': '30', 'tn': '30', 'doma': '30', } site_id = self._search_regex( r'site=(\d+)', webpage, 'site id', default=None) or SITES.get( site, DEFAULT_SITE_ID) config_url = 'https://api.nova.cz/bin/player/videojs/config.php' config_params = { 'site': site_id, 'media': video_id, 'quality': 3, 'version': 1, } config = self._download_json( config_url, display_id, 'Downloading config JSON', query=config_params, transform_source=lambda s: s[s.index('{'):s.rindex('}') + 1]) mediafile = config['mediafile'] video_url = mediafile['src'] m = re.search(r'^(?P<url>rtmpe?://[^/]+/(?P<app>[^/]+?))/&*(?P<playpath>.+)$', video_url) if m: formats = [{ 'url': m.group('url'), 'app': m.group('app'), 'play_path': m.group('playpath'), 'player_path': 'http://tvnoviny.nova.cz/static/shared/app/videojs/video-js.swf', 'ext': 'flv', }] else: formats = [{ 'url': video_url, }] title = mediafile.get('meta', {}).get('title') or self._og_search_title(webpage) thumbnail = config.get('poster') return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': description, 'upload_date': upload_date, 'thumbnail': thumbnail, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/coub.py
yt_dlp/extractor/coub.py
from .common import InfoExtractor from ..utils import ( ExtractorError, float_or_none, int_or_none, parse_iso8601, qualities, ) class CoubIE(InfoExtractor): _VALID_URL = r'(?:coub:|https?://(?:coub\.com/(?:view|embed|coubs)/|c-cdn\.coub\.com/fb-player\.swf\?.*\bcoub(?:ID|id)=))(?P<id>[\da-z]+)' _TESTS = [{ 'url': 'http://coub.com/view/5u5n1', 'info_dict': { 'id': '5u5n1', 'ext': 'mp4', 'title': 'The Matrix Moonwalk', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 4.6, 'timestamp': 1428527772, 'upload_date': '20150408', 'uploader': 'Artyom Loskutnikov', 'uploader_id': 'artyom.loskutnikov', 'view_count': int, 'like_count': int, 'repost_count': int, 'age_limit': 0, }, }, { 'url': 'http://c-cdn.coub.com/fb-player.swf?bot_type=vk&coubID=7w5a4', 'only_matching': True, }, { 'url': 'coub:5u5n1', 'only_matching': True, }, { # longer video id 'url': 'http://coub.com/view/237d5l5h', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) coub = self._download_json( f'http://coub.com/api/v2/coubs/{video_id}.json', video_id) if coub.get('error'): raise ExtractorError( '{} said: {}'.format(self.IE_NAME, coub['error']), expected=True) title = coub['title'] file_versions = coub['file_versions'] QUALITIES = ('low', 'med', 'high', 'higher') MOBILE = 'mobile' IPHONE = 'iphone' HTML5 = 'html5' SOURCE_PREFERENCE = (MOBILE, IPHONE, HTML5) quality_key = qualities(QUALITIES) preference_key = qualities(SOURCE_PREFERENCE) formats = [] for kind, items in file_versions.get(HTML5, {}).items(): if kind not in ('video', 'audio'): continue if not isinstance(items, dict): continue for quality, item in items.items(): if not isinstance(item, dict): continue item_url = item.get('url') if not item_url: continue formats.append({ 'url': item_url, 'format_id': f'{HTML5}-{kind}-{quality}', 'filesize': int_or_none(item.get('size')), 'vcodec': 'none' if kind == 'audio' else None, 'acodec': 'none' if kind == 'video' else None, 'quality': quality_key(quality), 
'source_preference': preference_key(HTML5), }) iphone_url = file_versions.get(IPHONE, {}).get('url') if iphone_url: formats.append({ 'url': iphone_url, 'format_id': IPHONE, 'source_preference': preference_key(IPHONE), }) mobile_url = file_versions.get(MOBILE, {}).get('audio_url') if mobile_url: formats.append({ 'url': mobile_url, 'format_id': f'{MOBILE}-audio', 'source_preference': preference_key(MOBILE), }) thumbnail = coub.get('picture') duration = float_or_none(coub.get('duration')) timestamp = parse_iso8601(coub.get('published_at') or coub.get('created_at')) uploader = coub.get('channel', {}).get('title') uploader_id = coub.get('channel', {}).get('permalink') view_count = int_or_none(coub.get('views_count') or coub.get('views_increase_count')) like_count = int_or_none(coub.get('likes_count')) repost_count = int_or_none(coub.get('recoubs_count')) age_restricted = coub.get('age_restricted', coub.get('age_restricted_by_admin')) if age_restricted is not None: age_limit = 18 if age_restricted is True else 0 else: age_limit = None return { 'id': video_id, 'title': title, 'thumbnail': thumbnail, 'duration': duration, 'timestamp': timestamp, 'uploader': uploader, 'uploader_id': uploader_id, 'view_count': view_count, 'like_count': like_count, 'repost_count': repost_count, 'age_limit': age_limit, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/epoch.py
yt_dlp/extractor/epoch.py
from .common import InfoExtractor from ..utils import extract_attributes, get_element_html_by_id class EpochIE(InfoExtractor): _VALID_URL = r'https?://www.theepochtimes\.com/[\w-]+_(?P<id>\d+).html' _TESTS = [ { 'url': 'https://www.theepochtimes.com/they-can-do-audio-video-physical-surveillance-on-you-24h-365d-a-year-rex-lee-on-intrusive-apps_4661688.html', 'info_dict': { 'id': 'a3dd732c-4750-4bc8-8156-69180668bda1', 'ext': 'mp4', 'title': '‘They Can Do Audio, Video, Physical Surveillance on You 24H/365D a Year’: Rex Lee on Intrusive Apps', }, }, { 'url': 'https://www.theepochtimes.com/the-communist-partys-cyberattacks-on-america-explained-rex-lee-talks-tech-hybrid-warfare_4342413.html', 'info_dict': { 'id': '276c7f46-3bbf-475d-9934-b9bbe827cf0a', 'ext': 'mp4', 'title': 'The Communist Party’s Cyberattacks on America Explained; Rex Lee Talks Tech Hybrid Warfare', }, }, { 'url': 'https://www.theepochtimes.com/kash-patel-a-6-year-saga-of-government-corruption-from-russiagate-to-mar-a-lago_4690250.html', 'info_dict': { 'id': 'aa9ceecd-a127-453d-a2de-7153d6fd69b6', 'ext': 'mp4', 'title': 'Kash Patel: A ‘6-Year-Saga’ of Government Corruption, From Russiagate to Mar-a-Lago', }, }, { 'url': 'https://www.theepochtimes.com/dick-morris-discusses-his-book-the-return-trumps-big-2024-comeback_4819205.html', 'info_dict': { 'id': '9489f994-2a20-4812-b233-ac0e5c345632', 'ext': 'mp4', 'title': 'Dick Morris Discusses His Book ‘The Return: Trump’s Big 2024 Comeback’', }, }, ] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) youmaker_video_id = extract_attributes(get_element_html_by_id('videobox', webpage))['data-id'] formats, subtitles = self._extract_m3u8_formats_and_subtitles( f'http://vs1.youmaker.com/assets/{youmaker_video_id}/playlist.m3u8', video_id, 'mp4', m3u8_id='hls') return { 'id': youmaker_video_id, 'formats': formats, 'subtitles': subtitles, 'title': self._html_extract_title(webpage), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ntvcojp.py
yt_dlp/extractor/ntvcojp.py
from .streaks import StreaksBaseIE from ..utils import ( int_or_none, parse_iso8601, str_or_none, url_or_none, ) from ..utils.traversal import require, traverse_obj class NTVCoJpCUIE(StreaksBaseIE): IE_NAME = 'cu.ntv.co.jp' IE_DESC = '日テレ無料TADA!' _VALID_URL = r'https?://cu\.ntv\.co\.jp/(?!program-list|search)(?P<id>[\w-]+)/?(?:[?#]|$)' _TESTS = [{ 'url': 'https://cu.ntv.co.jp/gaki_20250525/', 'info_dict': { 'id': 'gaki_20250525', 'ext': 'mp4', 'title': '放送開始36年!方正ココリコが選ぶ神回&地獄回!', 'cast': 'count:2', 'description': 'md5:1e1db556224d627d4d2f74370c650927', 'display_id': 'ref:gaki_20250525', 'duration': 1450, 'episode': '放送開始36年!方正ココリコが選ぶ神回&地獄回!', 'episode_id': '000000010172808', 'episode_number': 255, 'genres': ['variety'], 'live_status': 'not_live', 'modified_date': '20250525', 'modified_timestamp': 1748145537, 'release_date': '20250525', 'release_timestamp': 1748145539, 'series': 'ダウンタウンのガキの使いやあらへんで!', 'series_id': 'gaki', 'thumbnail': r're:https?://.+\.jpg', 'timestamp': 1748145197, 'upload_date': '20250525', 'uploader': '日本テレビ放送網', 'uploader_id': '0x7FE2', }, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) info = self._search_json( r'window\.app\s*=', webpage, 'video info', display_id)['falcorCache']['catalog']['episode'][display_id]['value'] media_id = traverse_obj(info, ( 'streaks_data', 'mediaid', {str_or_none}, {require('Streaks media ID')})) non_phonetic = (lambda _, v: v['is_phonetic'] is False, 'value', {str}) return { **self._extract_from_streaks_api('ntv-tada', media_id, headers={ 'X-Streaks-Api-Key': 'df497719056b44059a0483b8faad1f4a', }), **traverse_obj(info, { 'id': ('content_id', {str_or_none}), 'title': ('title', *non_phonetic, any), 'age_limit': ('is_adult_only_content', {lambda x: 18 if x else None}), 'cast': ('credit', ..., 'name', *non_phonetic), 'genres': ('genre', ..., {str}), 'release_timestamp': ('pub_date', {parse_iso8601}), 'tags': ('tags', ..., {str}), 'thumbnail': 
('artwork', ..., 'url', any, {url_or_none}), }), **traverse_obj(info, ('tv_episode_info', { 'duration': ('duration', {int_or_none}), 'episode_number': ('episode_number', {int}), 'series': ('parent_show_title', *non_phonetic, any), 'series_id': ('show_content_id', {str}), })), **traverse_obj(info, ('custom_data', { 'description': ('program_detail', {str}), 'episode': ('episode_title', {str}), 'episode_id': ('episode_id', {str_or_none}), 'uploader': ('network_name', {str}), 'uploader_id': ('network_id', {str}), })), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/lci.py
yt_dlp/extractor/lci.py
from .common import InfoExtractor from .wat import WatIE from ..utils import ExtractorError, int_or_none from ..utils.traversal import traverse_obj class LCIIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?(?:lci|tf1info)\.fr/(?:[^/?#]+/)+[\w-]+-(?P<id>\d+)\.html' _TESTS = [{ 'url': 'https://www.tf1info.fr/replay-lci/videos/video-24h-pujadas-du-vendredi-24-mai-6708-2300831.html', 'info_dict': { 'id': '14113788', 'ext': 'mp4', 'title': '24H Pujadas du vendredi 24 mai 2024', 'thumbnail': 'https://photos.tf1.fr/1280/720/24h-pujadas-du-24-mai-2024-55bf2d-0@1x.jpg', 'upload_date': '20240524', 'duration': 6158, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.tf1info.fr/politique/election-presidentielle-2022-second-tour-j-2-marine-le-pen-et-emmanuel-macron-en-interview-de-lci-vendredi-soir-2217486.html', 'info_dict': { 'id': '13875948', 'ext': 'mp4', 'title': 'md5:660df5481fd418bc3bbb0d070e6fdb5a', 'thumbnail': 'https://photos.tf1.fr/1280/720/presidentielle-2022-marine-le-pen-et-emmanuel-macron-invites-de-lci-ce-vendredi-9c0e73-e1a036-0@1x.jpg', 'upload_date': '20220422', 'duration': 33, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.lci.fr/politique/election-presidentielle-2022-second-tour-j-2-marine-le-pen-et-emmanuel-macron-en-interview-de-lci-vendredi-soir-2217486.html', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) next_data = self._search_nextjs_data(webpage, video_id) wat_id = traverse_obj(next_data, ( 'props', 'pageProps', 'page', 'tms', 'videos', {dict.keys}, ..., {int_or_none}, any)) if wat_id is None: raise ExtractorError('Could not find wat_id') return self.url_result(f'wat:{wat_id}', WatIE, str(wat_id))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/gronkh.py
yt_dlp/extractor/gronkh.py
import functools from .common import InfoExtractor from ..utils import ( OnDemandPagedList, float_or_none, traverse_obj, unified_strdate, ) class GronkhIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?gronkh\.tv/(?:watch/)?streams?/(?P<id>\d+)' _TESTS = [{ 'url': 'https://gronkh.tv/streams/657', 'info_dict': { 'id': '657', 'ext': 'mp4', 'title': 'H.O.R.D.E. - DAS ZWEiTE ZEiTALTER 🎲 Session 1', 'view_count': int, 'thumbnail': 'https://01.cdn.vod.farm/preview/9e2555d3a23bf4e5c5b7c6b3b70a9d84.jpg', 'upload_date': '20221111', 'chapters': 'count:3', 'duration': 31463, }, 'params': {'skip_download': True}, }, { 'url': 'https://gronkh.tv/stream/536', 'info_dict': { 'id': '536', 'ext': 'mp4', 'title': 'GTV0536, 2021-10-01 - MARTHA IS DEAD #FREiAB1830 !FF7 !horde !archiv', 'view_count': int, 'thumbnail': 'https://01.cdn.vod.farm/preview/6436746cce14e25f751260a692872b9b.jpg', 'upload_date': '20211001', 'duration': 32058, }, 'params': {'skip_download': True}, }, { 'url': 'https://gronkh.tv/watch/stream/546', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) data_json = self._download_json(f'https://api.gronkh.tv/v1/video/info?episode={video_id}', video_id) m3u8_url = self._download_json(f'https://api.gronkh.tv/v1/video/playlist?episode={video_id}', video_id)['playlist_url'] formats, subtitles = self._extract_m3u8_formats_and_subtitles(m3u8_url, video_id) if data_json.get('vtt_url'): subtitles.setdefault('en', []).append({ 'url': data_json['vtt_url'], 'ext': 'vtt', }) return { 'id': video_id, 'title': data_json.get('title'), 'view_count': data_json.get('views'), 'thumbnail': data_json.get('preview_url'), 'upload_date': unified_strdate(data_json.get('created_at')), 'formats': formats, 'subtitles': subtitles, 'duration': float_or_none(data_json.get('source_length')), 'chapters': traverse_obj(data_json, ( 'chapters', lambda _, v: float_or_none(v['offset']) is not None, { 'title': 'title', 'start_time': ('offset', {float_or_none}), })) or 
None, } class GronkhFeedIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?gronkh\.tv(?:/feed)?/?(?:#|$)' IE_NAME = 'gronkh:feed' _TESTS = [{ 'url': 'https://gronkh.tv/feed', 'info_dict': { 'id': 'feed', }, 'playlist_count': 16, }, { 'url': 'https://gronkh.tv', 'only_matching': True, }] def _entries(self): for type_ in ('recent', 'views'): info = self._download_json( f'https://api.gronkh.tv/v1/video/discovery/{type_}', 'feed', note=f'Downloading {type_} API JSON') for item in traverse_obj(info, ('discovery', ...)) or []: yield self.url_result(f'https://gronkh.tv/watch/stream/{item["episode"]}', GronkhIE, item.get('title')) def _real_extract(self, url): return self.playlist_result(self._entries(), 'feed') class GronkhVodsIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?gronkh\.tv/vods/streams/?(?:#|$)' IE_NAME = 'gronkh:vods' _TESTS = [{ 'url': 'https://gronkh.tv/vods/streams', 'info_dict': { 'id': 'vods', }, 'playlist_mincount': 150, }] _PER_PAGE = 25 def _fetch_page(self, page): items = traverse_obj(self._download_json( 'https://api.gronkh.tv/v1/search', 'vods', query={'offset': self._PER_PAGE * page, 'first': self._PER_PAGE}, note=f'Downloading stream video page {page + 1}'), ('results', 'videos', ...)) for item in items or []: yield self.url_result(f'https://gronkh.tv/watch/stream/{item["episode"]}', GronkhIE, item['episode'], item.get('title')) def _real_extract(self, url): entries = OnDemandPagedList(functools.partial(self._fetch_page), self._PER_PAGE) return self.playlist_result(entries, 'vods')
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/cineverse.py
yt_dlp/extractor/cineverse.py
import re from .common import InfoExtractor from ..utils import ( filter_dict, float_or_none, int_or_none, parse_age_limit, smuggle_url, traverse_obj, unsmuggle_url, url_or_none, ) class CineverseBaseIE(InfoExtractor): _VALID_URL_BASE = r'https?://www\.(?P<host>{})'.format('|'.join(map(re.escape, ( 'cineverse.com', 'asiancrush.com', 'dovechannel.com', 'screambox.com', 'midnightpulp.com', 'fandor.com', 'retrocrush.tv', )))) class CineverseIE(CineverseBaseIE): _VALID_URL = rf'{CineverseBaseIE._VALID_URL_BASE}/watch/(?P<id>[A-Z0-9]+)' _TESTS = [{ 'url': 'https://www.asiancrush.com/watch/DMR00018919/Women-Who-Flirt', 'skip': 'geo-blocked', 'info_dict': { 'title': 'Women Who Flirt', 'ext': 'mp4', 'id': 'DMR00018919', 'modified_timestamp': 1678744575289, 'cast': ['Xun Zhou', 'Xiaoming Huang', 'Yi-Lin Sie', 'Sonia Sui', 'Quniciren'], 'duration': 5811.597, 'description': 'md5:892fd62a05611d394141e8394ace0bc6', 'age_limit': 13, }, }, { 'url': 'https://www.retrocrush.tv/watch/1000000023016/Archenemy! Crystal Bowie', 'skip': 'geo-blocked', 'info_dict': { 'title': 'Archenemy! 
Crystal Bowie', 'ext': 'mp4', 'id': '1000000023016', 'episode_number': 3, 'season_number': 1, 'cast': ['Nachi Nozawa', 'Yoshiko Sakakibara', 'Toshiko Fujita'], 'age_limit': 0, 'episode': 'Episode 3', 'season': 'Season 1', 'duration': 1485.067, 'description': 'Cobra meets a beautiful bounty hunter by the name of Jane Royal.', 'series': 'Space Adventure COBRA (Original Japanese)', }, }] def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, default={}) self._initialize_geo_bypass({ 'countries': smuggled_data.get('geo_countries'), }) video_id = self._match_id(url) html = self._download_webpage(url, video_id) idetails = self._search_nextjs_data(html, video_id)['props']['pageProps']['idetails'] err_code = idetails.get('err_code') if err_code == 1002: self.raise_login_required() elif err_code == 1200: self.raise_geo_restricted( 'This video is not available from your location due to geo restriction. ' 'You may be able to bypass it by using the /details/ page instead of the /watch/ page', countries=smuggled_data.get('geo_countries')) return { 'subtitles': filter_dict({ 'en': traverse_obj(idetails, (('cc_url_vtt', 'subtitle_url'), {'url': {url_or_none}})) or None, }), 'formats': self._extract_m3u8_formats(idetails['url'], video_id), **traverse_obj(idetails, { 'title': 'title', 'id': ('details', 'item_id'), 'description': ('details', 'description'), 'duration': ('duration', {float_or_none(scale=1000)}), 'cast': ('details', 'cast', {lambda x: x.split(', ')}), 'modified_timestamp': ('details', 'updated_by', 0, 'update_time', 'time', {int_or_none}), 'season_number': ('details', 'season', {int_or_none}), 'episode_number': ('details', 'episode', {int_or_none}), 'age_limit': ('details', 'rating_code', {parse_age_limit}), 'series': ('details', 'series_details', 'title'), }), } class CineverseDetailsIE(CineverseBaseIE): _VALID_URL = rf'{CineverseBaseIE._VALID_URL_BASE}/details/(?P<id>[A-Z0-9]+)' _TESTS = [{ 'url': 
'https://www.retrocrush.tv/details/1000000023012/Space-Adventure-COBRA-(Original-Japanese)', 'playlist_mincount': 30, 'info_dict': { 'title': 'Space Adventure COBRA (Original Japanese)', 'id': '1000000023012', }, }, { 'url': 'https://www.asiancrush.com/details/NNVG4938/Hansel-and-Gretel', 'info_dict': { 'id': 'NNVG4938', 'ext': 'mp4', 'title': 'Hansel and Gretel', 'description': 'md5:e3e4c35309c2e82aee044f972c2fb05d', 'cast': ['Jeong-myeong Cheon', 'Eun Won-jae', 'Shim Eun-gyeong', 'Ji-hee Jin', 'Hee-soon Park', 'Lydia Park', 'Kyeong-ik Kim'], 'duration': 7030.732, }, }] def _real_extract(self, url): host, series_id = self._match_valid_url(url).group('host', 'id') html = self._download_webpage(url, series_id) pageprops = self._search_nextjs_data(html, series_id)['props']['pageProps'] geo_countries = traverse_obj(pageprops, ('itemDetailsData', 'geo_country', {lambda x: x.split(', ')})) geoblocked = traverse_obj(pageprops, ( 'itemDetailsData', 'playback_err_msg')) == 'This title is not available in your location.' def item_result(item): item_url = f'https://www.{host}/watch/{item["item_id"]}/{item["title"]}' if geoblocked: item_url = smuggle_url(item_url, {'geo_countries': geo_countries}) return self.url_result(item_url, CineverseIE) season = traverse_obj(pageprops, ('seasonEpisodes', ..., 'episodes', lambda _, v: v['item_id'] and v['title'])) if season: return self.playlist_result([item_result(ep) for ep in season], playlist_id=series_id, playlist_title=traverse_obj(pageprops, ('itemDetailsData', 'title'))) return item_result(pageprops['itemDetailsData'])
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/phoenix.py
yt_dlp/extractor/phoenix.py
from .youtube import YoutubeIE from .zdf import ZDFBaseIE from ..utils import ( int_or_none, merge_dicts, try_get, unified_timestamp, ) class PhoenixIE(ZDFBaseIE): IE_NAME = 'phoenix.de' _VALID_URL = r'https?://(?:www\.)?phoenix\.de/(?:[^/?#]+/)*[^/?#&]*-a-(?P<id>\d+)\.html' _TESTS = [{ 'url': 'https://www.phoenix.de/sendungen/dokumentationen/spitzbergen-a-893349.html', 'md5': 'a79e86d9774d0b3f2102aff988a0bd32', 'info_dict': { 'id': '221215_phx_spitzbergen', 'ext': 'mp4', 'title': 'Spitzbergen', 'description': 'Film von Tilmann Bünz', 'duration': 728.0, 'timestamp': 1555600500, 'upload_date': '20190418', 'uploader': 'Phoenix', 'thumbnail': 'https://www.phoenix.de/sixcms/media.php/21/Bergspitzen1.png', 'series': 'Dokumentationen', 'episode': 'Spitzbergen', }, }, { 'url': 'https://www.phoenix.de/entwicklungen-in-russland-a-2044720.html', 'only_matching': True, }, { # no media 'url': 'https://www.phoenix.de/sendungen/dokumentationen/mit-dem-jumbo-durch-die-nacht-a-89625.html', 'only_matching': True, }, { # Same as https://www.zdf.de/politik/phoenix-sendungen/die-gesten-der-maechtigen-100.html 'url': 'https://www.phoenix.de/sendungen/dokumentationen/gesten-der-maechtigen-i-a-89468.html?ref=suche', 'only_matching': True, }] def _real_extract(self, url): article_id = self._match_id(url) article = self._download_json( f'https://www.phoenix.de/response/id/{article_id}', article_id, 'Downloading article JSON') video = article['absaetze'][0] title = video.get('titel') or article.get('subtitel') if video.get('typ') == 'video-youtube': video_id = video['id'] return self.url_result( video_id, ie=YoutubeIE.ie_key(), video_id=video_id, video_title=title) video_id = str(video.get('basename') or video.get('content')) details = self._download_json( 'https://www.phoenix.de/php/mediaplayer/data/beitrags_details.php', video_id, 'Downloading details JSON', query={ 'ak': 'web', 'ptmd': 'true', 'id': video_id, 'profile': 'player2', }) title = title or details['title'] content_id = 
details['tracking']['nielsen']['content']['assetid'] info = self._extract_ptmd( f'https://tmd.phoenix.de/tmd/2/android_native_6/vod/ptmd/phoenix/{content_id}', content_id) duration = int_or_none(try_get( details, lambda x: x['tracking']['nielsen']['content']['length'])) timestamp = unified_timestamp(details.get('editorialDate')) series = try_get( details, lambda x: x['tracking']['nielsen']['content']['program'], str) episode = title if details.get('contentType') == 'episode' else None teaser_images = try_get(details, lambda x: x['teaserImageRef']['layouts'], dict) or {} thumbnails = self._extract_thumbnails(teaser_images) return merge_dicts(info, { 'id': content_id, 'title': title, 'description': details.get('leadParagraph'), 'duration': duration, 'thumbnails': thumbnails, 'timestamp': timestamp, 'uploader': details.get('tvService'), 'series': series, 'episode': episode, })
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tenplay.py
yt_dlp/extractor/tenplay.py
import base64 import datetime as dt import itertools import json import re import time from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, encode_data_uri, filter_dict, int_or_none, jwt_decode_hs256, url_or_none, urlencode_postdata, urljoin, ) from ..utils.traversal import traverse_obj class TenPlayIE(InfoExtractor): IE_NAME = '10play' _VALID_URL = r'https?://(?:www\.)?10(?:play)?\.com\.au/(?:[^/?#]+/)+(?P<id>tpv\d{6}[a-z]{5})' _NETRC_MACHINE = '10play' _TESTS = [{ # Geo-restricted to Australia 'url': 'https://10.com.au/australian-survivor/web-extras/season-10-brains-v-brawn-ii/myless-journey/tpv250414jdmtf', 'info_dict': { 'id': '7440980000013868', 'ext': 'mp4', 'title': 'Myles\'s Journey', 'alt_title': 'Myles\'s Journey', 'description': 'Relive Myles\'s epic Brains V Brawn II journey to reach the game\'s final two', 'uploader': 'Channel 10', 'uploader_id': '2199827728001', 'age_limit': 15, 'duration': 249, 'thumbnail': r're:https://.+/.+\.jpg', 'series': 'Australian Survivor', 'season': 'Season 10', 'season_number': 10, 'timestamp': 1744629420, 'upload_date': '20250414', }, 'params': {'skip_download': 'm3u8'}, }, { # Geo-restricted to Australia 'url': 'https://10.com.au/neighbours/episodes/season-42/episode-9107/tpv240902nzqyp', 'info_dict': { 'id': '9000000000091177', 'ext': 'mp4', 'title': 'Neighbours - S42 Ep. 
9107', 'alt_title': 'Thu 05 Sep', 'description': 'md5:37a1f4271be34b9ee2b533426a5fbaef', 'duration': 1388, 'episode': 'Episode 9107', 'episode_number': 9107, 'season': 'Season 42', 'season_number': 42, 'series': 'Neighbours', 'thumbnail': r're:https://.+/.+\.jpg', 'age_limit': 15, 'timestamp': 1725517860, 'upload_date': '20240905', 'uploader': 'Channel 10', 'uploader_id': '2199827728001', }, 'params': {'skip_download': 'm3u8'}, }, { # Geo-restricted to Australia; upgrading the m3u8 quality fails and we need the fallback 'url': 'https://10.com.au/tiny-chef-show/episodes/season-1/episode-2/tpv240228pofvt', 'info_dict': { 'id': '9000000000084116', 'ext': 'mp4', 'uploader': 'Channel 10', 'uploader_id': '2199827728001', 'duration': 1297, 'title': 'The Tiny Chef Show - S1 Ep. 2', 'alt_title': 'S1 Ep. 2 - Popcorn/banana', 'description': 'md5:d4758b52b5375dfaa67a78261dcb5763', 'age_limit': 0, 'series': 'The Tiny Chef Show', 'season_number': 1, 'episode_number': 2, 'timestamp': 1747957740, 'thumbnail': r're:https://.+/.+\.jpg', 'upload_date': '20250522', 'season': 'Season 1', 'episode': 'Episode 2', }, 'params': {'skip_download': 'm3u8'}, 'expected_warnings': ['Failed to download m3u8 information: HTTP Error 502'], 'skip': 'video unavailable', }, { 'url': 'https://10play.com.au/how-to-stay-married/web-extras/season-1/terrys-talks-ep-1-embracing-change/tpv190915ylupc', 'only_matching': True, }] _GEO_BYPASS = False _GEO_COUNTRIES = ['AU'] _AUS_AGES = { 'G': 0, 'PG': 15, 'M': 15, 'MA': 15, 'MA15+': 15, 'R': 18, 'X': 18, } _TOKEN_CACHE_KEY = 'token_data' _SEGMENT_BITRATE_RE = r'(?m)-(?:300|150|75|55)0000-(\d+(?:-[\da-f]+)?)\.ts$' _refresh_token = None _access_token = None @staticmethod def _filter_ads_from_m3u8(m3u8_doc): out = [] for line in m3u8_doc.splitlines(): if line.startswith('https://redirector.googlevideo.com/'): out.pop() continue out.append(line) return '\n'.join(out) @staticmethod def _generate_xnetwork_ten_auth_token(): ts = 
dt.datetime.now(dt.timezone.utc).strftime('%Y%m%d%H%M%S') return base64.b64encode(ts.encode()).decode() @staticmethod def _is_jwt_expired(token): return jwt_decode_hs256(token)['exp'] - time.time() < 300 def _refresh_access_token(self): try: refresh_data = self._download_json( 'https://10.com.au/api/token/refresh', None, 'Refreshing access token', headers={ 'Content-Type': 'application/json', }, data=json.dumps({ 'accessToken': self._access_token, 'refreshToken': self._refresh_token, }).encode()) except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 400: self._refresh_token = self._access_token = None self.cache.store(self._NETRC_MACHINE, self._TOKEN_CACHE_KEY, [None, None]) self.report_warning('Refresh token has been invalidated; retrying with credentials') self._perform_login(*self._get_login_info()) return raise self._access_token = refresh_data['accessToken'] self._refresh_token = refresh_data['refreshToken'] self.cache.store(self._NETRC_MACHINE, self._TOKEN_CACHE_KEY, [self._refresh_token, self._access_token]) def _perform_login(self, username, password): if not self._refresh_token: self._refresh_token, self._access_token = self.cache.load( self._NETRC_MACHINE, self._TOKEN_CACHE_KEY, default=[None, None]) if self._refresh_token and self._access_token: self.write_debug('Using cached refresh token') return try: auth_data = self._download_json( 'https://10.com.au/api/user/auth', None, 'Logging in', headers={ 'Content-Type': 'application/json', 'X-Network-Ten-Auth': self._generate_xnetwork_ten_auth_token(), 'Referer': 'https://10.com.au/', }, data=json.dumps({ 'email': username, 'password': password, }).encode()) except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 400: raise ExtractorError('Invalid username/password', expected=True) raise self._refresh_token = auth_data['jwt']['refreshToken'] self._access_token = auth_data['jwt']['accessToken'] self.cache.store(self._NETRC_MACHINE, 
self._TOKEN_CACHE_KEY, [self._refresh_token, self._access_token]) def _call_playback_api(self, content_id): if self._access_token and self._is_jwt_expired(self._access_token): self._refresh_access_token() for is_retry in (False, True): try: return self._download_json_handle( f'https://10.com.au/api/v1/videos/playback/{content_id}/', content_id, note='Downloading video JSON', query={'platform': 'samsung'}, headers=filter_dict({ 'TP-AcceptFeature': 'v1/fw;v1/drm', 'Authorization': f'Bearer {self._access_token}' if self._access_token else None, })) except ExtractorError as e: if not is_retry and isinstance(e.cause, HTTPError) and e.cause.status == 403: if self._access_token: self.to_screen('Access token has expired; refreshing') self._refresh_access_token() continue elif not self._get_login_info()[0]: self.raise_login_required('Login required to access this video', method='password') raise def _real_extract(self, url): content_id = self._match_id(url) try: data = self._download_json(f'https://10.com.au/api/v1/videos/{content_id}', content_id) except ExtractorError as e: if ( isinstance(e.cause, HTTPError) and e.cause.status == 403 and 'Error 54113' in e.cause.response.read().decode() ): self.raise_geo_restricted(countries=self._GEO_COUNTRIES) raise video_data, urlh = self._call_playback_api(content_id) content_source_id = video_data['dai']['contentSourceId'] video_id = video_data['dai']['videoId'] auth_token = urlh.get_header('x-dai-auth') if not auth_token: raise ExtractorError('Failed to get DAI auth token') dai_data = self._download_json( f'https://pubads.g.doubleclick.net/ondemand/hls/content/{content_source_id}/vid/{video_id}/streams', content_id, note='Downloading DAI JSON', data=urlencode_postdata({'auth-token': auth_token})) # Ignore subs to avoid ad break cleanup formats, _ = self._extract_m3u8_formats_and_subtitles( dai_data['stream_manifest'], content_id, 'mp4') already_have_1080p = False for fmt in formats: m3u8_doc = self._download_webpage( fmt['url'], 
content_id, note='Downloading m3u8 information') m3u8_doc = self._filter_ads_from_m3u8(m3u8_doc) fmt['hls_media_playlist_data'] = m3u8_doc if fmt.get('height') == 1080: already_have_1080p = True # Attempt format upgrade if not already_have_1080p and m3u8_doc and re.search(self._SEGMENT_BITRATE_RE, m3u8_doc): m3u8_doc = re.sub(self._SEGMENT_BITRATE_RE, r'-5000000-\1.ts', m3u8_doc) m3u8_doc = re.sub(r'-(?:300|150|75|55)0000\.key"', r'-5000000.key"', m3u8_doc) formats.append({ 'format_id': 'upgrade-attempt-1080p', 'url': encode_data_uri(m3u8_doc.encode(), 'application/x-mpegurl'), 'hls_media_playlist_data': m3u8_doc, 'width': 1920, 'height': 1080, 'ext': 'mp4', 'protocol': 'm3u8_native', '__needs_testing': True, }) return { 'id': content_id, 'formats': formats, 'subtitles': {'en': [{'url': data['captionUrl']}]} if url_or_none(data.get('captionUrl')) else None, 'uploader': 'Channel 10', 'uploader_id': '2199827728001', **traverse_obj(data, { 'id': ('altId', {str}), 'duration': ('duration', {int_or_none}), 'title': ('subtitle', {str}), 'alt_title': ('title', {str}), 'description': ('description', {str}), 'age_limit': ('classification', {self._AUS_AGES.get}), 'series': ('tvShow', {str}), 'season_number': ('season', {int_or_none}), 'episode_number': ('episode', {int_or_none}), 'timestamp': ('published', {int_or_none}), 'thumbnail': ('imageUrl', {url_or_none}), }), } class TenPlaySeasonIE(InfoExtractor): IE_NAME = '10play:season' _VALID_URL = r'https?://(?:www\.)?10(?:play)?\.com\.au/(?P<show>[^/?#]+)/episodes/(?P<season>[^/?#]+)/?(?:$|[?#])' _TESTS = [{ 'url': 'https://10.com.au/masterchef/episodes/season-15', 'info_dict': { 'title': 'Season 15', 'id': 'MTQ2NjMxOQ==', }, 'playlist_mincount': 50, }, { 'url': 'https://10.com.au/the-bold-and-the-beautiful-fast-tracked/episodes/season-2024', 'info_dict': { 'title': 'Season 2024', 'id': 'Mjc0OTIw', }, 'playlist_mincount': 159, }, { 'url': 'https://10play.com.au/the-bold-and-the-beautiful-fast-tracked/episodes/season-2024', 
'only_matching': True, }] def _entries(self, load_more_url, display_id=None): skip_ids = [] for page in itertools.count(1): episodes_carousel = self._download_json( load_more_url, display_id, query={'skipIds[]': skip_ids}, note=f'Fetching episodes page {page}') episodes_chunk = episodes_carousel['items'] skip_ids.extend(ep['id'] for ep in episodes_chunk) for ep in episodes_chunk: yield ep['cardLink'] if not episodes_carousel['hasMore']: break def _real_extract(self, url): show, season = self._match_valid_url(url).group('show', 'season') season_info = self._download_json( f'https://10.com.au/api/shows/{show}/episodes/{season}', f'{show}/{season}') episodes_carousel = traverse_obj(season_info, ( 'content', 0, 'components', ( lambda _, v: v['title'].lower() == 'episodes', (..., {dict}), )), get_all=False) or {} playlist_id = episodes_carousel['tpId'] return self.playlist_from_matches( self._entries(urljoin(url, episodes_carousel['loadMoreUrl']), playlist_id), playlist_id, traverse_obj(season_info, ('content', 0, 'title', {str})), getter=urljoin(url))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/yandexmusic.py
yt_dlp/extractor/yandexmusic.py
import hashlib import itertools from .common import InfoExtractor from ..utils import ( ExtractorError, float_or_none, int_or_none, try_get, ) class YandexMusicBaseIE(InfoExtractor): _VALID_URL_BASE = r'https?://music\.yandex\.(?P<tld>ru|kz|ua|by|com)' @staticmethod def _handle_error(response): if isinstance(response, dict): error = response.get('error') if error: raise ExtractorError(error, expected=True) if response.get('type') == 'captcha' or 'captcha' in response: YandexMusicBaseIE._raise_captcha() @staticmethod def _raise_captcha(): raise ExtractorError( 'YandexMusic has considered yt-dlp requests automated and ' 'asks you to solve a CAPTCHA. You can either wait for some ' 'time until unblocked and optionally use --sleep-interval ' 'in future or alternatively you can go to https://music.yandex.ru/ ' 'solve CAPTCHA, then export cookies and pass cookie file to ' 'yt-dlp with --cookies', expected=True) def _download_webpage_handle(self, *args, **kwargs): webpage = super()._download_webpage_handle(*args, **kwargs) if 'Нам очень жаль, но&nbsp;запросы, поступившие с&nbsp;вашего IP-адреса, похожи на&nbsp;автоматические.' 
in webpage: self._raise_captcha() return webpage def _download_json(self, *args, **kwargs): response = super()._download_json(*args, **kwargs) self._handle_error(response) return response def _call_api(self, ep, tld, url, item_id, note, query): return self._download_json( f'https://music.yandex.{tld}/handlers/{ep}.jsx', item_id, note, fatal=False, headers={ 'Referer': url, 'X-Requested-With': 'XMLHttpRequest', 'X-Retpath-Y': url, }, query=query) class YandexMusicTrackIE(YandexMusicBaseIE): IE_NAME = 'yandexmusic:track' IE_DESC = 'Яндекс.Музыка - Трек' _VALID_URL = rf'{YandexMusicBaseIE._VALID_URL_BASE}/album/(?P<album_id>\d+)/track/(?P<id>\d+)' _TESTS = [{ 'url': 'http://music.yandex.ru/album/540508/track/4878838', 'md5': 'dec8b661f12027ceaba33318787fff76', 'info_dict': { 'id': '4878838', 'ext': 'mp3', 'title': 'md5:c63e19341fdbe84e43425a30bc777856', 'filesize': int, 'duration': 193.04, 'track': 'md5:210508c6ffdfd67a493a6c378f22c3ff', 'album': 'md5:cd04fb13c4efeafdfa0a6a6aca36d01a', 'album_artist': 'md5:5f54c35462c07952df33d97cfb5fc200', 'artist': 'md5:e6fd86621825f14dc0b25db3acd68160', 'release_year': 2009, }, # 'skip': 'Travis CI servers blocked by YandexMusic', }, { # multiple disks 'url': 'http://music.yandex.ru/album/3840501/track/705105', 'md5': '82a54e9e787301dd45aba093cf6e58c0', 'info_dict': { 'id': '705105', 'ext': 'mp3', 'title': 'md5:f86d4a9188279860a83000277024c1a6', 'filesize': int, 'duration': 239.27, 'track': 'md5:40f887f0666ba1aa10b835aca44807d1', 'album': 'md5:624f5224b14f5c88a8e812fd7fbf1873', 'album_artist': 'md5:dd35f2af4e8927100cbe6f5e62e1fb12', 'artist': 'md5:dd35f2af4e8927100cbe6f5e62e1fb12', 'release_year': 2016, 'genre': 'pop', 'disc_number': 2, 'track_number': 9, }, # 'skip': 'Travis CI servers blocked by YandexMusic', }, { 'url': 'http://music.yandex.com/album/540508/track/4878838', 'only_matching': True, }] def _real_extract(self, url): mobj = self._match_valid_url(url) tld, album_id, track_id = mobj.group('tld'), mobj.group('album_id'), 
mobj.group('id') track = self._call_api( 'track', tld, url, track_id, 'Downloading track JSON', {'track': f'{track_id}:{album_id}'})['track'] track_title = track['title'] download_data = self._download_json( f'https://music.yandex.ru/api/v2.1/handlers/track/{track_id}:{album_id}/web-album_track-track-track-main/download/m', track_id, 'Downloading track location url JSON', query={'hq': 1}, headers={'X-Retpath-Y': url}) fd_data = self._download_json( download_data['src'], track_id, 'Downloading track location JSON', query={'format': 'json'}) key = hashlib.md5(('XGRlBW9FXlekgbPrRHuSiA' + fd_data['path'][1:] + fd_data['s']).encode()).hexdigest() f_url = 'http://{}/get-mp3/{}/{}?track-id={} '.format(fd_data['host'], key, fd_data['ts'] + fd_data['path'], track['id']) thumbnail = None cover_uri = track.get('albums', [{}])[0].get('coverUri') if cover_uri: thumbnail = cover_uri.replace('%%', 'orig') if not thumbnail.startswith('http'): thumbnail = 'http://' + thumbnail track_info = { 'id': track_id, 'ext': 'mp3', 'url': f_url, 'filesize': int_or_none(track.get('fileSize')), 'duration': float_or_none(track.get('durationMs'), 1000), 'thumbnail': thumbnail, 'track': track_title, 'acodec': download_data.get('codec'), 'abr': int_or_none(download_data.get('bitrate')), } def extract_artist_name(artist): decomposed = artist.get('decomposed') if not isinstance(decomposed, list): return artist['name'] parts = [artist['name']] for element in decomposed: if isinstance(element, dict) and element.get('name'): parts.append(element['name']) elif isinstance(element, str): parts.append(element) return ''.join(parts) def extract_artist(artist_list): if artist_list and isinstance(artist_list, list): artists_names = [extract_artist_name(a) for a in artist_list if a.get('name')] if artists_names: return ', '.join(artists_names) albums = track.get('albums') if albums and isinstance(albums, list): album = albums[0] if isinstance(album, dict): year = album.get('year') disc_number = 
int_or_none(try_get( album, lambda x: x['trackPosition']['volume'])) track_number = int_or_none(try_get( album, lambda x: x['trackPosition']['index'])) track_info.update({ 'album': album.get('title'), 'album_artist': extract_artist(album.get('artists')), 'release_year': int_or_none(year), 'genre': album.get('genre'), 'disc_number': disc_number, 'track_number': track_number, }) track_artist = extract_artist(track.get('artists')) if track_artist: track_info.update({ 'artist': track_artist, 'title': f'{track_artist} - {track_title}', }) else: track_info['title'] = track_title return track_info class YandexMusicPlaylistBaseIE(YandexMusicBaseIE): def _extract_tracks(self, source, item_id, url, tld): tracks = source['tracks'] track_ids = [str(track_id) for track_id in source['trackIds']] # tracks dictionary shipped with playlist.jsx API is limited to 150 tracks, # missing tracks should be retrieved manually. if len(tracks) < len(track_ids): present_track_ids = { str(track['id']) for track in tracks if track.get('id')} missing_track_ids = [ track_id for track_id in track_ids if track_id not in present_track_ids] # Request missing tracks in chunks to avoid exceeding max HTTP header size, # see https://github.com/ytdl-org/youtube-dl/issues/27355 _TRACKS_PER_CHUNK = 250 for chunk_num in itertools.count(0): start = chunk_num * _TRACKS_PER_CHUNK end = start + _TRACKS_PER_CHUNK missing_track_ids_req = missing_track_ids[start:end] assert missing_track_ids_req missing_tracks = self._call_api( 'track-entries', tld, url, item_id, f'Downloading missing tracks JSON chunk {chunk_num + 1}', { 'entries': ','.join(missing_track_ids_req), 'lang': tld, 'external-domain': f'music.yandex.{tld}', 'overembed': 'false', 'strict': 'true', }) if missing_tracks: tracks.extend(missing_tracks) if end >= len(missing_track_ids): break return tracks def _build_playlist(self, tracks): entries = [] for track in tracks: track_id = track.get('id') or track.get('realId') if not track_id: continue albums = 
track.get('albums') if not albums or not isinstance(albums, list): continue album = albums[0] if not isinstance(album, dict): continue album_id = album.get('id') if not album_id: continue entries.append(self.url_result( f'http://music.yandex.ru/album/{album_id}/track/{track_id}', ie=YandexMusicTrackIE.ie_key(), video_id=track_id)) return entries class YandexMusicAlbumIE(YandexMusicPlaylistBaseIE): IE_NAME = 'yandexmusic:album' IE_DESC = 'Яндекс.Музыка - Альбом' _VALID_URL = rf'{YandexMusicBaseIE._VALID_URL_BASE}/album/(?P<id>\d+)' _TESTS = [{ 'url': 'http://music.yandex.ru/album/540508', 'info_dict': { 'id': '540508', 'title': 'md5:7ed1c3567f28d14be9f61179116f5571', }, 'playlist_count': 50, # 'skip': 'Travis CI servers blocked by YandexMusic', }, { 'url': 'https://music.yandex.ru/album/3840501', 'info_dict': { 'id': '3840501', 'title': 'md5:36733472cdaa7dcb1fd9473f7da8e50f', }, 'playlist_count': 33, # 'skip': 'Travis CI servers blocked by YandexMusic', }, { # empty artists 'url': 'https://music.yandex.ru/album/9091882', 'info_dict': { 'id': '9091882', 'title': 'ТЕД на русском', }, 'playlist_count': 187, }] @classmethod def suitable(cls, url): return False if YandexMusicTrackIE.suitable(url) else super().suitable(url) def _real_extract(self, url): mobj = self._match_valid_url(url) tld = mobj.group('tld') album_id = mobj.group('id') album = self._call_api( 'album', tld, url, album_id, 'Downloading album JSON', {'album': album_id}) entries = self._build_playlist([track for volume in album['volumes'] for track in volume]) title = album['title'] artist = try_get(album, lambda x: x['artists'][0]['name'], str) if artist: title = f'{artist} - {title}' year = album.get('year') if year: title += f' ({year})' return self.playlist_result(entries, str(album['id']), title) class YandexMusicPlaylistIE(YandexMusicPlaylistBaseIE): IE_NAME = 'yandexmusic:playlist' IE_DESC = 'Яндекс.Музыка - Плейлист' _VALID_URL = 
rf'{YandexMusicBaseIE._VALID_URL_BASE}/users/(?P<user>[^/]+)/playlists/(?P<id>\d+)' _TESTS = [{ 'url': 'http://music.yandex.ru/users/music.partners/playlists/1245', 'info_dict': { 'id': '1245', 'title': 'md5:841559b3fe2b998eca88d0d2e22a3097', 'description': 'md5:3b9f27b0efbe53f2ee1e844d07155cc9', }, 'playlist_count': 5, # 'skip': 'Travis CI servers blocked by YandexMusic', }, { 'url': 'https://music.yandex.ru/users/ya.playlist/playlists/1036', 'only_matching': True, }, { # playlist exceeding the limit of 150 tracks (see # https://github.com/ytdl-org/youtube-dl/issues/6666) 'url': 'https://music.yandex.ru/users/mesiaz/playlists/1364', 'info_dict': { 'id': '1364', 'title': 'md5:b3b400f997d3f878a13ae0699653f7db', }, 'playlist_mincount': 437, # 'skip': 'Travis CI servers blocked by YandexMusic', }] def _real_extract(self, url): mobj = self._match_valid_url(url) tld = mobj.group('tld') user = mobj.group('user') playlist_id = mobj.group('id') playlist = self._call_api( 'playlist', tld, url, playlist_id, 'Downloading playlist JSON', { 'owner': user, 'kinds': playlist_id, 'light': 'true', 'lang': tld, 'external-domain': f'music.yandex.{tld}', 'overembed': 'false', })['playlist'] tracks = self._extract_tracks(playlist, playlist_id, url, tld) return self.playlist_result( self._build_playlist(tracks), str(playlist_id), playlist.get('title'), playlist.get('description')) class YandexMusicArtistBaseIE(YandexMusicPlaylistBaseIE): def _call_artist(self, tld, url, artist_id): return self._call_api( 'artist', tld, url, artist_id, f'Downloading artist {self._ARTIST_WHAT} JSON', { 'artist': artist_id, 'what': self._ARTIST_WHAT, 'sort': self._ARTIST_SORT or '', 'dir': '', 'period': '', 'lang': tld, 'external-domain': f'music.yandex.{tld}', 'overembed': 'false', }) def _real_extract(self, url): mobj = self._match_valid_url(url) tld = mobj.group('tld') artist_id = mobj.group('id') data = self._call_artist(tld, url, artist_id) tracks = self._extract_tracks(data, artist_id, url, tld) 
title = try_get(data, lambda x: x['artist']['name'], str) return self.playlist_result( self._build_playlist(tracks), artist_id, title) class YandexMusicArtistTracksIE(YandexMusicArtistBaseIE): IE_NAME = 'yandexmusic:artist:tracks' IE_DESC = 'Яндекс.Музыка - Артист - Треки' _VALID_URL = rf'{YandexMusicBaseIE._VALID_URL_BASE}/artist/(?P<id>\d+)/tracks' _TESTS = [{ 'url': 'https://music.yandex.ru/artist/617526/tracks', 'info_dict': { 'id': '617526', 'title': 'md5:131aef29d45fd5a965ca613e708c040b', }, 'playlist_count': 507, # 'skip': 'Travis CI servers blocked by YandexMusic', }] _ARTIST_SORT = '' _ARTIST_WHAT = 'tracks' def _real_extract(self, url): mobj = self._match_valid_url(url) tld = mobj.group('tld') artist_id = mobj.group('id') data = self._call_artist(tld, url, artist_id) tracks = self._extract_tracks(data, artist_id, url, tld) artist = try_get(data, lambda x: x['artist']['name'], str) title = '{} - {}'.format(artist or artist_id, 'Треки') return self.playlist_result( self._build_playlist(tracks), artist_id, title) class YandexMusicArtistAlbumsIE(YandexMusicArtistBaseIE): IE_NAME = 'yandexmusic:artist:albums' IE_DESC = 'Яндекс.Музыка - Артист - Альбомы' _VALID_URL = rf'{YandexMusicBaseIE._VALID_URL_BASE}/artist/(?P<id>\d+)/albums' _TESTS = [{ 'url': 'https://music.yandex.ru/artist/617526/albums', 'info_dict': { 'id': '617526', 'title': 'md5:55dc58d5c85699b7fb41ee926700236c', }, 'playlist_count': 8, # 'skip': 'Travis CI servers blocked by YandexMusic', }] _ARTIST_SORT = 'year' _ARTIST_WHAT = 'albums' def _real_extract(self, url): mobj = self._match_valid_url(url) tld = mobj.group('tld') artist_id = mobj.group('id') data = self._call_artist(tld, url, artist_id) entries = [] for album in data['albums']: if not isinstance(album, dict): continue album_id = album.get('id') if not album_id: continue entries.append(self.url_result( f'http://music.yandex.ru/album/{album_id}', ie=YandexMusicAlbumIE.ie_key(), video_id=album_id)) artist = try_get(data, lambda x: 
x['artist']['name'], str) title = '{} - {}'.format(artist or artist_id, 'Альбомы') return self.playlist_result(entries, artist_id, title)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ora.py
yt_dlp/extractor/ora.py
import re import urllib.parse from .common import InfoExtractor from ..utils import ( get_element_by_attribute, qualities, unescapeHTML, ) class OraTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?(?:ora\.tv|unsafespeech\.com)/([^/]+/)*(?P<id>[^/\?#]+)' _TESTS = [{ 'url': 'https://www.ora.tv/larrykingnow/2015/12/16/vine-youtube-stars-zach-king-king-bach-on-their-viral-videos-0_36jupg6090pq', 'md5': 'fa33717591c631ec93b04b0e330df786', 'info_dict': { 'id': '50178', 'ext': 'mp4', 'title': 'Vine & YouTube Stars Zach King & King Bach On Their Viral Videos!', 'description': 'md5:ebbc5b1424dd5dba7be7538148287ac1', }, }, { 'url': 'http://www.unsafespeech.com/video/2016/5/10/student-self-censorship-and-the-thought-police-on-university-campuses-0_6622bnkppw4d', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) video_data = self._search_regex( r'"(?:video|current)"\s*:\s*({[^}]+?})', webpage, 'current video') m3u8_url = self._search_regex( r'hls_stream"?\s*:\s*"([^"]+)', video_data, 'm3u8 url', None) if m3u8_url: formats = self._extract_m3u8_formats( m3u8_url, display_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) # similar to GameSpotIE m3u8_path = urllib.parse.urlparse(m3u8_url).path QUALITIES_RE = r'((,[a-z]+\d+)+,?)' available_qualities = self._search_regex( QUALITIES_RE, m3u8_path, 'qualities').strip(',').split(',') http_path = m3u8_path[1:].split('/', 1)[1] http_template = re.sub(QUALITIES_RE, r'%s', http_path) http_template = http_template.replace('.csmil/master.m3u8', '') http_template = urllib.parse.urljoin( 'http://videocdn-pmd.ora.tv/', http_template) preference = qualities( ['mobile400', 'basic400', 'basic600', 'sd900', 'sd1200', 'sd1500', 'hd720', 'hd1080']) for q in available_qualities: formats.append({ 'url': http_template % q, 'format_id': q, 'quality': preference(q), }) else: return self.url_result(self._search_regex( r'"youtube_id"\s*:\s*"([^"]+)', webpage, 
'youtube id'), 'Youtube') return { 'id': self._search_regex( r'"id"\s*:\s*(\d+)', video_data, 'video id', default=display_id), 'display_id': display_id, 'title': unescapeHTML(self._og_search_title(webpage)), 'description': get_element_by_attribute( 'class', 'video_txt_decription', webpage), 'thumbnail': self._proto_relative_url(self._search_regex( r'"thumb"\s*:\s*"([^"]+)', video_data, 'thumbnail', None)), 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/owncloud.py
yt_dlp/extractor/owncloud.py
import re import urllib.parse from .common import InfoExtractor from ..utils import ( ExtractorError, determine_ext, url_or_none, urlencode_postdata, ) class OwnCloudIE(InfoExtractor): _INSTANCES_RE = '|'.join(( r'(?:[^\.]+\.)?sciebo\.de', r'cloud\.uni-koblenz-landau\.de', )) _VALID_URL = rf'https?://(?:{_INSTANCES_RE})/s/(?P<id>[\w.-]+)' _TESTS = [ { 'url': 'https://ruhr-uni-bochum.sciebo.de/s/wWhqZzh9jTumVFN', 'info_dict': { 'id': 'wWhqZzh9jTumVFN', 'ext': 'mp4', 'title': 'CmvpJST.mp4', }, }, { 'url': 'https://ruhr-uni-bochum.sciebo.de/s/WNDuFu0XuFtmm3f', 'info_dict': { 'id': 'WNDuFu0XuFtmm3f', 'ext': 'mp4', 'title': 'CmvpJST.mp4', }, 'params': { 'videopassword': '12345', }, }, ] def _real_extract(self, url): video_id = self._match_id(url) webpage, urlh = self._download_webpage_handle(url, video_id) if re.search(r'<label[^>]+for="password"', webpage): webpage = self._verify_video_password(webpage, urlh.url, video_id) hidden_inputs = self._hidden_inputs(webpage) title = hidden_inputs.get('filename') parsed_url = urllib.parse.urlparse(url) return { 'id': video_id, 'title': title, 'url': url_or_none(hidden_inputs.get('downloadURL')) or parsed_url._replace( path=urllib.parse.urljoin(parsed_url.path, 'download')).geturl(), 'ext': determine_ext(title), } def _verify_video_password(self, webpage, url, video_id): password = self.get_param('videopassword') if password is None: raise ExtractorError( 'This video is protected by a password, use the --video-password option', expected=True) validation_response = self._download_webpage( url, video_id, 'Validating Password', 'Wrong password?', data=urlencode_postdata({ 'requesttoken': self._hidden_inputs(webpage)['requesttoken'], 'password': password, })) if re.search(r'<label[^>]+for="password"', validation_response): warning = self._search_regex( r'<div[^>]+class="warning">([^<]*)</div>', validation_response, 'warning', default='The password is wrong') raise ExtractorError(f'Opening the video failed, {self.IE_NAME} said: 
{warning!r}', expected=True) return validation_response
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/mocha.py
yt_dlp/extractor/mocha.py
from .common import InfoExtractor from ..utils import int_or_none, traverse_obj class MochaVideoIE(InfoExtractor): _VALID_URL = r'https?://video\.mocha\.com\.vn/(?P<video_slug>[\w-]+)' _TESTS = [{ 'url': 'http://video.mocha.com.vn/chuyen-meo-gia-su-tu-thong-diep-cuoc-song-v18694039', 'info_dict': { 'id': '18694039', 'title': 'Chuyện mèo giả sư tử | Thông điệp cuộc sống', 'ext': 'mp4', 'view_count': int, 'like_count': int, 'dislike_count': int, 'display_id': 'chuyen-meo-gia-su-tu-thong-diep-cuoc-song', 'thumbnail': 'http://mcvideomd1fr.keeng.net/playnow/images/20220505/ad0a055d-2f69-42ca-b888-4790041fe6bc_640x480.jpg', 'description': '', 'duration': 70, 'timestamp': 1652254203, 'upload_date': '20220511', 'comment_count': int, 'categories': ['Kids'], }, }] def _real_extract(self, url): video_slug = self._match_valid_url(url).group('video_slug') json_data = self._download_json( 'http://apivideo.mocha.com.vn:8081/onMediaBackendBiz/mochavideo/getVideoDetail', video_slug, query={'url': url, 'token': ''})['data']['videoDetail'] video_id = str(json_data['id']) video_urls = (json_data.get('list_resolution') or []) + [json_data.get('original_path')] formats, subtitles = [], {} for video in video_urls: if isinstance(video, str): formats.extend([{'url': video, 'ext': 'mp4'}]) else: fmts, subs = self._extract_m3u8_formats_and_subtitles( video.get('video_path'), video_id, ext='mp4') formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) return { 'id': video_id, 'display_id': json_data.get('slug') or video_slug, 'title': json_data.get('name'), 'formats': formats, 'subtitles': subtitles, 'description': json_data.get('description'), 'duration': json_data.get('durationS'), 'view_count': json_data.get('total_view'), 'like_count': json_data.get('total_like'), 'dislike_count': json_data.get('total_unlike'), 'thumbnail': json_data.get('image_path_thumb'), 'timestamp': int_or_none(json_data.get('publish_time'), scale=1000), 'is_live': json_data.get('isLive'), 'channel': 
traverse_obj(json_data, ('channels', '0', 'name')), 'channel_id': traverse_obj(json_data, ('channels', '0', 'id')), 'channel_follower_count': traverse_obj(json_data, ('channels', '0', 'numfollow')), 'categories': traverse_obj(json_data, ('categories', ..., 'categoryname')), 'comment_count': json_data.get('total_comment'), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/trovo.py
yt_dlp/extractor/trovo.py
import itertools import json import random import string from .common import InfoExtractor from ..utils import ( ExtractorError, format_field, int_or_none, str_or_none, traverse_obj, try_get, ) class TrovoBaseIE(InfoExtractor): _VALID_URL_BASE = r'https?://(?:www\.)?trovo\.live/' _HEADERS = {'Origin': 'https://trovo.live'} def _call_api(self, video_id, data): if 'persistedQuery' in data.get('extensions', {}): url = 'https://gql.trovo.live' else: url = 'https://api-web.trovo.live/graphql' resp = self._download_json( url, video_id, data=json.dumps([data]).encode(), headers={'Accept': 'application/json'}, query={ 'qid': ''.join(random.choices(string.ascii_uppercase + string.digits, k=16)), })[0] if 'errors' in resp: raise ExtractorError(f'Trovo said: {resp["errors"][0]["message"]}') return resp['data'][data['operationName']] def _extract_streamer_info(self, data): streamer_info = data.get('streamerInfo') or {} username = streamer_info.get('userName') return { 'uploader': streamer_info.get('nickName'), 'uploader_id': str_or_none(streamer_info.get('uid')), 'uploader_url': format_field(username, None, 'https://trovo.live/%s'), } class TrovoIE(TrovoBaseIE): _VALID_URL = TrovoBaseIE._VALID_URL_BASE + r'(?:s/)?(?!(?:clip|video)/)(?P<id>(?!s/)[^/?&#]+(?![^#]+[?&]vid=))' _TESTS = [{ 'url': 'https://trovo.live/Exsl', 'only_matching': True, }, { 'url': 'https://trovo.live/s/SkenonSLive/549759191497', 'only_matching': True, }, { 'url': 'https://trovo.live/s/zijo987/208251706', 'info_dict': { 'id': '104125853_104125853_1656439572', 'ext': 'flv', 'uploader_url': 'https://trovo.live/zijo987', 'uploader_id': '104125853', 'thumbnail': 'https://livecover.trovo.live/screenshot/73846_104125853_104125853-2022-06-29-04-00-22-852x480.jpg', 'uploader': 'zijo987', 'title': '💥IGRAMO IGRICE UPADAJTE💥2500/5000 2022-06-28 22:01', 'live_status': 'is_live', }, 'skip': 'May not be live', }] def _real_extract(self, url): username = self._match_id(url) live_info = self._call_api(username, data={ 
'operationName': 'live_LiveReaderService_GetLiveInfo', 'variables': { 'params': { 'userName': username, }, }, }) if live_info.get('isLive') == 0: raise ExtractorError(f'{username} is offline', expected=True) program_info = live_info['programInfo'] program_id = program_info['id'] title = program_info['title'] formats = [] for stream_info in (program_info.get('streamInfo') or []): play_url = stream_info.get('playUrl') if not play_url: continue format_id = stream_info.get('desc') formats.append({ 'format_id': format_id, 'height': int_or_none(format_id[:-1]) if format_id else None, 'url': play_url, 'tbr': stream_info.get('bitrate'), 'http_headers': self._HEADERS, }) info = { 'id': program_id, 'title': title, 'formats': formats, 'thumbnail': program_info.get('coverUrl'), 'is_live': True, } info.update(self._extract_streamer_info(live_info)) return info class TrovoVodIE(TrovoBaseIE): _VALID_URL = TrovoBaseIE._VALID_URL_BASE + r'(?:clip|video|s)/(?:[^/]+/\d+[^#]*[?&]vid=)?(?P<id>(?<!/s/)[^/?&#]+)' _TESTS = [{ 'url': 'https://trovo.live/clip/lc-5285890818705062210?ltab=videos', 'params': {'getcomments': True}, 'info_dict': { 'id': 'lc-5285890818705062210', 'ext': 'mp4', 'title': 'fatal moaning for a super good🤣🤣', 'uploader': 'OneTappedYou', 'timestamp': 1621628019, 'upload_date': '20210521', 'uploader_id': '100719456', 'duration': 31, 'view_count': int, 'like_count': int, 'comment_count': int, 'comments': 'mincount:1', 'categories': ['Call of Duty: Mobile'], 'uploader_url': 'https://trovo.live/OneTappedYou', 'thumbnail': r're:^https?://.*\.jpg', }, }, { 'url': 'https://trovo.live/s/SkenonSLive/549759191497?vid=ltv-100829718_100829718_387702301737980280', 'info_dict': { 'id': 'ltv-100829718_100829718_387702301737980280', 'ext': 'mp4', 'timestamp': 1654909624, 'thumbnail': 'http://vod.trovo.live/1f09baf0vodtransger1301120758/ef9ea3f0387702301737980280/coverBySnapshot/coverBySnapshot_10_0.jpg', 'uploader_id': '100829718', 'uploader': 'SkenonSLive', 'title': 'Trovo u secanju, 
uz par modova i muzike :)', 'uploader_url': 'https://trovo.live/SkenonSLive', 'duration': 10830, 'view_count': int, 'like_count': int, 'upload_date': '20220611', 'comment_count': int, 'categories': ['Minecraft'], }, 'skip': 'Not available', }, { 'url': 'https://trovo.live/s/Trovo/549756886599?vid=ltv-100264059_100264059_387702304241698583', 'info_dict': { 'id': 'ltv-100264059_100264059_387702304241698583', 'ext': 'mp4', 'timestamp': 1661479563, 'thumbnail': 'http://vod.trovo.live/be5ae591vodtransusw1301120758/cccb9915387702304241698583/coverBySnapshot/coverBySnapshot_10_0.jpg', 'uploader_id': '100264059', 'uploader': 'Trovo', 'title': 'Dev Corner 8/25', 'uploader_url': 'https://trovo.live/Trovo', 'duration': 3753, 'view_count': int, 'like_count': int, 'upload_date': '20220826', 'comment_count': int, 'categories': ['Talk Shows'], }, }, { 'url': 'https://trovo.live/video/ltv-100095501_100095501_1609596043', 'only_matching': True, }, { 'url': 'https://trovo.live/s/SkenonSLive/549759191497?foo=bar&vid=ltv-100829718_100829718_387702301737980280', 'only_matching': True, }] def _real_extract(self, url): vid = self._match_id(url) # NOTE: It is also possible to extract this info from the Nuxt data on the website, # however that seems unreliable - sometimes it randomly doesn't return the data, # at least when using a non-residential IP. 
resp = self._call_api(vid, data={ 'operationName': 'vod_VodReaderService_BatchGetVodDetailInfo', 'variables': { 'params': { 'vids': [vid], }, }, 'extensions': {}, }) vod_detail_info = traverse_obj(resp, ('VodDetailInfos', vid), expected_type=dict) if not vod_detail_info: raise ExtractorError('This video not found or not available anymore', expected=True) vod_info = vod_detail_info.get('vodInfo') title = vod_info.get('title') if try_get(vod_info, lambda x: x['playbackRights']['playbackRights'] != 'Normal'): playback_rights_setting = vod_info['playbackRights']['playbackRightsSetting'] if playback_rights_setting == 'SubscriberOnly': raise ExtractorError('This video is only available for subscribers', expected=True) else: raise ExtractorError(f'This video is not available ({playback_rights_setting})', expected=True) language = vod_info.get('languageName') formats = [] for play_info in (vod_info.get('playInfos') or []): play_url = play_info.get('playUrl') if not play_url: continue format_id = play_info.get('desc') formats.append({ 'ext': 'mp4', 'filesize': int_or_none(play_info.get('fileSize')), 'format_id': format_id, 'height': int_or_none(format_id[:-1]) if format_id else None, 'language': language, 'protocol': 'm3u8_native', 'tbr': int_or_none(play_info.get('bitrate')), 'url': play_url, 'http_headers': self._HEADERS, }) category = vod_info.get('categoryName') get_count = lambda x: int_or_none(vod_info.get(x + 'Num')) info = { 'id': vid, 'title': title, 'formats': formats, 'thumbnail': vod_info.get('coverUrl'), 'timestamp': int_or_none(vod_info.get('publishTs')), 'duration': int_or_none(vod_info.get('duration')), 'view_count': get_count('watch'), 'like_count': get_count('like'), 'comment_count': get_count('comment'), 'categories': [category] if category else None, '__post_extractor': self.extract_comments(vid), } info.update(self._extract_streamer_info(vod_detail_info)) return info def _get_comments(self, vid): for page in itertools.count(1): comments_json = 
self._call_api(vid, data={ 'operationName': 'public_CommentProxyService_GetCommentList', 'variables': { 'params': { 'appInfo': { 'postID': vid, }, 'preview': {}, 'pageSize': 99, 'page': page, }, }, 'extensions': { 'singleReq': 'true', }, }) for comment in comments_json['commentList']: content = comment.get('content') if not content: continue author = comment.get('author') or {} parent = comment.get('parentID') yield { 'author': author.get('nickName'), 'author_id': str_or_none(author.get('uid')), 'id': str_or_none(comment.get('commentID')), 'text': content, 'timestamp': int_or_none(comment.get('createdAt')), 'parent': 'root' if parent == 0 else str_or_none(parent), } if comments_json['lastPage']: break class TrovoChannelBaseIE(TrovoBaseIE): def _entries(self, spacename): for page in itertools.count(1): vod_json = self._call_api(spacename, data={ 'operationName': self._OPERATION, 'variables': { 'params': { 'terminalSpaceID': { 'spaceName': spacename, }, 'currPage': page, 'pageSize': 99, }, }, 'extensions': { 'singleReq': 'true', }, }) vods = vod_json.get('vodInfos', []) for vod in vods: vid = vod.get('vid') room = traverse_obj(vod, ('spaceInfo', 'roomID')) yield self.url_result( f'https://trovo.live/s/{spacename}/{room}?vid={vid}', ie=TrovoVodIE.ie_key()) has_more = vod_json.get('hasMore') if not has_more: break def _real_extract(self, url): spacename = self._match_id(url) return self.playlist_result(self._entries(spacename), playlist_id=spacename) class TrovoChannelVodIE(TrovoChannelBaseIE): _VALID_URL = r'trovovod:(?P<id>[^\s]+)' IE_DESC = 'All VODs of a trovo.live channel; "trovovod:" prefix' _TESTS = [{ 'url': 'trovovod:OneTappedYou', 'playlist_mincount': 24, 'info_dict': { 'id': 'OneTappedYou', }, }] _OPERATION = 'vod_VodReaderService_GetChannelLtvVideoInfos' class TrovoChannelClipIE(TrovoChannelBaseIE): _VALID_URL = r'trovoclip:(?P<id>[^\s]+)' IE_DESC = 'All Clips of a trovo.live channel; "trovoclip:" prefix' _TESTS = [{ 'url': 'trovoclip:OneTappedYou', 
'playlist_mincount': 29, 'info_dict': { 'id': 'OneTappedYou', }, }] _OPERATION = 'vod_VodReaderService_GetChannelClipVideoInfos'
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/arnes.py
yt_dlp/extractor/arnes.py
import urllib.parse from .common import InfoExtractor from ..utils import ( float_or_none, format_field, int_or_none, parse_iso8601, remove_start, ) class ArnesIE(InfoExtractor): IE_NAME = 'video.arnes.si' IE_DESC = 'Arnes Video' _VALID_URL = r'https?://video\.arnes\.si/(?:[a-z]{2}/)?(?:watch|embed|api/(?:asset|public/video))/(?P<id>[0-9a-zA-Z]{12})' _TESTS = [{ 'url': 'https://video.arnes.si/watch/a1qrWTOQfVoU?t=10', 'md5': '4d0f4d0a03571b33e1efac25fd4a065d', 'info_dict': { 'id': 'a1qrWTOQfVoU', 'ext': 'mp4', 'title': 'Linearna neodvisnost, definicija', 'description': 'Linearna neodvisnost, definicija', 'license': 'PRIVATE', 'creator': 'Polona Oblak', 'timestamp': 1585063725, 'upload_date': '20200324', 'channel': 'Polona Oblak', 'channel_id': 'q6pc04hw24cj', 'channel_url': 'https://video.arnes.si/?channel=q6pc04hw24cj', 'duration': 596.75, 'view_count': int, 'tags': ['linearna_algebra'], 'start_time': 10, }, }, { 'url': 'https://video.arnes.si/api/asset/s1YjnV7hadlC/play.mp4', 'only_matching': True, }, { 'url': 'https://video.arnes.si/embed/s1YjnV7hadlC', 'only_matching': True, }, { 'url': 'https://video.arnes.si/en/watch/s1YjnV7hadlC', 'only_matching': True, }, { 'url': 'https://video.arnes.si/embed/s1YjnV7hadlC?t=123&hideRelated=1', 'only_matching': True, }, { 'url': 'https://video.arnes.si/api/public/video/s1YjnV7hadlC', 'only_matching': True, }] _BASE_URL = 'https://video.arnes.si' def _real_extract(self, url): video_id = self._match_id(url) video = self._download_json( self._BASE_URL + '/api/public/video/' + video_id, video_id)['data'] title = video['title'] formats = [] for media in (video.get('media') or []): media_url = media.get('url') if not media_url: continue formats.append({ 'url': self._BASE_URL + media_url, 'format_id': remove_start(media.get('format'), 'FORMAT_'), 'format_note': media.get('formatTranslation'), 'width': int_or_none(media.get('width')), 'height': int_or_none(media.get('height')), }) channel = video.get('channel') or {} channel_id = 
channel.get('url') thumbnail = video.get('thumbnailUrl') return { 'id': video_id, 'title': title, 'formats': formats, 'thumbnail': self._BASE_URL + thumbnail, 'description': video.get('description'), 'license': video.get('license'), 'creator': video.get('author'), 'timestamp': parse_iso8601(video.get('creationTime')), 'channel': channel.get('name'), 'channel_id': channel_id, 'channel_url': format_field(channel_id, None, f'{self._BASE_URL}/?channel=%s'), 'duration': float_or_none(video.get('duration'), 1000), 'view_count': int_or_none(video.get('views')), 'tags': video.get('hashtags'), 'start_time': int_or_none(urllib.parse.parse_qs( urllib.parse.urlparse(url).query).get('t', [None])[0]), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/teachable.py
yt_dlp/extractor/teachable.py
import re from .common import InfoExtractor from .wistia import WistiaIE from ..utils import ( ExtractorError, clean_html, get_element_by_class, int_or_none, strip_or_none, urlencode_postdata, urljoin, ) class TeachableBaseIE(InfoExtractor): _NETRC_MACHINE = 'teachable' _URL_PREFIX = 'teachable:' _SITES = { # Only notable ones here 'v1.upskillcourses.com': 'upskill', 'gns3.teachable.com': 'gns3', 'academyhacker.com': 'academyhacker', 'stackskills.com': 'stackskills', 'market.saleshacker.com': 'saleshacker', 'learnability.org': 'learnability', 'edurila.com': 'edurila', 'courses.workitdaily.com': 'workitdaily', } _VALID_URL_SUB_TUPLE = (_URL_PREFIX, '|'.join(re.escape(site) for site in _SITES)) def _real_initialize(self): self._logged_in = False def _login(self, site): if self._logged_in: return username, password = self._get_login_info(netrc_machine=self._SITES.get(site, site)) if username is None: return login_page, urlh = self._download_webpage_handle( f'https://{site}/sign_in', None, f'Downloading {site} login page') def is_logged(webpage): return any(re.search(p, webpage) for p in ( r'class=["\']user-signout', r'<a[^>]+\bhref=["\']/sign_out', r'Log\s+[Oo]ut\s*<')) if is_logged(login_page): self._logged_in = True return login_url = urlh.url login_form = self._hidden_inputs(login_page) login_form.update({ 'user[email]': username, 'user[password]': password, }) post_url = self._search_regex( r'<form[^>]+action=(["\'])(?P<url>(?:(?!\1).)+)\1', login_page, 'post url', default=login_url, group='url') if not post_url.startswith('http'): post_url = urljoin(login_url, post_url) response = self._download_webpage( post_url, None, f'Logging in to {site}', data=urlencode_postdata(login_form), headers={ 'Content-Type': 'application/x-www-form-urlencoded', 'Referer': login_url, }) if '>I accept the new Privacy Policy<' in response: raise ExtractorError( f'Unable to login: {site} asks you to accept new Privacy Policy. 
' f'Go to https://{site}/ and accept.', expected=True) # Successful login if is_logged(response): self._logged_in = True return message = get_element_by_class('alert', response) if message is not None: raise ExtractorError( f'Unable to login: {clean_html(message)}', expected=True) raise ExtractorError('Unable to log in') class TeachableIE(TeachableBaseIE): _WORKING = False _VALID_URL = r'''(?x) (?: {}https?://(?P<site_t>[^/]+)| https?://(?:www\.)?(?P<site>{}) ) /courses/[^/]+/lectures/(?P<id>\d+) '''.format(*TeachableBaseIE._VALID_URL_SUB_TUPLE) _TESTS = [{ 'url': 'https://gns3.teachable.com/courses/gns3-certified-associate/lectures/6842364', 'info_dict': { 'id': 'untlgzk1v7', 'ext': 'bin', 'title': 'Overview', 'description': 'md5:071463ff08b86c208811130ea1c2464c', 'duration': 736.4, 'timestamp': 1542315762, 'upload_date': '20181115', 'chapter': 'Welcome', 'chapter_number': 1, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://v1.upskillcourses.com/courses/119763/lectures/1747100', 'only_matching': True, }, { 'url': 'https://gns3.teachable.com/courses/423415/lectures/6885939', 'only_matching': True, }, { 'url': 'teachable:https://v1.upskillcourses.com/courses/essential-web-developer-course/lectures/1747100', 'only_matching': True, }] @staticmethod def _is_teachable(webpage): return 'teachableTracker.linker:autoLink' in webpage and re.search( r'<link[^>]+href=["\']https?://(?:process\.fs|assets)\.teachablecdn\.com', webpage) @classmethod def _extract_embed_urls(cls, url, webpage): if cls._is_teachable(webpage): if re.match(r'https?://[^/]+/(?:courses|p)', url): yield f'{cls._URL_PREFIX}{url}' raise cls.StopExtraction def _real_extract(self, url): mobj = self._match_valid_url(url) site = mobj.group('site') or mobj.group('site_t') video_id = mobj.group('id') self._login(site) prefixed = url.startswith(self._URL_PREFIX) if prefixed: url = url[len(self._URL_PREFIX):] webpage = self._download_webpage(url, video_id) wistia_urls = 
WistiaIE._extract_embed_urls(url, webpage) if not wistia_urls: if any(re.search(p, webpage) for p in ( r'class=["\']lecture-contents-locked', r'>\s*Lecture contents locked', r'id=["\']lecture-locked', # https://academy.tailoredtutors.co.uk/courses/108779/lectures/1955313 r'class=["\'](?:inner-)?lesson-locked', r'>LESSON LOCKED<')): self.raise_login_required('Lecture contents locked') raise ExtractorError('Unable to find video URL') title = self._og_search_title(webpage, default=None) chapter = None chapter_number = None section_item = self._search_regex( rf'(?s)(?P<li><li[^>]+\bdata-lecture-id=["\']{video_id}[^>]+>.+?</li>)', webpage, 'section item', default=None, group='li') if section_item: chapter_number = int_or_none(self._search_regex( r'data-ss-position=["\'](\d+)', section_item, 'section id', default=None)) if chapter_number is not None: sections = [] for s in re.findall( r'(?s)<div[^>]+\bclass=["\']section-title[^>]+>(.+?)</div>', webpage): section = strip_or_none(clean_html(s)) if not section: sections = [] break sections.append(section) if chapter_number <= len(sections): chapter = sections[chapter_number - 1] entries = [{ '_type': 'url_transparent', 'url': wistia_url, 'ie_key': WistiaIE.ie_key(), 'title': title, 'chapter': chapter, 'chapter_number': chapter_number, } for wistia_url in wistia_urls] return self.playlist_result(entries, video_id, title) class TeachableCourseIE(TeachableBaseIE): _VALID_URL = r'''(?x) (?: {}https?://(?P<site_t>[^/]+)| https?://(?:www\.)?(?P<site>{}) ) /(?:courses|p)/(?:enrolled/)?(?P<id>[^/?#&]+) '''.format(*TeachableBaseIE._VALID_URL_SUB_TUPLE) _TESTS = [{ 'url': 'http://v1.upskillcourses.com/courses/essential-web-developer-course/', 'info_dict': { 'id': 'essential-web-developer-course', 'title': 'The Essential Web Developer Course (Free)', }, 'playlist_count': 192, }, { 'url': 'http://v1.upskillcourses.com/courses/119763/', 'only_matching': True, }, { 'url': 'http://v1.upskillcourses.com/courses/enrolled/119763', 
'only_matching': True, }, { 'url': 'https://gns3.teachable.com/courses/enrolled/423415', 'only_matching': True, }, { 'url': 'teachable:https://learn.vrdev.school/p/gear-vr-developer-mini', 'only_matching': True, }, { 'url': 'teachable:https://filmsimplified.com/p/davinci-resolve-15-crash-course', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if TeachableIE.suitable(url) else super().suitable(url) def _real_extract(self, url): mobj = self._match_valid_url(url) site = mobj.group('site') or mobj.group('site_t') course_id = mobj.group('id') self._login(site) prefixed = url.startswith(self._URL_PREFIX) if prefixed: prefix = self._URL_PREFIX url = url[len(prefix):] webpage = self._download_webpage(url, course_id) url_base = f'https://{site}/' entries = [] for mobj in re.finditer( r'(?s)(?P<li><li[^>]+class=(["\'])(?:(?!\2).)*?section-item[^>]+>.+?</li>)', webpage): li = mobj.group('li') if 'fa-youtube-play' not in li and not re.search(r'\d{1,2}:\d{2}', li): continue lecture_url = self._search_regex( r'<a[^>]+href=(["\'])(?P<url>(?:(?!\1).)+)\1', li, 'lecture url', default=None, group='url') if not lecture_url: continue lecture_id = self._search_regex( r'/lectures/(\d+)', lecture_url, 'lecture id', default=None) title = self._html_search_regex( r'<span[^>]+class=["\']lecture-name[^>]+>([^<]+)', li, 'title', default=None) entry_url = urljoin(url_base, lecture_url) if prefixed: entry_url = self._URL_PREFIX + entry_url entries.append( self.url_result( entry_url, ie=TeachableIE.ie_key(), video_id=lecture_id, video_title=clean_html(title))) course_title = self._html_search_regex( (r'(?s)<img[^>]+class=["\']course-image[^>]+>\s*<h\d>(.+?)</h', r'(?s)<h\d[^>]+class=["\']course-title[^>]+>(.+?)</h'), webpage, 'course title', fatal=False) return self.playlist_result(entries, course_id, course_title)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/zingmp3.py
yt_dlp/extractor/zingmp3.py
import hashlib import hmac import itertools import json import urllib.parse from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, join_nonempty, try_call, url_or_none, urljoin, ) from ..utils.traversal import traverse_obj class ZingMp3BaseIE(InfoExtractor): _VALID_URL_TMPL = r'https?://(?:mp3\.zing|zingmp3)\.vn/(?P<type>(?:%s))/[^/?#]+/(?P<id>\w+)(?:\.html|\?)' _GEO_COUNTRIES = ['VN'] _DOMAIN = 'https://zingmp3.vn' _PER_PAGE = 50 _API_SLUGS = { # Audio/video 'bai-hat': '/api/v2/page/get/song', 'embed': '/api/v2/page/get/song', 'video-clip': '/api/v2/page/get/video', 'lyric': '/api/v2/lyric/get/lyric', 'song-streaming': '/api/v2/song/get/streaming', 'liveradio': '/api/v2/livestream/get/info', 'eps': '/api/v2/page/get/podcast-episode', 'episode-streaming': '/api/v2/podcast/episode/get/streaming', # Playlist 'playlist': '/api/v2/page/get/playlist', 'album': '/api/v2/page/get/playlist', 'pgr': '/api/v2/page/get/podcast-program', 'pgr-list': '/api/v2/podcast/episode/get/list', 'cgr': '/api/v2/page/get/podcast-category', 'cgr-list': '/api/v2/podcast/program/get/list-by-cate', 'cgrs': '/api/v2/page/get/podcast-categories', # Chart 'zing-chart': '/api/v2/page/get/chart-home', 'zing-chart-tuan': '/api/v2/page/get/week-chart', 'moi-phat-hanh': '/api/v2/page/get/newrelease-chart', 'the-loai-video': '/api/v2/video/get/list', # User 'info-artist': '/api/v2/page/get/artist', 'user-list-song': '/api/v2/song/get/list', 'user-list-video': '/api/v2/video/get/list', 'hub': '/api/v2/page/get/hub-detail', 'new-release': '/api/v2/chart/get/new-release', 'top100': '/api/v2/page/get/top-100', 'podcast-new': '/api/v2/podcast/program/get/list-by-type', 'top-podcast': '/api/v2/podcast/program/get/top-episode', } def _api_url(self, url_type, params): api_slug = self._API_SLUGS[url_type] params.update({'ctime': '1'}) sha256 = hashlib.sha256( ''.join(f'{k}={v}' for k, v in sorted(params.items())).encode()).hexdigest() data = { **params, 'apiKey': 
'X5BM3w8N7MKozC0B85o4KMlzLZKhV00y', 'sig': hmac.new(b'acOrvUS15XRW2o9JksiK1KgQ6Vbds8ZW', f'{api_slug}{sha256}'.encode(), hashlib.sha512).hexdigest(), } return f'{self._DOMAIN}{api_slug}?{urllib.parse.urlencode(data)}' def _call_api(self, url_type, params, display_id=None, **kwargs): resp = self._download_json( self._api_url(url_type, params), display_id or params.get('id'), note=f'Downloading {url_type} JSON metadata', **kwargs) return (resp or {}).get('data') or {} def _real_initialize(self): if not self._cookies_passed: self._request_webpage( self._api_url('bai-hat', {'id': ''}), None, note='Updating cookies') def _parse_items(self, items): for url in traverse_obj(items, (..., 'link')) or []: yield self.url_result(urljoin(self._DOMAIN, url)) def _fetch_page(self, id_, url_type, page): raise NotImplementedError('This method must be implemented by subclasses') def _paged_list(self, _id, url_type): count = 0 for page in itertools.count(1): data = self._fetch_page(_id, url_type, page) entries = list(self._parse_items(data.get('items'))) count += len(entries) yield from entries if not data.get('hasMore') or try_call(lambda: count > data['total']): break class ZingMp3IE(ZingMp3BaseIE): _VALID_URL = ZingMp3BaseIE._VALID_URL_TMPL % 'bai-hat|video-clip|embed|eps' IE_NAME = 'zingmp3' IE_DESC = 'zingmp3.vn' _TESTS = [{ 'url': 'https://mp3.zing.vn/bai-hat/Xa-Mai-Xa-Bao-Thy/ZWZB9WAB.html', 'md5': 'ead7ae13693b3205cbc89536a077daed', 'info_dict': { 'id': 'ZWZB9WAB', 'title': 'Xa Mãi Xa', 'ext': 'mp3', 'thumbnail': r're:^https?://.+\.jpg', 'subtitles': { 'origin': [{ 'ext': 'lrc', }], }, 'duration': 255, 'track': 'Xa Mãi Xa', 'artist': 'Bảo Thy', 'album': 'Special Album', 'album_artist': 'Bảo Thy', }, }, { 'url': 'https://zingmp3.vn/video-clip/Suong-Hoa-Dua-Loi-K-ICM-RYO/ZO8ZF7C7.html', 'md5': '92c6e7a019f06b4682a6c35ae5785fab', 'info_dict': { 'id': 'ZO8ZF7C7', 'title': 'Sương Hoa Đưa Lối', 'ext': 'mp4', 'thumbnail': r're:^https?://.+\.jpg', 'duration': 207, 'track': 'Sương Hoa 
Đưa Lối', 'artist': 'K-ICM, RYO', 'album': 'Sương Hoa Đưa Lối (Single)', 'album_artist': 'K-ICM, RYO', }, }, { 'url': 'https://zingmp3.vn/bai-hat/Nguoi-Yeu-Toi-Lanh-Lung-Sat-Da-Mr-Siro/ZZ6IW7OU.html', 'md5': '3e9f7a9bd0d965573dbff8d7c68b629d', 'info_dict': { 'id': 'ZZ6IW7OU', 'title': 'Người Yêu Tôi Lạnh Lùng Sắt Đá', 'ext': 'mp3', 'thumbnail': r're:^https?://.+\.jpg', 'duration': 303, 'track': 'Người Yêu Tôi Lạnh Lùng Sắt Đá', 'artist': 'Mr. Siro', 'album': 'Người Yêu Tôi Lạnh Lùng Sắt Đá (Single)', 'album_artist': 'Mr. Siro', }, }, { 'url': 'https://zingmp3.vn/eps/Cham-x-Ban-Noi-Goi-La-Nha/ZZD9ACWI.html', 'md5': 'd52f9f63e2631e004e4f15188eedcf80', 'info_dict': { 'id': 'ZZD9ACWI', 'title': 'Chạm x Bạn - Nơi Gọi Là Nhà', 'ext': 'mp3', 'duration': 3716, 'thumbnail': r're:^https?://.+\.jpg', 'track': 'Chạm x Bạn - Nơi Gọi Là Nhà', 'artist': 'On Air', 'album': 'Top Podcast', 'album_artist': 'On Air', }, }, { 'url': 'https://zingmp3.vn/embed/song/ZWZEI76B?start=false', 'only_matching': True, }, { 'url': 'https://zingmp3.vn/bai-hat/Xa-Mai-Xa-Bao-Thy/ZWZB9WAB.html', 'only_matching': True, }] def _real_extract(self, url): song_id, url_type = self._match_valid_url(url).group('id', 'type') item = self._call_api(url_type, {'id': song_id}) item_id = item.get('encodeId') or song_id if url_type == 'video-clip': source = item.get('streaming') source['mp4'] = self._download_json( 'http://api.mp3.zing.vn/api/mobile/video/getvideoinfo', item_id, query={'requestdata': json.dumps({'id': item_id})}, note='Downloading mp4 JSON metadata').get('source') elif url_type == 'eps': source = self._call_api('episode-streaming', {'id': item_id}) else: source = self._call_api('song-streaming', {'id': item_id}) formats = [] for k, v in (source or {}).items(): if not v or v == 'VIP': continue if k not in ('mp4', 'hls'): formats.append({ 'ext': 'mp3', 'format_id': k, 'tbr': int_or_none(k), 'url': self._proto_relative_url(v), 'vcodec': 'none', }) continue for res, video_url in v.items(): if not 
video_url: continue if k == 'hls': formats.extend(self._extract_m3u8_formats(video_url, item_id, 'mp4', m3u8_id=k, fatal=False)) continue formats.append({ 'format_id': f'mp4-{res}', 'url': video_url, 'height': int_or_none(res), }) if not formats: if item.get('msg') == 'Sorry, this content is not available in your country.': self.raise_geo_restricted(countries=self._GEO_COUNTRIES, metadata_available=True) else: self.raise_no_formats('The song is only for VIP accounts.') lyric = item.get('lyric') or self._call_api('lyric', {'id': item_id}, fatal=False).get('file') return { 'id': item_id, 'title': traverse_obj(item, 'title', 'alias'), 'thumbnail': traverse_obj(item, 'thumbnail', 'thumbnailM'), 'duration': int_or_none(item.get('duration')), 'track': traverse_obj(item, 'title', 'alias'), 'artist': traverse_obj(item, 'artistsNames', 'artists_names', ('artists', 0, 'name')), 'album': traverse_obj(item, ('album', ('name', 'title')), ('genres', 0, 'name'), get_all=False), 'album_artist': traverse_obj(item, ('album', ('artistsNames', 'artists_names')), ('artists', 0, 'name'), get_all=False), 'formats': formats, 'subtitles': {'origin': [{'url': lyric}]} if lyric else None, } class ZingMp3AlbumIE(ZingMp3BaseIE): _VALID_URL = ZingMp3BaseIE._VALID_URL_TMPL % 'album|playlist' _TESTS = [{ 'url': 'https://zingmp3.vn/album/Ca-Phe-Quan-Quen-Hoang-Dung-My-Anh-Da-LAB-Thinh-Suy/ZOC7WUZC.html', 'info_dict': { 'id': 'ZOC7WUZC', 'title': 'Cà Phê Quán Quen', }, 'playlist_mincount': 10, }, { 'url': 'https://zingmp3.vn/album/Nhung-Bai-Hat-Hay-Nhat-Cua-Mr-Siro-Mr-Siro/ZWZAEZZD.html', 'info_dict': { 'id': 'ZWZAEZZD', 'title': 'Những Bài Hát Hay Nhất Của Mr. 
Siro', }, 'playlist_mincount': 20, }, { 'url': 'http://mp3.zing.vn/playlist/Duong-Hong-Loan-apollobee/IWCAACCB.html', 'only_matching': True, }, { 'url': 'https://zingmp3.vn/album/Lau-Dai-Tinh-Ai-Bang-Kieu-Minh-Tuyet/ZWZBWDAF.html', 'only_matching': True, }] IE_NAME = 'zingmp3:album' def _real_extract(self, url): song_id, url_type = self._match_valid_url(url).group('id', 'type') data = self._call_api(url_type, {'id': song_id}) return self.playlist_result( self._parse_items(traverse_obj(data, ('song', 'items'))), traverse_obj(data, 'id', 'encodeId'), traverse_obj(data, 'name', 'title')) class ZingMp3ChartHomeIE(ZingMp3BaseIE): _VALID_URL = r'https?://(?:mp3\.zing|zingmp3)\.vn/(?P<id>(?:zing-chart|moi-phat-hanh|top100|podcast-discover))/?(?:[#?]|$)' _TESTS = [{ 'url': 'https://zingmp3.vn/zing-chart', 'info_dict': { 'id': 'zing-chart', }, 'playlist_mincount': 100, }, { 'url': 'https://zingmp3.vn/moi-phat-hanh', 'info_dict': { 'id': 'moi-phat-hanh', }, 'playlist_mincount': 100, }, { 'url': 'https://zingmp3.vn/top100', 'info_dict': { 'id': 'top100', }, 'playlist_mincount': 50, }, { 'url': 'https://zingmp3.vn/podcast-discover', 'info_dict': { 'id': 'podcast-discover', }, 'playlist_mincount': 4, }] IE_NAME = 'zingmp3:chart-home' def _real_extract(self, url): url_type = self._match_id(url) params = {'id': url_type} if url_type == 'podcast-discover': params['type'] = 'discover' data = self._call_api(url_type, params) items = [] if url_type == 'top100': items.extend(traverse_obj(data, (..., 'items', ..., {dict}))) elif url_type == 'zing-chart': items.extend(traverse_obj(data, ('RTChart', 'items', ..., {dict}))) else: items.extend(traverse_obj(data, ('items', ..., {dict}))) return self.playlist_result(self._parse_items(items), url_type) class ZingMp3WeekChartIE(ZingMp3BaseIE): _VALID_URL = ZingMp3BaseIE._VALID_URL_TMPL % 'zing-chart-tuan' IE_NAME = 'zingmp3:week-chart' _TESTS = [{ 'url': 'https://zingmp3.vn/zing-chart-tuan/Bai-hat-Viet-Nam/IWZ9Z08I.html', 'info_dict': { 'id': 
'IWZ9Z08I', 'title': 'zing-chart-vn', }, 'playlist_mincount': 10, }, { 'url': 'https://zingmp3.vn/zing-chart-tuan/Bai-hat-US-UK/IWZ9Z0BW.html', 'info_dict': { 'id': 'IWZ9Z0BW', 'title': 'zing-chart-us', }, 'playlist_mincount': 10, }, { 'url': 'https://zingmp3.vn/zing-chart-tuan/Bai-hat-KPop/IWZ9Z0BO.html', 'info_dict': { 'id': 'IWZ9Z0BO', 'title': 'zing-chart-korea', }, 'playlist_mincount': 10, }] def _real_extract(self, url): song_id, url_type = self._match_valid_url(url).group('id', 'type') data = self._call_api(url_type, {'id': song_id}) return self.playlist_result( self._parse_items(data['items']), song_id, f'zing-chart-{data.get("country", "")}') class ZingMp3ChartMusicVideoIE(ZingMp3BaseIE): _VALID_URL = r'https?://(?:mp3\.zing|zingmp3)\.vn/(?P<type>the-loai-video)/(?P<regions>[^/]+)/(?P<id>[^\.]+)' IE_NAME = 'zingmp3:chart-music-video' _TESTS = [{ 'url': 'https://zingmp3.vn/the-loai-video/Viet-Nam/IWZ9Z08I.html', 'info_dict': { 'id': 'IWZ9Z08I', 'title': 'the-loai-video_Viet-Nam', }, 'playlist_mincount': 400, }, { 'url': 'https://zingmp3.vn/the-loai-video/Au-My/IWZ9Z08O.html', 'info_dict': { 'id': 'IWZ9Z08O', 'title': 'the-loai-video_Au-My', }, 'playlist_mincount': 40, }, { 'url': 'https://zingmp3.vn/the-loai-video/Han-Quoc/IWZ9Z08W.html', 'info_dict': { 'id': 'IWZ9Z08W', 'title': 'the-loai-video_Han-Quoc', }, 'playlist_mincount': 30, }, { 'url': 'https://zingmp3.vn/the-loai-video/Khong-Loi/IWZ9Z086.html', 'info_dict': { 'id': 'IWZ9Z086', 'title': 'the-loai-video_Khong-Loi', }, 'playlist_mincount': 1, }] def _fetch_page(self, song_id, url_type, page): return self._call_api(url_type, { 'id': song_id, 'type': 'genre', 'page': page, 'count': self._PER_PAGE, }) def _real_extract(self, url): song_id, regions, url_type = self._match_valid_url(url).group('id', 'regions', 'type') return self.playlist_result(self._paged_list(song_id, url_type), song_id, f'{url_type}_{regions}') class ZingMp3UserIE(ZingMp3BaseIE): _VALID_URL = 
r'https?://(?:mp3\.zing|zingmp3)\.vn/(?P<user>[^/]+)/(?P<type>bai-hat|single|album|video|song)/?(?:[?#]|$)' IE_NAME = 'zingmp3:user' _TESTS = [{ 'url': 'https://zingmp3.vn/Mr-Siro/bai-hat', 'info_dict': { 'id': 'IWZ98609', 'title': 'Mr. Siro - bai-hat', 'description': 'md5:5bdcf45e955dc1b8d7f518f322ffef36', }, 'playlist_mincount': 91, }, { 'url': 'https://zingmp3.vn/Mr-Siro/album', 'info_dict': { 'id': 'IWZ98609', 'title': 'Mr. Siro - album', 'description': 'md5:5bdcf45e955dc1b8d7f518f322ffef36', }, 'playlist_mincount': 3, }, { 'url': 'https://zingmp3.vn/Mr-Siro/single', 'info_dict': { 'id': 'IWZ98609', 'title': 'Mr. Siro - single', 'description': 'md5:5bdcf45e955dc1b8d7f518f322ffef36', }, 'playlist_mincount': 20, }, { 'url': 'https://zingmp3.vn/Mr-Siro/video', 'info_dict': { 'id': 'IWZ98609', 'title': 'Mr. Siro - video', 'description': 'md5:5bdcf45e955dc1b8d7f518f322ffef36', }, 'playlist_mincount': 15, }, { 'url': 'https://zingmp3.vn/new-release/song', 'info_dict': { 'id': 'new-release-song', }, 'playlist_mincount': 50, }, { 'url': 'https://zingmp3.vn/new-release/album', 'info_dict': { 'id': 'new-release-album', }, 'playlist_mincount': 20, }] def _fetch_page(self, user_id, url_type, page): url_type = 'user-list-song' if url_type == 'bai-hat' else 'user-list-video' return self._call_api(url_type, { 'id': user_id, 'type': 'artist', 'page': page, 'count': self._PER_PAGE, }) def _real_extract(self, url): alias, url_type = self._match_valid_url(url).group('user', 'type') if not url_type: url_type = 'bai-hat' user_info = self._call_api('info-artist', {}, alias, query={'alias': alias}) # Handle for new-release if alias == 'new-release' and url_type in ('song', 'album'): _id = f'{alias}-{url_type}' return self.playlist_result(self._parse_items( self._call_api('new-release', params={'type': url_type}, display_id=_id)), _id) else: # Handle for user/artist if url_type in ('bai-hat', 'video'): entries = self._paged_list(user_info['id'], url_type) else: section_id = 'aAlbum' 
if url_type == 'album' else 'aSingle' entries = self._parse_items(traverse_obj(user_info, ( 'sections', lambda _, v: v['sectionId'] == section_id, 'items', ...))) return self.playlist_result( entries, user_info['id'], join_nonempty(user_info.get('name'), url_type, delim=' - '), user_info.get('biography')) class ZingMp3HubIE(ZingMp3BaseIE): IE_NAME = 'zingmp3:hub' _VALID_URL = r'https?://(?:mp3\.zing|zingmp3)\.vn/(?P<type>hub)/[^/?#]+/(?P<id>[^./?#]+)' _TESTS = [{ 'url': 'https://zingmp3.vn/hub/Nhac-Moi/IWZ9Z0CA.html', 'info_dict': { 'id': 'IWZ9Z0CA', 'title': 'BXH Nhạc Mới', 'description': 'md5:1cc31b68a6f746427b07b2756c22a558', }, 'playlist_mincount': 20, }, { 'url': 'https://zingmp3.vn/hub/Nhac-Viet/IWZ9Z087.html', 'info_dict': { 'id': 'IWZ9Z087', 'title': 'Nhạc Việt', 'description': 'md5:acc976c8bdde64d5c6ee4a92c39f7a77', }, 'playlist_mincount': 30, }] def _real_extract(self, url): song_id, url_type = self._match_valid_url(url).group('id', 'type') hub_detail = self._call_api(url_type, {'id': song_id}) entries = self._parse_items(traverse_obj(hub_detail, ( 'sections', lambda _, v: v['sectionId'] == 'hub', 'items', ...))) return self.playlist_result( entries, song_id, hub_detail.get('title'), hub_detail.get('description')) class ZingMp3LiveRadioIE(ZingMp3BaseIE): IE_NAME = 'zingmp3:liveradio' _VALID_URL = r'https?://(?:mp3\.zing|zingmp3)\.vn/(?P<type>(?:liveradio))/(?P<id>\w+)(?:\.html|\?)' _TESTS = [{ 'url': 'https://zingmp3.vn/liveradio/IWZ979UB.html', 'info_dict': { 'id': 'IWZ979UB', 'title': r're:^V\-POP', 'description': 'md5:aa857f8a91dc9ce69e862a809e4bdc10', 'ext': 'mp4', 'view_count': int, 'thumbnail': r're:^https?://.*\.jpg', 'like_count': int, 'live_status': 'is_live', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://zingmp3.vn/liveradio/IWZ97CWB.html', 'info_dict': { 'id': 'IWZ97CWB', 'title': r're:^Live\s247', 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e', 'ext': 'm4a', 'view_count': int, 'thumbnail': r're:^https?://.*\.jpg', 
'like_count': int, 'live_status': 'is_live', }, 'params': { 'skip_download': True, }, }] def _real_extract(self, url): url_type, live_radio_id = self._match_valid_url(url).group('type', 'id') info = self._call_api(url_type, {'id': live_radio_id}) manifest_url = info.get('streaming') if not manifest_url: raise ExtractorError('This radio is offline.', expected=True) fmts, subtitles = self._extract_m3u8_formats_and_subtitles(manifest_url, live_radio_id, fatal=False) return { 'id': live_radio_id, 'is_live': True, 'formats': fmts, 'subtitles': subtitles, **traverse_obj(info, { 'title': 'title', 'thumbnail': (('thumbnail', 'thumbnailM', 'thumbnailV', 'thumbnailH'), {url_or_none}), 'view_count': ('activeUsers', {int_or_none}), 'like_count': ('totalReaction', {int_or_none}), 'description': 'description', }, get_all=False), } class ZingMp3PodcastEpisodeIE(ZingMp3BaseIE): IE_NAME = 'zingmp3:podcast-episode' _VALID_URL = ZingMp3BaseIE._VALID_URL_TMPL % 'pgr|cgr' _TESTS = [{ 'url': 'https://zingmp3.vn/pgr/Nhac-Moi-Moi-Ngay/68Z9W66B.html', 'info_dict': { 'id': '68Z9W66B', 'title': 'Nhạc Mới Mỗi Ngày', 'description': 'md5:2875dfa951f8e5356742f1610cf20691', }, 'playlist_mincount': 20, }, { 'url': 'https://zingmp3.vn/cgr/Am-nhac/IWZ980AO.html', 'info_dict': { 'id': 'IWZ980AO', 'title': 'Âm nhạc', }, 'playlist_mincount': 2, }] def _fetch_page(self, eps_id, url_type, page): return self._call_api(url_type, { 'id': eps_id, 'page': page, 'count': self._PER_PAGE, }) def _real_extract(self, url): podcast_id, url_type = self._match_valid_url(url).group('id', 'type') podcast_info = self._call_api(url_type, {'id': podcast_id}) entries = self._paged_list(podcast_id, 'pgr-list' if url_type == 'pgr' else 'cgr-list') return self.playlist_result( entries, podcast_id, podcast_info.get('title'), podcast_info.get('description')) class ZingMp3PodcastIE(ZingMp3BaseIE): IE_NAME = 'zingmp3:podcast' _VALID_URL = r'https?://(?:mp3\.zing|zingmp3)\.vn/(?P<id>(?:cgr|top-podcast|podcast-new))/?(?:[#?]|$)' 
_TESTS = [{ 'url': 'https://zingmp3.vn/cgr', 'info_dict': { 'id': 'cgr', }, 'playlist_mincount': 5, }, { 'url': 'https://zingmp3.vn/top-podcast', 'info_dict': { 'id': 'top-podcast', }, 'playlist_mincount': 7, }, { 'url': 'https://zingmp3.vn/podcast-new', 'info_dict': { 'id': 'podcast-new', }, 'playlist_mincount': 4, }] def _real_extract(self, url): url_type = self._match_id(url) params = {'id': url_type} if url_type == 'podcast-new': params['type'] = 'new' items = self._call_api('cgrs' if url_type == 'cgr' else url_type, params)['items'] return self.playlist_result(self._parse_items(items), url_type)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/swearnet.py
yt_dlp/extractor/swearnet.py
from .vidyard import VidyardBaseIE from ..utils import ExtractorError, int_or_none, make_archive_id class SwearnetEpisodeIE(VidyardBaseIE): _VALID_URL = r'https?://www\.swearnet\.com/shows/(?P<id>[\w-]+)/seasons/(?P<season_num>\d+)/episodes/(?P<episode_num>\d+)' _TESTS = [{ 'url': 'https://www.swearnet.com/shows/gettin-learnt-with-ricky/seasons/1/episodes/1', 'info_dict': { 'id': 'wicK2EOzjOdxkUXGDIgcPw', 'display_id': '232819', 'ext': 'mp4', 'episode_number': 1, 'episode': 'Episode 1', 'duration': 719, 'description': r're:Are you drunk and high and craving a grilled cheese sandwich.+', 'season': 'Season 1', 'title': 'Episode 1 - Grilled Cheese Sammich', 'season_number': 1, 'thumbnail': 'https://cdn.vidyard.com/thumbnails/custom/0dd74f9b-388a-452e-b570-b407fb64435b_small.jpg', 'tags': ['Getting Learnt with Ricky', 'drunk', 'grilled cheese', 'high'], '_old_archive_ids': ['swearnetepisode 232819'], }, }] def _real_extract(self, url): slug, season_number, episode_number = self._match_valid_url(url).group('id', 'season_num', 'episode_num') webpage = self._download_webpage(url, slug) try: external_id = self._search_regex(r'externalid\s*=\s*"([^"]+)', webpage, 'externalid') except ExtractorError: if 'Upgrade Now' in webpage: self.raise_login_required() raise info = self._process_video_json(self._fetch_video_json(external_id)['chapters'][0], external_id) if info.get('display_id'): info['_old_archive_ids'] = [make_archive_id(self, info['display_id'])] return { **info, 'season_number': int_or_none(season_number), 'episode_number': int_or_none(episode_number), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/iheart.py
yt_dlp/extractor/iheart.py
from .common import InfoExtractor from ..utils import ( clean_html, clean_podcast_url, int_or_none, str_or_none, ) class IHeartRadioBaseIE(InfoExtractor): def _call_api(self, path, video_id, fatal=True, query=None): return self._download_json( 'https://api.iheart.com/api/v3/podcast/' + path, video_id, fatal=fatal, query=query) def _extract_episode(self, episode): return { 'thumbnail': episode.get('imageUrl'), 'description': clean_html(episode.get('description')), 'timestamp': int_or_none(episode.get('startDate'), 1000), 'duration': int_or_none(episode.get('duration')), } class IHeartRadioIE(IHeartRadioBaseIE): IE_NAME = 'iheartradio' _VALID_URL = r'(?:https?://(?:www\.)?iheart\.com/podcast/[^/]+/episode/(?P<display_id>[^/?&#]+)-|iheartradio:)(?P<id>\d+)' _TEST = { 'url': 'https://www.iheart.com/podcast/105-behind-the-bastards-29236323/episode/part-one-alexander-lukashenko-the-dictator-70346499/?embed=true', 'md5': 'c8609c92c8688dcb69d8541042b8abca', 'info_dict': { 'id': '70346499', 'ext': 'mp3', 'title': 'Part One: Alexander Lukashenko: The Dictator of Belarus', 'description': 'md5:96cc7297b3a5a9ebae28643801c96fae', 'timestamp': 1597741200, 'upload_date': '20200818', }, } def _real_extract(self, url): episode_id = self._match_id(url) episode = self._call_api( 'episodes/' + episode_id, episode_id)['episode'] info = self._extract_episode(episode) info.update({ 'id': episode_id, 'title': episode['title'], 'url': clean_podcast_url(episode['mediaUrl']), }) return info class IHeartRadioPodcastIE(IHeartRadioBaseIE): IE_NAME = 'iheartradio:podcast' _VALID_URL = r'https?://(?:www\.)?iheart(?:podcastnetwork)?\.com/podcast/[^/?&#]+-(?P<id>\d+)/?(?:[?#&]|$)' _TESTS = [{ 'url': 'https://www.iheart.com/podcast/1119-it-could-happen-here-30717896/', 'info_dict': { 'id': '30717896', 'title': 'It Could Happen Here', 'description': 'md5:5842117412a967eb0b01f8088eb663e2', }, 'playlist_mincount': 11, }, { 'url': 
'https://www.iheartpodcastnetwork.com/podcast/105-stuff-you-should-know-26940277', 'only_matching': True, }] def _real_extract(self, url): podcast_id = self._match_id(url) path = 'podcasts/' + podcast_id episodes = self._call_api( path + '/episodes', podcast_id, query={'limit': 1000000000})['data'] entries = [] for episode in episodes: episode_id = str_or_none(episode.get('id')) if not episode_id: continue info = self._extract_episode(episode) info.update({ '_type': 'url', 'id': episode_id, 'title': episode.get('title'), 'url': 'iheartradio:' + episode_id, 'ie_key': IHeartRadioIE.ie_key(), }) entries.append(info) podcast = self._call_api(path, podcast_id, False) or {} return self.playlist_result( entries, podcast_id, podcast.get('title'), podcast.get('description'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/onefootball.py
yt_dlp/extractor/onefootball.py
from .common import InfoExtractor from .jwplatform import JWPlatformIE from ..utils import make_archive_id class OneFootballIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?onefootball\.com/[a-z]{2}/video/[^/&?#]+-(?P<id>\d+)' _TESTS = [{ 'url': 'https://onefootball.com/en/video/highlights-fc-zuerich-3-3-fc-basel-34012334', 'info_dict': { 'id': 'Y2VtcWAT', 'ext': 'mp4', 'title': 'Highlights: FC Zürich 3-3 FC Basel', 'description': 'md5:33d9855cb790702c4fe42a513700aba8', 'thumbnail': 'https://cdn.jwplayer.com/v2/media/Y2VtcWAT/poster.jpg?width=720', 'timestamp': 1635874895, 'upload_date': '20211102', 'duration': 375.0, 'tags': ['Football', 'Soccer', 'OneFootball'], '_old_archive_ids': ['onefootball 34012334'], }, 'params': {'skip_download': True}, 'expected_warnings': ['Failed to download m3u8 information'], }, { 'url': 'https://onefootball.com/en/video/klopp-fumes-at-var-decisions-in-west-ham-defeat-34041020', 'info_dict': { 'id': 'leVJrMho', 'ext': 'mp4', 'title': 'Klopp fumes at VAR decisions in West Ham defeat', 'description': 'md5:9c50371095a01ad3f63311c73d8f51a5', 'thumbnail': 'https://cdn.jwplayer.com/v2/media/leVJrMho/poster.jpg?width=720', 'timestamp': 1636315232, 'upload_date': '20211107', 'duration': 93.0, 'tags': ['Football', 'Soccer', 'OneFootball'], '_old_archive_ids': ['onefootball 34041020'], }, 'params': {'skip_download': True}, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) data_json = self._search_json_ld(webpage, video_id, fatal=False) data_json.pop('url', None) m3u8_url = self._html_search_regex(r'(https://cdn\.jwplayer\.com/manifests/\w+\.m3u8)', webpage, 'm3u8_url') return self.url_result( m3u8_url, JWPlatformIE, video_id, _old_archive_ids=[make_archive_id(self, video_id)], **data_json, url_transparent=True)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/traileraddict.py
yt_dlp/extractor/traileraddict.py
import re from .common import InfoExtractor class TrailerAddictIE(InfoExtractor): _WORKING = False _VALID_URL = r'(?:https?://)?(?:www\.)?traileraddict\.com/(?:trailer|clip)/(?P<movie>.+?)/(?P<trailer_name>.+)' _TEST = { 'url': 'http://www.traileraddict.com/trailer/prince-avalanche/trailer', 'md5': '41365557f3c8c397d091da510e73ceb4', 'info_dict': { 'id': '76184', 'ext': 'mp4', 'title': 'Prince Avalanche Trailer', 'description': 'Trailer for Prince Avalanche.\n\nTwo highway road workers spend the summer of 1988 away from their city lives. The isolated landscape becomes a place of misadventure as the men find themselves at odds with each other and the women they left behind.', }, } def _real_extract(self, url): mobj = self._match_valid_url(url) name = mobj.group('movie') + '/' + mobj.group('trailer_name') webpage = self._download_webpage(url, name) title = self._html_extract_title(webpage, 'video title').replace(' - Trailer Addict', '') view_count_str = self._search_regex( r'<span class="views_n">([0-9,.]+)</span>', webpage, 'view count', fatal=False) view_count = ( None if view_count_str is None else int(view_count_str.replace(',', ''))) video_id = self._search_regex( r'<param\s+name="movie"\s+value="/emb/([0-9]+)"\s*/>', webpage, 'video id') # Presence of (no)watchplus function indicates HD quality is available if re.search(r'function (no)?watchplus()', webpage): fvar = 'fvarhd' else: fvar = 'fvar' info_url = f'http://www.traileraddict.com/{fvar}.php?tid={video_id!s}' info_webpage = self._download_webpage(info_url, video_id, 'Downloading the info webpage') final_url = self._search_regex(r'&fileurl=(.+)', info_webpage, 'Download url').replace('%3F', '?') thumbnail_url = self._search_regex(r'&image=(.+?)&', info_webpage, 'thumbnail url') description = self._html_search_regex( r'(?s)<div class="synopsis">.*?<div class="movie_label_info"[^>]*>(.*?)</div>', webpage, 'description', fatal=False) return { 'id': video_id, 'url': final_url, 'title': title, 'thumbnail': 
thumbnail_url, 'description': description, 'view_count': view_count, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/mojvideo.py
yt_dlp/extractor/mojvideo.py
from .common import InfoExtractor from ..utils import ( ExtractorError, parse_duration, ) class MojvideoIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?mojvideo\.com/video-(?P<display_id>[^/]+)/(?P<id>[a-f0-9]+)' _TEST = { 'url': 'http://www.mojvideo.com/video-v-avtu-pred-mano-rdecelaska-alfi-nipic/3d1ed4497707730b2906', 'md5': 'f7fd662cc8ce2be107b0d4f2c0483ae7', 'info_dict': { 'id': '3d1ed4497707730b2906', 'display_id': 'v-avtu-pred-mano-rdecelaska-alfi-nipic', 'ext': 'mp4', 'title': 'V avtu pred mano rdečelaska - Alfi Nipič', 'thumbnail': r're:^http://.*\.jpg$', 'duration': 242, }, } def _real_extract(self, url): mobj = self._match_valid_url(url) video_id = mobj.group('id') display_id = mobj.group('display_id') # XML is malformed playerapi = self._download_webpage( f'http://www.mojvideo.com/playerapi.php?v={video_id}&t=1', display_id) if '<error>true</error>' in playerapi: error_desc = self._html_search_regex( r'<errordesc>([^<]*)</errordesc>', playerapi, 'error description', fatal=False) raise ExtractorError(f'{self.IE_NAME} said: {error_desc}', expected=True) title = self._html_extract_title(playerapi) video_url = self._html_search_regex( r'<file>([^<]+)</file>', playerapi, 'video URL') thumbnail = self._html_search_regex( r'<preview>([^<]+)</preview>', playerapi, 'thumbnail', fatal=False) duration = parse_duration(self._html_search_regex( r'<duration>([^<]+)</duration>', playerapi, 'duration', fatal=False)) return { 'id': video_id, 'display_id': display_id, 'url': video_url, 'title': title, 'thumbnail': thumbnail, 'duration': duration, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/rokfin.py
yt_dlp/extractor/rokfin.py
import datetime as dt import itertools import json import re import urllib.parse from .common import InfoExtractor, SearchInfoExtractor from ..utils import ( ExtractorError, determine_ext, float_or_none, format_field, int_or_none, str_or_none, traverse_obj, try_get, unescapeHTML, unified_timestamp, url_or_none, urlencode_postdata, ) _API_BASE_URL = 'https://prod-api-v2.production.rokfin.com/api/v2/public/' class RokfinIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?rokfin\.com/(?P<id>(?P<type>post|stream)/\d+)' _NETRC_MACHINE = 'rokfin' _AUTH_BASE = 'https://secure.rokfin.com/auth/realms/rokfin-web/protocol/openid-connect' _access_mgmt_tokens = {} # OAuth 2.0: RFC 6749, Sec. 1.4-5 _TESTS = [{ 'url': 'https://www.rokfin.com/post/57548/Mitt-Romneys-Crazy-Solution-To-Climate-Change', 'info_dict': { 'id': 'post/57548', 'ext': 'mp4', 'title': 'Mitt Romney\'s Crazy Solution To Climate Change', 'thumbnail': r're:https://img\.production\.rokfin\.com/.+', 'upload_date': '20211023', 'timestamp': 1634998029, 'channel': 'Jimmy Dore', 'channel_id': '65429', 'channel_url': 'https://rokfin.com/TheJimmyDoreShow', 'availability': 'public', 'live_status': 'not_live', 'dislike_count': int, 'like_count': int, 'duration': 213, }, }, { 'url': 'https://rokfin.com/post/223/Julian-Assange-Arrested-Streaming-In-Real-Time', 'info_dict': { 'id': 'post/223', 'ext': 'mp4', 'title': 'Julian Assange Arrested: Streaming In Real Time', 'thumbnail': r're:https://img\.production\.rokfin\.com/.+', 'upload_date': '20190412', 'timestamp': 1555052644, 'channel': 'Ron Placone', 'channel_id': '10', 'channel_url': 'https://rokfin.com/RonPlacone', 'availability': 'public', 'live_status': 'not_live', 'dislike_count': int, 'like_count': int, 'tags': ['FreeThinkingMedia^', 'RealProgressives^'], }, }, { 'url': 'https://www.rokfin.com/stream/10543/Its-A-Crazy-Mess-Regional-Director-Blows-Whistle-On-Pfizers-Vaccine-Trial-Data', 'info_dict': { 'id': 'stream/10543', 'ext': 'mp4', 'title': '"It\'s A Crazy Mess" 
Regional Director Blows Whistle On Pfizer\'s Vaccine Trial Data', 'thumbnail': r're:https://img\.production\.rokfin\.com/.+', 'description': 'md5:324ce2d3e3b62e659506409e458b9d8e', 'channel': 'TLAVagabond', 'channel_id': '53856', 'channel_url': 'https://rokfin.com/TLAVagabond', 'availability': 'public', 'is_live': False, 'was_live': True, 'live_status': 'was_live', 'timestamp': 1635874720, 'release_timestamp': 1635874720, 'release_date': '20211102', 'upload_date': '20211102', 'dislike_count': int, 'like_count': int, 'tags': ['FreeThinkingMedia^'], }, }, { 'url': 'https://rokfin.com/post/126703/Brave-New-World--Aldous-Huxley-DEEPDIVE--Chpts-13--Quite-Frankly--Jay-Dyer', 'info_dict': { 'id': 'post/126703', 'ext': 'mp4', 'title': 'Brave New World - Aldous Huxley DEEPDIVE! (Chpts 1-3) - Quite Frankly & Jay Dyer', 'thumbnail': r're:https://img\.production\.rokfin\.com/.+', 'channel': 'Jay Dyer', 'channel_id': '186881', 'channel_url': 'https://rokfin.com/jaydyer', 'availability': 'premium_only', 'live_status': 'not_live', 'dislike_count': int, 'like_count': int, 'timestamp': 1678213357, 'upload_date': '20230307', 'tags': ['FreeThinkingMedia^', 'OpenMind^'], 'description': 'md5:cb04e32e68326c9b2b251b297bacff35', 'duration': 3100, }, }, { 'url': 'https://rokfin.com/stream/31332/The-Grayzone-live-on-Nordstream-blame-game', 'info_dict': { 'id': 'stream/31332', 'ext': 'mp4', 'title': 'The Grayzone live on Nordstream blame game', 'thumbnail': r're:https://image\.v\.rokfin\.com/.+', 'channel': 'Max Blumenthal', 'channel_id': '248902', 'channel_url': 'https://rokfin.com/MaxBlumenthal', 'availability': 'premium_only', 'live_status': 'was_live', 'dislike_count': int, 'like_count': int, 'timestamp': 1678475166, 'release_timestamp': 1678475166.0, 'release_date': '20230310', 'upload_date': '20230310', 'tags': ['FreeThinkingMedia^'], }, }] def _real_extract(self, url): video_id, video_type = self._match_valid_url(url).group('id', 'type') metadata = 
self._download_json_using_access_token(f'{_API_BASE_URL}{video_id}', video_id) scheduled = unified_timestamp(metadata.get('scheduledAt')) live_status = ('was_live' if metadata.get('stoppedAt') else 'is_upcoming' if scheduled else 'is_live' if video_type == 'stream' else 'not_live') video_url = traverse_obj(metadata, 'url', ('content', 'contentUrl'), expected_type=url_or_none) if video_url in (None, 'fake.m3u8'): video_url = format_field(self._search_regex( r'https?://[^/]+/([^/]+)/storyboard.vtt', traverse_obj(metadata, 'timelineUrl', ('content', 'timelineUrl'), expected_type=url_or_none), video_id, default=None), None, 'https://stream.v.rokfin.com/%s.m3u8') formats, subtitles = [{'url': video_url}] if video_url else [], {} if determine_ext(video_url) == 'm3u8': formats, subtitles = self._extract_m3u8_formats_and_subtitles( video_url, video_id, fatal=False, live=live_status == 'is_live') if not formats: if traverse_obj(metadata, 'premiumPlan', 'premium'): self.raise_login_required('This video is only available to premium users', True, method='cookies') elif scheduled: self.raise_no_formats( f'Stream is offline; scheduled for {dt.datetime.fromtimestamp(scheduled).strftime("%Y-%m-%d %H:%M:%S")}', video_id=video_id, expected=True) uploader = traverse_obj(metadata, ('createdBy', 'username'), ('creator', 'username')) timestamp = (scheduled or float_or_none(metadata.get('postedAtMilli'), 1000) or unified_timestamp(metadata.get('creationDateTime'))) return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, 'title': str_or_none(traverse_obj(metadata, 'title', ('content', 'contentTitle'))), 'duration': float_or_none(traverse_obj(metadata, ('content', 'duration'))), 'thumbnail': url_or_none(traverse_obj(metadata, 'thumbnail', ('content', 'thumbnailUrl1'))), 'description': str_or_none(traverse_obj(metadata, 'description', ('content', 'contentDescription'))), 'like_count': int_or_none(metadata.get('likeCount')), 'dislike_count': 
int_or_none(metadata.get('dislikeCount')), 'channel': str_or_none(traverse_obj(metadata, ('createdBy', 'name'), ('creator', 'name'))), 'channel_id': str_or_none(traverse_obj(metadata, ('createdBy', 'id'), ('creator', 'id'))), 'channel_url': url_or_none(f'https://rokfin.com/{uploader}') if uploader else None, 'timestamp': timestamp, 'release_timestamp': timestamp if live_status != 'not_live' else None, 'tags': traverse_obj(metadata, ('tags', ..., 'title'), expected_type=str_or_none), 'live_status': live_status, 'availability': self._availability( needs_premium=bool(traverse_obj(metadata, 'premiumPlan', 'premium')), is_private=False, needs_subscription=False, needs_auth=False, is_unlisted=False), # 'comment_count': metadata.get('numComments'), # Data provided by website is wrong '__post_extractor': self.extract_comments(video_id) if video_type == 'post' else None, } def _get_comments(self, video_id): pages_total = None for page_n in itertools.count(): raw_comments = self._download_json( f'{_API_BASE_URL}comment?postId={video_id[5:]}&page={page_n}&size=50', video_id, note=f'Downloading viewer comments page {page_n + 1}{format_field(pages_total, None, " of %s")}', fatal=False) or {} for comment in raw_comments.get('content') or []: yield { 'text': str_or_none(comment.get('comment')), 'author': str_or_none(comment.get('name')), 'id': comment.get('commentId'), 'author_id': comment.get('userId'), 'parent': 'root', 'like_count': int_or_none(comment.get('numLikes')), 'dislike_count': int_or_none(comment.get('numDislikes')), 'timestamp': unified_timestamp(comment.get('postedAt')), } pages_total = int_or_none(raw_comments.get('totalPages')) or None is_last = raw_comments.get('last') if not raw_comments.get('content') or is_last or (page_n > pages_total if pages_total else is_last is not False): return def _perform_login(self, username, password): # https://openid.net/specs/openid-connect-core-1_0.html#CodeFlowAuth (Sec. 
3.1) login_page = self._download_webpage( f'{self._AUTH_BASE}/auth?client_id=web&redirect_uri=https%3A%2F%2Frokfin.com%2Ffeed&response_mode=fragment&response_type=code&scope=openid', None, note='loading login page', errnote='error loading login page') authentication_point_url = unescapeHTML(self._search_regex( r'<form\s+[^>]+action\s*=\s*"(https://secure\.rokfin\.com/auth/realms/rokfin-web/login-actions/authenticate\?[^"]+)"', login_page, name='Authentication URL')) resp_body = self._download_webpage( authentication_point_url, None, note='logging in', fatal=False, expected_status=404, data=urlencode_postdata({'username': username, 'password': password, 'rememberMe': 'off', 'credentialId': ''})) if not self._authentication_active(): if re.search(r'(?i)(invalid\s+username\s+or\s+password)', resp_body or ''): raise ExtractorError('invalid username/password', expected=True) raise ExtractorError('Login failed') urlh = self._request_webpage( f'{self._AUTH_BASE}/auth', None, note='granting user authorization', errnote='user authorization rejected by Rokfin', query={ 'client_id': 'web', 'prompt': 'none', 'redirect_uri': 'https://rokfin.com/silent-check-sso.html', 'response_mode': 'fragment', 'response_type': 'code', 'scope': 'openid', }) self._access_mgmt_tokens = self._download_json( f'{self._AUTH_BASE}/token', None, note='getting access credentials', errnote='error getting access credentials', data=urlencode_postdata({ 'code': urllib.parse.parse_qs(urllib.parse.urldefrag(urlh.url).fragment).get('code')[0], 'client_id': 'web', 'grant_type': 'authorization_code', 'redirect_uri': 'https://rokfin.com/silent-check-sso.html', })) def _authentication_active(self): return not ( {'KEYCLOAK_IDENTITY', 'KEYCLOAK_IDENTITY_LEGACY', 'KEYCLOAK_SESSION', 'KEYCLOAK_SESSION_LEGACY'} - set(self._get_cookies(self._AUTH_BASE))) def _get_auth_token(self): return try_get(self._access_mgmt_tokens, lambda x: ' '.join([x['token_type'], x['access_token']])) def 
_download_json_using_access_token(self, url_or_request, video_id, headers={}, query={}): assert 'authorization' not in headers headers = headers.copy() auth_token = self._get_auth_token() refresh_token = self._access_mgmt_tokens.get('refresh_token') if auth_token: headers['authorization'] = auth_token json_string, urlh = self._download_webpage_handle( url_or_request, video_id, headers=headers, query=query, expected_status=401) if not auth_token or urlh.status != 401 or refresh_token is None: return self._parse_json(json_string, video_id) self._access_mgmt_tokens = self._download_json( f'{self._AUTH_BASE}/token', video_id, note='User authorization expired or canceled by Rokfin. Re-authorizing ...', errnote='Failed to re-authorize', data=urlencode_postdata({ 'grant_type': 'refresh_token', 'refresh_token': refresh_token, 'client_id': 'web', })) headers['authorization'] = self._get_auth_token() if headers['authorization'] is None: raise ExtractorError('User authorization lost', expected=True) return self._download_json(url_or_request, video_id, headers=headers, query=query) class RokfinPlaylistBaseIE(InfoExtractor): _TYPES = { 'video': 'post', 'audio': 'post', 'stream': 'stream', 'dead_stream': 'stream', 'stack': 'stack', } def _get_video_data(self, metadata): for content in metadata.get('content') or []: media_type = self._TYPES.get(content.get('mediaType')) video_id = content.get('id') if media_type == 'post' else content.get('mediaId') if not media_type or not video_id: continue yield self.url_result(f'https://rokfin.com/{media_type}/{video_id}', video_id=f'{media_type}/{video_id}', video_title=str_or_none(traverse_obj(content, ('content', 'contentTitle')))) class RokfinStackIE(RokfinPlaylistBaseIE): IE_NAME = 'rokfin:stack' IE_DESC = 'Rokfin Stacks' _VALID_URL = r'https?://(?:www\.)?rokfin\.com/stack/(?P<id>[^/]+)' _TESTS = [{ 'url': 'https://www.rokfin.com/stack/271/Tulsi-Gabbard-Portsmouth-Townhall-FULL--Feb-9-2020', 'playlist_count': 8, 'info_dict': { 'id': 
'271', }, }] def _real_extract(self, url): list_id = self._match_id(url) return self.playlist_result(self._get_video_data( self._download_json(f'{_API_BASE_URL}stack/{list_id}', list_id)), list_id) class RokfinChannelIE(RokfinPlaylistBaseIE): IE_NAME = 'rokfin:channel' IE_DESC = 'Rokfin Channels' _VALID_URL = r'https?://(?:www\.)?rokfin\.com/(?!((feed/?)|(discover/?)|(channels/?))$)(?P<id>[^/]+)/?$' _TESTS = [{ 'url': 'https://rokfin.com/TheConvoCouch', 'playlist_mincount': 100, 'info_dict': { 'id': '12071-new', 'title': 'TheConvoCouch - New', 'description': 'md5:bb622b1bca100209b91cd685f7847f06', }, }] _TABS = { 'new': 'posts', 'top': 'top', 'videos': 'video', 'podcasts': 'audio', 'streams': 'stream', 'stacks': 'stack', } def _real_initialize(self): self._validate_extractor_args() def _validate_extractor_args(self): requested_tabs = self._configuration_arg('tab', None) if requested_tabs is not None and (len(requested_tabs) > 1 or requested_tabs[0] not in self._TABS): raise ExtractorError(f'Invalid extractor-arg "tab". 
Must be one of {", ".join(self._TABS)}', expected=True) def _entries(self, channel_id, channel_name, tab): pages_total = None for page_n in itertools.count(0): if tab in ('posts', 'top'): data_url = f'{_API_BASE_URL}user/{channel_name}/{tab}?page={page_n}&size=50' else: data_url = f'{_API_BASE_URL}post/search/{tab}?page={page_n}&size=50&creator={channel_id}' metadata = self._download_json( data_url, channel_name, note=f'Downloading video metadata page {page_n + 1}{format_field(pages_total, None, " of %s")}') yield from self._get_video_data(metadata) pages_total = int_or_none(metadata.get('totalPages')) or None is_last = metadata.get('last') if is_last or (page_n > pages_total if pages_total else is_last is not False): return def _real_extract(self, url): channel_name = self._match_id(url) channel_info = self._download_json(f'{_API_BASE_URL}user/{channel_name}', channel_name) channel_id = channel_info['id'] tab = self._configuration_arg('tab', default=['new'])[0] return self.playlist_result( self._entries(channel_id, channel_name, self._TABS[tab]), f'{channel_id}-{tab}', f'{channel_name} - {tab.title()}', str_or_none(channel_info.get('description'))) class RokfinSearchIE(SearchInfoExtractor): IE_NAME = 'rokfin:search' IE_DESC = 'Rokfin Search' _SEARCH_KEY = 'rkfnsearch' _TYPES = { 'video': (('id', 'raw'), 'post'), 'audio': (('id', 'raw'), 'post'), 'stream': (('content_id', 'raw'), 'stream'), 'dead_stream': (('content_id', 'raw'), 'stream'), 'stack': (('content_id', 'raw'), 'stack'), } _TESTS = [{ 'url': 'rkfnsearch5:"zelenko"', 'playlist_count': 5, 'info_dict': { 'id': '"zelenko"', 'title': '"zelenko"', }, }] _db_url = None _db_access_key = None def _real_initialize(self): self._db_url, self._db_access_key = self.cache.load(self.ie_key(), 'auth', default=(None, None)) if not self._db_url: self._get_db_access_credentials() def _search_results(self, query): total_pages = None for page_number in itertools.count(1): search_results = self._run_search_query( query, 
data={'query': query, 'page': {'size': 100, 'current': page_number}}, note=f'Downloading page {page_number}{format_field(total_pages, None, " of ~%s")}') total_pages = traverse_obj(search_results, ('meta', 'page', 'total_pages'), expected_type=int_or_none) for result in search_results.get('results') or []: video_id_key, video_type = self._TYPES.get(traverse_obj(result, ('content_type', 'raw')), (None, None)) video_id = traverse_obj(result, video_id_key, expected_type=int_or_none) if video_id and video_type: yield self.url_result(url=f'https://rokfin.com/{video_type}/{video_id}') if not search_results.get('results'): return def _run_search_query(self, video_id, data, **kwargs): data = json.dumps(data).encode() for attempt in range(2): search_results = self._download_json( self._db_url, video_id, data=data, fatal=(attempt == 1), headers={'authorization': self._db_access_key}, **kwargs) if search_results: return search_results self.write_debug('Updating access credentials') self._get_db_access_credentials(video_id) def _get_db_access_credentials(self, video_id=None): auth_data = {'SEARCH_KEY': None, 'ENDPOINT_BASE': None} notfound_err_page = self._download_webpage( 'https://rokfin.com/discover', video_id, expected_status=404, note='Downloading home page') for js_file_path in re.findall(r'<script\b[^>]*\ssrc\s*=\s*"(/static/js/[^">]+)"', notfound_err_page): js_content = self._download_webpage( f'https://rokfin.com{js_file_path}', video_id, note='Downloading JavaScript file', fatal=False) auth_data.update(re.findall( rf'REACT_APP_({"|".join(auth_data.keys())})\s*:\s*"([^"]+)"', js_content or '')) if not all(auth_data.values()): continue self._db_url = url_or_none(f'{auth_data["ENDPOINT_BASE"]}/api/as/v1/engines/rokfin-search/search.json') self._db_access_key = f'Bearer {auth_data["SEARCH_KEY"]}' self.cache.store(self.ie_key(), 'auth', (self._db_url, self._db_access_key)) return raise ExtractorError('Unable to extract access credentials')
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/xboxclips.py
yt_dlp/extractor/xboxclips.py
import re from .common import InfoExtractor from ..utils import ( int_or_none, month_by_abbreviation, parse_filesize, parse_qs, ) class XboxClipsIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?(?:xboxclips\.com|gameclips\.io)/(?:video\.php\?.*vid=|[^/]+/)(?P<id>[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12})' _TESTS = [{ 'url': 'http://xboxclips.com/video.php?uid=2533274823424419&gamertag=Iabdulelah&vid=074a69a9-5faf-46aa-b93b-9909c1720325', 'md5': 'fbe1ec805e920aeb8eced3c3e657df5d', 'info_dict': { 'id': '074a69a9-5faf-46aa-b93b-9909c1720325', 'ext': 'mp4', 'title': 'iAbdulElah playing Titanfall', 'filesize_approx': 26800000, 'upload_date': '20140807', 'duration': 56, }, }, { 'url': 'https://gameclips.io/iAbdulElah/074a69a9-5faf-46aa-b93b-9909c1720325', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) if '/video.php' in url: qs = parse_qs(url) url = 'https://gameclips.io/{}/{}'.format(qs['gamertag'][0], qs['vid'][0]) webpage = self._download_webpage(url, video_id) info = self._parse_html5_media_entries(url, webpage, video_id)[0] title = self._html_search_meta(['og:title', 'twitter:title'], webpage) upload_date = None mobj = re.search( r'>Recorded: (\d{2})-(Jan|Feb|Mar|Apr|May|Ju[nl]|Aug|Sep|Oct|Nov|Dec)-(\d{4})', webpage) if mobj: upload_date = '%s%.2d%s' % (mobj.group(3), month_by_abbreviation(mobj.group(2)), mobj.group(1)) filesize = parse_filesize(self._html_search_regex( r'>Size: ([^<]+)<', webpage, 'file size', fatal=False)) duration = int_or_none(self._html_search_regex( r'>Duration: (\d+) Seconds<', webpage, 'duration', fatal=False)) view_count = int_or_none(self._html_search_regex( r'>Views: (\d+)<', webpage, 'view count', fatal=False)) info.update({ 'id': video_id, 'title': title, 'upload_date': upload_date, 'filesize_approx': filesize, 'duration': duration, 'view_count': view_count, }) return info
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/internetvideoarchive.py
yt_dlp/extractor/internetvideoarchive.py
import json import re from .common import InfoExtractor from ..utils import parse_qs class InternetVideoArchiveIE(InfoExtractor): _VALID_URL = r'https?://video\.internetvideoarchive\.net/(?:player|flash/players)/.*?\?.*?publishedid.*?' _TEST = { 'url': 'http://video.internetvideoarchive.net/player/6/configuration.ashx?customerid=69249&publishedid=194487&reporttag=vdbetatitle&playerid=641&autolist=0&domain=www.videodetective.com&maxrate=high&minrate=low&socialplayer=false', 'info_dict': { 'id': '194487', 'ext': 'mp4', 'title': 'Kick-Ass 2', 'description': 'md5:c189d5b7280400630a1d3dd17eaa8d8a', }, 'params': { # m3u8 download 'skip_download': True, }, } @staticmethod def _build_json_url(query): return 'http://video.internetvideoarchive.net/player/6/configuration.ashx?' + query def _real_extract(self, url): query = parse_qs(url) video_id = query['publishedid'][0] data = self._download_json( 'https://video.internetvideoarchive.net/videojs7/videojs7.ivasettings.ashx', video_id, data=json.dumps({ 'customerid': query['customerid'][0], 'publishedid': video_id, }).encode()) title = data['Title'] formats = self._extract_m3u8_formats( data['VideoUrl'], video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) file_url = formats[0]['url'] if '.ism/' in file_url: replace_url = lambda x: re.sub(r'\.ism/[^?]+', '.ism/' + x, file_url) formats.extend(self._extract_f4m_formats( replace_url('.f4m'), video_id, f4m_id='hds', fatal=False)) formats.extend(self._extract_mpd_formats( replace_url('.mpd'), video_id, mpd_id='dash', fatal=False)) formats.extend(self._extract_ism_formats( replace_url('Manifest'), video_id, ism_id='mss', fatal=False)) return { 'id': video_id, 'title': title, 'formats': formats, 'thumbnail': data.get('PosterUrl'), 'description': data.get('Description'), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/snapchat.py
yt_dlp/extractor/snapchat.py
from .common import InfoExtractor
from ..utils import float_or_none, int_or_none, url_or_none
from ..utils.traversal import traverse_obj


class SnapchatSpotlightIE(InfoExtractor):
    """Extractor for Snapchat Spotlight short-form videos."""

    _VALID_URL = r'https?://(?:www\.)?snapchat\.com/spotlight/(?P<id>\w+)'
    _TESTS = [{
        'url': 'https://www.snapchat.com/spotlight/W7_EDlXWTBiXAEEniNoMPwAAYYWtidGhudGZpAX1TKn0JAX1TKnXJAAAAAA',
        'md5': '46c580f63592d0cbb76e974d2f9f0fcc',
        'info_dict': {
            'id': 'W7_EDlXWTBiXAEEniNoMPwAAYYWtidGhudGZpAX1TKn0JAX1TKnXJAAAAAA',
            'ext': 'mp4',
            'title': 'Views 💕',
            'description': '',
            'thumbnail': r're:https://cf-st\.sc-cdn\.net/d/kKJHIR1QAznRKK9jgYYDq\.256\.IRZXSOY',
            'duration': 4.665,
            'timestamp': 1637777831.369,
            'upload_date': '20211124',
            'repost_count': int,
            'uploader': 'shreypatel57',
            'uploader_url': 'https://www.snapchat.com/add/shreypatel57',
        },
    }, {
        'url': 'https://www.snapchat.com/spotlight/W7_EDlXWTBiXAEEniNoMPwAAYcnVjYWdwcGV1AZEaIYn5AZEaIYnrAAAAAQ',
        'md5': '4cd9626458c1a0e3e6dbe72c544a9ec2',
        'info_dict': {
            'id': 'W7_EDlXWTBiXAEEniNoMPwAAYcnVjYWdwcGV1AZEaIYn5AZEaIYnrAAAAAQ',
            'ext': 'mp4',
            'title': 'Spotlight Snap',
            'description': 'How he flirt her teacher🤭🤭🤩😍 #kdrama#cdrama #dramaclips #dramaspotlight',
            'thumbnail': r're:https://cf-st\.sc-cdn\.net/i/ztfr6xFs0FOcFhwVczWfj\.256\.IRZXSOY',
            'duration': 10.91,
            'timestamp': 1722720291.307,
            'upload_date': '20240803',
            'view_count': int,
            'repost_count': int,
            'uploader': 'ganda0535',
            'uploader_url': 'https://www.snapchat.com/add/ganda0535',
            'tags': ['#dramaspotlight', '#dramaclips', '#cdrama', '#kdrama'],
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        next_props = self._search_nextjs_data(webpage, video_id)['props']['pageProps']
        # Pick this video's metadata out of the spotlight feed by matching the story ID
        story_meta = traverse_obj(next_props, (
            'spotlightFeed', 'spotlightStories',
            lambda _, v: v['story']['storyId']['value'] == video_id, 'metadata', any), None)

        info = {
            'id': video_id,
            'ext': 'mp4',
        }
        info.update(traverse_obj(story_meta, ('videoMetadata', {
            'title': ('name', {str}),
            'description': ('description', {str}),
            'timestamp': ('uploadDateMs', {float_or_none(scale=1000)}),
            'view_count': ('viewCount', {int_or_none}, {lambda x: None if x == -1 else x}),
            'repost_count': ('shareCount', {int_or_none}),
            'url': ('contentUrl', {url_or_none}),
            'width': ('width', {int_or_none}),
            'height': ('height', {int_or_none}),
            'duration': ('durationMs', {float_or_none(scale=1000)}),
            'thumbnail': ('thumbnailUrl', {url_or_none}),
            'uploader': ('creator', 'personCreator', 'username', {str}),
            'uploader_url': ('creator', 'personCreator', 'url', {url_or_none}),
        })))
        # Top-level metadata fields override videoMetadata values when present
        info.update(traverse_obj(story_meta, {
            'description': ('description', {str}),
            'tags': ('hashtags', ..., {str}),
            'view_count': ('engagementStats', 'viewCount', {int_or_none}, {lambda x: None if x == -1 else x}),
            'repost_count': ('engagementStats', 'shareCount', {int_or_none}),
        }))
        return info
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/floatplane.py
yt_dlp/extractor/floatplane.py
import functools

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    OnDemandPagedList,
    clean_html,
    determine_ext,
    float_or_none,
    format_field,
    int_or_none,
    join_nonempty,
    parse_iso8601,
    url_or_none,
    urljoin,
)
from ..utils.traversal import require, traverse_obj


class FloatplaneBaseIE(InfoExtractor):
    """Shared extraction logic for Floatplane-style sites.

    Subclasses must provide ``_BASE_URL``, ``_IMPERSONATE_TARGET`` and
    ``_HEADERS`` class attributes.
    """

    def _real_extract(self, url):
        post_id = self._match_id(url)

        post_data = self._download_json(
            f'{self._BASE_URL}/api/v3/content/post', post_id, query={'id': post_id},
            note='Downloading post data', errnote='Unable to download post data',
            impersonate=self._IMPERSONATE_TARGET)

        # Text-only posts carry neither attachment kind; bail out early
        if not any(traverse_obj(post_data, ('metadata', ('hasVideo', 'hasAudio')))):
            raise ExtractorError('Post does not contain a video or audio track', expected=True)

        uploader_url = format_field(
            post_data, [('creator', 'urlname')], f'{self._BASE_URL}/channel/%s/home') or None

        # Metadata shared by the post itself and every media attachment
        common_info = {
            'uploader_url': uploader_url,
            'channel_url': urljoin(f'{uploader_url}/', traverse_obj(post_data, ('channel', 'urlname'))),
            'availability': self._availability(needs_subscription=True),
            **traverse_obj(post_data, {
                'uploader': ('creator', 'title', {str}),
                'uploader_id': ('creator', 'id', {str}),
                'channel': ('channel', 'title', {str}),
                'channel_id': ('channel', 'id', {str}),
                'release_timestamp': ('releaseDate', {parse_iso8601}),
            }),
        }

        items = []
        # A post may carry multiple video and/or audio attachments; each one
        # becomes its own entry (playlist when there is more than one)
        for media in traverse_obj(post_data, (('videoAttachments', 'audioAttachments'), ...)):
            media_id = media['id']
            media_typ = media.get('type') or 'video'

            stream = self._download_json(
                f'{self._BASE_URL}/api/v3/delivery/info', media_id,
                query={'scenario': 'onDemand', 'entityId': media_id},
                note=f'Downloading {media_typ} stream data',
                impersonate=self._IMPERSONATE_TARGET)

            metadata = self._download_json(
                f'{self._BASE_URL}/api/v3/content/{media_typ}', media_id,
                f'Downloading {media_typ} metadata', query={'id': media_id},
                fatal=False, impersonate=self._IMPERSONATE_TARGET)

            # Variant URLs are relative; resolve them against the first usable
            # CDN origin (required — extraction cannot proceed without it)
            cdn_base_url = traverse_obj(stream, (
                'groups', 0, 'origins', ..., 'url', {url_or_none}, any, {require('cdn base url')}))

            formats = []
            for variant in traverse_obj(stream, ('groups', 0, 'variants', lambda _, v: v['url'])):
                format_url = urljoin(cdn_base_url, variant['url'])
                format_id = traverse_obj(variant, ('name', {str}))
                hls_aes = {}
                m3u8_data = None

                # If we need impersonation for the API, then we need it for HLS keys too: extract in advance
                if self._IMPERSONATE_TARGET is not None:
                    m3u8_data = self._download_webpage(
                        format_url, media_id, fatal=False, impersonate=self._IMPERSONATE_TARGET, headers=self._HEADERS,
                        note=join_nonempty('Downloading', format_id, 'm3u8 information', delim=' '),
                        errnote=join_nonempty('Failed to download', format_id, 'm3u8 information', delim=' '))
                    if not m3u8_data:
                        continue

                    key_url = self._search_regex(
                        r'#EXT-X-KEY:METHOD=AES-128,URI="(https?://[^"]+)"',
                        m3u8_data, 'HLS AES key URI', default=None)
                    if key_url:
                        urlh = self._request_webpage(
                            key_url, media_id, fatal=False, impersonate=self._IMPERSONATE_TARGET, headers=self._HEADERS,
                            note=join_nonempty('Downloading', format_id, 'HLS AES key', delim=' '),
                            errnote=join_nonempty('Failed to download', format_id, 'HLS AES key', delim=' '))
                        if urlh:
                            # Pre-fetched key is handed to the downloader as hex
                            hls_aes['key'] = urlh.read().hex()

                formats.append({
                    **traverse_obj(variant, {
                        'format_note': ('label', {str}),
                        'width': ('meta', 'video', 'width', {int_or_none}),
                        'height': ('meta', 'video', 'height', {int_or_none}),
                        'vcodec': ('meta', 'video', 'codec', {str}),
                        'acodec': ('meta', 'audio', 'codec', {str}),
                        'vbr': ('meta', 'video', 'bitrate', 'average', {int_or_none(scale=1000)}),
                        'abr': ('meta', 'audio', 'bitrate', 'average', {int_or_none(scale=1000)}),
                        'audio_channels': ('meta', 'audio', 'channelCount', {int_or_none}),
                        'fps': ('meta', 'video', 'fps', {float_or_none}),
                    }),
                    'url': format_url,
                    # Strip the HLS suffix so the real container extension is detected
                    'ext': determine_ext(format_url.partition('/chunk.m3u8')[0], 'mp4'),
                    'format_id': format_id,
                    'hls_media_playlist_data': m3u8_data,
                    'hls_aes': hls_aes or None,
                })

            subtitles = {}
            automatic_captions = {}
            # Auto-generated tracks go into automatic_captions, the rest into subtitles
            for sub_data in traverse_obj(metadata, ('textTracks', lambda _, v: url_or_none(v['src']))):
                sub_lang = sub_data.get('language') or 'en'
                sub_entry = {'url': sub_data['src']}
                if sub_data.get('generated'):
                    automatic_captions.setdefault(sub_lang, []).append(sub_entry)
                else:
                    subtitles.setdefault(sub_lang, []).append(sub_entry)

            items.append({
                **common_info,
                'id': media_id,
                **traverse_obj(metadata, {
                    'title': ('title', {str}),
                    'duration': ('duration', {int_or_none}),
                    'thumbnail': ('thumbnail', 'path', {url_or_none}),
                }),
                'formats': formats,
                'subtitles': subtitles,
                'automatic_captions': automatic_captions,
            })

        post_info = {
            **common_info,
            'id': post_id,
            'display_id': post_id,
            **traverse_obj(post_data, {
                'title': ('title', {str}),
                'description': ('text', {clean_html}),
                'like_count': ('likes', {int_or_none}),
                'dislike_count': ('dislikes', {int_or_none}),
                'comment_count': ('comments', {int_or_none}),
                'thumbnail': ('thumbnail', 'path', {url_or_none}),
            }),
            'http_headers': self._HEADERS,
        }

        # Multiple attachments -> playlist; single attachment -> merged single entry
        if len(items) > 1:
            return self.playlist_result(items, **post_info)

        post_info.update(items[0])
        return post_info


class FloatplaneIE(FloatplaneBaseIE):
    """Extractor for individual floatplane.com posts."""

    _VALID_URL = r'https?://(?:(?:www|beta)\.)?floatplane\.com/post/(?P<id>\w+)'
    _BASE_URL = 'https://www.floatplane.com'
    # floatplane.com itself needs no TLS-fingerprint impersonation
    _IMPERSONATE_TARGET = None
    _HEADERS = {
        'Origin': _BASE_URL,
        'Referer': f'{_BASE_URL}/',
    }
    _TESTS = [{
        'url': 'https://www.floatplane.com/post/2Yf3UedF7C',
        'info_dict': {
            'id': 'yuleLogLTT',
            'ext': 'mp4',
            'display_id': '2Yf3UedF7C',
            'title': '8K Yule Log Fireplace with Crackling Fire Sounds - 10 Hours',
            'description': 'md5:adf2970e0de1c5e3df447818bb0309f6',
            'thumbnail': r're:^https?://.*\.jpe?g$',
            'duration': 36035,
            'comment_count': int,
            'like_count': int,
            'dislike_count': int,
            'release_date': '20191206',
            'release_timestamp': 1575657000,
            'uploader': 'LinusTechTips',
            'uploader_id': '59f94c0bdd241b70349eb72b',
            'uploader_url': 'https://www.floatplane.com/channel/linustechtips/home',
            'channel': 'Linus Tech Tips',
            'channel_id': '63fe42c309e691e4e36de93d',
            'channel_url': 'https://www.floatplane.com/channel/linustechtips/home/main',
            'availability': 'subscriber_only',
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://www.floatplane.com/post/j2jqG3JmgJ',
        'info_dict': {
            'id': 'j2jqG3JmgJ',
            'title': 'TJM: Does Anyone Care About Avatar: The Way of Water?',
            'description': 'md5:00bf17dc5733e4031e99b7fd6489f274',
            'thumbnail': r're:^https?://.*\.jpe?g$',
            'comment_count': int,
            'like_count': int,
            'dislike_count': int,
            'release_timestamp': 1671915900,
            'release_date': '20221224',
            'uploader': 'LinusTechTips',
            'uploader_id': '59f94c0bdd241b70349eb72b',
            'uploader_url': 'https://www.floatplane.com/channel/linustechtips/home',
            'channel': "They're Just Movies",
            'channel_id': '64135f82fc76ab7f9fbdc876',
            'channel_url': 'https://www.floatplane.com/channel/linustechtips/home/tajm',
            'availability': 'subscriber_only',
        },
        'playlist_count': 2,
    }, {
        'url': 'https://www.floatplane.com/post/3tK2tInhoN',
        'info_dict': {
            'id': '3tK2tInhoN',
            'title': 'Extras - How Linus Communicates with Editors (Compensator 4)',
            'description': 'md5:83cd40aae1ce124df33769600c80ca5b',
            'thumbnail': r're:^https?://.*\.jpe?g$',
            'comment_count': int,
            'like_count': int,
            'dislike_count': int,
            'release_timestamp': 1700529120,
            'release_date': '20231121',
            'uploader': 'LinusTechTips',
            'uploader_id': '59f94c0bdd241b70349eb72b',
            'uploader_url': 'https://www.floatplane.com/channel/linustechtips/home',
            'channel': 'FP Exclusives',
            'channel_id': '6413623f5b12cca228a28e78',
            'channel_url': 'https://www.floatplane.com/channel/linustechtips/home/fpexclusive',
            'availability': 'subscriber_only',
        },
        'playlist_count': 2,
    }, {
        'url': 'https://beta.floatplane.com/post/d870PEFXS1',
        'info_dict': {
            'id': 'bg9SuYKEww',
            'ext': 'mp4',
            'display_id': 'd870PEFXS1',
            'title': 'LCS Drama, TLOU 2 Remaster, Destiny 2 Player Count Drops, + More!',
            'description': 'md5:80d612dcabf41b17487afcbe303ec57d',
            'thumbnail': r're:^https?://.*\.jpe?g$',
            'release_timestamp': 1700622000,
            'release_date': '20231122',
            'duration': 513,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'uploader': 'LinusTechTips',
            'uploader_id': '59f94c0bdd241b70349eb72b',
            'uploader_url': 'https://www.floatplane.com/channel/linustechtips/home',
            'channel': 'GameLinked',
            'channel_id': '649dbade3540dbc3945eeda7',
            'channel_url': 'https://www.floatplane.com/channel/linustechtips/home/gamelinked',
            'availability': 'subscriber_only',
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://www.floatplane.com/post/65B5PNoBtf',
        'info_dict': {
            'id': '65B5PNoBtf',
            'description': 'I recorded the inbuilt demo mode for your 90\'s enjoyment, thanks for being Floaties!',
            'display_id': '65B5PNoBtf',
            'like_count': int,
            'release_timestamp': 1701249480,
            'uploader': 'The Trash Network',
            'availability': 'subscriber_only',
            'uploader_id': '61bc20c9a131fb692bf2a513',
            'uploader_url': 'https://www.floatplane.com/channel/TheTrashNetwork/home',
            'channel_url': 'https://www.floatplane.com/channel/TheTrashNetwork/home/thedrumthing',
            'comment_count': int,
            'title': 'The $50 electronic drum kit.',
            'channel_id': '64424fe73cd58cbcf8d8e131',
            'thumbnail': 'https://pbs.floatplane.com/blogPost_thumbnails/65B5PNoBtf/725555379422705_1701247052743.jpeg',
            'dislike_count': int,
            'channel': 'The Drum Thing',
            'release_date': '20231129',
        },
        'playlist_count': 2,
        'playlist': [{
            'info_dict': {
                'id': 'ISPJjexylS',
                'ext': 'mp4',
                'release_date': '20231129',
                'release_timestamp': 1701249480,
                'title': 'The $50 electronic drum kit. .mov',
                'channel_id': '64424fe73cd58cbcf8d8e131',
                'thumbnail': 'https://pbs.floatplane.com/video_thumbnails/ISPJjexylS/335202812134041_1701249383392.jpeg',
                'availability': 'subscriber_only',
                'uploader': 'The Trash Network',
                'duration': 622,
                'channel': 'The Drum Thing',
                'uploader_id': '61bc20c9a131fb692bf2a513',
                'channel_url': 'https://www.floatplane.com/channel/TheTrashNetwork/home/thedrumthing',
                'uploader_url': 'https://www.floatplane.com/channel/TheTrashNetwork/home',
            },
        }, {
            'info_dict': {
                'id': 'qKfxu6fEpu',
                'ext': 'aac',
                'release_date': '20231129',
                'release_timestamp': 1701249480,
                'title': 'Roland TD-7 Demo.m4a',
                'channel_id': '64424fe73cd58cbcf8d8e131',
                'availability': 'subscriber_only',
                'uploader': 'The Trash Network',
                'duration': 114,
                'channel': 'The Drum Thing',
                'uploader_id': '61bc20c9a131fb692bf2a513',
                'channel_url': 'https://www.floatplane.com/channel/TheTrashNetwork/home/thedrumthing',
                'uploader_url': 'https://www.floatplane.com/channel/TheTrashNetwork/home',
            },
        }],
        'skip': 'requires subscription: "The Trash Network"',
        'params': {'skip_download': 'm3u8'},
    }]

    def _real_initialize(self):
        # All content is subscriber-only; require the session cookie up front
        if not self._get_cookies(self._BASE_URL).get('sails.sid'):
            self.raise_login_required()


class FloatplaneChannelIE(InfoExtractor):
    """Extractor for floatplane.com creator home pages and sub-channels (paged playlists)."""

    _VALID_URL = r'https?://(?:(?:www|beta)\.)?floatplane\.com/channel/(?P<id>[\w-]+)/home(?:/(?P<channel>[\w-]+))?'
    _PAGE_SIZE = 20
    _TESTS = [{
        'url': 'https://www.floatplane.com/channel/linustechtips/home/ltxexpo',
        'info_dict': {
            'id': 'linustechtips/ltxexpo',
            'title': 'LTX Expo',
            'description': 'md5:9819002f9ebe7fd7c75a3a1d38a59149',
        },
        'playlist_mincount': 51,
    }, {
        'url': 'https://www.floatplane.com/channel/ShankMods/home',
        'info_dict': {
            'id': 'ShankMods',
            'title': 'Shank Mods',
            'description': 'md5:6dff1bb07cad8e5448e04daad9be1b30',
        },
        'playlist_mincount': 14,
    }, {
        'url': 'https://beta.floatplane.com/channel/bitwit_ultra/home',
        'info_dict': {
            'id': 'bitwit_ultra',
            'title': 'Bitwit Ultra',
            'description': 'md5:1452f280bb45962976d4789200f676dd',
        },
        'playlist_mincount': 200,
    }]

    def _fetch_page(self, display_id, creator_id, channel_id, page):
        """Yield url_results for one page of a creator's (optionally channel-filtered) posts."""
        query = {
            'id': creator_id,
            'limit': self._PAGE_SIZE,
            'fetchAfter': page * self._PAGE_SIZE,
        }
        if channel_id:
            query['channel'] = channel_id
        page_data = self._download_json(
            'https://www.floatplane.com/api/v3/content/creator', display_id,
            query=query, note=f'Downloading page {page + 1}')
        for post in page_data or []:
            yield self.url_result(
                f'https://www.floatplane.com/post/{post["id"]}', FloatplaneIE,
                id=post['id'], title=post.get('title'),
                release_timestamp=parse_iso8601(post.get('releaseDate')))

    def _real_extract(self, url):
        creator, channel = self._match_valid_url(url).group('id', 'channel')
        display_id = join_nonempty(creator, channel, delim='/')

        # Resolve the creator urlname to its internal ID (and channel list)
        creator_data = self._download_json(
            'https://www.floatplane.com/api/v3/creator/named',
            display_id, query={'creatorURL[0]': creator})[0]

        channel_data = traverse_obj(
            creator_data, ('channels', lambda _, v: v['urlname'] == channel), get_all=False) or {}

        return self.playlist_result(OnDemandPagedList(functools.partial(
            self._fetch_page, display_id, creator_data['id'], channel_data.get('id')), self._PAGE_SIZE),
            display_id, title=channel_data.get('title') or creator_data.get('title'),
            description=channel_data.get('about') or creator_data.get('about'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/canal1.py
yt_dlp/extractor/canal1.py
from .common import InfoExtractor


class Canal1IE(InfoExtractor):
    """Extractor for Canal 1 (Colombia) news and program pages."""

    _VALID_URL = r'https?://(?:www\.|noticias\.)?canal1\.com\.co/(?:[^?#&])+/(?P<id>[\w-]+)'
    _TESTS = [{
        'url': 'https://canal1.com.co/noticias/napa-i-una-cadena-de-produccion-de-arroz-que-se-quedo-en-veremos-y-abandonada-en-el-departamento-del-choco/',
        'info_dict': {
            'id': '63b39f6b354977084b85ab54',
            'display_id': 'napa-i-una-cadena-de-produccion-de-arroz-que-se-quedo-en-veremos-y-abandonada-en-el-departamento-del-choco',
            'title': 'Ñapa I Una cadena de producción de arroz que se quedó en veremos y abandonada en el departamento del Chocó',
            'description': 'md5:bc49c6d64d20610ea1e7daf079a0d013',
            'thumbnail': r're:^https?://[^?#]+63b39f6b354977084b85ab54',
            'ext': 'mp4',
        },
    }, {
        'url': 'https://noticias.canal1.com.co/noticias/tres-i-el-triste-record-que-impuso-elon-musk-el-dueno-de-tesla-y-de-twitter/',
        'info_dict': {
            'id': '63b39e93f5fd223aa32250fb',
            'display_id': 'tres-i-el-triste-record-que-impuso-elon-musk-el-dueno-de-tesla-y-de-twitter',
            'title': 'Tres I El triste récord que impuso Elon Musk, el dueño de Tesla y de Twitter',
            'description': 'md5:d9f691f131a21ce6767ca6c05d17d791',
            'thumbnail': r're:^https?://[^?#]+63b39e93f5fd223aa32250fb',
            'ext': 'mp4',
        },
    }, {
        # Geo-restricted to Colombia
        'url': 'https://canal1.com.co/programas/guerreros-canal-1/video-inedito-guerreros-despedida-kewin-zarate/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # The page embeds a third-party player; hand off to the generic machinery
        # while keeping this page's display_id on the resulting entry
        embed_url = self._search_regex(
            r'"embedUrl"\s*:\s*"([^"]+)', webpage, 'embed url')
        return self.url_result(embed_url, display_id=display_id, url_transparent=True)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/cbssports.py
yt_dlp/extractor/cbssports.py
# from .cbs import CBSBaseIE
from .common import InfoExtractor
from ..utils import (
    int_or_none,
    try_get,
)


# class CBSSportsEmbedIE(CBSBaseIE):
class CBSSportsEmbedIE(InfoExtractor):
    """Extractor for CBS Sports / 247Sports embedded players (currently non-working)."""

    _WORKING = False
    IE_NAME = 'cbssports:embed'
    # Player URLs carry the video identifier URL-encoded inside the `args`
    # parameter, hence the %3D (=) matching below; verbose mode for readability
    _VALID_URL = r'''(?ix)https?://(?:(?:www\.)?cbs|embed\.247)sports\.com/player/embed.+?
        (?:
            ids%3D(?P<id>[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12})|
            pcid%3D(?P<pcid>\d+)
        )'''

    _TESTS = [{
        'url': 'https://www.cbssports.com/player/embed/?args=player_id%3Db56c03a6-231a-4bbe-9c55-af3c8a8e9636%26ids%3Db56c03a6-231a-4bbe-9c55-af3c8a8e9636%26resizable%3D1%26autoplay%3Dtrue%26domain%3Dcbssports.com%26comp_ads_enabled%3Dfalse%26watchAndRead%3D0%26startTime%3D0%26env%3Dprod',
        'only_matching': True,
    }, {
        'url': 'https://embed.247sports.com/player/embed/?args=%3fplayer_id%3d1827823171591%26channel%3dcollege-football-recruiting%26pcid%3d1827823171591%26width%3d640%26height%3d360%26autoplay%3dTrue%26comp_ads_enabled%3dFalse%26uvpc%3dhttps%253a%252f%252fwww.cbssports.com%252fapi%252fcontent%252fvideo%252fconfig%252f%253fcfg%253duvp_247sports_v4%2526partner%253d247%26uvpc_m%3dhttps%253a%252f%252fwww.cbssports.com%252fapi%252fcontent%252fvideo%252fconfig%252f%253fcfg%253duvp_247sports_m_v4%2526partner_m%253d247_mobile%26utag%3d247sportssite%26resizable%3dTrue',
        'only_matching': True,
    }]

    # Legacy mpx (ThePlatform) extraction path, kept for reference:
    # def _extract_video_info(self, filter_query, video_id):
    #     return self._extract_feed_info('dJ5BDC', 'VxxJg8Ymh8sE', filter_query, video_id)

    def _real_extract(self, url):
        # Exactly one of the two groups matches, per _VALID_URL
        uuid, pcid = self._match_valid_url(url).groups()
        query = {'id': uuid} if uuid else {'pcid': pcid}
        video = self._download_json(
            'https://www.cbssports.com/api/content/video/',
            uuid or pcid, query=query)[0]
        video_id = video['id']
        title = video['title']
        metadata = video.get('metaData') or {}
        # return self._extract_video_info('byId=%d' % metadata['mpxOutletId'], video_id)
        # return self._extract_video_info('byGuid=' + metadata['mpxRefId'], video_id)

        formats = self._extract_m3u8_formats(
            metadata['files'][0]['url'], video_id, 'mp4',
            'm3u8_native', m3u8_id='hls', fatal=False)

        image = video.get('image')
        thumbnails = None
        if image:
            image_path = image.get('path')
            if image_path:
                thumbnails = [{
                    'url': image_path,
                    'width': int_or_none(image.get('width')),
                    'height': int_or_none(image.get('height')),
                    'filesize': int_or_none(image.get('size')),
                }]

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnails': thumbnails,
            'description': video.get('description'),
            'timestamp': int_or_none(try_get(video, lambda x: x['dateCreated']['epoch'])),
            'duration': int_or_none(metadata.get('duration')),
        }


class CBSSportsBaseIE(InfoExtractor):
    """Shared page -> embedded-player handoff for the site-facing extractors below."""

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        iframe_url = self._search_regex(
            r'<iframe[^>]+(?:data-)?src="(https?://[^/]+/player/embed[^"]+)"',
            webpage, 'embed url')
        return self.url_result(iframe_url, CBSSportsEmbedIE.ie_key())


class CBSSportsIE(CBSSportsBaseIE):
    """Extractor for cbssports.com article/video pages (currently non-working)."""

    _WORKING = False
    IE_NAME = 'cbssports'
    _VALID_URL = r'https?://(?:www\.)?cbssports\.com/[^/]+/video/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://www.cbssports.com/college-football/video/cover-3-stanford-spring-gleaning/',
        'info_dict': {
            'id': 'b56c03a6-231a-4bbe-9c55-af3c8a8e9636',
            'ext': 'mp4',
            'title': 'Cover 3: Stanford Spring Gleaning',
            'description': 'The Cover 3 crew break down everything you need to know about the Stanford Cardinal this spring.',
            'timestamp': 1617218398,
            'upload_date': '20210331',
            'duration': 502,
        },
    }]


class TwentyFourSevenSportsIE(CBSSportsBaseIE):
    """Extractor for 247sports.com video pages (currently non-working)."""

    _WORKING = False
    IE_NAME = '247sports'
    _VALID_URL = r'https?://(?:www\.)?247sports\.com/Video/(?:[^/?#&]+-)?(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://247sports.com/Video/2021-QB-Jake-Garcia-senior-highlights-through-five-games-10084854/',
        'info_dict': {
            'id': '4f1265cb-c3b5-44a8-bb1d-1914119a0ccc',
            'ext': 'mp4',
            'title': '2021 QB Jake Garcia senior highlights through five games',
            'description': 'md5:8cb67ebed48e2e6adac1701e0ff6e45b',
            'timestamp': 1607114223,
            'upload_date': '20201204',
            'duration': 208,
        },
    }]
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/niconico.py
yt_dlp/extractor/niconico.py
import datetime as dt import functools import itertools import json import re from .common import InfoExtractor, SearchInfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, OnDemandPagedList, clean_html, determine_ext, extract_attributes, float_or_none, int_or_none, parse_bitrate, parse_iso8601, parse_qs, parse_resolution, qualities, str_or_none, time_seconds, truncate_string, unified_timestamp, update_url_query, url_basename, url_or_none, urlencode_postdata, urljoin, ) from ..utils.traversal import ( find_element, require, traverse_obj, ) class NiconicoBaseIE(InfoExtractor): _API_BASE = 'https://nvapi.nicovideo.jp' _BASE_URL = 'https://www.nicovideo.jp' _GEO_BYPASS = False _GEO_COUNTRIES = ['JP'] _HEADERS = { 'X-Frontend-ID': '6', 'X-Frontend-Version': '0', } _LOGIN_BASE = 'https://account.nicovideo.jp' _NETRC_MACHINE = 'niconico' @property def is_logged_in(self): return bool(self._get_cookies('https://www.nicovideo.jp').get('user_session')) def _raise_login_error(self, message, expected=True): raise ExtractorError(f'Unable to login: {message}', expected=expected) def _perform_login(self, username, password): if self.is_logged_in: return self._request_webpage( f'{self._LOGIN_BASE}/login', None, 'Requesting session cookies') webpage = self._download_webpage( f'{self._LOGIN_BASE}/login/redirector', None, 'Logging in', 'Unable to log in', headers={ 'Content-Type': 'application/x-www-form-urlencoded', 'Referer': f'{self._LOGIN_BASE}/login', }, data=urlencode_postdata({ 'mail_tel': username, 'password': password, })) if self.is_logged_in: return elif err_msg := traverse_obj(webpage, ( {find_element(cls='notice error')}, {find_element(cls='notice__text')}, {clean_html}, )): self._raise_login_error(err_msg or 'Invalid username or password') elif 'oneTimePw' in webpage: post_url = self._search_regex( r'<form[^>]+action=(["\'])(?P<url>.+?)\1', webpage, 'post url', group='url') mfa, urlh = self._download_webpage_handle( 
urljoin(self._LOGIN_BASE, post_url), None, 'Performing MFA', 'Unable to complete MFA', headers={ 'Content-Type': 'application/x-www-form-urlencoded', }, data=urlencode_postdata({ 'otp': self._get_tfa_info('6 digit number shown on app'), })) if self.is_logged_in: return elif 'error-code' in parse_qs(urlh.url): err_msg = traverse_obj(mfa, ({find_element(cls='pageMainMsg')}, {clean_html})) self._raise_login_error(err_msg or 'MFA session expired') elif 'formError' in mfa: err_msg = traverse_obj(mfa, ( {find_element(cls='formError')}, {find_element(tag='div')}, {clean_html})) self._raise_login_error(err_msg or 'MFA challenge failed') self._raise_login_error('Unexpected login error', expected=False) class NiconicoIE(NiconicoBaseIE): IE_NAME = 'niconico' IE_DESC = 'ニコニコ動画' _VALID_URL = r'https?://(?:(?:embed|sp|www)\.)?nicovideo\.jp/watch/(?P<id>(?:[a-z]{2})?\d+)' _ERROR_MAP = { 'FORBIDDEN': { 'ADMINISTRATOR_DELETE_VIDEO': 'Video unavailable, possibly removed by admins', 'CHANNEL_MEMBER_ONLY': 'Channel members only', 'DELETED_CHANNEL_VIDEO': 'Video unavailable, channel was closed', 'DELETED_COMMUNITY_VIDEO': 'Video unavailable, community deleted or missing', 'DEFAULT': 'Page unavailable, check the URL', 'HARMFUL_VIDEO': 'Sensitive content, login required', 'HIDDEN_VIDEO': 'Video unavailable, set to private', 'NOT_ALLOWED': 'No permission', 'PPV_VIDEO': 'PPV video, payment information required', 'PREMIUM_ONLY': 'Premium members only', }, 'INVALID_PARAMETER': { 'DEFAULT': 'Video unavailable, may not exist or was deleted', }, 'MAINTENANCE': { 'DEFAULT': 'Maintenance is in progress', }, 'NOT_FOUND': { 'DEFAULT': 'Video unavailable, may not exist or was deleted', 'RIGHT_HOLDER_DELETE_VIDEO': 'Removed by rights-holder request', }, 'UNAUTHORIZED': { 'DEFAULT': 'Invalid session, re-login required', }, 'UNKNOWN': { 'DEFAULT': 'Failed to fetch content', }, } _STATUS_MAP = { 'needs_auth': 'PPV video, payment information required', 'premium_only': 'Premium members only', 
'subscriber_only': 'Channel members only', } _TESTS = [{ 'url': 'https://www.nicovideo.jp/watch/1173108780', 'info_dict': { 'id': 'sm9', 'ext': 'mp4', 'title': '新・豪血寺一族 -煩悩解放 - レッツゴー!陰陽師', 'availability': 'public', 'channel': '中の', 'channel_id': '4', 'comment_count': int, 'description': 'md5:b7f6d3e6c29552cc19fdea6a4b7dc194', 'display_id': '1173108780', 'duration': 320, 'genres': ['未設定'], 'like_count': int, 'tags': 'mincount:5', 'thumbnail': r're:https?://img\.cdn\.nimg\.jp/s/nicovideo/thumbnails/.+', 'timestamp': 1173108780, 'upload_date': '20070305', 'uploader': '中の', 'uploader_id': '4', 'view_count': int, }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.nicovideo.jp/watch/sm8628149', 'info_dict': { 'id': 'sm8628149', 'ext': 'mp4', 'title': '【東方】Bad Apple!!\u3000PV【影絵】', 'availability': 'public', 'channel': 'あにら', 'channel_id': '10731211', 'comment_count': int, 'description': 'md5:1999669158cb77a45bab123c4fafe1d7', 'display_id': 'sm8628149', 'duration': 219, 'genres': ['ゲーム'], 'like_count': int, 'tags': 'mincount:3', 'thumbnail': r're:https?://img\.cdn\.nimg\.jp/s/nicovideo/thumbnails/.+', 'timestamp': 1256580802, 'upload_date': '20091026', 'uploader': 'あにら', 'uploader_id': '10731211', 'view_count': int, }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.nicovideo.jp/watch/nm14296458', 'info_dict': { 'id': 'nm14296458', 'ext': 'mp4', 'title': '【鏡音リン】Dance on media【オリジナル】take2!', 'availability': 'public', 'channel': 'りょうた', 'channel_id': '18822557', 'comment_count': int, 'description': 'md5:9368f2b1f4178de64f2602c2f3d6cbf5', 'display_id': 'nm14296458', 'duration': 208, 'genres': ['音楽・サウンド'], 'like_count': int, 'tags': 'mincount:1', 'thumbnail': r're:https?://img\.cdn\.nimg\.jp/s/nicovideo/thumbnails/.+', 'timestamp': 1304065916, 'upload_date': '20110429', 'uploader': 'りょうた', 'uploader_id': '18822557', 'view_count': int, }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.nicovideo.jp/watch/nl1872567', 'info_dict': { 'id': 
'nl1872567', 'ext': 'mp4', 'title': '【12/25放送分】『生対談!!ひろゆきと戀塚のニコニコを作った人 』前半', 'availability': 'public', 'channel': 'nicolive', 'channel_id': '394', 'comment_count': int, 'description': 'md5:79fc3a54cfdc93ecc2b883285149e548', 'display_id': 'nl1872567', 'duration': 586, 'genres': ['エンターテイメント'], 'like_count': int, 'tags': 'mincount:3', 'thumbnail': r're:https?://img\.cdn\.nimg\.jp/s/nicovideo/thumbnails/.+', 'timestamp': 1198637246, 'upload_date': '20071226', 'uploader': 'nicolive', 'uploader_id': '394', 'view_count': int, }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.nicovideo.jp/watch/so38016254', 'info_dict': { 'id': 'so38016254', 'ext': 'mp4', 'title': '「のんのんびより のんすとっぷ」 PV', 'availability': 'public', 'channel': 'のんのんびより のんすとっぷ', 'channel_id': 'ch2647028', 'comment_count': int, 'description': 'md5:6e2ff55b33e3645d59ef010869cde6a2', 'display_id': 'so38016254', 'duration': 114, 'genres': ['アニメ'], 'like_count': int, 'tags': 'mincount:4', 'thumbnail': r're:https?://img\.cdn\.nimg\.jp/s/nicovideo/thumbnails/.+', 'timestamp': 1609146000, 'upload_date': '20201228', 'uploader': 'のんのんびより のんすとっぷ', 'uploader_id': 'ch2647028', 'view_count': int, }, 'params': {'skip_download': 'm3u8'}, }, { # smile official, but marked as user video 'url': 'https://www.nicovideo.jp/watch/so37602536', 'info_dict': { 'id': 'so37602536', 'ext': 'mp4', 'title': '田中有紀とゆきだるまと! 
限定放送アーカイブ(第12回)', 'availability': 'subscriber_only', 'channel': 'あみあみ16', 'channel_id': '91072761', 'comment_count': int, 'description': 'md5:2ee357ec4e76d7804fb59af77107ab67', 'display_id': 'so37602536', 'duration': 980, 'genres': ['エンターテイメント'], 'like_count': int, 'tags': 'count:4', 'thumbnail': r're:https?://img\.cdn\.nimg\.jp/s/nicovideo/thumbnails/.+', 'timestamp': 1601377200, 'upload_date': '20200929', 'uploader': 'あみあみ16', 'uploader_id': '91072761', 'view_count': int, }, 'params': {'skip_download': 'm3u8'}, 'skip': 'Channel members only', }, { 'url': 'https://www.nicovideo.jp/watch/so41370536', 'info_dict': { 'id': 'so41370536', 'ext': 'mp4', 'title': 'ZUN【出演者別】超パーティー2022', 'availability': 'premium_only', 'channel': 'ニコニコ超会議チャンネル', 'channel_id': 'ch2607134', 'comment_count': int, 'description': 'md5:5692db5ac40d3a374fc5ec182d0249c3', 'display_id': 'so41370536', 'duration': 63, 'genres': ['音楽・サウンド'], 'like_count': int, 'tags': 'mincount:5', 'thumbnail': r're:https?://img\.cdn\.nimg\.jp/s/nicovideo/thumbnails/.+', 'timestamp': 1668394800, 'upload_date': '20221114', 'uploader': 'ニコニコ超会議チャンネル', 'uploader_id': 'ch2607134', 'view_count': int, }, 'params': {'skip_download': 'm3u8'}, 'skip': 'Premium members only', }, { 'url': 'https://www.nicovideo.jp/watch/so37574174', 'info_dict': { 'id': 'so37574174', 'ext': 'mp4', 'title': 'ひぐらしのなく頃に 廿回し編\u3000第1回', 'availability': 'subscriber_only', 'channel': '「ひぐらしのなく頃に」オフィシャルチャンネル', 'channel_id': 'ch2646036', 'comment_count': int, 'description': 'md5:5296196d51d9c0b7272b73f9a99c236a', 'display_id': 'so37574174', 'duration': 1931, 'genres': ['ラジオ'], 'like_count': int, 'tags': 'mincount:5', 'thumbnail': r're:https?://img\.cdn\.nimg\.jp/s/nicovideo/thumbnails/.+', 'timestamp': 1601028000, 'upload_date': '20200925', 'uploader': '「ひぐらしのなく頃に」オフィシャルチャンネル', 'uploader_id': 'ch2646036', 'view_count': int, }, 'params': {'skip_download': 'm3u8'}, 'skip': 'Channel members only', }, { 'url': 'https://www.nicovideo.jp/watch/so44060088', 
'info_dict': { 'id': 'so44060088', 'ext': 'mp4', 'title': '松田的超英雄電波。《仮面ライダーガッチャード 放送終了記念特別番組》', 'availability': 'subscriber_only', 'channel': 'あみあみチャンネル', 'channel_id': 'ch2638921', 'comment_count': int, 'description': 'md5:9dec5bb9a172b6d20a255ecb64fbd03e', 'display_id': 'so44060088', 'duration': 1881, 'genres': ['ラジオ'], 'like_count': int, 'tags': 'mincount:7', 'thumbnail': r're:https?://img\.cdn\.nimg\.jp/s/nicovideo/thumbnails/.+', 'timestamp': 1725361200, 'upload_date': '20240903', 'uploader': 'あみあみチャンネル', 'uploader_id': 'ch2638921', 'view_count': int, }, 'params': {'skip_download': 'm3u8'}, 'skip': 'Channel members only; specified continuous membership period required', }] def _extract_formats(self, api_data, video_id): fmt_filter = lambda _, v: v['isAvailable'] and v['id'] videos = traverse_obj(api_data, ('media', 'domand', 'videos', fmt_filter)) audios = traverse_obj(api_data, ('media', 'domand', 'audios', fmt_filter)) access_key = traverse_obj(api_data, ('media', 'domand', 'accessRightKey', {str})) track_id = traverse_obj(api_data, ('client', 'watchTrackId', {str})) if not all((videos, audios, access_key, track_id)): return m3u8_url = self._download_json( f'{self._API_BASE}/v1/watch/{video_id}/access-rights/hls', video_id, headers={ 'Accept': 'application/json;charset=utf-8', 'Content-Type': 'application/json', 'X-Access-Right-Key': access_key, 'X-Request-With': self._BASE_URL, **self._HEADERS, }, query={ 'actionTrackId': track_id, }, data=json.dumps({ 'outputs': list(itertools.product((v['id'] for v in videos), (a['id'] for a in audios))), }).encode(), )['data']['contentUrl'] raw_fmts = self._extract_m3u8_formats(m3u8_url, video_id, 'mp4') formats = [] for a_fmt in traverse_obj(raw_fmts, lambda _, v: v['vcodec'] == 'none'): formats.append({ **a_fmt, **traverse_obj(audios, (lambda _, v: a_fmt['format_id'].startswith(v['id']), { 'abr': ('bitRate', {float_or_none(scale=1000)}), 'asr': ('samplingRate', {int_or_none}), 'format_id': ('id', {str}), 'quality': 
('qualityLevel', {int_or_none}), }, any)), 'acodec': 'aac', }) # Sort first, keeping the lowest-tbr formats v_fmts = sorted((fmt for fmt in raw_fmts if fmt['vcodec'] != 'none'), key=lambda f: f['tbr']) self._remove_duplicate_formats(v_fmts) # Calculate the true vbr/tbr by subtracting the lowest abr min_abr = traverse_obj(audios, (..., 'bitRate', {float_or_none(scale=1000)}, all, {min})) or 0 for v_fmt in v_fmts: v_fmt['format_id'] = url_basename(v_fmt['url']).rpartition('.')[0] v_fmt['quality'] = traverse_obj(videos, ( lambda _, v: v['id'] == v_fmt['format_id'], 'qualityLevel', {int_or_none}, any)) or -1 v_fmt['tbr'] -= min_abr formats.extend(v_fmts) return formats def _real_extract(self, url): video_id = self._match_id(url) path = 'v3' if self.is_logged_in else 'v3_guest' api_resp = self._download_json( f'{self._BASE_URL}/api/watch/{path}/{video_id}', video_id, 'Downloading API JSON', 'Unable to fetch data', headers={ **self._HEADERS, **self.geo_verification_headers(), }, query={ 'actionTrackId': f'AAAAAAAAAA_{round(time_seconds() * 1000)}', }, expected_status=[400, 404]) api_data = api_resp['data'] scheduled_time = traverse_obj(api_data, ('publishScheduledAt', {str})) status = traverse_obj(api_resp, ('meta', 'status', {int})) if status != 200: err_code = traverse_obj(api_resp, ('meta', 'errorCode', {str.upper})) reason_code = traverse_obj(api_data, ('reasonCode', {str_or_none})) err_msg = traverse_obj(self._ERROR_MAP, (err_code, (reason_code, 'DEFAULT'), {str}, any)) if reason_code in ('DOMESTIC_VIDEO', 'HIGH_RISK_COUNTRY_VIDEO'): self.raise_geo_restricted(countries=self._GEO_COUNTRIES) elif reason_code == 'HARMFUL_VIDEO' and traverse_obj(api_data, ( 'viewer', 'allowSensitiveContents', {bool}, )) is False: err_msg = 'Sensitive content, adjust display settings to watch' elif reason_code == 'HIDDEN_VIDEO' and scheduled_time: err_msg = f'This content is scheduled to be released at {scheduled_time}' elif reason_code in ('CHANNEL_MEMBER_ONLY', 'HARMFUL_VIDEO', 
'HIDDEN_VIDEO', 'PPV_VIDEO', 'PREMIUM_ONLY'): self.raise_login_required(err_msg) if err_msg: raise ExtractorError(err_msg, expected=True) if status and status >= 500: raise ExtractorError('Service temporarily unavailable', expected=True) raise ExtractorError(f'API returned error status {status}') availability = self._availability(**traverse_obj(api_data, ('payment', 'video', { 'needs_auth': (('isContinuationBenefit', 'isPpv'), {bool}, any), 'needs_subscription': ('isAdmission', {bool}), 'needs_premium': ('isPremium', {bool}), }))) or 'public' formats = self._extract_formats(api_data, video_id) err_msg = self._STATUS_MAP.get(availability) if not formats and err_msg: self.raise_login_required(err_msg, metadata_available=True) thumb_prefs = qualities(['url', 'middleUrl', 'largeUrl', 'player', 'ogp']) return { 'availability': availability, 'display_id': video_id, 'formats': formats, 'genres': traverse_obj(api_data, ('genre', 'label', {str}, filter, all, filter)), 'release_timestamp': parse_iso8601(scheduled_time), 'subtitles': self.extract_subtitles(video_id, api_data), 'tags': traverse_obj(api_data, ('tag', 'items', ..., 'name', {str}, filter, all, filter)), 'thumbnails': [{ 'ext': 'jpg', 'id': key, 'preference': thumb_prefs(key), 'url': url, **parse_resolution(url, lenient=True), } for key, url in traverse_obj(api_data, ( 'video', 'thumbnail', {dict}), default={}).items()], **traverse_obj(api_data, (('channel', 'owner'), any, { 'channel': (('name', 'nickname'), {str}, any), 'channel_id': ('id', {str_or_none}), 'uploader': (('name', 'nickname'), {str}, any), 'uploader_id': ('id', {str_or_none}), })), **traverse_obj(api_data, ('video', { 'id': ('id', {str_or_none}), 'title': ('title', {str}), 'description': ('description', {clean_html}, filter), 'duration': ('duration', {int_or_none}), 'timestamp': ('registeredAt', {parse_iso8601}), })), **traverse_obj(api_data, ('video', 'count', { 'comment_count': ('comment', {int_or_none}), 'like_count': ('like', {int_or_none}), 
'view_count': ('view', {int_or_none}), })), } def _get_subtitles(self, video_id, api_data): comments_info = traverse_obj(api_data, ('comment', 'nvComment', {dict})) or {} if not comments_info.get('server'): return danmaku = traverse_obj(self._download_json( f'{comments_info["server"]}/v1/threads', video_id, 'Downloading comments', 'Failed to download comments', headers={ 'Content-Type': 'text/plain;charset=UTF-8', 'Origin': self._BASE_URL, 'Referer': f'{self._BASE_URL}/', 'X-Client-Os-Type': 'others', **self._HEADERS, }, data=json.dumps({ 'additionals': {}, 'params': comments_info.get('params'), 'threadKey': comments_info.get('threadKey'), }).encode(), fatal=False, ), ('data', 'threads', ..., 'comments', ...)) return { 'comments': [{ 'ext': 'json', 'data': json.dumps(danmaku), }], } class NiconicoPlaylistBaseIE(InfoExtractor): _PAGE_SIZE = 100 _API_HEADERS = { 'X-Frontend-ID': '6', 'X-Frontend-Version': '0', 'X-Niconico-Language': 'en-us', } def _call_api(self, list_id, resource, query): raise NotImplementedError('Must be implemented in subclasses') @staticmethod def _parse_owner(item): return { 'uploader': traverse_obj(item, ('owner', ('name', ('user', 'nickname')), {str}, any)), 'uploader_id': traverse_obj(item, ('owner', 'id', {str})), } def _fetch_page(self, list_id, page): page += 1 resp = self._call_api(list_id, f'page {page}', { 'page': page, 'pageSize': self._PAGE_SIZE, }) # this is needed to support both mylist and user for video in traverse_obj(resp, ('items', ..., ('video', None))) or []: video_id = video.get('id') if not video_id: # skip {"video": {"id": "blablabla", ...}} continue count = video.get('count') or {} get_count = lambda x: int_or_none(count.get(x)) yield { '_type': 'url', 'id': video_id, 'title': video.get('title'), 'url': f'https://www.nicovideo.jp/watch/{video_id}', 'description': video.get('shortDescription'), 'duration': int_or_none(video.get('duration')), 'view_count': get_count('view'), 'comment_count': get_count('comment'), 
'thumbnail': traverse_obj(video, ('thumbnail', ('nHdUrl', 'largeUrl', 'listingUrl', 'url'))), 'ie_key': NiconicoIE.ie_key(), **self._parse_owner(video), } def _entries(self, list_id): return OnDemandPagedList(functools.partial(self._fetch_page, list_id), self._PAGE_SIZE) class NiconicoPlaylistIE(NiconicoPlaylistBaseIE): IE_NAME = 'niconico:playlist' _VALID_URL = r'https?://(?:(?:www\.|sp\.)?nicovideo\.jp|nico\.ms)/(?:user/\d+/)?(?:my/)?mylist/(?:#/)?(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.nicovideo.jp/mylist/27411728', 'info_dict': { 'id': '27411728', 'title': 'AKB48のオールナイトニッポン', 'description': 'md5:d89694c5ded4b6c693dea2db6e41aa08', 'uploader': 'のっく', 'uploader_id': '805442', }, 'playlist_mincount': 291, }, { 'url': 'https://www.nicovideo.jp/user/805442/mylist/27411728', 'only_matching': True, }, { 'url': 'https://www.nicovideo.jp/my/mylist/#/68048635', 'only_matching': True, }] def _call_api(self, list_id, resource, query): return self._download_json( f'https://nvapi.nicovideo.jp/v2/mylists/{list_id}', list_id, f'Downloading {resource}', query=query, headers=self._API_HEADERS)['data']['mylist'] def _real_extract(self, url): list_id = self._match_id(url) mylist = self._call_api(list_id, 'list', { 'pageSize': 1, }) return self.playlist_result( self._entries(list_id), list_id, mylist.get('name'), mylist.get('description'), **self._parse_owner(mylist)) class NiconicoSeriesIE(NiconicoPlaylistBaseIE): IE_NAME = 'niconico:series' _VALID_URL = r'https?://(?:(?:www\.|sp\.)?nicovideo\.jp(?:/user/\d+)?|nico\.ms)/series/(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.nicovideo.jp/user/44113208/series/110226', 'info_dict': { 'id': '110226', 'title': 'ご立派ァ!のシリーズ', 'description': '楽しそうな外人の吹き替えをさせたら終身名誉ホモガキの右に出る人はいませんね…', 'uploader': 'アルファるふぁ', 'uploader_id': '44113208', }, 'playlist_mincount': 10, }, { 'url': 'https://www.nicovideo.jp/series/12312/', 'info_dict': { 'id': '12312', 'title': 'バトルスピリッツ お勧めカード紹介(調整中)', 'description': '', 'uploader': '野鳥', 'uploader_id': '2275360', 
}, 'playlist_mincount': 103, }, { 'url': 'https://nico.ms/series/203559', 'only_matching': True, }] def _call_api(self, list_id, resource, query): return self._download_json( f'https://nvapi.nicovideo.jp/v2/series/{list_id}', list_id, f'Downloading {resource}', query=query, headers=self._API_HEADERS)['data'] def _real_extract(self, url): list_id = self._match_id(url) series = self._call_api(list_id, 'list', { 'pageSize': 1, })['detail'] return self.playlist_result( self._entries(list_id), list_id, series.get('title'), series.get('description'), **self._parse_owner(series)) class NiconicoHistoryIE(NiconicoPlaylistBaseIE): IE_NAME = 'niconico:history' IE_DESC = 'NicoNico user history or likes. Requires cookies.' _VALID_URL = r'https?://(?:www\.|sp\.)?nicovideo\.jp/my/(?P<id>history(?:/like)?)' _TESTS = [{ 'note': 'PC page, with /video', 'url': 'https://www.nicovideo.jp/my/history/video', 'only_matching': True, }, { 'note': 'PC page, without /video', 'url': 'https://www.nicovideo.jp/my/history', 'only_matching': True, }, { 'note': 'mobile page, with /video', 'url': 'https://sp.nicovideo.jp/my/history/video', 'only_matching': True, }, { 'note': 'mobile page, without /video', 'url': 'https://sp.nicovideo.jp/my/history', 'only_matching': True, }, { 'note': 'PC page', 'url': 'https://www.nicovideo.jp/my/history/like', 'only_matching': True, }, { 'note': 'Mobile page', 'url': 'https://sp.nicovideo.jp/my/history/like', 'only_matching': True, }] def _call_api(self, list_id, resource, query): path = 'likes' if list_id == 'history/like' else 'watch/history' return self._download_json( f'https://nvapi.nicovideo.jp/v1/users/me/{path}', list_id, f'Downloading {resource}', query=query, headers=self._API_HEADERS)['data'] def _real_extract(self, url): list_id = self._match_id(url) try: mylist = self._call_api(list_id, 'list', {'pageSize': 1}) except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 401: self.raise_login_required('You have to be logged in 
to get your history') raise return self.playlist_result(self._entries(list_id), list_id, **self._parse_owner(mylist)) class NicovideoSearchBaseIE(InfoExtractor): _SEARCH_TYPE = 'search' def _entries(self, url, item_id, query=None, note='Downloading page %(page)s'): query = query or {} pages = [query['page']] if 'page' in query else itertools.count(1) for page_num in pages: query['page'] = str(page_num) webpage = self._download_webpage(url, item_id, query=query, note=note % {'page': page_num}) results = re.findall(r'(?<=data-video-id=)["\']?(?P<videoid>.*?)(?=["\'])', webpage) for item in results: yield self.url_result(f'https://www.nicovideo.jp/watch/{item}', 'Niconico', item) if not results: break def _search_results(self, query): return self._entries( self._proto_relative_url(f'//www.nicovideo.jp/{self._SEARCH_TYPE}/{query}'), query) class NicovideoSearchIE(NicovideoSearchBaseIE, SearchInfoExtractor): IE_DESC = 'Nico video search' IE_NAME = 'nicovideo:search' _SEARCH_KEY = 'nicosearch' class NicovideoSearchURLIE(NicovideoSearchBaseIE): IE_NAME = f'{NicovideoSearchIE.IE_NAME}_url' IE_DESC = 'Nico video search URLs' _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/search/(?P<id>[^?#&]+)?' 
_TESTS = [{ 'url': 'http://www.nicovideo.jp/search/sm9', 'info_dict': { 'id': 'sm9', 'title': 'sm9', }, 'playlist_mincount': 40, }, { 'url': 'https://www.nicovideo.jp/search/sm9?sort=h&order=d&end=2020-12-31&start=2020-01-01', 'info_dict': { 'id': 'sm9', 'title': 'sm9', }, 'playlist_count': 31, }] def _real_extract(self, url): query = self._match_id(url) return self.playlist_result(self._entries(url, query), query, query) class NicovideoSearchDateIE(NicovideoSearchBaseIE, SearchInfoExtractor): IE_DESC = 'Nico video search, newest first' IE_NAME = f'{NicovideoSearchIE.IE_NAME}:date' _SEARCH_KEY = 'nicosearchdate' _TESTS = [{ 'url': 'nicosearchdateall:a', 'info_dict': { 'id': 'a', 'title': 'a', }, 'playlist_mincount': 1610, }] _START_DATE = dt.date(2007, 1, 1) _RESULTS_PER_PAGE = 32 _MAX_PAGES = 50 def _entries(self, url, item_id, start_date=None, end_date=None): start_date, end_date = start_date or self._START_DATE, end_date or dt.datetime.now().date() # If the last page has a full page of videos, we need to break down the query interval further last_page_len = len(list(self._get_entries_for_date( url, item_id, start_date, end_date, self._MAX_PAGES, note=f'Checking number of videos from {start_date} to {end_date}'))) if (last_page_len == self._RESULTS_PER_PAGE and start_date != end_date): midpoint = start_date + ((end_date - start_date) // 2) yield from self._entries(url, item_id, midpoint, end_date) yield from self._entries(url, item_id, start_date, midpoint) else: self.to_screen(f'{item_id}: Downloading results from {start_date} to {end_date}') yield from self._get_entries_for_date( url, item_id, start_date, end_date, note=' Downloading page %(page)s') def _get_entries_for_date(self, url, item_id, start_date, end_date=None, page_num=None, note=None): query = { 'start': str(start_date), 'end': str(end_date or start_date), 'sort': 'f', 'order': 'd', } if page_num: query['page'] = str(page_num)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
true
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/democracynow.py
yt_dlp/extractor/democracynow.py
import os.path import re import urllib.parse from .common import InfoExtractor from ..utils import ( remove_start, url_basename, ) class DemocracynowIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?democracynow\.org/(?P<id>[^\?]*)' IE_NAME = 'democracynow' _TESTS = [{ 'url': 'http://www.democracynow.org/shows/2015/7/3', 'md5': '3757c182d3d84da68f5c8f506c18c196', 'info_dict': { 'id': '2015-0703-001', 'ext': 'mp4', 'title': 'Daily Show for July 03, 2015', 'description': 'md5:80eb927244d6749900de6072c7cc2c86', }, }, { 'url': 'http://www.democracynow.org/2015/7/3/this_flag_comes_down_today_bree', 'info_dict': { 'id': '2015-0703-001', 'ext': 'mp4', 'title': '"This Flag Comes Down Today": Bree Newsome Scales SC Capitol Flagpole, Takes Down Confederate Flag', 'description': 'md5:4d2bc4f0d29f5553c2210a4bc7761a21', }, 'params': { 'skip_download': True, }, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) json_data = self._parse_json(self._search_regex( r'<script[^>]+type="text/json"[^>]*>\s*({[^>]+})', webpage, 'json'), display_id) title = json_data['title'] formats = [] video_id = None for key in ('file', 'audio', 'video', 'high_res_video'): media_url = json_data.get(key, '') if not media_url: continue media_url = re.sub(r'\?.*', '', urllib.parse.urljoin(url, media_url)) video_id = video_id or remove_start(os.path.splitext(url_basename(media_url))[0], 'dn') formats.append({ 'url': media_url, 'vcodec': 'none' if key == 'audio' else None, }) default_lang = 'en' subtitles = {} def add_subtitle_item(lang, info_dict): if lang not in subtitles: subtitles[lang] = [] subtitles[lang].append(info_dict) # chapter_file are not subtitles if 'caption_file' in json_data: add_subtitle_item(default_lang, { 'url': urllib.parse.urljoin(url, json_data['caption_file']), }) for subtitle_item in json_data.get('captions', []): lang = subtitle_item.get('language', '').lower() or default_lang add_subtitle_item(lang, { 'url': 
urllib.parse.urljoin(url, subtitle_item['url']), }) description = self._og_search_description(webpage, default=None) return { 'id': video_id or display_id, 'title': title, 'description': description, 'thumbnail': json_data.get('image'), 'subtitles': subtitles, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/viidea.py
yt_dlp/extractor/viidea.py
import re import urllib.parse from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, js_to_json, parse_duration, parse_iso8601, ) class ViideaIE(InfoExtractor): _VALID_URL = r'''(?x)https?://(?:www\.)?(?: videolectures\.net| flexilearn\.viidea\.net| presentations\.ocwconsortium\.org| video\.travel-zoom\.si| video\.pomp-forum\.si| tv\.nil\.si| video\.hekovnik.com| video\.szko\.si| kpk\.viidea\.com| inside\.viidea\.net| video\.kiberpipa\.org| bvvideo\.si| kongres\.viidea\.net| edemokracija\.viidea\.com )(?:/lecture)?/(?P<id>[^/]+)(?:/video/(?P<part>\d+))?/*(?:[#?].*)?$''' _TESTS = [{ 'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/', 'info_dict': { 'id': '20171', 'display_id': 'promogram_igor_mekjavic_eng', 'ext': 'mp4', 'title': 'Automatics, robotics and biocybernetics', 'description': 'md5:815fc1deb6b3a2bff99de2d5325be482', 'thumbnail': r're:http://.*\.jpg', 'timestamp': 1372349289, 'upload_date': '20130627', 'duration': 565, }, 'params': { # m3u8 download 'skip_download': True, }, }, { # video with invalid direct format links (HTTP 403) 'url': 'http://videolectures.net/russir2010_filippova_nlp/', 'info_dict': { 'id': '14891', 'display_id': 'russir2010_filippova_nlp', 'ext': 'flv', 'title': 'NLP at Google', 'description': 'md5:fc7a6d9bf0302d7cc0e53f7ca23747b3', 'thumbnail': r're:http://.*\.jpg', 'timestamp': 1284375600, 'upload_date': '20100913', 'duration': 5352, }, 'params': { # rtmp download 'skip_download': True, }, }, { # event playlist 'url': 'http://videolectures.net/deeplearning2015_montreal/', 'info_dict': { 'id': '23181', 'title': 'Deep Learning Summer School, Montreal 2015', 'description': 'md5:0533a85e4bd918df52a01f0e1ebe87b7', 'thumbnail': r're:http://.*\.jpg', 'timestamp': 1438560000, }, 'playlist_count': 30, }, { # multi part lecture 'url': 'http://videolectures.net/mlss09uk_bishop_ibi/', 'info_dict': { 'id': '9737', 'display_id': 'mlss09uk_bishop_ibi', 'title': 'Introduction 
To Bayesian Inference', 'thumbnail': r're:http://.*\.jpg', 'timestamp': 1251622800, }, 'playlist': [{ 'info_dict': { 'id': '9737_part1', 'display_id': 'mlss09uk_bishop_ibi_part1', 'ext': 'wmv', 'title': 'Introduction To Bayesian Inference (Part 1)', 'thumbnail': r're:http://.*\.jpg', 'duration': 4622, 'timestamp': 1251622800, 'upload_date': '20090830', }, }, { 'info_dict': { 'id': '9737_part2', 'display_id': 'mlss09uk_bishop_ibi_part2', 'ext': 'wmv', 'title': 'Introduction To Bayesian Inference (Part 2)', 'thumbnail': r're:http://.*\.jpg', 'duration': 5641, 'timestamp': 1251622800, 'upload_date': '20090830', }, }], 'playlist_count': 2, }] def _real_extract(self, url): lecture_slug, explicit_part_id = self._match_valid_url(url).groups() webpage = self._download_webpage(url, lecture_slug) cfg = self._parse_json(self._search_regex( [r'cfg\s*:\s*({.+?})\s*,\s*[\da-zA-Z_]+\s*:\s*\(?\s*function', r'cfg\s*:\s*({[^}]+})'], webpage, 'cfg'), lecture_slug, js_to_json) lecture_id = str(cfg['obj_id']) base_url = self._proto_relative_url(cfg['livepipe'], 'http:') try: lecture_data = self._download_json( f'{base_url}/site/api/lecture/{lecture_id}?format=json', lecture_id)['lecture'][0] except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 403: msg = self._parse_json( e.cause.response.read().decode('utf-8'), lecture_id) raise ExtractorError(msg['detail'], expected=True) raise lecture_info = { 'id': lecture_id, 'display_id': lecture_slug, 'title': lecture_data['title'], 'timestamp': parse_iso8601(lecture_data.get('time')), 'description': lecture_data.get('description_wiki'), 'thumbnail': lecture_data.get('thumb'), } playlist_entries = [] lecture_type = lecture_data.get('type') parts = [str(video) for video in cfg.get('videos', [])] if parts: multipart = len(parts) > 1 def extract_part(part_id): smil_url = f'{base_url}/{lecture_slug}/video/{part_id}/smil.xml' smil = self._download_smil(smil_url, lecture_id) info = self._parse_smil(smil, smil_url, 
lecture_id) info['id'] = lecture_id if not multipart else f'{lecture_id}_part{part_id}' info['display_id'] = lecture_slug if not multipart else f'{lecture_slug}_part{part_id}' if multipart: info['title'] += f' (Part {part_id})' switch = smil.find('.//switch') if switch is not None: info['duration'] = parse_duration(switch.attrib.get('dur')) item_info = lecture_info.copy() item_info.update(info) return item_info if explicit_part_id or not multipart: result = extract_part(explicit_part_id or parts[0]) else: result = { '_type': 'multi_video', 'entries': [extract_part(part) for part in parts], } result.update(lecture_info) # Immediately return explicitly requested part or non event item if explicit_part_id or lecture_type != 'evt': return result playlist_entries.append(result) # It's probably a playlist if not parts or lecture_type == 'evt': playlist_webpage = self._download_webpage( f'{base_url}/site/ajax/drilldown/?id={lecture_id}', lecture_id) entries = [ self.url_result(urllib.parse.urljoin(url, video_url), 'Viidea') for _, video_url in re.findall( r'<a[^>]+href=(["\'])(.+?)\1[^>]+id=["\']lec=\d+', playlist_webpage)] playlist_entries.extend(entries) playlist = self.playlist_result(playlist_entries, lecture_id) playlist.update(lecture_info) return playlist
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/palcomp3.py
yt_dlp/extractor/palcomp3.py
from .common import InfoExtractor from ..utils import ( int_or_none, str_or_none, try_get, ) class PalcoMP3BaseIE(InfoExtractor): _GQL_QUERY_TMPL = '''{ artist(slug: "%s") { %s } }''' _ARTIST_FIELDS_TMPL = '''music(slug: "%%s") { %s }''' _MUSIC_FIELDS = '''duration hls mp3File musicID plays title''' def _call_api(self, artist_slug, artist_fields): return self._download_json( 'https://www.palcomp3.com.br/graphql/', artist_slug, query={ 'query': self._GQL_QUERY_TMPL % (artist_slug, artist_fields), })['data'] def _parse_music(self, music): music_id = str(music['musicID']) title = music['title'] formats = [] hls_url = music.get('hls') if hls_url: formats.append({ 'url': hls_url, 'protocol': 'm3u8_native', 'ext': 'mp4', }) mp3_file = music.get('mp3File') if mp3_file: formats.append({ 'url': mp3_file, }) return { 'id': music_id, 'title': title, 'formats': formats, 'duration': int_or_none(music.get('duration')), 'view_count': int_or_none(music.get('plays')), } def _real_initialize(self): self._ARTIST_FIELDS_TMPL = self._ARTIST_FIELDS_TMPL % self._MUSIC_FIELDS def _real_extract(self, url): artist_slug, music_slug = self._match_valid_url(url).groups() artist_fields = self._ARTIST_FIELDS_TMPL % music_slug music = self._call_api(artist_slug, artist_fields)['artist']['music'] return self._parse_music(music) class PalcoMP3IE(PalcoMP3BaseIE): IE_NAME = 'PalcoMP3:song' _VALID_URL = r'https?://(?:www\.)?palcomp3\.com(?:\.br)?/(?P<artist>[^/]+)/(?P<id>[^/?&#]+)' _TESTS = [{ 'url': 'https://www.palcomp3.com/maiaraemaraisaoficial/nossas-composicoes-cuida-bem-dela/', 'md5': '99fd6405b2d8fd589670f6db1ba3b358', 'info_dict': { 'id': '3162927', 'ext': 'mp3', 'title': 'Nossas Composições - CUIDA BEM DELA', 'duration': 210, 'view_count': int, }, }] @classmethod def suitable(cls, url): return False if PalcoMP3VideoIE.suitable(url) else super().suitable(url) class PalcoMP3ArtistIE(PalcoMP3BaseIE): IE_NAME = 'PalcoMP3:artist' _VALID_URL = 
r'https?://(?:www\.)?palcomp3\.com(?:\.br)?/(?P<id>[^/?&#]+)' _TESTS = [{ 'url': 'https://www.palcomp3.com.br/condedoforro/', 'info_dict': { 'id': '358396', 'title': 'Conde do Forró', }, 'playlist_mincount': 188, }] _ARTIST_FIELDS_TMPL = '''artistID musics { nodes { %s } } name''' @classmethod def suitable(cls, url): return False if PalcoMP3IE._match_valid_url(url) else super().suitable(url) def _real_extract(self, url): artist_slug = self._match_id(url) artist = self._call_api(artist_slug, self._ARTIST_FIELDS_TMPL)['artist'] def entries(): for music in (try_get(artist, lambda x: x['musics']['nodes'], list) or []): yield self._parse_music(music) return self.playlist_result( entries(), str_or_none(artist.get('artistID')), artist.get('name')) class PalcoMP3VideoIE(PalcoMP3BaseIE): IE_NAME = 'PalcoMP3:video' _VALID_URL = r'https?://(?:www\.)?palcomp3\.com(?:\.br)?/(?P<artist>[^/]+)/(?P<id>[^/?&#]+)/?#clipe' _TESTS = [{ 'url': 'https://www.palcomp3.com/maiaraemaraisaoficial/maiara-e-maraisa-voce-faz-falta-aqui-ao-vivo-em-vicosa-mg/#clipe', 'add_ie': ['Youtube'], 'info_dict': { 'id': '_pD1nR2qqPg', 'ext': 'mp4', 'title': 'Maiara e Maraisa - Você Faz Falta Aqui - DVD Ao Vivo Em Campo Grande', 'description': 'md5:7043342c09a224598e93546e98e49282', 'upload_date': '20161107', 'uploader_id': 'maiaramaraisaoficial', 'uploader': 'Maiara e Maraisa', }, }] _MUSIC_FIELDS = 'youtubeID' def _parse_music(self, music): youtube_id = music['youtubeID'] return self.url_result(youtube_id, 'Youtube', youtube_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/dispeak.py
yt_dlp/extractor/dispeak.py
import re from .common import InfoExtractor from ..utils import ( int_or_none, parse_duration, remove_end, xpath_element, xpath_text, ) class DigitallySpeakingIE(InfoExtractor): _VALID_URL = r'https?://(?:s?evt\.dispeak|events\.digitallyspeaking)\.com/(?:[^/]+/)+xml/(?P<id>[^.]+)\.xml' _TESTS = [{ # From http://gdcvault.com/play/1023460/Tenacious-Design-and-The-Interface 'url': 'http://evt.dispeak.com/ubm/gdc/sf16/xml/840376_BQRC.xml', 'md5': 'a8efb6c31ed06ca8739294960b2dbabd', 'info_dict': { 'id': '840376_BQRC', 'ext': 'mp4', 'title': 'Tenacious Design and The Interface of \'Destiny\'', }, }, { # From http://www.gdcvault.com/play/1014631/Classic-Game-Postmortem-PAC 'url': 'http://events.digitallyspeaking.com/gdc/sf11/xml/12396_1299111843500GMPX.xml', 'only_matching': True, }, { # From http://www.gdcvault.com/play/1013700/Advanced-Material 'url': 'http://sevt.dispeak.com/ubm/gdc/eur10/xml/11256_1282118587281VNIT.xml', 'only_matching': True, }, { # From https://gdcvault.com/play/1016624, empty speakerVideo 'url': 'https://sevt.dispeak.com/ubm/gdc/online12/xml/201210-822101_1349794556671DDDD.xml', 'info_dict': { 'id': '201210-822101_1349794556671DDDD', 'ext': 'flv', 'title': 'Pre-launch - Preparing to Take the Plunge', }, }, { # From http://www.gdcvault.com/play/1014846/Conference-Keynote-Shigeru, empty slideVideo 'url': 'http://events.digitallyspeaking.com/gdc/project25/xml/p25-miyamoto1999_1282467389849HSVB.xml', 'only_matching': True, }] def _parse_mp4(self, metadata): video_formats = [] video_root = None mp4_video = xpath_text(metadata, './mp4video', default=None) if mp4_video is not None: mobj = re.match(r'(?P<root>https?://.*?/).*', mp4_video) video_root = mobj.group('root') if video_root is None: http_host = xpath_text(metadata, 'httpHost', default=None) if http_host: video_root = f'http://{http_host}/' if video_root is None: # Hard-coded in http://evt.dispeak.com/ubm/gdc/sf16/custom/player2.js # Works for GPUTechConf, too video_root = 
'http://s3-2u.digitallyspeaking.com/' formats = metadata.findall('./MBRVideos/MBRVideo') if not formats: return None for a_format in formats: stream_name = xpath_text(a_format, 'streamName', fatal=True) video_path = re.match(r'mp4\:(?P<path>.*)', stream_name).group('path') url = video_root + video_path bitrate = xpath_text(a_format, 'bitrate') tbr = int_or_none(bitrate) vbr = int_or_none(self._search_regex( r'-(\d+)\.mp4', video_path, 'vbr', default=None)) video_formats.append({ 'format_id': bitrate, 'url': url, 'tbr': tbr, 'vbr': vbr, }) return video_formats def _parse_flv(self, metadata): formats = [] akamai_url = xpath_text(metadata, './akamaiHost', fatal=True) audios = metadata.findall('./audios/audio') for audio in audios: formats.append({ 'url': f'rtmp://{akamai_url}/ondemand?ovpfv=1.1', 'play_path': remove_end(audio.get('url'), '.flv'), 'ext': 'flv', 'vcodec': 'none', 'quality': 1, 'format_id': audio.get('code'), }) for video_key, format_id, preference in ( ('slide', 'slides', -2), ('speaker', 'speaker', -1)): video_path = xpath_text(metadata, f'./{video_key}Video') if not video_path: continue formats.append({ 'url': f'rtmp://{akamai_url}/ondemand?ovpfv=1.1', 'play_path': remove_end(video_path, '.flv'), 'ext': 'flv', 'format_note': f'{video_key} video', 'quality': preference, 'format_id': format_id, }) return formats def _real_extract(self, url): video_id = self._match_id(url) xml_description = self._download_xml(url, video_id) metadata = xpath_element(xml_description, 'metadata') video_formats = self._parse_mp4(metadata) if video_formats is None: video_formats = self._parse_flv(metadata) return { 'id': video_id, 'formats': video_formats, 'title': xpath_text(metadata, 'title', fatal=True), 'duration': parse_duration(xpath_text(metadata, 'endTime')), 'creator': xpath_text(metadata, 'speaker'), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/performgroup.py
yt_dlp/extractor/performgroup.py
from .common import InfoExtractor from ..utils import int_or_none, join_nonempty class PerformGroupIE(InfoExtractor): _VALID_URL = r'https?://player\.performgroup\.com/eplayer(?:/eplayer\.html|\.js)#/?(?P<id>[0-9a-f]{26})\.(?P<auth_token>[0-9a-z]{26})' _TESTS = [{ # http://www.faz.net/aktuell/sport/fussball/wm-2018-playoffs-schweiz-besiegt-nordirland-1-0-15286104.html 'url': 'http://player.performgroup.com/eplayer/eplayer.html#d478c41c5d192f56b9aa859de8.1w4crrej5w14e1ed4s1ce4ykab', 'md5': '259cb03d142e2e52471e8837ecacb29f', 'info_dict': { 'id': 'xgrwobuzumes1lwjxtcdpwgxd', 'ext': 'mp4', 'title': 'Liga MX: Keine Einsicht nach Horrorfoul', 'description': 'md5:7cd3b459c82725b021e046ab10bf1c5b', 'timestamp': 1511533477, 'upload_date': '20171124', }, }] def _call_api(self, service, auth_token, content_id, referer_url): return self._download_json( f'http://ep3.performfeeds.com/ep{service}/{auth_token}/{content_id}/', content_id, headers={ 'Referer': referer_url, 'Origin': 'http://player.performgroup.com', }, query={ '_fmt': 'json', }) def _real_extract(self, url): player_id, auth_token = self._match_valid_url(url).groups() bootstrap = self._call_api('bootstrap', auth_token, player_id, url) video = bootstrap['config']['dataSource']['sourceItems'][0]['videos'][0] video_id = video['uuid'] vod = self._call_api('vod', auth_token, video_id, url) media = vod['videos']['video'][0]['media'] formats = [] hls_url = media.get('hls', {}).get('url') if hls_url: formats.extend(self._extract_m3u8_formats(hls_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) hds_url = media.get('hds', {}).get('url') if hds_url: formats.extend(self._extract_f4m_formats(hds_url + '?hdcore', video_id, f4m_id='hds', fatal=False)) for c in media.get('content', []): c_url = c.get('url') if not c_url: continue tbr = int_or_none(c.get('bitrate'), 1000) formats.append({ 'format_id': join_nonempty('http', tbr), 'url': c_url, 'tbr': tbr, 'width': int_or_none(c.get('width')), 'height': 
int_or_none(c.get('height')), 'filesize': int_or_none(c.get('fileSize')), 'vcodec': c.get('type'), 'fps': int_or_none(c.get('videoFrameRate')), 'vbr': int_or_none(c.get('videoRate'), 1000), 'abr': int_or_none(c.get('audioRate'), 1000), }) return { 'id': video_id, 'title': video['title'], 'description': video.get('description'), 'thumbnail': video.get('poster'), 'duration': int_or_none(video.get('duration')), 'timestamp': int_or_none(video.get('publishedTime'), 1000), 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/reverbnation.py
yt_dlp/extractor/reverbnation.py
from .common import InfoExtractor from ..utils import ( qualities, str_or_none, ) class ReverbNationIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?reverbnation\.com/.*?/song/(?P<id>\d+).*?$' _TESTS = [{ 'url': 'http://www.reverbnation.com/alkilados/song/16965047-mona-lisa', 'md5': 'c0aaf339bcee189495fdf5a8c8ba8645', 'info_dict': { 'id': '16965047', 'ext': 'mp3', 'title': 'MONA LISA', 'uploader': 'ALKILADOS', 'uploader_id': '216429', 'thumbnail': r're:^https?://.*\.jpg', }, }] def _real_extract(self, url): song_id = self._match_id(url) api_res = self._download_json( f'https://api.reverbnation.com/song/{song_id}', song_id, note=f'Downloading information of song {song_id}', ) THUMBNAILS = ('thumbnail', 'image') quality = qualities(THUMBNAILS) thumbnails = [] for thumb_key in THUMBNAILS: if api_res.get(thumb_key): thumbnails.append({ 'url': api_res[thumb_key], 'preference': quality(thumb_key), }) return { 'id': song_id, 'title': api_res['name'], 'url': api_res['url'], 'uploader': api_res.get('artist', {}).get('name'), 'uploader_id': str_or_none(api_res.get('artist', {}).get('id')), 'thumbnails': thumbnails, 'ext': 'mp3', 'vcodec': 'none', }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/rumble.py
yt_dlp/extractor/rumble.py
import itertools import re from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, UnsupportedError, clean_html, extract_attributes, format_field, get_element_by_class, get_elements_html_by_class, int_or_none, join_nonempty, parse_count, parse_iso8601, traverse_obj, unescapeHTML, urljoin, ) class RumbleEmbedIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?rumble\.com/embed/(?:[0-9a-z]+\.)?(?P<id>[0-9a-z]+)' _EMBED_REGEX = [fr'(?:<(?:script|iframe)[^>]+\bsrc=|["\']embedUrl["\']\s*:\s*)["\'](?P<url>{_VALID_URL})'] _TESTS = [{ 'url': 'https://rumble.com/embed/v5pv5f', 'md5': '36a18a049856720189f30977ccbb2c34', 'info_dict': { 'id': 'v5pv5f', 'ext': 'mp4', 'title': 'WMAR 2 News Latest Headlines | October 20, 6pm', 'timestamp': 1571611968, 'upload_date': '20191020', 'channel_url': 'https://rumble.com/c/WMAR', 'channel': 'WMAR', 'thumbnail': r're:https://.+\.jpg', 'duration': 234, 'uploader': 'WMAR', 'live_status': 'not_live', }, }, { 'url': 'https://rumble.com/embed/vslb7v', 'md5': '7418035de1a30a178b8af34dc2b6a52b', 'info_dict': { 'id': 'vslb7v', 'ext': 'mp4', 'title': 'Defense Sec. 
says US Commitment to NATO Defense \'Ironclad\'', 'timestamp': 1645142135, 'upload_date': '20220217', 'channel_url': 'https://rumble.com/c/CyberTechNews', 'channel': 'CTNews', 'thumbnail': r're:https://.+\.jpg', 'duration': 901, 'uploader': 'CTNews', 'live_status': 'not_live', }, }, { 'url': 'https://rumble.com/embed/vunh1h', 'info_dict': { 'id': 'vunh1h', 'ext': 'mp4', 'title': '‘Gideon, op zoek naar de waarheid’ including ENG SUBS', 'timestamp': 1647197663, 'upload_date': '20220313', 'channel_url': 'https://rumble.com/user/BLCKBX', 'channel': 'BLCKBX', 'thumbnail': r're:https://.+\.jpg', 'duration': 5069, 'uploader': 'BLCKBX', 'live_status': 'not_live', 'subtitles': { 'en': [ { 'url': r're:https://.+\.vtt', 'name': 'English', 'ext': 'vtt', }, ], }, }, 'params': {'skip_download': True}, }, { 'url': 'https://rumble.com/embed/v1essrt', 'info_dict': { 'id': 'v1essrt', 'ext': 'mp4', 'title': 'startswith:lofi hip hop radio 📚 - beats to relax/study to', 'timestamp': 1661519399, 'upload_date': '20220826', 'channel_url': 'https://rumble.com/c/LofiGirl', 'channel': 'Lofi Girl', 'thumbnail': r're:https://.+\.jpg', 'uploader': 'Lofi Girl', 'live_status': 'is_live', }, 'params': {'skip_download': True}, }, { 'url': 'https://rumble.com/embed/v1amumr', 'info_dict': { 'id': 'v1amumr', 'ext': 'mp4', 'fps': 60, 'title': 'Turning Point USA 2022 Student Action Summit DAY 1 - Rumble Exclusive Live', 'timestamp': 1658518457, 'upload_date': '20220722', 'channel_url': 'https://rumble.com/c/RumbleEvents', 'channel': 'Rumble Events', 'thumbnail': r're:https://.+\.jpg', 'duration': 16427, 'uploader': 'Rumble Events', 'live_status': 'was_live', }, 'params': {'skip_download': True}, }, { 'url': 'https://rumble.com/embed/v6pezdb', 'info_dict': { 'id': 'v6pezdb', 'ext': 'mp4', 'title': '"Es war einmal ein Mädchen" – Ein filmisches Zeitzeugnis aus Leningrad 1944', 'uploader': 'RT DE', 'channel': 'RT DE', 'channel_url': 'https://rumble.com/c/RTDE', 'duration': 309, 'thumbnail': 
'https://1a-1791.com/video/fww1/dc/s8/1/n/z/2/y/nz2yy.qR4e-small-Es-war-einmal-ein-Mdchen-Ei.jpg', 'timestamp': 1743703500, 'upload_date': '20250403', 'live_status': 'not_live', }, 'params': {'skip_download': True}, }, { 'url': 'https://rumble.com/embed/ufe9n.v5pv5f', 'only_matching': True, }] _WEBPAGE_TESTS = [ { 'note': 'Rumble JS embed', 'url': 'https://therightscoop.com/what-does-9-plus-1-plus-1-equal-listen-to-this-audio-of-attempted-kavanaugh-assassins-call-and-youll-get-it', 'md5': '4701209ac99095592e73dbba21889690', 'info_dict': { 'id': 'v15eqxl', 'ext': 'mp4', 'channel': 'Mr Producer Media', 'duration': 92, 'title': '911 Audio From The Man Who Wanted To Kill Supreme Court Justice Kavanaugh', 'channel_url': 'https://rumble.com/c/RichSementa', 'thumbnail': 'https://sp.rmbl.ws/s8/1/P/j/f/A/PjfAe.qR4e-small-911-Audio-From-The-Man-Who-.jpg', 'timestamp': 1654892716, 'uploader': 'Mr Producer Media', 'upload_date': '20220610', 'live_status': 'not_live', }, }, ] @classmethod def _extract_embed_urls(cls, url, webpage): embeds = tuple(super()._extract_embed_urls(url, webpage)) if embeds: return embeds return [f'https://rumble.com/embed/{mobj.group("id")}' for mobj in re.finditer( r'<script>[^<]*\bRumble\(\s*"play"\s*,\s*{[^}]*[\'"]?video[\'"]?\s*:\s*[\'"](?P<id>[0-9a-z]+)[\'"]', webpage)] def _real_extract(self, url): video_id = self._match_id(url) video = self._download_json( 'https://rumble.com/embedJS/u3/', video_id, query={'request': 'video', 'ver': 2, 'v': video_id}) sys_msg = traverse_obj(video, ('sys', 'msg')) if sys_msg: self.report_warning(sys_msg, video_id=video_id) if video.get('live') == 0: live_status = 'not_live' if video.get('livestream_has_dvr') is None else 'was_live' elif video.get('live') == 1: live_status = 'is_upcoming' if video.get('livestream_has_dvr') else 'was_live' elif video.get('live') == 2: live_status = 'is_live' else: live_status = None formats = [] for format_type, format_info in (video.get('ua') or {}).items(): if 
isinstance(format_info, dict): for height, video_info in format_info.items(): if not traverse_obj(video_info, ('meta', 'h', {int_or_none})): video_info.setdefault('meta', {})['h'] = height format_info = format_info.values() for video_info in format_info: meta = video_info.get('meta') or {} if not video_info.get('url'): continue # With default query params returns m3u8 variants which are duplicates, without returns tar files if format_type == 'tar': continue if format_type == 'hls': if meta.get('live') is True and video.get('live') == 1: live_status = 'post_live' formats.extend(self._extract_m3u8_formats( video_info['url'], video_id, ext='mp4', m3u8_id='hls', fatal=False, live=live_status == 'is_live')) continue is_timeline = format_type == 'timeline' is_audio = format_type == 'audio' formats.append({ 'acodec': 'none' if is_timeline else None, 'vcodec': 'none' if is_audio else None, 'url': video_info['url'], 'format_id': join_nonempty(format_type, format_field(meta, 'h', '%sp')), 'format_note': 'Timeline' if is_timeline else None, 'fps': None if is_timeline or is_audio else video.get('fps'), **traverse_obj(meta, { 'tbr': ('bitrate', {int_or_none}), 'filesize': ('size', {int_or_none}), 'width': ('w', {int_or_none}), 'height': ('h', {int_or_none}), }), }) subtitles = { lang: [{ 'url': sub_info['path'], 'name': sub_info.get('language') or '', }] for lang, sub_info in (video.get('cc') or {}).items() if sub_info.get('path') } author = video.get('author') or {} thumbnails = traverse_obj(video, ('t', ..., {'url': 'i', 'width': 'w', 'height': 'h'})) if not thumbnails and video.get('i'): thumbnails = [{'url': video['i']}] if live_status in {'is_live', 'post_live'}: duration = None else: duration = int_or_none(video.get('duration')) return { 'id': video_id, 'title': unescapeHTML(video.get('title')), 'formats': formats, 'subtitles': subtitles, 'thumbnails': thumbnails, 'timestamp': parse_iso8601(video.get('pubDate')), 'channel': author.get('name'), 'channel_url': 
author.get('url'), 'duration': duration, 'uploader': author.get('name'), 'live_status': live_status, } class RumbleIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?rumble\.com/(?P<id>v(?!ideos)[\w.-]+)[^/]*$' _EMBED_REGEX = [ r'<a class=video-item--a href=(?P<url>/v[\w.-]+\.html)>', r'<a[^>]+class="videostream__link link"[^>]+href=(?P<url>/v[\w.-]+\.html)[^>]*>'] _TESTS = [{ 'add_ie': ['RumbleEmbed'], 'url': 'https://rumble.com/vdmum1-moose-the-dog-helps-girls-dig-a-snow-fort.html', 'md5': '53af34098a7f92c4e51cf0bd1c33f009', 'info_dict': { 'id': 'vb0ofn', 'ext': 'mp4', 'timestamp': 1612662578, 'uploader': 'LovingMontana', 'channel': 'LovingMontana', 'upload_date': '20210207', 'title': 'Winter-loving dog helps girls dig a snow fort ', 'description': 'Moose the dog is more than happy to help with digging out this epic snow fort. Great job, Moose!', 'channel_url': 'https://rumble.com/c/c-546523', 'thumbnail': r're:https://.+\.jpg', 'duration': 103, 'like_count': int, 'dislike_count': int, 'view_count': int, 'live_status': 'not_live', }, }, { 'url': 'http://www.rumble.com/vDMUM1?key=value', 'only_matching': True, }, { 'note': 'timeline format', 'url': 'https://rumble.com/v2ea9qb-the-u.s.-cannot-hide-this-in-ukraine-anymore-redacted-with-natali-and-clayt.html', 'md5': '40d61fec6c0945bca3d0e1dc1aa53d79', 'params': {'format': 'wv'}, 'info_dict': { 'id': 'v2bou5f', 'ext': 'mp4', 'uploader': 'Redacted News', 'upload_date': '20230322', 'timestamp': 1679445010, 'title': 'The U.S. 
CANNOT hide this in Ukraine anymore | Redacted with Natali and Clayton Morris', 'duration': 892, 'channel': 'Redacted News', 'description': 'md5:aaad0c5c3426d7a361c29bdaaced7c42', 'channel_url': 'https://rumble.com/c/Redacted', 'live_status': 'not_live', 'thumbnail': 'https://sp.rmbl.ws/s8/1/d/x/2/O/dx2Oi.qR4e-small-The-U.S.-CANNOT-hide-this-i.jpg', 'like_count': int, 'dislike_count': int, 'view_count': int, }, }, { 'url': 'https://rumble.com/v2e7fju-the-covid-twitter-files-drop-protecting-fauci-while-censoring-the-truth-wma.html', 'info_dict': { 'id': 'v2blzyy', 'ext': 'mp4', 'live_status': 'was_live', 'release_timestamp': 1679446804, 'description': 'md5:2ac4908ccfecfb921f8ffa4b30c1e636', 'release_date': '20230322', 'timestamp': 1679445692, 'duration': 4435, 'upload_date': '20230322', 'title': 'The Covid Twitter Files Drop: Protecting Fauci While Censoring The Truth w/Matt Taibbi', 'uploader': 'Kim Iversen', 'channel_url': 'https://rumble.com/c/KimIversen', 'channel': 'Kim Iversen', 'thumbnail': 'https://sp.rmbl.ws/s8/1/6/b/w/O/6bwOi.qR4e-small-The-Covid-Twitter-Files-Dro.jpg', 'like_count': int, 'dislike_count': int, 'view_count': int, }, }] _WEBPAGE_TESTS = [{ 'url': 'https://rumble.com/videos?page=2', 'playlist_mincount': 24, 'info_dict': { 'id': 'videos?page=2', 'title': 'All videos', 'description': 'Browse videos uploaded to Rumble.com', 'age_limit': 0, }, }, { 'url': 'https://rumble.com/browse/live', 'playlist_mincount': 25, 'info_dict': { 'id': 'live', 'title': 'Browse', 'age_limit': 0, }, }, { 'url': 'https://rumble.com/search/video?q=rumble&sort=views', 'playlist_mincount': 24, 'info_dict': { 'id': 'video?q=rumble&sort=views', 'title': 'Search results for: rumble', 'age_limit': 0, }, }] def _real_extract(self, url): page_id = self._match_id(url) webpage = self._download_webpage(url, page_id) url_info = next(RumbleEmbedIE.extract_from_webpage(self._downloader, url, webpage), None) if not url_info: raise UnsupportedError(url) return { '_type': 
'url_transparent', 'ie_key': url_info['ie_key'], 'url': url_info['url'], 'release_timestamp': parse_iso8601(self._search_regex( r'(?:Livestream begins|Streamed on):\s+<time datetime="([^"]+)', webpage, 'release date', default=None)), 'view_count': int_or_none(self._search_regex( r'"userInteractionCount"\s*:\s*(\d+)', webpage, 'view count', default=None)), 'like_count': parse_count(self._search_regex( r'<span data-js="rumbles_up_votes">\s*([\d,.KM]+)', webpage, 'like count', default=None)), 'dislike_count': parse_count(self._search_regex( r'<span data-js="rumbles_down_votes">\s*([\d,.KM]+)', webpage, 'dislike count', default=None)), 'description': clean_html(get_element_by_class('media-description', webpage)), } class RumbleChannelIE(InfoExtractor): _VALID_URL = r'(?P<url>https?://(?:www\.)?rumble\.com/(?:c|user)/(?P<id>[^&?#$/]+))' _TESTS = [{ 'url': 'https://rumble.com/c/Styxhexenhammer666', 'playlist_mincount': 1160, 'info_dict': { 'id': 'Styxhexenhammer666', }, }, { 'url': 'https://rumble.com/user/goldenpoodleharleyeuna', 'playlist_mincount': 4, 'info_dict': { 'id': 'goldenpoodleharleyeuna', }, }] def entries(self, url, playlist_id): for page in itertools.count(1): try: webpage = self._download_webpage(f'{url}?page={page}', playlist_id, note=f'Downloading page {page}') except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 404: break raise for video_url in traverse_obj( get_elements_html_by_class('videostream__link', webpage), (..., {extract_attributes}, 'href'), ): yield self.url_result(urljoin('https://rumble.com', video_url)) def _real_extract(self, url): url, playlist_id = self._match_valid_url(url).groups() return self.playlist_result(self.entries(url, playlist_id), playlist_id=playlist_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/urplay.py
yt_dlp/extractor/urplay.py
from .common import InfoExtractor from ..utils import ( ExtractorError, ISO639Utils, dict_get, int_or_none, parse_age_limit, try_get, unified_timestamp, url_or_none, ) from ..utils.traversal import require, traverse_obj class URPlayIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?ur(?:play|skola)\.se/(?:program|Produkter)/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://urplay.se/program/203704-ur-samtiden-livet-universum-och-rymdens-markliga-musik-om-vetenskap-kritiskt-tankande-och-motstand', 'info_dict': { 'id': '203704', 'ext': 'mp4', 'title': 'UR Samtiden - Livet, universum och rymdens märkliga musik : Om vetenskap, kritiskt tänkande och motstånd', 'description': 'md5:5344508a52aa78c1ced6c1b8b9e44e9a', 'thumbnail': r're:^https?://.+\.jpg', 'timestamp': 1513292400, 'upload_date': '20171214', 'series': 'UR Samtiden - Livet, universum och rymdens märkliga musik', 'duration': 2269, 'categories': ['Kultur & historia'], 'tags': ['Kritiskt tänkande', 'Vetenskap', 'Vetenskaplig verksamhet'], 'episode': 'Om vetenskap, kritiskt tänkande och motstånd', 'age_limit': 15, }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://urplay.se/program/222967-en-foralders-dagbok-mitt-barn-skadar-sig-sjalv', 'info_dict': { 'id': '222967', 'ext': 'mp4', 'title': 'En förälders dagbok : Mitt barn skadar sig själv', 'description': 'md5:9f771eef03a732a213b367b52fe826ca', 'thumbnail': r're:^https?://.+\.jpg', 'timestamp': 1629676800, 'upload_date': '20210823', 'series': 'En förälders dagbok', 'duration': 1740, 'age_limit': 15, 'episode_number': 3, 'categories': 'count:2', 'tags': 'count:7', 'episode': 'Mitt barn skadar sig själv', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://urskola.se/Produkter/190031-Tripp-Trapp-Trad-Sovkudde', 'info_dict': { 'id': '190031', 'ext': 'mp4', 'title': 'Tripp, Trapp, Träd : Sovkudde', 'description': 'md5:b86bffdae04a7e9379d1d7e5947df1d1', 'thumbnail': r're:^https?://.+\.jpg', 'timestamp': 1440086400, 'upload_date': '20150820', 'series': 
'Tripp, Trapp, Träd', 'duration': 865, 'age_limit': 1, 'episode_number': 1, 'categories': [], 'tags': ['Sova'], 'episode': 'Sovkudde', 'season': 'Säsong 1', }, 'params': {'skip_download': 'm3u8'}, }, { # Only accessible through new media api 'url': 'https://urplay.se/program/242932-vulkanernas-krafter-fran-kraftfull-till-forgorande', 'info_dict': { 'id': '242932', 'ext': 'mp4', 'title': 'Vulkanernas krafter : Från kraftfull till förgörande', 'description': 'md5:742bb87048e7d5a7f209d28f9bb70ab1', 'age_limit': 15, 'duration': 2613, 'thumbnail': 'https://assets.ur.se/id/242932/images/1_hd.jpg', 'categories': ['Vetenskap & teknik'], 'tags': ['Geofysik', 'Naturvetenskap', 'Vulkaner', 'Vulkanutbrott'], 'series': 'Vulkanernas krafter', 'episode': 'Från kraftfull till förgörande', 'episode_number': 2, 'timestamp': 1763514000, 'upload_date': '20251119', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'http://urskola.se/Produkter/155794-Smasagor-meankieli-Grodan-i-vida-varlden', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) url = url.replace('skola.se/Produkter', 'play.se/program') webpage = self._download_webpage(url, video_id) urplayer_data = self._search_nextjs_data(webpage, video_id, fatal=False) or {} if urplayer_data: urplayer_data = traverse_obj(urplayer_data, ('props', 'pageProps', 'productData', {dict})) if not urplayer_data: raise ExtractorError('Unable to parse __NEXT_DATA__') else: accessible_episodes = self._parse_json(self._html_search_regex( r'data-react-class="routes/Product/components/ProgramContainer/ProgramContainer"[^>]+data-react-props="({.+?})"', webpage, 'urplayer data'), video_id)['accessibleEpisodes'] urplayer_data = next(e for e in accessible_episodes if e.get('id') == int_or_none(video_id)) episode = urplayer_data['title'] sources = self._download_json( f'https://media-api.urplay.se/config-streaming/v1/urplay/sources/{video_id}', video_id, note='Downloading streaming information') hls_url = 
traverse_obj(sources, ('sources', 'hls', {url_or_none}, {require('HLS URL')})) formats, subtitles = self._extract_m3u8_formats_and_subtitles( hls_url, video_id, 'mp4', m3u8_id='hls') def parse_lang_code(code): "3-character language code or None (utils candidate)" if code is None: return lang = code.lower() if not ISO639Utils.long2short(lang): lang = ISO639Utils.short2long(lang) return lang or None for stream in urplayer_data['streamingInfo'].values(): for k, v in stream.items(): if (k in ('sd', 'hd') or not isinstance(v, dict)): continue lang, sttl_url = (v.get(kk) for kk in ('language', 'location')) if not sttl_url: continue lang = parse_lang_code(lang) if not lang: continue sttl = subtitles.get(lang) or [] sttl.append({'ext': k, 'url': sttl_url}) subtitles[lang] = sttl image = urplayer_data.get('image') or {} thumbnails = [] for k, v in image.items(): t = { 'id': k, 'url': v, } wh = k.split('x') if len(wh) == 2: t.update({ 'width': int_or_none(wh[0]), 'height': int_or_none(wh[1]), }) thumbnails.append(t) series = urplayer_data.get('series') or {} series_title = dict_get(series, ('seriesTitle', 'title')) or dict_get(urplayer_data, ('seriesTitle', 'mainTitle')) return { 'id': video_id, 'title': f'{series_title} : {episode}' if series_title else episode, 'description': urplayer_data.get('description'), 'thumbnails': thumbnails, 'timestamp': unified_timestamp(urplayer_data.get('publishedAt')), 'series': series_title, 'formats': formats, 'duration': int_or_none(urplayer_data.get('duration')), 'categories': urplayer_data.get('categories'), 'tags': urplayer_data.get('keywords'), 'season': series.get('label'), 'episode': episode, 'episode_number': int_or_none(urplayer_data.get('episodeNumber')), 'age_limit': parse_age_limit(min(try_get(a, lambda x: x['from'], int) or 0 for a in urplayer_data.get('ageRanges', []))), 'subtitles': subtitles, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/rtvslo.py
yt_dlp/extractor/rtvslo.py
import re from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, parse_duration, traverse_obj, unified_timestamp, url_or_none, urljoin, ) class RTVSLOIE(InfoExtractor): IE_NAME = 'rtvslo.si' _VALID_URL = r'''(?x) https?://(?: (?:365|4d)\.rtvslo.si/arhiv/[^/?#&;]+| (?:www\.)?rtvslo\.si/rtv365/arhiv )/(?P<id>\d+)''' _GEO_COUNTRIES = ['SI'] _API_BASE = 'https://api.rtvslo.si/ava/{}/{}?client_id=82013fb3a531d5414f478747c1aca622' SUB_LANGS_MAP = {'Slovenski': 'sl'} _TESTS = [{ 'url': 'https://www.rtvslo.si/rtv365/arhiv/174842550?s=tv', 'info_dict': { 'id': '174842550', 'ext': 'mp4', 'release_timestamp': 1643140032, 'upload_date': '20220125', 'series': 'Dnevnik', 'thumbnail': 'https://img.rtvcdn.si/_up/ava/ava_misc/show_logos/92/dnevnik_3_wide2.jpg', 'description': 'md5:76a18692757aeb8f0f51221106277dd2', 'timestamp': 1643137046, 'title': 'Dnevnik', 'series_id': '92', 'release_date': '20220125', 'duration': 1789, }, }, { 'url': 'https://365.rtvslo.si/arhiv/utrip/174843754', 'info_dict': { 'id': '174843754', 'ext': 'mp4', 'series_id': '94', 'release_date': '20220129', 'timestamp': 1643484455, 'title': 'Utrip', 'duration': 813, 'thumbnail': 'https://img.rtvcdn.si/_up/ava/ava_misc/show_logos/94/utrip_1_wide2.jpg', 'description': 'md5:77f2892630c7b17bb7a5bb84319020c9', 'release_timestamp': 1643485825, 'upload_date': '20220129', 'series': 'Utrip', }, }, { 'url': 'https://365.rtvslo.si/arhiv/il-giornale-della-sera/174844609', 'info_dict': { 'id': '174844609', 'ext': 'mp3', 'series_id': '106615841', 'title': 'Il giornale della sera', 'duration': 1328, 'series': 'Il giornale della sera', 'timestamp': 1643743800, 'release_timestamp': 1643745424, 'thumbnail': 'https://img.rtvcdn.si/_up/ava/ava_misc/show_logos/il-giornale-della-sera_wide2.jpg', 'upload_date': '20220201', 'tbr': 128000, 'release_date': '20220201', }, }, { 'url': 'https://365.rtvslo.si/arhiv/razred-zase/148350750', 'info_dict': { 'id': '148350750', 'ext': 'mp4', 'title': 'Prvi šolski dan, 
mozaična oddaja za mlade', 'series': 'Razred zase', 'series_id': '148185730', 'duration': 1481, 'upload_date': '20121019', 'timestamp': 1350672122, 'release_date': '20121019', 'release_timestamp': 1350672122, 'thumbnail': 'https://img.rtvcdn.si/_up/ava/ava_misc/show_logos/148185730/razred_zase_2014_logo_4d_wide2.jpg', }, }, { 'url': 'https://4d.rtvslo.si/arhiv/dnevnik/174842550', 'only_matching': True, }] def _real_extract(self, url): v_id = self._match_id(url) meta = self._download_json(self._API_BASE.format('getRecordingDrm', v_id), v_id)['response'] thumbs = [{'id': k, 'url': v, 'http_headers': {'Accept': 'image/jpeg'}} for k, v in (meta.get('images') or {}).items()] subs = {} for s in traverse_obj(meta, 'subs', 'subtitles', default=[]): lang = self.SUB_LANGS_MAP.get(s.get('language'), s.get('language') or 'und') subs.setdefault(lang, []).append({ 'url': s.get('file'), 'ext': traverse_obj(s, 'format', expected_type=str.lower), }) jwt = meta.get('jwt') if not jwt: raise ExtractorError('Site did not provide an authentication token, cannot proceed.') media = self._download_json(self._API_BASE.format('getMedia', v_id), v_id, query={'jwt': jwt})['response'] formats = [] skip_protocols = ['smil', 'f4m', 'dash'] adaptive_url = traverse_obj(media, ('addaptiveMedia', 'hls_sec'), expected_type=url_or_none) if adaptive_url: formats = self._extract_wowza_formats(adaptive_url, v_id, skip_protocols=skip_protocols) adaptive_url = traverse_obj(media, ('addaptiveMedia_sl', 'hls_sec'), expected_type=url_or_none) if adaptive_url: for f in self._extract_wowza_formats(adaptive_url, v_id, skip_protocols=skip_protocols): formats.append({ **f, 'format_id': 'sign-' + f['format_id'], 'format_note': 'Sign language interpretation', 'preference': -10, 'language': ( 'slv' if f.get('language') == 'eng' and f.get('acodec') != 'none' else f.get('language')), }) for mediafile in traverse_obj(media, ('mediaFiles', lambda _, v: url_or_none(v['streams']['https']))): 
formats.append(traverse_obj(mediafile, { 'url': ('streams', 'https'), 'ext': ('mediaType', {str.lower}), 'width': ('width', {int_or_none}), 'height': ('height', {int_or_none}), 'tbr': ('bitrate', {int_or_none}), 'filesize': ('filesize', {int_or_none}), })) for mediafile in traverse_obj(media, ('mediaFiles', lambda _, v: url_or_none(v['streams']['hls_sec']))): formats.extend(self._extract_wowza_formats( mediafile['streams']['hls_sec'], v_id, skip_protocols=skip_protocols)) if any('intermission.mp4' in x['url'] for x in formats): self.raise_geo_restricted(countries=self._GEO_COUNTRIES, metadata_available=True) if any('dummy_720p.mp4' in x.get('manifest_url', '') for x in formats) and meta.get('stub') == 'error': raise ExtractorError(f'{self.IE_NAME} said: Clip not available', expected=True) return { 'id': v_id, 'webpage_url': ''.join(traverse_obj(meta, ('canonical', ('domain', 'path')))), 'title': meta.get('title'), 'formats': formats, 'subtitles': subs, 'thumbnails': thumbs, 'description': meta.get('description'), 'timestamp': unified_timestamp(traverse_obj(meta, 'broadcastDate', ('broadcastDates', 0))), 'release_timestamp': unified_timestamp(meta.get('recordingDate')), 'duration': meta.get('duration') or parse_duration(meta.get('length')), 'tags': meta.get('genre'), 'series': meta.get('showName'), 'series_id': meta.get('showId'), } class RTVSLOShowIE(InfoExtractor): IE_NAME = 'rtvslo.si:show' _VALID_URL = r'https?://(?:365|4d)\.rtvslo.si/oddaja/[^/?#&]+/(?P<id>\d+)' _TESTS = [{ 'url': 'https://365.rtvslo.si/oddaja/ekipa-bled/173250997', 'info_dict': { 'id': '173250997', 'title': 'Ekipa Bled', 'description': 'md5:c88471e27a1268c448747a5325319ab7', 'thumbnail': 'https://img.rtvcdn.si/_up/ava/ava_misc/show_logos/173250997/logo_wide1.jpg', }, 'playlist_count': 18, }] def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) return self.playlist_from_matches( re.findall(r'<a [^>]*\bhref="(/arhiv/[^"]+)"', 
webpage), playlist_id, self._html_extract_title(webpage), getter=urljoin('https://365.rtvslo.si'), ie=RTVSLOIE, description=self._og_search_description(webpage), thumbnail=self._og_search_thumbnail(webpage), )
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/webofstories.py
yt_dlp/extractor/webofstories.py
import re from .common import InfoExtractor from ..utils import ( int_or_none, orderedSet, ) class WebOfStoriesIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?webofstories\.com/play/(?:[^/]+/)?(?P<id>[0-9]+)' _VIDEO_DOMAIN = 'http://eu-mobile.webofstories.com/' _GREAT_LIFE_STREAMER = 'rtmp://eu-cdn1.webofstories.com/cfx/st/' _USER_STREAMER = 'rtmp://eu-users.webofstories.com/cfx/st/' _TESTS = [{ 'url': 'http://www.webofstories.com/play/hans.bethe/71', 'md5': '373e4dd915f60cfe3116322642ddf364', 'info_dict': { 'id': '4536', 'ext': 'mp4', 'title': 'The temperature of the sun', 'thumbnail': r're:^https?://.*\.jpg$', 'description': 'Hans Bethe talks about calculating the temperature of the sun', 'duration': 238, }, }, { 'url': 'http://www.webofstories.com/play/55908', 'md5': '2985a698e1fe3211022422c4b5ed962c', 'info_dict': { 'id': '55908', 'ext': 'mp4', 'title': 'The story of Gemmata obscuriglobus', 'thumbnail': r're:^https?://.*\.jpg$', 'description': 'Planctomycete talks about The story of Gemmata obscuriglobus', 'duration': 169, }, 'skip': 'notfound', }, { # malformed og:title meta 'url': 'http://www.webofstories.com/play/54215?o=MS', 'info_dict': { 'id': '54215', 'ext': 'mp4', 'title': '"A Leg to Stand On"', 'thumbnail': r're:^https?://.*\.jpg$', 'description': 'Oliver Sacks talks about the death and resurrection of a limb', 'duration': 97, }, 'params': { 'skip_download': True, }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) # Sometimes og:title meta is malformed title = self._og_search_title(webpage, default=None) or self._html_search_regex( r'(?s)<strong>Title:\s*</strong>(.+?)<', webpage, 'title') description = self._html_search_meta('description', webpage) thumbnail = self._og_search_thumbnail(webpage) embed_params = [s.strip(" \r\n\t'") for s in self._search_regex( r'(?s)\$\("#embedCode"\).html\(getEmbedCode\((.*?)\)', webpage, 'embed params').split(',')] ( _, speaker_id, story_id, 
story_duration, speaker_type, great_life, _thumbnail, _has_subtitles, story_filename, _story_order) = embed_params is_great_life_series = great_life == 'true' duration = int_or_none(story_duration) # URL building, see: http://www.webofstories.com/scripts/player.js ms_prefix = '' if speaker_type.lower() == 'ms': ms_prefix = 'mini_sites/' if is_great_life_series: mp4_url = f'{self._VIDEO_DOMAIN}lives/{speaker_id}/{story_filename}.mp4' rtmp_ext = 'flv' streamer = self._GREAT_LIFE_STREAMER play_path = f'stories/{speaker_id}/{story_filename}' else: mp4_url = f'{self._VIDEO_DOMAIN}{ms_prefix}{speaker_id}/{story_filename}.mp4' rtmp_ext = 'mp4' streamer = self._USER_STREAMER play_path = f'mp4:{ms_prefix}{speaker_id}/{story_filename}.mp4' formats = [{ 'format_id': 'mp4_sd', 'url': mp4_url, }, { 'format_id': 'rtmp_sd', 'page_url': url, 'url': streamer, 'ext': rtmp_ext, 'play_path': play_path, }] return { 'id': story_id, 'title': title, 'formats': formats, 'thumbnail': thumbnail, 'description': description, 'duration': duration, } class WebOfStoriesPlaylistIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?webofstories\.com/playAll/(?P<id>[^/]+)' _TEST = { 'url': 'http://www.webofstories.com/playAll/donald.knuth', 'info_dict': { 'id': 'donald.knuth', 'title': 'Donald Knuth (Scientist)', }, 'playlist_mincount': 97, } def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) entries = [ self.url_result( f'http://www.webofstories.com/play/{video_id}', 'WebOfStories', video_id=video_id) for video_id in orderedSet(re.findall(r'\bid=["\']td_(\d+)', webpage)) ] title = self._search_regex( r'<div id="speakerName">\s*<span>([^<]+)</span>', webpage, 'speaker', default=None) if title: field = self._search_regex( r'<span id="primaryField">([^<]+)</span>', webpage, 'field', default=None) if field: title += f' ({field})' if not title: title = self._search_regex( 
r'<title>Play\s+all\s+stories\s*-\s*([^<]+)\s*-\s*Web\s+of\s+Stories</title>', webpage, 'title') return self.playlist_result(entries, playlist_id, title)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nfhsnetwork.py
yt_dlp/extractor/nfhsnetwork.py
from .common import InfoExtractor from ..utils import try_get, unified_strdate, unified_timestamp class NFHSNetworkIE(InfoExtractor): IE_NAME = 'NFHSNetwork' _VALID_URL = r'https?://(?:www\.)?nfhsnetwork\.com/events/[\w-]+/(?P<id>(?:gam|evt|dd|)?[\w\d]{0,10})' _TESTS = [{ # Auto-generated two-team sport (pixellot) 'url': 'https://www.nfhsnetwork.com/events/rockford-high-school-rockford-mi/gamcf7e54cfbc', 'info_dict': { 'id': 'gamcf7e54cfbc', 'ext': 'mp4', 'title': 'Rockford vs Spring Lake - Girls Varsity Lacrosse 03/27/2021', 'uploader': 'MHSAA - Michigan: Rockford High School, Rockford, MI', 'uploader_id': 'cd2622cf76', 'uploader_url': 'https://www.nfhsnetwork.com/schools/rockford-high-school-rockford-mi', 'location': 'Rockford, Michigan', 'timestamp': 1616859000, 'upload_date': '20210327', }, 'params': { # m3u8 download 'skip_download': True, }, }, { # Non-sport activity with description 'url': 'https://www.nfhsnetwork.com/events/limon-high-school-limon-co/evt4a30e3726c', 'info_dict': { 'id': 'evt4a30e3726c', 'ext': 'mp4', 'title': 'Drama Performance Limon High School vs. 
Limon High School - 12/13/2020', 'description': 'Join the broadcast of the Limon High School Musical Performance at 2 PM.', 'uploader': 'CHSAA: Limon High School, Limon, CO', 'uploader_id': '7d2d121332', 'uploader_url': 'https://www.nfhsnetwork.com/schools/limon-high-school-limon-co', 'location': 'Limon, Colorado', 'timestamp': 1607893200, 'upload_date': '20201213', }, 'params': { # m3u8 download 'skip_download': True, }, }, { # Postseason game 'url': 'https://www.nfhsnetwork.com/events/nfhs-network-special-events/dd8de71d45', 'info_dict': { 'id': 'dd8de71d45', 'ext': 'mp4', 'title': '2015 UA Holiday Classic Tournament: National Division - 12/26/2015', 'uploader': 'SoCal Sports Productions', 'uploader_id': '063dba0150', 'uploader_url': 'https://www.nfhsnetwork.com/affiliates/socal-sports-productions', 'location': 'San Diego, California', 'timestamp': 1451187000, 'upload_date': '20151226', }, 'params': { # m3u8 download 'skip_download': True, }, }, { # Video with no broadcasts object 'url': 'https://www.nfhsnetwork.com/events/wiaa-wi/9aa2f92f82', 'info_dict': { 'id': '9aa2f92f82', 'ext': 'mp4', 'title': 'Competitive Equity - 01/21/2015', 'description': 'Committee members discuss points of their research regarding a competitive equity plan', 'uploader': 'WIAA - Wisconsin: Wisconsin Interscholastic Athletic Association', 'uploader_id': 'a49f7d1002', 'uploader_url': 'https://www.nfhsnetwork.com/associations/wiaa-wi', 'location': 'Stevens Point, Wisconsin', 'timestamp': 1421856000, 'upload_date': '20150121', }, 'params': { # m3u8 download 'skip_download': True, }, }, ] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) data = self._download_json( 'https://cfunity.nfhsnetwork.com/v2/game_or_event/' + video_id, video_id) publisher = data.get('publishers')[0] # always exists broadcast = (publisher.get('broadcasts') or publisher.get('vods'))[0] # some (older) videos don't have a broadcasts object uploader = 
publisher.get('formatted_name') or publisher.get('name') uploader_id = publisher.get('publisher_key') pub_type = publisher.get('type') uploader_prefix = ( 'schools' if pub_type == 'school' else 'associations' if 'association' in pub_type else 'affiliates' if (pub_type == 'publisher' or pub_type == 'affiliate') else 'schools') uploader_page = 'https://www.nfhsnetwork.com/{}/{}'.format(uploader_prefix, publisher.get('slug')) location = '{}, {}'.format(data.get('city'), data.get('state_name')) description = broadcast.get('description') is_live = broadcast.get('on_air') or broadcast.get('status') == 'on_air' or False timestamp = unified_timestamp(data.get('local_start_time')) upload_date = unified_strdate(data.get('local_start_time')) title = ( self._og_search_title(webpage) or self._html_search_regex(r'<h1 class="sr-hidden">(.*?)</h1>', webpage, 'title')) title = title.split('|')[0].strip() video_type = 'broadcasts' if is_live else 'vods' key = broadcast.get('key') if is_live else try_get(publisher, lambda x: x['vods'][0]['key']) m3u8_url = self._download_json( f'https://cfunity.nfhsnetwork.com/v2/{video_type}/{key}/url', video_id).get('video_url') formats = self._extract_m3u8_formats(m3u8_url, video_id, 'mp4', live=is_live) return { 'id': video_id, 'title': title, 'formats': formats, 'description': description, 'timestamp': timestamp, 'uploader': uploader, 'uploader_id': uploader_id, 'uploader_url': uploader_page, 'location': location, 'upload_date': upload_date, 'is_live': is_live, '_format_sort_fields': ('res', 'tbr'), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/idolplus.py
yt_dlp/extractor/idolplus.py
from .common import InfoExtractor from ..utils import traverse_obj, try_call, url_or_none class IdolPlusIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?idolplus\.com/z[us]/(?:concert/|contents/?\?(?:[^#]+&)?albumId=)(?P<id>\w+)' _TESTS = [{ 'url': 'https://idolplus.com/zs/contents?albumId=M012077298PPV00', 'md5': '2ace3f4661c943a2f7e79f0b88cea1e7', 'info_dict': { 'id': 'M012077298PPV00', 'ext': 'mp4', 'title': '[MultiCam] Aegyo on Top of Aegyo (IZ*ONE EATING TRIP)', 'release_date': '20200707', 'formats': 'count:65', }, 'params': {'format': '532-KIM_MINJU'}, }, { 'url': 'https://idolplus.com/zs/contents?albumId=M01232H058PPV00&catId=E9TX5', 'info_dict': { 'id': 'M01232H058PPV00', 'ext': 'mp4', 'title': 'YENA (CIRCLE CHART MUSIC AWARDS 2022 RED CARPET)', 'release_date': '20230218', 'formats': 'count:5', }, 'params': {'skip_download': 'm3u8'}, }, { # live stream 'url': 'https://idolplus.com/zu/contents?albumId=M012323174PPV00', 'info_dict': { 'id': 'M012323174PPV00', 'ext': 'mp4', 'title': 'Hanteo Music Awards 2022 DAY2', 'release_date': '20230211', 'formats': 'count:5', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://idolplus.com/zs/concert/M012323039PPV00', 'info_dict': { 'id': 'M012323039PPV00', 'ext': 'mp4', 'title': 'CIRCLE CHART MUSIC AWARDS 2022', 'release_date': '20230218', 'formats': 'count:5', }, 'params': {'skip_download': 'm3u8'}, }] def _real_extract(self, url): video_id = self._match_id(url) data_list = traverse_obj(self._download_json( 'https://idolplus.com/api/zs/viewdata/ruleset/build', video_id, headers={'App_type': 'web', 'Country_Code': 'KR'}, query={ 'rulesetId': 'contents', 'albumId': video_id, 'distribute': 'PRD', 'loggedIn': 'false', 'region': 'zs', 'countryGroup': '00010', 'lang': 'en', 'saId': '999999999998', }), ('data', 'viewData', ...)) player_data = {} while data_list: player_data = data_list.pop() if traverse_obj(player_data, 'type') == 'player': break elif traverse_obj(player_data, ('dataList', ...)): data_list += 
player_data['dataList'] formats = self._extract_m3u8_formats(traverse_obj(player_data, ( 'vodPlayerList', 'vodProfile', 0, 'vodServer', 0, 'video_url', {url_or_none})), video_id) subtitles = {} for caption in traverse_obj(player_data, ('vodPlayerList', 'caption')) or []: subtitles.setdefault(caption.get('lang') or 'und', []).append({ 'url': caption.get('smi_url'), 'ext': 'vtt', }) # Add member multicams as alternative formats if (traverse_obj(player_data, ('detail', 'has_cuesheet')) == 'Y' and traverse_obj(player_data, ('detail', 'is_omni_member')) == 'Y'): cuesheet = traverse_obj(self._download_json( 'https://idolplus.com/gapi/contents/v1.0/content/cuesheet', video_id, 'Downloading JSON metadata for member multicams', headers={'App_type': 'web', 'Country_Code': 'KR'}, query={ 'ALBUM_ID': video_id, 'COUNTRY_GRP': '00010', 'LANG': 'en', 'SA_ID': '999999999998', 'COUNTRY_CODE': 'KR', }), ('data', 'cuesheet_item', 0)) for member in traverse_obj(cuesheet, ('members', ...)): index = try_call(lambda: int(member['omni_view_index']) - 1) member_video_url = traverse_obj(cuesheet, ('omni_view', index, 'cdn_url', 0, 'url', {url_or_none})) if not member_video_url: continue member_formats = self._extract_m3u8_formats( member_video_url, video_id, note=f'Downloading m3u8 for multicam {member["name"]}') for mf in member_formats: mf['format_id'] = f'{mf["format_id"]}-{member["name"].replace(" ", "_")}' formats.extend(member_formats) return { 'id': video_id, 'title': traverse_obj(player_data, ('detail', 'albumName')), 'formats': formats, 'subtitles': subtitles, 'release_date': traverse_obj(player_data, ('detail', 'broadcastDate')), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/loom.py
yt_dlp/extractor/loom.py
import json import textwrap import urllib.parse import uuid from .common import InfoExtractor from ..utils import ( ExtractorError, determine_ext, filter_dict, int_or_none, parse_iso8601, update_url, url_or_none, ) from ..utils.traversal import traverse_obj class LoomIE(InfoExtractor): IE_NAME = 'loom' _VALID_URL = r'https?://(?:www\.)?loom\.com/(?:share|embed)/(?P<id>[\da-f]{32})' _EMBED_REGEX = [rf'<iframe[^>]+\bsrc=["\'](?P<url>{_VALID_URL})'] _TESTS = [{ # m3u8 raw-url, mp4 transcoded-url, cdn url == raw-url, json subs only 'url': 'https://www.loom.com/share/43d05f362f734614a2e81b4694a3a523', 'md5': 'bfc2d7e9c2e0eb4813212230794b6f42', 'info_dict': { 'id': '43d05f362f734614a2e81b4694a3a523', 'ext': 'mp4', 'title': 'A Ruler for Windows - 28 March 2022', 'uploader': 'wILLIAM PIP', 'upload_date': '20220328', 'timestamp': 1648454238, 'duration': 27, }, }, { # webm raw-url, mp4 transcoded-url, cdn url == transcoded-url, no subs 'url': 'https://www.loom.com/share/c43a642f815f4378b6f80a889bb73d8d', 'md5': '70f529317be8cf880fcc2c649a531900', 'info_dict': { 'id': 'c43a642f815f4378b6f80a889bb73d8d', 'ext': 'webm', 'title': 'Lilah Nielsen Intro Video', 'uploader': 'Lilah Nielsen', 'upload_date': '20200826', 'timestamp': 1598480716, 'duration': 20, }, }, { # m3u8 raw-url, mp4 transcoded-url, cdn url == raw-url, vtt sub and json subs 'url': 'https://www.loom.com/share/9458bcbf79784162aa62ffb8dd66201b', 'md5': '7b6bfdef8181c4ffc376e18919a4dcc2', 'info_dict': { 'id': '9458bcbf79784162aa62ffb8dd66201b', 'ext': 'mp4', 'title': 'Sharing screen with gpt-4', 'description': 'Sharing screen with GPT 4 vision model and asking questions to guide through blender.', 'uploader': 'Suneel Matham', 'chapters': 'count:3', 'upload_date': '20231109', 'timestamp': 1699518978, 'duration': 93, }, }, { # mpd raw-url, mp4 transcoded-url, cdn url == raw-url, no subs 'url': 'https://www.loom.com/share/24351eb8b317420289b158e4b7e96ff2', 'info_dict': { 'id': '24351eb8b317420289b158e4b7e96ff2', 'ext': 
'webm', 'title': 'OMFG clown', 'description': 'md5:285c5ee9d62aa087b7e3271b08796815', 'uploader': 'Brailey Bragg', 'upload_date': '20210924', 'timestamp': 1632519618, 'duration': 210, }, 'params': {'skip_download': 'dash'}, 'expected_warnings': ['Failed to parse JSON'], # transcoded-url no longer available }, { # password-protected 'url': 'https://www.loom.com/share/50e26e8aeb7940189dff5630f95ce1f4', 'md5': '5cc7655e7d55d281d203f8ffd14771f7', 'info_dict': { 'id': '50e26e8aeb7940189dff5630f95ce1f4', 'ext': 'mp4', 'title': 'iOS Mobile Upload', 'uploader': 'Simon Curran', 'upload_date': '20200520', 'timestamp': 1590000123, 'duration': 35, }, 'params': {'videopassword': 'seniorinfants2'}, 'expected_warnings': ['Failed to parse JSON'], # transcoded-url no longer available }, { # embed, transcoded-url endpoint sends empty JSON response, split video and audio HLS formats 'url': 'https://www.loom.com/embed/ddcf1c1ad21f451ea7468b1e33917e4e', 'md5': 'f983a0f02f24331738b2f43aecb05256', 'info_dict': { 'id': 'ddcf1c1ad21f451ea7468b1e33917e4e', 'ext': 'mp4', 'title': 'CF Reset User\'s Password', 'uploader': 'Aimee Heintz', 'upload_date': '20220707', 'timestamp': 1657216459, 'duration': 181, }, 'params': {'format': 'bestvideo'}, # Test video-only fixup 'expected_warnings': ['Failed to parse JSON'], }] _WEBPAGE_TESTS = [{ 'url': 'https://www.loom.com/community/e1229802a8694a09909e8ba0fbb6d073-pg', 'md5': 'ec838cd01b576cf0386f32e1ae424609', 'info_dict': { 'id': 'e1229802a8694a09909e8ba0fbb6d073', 'ext': 'mp4', 'title': 'Rexie Jane Cimafranca - Founder\'s Presentation', 'uploader': 'Rexie Cimafranca', 'upload_date': '20230213', 'duration': 247, 'timestamp': 1676274030, }, 'skip': '404 Not Found', }] _GRAPHQL_VARIABLES = { 'GetVideoSource': { 'acceptableMimes': ['DASH', 'M3U8', 'MP4', 'WEBM'], }, } _GRAPHQL_QUERIES = { 'GetVideoSSR': textwrap.dedent('''\ query GetVideoSSR($videoId: ID!, $password: String) { getVideo(id: $videoId, password: $password) { __typename ... 
on PrivateVideo { id status message __typename } ... on VideoPasswordMissingOrIncorrect { id message __typename } ... on RegularUserVideo { id __typename createdAt description download_enabled folder_id is_protected needs_password owner { display_name __typename } privacy s3_id name video_properties { avgBitRate client camera_enabled client_version duration durationMs format height microphone_enabled os os_version recordingClient recording_type recording_version screen_type tab_audio trim_duration width __typename } playable_duration source_duration visibility } } }\n'''), 'GetVideoSource': textwrap.dedent('''\ query GetVideoSource($videoId: ID!, $password: String, $acceptableMimes: [CloudfrontVideoAcceptableMime]) { getVideo(id: $videoId, password: $password) { ... on RegularUserVideo { id nullableRawCdnUrl(acceptableMimes: $acceptableMimes, password: $password) { url credentials { Policy Signature KeyPairId __typename } __typename } __typename } __typename } }\n'''), 'FetchVideoTranscript': textwrap.dedent('''\ query FetchVideoTranscript($videoId: ID!, $password: String) { fetchVideoTranscript(videoId: $videoId, password: $password) { ... on VideoTranscriptDetails { id video_id source_url captions_source_url __typename } ... on GenericError { message __typename } __typename } }\n'''), 'FetchChapters': textwrap.dedent('''\ query FetchChapters($videoId: ID!, $password: String) { fetchVideoChapters(videoId: $videoId, password: $password) { ... on VideoChapters { video_id content __typename } ... on EmptyChaptersPayload { content __typename } ... on InvalidRequestWarning { message __typename } ... 
on Error { message __typename } __typename } }\n'''), } _APOLLO_GRAPHQL_VERSION = '45a5bd4' def _call_graphql_api(self, operation_name, video_id, note=None, errnote=None, fatal=True): password = self.get_param('videopassword') return self._download_json( 'https://www.loom.com/graphql', video_id, note or 'Downloading GraphQL JSON', errnote or 'Failed to download GraphQL JSON', headers={ 'Accept': 'application/json', 'Content-Type': 'application/json', 'x-loom-request-source': f'loom_web_{self._APOLLO_GRAPHQL_VERSION}', 'apollographql-client-name': 'web', 'apollographql-client-version': self._APOLLO_GRAPHQL_VERSION, 'graphql-operation-name': operation_name, 'Origin': 'https://www.loom.com', }, data=json.dumps({ 'operationName': operation_name, 'variables': { 'videoId': video_id, 'password': password, **self._GRAPHQL_VARIABLES.get(operation_name, {}), }, 'query': self._GRAPHQL_QUERIES[operation_name], }, separators=(',', ':')).encode(), fatal=fatal) def _call_url_api(self, endpoint, video_id): response = self._download_json( f'https://www.loom.com/api/campaigns/sessions/{video_id}/{endpoint}', video_id, f'Downloading {endpoint} JSON', f'Failed to download {endpoint} JSON', fatal=False, headers={'Accept': 'application/json', 'Content-Type': 'application/json'}, data=json.dumps({ 'anonID': str(uuid.uuid4()), 'deviceID': None, 'force_original': False, # HTTP error 401 if True 'password': self.get_param('videopassword'), }, separators=(',', ':')).encode()) return traverse_obj(response, ('url', {url_or_none})) def _extract_formats(self, video_id, metadata, video_data): formats = [] video_properties = traverse_obj(metadata, ('video_properties', { 'width': ('width', {int_or_none}), 'height': ('height', {int_or_none}), 'acodec': ('microphone_enabled', {lambda x: 'none' if x is False else None}), })) def get_formats(format_url, format_id, quality): if not format_url: return ext = determine_ext(format_url) query = urllib.parse.urlparse(format_url).query if ext == 'm3u8': # 
Extract pre-merged HLS formats to avoid buggy parsing of metadata in split playlists format_url = format_url.replace('-split.m3u8', '.m3u8') m3u8_formats = self._extract_m3u8_formats( format_url, video_id, 'mp4', m3u8_id=f'hls-{format_id}', fatal=False, quality=quality) # Sometimes only split video/audio formats are available, need to fixup video-only formats is_not_premerged = 'none' in traverse_obj(m3u8_formats, (..., 'vcodec')) for fmt in m3u8_formats: if is_not_premerged and fmt.get('vcodec') != 'none': fmt['acodec'] = 'none' yield { **fmt, 'url': update_url(fmt['url'], query=query), 'extra_param_to_segment_url': query, } elif ext == 'mpd': dash_formats = self._extract_mpd_formats( format_url, video_id, mpd_id=f'dash-{format_id}', fatal=False) for fmt in dash_formats: yield { **fmt, 'extra_param_to_segment_url': query, 'quality': quality, } else: yield { 'url': format_url, 'ext': ext, 'format_id': f'http-{format_id}', 'quality': quality, **video_properties, } raw_url = self._call_url_api('raw-url', video_id) formats.extend(get_formats(raw_url, 'raw', quality=1)) # original quality transcoded_url = self._call_url_api('transcoded-url', video_id) formats.extend(get_formats(transcoded_url, 'transcoded', quality=-1)) # transcoded quality cdn_url = traverse_obj(video_data, ('data', 'getVideo', 'nullableRawCdnUrl', 'url', {url_or_none})) # cdn_url is usually a dupe, but the raw-url/transcoded-url endpoints could return errors valid_urls = [update_url(url, query=None) for url in (raw_url, transcoded_url) if url] if cdn_url and update_url(cdn_url, query=None) not in valid_urls: formats.extend(get_formats(cdn_url, 'cdn', quality=0)) # could be original or transcoded return formats def _get_subtitles(self, video_id): subs_data = self._call_graphql_api( 'FetchVideoTranscript', video_id, 'Downloading GraphQL subtitles JSON', fatal=False) return filter_dict({ 'en': traverse_obj(subs_data, ( 'data', 'fetchVideoTranscript', ('source_url', 'captions_source_url'), { 'url': 
{url_or_none}, })) or None, }) def _real_extract(self, url): video_id = self._match_id(url) metadata = traverse_obj( self._call_graphql_api('GetVideoSSR', video_id, 'Downloading GraphQL metadata JSON', fatal=False), ('data', 'getVideo', {dict})) or {} if metadata.get('__typename') == 'VideoPasswordMissingOrIncorrect': if not self.get_param('videopassword'): raise ExtractorError( 'This video is password-protected, use the --video-password option', expected=True) raise ExtractorError('Invalid video password', expected=True) video_data = self._call_graphql_api( 'GetVideoSource', video_id, 'Downloading GraphQL video JSON') chapter_data = self._call_graphql_api( 'FetchChapters', video_id, 'Downloading GraphQL chapters JSON', fatal=False) duration = traverse_obj(metadata, ('video_properties', 'duration', {int_or_none})) return { 'id': video_id, 'duration': duration, 'chapters': self._extract_chapters_from_description( traverse_obj(chapter_data, ('data', 'fetchVideoChapters', 'content', {str})), duration) or None, 'formats': self._extract_formats(video_id, metadata, video_data), 'subtitles': self.extract_subtitles(video_id), **traverse_obj(metadata, { 'title': ('name', {str}), 'description': ('description', {str}), 'uploader': ('owner', 'display_name', {str}), 'timestamp': ('createdAt', {parse_iso8601}), }), } class LoomFolderIE(InfoExtractor): _WORKING = False IE_NAME = 'loom:folder' _VALID_URL = r'https?://(?:www\.)?loom\.com/share/folder/(?P<id>[\da-f]{32})' _TESTS = [{ # 2 subfolders, no videos in root 'url': 'https://www.loom.com/share/folder/997db4db046f43e5912f10dc5f817b5c', 'playlist_mincount': 16, 'info_dict': { 'id': '997db4db046f43e5912f10dc5f817b5c', 'title': 'Blending Lessons', }, }, { # only videos, no subfolders 'url': 'https://www.loom.com/share/folder/9a8a87f6b6f546d9a400c8e7575ff7f2', 'playlist_mincount': 12, 'info_dict': { 'id': '9a8a87f6b6f546d9a400c8e7575ff7f2', 'title': 'List A- a, i, o', }, }, { # videos in root and empty subfolder 'url': 
'https://www.loom.com/share/folder/886e534218c24fd292e97e9563078cc4', 'playlist_mincount': 21, 'info_dict': { 'id': '886e534218c24fd292e97e9563078cc4', 'title': 'Medicare Agent Training videos', }, }, { # videos in root and videos in subfolders 'url': 'https://www.loom.com/share/folder/b72c4ecdf04745da9403926d80a40c38', 'playlist_mincount': 21, 'info_dict': { 'id': 'b72c4ecdf04745da9403926d80a40c38', 'title': 'Quick Altos Q & A Tutorials', }, }, { # recursive folder extraction 'url': 'https://www.loom.com/share/folder/8b458a94e0e4449b8df9ea7a68fafc4e', 'playlist_count': 23, 'info_dict': { 'id': '8b458a94e0e4449b8df9ea7a68fafc4e', 'title': 'Sezer Texting Guide', }, }, { # more than 50 videos in 1 folder 'url': 'https://www.loom.com/share/folder/e056a91d290d47ca9b00c9d1df56c463', 'playlist_mincount': 61, 'info_dict': { 'id': 'e056a91d290d47ca9b00c9d1df56c463', 'title': 'User Videos', }, }, { # many subfolders 'url': 'https://www.loom.com/share/folder/c2dde8cc67454f0e99031677279d8954', 'playlist_mincount': 75, 'info_dict': { 'id': 'c2dde8cc67454f0e99031677279d8954', 'title': 'Honors 1', }, }, { 'url': 'https://www.loom.com/share/folder/bae17109a68146c7803454f2893c8cf8/Edpuzzle', 'only_matching': True, }] def _extract_folder_data(self, folder_id): return self._download_json( f'https://www.loom.com/v1/folders/{folder_id}', folder_id, 'Downloading folder info JSON', query={'limit': '10000'}) def _extract_folder_entries(self, folder_id, initial_folder_data=None): folder_data = initial_folder_data or self._extract_folder_data(folder_id) for video in traverse_obj(folder_data, ('videos', lambda _, v: v['id'])): video_id = video['id'] yield self.url_result( f'https://www.loom.com/share/{video_id}', LoomIE, video_id, video.get('name')) # Recurse into subfolders for subfolder_id in traverse_obj(folder_data, ( 'folders', lambda _, v: v['id'] != folder_id, 'id', {str})): yield from self._extract_folder_entries(subfolder_id) def _real_extract(self, url): playlist_id = 
self._match_id(url) playlist_data = self._extract_folder_data(playlist_id) return self.playlist_result( self._extract_folder_entries(playlist_id, playlist_data), playlist_id, traverse_obj(playlist_data, ('folder', 'name', {str.strip})))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/telequebec.py
yt_dlp/extractor/telequebec.py
from .common import InfoExtractor from ..utils import ( int_or_none, smuggle_url, try_get, unified_timestamp, ) class TeleQuebecBaseIE(InfoExtractor): BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s' @staticmethod def _brightcove_result(brightcove_id, player_id, account_id='6150020952001'): return { '_type': 'url_transparent', 'url': smuggle_url(TeleQuebecBaseIE.BRIGHTCOVE_URL_TEMPLATE % (account_id, player_id, brightcove_id), {'geo_countries': ['CA']}), 'ie_key': 'BrightcoveNew', } class TeleQuebecIE(TeleQuebecBaseIE): _VALID_URL = r'''(?x) https?:// (?: zonevideo\.telequebec\.tv/media| coucou\.telequebec\.tv/videos )/(?P<id>\d+) ''' _TESTS = [{ # available till 01.01.2023 'url': 'http://zonevideo.telequebec.tv/media/37578/un-petit-choc-et-puis-repart/un-chef-a-la-cabane', 'info_dict': { 'id': '6155972771001', 'ext': 'mp4', 'title': 'Un petit choc et puis repart!', 'description': 'md5:b04a7e6b3f74e32d7b294cffe8658374', 'timestamp': 1589262469, 'uploader_id': '6150020952001', 'upload_date': '20200512', }, 'add_ie': ['BrightcoveNew'], }, { 'url': 'https://zonevideo.telequebec.tv/media/55267/le-soleil/passe-partout', 'info_dict': { 'id': '6167180337001', 'ext': 'mp4', 'title': 'Le soleil', 'description': 'md5:64289c922a8de2abbe99c354daffde02', 'uploader_id': '6150020952001', 'upload_date': '20200625', 'timestamp': 1593090307, }, 'add_ie': ['BrightcoveNew'], }, { # no description 'url': 'http://zonevideo.telequebec.tv/media/30261', 'only_matching': True, }, { 'url': 'https://coucou.telequebec.tv/videos/41788/idee-de-genie/l-heure-du-bain', 'only_matching': True, }] def _real_extract(self, url): media_id = self._match_id(url) media = self._download_json( 'https://mnmedias.api.telequebec.tv/api/v3/media/' + media_id, media_id)['media'] source_id = next(source_info['sourceId'] for source_info in media['streamInfos'] if source_info.get('source') == 'Brightcove') info = self._brightcove_result(source_id, '22gPKdt7f') product = 
media.get('product') or {} season = product.get('season') or {} info.update({ 'description': try_get(media, lambda x: x['descriptions'][-1]['text'], str), 'series': try_get(season, lambda x: x['serie']['titre']), 'season': season.get('name'), 'season_number': int_or_none(season.get('seasonNo')), 'episode': product.get('titre'), 'episode_number': int_or_none(product.get('episodeNo')), }) return info class TeleQuebecSquatIE(InfoExtractor): _VALID_URL = r'https?://squat\.telequebec\.tv/videos/(?P<id>\d+)' _TESTS = [{ 'url': 'https://squat.telequebec.tv/videos/9314', 'info_dict': { 'id': 'd59ae78112d542e793d83cc9d3a5b530', 'ext': 'mp4', 'title': 'Poupeflekta', 'description': 'md5:2f0718f8d2f8fece1646ee25fb7bce75', 'duration': 1351, 'timestamp': 1569057600, 'upload_date': '20190921', 'series': 'Miraculous : Les Aventures de Ladybug et Chat Noir', 'season': 'Saison 3', 'season_number': 3, 'episode_number': 57, }, 'params': { 'skip_download': True, }, }] def _real_extract(self, url): video_id = self._match_id(url) video = self._download_json( f'https://squat.api.telequebec.tv/v1/videos/{video_id}', video_id) media_id = video['sourceId'] return { '_type': 'url_transparent', 'url': f'http://zonevideo.telequebec.tv/media/{media_id}', 'ie_key': TeleQuebecIE.ie_key(), 'id': media_id, 'title': video.get('titre'), 'description': video.get('description'), 'timestamp': unified_timestamp(video.get('datePublication')), 'series': video.get('container'), 'season': video.get('saison'), 'season_number': int_or_none(video.get('noSaison')), 'episode_number': int_or_none(video.get('episode')), } class TeleQuebecEmissionIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// (?: [^/]+\.telequebec\.tv/emissions/| (?:www\.)?telequebec\.tv/ ) (?P<id>[^?#&]+) ''' _TESTS = [{ 'url': 'http://lindicemcsween.telequebec.tv/emissions/100430013/des-soins-esthetiques-a-377-d-interets-annuels-ca-vous-tente', 'info_dict': { 'id': '6154476028001', 'ext': 'mp4', 'title': 'Des soins esthétiques à 377 % 
d’intérêts annuels, ça vous tente?', 'description': 'md5:cb4d378e073fae6cce1f87c00f84ae9f', 'upload_date': '20200505', 'timestamp': 1588713424, 'uploader_id': '6150020952001', }, }, { 'url': 'http://bancpublic.telequebec.tv/emissions/emission-49/31986/jeunes-meres-sous-pression', 'only_matching': True, }, { 'url': 'http://www.telequebec.tv/masha-et-michka/epi059masha-et-michka-3-053-078', 'only_matching': True, }, { 'url': 'http://www.telequebec.tv/documentaire/bebes-sur-mesure/', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) media_id = self._search_regex( r'mediaId\s*:\s*(?P<id>\d+)', webpage, 'media id') return self.url_result( 'http://zonevideo.telequebec.tv/media/' + media_id, TeleQuebecIE.ie_key()) class TeleQuebecLiveIE(TeleQuebecBaseIE): _VALID_URL = r'https?://zonevideo\.telequebec\.tv/(?P<id>endirect)' _TEST = { 'url': 'http://zonevideo.telequebec.tv/endirect/', 'info_dict': { 'id': '6159095684001', 'ext': 'mp4', 'title': 're:^Télé-Québec [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'is_live': True, 'description': 'Canal principal de Télé-Québec', 'uploader_id': '6150020952001', 'timestamp': 1590439901, 'upload_date': '20200525', }, 'params': { 'skip_download': True, }, } def _real_extract(self, url): return self._brightcove_result('6159095684001', 'skCsmi2Uw') class TeleQuebecVideoIE(TeleQuebecBaseIE): _VALID_URL = r'https?://video\.telequebec\.tv/player(?:-live)?/(?P<id>\d+)' _TESTS = [{ 'url': 'https://video.telequebec.tv/player/31110/stream', 'info_dict': { 'id': '6202570652001', 'ext': 'mp4', 'title': 'Le coût du véhicule le plus vendu au Canada / Tous les frais liés à la procréation assistée', 'description': 'md5:685a7e4c450ba777c60adb6e71e41526', 'upload_date': '20201019', 'timestamp': 1603115930, 'uploader_id': '6101674910001', }, }, { 'url': 'https://video.telequebec.tv/player-live/28527', 'only_matching': True, }] def _call_api(self, path, video_id): 
return self._download_json( 'http://beacon.playback.api.brightcove.com/telequebec/api/assets/' + path, video_id, query={'device_layout': 'web', 'device_type': 'web'})['data'] def _real_extract(self, url): asset_id = self._match_id(url) asset = self._call_api(asset_id, asset_id)['asset'] stream = self._call_api( asset_id + '/streams/' + asset['streams'][0]['id'], asset_id)['stream'] stream_url = stream['url'] account_id = try_get( stream, lambda x: x['video_provider_details']['account_id']) or '6101674910001' info = self._brightcove_result(stream_url, 'default', account_id) info.update({ 'description': asset.get('long_description') or asset.get('short_description'), 'series': asset.get('series_original_name'), 'season_number': int_or_none(asset.get('season_number')), 'episode': asset.get('original_name'), 'episode_number': int_or_none(asset.get('episode_number')), }) return info
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/rheinmaintv.py
yt_dlp/extractor/rheinmaintv.py
from .common import InfoExtractor from ..utils import extract_attributes, merge_dicts, remove_end class RheinMainTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?rheinmaintv\.de/sendungen/(?:[\w-]+/)*(?P<video_id>(?P<display_id>[\w-]+)/vom-\d{2}\.\d{2}\.\d{4}(?:/\d+)?)' _TESTS = [{ 'url': 'https://www.rheinmaintv.de/sendungen/beitrag-video/auf-dem-weg-zur-deutschen-meisterschaft/vom-07.11.2022/', 'info_dict': { 'id': 'auf-dem-weg-zur-deutschen-meisterschaft-vom-07.11.2022', 'ext': 'ismv', # ismv+isma will be merged into mp4 'alt_title': 'Auf dem Weg zur Deutschen Meisterschaft', 'title': 'Auf dem Weg zur Deutschen Meisterschaft', 'upload_date': '20221108', 'view_count': int, 'display_id': 'auf-dem-weg-zur-deutschen-meisterschaft', 'thumbnail': r're:^https://.+\.jpg', 'description': 'md5:48c59b74192bc819a9b34af1d5ed1eb9', 'timestamp': 1667933057, 'duration': 243.0, }, 'params': {'skip_download': 'ism'}, }, { 'url': 'https://www.rheinmaintv.de/sendungen/beitrag-video/formationsgemeinschaft-rhein-main-bei-den-deutschen-meisterschaften/vom-14.11.2022/', 'info_dict': { 'id': 'formationsgemeinschaft-rhein-main-bei-den-deutschen-meisterschaften-vom-14.11.2022', 'ext': 'ismv', 'title': 'Formationsgemeinschaft Rhein-Main bei den Deutschen Meisterschaften', 'timestamp': 1668526214, 'display_id': 'formationsgemeinschaft-rhein-main-bei-den-deutschen-meisterschaften', 'alt_title': 'Formationsgemeinschaft Rhein-Main bei den Deutschen Meisterschaften', 'view_count': int, 'thumbnail': r're:^https://.+\.jpg', 'duration': 345.0, 'description': 'md5:9370ba29526984006c2cba1372e5c5a0', 'upload_date': '20221115', }, 'params': {'skip_download': 'ism'}, }, { 'url': 'https://www.rheinmaintv.de/sendungen/beitrag-video/casino-mainz-bei-den-deutschen-meisterschaften/vom-14.11.2022/', 'info_dict': { 'id': 'casino-mainz-bei-den-deutschen-meisterschaften-vom-14.11.2022', 'ext': 'ismv', 'title': 'Casino Mainz bei den Deutschen Meisterschaften', 'view_count': int, 'timestamp': 1668527402, 
'alt_title': 'Casino Mainz bei den Deutschen Meisterschaften', 'upload_date': '20221115', 'display_id': 'casino-mainz-bei-den-deutschen-meisterschaften', 'duration': 348.0, 'thumbnail': r're:^https://.+\.jpg', 'description': 'md5:70fc1660eeba96da17199e5bdff4c0aa', }, 'params': {'skip_download': 'ism'}, }, { 'url': 'https://www.rheinmaintv.de/sendungen/beitrag-video/bricks4kids/vom-22.06.2022/', 'only_matching': True, }] def _real_extract(self, url): mobj = self._match_valid_url(url) display_id = mobj.group('display_id') video_id = mobj.group('video_id').replace('/', '-') webpage = self._download_webpage(url, video_id) source, img = self._search_regex(r'(?s)(?P<source><source[^>]*>)(?P<img><img[^>]*>)', webpage, 'video', group=('source', 'img')) source = extract_attributes(source) img = extract_attributes(img) raw_json_ld = list(self._yield_json_ld(webpage, video_id)) json_ld = self._json_ld(raw_json_ld, video_id) json_ld.pop('url', None) ism_manifest_url = ( source.get('src') or next(json_ld.get('embedUrl') for json_ld in raw_json_ld if json_ld.get('@type') == 'VideoObject') ) formats, subtitles = self._extract_ism_formats_and_subtitles(ism_manifest_url, video_id) return merge_dicts({ 'id': video_id, 'display_id': display_id, 'title': self._html_search_regex(r'<h1><span class="title">([^<]*)</span>', webpage, 'headline', default=None) or img.get('title') or json_ld.get('title') or self._og_search_title(webpage) or remove_end(self._html_extract_title(webpage), ' -'), 'alt_title': img.get('alt'), 'description': json_ld.get('description') or self._og_search_description(webpage), 'formats': formats, 'subtitles': subtitles, 'thumbnails': [{'url': img['src']}] if 'src' in img else json_ld.get('thumbnails'), }, json_ld)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/truth.py
yt_dlp/extractor/truth.py
from .common import InfoExtractor from ..utils import ( clean_html, format_field, int_or_none, strip_or_none, traverse_obj, unified_timestamp, ) class TruthIE(InfoExtractor): _VALID_URL = r'https?://truthsocial\.com/@[^/]+/posts/(?P<id>\d+)' _TESTS = [ { 'url': 'https://truthsocial.com/@realDonaldTrump/posts/108779000807761862', 'md5': '4a5fb1470c192e493d9efd6f19e514d3', 'info_dict': { 'id': '108779000807761862', 'ext': 'qt', 'title': 'Truth video #108779000807761862', 'timestamp': 1659835827, 'upload_date': '20220807', 'uploader': 'Donald J. Trump', 'uploader_id': 'realDonaldTrump', 'uploader_url': 'https://truthsocial.com/@realDonaldTrump', 'repost_count': int, 'comment_count': int, 'like_count': int, }, }, { 'url': 'https://truthsocial.com/@ProjectVeritasAction/posts/108618228543962049', 'md5': 'fd47ba68933f9dce27accc52275be9c3', 'info_dict': { 'id': '108618228543962049', 'ext': 'mp4', 'title': 'md5:debde7186cf83f60ff7b44dbb9444e35', 'description': 'md5:de2fc49045bf92bb8dc97e56503b150f', 'timestamp': 1657382637, 'upload_date': '20220709', 'uploader': 'Project Veritas Action', 'uploader_id': 'ProjectVeritasAction', 'uploader_url': 'https://truthsocial.com/@ProjectVeritasAction', 'repost_count': int, 'comment_count': int, 'like_count': int, }, }, ] def _real_extract(self, url): video_id = self._match_id(url) status = self._download_json(f'https://truthsocial.com/api/v1/statuses/{video_id}', video_id) uploader_id = strip_or_none(traverse_obj(status, ('account', 'username'))) return { 'id': video_id, 'url': status['media_attachments'][0]['url'], 'title': '', 'description': strip_or_none(clean_html(status.get('content'))) or None, 'timestamp': unified_timestamp(status.get('created_at')), 'uploader': strip_or_none(traverse_obj(status, ('account', 'display_name'))), 'uploader_id': uploader_id, 'uploader_url': format_field(uploader_id, None, 'https://truthsocial.com/@%s'), 'repost_count': int_or_none(status.get('reblogs_count')), 'like_count': 
int_or_none(status.get('favourites_count')), 'comment_count': int_or_none(status.get('replies_count')), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/art19.py
yt_dlp/extractor/art19.py
import re from .common import InfoExtractor from ..utils import float_or_none, int_or_none, parse_iso8601, url_or_none from ..utils.traversal import traverse_obj class Art19IE(InfoExtractor): _UUID_REGEX = r'[\da-f]{8}-?[\da-f]{4}-?[\da-f]{4}-?[\da-f]{4}-?[\da-f]{12}' _VALID_URL = [ rf'https?://(?:www\.)?art19\.com/shows/[^/#?]+/episodes/(?P<id>{_UUID_REGEX})', rf'https?://rss\.art19\.com/episodes/(?P<id>{_UUID_REGEX})\.mp3', ] _EMBED_REGEX = [rf'<iframe[^>]+\bsrc=[\'"](?P<url>{_VALID_URL[0]})'] _TESTS = [{ 'url': 'https://rss.art19.com/episodes/5ba1413c-48b8-472b-9cc3-cfd952340bdb.mp3', 'info_dict': { 'id': '5ba1413c-48b8-472b-9cc3-cfd952340bdb', 'ext': 'mp3', 'title': 'Why Did DeSantis Drop Out?', 'series': 'The Daily Briefing', 'release_timestamp': 1705941275, 'description': 'md5:da38961da4a3f7e419471365e3c6b49f', 'episode': 'Episode 582', 'thumbnail': r're:^https?://content\.production\.cdn\.art19\.com.*\.jpeg$', 'series_id': 'ed52a0ab-08b1-4def-8afc-549e4d93296d', 'upload_date': '20240122', 'timestamp': 1705940815, 'episode_number': 582, 'modified_date': '20240122', 'episode_id': '5ba1413c-48b8-472b-9cc3-cfd952340bdb', 'modified_timestamp': 1705941275, 'release_date': '20240122', 'duration': 527.4, }, }, { 'url': 'https://art19.com/shows/scamfluencers/episodes/8319b776-4153-4d22-8630-631f204a03dd', 'info_dict': { 'id': '8319b776-4153-4d22-8630-631f204a03dd', 'ext': 'mp3', 'title': 'Martha Stewart: The Homemaker Hustler Part 2', 'modified_date': '20240116', 'upload_date': '20240105', 'modified_timestamp': 1705435802, 'episode_id': '8319b776-4153-4d22-8630-631f204a03dd', 'series_id': 'd3c9b8ca-26b3-42f4-9bd8-21d1a9031e75', 'thumbnail': r're:^https?://content\.production\.cdn\.art19\.com.*\.jpeg$', 'description': 'md5:4aa7cfd1358dc57e729835bc208d7893', 'release_timestamp': 1705305660, 'release_date': '20240115', 'timestamp': 1704481536, 'episode_number': 88, 'series': 'Scamfluencers', 'duration': 2588.37501, 'episode': 'Episode 88', }, }] _WEBPAGE_TESTS = [{ 
'url': 'https://www.nu.nl/formule-1/6291456/verstappen-wordt-een-synoniem-voor-formule-1.html', 'info_dict': { 'id': '7d42626a-7301-47db-bb8a-3b6f054d77d7', 'ext': 'mp3', 'title': "'Verstappen wordt een synoniem voor Formule 1'", 'season': 'Seizoen 6', 'description': 'md5:39a7159a31c4cda312b2e893bdd5c071', 'episode_id': '7d42626a-7301-47db-bb8a-3b6f054d77d7', 'duration': 3061.82111, 'series_id': '93f4e113-2a60-4609-a564-755058fa40d8', 'release_date': '20231126', 'modified_timestamp': 1701156004, 'thumbnail': r're:^https?://content\.production\.cdn\.art19\.com.*\.jpeg$', 'season_number': 6, 'episode_number': 52, 'modified_date': '20231128', 'upload_date': '20231126', 'timestamp': 1701025981, 'season_id': '36097c1e-7455-490d-a2fe-e2f10b4d5f26', 'series': 'De Boordradio', 'release_timestamp': 1701026308, 'episode': 'Episode 52', }, }, { 'url': 'https://www.wishtv.com/podcast-episode/larry-bucshon-announces-retirement-from-congress/', 'info_dict': { 'id': '8da368bd-08d1-46d0-afaa-c134a4af7dc0', 'ext': 'mp3', 'title': 'Larry Bucshon announces retirement from congress', 'upload_date': '20240115', 'episode_number': 148, 'episode': 'Episode 148', 'thumbnail': r're:^https?://content\.production\.cdn\.art19\.com.*\.jpeg$', 'release_date': '20240115', 'timestamp': 1705328205, 'release_timestamp': 1705329275, 'series': 'All INdiana Politics', 'modified_date': '20240117', 'modified_timestamp': 1705458901, 'series_id': 'c4af6c27-b10f-4ff2-9f84-0f407df86ff1', 'episode_id': '8da368bd-08d1-46d0-afaa-c134a4af7dc0', 'description': 'md5:53b5239e4d14973a87125c217c255b2a', 'duration': 1256.18848, }, }] @classmethod def _extract_embed_urls(cls, url, webpage): yield from super()._extract_embed_urls(url, webpage) for episode_id in re.findall( rf'<div[^>]+\bclass=[\'"][^\'"]*art19-web-player[^\'"]*[\'"][^>]+\bdata-episode-id=[\'"]({cls._UUID_REGEX})[\'"]', webpage): yield f'https://rss.art19.com/episodes/{episode_id}.mp3' def _real_extract(self, url): episode_id = self._match_id(url) 
player_metadata = self._download_json( f'https://art19.com/episodes/{episode_id}', episode_id, note='Downloading player metadata', fatal=False, headers={'Accept': 'application/vnd.art19.v0+json'}) rss_metadata = self._download_json( f'https://rss.art19.com/episodes/{episode_id}.json', episode_id, fatal=False, note='Downloading RSS metadata') formats = [{ 'format_id': 'direct', 'url': f'https://rss.art19.com/episodes/{episode_id}.mp3', 'vcodec': 'none', 'acodec': 'mp3', }] for fmt_id, fmt_data in traverse_obj(rss_metadata, ('content', 'media', {dict.items}, ...)): if fmt_id == 'waveform_bin': continue fmt_url = traverse_obj(fmt_data, ('url', {url_or_none})) if not fmt_url: continue formats.append({ 'format_id': fmt_id, 'url': fmt_url, 'vcodec': 'none', 'acodec': fmt_id, 'quality': -2 if fmt_id == 'ogg' else -1, }) return { 'id': episode_id, 'formats': formats, **traverse_obj(player_metadata, ('episode', { 'title': ('title', {str}), 'description': ('description_plain', {str}), 'episode_id': ('id', {str}), 'episode_number': ('episode_number', {int_or_none}), 'season_id': ('season_id', {str}), 'series_id': ('series_id', {str}), 'timestamp': ('created_at', {parse_iso8601}), 'release_timestamp': ('released_at', {parse_iso8601}), 'modified_timestamp': ('updated_at', {parse_iso8601}), })), **traverse_obj(rss_metadata, ('content', { 'title': ('episode_title', {str}), 'description': ('episode_description_plain', {str}), 'episode_id': ('episode_id', {str}), 'episode_number': ('episode_number', {int_or_none}), 'season': ('season_title', {str}), 'season_id': ('season_id', {str}), 'season_number': ('season_number', {int_or_none}), 'series': ('series_title', {str}), 'series_id': ('series_id', {str}), 'thumbnail': ('cover_image', {url_or_none}), 'duration': ('duration', {float_or_none}), })), } class Art19ShowIE(InfoExtractor): _VALID_URL_BASE = r'https?://(?:www\.)?art19\.com/shows/(?P<id>[\w-]+)(?:/embed)?/?' 
_VALID_URL = [ rf'{_VALID_URL_BASE}(?:$|[#?])', r'https?://rss\.art19\.com/(?P<id>[\w-]+)/?(?:$|[#?])', ] _EMBED_REGEX = [rf'<iframe[^>]+\bsrc=[\'"](?P<url>{_VALID_URL_BASE}[^\'"])'] _TESTS = [{ 'url': 'https://www.art19.com/shows/5898c087-a14f-48dc-b6fc-a2280a1ff6e0/', 'info_dict': { '_type': 'playlist', 'id': '5898c087-a14f-48dc-b6fc-a2280a1ff6e0', 'display_id': 'echt-gebeurd', 'title': 'Echt Gebeurd', 'description': 'md5:5fd11dc80b76e51ffd34b6067fd5e560', 'timestamp': 1492642167, 'upload_date': '20170419', 'modified_timestamp': int, 'modified_date': str, 'tags': 'count:7', }, 'playlist_mincount': 425, }, { 'url': 'https://www.art19.com/shows/echt-gebeurd', 'info_dict': { '_type': 'playlist', 'id': '5898c087-a14f-48dc-b6fc-a2280a1ff6e0', 'display_id': 'echt-gebeurd', 'title': 'Echt Gebeurd', 'description': 'md5:5fd11dc80b76e51ffd34b6067fd5e560', 'timestamp': 1492642167, 'upload_date': '20170419', 'modified_timestamp': int, 'modified_date': str, 'tags': 'count:7', }, 'playlist_mincount': 425, }, { 'url': 'https://rss.art19.com/scamfluencers', 'info_dict': { '_type': 'playlist', 'id': 'd3c9b8ca-26b3-42f4-9bd8-21d1a9031e75', 'display_id': 'scamfluencers', 'title': 'Scamfluencers', 'description': 'md5:7d239d670c0ced6dadbf71c4caf764b7', 'timestamp': 1647368573, 'upload_date': '20220315', 'modified_timestamp': int, 'modified_date': str, 'tags': [], }, 'playlist_mincount': 90, }, { 'url': 'https://art19.com/shows/enthuellt/embed', 'info_dict': { '_type': 'playlist', 'id': 'e2cacf57-bb8a-4263-aa81-719bcdd4f80c', 'display_id': 'enthuellt', 'title': 'Enthüllt', 'description': 'md5:17752246643414a2fd51744fc9a1c08e', 'timestamp': 1601645860, 'upload_date': '20201002', 'modified_timestamp': int, 'modified_date': str, 'tags': 'count:10', }, 'playlist_mincount': 10, }] _WEBPAGE_TESTS = [{ 'url': 'https://deconstructingyourself.com/deconstructing-yourself-podcast', 'info_dict': { '_type': 'playlist', 'id': 'cfbb9b01-c295-4adb-8726-adde7c03cf21', 'display_id': 
'deconstructing-yourself', 'title': 'Deconstructing Yourself', 'description': 'md5:dab5082b28b248a35476abf64768854d', 'timestamp': 1570581181, 'upload_date': '20191009', 'modified_timestamp': int, 'modified_date': str, 'tags': 'count:5', }, 'playlist_mincount': 80, }, { 'url': 'https://chicagoreader.com/columns-opinion/podcasts/ben-joravsky-show-podcast-episodes/', 'info_dict': { '_type': 'playlist', 'id': '9dfa2c37-ab87-4c13-8388-4897914313ec', 'display_id': 'the-ben-joravsky-show', 'title': 'The Ben Joravsky Show', 'description': 'md5:c0f3ec0ee0dbea764390e521adc8780a', 'timestamp': 1550875095, 'upload_date': '20190222', 'modified_timestamp': int, 'modified_date': str, 'tags': ['Chicago Politics', 'chicago', 'Ben Joravsky'], }, 'playlist_mincount': 1900, }] @classmethod def _extract_embed_urls(cls, url, webpage): yield from super()._extract_embed_urls(url, webpage) for series_id in re.findall( r'<div[^>]+\bclass=[\'"][^\'"]*art19-web-player[^\'"]*[\'"][^>]+\bdata-series-id=[\'"]([\w-]+)[\'"]', webpage): yield f'https://art19.com/shows/{series_id}' def _real_extract(self, url): series_id = self._match_id(url) series_metadata = self._download_json( f'https://art19.com/series/{series_id}', series_id, note='Downloading series metadata', headers={'Accept': 'application/vnd.art19.v0+json'}) return { '_type': 'playlist', 'entries': [ self.url_result(f'https://rss.art19.com/episodes/{episode_id}.mp3', Art19IE) for episode_id in traverse_obj(series_metadata, ('series', 'episode_ids', ..., {str})) ], **traverse_obj(series_metadata, ('series', { 'id': ('id', {str}), 'display_id': ('slug', {str}), 'title': ('title', {str}), 'description': ('description_plain', {str}), 'timestamp': ('created_at', {parse_iso8601}), 'modified_timestamp': ('updated_at', {parse_iso8601}), })), 'tags': traverse_obj(series_metadata, ('tags', ..., 'name', {str})), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/francaisfacile.py
yt_dlp/extractor/francaisfacile.py
import urllib.parse from .common import InfoExtractor from ..utils import ( float_or_none, url_or_none, ) from ..utils.traversal import traverse_obj class FrancaisFacileIE(InfoExtractor): _VALID_URL = r'https?://francaisfacile\.rfi\.fr/[a-z]{2}/(?:actualit%C3%A9|podcasts/[^/#?]+)/(?P<id>[^/#?]+)' _TESTS = [{ 'url': 'https://francaisfacile.rfi.fr/fr/actualit%C3%A9/20250305-r%C3%A9concilier-les-jeunes-avec-la-lecture-gr%C3%A2ce-aux-r%C3%A9seaux-sociaux', 'md5': '4f33674cb205744345cc835991100afa', 'info_dict': { 'id': 'WBMZ58952-FLE-FR-20250305', 'display_id': '20250305-réconcilier-les-jeunes-avec-la-lecture-grâce-aux-réseaux-sociaux', 'title': 'Réconcilier les jeunes avec la lecture grâce aux réseaux sociaux', 'url': 'https://aod-fle.akamaized.net/fle/sounds/fr/2025/03/05/6b6af52a-f9ba-11ef-a1f8-005056a97652.mp3', 'ext': 'mp3', 'description': 'md5:b903c63d8585bd59e8cc4d5f80c4272d', 'duration': 103.15, 'timestamp': 1741177984, 'upload_date': '20250305', }, }, { 'url': 'https://francaisfacile.rfi.fr/fr/actualit%C3%A9/20250307-argentine-le-sac-d-un-alpiniste-retrouv%C3%A9-40-ans-apr%C3%A8s-sa-mort', 'md5': 'b8c3a63652d4ae8e8092dda5700c1cd9', 'info_dict': { 'id': 'WBMZ59102-FLE-FR-20250307', 'display_id': '20250307-argentine-le-sac-d-un-alpiniste-retrouvé-40-ans-après-sa-mort', 'title': 'Argentine: le sac d\'un alpiniste retrouvé 40 ans après sa mort', 'url': 'https://aod-fle.akamaized.net/fle/sounds/fr/2025/03/07/8edf4082-fb46-11ef-8a37-005056bf762b.mp3', 'ext': 'mp3', 'description': 'md5:7fd088fbdf4a943bb68cf82462160dca', 'duration': 117.74, 'timestamp': 1741352789, 'upload_date': '20250307', }, }, { 'url': 'https://francaisfacile.rfi.fr/fr/podcasts/un-mot-une-histoire/20250317-le-mot-de-david-foenkinos-peut-%C3%AAtre', 'md5': 'db83c2cc2589b4c24571c6b6cf14f5f1', 'info_dict': { 'id': 'WBMZ59441-FLE-FR-20250317', 'display_id': '20250317-le-mot-de-david-foenkinos-peut-être', 'title': 'Le mot de David Foenkinos: «peut-être» - Un mot, une histoire', 'url': 
'https://aod-fle.akamaized.net/fle/sounds/fr/2025/03/17/4ca6cbbe-0315-11f0-a85b-005056a97652.mp3', 'ext': 'mp3', 'description': 'md5:3fe35fae035803df696bfa7af2496e49', 'duration': 198.96, 'timestamp': 1742210897, 'upload_date': '20250317', }, }] def _real_extract(self, url): display_id = urllib.parse.unquote(self._match_id(url)) webpage = self._download_webpage(url, display_id) data = self._search_json( r'<script[^>]+\bdata-media-id=[^>]+\btype="application/json"[^>]*>', webpage, 'audio data', display_id) return { 'id': data['mediaId'], 'display_id': display_id, 'vcodec': 'none', 'title': self._html_extract_title(webpage), **self._search_json_ld(webpage, display_id, fatal=False), **traverse_obj(data, { 'title': ('title', {str}), 'url': ('sources', ..., 'url', {url_or_none}, any), 'duration': ('sources', ..., 'duration', {float_or_none}, any), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/moview.py
yt_dlp/extractor/moview.py
from .jixie import JixieBaseIE class MoviewPlayIE(JixieBaseIE): _VALID_URL = r'https?://www\.moview\.id/play/\d+/(?P<id>[\w-]+)' _TESTS = [ { # drm hls, only use direct link 'url': 'https://www.moview.id/play/174/Candy-Monster', 'info_dict': { 'id': '146182', 'ext': 'mp4', 'display_id': 'Candy-Monster', 'uploader_id': 'Mo165qXUUf', 'duration': 528.2, 'title': 'Candy Monster', 'description': 'Mengapa Candy Monster ingin mengambil permen Chloe?', 'thumbnail': 'https://video.jixie.media/1034/146182/146182_1280x720.jpg', }, }, { # non-drm hls 'url': 'https://www.moview.id/play/75/Paris-Van-Java-Episode-16', 'info_dict': { 'id': '28210', 'ext': 'mp4', 'duration': 2595.666667, 'display_id': 'Paris-Van-Java-Episode-16', 'uploader_id': 'Mo165qXUUf', 'thumbnail': 'https://video.jixie.media/1003/28210/28210_1280x720.jpg', 'description': 'md5:2a5e18d98eef9b39d7895029cac96c63', 'title': 'Paris Van Java Episode 16', }, }, ] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) video_id = self._search_regex( r'video_id\s*=\s*"(?P<video_id>[^"]+)', webpage, 'video_id') return self._extract_data_from_jixie_id(display_id, video_id, webpage)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/thesun.py
yt_dlp/extractor/thesun.py
import re from .common import InfoExtractor from ..utils import extract_attributes class TheSunIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?the-?sun(\.co\.uk|\.com)/[^/]+/(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.thesun.co.uk/tvandshowbiz/2261604/orlando-bloom-and-katy-perry-post-adorable-instagram-video-together-celebrating-thanksgiving-after-split-rumours/', 'info_dict': { 'id': '2261604', 'title': 'md5:cba22f48bad9218b64d5bbe0e16afddf', }, 'playlist_count': 2, }, { 'url': 'https://www.the-sun.com/entertainment/7611415/1000lb-sisters-fans-rip-amy-dangerous-health-decision/', 'info_dict': { 'id': '7611415', 'title': 'md5:e0b9b976f79dc770e5c80f22f40bb844', }, 'playlist_count': 1, }] BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/default_default/index.html?videoId=%s' def _real_extract(self, url): article_id = self._match_id(url) webpage = self._download_webpage(url, article_id) entries = [] for video in re.findall( r'<video[^>]+data-video-id-pending=[^>]+>', webpage): attrs = extract_attributes(video) video_id = attrs['data-video-id-pending'] account_id = attrs.get('data-account', '5067014667001') entries.append(self.url_result( self.BRIGHTCOVE_URL_TEMPLATE % (account_id, video_id), 'BrightcoveNew', video_id)) return self.playlist_result( entries, article_id, self._og_search_title(webpage, fatal=False))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/rinsefm.py
yt_dlp/extractor/rinsefm.py
from .common import InfoExtractor from ..utils import ( MEDIA_EXTENSIONS, determine_ext, parse_iso8601, url_or_none, ) from ..utils.traversal import traverse_obj class RinseFMBaseIE(InfoExtractor): _API_BASE = 'https://rinse.fm/api/query/v1' @staticmethod def _parse_entry(entry): return { **traverse_obj(entry, { 'id': ('id', {str}), 'title': ('title', {str}), 'url': ('fileUrl', {url_or_none}), 'release_timestamp': ('episodeDate', {parse_iso8601}), 'thumbnail': ('featuredImage', 0, 'filename', {str}, {lambda x: x and f'https://rinse.imgix.net/media/{x}'}), 'webpage_url': ('slug', {str}, {lambda x: x and f'https://rinse.fm/episodes/{x}'}), }), 'vcodec': 'none', 'extractor_key': RinseFMIE.ie_key(), 'extractor': RinseFMIE.IE_NAME, } class RinseFMIE(RinseFMBaseIE): _VALID_URL = r'https?://(?:www\.)?rinse\.fm/episodes/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://rinse.fm/episodes/club-glow-15-12-2023-2000/', 'md5': '76ee0b719315617df42e15e710f46c7b', 'info_dict': { 'id': '1536535', 'ext': 'mp3', 'title': 'Club Glow - 15/12/2023 - 20:00', 'thumbnail': r're:^https://.+\.(?:jpg|JPG)$', 'release_timestamp': 1702598400, 'release_date': '20231215', }, }] def _real_extract(self, url): display_id = self._match_id(url) entry = self._download_json( f'{self._API_BASE}/episodes/{display_id}', display_id, note='Downloading episode data from API')['entry'] return self._parse_entry(entry) class RinseFMArtistPlaylistIE(RinseFMBaseIE): _VALID_URL = r'https?://(?:www\.)?rinse\.fm/shows/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://rinse.fm/shows/resources/', 'info_dict': { 'id': 'resources', 'title': '[re]sources', 'description': 'md5:fd6a7254e8273510e6d49fbf50edf392', }, 'playlist_mincount': 40, }, { 'url': 'https://www.rinse.fm/shows/esk', 'info_dict': { 'id': 'esk', 'title': 'Esk', 'description': 'md5:5893d7c1d411ae8dea7fba12f109aa98', }, 'playlist_mincount': 139, }] def _entries(self, data): for episode in traverse_obj(data, ( 'episodes', lambda _, v: determine_ext(v['fileUrl']) in 
MEDIA_EXTENSIONS.audio), ): yield self._parse_entry(episode) def _real_extract(self, url): playlist_id = self._match_id(url) api_data = self._download_json( f'{self._API_BASE}/shows/{playlist_id}', playlist_id, note='Downloading show data from API') return self.playlist_result( self._entries(api_data), playlist_id, **traverse_obj(api_data, ('entry', { 'title': ('title', {str}), 'description': ('description', {str}), })))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/videomore.py
yt_dlp/extractor/videomore.py
from .common import InfoExtractor from ..utils import ( int_or_none, parse_qs, ) class VideomoreBaseIE(InfoExtractor): _API_BASE_URL = 'https://more.tv/api/v3/web/' _VALID_URL_BASE = r'https?://(?:videomore\.ru|more\.tv)/' def _download_page_data(self, display_id): return self._download_json( self._API_BASE_URL + 'PageData', display_id, query={ 'url': '/' + display_id, })['attributes']['response']['data'] def _track_url_result(self, track): track_vod = track['trackVod'] video_url = track_vod.get('playerLink') or track_vod['link'] return self.url_result( video_url, VideomoreIE.ie_key(), track_vod.get('hubId')) class VideomoreIE(InfoExtractor): IE_NAME = 'videomore' _VALID_URL = r'''(?x) videomore:(?P<sid>\d+)$| https?:// (?: videomore\.ru/ (?: embed| [^/]+/[^/]+ )/| (?: (?:player\.)?videomore\.ru| siren\.more\.tv/player )/[^/]*\?.*?\btrack_id=| odysseus\.more.tv/player/(?P<partner_id>\d+)/ ) (?P<id>\d+) (?:[/?#&]|\.(?:xml|json)|$) ''' _EMBED_REGEX = [r'''(?x) (?: <iframe[^>]+src=([\'"])| <object[^>]+data=(["\'])https?://videomore\.ru/player\.swf\?.*config= )(?P<url>https?://videomore\.ru/[^?#"']+/\d+(?:\.xml)?) 
'''] _TESTS = [{ 'url': 'http://videomore.ru/kino_v_detalayah/5_sezon/367617', 'md5': '44455a346edc0d509ac5b5a5b531dc35', 'info_dict': { 'id': '367617', 'ext': 'flv', 'title': 'Кино в деталях 5 сезон В гостях Алексей Чумаков и Юлия Ковальчук', 'series': 'Кино в деталях', 'episode': 'В гостях Алексей Чумаков и Юлия Ковальчук', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 2910, 'view_count': int, 'comment_count': int, 'age_limit': 16, }, 'skip': 'The video is not available for viewing.', }, { 'url': 'http://videomore.ru/embed/259974', 'info_dict': { 'id': '259974', 'ext': 'mp4', 'title': 'Молодежка 2 сезон 40 серия', 'series': 'Молодежка', 'season': '2 сезон', 'episode': '40 серия', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 2789, 'view_count': int, 'age_limit': 16, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://videomore.ru/molodezhka/sezon_promo/341073', 'info_dict': { 'id': '341073', 'ext': 'flv', 'title': 'Промо Команда проиграла из-за Бакина?', 'episode': 'Команда проиграла из-за Бакина?', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 29, 'age_limit': 16, 'view_count': int, }, 'params': { 'skip_download': True, }, 'skip': 'The video is not available for viewing.', }, { 'url': 'http://videomore.ru/elki_3?track_id=364623', 'only_matching': True, }, { 'url': 'http://videomore.ru/embed/364623', 'only_matching': True, }, { 'url': 'http://videomore.ru/video/tracks/364623.xml', 'only_matching': True, }, { 'url': 'http://videomore.ru/video/tracks/364623.json', 'only_matching': True, }, { 'url': 'http://videomore.ru/video/tracks/158031/quotes/33248', 'only_matching': True, }, { 'url': 'videomore:367617', 'only_matching': True, }, { 'url': 'https://player.videomore.ru/?partner_id=97&track_id=736234&autoplay=0&userToken=', 'only_matching': True, }, { 'url': 'https://odysseus.more.tv/player/1788/352317', 'only_matching': True, }, { 'url': 'https://siren.more.tv/player/config?track_id=352317&partner_id=1788&user_token=', 'only_matching': True, 
}] _GEO_BYPASS = False def _real_extract(self, url): mobj = self._match_valid_url(url) video_id = mobj.group('sid') or mobj.group('id') partner_id = mobj.group('partner_id') or parse_qs(url).get('partner_id', [None])[0] or '97' item = self._download_json( 'https://siren.more.tv/player/config', video_id, query={ 'partner_id': partner_id, 'track_id': video_id, })['data']['playlist']['items'][0] title = item.get('title') series = item.get('project_name') season = item.get('season_name') episode = item.get('episode_name') if not title: title = [] for v in (series, season, episode): if v: title.append(v) title = ' '.join(title) streams = item.get('streams') or [] for protocol in ('DASH', 'HLS'): stream_url = item.get(protocol.lower() + '_url') if stream_url: streams.append({'protocol': protocol, 'url': stream_url}) formats = [] for stream in streams: stream_url = stream.get('url') if not stream_url: continue protocol = stream.get('protocol') if protocol == 'DASH': formats.extend(self._extract_mpd_formats( stream_url, video_id, mpd_id='dash', fatal=False)) elif protocol == 'HLS': formats.extend(self._extract_m3u8_formats( stream_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) elif protocol == 'MSS': formats.extend(self._extract_ism_formats( stream_url, video_id, ism_id='mss', fatal=False)) if not formats: error = item.get('error') if error: if error in ('Данное видео недоступно для просмотра на территории этой страны', 'Данное видео доступно для просмотра только на территории России'): self.raise_geo_restricted(countries=['RU'], metadata_available=True) self.raise_no_formats(error, expected=True) return { 'id': video_id, 'title': title, 'series': series, 'season': season, 'episode': episode, 'thumbnail': item.get('thumbnail_url'), 'duration': int_or_none(item.get('duration')), 'view_count': int_or_none(item.get('views')), 'age_limit': int_or_none(item.get('min_age')), 'formats': formats, } class VideomoreVideoIE(VideomoreBaseIE): IE_NAME = 
'videomore:video' _VALID_URL = VideomoreBaseIE._VALID_URL_BASE + r'(?P<id>(?:(?:[^/]+/){2})?[^/?#&]+)(?:/*|[?#&].*?)$' _TESTS = [{ # single video with og:video:iframe 'url': 'http://videomore.ru/elki_3', 'info_dict': { 'id': '364623', 'ext': 'flv', 'title': 'Ёлки 3', 'description': '', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 5579, 'age_limit': 6, 'view_count': int, }, 'params': { 'skip_download': True, }, 'skip': 'Requires logging in', }, { # season single series with og:video:iframe 'url': 'http://videomore.ru/poslednii_ment/1_sezon/14_seriya', 'info_dict': { 'id': '352317', 'ext': 'mp4', 'title': 'Последний мент 1 сезон 14 серия', 'series': 'Последний мент', 'season': '1 сезон', 'episode': '14 серия', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 2464, 'age_limit': 16, 'view_count': int, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://videomore.ru/sejchas_v_seti/serii_221-240/226_vypusk', 'only_matching': True, }, { # single video without og:video:iframe 'url': 'http://videomore.ru/marin_i_ego_druzya', 'info_dict': { 'id': '359073', 'ext': 'flv', 'title': '1 серия. 
Здравствуй, Аквавилль!', 'description': 'md5:c6003179538b5d353e7bcd5b1372b2d7', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 754, 'age_limit': 6, 'view_count': int, }, 'params': { 'skip_download': True, }, 'skip': 'redirects to https://more.tv/', }, { 'url': 'https://videomore.ru/molodezhka/6_sezon/29_seriya?utm_so', 'only_matching': True, }, { 'url': 'https://more.tv/poslednii_ment/1_sezon/14_seriya', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if VideomoreIE.suitable(url) else super().suitable(url) def _real_extract(self, url): display_id = self._match_id(url) return self._track_url_result(self._download_page_data(display_id)) class VideomoreSeasonIE(VideomoreBaseIE): IE_NAME = 'videomore:season' _VALID_URL = VideomoreBaseIE._VALID_URL_BASE + r'(?!embed)(?P<id>[^/]+/[^/?#&]+)(?:/*|[?#&].*?)$' _TESTS = [{ 'url': 'http://videomore.ru/molodezhka/film_o_filme', 'info_dict': { 'id': 'molodezhka/film_o_filme', 'title': 'Фильм о фильме', }, 'playlist_mincount': 3, }, { 'url': 'http://videomore.ru/molodezhka/sezon_promo?utm_so', 'only_matching': True, }, { 'url': 'https://more.tv/molodezhka/film_o_filme', 'only_matching': True, }] @classmethod def suitable(cls, url): return (False if (VideomoreIE.suitable(url) or VideomoreVideoIE.suitable(url)) else super().suitable(url)) def _real_extract(self, url): display_id = self._match_id(url) season = self._download_page_data(display_id) season_id = str(season['id']) tracks = self._download_json( self._API_BASE_URL + f'seasons/{season_id}/tracks', season_id)['data'] entries = [] for track in tracks: entries.append(self._track_url_result(track)) return self.playlist_result(entries, display_id, season.get('title'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/podomatic.py
yt_dlp/extractor/podomatic.py
import json from .common import InfoExtractor from ..utils import int_or_none class PodomaticIE(InfoExtractor): _WORKING = False IE_NAME = 'podomatic' _VALID_URL = r'''(?x) (?P<proto>https?):// (?: (?P<channel>[^.]+)\.podomatic\.com/entry| (?:www\.)?podomatic\.com/podcasts/(?P<channel_2>[^/]+)/episodes )/ (?P<id>[^/?#&]+) ''' _TESTS = [{ 'url': 'http://scienceteachingtips.podomatic.com/entry/2009-01-02T16_03_35-08_00', 'md5': '84bb855fcf3429e6bf72460e1eed782d', 'info_dict': { 'id': '2009-01-02T16_03_35-08_00', 'ext': 'mp3', 'uploader': 'Science Teaching Tips', 'uploader_id': 'scienceteachingtips', 'title': '64. When the Moon Hits Your Eye', 'duration': 446, }, }, { 'url': 'http://ostbahnhof.podomatic.com/entry/2013-11-15T16_31_21-08_00', 'md5': 'd2cf443931b6148e27638650e2638297', 'info_dict': { 'id': '2013-11-15T16_31_21-08_00', 'ext': 'mp3', 'uploader': 'Ostbahnhof / Techno Mix', 'uploader_id': 'ostbahnhof', 'title': 'Einunddreizig', 'duration': 3799, }, }, { 'url': 'https://www.podomatic.com/podcasts/scienceteachingtips/episodes/2009-01-02T16_03_35-08_00', 'only_matching': True, }] def _real_extract(self, url): mobj = self._match_valid_url(url) video_id = mobj.group('id') channel = mobj.group('channel') or mobj.group('channel_2') json_url = ('{}://{}.podomatic.com/entry/embed_params/{}?permalink=true&rtmp=0'.format( mobj.group('proto'), channel, video_id)) data_json = self._download_webpage( json_url, video_id, 'Downloading video info') data = json.loads(data_json) video_url = data['downloadLink'] if not video_url: video_url = '{}/{}'.format(data['streamer'].replace('rtmp', 'http'), data['mediaLocation']) uploader = data['podcast'] title = data['title'] thumbnail = data['imageLocation'] duration = int_or_none(data.get('length'), 1000) return { 'id': video_id, 'url': video_url, 'title': title, 'uploader': uploader, 'uploader_id': channel, 'thumbnail': thumbnail, 'duration': duration, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/dlive.py
yt_dlp/extractor/dlive.py
import json from .common import InfoExtractor from ..utils import int_or_none class DLiveVODIE(InfoExtractor): IE_NAME = 'dlive:vod' _VALID_URL = r'https?://(?:www\.)?dlive\.tv/p/(?P<uploader_id>.+?)\+(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://dlive.tv/p/pdp+3mTzOl4WR', 'info_dict': { 'id': '3mTzOl4WR', 'ext': 'mp4', 'title': 'Minecraft with james charles epic', 'upload_date': '20190701', 'timestamp': 1562011015, 'uploader_id': 'pdp', }, }, { 'url': 'https://dlive.tv/p/pdpreplay+D-RD-xSZg', 'only_matching': True, }] def _real_extract(self, url): uploader_id, vod_id = self._match_valid_url(url).groups() broadcast = self._download_json( 'https://graphigo.prd.dlive.tv/', vod_id, data=json.dumps({'query': '''query { pastBroadcast(permlink:"%s+%s") { content createdAt length playbackUrl title thumbnailUrl viewCount } }''' % (uploader_id, vod_id)}).encode())['data']['pastBroadcast'] # noqa: UP031 title = broadcast['title'] formats = self._extract_m3u8_formats( broadcast['playbackUrl'], vod_id, 'mp4', 'm3u8_native') return { 'id': vod_id, 'title': title, 'uploader_id': uploader_id, 'formats': formats, 'description': broadcast.get('content'), 'thumbnail': broadcast.get('thumbnailUrl'), 'timestamp': int_or_none(broadcast.get('createdAt'), 1000), 'view_count': int_or_none(broadcast.get('viewCount')), } class DLiveStreamIE(InfoExtractor): IE_NAME = 'dlive:stream' _VALID_URL = r'https?://(?:www\.)?dlive\.tv/(?!p/)(?P<id>[\w.-]+)' def _real_extract(self, url): display_name = self._match_id(url) user = self._download_json( 'https://graphigo.prd.dlive.tv/', display_name, data=json.dumps({'query': '''query { userByDisplayName(displayname:"%s") { livestream { content createdAt title thumbnailUrl watchingCount } username } }''' % display_name}).encode())['data']['userByDisplayName'] # noqa: UP031 livestream = user['livestream'] title = livestream['title'] username = user['username'] formats = self._extract_m3u8_formats( f'https://live.prd.dlive.tv/hls/live/{username}.m3u8', 
display_name, 'mp4') return { 'id': display_name, 'title': title, 'uploader': display_name, 'uploader_id': username, 'formats': formats, 'description': livestream.get('content'), 'thumbnail': livestream.get('thumbnailUrl'), 'is_live': True, 'timestamp': int_or_none(livestream.get('createdAt'), 1000), 'view_count': int_or_none(livestream.get('watchingCount')), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/yahoo.py
yt_dlp/extractor/yahoo.py
import hashlib import itertools import urllib.parse from .common import InfoExtractor, SearchInfoExtractor from .youtube import YoutubeIE from ..utils import ( ExtractorError, clean_html, int_or_none, join_nonempty, mimetype2ext, parse_iso8601, traverse_obj, try_get, update_url, url_or_none, ) class YahooIE(InfoExtractor): IE_NAME = 'yahoo' _VALID_URL = r'(?P<url>https?://(?:(?P<country>[a-zA-Z]{2}(?:-[a-zA-Z]{2})?|malaysia)\.)?(?:[\da-zA-Z_-]+\.)?yahoo\.com/(?:[^/]+/)*(?P<id>[^?&#]*-[0-9]+(?:-[a-z]+)?)\.html)' _TESTS = [{ 'url': 'https://news.yahoo.com/video/china-moses-crazy-blues-104538833.html', 'md5': '88e209b417f173d86186bef6e4d1f160', 'info_dict': { 'id': 'f885cf7f-43d4-3450-9fac-46ac30ece521', 'ext': 'mp4', 'title': 'China Moses Is Crazy About the Blues', 'description': 'md5:9900ab8cd5808175c7b3fe55b979bed0', 'duration': 128, 'timestamp': 1385721938, 'upload_date': '20131129', 'display_id': 'china-moses-crazy-blues-104538833', 'view_count': int, 'thumbnail': r're:https://media\.zenfs\.com/.+', }, }, { 'url': 'https://www.yahoo.com/movies/v/true-story-trailer-173000497.html', # 'md5': '989396ae73d20c6f057746fb226aa215', # varies between this and 'b17ac378b1134fa44370fb27db09a744' 'info_dict': { 'id': '071c4013-ce30-3a93-a5b2-e0413cd4a9d1', 'ext': 'mp4', 'title': '\'True Story\' Trailer', 'description': 'True Story', 'duration': 150, 'timestamp': 1418923800, 'upload_date': '20141218', 'display_id': 'true-story-trailer-173000497', 'view_count': int, 'thumbnail': r're:https://media\.zenfs\.com/.+\.jpg', }, }, { 'url': 'https://gma.yahoo.com/pizza-delivery-man-surprised-huge-tip-college-kids-195200785.html', 'only_matching': True, }, { 'note': 'NBC Sports embeds', 'url': 'https://sports.yahoo.com/blogs/ncaab-the-dagger/tyler-kalinoski-s-buzzer-beater-caps-davidson-s-comeback-win-185609842.html?guid=nbc_cbk_davidsonbuzzerbeater_150313', 'info_dict': { 'id': '9CsDKds0kvHI', 'ext': 'flv', 'description': 'md5:df390f70a9ba7c95ff1daace988f0d8d', 'title': 'Tyler 
Kalinoski hits buzzer-beater to lift Davidson', 'upload_date': '20150313', 'uploader': 'NBCU-SPORTS', 'timestamp': 1426270238, }, 'skip': 'Page no longer has video', }, { 'url': 'https://tw.news.yahoo.com/-100120367.html', 'only_matching': True, }, { # ytwnews://cavideo/ 'url': 'https://tw.video.yahoo.com/movie-tw/單車天使-中文版預-092316541.html', 'info_dict': { 'id': 'ba133ff2-0793-3510-b636-59dfe9ff6cff', 'ext': 'mp4', 'title': '單車天使 - 中文版預', 'description': '中文版預', 'timestamp': 1476696196, 'upload_date': '20161017', 'view_count': int, 'duration': 141, 'thumbnail': r're:https://media\.zenfs\.com/.+\.jpg', 'series': '電影', 'display_id': '單車天使-中文版預-092316541', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.yahoo.com/entertainment/gwen-stefani-reveals-the-pop-hit-she-passed-on-assigns-it-to-her-voice-contestant-instead-033045672.html', 'info_dict': { 'id': '46c5d95a-528f-3d03-b732-732fcadd51de', 'title': 'Gwen Stefani reveals the pop hit she passed on, assigns it to her \'Voice\' contestant instead', 'description': 'Gwen decided not to record this hit herself, but she decided it was the perfect fit for Kyndall Inskeep.', }, 'playlist': [{ 'info_dict': { 'id': '966d4262-4fd1-3aaa-b45b-049ca6e38ba6', 'ext': 'mp4', 'title': 'Gwen Stefani reveals she turned down one of Sia\'s best songs', 'description': 'On "The Voice" Tuesday, Gwen Stefani told Taylor Swift which Sia hit was almost hers.', 'timestamp': 1572406499, 'upload_date': '20191030', 'display_id': 'gwen-stefani-reveals-she-turned-033459311', 'view_count': int, 'duration': 97, 'thumbnail': 'https://s.yimg.com/os/creatr-uploaded-images/2019-10/348bb330-fac6-11e9-8d27-38e85d573702', 'series': 'Last Night Now', }, }], }, { 'url': 'https://sports.yahoo.com/video/rams-lose-grip-nfcs-top-174614409.html', 'info_dict': { 'id': '6b15f100-cf5c-3ad0-9c96-87cbd2f72d4a', 'ext': 'mp4', 'display_id': 'rams-lose-grip-nfcs-top-174614409', 'title': 'Rams lose their grip on NFC\'s top seed — can they still secure the 
bye?', 'description': 'md5:5f4f98ab3c4de80e54c105b6bbb1d024', 'view_count': int, 'duration': 85, 'thumbnail': 'https://s.yimg.com/os/creatr-uploaded-images/2025-12/94fc4840-dd02-11f0-beff-38ba3a4992e3', 'timestamp': 1766166374, 'upload_date': '20251219', }, }, { 'url': 'https://malaysia.news.yahoo.com/video/bystanders-help-ontario-policeman-bust-190932818.html', 'only_matching': True, }, { 'url': 'https://es-us.noticias.yahoo.com/es-la-puerta-irrompible-que-110539379.html', 'only_matching': True, }, { 'url': 'https://www.yahoo.com/entertainment/v/longtime-cbs-news-60-minutes-032036500-cbs.html', 'only_matching': True, }] def _extract_yahoo_video(self, video_id, country): video = self._download_json( f'https://video-api.yql.yahoo.com/v1/video/sapi/streams/{video_id}', video_id, 'Downloading video JSON metadata')['query']['results']['mediaObj'][0]['meta'] if country == 'malaysia': country = 'my' is_live = traverse_obj(video, ('uplynk_live', {bool})) is True fmts = ('m3u8',) if is_live else ('webm', 'mp4') urls = [] formats = [] subtitles = {} for fmt in fmts: media_obj = self._download_json( 'https://video-api.yql.yahoo.com/v1/video/sapi/streams/' + video_id, video_id, f'Downloading {fmt} JSON metadata', headers=self.geo_verification_headers(), query={ 'format': fmt, 'region': country.upper(), })['query']['results']['mediaObj'][0] msg = media_obj.get('status', {}).get('msg') for s in media_obj.get('streams', []): host = s.get('host') path = s.get('path') if not host or not path: continue s_url = host + path if s.get('format') == 'm3u8': formats.extend(self._extract_m3u8_formats( s_url, video_id, 'mp4', m3u8_id='hls', fatal=False)) continue tbr = int_or_none(s.get('bitrate')) formats.append({ 'url': s_url, 'format_id': join_nonempty(fmt, tbr), 'width': int_or_none(s.get('width')), 'height': int_or_none(s.get('height')), 'tbr': tbr, 'fps': int_or_none(s.get('framerate')), }) for cc in media_obj.get('closedcaptions', []): cc_url = cc.get('url') if not cc_url or cc_url 
in urls: continue urls.append(cc_url) subtitles.setdefault(cc.get('lang') or 'en-US', []).append({ 'url': cc_url, 'ext': mimetype2ext(cc.get('content_type')), }) if not formats and msg == 'geo restricted': self.raise_geo_restricted(metadata_available=True) return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, 'is_live': is_live, **traverse_obj(video, { 'title': ('title', {clean_html}), 'description': ('description', {clean_html}), 'thumbnail': ('thumbnail', {url_or_none}, {update_url(scheme='https')}), 'timestamp': ('publish_time', {parse_iso8601}), 'duration': ('duration', {int_or_none}), 'view_count': ('view_count', {int_or_none}), 'series': ('show_name', {str}, filter), }), } def _real_extract(self, url): url, country, display_id = self._match_valid_url(url).groups() if not country: country = 'us' else: country = country.split('-')[0] items = self._download_json( f'https://{country}.yahoo.com/caas/content/article', display_id, 'Downloading content JSON metadata', query={ 'url': url, })['items'][0] item = items['data']['partnerData'] if item.get('type') != 'video': entries = [] cover = item.get('cover') or {} if cover.get('type') == 'yvideo': cover_url = cover.get('url') if cover_url: entries.append(self.url_result( cover_url, 'Yahoo', cover.get('uuid'))) for e in (item.get('body') or []): if e.get('type') == 'videoIframe': iframe_url = e.get('url') if iframe_url: entries.append(self.url_result(iframe_url)) if item.get('type') == 'storywithleadvideo': iframe_url = try_get(item, lambda x: x['meta']['player']['url']) if iframe_url: entries.append(self.url_result(iframe_url)) else: self.report_warning("Yahoo didn't provide an iframe url for this storywithleadvideo") if items.get('markup'): entries.extend( self.url_result(yt_url) for yt_url in YoutubeIE._extract_embed_urls(url, items['markup'])) return self.playlist_result( entries, item.get('uuid'), item.get('title'), item.get('summary')) info = self._extract_yahoo_video(item['uuid'], country) 
info['display_id'] = display_id return info class YahooSearchIE(SearchInfoExtractor): _MAX_RESULTS = 1000 IE_NAME = 'yahoo:search' _SEARCH_KEY = 'yvsearch' def _search_results(self, query): for pagenum in itertools.count(0): result_url = f'https://video.search.yahoo.com/search/?p={urllib.parse.quote_plus(query)}&fr=screen&o=js&gs=0&b={pagenum * 30}' info = self._download_json(result_url, query, note='Downloading results page ' + str(pagenum + 1)) yield from (self.url_result(result['rurl']) for result in info['results']) if info['m']['last'] >= info['m']['total'] - 1: break class YahooJapanNewsIE(InfoExtractor): IE_NAME = 'yahoo:japannews' IE_DESC = 'Yahoo! Japan News' _VALID_URL = r'https?://news\.yahoo\.co\.jp/(?:articles|feature)/(?P<id>[a-zA-Z0-9]+)' _GEO_COUNTRIES = ['JP'] _TESTS = [{ 'url': 'https://news.yahoo.co.jp/articles/a70fe3a064f1cfec937e2252c7fc6c1ba3201c0e', 'info_dict': { 'id': 'a70fe3a064f1cfec937e2252c7fc6c1ba3201c0e', 'ext': 'mp4', 'title': '【独自】安倍元総理「国葬」中止求め“脅迫メール”…「子ども誘拐」“送信者”を追跡', 'description': 'md5:1c06974575f930f692d8696fbcfdc546', 'thumbnail': r're:https://.+', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://news.yahoo.co.jp/feature/1356', 'only_matching': True, }] def _extract_formats(self, json_data, content_id): formats = [] for vid in traverse_obj(json_data, ('ResultSet', 'Result', ..., 'VideoUrlSet', 'VideoUrl', ...)) or []: delivery = vid.get('delivery') url = url_or_none(vid.get('Url')) if not delivery or not url: continue elif delivery == 'hls': formats.extend( self._extract_m3u8_formats( url, content_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) else: bitrate = int_or_none(vid.get('bitrate')) formats.append({ 'url': url, 'format_id': join_nonempty('http', bitrate), 'height': int_or_none(vid.get('height')), 'width': int_or_none(vid.get('width')), 'tbr': bitrate, }) self._remove_duplicate_formats(formats) return formats def _real_extract(self, url): video_id = self._match_id(url) webpage = 
self._download_webpage(url, video_id) preloaded_state = self._search_json(r'__PRELOADED_STATE__\s*=', webpage, 'preloaded state', video_id) content_id = traverse_obj( preloaded_state, ('articleDetail', 'paragraphs', ..., 'objectItems', ..., 'video', 'vid'), get_all=False, expected_type=int) if content_id is None: raise ExtractorError('This article does not contain a video', expected=True) HOST = 'news.yahoo.co.jp' space_id = traverse_obj(preloaded_state, ('pageData', 'spaceId'), expected_type=str) json_data = self._download_json( f'https://feapi-yvpub.yahooapis.jp/v1/content/{content_id}', video_id, query={ 'appid': 'dj0zaiZpPVZMTVFJR0FwZWpiMyZzPWNvbnN1bWVyc2VjcmV0Jng9YjU-', 'output': 'json', 'domain': HOST, 'ak': hashlib.md5('_'.join((space_id, HOST)).encode()).hexdigest() if space_id else '', 'device_type': '1100', }) title = ( traverse_obj(preloaded_state, ('articleDetail', 'headline'), ('pageData', 'pageParam', 'title'), expected_type=str) or self._html_search_meta(('og:title', 'twitter:title'), webpage, 'title', default=None) or self._html_extract_title(webpage)) description = ( traverse_obj(preloaded_state, ('pageData', 'description'), expected_type=str) or self._html_search_meta( ('og:description', 'description', 'twitter:description'), webpage, 'description', default=None)) thumbnail = ( traverse_obj(preloaded_state, ('pageData', 'ogpImage'), expected_type=str) or self._og_search_thumbnail(webpage, default=None) or self._html_search_meta('twitter:image', webpage, 'thumbnail', default=None)) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'formats': self._extract_formats(json_data, video_id), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/dailywire.py
yt_dlp/extractor/dailywire.py
from .common import InfoExtractor from ..utils import ( determine_ext, float_or_none, join_nonempty, traverse_obj, url_or_none, ) class DailyWireBaseIE(InfoExtractor): _JSON_PATH = { 'episode': ('props', 'pageProps', 'episodeData', 'episode'), 'videos': ('props', 'pageProps', 'videoData', 'video'), 'podcasts': ('props', 'pageProps', 'episode'), } def _get_json(self, url): sites_type, slug = self._match_valid_url(url).group('sites_type', 'id') json_data = self._search_nextjs_data(self._download_webpage(url, slug), slug) return slug, traverse_obj(json_data, self._JSON_PATH[sites_type]) class DailyWireIE(DailyWireBaseIE): _VALID_URL = r'https?://(?:www\.)dailywire(?:\.com)/(?P<sites_type>episode|videos)/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://www.dailywire.com/episode/1-fauci', 'info_dict': { 'id': 'ckzsl50xnqpy30850in3v4bu7', 'ext': 'mp4', 'display_id': '1-fauci', 'title': '1. Fauci', 'description': 'md5:9df630347ef85081b7e97dd30bc22853', 'thumbnail': 'https://daily-wire-production.imgix.net/episodes/ckzsl50xnqpy30850in3v4bu7/ckzsl50xnqpy30850in3v4bu7-1648237399554.jpg', 'creator': 'Caroline Roberts', 'series_id': 'ckzplm0a097fn0826r2vc3j7h', 'series': 'China: The Enemy Within', }, }, { 'url': 'https://www.dailywire.com/episode/ep-124-bill-maher', 'info_dict': { 'id': 'cl0ngbaalplc80894sfdo9edf', 'ext': 'mp3', 'display_id': 'ep-124-bill-maher', 'title': 'Ep. 
124 - Bill Maher', 'thumbnail': 'https://daily-wire-production.imgix.net/episodes/cl0ngbaalplc80894sfdo9edf/cl0ngbaalplc80894sfdo9edf-1647065568518.jpg', 'creator': 'Caroline Roberts', 'description': 'md5:adb0de584bcfa9c41374999d9e324e98', 'series_id': 'cjzvep7270hp00786l9hwccob', 'series': 'The Sunday Special', }, }, { 'url': 'https://www.dailywire.com/videos/the-hyperions', 'only_matching': True, }] def _real_extract(self, url): slug, episode_info = self._get_json(url) urls = traverse_obj( episode_info, (('segments', 'videoUrl'), ..., ('video', 'audio')), expected_type=url_or_none) formats, subtitles = [], {} for url in urls: if determine_ext(url) != 'm3u8': formats.append({'url': url}) continue format_, subs_ = self._extract_m3u8_formats_and_subtitles(url, slug) formats.extend(format_) self._merge_subtitles(subs_, target=subtitles) return { 'id': episode_info['id'], 'display_id': slug, 'title': traverse_obj(episode_info, 'title', 'name'), 'description': episode_info.get('description'), 'creator': join_nonempty(('createdBy', 'firstName'), ('createdBy', 'lastName'), from_dict=episode_info, delim=' '), 'duration': float_or_none(episode_info.get('duration')), 'is_live': episode_info.get('isLive'), 'thumbnail': traverse_obj(episode_info, 'thumbnail', 'image', expected_type=url_or_none), 'formats': formats, 'subtitles': subtitles, 'series_id': traverse_obj(episode_info, ('show', 'id')), 'series': traverse_obj(episode_info, ('show', 'name')), } class DailyWirePodcastIE(DailyWireBaseIE): _VALID_URL = r'https?://(?:www\.)dailywire(?:\.com)/(?P<sites_type>podcasts)/(?P<podcaster>[\w-]+/(?P<id>[\w-]+))' _TESTS = [{ 'url': 'https://www.dailywire.com/podcasts/morning-wire/get-ready-for-recession-6-15-22', 'info_dict': { 'id': 'cl4f01d0w8pbe0a98ydd0cfn1', 'ext': 'm4a', 'display_id': 'get-ready-for-recession-6-15-22', 'title': 'Get Ready for Recession | 6.15.22', 'description': 'md5:c4afbadda4e1c38a4496f6d62be55634', 'thumbnail': 
'https://daily-wire-production.imgix.net/podcasts/ckx4otgd71jm508699tzb6hf4-1639506575562.jpg', 'duration': 900.117667, }, }] def _real_extract(self, url): slug, episode_info = self._get_json(url) audio_id = traverse_obj(episode_info, 'audioMuxPlaybackId', 'VUsAipTrBVSgzw73SpC2DAJD401TYYwEp') return { 'id': episode_info['id'], 'url': f'https://stream.media.dailywire.com/{audio_id}/audio.m4a', 'display_id': slug, 'title': episode_info.get('title'), 'duration': float_or_none(episode_info.get('duration')), 'thumbnail': episode_info.get('thumbnail'), 'description': episode_info.get('description'), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/noodlemagazine.py
yt_dlp/extractor/noodlemagazine.py
from .common import InfoExtractor from ..utils import ( int_or_none, parse_count, parse_duration, unified_strdate, urljoin, ) from ..utils.traversal import traverse_obj class NoodleMagazineIE(InfoExtractor): _VALID_URL = r'https?://(?:www|adult\.)?noodlemagazine\.com/watch/(?P<id>[0-9-_]+)' _TEST = { 'url': 'https://adult.noodlemagazine.com/watch/-67421364_456239604', 'md5': '9e02aa763612929d0b4b850591a9248b', 'info_dict': { 'id': '-67421364_456239604', 'title': 'Aria alexander manojob', 'thumbnail': r're:^https://.*\.jpg', 'ext': 'mp4', 'duration': 903, 'view_count': int, 'like_count': int, 'description': 'Aria alexander manojob', 'tags': ['aria', 'alexander', 'manojob'], 'upload_date': '20190218', 'age_limit': 18, }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._og_search_title(webpage) duration = parse_duration(self._html_search_meta('video:duration', webpage, 'duration', default=None)) description = self._og_search_property('description', webpage, default='').replace(' watch online hight quality video', '') tags = self._html_search_meta('video:tag', webpage, default='').split(', ') view_count = parse_count(self._html_search_meta('ya:ovs:views_total', webpage, default=None)) like_count = parse_count(self._html_search_meta('ya:ovs:likes', webpage, default=None)) upload_date = unified_strdate(self._html_search_meta('ya:ovs:upload_date', webpage, default='')) def build_url(url_or_path): return urljoin('https://adult.noodlemagazine.com', url_or_path) playlist_info = self._search_json( r'window\.playlist\s*=', webpage, video_id, 'playlist info') formats = [] for source in traverse_obj(playlist_info, ('sources', lambda _, v: v['file'])): if source.get('type') == 'hls': formats.extend(self._extract_m3u8_formats( build_url(source['file']), video_id, 'mp4', fatal=False, m3u8_id='hls')) else: formats.append(traverse_obj(source, { 'url': ('file', {build_url}), 'format_id': 'label', 'height': 
('label', {int_or_none}), 'ext': 'type', })) return { 'id': video_id, 'formats': formats, 'title': title, 'thumbnail': self._og_search_property('image', webpage, default=None) or playlist_info.get('image'), 'duration': duration, 'description': description, 'tags': tags, 'view_count': view_count, 'like_count': like_count, 'upload_date': upload_date, 'age_limit': 18, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/linkedin.py
yt_dlp/extractor/linkedin.py
import itertools import json import re from .common import InfoExtractor from ..utils import ( ExtractorError, extract_attributes, float_or_none, int_or_none, mimetype2ext, srt_subtitles_timecode, try_get, url_or_none, urlencode_postdata, urljoin, ) from ..utils.traversal import find_elements, require, traverse_obj class LinkedInBaseIE(InfoExtractor): _NETRC_MACHINE = 'linkedin' _logged_in = False def _perform_login(self, username, password): if self._logged_in: return login_page = self._download_webpage( self._LOGIN_URL, None, 'Downloading login page') action_url = urljoin(self._LOGIN_URL, self._search_regex( r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page, 'post url', default='https://www.linkedin.com/uas/login-submit', group='url')) data = self._hidden_inputs(login_page) data.update({ 'session_key': username, 'session_password': password, }) login_submit_page = self._download_webpage( action_url, None, 'Logging in', data=urlencode_postdata(data)) error = self._search_regex( r'<span[^>]+class="error"[^>]*>\s*(.+?)\s*</span>', login_submit_page, 'error', default=None) if error: raise ExtractorError(error, expected=True) LinkedInBaseIE._logged_in = True class LinkedInLearningBaseIE(LinkedInBaseIE): _LOGIN_URL = 'https://www.linkedin.com/uas/login?trk=learning' def _call_api(self, course_slug, fields, video_slug=None, resolution=None): query = { 'courseSlug': course_slug, 'fields': fields, 'q': 'slugs', } sub = '' if video_slug: query.update({ 'videoSlug': video_slug, 'resolution': f'_{resolution}', }) sub = ' %dp' % resolution api_url = 'https://www.linkedin.com/learning-api/detailedCourses' if not self._get_cookies(api_url).get('JSESSIONID'): self.raise_login_required() return self._download_json( api_url, video_slug, f'Downloading{sub} JSON metadata', headers={ 'Csrf-Token': self._get_cookies(api_url)['JSESSIONID'].value, }, query=query)['elements'][0] def _get_urn_id(self, video_data): urn = video_data.get('urn') if urn: mobj = 
re.search(r'urn:li:lyndaCourse:\d+,(\d+)', urn) if mobj: return mobj.group(1) def _get_video_id(self, video_data, course_slug, video_slug): return self._get_urn_id(video_data) or f'{course_slug}/{video_slug}' class LinkedInIE(LinkedInBaseIE): _VALID_URL = [ r'https?://(?:www\.)?linkedin\.com/posts/[^/?#]+-(?P<id>\d+)-\w{4}/?(?:[?#]|$)', r'https?://(?:www\.)?linkedin\.com/feed/update/urn:li:activity:(?P<id>\d+)', ] _TESTS = [{ 'url': 'https://www.linkedin.com/posts/mishalkhawaja_sendinblueviews-toronto-digitalmarketing-ugcPost-6850898786781339649-mM20', 'info_dict': { 'id': '6850898786781339649', 'ext': 'mp4', 'title': 'Mishal K. on LinkedIn: #sendinblueviews #toronto #digitalmarketing #nowhiring #sendinblue…', 'description': 'md5:2998a31f6f479376dd62831f53a80f71', 'uploader': 'Mishal K.', 'thumbnail': 're:^https?://media.licdn.com/dms/image/.*$', 'like_count': int, }, }, { 'url': 'https://www.linkedin.com/posts/the-mathworks_2_what-is-mathworks-cloud-center-activity-7151241570371948544-4Gu7', 'info_dict': { 'id': '7151241570371948544', 'ext': 'mp4', 'title': 'MathWorks on LinkedIn: What Is MathWorks Cloud Center?', 'description': 'md5:95f9d4eeb6337882fb47eefe13d7a40c', 'uploader': 'MathWorks', 'thumbnail': 're:^https?://media.licdn.com/dms/image/.*$', 'like_count': int, 'subtitles': 'mincount:1', }, }, { 'url': 'https://www.linkedin.com/feed/update/urn:li:activity:7016901149999955968/?utm_source=share&utm_medium=member_desktop', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) video_attrs = extract_attributes(self._search_regex(r'(<video[^>]+>)', webpage, 'video')) sources = self._parse_json(video_attrs['data-sources'], video_id) formats = [{ 'url': source['src'], 'ext': mimetype2ext(source.get('type')), 'tbr': float_or_none(source.get('data-bitrate'), scale=1000), } for source in sources] subtitles = {'en': [{ 'url': video_attrs['data-captions-url'], 'ext': 'vtt', }]} if 
url_or_none(video_attrs.get('data-captions-url')) else {} return { 'id': video_id, 'formats': formats, 'title': self._og_search_title(webpage, default=None) or self._html_extract_title(webpage), 'like_count': int_or_none(self._search_regex( r'\bdata-num-reactions="(\d+)"', webpage, 'reactions', default=None)), 'uploader': traverse_obj( self._yield_json_ld(webpage, video_id), (lambda _, v: v['@type'] == 'SocialMediaPosting', 'author', 'name', {str}), get_all=False), 'thumbnail': self._og_search_thumbnail(webpage), 'description': self._og_search_description(webpage, default=None), 'subtitles': subtitles, } class LinkedInLearningIE(LinkedInLearningBaseIE): IE_NAME = 'linkedin:learning' _VALID_URL = r'https?://(?:www\.)?linkedin\.com/learning/(?P<course_slug>[^/]+)/(?P<id>[^/?#]+)' _TEST = { 'url': 'https://www.linkedin.com/learning/programming-foundations-fundamentals/welcome?autoplay=true', 'md5': 'a1d74422ff0d5e66a792deb996693167', 'info_dict': { 'id': '90426', 'ext': 'mp4', 'title': 'Welcome', 'timestamp': 1430396150.82, 'upload_date': '20150430', }, } def json2srt(self, transcript_lines, duration=None): srt_data = '' for line, (line_dict, next_dict) in enumerate(itertools.zip_longest(transcript_lines, transcript_lines[1:])): start_time, caption = line_dict['transcriptStartAt'] / 1000, line_dict['caption'] end_time = next_dict['transcriptStartAt'] / 1000 if next_dict else duration or start_time + 1 srt_data += ( f'{line + 1}\n' f'{srt_subtitles_timecode(start_time)} --> {srt_subtitles_timecode(end_time)}\n' f'{caption}\n\n') return srt_data def _real_extract(self, url): course_slug, video_slug = self._match_valid_url(url).groups() formats = [] for width, height in ((640, 360), (960, 540), (1280, 720)): video_data = self._call_api( course_slug, 'selectedVideo', video_slug, height)['selectedVideo'] video_url_data = video_data.get('url') or {} progressive_url = video_url_data.get('progressiveUrl') if progressive_url: formats.append({ 'format_id': 
f'progressive-{height}p', 'url': progressive_url, 'ext': 'mp4', 'height': height, 'width': width, 'source_preference': 1, }) title = video_data['title'] audio_url = video_data.get('audio', {}).get('progressiveUrl') if audio_url: formats.append({ 'abr': 64, 'ext': 'm4a', 'format_id': 'audio', 'url': audio_url, 'vcodec': 'none', }) streaming_url = video_url_data.get('streamingUrl') if streaming_url: formats.extend(self._extract_m3u8_formats( streaming_url, video_slug, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) subtitles = {} duration = int_or_none(video_data.get('durationInSeconds')) transcript_lines = try_get(video_data, lambda x: x['transcript']['lines'], expected_type=list) if transcript_lines: subtitles['en'] = [{ 'ext': 'srt', 'data': self.json2srt(transcript_lines, duration), }] return { 'id': self._get_video_id(video_data, course_slug, video_slug), 'title': title, 'formats': formats, 'thumbnail': video_data.get('defaultThumbnail'), 'timestamp': float_or_none(video_data.get('publishedOn'), 1000), 'duration': duration, 'subtitles': subtitles, # It seems like this would be correctly handled by default # However, unless someone can confirm this, the old # behaviour is being kept as-is '_format_sort_fields': ('res', 'source_preference'), } class LinkedInLearningCourseIE(LinkedInLearningBaseIE): IE_NAME = 'linkedin:learning:course' _VALID_URL = r'https?://(?:www\.)?linkedin\.com/learning/(?P<id>[^/?#]+)' _TEST = { 'url': 'https://www.linkedin.com/learning/programming-foundations-fundamentals', 'info_dict': { 'id': 'programming-foundations-fundamentals', 'title': 'Programming Foundations: Fundamentals', 'description': 'md5:76e580b017694eb89dc8e8923fff5c86', }, 'playlist_mincount': 61, } @classmethod def suitable(cls, url): return False if LinkedInLearningIE.suitable(url) else super().suitable(url) def _real_extract(self, url): course_slug = self._match_id(url) course_data = self._call_api(course_slug, 'chapters,description,title') entries = [] for 
chapter_number, chapter in enumerate(course_data.get('chapters', []), 1): chapter_title = chapter.get('title') chapter_id = self._get_urn_id(chapter) for video in chapter.get('videos', []): video_slug = video.get('slug') if not video_slug: continue entries.append({ '_type': 'url_transparent', 'id': self._get_video_id(video, course_slug, video_slug), 'title': video.get('title'), 'url': f'https://www.linkedin.com/learning/{course_slug}/{video_slug}', 'chapter': chapter_title, 'chapter_number': chapter_number, 'chapter_id': chapter_id, 'ie_key': LinkedInLearningIE.ie_key(), }) return self.playlist_result( entries, course_slug, course_data.get('title'), course_data.get('description')) class LinkedInEventsIE(LinkedInBaseIE): IE_NAME = 'linkedin:events' _VALID_URL = r'https?://(?:www\.)?linkedin\.com/events/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://www.linkedin.com/events/7084656651378536448/comments/', 'info_dict': { 'id': '7084656651378536448', 'ext': 'mp4', 'title': '#37 Aprende a hacer una entrevista en inglés para tu próximo trabajo remoto', 'description': '¡Agarra para anotar que se viene tremendo evento!', 'duration': 1765, 'timestamp': 1689113772, 'upload_date': '20230711', 'release_timestamp': 1689174012, 'release_date': '20230712', 'live_status': 'was_live', }, }, { 'url': 'https://www.linkedin.com/events/27-02energyfreedombyenergyclub7295762520814874625/comments/', 'info_dict': { 'id': '27-02energyfreedombyenergyclub7295762520814874625', 'ext': 'mp4', 'title': '27.02 Energy Freedom by Energy Club', 'description': 'md5:1292e6f31df998914c293787a02c3b91', 'duration': 6420, 'timestamp': 1739445333, 'upload_date': '20250213', 'release_timestamp': 1740657620, 'release_date': '20250227', 'live_status': 'was_live', }, }] def _real_initialize(self): if not self._get_cookies('https://www.linkedin.com/').get('li_at'): self.raise_login_required() def _real_extract(self, url): event_id = self._match_id(url) webpage = self._download_webpage(url, event_id) base_data = 
traverse_obj(webpage, ( {find_elements(tag='code', attr='style', value='display: none')}, ..., {json.loads}, 'included', ...)) meta_data = traverse_obj(base_data, ( lambda _, v: v['$type'] == 'com.linkedin.voyager.dash.events.ProfessionalEvent', any)) or {} live_status = { 'PAST': 'was_live', 'ONGOING': 'is_live', 'FUTURE': 'is_upcoming', }.get(meta_data.get('lifecycleState')) if live_status == 'is_upcoming': player_data = {} if event_time := traverse_obj(meta_data, ('displayEventTime', {str})): message = f'This live event is scheduled for {event_time}' else: message = 'This live event has not yet started' self.raise_no_formats(message, expected=True, video_id=event_id) else: # TODO: Add support for audio-only live events player_data = traverse_obj(base_data, ( lambda _, v: v['$type'] == 'com.linkedin.videocontent.VideoPlayMetadata', any, {require('video player data')})) formats, subtitles = [], {} for prog_fmts in traverse_obj(player_data, ('progressiveStreams', ..., {dict})): for fmt_url in traverse_obj(prog_fmts, ('streamingLocations', ..., 'url', {url_or_none})): formats.append({ 'url': fmt_url, **traverse_obj(prog_fmts, { 'width': ('width', {int_or_none}), 'height': ('height', {int_or_none}), 'tbr': ('bitRate', {int_or_none(scale=1000)}), 'filesize': ('size', {int_or_none}), 'ext': ('mediaType', {mimetype2ext}), }), }) for m3u8_url in traverse_obj(player_data, ( 'adaptiveStreams', lambda _, v: v['protocol'] == 'HLS', 'masterPlaylists', ..., 'url', {url_or_none}, )): fmts, subs = self._extract_m3u8_formats_and_subtitles( m3u8_url, event_id, 'mp4', m3u8_id='hls', fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) return { 'id': event_id, 'formats': formats, 'subtitles': subtitles, 'live_status': live_status, **traverse_obj(meta_data, { 'title': ('name', {str}), 'description': ('description', 'text', {str}), 'timestamp': ('createdAt', {int_or_none(scale=1000)}), # timeRange.start is available when the stream is_upcoming 
'release_timestamp': ('timeRange', 'start', {int_or_none(scale=1000)}), }), **traverse_obj(player_data, { 'duration': ('duration', {int_or_none(scale=1000)}), # liveStreamCreatedAt is only available when the stream is_live or was_live 'release_timestamp': ('liveStreamCreatedAt', {int_or_none(scale=1000)}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/hketv.py
yt_dlp/extractor/hketv.py
from .common import InfoExtractor from ..utils import ( ExtractorError, clean_html, int_or_none, merge_dicts, parse_count, str_or_none, try_get, unified_strdate, urlencode_postdata, urljoin, ) class HKETVIE(InfoExtractor): IE_NAME = 'hketv' IE_DESC = '香港教育局教育電視 (HKETV) Educational Television, Hong Kong Educational Bureau' _GEO_BYPASS = False _GEO_COUNTRIES = ['HK'] _VALID_URL = r'https?://(?:www\.)?hkedcity\.net/etv/resource/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://www.hkedcity.net/etv/resource/2932360618', 'md5': 'f193712f5f7abb208ddef3c5ea6ed0b7', 'info_dict': { 'id': '2932360618', 'ext': 'mp4', 'title': '喜閱一生(共享閱讀樂) (中、英文字幕可供選擇)', 'description': 'md5:d5286d05219ef50e0613311cbe96e560', 'upload_date': '20181024', 'duration': 900, 'subtitles': 'count:2', }, 'skip': 'Geo restricted to HK', }, { 'url': 'https://www.hkedcity.net/etv/resource/972641418', 'md5': '1ed494c1c6cf7866a8290edad9b07dc9', 'info_dict': { 'id': '972641418', 'ext': 'mp4', 'title': '衣冠楚楚 (天使系列之一)', 'description': 'md5:10bb3d659421e74f58e5db5691627b0f', 'upload_date': '20070109', 'duration': 907, 'subtitles': {}, }, 'skip': 'Geo restricted to HK', }] _CC_LANGS = { '中文(繁體中文)': 'zh-Hant', '中文(简体中文)': 'zh-Hans', 'English': 'en', 'Bahasa Indonesia': 'id', '\u0939\u093f\u0928\u094d\u0926\u0940': 'hi', '\u0928\u0947\u092a\u093e\u0932\u0940': 'ne', 'Tagalog': 'tl', '\u0e44\u0e17\u0e22': 'th', '\u0627\u0631\u062f\u0648': 'ur', } _FORMAT_HEIGHTS = { 'SD': 360, 'HD': 720, } _APPS_BASE_URL = 'https://apps.hkedcity.net' def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = ( self._html_search_meta( ('ed_title', 'search.ed_title'), webpage, default=None) or self._search_regex( r'data-favorite_title_(?:eng|chi)=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 'title', default=None, group='url') or self._html_search_regex( r'<h1>([^<]+)</h1>', webpage, 'title', default=None) or self._og_search_title(webpage) ) file_id = self._search_regex( 
r'post_var\[["\']file_id["\']\s*\]\s*=\s*(.+?);', webpage, 'file ID') curr_url = self._search_regex( r'post_var\[["\']curr_url["\']\s*\]\s*=\s*"(.+?)";', webpage, 'curr URL') data = { 'action': 'get_info', 'curr_url': curr_url, 'file_id': file_id, 'video_url': file_id, } response = self._download_json( self._APPS_BASE_URL + '/media/play/handler.php', video_id, data=urlencode_postdata(data), headers=merge_dicts({ 'Content-Type': 'application/x-www-form-urlencoded'}, self.geo_verification_headers())) result = response['result'] if not response.get('success') or not response.get('access'): error = clean_html(response.get('access_err_msg')) if 'Video streaming is not available in your country' in error: self.raise_geo_restricted( msg=error, countries=self._GEO_COUNTRIES) else: raise ExtractorError(error, expected=True) formats = [] width = int_or_none(result.get('width')) height = int_or_none(result.get('height')) playlist0 = result['playlist'][0] for fmt in playlist0['sources']: file_url = urljoin(self._APPS_BASE_URL, fmt.get('file')) if not file_url: continue # If we ever wanted to provide the final resolved URL that # does not require cookies, albeit with a shorter lifespan: # urlh = self._downloader.urlopen(file_url) # resolved_url = urlh.url label = fmt.get('label') h = self._FORMAT_HEIGHTS.get(label) w = h * width // height if h and width and height else None formats.append({ 'format_id': label, 'ext': fmt.get('type'), 'url': file_url, 'width': w, 'height': h, }) subtitles = {} tracks = try_get(playlist0, lambda x: x['tracks'], list) or [] for track in tracks: if not isinstance(track, dict): continue track_kind = str_or_none(track.get('kind')) if not track_kind or not isinstance(track_kind, str): continue if track_kind.lower() not in ('captions', 'subtitles'): continue track_url = urljoin(self._APPS_BASE_URL, track.get('file')) if not track_url: continue track_label = track.get('label') subtitles.setdefault(self._CC_LANGS.get( track_label, track_label), 
[]).append({ 'url': self._proto_relative_url(track_url), 'ext': 'srt', }) # Likes emotion = self._download_json( 'https://emocounter.hkedcity.net/handler.php', video_id, data=urlencode_postdata({ 'action': 'get_emotion', 'data[bucket_id]': 'etv', 'data[identifier]': video_id, }), headers={'Content-Type': 'application/x-www-form-urlencoded'}, fatal=False) or {} like_count = int_or_none(try_get( emotion, lambda x: x['data']['emotion_data'][0]['count'])) return { 'id': video_id, 'title': title, 'description': self._html_search_meta( 'description', webpage, fatal=False), 'upload_date': unified_strdate(self._html_search_meta( 'ed_date', webpage, fatal=False), day_first=False), 'duration': int_or_none(result.get('length')), 'formats': formats, 'subtitles': subtitles, 'thumbnail': urljoin(self._APPS_BASE_URL, result.get('image')), 'view_count': parse_count(result.get('view_count')), 'like_count': like_count, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/yandexdisk.py
yt_dlp/extractor/yandexdisk.py
import json from .common import InfoExtractor from ..utils import ( determine_ext, float_or_none, int_or_none, join_nonempty, mimetype2ext, try_get, urljoin, ) class YandexDiskIE(InfoExtractor): _VALID_URL = r'''(?x)https?:// (?P<domain> yadi\.sk| disk\.(?:360\.)?yandex\. (?: az| by| co(?:m(?:\.(?:am|ge|tr))?|\.il)| ee| fr| k[gz]| l[tv]| md| t[jm]| u[az]| ru ) )/(?:[di]/|public.*?\bhash=)(?P<id>[^/?#&]+)''' _TESTS = [{ 'url': 'https://yadi.sk/i/VdOeDou8eZs6Y', 'md5': 'a4a8d52958c8fddcf9845935070402ae', 'info_dict': { 'id': 'VdOeDou8eZs6Y', 'ext': 'mp4', 'title': '4.mp4', 'duration': 168.6, 'uploader': 'y.botova', 'uploader_id': '300043621', 'view_count': int, }, 'expected_warnings': ['Unable to download JSON metadata'], }, { 'url': 'https://yadi.sk/d/h3WAXvDS3Li3Ce', 'only_matching': True, }, { 'url': 'https://yadi.sk/public?hash=5DZ296JK9GWCLp02f6jrObjnctjRxMs8L6%2B%2FuhNqk38%3D', 'only_matching': True, }, { 'url': 'https://disk.360.yandex.ru/i/TM2xsIVsgjY4uw', 'only_matching': True, }] def _real_extract(self, url): domain, video_id = self._match_valid_url(url).groups() webpage = self._download_webpage(url, video_id) store = self._parse_json(self._search_regex( r'<script[^>]+id="store-prefetch"[^>]*>\s*({.+?})\s*</script>', webpage, 'store'), video_id) resource = store['resources'][store['rootResourceId']] title = resource['name'] meta = resource.get('meta') or {} public_url = meta.get('short_url') if public_url: video_id = self._match_id(public_url) source_url = (self._download_json( 'https://cloud-api.yandex.net/v1/disk/public/resources/download', video_id, query={'public_key': url}, fatal=False) or {}).get('href') video_streams = resource.get('videoStreams') or {} video_hash = resource.get('hash') or url environment = store.get('environment') or {} sk = environment.get('sk') yandexuid = environment.get('yandexuid') if sk and yandexuid and not (source_url and video_streams): self._set_cookie(domain, 'yandexuid', yandexuid) def call_api(action): return 
(self._download_json( urljoin(url, '/public/api/') + action, video_id, data=json.dumps({ 'hash': video_hash, 'sk': sk, }).encode(), headers={ 'Content-Type': 'text/plain', }, fatal=False) or {}).get('data') or {} if not source_url: # TODO: figure out how to detect if download limit has # been reached and then avoid unnecessary source format # extraction requests source_url = call_api('download-url').get('url') if not video_streams: video_streams = call_api('get-video-streams') formats = [] if source_url: formats.append({ 'url': source_url, 'format_id': 'source', 'ext': determine_ext(title, meta.get('ext') or mimetype2ext(meta.get('mime_type')) or 'mp4'), 'quality': 1, 'filesize': int_or_none(meta.get('size')), }) for video in (video_streams.get('videos') or []): format_url = video.get('url') if not format_url: continue if video.get('dimension') == 'adaptive': formats.extend(self._extract_m3u8_formats( format_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) else: size = video.get('size') or {} height = int_or_none(size.get('height')) formats.append({ 'ext': 'mp4', 'format_id': join_nonempty('hls', height and f'{height}p'), 'height': height, 'protocol': 'm3u8_native', 'url': format_url, 'width': int_or_none(size.get('width')), }) uid = resource.get('uid') display_name = try_get(store, lambda x: x['users'][uid]['displayName']) return { 'id': video_id, 'title': title, 'duration': float_or_none(video_streams.get('duration'), 1000), 'uploader': display_name, 'uploader_id': uid, 'view_count': int_or_none(meta.get('views_counter')), 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/laracasts.py
yt_dlp/extractor/laracasts.py
import json from .common import InfoExtractor from .vimeo import VimeoIE from ..utils import ( clean_html, extract_attributes, get_element_html_by_id, int_or_none, parse_duration, str_or_none, unified_strdate, url_or_none, urljoin, ) from ..utils.traversal import traverse_obj class LaracastsBaseIE(InfoExtractor): def _get_prop_data(self, url, display_id): webpage = self._download_webpage(url, display_id) return traverse_obj( get_element_html_by_id('app', webpage), ({extract_attributes}, 'data-page', {json.loads}, 'props')) def _parse_episode(self, episode): if not traverse_obj(episode, 'vimeoId'): self.raise_login_required('This video is only available for subscribers.') return self.url_result( VimeoIE._smuggle_referrer( f'https://player.vimeo.com/video/{episode["vimeoId"]}', 'https://laracasts.com/'), VimeoIE, url_transparent=True, **traverse_obj(episode, { 'id': ('id', {int}, {str_or_none}), 'webpage_url': ('path', {urljoin('https://laracasts.com')}), 'title': ('title', {clean_html}), 'season_number': ('chapter', {int_or_none}), 'episode_number': ('position', {int_or_none}), 'description': ('body', {clean_html}), 'thumbnail': ('largeThumbnail', {url_or_none}), 'duration': ('length', {int_or_none}), 'upload_date': ('dateSegments', 'published', {unified_strdate}), })) class LaracastsIE(LaracastsBaseIE): IE_NAME = 'laracasts' _VALID_URL = r'https?://(?:www\.)?laracasts\.com/series/(?P<id>[\w-]+/episodes/\d+)/?(?:[?#]|$)' _TESTS = [{ 'url': 'https://laracasts.com/series/30-days-to-learn-laravel-11/episodes/1', 'md5': 'c8f5e7b02ad0e438ef9280a08c8493dc', 'info_dict': { 'id': '922040563', 'title': 'Hello, Laravel', 'ext': 'mp4', 'duration': 519, 'upload_date': '20240312', 'thumbnail': 'https://laracasts.s3.amazonaws.com/videos/thumbnails/youtube/30-days-to-learn-laravel-11-1.png', 'description': 'md5:ddd658bb241975871d236555657e1dd1', 'season_number': 1, 'season': 'Season 1', 'episode_number': 1, 'episode': 'Episode 1', 'uploader': 'Laracasts', 'uploader_id': 
'user20182673', 'uploader_url': 'https://vimeo.com/user20182673', }, 'expected_warnings': ['Failed to parse XML'], # TODO: Remove when vimeo extractor is fixed }] def _real_extract(self, url): display_id = self._match_id(url) return self._parse_episode(self._get_prop_data(url, display_id)['lesson']) class LaracastsPlaylistIE(LaracastsBaseIE): IE_NAME = 'laracasts:series' _VALID_URL = r'https?://(?:www\.)?laracasts\.com/series/(?P<id>[\w-]+)/?(?:[?#]|$)' _TESTS = [{ 'url': 'https://laracasts.com/series/30-days-to-learn-laravel-11', 'info_dict': { 'title': '30 Days to Learn Laravel', 'id': '210', 'thumbnail': 'https://laracasts.s3.amazonaws.com/series/thumbnails/social-cards/30-days-to-learn-laravel-11.png?v=2', 'duration': 30600.0, 'modified_date': '20240511', 'description': 'md5:27c260a1668a450984e8f901579912dd', 'categories': ['Frameworks'], 'tags': ['Laravel'], 'display_id': '30-days-to-learn-laravel-11', }, 'playlist_count': 30, }] def _real_extract(self, url): display_id = self._match_id(url) series = self._get_prop_data(url, display_id)['series'] metadata = { 'display_id': display_id, **traverse_obj(series, { 'title': ('title', {str}), 'id': ('id', {int}, {str_or_none}), 'description': ('body', {clean_html}), 'thumbnail': (('large_thumbnail', 'thumbnail'), {url_or_none}, any), 'duration': ('runTime', {parse_duration}), 'categories': ('taxonomy', 'name', {str}, all, filter), 'tags': ('topics', ..., 'name', {str}), 'modified_date': ('lastUpdated', {unified_strdate}), }), } return self.playlist_result(traverse_obj( series, ('chapters', ..., 'episodes', lambda _, v: v['vimeoId'], {self._parse_episode})), **metadata)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tv2.py
yt_dlp/extractor/tv2.py
import re from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, determine_ext, float_or_none, int_or_none, js_to_json, parse_iso8601, remove_end, strip_or_none, try_get, ) class TV2IE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tv2\.no/v(?:ideo)?\d*/(?:[^?#]+/)*(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.tv2.no/v/1791207/', 'info_dict': { 'id': '1791207', 'ext': 'mp4', 'title': 'Her kolliderer romsonden med asteroiden ', 'description': 'En romsonde har krasjet inn i en asteroide i verdensrommet. Kollisjonen skjedde klokken 01:14 natt til tirsdag 27. september norsk tid. \n\nNasa kaller det sitt første forsøk på planetforsvar.', 'timestamp': 1664238190, 'upload_date': '20220927', 'duration': 146, 'thumbnail': r're:^https://.*$', 'view_count': int, 'categories': list, }, }, { 'url': 'http://www.tv2.no/v2/916509', 'only_matching': True, }, { 'url': 'https://www.tv2.no/video/nyhetene/her-kolliderer-romsonden-med-asteroiden/1791207/', 'only_matching': True, }] _PROTOCOLS = ('HLS', 'DASH') _GEO_COUNTRIES = ['NO'] def _real_extract(self, url): video_id = self._match_id(url) asset = self._download_json('https://sumo.tv2.no/rest/assets/' + video_id, video_id, 'Downloading metadata JSON') title = asset['title'] is_live = asset.get('live') is True formats = [] format_urls = [] for protocol in self._PROTOCOLS: try: data = self._download_json(f'https://api.sumo.tv2.no/play/{video_id}?stream={protocol}', video_id, 'Downloading playabck JSON', headers={'content-type': 'application/json'}, data=b'{"device":{"id":"1-1-1","name":"Nettleser (HTML)"}}')['playback'] except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 401: error = self._parse_json(e.cause.response.read().decode(), video_id)['error'] error_code = error.get('code') if error_code == 'ASSET_PLAYBACK_INVALID_GEO_LOCATION': self.raise_geo_restricted(countries=self._GEO_COUNTRIES) elif error_code == 
'SESSION_NOT_AUTHENTICATED': self.raise_login_required() raise ExtractorError(error['description']) raise items = data.get('streams', []) for item in items: video_url = item.get('url') if not video_url or video_url in format_urls: continue format_id = '{}-{}'.format(protocol.lower(), item.get('type')) if not self._is_valid_url(video_url, video_id, format_id): continue format_urls.append(video_url) ext = determine_ext(video_url) if ext == 'f4m': formats.extend(self._extract_f4m_formats( video_url, video_id, f4m_id=format_id, fatal=False)) elif ext == 'm3u8': if not data.get('drmProtected'): formats.extend(self._extract_m3u8_formats( video_url, video_id, 'mp4', live=is_live, m3u8_id=format_id, fatal=False)) elif ext == 'mpd': formats.extend(self._extract_mpd_formats( video_url, video_id, format_id, fatal=False)) elif ext == 'ism' or video_url.endswith('.ism/Manifest'): pass else: formats.append({ 'url': video_url, 'format_id': format_id, }) if not formats and data.get('drmProtected'): self.report_drm(video_id) thumbnails = [{ 'id': thumb_type, 'url': thumb_url, } for thumb_type, thumb_url in (asset.get('images') or {}).items()] return { 'id': video_id, 'url': video_url, 'title': title, 'description': strip_or_none(asset.get('description')), 'thumbnails': thumbnails, 'timestamp': parse_iso8601(asset.get('live_broadcast_time') or asset.get('update_time')), 'duration': float_or_none(asset.get('accurateDuration') or asset.get('duration')), 'view_count': int_or_none(asset.get('views')), 'categories': asset.get('tags', '').split(','), 'formats': formats, 'is_live': is_live, } class TV2ArticleIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tv2\.no/(?!v(?:ideo)?\d*/)[^?#]+/(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.tv2.no/underholdning/forraeder/katarina-flatland-angrer-etter-forraeder-exit/15095188/', 'info_dict': { 'id': '15095188', 'title': 'Katarina Flatland angrer etter Forræder-exit', 'description': 'SANDEFJORD (TV 2): Katarina Flatland (33) måtte følge i sine 
fars fotspor, da hun ble forvist fra Forræder.', }, 'playlist_count': 2, }, { 'url': 'http://www.tv2.no/a/6930542', 'only_matching': True, }] def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) # Old embed pattern (looks unused nowadays) assets = re.findall(r'data-assetid=["\'](\d+)', webpage) if not assets: # New embed pattern for v in re.findall(r'(?s)(?:TV2ContentboxVideo|TV2\.TV2Video)\(({.+?})\)', webpage): video = self._parse_json( v, playlist_id, transform_source=js_to_json, fatal=False) if not video: continue asset = video.get('assetId') if asset: assets.append(asset) entries = [ self.url_result(f'http://www.tv2.no/v/{asset_id}', 'TV2') for asset_id in assets] title = remove_end(self._og_search_title(webpage), ' - TV2.no') description = remove_end(self._og_search_description(webpage), ' - TV2.no') return self.playlist_result(entries, playlist_id, title, description) class KatsomoIE(InfoExtractor): _WORKING = False _VALID_URL = r'https?://(?:www\.)?(?:katsomo|mtv(uutiset)?)\.fi/(?:sarja/[0-9a-z-]+-\d+/[0-9a-z-]+-|(?:#!/)?jakso/(?:\d+/[^/]+/)?|video/prog)(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.mtv.fi/sarja/mtv-uutiset-live-33001002003/lahden-pelicans-teki-kovan-ratkaisun-ville-nieminen-pihalle-1181321', 'info_dict': { 'id': '1181321', 'ext': 'mp4', 'title': 'Lahden Pelicans teki kovan ratkaisun – Ville Nieminen pihalle', 'description': 'Päätöksen teki Pelicansin hallitus.', 'timestamp': 1575116484, 'upload_date': '20191130', 'duration': 37.12, 'view_count': int, 'categories': list, }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://www.katsomo.fi/#!/jakso/33001005/studio55-fi/658521/jukka-kuoppamaki-tekee-yha-lauluja-vaikka-lentokoneessa', 'only_matching': True, }, { 'url': 'https://www.mtvuutiset.fi/video/prog1311159', 'only_matching': True, }, { 'url': 'https://www.katsomo.fi/#!/jakso/1311159', 'only_matching': True, }] _API_DOMAIN = 'api.katsomo.fi' _PROTOCOLS = 
('HLS', 'MPD') _GEO_COUNTRIES = ['FI'] def _real_extract(self, url): video_id = self._match_id(url) api_base = f'http://{self._API_DOMAIN}/api/web/asset/{video_id}' asset = self._download_json( api_base + '.json', video_id, 'Downloading metadata JSON')['asset'] title = asset.get('subtitle') or asset['title'] is_live = asset.get('live') is True formats = [] format_urls = [] for protocol in self._PROTOCOLS: try: data = self._download_json( api_base + f'/play.json?protocol={protocol}&videoFormat=SMIL+ISMUSP', video_id, 'Downloading play JSON')['playback'] except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 401: error = self._parse_json(e.cause.response.read().decode(), video_id)['error'] error_code = error.get('code') if error_code == 'ASSET_PLAYBACK_INVALID_GEO_LOCATION': self.raise_geo_restricted(countries=self._GEO_COUNTRIES) elif error_code == 'SESSION_NOT_AUTHENTICATED': self.raise_login_required() raise ExtractorError(error['description']) raise items = try_get(data, lambda x: x['items']['item']) if not items: continue if not isinstance(items, list): items = [items] for item in items: if not isinstance(item, dict): continue video_url = item.get('url') if not video_url or video_url in format_urls: continue format_id = '{}-{}'.format(protocol.lower(), item.get('mediaFormat')) if not self._is_valid_url(video_url, video_id, format_id): continue format_urls.append(video_url) ext = determine_ext(video_url) if ext == 'f4m': formats.extend(self._extract_f4m_formats( video_url, video_id, f4m_id=format_id, fatal=False)) elif ext == 'm3u8': if not data.get('drmProtected'): formats.extend(self._extract_m3u8_formats( video_url, video_id, 'mp4', live=is_live, m3u8_id=format_id, fatal=False)) elif ext == 'mpd': formats.extend(self._extract_mpd_formats( video_url, video_id, format_id, fatal=False)) elif ext == 'ism' or video_url.endswith('.ism/Manifest'): pass else: formats.append({ 'url': video_url, 'format_id': format_id, 'tbr': 
int_or_none(item.get('bitrate')), 'filesize': int_or_none(item.get('fileSize')), }) if not formats and data.get('drmProtected'): self.report_drm(video_id) thumbnails = [{ 'id': thumbnail.get('@type'), 'url': thumbnail.get('url'), } for _, thumbnail in (asset.get('imageVersions') or {}).items()] return { 'id': video_id, 'url': video_url, 'title': title, 'description': strip_or_none(asset.get('description')), 'thumbnails': thumbnails, 'timestamp': parse_iso8601(asset.get('createTime')), 'duration': float_or_none(asset.get('accurateDuration') or asset.get('duration')), 'view_count': int_or_none(asset.get('views')), 'categories': asset.get('keywords', '').split(','), 'formats': formats, 'is_live': is_live, } class MTVUutisetArticleIE(InfoExtractor): _WORKING = False _VALID_URL = r'https?://(?:www\.)mtvuutiset\.fi/artikkeli/[^/]+/(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.mtvuutiset.fi/artikkeli/tallaisia-vaurioita-viking-amorellassa-on-useamman-osaston-alla-vetta/7931384', 'info_dict': { 'id': '1311159', 'ext': 'mp4', 'title': 'Viking Amorellan matkustajien evakuointi on alkanut – tältä operaatio näyttää laivalla', 'description': 'Viking Amorellan matkustajien evakuointi on alkanut – tältä operaatio näyttää laivalla', 'timestamp': 1600608966, 'upload_date': '20200920', 'duration': 153.7886666, 'view_count': int, 'categories': list, }, 'params': { # m3u8 download 'skip_download': True, }, }, { # multiple Youtube embeds 'url': 'https://www.mtvuutiset.fi/artikkeli/50-vuotta-subarun-vastaiskua/6070962', 'only_matching': True, }] def _real_extract(self, url): article_id = self._match_id(url) article = self._download_json( 'http://api.mtvuutiset.fi/mtvuutiset/api/json/' + article_id, article_id) def entries(): for video in (article.get('videos') or []): video_type = video.get('videotype') video_url = video.get('url') if not (video_url and video_type in ('katsomo', 'youtube')): continue yield self.url_result( video_url, video_type.capitalize(), video.get('video_id')) return 
self.playlist_result( entries(), article_id, article.get('title'), article.get('description'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/azmedien.py
yt_dlp/extractor/azmedien.py
from .common import InfoExtractor from .kaltura import KalturaIE from ..utils.traversal import require, traverse_obj class AZMedienIE(InfoExtractor): IE_DESC = 'AZ Medien videos' _VALID_URL = r'''(?x) https?:// (?:www\.|tv\.)? (?: telezueri\.ch| telebaern\.tv| telem1\.ch| tvo-online\.ch )/ [^/?#]+/ (?P<id> [^/?#]+-\d+ ) (?: \#video= (?P<kaltura_id> [_0-9a-z]+ ) )? ''' _TESTS = [{ 'url': 'https://tv.telezueri.ch/sonntalk/bundesrats-vakanzen-eu-rahmenabkommen-133214569', 'info_dict': { 'id': '1_anruz3wy', 'ext': 'mp4', 'title': 'Bundesrats-Vakanzen / EU-Rahmenabkommen', 'uploader_id': 'TVOnline', 'upload_date': '20180930', 'timestamp': 1538328802, 'view_count': int, 'thumbnail': 'http://cfvod.kaltura.com/p/1719221/sp/171922100/thumbnail/entry_id/1_anruz3wy/version/100031', 'duration': 1930, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.telebaern.tv/telebaern-news/montag-1-oktober-2018-ganze-sendung-133531189#video=0_7xjo9lf1', 'only_matching': True, }] _PARTNER_ID = '1719221' def _real_extract(self, url): display_id, entry_id = self._match_valid_url(url).groups() if not entry_id: webpage = self._download_webpage(url, display_id) data = self._search_json( r'window\.__APOLLO_STATE__\s*=', webpage, 'video data', display_id) entry_id = traverse_obj(data, ( lambda _, v: v['__typename'] == 'KalturaData', 'kalturaId', any, {require('kaltura id')})) return self.url_result( f'kaltura:{self._PARTNER_ID}:{entry_id}', ie=KalturaIE.ie_key(), video_id=entry_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/fifa.py
yt_dlp/extractor/fifa.py
from .common import InfoExtractor from ..utils import ( int_or_none, traverse_obj, unified_timestamp, ) class FifaIE(InfoExtractor): _VALID_URL = r'https?://www\.fifa\.com/fifaplus/\w{2}/watch/([^#?]+/)?(?P<id>\w+)' _TESTS = [{ 'url': 'https://www.fifa.com/fifaplus/en/watch/7on10qPcnyLajDDU3ntg6y', 'info_dict': { 'id': '7on10qPcnyLajDDU3ntg6y', 'title': 'Italy v France | Final | 2006 FIFA World Cup Germany™ | Full Match Replay', 'description': 'md5:f4520d0ee80529c8ba4134a7d692ff8b', 'ext': 'mp4', 'categories': ['FIFA Tournaments'], 'thumbnail': 'https://digitalhub.fifa.com/transform/135e2656-3a51-407b-8810-6c34bec5b59b/FMR_2006_Italy_France_Final_Hero', 'duration': 8165, 'release_timestamp': 1152403200, 'release_date': '20060709', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.fifa.com/fifaplus/pt/watch/1cg5r5Qt6Qt12ilkDgb1sV', 'info_dict': { 'id': '1cg5r5Qt6Qt12ilkDgb1sV', 'title': 'Brazil v Germany | Semi-finals | 2014 FIFA World Cup Brazil™ | Extended Highlights', 'description': 'md5:d908c74ee66322b804ae2e521b02a855', 'ext': 'mp4', 'categories': ['FIFA Tournaments', 'Highlights'], 'thumbnail': 'https://digitalhub.fifa.com/transform/d8fe6f61-276d-4a73-a7fe-6878a35fd082/FIFAPLS_100EXTHL_2014BRAvGER_TMB', 'duration': 902, 'release_timestamp': 1404777600, 'release_date': '20140708', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.fifa.com/fifaplus/fr/watch/3C6gQH9C2DLwzNx7BMRQdp', 'info_dict': { 'id': '3C6gQH9C2DLwzNx7BMRQdp', 'title': 'Josimar goal against Northern Ireland | Classic Goals', 'description': 'md5:cbe7e7bb52f603c9f1fe9a4780fe983b', 'ext': 'mp4', 'categories': ['FIFA Tournaments', 'Goal'], 'duration': 28, 'thumbnail': 'https://digitalhub.fifa.com/transform/f9301391-f8d9-48b5-823e-c093ac5e3e11/CG_MEN_1986_JOSIMAR', }, 'params': {'skip_download': 'm3u8'}, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) preconnect_link = self._search_regex( 
r'<link\b[^>]+\brel\s*=\s*"preconnect"[^>]+href\s*=\s*"([^"]+)"', webpage, 'Preconnect Link') video_details = self._download_json( f'{preconnect_link}/sections/videoDetails/{video_id}', video_id, 'Downloading Video Details', fatal=False) preplay_parameters = self._download_json( f'{preconnect_link}/videoPlayerData/{video_id}', video_id, 'Downloading Preplay Parameters')['preplayParameters'] content_data = self._download_json( 'https://content.uplynk.com/preplay/{contentId}/multiple.json?{queryStr}&sig={signature}'.format(**preplay_parameters), video_id, 'Downloading Content Data') formats, subtitles = self._extract_m3u8_formats_and_subtitles(content_data['playURL'], video_id) return { 'id': video_id, 'title': video_details.get('title'), 'description': video_details.get('description'), 'duration': int_or_none(video_details.get('duration')), 'release_timestamp': unified_timestamp(video_details.get('dateOfRelease')), 'categories': traverse_obj(video_details, (('videoCategory', 'videoSubcategory'),)), 'thumbnail': traverse_obj(video_details, ('backgroundImage', 'src')), 'formats': formats, 'subtitles': subtitles, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/roosterteeth.py
yt_dlp/extractor/roosterteeth.py
from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
    ExtractorError,
    LazyList,
    int_or_none,
    join_nonempty,
    parse_iso8601,
    parse_qs,
    smuggle_url,
    str_or_none,
    url_or_none,
    urlencode_postdata,
    urljoin,
)
from ..utils.traversal import traverse_obj


class RoosterTeethBaseIE(InfoExtractor):
    """Shared login and metadata-mapping logic for Rooster Teeth extractors."""

    _NETRC_MACHINE = 'roosterteeth'
    _API_BASE = 'https://svod-be.roosterteeth.com'
    _API_BASE_URL = f'{_API_BASE}/api/v1'

    def _perform_login(self, username, password):
        """Obtain an OAuth token via password grant; a login failure only warns
        (public content remains extractable without authentication)."""
        # An existing access-token cookie means we are already logged in.
        if self._get_cookies(self._API_BASE_URL).get('rt_access_token'):
            return

        try:
            self._download_json(
                'https://auth.roosterteeth.com/oauth/token',
                None, 'Logging in', data=urlencode_postdata({
                    'client_id': '4338d2b4bdc8db1239360f28e72f0d9ddb1fd01e7a38fbb07b4b1f4ba4564cc5',
                    'grant_type': 'password',
                    'username': username,
                    'password': password,
                }))
        except ExtractorError as e:
            msg = 'Unable to login'
            # On 401 the response body may carry a human-readable reason.
            if isinstance(e.cause, HTTPError) and e.cause.status == 401:
                resp = self._parse_json(e.cause.response.read().decode(), None, fatal=False)
                if resp:
                    error = resp.get('extra_info') or resp.get('error_description') or resp.get('error')
                    if error:
                        msg += ': ' + error
            self.report_warning(msg)

    def _extract_video_info(self, data):
        """Map one API episode/bonus-feature object to a yt-dlp info dict."""
        thumbnails = []
        for image in traverse_obj(data, ('included', 'images')):
            if image.get('type') not in ('episode_image', 'bonus_feature_image'):
                continue
            thumbnails.extend([{
                'id': name,
                'url': url,
            } for name, url in (image.get('attributes') or {}).items() if url_or_none(url)])

        attributes = data.get('attributes') or {}
        title = traverse_obj(attributes, 'title', 'display_title')
        sub_only = attributes.get('is_sponsors_only')

        episode_id = str_or_none(data.get('uuid'))
        video_id = str_or_none(data.get('id'))
        if video_id and 'parent_content_id' in attributes:  # parent_content_id is a bonus-only key
            video_id += '-bonus'  # there are collisions with bonus ids and regular ids
        elif not video_id:
            video_id = episode_id

        return {
            'id': video_id,
            'display_id': attributes.get('slug'),
            'title': title,
            'description': traverse_obj(attributes, 'description', 'caption'),
            'series': traverse_obj(attributes, 'show_title', 'parent_content_title'),
            'season_number': int_or_none(attributes.get('season_number')),
            'season_id': str_or_none(attributes.get('season_id')),
            'episode': title,
            'episode_number': int_or_none(attributes.get('number')),
            'episode_id': episode_id,
            'channel_id': attributes.get('channel_id'),
            'duration': int_or_none(attributes.get('length')),
            'release_timestamp': parse_iso8601(attributes.get('original_air_date')),
            'thumbnails': thumbnails,
            'availability': self._availability(
                needs_premium=sub_only, needs_subscription=sub_only, needs_auth=sub_only,
                is_private=False, is_unlisted=False),
            'tags': attributes.get('genres'),
        }


class RoosterTeethIE(RoosterTeethBaseIE):
    """Extractor for single Rooster Teeth episodes and bonus features."""

    _VALID_URL = r'https?://(?:.+?\.)?roosterteeth\.com/(?:bonus-feature|episode|watch)/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'http://roosterteeth.com/episode/million-dollars-but-season-2-million-dollars-but-the-game-announcement',
        'info_dict': {
            'id': '9156',
            'display_id': 'million-dollars-but-season-2-million-dollars-but-the-game-announcement',
            'ext': 'mp4',
            'title': 'Million Dollars, But... The Game Announcement',
            'description': 'md5:168a54b40e228e79f4ddb141e89fe4f5',
            'thumbnail': r're:^https?://.*\.png$',
            'series': 'Million Dollars, But...',
            'episode': 'Million Dollars, But... The Game Announcement',
            'tags': ['Game Show', 'Sketch'],
            'season_number': 2,
            'availability': 'public',
            'episode_number': 10,
            'episode_id': '00374575-464e-11e7-a302-065410f210c4',
            'season': 'Season 2',
            'season_id': 'ffa27d48-464d-11e7-a302-065410f210c4',
            'channel_id': '92b6bb21-91d2-4b1b-bf95-3268fa0d9939',
            'duration': 145,
            'release_timestamp': 1462982400,
            'release_date': '20160511',
        },
        'params': {'skip_download': True},
    }, {
        'url': 'https://roosterteeth.com/watch/rwby-bonus-25',
        'info_dict': {
            'id': '40432',
            'display_id': 'rwby-bonus-25',
            'title': 'Grimm',
            'description': 'md5:f30ff570741213418a8d2c19868b93ab',
            'episode': 'Grimm',
            'channel_id': '92f780eb-ebfe-4bf5-a3b5-c6ad5460a5f1',
            'thumbnail': r're:^https?://.*\.(png|jpe?g)$',
            'ext': 'mp4',
            'availability': 'public',
            'episode_id': 'f8117b13-f068-499e-803e-eec9ea2dec8c',
            'episode_number': 3,
            'tags': ['Animation'],
            'season_id': '4b8f0a9e-12c4-41ed-8caa-fed15a85bab8',
            'season': 'Season 1',
            'series': 'RWBY: World of Remnant',
            'season_number': 1,
            'duration': 216,
            'release_timestamp': 1413489600,
            'release_date': '20141016',
        },
        'params': {'skip_download': True},
    }, {
        # bonus feature with /watch/ url
        'url': 'https://roosterteeth.com/watch/rwby-bonus-21',
        'info_dict': {
            'id': '33-bonus',
            'display_id': 'rwby-bonus-21',
            'title': 'Volume 5 Yang Character Short',
            'description': 'md5:8c2440bc763ea90c52cfe0a68093e1f7',
            'episode': 'Volume 5 Yang Character Short',
            'channel_id': '92f780eb-ebfe-4bf5-a3b5-c6ad5460a5f1',
            'thumbnail': r're:^https?://.*\.(png|jpe?g)$',
            'ext': 'mp4',
            'availability': 'public',
            'episode_id': 'f2a9f132-1fe2-44ad-8956-63d7c0267720',
            'episode_number': 55,
            'series': 'RWBY',
            'duration': 255,
            'release_timestamp': 1507993200,
            'release_date': '20171014',
        },
        'params': {'skip_download': True},
    }, {
        # only works with video_data['attributes']['url'] m3u8 url
        'url': 'https://www.roosterteeth.com/watch/achievement-hunter-achievement-hunter-fatality-walkthrough-deathstroke-lex-luthor-captain-marvel-green-lantern-and-wonder-woman',
        'info_dict': {
            'id': '25394',
            'ext': 'mp4',
            'title': 'Fatality Walkthrough: Deathstroke, Lex Luthor, Captain Marvel, Green Lantern, and Wonder Woman',
            'description': 'md5:91bb934698344fb9647b1c7351f16964',
            'availability': 'public',
            'thumbnail': r're:^https?://.*\.(png|jpe?g)$',
            'episode': 'Fatality Walkthrough: Deathstroke, Lex Luthor, Captain Marvel, Green Lantern, and Wonder Woman',
            'episode_number': 71,
            'episode_id': 'ffaec998-464d-11e7-a302-065410f210c4',
            'season': 'Season 2008',
            'tags': ['Gaming'],
            'series': 'Achievement Hunter',
            'display_id': 'md5:4465ce4f001735f9d7a2ae529a543d31',
            'season_id': 'ffa13340-464d-11e7-a302-065410f210c4',
            'season_number': 2008,
            'channel_id': '2cb2a70c-be50-46f5-93d7-84a1baabb4f7',
            'duration': 189,
            'release_timestamp': 1228317300,
            'release_date': '20081203',
        },
        'params': {'skip_download': True},
    }, {
        # brightcove fallback extraction needed
        'url': 'https://roosterteeth.com/watch/lets-play-2013-126',
        'info_dict': {
            'id': '17845',
            'ext': 'mp4',
            'title': 'WWE \'13',
            'availability': 'public',
            'series': 'Let\'s Play',
            'episode_number': 10,
            'season_id': 'ffa23d9c-464d-11e7-a302-065410f210c4',
            'channel_id': '75ba87e8-06fd-4482-bad9-52a4da2c6181',
            'episode': 'WWE \'13',
            'episode_id': 'ffdbe55e-464d-11e7-a302-065410f210c4',
            'thumbnail': r're:^https?://.*\.(png|jpe?g)$',
            'tags': ['Gaming', 'Our Favorites'],
            'description': 'md5:b4a5226d2bbcf0dafbde11a2ba27262d',
            'display_id': 'lets-play-2013-126',
            'season_number': 3,
            'season': 'Season 3',
            'release_timestamp': 1359999840,
            'release_date': '20130204',
        },
        'expected_warnings': ['Direct m3u8 URL returned HTTP Error 403'],
        'params': {'skip_download': True},
    }, {
        'url': 'http://achievementhunter.roosterteeth.com/episode/off-topic-the-achievement-hunter-podcast-2016-i-didn-t-think-it-would-pass-31',
        'only_matching': True,
    }, {
        'url': 'http://funhaus.roosterteeth.com/episode/funhaus-shorts-2016-austin-sucks-funhaus-shorts',
        'only_matching': True,
    }, {
        'url': 'http://screwattack.roosterteeth.com/episode/death-battle-season-3-mewtwo-vs-shadow',
        'only_matching': True,
    }, {
        'url': 'http://theknow.roosterteeth.com/episode/the-know-game-news-season-1-boring-steam-sales-are-better',
        'only_matching': True,
    }, {
        # only available for FIRST members
        'url': 'http://roosterteeth.com/episode/rt-docs-the-world-s-greatest-head-massage-the-world-s-greatest-head-massage-an-asmr-journey-part-one',
        'only_matching': True,
    }, {
        'url': 'https://roosterteeth.com/watch/million-dollars-but-season-2-million-dollars-but-the-game-announcement',
        'only_matching': True,
    }, {
        'url': 'https://roosterteeth.com/bonus-feature/camp-camp-soundtrack-another-rap-song-about-foreign-cars-richie-branson',
        'only_matching': True,
    }]

    # Fallback Brightcove account when the id cannot be parsed from the m3u8 URL.
    _BRIGHTCOVE_ACCOUNT_ID = '6203312018001'

    def _extract_brightcove_formats_and_subtitles(self, bc_id, url, m3u8_url):
        """Fallback path: delegate to the BrightcoveNew extractor when the
        direct m3u8 URL is rejected (see _real_extract)."""
        account_id = self._search_regex(
            r'/accounts/(\d+)/videos/', m3u8_url, 'account id', default=self._BRIGHTCOVE_ACCOUNT_ID)
        info = self._downloader.get_info_extractor('BrightcoveNew').extract(smuggle_url(
            f'https://players.brightcove.net/{account_id}/default_default/index.html?videoId={bc_id}',
            {'referrer': url}))
        return info['formats'], info['subtitles']

    def _real_extract(self, url):
        display_id = self._match_id(url)
        api_episode_url = f'{self._API_BASE_URL}/watch/{display_id}'

        try:
            video_data = self._download_json(
                api_episode_url + '/videos', display_id, 'Downloading video JSON metadata',
                headers={'Client-Type': 'web'})['data'][0]  # web client-type yields ad-free streams
        except ExtractorError as e:
            # A 403 with access=False means the episode is FIRST-members-only.
            if isinstance(e.cause, HTTPError) and e.cause.status == 403:
                if self._parse_json(e.cause.response.read().decode(), display_id).get('access') is False:
                    self.raise_login_required(
                        f'{display_id} is only available for FIRST members')
            raise

        # XXX: additional ad-free URL at video_data['links']['download'] but often gives 403 errors
        m3u8_url = video_data['attributes']['url']
        is_brightcove = traverse_obj(video_data, ('attributes', 'encoding_pipeline')) == 'brightcove'
        bc_id = traverse_obj(video_data, ('attributes', 'uid', {str}))

        try:
            formats, subtitles = self._extract_m3u8_formats_and_subtitles(
                m3u8_url, display_id, 'mp4', 'm3u8_native', m3u8_id='hls')
        except ExtractorError as e:
            # Brightcove-encoded videos sometimes 403 on the direct m3u8;
            # retry via the Brightcove player instead.
            if is_brightcove and bc_id and isinstance(e.cause, HTTPError) and e.cause.status == 403:
                self.report_warning(
                    'Direct m3u8 URL returned HTTP Error 403; retrying with Brightcove extraction')
                formats, subtitles = self._extract_brightcove_formats_and_subtitles(bc_id, url, m3u8_url)
            else:
                raise

        episode = self._download_json(
            api_episode_url, display_id,
            'Downloading episode JSON metadata')['data'][0]

        return {
            'display_id': display_id,
            'formats': formats,
            'subtitles': subtitles,
            **self._extract_video_info(episode),
        }


class RoosterTeethSeriesIE(RoosterTeethBaseIE):
    """Playlist extractor for whole series (optionally one season via ?season=N)."""

    _VALID_URL = r'https?://(?:.+?\.)?roosterteeth\.com/series/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://roosterteeth.com/series/rwby?season=7',
        'playlist_count': 13,
        'info_dict': {
            'id': 'rwby-7',
            'title': 'RWBY - Season 7',
        },
    }, {
        'url': 'https://roosterteeth.com/series/the-weird-place',
        'playlist_count': 7,
        'info_dict': {
            'id': 'the-weird-place',
            'title': 'The Weird Place',
        },
    }, {
        'url': 'https://roosterteeth.com/series/role-initiative',
        'playlist_mincount': 16,
        'info_dict': {
            'id': 'role-initiative',
            'title': 'Role Initiative',
        },
    }, {
        'url': 'https://roosterteeth.com/series/let-s-play-minecraft?season=9',
        'playlist_mincount': 50,
        'info_dict': {
            'id': 'let-s-play-minecraft-9',
            'title': 'Let\'s Play Minecraft - Season 9',
        },
    }]

    def _entries(self, series_id, season_number):
        """Yield url_result entries for every episode of the series (or of a
        single season when season_number is not None), plus bonus features."""
        display_id = join_nonempty(series_id, season_number)

        def yield_episodes(data):
            # Only entries with a canonical self-link are playable pages.
            for episode in traverse_obj(data, ('data', lambda _, v: v['canonical_links']['self'])):
                yield self.url_result(
                    urljoin('https://www.roosterteeth.com', episode['canonical_links']['self']),
                    RoosterTeethIE, **self._extract_video_info(episode))

        series_data = self._download_json(
            f'{self._API_BASE_URL}/shows/{series_id}/seasons?order=asc&order_by', display_id)
        for season_data in traverse_obj(series_data, ('data', lambda _, v: v['links']['episodes'])):
            idx = traverse_obj(season_data, ('attributes', 'number'))
            if season_number is not None and idx != season_number:
                continue
            yield from yield_episodes(self._download_json(
                urljoin(self._API_BASE, season_data['links']['episodes']), display_id,
                f'Downloading season {idx} JSON metadata', query={'per_page': 1000}))

        if season_number is None:  # extract series-level bonus features
            yield from yield_episodes(self._download_json(
                f'{self._API_BASE_URL}/shows/{series_id}/bonus_features?order=asc&order_by&per_page=1000',
                display_id, 'Downloading bonus features JSON metadata', fatal=False))

    def _real_extract(self, url):
        series_id = self._match_id(url)
        season_number = traverse_obj(parse_qs(url), ('season', 0), expected_type=int_or_none)

        # LazyList lets us peek at entries[0] for the playlist title without
        # consuming the generator.
        entries = LazyList(self._entries(series_id, season_number))
        return self.playlist_result(
            entries,
            join_nonempty(series_id, season_number),
            join_nonempty(entries[0].get('series'), season_number, delim=' - Season '))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/gamedevtv.py
yt_dlp/extractor/gamedevtv.py
import json

from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
    ExtractorError,
    clean_html,
    int_or_none,
    join_nonempty,
    parse_iso8601,
    str_or_none,
    url_or_none,
)
from ..utils.traversal import traverse_obj


class GameDevTVDashboardIE(InfoExtractor):
    """Extractor for purchased GameDev.tv dashboard courses and single lectures.

    Requires an account login; without credentials extraction is refused in
    _real_initialize.
    """

    _VALID_URL = r'https?://(?:www\.)?gamedev\.tv/dashboard/courses/(?P<course_id>\d+)(?:/(?P<lecture_id>\d+))?'
    _NETRC_MACHINE = 'gamedevtv'
    _TESTS = [{
        'url': 'https://www.gamedev.tv/dashboard/courses/25',
        'info_dict': {
            'id': '25',
            'title': 'Complete Blender Creator 3: Learn 3D Modelling for Beginners',
            'tags': ['blender', 'course', 'all', 'box modelling', 'sculpting'],
            'categories': ['Blender', '3D Art'],
            'thumbnail': 'https://gamedev-files.b-cdn.net/courses/qisc9pmu1jdc.jpg',
            'upload_date': '20220516',
            'timestamp': 1652694420,
            'modified_date': '20241027',
            'modified_timestamp': 1730049658,
        },
        'playlist_count': 100,
    }, {
        'url': 'https://www.gamedev.tv/dashboard/courses/63/2279',
        'info_dict': {
            'id': 'df04f4d8-68a4-4756-a71b-9ca9446c3a01',
            'ext': 'mp4',
            'modified_timestamp': 1701695752,
            'upload_date': '20230504',
            'episode': 'MagicaVoxel Community Course Introduction',
            'series_id': '63',
            'title': 'MagicaVoxel Community Course Introduction',
            'timestamp': 1683195397,
            'modified_date': '20231204',
            'categories': ['3D Art', 'MagicaVoxel'],
            'season': 'MagicaVoxel Community Course',
            'tags': ['MagicaVoxel', 'all', 'course'],
            'series': 'MagicaVoxel 3D Art Mini Course',
            'duration': 1405,
            'episode_number': 1,
            'season_number': 1,
            'season_id': '219',
            'description': 'md5:a378738c5bbec1c785d76c067652d650',
            'display_id': '63-219-2279',
            'alt_title': '1_CC_MVX MagicaVoxel Community Course Introduction.mp4',
            'thumbnail': 'https://vz-23691c65-6fa.b-cdn.net/df04f4d8-68a4-4756-a71b-9ca9446c3a01/thumbnail.jpg',
        },
    }]
    # Shared across instances: holds the Authorization header after login.
    _API_HEADERS = {}

    def _perform_login(self, username, password):
        """Log in to the API and store the bearer token in _API_HEADERS."""
        try:
            response = self._download_json(
                'https://api.gamedev.tv/api/students/login', None, 'Logging in',
                headers={'Content-Type': 'application/json'},
                data=json.dumps({
                    'email': username,
                    'password': password,
                    'cart_items': [],
                }).encode())
        except ExtractorError as e:
            if isinstance(e.cause, HTTPError) and e.cause.status == 401:
                raise ExtractorError('Invalid username/password', expected=True)
            raise

        self._API_HEADERS['Authorization'] = f'{response["token_type"]} {response["access_token"]}'

    def _real_initialize(self):
        # All course content is behind purchase + login; bail out early
        # if _perform_login did not run or failed.
        if not self._API_HEADERS.get('Authorization'):
            self.raise_login_required(
                'This content is only available with purchase', method='password')

    def _entries(self, data, course_id, course_info, selected_lecture):
        """Yield one info dict per lecture; when selected_lecture is set,
        only that lecture id is yielded."""
        for section in traverse_obj(data, ('sections', ..., {dict})):
            section_info = traverse_obj(section, {
                'season_id': ('id', {str_or_none}),
                'season': ('title', {str}),
                'season_number': ('order', {int_or_none}),
            })
            for lecture in traverse_obj(section, ('lectures', lambda _, v: url_or_none(v['video']['playListUrl']))):
                if selected_lecture and str(lecture.get('id')) != selected_lecture:
                    continue
                display_id = join_nonempty(course_id, section_info.get('season_id'), lecture.get('id'))
                formats, subtitles = self._extract_m3u8_formats_and_subtitles(
                    lecture['video']['playListUrl'], display_id, 'mp4', m3u8_id='hls')
                yield {
                    **course_info,
                    **section_info,
                    'id': display_id,  # fallback
                    'display_id': display_id,
                    'formats': formats,
                    'subtitles': subtitles,
                    'series': course_info.get('title'),
                    'series_id': course_id,
                    **traverse_obj(lecture, {
                        'id': ('video', 'guid', {str}),
                        'title': ('title', {str}),
                        'alt_title': ('video', 'title', {str}),
                        'description': ('description', {clean_html}),
                        'episode': ('title', {str}),
                        'episode_number': ('order', {int_or_none}),
                        'duration': ('video', 'duration_in_sec', {int_or_none}),
                        'timestamp': ('video', 'created_at', {parse_iso8601}),
                        'modified_timestamp': ('video', 'updated_at', {parse_iso8601}),
                        'thumbnail': ('video', 'thumbnailUrl', {url_or_none}),
                    }),
                }

    def _real_extract(self, url):
        course_id, lecture_id = self._match_valid_url(url).group('course_id', 'lecture_id')
        data = self._download_json(
            f'https://api.gamedev.tv/api/courses/my/{course_id}', course_id,
            headers=self._API_HEADERS)['data']
        course_info = traverse_obj(data, {
            'title': ('title', {str}),
            'tags': ('tags', ..., 'name', {str}),
            'categories': ('categories', ..., 'title', {str}),
            'timestamp': ('created_at', {parse_iso8601}),
            'modified_timestamp': ('updated_at', {parse_iso8601}),
            'thumbnail': ('image', {url_or_none}),
        })

        entries = self._entries(data, course_id, course_info, lecture_id)
        if lecture_id:
            # Single-lecture URL: return just that lecture, not a playlist.
            lecture = next(entries, None)
            if not lecture:
                raise ExtractorError('Lecture not found')
            return lecture
        return self.playlist_result(entries, course_id, **course_info)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/flextv.py
yt_dlp/extractor/flextv.py
from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
    ExtractorError,
    UserNotLive,
    int_or_none,
    join_nonempty,
    parse_iso8601,
    str_or_none,
    url_or_none,
)
from ..utils.traversal import traverse_obj


class FlexTVIE(InfoExtractor):
    """Extractor for ttinglive.com (formerly flextv.co.kr) live channels."""

    IE_NAME = 'ttinglive'
    IE_DESC = '띵라이브 (formerly FlexTV)'
    _VALID_URL = r'https?://(?:www\.)?(?:ttinglive\.com|flextv\.co\.kr)/channels/(?P<id>\d+)/live'
    _TESTS = [{
        'url': 'https://www.flextv.co.kr/channels/231638/live',
        'info_dict': {
            'id': '231638',
            'ext': 'mp4',
            'title': r're:^214하나만\.\.\. ',
            'thumbnail': r're:^https?://.+\.jpg',
            'upload_date': r're:\d{8}',
            'timestamp': int,
            'live_status': 'is_live',
            'channel': 'Hi별',
            'channel_id': '244396',
        },
        'skip': 'The channel is offline',
    }, {
        'url': 'https://www.flextv.co.kr/channels/746/live',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        channel_id = self._match_id(url)

        try:
            stream_data = self._download_json(
                f'https://api.ttinglive.com/api/channels/{channel_id}/stream',
                channel_id, query={'option': 'all'})
        except ExtractorError as e:
            # The API answers 400 when the channel is not currently live.
            if isinstance(e.cause, HTTPError) and e.cause.status == 400:
                raise UserNotLive(video_id=channel_id)
            raise

        formats = []
        for stream in traverse_obj(stream_data, ('sources', ..., {dict})):
            # Amazon IVS sources expose a single master m3u8 URL.
            if stream.get('format') == 'ivs' and url_or_none(stream.get('url')):
                formats.extend(self._extract_m3u8_formats(
                    stream['url'], channel_id, 'mp4', live=True, fatal=False, m3u8_id='ivs'))
            # Non-IVS sources list per-resolution HLS and FLV URLs.
            for format_type in ['hls', 'flv']:
                for data in traverse_obj(stream, (
                        'urlDetail', format_type, 'resolution', lambda _, v: url_or_none(v['url']))):
                    formats.append({
                        'format_id': join_nonempty(format_type, data.get('suffixName'), delim=''),
                        'url': data['url'],
                        'height': int_or_none(data.get('resolution')),
                        'ext': 'mp4' if format_type == 'hls' else 'flv',
                        'protocol': 'm3u8_native' if format_type == 'hls' else 'http',
                    })

        return {
            'id': channel_id,
            'formats': formats,
            'is_live': True,
            **traverse_obj(stream_data, {
                'title': ('stream', 'title', {str}),
                'timestamp': ('stream', 'createdAt', {parse_iso8601}),
                'thumbnail': ('thumbUrl', {url_or_none}),
                'channel': ('owner', 'name', {str}),
                'channel_id': ('owner', 'id', {str_or_none}),
            }),
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/qdance.py
yt_dlp/extractor/qdance.py
import json
import time

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    jwt_decode_hs256,
    str_or_none,
    traverse_obj,
    try_call,
    url_or_none,
)


class QDanceIE(InfoExtractor):
    """Extractor for Q-Dance Network VODs and livestreams (login required)."""

    _NETRC_MACHINE = 'qdance'
    _VALID_URL = r'https?://(?:www\.)?q-dance\.com/network/(?:library|live)/(?P<id>[\w-]+)'
    _TESTS = [{
        'note': 'vod',
        'url': 'https://www.q-dance.com/network/library/146542138',
        'info_dict': {
            'id': '146542138',
            'ext': 'mp4',
            'title': 'Sound Rush [LIVE] | Defqon.1 Weekend Festival 2022 | Friday | RED',
            'display_id': 'sound-rush-live-v3-defqon-1-weekend-festival-2022-friday-red',
            'description': 'Relive Defqon.1 - Primal Energy 2022 with the sounds of Sound Rush LIVE at the RED on Friday! 🔥',
            'season': 'Defqon.1 Weekend Festival 2022',
            'season_id': '31840632',
            'series': 'Defqon.1',
            'series_id': '31840378',
            'thumbnail': 'https://images.q-dance.network/1674829540-20220624171509-220624171509_delio_dn201093-2.jpg',
            'availability': 'premium_only',
            'duration': 1829,
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'note': 'livestream',
        'url': 'https://www.q-dance.com/network/live/149170353',
        'info_dict': {
            'id': '149170353',
            'ext': 'mp4',
            'title': r're:^Defqon\.1 2023 - Friday - RED',
            'display_id': 'defqon-1-2023-friday-red',
            'description': 'md5:3c73fbbd4044e578e696adfc64019163',
            'season': 'Defqon.1 Weekend Festival 2023',
            'season_id': '141735599',
            'series': 'Defqon.1',
            'series_id': '31840378',
            'thumbnail': 'https://images.q-dance.network/1686849069-area-thumbs_red.png',
            'availability': 'subscriber_only',
            'live_status': 'is_live',
            'channel_id': 'qdancenetwork.video_149170353',
        },
        'skip': 'Completed livestream',
    }, {
        'note': 'vod with alphanumeric id',
        'url': 'https://www.q-dance.com/network/library/WhDleSIWSfeT3Q9ObBKBeA',
        'info_dict': {
            'id': 'WhDleSIWSfeT3Q9ObBKBeA',
            'ext': 'mp4',
            'title': 'Aftershock I Defqon.1 Weekend Festival 2023 I Sunday I BLUE',
            'display_id': 'naam-i-defqon-1-weekend-festival-2023-i-dag-i-podium',
            'description': 'Relive Defqon.1 Path of the Warrior with Aftershock at the BLUE 🔥',
            'series': 'Defqon.1',
            'series_id': '31840378',
            'season': 'Defqon.1 Weekend Festival 2023',
            'season_id': '141735599',
            'duration': 3507,
            'availability': 'premium_only',
            'thumbnail': 'https://images.q-dance.network/1698158361-230625-135716-defqon-1-aftershock.jpg',
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://www.q-dance.com/network/library/-uRFKXwmRZGVnve7av9uqA',
        'only_matching': True,
    }]

    # JWT access/refresh tokens, mirrored from the site cookies.
    _access_token = None
    _refresh_token = None

    def _call_login_api(self, data, note='Logging in'):
        """Call the ID-T auth endpoint (password login or refresh) and store
        the returned tokens as cookies; raises on auth failure."""
        login = self._download_json(
            'https://members.id-t.com/api/auth/login', None, note, headers={
                'content-type': 'application/json',
                'brand': 'qdance',
                'origin': 'https://www.q-dance.com',
                'referer': 'https://www.q-dance.com/',
            }, data=json.dumps(data, separators=(',', ':')).encode(),
            expected_status=lambda x: True)  # parse the error body ourselves

        tokens = traverse_obj(login, ('data', {
            '_id-t-accounts-token': ('accessToken', {str}),
            '_id-t-accounts-refresh': ('refreshToken', {str}),
            '_id-t-accounts-id-token': ('idToken', {str}),
        }))

        if not tokens.get('_id-t-accounts-token'):
            error = ': '.join(traverse_obj(login, ('error', ('code', 'message'), {str})))
            if 'validation_error' not in error:
                raise ExtractorError(f'Q-Dance API said "{error}"')
            msg = 'Invalid username or password' if 'email' in data else 'Refresh token has expired'
            raise ExtractorError(msg, expected=True)

        for name, value in tokens.items():
            self._set_cookie('.q-dance.com', name, value)

    def _perform_login(self, username, password):
        self._call_login_api({'email': username, 'password': password})

    def _real_initialize(self):
        # Pick up tokens from cookies (set either by _call_login_api or by
        # cookies imported from a browser session).
        cookies = self._get_cookies('https://www.q-dance.com/')
        self._refresh_token = try_call(lambda: cookies['_id-t-accounts-refresh'].value)
        self._access_token = try_call(lambda: cookies['_id-t-accounts-token'].value)
        if not self._access_token:
            self.raise_login_required()

    def _get_auth(self):
        """Return the Authorization header, refreshing the access token first
        if its JWT 'exp' claim indicates it is no longer valid."""
        # NOTE(review): this compares exp against now-120s, i.e. the token is
        # only treated as expired 120s *after* its exp claim — presumably a
        # deliberate grace window for clock skew; confirm before changing.
        if (try_call(lambda: jwt_decode_hs256(self._access_token)['exp']) or 0) <= int(time.time() - 120):
            if not self._refresh_token:
                raise ExtractorError(
                    'Cannot refresh access token, login with yt-dlp or refresh cookies in browser')
            self._call_login_api({'refreshToken': self._refresh_token}, note='Refreshing access token')
            self._real_initialize()

        return {'Authorization': self._access_token}

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        data = self._search_nuxt_data(webpage, video_id, traverse=('data', 0, 'data'))

        def extract_availability(level):
            # Subscription level thresholds: >=20 premium, >=15 subscriber.
            level = int_or_none(level) or 0
            return self._availability(
                needs_premium=(level >= 20), needs_subscription=(level >= 15), needs_auth=True)

        info = traverse_obj(data, {
            'title': ('title', {str.strip}),
            'description': ('description', {str.strip}),
            'display_id': ('slug', {str}),
            'thumbnail': ('thumbnail', {url_or_none}),
            'duration': ('durationInSeconds', {int_or_none}, filter),
            'availability': ('subscription', 'level', {extract_availability}),
            'is_live': ('type', {lambda x: x.lower() == 'live'}),
            'artist': ('acts', ..., {str}),
            'series': ('event', 'title', {str.strip}),
            'series_id': ('event', 'id', {str_or_none}),
            'season': ('eventEdition', 'title', {str.strip}),
            'season_id': ('eventEdition', 'id', {str_or_none}),
            'channel_id': ('pubnub', 'channelName', {str}),
        })

        stream = self._download_json(
            f'https://dc9h6qmsoymbq.cloudfront.net/api/content/videos/{video_id}/url',
            video_id, headers=self._get_auth(), expected_status=401)

        m3u8_url = traverse_obj(stream, ('data', 'url', {url_or_none}))
        if not m3u8_url and traverse_obj(stream, ('error', 'code')) == 'unauthorized':
            raise ExtractorError('Your account does not have access to this content', expected=True)

        formats = self._extract_m3u8_formats(
            m3u8_url, video_id, fatal=False, live=True) if m3u8_url else []
        if not formats:
            self.raise_no_formats('No active streams found', expected=bool(info.get('is_live')))

        return {
            **info,
            'id': video_id,
            'formats': formats,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/orf.py
yt_dlp/extractor/orf.py
import base64 import re from .common import InfoExtractor from ..utils import ( clean_html, determine_ext, float_or_none, int_or_none, make_archive_id, mimetype2ext, orderedSet, parse_age_limit, parse_iso8601, remove_end, str_or_none, strip_jsonp, try_call, unified_strdate, url_or_none, ) from ..utils.traversal import traverse_obj class ORFRadioIE(InfoExtractor): IE_NAME = 'orf:radio' STATION_INFO = { 'fm4': ('fm4', 'fm4', 'orffm4'), 'noe': ('noe', 'oe2n', 'orfnoe'), 'wien': ('wie', 'oe2w', 'orfwie'), 'burgenland': ('bgl', 'oe2b', 'orfbgl'), 'ooe': ('ooe', 'oe2o', 'orfooe'), 'steiermark': ('stm', 'oe2st', 'orfstm'), 'kaernten': ('ktn', 'oe2k', 'orfktn'), 'salzburg': ('sbg', 'oe2s', 'orfsbg'), 'tirol': ('tir', 'oe2t', 'orftir'), 'vorarlberg': ('vbg', 'oe2v', 'orfvbg'), 'oe3': ('oe3', 'oe3', 'orfoe3'), 'oe1': ('oe1', 'oe1', 'orfoe1'), } _STATION_RE = '|'.join(map(re.escape, STATION_INFO.keys())) _VALID_URL = rf'''(?x) https?://(?: (?P<station>{_STATION_RE})\.orf\.at/player| radiothek\.orf\.at/(?P<station2>{_STATION_RE}) )/(?P<date>[0-9]+)/(?P<show>\w+)''' _TESTS = [{ 'url': 'https://radiothek.orf.at/ooe/20220801/OGMO', 'info_dict': { 'id': 'OGMO', 'title': 'Guten Morgen OÖ', 'description': 'md5:a3f6083399ef92b8cbe2d421b180835a', }, 'playlist': [{ 'md5': 'f33147d954a326e338ea52572c2810e8', 'info_dict': { 'id': '2022-08-01_0459_tl_66_7DaysMon1_319062', 'ext': 'mp3', 'title': 'Guten Morgen OÖ', 'upload_date': '20220801', 'duration': 18000, 'timestamp': 1659322789, 'description': 'md5:a3f6083399ef92b8cbe2d421b180835a', }, }], }, { 'url': 'https://ooe.orf.at/player/20220801/OGMO', 'info_dict': { 'id': 'OGMO', 'title': 'Guten Morgen OÖ', 'description': 'md5:a3f6083399ef92b8cbe2d421b180835a', }, 'playlist': [{ 'md5': 'f33147d954a326e338ea52572c2810e8', 'info_dict': { 'id': '2022-08-01_0459_tl_66_7DaysMon1_319062', 'ext': 'mp3', 'title': 'Guten Morgen OÖ', 'upload_date': '20220801', 'duration': 18000, 'timestamp': 1659322789, 'description': 
'md5:a3f6083399ef92b8cbe2d421b180835a', }, }], }, { 'url': 'http://fm4.orf.at/player/20170107/4CC', 'only_matching': True, }, { 'url': 'https://noe.orf.at/player/20200423/NGM', 'only_matching': True, }, { 'url': 'https://wien.orf.at/player/20200423/WGUM', 'only_matching': True, }, { 'url': 'https://burgenland.orf.at/player/20200423/BGM', 'only_matching': True, }, { 'url': 'https://steiermark.orf.at/player/20200423/STGMS', 'only_matching': True, }, { 'url': 'https://kaernten.orf.at/player/20200423/KGUMO', 'only_matching': True, }, { 'url': 'https://salzburg.orf.at/player/20200423/SGUM', 'only_matching': True, }, { 'url': 'https://tirol.orf.at/player/20200423/TGUMO', 'only_matching': True, }, { 'url': 'https://vorarlberg.orf.at/player/20200423/VGUM', 'only_matching': True, }, { 'url': 'https://oe3.orf.at/player/20200424/3WEK', 'only_matching': True, }, { 'url': 'http://oe1.orf.at/player/20170108/456544', 'md5': '34d8a6e67ea888293741c86a099b745b', 'info_dict': { 'id': '2017-01-08_0759_tl_51_7DaysSun6_256141', 'ext': 'mp3', 'title': 'Morgenjournal', 'duration': 609, 'timestamp': 1483858796, 'upload_date': '20170108', }, 'skip': 'Shows from ORF radios are only available for 7 days.', }] def _entries(self, data, station): _, loop_station, old_ie = self.STATION_INFO[station] for info in data['streams']: item_id = info.get('loopStreamId') if not item_id: continue video_id = item_id.replace('.mp3', '') yield { 'id': video_id, 'ext': 'mp3', 'url': f'https://loopstream01.apa.at/?channel={loop_station}&id={item_id}', '_old_archive_ids': [make_archive_id(old_ie, video_id)], 'title': data.get('title'), 'description': clean_html(data.get('subtitle')), 'duration': try_call(lambda: (info['end'] - info['start']) / 1000), 'timestamp': int_or_none(info.get('start'), scale=1000), 'series': data.get('programTitle'), } def _real_extract(self, url): station, station2, show_date, show_id = self._match_valid_url(url).group('station', 'station2', 'date', 'show') api_station, _, _ = 
self.STATION_INFO[station or station2] data = self._download_json( f'http://audioapi.orf.at/{api_station}/api/json/current/broadcast/{show_id}/{show_date}', show_id) return self.playlist_result( self._entries(data, station or station2), show_id, data.get('title'), clean_html(data.get('subtitle'))) class ORFPodcastIE(InfoExtractor): IE_NAME = 'orf:podcast' _STATION_RE = '|'.join(map(re.escape, ( 'bgl', 'fm4', 'ktn', 'noe', 'oe1', 'oe3', 'ooe', 'sbg', 'stm', 'tir', 'tv', 'vbg', 'wie'))) _VALID_URL = rf'https?://sound\.orf\.at/podcast/(?P<station>{_STATION_RE})/(?P<show>[\w-]+)/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://sound.orf.at/podcast/oe3/fruehstueck-bei-mir/nicolas-stockhammer-15102023', 'md5': '526a5700e03d271a1505386a8721ab9b', 'info_dict': { 'id': 'nicolas-stockhammer-15102023', 'ext': 'mp3', 'title': 'Nicolas Stockhammer (15.10.2023)', 'duration': 3396.0, 'series': 'Frühstück bei mir', }, 'skip': 'ORF podcasts are only available for a limited time', }] def _real_extract(self, url): station, show, show_id = self._match_valid_url(url).group('station', 'show', 'id') data = self._download_json( f'https://audioapi.orf.at/radiothek/api/2.0/podcast/{station}/{show}/{show_id}', show_id) return { 'id': show_id, 'ext': 'mp3', 'vcodec': 'none', **traverse_obj(data, ('payload', { 'url': ('enclosures', 0, 'url'), 'ext': ('enclosures', 0, 'type', {mimetype2ext}), 'title': 'title', 'description': ('description', {clean_html}), 'duration': ('duration', {float_or_none(scale=1000)}), 'series': ('podcast', 'title'), })), } class ORFIPTVIE(InfoExtractor): IE_NAME = 'orf:iptv' IE_DESC = 'iptv.ORF.at' _VALID_URL = r'https?://iptv\.orf\.at/(?:#/)?stories/(?P<id>\d+)' _TEST = { 'url': 'http://iptv.orf.at/stories/2275236/', 'md5': 'c8b22af4718a4b4af58342529453e3e5', 'info_dict': { 'id': '350612', 'ext': 'flv', 'title': 'Weitere Evakuierungen um Vulkan Calbuco', 'description': 'md5:d689c959bdbcf04efeddedbf2299d633', 'duration': 68.197, 'thumbnail': r're:^https?://.*\.jpg$', 
'upload_date': '20150425', }, } def _real_extract(self, url): story_id = self._match_id(url) webpage = self._download_webpage( f'http://iptv.orf.at/stories/{story_id}', story_id) video_id = self._search_regex( r'data-video(?:id)?="(\d+)"', webpage, 'video id') data = self._download_json( f'http://bits.orf.at/filehandler/static-api/json/current/data.json?file={video_id}', video_id)[0] duration = float_or_none(data['duration'], 1000) video = data['sources']['default'] load_balancer_url = video['loadBalancerUrl'] abr = int_or_none(video.get('audioBitrate')) vbr = int_or_none(video.get('bitrate')) fps = int_or_none(video.get('videoFps')) width = int_or_none(video.get('videoWidth')) height = int_or_none(video.get('videoHeight')) thumbnail = video.get('preview') rendition = self._download_json( load_balancer_url, video_id, transform_source=strip_jsonp) f = { 'abr': abr, 'vbr': vbr, 'fps': fps, 'width': width, 'height': height, } formats = [] for format_id, format_url in rendition['redirect'].items(): if format_id == 'rtmp': ff = f.copy() ff.update({ 'url': format_url, 'format_id': format_id, }) formats.append(ff) elif determine_ext(format_url) == 'f4m': formats.extend(self._extract_f4m_formats( format_url, video_id, f4m_id=format_id)) elif determine_ext(format_url) == 'm3u8': formats.extend(self._extract_m3u8_formats( format_url, video_id, 'mp4', m3u8_id=format_id)) else: continue title = remove_end(self._og_search_title(webpage), ' - iptv.ORF.at') description = self._og_search_description(webpage) upload_date = unified_strdate(self._html_search_meta( 'dc.date', webpage, 'upload date')) return { 'id': video_id, 'title': title, 'description': description, 'duration': duration, 'thumbnail': thumbnail, 'upload_date': upload_date, 'formats': formats, } class ORFFM4StoryIE(InfoExtractor): IE_NAME = 'orf:fm4:story' IE_DESC = 'fm4.orf.at stories' _VALID_URL = r'https?://fm4\.orf\.at/stories/(?P<id>\d+)' _TEST = { 'url': 'http://fm4.orf.at/stories/2865738/', 'playlist': [{ 
'md5': 'e1c2c706c45c7b34cf478bbf409907ca', 'info_dict': { 'id': '547792', 'ext': 'flv', 'title': 'Manu Delago und Inner Tongue live', 'description': 'Manu Delago und Inner Tongue haben bei der FM4 Soundpark Session live alles gegeben. Hier gibt es Fotos und die gesamte Session als Video.', 'duration': 1748.52, 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20170913', }, }, { 'md5': 'c6dd2179731f86f4f55a7b49899d515f', 'info_dict': { 'id': '547798', 'ext': 'flv', 'title': 'Manu Delago und Inner Tongue live (2)', 'duration': 1504.08, 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20170913', 'description': 'Manu Delago und Inner Tongue haben bei der FM4 Soundpark Session live alles gegeben. Hier gibt es Fotos und die gesamte Session als Video.', }, }], } def _real_extract(self, url): story_id = self._match_id(url) webpage = self._download_webpage(url, story_id) entries = [] all_ids = orderedSet(re.findall(r'data-video(?:id)?="(\d+)"', webpage)) for idx, video_id in enumerate(all_ids): data = self._download_json( f'http://bits.orf.at/filehandler/static-api/json/current/data.json?file={video_id}', video_id)[0] duration = float_or_none(data['duration'], 1000) video = data['sources']['q8c'] load_balancer_url = video['loadBalancerUrl'] abr = int_or_none(video.get('audioBitrate')) vbr = int_or_none(video.get('bitrate')) fps = int_or_none(video.get('videoFps')) width = int_or_none(video.get('videoWidth')) height = int_or_none(video.get('videoHeight')) thumbnail = video.get('preview') rendition = self._download_json( load_balancer_url, video_id, transform_source=strip_jsonp) f = { 'abr': abr, 'vbr': vbr, 'fps': fps, 'width': width, 'height': height, } formats = [] for format_id, format_url in rendition['redirect'].items(): if format_id == 'rtmp': ff = f.copy() ff.update({ 'url': format_url, 'format_id': format_id, }) formats.append(ff) elif determine_ext(format_url) == 'f4m': formats.extend(self._extract_f4m_formats( format_url, video_id, f4m_id=format_id)) 
elif determine_ext(format_url) == 'm3u8': formats.extend(self._extract_m3u8_formats( format_url, video_id, 'mp4', m3u8_id=format_id)) else: continue title = remove_end(self._og_search_title(webpage), ' - fm4.ORF.at') if idx >= 1: # Titles are duplicates, make them unique title += ' (' + str(idx + 1) + ')' description = self._og_search_description(webpage) upload_date = unified_strdate(self._html_search_meta( 'dc.date', webpage, 'upload date')) entries.append({ 'id': video_id, 'title': title, 'description': description, 'duration': duration, 'thumbnail': thumbnail, 'upload_date': upload_date, 'formats': formats, }) return self.playlist_result(entries) class ORFONIE(InfoExtractor): IE_NAME = 'orf:on' _VALID_URL = r'https?://on\.orf\.at/video/(?P<id>\d+)(?:/(?P<segment>\d+))?' _TESTS = [{ 'url': 'https://on.orf.at/video/14210000/school-of-champions-48', 'info_dict': { 'id': '14210000', 'ext': 'mp4', 'duration': 2651.08, 'thumbnail': 'https://api-tvthek.orf.at/assets/segments/0167/98/thumb_16697671_segments_highlight_teaser.jpeg', 'title': 'School of Champions (4/8)', 'description': 'md5:d09ad279fc2e8502611e7648484b6afd', 'media_type': 'episode', 'timestamp': 1706558922, 'upload_date': '20240129', 'release_timestamp': 1706472362, 'release_date': '20240128', 'modified_timestamp': 1712756663, 'modified_date': '20240410', '_old_archive_ids': ['orftvthek 14210000'], }, }, { 'url': 'https://on.orf.at/video/3220355', 'md5': 'f94d98e667cf9a3851317efb4e136662', 'info_dict': { 'id': '3220355', 'ext': 'mp4', 'duration': 445.04, 'thumbnail': 'https://api-tvthek.orf.at/assets/segments/0002/60/thumb_159573_segments_highlight_teaser.png', 'title': '50 Jahre Burgenland: Der Festumzug', 'description': 'md5:1560bf855119544ee8c4fa5376a2a6b0', 'media_type': 'episode', 'timestamp': 52916400, 'upload_date': '19710905', 'release_timestamp': 52916400, 'release_date': '19710905', 'modified_timestamp': 1498536049, 'modified_date': '20170627', '_old_archive_ids': ['orftvthek 3220355'], }, }, { 
# Video with multiple segments selecting the second segment 'url': 'https://on.orf.at/video/14226549/15639808/jugendbande-einbrueche-aus-langeweile', 'md5': '90f4ebff86b4580837b8a361d0232a9e', 'info_dict': { 'id': '15639808', 'ext': 'mp4', 'duration': 97.707, 'thumbnail': 'https://api-tvthek.orf.at/assets/segments/0175/43/thumb_17442704_segments_highlight_teaser.jpg', 'title': 'Jugendbande: Einbrüche aus Langeweile', 'description': 'md5:193df0bf0d91cf16830c211078097120', 'media_type': 'segment', 'timestamp': 1715792400, 'upload_date': '20240515', 'modified_timestamp': 1715794394, 'modified_date': '20240515', '_old_archive_ids': ['orftvthek 15639808'], }, 'params': {'noplaylist': True}, }, { # Video with multiple segments and no combined version 'url': 'https://on.orf.at/video/14227864/formel-1-grosser-preis-von-monaco-2024', 'info_dict': { '_type': 'multi_video', 'id': '14227864', 'duration': 18410.52, 'thumbnail': 'https://api-tvthek.orf.at/assets/segments/0176/04/thumb_17503881_segments_highlight_teaser.jpg', 'title': 'Formel 1: Großer Preis von Monaco 2024', 'description': 'md5:aeeb010710ccf70ce28ccb4482243d4f', 'media_type': 'episode', 'timestamp': 1716721200, 'upload_date': '20240526', 'release_timestamp': 1716721802, 'release_date': '20240526', 'modified_timestamp': 1716967501, 'modified_date': '20240529', }, 'playlist_count': 42, }, { # Video with multiple segments, but with combined version 'url': 'https://on.orf.at/video/14228172', 'info_dict': { 'id': '14228172', 'ext': 'mp4', 'duration': 3294.878, 'thumbnail': 'https://api-tvthek.orf.at/assets/segments/0176/17/thumb_17516455_segments_highlight_teaser.jpg', 'title': 'Willkommen Österreich mit Stermann & Grissemann', 'description': 'md5:5de034d033a9c27f989343be3bbd4839', 'media_type': 'episode', 'timestamp': 1716926584, 'upload_date': '20240528', 'release_timestamp': 1716919202, 'release_date': '20240528', 'modified_timestamp': 1716968045, 'modified_date': '20240529', '_old_archive_ids': ['orftvthek 
14228172'], }, }] @staticmethod def _parse_metadata(api_json): return traverse_obj(api_json, { 'id': ('id', {int}, {str_or_none}), 'age_limit': ('age_classification', {parse_age_limit}), 'duration': ('exact_duration', {float_or_none(scale=1000)}), 'title': (('title', 'headline'), {str}), 'description': (('description', 'teaser_text'), {str}), 'media_type': ('video_type', {str}), 'thumbnail': ('_embedded', 'image', 'public_urls', 'highlight_teaser', 'url', {url_or_none}), 'timestamp': (('date', 'episode_date'), {parse_iso8601}), 'release_timestamp': ('release_date', {parse_iso8601}), 'modified_timestamp': ('updated_at', {parse_iso8601}), }, get_all=False) def _extract_video_info(self, video_id, api_json): formats, subtitles = [], {} for manifest_type in traverse_obj(api_json, ('sources', {dict.keys}, ...)): for manifest_url in traverse_obj(api_json, ('sources', manifest_type, ..., 'src', {url_or_none})): if manifest_type == 'hls': fmts, subs = self._extract_m3u8_formats_and_subtitles( manifest_url, video_id, fatal=False, m3u8_id='hls') elif manifest_type == 'dash': fmts, subs = self._extract_mpd_formats_and_subtitles( manifest_url, video_id, fatal=False, mpd_id='dash') else: continue formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) for sub_url in traverse_obj(api_json, ( '_embedded', 'subtitle', ('xml_url', 'sami_url', 'stl_url', 'ttml_url', 'srt_url', 'vtt_url'), {url_or_none})): self._merge_subtitles({'de': [{'url': sub_url}]}, target=subtitles) return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, '_old_archive_ids': [make_archive_id('ORFTVthek', video_id)], **self._parse_metadata(api_json), } def _real_extract(self, url): video_id, segment_id = self._match_valid_url(url).group('id', 'segment') encrypted_id = base64.b64encode(f'3dSlfek03nsLKdj4Jsd{video_id}'.encode()).decode() api_json = self._download_json( f'https://api-tvthek.orf.at/api/v4.3/public/episode/encrypted/{encrypted_id}', video_id) if traverse_obj(api_json, 
'is_drm_protected'): self.report_drm(video_id) segments = traverse_obj(api_json, ('_embedded', 'segments', lambda _, v: v['id'])) selected_segment = traverse_obj(segments, (lambda _, v: str(v['id']) == segment_id, any)) # selected_segment will be falsy if input URL did not include a valid segment_id if selected_segment and not self._yes_playlist(video_id, segment_id, playlist_label='episode', video_label='segment'): return self._extract_video_info(segment_id, selected_segment) # Even some segmented videos have an unsegmented version available in API response root if (self._configuration_arg('prefer_segments_playlist') or not traverse_obj(api_json, ('sources', ..., ..., 'src', {url_or_none}))): return self.playlist_result( (self._extract_video_info(str(segment['id']), segment) for segment in segments), video_id, **self._parse_metadata(api_json), multi_video=True) return self._extract_video_info(video_id, api_json)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/kompas.py
yt_dlp/extractor/kompas.py
from .jixie import JixieBaseIE class KompasVideoIE(JixieBaseIE): _VALID_URL = r'https?://video\.kompas\.com/\w+/(?P<id>\d+)/(?P<slug>[\w-]+)' _TESTS = [{ 'url': 'https://video.kompas.com/watch/164474/kim-jong-un-siap-kirim-nuklir-lawan-as-dan-korsel', 'info_dict': { 'id': '164474', 'ext': 'mp4', 'title': 'Kim Jong Un Siap Kirim Nuklir Lawan AS dan Korsel', 'description': 'md5:262530c4fb7462398235f9a5dba92456', 'uploader_id': '9262bf2590d558736cac4fff7978fcb1', 'display_id': 'kim-jong-un-siap-kirim-nuklir-lawan-as-dan-korsel', 'duration': 85.066667, 'categories': ['news'], 'thumbnail': 'https://video.jixie.media/1001/164474/164474_1280x720.jpg', 'tags': 'count:9', }, }] def _real_extract(self, url): video_id, display_id = self._match_valid_url(url).group('id', 'slug') webpage = self._download_webpage(url, display_id) return self._extract_data_from_jixie_id(display_id, video_id, webpage)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/err.py
yt_dlp/extractor/err.py
from .common import InfoExtractor from ..utils import ( clean_html, int_or_none, str_or_none, url_or_none, ) from ..utils.traversal import traverse_obj class ERRJupiterIE(InfoExtractor): _VALID_URL = r'https?://(?:jupiter(?:pluss)?|lasteekraan)\.err\.ee/(?P<id>\d+)' _TESTS = [{ 'note': 'Jupiter: Movie: siin-me-oleme', 'url': 'https://jupiter.err.ee/1211107/siin-me-oleme', 'md5': '9b45d1682a98853acaa1e1b0c791f425', 'info_dict': { 'id': '1211107', 'ext': 'mp4', 'title': 'Siin me oleme!', 'alt_title': '', 'description': 'md5:1825b795f5f7584241aeb59e5bbb4f70', 'release_date': '20231226', 'upload_date': '20201217', 'modified_date': '20201217', 'release_timestamp': 1703577600, 'timestamp': 1608210000, 'modified_timestamp': 1608220800, 'release_year': 1978, }, }, { 'note': 'Jupiter: Series: Impulss', 'url': 'https://jupiter.err.ee/1609145945/impulss', 'md5': 'a378486df07ed1ba74e46cc861886243', 'info_dict': { 'id': '1609145945', 'ext': 'mp4', 'title': 'Impulss', 'alt_title': 'Loteriipilet hooldekodusse', 'description': 'md5:fa8a2ed0cdccb130211513443ee4d571', 'release_date': '20231107', 'upload_date': '20231026', 'modified_date': '20231118', 'release_timestamp': 1699380000, 'timestamp': 1698327601, 'modified_timestamp': 1700311802, 'series': 'Impulss', 'season': 'Season 1', 'season_number': 1, 'episode': 'Loteriipilet hooldekodusse', 'episode_number': 6, 'series_id': '1609108187', 'release_year': 2023, 'episode_id': '1609145945', }, }, { 'note': 'Jupiter: Radio Show: mnemoturniir episode', 'url': 'https://jupiter.err.ee/1037919/mnemoturniir', 'md5': 'f1eb95fe66f9620ff84e81bbac37076a', 'info_dict': { 'id': '1037919', 'ext': 'm4a', 'title': 'Mnemoturniir', 'alt_title': '', 'description': 'md5:626db52394e7583c26ab74d6a34d9982', 'release_date': '20240121', 'upload_date': '20240108', 'modified_date': '20240121', 'release_timestamp': 1705827900, 'timestamp': 1704675602, 'modified_timestamp': 1705827601, 'series': 'Mnemoturniir', 'season': 'Season 0', 'season_number': 0, 
'episode': 'Episode 0', 'episode_number': 0, 'series_id': '1037919', 'release_year': 2024, 'episode_id': '1609215101', }, }, { 'note': 'Jupiter+: Clip: bolee-zelenyj-tallinn', 'url': 'https://jupiterpluss.err.ee/1609180445/bolee-zelenyj-tallinn', 'md5': '1b812270c4daf6ce51c06bfeaf33ed95', 'info_dict': { 'id': '1609180445', 'ext': 'mp4', 'title': 'Более зеленый Таллинн', 'alt_title': '', 'description': 'md5:fd34d9bf939c28c4a725b19a7f0d6320', 'release_date': '20231224', 'upload_date': '20231130', 'modified_date': '20231207', 'release_timestamp': 1703423400, 'timestamp': 1701338400, 'modified_timestamp': 1701967200, 'release_year': 2023, }, }, { 'note': 'Jupiter+: Series: The Sniffer', 'url': 'https://jupiterpluss.err.ee/1608311387/njuhach', 'md5': '2abdeb7131ce551bce49e8d0cea08536', 'info_dict': { 'id': '1608311387', 'ext': 'mp4', 'title': 'Нюхач', 'alt_title': '', 'description': 'md5:8c5c7d8f32ec6e54cd498c9e59ca83bc', 'release_date': '20230601', 'upload_date': '20210818', 'modified_date': '20210903', 'release_timestamp': 1685633400, 'timestamp': 1629318000, 'modified_timestamp': 1630686000, 'release_year': 2013, 'episode': 'Episode 1', 'episode_id': '1608311390', 'episode_number': 1, 'season': 'Season 1', 'season_number': 1, 'series': 'Нюхач', 'series_id': '1608311387', }, }, { 'note': 'Jupiter+: Podcast: lesnye-istorii-aisty', 'url': 'https://jupiterpluss.err.ee/1608990335/lesnye-istorii-aisty', 'md5': '8b46d7e4510b254a14b7a52211b5bf96', 'info_dict': { 'id': '1608990335', 'ext': 'm4a', 'title': 'Лесные истории | Аисты', 'alt_title': '', 'description': 'md5:065e721623e271e7a63e6540d409ca6b', 'release_date': '20230609', 'upload_date': '20230527', 'modified_date': '20230608', 'release_timestamp': 1686308700, 'timestamp': 1685145600, 'modified_timestamp': 1686252600, 'release_year': 2023, 'episode': 'Episode 0', 'episode_id': '1608990335', 'episode_number': 0, 'season': 'Season 0', 'season_number': 0, 'series': 'Лесные истории | Аисты', 'series_id': '1037497', }, }, { 
'note': 'Lasteekraan: Pätu', 'url': 'https://lasteekraan.err.ee/1092243/patu', 'md5': 'a67eb9b9bcb3d201718c15d1638edf77', 'info_dict': { 'id': '1092243', 'ext': 'mp4', 'title': 'Pätu', 'alt_title': '', 'description': 'md5:64a7b5a80afd7042d3f8ec48c77befd9', 'release_date': '20230614', 'upload_date': '20200520', 'modified_date': '20200520', 'release_timestamp': 1686745800, 'timestamp': 1589975640, 'modified_timestamp': 1589975640, 'release_year': 1990, 'episode': 'Episode 1', 'episode_id': '1092243', 'episode_number': 1, 'season': 'Season 1', 'season_number': 1, 'series': 'Pätu', 'series_id': '1092236', }, }] def _real_extract(self, url): video_id = self._match_id(url) data = self._download_json( 'https://services.err.ee/api/v2/vodContent/getContentPageData', video_id, query={'contentId': video_id})['data']['mainContent'] media_data = traverse_obj(data, ('medias', ..., {dict}), get_all=False) if traverse_obj(media_data, ('restrictions', 'drm', {bool})): self.report_drm(video_id) formats, subtitles = [], {} for format_url in set(traverse_obj(media_data, ('src', ('hls', 'hls2', 'hlsNew'), {url_or_none}))): fmts, subs = self._extract_m3u8_formats_and_subtitles( format_url, video_id, 'mp4', m3u8_id='hls', fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) for format_url in set(traverse_obj(media_data, ('src', ('dash', 'dashNew'), {url_or_none}))): fmts, subs = self._extract_mpd_formats_and_subtitles( format_url, video_id, mpd_id='dash', fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) if format_url := traverse_obj(media_data, ('src', 'file', {url_or_none})): formats.append({ 'url': format_url, 'format_id': 'http', }) return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, **traverse_obj(data, { 'title': ('heading', {str}), 'alt_title': ('subHeading', {str}), 'description': (('lead', 'body'), {clean_html}, filter), 'timestamp': ('created', {int_or_none}), 'modified_timestamp': ('updated', 
{int_or_none}), 'release_timestamp': (('scheduleStart', 'publicStart'), {int_or_none}), 'release_year': ('year', {int_or_none}), }, get_all=False), **(traverse_obj(data, { 'series': ('heading', {str}), 'series_id': ('rootContentId', {str_or_none}), 'episode': ('subHeading', {str}), 'season_number': ('season', {int_or_none}), 'episode_number': ('episode', {int_or_none}), 'episode_id': ('id', {str_or_none}), }) if data.get('type') == 'episode' else {}), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tele13.py
yt_dlp/extractor/tele13.py
from .common import InfoExtractor from .youtube import YoutubeIE from ..utils import ( determine_ext, js_to_json, qualities, ) class Tele13IE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?t13\.cl/videos(?:/[^/]+)+/(?P<id>[\w-]+)' _TESTS = [ { 'url': 'http://www.t13.cl/videos/actualidad/el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda', 'md5': '4cb1fa38adcad8fea88487a078831755', 'info_dict': { 'id': 'el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda', 'ext': 'mp4', 'title': 'El círculo de hierro de Michelle Bachelet en su regreso a La Moneda', }, 'params': { # HTTP Error 404: Not Found 'skip_download': True, }, }, { 'url': 'http://www.t13.cl/videos/mundo/tendencias/video-captan-misteriosa-bola-fuego-cielos-bangkok', 'md5': '867adf6a3b3fef932c68a71d70b70946', 'info_dict': { 'id': 'rOoKv2OMpOw', 'ext': 'mp4', 'title': 'Shooting star seen on 7-Sep-2015', 'description': 'md5:7292ff2a34b2f673da77da222ae77e1e', 'uploader': 'Porjai Jaturongkhakun', 'upload_date': '20150906', 'uploader_id': 'UCnLY_3ezwNcDSC_Wc6suZxw', }, 'add_ie': ['Youtube'], }, ] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) setup_js = self._search_regex( r"(?s)jwplayer\('player-vivo'\).setup\((\{.*?\})\)", webpage, 'setup code') sources = self._parse_json(self._search_regex( r'sources\s*:\s*(\[[^\]]+\])', setup_js, 'sources'), display_id, js_to_json) preference = qualities(['Móvil', 'SD', 'HD']) formats = [] urls = [] for f in sources: format_url = f['file'] if format_url and format_url not in urls: ext = determine_ext(format_url) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( format_url, display_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) elif YoutubeIE.suitable(format_url): return self.url_result(format_url, 'Youtube') else: formats.append({ 'url': format_url, 'format_id': f.get('label'), 'quality': preference(f.get('label')), 'ext': ext, }) urls.append(format_url) return 
{ 'id': display_id, 'title': self._search_regex( r'title\s*:\s*"([^"]+)"', setup_js, 'title'), 'description': self._html_search_meta( 'description', webpage, 'description'), 'thumbnail': self._search_regex( r'image\s*:\s*"([^"]+)"', setup_js, 'thumbnail', default=None), 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false