repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/servus.py
youtube_dl/extractor/servus.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( determine_ext, float_or_none, int_or_none, unified_timestamp, urlencode_postdata, url_or_none, ) class ServusIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// (?:www\.)? (?: servus\.com/(?:(?:at|de)/p/[^/]+|tv/videos)| (?:servustv|pm-wissen)\.com/videos ) /(?P<id>[aA]{2}-\w+|\d+-\d+) ''' _TESTS = [{ # new URL schema 'url': 'https://www.servustv.com/videos/aa-1t6vbu5pw1w12/', 'md5': '60474d4c21f3eb148838f215c37f02b9', 'info_dict': { 'id': 'AA-1T6VBU5PW1W12', 'ext': 'mp4', 'title': 'Die Grünen aus Sicht des Volkes', 'alt_title': 'Talk im Hangar-7 Voxpops Gruene', 'description': 'md5:1247204d85783afe3682644398ff2ec4', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 62.442, 'timestamp': 1605193976, 'upload_date': '20201112', 'series': 'Talk im Hangar-7', 'season': 'Season 9', 'season_number': 9, 'episode': 'Episode 31 - September 14', 'episode_number': 31, } }, { # old URL schema 'url': 'https://www.servus.com/de/p/Die-Gr%C3%BCnen-aus-Sicht-des-Volkes/AA-1T6VBU5PW1W12/', 'only_matching': True, }, { 'url': 'https://www.servus.com/at/p/Wie-das-Leben-beginnt/1309984137314-381415152/', 'only_matching': True, }, { 'url': 'https://www.servus.com/tv/videos/aa-1t6vbu5pw1w12/', 'only_matching': True, }, { 'url': 'https://www.servus.com/tv/videos/1380889096408-1235196658/', 'only_matching': True, }, { 'url': 'https://www.pm-wissen.com/videos/aa-24mus4g2w2112/', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url).upper() token = self._download_json( 'https://auth.redbullmediahouse.com/token', video_id, 'Downloading token', data=urlencode_postdata({ 'grant_type': 'client_credentials', }), headers={ 'Authorization': 'Basic SVgtMjJYNEhBNFdEM1cxMTpEdDRVSkFLd2ZOMG5IMjB1NGFBWTBmUFpDNlpoQ1EzNA==', }) access_token = token['access_token'] token_type = token.get('token_type', 'Bearer') video = self._download_json( 
'https://sparkle-api.liiift.io/api/v1/stv/channels/international/assets/%s' % video_id, video_id, 'Downloading video JSON', headers={ 'Authorization': '%s %s' % (token_type, access_token), }) formats = [] thumbnail = None for resource in video['resources']: if not isinstance(resource, dict): continue format_url = url_or_none(resource.get('url')) if not format_url: continue extension = resource.get('extension') type_ = resource.get('type') if extension == 'jpg' or type_ == 'reference_keyframe': thumbnail = format_url continue ext = determine_ext(format_url) if type_ == 'dash' or ext == 'mpd': formats.extend(self._extract_mpd_formats( format_url, video_id, mpd_id='dash', fatal=False)) elif type_ == 'hls' or ext == 'm3u8': formats.extend(self._extract_m3u8_formats( format_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) elif extension == 'mp4' or ext == 'mp4': formats.append({ 'url': format_url, 'format_id': type_, 'width': int_or_none(resource.get('width')), 'height': int_or_none(resource.get('height')), }) self._sort_formats(formats) attrs = {} for attribute in video['attributes']: if not isinstance(attribute, dict): continue key = attribute.get('fieldKey') value = attribute.get('fieldValue') if not key or not value: continue attrs[key] = value title = attrs.get('title_stv') or video_id alt_title = attrs.get('title') description = attrs.get('long_description') or attrs.get('short_description') series = attrs.get('label') season = attrs.get('season') episode = attrs.get('chapter') duration = float_or_none(attrs.get('duration'), scale=1000) season_number = int_or_none(self._search_regex( r'Season (\d+)', season or '', 'season number', default=None)) episode_number = int_or_none(self._search_regex( r'Episode (\d+)', episode or '', 'episode number', default=None)) return { 'id': video_id, 'title': title, 'alt_title': alt_title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'timestamp': 
unified_timestamp(video.get('lastPublished')), 'series': series, 'season': season, 'season_number': season_number, 'episode': episode, 'episode_number': episode_number, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tweakers.py
youtube_dl/extractor/tweakers.py
from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( int_or_none, determine_ext, mimetype2ext, ) class TweakersIE(InfoExtractor): _VALID_URL = r'https?://tweakers\.net/video/(?P<id>\d+)' _TEST = { 'url': 'https://tweakers.net/video/9926/new-nintendo-3ds-xl-op-alle-fronten-beter.html', 'md5': 'fe73e417c093a788e0160c4025f88b15', 'info_dict': { 'id': '9926', 'ext': 'mp4', 'title': 'New Nintendo 3DS XL - Op alle fronten beter', 'description': 'md5:3789b21fed9c0219e9bcaacd43fab280', 'thumbnail': r're:^https?://.*\.jpe?g$', 'duration': 386, 'uploader_id': 's7JeEm', } } def _real_extract(self, url): video_id = self._match_id(url) video_data = self._download_json( 'https://tweakers.net/video/s1playlist/%s/1920/1080/playlist.json' % video_id, video_id)['items'][0] title = video_data['title'] formats = [] for location in video_data.get('locations', {}).get('progressive', []): format_id = location.get('label') width = int_or_none(location.get('width')) height = int_or_none(location.get('height')) for source in location.get('sources', []): source_url = source.get('src') if not source_url: continue ext = mimetype2ext(source.get('type')) or determine_ext(source_url) formats.append({ 'format_id': format_id, 'url': source_url, 'width': width, 'height': height, 'ext': ext, }) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': video_data.get('description'), 'thumbnail': video_data.get('poster'), 'duration': int_or_none(video_data.get('duration')), 'uploader_id': video_data.get('account'), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/udemy.py
youtube_dl/extractor/udemy.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_HTTPError, compat_kwargs, compat_str, compat_urllib_request, compat_urlparse, ) from ..utils import ( determine_ext, extract_attributes, ExtractorError, float_or_none, int_or_none, js_to_json, sanitized_Request, try_get, unescapeHTML, url_or_none, urlencode_postdata, ) class UdemyIE(InfoExtractor): IE_NAME = 'udemy' _VALID_URL = r'''(?x) https?:// (?:[^/]+\.)?udemy\.com/ (?: [^#]+\#/lecture/| lecture/view/?\?lectureId=| [^/]+/learn/v4/t/lecture/ ) (?P<id>\d+) ''' _LOGIN_URL = 'https://www.udemy.com/join/login-popup/?displayType=ajax&showSkipButton=1' _ORIGIN_URL = 'https://www.udemy.com' _NETRC_MACHINE = 'udemy' _TESTS = [{ 'url': 'https://www.udemy.com/java-tutorial/#/lecture/172757', 'md5': '98eda5b657e752cf945d8445e261b5c5', 'info_dict': { 'id': '160614', 'ext': 'mp4', 'title': 'Introduction and Installation', 'description': 'md5:c0d51f6f21ef4ec65f091055a5eef876', 'duration': 579.29, }, 'skip': 'Requires udemy account credentials', }, { # new URL schema 'url': 'https://www.udemy.com/electric-bass-right-from-the-start/learn/v4/t/lecture/4580906', 'only_matching': True, }, { # no url in outputs format entry 'url': 'https://www.udemy.com/learn-web-development-complete-step-by-step-guide-to-success/learn/v4/t/lecture/4125812', 'only_matching': True, }, { # only outputs rendition 'url': 'https://www.udemy.com/how-you-can-help-your-local-community-5-amazing-examples/learn/v4/t/lecture/3225750?start=0', 'only_matching': True, }, { 'url': 'https://wipro.udemy.com/java-tutorial/#/lecture/172757', 'only_matching': True, }] def _extract_course_info(self, webpage, video_id): course = self._parse_json( unescapeHTML(self._search_regex( r'ng-init=["\'].*\bcourse=({.+?})[;"\']', webpage, 'course', default='{}')), video_id, fatal=False) or {} course_id = course.get('id') or self._search_regex( [ r'data-course-id=["\'](\d+)', r'&quot;courseId&quot;\s*:\s*(\d+)' ], 
webpage, 'course id') return course_id, course.get('title') def _enroll_course(self, base_url, webpage, course_id): def combine_url(base_url, url): return compat_urlparse.urljoin(base_url, url) if not url.startswith('http') else url checkout_url = unescapeHTML(self._search_regex( r'href=(["\'])(?P<url>(?:https?://(?:www\.)?udemy\.com)?/(?:payment|cart)/checkout/.+?)\1', webpage, 'checkout url', group='url', default=None)) if checkout_url: raise ExtractorError( 'Course %s is not free. You have to pay for it before you can download. ' 'Use this URL to confirm purchase: %s' % (course_id, combine_url(base_url, checkout_url)), expected=True) enroll_url = unescapeHTML(self._search_regex( r'href=(["\'])(?P<url>(?:https?://(?:www\.)?udemy\.com)?/course/subscribe/.+?)\1', webpage, 'enroll url', group='url', default=None)) if enroll_url: webpage = self._download_webpage( combine_url(base_url, enroll_url), course_id, 'Enrolling in the course', headers={'Referer': base_url}) if '>You have enrolled in' in webpage: self.to_screen('%s: Successfully enrolled in the course' % course_id) def _download_lecture(self, course_id, lecture_id): return self._download_json( 'https://www.udemy.com/api-2.0/users/me/subscribed-courses/%s/lectures/%s?' 
% (course_id, lecture_id), lecture_id, 'Downloading lecture JSON', query={ 'fields[lecture]': 'title,description,view_html,asset', 'fields[asset]': 'asset_type,stream_url,thumbnail_url,download_urls,stream_urls,captions,data', }) def _handle_error(self, response): if not isinstance(response, dict): return error = response.get('error') if error: error_str = 'Udemy returned error #%s: %s' % (error.get('code'), error.get('message')) error_data = error.get('data') if error_data: error_str += ' - %s' % error_data.get('formErrors') raise ExtractorError(error_str, expected=True) def _download_webpage_handle(self, *args, **kwargs): headers = kwargs.get('headers', {}).copy() headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36' kwargs['headers'] = headers ret = super(UdemyIE, self)._download_webpage_handle( *args, **compat_kwargs(kwargs)) if not ret: return ret webpage, _ = ret if any(p in webpage for p in ( '>Please verify you are a human', 'Access to this page has been denied because we believe you are using automation tools to browse the website', '"_pxCaptcha"')): raise ExtractorError( 'Udemy asks you to solve a CAPTCHA. 
Login with browser, ' 'solve CAPTCHA, then export cookies and pass cookie file to ' 'youtube-dl with --cookies.', expected=True) return ret def _download_json(self, url_or_request, *args, **kwargs): headers = { 'X-Udemy-Snail-Case': 'true', 'X-Requested-With': 'XMLHttpRequest', } for cookie in self._downloader.cookiejar: if cookie.name == 'client_id': headers['X-Udemy-Client-Id'] = cookie.value elif cookie.name == 'access_token': headers['X-Udemy-Bearer-Token'] = cookie.value headers['X-Udemy-Authorization'] = 'Bearer %s' % cookie.value if isinstance(url_or_request, compat_urllib_request.Request): for header, value in headers.items(): url_or_request.add_header(header, value) else: url_or_request = sanitized_Request(url_or_request, headers=headers) response = super(UdemyIE, self)._download_json(url_or_request, *args, **kwargs) self._handle_error(response) return response def _real_initialize(self): self._login() def _login(self): username, password = self._get_login_info() if username is None: return login_popup = self._download_webpage( self._LOGIN_URL, None, 'Downloading login popup') def is_logged(webpage): return any(re.search(p, webpage) for p in ( r'href=["\'](?:https://www\.udemy\.com)?/user/logout/', r'>Logout<')) # already logged in if is_logged(login_popup): return login_form = self._form_hidden_inputs('login-form', login_popup) login_form.update({ 'email': username, 'password': password, }) response = self._download_webpage( self._LOGIN_URL, None, 'Logging in', data=urlencode_postdata(login_form), headers={ 'Referer': self._ORIGIN_URL, 'Origin': self._ORIGIN_URL, }) if not is_logged(response): error = self._html_search_regex( r'(?s)<div[^>]+class="form-errors[^"]*">(.+?)</div>', response, 'error message', default=None) if error: raise ExtractorError('Unable to login: %s' % error, expected=True) raise ExtractorError('Unable to log in') def _real_extract(self, url): lecture_id = self._match_id(url) webpage = self._download_webpage(url, lecture_id) 
course_id, _ = self._extract_course_info(webpage, lecture_id) try: lecture = self._download_lecture(course_id, lecture_id) except ExtractorError as e: # Error could possibly mean we are not enrolled in the course if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403: self._enroll_course(url, webpage, course_id) lecture = self._download_lecture(course_id, lecture_id) else: raise title = lecture['title'] description = lecture.get('description') asset = lecture['asset'] asset_type = asset.get('asset_type') or asset.get('assetType') if asset_type != 'Video': raise ExtractorError( 'Lecture %s is not a video' % lecture_id, expected=True) stream_url = asset.get('stream_url') or asset.get('streamUrl') if stream_url: youtube_url = self._search_regex( r'(https?://www\.youtube\.com/watch\?v=.*)', stream_url, 'youtube URL', default=None) if youtube_url: return self.url_result(youtube_url, 'Youtube') video_id = compat_str(asset['id']) thumbnail = asset.get('thumbnail_url') or asset.get('thumbnailUrl') duration = float_or_none(asset.get('data', {}).get('duration')) subtitles = {} automatic_captions = {} formats = [] def extract_output_format(src, f_id): return { 'url': src.get('url'), 'format_id': '%sp' % (src.get('height') or f_id), 'width': int_or_none(src.get('width')), 'height': int_or_none(src.get('height')), 'vbr': int_or_none(src.get('video_bitrate_in_kbps')), 'vcodec': src.get('video_codec'), 'fps': int_or_none(src.get('frame_rate')), 'abr': int_or_none(src.get('audio_bitrate_in_kbps')), 'acodec': src.get('audio_codec'), 'asr': int_or_none(src.get('audio_sample_rate')), 'tbr': int_or_none(src.get('total_bitrate_in_kbps')), 'filesize': int_or_none(src.get('file_size_in_bytes')), } outputs = asset.get('data', {}).get('outputs') if not isinstance(outputs, dict): outputs = {} def add_output_format_meta(f, key): output = outputs.get(key) if isinstance(output, dict): output_format = extract_output_format(output, key) output_format.update(f) return output_format 
return f def extract_formats(source_list): if not isinstance(source_list, list): return for source in source_list: video_url = url_or_none(source.get('file') or source.get('src')) if not video_url: continue if source.get('type') == 'application/x-mpegURL' or determine_ext(video_url) == 'm3u8': formats.extend(self._extract_m3u8_formats( video_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) continue format_id = source.get('label') f = { 'url': video_url, 'format_id': '%sp' % format_id, 'height': int_or_none(format_id), } if format_id: # Some videos contain additional metadata (e.g. # https://www.udemy.com/ios9-swift/learn/#/lecture/3383208) f = add_output_format_meta(f, format_id) formats.append(f) def extract_subtitles(track_list): if not isinstance(track_list, list): return for track in track_list: if not isinstance(track, dict): continue if track.get('kind') != 'captions': continue src = url_or_none(track.get('src')) if not src: continue lang = track.get('language') or track.get( 'srclang') or track.get('label') sub_dict = automatic_captions if track.get( 'autogenerated') is True else subtitles sub_dict.setdefault(lang, []).append({ 'url': src, }) for url_kind in ('download', 'stream'): urls = asset.get('%s_urls' % url_kind) if isinstance(urls, dict): extract_formats(urls.get('Video')) captions = asset.get('captions') if isinstance(captions, list): for cc in captions: if not isinstance(cc, dict): continue cc_url = url_or_none(cc.get('url')) if not cc_url: continue lang = try_get(cc, lambda x: x['locale']['locale'], compat_str) sub_dict = (automatic_captions if cc.get('source') == 'auto' else subtitles) sub_dict.setdefault(lang or 'en', []).append({ 'url': cc_url, }) view_html = lecture.get('view_html') if view_html: view_html_urls = set() for source in re.findall(r'<source[^>]+>', view_html): attributes = extract_attributes(source) src = attributes.get('src') if not src: continue res = attributes.get('data-res') height = 
int_or_none(res) if src in view_html_urls: continue view_html_urls.add(src) if attributes.get('type') == 'application/x-mpegURL' or determine_ext(src) == 'm3u8': m3u8_formats = self._extract_m3u8_formats( src, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False) for f in m3u8_formats: m = re.search(r'/hls_(?P<height>\d{3,4})_(?P<tbr>\d{2,})/', f['url']) if m: if not f.get('height'): f['height'] = int(m.group('height')) if not f.get('tbr'): f['tbr'] = int(m.group('tbr')) formats.extend(m3u8_formats) else: formats.append(add_output_format_meta({ 'url': src, 'format_id': '%dp' % height if height else None, 'height': height, }, res)) # react rendition since 2017.04.15 (see # https://github.com/ytdl-org/youtube-dl/issues/12744) data = self._parse_json( self._search_regex( r'videojs-setup-data=(["\'])(?P<data>{.+?})\1', view_html, 'setup data', default='{}', group='data'), video_id, transform_source=unescapeHTML, fatal=False) if data and isinstance(data, dict): extract_formats(data.get('sources')) if not duration: duration = int_or_none(data.get('duration')) extract_subtitles(data.get('tracks')) if not subtitles and not automatic_captions: text_tracks = self._parse_json( self._search_regex( r'text-tracks=(["\'])(?P<data>\[.+?\])\1', view_html, 'text tracks', default='{}', group='data'), video_id, transform_source=lambda s: js_to_json(unescapeHTML(s)), fatal=False) extract_subtitles(text_tracks) if not formats and outputs: for format_id, output in outputs.items(): f = extract_output_format(output, format_id) if f.get('url'): formats.append(f) self._sort_formats(formats, field_preference=('height', 'width', 'tbr', 'format_id')) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'formats': formats, 'subtitles': subtitles, 'automatic_captions': automatic_captions, } class UdemyCourseIE(UdemyIE): IE_NAME = 'udemy:course' _VALID_URL = r'https?://(?:[^/]+\.)?udemy\.com/(?P<id>[^/?#&]+)' _TESTS 
= [{ 'url': 'https://www.udemy.com/java-tutorial/', 'only_matching': True, }, { 'url': 'https://wipro.udemy.com/java-tutorial/', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if UdemyIE.suitable(url) else super(UdemyCourseIE, cls).suitable(url) def _real_extract(self, url): course_path = self._match_id(url) webpage = self._download_webpage(url, course_path) course_id, title = self._extract_course_info(webpage, course_path) self._enroll_course(url, webpage, course_id) response = self._download_json( 'https://www.udemy.com/api-2.0/courses/%s/cached-subscriber-curriculum-items' % course_id, course_id, 'Downloading course curriculum', query={ 'fields[chapter]': 'title,object_index', 'fields[lecture]': 'title,asset', 'page_size': '1000', }) entries = [] chapter, chapter_number = [None] * 2 for entry in response['results']: clazz = entry.get('_class') if clazz == 'lecture': asset = entry.get('asset') if isinstance(asset, dict): asset_type = asset.get('asset_type') or asset.get('assetType') if asset_type != 'Video': continue lecture_id = entry.get('id') if lecture_id: entry = { '_type': 'url_transparent', 'url': 'https://www.udemy.com/%s/learn/v4/t/lecture/%s' % (course_path, entry['id']), 'title': entry.get('title'), 'ie_key': UdemyIE.ie_key(), } if chapter_number: entry['chapter_number'] = chapter_number if chapter: entry['chapter'] = chapter entries.append(entry) elif clazz == 'chapter': chapter_number = entry.get('object_index') chapter = entry.get('title') return self.playlist_result(entries, course_id, title)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/minds.py
youtube_dl/extractor/minds.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_str from ..utils import ( clean_html, int_or_none, str_or_none, strip_or_none, ) class MindsBaseIE(InfoExtractor): _VALID_URL_BASE = r'https?://(?:www\.)?minds\.com/' def _call_api(self, path, video_id, resource, query=None): api_url = 'https://www.minds.com/api/' + path token = self._get_cookies(api_url).get('XSRF-TOKEN') return self._download_json( api_url, video_id, 'Downloading %s JSON metadata' % resource, headers={ 'Referer': 'https://www.minds.com/', 'X-XSRF-TOKEN': token.value if token else '', }, query=query) class MindsIE(MindsBaseIE): IE_NAME = 'minds' _VALID_URL = MindsBaseIE._VALID_URL_BASE + r'(?:media|newsfeed|archive/view)/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://www.minds.com/media/100000000000086822', 'md5': '215a658184a419764852239d4970b045', 'info_dict': { 'id': '100000000000086822', 'ext': 'mp4', 'title': 'Minds intro sequence', 'thumbnail': r're:https?://.+\.png', 'uploader_id': 'ottman', 'upload_date': '20130524', 'timestamp': 1369404826, 'uploader': 'Bill Ottman', 'view_count': int, 'like_count': int, 'dislike_count': int, 'tags': ['animation'], 'comment_count': int, 'license': 'attribution-cc', }, }, { # entity.type == 'activity' and empty title 'url': 'https://www.minds.com/newsfeed/798025111988506624', 'md5': 'b2733a74af78d7fd3f541c4cbbaa5950', 'info_dict': { 'id': '798022190320226304', 'ext': 'mp4', 'title': '798022190320226304', 'uploader': 'ColinFlaherty', 'upload_date': '20180111', 'timestamp': 1515639316, 'uploader_id': 'ColinFlaherty', }, }, { 'url': 'https://www.minds.com/archive/view/715172106794442752', 'only_matching': True, }, { # youtube perma_url 'url': 'https://www.minds.com/newsfeed/1197131838022602752', 'only_matching': True, }] def _real_extract(self, url): entity_id = self._match_id(url) entity = self._call_api( 'v1/entities/entity/' + entity_id, entity_id, 'entity')['entity'] if entity.get('type') == 
'activity': if entity.get('custom_type') == 'video': video_id = entity['entity_guid'] else: return self.url_result(entity['perma_url']) else: assert (entity['subtype'] == 'video') video_id = entity_id # 1080p and webm formats available only on the sources array video = self._call_api( 'v2/media/video/' + video_id, video_id, 'video') formats = [] for source in (video.get('sources') or []): src = source.get('src') if not src: continue formats.append({ 'format_id': source.get('label'), 'height': int_or_none(source.get('size')), 'url': src, }) self._sort_formats(formats) entity = video.get('entity') or entity owner = entity.get('ownerObj') or {} uploader_id = owner.get('username') tags = entity.get('tags') if tags and isinstance(tags, compat_str): tags = [tags] thumbnail = None poster = video.get('poster') or entity.get('thumbnail_src') if poster: urlh = self._request_webpage(poster, video_id, fatal=False) if urlh: thumbnail = urlh.geturl() return { 'id': video_id, 'title': entity.get('title') or video_id, 'formats': formats, 'description': clean_html(entity.get('description')) or None, 'license': str_or_none(entity.get('license')), 'timestamp': int_or_none(entity.get('time_created')), 'uploader': strip_or_none(owner.get('name')), 'uploader_id': uploader_id, 'uploader_url': 'https://www.minds.com/' + uploader_id if uploader_id else None, 'view_count': int_or_none(entity.get('play:count')), 'like_count': int_or_none(entity.get('thumbs:up:count')), 'dislike_count': int_or_none(entity.get('thumbs:down:count')), 'tags': tags, 'comment_count': int_or_none(entity.get('comments:count')), 'thumbnail': thumbnail, } class MindsFeedBaseIE(MindsBaseIE): _PAGE_SIZE = 150 def _entries(self, feed_id): query = {'limit': self._PAGE_SIZE, 'sync': 1} i = 1 while True: data = self._call_api( 'v2/feeds/container/%s/videos' % feed_id, feed_id, 'page %s' % i, query) entities = data.get('entities') or [] for entity in entities: guid = entity.get('guid') if not guid: continue yield 
self.url_result( 'https://www.minds.com/newsfeed/' + guid, MindsIE.ie_key(), guid) query['from_timestamp'] = data['load-next'] if not (query['from_timestamp'] and len(entities) == self._PAGE_SIZE): break i += 1 def _real_extract(self, url): feed_id = self._match_id(url) feed = self._call_api( 'v1/%s/%s' % (self._FEED_PATH, feed_id), feed_id, self._FEED_TYPE)[self._FEED_TYPE] return self.playlist_result( self._entries(feed['guid']), feed_id, strip_or_none(feed.get('name')), feed.get('briefdescription')) class MindsChannelIE(MindsFeedBaseIE): _FEED_TYPE = 'channel' IE_NAME = 'minds:' + _FEED_TYPE _VALID_URL = MindsBaseIE._VALID_URL_BASE + r'(?!(?:newsfeed|media|api|archive|groups)/)(?P<id>[^/?&#]+)' _FEED_PATH = 'channel' _TEST = { 'url': 'https://www.minds.com/ottman', 'info_dict': { 'id': 'ottman', 'title': 'Bill Ottman', 'description': 'Co-creator & CEO @minds', }, 'playlist_mincount': 54, } class MindsGroupIE(MindsFeedBaseIE): _FEED_TYPE = 'group' IE_NAME = 'minds:' + _FEED_TYPE _VALID_URL = MindsBaseIE._VALID_URL_BASE + r'groups/profile/(?P<id>[0-9]+)' _FEED_PATH = 'groups/group' _TEST = { 'url': 'https://www.minds.com/groups/profile/785582576369672204/feed/videos', 'info_dict': { 'id': '785582576369672204', 'title': 'Cooking Videos', }, 'playlist_mincount': 1, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/sztvhu.py
youtube_dl/extractor/sztvhu.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class SztvHuIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:www\.)?sztv\.hu|www\.tvszombathely\.hu)/(?:[^/]+)/.+-(?P<id>[0-9]+)' _TEST = { 'url': 'http://sztv.hu/hirek/cserkeszek-nepszerusitettek-a-kornyezettudatos-eletmodot-a-savaria-teren-20130909', 'md5': 'a6df607b11fb07d0e9f2ad94613375cb', 'info_dict': { 'id': '20130909', 'ext': 'mp4', 'title': 'Cserkészek népszerűsítették a környezettudatos életmódot a Savaria téren', 'description': 'A zöld nap játékos ismeretterjesztő programjait a Magyar Cserkész Szövetség szervezte, akik az ország nyolc városában adják át tudásukat az érdeklődőknek. A PET...', }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) video_file = self._search_regex( r'file: "...:(.*?)",', webpage, 'video file') title = self._html_search_regex( r'<meta name="title" content="([^"]*?) - [^-]*? - [^-]*?"', webpage, 'video title') description = self._html_search_regex( r'<meta name="description" content="([^"]*)"/>', webpage, 'video description', fatal=False) thumbnail = self._og_search_thumbnail(webpage) video_url = 'http://media.sztv.hu/vod/' + video_file return { 'id': video_id, 'url': video_url, 'title': title, 'description': description, 'thumbnail': thumbnail, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/gameinformer.py
youtube_dl/extractor/gameinformer.py
# coding: utf-8 from __future__ import unicode_literals from .brightcove import BrightcoveNewIE from .common import InfoExtractor from ..utils import ( clean_html, get_element_by_class, get_element_by_id, ) class GameInformerIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?gameinformer\.com/(?:[^/]+/)*(?P<id>[^.?&#]+)' _TESTS = [{ # normal Brightcove embed code extracted with BrightcoveNewIE._extract_url 'url': 'http://www.gameinformer.com/b/features/archive/2015/09/26/replay-animal-crossing.aspx', 'md5': '292f26da1ab4beb4c9099f1304d2b071', 'info_dict': { 'id': '4515472681001', 'ext': 'mp4', 'title': 'Replay - Animal Crossing', 'description': 'md5:2e211891b215c85d061adc7a4dd2d930', 'timestamp': 1443457610, 'upload_date': '20150928', 'uploader_id': '694940074001', }, }, { # Brightcove id inside unique element with field--name-field-brightcove-video-id class 'url': 'https://www.gameinformer.com/video-feature/new-gameplay-today/2019/07/09/new-gameplay-today-streets-of-rogue', 'info_dict': { 'id': '6057111913001', 'ext': 'mp4', 'title': 'New Gameplay Today – Streets Of Rogue', 'timestamp': 1562699001, 'upload_date': '20190709', 'uploader_id': '694940074001', }, }] BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/694940074001/default_default/index.html?videoId=%s' def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage( url, display_id, headers=self.geo_verification_headers()) brightcove_id = clean_html(get_element_by_class('field--name-field-brightcove-video-id', webpage) or get_element_by_id('video-source-content', webpage)) brightcove_url = self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id if brightcove_id else BrightcoveNewIE._extract_url(self, webpage) return self.url_result(brightcove_url, 'BrightcoveNew', brightcove_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/vodpl.py
youtube_dl/extractor/vodpl.py
# coding: utf-8 from __future__ import unicode_literals from .onet import OnetBaseIE class VODPlIE(OnetBaseIE): _VALID_URL = r'https?://vod\.pl/(?:[^/]+/)+(?P<id>[0-9a-zA-Z]+)' _TESTS = [{ 'url': 'https://vod.pl/filmy/chlopaki-nie-placza/3ep3jns', 'md5': 'a7dc3b2f7faa2421aefb0ecaabf7ec74', 'info_dict': { 'id': '3ep3jns', 'ext': 'mp4', 'title': 'Chłopaki nie płaczą', 'description': 'md5:f5f03b84712e55f5ac9f0a3f94445224', 'timestamp': 1463415154, 'duration': 5765, 'upload_date': '20160516', }, }, { 'url': 'https://vod.pl/seriale/belfer-na-planie-praca-kamery-online/2c10heh', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) info_dict = self._extract_from_id(self._search_mvp_id(webpage), webpage) info_dict['id'] = video_id return info_dict
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/aliexpress.py
youtube_dl/extractor/aliexpress.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_str from ..utils import ( float_or_none, try_get, ) class AliExpressLiveIE(InfoExtractor): _VALID_URL = r'https?://live\.aliexpress\.com/live/(?P<id>\d+)' _TEST = { 'url': 'https://live.aliexpress.com/live/2800002704436634', 'md5': 'e729e25d47c5e557f2630eaf99b740a5', 'info_dict': { 'id': '2800002704436634', 'ext': 'mp4', 'title': 'CASIMA7.22', 'thumbnail': r're:https?://.*\.jpg', 'uploader': 'CASIMA Official Store', 'timestamp': 1500717600, 'upload_date': '20170722', }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) data = self._parse_json( self._search_regex( r'(?s)runParams\s*=\s*({.+?})\s*;?\s*var', webpage, 'runParams'), video_id) title = data['title'] formats = self._extract_m3u8_formats( data['replyStreamUrl'], video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls') return { 'id': video_id, 'title': title, 'thumbnail': data.get('coverUrl'), 'uploader': try_get( data, lambda x: x['followBar']['name'], compat_str), 'timestamp': float_or_none(data.get('startTimeLong'), scale=1000), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/hearthisat.py
youtube_dl/extractor/hearthisat.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_urlparse from ..utils import ( HEADRequest, KNOWN_EXTENSIONS, sanitized_Request, str_to_int, urlencode_postdata, urlhandle_detect_ext, ) class HearThisAtIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?hearthis\.at/(?P<artist>[^/]+)/(?P<title>[A-Za-z0-9\-]+)/?$' _PLAYLIST_URL = 'https://hearthis.at/playlist.php' _TESTS = [{ 'url': 'https://hearthis.at/moofi/dr-kreep', 'md5': 'ab6ec33c8fed6556029337c7885eb4e0', 'info_dict': { 'id': '150939', 'ext': 'wav', 'title': 'Moofi - Dr. Kreep', 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1421564134, 'description': 'Listen to Dr. Kreep by Moofi on hearthis.at - Modular, Eurorack, Mutable Intruments Braids, Valhalla-DSP', 'upload_date': '20150118', 'comment_count': int, 'view_count': int, 'like_count': int, 'duration': 71, 'categories': ['Experimental'], } }, { # 'download' link redirects to the original webpage 'url': 'https://hearthis.at/twitchsf/dj-jim-hopkins-totally-bitchin-80s-dance-mix/', 'md5': '5980ceb7c461605d30f1f039df160c6e', 'info_dict': { 'id': '811296', 'ext': 'mp3', 'title': 'TwitchSF - DJ Jim Hopkins - Totally Bitchin\' 80\'s Dance Mix!', 'description': 'Listen to DJ Jim Hopkins - Totally Bitchin\' 80\'s Dance Mix! 
by TwitchSF on hearthis.at - Dance', 'upload_date': '20160328', 'timestamp': 1459186146, 'thumbnail': r're:^https?://.*\.jpg$', 'comment_count': int, 'view_count': int, 'like_count': int, 'duration': 4360, 'categories': ['Dance'], }, }] def _real_extract(self, url): m = re.match(self._VALID_URL, url) display_id = '{artist:s} - {title:s}'.format(**m.groupdict()) webpage = self._download_webpage(url, display_id) track_id = self._search_regex( r'intTrackId\s*=\s*(\d+)', webpage, 'track ID') payload = urlencode_postdata({'tracks[]': track_id}) req = sanitized_Request(self._PLAYLIST_URL, payload) req.add_header('Content-type', 'application/x-www-form-urlencoded') track = self._download_json(req, track_id, 'Downloading playlist')[0] title = '{artist:s} - {title:s}'.format(**track) categories = None if track.get('category'): categories = [track['category']] description = self._og_search_description(webpage) thumbnail = self._og_search_thumbnail(webpage) meta_span = r'<span[^>]+class="%s".*?</i>([^<]+)</span>' view_count = str_to_int(self._search_regex( meta_span % 'plays_count', webpage, 'view count', fatal=False)) like_count = str_to_int(self._search_regex( meta_span % 'likes_count', webpage, 'like count', fatal=False)) comment_count = str_to_int(self._search_regex( meta_span % 'comment_count', webpage, 'comment count', fatal=False)) duration = str_to_int(self._search_regex( r'data-length="(\d+)', webpage, 'duration', fatal=False)) timestamp = str_to_int(self._search_regex( r'<span[^>]+class="calctime"[^>]+data-time="(\d+)', webpage, 'timestamp', fatal=False)) formats = [] mp3_url = self._search_regex( r'(?s)<a class="player-link"\s+(?:[a-zA-Z0-9_:-]+="[^"]+"\s+)*?data-mp3="([^"]+)"', webpage, 'mp3 URL', fatal=False) if mp3_url: formats.append({ 'format_id': 'mp3', 'vcodec': 'none', 'acodec': 'mp3', 'url': mp3_url, }) download_path = self._search_regex( r'<a class="[^"]*download_fct[^"]*"\s+href="([^"]+)"', webpage, 'download URL', default=None) if download_path: 
download_url = compat_urlparse.urljoin(url, download_path) ext_req = HEADRequest(download_url) ext_handle = self._request_webpage( ext_req, display_id, note='Determining extension') ext = urlhandle_detect_ext(ext_handle) if ext in KNOWN_EXTENSIONS: formats.append({ 'format_id': 'download', 'vcodec': 'none', 'ext': ext, 'url': download_url, 'preference': 2, # Usually better quality }) self._sort_formats(formats) return { 'id': track_id, 'display_id': display_id, 'title': title, 'formats': formats, 'thumbnail': thumbnail, 'description': description, 'duration': duration, 'timestamp': timestamp, 'view_count': view_count, 'comment_count': comment_count, 'like_count': like_count, 'categories': categories, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/heise.py
youtube_dl/extractor/heise.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from .kaltura import KalturaIE from .youtube import YoutubeIE from ..utils import ( determine_ext, int_or_none, NO_DEFAULT, parse_iso8601, smuggle_url, xpath_text, ) class HeiseIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?heise\.de/(?:[^/]+/)+[^/]+-(?P<id>[0-9]+)\.html' _TESTS = [{ # kaltura embed 'url': 'http://www.heise.de/video/artikel/Podcast-c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2404147.html', 'info_dict': { 'id': '1_kkrq94sm', 'ext': 'mp4', 'title': "Podcast: c't uplink 3.3 – Owncloud / Tastaturen / Peilsender Smartphone", 'timestamp': 1512734959, 'upload_date': '20171208', 'description': 'md5:c934cbfb326c669c2bcabcbe3d3fcd20', }, 'params': { 'skip_download': True, }, }, { # YouTube embed 'url': 'http://www.heise.de/newsticker/meldung/Netflix-In-20-Jahren-vom-Videoverleih-zum-TV-Revolutionaer-3814130.html', 'md5': 'e403d2b43fea8e405e88e3f8623909f1', 'info_dict': { 'id': '6kmWbXleKW4', 'ext': 'mp4', 'title': 'NEU IM SEPTEMBER | Netflix', 'description': 'md5:2131f3c7525e540d5fd841de938bd452', 'upload_date': '20170830', 'uploader': 'Netflix Deutschland, Österreich und Schweiz', 'uploader_id': 'netflixdach', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.heise.de/video/artikel/nachgehakt-Wie-sichert-das-c-t-Tool-Restric-tor-Windows-10-ab-3700244.html', 'info_dict': { 'id': '1_ntrmio2s', 'ext': 'mp4', 'title': "nachgehakt: Wie sichert das c't-Tool Restric'tor Windows 10 ab?", 'description': 'md5:47e8ffb6c46d85c92c310a512d6db271', 'timestamp': 1512470717, 'upload_date': '20171205', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.heise.de/ct/artikel/c-t-uplink-20-8-Staubsaugerroboter-Xiaomi-Vacuum-2-AR-Brille-Meta-2-und-Android-rooten-3959893.html', 'info_dict': { 'id': '1_59mk80sf', 'ext': 'mp4', 'title': "c't uplink 20.8: Staubsaugerroboter Xiaomi Vacuum 2, AR-Brille Meta 2 und Android rooten", 'description': 
'md5:f50fe044d3371ec73a8f79fcebd74afc', 'timestamp': 1517567237, 'upload_date': '20180202', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.heise.de/ct/artikel/c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2403911.html', 'only_matching': True, }, { 'url': 'http://www.heise.de/newsticker/meldung/c-t-uplink-Owncloud-Tastaturen-Peilsender-Smartphone-2404251.html?wt_mc=rss.ho.beitrag.atom', 'only_matching': True, }, { 'url': 'http://www.heise.de/ct/ausgabe/2016-12-Spiele-3214137.html', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) def extract_title(default=NO_DEFAULT): title = self._html_search_meta( ('fulltitle', 'title'), webpage, default=None) if not title or title == "c't": title = self._search_regex( r'<div[^>]+class="videoplayerjw"[^>]+data-title="([^"]+)"', webpage, 'title', default=None) if not title: title = self._html_search_regex( r'<h1[^>]+\bclass=["\']article_page_title[^>]+>(.+?)<', webpage, 'title', default=default) return title title = extract_title(default=None) description = self._og_search_description( webpage, default=None) or self._html_search_meta( 'description', webpage) def _make_kaltura_result(kaltura_url): return { '_type': 'url_transparent', 'url': smuggle_url(kaltura_url, {'source_url': url}), 'ie_key': KalturaIE.ie_key(), 'title': title, 'description': description, } kaltura_url = KalturaIE._extract_url(webpage) if kaltura_url: return _make_kaltura_result(kaltura_url) kaltura_id = self._search_regex( r'entry-id=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 'kaltura id', default=None, group='id') if kaltura_id: return _make_kaltura_result('kaltura:2238431:%s' % kaltura_id) yt_urls = YoutubeIE._extract_urls(webpage) if yt_urls: return self.playlist_from_matches( yt_urls, video_id, title, ie=YoutubeIE.ie_key()) title = extract_title() container_id = self._search_regex( r'<div class="videoplayerjw"[^>]+data-container="([0-9]+)"', 
webpage, 'container ID') sequenz_id = self._search_regex( r'<div class="videoplayerjw"[^>]+data-sequenz="([0-9]+)"', webpage, 'sequenz ID') doc = self._download_xml( 'http://www.heise.de/videout/feed', video_id, query={ 'container': container_id, 'sequenz': sequenz_id, }) formats = [] for source_node in doc.findall('.//{http://rss.jwpcdn.com/}source'): label = source_node.attrib['label'] height = int_or_none(self._search_regex( r'^(.*?_)?([0-9]+)p$', label, 'height', default=None)) video_url = source_node.attrib['file'] ext = determine_ext(video_url, '') formats.append({ 'url': video_url, 'format_note': label, 'format_id': '%s_%s' % (ext, label), 'height': height, }) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': (xpath_text(doc, './/{http://rss.jwpcdn.com/}image') or self._og_search_thumbnail(webpage)), 'timestamp': parse_iso8601( self._html_search_meta('date', webpage)), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/discoverygo.py
youtube_dl/extractor/discoverygo.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( determine_ext, extract_attributes, ExtractorError, int_or_none, parse_age_limit, remove_end, unescapeHTML, url_or_none, ) class DiscoveryGoBaseIE(InfoExtractor): _VALID_URL_TEMPLATE = r'''(?x)https?://(?:www\.)?(?: discovery| investigationdiscovery| discoverylife| animalplanet| ahctv| destinationamerica| sciencechannel| tlc| velocitychannel )go\.com/%s(?P<id>[^/?#&]+)''' def _extract_video_info(self, video, stream, display_id): title = video['name'] if not stream: if video.get('authenticated') is True: raise ExtractorError( 'This video is only available via cable service provider subscription that' ' is not currently supported. You may want to use --cookies.', expected=True) else: raise ExtractorError('Unable to find stream') STREAM_URL_SUFFIX = 'streamUrl' formats = [] for stream_kind in ('', 'hds'): suffix = STREAM_URL_SUFFIX.capitalize() if stream_kind else STREAM_URL_SUFFIX stream_url = stream.get('%s%s' % (stream_kind, suffix)) if not stream_url: continue if stream_kind == '': formats.extend(self._extract_m3u8_formats( stream_url, display_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) elif stream_kind == 'hds': formats.extend(self._extract_f4m_formats( stream_url, display_id, f4m_id=stream_kind, fatal=False)) self._sort_formats(formats) video_id = video.get('id') or display_id description = video.get('description', {}).get('detailed') duration = int_or_none(video.get('duration')) series = video.get('show', {}).get('name') season_number = int_or_none(video.get('season', {}).get('number')) episode_number = int_or_none(video.get('episodeNumber')) tags = video.get('tags') age_limit = parse_age_limit(video.get('parental', {}).get('rating')) subtitles = {} captions = stream.get('captions') if isinstance(captions, list): for caption in captions: subtitle_url = url_or_none(caption.get('fileUrl')) if not subtitle_url or not 
subtitle_url.startswith('http'): continue lang = caption.get('fileLang', 'en') ext = determine_ext(subtitle_url) subtitles.setdefault(lang, []).append({ 'url': subtitle_url, 'ext': 'ttml' if ext == 'xml' else ext, }) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': description, 'duration': duration, 'series': series, 'season_number': season_number, 'episode_number': episode_number, 'tags': tags, 'age_limit': age_limit, 'formats': formats, 'subtitles': subtitles, } class DiscoveryGoIE(DiscoveryGoBaseIE): _VALID_URL = DiscoveryGoBaseIE._VALID_URL_TEMPLATE % r'(?:[^/]+/)+' _GEO_COUNTRIES = ['US'] _TEST = { 'url': 'https://www.discoverygo.com/bering-sea-gold/reaper-madness/', 'info_dict': { 'id': '58c167d86b66d12f2addeb01', 'ext': 'mp4', 'title': 'Reaper Madness', 'description': 'md5:09f2c625c99afb8946ed4fb7865f6e78', 'duration': 2519, 'series': 'Bering Sea Gold', 'season_number': 8, 'episode_number': 6, 'age_limit': 14, }, } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) container = extract_attributes( self._search_regex( r'(<div[^>]+class=["\']video-player-container[^>]+>)', webpage, 'video container')) video = self._parse_json( container.get('data-video') or container.get('data-json'), display_id) stream = video.get('stream') return self._extract_video_info(video, stream, display_id) class DiscoveryGoPlaylistIE(DiscoveryGoBaseIE): _VALID_URL = DiscoveryGoBaseIE._VALID_URL_TEMPLATE % '' _TEST = { 'url': 'https://www.discoverygo.com/bering-sea-gold/', 'info_dict': { 'id': 'bering-sea-gold', 'title': 'Bering Sea Gold', 'description': 'md5:cc5c6489835949043c0cc3ad66c2fa0e', }, 'playlist_mincount': 6, } @classmethod def suitable(cls, url): return False if DiscoveryGoIE.suitable(url) else super( DiscoveryGoPlaylistIE, cls).suitable(url) def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) entries = [] for mobj in 
re.finditer(r'data-json=(["\'])(?P<json>{.+?})\1', webpage): data = self._parse_json( mobj.group('json'), display_id, transform_source=unescapeHTML, fatal=False) if not isinstance(data, dict) or data.get('type') != 'episode': continue episode_url = data.get('socialUrl') if not episode_url: continue entries.append(self.url_result( episode_url, ie=DiscoveryGoIE.ie_key(), video_id=data.get('id'))) return self.playlist_result( entries, display_id, remove_end(self._og_search_title( webpage, fatal=False), ' | Discovery GO'), self._og_search_description(webpage))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/fusion.py
youtube_dl/extractor/fusion.py
from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, mimetype2ext, parse_iso8601, ) class FusionIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?fusion\.(?:net|tv)/(?:video/|show/.+?\bvideo=)(?P<id>\d+)' _TESTS = [{ 'url': 'http://fusion.tv/video/201781/u-s-and-panamanian-forces-work-together-to-stop-a-vessel-smuggling-drugs/', 'info_dict': { 'id': '3145868', 'ext': 'mp4', 'title': 'U.S. and Panamanian forces work together to stop a vessel smuggling drugs', 'description': 'md5:0cc84a9943c064c0f46b128b41b1b0d7', 'duration': 140.0, 'timestamp': 1442589635, 'uploader': 'UNIVISON', 'upload_date': '20150918', }, 'params': { 'skip_download': True, }, 'add_ie': ['Anvato'], }, { 'url': 'http://fusion.tv/video/201781', 'only_matching': True, }, { 'url': 'https://fusion.tv/show/food-exposed-with-nelufar-hedayat/?ancla=full-episodes&video=588644', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) video = self._download_json( 'https://platform.fusion.net/wp-json/fusiondotnet/v1/video/' + video_id, video_id) info = { 'id': video_id, 'title': video['title'], 'description': video.get('excerpt'), 'timestamp': parse_iso8601(video.get('published')), 'series': video.get('show'), } formats = [] src = video.get('src') or {} for f_id, f in src.items(): for q_id, q in f.items(): q_url = q.get('url') if not q_url: continue ext = determine_ext(q_url, mimetype2ext(q.get('type'))) if ext == 'smil': formats.extend(self._extract_smil_formats(q_url, video_id, fatal=False)) elif f_id == 'm3u8-variant' or (ext == 'm3u8' and q_id == 'Variant'): formats.extend(self._extract_m3u8_formats( q_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) else: formats.append({ 'format_id': '-'.join([f_id, q_id]), 'url': q_url, 'width': int_or_none(q.get('width')), 'height': int_or_none(q.get('height')), 'tbr': int_or_none(self._search_regex(r'_(\d+)\.m(?:p4|3u8)', q_url, 'bitrate')), 
'ext': 'mp4' if ext == 'm3u8' else ext, 'protocol': 'm3u8_native' if ext == 'm3u8' else 'https', }) if formats: self._sort_formats(formats) info['formats'] = formats else: info.update({ '_type': 'url', 'url': 'anvato:uni:' + video['video_ids']['anvato'], 'ie_key': 'Anvato', }) return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/swrmediathek.py
youtube_dl/extractor/swrmediathek.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( parse_duration, int_or_none, determine_protocol, ) class SWRMediathekIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?swrmediathek\.de/(?:content/)?player\.htm\?show=(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})' _TESTS = [{ 'url': 'http://swrmediathek.de/player.htm?show=849790d0-dab8-11e3-a953-0026b975f2e6', 'md5': '8c5f6f0172753368547ca8413a7768ac', 'info_dict': { 'id': '849790d0-dab8-11e3-a953-0026b975f2e6', 'ext': 'mp4', 'title': 'SWR odysso', 'description': 'md5:2012e31baad36162e97ce9eb3f157b8a', 'thumbnail': r're:^http:.*\.jpg$', 'duration': 2602, 'upload_date': '20140515', 'uploader': 'SWR Fernsehen', 'uploader_id': '990030', }, }, { 'url': 'http://swrmediathek.de/player.htm?show=0e1a8510-ddf2-11e3-9be3-0026b975f2e6', 'md5': 'b10ab854f912eecc5a6b55cd6fc1f545', 'info_dict': { 'id': '0e1a8510-ddf2-11e3-9be3-0026b975f2e6', 'ext': 'mp4', 'title': 'Nachtcafé - Alltagsdroge Alkohol - zwischen Sektempfang und Komasaufen', 'description': 'md5:e0a3adc17e47db2c23aab9ebc36dbee2', 'thumbnail': r're:http://.*\.jpg', 'duration': 5305, 'upload_date': '20140516', 'uploader': 'SWR Fernsehen', 'uploader_id': '990030', }, 'skip': 'redirect to http://swrmediathek.de/index.htm?hinweis=swrlink', }, { 'url': 'http://swrmediathek.de/player.htm?show=bba23e10-cb93-11e3-bf7f-0026b975f2e6', 'md5': '4382e4ef2c9d7ce6852535fa867a0dd3', 'info_dict': { 'id': 'bba23e10-cb93-11e3-bf7f-0026b975f2e6', 'ext': 'mp3', 'title': 'Saša Stanišic: Vor dem Fest', 'description': 'md5:5b792387dc3fbb171eb709060654e8c9', 'thumbnail': r're:http://.*\.jpg', 'duration': 3366, 'upload_date': '20140520', 'uploader': 'SWR 2', 'uploader_id': '284670', }, 'skip': 'redirect to http://swrmediathek.de/index.htm?hinweis=swrlink', }] def _real_extract(self, url): video_id = self._match_id(url) video = self._download_json( 'http://swrmediathek.de/AjaxEntry?ekey=%s' % video_id, video_id, 
'Downloading video JSON') attr = video['attr'] title = attr['entry_title'] media_type = attr.get('entry_etype') formats = [] for entry in video.get('sub', []): if entry.get('name') != 'entry_media': continue entry_attr = entry.get('attr', {}) f_url = entry_attr.get('val2') if not f_url: continue codec = entry_attr.get('val0') if codec == 'm3u8': formats.extend(self._extract_m3u8_formats( f_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) elif codec == 'f4m': formats.extend(self._extract_f4m_formats( f_url + '?hdcore=3.7.0', video_id, f4m_id='hds', fatal=False)) else: formats.append({ 'format_id': determine_protocol({'url': f_url}), 'url': f_url, 'quality': int_or_none(entry_attr.get('val1')), 'vcodec': codec if media_type == 'Video' else 'none', 'acodec': codec if media_type == 'Audio' else None, }) self._sort_formats(formats) upload_date = None entry_pdatet = attr.get('entry_pdatet') if entry_pdatet: upload_date = entry_pdatet[:-4] return { 'id': video_id, 'title': title, 'description': attr.get('entry_descl'), 'thumbnail': attr.get('entry_image_16_9'), 'duration': parse_duration(attr.get('entry_durat')), 'upload_date': upload_date, 'uploader': attr.get('channel_title'), 'uploader_id': attr.get('channel_idkey'), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/xstream.py
youtube_dl/extractor/xstream.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( int_or_none, parse_iso8601, xpath_with_ns, xpath_text, find_xpath_attr, ) class XstreamIE(InfoExtractor): _VALID_URL = r'''(?x) (?: xstream:| https?://frontend\.xstream\.(?:dk|net)/ ) (?P<partner_id>[^/]+) (?: :| /feed/video/\?.*?\bid= ) (?P<id>\d+) ''' _TESTS = [{ 'url': 'http://frontend.xstream.dk/btno/feed/video/?platform=web&id=86588', 'md5': 'd7d17e3337dc80de6d3a540aefbe441b', 'info_dict': { 'id': '86588', 'ext': 'mov', 'title': 'Otto Wollertsen', 'description': 'Vestlendingen Otto Fredrik Wollertsen', 'timestamp': 1430473209, 'upload_date': '20150501', }, }, { 'url': 'http://frontend.xstream.dk/ap/feed/video/?platform=web&id=21039', 'only_matching': True, }] def _extract_video_info(self, partner_id, video_id): data = self._download_xml( 'http://frontend.xstream.dk/%s/feed/video/?platform=web&id=%s' % (partner_id, video_id), video_id) NS_MAP = { 'atom': 'http://www.w3.org/2005/Atom', 'xt': 'http://xstream.dk/', 'media': 'http://search.yahoo.com/mrss/', } entry = data.find(xpath_with_ns('./atom:entry', NS_MAP)) title = xpath_text( entry, xpath_with_ns('./atom:title', NS_MAP), 'title') description = xpath_text( entry, xpath_with_ns('./atom:summary', NS_MAP), 'description') timestamp = parse_iso8601(xpath_text( entry, xpath_with_ns('./atom:published', NS_MAP), 'upload date')) formats = [] media_group = entry.find(xpath_with_ns('./media:group', NS_MAP)) for media_content in media_group.findall(xpath_with_ns('./media:content', NS_MAP)): media_url = media_content.get('url') if not media_url: continue tbr = int_or_none(media_content.get('bitrate')) mobj = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>[^/]+))/(?P<playpath>.+)$', media_url) if mobj: formats.append({ 'url': mobj.group('url'), 'play_path': 'mp4:%s' % mobj.group('playpath'), 'app': mobj.group('app'), 'ext': 'flv', 'tbr': tbr, 'format_id': 'rtmp-%d' % tbr, }) else: formats.append({ 'url': 
media_url, 'tbr': tbr, }) self._sort_formats(formats) link = find_xpath_attr( entry, xpath_with_ns('./atom:link', NS_MAP), 'rel', 'original') if link is not None: formats.append({ 'url': link.get('href'), 'format_id': link.get('rel'), 'preference': 1, }) thumbnails = [{ 'url': splash.get('url'), 'width': int_or_none(splash.get('width')), 'height': int_or_none(splash.get('height')), } for splash in media_group.findall(xpath_with_ns('./xt:splash', NS_MAP))] return { 'id': video_id, 'title': title, 'description': description, 'timestamp': timestamp, 'formats': formats, 'thumbnails': thumbnails, } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) partner_id = mobj.group('partner_id') video_id = mobj.group('id') return self._extract_video_info(partner_id, video_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/fivemin.py
youtube_dl/extractor/fivemin.py
from __future__ import unicode_literals from .common import InfoExtractor class FiveMinIE(InfoExtractor): IE_NAME = '5min' _VALID_URL = r'(?:5min:|https?://(?:[^/]*?5min\.com/|delivery\.vidible\.tv/aol)(?:(?:Scripts/PlayerSeed\.js|playerseed/?)?\?.*?playList=)?)(?P<id>\d+)' _TESTS = [ { # From http://www.engadget.com/2013/11/15/ipad-mini-retina-display-review/ 'url': 'http://pshared.5min.com/Scripts/PlayerSeed.js?sid=281&width=560&height=345&playList=518013791', 'md5': '4f7b0b79bf1a470e5004f7112385941d', 'info_dict': { 'id': '518013791', 'ext': 'mp4', 'title': 'iPad Mini with Retina Display Review', 'description': 'iPad mini with Retina Display review', 'duration': 177, 'uploader': 'engadget', 'upload_date': '20131115', 'timestamp': 1384515288, }, 'params': { # m3u8 download 'skip_download': True, } }, { # From http://on.aol.com/video/how-to-make-a-next-level-fruit-salad-518086247 'url': '5min:518086247', 'md5': 'e539a9dd682c288ef5a498898009f69e', 'info_dict': { 'id': '518086247', 'ext': 'mp4', 'title': 'How to Make a Next-Level Fruit Salad', 'duration': 184, }, 'skip': 'no longer available', }, { 'url': 'http://embed.5min.com/518726732/', 'only_matching': True, }, { 'url': 'http://delivery.vidible.tv/aol?playList=518013791', 'only_matching': True, } ] def _real_extract(self, url): video_id = self._match_id(url) return self.url_result('aol-video:%s' % video_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/bitchute.py
youtube_dl/extractor/bitchute.py
# coding: utf-8 from __future__ import unicode_literals import itertools import re from .common import InfoExtractor from ..utils import ( orderedSet, unified_strdate, urlencode_postdata, ) class BitChuteIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?bitchute\.com/(?:video|embed|torrent/[^/]+)/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://www.bitchute.com/video/szoMrox2JEI/', 'md5': '66c4a70e6bfc40dcb6be3eb1d74939eb', 'info_dict': { 'id': 'szoMrox2JEI', 'ext': 'mp4', 'title': 'Fuck bitches get money', 'description': 'md5:3f21f6fb5b1d17c3dee9cf6b5fe60b3a', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'Victoria X Rave', 'upload_date': '20170813', }, }, { 'url': 'https://www.bitchute.com/embed/lbb5G1hjPhw/', 'only_matching': True, }, { 'url': 'https://www.bitchute.com/torrent/Zee5BE49045h/szoMrox2JEI.webtorrent', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( 'https://www.bitchute.com/video/%s' % video_id, video_id, headers={ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.57 Safari/537.36', }) title = self._html_search_regex( (r'<[^>]+\bid=["\']video-title[^>]+>([^<]+)', r'<title>([^<]+)'), webpage, 'title', default=None) or self._html_search_meta( 'description', webpage, 'title', default=None) or self._og_search_description(webpage) format_urls = [] for mobj in re.finditer( r'addWebSeed\s*\(\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage): format_urls.append(mobj.group('url')) format_urls.extend(re.findall(r'as=(https?://[^&"\']+)', webpage)) formats = [ {'url': format_url} for format_url in orderedSet(format_urls)] if not formats: formats = self._parse_html5_media_entries( url, webpage, video_id)[0]['formats'] self._check_formats(formats, video_id) self._sort_formats(formats) description = self._html_search_regex( r'(?s)<div\b[^>]+\bclass=["\']full hidden[^>]+>(.+?)</div>', webpage, 'description', fatal=False) 
thumbnail = self._og_search_thumbnail( webpage, default=None) or self._html_search_meta( 'twitter:image:src', webpage, 'thumbnail') uploader = self._html_search_regex( (r'(?s)<div class=["\']channel-banner.*?<p\b[^>]+\bclass=["\']name[^>]+>(.+?)</p>', r'(?s)<p\b[^>]+\bclass=["\']video-author[^>]+>(.+?)</p>'), webpage, 'uploader', fatal=False) upload_date = unified_strdate(self._search_regex( r'class=["\']video-publish-date[^>]+>[^<]+ at \d+:\d+ UTC on (.+?)\.', webpage, 'upload date', fatal=False)) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'uploader': uploader, 'upload_date': upload_date, 'formats': formats, } class BitChuteChannelIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?bitchute\.com/channel/(?P<id>[^/?#&]+)' _TEST = { 'url': 'https://www.bitchute.com/channel/victoriaxrave/', 'playlist_mincount': 185, 'info_dict': { 'id': 'victoriaxrave', }, } _TOKEN = 'zyG6tQcGPE5swyAEFLqKUwMuMMuF6IO2DZ6ZDQjGfsL0e4dcTLwqkTTul05Jdve7' def _entries(self, channel_id): channel_url = 'https://www.bitchute.com/channel/%s/' % channel_id offset = 0 for page_num in itertools.count(1): data = self._download_json( '%sextend/' % channel_url, channel_id, 'Downloading channel page %d' % page_num, data=urlencode_postdata({ 'csrfmiddlewaretoken': self._TOKEN, 'name': '', 'offset': offset, }), headers={ 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', 'Referer': channel_url, 'X-Requested-With': 'XMLHttpRequest', 'Cookie': 'csrftoken=%s' % self._TOKEN, }) if data.get('success') is False: break html = data.get('html') if not html: break video_ids = re.findall( r'class=["\']channel-videos-image-container[^>]+>\s*<a\b[^>]+\bhref=["\']/video/([^"\'/]+)', html) if not video_ids: break offset += len(video_ids) for video_id in video_ids: yield self.url_result( 'https://www.bitchute.com/video/%s' % video_id, ie=BitChuteIE.ie_key(), video_id=video_id) def _real_extract(self, url): channel_id = self._match_id(url) return 
self.playlist_result( self._entries(channel_id), playlist_id=channel_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/motorsport.py
youtube_dl/extractor/motorsport.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_urlparse, ) class MotorsportIE(InfoExtractor): IE_DESC = 'motorsport.com' _VALID_URL = r'https?://(?:www\.)?motorsport\.com/[^/?#]+/video/(?:[^/?#]+/)(?P<id>[^/]+)/?(?:$|[?#])' _TEST = { 'url': 'http://www.motorsport.com/f1/video/main-gallery/red-bull-racing-2014-rules-explained/', 'info_dict': { 'id': '2-T3WuR-KMM', 'ext': 'mp4', 'title': 'Red Bull Racing: 2014 Rules Explained', 'duration': 208, 'description': 'A new clip from Red Bull sees Daniel Ricciardo and Sebastian Vettel explain the 2014 Formula One regulations – which are arguably the most complex the sport has ever seen.', 'uploader': 'mcomstaff', 'uploader_id': 'UC334JIYKkVnyFoNCclfZtHQ', 'upload_date': '20140903', 'thumbnail': r're:^https?://.+\.jpg$' }, 'add_ie': ['Youtube'], 'params': { 'skip_download': True, }, } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) iframe_path = self._html_search_regex( r'<iframe id="player_iframe"[^>]+src="([^"]+)"', webpage, 'iframe path') iframe = self._download_webpage( compat_urlparse.urljoin(url, iframe_path), display_id, 'Downloading iframe') youtube_id = self._search_regex( r'www.youtube.com/embed/(.{11})', iframe, 'youtube id') return { '_type': 'url_transparent', 'display_id': display_id, 'url': 'https://youtube.com/watch?v=%s' % youtube_id, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/bbc.py
youtube_dl/extractor/bbc.py
# coding: utf-8 from __future__ import unicode_literals import functools import itertools import json import re from .common import InfoExtractor from ..compat import ( compat_etree_Element, compat_HTTPError, compat_parse_qs, compat_str, compat_urllib_error, compat_urllib_parse_urlparse, compat_urlparse, ) from ..utils import ( ExtractorError, OnDemandPagedList, clean_html, dict_get, float_or_none, get_element_by_class, int_or_none, js_to_json, parse_duration, parse_iso8601, strip_or_none, try_get, unescapeHTML, unified_timestamp, url_or_none, urlencode_postdata, urljoin, ) class BBCCoUkIE(InfoExtractor): IE_NAME = 'bbc.co.uk' IE_DESC = 'BBC iPlayer' _ID_REGEX = r'(?:[pbml][\da-z]{7}|w[\da-z]{7,14})' _VALID_URL = r'''(?x) https?:// (?:www\.)?bbc\.co\.uk/ (?: programmes/(?!articles/)| iplayer(?:/[^/]+)?/(?:episode/|playlist/)| music/(?:clips|audiovideo/popular)[/#]| radio/player/| sounds/play/| events/[^/]+/play/[^/]+/ ) (?P<id>%s)(?!/(?:episodes|broadcasts|clips)) ''' % _ID_REGEX _LOGIN_URL = 'https://account.bbc.com/signin' _NETRC_MACHINE = 'bbc' _MEDIA_SELECTOR_URL_TEMPL = 'https://open.live.bbc.co.uk/mediaselector/6/select/version/2.0/mediaset/%s/vpid/%s' _MEDIA_SETS = [ # Provides HQ HLS streams with even better quality that pc mediaset but fails # with geolocation in some cases when it's even not geo restricted at all (e.g. # http://www.bbc.co.uk/programmes/b06bp7lf). Also may fail with selectionunavailable. 
'iptv-all', 'pc', ] _EMP_PLAYLIST_NS = 'http://bbc.co.uk/2008/emp/playlist' _TESTS = [ { 'url': 'http://www.bbc.co.uk/programmes/b039g8p7', 'info_dict': { 'id': 'b039d07m', 'ext': 'flv', 'title': 'Kaleidoscope, Leonard Cohen', 'description': 'The Canadian poet and songwriter reflects on his musical career.', }, 'params': { # rtmp download 'skip_download': True, } }, { 'url': 'http://www.bbc.co.uk/iplayer/episode/b00yng5w/The_Man_in_Black_Series_3_The_Printed_Name/', 'info_dict': { 'id': 'b00yng1d', 'ext': 'flv', 'title': 'The Man in Black: Series 3: The Printed Name', 'description': "Mark Gatiss introduces Nicholas Pierpan's chilling tale of a writer's devilish pact with a mysterious man. Stars Ewan Bailey.", 'duration': 1800, }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'Episode is no longer available on BBC iPlayer Radio', }, { 'url': 'http://www.bbc.co.uk/iplayer/episode/b03vhd1f/The_Voice_UK_Series_3_Blind_Auditions_5/', 'info_dict': { 'id': 'b00yng1d', 'ext': 'flv', 'title': 'The Voice UK: Series 3: Blind Auditions 5', 'description': 'Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.', 'duration': 5100, }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only', }, { 'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion', 'info_dict': { 'id': 'b03k3pb7', 'ext': 'flv', 'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction", 'description': '2. 
Invasion', 'duration': 3600, }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only', }, { 'url': 'http://www.bbc.co.uk/programmes/b04v20dw', 'info_dict': { 'id': 'b04v209v', 'ext': 'flv', 'title': 'Pete Tong, The Essential New Tune Special', 'description': "Pete has a very special mix - all of 2014's Essential New Tunes!", 'duration': 10800, }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'Episode is no longer available on BBC iPlayer Radio', }, { 'url': 'http://www.bbc.co.uk/music/clips/p022h44b', 'note': 'Audio', 'info_dict': { 'id': 'p022h44j', 'ext': 'flv', 'title': 'BBC Proms Music Guides, Rachmaninov: Symphonic Dances', 'description': "In this Proms Music Guide, Andrew McGregor looks at Rachmaninov's Symphonic Dances.", 'duration': 227, }, 'params': { # rtmp download 'skip_download': True, } }, { 'url': 'http://www.bbc.co.uk/music/clips/p025c0zz', 'note': 'Video', 'info_dict': { 'id': 'p025c103', 'ext': 'flv', 'title': 'Reading and Leeds Festival, 2014, Rae Morris - Closer (Live on BBC Three)', 'description': 'Rae Morris performs Closer for BBC Three at Reading 2014', 'duration': 226, }, 'params': { # rtmp download 'skip_download': True, } }, { 'url': 'http://www.bbc.co.uk/iplayer/episode/b054fn09/ad/natural-world-20152016-2-super-powered-owls', 'info_dict': { 'id': 'p02n76xf', 'ext': 'flv', 'title': 'Natural World, 2015-2016: 2. 
Super Powered Owls', 'description': 'md5:e4db5c937d0e95a7c6b5e654d429183d', 'duration': 3540, }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'geolocation', }, { 'url': 'http://www.bbc.co.uk/iplayer/episode/b05zmgwn/royal-academy-summer-exhibition', 'info_dict': { 'id': 'b05zmgw1', 'ext': 'flv', 'description': 'Kirsty Wark and Morgan Quaintance visit the Royal Academy as it prepares for its annual artistic extravaganza, meeting people who have come together to make the show unique.', 'title': 'Royal Academy Summer Exhibition', 'duration': 3540, }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'geolocation', }, { # iptv-all mediaset fails with geolocation however there is no geo restriction # for this programme at all 'url': 'http://www.bbc.co.uk/programmes/b06rkn85', 'info_dict': { 'id': 'b06rkms3', 'ext': 'flv', 'title': "Best of the Mini-Mixes 2015: Part 3, Annie Mac's Friday Night - BBC Radio 1", 'description': "Annie has part three in the Best of the Mini-Mixes 2015, plus the year's Most Played!", }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'Now it\'s really geo-restricted', }, { # compact player (https://github.com/ytdl-org/youtube-dl/issues/8147) 'url': 'http://www.bbc.co.uk/programmes/p028bfkf/player', 'info_dict': { 'id': 'p028bfkj', 'ext': 'flv', 'title': 'Extract from BBC documentary Look Stranger - Giant Leeks and Magic Brews', 'description': 'Extract from BBC documentary Look Stranger - Giant Leeks and Magic Brews', }, 'params': { # rtmp download 'skip_download': True, }, }, { 'url': 'https://www.bbc.co.uk/sounds/play/m0007jzb', 'note': 'Audio', 'info_dict': { 'id': 'm0007jz9', 'ext': 'mp4', 'title': 'BBC Proms, 2019, Prom 34: West–Eastern Divan Orchestra', 'description': "Live BBC Proms. 
West–Eastern Divan Orchestra with Daniel Barenboim and Martha Argerich.", 'duration': 9840, }, 'params': { # rtmp download 'skip_download': True, } }, { 'url': 'http://www.bbc.co.uk/iplayer/playlist/p01dvks4', 'only_matching': True, }, { 'url': 'http://www.bbc.co.uk/music/clips#p02frcc3', 'only_matching': True, }, { 'url': 'http://www.bbc.co.uk/iplayer/cbeebies/episode/b0480276/bing-14-atchoo', 'only_matching': True, }, { 'url': 'http://www.bbc.co.uk/radio/player/p03cchwf', 'only_matching': True, }, { 'url': 'https://www.bbc.co.uk/music/audiovideo/popular#p055bc55', 'only_matching': True, }, { 'url': 'http://www.bbc.co.uk/programmes/w3csv1y9', 'only_matching': True, }, { 'url': 'https://www.bbc.co.uk/programmes/m00005xn', 'only_matching': True, }, { 'url': 'https://www.bbc.co.uk/programmes/w172w4dww1jqt5s', 'only_matching': True, }] def _login(self): username, password = self._get_login_info() if username is None: return login_page = self._download_webpage( self._LOGIN_URL, None, 'Downloading signin page') login_form = self._hidden_inputs(login_page) login_form.update({ 'username': username, 'password': password, }) post_url = urljoin(self._LOGIN_URL, self._search_regex( r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page, 'post url', default=self._LOGIN_URL, group='url')) response, urlh = self._download_webpage_handle( post_url, None, 'Logging in', data=urlencode_postdata(login_form), headers={'Referer': self._LOGIN_URL}) if self._LOGIN_URL in urlh.geturl(): error = clean_html(get_element_by_class('form-message', response)) if error: raise ExtractorError( 'Unable to login: %s' % error, expected=True) raise ExtractorError('Unable to log in') def _real_initialize(self): self._login() class MediaSelectionError(Exception): def __init__(self, id): self.id = id def _extract_asx_playlist(self, connection, programme_id): asx = self._download_xml(connection.get('href'), programme_id, 'Downloading ASX playlist') return [ref.get('href') for ref in 
asx.findall('./Entry/ref')] def _extract_items(self, playlist): return playlist.findall('./{%s}item' % self._EMP_PLAYLIST_NS) def _extract_medias(self, media_selection): error = media_selection.get('result') if error: raise BBCCoUkIE.MediaSelectionError(error) return media_selection.get('media') or [] def _extract_connections(self, media): return media.get('connection') or [] def _get_subtitles(self, media, programme_id): subtitles = {} for connection in self._extract_connections(media): cc_url = url_or_none(connection.get('href')) if not cc_url: continue captions = self._download_xml( cc_url, programme_id, 'Downloading captions', fatal=False) if not isinstance(captions, compat_etree_Element): continue subtitles['en'] = [ { 'url': connection.get('href'), 'ext': 'ttml', }, ] break return subtitles def _raise_extractor_error(self, media_selection_error): raise ExtractorError( '%s returned error: %s' % (self.IE_NAME, media_selection_error.id), expected=True) def _download_media_selector(self, programme_id): last_exception = None for media_set in self._MEDIA_SETS: try: return self._download_media_selector_url( self._MEDIA_SELECTOR_URL_TEMPL % (media_set, programme_id), programme_id) except BBCCoUkIE.MediaSelectionError as e: if e.id in ('notukerror', 'geolocation', 'selectionunavailable'): last_exception = e continue self._raise_extractor_error(e) self._raise_extractor_error(last_exception) def _download_media_selector_url(self, url, programme_id=None): media_selection = self._download_json( url, programme_id, 'Downloading media selection JSON', expected_status=(403, 404)) return self._process_media_selector(media_selection, programme_id) def _process_media_selector(self, media_selection, programme_id): formats = [] subtitles = None urls = [] for media in self._extract_medias(media_selection): kind = media.get('kind') if kind in ('video', 'audio'): bitrate = int_or_none(media.get('bitrate')) encoding = media.get('encoding') width = int_or_none(media.get('width')) 
height = int_or_none(media.get('height')) file_size = int_or_none(media.get('media_file_size')) for connection in self._extract_connections(media): href = connection.get('href') if href in urls: continue if href: urls.append(href) conn_kind = connection.get('kind') protocol = connection.get('protocol') supplier = connection.get('supplier') transfer_format = connection.get('transferFormat') format_id = supplier or conn_kind or protocol # ASX playlist if supplier == 'asx': for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)): formats.append({ 'url': ref, 'format_id': 'ref%s_%s' % (i, format_id), }) elif transfer_format == 'dash': formats.extend(self._extract_mpd_formats( href, programme_id, mpd_id=format_id, fatal=False)) elif transfer_format == 'hls': # TODO: let expected_status be passed into _extract_xxx_formats() instead try: fmts = self._extract_m3u8_formats( href, programme_id, ext='mp4', entry_protocol='m3u8_native', m3u8_id=format_id, fatal=False) except ExtractorError as e: if not (isinstance(e.exc_info[1], compat_urllib_error.HTTPError) and e.exc_info[1].code in (403, 404)): raise fmts = [] formats.extend(fmts) elif transfer_format == 'hds': formats.extend(self._extract_f4m_formats( href, programme_id, f4m_id=format_id, fatal=False)) else: if not supplier and bitrate: format_id += '-%d' % bitrate fmt = { 'format_id': format_id, 'filesize': file_size, } if kind == 'video': fmt.update({ 'width': width, 'height': height, 'tbr': bitrate, 'vcodec': encoding, }) else: fmt.update({ 'abr': bitrate, 'acodec': encoding, 'vcodec': 'none', }) if protocol in ('http', 'https'): # Direct link fmt.update({ 'url': href, }) elif protocol == 'rtmp': application = connection.get('application', 'ondemand') auth_string = connection.get('authString') identifier = connection.get('identifier') server = connection.get('server') fmt.update({ 'url': '%s://%s/%s?%s' % (protocol, server, application, auth_string), 'play_path': identifier, 'app': '%s?%s' % 
(application, auth_string), 'page_url': 'http://www.bbc.co.uk', 'player_url': 'http://www.bbc.co.uk/emp/releases/iplayer/revisions/617463_618125_4/617463_618125_4_emp.swf', 'rtmp_live': False, 'ext': 'flv', }) else: continue formats.append(fmt) elif kind == 'captions': subtitles = self.extract_subtitles(media, programme_id) return formats, subtitles def _download_playlist(self, playlist_id): try: playlist = self._download_json( 'http://www.bbc.co.uk/programmes/%s/playlist.json' % playlist_id, playlist_id, 'Downloading playlist JSON') version = playlist.get('defaultAvailableVersion') if version: smp_config = version['smpConfig'] title = smp_config['title'] description = smp_config['summary'] for item in smp_config['items']: kind = item['kind'] if kind not in ('programme', 'radioProgramme'): continue programme_id = item.get('vpid') duration = int_or_none(item.get('duration')) formats, subtitles = self._download_media_selector(programme_id) return programme_id, title, description, duration, formats, subtitles except ExtractorError as ee: if not (isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404): raise # fallback to legacy playlist return self._process_legacy_playlist(playlist_id) def _process_legacy_playlist_url(self, url, display_id): playlist = self._download_legacy_playlist_url(url, display_id) return self._extract_from_legacy_playlist(playlist, display_id) def _process_legacy_playlist(self, playlist_id): return self._process_legacy_playlist_url( 'http://www.bbc.co.uk/iplayer/playlist/%s' % playlist_id, playlist_id) def _download_legacy_playlist_url(self, url, playlist_id=None): return self._download_xml( url, playlist_id, 'Downloading legacy playlist XML') def _extract_from_legacy_playlist(self, playlist, playlist_id): no_items = playlist.find('./{%s}noItems' % self._EMP_PLAYLIST_NS) if no_items is not None: reason = no_items.get('reason') if reason == 'preAvailability': msg = 'Episode %s is not yet available' % playlist_id elif reason == 
'postAvailability': msg = 'Episode %s is no longer available' % playlist_id elif reason == 'noMedia': msg = 'Episode %s is not currently available' % playlist_id else: msg = 'Episode %s is not available: %s' % (playlist_id, reason) raise ExtractorError(msg, expected=True) for item in self._extract_items(playlist): kind = item.get('kind') if kind not in ('programme', 'radioProgramme'): continue title = playlist.find('./{%s}title' % self._EMP_PLAYLIST_NS).text description_el = playlist.find('./{%s}summary' % self._EMP_PLAYLIST_NS) description = description_el.text if description_el is not None else None def get_programme_id(item): def get_from_attributes(item): for p in ('identifier', 'group'): value = item.get(p) if value and re.match(r'^[pb][\da-z]{7}$', value): return value get_from_attributes(item) mediator = item.find('./{%s}mediator' % self._EMP_PLAYLIST_NS) if mediator is not None: return get_from_attributes(mediator) programme_id = get_programme_id(item) duration = int_or_none(item.get('duration')) if programme_id: formats, subtitles = self._download_media_selector(programme_id) else: formats, subtitles = self._process_media_selector(item, playlist_id) programme_id = playlist_id return programme_id, title, description, duration, formats, subtitles def _real_extract(self, url): group_id = self._match_id(url) webpage = self._download_webpage(url, group_id, 'Downloading video page') error = self._search_regex( r'<div\b[^>]+\bclass=["\'](?:smp|playout)__message delta["\'][^>]*>\s*([^<]+?)\s*<', webpage, 'error', default=None) if error: raise ExtractorError(error, expected=True) programme_id = None duration = None tviplayer = self._search_regex( r'mediator\.bind\(({.+?})\s*,\s*document\.getElementById', webpage, 'player', default=None) if tviplayer: player = self._parse_json(tviplayer, group_id).get('player', {}) duration = int_or_none(player.get('duration')) programme_id = player.get('vpid') if not programme_id: programme_id = self._search_regex( 
r'"vpid"\s*:\s*"(%s)"' % self._ID_REGEX, webpage, 'vpid', fatal=False, default=None) if programme_id: formats, subtitles = self._download_media_selector(programme_id) title = self._og_search_title(webpage, default=None) or self._html_search_regex( (r'<h2[^>]+id="parent-title"[^>]*>(.+?)</h2>', r'<div[^>]+class="info"[^>]*>\s*<h1>(.+?)</h1>'), webpage, 'title') description = self._search_regex( (r'<p class="[^"]*medium-description[^"]*">([^<]+)</p>', r'<div[^>]+class="info_+synopsis"[^>]*>([^<]+)</div>'), webpage, 'description', default=None) if not description: description = self._html_search_meta('description', webpage) else: programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id) self._sort_formats(formats) return { 'id': programme_id, 'title': title, 'description': description, 'thumbnail': self._og_search_thumbnail(webpage, default=None), 'duration': duration, 'formats': formats, 'subtitles': subtitles, } class BBCIE(BBCCoUkIE): IE_NAME = 'bbc' IE_DESC = 'BBC' _VALID_URL = r'https?://(?:www\.)?bbc\.(?:com|co\.uk)/(?:[^/]+/)+(?P<id>[^/#?]+)' _MEDIA_SETS = [ 'mobile-tablet-main', 'pc', ] _TESTS = [{ # article with multiple videos embedded with data-playable containing vpids 'url': 'http://www.bbc.com/news/world-europe-32668511', 'info_dict': { 'id': 'world-europe-32668511', 'title': 'Russia stages massive WW2 parade', 'description': 'md5:00ff61976f6081841f759a08bf78cc9c', }, 'playlist_count': 2, }, { # article with multiple videos embedded with data-playable (more videos) 'url': 'http://www.bbc.com/news/business-28299555', 'info_dict': { 'id': 'business-28299555', 'title': 'Farnborough Airshow: Video highlights', 'description': 'BBC reports and video highlights at the Farnborough Airshow.', }, 'playlist_count': 9, 'skip': 'Save time', }, { # article with multiple videos embedded with `new SMP()` # broken 'url': 'http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460', 'info_dict': { 'id': 
'3662a707-0af9-3149-963f-47bea720b460', 'title': 'BUGGER', }, 'playlist_count': 18, }, { # single video embedded with data-playable containing vpid 'url': 'http://www.bbc.com/news/world-europe-32041533', 'info_dict': { 'id': 'p02mprgb', 'ext': 'mp4', 'title': 'Aerial footage showed the site of the crash in the Alps - courtesy BFM TV', 'description': 'md5:2868290467291b37feda7863f7a83f54', 'duration': 47, 'timestamp': 1427219242, 'upload_date': '20150324', }, 'params': { # rtmp download 'skip_download': True, } }, { # article with single video embedded with data-playable containing XML playlist # with direct video links as progressiveDownloadUrl (for now these are extracted) # and playlist with f4m and m3u8 as streamingUrl 'url': 'http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu', 'info_dict': { 'id': '150615_telabyad_kentin_cogu', 'ext': 'mp4', 'title': "YPG: Tel Abyad'ın tamamı kontrolümüzde", 'description': 'md5:33a4805a855c9baf7115fcbde57e7025', 'timestamp': 1434397334, 'upload_date': '20150615', }, 'params': { 'skip_download': True, } }, { # single video embedded with data-playable containing XML playlists (regional section) 'url': 'http://www.bbc.com/mundo/video_fotos/2015/06/150619_video_honduras_militares_hospitales_corrupcion_aw', 'info_dict': { 'id': '150619_video_honduras_militares_hospitales_corrupcion_aw', 'ext': 'mp4', 'title': 'Honduras militariza sus hospitales por nuevo escándalo de corrupción', 'description': 'md5:1525f17448c4ee262b64b8f0c9ce66c8', 'timestamp': 1434713142, 'upload_date': '20150619', }, 'params': { 'skip_download': True, } }, { # single video from video playlist embedded with vxp-playlist-data JSON 'url': 'http://www.bbc.com/news/video_and_audio/must_see/33376376', 'info_dict': { 'id': 'p02w6qjc', 'ext': 'mp4', 'title': '''Judge Mindy Glazer: "I'm sorry to see you here... I always wondered what happened to you"''', 'duration': 56, 'description': '''Judge Mindy Glazer: "I'm sorry to see you here... 
I always wondered what happened to you"''', }, 'params': { 'skip_download': True, } }, { # single video story with digitalData 'url': 'http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret', 'info_dict': { 'id': 'p02q6gc4', 'ext': 'flv', 'title': 'Sri Lanka’s spicy secret', 'description': 'As a new train line to Jaffna opens up the country’s north, travellers can experience a truly distinct slice of Tamil culture.', 'timestamp': 1437674293, 'upload_date': '20150723', }, 'params': { # rtmp download 'skip_download': True, } }, { # single video story without digitalData 'url': 'http://www.bbc.com/autos/story/20130513-hyundais-rock-star', 'info_dict': { 'id': 'p018zqqg', 'ext': 'mp4', 'title': 'Hyundai Santa Fe Sport: Rock star', 'description': 'md5:b042a26142c4154a6e472933cf20793d', 'timestamp': 1415867444, 'upload_date': '20141113', }, 'params': { # rtmp download 'skip_download': True, } }, { # single video embedded with Morph 'url': 'http://www.bbc.co.uk/sport/live/olympics/36895975', 'info_dict': { 'id': 'p041vhd0', 'ext': 'mp4', 'title': "Nigeria v Japan - Men's First Round", 'description': 'Live coverage of the first round from Group B at the Amazonia Arena.', 'duration': 7980, 'uploader': 'BBC Sport', 'uploader_id': 'bbc_sport', }, 'params': { # m3u8 download 'skip_download': True, }, 'skip': 'Georestricted to UK', }, { # single video with playlist.sxml URL in playlist param 'url': 'http://www.bbc.com/sport/0/football/33653409', 'info_dict': { 'id': 'p02xycnp', 'ext': 'mp4', 'title': 'Transfers: Cristiano Ronaldo to Man Utd, Arsenal to spend?', 'description': 'BBC Sport\'s David Ornstein has the latest transfer gossip, including rumours of a Manchester United return for Cristiano Ronaldo.', 'duration': 140, }, 'params': { # rtmp download 'skip_download': True, } }, { # article with multiple videos embedded with playlist.sxml in playlist param 'url': 'http://www.bbc.com/sport/0/football/34475836', 'info_dict': { 'id': '34475836', 'title': 'Jurgen 
Klopp: Furious football from a witty and winning coach', 'description': 'Fast-paced football, wit, wisdom and a ready smile - why Liverpool fans should come to love new boss Jurgen Klopp.', }, 'playlist_count': 3, }, { # school report article with single video 'url': 'http://www.bbc.co.uk/schoolreport/35744779', 'info_dict': { 'id': '35744779', 'title': 'School which breaks down barriers in Jerusalem', }, 'playlist_count': 1, }, { # single video with playlist URL from weather section 'url': 'http://www.bbc.com/weather/features/33601775', 'only_matching': True, }, { # custom redirection to www.bbc.com # also, video with window.__INITIAL_DATA__ 'url': 'http://www.bbc.co.uk/news/science-environment-33661876', 'info_dict': { 'id': 'p02xzws1', 'ext': 'mp4', 'title': "Pluto may have 'nitrogen glaciers'", 'description': 'md5:6a95b593f528d7a5f2605221bc56912f', 'thumbnail': r're:https?://.+/.+\.jpg', 'timestamp': 1437785037, 'upload_date': '20150725', }, }, { # video with window.__INITIAL_DATA__ and value as JSON string 'url': 'https://www.bbc.com/news/av/world-europe-59468682', 'info_dict': {
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
true
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/xhamster.py
youtube_dl/extractor/xhamster.py
# coding: utf-8
from __future__ import unicode_literals

import itertools
import re

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    clean_html,
    determine_ext,
    dict_get,
    extract_attributes,
    ExtractorError,
    float_or_none,
    int_or_none,
    parse_duration,
    str_or_none,
    try_get,
    unified_strdate,
    url_or_none,
    urljoin,
)


class XHamsterIE(InfoExtractor):
    """Extractor for single xHamster videos.

    Supports both the legacy ``/movies/<id>/<seo>.html`` and the newer
    ``/videos/<seo>-<id>`` URL schemas across the site's mirror domains.
    Extraction first tries the JSON blob the modern site embeds as
    ``window.initials`` and falls back to scraping the old HTML layout.
    """
    # All known mirror/alias domains; interpolated into the URL regexes below.
    _DOMAINS = r'(?:xhamster\.(?:com|one|desi)|xhms\.pro|xhamster\d+\.com|xhday\.com|xhvid\.com)'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:.+?\.)?%s/
                        (?:
                            movies/(?P<id>[\dA-Za-z]+)/(?P<display_id>[^/]*)\.html|
                            videos/(?P<display_id_2>[^/]*)-(?P<id_2>[\dA-Za-z]+)
                        )
                    ''' % _DOMAINS
    _TESTS = [{
        'url': 'https://xhamster.com/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
        'md5': '34e1ab926db5dc2750fed9e1f34304bb',
        'info_dict': {
            'id': '1509445',
            'display_id': 'femaleagent-shy-beauty-takes-the-bait',
            'ext': 'mp4',
            'title': 'FemaleAgent Shy beauty takes the bait',
            'timestamp': 1350194821,
            'upload_date': '20121014',
            'uploader': 'Ruseful2011',
            'uploader_id': 'ruseful2011',
            'duration': 893,
            'age_limit': 18,
        },
    }, {
        'url': 'https://xhamster.com/videos/britney-spears-sexy-booty-2221348?hd=',
        'info_dict': {
            'id': '2221348',
            'display_id': 'britney-spears-sexy-booty',
            'ext': 'mp4',
            'title': 'Britney Spears Sexy Booty',
            'timestamp': 1379123460,
            'upload_date': '20130914',
            'uploader': 'jojo747400',
            'duration': 200,
            'age_limit': 18,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # empty seo, unavailable via new URL schema
        'url': 'http://xhamster.com/movies/5667973/.html',
        'info_dict': {
            'id': '5667973',
            'ext': 'mp4',
            'title': '....',
            'timestamp': 1454948101,
            'upload_date': '20160208',
            'uploader': 'parejafree',
            'uploader_id': 'parejafree',
            'duration': 72,
            'age_limit': 18,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # mobile site
        'url': 'https://m.xhamster.com/videos/cute-teen-jacqueline-solo-masturbation-8559111',
        'only_matching': True,
    }, {
        'url': 'https://xhamster.com/movies/2272726/amber_slayed_by_the_knight.html',
        'only_matching': True,
    }, {
        # This video is visible for marcoalfa123456's friends only
        'url': 'https://it.xhamster.com/movies/7263980/la_mia_vicina.html',
        'only_matching': True,
    }, {
        # new URL schema
        'url': 'https://pt.xhamster.com/videos/euro-pedal-pumping-7937821',
        'only_matching': True,
    }, {
        'url': 'https://xhamster.one/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
        'only_matching': True,
    }, {
        'url': 'https://xhamster.desi/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
        'only_matching': True,
    }, {
        'url': 'https://xhamster2.com/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
        'only_matching': True,
    }, {
        'url': 'https://xhamster11.com/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
        'only_matching': True,
    }, {
        'url': 'https://xhamster26.com/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
        'only_matching': True,
    }, {
        'url': 'http://xhamster.com/movies/1509445/femaleagent_shy_beauty_takes_the_bait.html',
        'only_matching': True,
    }, {
        'url': 'http://xhamster.com/movies/2221348/britney_spears_sexy_booty.html?hd',
        'only_matching': True,
    }, {
        'url': 'http://de.xhamster.com/videos/skinny-girl-fucks-herself-hard-in-the-forest-xhnBJZx',
        'only_matching': True,
    }, {
        'url': 'https://xhday.com/videos/strapless-threesome-xhh7yVf',
        'only_matching': True,
    }, {
        'url': 'https://xhvid.com/videos/lk-mm-xhc6wn6',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        # Either the old-schema or new-schema group is populated, never both.
        video_id = mobj.group('id') or mobj.group('id_2')
        display_id = mobj.group('display_id') or mobj.group('display_id_2')

        # Force the desktop site: strip a leading 'm.' (mobile) subdomain.
        desktop_url = re.sub(r'^(https?://(?:.+?\.)?)m\.', r'\1', url)
        webpage, urlh = self._download_webpage_handle(desktop_url, video_id)

        # Unavailable videos render a "videoClosed" notice; surface it verbatim.
        error = self._html_search_regex(
            r'<div[^>]+id=["\']videoClosed["\'][^>]*>(.+?)</div>',
            webpage, 'error', default=None)
        if error:
            raise ExtractorError(error, expected=True)

        age_limit = self._rta_search(webpage)

        def get_height(s):
            # Parse a leading "<N>p"-style quality label into pixels;
            # returns None for labels that don't start with digits.
            return int_or_none(self._search_regex(
                r'^(\d+)[pP]', s, 'height', default=None))

        # Modern layout: the page embeds its state as a JSON blob assigned
        # to window.initials; fall back to '{}' so parsing never hard-fails.
        initials = self._parse_json(
            self._search_regex(
                (r'window\.initials\s*=\s*({.+?})\s*;\s*</script>',
                 r'window\.initials\s*=\s*({.+?})\s*;'),
                webpage, 'initials', default='{}'),
            video_id, fatal=False)
        if initials:
            video = initials['videoModel']
            title = video['title']
            formats = []
            format_urls = set()  # dedupe across all source groups
            format_sizes = {}
            sources = try_get(video, lambda x: x['sources'], dict) or {}
            for format_id, formats_dict in sources.items():
                if not isinstance(formats_dict, dict):
                    continue
                # File sizes only appear under the 'download' group; collect
                # them so streaming formats of the same quality get a size too.
                download_sources = try_get(sources, lambda x: x['download'], dict) or {}
                for quality, format_dict in download_sources.items():
                    if not isinstance(format_dict, dict):
                        continue
                    format_sizes[quality] = float_or_none(format_dict.get('size'))
                for quality, format_item in formats_dict.items():
                    if format_id == 'download':
                        # Download link takes some time to be generated,
                        # skipping for now
                        continue
                    format_url = format_item
                    format_url = url_or_none(format_url)
                    if not format_url or format_url in format_urls:
                        continue
                    format_urls.add(format_url)
                    formats.append({
                        'format_id': '%s-%s' % (format_id, quality),
                        'url': format_url,
                        'ext': determine_ext(format_url, 'mp4'),
                        'height': get_height(quality),
                        'filesize': format_sizes.get(quality),
                        'http_headers': {
                            # The CDN requires a Referer; use the final
                            # (post-redirect) page URL.
                            'Referer': urlh.geturl(),
                        },
                    })
            # Newer player config: xplayerSettings carries HLS and
            # progressive ("standard") sources separately.
            xplayer_sources = try_get(
                initials, lambda x: x['xplayerSettings']['sources'], dict)
            if xplayer_sources:
                hls_sources = xplayer_sources.get('hls')
                if isinstance(hls_sources, dict):
                    # 'url' is the primary manifest, 'fallback' a backup host.
                    for hls_format_key in ('url', 'fallback'):
                        hls_url = hls_sources.get(hls_format_key)
                        if not hls_url:
                            continue
                        hls_url = urljoin(url, hls_url)
                        if not hls_url or hls_url in format_urls:
                            continue
                        format_urls.add(hls_url)
                        formats.extend(self._extract_m3u8_formats(
                            hls_url, video_id, 'mp4', entry_protocol='m3u8_native',
                            m3u8_id='hls', fatal=False))
                standard_sources = xplayer_sources.get('standard')
                if isinstance(standard_sources, dict):
                    for format_id, formats_list in standard_sources.items():
                        if not isinstance(formats_list, list):
                            continue
                        for standard_format in formats_list:
                            if not isinstance(standard_format, dict):
                                continue
                            for standard_format_key in ('url', 'fallback'):
                                standard_url = standard_format.get(standard_format_key)
                                if not standard_url:
                                    continue
                                standard_url = urljoin(url, standard_url)
                                if not standard_url or standard_url in format_urls:
                                    continue
                                format_urls.add(standard_url)
                                ext = determine_ext(standard_url, 'mp4')
                                if ext == 'm3u8':
                                    # Some "standard" entries are actually HLS.
                                    formats.extend(self._extract_m3u8_formats(
                                        standard_url, video_id, 'mp4', entry_protocol='m3u8_native',
                                        m3u8_id='hls', fatal=False))
                                    continue
                                quality = (str_or_none(standard_format.get('quality'))
                                           or str_or_none(standard_format.get('label'))
                                           or '')
                                formats.append({
                                    'format_id': '%s-%s' % (format_id, quality),
                                    'url': standard_url,
                                    'ext': ext,
                                    'height': get_height(quality),
                                    'filesize': format_sizes.get(quality),
                                    'http_headers': {
                                        'Referer': standard_url,
                                    },
                                })
            self._sort_formats(formats, field_preference=('height', 'width', 'tbr', 'format_id'))

            categories_list = video.get('categories')
            if isinstance(categories_list, list):
                categories = []
                for c in categories_list:
                    if not isinstance(c, dict):
                        continue
                    c_name = c.get('name')
                    if isinstance(c_name, compat_str):
                        categories.append(c_name)
            else:
                categories = None

            uploader_url = url_or_none(try_get(video, lambda x: x['author']['pageURL']))

            return {
                'id': video_id,
                'display_id': display_id,
                'title': title,
                'description': video.get('description'),
                'timestamp': int_or_none(video.get('created')),
                'uploader': try_get(
                    video, lambda x: x['author']['name'], compat_str),
                'uploader_url': uploader_url,
                # The uploader slug is the last path component of the profile URL.
                'uploader_id': uploader_url.split('/')[-1] if uploader_url else None,
                'thumbnail': video.get('thumbURL'),
                'duration': int_or_none(video.get('duration')),
                'view_count': int_or_none(video.get('views')),
                'like_count': int_or_none(try_get(
                    video, lambda x: x['rating']['likes'], int)),
                'dislike_count': int_or_none(try_get(
                    video, lambda x: x['rating']['dislikes'], int)),
                # NOTE(review): comment count is read from 'views', same as
                # view_count above — looks like a copy/paste slip; confirm the
                # actual videoModel field name before changing.
                'comment_count': int_or_none(video.get('views')),
                # The site is adult-only, so default to 18 when RTA is absent.
                'age_limit': age_limit if age_limit is not None else 18,
                'categories': categories,
                'formats': formats,
            }

        # Old layout fallback: scrape the legacy HTML page directly.
        title = self._html_search_regex(
            [r'<h1[^>]*>([^<]+)</h1>',
             r'<meta[^>]+itemprop=".*?caption.*?"[^>]+content="(.+?)"',
             r'<title[^>]*>(.+?)(?:,\s*[^,]*?\s*Porn\s*[^,]*?:\s*xHamster[^<]*| - xHamster\.com)</title>'],
            webpage, 'title')

        formats = []
        format_urls = set()

        # Legacy pages embed a plain {quality: url} mapping as 'sources: {...}'.
        sources = self._parse_json(
            self._search_regex(
                r'sources\s*:\s*({.+?})\s*,?\s*\n', webpage, 'sources',
                default='{}'),
            video_id, fatal=False)
        for format_id, format_url in sources.items():
            format_url = url_or_none(format_url)
            if not format_url:
                continue
            if format_url in format_urls:
                continue
            format_urls.add(format_url)
            formats.append({
                'format_id': format_id,
                'url': format_url,
                'height': get_height(format_id),
            })

        # Very old pages expose a single direct MP4 link in one of several forms.
        video_url = self._search_regex(
            [r'''file\s*:\s*(?P<q>["'])(?P<mp4>.+?)(?P=q)''',
             r'''<a\s+href=(?P<q>["'])(?P<mp4>.+?)(?P=q)\s+class=["']mp4Thumb''',
             r'''<video[^>]+file=(?P<q>["'])(?P<mp4>.+?)(?P=q)[^>]*>'''],
            webpage, 'video url', group='mp4', default=None)
        if video_url and video_url not in format_urls:
            formats.append({
                'url': video_url,
            })

        self._sort_formats(formats)

        # Only a few videos have a description
        mobj = re.search(r'<span>Description: </span>([^<]+)', webpage)
        description = mobj.group(1) if mobj else None

        upload_date = unified_strdate(self._search_regex(
            r'hint=["\'](\d{4}-\d{2}-\d{2}) \d{2}:\d{2}:\d{2} [A-Z]{3,4}',
            webpage, 'upload date', fatal=False))

        uploader = self._html_search_regex(
            r'<span[^>]+itemprop=["\']author[^>]+><a[^>]+><span[^>]+>([^<]+)',
            webpage, 'uploader', default='anonymous')

        thumbnail = self._search_regex(
            [r'''["']thumbUrl["']\s*:\s*(?P<q>["'])(?P<thumbnail>.+?)(?P=q)''',
             r'''<video[^>]+"poster"=(?P<q>["'])(?P<thumbnail>.+?)(?P=q)[^>]*>'''],
            webpage, 'thumbnail', fatal=False, group='thumbnail')

        duration = parse_duration(self._search_regex(
            [r'<[^<]+\bitemprop=["\']duration["\'][^<]+\bcontent=["\'](.+?)["\']',
             r'Runtime:\s*</span>\s*([\d:]+)'], webpage,
            'duration', fatal=False))

        view_count = int_or_none(self._search_regex(
            r'content=["\']User(?:View|Play)s:(\d+)',
            webpage, 'view count', fatal=False))

        mobj = re.search(r'hint=[\'"](?P<likecount>\d+) Likes / (?P<dislikecount>\d+) Dislikes', webpage)
        (like_count, dislike_count) = (mobj.group('likecount'), mobj.group('dislikecount')) if mobj else (None, None)

        mobj = re.search(r'</label>Comments \((?P<commentcount>\d+)\)</div>', webpage)
        comment_count = mobj.group('commentcount') if mobj else 0

        categories_html = self._search_regex(
            r'(?s)<table.+?(<span>Categories:.+?)</table>', webpage,
            'categories', default=None)
        categories = [clean_html(category) for category in re.findall(
            r'<a[^>]+>(.+?)</a>', categories_html)] if categories_html else None

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'upload_date': upload_date,
            'uploader': uploader,
            'uploader_id': uploader.lower() if uploader else None,
            'thumbnail': thumbnail,
            'duration': duration,
            'view_count': view_count,
            'like_count': int_or_none(like_count),
            'dislike_count': int_or_none(dislike_count),
            'comment_count': int_or_none(comment_count),
            'age_limit': age_limit,
            'categories': categories,
            'formats': formats,
        }


class XHamsterEmbedIE(InfoExtractor):
    """Extractor for xHamster ``xembed.php`` iframe embeds.

    Resolves the embed page back to the canonical video URL and hands it
    off to :class:`XHamsterIE`.
    """
    _VALID_URL = r'https?://(?:.+?\.)?%s/xembed\.php\?video=(?P<id>\d+)' % XHamsterIE._DOMAINS
    _TEST = {
        'url': 'http://xhamster.com/xembed.php?video=3328539',
        'info_dict': {
            'id': '3328539',
            'ext': 'mp4',
            'title': 'Pen Masturbation',
            'timestamp': 1406581861,
            'upload_date': '20140728',
            'uploader': 'ManyakisArt',
            'duration': 5,
            'age_limit': 18,
        }
    }

    @staticmethod
    def _extract_urls(webpage):
        # Find all xHamster embed iframes in a third-party page; used by the
        # generic extractor.
        return [url for _, url in re.findall(
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?xhamster\.com/xembed\.php\?video=\d+)\1',
            webpage)]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        # Prefer the canonical link embedded in the page (either URL schema).
        video_url = self._search_regex(
            r'href="(https?://xhamster\.com/(?:movies/{0}/[^"]*\.html|videos/[^/]*-{0})[^"]*)"'.format(video_id),
            webpage, 'xhamster url', default=None)

        if not video_url:
            # Fall back to the player's 'vars' JSON; any of these link fields
            # points back to the watch page.
            # (local name 'vars' shadows the builtin; kept for byte-compatibility)
            vars = self._parse_json(
                self._search_regex(r'vars\s*:\s*({.+?})\s*,\s*\n', webpage, 'vars'),
                video_id)
            video_url = dict_get(vars, ('downloadLink', 'homepageLink', 'commentsLink', 'shareUrl'))

        return self.url_result(video_url, 'XHamster')


class XHamsterUserIE(InfoExtractor):
    """Extractor for xHamster user profiles: yields all of a user's videos
    as a playlist, following the profile's pagination."""
    _VALID_URL = r'https?://(?:.+?\.)?%s/users/(?P<id>[^/?#&]+)' % XHamsterIE._DOMAINS
    _TESTS = [{
        # Paginated user profile
        'url': 'https://xhamster.com/users/netvideogirls/videos',
        'info_dict': {
            'id': 'netvideogirls',
        },
        'playlist_mincount': 267,
    }, {
        # Non-paginated user profile
        'url': 'https://xhamster.com/users/firatkaan/videos',
        'info_dict': {
            'id': 'firatkaan',
        },
        'playlist_mincount': 1,
    }, {
        'url': 'https://xhday.com/users/mobhunter',
        'only_matching': True,
    }, {
        'url': 'https://xhvid.com/users/pelushe21',
        'only_matching': True,
    }]

    def _entries(self, user_id):
        # Walk the profile's video pages, yielding one url_result per thumb,
        # until no "next" pagination link remains.
        next_page_url = 'https://xhamster.com/users/%s/videos/1' % user_id
        for pagenum in itertools.count(1):
            page = self._download_webpage(
                next_page_url, user_id, 'Downloading page %s' % pagenum)
            for video_tag in re.findall(
                    r'(<a[^>]+class=["\'].*?\bvideo-thumb__image-container[^>]+>)',
                    page):
                video = extract_attributes(video_tag)
                video_url = url_or_none(video.get('href'))
                # Skip hrefs that aren't single-video URLs (e.g. playlists).
                if not video_url or not XHamsterIE.suitable(video_url):
                    continue
                video_id = XHamsterIE._match_id(video_url)
                yield self.url_result(
                    video_url, ie=XHamsterIE.ie_key(), video_id=video_id)
            mobj = re.search(r'<a[^>]+data-page=["\']next[^>]+>', page)
            if not mobj:
                break
            next_page = extract_attributes(mobj.group(0))
            next_page_url = url_or_none(next_page.get('href'))
            if not next_page_url:
                break

    def _real_extract(self, url):
        user_id = self._match_id(url)
        return self.playlist_result(self._entries(user_id), user_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/kaltura.py
youtube_dl/extractor/kaltura.py
# coding: utf-8 from __future__ import unicode_literals import re import base64 from .common import InfoExtractor from ..compat import ( compat_urlparse, compat_parse_qs, ) from ..utils import ( clean_html, ExtractorError, int_or_none, unsmuggle_url, smuggle_url, ) class KalturaIE(InfoExtractor): _VALID_URL = r'''(?x) (?: kaltura:(?P<partner_id>\d+):(?P<id>[0-9a-z_]+)| https?:// (:?(?:www|cdnapi(?:sec)?)\.)?kaltura\.com(?::\d+)?/ (?: (?: # flash player index\.php/(?:kwidget|extwidget/preview)| # html5 player html5/html5lib/[^/]+/mwEmbedFrame\.php ) )(?:/(?P<path>[^?]+))?(?:\?(?P<query>.*))? ) ''' _SERVICE_URL = 'http://cdnapi.kaltura.com' _SERVICE_BASE = '/api_v3/index.php' # See https://github.com/kaltura/server/blob/master/plugins/content/caption/base/lib/model/enums/CaptionType.php _CAPTION_TYPES = { 1: 'srt', 2: 'ttml', 3: 'vtt', } _TESTS = [ { 'url': 'kaltura:269692:1_1jc2y3e4', 'md5': '3adcbdb3dcc02d647539e53f284ba171', 'info_dict': { 'id': '1_1jc2y3e4', 'ext': 'mp4', 'title': 'Straight from the Heart', 'upload_date': '20131219', 'uploader_id': 'mlundberg@wolfgangsvault.com', 'description': 'The Allman Brothers Band, 12/16/1981', 'thumbnail': 're:^https?://.*/thumbnail/.*', 'timestamp': int, }, }, { 'url': 'http://www.kaltura.com/index.php/kwidget/cache_st/1300318621/wid/_269692/uiconf_id/3873291/entry_id/1_1jc2y3e4', 'only_matching': True, }, { 'url': 'https://cdnapisec.kaltura.com/index.php/kwidget/wid/_557781/uiconf_id/22845202/entry_id/1_plr1syf3', 'only_matching': True, }, { 'url': 'https://cdnapisec.kaltura.com/html5/html5lib/v2.30.2/mwEmbedFrame.php/p/1337/uiconf_id/20540612/entry_id/1_sf5ovm7u?wid=_243342', 'only_matching': True, }, { # video with subtitles 'url': 'kaltura:111032:1_cw786r8q', 'only_matching': True, }, { # video with ttml subtitles (no fileExt) 'url': 'kaltura:1926081:0_l5ye1133', 'info_dict': { 'id': '0_l5ye1133', 'ext': 'mp4', 'title': 'What Can You Do With Python?', 'upload_date': '20160221', 'uploader_id': 'stork', 'thumbnail': 
're:^https?://.*/thumbnail/.*', 'timestamp': int, 'subtitles': { 'en': [{ 'ext': 'ttml', }], }, }, 'skip': 'Gone. Maybe https://www.safaribooksonline.com/library/tutorials/introduction-to-python-anon/3469/', 'params': { 'skip_download': True, }, }, { 'url': 'https://www.kaltura.com/index.php/extwidget/preview/partner_id/1770401/uiconf_id/37307382/entry_id/0_58u8kme7/embed/iframe?&flashvars[streamerType]=auto', 'only_matching': True, }, { 'url': 'https://www.kaltura.com:443/index.php/extwidget/preview/partner_id/1770401/uiconf_id/37307382/entry_id/0_58u8kme7/embed/iframe?&flashvars[streamerType]=auto', 'only_matching': True, }, { # unavailable source format 'url': 'kaltura:513551:1_66x4rg7o', 'only_matching': True, } ] @staticmethod def _extract_url(webpage): urls = KalturaIE._extract_urls(webpage) return urls[0] if urls else None @staticmethod def _extract_urls(webpage): # Embed codes: https://knowledge.kaltura.com/embedding-kaltura-media-players-your-site finditer = ( list(re.finditer( r"""(?xs) kWidget\.(?:thumb)?[Ee]mbed\( \{.*? (?P<q1>['"])wid(?P=q1)\s*:\s* (?P<q2>['"])_?(?P<partner_id>(?:(?!(?P=q2)).)+)(?P=q2),.*? (?P<q3>['"])entry_?[Ii]d(?P=q3)\s*:\s* (?P<q4>['"])(?P<id>(?:(?!(?P=q4)).)+)(?P=q4)(?:,|\s*\}) """, webpage)) or list(re.finditer( r'''(?xs) (?P<q1>["']) (?:https?:)?//cdnapi(?:sec)?\.kaltura\.com(?::\d+)?/(?:(?!(?P=q1)).)*\b(?:p|partner_id)/(?P<partner_id>\d+)(?:(?!(?P=q1)).)* (?P=q1).*? 
(?: (?: entry_?[Ii]d| (?P<q2>["'])entry_?[Ii]d(?P=q2) )\s*:\s*| \[\s*(?P<q2_1>["'])entry_?[Ii]d(?P=q2_1)\s*\]\s*=\s* ) (?P<q3>["'])(?P<id>(?:(?!(?P=q3)).)+)(?P=q3) ''', webpage)) or list(re.finditer( r'''(?xs) <(?:iframe[^>]+src|meta[^>]+\bcontent)=(?P<q1>["'])\s* (?:https?:)?//(?:(?:www|cdnapi(?:sec)?)\.)?kaltura\.com/(?:(?!(?P=q1)).)*\b(?:p|partner_id)/(?P<partner_id>\d+) (?:(?!(?P=q1)).)* [?&;]entry_id=(?P<id>(?:(?!(?P=q1))[^&])+) (?:(?!(?P=q1)).)* (?P=q1) ''', webpage)) ) urls = [] for mobj in finditer: embed_info = mobj.groupdict() for k, v in embed_info.items(): if v: embed_info[k] = v.strip() url = 'kaltura:%(partner_id)s:%(id)s' % embed_info escaped_pid = re.escape(embed_info['partner_id']) service_mobj = re.search( r'<script[^>]+src=(["\'])(?P<id>(?:https?:)?//(?:(?!\1).)+)/p/%s/sp/%s00/embedIframeJs' % (escaped_pid, escaped_pid), webpage) if service_mobj: url = smuggle_url(url, {'service_url': service_mobj.group('id')}) urls.append(url) return urls def _kaltura_api_call(self, video_id, actions, service_url=None, *args, **kwargs): params = actions[0] if len(actions) > 1: for i, a in enumerate(actions[1:], start=1): for k, v in a.items(): params['%d:%s' % (i, k)] = v data = self._download_json( (service_url or self._SERVICE_URL) + self._SERVICE_BASE, video_id, query=params, *args, **kwargs) status = data if len(actions) == 1 else data[0] if status.get('objectType') == 'KalturaAPIException': raise ExtractorError( '%s said: %s' % (self.IE_NAME, status['message'])) return data def _get_video_info(self, video_id, partner_id, service_url=None): actions = [ { 'action': 'null', 'apiVersion': '3.1.5', 'clientTag': 'kdp:v3.8.5', 'format': 1, # JSON, 2 = XML, 3 = PHP 'service': 'multirequest', }, { 'expiry': 86400, 'service': 'session', 'action': 'startWidgetSession', 'widgetId': '_%s' % partner_id, }, { 'action': 'get', 'entryId': video_id, 'service': 'baseentry', 'ks': '{1:result:ks}', 'responseProfile:fields': 
'createdAt,dataUrl,duration,name,plays,thumbnailUrl,userId', 'responseProfile:type': 1, }, { 'action': 'getbyentryid', 'entryId': video_id, 'service': 'flavorAsset', 'ks': '{1:result:ks}', }, { 'action': 'list', 'filter:entryIdEqual': video_id, 'service': 'caption_captionasset', 'ks': '{1:result:ks}', }, ] return self._kaltura_api_call( video_id, actions, service_url, note='Downloading video info JSON') def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) mobj = re.match(self._VALID_URL, url) partner_id, entry_id = mobj.group('partner_id', 'id') ks = None captions = None if partner_id and entry_id: _, info, flavor_assets, captions = self._get_video_info(entry_id, partner_id, smuggled_data.get('service_url')) else: path, query = mobj.group('path', 'query') if not path and not query: raise ExtractorError('Invalid URL', expected=True) params = {} if query: params = compat_parse_qs(query) if path: splitted_path = path.split('/') params.update(dict((zip(splitted_path[::2], [[v] for v in splitted_path[1::2]])))) if 'wid' in params: partner_id = params['wid'][0][1:] elif 'p' in params: partner_id = params['p'][0] elif 'partner_id' in params: partner_id = params['partner_id'][0] else: raise ExtractorError('Invalid URL', expected=True) if 'entry_id' in params: entry_id = params['entry_id'][0] _, info, flavor_assets, captions = self._get_video_info(entry_id, partner_id) elif 'uiconf_id' in params and 'flashvars[referenceId]' in params: reference_id = params['flashvars[referenceId]'][0] webpage = self._download_webpage(url, reference_id) entry_data = self._parse_json(self._search_regex( r'window\.kalturaIframePackageData\s*=\s*({.*});', webpage, 'kalturaIframePackageData'), reference_id)['entryResult'] info, flavor_assets = entry_data['meta'], entry_data['contextData']['flavorAssets'] entry_id = info['id'] # Unfortunately, data returned in kalturaIframePackageData lacks # captions so we will try requesting the complete data using # regular approach since 
we now know the entry_id try: _, info, flavor_assets, captions = self._get_video_info( entry_id, partner_id) except ExtractorError: # Regular scenario failed but we already have everything # extracted apart from captions and can process at least # with this pass else: raise ExtractorError('Invalid URL', expected=True) ks = params.get('flashvars[ks]', [None])[0] source_url = smuggled_data.get('source_url') if source_url: referrer = base64.b64encode( '://'.join(compat_urlparse.urlparse(source_url)[:2]) .encode('utf-8')).decode('utf-8') else: referrer = None def sign_url(unsigned_url): if ks: unsigned_url += '/ks/%s' % ks if referrer: unsigned_url += '?referrer=%s' % referrer return unsigned_url data_url = info['dataUrl'] if '/flvclipper/' in data_url: data_url = re.sub(r'/flvclipper/.*', '/serveFlavor', data_url) formats = [] for f in flavor_assets: # Continue if asset is not ready if f.get('status') != 2: continue # Original format that's not available (e.g. kaltura:1926081:0_c03e1b5g) # skip for now. if f.get('fileExt') == 'chun': continue # DRM-protected video, cannot be decrypted if f.get('fileExt') == 'wvm': continue if not f.get('fileExt'): # QT indicates QuickTime; some videos have broken fileExt if f.get('containerFormat') == 'qt': f['fileExt'] = 'mov' else: f['fileExt'] = 'mp4' video_url = sign_url( '%s/flavorId/%s' % (data_url, f['id'])) format_id = '%(fileExt)s-%(bitrate)s' % f # Source format may not be available (e.g. kaltura:513551:1_66x4rg7o) if f.get('isOriginal') is True and not self._is_valid_url( video_url, entry_id, format_id): continue # audio-only has no videoCodecId (e.g. 
kaltura:1926081:0_c03e1b5g # -f mp4-56) vcodec = 'none' if 'videoCodecId' not in f and f.get( 'frameRate') == 0 else f.get('videoCodecId') formats.append({ 'format_id': format_id, 'ext': f.get('fileExt'), 'tbr': int_or_none(f['bitrate']), 'fps': int_or_none(f.get('frameRate')), 'filesize_approx': int_or_none(f.get('size'), invscale=1024), 'container': f.get('containerFormat'), 'vcodec': vcodec, 'height': int_or_none(f.get('height')), 'width': int_or_none(f.get('width')), 'url': video_url, }) if '/playManifest/' in data_url: m3u8_url = sign_url(data_url.replace( 'format/url', 'format/applehttp')) formats.extend(self._extract_m3u8_formats( m3u8_url, entry_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) self._sort_formats(formats) subtitles = {} if captions: for caption in captions.get('objects', []): # Continue if caption is not ready if caption.get('status') != 2: continue if not caption.get('id'): continue caption_format = int_or_none(caption.get('format')) subtitles.setdefault(caption.get('languageCode') or caption.get('language'), []).append({ 'url': '%s/api_v3/service/caption_captionasset/action/serve/captionAssetId/%s' % (self._SERVICE_URL, caption['id']), 'ext': caption.get('fileExt') or self._CAPTION_TYPES.get(caption_format) or 'ttml', }) return { 'id': entry_id, 'title': info['name'], 'formats': formats, 'subtitles': subtitles, 'description': clean_html(info.get('description')), 'thumbnail': info.get('thumbnailUrl'), 'duration': info.get('duration'), 'timestamp': info.get('createdAt'), 'uploader_id': info.get('userId') if info.get('userId') != 'None' else None, 'view_count': int_or_none(info.get('plays')), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/viqeo.py
youtube_dl/extractor/viqeo.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( int_or_none, str_or_none, url_or_none, ) class ViqeoIE(InfoExtractor): _VALID_URL = r'''(?x) (?: viqeo:| https?://cdn\.viqeo\.tv/embed/*\?.*?\bvid=| https?://api\.viqeo\.tv/v\d+/data/startup?.*?\bvideo(?:%5B%5D|\[\])= ) (?P<id>[\da-f]+) ''' _TESTS = [{ 'url': 'https://cdn.viqeo.tv/embed/?vid=cde96f09d25f39bee837', 'md5': 'a169dd1a6426b350dca4296226f21e76', 'info_dict': { 'id': 'cde96f09d25f39bee837', 'ext': 'mp4', 'title': 'cde96f09d25f39bee837', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 76, }, }, { 'url': 'viqeo:cde96f09d25f39bee837', 'only_matching': True, }, { 'url': 'https://api.viqeo.tv/v1/data/startup?video%5B%5D=71bbec412ade45c3216c&profile=112', 'only_matching': True, }] @staticmethod def _extract_urls(webpage): return [ mobj.group('url') for mobj in re.finditer( r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//cdn\.viqeo\.tv/embed/*\?.*?\bvid=[\da-f]+.*?)\1', webpage)] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( 'https://cdn.viqeo.tv/embed/?vid=%s' % video_id, video_id) data = self._parse_json( self._search_regex( r'SLOT_DATA\s*=\s*({.+?})\s*;', webpage, 'slot data'), video_id) formats = [] thumbnails = [] for media_file in data['mediaFiles']: if not isinstance(media_file, dict): continue media_url = url_or_none(media_file.get('url')) if not media_url or not media_url.startswith(('http', '//')): continue media_type = str_or_none(media_file.get('type')) if not media_type: continue media_kind = media_type.split('/')[0].lower() f = { 'url': media_url, 'width': int_or_none(media_file.get('width')), 'height': int_or_none(media_file.get('height')), } format_id = str_or_none(media_file.get('quality')) if media_kind == 'image': f['id'] = format_id thumbnails.append(f) elif media_kind in ('video', 'audio'): is_audio = media_kind == 'audio' f.update({ 'format_id': 'audio' if is_audio 
else format_id, 'fps': int_or_none(media_file.get('fps')), 'vcodec': 'none' if is_audio else None, }) formats.append(f) self._sort_formats(formats) duration = int_or_none(data.get('duration')) return { 'id': video_id, 'title': video_id, 'duration': duration, 'thumbnails': thumbnails, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/rutv.py
youtube_dl/extractor/rutv.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, str_to_int ) class RUTVIE(InfoExtractor): IE_DESC = 'RUTV.RU' _VALID_URL = r'''(?x) https?:// (?:test)?player\.(?:rutv\.ru|vgtrk\.com)/ (?P<path> flash\d+v/container\.swf\?id=| iframe/(?P<type>swf|video|live)/id/| index/iframe/cast_id/ ) (?P<id>\d+) ''' _TESTS = [ { 'url': 'http://player.rutv.ru/flash2v/container.swf?id=774471&sid=kultura&fbv=true&isPlay=true&ssl=false&i=560&acc_video_id=episode_id/972347/video_id/978186/brand_id/31724', 'info_dict': { 'id': '774471', 'ext': 'mp4', 'title': 'Монологи на все времена', 'description': 'md5:18d8b5e6a41fb1faa53819471852d5d5', 'duration': 2906, }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'https://player.vgtrk.com/flash2v/container.swf?id=774016&sid=russiatv&fbv=true&isPlay=true&ssl=false&i=560&acc_video_id=episode_id/972098/video_id/977760/brand_id/57638', 'info_dict': { 'id': '774016', 'ext': 'mp4', 'title': 'Чужой в семье Сталина', 'description': '', 'duration': 2539, }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://player.rutv.ru/iframe/swf/id/766888/sid/hitech/?acc_video_id=4000', 'info_dict': { 'id': '766888', 'ext': 'mp4', 'title': 'Вести.net: интернет-гиганты начали перетягивание программных "одеял"', 'description': 'md5:65ddd47f9830c4f42ed6475f8730c995', 'duration': 279, }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://player.rutv.ru/iframe/video/id/771852/start_zoom/true/showZoomBtn/false/sid/russiatv/?acc_video_id=episode_id/970443/video_id/975648/brand_id/5169', 'info_dict': { 'id': '771852', 'ext': 'mp4', 'title': 'Прямой эфир. 
Жертвы загадочной болезни: смерть от старости в 17 лет', 'description': 'md5:b81c8c55247a4bd996b43ce17395b2d8', 'duration': 3096, }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://player.rutv.ru/iframe/live/id/51499/showZoomBtn/false/isPlay/true/sid/sochi2014', 'info_dict': { 'id': '51499', 'ext': 'flv', 'title': 'Сочи-2014. Биатлон. Индивидуальная гонка. Мужчины ', 'description': 'md5:9e0ed5c9d2fa1efbfdfed90c9a6d179c', }, 'skip': 'Translation has finished', }, { 'url': 'http://player.rutv.ru/iframe/live/id/21/showZoomBtn/false/isPlay/true/', 'info_dict': { 'id': '21', 'ext': 'mp4', 'title': 're:^Россия 24. Прямой эфир [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'is_live': True, }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'https://testplayer.vgtrk.com/iframe/live/id/19201/showZoomBtn/false/isPlay/true/', 'only_matching': True, }, ] @classmethod def _extract_url(cls, webpage): mobj = re.search( r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:test)?player\.(?:rutv\.ru|vgtrk\.com)/(?:iframe/(?:swf|video|live)/id|index/iframe/cast_id)/.+?)\1', webpage) if mobj: return mobj.group('url') mobj = re.search( r'<meta[^>]+?property=(["\'])og:video\1[^>]+?content=(["\'])(?P<url>https?://(?:test)?player\.(?:rutv\.ru|vgtrk\.com)/flash\d+v/container\.swf\?id=.+?\2)', webpage) if mobj: return mobj.group('url') def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') video_path = mobj.group('path') if re.match(r'flash\d+v', video_path): video_type = 'video' elif video_path.startswith('iframe'): video_type = mobj.group('type') if video_type == 'swf': video_type = 'video' elif video_path.startswith('index/iframe/cast_id'): video_type = 'live' is_live = video_type == 'live' json_data = self._download_json( 'http://player.rutv.ru/iframe/data%s/id/%s' % ('live' if is_live else 'video', video_id), video_id, 'Downloading JSON') if json_data['errors']: raise ExtractorError('%s said: %s' % 
(self.IE_NAME, json_data['errors']), expected=True) playlist = json_data['data']['playlist'] medialist = playlist['medialist'] media = medialist[0] if media['errors']: raise ExtractorError('%s said: %s' % (self.IE_NAME, media['errors']), expected=True) view_count = playlist.get('count_views') priority_transport = playlist['priority_transport'] thumbnail = media['picture'] width = int_or_none(media['width']) height = int_or_none(media['height']) description = media['anons'] title = media['title'] duration = int_or_none(media.get('duration')) formats = [] for transport, links in media['sources'].items(): for quality, url in links.items(): preference = -1 if priority_transport == transport else -2 if transport == 'rtmp': mobj = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>.+))/(?P<playpath>.+)$', url) if not mobj: continue fmt = { 'url': mobj.group('url'), 'play_path': mobj.group('playpath'), 'app': mobj.group('app'), 'page_url': 'http://player.rutv.ru', 'player_url': 'http://player.rutv.ru/flash3v/osmf.swf?i=22', 'rtmp_live': True, 'ext': 'flv', 'vbr': str_to_int(quality), 'preference': preference, } elif transport == 'm3u8': formats.extend(self._extract_m3u8_formats( url, video_id, 'mp4', preference=preference, m3u8_id='hls')) continue else: fmt = { 'url': url } fmt.update({ 'width': width, 'height': height, 'format_id': '%s-%s' % (transport, quality), }) formats.append(fmt) self._sort_formats(formats) return { 'id': video_id, 'title': self._live_title(title) if is_live else title, 'description': description, 'thumbnail': thumbnail, 'view_count': view_count, 'duration': duration, 'formats': formats, 'is_live': is_live, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/nowness.py
youtube_dl/extractor/nowness.py
# coding: utf-8 from __future__ import unicode_literals from .brightcove import ( BrightcoveLegacyIE, BrightcoveNewIE, ) from .common import InfoExtractor from ..compat import compat_str from ..utils import ( ExtractorError, sanitized_Request, ) class NownessBaseIE(InfoExtractor): def _extract_url_result(self, post): if post['type'] == 'video': for media in post['media']: if media['type'] == 'video': video_id = media['content'] source = media['source'] if source == 'brightcove': player_code = self._download_webpage( 'http://www.nowness.com/iframe?id=%s' % video_id, video_id, note='Downloading player JavaScript', errnote='Unable to download player JavaScript') bc_url = BrightcoveLegacyIE._extract_brightcove_url(player_code) if bc_url: return self.url_result(bc_url, BrightcoveLegacyIE.ie_key()) bc_url = BrightcoveNewIE._extract_url(self, player_code) if bc_url: return self.url_result(bc_url, BrightcoveNewIE.ie_key()) raise ExtractorError('Could not find player definition') elif source == 'vimeo': return self.url_result('http://vimeo.com/%s' % video_id, 'Vimeo') elif source == 'youtube': return self.url_result(video_id, 'Youtube') elif source == 'cinematique': # youtube-dl currently doesn't support cinematique # return self.url_result('http://cinematique.com/embed/%s' % video_id, 'Cinematique') pass def _api_request(self, url, request_path): display_id = self._match_id(url) request = sanitized_Request( 'http://api.nowness.com/api/' + request_path % display_id, headers={ 'X-Nowness-Language': 'zh-cn' if 'cn.nowness.com' in url else 'en-us', }) return display_id, self._download_json(request, display_id) class NownessIE(NownessBaseIE): IE_NAME = 'nowness' _VALID_URL = r'https?://(?:(?:www|cn)\.)?nowness\.com/(?:story|(?:series|category)/[^/]+)/(?P<id>[^/]+?)(?:$|[?#])' _TESTS = [{ 'url': 'https://www.nowness.com/story/candor-the-art-of-gesticulation', 'md5': '068bc0202558c2e391924cb8cc470676', 'info_dict': { 'id': '2520295746001', 'ext': 'mp4', 'title': 'Candor: The Art 
of Gesticulation', 'description': 'Candor: The Art of Gesticulation', 'thumbnail': r're:^https?://.*\.jpg', 'timestamp': 1446745676, 'upload_date': '20151105', 'uploader_id': '2385340575001', }, 'add_ie': ['BrightcoveNew'], }, { 'url': 'https://cn.nowness.com/story/kasper-bjorke-ft-jaakko-eino-kalevi-tnr', 'md5': 'e79cf125e387216f86b2e0a5b5c63aa3', 'info_dict': { 'id': '3716354522001', 'ext': 'mp4', 'title': 'Kasper Bjørke ft. Jaakko Eino Kalevi: TNR', 'description': 'Kasper Bjørke ft. Jaakko Eino Kalevi: TNR', 'thumbnail': r're:^https?://.*\.jpg', 'timestamp': 1407315371, 'upload_date': '20140806', 'uploader_id': '2385340575001', }, 'add_ie': ['BrightcoveNew'], }, { # vimeo 'url': 'https://www.nowness.com/series/nowness-picks/jean-luc-godard-supercut', 'md5': '9a5a6a8edf806407e411296ab6bc2a49', 'info_dict': { 'id': '130020913', 'ext': 'mp4', 'title': 'Bleu, Blanc, Rouge - A Godard Supercut', 'description': 'md5:f0ea5f1857dffca02dbd37875d742cec', 'thumbnail': r're:^https?://.*\.jpg', 'upload_date': '20150607', 'uploader': 'Cinema Sem Lei', 'uploader_id': 'cinemasemlei', }, 'add_ie': ['Vimeo'], }] def _real_extract(self, url): _, post = self._api_request(url, 'post/getBySlug/%s') return self._extract_url_result(post) class NownessPlaylistIE(NownessBaseIE): IE_NAME = 'nowness:playlist' _VALID_URL = r'https?://(?:(?:www|cn)\.)?nowness\.com/playlist/(?P<id>\d+)' _TEST = { 'url': 'https://www.nowness.com/playlist/3286/i-guess-thats-why-they-call-it-the-blues', 'info_dict': { 'id': '3286', }, 'playlist_mincount': 8, } def _real_extract(self, url): playlist_id, playlist = self._api_request(url, 'post?PlaylistId=%s') entries = [self._extract_url_result(item) for item in playlist['items']] return self.playlist_result(entries, playlist_id) class NownessSeriesIE(NownessBaseIE): IE_NAME = 'nowness:series' _VALID_URL = r'https?://(?:(?:www|cn)\.)?nowness\.com/series/(?P<id>[^/]+?)(?:$|[?#])' _TEST = { 'url': 'https://www.nowness.com/series/60-seconds', 'info_dict': { 'id': 
'60', 'title': '60 Seconds', 'description': 'One-minute wisdom in a new NOWNESS series', }, 'playlist_mincount': 4, } def _real_extract(self, url): display_id, series = self._api_request(url, 'series/getBySlug/%s') entries = [self._extract_url_result(post) for post in series['posts']] series_title = None series_description = None translations = series.get('translations', []) if translations: series_title = translations[0].get('title') or translations[0]['seoTitle'] series_description = translations[0].get('seoDescription') return self.playlist_result( entries, compat_str(series['id']), series_title, series_description)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/mediaset.py
youtube_dl/extractor/mediaset.py
# coding: utf-8 from __future__ import unicode_literals import re from .theplatform import ThePlatformBaseIE from ..compat import ( compat_parse_qs, compat_urllib_parse_urlparse, ) from ..utils import ( ExtractorError, int_or_none, update_url_query, ) class MediasetIE(ThePlatformBaseIE): _TP_TLD = 'eu' _VALID_URL = r'''(?x) (?: mediaset:| https?:// (?:(?:www|static3)\.)?mediasetplay\.mediaset\.it/ (?: (?:video|on-demand|movie)/(?:[^/]+/)+[^/]+_| player(?:/v\d+)?/index\.html\?.*?\bprogramGuid= ) )(?P<id>[0-9A-Z]{16,}) ''' _TESTS = [{ # full episode 'url': 'https://www.mediasetplay.mediaset.it/video/hellogoodbye/quarta-puntata_FAFU000000661824', 'md5': '9b75534d42c44ecef7bf1ffeacb7f85d', 'info_dict': { 'id': 'FAFU000000661824', 'ext': 'mp4', 'title': 'Quarta puntata', 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 1414.26, 'upload_date': '20161107', 'series': 'Hello Goodbye', 'timestamp': 1478532900, 'uploader': 'Rete 4', 'uploader_id': 'R4', }, }, { 'url': 'https://www.mediasetplay.mediaset.it/video/matrix/puntata-del-25-maggio_F309013801000501', 'md5': '288532f0ad18307705b01e581304cd7b', 'info_dict': { 'id': 'F309013801000501', 'ext': 'mp4', 'title': 'Puntata del 25 maggio', 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 6565.007, 'upload_date': '20180526', 'series': 'Matrix', 'timestamp': 1527326245, 'uploader': 'Canale 5', 'uploader_id': 'C5', }, }, { # clip 'url': 'https://www.mediasetplay.mediaset.it/video/gogglebox/un-grande-classico-della-commedia-sexy_FAFU000000661680', 'only_matching': True, }, { # iframe simple 'url': 'https://static3.mediasetplay.mediaset.it/player/index.html?appKey=5ad3966b1de1c4000d5cec48&programGuid=FAFU000000665924&id=665924', 'only_matching': True, }, { # iframe twitter (from http://www.wittytv.it/se-prima-mi-fidavo-zero/) 'url': 
'https://static3.mediasetplay.mediaset.it/player/index.html?appKey=5ad3966b1de1c4000d5cec48&programGuid=FAFU000000665104&id=665104', 'only_matching': True, }, { # embedUrl (from https://www.wittytv.it/amici/est-ce-que-tu-maimes-gabriele-5-dicembre-copia/) 'url': 'https://static3.mediasetplay.mediaset.it/player/v2/index.html?partnerId=wittytv&configId=&programGuid=FD00000000153323&autoplay=true&purl=http://www.wittytv.it/amici/est-ce-que-tu-maimes-gabriele-5-dicembre-copia/', 'only_matching': True, }, { 'url': 'mediaset:FAFU000000665924', 'only_matching': True, }, { 'url': 'https://www.mediasetplay.mediaset.it/video/mediasethaacuoreilfuturo/palmieri-alicudi-lisola-dei-tre-bambini-felici--un-decreto-per-alicudi-e-tutte-le-microscuole_FD00000000102295', 'only_matching': True, }, { 'url': 'https://www.mediasetplay.mediaset.it/video/cherryseason/anticipazioni-degli-episodi-del-23-ottobre_F306837101005C02', 'only_matching': True, }, { 'url': 'https://www.mediasetplay.mediaset.it/video/tg5/ambiente-onda-umana-per-salvare-il-pianeta_F309453601079D01', 'only_matching': True, }, { 'url': 'https://www.mediasetplay.mediaset.it/video/grandefratellovip/benedetta-una-doccia-gelata_F309344401044C135', 'only_matching': True, }, { 'url': 'https://www.mediasetplay.mediaset.it/movie/herculeslaleggendahainizio/hercules-la-leggenda-ha-inizio_F305927501000102', 'only_matching': True, }] @staticmethod def _extract_urls(ie, webpage): def _qs(url): return compat_parse_qs(compat_urllib_parse_urlparse(url).query) def _program_guid(qs): return qs.get('programGuid', [None])[0] entries = [] for mobj in re.finditer( r'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//(?:www\.)?video\.mediaset\.it/player/playerIFrame(?:Twitter)?\.shtml.*?)\1', webpage): embed_url = mobj.group('url') embed_qs = _qs(embed_url) program_guid = _program_guid(embed_qs) if program_guid: entries.append(embed_url) continue video_id = embed_qs.get('id', [None])[0] if not video_id: continue urlh = ie._request_webpage( 
embed_url, video_id, note='Following embed URL redirect') embed_url = urlh.geturl() program_guid = _program_guid(_qs(embed_url)) if program_guid: entries.append(embed_url) return entries def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None): for video in smil.findall(self._xpath_ns('.//video', namespace)): video.attrib['src'] = re.sub(r'(https?://vod05)t(-mediaset-it\.akamaized\.net/.+?.mpd)\?.+', r'\1\2', video.attrib['src']) return super(MediasetIE, self)._parse_smil_formats(smil, smil_url, video_id, namespace, f4m_params, transform_rtmp_url) def _real_extract(self, url): guid = self._match_id(url) tp_path = 'PR1GhC/media/guid/2702976343/' + guid info = self._extract_theplatform_metadata(tp_path, guid) formats = [] subtitles = {} first_e = None for asset_type in ('SD', 'HD'): # TODO: fixup ISM+none manifest URLs for f in ('MPEG4', 'MPEG-DASH+none', 'M3U+none'): try: tp_formats, tp_subtitles = self._extract_theplatform_smil( update_url_query('http://link.theplatform.%s/s/%s' % (self._TP_TLD, tp_path), { 'mbr': 'true', 'formats': f, 'assetTypes': asset_type, }), guid, 'Downloading %s %s SMIL data' % (f.split('+')[0], asset_type)) except ExtractorError as e: if not first_e: first_e = e break for tp_f in tp_formats: tp_f['quality'] = 1 if asset_type == 'HD' else 0 formats.extend(tp_formats) subtitles = self._merge_subtitles(subtitles, tp_subtitles) if first_e and not formats: raise first_e self._sort_formats(formats) fields = [] for templ, repls in (('tvSeason%sNumber', ('', 'Episode')), ('mediasetprogram$%s', ('brandTitle', 'numberOfViews', 'publishInfo'))): fields.extend(templ % repl for repl in repls) feed_data = self._download_json( 'https://feed.entertainment.tv.theplatform.eu/f/PR1GhC/mediaset-prod-all-programs/guid/-/' + guid, guid, fatal=False, query={'fields': ','.join(fields)}) if feed_data: publish_info = feed_data.get('mediasetprogram$publishInfo') or {} info.update({ 'episode_number': 
int_or_none(feed_data.get('tvSeasonEpisodeNumber')), 'season_number': int_or_none(feed_data.get('tvSeasonNumber')), 'series': feed_data.get('mediasetprogram$brandTitle'), 'uploader': publish_info.get('description'), 'uploader_id': publish_info.get('channel'), 'view_count': int_or_none(feed_data.get('mediasetprogram$numberOfViews')), }) info.update({ 'id': guid, 'formats': formats, 'subtitles': subtitles, }) return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/nova.py
youtube_dl/extractor/nova.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( clean_html, determine_ext, int_or_none, js_to_json, qualities, unified_strdate, url_or_none, ) class NovaEmbedIE(InfoExtractor): _VALID_URL = r'https?://media\.cms\.nova\.cz/embed/(?P<id>[^/?#&]+)' _TEST = { 'url': 'https://media.cms.nova.cz/embed/8o0n0r?autoplay=1', 'md5': 'ee009bafcc794541570edd44b71cbea3', 'info_dict': { 'id': '8o0n0r', 'ext': 'mp4', 'title': '2180. díl', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 2578, }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) duration = None formats = [] player = self._parse_json( self._search_regex( r'Player\.init\s*\([^,]+,\s*({.+?})\s*,\s*{.+?}\s*\)\s*;', webpage, 'player', default='{}'), video_id, fatal=False) if player: for format_id, format_list in player['tracks'].items(): if not isinstance(format_list, list): format_list = [format_list] for format_dict in format_list: if not isinstance(format_dict, dict): continue format_url = url_or_none(format_dict.get('src')) format_type = format_dict.get('type') ext = determine_ext(format_url) if (format_type == 'application/x-mpegURL' or format_id == 'HLS' or ext == 'm3u8'): formats.extend(self._extract_m3u8_formats( format_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) elif (format_type == 'application/dash+xml' or format_id == 'DASH' or ext == 'mpd'): formats.extend(self._extract_mpd_formats( format_url, video_id, mpd_id='dash', fatal=False)) else: formats.append({ 'url': format_url, }) duration = int_or_none(player.get('duration')) else: # Old path, not actual as of 08.04.2020 bitrates = self._parse_json( self._search_regex( r'(?s)(?:src|bitrates)\s*=\s*({.+?})\s*;', webpage, 'formats'), video_id, transform_source=js_to_json) QUALITIES = ('lq', 'mq', 'hq', 'hd') quality_key = qualities(QUALITIES) for format_id, format_list in bitrates.items(): if 
not isinstance(format_list, list): format_list = [format_list] for format_url in format_list: format_url = url_or_none(format_url) if not format_url: continue if format_id == 'hls': formats.extend(self._extract_m3u8_formats( format_url, video_id, ext='mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) continue f = { 'url': format_url, } f_id = format_id for quality in QUALITIES: if '%s.mp4' % quality in format_url: f_id += '-%s' % quality f.update({ 'quality': quality_key(quality), 'format_note': quality.upper(), }) break f['format_id'] = f_id formats.append(f) self._sort_formats(formats) title = self._og_search_title( webpage, default=None) or self._search_regex( (r'<value>(?P<title>[^<]+)', r'videoTitle\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1'), webpage, 'title', group='value') thumbnail = self._og_search_thumbnail( webpage, default=None) or self._search_regex( r'poster\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage, 'thumbnail', fatal=False, group='value') duration = int_or_none(self._search_regex( r'videoDuration\s*:\s*(\d+)', webpage, 'duration', default=duration)) return { 'id': video_id, 'title': title, 'thumbnail': thumbnail, 'duration': duration, 'formats': formats, } class NovaIE(InfoExtractor): IE_DESC = 'TN.cz, Prásk.tv, Nova.cz, Novaplus.cz, FANDA.tv, Krásná.cz and Doma.cz' _VALID_URL = r'https?://(?:[^.]+\.)?(?P<site>tv(?:noviny)?|tn|novaplus|vymena|fanda|krasna|doma|prask)\.nova\.cz/(?:[^/]+/)+(?P<id>[^/]+?)(?:\.html|/|$)' _TESTS = [{ 'url': 'http://tn.nova.cz/clanek/tajemstvi-ukryte-v-podzemi-specialni-nemocnice-v-prazske-krci.html#player_13260', 'md5': '249baab7d0104e186e78b0899c7d5f28', 'info_dict': { 'id': '1757139', 'display_id': 'tajemstvi-ukryte-v-podzemi-specialni-nemocnice-v-prazske-krci', 'ext': 'mp4', 'title': 'Podzemní nemocnice v pražské Krči', 'description': 'md5:f0a42dd239c26f61c28f19e62d20ef53', 'thumbnail': r're:^https?://.*\.(?:jpg)', } }, { 'url': 
'http://fanda.nova.cz/clanek/fun-and-games/krvavy-epos-zaklinac-3-divoky-hon-vychazi-vyhrajte-ho-pro-sebe.html', 'info_dict': { 'id': '1753621', 'ext': 'mp4', 'title': 'Zaklínač 3: Divoký hon', 'description': 're:.*Pokud se stejně jako my nemůžete.*', 'thumbnail': r're:https?://.*\.jpg(\?.*)?', 'upload_date': '20150521', }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'gone', }, { # media.cms.nova.cz embed 'url': 'https://novaplus.nova.cz/porad/ulice/epizoda/18760-2180-dil', 'info_dict': { 'id': '8o0n0r', 'ext': 'mp4', 'title': '2180. díl', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 2578, }, 'params': { 'skip_download': True, }, 'add_ie': [NovaEmbedIE.ie_key()], 'skip': 'CHYBA 404: STRÁNKA NENALEZENA', }, { 'url': 'http://sport.tn.nova.cz/clanek/sport/hokej/nhl/zivot-jde-dal-hodnotil-po-vyrazeni-z-playoff-jiri-sekac.html', 'only_matching': True, }, { 'url': 'http://fanda.nova.cz/clanek/fun-and-games/krvavy-epos-zaklinac-3-divoky-hon-vychazi-vyhrajte-ho-pro-sebe.html', 'only_matching': True, }, { 'url': 'http://doma.nova.cz/clanek/zdravi/prijdte-se-zapsat-do-registru-kostni-drene-jiz-ve-stredu-3-cervna.html', 'only_matching': True, }, { 'url': 'http://prask.nova.cz/clanek/novinky/co-si-na-sobe-nase-hvezdy-nechaly-pojistit.html', 'only_matching': True, }, { 'url': 'http://tv.nova.cz/clanek/novinky/zivot-je-zivot-bondovsky-trailer.html', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) display_id = mobj.group('id') site = mobj.group('site') webpage = self._download_webpage(url, display_id) description = clean_html(self._og_search_description(webpage, default=None)) if site == 'novaplus': upload_date = unified_strdate(self._search_regex( r'(\d{1,2}-\d{1,2}-\d{4})$', display_id, 'upload date', default=None)) elif site == 'fanda': upload_date = unified_strdate(self._search_regex( r'<span class="date_time">(\d{1,2}\.\d{1,2}\.\d{4})', webpage, 'upload date', default=None)) else: upload_date = None # 
novaplus embed_id = self._search_regex( r'<iframe[^>]+\bsrc=["\'](?:https?:)?//media\.cms\.nova\.cz/embed/([^/?#&]+)', webpage, 'embed url', default=None) if embed_id: return { '_type': 'url_transparent', 'url': 'https://media.cms.nova.cz/embed/%s' % embed_id, 'ie_key': NovaEmbedIE.ie_key(), 'id': embed_id, 'description': description, 'upload_date': upload_date } video_id = self._search_regex( [r"(?:media|video_id)\s*:\s*'(\d+)'", r'media=(\d+)', r'id="article_video_(\d+)"', r'id="player_(\d+)"'], webpage, 'video id') config_url = self._search_regex( r'src="(https?://(?:tn|api)\.nova\.cz/bin/player/videojs/config\.php\?[^"]+)"', webpage, 'config url', default=None) config_params = {} if not config_url: player = self._parse_json( self._search_regex( r'(?s)Player\s*\(.+?\s*,\s*({.+?\bmedia\b["\']?\s*:\s*["\']?\d+.+?})\s*\)', webpage, 'player', default='{}'), video_id, transform_source=js_to_json, fatal=False) if player: config_url = url_or_none(player.get('configUrl')) params = player.get('configParams') if isinstance(params, dict): config_params = params if not config_url: DEFAULT_SITE_ID = '23000' SITES = { 'tvnoviny': DEFAULT_SITE_ID, 'novaplus': DEFAULT_SITE_ID, 'vymena': DEFAULT_SITE_ID, 'krasna': DEFAULT_SITE_ID, 'fanda': '30', 'tn': '30', 'doma': '30', } site_id = self._search_regex( r'site=(\d+)', webpage, 'site id', default=None) or SITES.get( site, DEFAULT_SITE_ID) config_url = 'https://api.nova.cz/bin/player/videojs/config.php' config_params = { 'site': site_id, 'media': video_id, 'quality': 3, 'version': 1, } config = self._download_json( config_url, display_id, 'Downloading config JSON', query=config_params, transform_source=lambda s: s[s.index('{'):s.rindex('}') + 1]) mediafile = config['mediafile'] video_url = mediafile['src'] m = re.search(r'^(?P<url>rtmpe?://[^/]+/(?P<app>[^/]+?))/&*(?P<playpath>.+)$', video_url) if m: formats = [{ 'url': m.group('url'), 'app': m.group('app'), 'play_path': m.group('playpath'), 'player_path': 
'http://tvnoviny.nova.cz/static/shared/app/videojs/video-js.swf', 'ext': 'flv', }] else: formats = [{ 'url': video_url, }] self._sort_formats(formats) title = mediafile.get('meta', {}).get('title') or self._og_search_title(webpage) thumbnail = config.get('poster') return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': description, 'upload_date': upload_date, 'thumbnail': thumbnail, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/coub.py
youtube_dl/extractor/coub.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( ExtractorError, float_or_none, int_or_none, parse_iso8601, qualities, ) class CoubIE(InfoExtractor): _VALID_URL = r'(?:coub:|https?://(?:coub\.com/(?:view|embed|coubs)/|c-cdn\.coub\.com/fb-player\.swf\?.*\bcoub(?:ID|id)=))(?P<id>[\da-z]+)' _TESTS = [{ 'url': 'http://coub.com/view/5u5n1', 'info_dict': { 'id': '5u5n1', 'ext': 'mp4', 'title': 'The Matrix Moonwalk', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 4.6, 'timestamp': 1428527772, 'upload_date': '20150408', 'uploader': 'Artyom Loskutnikov', 'uploader_id': 'artyom.loskutnikov', 'view_count': int, 'like_count': int, 'repost_count': int, 'age_limit': 0, }, }, { 'url': 'http://c-cdn.coub.com/fb-player.swf?bot_type=vk&coubID=7w5a4', 'only_matching': True, }, { 'url': 'coub:5u5n1', 'only_matching': True, }, { # longer video id 'url': 'http://coub.com/view/237d5l5h', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) coub = self._download_json( 'http://coub.com/api/v2/coubs/%s.json' % video_id, video_id) if coub.get('error'): raise ExtractorError( '%s said: %s' % (self.IE_NAME, coub['error']), expected=True) title = coub['title'] file_versions = coub['file_versions'] QUALITIES = ('low', 'med', 'high') MOBILE = 'mobile' IPHONE = 'iphone' HTML5 = 'html5' SOURCE_PREFERENCE = (MOBILE, IPHONE, HTML5) quality_key = qualities(QUALITIES) preference_key = qualities(SOURCE_PREFERENCE) formats = [] for kind, items in file_versions.get(HTML5, {}).items(): if kind not in ('video', 'audio'): continue if not isinstance(items, dict): continue for quality, item in items.items(): if not isinstance(item, dict): continue item_url = item.get('url') if not item_url: continue formats.append({ 'url': item_url, 'format_id': '%s-%s-%s' % (HTML5, kind, quality), 'filesize': int_or_none(item.get('size')), 'vcodec': 'none' if kind == 'audio' else None, 'quality': quality_key(quality), 
'preference': preference_key(HTML5), }) iphone_url = file_versions.get(IPHONE, {}).get('url') if iphone_url: formats.append({ 'url': iphone_url, 'format_id': IPHONE, 'preference': preference_key(IPHONE), }) mobile_url = file_versions.get(MOBILE, {}).get('audio_url') if mobile_url: formats.append({ 'url': mobile_url, 'format_id': '%s-audio' % MOBILE, 'preference': preference_key(MOBILE), }) self._sort_formats(formats) thumbnail = coub.get('picture') duration = float_or_none(coub.get('duration')) timestamp = parse_iso8601(coub.get('published_at') or coub.get('created_at')) uploader = coub.get('channel', {}).get('title') uploader_id = coub.get('channel', {}).get('permalink') view_count = int_or_none(coub.get('views_count') or coub.get('views_increase_count')) like_count = int_or_none(coub.get('likes_count')) repost_count = int_or_none(coub.get('recoubs_count')) age_restricted = coub.get('age_restricted', coub.get('age_restricted_by_admin')) if age_restricted is not None: age_limit = 18 if age_restricted is True else 0 else: age_limit = None return { 'id': video_id, 'title': title, 'thumbnail': thumbnail, 'duration': duration, 'timestamp': timestamp, 'uploader': uploader, 'uploader_id': uploader_id, 'view_count': view_count, 'like_count': like_count, 'repost_count': repost_count, 'age_limit': age_limit, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/youtube.py
youtube_dl/extractor/youtube.py
# coding: utf-8 from __future__ import unicode_literals import collections import hashlib import itertools import json import os.path import random import re import string import time import traceback from .common import InfoExtractor, SearchInfoExtractor from ..compat import ( compat_chr, compat_HTTPError, compat_map as map, compat_dict as o_dict, compat_dict_items as dict_items, compat_str, compat_urllib_parse, compat_urllib_parse_parse_qs as compat_parse_qs, compat_urllib_parse_unquote_plus, compat_urllib_parse_urlparse, compat_zip as zip, ) from ..jsinterp import JSInterpreter from ..utils import ( bug_reports_message, clean_html, dict_get, error_to_compat_str, ExtractorError, filter_dict, float_or_none, get_first, extract_attributes, get_element_by_attribute, int_or_none, join_nonempty, js_to_json, LazyList, merge_dicts, mimetype2ext, NO_DEFAULT, parse_codecs, parse_count, parse_duration, parse_qs, qualities, remove_end, remove_start, smuggle_url, str_or_none, str_to_int, T, traverse_obj, try_call, try_get, txt_or_none, unescapeHTML, unified_strdate, unsmuggle_url, update_url, update_url_query, url_or_none, urlencode_postdata, urljoin, variadic, ) class YoutubeBaseInfoExtractor(InfoExtractor): """Provide base functions for Youtube extractors""" _LOGIN_URL = 'https://accounts.google.com/ServiceLogin' _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge' _LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup' _CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge' _TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}' _NETRC_MACHINE = 'youtube' # If True it will raise an error if no login info is provided _LOGIN_REQUIRED = False _PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM)' # priority order for now _INNERTUBE_CLIENTS = o_dict(( # Doesn't require a PoToken for some reason: thx yt-dlp/yt-dlp#14693 ('android_sdkless', { 'INNERTUBE_CONTEXT': { 'client': { 'clientName': 
'ANDROID', 'clientVersion': '20.10.38', 'userAgent': 'com.google.android.youtube/20.10.38 (Linux; U; Android 11) gzip', 'osName': 'Android', 'osVersion': '11', }, }, 'INNERTUBE_CONTEXT_CLIENT_NAME': 3, 'REQUIRE_JS_PLAYER': False, 'WITH_COOKIES': False, }), ('ios', { 'INNERTUBE_CONTEXT': { 'client': { 'clientName': 'IOS', 'clientVersion': '20.10.4', 'deviceMake': 'Apple', 'deviceModel': 'iPhone16,2', 'userAgent': 'com.google.ios.youtube/20.10.4 (iPhone16,2; U; CPU iOS 18_3_2 like Mac OS X;)', 'osName': 'iPhone', 'osVersion': '18.3.2.22D82', }, }, 'INNERTUBE_CONTEXT_CLIENT_NAME': 5, 'REQUIRE_PO_TOKEN': True, 'REQUIRE_JS_PLAYER': False, 'WITH_COOKIES': False, }), # mweb has 'ultralow' formats # See: https://github.com/yt-dlp/yt-dlp/pull/557 ('mweb', { 'INNERTUBE_CONTEXT': { 'client': { 'clientName': 'MWEB', 'clientVersion': '2.2.20250925.01.00', # mweb previously did not require PO Token with this UA 'userAgent': 'Mozilla/5.0 (iPad; CPU OS 16_7_10 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Mobile/15E148 Safari/604.1,gzip(gfe)', }, }, 'INNERTUBE_CONTEXT_CLIENT_NAME': 2, 'REQUIRE_PO_TOKEN': True, }), ('tv_downgraded', { 'INNERTUBE_CONTEXT': { 'client': { 'clientName': 'TVHTML5', 'clientVersion': '4', # avoids SABR formats, thx yt-dlp/yt-dlp#14887 'userAgent': 'Mozilla/5.0 (ChromiumStylePlatform) Cobalt/Version', }, }, 'INNERTUBE_CONTEXT_CLIENT_NAME': 7, 'SUPPORTS_COOKIES': True, }), ('tv', { 'INNERTUBE_CONTEXT': { 'client': { 'clientName': 'TVHTML5', 'clientVersion': '7.20250312.16.00', # See: https://github.com/youtube/cobalt/blob/main/cobalt/browser/user_agent/user_agent_platform_info.cc#L506 'userAgent': 'Mozilla/5.0 (ChromiumStylePlatform) Cobalt/25.lts.30.1034943-gold (unlike Gecko), Unknown_TV_Unknown_0/Unknown (Unknown, Unknown)', }, }, 'INNERTUBE_CONTEXT_CLIENT_NAME': 7, }), ('web', { 'INNERTUBE_CONTEXT': { 'client': { 'clientName': 'WEB', 'clientVersion': '2.20250925.01.00', 'userAgent': 'Mozilla/5.0', }, }, 
'INNERTUBE_CONTEXT_CLIENT_NAME': 1, 'REQUIRE_PO_TOKEN': True, }), ('web_embedded', { 'INNERTUBE_CONTEXT': { 'client': { 'clientName': 'WEB_EMBEDDED_PLAYER', 'clientVersion': '1.20250923.21.00', 'embedUrl': 'https://www.youtube.com/', # Can be any valid URL }, }, 'INNERTUBE_CONTEXT_CLIENT_NAME': 56, 'SUPPORTS_COOKIES': True, }), # Safari UA returns pre-merged video+audio 144p/240p/360p/720p/1080p HLS formats ('web_safari', { 'INNERTUBE_CONTEXT': { 'client': { 'clientName': 'WEB', 'clientVersion': '2.20250925.01.00', 'userAgent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.5 Safari/605.1.15,gzip(gfe)', }, }, 'INNERTUBE_CONTEXT_CLIENT_NAME': 1, 'SUPPORTS_COOKIES': True, 'REQUIRE_PO': True, }), # This client now requires sign-in for every video ('web_creator', { 'INNERTUBE_CONTEXT': { 'client': { 'clientName': 'WEB_CREATOR', 'clientVersion': '1.20250922.03.00', }, }, 'INNERTUBE_CONTEXT_CLIENT_NAME': 62, 'REQUIRE_AUTH': True, 'SUPPORTS_COOKIES': True, 'WITH_COOKIES': True, }), )) def _login(self): """ Attempt to log in to YouTube. True is returned if successful or skipped. False is returned if login failed. If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised. """ username, password = self._get_login_info() # No authentication to be performed if username is None: if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None: raise ExtractorError('No login info available, needed for using %s.' 
% self.IE_NAME, expected=True) return True login_page = self._download_webpage( self._LOGIN_URL, None, note='Downloading login page', errnote='unable to fetch login page', fatal=False) if login_page is False: return login_form = self._hidden_inputs(login_page) def req(url, f_req, note, errnote): data = login_form.copy() data.update({ 'pstMsg': 1, 'checkConnection': 'youtube', 'checkedDomains': 'youtube', 'hl': 'en', 'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]', 'f.req': json.dumps(f_req), 'flowName': 'GlifWebSignIn', 'flowEntry': 'ServiceLogin', # TODO: reverse actual botguard identifier generation algo 'bgRequest': '["identifier",""]', }) return self._download_json( url, None, note=note, errnote=errnote, transform_source=lambda s: re.sub(r'^[^[]*', '', s), fatal=False, data=urlencode_postdata(data), headers={ 'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8', 'Google-Accounts-XSRF': 1, }) def warn(message): self._downloader.report_warning(message) lookup_req = [ username, None, [], None, 'US', None, None, 2, False, True, [ None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4], 1, [None, None, []], None, None, None, True, ], username, ] lookup_results = req( self._LOOKUP_URL, lookup_req, 'Looking up account info', 'Unable to look up account info') if lookup_results is False: return False user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str) if not user_hash: warn('Unable to extract user hash') return False challenge_req = [ user_hash, None, 1, None, [1, None, None, None, [password, None, True]], [ None, None, [2, 1, None, 1, 
'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4], 1, [None, None, []], None, None, None, True, ]] challenge_results = req( self._CHALLENGE_URL, challenge_req, 'Logging in', 'Unable to log in') if challenge_results is False: return login_res = try_get(challenge_results, lambda x: x[0][5], list) if login_res: login_msg = try_get(login_res, lambda x: x[5], compat_str) warn( 'Unable to login: %s' % 'Invalid password' if login_msg == 'INCORRECT_ANSWER_ENTERED' else login_msg) return False res = try_get(challenge_results, lambda x: x[0][-1], list) if not res: warn('Unable to extract result entry') return False login_challenge = try_get(res, lambda x: x[0][0], list) if login_challenge: challenge_str = try_get(login_challenge, lambda x: x[2], compat_str) if challenge_str == 'TWO_STEP_VERIFICATION': # SEND_SUCCESS - TFA code has been successfully sent to phone # QUOTA_EXCEEDED - reached the limit of TFA codes status = try_get(login_challenge, lambda x: x[5], compat_str) if status == 'QUOTA_EXCEEDED': warn('Exceeded the limit of TFA codes, try later') return False tl = try_get(challenge_results, lambda x: x[1][2], compat_str) if not tl: warn('Unable to extract TL') return False tfa_code = self._get_tfa_info('2-step verification code') if not tfa_code: warn( 'Two-factor authentication required. 
Provide it either interactively or with --twofactor <code>' '(Note that only TOTP (Google Authenticator App) codes work at this time.)') return False tfa_code = remove_start(tfa_code, 'G-') tfa_req = [ user_hash, None, 2, None, [ 9, None, None, None, None, None, None, None, [None, tfa_code, True, 2], ]] tfa_results = req( self._TFA_URL.format(tl), tfa_req, 'Submitting TFA code', 'Unable to submit TFA code') if tfa_results is False: return False tfa_res = try_get(tfa_results, lambda x: x[0][5], list) if tfa_res: tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str) warn( 'Unable to finish TFA: %s' % 'Invalid TFA code' if tfa_msg == 'INCORRECT_ANSWER_ENTERED' else tfa_msg) return False check_cookie_url = try_get( tfa_results, lambda x: x[0][-1][2], compat_str) else: CHALLENGES = { 'LOGIN_CHALLENGE': "This device isn't recognized. For your security, Google wants to make sure it's really you.", 'USERNAME_RECOVERY': 'Please provide additional information to aid in the recovery process.', 'REAUTH': "There is something unusual about your activity. For your security, Google wants to make sure it's really you.", } challenge = CHALLENGES.get( challenge_str, '%s returned error %s.' % (self.IE_NAME, challenge_str)) warn('%s\nGo to https://accounts.google.com/, login and solve a challenge.' 
% challenge) return False else: check_cookie_url = try_get(res, lambda x: x[2], compat_str) if not check_cookie_url: warn('Unable to extract CheckCookie URL') return False check_cookie_results = self._download_webpage( check_cookie_url, None, 'Checking cookie', fatal=False) if check_cookie_results is False: return False if 'https://myaccount.google.com/' not in check_cookie_results: warn('Unable to log in') return False return True def _initialize_consent(self): cookies = self._get_cookies('https://www.youtube.com/') if cookies.get('__Secure-3PSID'): return socs = cookies.get('SOCS') if socs and not socs.value.startswith('CAA'): # not consented return self._set_cookie('.youtube.com', 'SOCS', 'CAI', secure=True) # accept all (required for mixes) def _real_initialize(self): self._initialize_consent() if self._downloader is None: return if not self._login(): return _DEFAULT_API_DATA = {'context': _INNERTUBE_CLIENTS['web']['INNERTUBE_CONTEXT']} _YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;' _YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;' _YT_INITIAL_BOUNDARY_RE = r'(?:var\s+meta|</script|\n)' _SAPISID = None def _generate_sapisidhash_header(self, origin='https://www.youtube.com'): time_now = round(time.time()) if self._SAPISID is None: yt_cookies = self._get_cookies('https://www.youtube.com') # Sometimes SAPISID cookie isn't present but __Secure-3PAPISID is. 
# See: https://github.com/yt-dlp/yt-dlp/issues/393 sapisid_cookie = dict_get( yt_cookies, ('__Secure-3PAPISID', 'SAPISID')) if sapisid_cookie and sapisid_cookie.value: self._SAPISID = sapisid_cookie.value self.write_debug('Extracted SAPISID cookie') # SAPISID cookie is required if not already present if not yt_cookies.get('SAPISID'): self.write_debug('Copying __Secure-3PAPISID cookie to SAPISID cookie') self._set_cookie( '.youtube.com', 'SAPISID', self._SAPISID, secure=True, expire_time=time_now + 3600) else: self._SAPISID = False if not self._SAPISID: return None # SAPISIDHASH algorithm from https://stackoverflow.com/a/32065323 sapisidhash = hashlib.sha1( '{0} {1} {2}'.format(time_now, self._SAPISID, origin).encode('utf-8')).hexdigest() return 'SAPISIDHASH {0}_{1}'.format(time_now, sapisidhash) def _call_api(self, ep, query, video_id, fatal=True, headers=None, note='Downloading API JSON'): data = self._DEFAULT_API_DATA.copy() data.update(query) real_headers = {'content-type': 'application/json'} if headers: real_headers.update(headers) # was: 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8' api_key = self.get_param('youtube_innertube_key') return self._download_json( 'https://www.youtube.com/youtubei/v1/%s' % ep, video_id=video_id, note=note, errnote='Unable to download API page', data=json.dumps(data).encode('utf8'), fatal=fatal, headers=real_headers, query=filter_dict({ 'key': api_key, 'prettyPrint': 'false', })) def _extract_yt_initial_data(self, video_id, webpage): return self._parse_json( self._search_regex( (r'%s\s*%s' % (self._YT_INITIAL_DATA_RE, self._YT_INITIAL_BOUNDARY_RE), self._YT_INITIAL_DATA_RE), webpage, 'yt initial data'), video_id) def _extract_visitor_data(self, *args): """ Extract visitorData from an API response or ytcfg Appears to be used to track session state """ visitor_data = self.get_param('youtube_visitor_data') if visitor_data: return visitor_data return get_first( args, (('VISITOR_DATA', ('INNERTUBE_CONTEXT', 'client', 'visitorData'), 
('responseContext', 'visitorData')), T(compat_str))) # @functools.cached_property def is_authenticated(self, _cache={}): if self not in _cache: _cache[self] = bool(self._generate_sapisidhash_header()) return _cache[self] def _extract_ytcfg(self, video_id, webpage): ytcfg = self._search_json( r'ytcfg\.set\s*\(', webpage, 'ytcfg', video_id, end_pattern=r'\)\s*;', default={}) traverse_obj(ytcfg, ( 'INNERTUBE_CONTEXT', 'client', 'configInfo', T(lambda x: x.pop('appInstallData', None)))) return ytcfg def _extract_video(self, renderer): video_id = renderer['videoId'] title = try_get( renderer, (lambda x: x['title']['runs'][0]['text'], lambda x: x['title']['simpleText'], lambda x: x['headline']['simpleText']), compat_str) description = try_get( renderer, lambda x: x['descriptionSnippet']['runs'][0]['text'], compat_str) duration = parse_duration(try_get( renderer, lambda x: x['lengthText']['simpleText'], compat_str)) view_count_text = try_get( renderer, lambda x: x['viewCountText']['simpleText'], compat_str) or '' view_count = str_to_int(self._search_regex( r'^([\d,]+)', re.sub(r'\s', '', view_count_text), 'view count', default=None)) uploader = try_get( renderer, (lambda x: x['ownerText']['runs'][0]['text'], lambda x: x['shortBylineText']['runs'][0]['text']), compat_str) return { '_type': 'url', 'ie_key': YoutubeIE.ie_key(), 'id': video_id, 'url': video_id, 'title': title, 'description': description, 'duration': duration, 'view_count': view_count, 'uploader': uploader, } @staticmethod def _get_text(data, *path_list, **kw_max_runs): max_runs = kw_max_runs.get('max_runs') for path in path_list or [None]: if path is None: obj = [data] # shortcut else: obj = traverse_obj(data, tuple(variadic(path) + (all,))) for runs in traverse_obj( obj, ('simpleText', {'text': T(compat_str)}, all, filter), ('runs', lambda _, r: isinstance(r.get('text'), compat_str), all, filter), (T(list), lambda _, r: isinstance(r.get('text'), compat_str)), default=[]): max_runs = int_or_none(max_runs, 
default=len(runs)) if max_runs < len(runs): runs = runs[:max_runs] text = ''.join(traverse_obj(runs, (Ellipsis, 'text'))) if text: return text @staticmethod def _extract_thumbnails(data, *path_list, **kw_final_key): """ Extract thumbnails from thumbnails dict @param path_list: path list to level that contains 'thumbnails' key """ final_key = kw_final_key.get('final_key', 'thumbnails') return traverse_obj(data, (( tuple(variadic(path) + (final_key, Ellipsis) for path in path_list or [()])), { 'url': ('url', T(url_or_none), # Sometimes youtube gives a wrong thumbnail URL. See: # https://github.com/yt-dlp/yt-dlp/issues/233 # https://github.com/ytdl-org/youtube-dl/issues/28023 T(lambda u: update_url(u, query=None) if u and 'maxresdefault' in u else u)), 'height': ('height', T(int_or_none)), 'width': ('width', T(int_or_none)), }, T(lambda t: t if t.get('url') else None))) def _search_results(self, query, params): data = { 'context': { 'client': { 'clientName': 'WEB', 'clientVersion': '2.20201021.03.00', }, }, 'query': query, } if params: data['params'] = params for page_num in itertools.count(1): search = self._download_json( 'https://www.youtube.com/youtubei/v1/search', video_id='query "%s"' % query, note='Downloading page %s' % page_num, errnote='Unable to download API page', fatal=False, data=json.dumps(data).encode('utf8'), query={ # 'key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8', 'prettyPrint': 'false', }, headers={'content-type': 'application/json'}) if not search: break slr_contents = traverse_obj( search, ('contents', 'twoColumnSearchResultsRenderer', 'primaryContents', 'sectionListRenderer', 'contents'), ('onResponseReceivedCommands', 0, 'appendContinuationItemsAction', 'continuationItems'), expected_type=list) if not slr_contents: break for video in traverse_obj( slr_contents, (Ellipsis, 'itemSectionRenderer', 'contents', Ellipsis, 'videoRenderer', T(lambda v: v if v.get('videoId') else None))): yield self._extract_video(video) token = traverse_obj( 
slr_contents, (-1, 'continuationItemRenderer', 'continuationEndpoint', 'continuationCommand', 'token', T(compat_str))) if not token: break data['continuation'] = token @staticmethod def _owner_endpoints_path(): return [ Ellipsis, lambda k, _: k.endswith('SecondaryInfoRenderer'), ('owner', 'videoOwner'), 'videoOwnerRenderer', 'title', 'runs', Ellipsis] def _extract_channel_id(self, webpage, videodetails={}, metadata={}, renderers=[]): channel_id = None if any((videodetails, metadata, renderers)): channel_id = ( traverse_obj(videodetails, 'channelId') or traverse_obj(metadata, 'externalChannelId', 'externalId') or traverse_obj(renderers, self._owner_endpoints_path() + [ 'navigationEndpoint', 'browseEndpoint', 'browseId'], get_all=False) ) return channel_id or self._html_search_meta( 'channelId', webpage, 'channel id', default=None) def _extract_author_var(self, webpage, var_name, videodetails={}, metadata={}, renderers=[]): result = None paths = { # (HTML, videodetails, metadata, renderers) 'name': ('content', 'author', (('ownerChannelName', None), 'title'), ['text']), 'url': ('href', 'ownerProfileUrl', 'vanityChannelUrl', ['navigationEndpoint', 'browseEndpoint', 'canonicalBaseUrl']), } if any((videodetails, metadata, renderers)): result = ( traverse_obj(videodetails, paths[var_name][1], get_all=False) or traverse_obj(metadata, paths[var_name][2], get_all=False) or traverse_obj(renderers, self._owner_endpoints_path() + paths[var_name][3], get_all=False) ) return result or traverse_obj( extract_attributes(self._search_regex( r'''(?s)(<link\b[^>]+\bitemprop\s*=\s*("|')%s\2[^>]*>)''' % re.escape(var_name), get_element_by_attribute('itemprop', 'author', webpage or '') or '', 'author link', default='')), paths[var_name][0]) @staticmethod def _yt_urljoin(url_or_path): return urljoin('https://www.youtube.com', url_or_path) def _extract_uploader_id(self, uploader_url): return self._search_regex( r'/(?:(?:channel|user)/|(?=@))([^/?&#]+)', uploader_url or '', 'uploader id', 
default=None) class YoutubeIE(YoutubeBaseInfoExtractor): IE_DESC = 'YouTube.com' _INVIDIOUS_SITES = ( # invidious-redirect websites r'(?:www\.)?redirect\.invidious\.io', r'(?:(?:www|dev)\.)?invidio\.us', # Invidious instances taken from https://github.com/iv-org/documentation/blob/master/Invidious-Instances.md r'(?:(?:www|no)\.)?invidiou\.sh', r'(?:(?:www|fi)\.)?invidious\.snopyta\.org', r'(?:www\.)?invidious\.kabi\.tk', r'(?:www\.)?invidious\.13ad\.de', r'(?:www\.)?invidious\.mastodon\.host', r'(?:www\.)?invidious\.zapashcanon\.fr', r'(?:www\.)?(?:invidious(?:-us)?|piped)\.kavin\.rocks', r'(?:www\.)?invidious\.tinfoil-hat\.net', r'(?:www\.)?invidious\.himiko\.cloud', r'(?:www\.)?invidious\.reallyancient\.tech', r'(?:www\.)?invidious\.tube', r'(?:www\.)?invidiou\.site', r'(?:www\.)?invidious\.site', r'(?:www\.)?invidious\.xyz', r'(?:www\.)?invidious\.nixnet\.xyz', r'(?:www\.)?invidious\.048596\.xyz', r'(?:www\.)?invidious\.drycat\.fr', r'(?:www\.)?inv\.skyn3t\.in', r'(?:www\.)?tube\.poal\.co', r'(?:www\.)?tube\.connect\.cafe', r'(?:www\.)?vid\.wxzm\.sx', r'(?:www\.)?vid\.mint\.lgbt', r'(?:www\.)?vid\.puffyan\.us', r'(?:www\.)?yewtu\.be', r'(?:www\.)?yt\.elukerio\.org', r'(?:www\.)?yt\.lelux\.fi', r'(?:www\.)?invidious\.ggc-project\.de', r'(?:www\.)?yt\.maisputain\.ovh', r'(?:www\.)?ytprivate\.com', r'(?:www\.)?invidious\.13ad\.de', r'(?:www\.)?invidious\.toot\.koeln', r'(?:www\.)?invidious\.fdn\.fr', r'(?:www\.)?watch\.nettohikari\.com', r'(?:www\.)?invidious\.namazso\.eu', r'(?:www\.)?invidious\.silkky\.cloud', r'(?:www\.)?invidious\.exonip\.de', r'(?:www\.)?invidious\.riverside\.rocks', r'(?:www\.)?invidious\.blamefran\.net', r'(?:www\.)?invidious\.moomoo\.de', r'(?:www\.)?ytb\.trom\.tf', r'(?:www\.)?yt\.cyberhost\.uk', r'(?:www\.)?kgg2m7yk5aybusll\.onion', r'(?:www\.)?qklhadlycap4cnod\.onion', r'(?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion', r'(?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion', 
r'(?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion', r'(?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion', r'(?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p', r'(?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion', r'(?:www\.)?w6ijuptxiku4xpnnaetxvnkc5vqcdu7mgns2u77qefoixi63vbvnpnqd\.onion', r'(?:www\.)?kbjggqkzv65ivcqj6bumvp337z6264huv5kpkwuv6gu5yjiskvan7fad\.onion', r'(?:www\.)?grwp24hodrefzvjjuccrkw3mjq4tzhaaq32amf33dzpmuxe7ilepcmad\.onion', r'(?:www\.)?hpniueoejy4opn7bc4ftgazyqjoeqwlvh2uiku2xqku6zpoa4bf5ruid\.onion', ) _VALID_URL = r"""(?x)^ ( (?:https?://|//) # http(s):// or protocol-independent URL (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com| (?:www\.)?deturl\.com/www\.youtube\.com| (?:www\.)?pwnyoutube\.com| (?:www\.)?hooktube\.com| (?:www\.)?yourepeat\.com| tube\.majestyc\.net| %(invidious)s| youtube\.googleapis\.com)/ # the various hostnames, with wildcard subdomains (?:.*?\#/)? # handle anchor (#/) redirect urls (?: # the various things that can precede the ID: (?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/ |shorts/ |(?: # or the v= param in all its forms (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx) (?:\?|\#!?) # the params delimiter ? or # or #! (?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&amp;v=V36LpHqtcDY) v= ) )) |(?: youtu\.be| # just youtu.be/xxxx vid\.plus| # or vid.plus/xxxx zwearz\.com/watch| # or zwearz.com/watch/xxxx %(invidious)s )/ |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId= ) )? # all until now is optional -> you can pass the naked ID (?P<id>[0-9A-Za-z_-]{11}) # here is it! the YouTube video ID (?(1).+)? # if we found the ID, everything can follow $""" % { 'invidious': '|'.join(_INVIDIOUS_SITES), } _PLAYER_INFO_RE = (
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
true
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/voicerepublic.py
youtube_dl/extractor/voicerepublic.py
from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_str from ..utils import ( ExtractorError, determine_ext, int_or_none, urljoin, ) class VoiceRepublicIE(InfoExtractor): _VALID_URL = r'https?://voicerepublic\.com/(?:talks|embed)/(?P<id>[0-9a-z-]+)' _TESTS = [{ 'url': 'http://voicerepublic.com/talks/watching-the-watchers-building-a-sousveillance-state', 'md5': 'b9174d651323f17783000876347116e3', 'info_dict': { 'id': '2296', 'display_id': 'watching-the-watchers-building-a-sousveillance-state', 'ext': 'm4a', 'title': 'Watching the Watchers: Building a Sousveillance State', 'description': 'Secret surveillance programs have metadata too. The people and companies that operate secret surveillance programs can be surveilled.', 'duration': 1556, 'view_count': int, } }, { 'url': 'http://voicerepublic.com/embed/watching-the-watchers-building-a-sousveillance-state', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) if '>Queued for processing, please stand by...<' in webpage: raise ExtractorError( 'Audio is still queued for processing', expected=True) talk = self._parse_json(self._search_regex( r'initialSnapshot\s*=\s*({.+?});', webpage, 'talk'), display_id)['talk'] title = talk['title'] formats = [{ 'url': urljoin(url, talk_url), 'format_id': format_id, 'ext': determine_ext(talk_url) or format_id, 'vcodec': 'none', } for format_id, talk_url in talk['media_links'].items()] self._sort_formats(formats) return { 'id': compat_str(talk.get('id') or display_id), 'display_id': display_id, 'title': title, 'description': talk.get('teaser'), 'thumbnail': talk.get('image_url'), 'duration': int_or_none(talk.get('archived_duration')), 'view_count': int_or_none(talk.get('play_count')), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/ntvcojp.py
youtube_dl/extractor/ntvcojp.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( js_to_json, smuggle_url, ) class NTVCoJpCUIE(InfoExtractor): IE_NAME = 'cu.ntv.co.jp' IE_DESC = 'Nippon Television Network' _VALID_URL = r'https?://cu\.ntv\.co\.jp/(?!program)(?P<id>[^/?&#]+)' _TEST = { 'url': 'https://cu.ntv.co.jp/televiva-chill-gohan_181031/', 'info_dict': { 'id': '5978891207001', 'ext': 'mp4', 'title': '桜エビと炒り卵がポイント! 「中華風 エビチリおにぎり」──『美虎』五十嵐美幸', 'upload_date': '20181213', 'description': 'md5:211b52f4fd60f3e0e72b68b0c6ba52a9', 'uploader_id': '3855502814001', 'timestamp': 1544669941, }, 'params': { # m3u8 download 'skip_download': True, }, } BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/default_default/index.html?videoId=%s' def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) player_config = self._parse_json(self._search_regex( r'(?s)PLAYER_CONFIG\s*=\s*({.+?})', webpage, 'player config'), display_id, js_to_json) video_id = player_config['videoId'] account_id = player_config.get('account') or '3855502814001' return { '_type': 'url_transparent', 'id': video_id, 'display_id': display_id, 'title': self._search_regex(r'<h1[^>]+class="title"[^>]*>([^<]+)', webpage, 'title').strip(), 'description': self._html_search_meta(['description', 'og:description'], webpage), 'url': smuggle_url(self.BRIGHTCOVE_URL_TEMPLATE % (account_id, video_id), {'geo_countries': ['JP']}), 'ie_key': 'BrightcoveNew', }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/lci.py
youtube_dl/extractor/lci.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class LCIIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?lci\.fr/[^/]+/[\w-]+-(?P<id>\d+)\.html' _TEST = { 'url': 'http://www.lci.fr/international/etats-unis-a-j-62-hillary-clinton-reste-sans-voix-2001679.html', 'md5': '2fdb2538b884d4d695f9bd2bde137e6c', 'info_dict': { 'id': '13244802', 'ext': 'mp4', 'title': 'Hillary Clinton et sa quinte de toux, en plein meeting', 'description': 'md5:a4363e3a960860132f8124b62f4a01c9', } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) wat_id = self._search_regex( (r'data-watid=[\'"](\d+)', r'idwat["\']?\s*:\s*["\']?(\d+)'), webpage, 'wat id') return self.url_result('wat:' + wat_id, 'Wat', wat_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/extremetube.py
youtube_dl/extractor/extremetube.py
from __future__ import unicode_literals from ..utils import str_to_int from .keezmovies import KeezMoviesIE class ExtremeTubeIE(KeezMoviesIE): _VALID_URL = r'https?://(?:www\.)?extremetube\.com/(?:[^/]+/)?video/(?P<id>[^/#?&]+)' _TESTS = [{ 'url': 'http://www.extremetube.com/video/music-video-14-british-euro-brit-european-cumshots-swallow-652431', 'md5': '92feaafa4b58e82f261e5419f39c60cb', 'info_dict': { 'id': 'music-video-14-british-euro-brit-european-cumshots-swallow-652431', 'ext': 'mp4', 'title': 'Music Video 14 british euro brit european cumshots swallow', 'uploader': 'anonim', 'view_count': int, 'age_limit': 18, } }, { 'url': 'http://www.extremetube.com/gay/video/abcde-1234', 'only_matching': True, }, { 'url': 'http://www.extremetube.com/video/latina-slut-fucked-by-fat-black-dick', 'only_matching': True, }, { 'url': 'http://www.extremetube.com/video/652431', 'only_matching': True, }] def _real_extract(self, url): webpage, info = self._extract_info(url) if not info['title']: info['title'] = self._search_regex( r'<h1[^>]+title="([^"]+)"[^>]*>', webpage, 'title') uploader = self._html_search_regex( r'Uploaded by:\s*</[^>]+>\s*<a[^>]+>(.+?)</a>', webpage, 'uploader', fatal=False) view_count = str_to_int(self._search_regex( r'Views:\s*</[^>]+>\s*<[^>]+>([\d,\.]+)</', webpage, 'view count', fatal=False)) info.update({ 'uploader': uploader, 'view_count': view_count, }) return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/phoenix.py
youtube_dl/extractor/phoenix.py
# coding: utf-8 from __future__ import unicode_literals import re from .youtube import YoutubeIE from .zdf import ZDFBaseIE from ..compat import compat_str from ..utils import ( int_or_none, merge_dicts, try_get, unified_timestamp, urljoin, ) class PhoenixIE(ZDFBaseIE): IE_NAME = 'phoenix.de' _VALID_URL = r'https?://(?:www\.)?phoenix\.de/(?:[^/]+/)*[^/?#&]*-a-(?P<id>\d+)\.html' _TESTS = [{ # Same as https://www.zdf.de/politik/phoenix-sendungen/wohin-fuehrt-der-protest-in-der-pandemie-100.html 'url': 'https://www.phoenix.de/sendungen/ereignisse/corona-nachgehakt/wohin-fuehrt-der-protest-in-der-pandemie-a-2050630.html', 'md5': '34ec321e7eb34231fd88616c65c92db0', 'info_dict': { 'id': '210222_phx_nachgehakt_corona_protest', 'ext': 'mp4', 'title': 'Wohin führt der Protest in der Pandemie?', 'description': 'md5:7d643fe7f565e53a24aac036b2122fbd', 'duration': 1691, 'timestamp': 1613902500, 'upload_date': '20210221', 'uploader': 'Phoenix', 'series': 'corona nachgehakt', 'episode': 'Wohin führt der Protest in der Pandemie?', }, }, { # Youtube embed 'url': 'https://www.phoenix.de/sendungen/gespraeche/phoenix-streitgut-brennglas-corona-a-1965505.html', 'info_dict': { 'id': 'hMQtqFYjomk', 'ext': 'mp4', 'title': 'phoenix streitgut: Brennglas Corona - Wie gerecht ist unsere Gesellschaft?', 'description': 'md5:ac7a02e2eb3cb17600bc372e4ab28fdd', 'duration': 3509, 'upload_date': '20201219', 'uploader': 'phoenix', 'uploader_id': 'phoenix', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.phoenix.de/entwicklungen-in-russland-a-2044720.html', 'only_matching': True, }, { # no media 'url': 'https://www.phoenix.de/sendungen/dokumentationen/mit-dem-jumbo-durch-die-nacht-a-89625.html', 'only_matching': True, }, { # Same as https://www.zdf.de/politik/phoenix-sendungen/die-gesten-der-maechtigen-100.html 'url': 'https://www.phoenix.de/sendungen/dokumentationen/gesten-der-maechtigen-i-a-89468.html?ref=suche', 'only_matching': True, }] def _real_extract(self, url): article_id = 
self._match_id(url) article = self._download_json( 'https://www.phoenix.de/response/id/%s' % article_id, article_id, 'Downloading article JSON') video = article['absaetze'][0] title = video.get('titel') or article.get('subtitel') if video.get('typ') == 'video-youtube': video_id = video['id'] return self.url_result( video_id, ie=YoutubeIE.ie_key(), video_id=video_id, video_title=title) video_id = compat_str(video.get('basename') or video.get('content')) details = self._download_json( 'https://www.phoenix.de/php/mediaplayer/data/beitrags_details.php', video_id, 'Downloading details JSON', query={ 'ak': 'web', 'ptmd': 'true', 'id': video_id, 'profile': 'player2', }) title = title or details['title'] content_id = details['tracking']['nielsen']['content']['assetid'] info = self._extract_ptmd( 'https://tmd.phoenix.de/tmd/2/ngplayer_2_3/vod/ptmd/phoenix/%s' % content_id, content_id, None, url) duration = int_or_none(try_get( details, lambda x: x['tracking']['nielsen']['content']['length'])) timestamp = unified_timestamp(details.get('editorialDate')) series = try_get( details, lambda x: x['tracking']['nielsen']['content']['program'], compat_str) episode = title if details.get('contentType') == 'episode' else None thumbnails = [] teaser_images = try_get(details, lambda x: x['teaserImageRef']['layouts'], dict) or {} for thumbnail_key, thumbnail_url in teaser_images.items(): thumbnail_url = urljoin(url, thumbnail_url) if not thumbnail_url: continue thumbnail = { 'url': thumbnail_url, } m = re.match('^([0-9]+)x([0-9]+)$', thumbnail_key) if m: thumbnail['width'] = int(m.group(1)) thumbnail['height'] = int(m.group(2)) thumbnails.append(thumbnail) return merge_dicts(info, { 'id': content_id, 'title': title, 'description': details.get('leadParagraph'), 'duration': duration, 'thumbnails': thumbnails, 'timestamp': timestamp, 'uploader': details.get('tvService'), 'series': series, 'episode': episode, })
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tenplay.py
youtube_dl/extractor/tenplay.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( HEADRequest, parse_age_limit, parse_iso8601, # smuggle_url, ) class TenPlayIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?10play\.com\.au/(?:[^/]+/)+(?P<id>tpv\d{6}[a-z]{5})' _TESTS = [{ 'url': 'https://10play.com.au/masterchef/episodes/season-1/masterchef-s1-ep-1/tpv190718kwzga', 'info_dict': { 'id': '6060533435001', 'ext': 'mp4', 'title': 'MasterChef - S1 Ep. 1', 'description': 'md5:4fe7b78e28af8f2d900cd20d900ef95c', 'age_limit': 10, 'timestamp': 1240828200, 'upload_date': '20090427', 'uploader_id': '2199827728001', }, 'params': { # 'format': 'bestvideo', 'skip_download': True, } }, { 'url': 'https://10play.com.au/how-to-stay-married/web-extras/season-1/terrys-talks-ep-1-embracing-change/tpv190915ylupc', 'only_matching': True, }] # BRIGHTCOVE_URL_TEMPLATE = 'https://players.brightcove.net/2199827728001/cN6vRtRQt_default/index.html?videoId=%s' _GEO_BYPASS = False _FASTLY_URL_TEMPL = 'https://10-selector.global.ssl.fastly.net/s/kYEXFC/media/%s?mbr=true&manifest=m3u&format=redirect' def _real_extract(self, url): content_id = self._match_id(url) data = self._download_json( 'https://10play.com.au/api/video/' + content_id, content_id) video = data.get('video') or {} metadata = data.get('metaData') or {} brightcove_id = video.get('videoId') or metadata['showContentVideoId'] # brightcove_url = smuggle_url( # self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, # {'geo_countries': ['AU']}) m3u8_url = self._request_webpage(HEADRequest( self._FASTLY_URL_TEMPL % brightcove_id), brightcove_id).geturl() if '10play-not-in-oz' in m3u8_url: self.raise_geo_restricted(countries=['AU']) formats = self._extract_m3u8_formats(m3u8_url, brightcove_id, 'mp4') self._sort_formats(formats) return { # '_type': 'url_transparent', # 'url': brightcove_url, 'formats': formats, 'id': brightcove_id, 'title': video.get('title') or metadata.get('pageContentName') or 
metadata['showContentName'], 'description': video.get('description'), 'age_limit': parse_age_limit(video.get('showRatingClassification') or metadata.get('showProgramClassification')), 'series': metadata.get('showName'), 'season': metadata.get('showContentSeason'), 'timestamp': parse_iso8601(metadata.get('contentPublishDate') or metadata.get('pageContentPublishDate')), 'thumbnail': video.get('poster'), 'uploader_id': '2199827728001', # 'ie_key': 'BrightcoveNew', }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/yandexmusic.py
youtube_dl/extractor/yandexmusic.py
# coding: utf-8 from __future__ import unicode_literals import hashlib import itertools import re from .common import InfoExtractor from ..compat import compat_str from ..utils import ( ExtractorError, int_or_none, float_or_none, try_get, ) class YandexMusicBaseIE(InfoExtractor): _VALID_URL_BASE = r'https?://music\.yandex\.(?P<tld>ru|kz|ua|by|com)' @staticmethod def _handle_error(response): if isinstance(response, dict): error = response.get('error') if error: raise ExtractorError(error, expected=True) if response.get('type') == 'captcha' or 'captcha' in response: YandexMusicBaseIE._raise_captcha() @staticmethod def _raise_captcha(): raise ExtractorError( 'YandexMusic has considered youtube-dl requests automated and ' 'asks you to solve a CAPTCHA. You can either wait for some ' 'time until unblocked and optionally use --sleep-interval ' 'in future or alternatively you can go to https://music.yandex.ru/ ' 'solve CAPTCHA, then export cookies and pass cookie file to ' 'youtube-dl with --cookies', expected=True) def _download_webpage_handle(self, *args, **kwargs): webpage = super(YandexMusicBaseIE, self)._download_webpage_handle(*args, **kwargs) if 'Нам очень жаль, но&nbsp;запросы, поступившие с&nbsp;вашего IP-адреса, похожи на&nbsp;автоматические.' 
in webpage: self._raise_captcha() return webpage def _download_json(self, *args, **kwargs): response = super(YandexMusicBaseIE, self)._download_json(*args, **kwargs) self._handle_error(response) return response def _call_api(self, ep, tld, url, item_id, note, query): return self._download_json( 'https://music.yandex.%s/handlers/%s.jsx' % (tld, ep), item_id, note, fatal=False, headers={ 'Referer': url, 'X-Requested-With': 'XMLHttpRequest', 'X-Retpath-Y': url, }, query=query) class YandexMusicTrackIE(YandexMusicBaseIE): IE_NAME = 'yandexmusic:track' IE_DESC = 'Яндекс.Музыка - Трек' _VALID_URL = r'%s/album/(?P<album_id>\d+)/track/(?P<id>\d+)' % YandexMusicBaseIE._VALID_URL_BASE _TESTS = [{ 'url': 'http://music.yandex.ru/album/540508/track/4878838', 'md5': 'dec8b661f12027ceaba33318787fff76', 'info_dict': { 'id': '4878838', 'ext': 'mp3', 'title': 'md5:c63e19341fdbe84e43425a30bc777856', 'filesize': int, 'duration': 193.04, 'track': 'md5:210508c6ffdfd67a493a6c378f22c3ff', 'album': 'md5:cd04fb13c4efeafdfa0a6a6aca36d01a', 'album_artist': 'md5:5f54c35462c07952df33d97cfb5fc200', 'artist': 'md5:e6fd86621825f14dc0b25db3acd68160', 'release_year': 2009, }, # 'skip': 'Travis CI servers blocked by YandexMusic', }, { # multiple disks 'url': 'http://music.yandex.ru/album/3840501/track/705105', 'md5': '82a54e9e787301dd45aba093cf6e58c0', 'info_dict': { 'id': '705105', 'ext': 'mp3', 'title': 'md5:f86d4a9188279860a83000277024c1a6', 'filesize': int, 'duration': 239.27, 'track': 'md5:40f887f0666ba1aa10b835aca44807d1', 'album': 'md5:624f5224b14f5c88a8e812fd7fbf1873', 'album_artist': 'md5:dd35f2af4e8927100cbe6f5e62e1fb12', 'artist': 'md5:dd35f2af4e8927100cbe6f5e62e1fb12', 'release_year': 2016, 'genre': 'pop', 'disc_number': 2, 'track_number': 9, }, # 'skip': 'Travis CI servers blocked by YandexMusic', }, { 'url': 'http://music.yandex.com/album/540508/track/4878838', 'only_matching': True, }, { 'url': 'https://music.yandex.ru/album/16302456/track/85430762', 'md5': 
'11b8d50ab03b57738deeaadf661a0a48', 'info_dict': { 'id': '85430762', 'ext': 'mp3', 'abr': 128, 'title': 'Haddadi Von Engst, Phonic Youth, Super Flu - Til The End (Super Flu Remix)', 'filesize': int, 'duration': 431.14, 'track': 'Til The End (Super Flu Remix)', 'album': 'Til The End', 'album_artist': 'Haddadi Von Engst, Phonic Youth', 'artist': 'Haddadi Von Engst, Phonic Youth, Super Flu', 'release_year': 2021, 'genre': 'house', 'disc_number': 1, 'track_number': 2, } }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) tld, album_id, track_id = mobj.group('tld'), mobj.group('album_id'), mobj.group('id') track = self._call_api( 'track', tld, url, track_id, 'Downloading track JSON', {'track': '%s:%s' % (track_id, album_id)})['track'] track_title = track['title'] track_version = track.get('version') if track_version: track_title = '%s (%s)' % (track_title, track_version) download_data = self._download_json( 'https://music.yandex.ru/api/v2.1/handlers/track/%s:%s/web-album_track-track-track-main/download/m' % (track_id, album_id), track_id, 'Downloading track location url JSON', query={'hq': 1}, headers={'X-Retpath-Y': url}) fd_data = self._download_json( download_data['src'], track_id, 'Downloading track location JSON', query={'format': 'json'}) key = hashlib.md5(('XGRlBW9FXlekgbPrRHuSiA' + fd_data['path'][1:] + fd_data['s']).encode('utf-8')).hexdigest() f_url = 'http://%s/get-mp3/%s/%s?track-id=%s ' % (fd_data['host'], key, fd_data['ts'] + fd_data['path'], track['id']) thumbnail = None cover_uri = track.get('albums', [{}])[0].get('coverUri') if cover_uri: thumbnail = cover_uri.replace('%%', 'orig') if not thumbnail.startswith('http'): thumbnail = 'http://' + thumbnail track_info = { 'id': track_id, 'ext': 'mp3', 'url': f_url, 'filesize': int_or_none(track.get('fileSize')), 'duration': float_or_none(track.get('durationMs'), 1000), 'thumbnail': thumbnail, 'track': track_title, 'acodec': download_data.get('codec'), 'abr': 
int_or_none(download_data.get('bitrate')), } def extract_artist_name(artist): decomposed = artist.get('decomposed') if not isinstance(decomposed, list): return artist['name'] parts = [artist['name']] for element in decomposed: if isinstance(element, dict) and element.get('name'): parts.append(element['name']) elif isinstance(element, compat_str): parts.append(element) return ''.join(parts) def extract_artist(artist_list): if artist_list and isinstance(artist_list, list): artists_names = [extract_artist_name(a) for a in artist_list if a.get('name')] if artists_names: return ', '.join(artists_names) albums = track.get('albums') if albums and isinstance(albums, list): album = albums[0] if isinstance(album, dict): year = album.get('year') disc_number = int_or_none(try_get( album, lambda x: x['trackPosition']['volume'])) track_number = int_or_none(try_get( album, lambda x: x['trackPosition']['index'])) track_info.update({ 'album': album.get('title'), 'album_artist': extract_artist(album.get('artists')), 'release_year': int_or_none(year), 'genre': album.get('genre'), 'disc_number': disc_number, 'track_number': track_number, }) track_artist = extract_artist(track.get('artists')) if track_artist: track_info.update({ 'artist': track_artist, 'title': '%s - %s' % (track_artist, track_title), }) else: track_info['title'] = track_title return track_info class YandexMusicPlaylistBaseIE(YandexMusicBaseIE): def _extract_tracks(self, source, item_id, url, tld): tracks = source['tracks'] track_ids = [compat_str(track_id) for track_id in source['trackIds']] # tracks dictionary shipped with playlist.jsx API is limited to 150 tracks, # missing tracks should be retrieved manually. 
if len(tracks) < len(track_ids): present_track_ids = set([ compat_str(track['id']) for track in tracks if track.get('id')]) missing_track_ids = [ track_id for track_id in track_ids if track_id not in present_track_ids] # Request missing tracks in chunks to avoid exceeding max HTTP header size, # see https://github.com/ytdl-org/youtube-dl/issues/27355 _TRACKS_PER_CHUNK = 250 for chunk_num in itertools.count(0): start = chunk_num * _TRACKS_PER_CHUNK end = start + _TRACKS_PER_CHUNK missing_track_ids_req = missing_track_ids[start:end] assert missing_track_ids_req missing_tracks = self._call_api( 'track-entries', tld, url, item_id, 'Downloading missing tracks JSON chunk %d' % (chunk_num + 1), { 'entries': ','.join(missing_track_ids_req), 'lang': tld, 'external-domain': 'music.yandex.%s' % tld, 'overembed': 'false', 'strict': 'true', }) if missing_tracks: tracks.extend(missing_tracks) if end >= len(missing_track_ids): break return tracks def _build_playlist(self, tracks): entries = [] for track in tracks: track_id = track.get('id') or track.get('realId') if not track_id: continue albums = track.get('albums') if not albums or not isinstance(albums, list): continue album = albums[0] if not isinstance(album, dict): continue album_id = album.get('id') if not album_id: continue entries.append(self.url_result( 'http://music.yandex.ru/album/%s/track/%s' % (album_id, track_id), ie=YandexMusicTrackIE.ie_key(), video_id=track_id)) return entries class YandexMusicAlbumIE(YandexMusicPlaylistBaseIE): IE_NAME = 'yandexmusic:album' IE_DESC = 'Яндекс.Музыка - Альбом' _VALID_URL = r'%s/album/(?P<id>\d+)' % YandexMusicBaseIE._VALID_URL_BASE _TESTS = [{ 'url': 'http://music.yandex.ru/album/540508', 'info_dict': { 'id': '540508', 'title': 'md5:7ed1c3567f28d14be9f61179116f5571', }, 'playlist_count': 50, # 'skip': 'Travis CI servers blocked by YandexMusic', }, { 'url': 'https://music.yandex.ru/album/3840501', 'info_dict': { 'id': '3840501', 'title': 'md5:36733472cdaa7dcb1fd9473f7da8e50f', }, 
'playlist_count': 33, # 'skip': 'Travis CI servers blocked by YandexMusic', }, { # empty artists 'url': 'https://music.yandex.ru/album/9091882', 'info_dict': { 'id': '9091882', 'title': 'ТЕД на русском', }, 'playlist_count': 187, }] @classmethod def suitable(cls, url): return False if YandexMusicTrackIE.suitable(url) else super(YandexMusicAlbumIE, cls).suitable(url) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) tld = mobj.group('tld') album_id = mobj.group('id') album = self._call_api( 'album', tld, url, album_id, 'Downloading album JSON', {'album': album_id}) entries = self._build_playlist([track for volume in album['volumes'] for track in volume]) title = album['title'] artist = try_get(album, lambda x: x['artists'][0]['name'], compat_str) if artist: title = '%s - %s' % (artist, title) year = album.get('year') if year: title += ' (%s)' % year return self.playlist_result(entries, compat_str(album['id']), title) class YandexMusicPlaylistIE(YandexMusicPlaylistBaseIE): IE_NAME = 'yandexmusic:playlist' IE_DESC = 'Яндекс.Музыка - Плейлист' _VALID_URL = r'%s/users/(?P<user>[^/]+)/playlists/(?P<id>\d+)' % YandexMusicBaseIE._VALID_URL_BASE _TESTS = [{ 'url': 'http://music.yandex.ru/users/music.partners/playlists/1245', 'info_dict': { 'id': '1245', 'title': 'md5:841559b3fe2b998eca88d0d2e22a3097', 'description': 'md5:3b9f27b0efbe53f2ee1e844d07155cc9', }, 'playlist_count': 5, # 'skip': 'Travis CI servers blocked by YandexMusic', }, { 'url': 'https://music.yandex.ru/users/ya.playlist/playlists/1036', 'only_matching': True, }, { # playlist exceeding the limit of 150 tracks (see # https://github.com/ytdl-org/youtube-dl/issues/6666) 'url': 'https://music.yandex.ru/users/mesiaz/playlists/1364', 'info_dict': { 'id': '1364', 'title': 'md5:b3b400f997d3f878a13ae0699653f7db', }, 'playlist_mincount': 437, # 'skip': 'Travis CI servers blocked by YandexMusic', }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) tld = mobj.group('tld') user = 
mobj.group('user') playlist_id = mobj.group('id') playlist = self._call_api( 'playlist', tld, url, playlist_id, 'Downloading playlist JSON', { 'owner': user, 'kinds': playlist_id, 'light': 'true', 'lang': tld, 'external-domain': 'music.yandex.%s' % tld, 'overembed': 'false', })['playlist'] tracks = self._extract_tracks(playlist, playlist_id, url, tld) return self.playlist_result( self._build_playlist(tracks), compat_str(playlist_id), playlist.get('title'), playlist.get('description')) class YandexMusicArtistBaseIE(YandexMusicPlaylistBaseIE): def _call_artist(self, tld, url, artist_id): return self._call_api( 'artist', tld, url, artist_id, 'Downloading artist %s JSON' % self._ARTIST_WHAT, { 'artist': artist_id, 'what': self._ARTIST_WHAT, 'sort': self._ARTIST_SORT or '', 'dir': '', 'period': '', 'lang': tld, 'external-domain': 'music.yandex.%s' % tld, 'overembed': 'false', }) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) tld = mobj.group('tld') artist_id = mobj.group('id') data = self._call_artist(tld, url, artist_id) tracks = self._extract_tracks(data, artist_id, url, tld) title = try_get(data, lambda x: x['artist']['name'], compat_str) return self.playlist_result( self._build_playlist(tracks), artist_id, title) class YandexMusicArtistTracksIE(YandexMusicArtistBaseIE): IE_NAME = 'yandexmusic:artist:tracks' IE_DESC = 'Яндекс.Музыка - Артист - Треки' _VALID_URL = r'%s/artist/(?P<id>\d+)/tracks' % YandexMusicBaseIE._VALID_URL_BASE _TESTS = [{ 'url': 'https://music.yandex.ru/artist/617526/tracks', 'info_dict': { 'id': '617526', 'title': 'md5:131aef29d45fd5a965ca613e708c040b', }, 'playlist_count': 507, # 'skip': 'Travis CI servers blocked by YandexMusic', }] _ARTIST_SORT = '' _ARTIST_WHAT = 'tracks' def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) tld = mobj.group('tld') artist_id = mobj.group('id') data = self._call_artist(tld, url, artist_id) tracks = self._extract_tracks(data, artist_id, url, tld) artist = try_get(data, lambda 
x: x['artist']['name'], compat_str) title = '%s - %s' % (artist or artist_id, 'Треки') return self.playlist_result( self._build_playlist(tracks), artist_id, title) class YandexMusicArtistAlbumsIE(YandexMusicArtistBaseIE): IE_NAME = 'yandexmusic:artist:albums' IE_DESC = 'Яндекс.Музыка - Артист - Альбомы' _VALID_URL = r'%s/artist/(?P<id>\d+)/albums' % YandexMusicBaseIE._VALID_URL_BASE _TESTS = [{ 'url': 'https://music.yandex.ru/artist/617526/albums', 'info_dict': { 'id': '617526', 'title': 'md5:55dc58d5c85699b7fb41ee926700236c', }, 'playlist_count': 8, # 'skip': 'Travis CI servers blocked by YandexMusic', }] _ARTIST_SORT = 'year' _ARTIST_WHAT = 'albums' def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) tld = mobj.group('tld') artist_id = mobj.group('id') data = self._call_artist(tld, url, artist_id) entries = [] for album in data['albums']: if not isinstance(album, dict): continue album_id = album.get('id') if not album_id: continue entries.append(self.url_result( 'http://music.yandex.ru/album/%s' % album_id, ie=YandexMusicAlbumIE.ie_key(), video_id=album_id)) artist = try_get(data, lambda x: x['artist']['name'], compat_str) title = '%s - %s' % (artist or artist_id, 'Альбомы') return self.playlist_result(entries, artist_id, title)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/ora.py
youtube_dl/extractor/ora.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
    get_element_by_attribute,
    qualities,
    unescapeHTML,
)


class OraTVIE(InfoExtractor):
    """Extractor for ora.tv and unsafespeech.com video pages."""
    _VALID_URL = r'https?://(?:www\.)?(?:ora\.tv|unsafespeech\.com)/([^/]+/)*(?P<id>[^/\?#]+)'
    _TESTS = [{
        'url': 'https://www.ora.tv/larrykingnow/2015/12/16/vine-youtube-stars-zach-king-king-bach-on-their-viral-videos-0_36jupg6090pq',
        'md5': 'fa33717591c631ec93b04b0e330df786',
        'info_dict': {
            'id': '50178',
            'ext': 'mp4',
            'title': 'Vine & YouTube Stars Zach King & King Bach On Their Viral Videos!',
            'description': 'md5:ebbc5b1424dd5dba7be7538148287ac1',
        }
    }, {
        'url': 'http://www.unsafespeech.com/video/2016/5/10/student-self-censorship-and-the-thought-police-on-university-campuses-0_6622bnkppw4d',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # Pull the embedded JSON-ish video object out of the page, then try
        # the HLS stream first; fall back to a referenced YouTube video.
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        video_data = self._search_regex(
            r'"(?:video|current)"\s*:\s*({[^}]+?})', webpage, 'current video')
        # non-fatal: pages without an HLS stream carry a youtube_id instead
        m3u8_url = self._search_regex(
            r'hls_stream"?\s*:\s*"([^"]+)', video_data, 'm3u8 url', None)
        if m3u8_url:
            formats = self._extract_m3u8_formats(
                m3u8_url, display_id, 'mp4', 'm3u8_native',
                m3u8_id='hls', fatal=False)
            # similar to GameSpotIE: derive progressive-HTTP URLs from the
            # quality list embedded in the m3u8 path
            m3u8_path = compat_urlparse.urlparse(m3u8_url).path
            QUALITIES_RE = r'((,[a-z]+\d+)+,?)'
            available_qualities = self._search_regex(
                QUALITIES_RE, m3u8_path, 'qualities').strip(',').split(',')
            http_path = m3u8_path[1:].split('/', 1)[1]
            # substitute each quality token back into the path template
            http_template = re.sub(QUALITIES_RE, r'%s', http_path)
            http_template = http_template.replace('.csmil/master.m3u8', '')
            http_template = compat_urlparse.urljoin(
                'http://videocdn-pmd.ora.tv/', http_template)
            preference = qualities(
                ['mobile400', 'basic400', 'basic600', 'sd900', 'sd1200',
                 'sd1500', 'hd720', 'hd1080'])
            for q in available_qualities:
                formats.append({
                    'url': http_template % q,
                    'format_id': q,
                    'preference': preference(q),
                })
            self._sort_formats(formats)
        else:
            # no HLS stream: delegate to the YouTube extractor
            return self.url_result(self._search_regex(
                r'"youtube_id"\s*:\s*"([^"]+)', webpage, 'youtube id'), 'Youtube')

        return {
            'id': self._search_regex(
                r'"id"\s*:\s*(\d+)', video_data, 'video id', default=display_id),
            'display_id': display_id,
            'title': unescapeHTML(self._og_search_title(webpage)),
            'description': get_element_by_attribute(
                'class', 'video_txt_decription', webpage),
            'thumbnail': self._proto_relative_url(self._search_regex(
                r'"thumb"\s*:\s*"([^"]+)', video_data, 'thumbnail', None)),
            'formats': formats,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/atttechchannel.py
youtube_dl/extractor/atttechchannel.py
from __future__ import unicode_literals from .common import InfoExtractor from ..utils import unified_strdate class ATTTechChannelIE(InfoExtractor): _VALID_URL = r'https?://techchannel\.att\.com/play-video\.cfm/([^/]+/)*(?P<id>.+)' _TEST = { 'url': 'http://techchannel.att.com/play-video.cfm/2014/1/27/ATT-Archives-The-UNIX-System-Making-Computers-Easier-to-Use', 'info_dict': { 'id': '11316', 'display_id': 'ATT-Archives-The-UNIX-System-Making-Computers-Easier-to-Use', 'ext': 'flv', 'title': 'AT&T Archives : The UNIX System: Making Computers Easier to Use', 'description': 'A 1982 film about UNIX is the foundation for software in use around Bell Labs and AT&T.', 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20140127', }, 'params': { # rtmp download 'skip_download': True, }, } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) video_url = self._search_regex( r"url\s*:\s*'(rtmp://[^']+)'", webpage, 'video URL') video_id = self._search_regex( r'mediaid\s*=\s*(\d+)', webpage, 'video id', fatal=False) title = self._og_search_title(webpage) description = self._og_search_description(webpage) thumbnail = self._og_search_thumbnail(webpage) upload_date = unified_strdate(self._search_regex( r'[Rr]elease\s+date:\s*(\d{1,2}/\d{1,2}/\d{4})', webpage, 'upload date', fatal=False), False) return { 'id': video_id, 'display_id': display_id, 'url': video_url, 'ext': 'flv', 'title': title, 'description': description, 'thumbnail': thumbnail, 'upload_date': upload_date, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/trovo.py
youtube_dl/extractor/trovo.py
# coding: utf-8
from __future__ import unicode_literals

import json

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    str_or_none,
    try_get,
)


class TrovoBaseIE(InfoExtractor):
    """Shared pieces for the Trovo live and VOD extractors."""
    _VALID_URL_BASE = r'https?://(?:www\.)?trovo\.live/'

    def _extract_streamer_info(self, data):
        # Uploader metadata common to getLiveInfo and VodDetailInfos payloads.
        streamer_info = data.get('streamerInfo') or {}
        username = streamer_info.get('userName')
        return {
            'uploader': streamer_info.get('nickName'),
            'uploader_id': str_or_none(streamer_info.get('uid')),
            'uploader_url': 'https://trovo.live/' + username if username else None,
        }


class TrovoIE(TrovoBaseIE):
    """Live streams: any channel path that is not a clip/video URL."""
    _VALID_URL = TrovoBaseIE._VALID_URL_BASE + r'(?!(?:clip|video)/)(?P<id>[^/?&#]+)'

    def _real_extract(self, url):
        username = self._match_id(url)
        # GraphQL over GET; whitespace in the query is insignificant
        live_info = self._download_json(
            'https://gql.trovo.live/', username, query={
                'query': '''{ getLiveInfo(params: {userName: "%s"}) { isLive programInfo { coverUrl id streamInfo { desc playUrl } title } streamerInfo { nickName uid userName } } }''' % username,
            })['data']['getLiveInfo']
        if live_info.get('isLive') == 0:
            raise ExtractorError('%s is offline' % username, expected=True)
        program_info = live_info['programInfo']
        program_id = program_info['id']
        title = self._live_title(program_info['title'])

        formats = []
        for stream_info in (program_info.get('streamInfo') or []):
            play_url = stream_info.get('playUrl')
            if not play_url:
                continue
            format_id = stream_info.get('desc')
            formats.append({
                'format_id': format_id,
                # desc looks like '720p' etc. — strip trailing letter for height
                'height': int_or_none(format_id[:-1]) if format_id else None,
                'url': play_url,
            })
        self._sort_formats(formats)

        info = {
            'id': program_id,
            'title': title,
            'formats': formats,
            'thumbnail': program_info.get('coverUrl'),
            'is_live': True,
        }
        info.update(self._extract_streamer_info(live_info))
        return info


class TrovoVodIE(TrovoBaseIE):
    """VODs and clips."""
    _VALID_URL = TrovoBaseIE._VALID_URL_BASE + r'(?:clip|video)/(?P<id>[^/?&#]+)'
    _TESTS = [{
        'url': 'https://trovo.live/video/ltv-100095501_100095501_1609596043',
        'info_dict': {
            'id': 'ltv-100095501_100095501_1609596043',
            'ext': 'mp4',
            'title': 'Spontaner 12 Stunden Stream! - Ok Boomer!',
            'uploader': 'Exsl',
            'timestamp': 1609640305,
            'upload_date': '20210103',
            'uploader_id': '100095501',
            'duration': 43977,
            'view_count': int,
            'like_count': int,
            'comment_count': int,
            'comments': 'mincount:8',
            'categories': ['Grand Theft Auto V'],
        },
    }, {
        'url': 'https://trovo.live/clip/lc-5285890810184026005',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        vid = self._match_id(url)
        # batched GraphQL over POST: VOD details + full comment list
        resp = self._download_json(
            'https://gql.trovo.live/', vid, data=json.dumps([{
                'query': '''{ batchGetVodDetailInfo(params: {vids: ["%s"]}) { VodDetailInfos } }''' % vid,
            }, {
                'query': '''{ getCommentList(params: {appInfo: {postID: "%s"}, pageSize: 1000000000, preview: {}}) { commentList { author { nickName uid } commentID content createdAt parentID } } }''' % vid,
            }]).encode(), headers={
                'Content-Type': 'application/json',
            })
        vod_detail_info = resp[0]['data']['batchGetVodDetailInfo']['VodDetailInfos'][vid]
        vod_info = vod_detail_info['vodInfo']
        title = vod_info['title']

        language = vod_info.get('languageName')
        formats = []
        for play_info in (vod_info.get('playInfos') or []):
            play_url = play_info.get('playUrl')
            if not play_url:
                continue
            format_id = play_info.get('desc')
            formats.append({
                'ext': 'mp4',
                'filesize': int_or_none(play_info.get('fileSize')),
                'format_id': format_id,
                'height': int_or_none(format_id[:-1]) if format_id else None,
                'language': language,
                'protocol': 'm3u8_native',
                'tbr': int_or_none(play_info.get('bitrate')),
                'url': play_url,
                # CDN requires a matching Origin header
                'http_headers': {'Origin': 'https://trovo.live'},
            })
        self._sort_formats(formats)

        category = vod_info.get('categoryName')
        get_count = lambda x: int_or_none(vod_info.get(x + 'Num'))

        comment_list = try_get(resp, lambda x: x[1]['data']['getCommentList']['commentList'], list) or []
        comments = []
        for comment in comment_list:
            content = comment.get('content')
            if not content:
                continue
            author = comment.get('author') or {}
            parent = comment.get('parentID')
            comments.append({
                'author': author.get('nickName'),
                'author_id': str_or_none(author.get('uid')),
                'id': str_or_none(comment.get('commentID')),
                'text': content,
                'timestamp': int_or_none(comment.get('createdAt')),
                # parentID == 0 marks a top-level comment
                'parent': 'root' if parent == 0 else str_or_none(parent),
            })

        info = {
            'id': vid,
            'title': title,
            'formats': formats,
            'thumbnail': vod_info.get('coverUrl'),
            'timestamp': int_or_none(vod_info.get('publishTs')),
            'duration': int_or_none(vod_info.get('duration')),
            'view_count': get_count('watch'),
            'like_count': get_count('like'),
            'comment_count': get_count('comment'),
            'comments': comments,
            'categories': [category] if category else None,
        }
        info.update(self._extract_streamer_info(vod_detail_info))
        return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/arnes.py
youtube_dl/extractor/arnes.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import (
    compat_parse_qs,
    compat_urllib_parse_urlparse,
)
from ..utils import (
    float_or_none,
    int_or_none,
    parse_iso8601,
    remove_start,
)


class ArnesIE(InfoExtractor):
    """Extractor for video.arnes.si (Arnes Video, Slovenian academic network)."""
    IE_NAME = 'video.arnes.si'
    IE_DESC = 'Arnes Video'
    _VALID_URL = r'https?://video\.arnes\.si/(?:[a-z]{2}/)?(?:watch|embed|api/(?:asset|public/video))/(?P<id>[0-9a-zA-Z]{12})'
    _TESTS = [{
        'url': 'https://video.arnes.si/watch/a1qrWTOQfVoU?t=10',
        'md5': '4d0f4d0a03571b33e1efac25fd4a065d',
        'info_dict': {
            'id': 'a1qrWTOQfVoU',
            'ext': 'mp4',
            'title': 'Linearna neodvisnost, definicija',
            'description': 'Linearna neodvisnost, definicija',
            'license': 'PRIVATE',
            'creator': 'Polona Oblak',
            'timestamp': 1585063725,
            'upload_date': '20200324',
            'channel': 'Polona Oblak',
            'channel_id': 'q6pc04hw24cj',
            'channel_url': 'https://video.arnes.si/?channel=q6pc04hw24cj',
            'duration': 596.75,
            'view_count': int,
            'tags': ['linearna_algebra'],
            'start_time': 10,
        }
    }, {
        'url': 'https://video.arnes.si/api/asset/s1YjnV7hadlC/play.mp4',
        'only_matching': True,
    }, {
        'url': 'https://video.arnes.si/embed/s1YjnV7hadlC',
        'only_matching': True,
    }, {
        'url': 'https://video.arnes.si/en/watch/s1YjnV7hadlC',
        'only_matching': True,
    }, {
        'url': 'https://video.arnes.si/embed/s1YjnV7hadlC?t=123&hideRelated=1',
        'only_matching': True,
    }, {
        'url': 'https://video.arnes.si/api/public/video/s1YjnV7hadlC',
        'only_matching': True,
    }]
    _BASE_URL = 'https://video.arnes.si'

    def _real_extract(self, url):
        video_id = self._match_id(url)

        video = self._download_json(
            self._BASE_URL + '/api/public/video/' + video_id, video_id)['data']
        title = video['title']

        formats = []
        for media in (video.get('media') or []):
            media_url = media.get('url')
            if not media_url:
                continue
            formats.append({
                'url': self._BASE_URL + media_url,
                'format_id': remove_start(media.get('format'), 'FORMAT_'),
                'format_note': media.get('formatTranslation'),
                'width': int_or_none(media.get('width')),
                'height': int_or_none(media.get('height')),
            })
        self._sort_formats(formats)

        channel = video.get('channel') or {}
        channel_id = channel.get('url')
        thumbnail = video.get('thumbnailUrl')

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            # Bug fix: thumbnailUrl is optional in the API response; the
            # unconditional concatenation used to raise TypeError when absent.
            'thumbnail': self._BASE_URL + thumbnail if thumbnail else None,
            'description': video.get('description'),
            'license': video.get('license'),
            'creator': video.get('author'),
            'timestamp': parse_iso8601(video.get('creationTime')),
            'channel': channel.get('name'),
            'channel_id': channel_id,
            'channel_url': self._BASE_URL + '/?channel=' + channel_id if channel_id else None,
            # API reports duration in milliseconds
            'duration': float_or_none(video.get('duration'), 1000),
            'view_count': int_or_none(video.get('views')),
            'tags': video.get('hashtags'),
            # optional ?t= start offset propagated from the watch URL
            'start_time': int_or_none(compat_parse_qs(
                compat_urllib_parse_urlparse(url).query).get('t', [None])[0]),
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tvnet.py
youtube_dl/extractor/tvnet.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    unescapeHTML,
    url_or_none,
)


class TVNetIE(InfoExtractor):
    """Extractor for *.tvnet.gov.vn videos, radio recordings and live streams."""
    _VALID_URL = r'https?://(?:[^/]+)\.tvnet\.gov\.vn/[^/]+/(?:\d+/)?(?P<id>\d+)(?:/|$)'
    _TESTS = [{
        # video
        'url': 'http://de.tvnet.gov.vn/video/109788/vtv1---bac-tuyet-tai-lao-cai-va-ha-giang/tin-nong-24h',
        'md5': 'b4d7abe0252c9b47774760b7519c7558',
        'info_dict': {
            'id': '109788',
            'ext': 'mp4',
            'title': 'VTV1 - Bắc tuyết tại Lào Cai và Hà Giang',
            'thumbnail': r're:(?i)https?://.*\.(?:jpg|png)',
            'is_live': False,
            'view_count': int,
        },
    }, {
        # audio
        'url': 'http://vn.tvnet.gov.vn/radio/27017/vov1---ban-tin-chieu-10062018/doi-song-va-xa-hoi',
        'md5': 'b5875ce9b0a2eecde029216d0e6db2ae',
        'info_dict': {
            'id': '27017',
            'ext': 'm4a',
            'title': 'VOV1 - Bản tin chiều (10/06/2018)',
            'thumbnail': r're:(?i)https?://.*\.(?:jpg|png)',
            'is_live': False,
        },
    }, {
        'url': 'http://us.tvnet.gov.vn/video/118023/129999/ngay-0705',
        'info_dict': {
            'id': '129999',
            'ext': 'mp4',
            'title': 'VTV1 - Quốc hội với cử tri (11/06/2018)',
            'thumbnail': r're:(?i)https?://.*\.(?:jpg|png)',
            'is_live': False,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # live stream
        'url': 'http://us.tvnet.gov.vn/kenh-truyen-hinh/1011/vtv1',
        'info_dict': {
            'id': '1011',
            'ext': 'mp4',
            'title': r're:^VTV1 \| LiveTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'thumbnail': r're:(?i)https?://.*\.(?:jpg|png)',
            'is_live': True,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # radio live stream
        'url': 'http://vn.tvnet.gov.vn/kenh-truyen-hinh/1014',
        'info_dict': {
            'id': '1014',
            'ext': 'm4a',
            'title': r're:VOV1 \| LiveTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'thumbnail': r're:(?i)https?://.*\.(?:jpg|png)',
            'is_live': True,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://us.tvnet.gov.vn/phim/6136/25510/vtv3---ca-mot-doi-an-oan-tap-1-50/phim-truyen-hinh',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        # title: og tag, then <meta name=title>, then <title> as last resort
        title = self._og_search_title(
            webpage, default=None) or self._html_search_meta(
            'title', webpage, default=None) or self._search_regex(
            r'<title>([^<]+)<', webpage, 'title')
        title = re.sub(r'\s*-\s*TV Net\s*$', '', title)

        # liveness inferred from the URL path; None = unknown
        if '/video/' in url or '/radio/' in url:
            is_live = False
        elif '/kenh-truyen-hinh/' in url:
            is_live = True
        else:
            is_live = None

        data_file = unescapeHTML(self._search_regex(
            r'data-file=(["\'])(?P<url>(?:https?:)?//.+?)\1', webpage,
            'data file', group='url'))

        stream_urls = set()
        formats = []
        for stream in self._download_json(data_file, video_id):
            if not isinstance(stream, dict):
                continue
            stream_url = url_or_none(stream.get('url'))
            # de-duplicate; also skips invalid/missing URLs
            if stream_url in stream_urls or not stream_url:
                continue
            stream_urls.add(stream_url)
            formats.extend(self._extract_m3u8_formats(
                stream_url, video_id, 'mp4',
                entry_protocol='m3u8' if is_live else 'm3u8_native',
                m3u8_id='hls', fatal=False))
        self._sort_formats(formats)

        # better support for radio streams
        if title.startswith('VOV'):
            for f in formats:
                f.update({
                    'ext': 'm4a',
                    'vcodec': 'none',
                })

        thumbnail = self._og_search_thumbnail(
            webpage, default=None) or unescapeHTML(
            self._search_regex(
                r'data-image=(["\'])(?P<url>(?:https?:)?//.+?)\1', webpage,
                'thumbnail', default=None, group='url'))

        if is_live:
            title = self._live_title(title)

        view_count = int_or_none(self._search_regex(
            r'(?s)<div[^>]+\bclass=["\'].*?view-count[^>]+>.*?(\d+).*?</div>',
            webpage, 'view count', default=None))

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'is_live': is_live,
            'view_count': view_count,
            'formats': formats,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/teachable.py
youtube_dl/extractor/teachable.py
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from .wistia import WistiaIE
from ..utils import (
    clean_html,
    ExtractorError,
    int_or_none,
    get_element_by_class,
    strip_or_none,
    urlencode_postdata,
    urljoin,
)


class TeachableBaseIE(InfoExtractor):
    """Shared login machinery for Teachable-hosted course sites."""
    _NETRC_MACHINE = 'teachable'
    _URL_PREFIX = 'teachable:'

    _SITES = {
        # Only notable ones here
        'v1.upskillcourses.com': 'upskill',
        'gns3.teachable.com': 'gns3',
        'academyhacker.com': 'academyhacker',
        'stackskills.com': 'stackskills',
        'market.saleshacker.com': 'saleshacker',
        'learnability.org': 'learnability',
        'edurila.com': 'edurila',
        'courses.workitdaily.com': 'workitdaily',
    }

    _VALID_URL_SUB_TUPLE = (_URL_PREFIX, '|'.join(re.escape(site) for site in _SITES.keys()))

    def _real_initialize(self):
        # Tracked per-extractor-instance; login happens at most once.
        self._logged_in = False

    def _login(self, site):
        """Log into the given Teachable site using netrc/option credentials."""
        if self._logged_in:
            return

        # Each site has its own netrc machine name (see _SITES)
        username, password = self._get_login_info(
            netrc_machine=self._SITES.get(site, site))
        if username is None:
            return

        login_page, urlh = self._download_webpage_handle(
            'https://%s/sign_in' % site, None,
            'Downloading %s login page' % site)

        def is_logged(webpage):
            # Heuristic: any sign-out affordance means an active session
            return any(re.search(p, webpage) for p in (
                r'class=["\']user-signout',
                r'<a[^>]+\bhref=["\']/sign_out',
                r'Log\s+[Oo]ut\s*<'))

        if is_logged(login_page):
            self._logged_in = True
            return

        login_url = urlh.geturl()

        login_form = self._hidden_inputs(login_page)

        login_form.update({
            'user[email]': username,
            'user[password]': password,
        })

        post_url = self._search_regex(
            r'<form[^>]+action=(["\'])(?P<url>(?:(?!\1).)+)\1', login_page,
            'post url', default=login_url, group='url')

        if not post_url.startswith('http'):
            post_url = urljoin(login_url, post_url)

        response = self._download_webpage(
            post_url, None, 'Logging in to %s' % site,
            data=urlencode_postdata(login_form),
            headers={
                'Content-Type': 'application/x-www-form-urlencoded',
                'Referer': login_url,
            })

        if '>I accept the new Privacy Policy<' in response:
            raise ExtractorError(
                'Unable to login: %s asks you to accept new Privacy Policy. '
                'Go to https://%s/ and accept.' % (site, site), expected=True)

        # Successful login
        if is_logged(response):
            self._logged_in = True
            return

        message = get_element_by_class('alert', response)
        if message is not None:
            raise ExtractorError(
                'Unable to login: %s' % clean_html(message), expected=True)

        raise ExtractorError('Unable to log in')


class TeachableIE(TeachableBaseIE):
    """Single lecture pages; the actual media is hosted on Wistia."""
    _VALID_URL = r'''(?x)
                    (?:
                        %shttps?://(?P<site_t>[^/]+)|
                        https?://(?:www\.)?(?P<site>%s)
                    )
                    /courses/[^/]+/lectures/(?P<id>\d+)
                    ''' % TeachableBaseIE._VALID_URL_SUB_TUPLE

    _TESTS = [{
        'url': 'https://gns3.teachable.com/courses/gns3-certified-associate/lectures/6842364',
        'info_dict': {
            'id': 'untlgzk1v7',
            'ext': 'bin',
            'title': 'Overview',
            'description': 'md5:071463ff08b86c208811130ea1c2464c',
            'duration': 736.4,
            'timestamp': 1542315762,
            'upload_date': '20181115',
            'chapter': 'Welcome',
            'chapter_number': 1,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://v1.upskillcourses.com/courses/119763/lectures/1747100',
        'only_matching': True,
    }, {
        'url': 'https://gns3.teachable.com/courses/423415/lectures/6885939',
        'only_matching': True,
    }, {
        'url': 'teachable:https://v1.upskillcourses.com/courses/essential-web-developer-course/lectures/1747100',
        'only_matching': True,
    }]

    @staticmethod
    def _is_teachable(webpage):
        # Fingerprint used by the generic extractor to recognize Teachable pages
        return 'teachableTracker.linker:autoLink' in webpage and re.search(
            r'<link[^>]+href=["\']https?://(?:process\.fs|assets)\.teachablecdn\.com',
            webpage)

    @staticmethod
    def _extract_url(webpage, source_url):
        if not TeachableIE._is_teachable(webpage):
            return
        if re.match(r'https?://[^/]+/(?:courses|p)', source_url):
            return '%s%s' % (TeachableBaseIE._URL_PREFIX, source_url)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        site = mobj.group('site') or mobj.group('site_t')
        video_id = mobj.group('id')

        self._login(site)

        prefixed = url.startswith(self._URL_PREFIX)
        if prefixed:
            url = url[len(self._URL_PREFIX):]

        webpage = self._download_webpage(url, video_id)

        wistia_urls = WistiaIE._extract_urls(webpage)
        if not wistia_urls:
            # distinguish "locked content" from a genuine extraction failure
            if any(re.search(p, webpage) for p in (
                    r'class=["\']lecture-contents-locked',
                    r'>\s*Lecture contents locked',
                    r'id=["\']lecture-locked',
                    # https://academy.tailoredtutors.co.uk/courses/108779/lectures/1955313
                    r'class=["\'](?:inner-)?lesson-locked',
                    r'>LESSON LOCKED<')):
                self.raise_login_required('Lecture contents locked')
            raise ExtractorError('Unable to find video URL')

        title = self._og_search_title(webpage, default=None)

        chapter = None
        chapter_number = None
        # best-effort chapter resolution from the course sidebar
        section_item = self._search_regex(
            r'(?s)(?P<li><li[^>]+\bdata-lecture-id=["\']%s[^>]+>.+?</li>)' % video_id,
            webpage, 'section item', default=None, group='li')
        if section_item:
            chapter_number = int_or_none(self._search_regex(
                r'data-ss-position=["\'](\d+)', section_item, 'section id',
                default=None))
            if chapter_number is not None:
                sections = []
                for s in re.findall(
                        r'(?s)<div[^>]+\bclass=["\']section-title[^>]+>(.+?)</div>',
                        webpage):
                    section = strip_or_none(clean_html(s))
                    if not section:
                        # one unparsable title invalidates position mapping
                        sections = []
                        break
                    sections.append(section)
                if chapter_number <= len(sections):
                    chapter = sections[chapter_number - 1]

        entries = [{
            '_type': 'url_transparent',
            'url': wistia_url,
            'ie_key': WistiaIE.ie_key(),
            'title': title,
            'chapter': chapter,
            'chapter_number': chapter_number,
        } for wistia_url in wistia_urls]

        return self.playlist_result(entries, video_id, title)


class TeachableCourseIE(TeachableBaseIE):
    """Course overview pages; yields a playlist of lecture URLs."""
    _VALID_URL = r'''(?x)
                    (?:
                        %shttps?://(?P<site_t>[^/]+)|
                        https?://(?:www\.)?(?P<site>%s)
                    )
                    /(?:courses|p)/(?:enrolled/)?(?P<id>[^/?#&]+)
                    ''' % TeachableBaseIE._VALID_URL_SUB_TUPLE
    _TESTS = [{
        'url': 'http://v1.upskillcourses.com/courses/essential-web-developer-course/',
        'info_dict': {
            'id': 'essential-web-developer-course',
            'title': 'The Essential Web Developer Course (Free)',
        },
        'playlist_count': 192,
    }, {
        'url': 'http://v1.upskillcourses.com/courses/119763/',
        'only_matching': True,
    }, {
        'url': 'http://v1.upskillcourses.com/courses/enrolled/119763',
        'only_matching': True,
    }, {
        'url': 'https://gns3.teachable.com/courses/enrolled/423415',
        'only_matching': True,
    }, {
        'url': 'teachable:https://learn.vrdev.school/p/gear-vr-developer-mini',
        'only_matching': True,
    }, {
        'url': 'teachable:https://filmsimplified.com/p/davinci-resolve-15-crash-course',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # lecture URLs are handled by TeachableIE
        return False if TeachableIE.suitable(url) else super(
            TeachableCourseIE, cls).suitable(url)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        site = mobj.group('site') or mobj.group('site_t')
        course_id = mobj.group('id')

        self._login(site)

        prefixed = url.startswith(self._URL_PREFIX)
        if prefixed:
            prefix = self._URL_PREFIX
            url = url[len(prefix):]

        webpage = self._download_webpage(url, course_id)

        url_base = 'https://%s/' % site

        entries = []

        for mobj in re.finditer(
                r'(?s)(?P<li><li[^>]+class=(["\'])(?:(?!\2).)*?section-item[^>]+>.+?</li>)',
                webpage):
            li = mobj.group('li')
            # only items with a play icon or a duration look like lectures
            if 'fa-youtube-play' not in li and not re.search(r'\d{1,2}:\d{2}', li):
                continue
            lecture_url = self._search_regex(
                r'<a[^>]+href=(["\'])(?P<url>(?:(?!\1).)+)\1', li, 'lecture url',
                default=None, group='url')
            if not lecture_url:
                continue
            lecture_id = self._search_regex(
                r'/lectures/(\d+)', lecture_url, 'lecture id', default=None)
            title = self._html_search_regex(
                r'<span[^>]+class=["\']lecture-name[^>]+>([^<]+)', li,
                'title', default=None)
            entry_url = urljoin(url_base, lecture_url)
            if prefixed:
                entry_url = self._URL_PREFIX + entry_url
            entries.append(
                self.url_result(
                    entry_url,
                    ie=TeachableIE.ie_key(), video_id=lecture_id,
                    video_title=clean_html(title)))

        course_title = self._html_search_regex(
            (r'(?s)<img[^>]+class=["\']course-image[^>]+>\s*<h\d>(.+?)</h',
             r'(?s)<h\d[^>]+class=["\']course-title[^>]+>(.+?)</h'),
            webpage, 'course title', fatal=False)

        return self.playlist_result(entries, course_id, course_title)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/zingmp3.py
youtube_dl/extractor/zingmp3.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
)


class ZingMp3BaseIE(InfoExtractor):
    """Shared extraction logic for mp3.zing.vn / zingmp3.vn items."""
    _VALID_URL_TMPL = r'https?://(?:mp3\.zing|zingmp3)\.vn/(?:%s)/[^/]+/(?P<id>\w+)\.html'
    _GEO_COUNTRIES = ['VN']

    def _extract_item(self, item, fatal):
        """Build an info dict from one API item; returns None when not
        fatal and no playable formats were found."""
        item_id = item['id']
        title = item.get('name') or item['title']

        formats = []
        for k, v in (item.get('source') or {}).items():
            if not v:
                continue
            if k in ('mp4', 'hls'):
                # video sources are keyed by resolution label (e.g. '480p')
                for res, video_url in v.items():
                    if not video_url:
                        continue
                    if k == 'hls':
                        formats.extend(self._extract_m3u8_formats(
                            video_url, item_id, 'mp4', 'm3u8_native',
                            m3u8_id=k, fatal=False))
                    elif k == 'mp4':
                        formats.append({
                            'format_id': 'mp4-' + res,
                            'url': video_url,
                            'height': int_or_none(self._search_regex(
                                r'^(\d+)p', res, 'resolution', default=None)),
                        })
            else:
                # audio source keys look like bitrates ('128', '320', ...)
                formats.append({
                    'ext': 'mp3',
                    'format_id': k,
                    'tbr': int_or_none(k),
                    'url': self._proto_relative_url(v),
                    'vcodec': 'none',
                })
        if not formats:
            if not fatal:
                return
            msg = item['msg']
            if msg == 'Sorry, this content is not available in your country.':
                self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
            raise ExtractorError(msg, expected=True)
        self._sort_formats(formats)

        subtitles = None
        lyric = item.get('lyric')
        if lyric:
            # lyrics are exposed as an LRC subtitle track
            subtitles = {
                'origin': [{
                    'url': lyric,
                }],
            }

        album = item.get('album') or {}

        return {
            'id': item_id,
            'title': title,
            'formats': formats,
            'thumbnail': item.get('thumbnail'),
            'subtitles': subtitles,
            'duration': int_or_none(item.get('duration')),
            'track': title,
            'artist': item.get('artists_names'),
            'album': album.get('name') or album.get('title'),
            'album_artist': album.get('artists_names'),
        }

    def _real_extract(self, url):
        page_id = self._match_id(url)
        # zingmp3.vn pages embed the same data-xml as mp3.zing.vn
        webpage = self._download_webpage(
            url.replace('://zingmp3.vn/', '://mp3.zing.vn/'), page_id,
            query={'play_song': 1})
        data_path = self._search_regex(
            r'data-xml="([^"]+)', webpage, 'data path')
        # subclasses define _process_data to shape the final result
        return self._process_data(self._download_json(
            'https://mp3.zing.vn/xhr' + data_path, page_id)['data'])


class ZingMp3IE(ZingMp3BaseIE):
    """Single song or video clip."""
    _VALID_URL = ZingMp3BaseIE._VALID_URL_TMPL % 'bai-hat|video-clip'
    _TESTS = [{
        'url': 'http://mp3.zing.vn/bai-hat/Xa-Mai-Xa-Bao-Thy/ZWZB9WAB.html',
        'md5': 'ead7ae13693b3205cbc89536a077daed',
        'info_dict': {
            'id': 'ZWZB9WAB',
            'title': 'Xa Mãi Xa',
            'ext': 'mp3',
            'thumbnail': r're:^https?://.+\.jpg',
            'subtitles': {
                'origin': [{
                    'ext': 'lrc',
                }]
            },
            'duration': 255,
            'track': 'Xa Mãi Xa',
            'artist': 'Bảo Thy',
            'album': 'Special Album',
            'album_artist': 'Bảo Thy',
        },
    }, {
        'url': 'https://mp3.zing.vn/video-clip/Suong-Hoa-Dua-Loi-K-ICM-RYO/ZO8ZF7C7.html',
        'md5': 'e9c972b693aa88301ef981c8151c4343',
        'info_dict': {
            'id': 'ZO8ZF7C7',
            'title': 'Sương Hoa Đưa Lối',
            'ext': 'mp4',
            'thumbnail': r're:^https?://.+\.jpg',
            'duration': 207,
            'track': 'Sương Hoa Đưa Lối',
            'artist': 'K-ICM, RYO',
        },
    }, {
        'url': 'https://zingmp3.vn/bai-hat/Xa-Mai-Xa-Bao-Thy/ZWZB9WAB.html',
        'only_matching': True,
    }]
    IE_NAME = 'zingmp3'
    IE_DESC = 'mp3.zing.vn'

    def _process_data(self, data):
        # fatal=True: a single item with no formats is an error
        return self._extract_item(data, True)


class ZingMp3AlbumIE(ZingMp3BaseIE):
    """Album / playlist pages."""
    _VALID_URL = ZingMp3BaseIE._VALID_URL_TMPL % 'album|playlist'
    _TESTS = [{
        'url': 'http://mp3.zing.vn/album/Lau-Dai-Tinh-Ai-Bang-Kieu-Minh-Tuyet/ZWZBWDAF.html',
        'info_dict': {
            '_type': 'playlist',
            'id': 'ZWZBWDAF',
            'title': 'Lâu Đài Tình Ái',
        },
        'playlist_count': 10,
    }, {
        'url': 'http://mp3.zing.vn/playlist/Duong-Hong-Loan-apollobee/IWCAACCB.html',
        'only_matching': True,
    }, {
        'url': 'https://zingmp3.vn/album/Lau-Dai-Tinh-Ai-Bang-Kieu-Minh-Tuyet/ZWZBWDAF.html',
        'only_matching': True,
    }]
    IE_NAME = 'zingmp3:album'

    def _process_data(self, data):
        def entries():
            for item in (data.get('items') or []):
                # fatal=False: skip unplayable items instead of failing
                entry = self._extract_item(item, False)
                if entry:
                    yield entry
        info = data.get('info') or {}
        return self.playlist_result(
            entries(), info.get('id'),
            info.get('name') or info.get('title'))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/iheart.py
youtube_dl/extractor/iheart.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    clean_html,
    clean_podcast_url,
    int_or_none,
    str_or_none,
)


class IHeartRadioBaseIE(InfoExtractor):
    """Shared helpers for the iHeartRadio podcast API."""

    def _call_api(self, path, video_id, fatal=True, query=None):
        return self._download_json(
            'https://api.iheart.com/api/v3/podcast/' + path,
            video_id, fatal=fatal, query=query)

    def _extract_episode(self, episode):
        # Metadata fields shared by the episode and podcast listings.
        return {
            'thumbnail': episode.get('imageUrl'),
            'description': clean_html(episode.get('description')),
            # startDate is in milliseconds
            'timestamp': int_or_none(episode.get('startDate'), 1000),
            'duration': int_or_none(episode.get('duration')),
        }


class IHeartRadioIE(IHeartRadioBaseIE):
    """Single podcast episode."""
    # Bug fix: was misspelled "IENAME", which InfoExtractor never reads, so
    # the extractor name silently fell back to the class-derived default.
    # Now consistent with IHeartRadioPodcastIE.IE_NAME below.
    IE_NAME = 'iheartradio'
    _VALID_URL = r'(?:https?://(?:www\.)?iheart\.com/podcast/[^/]+/episode/(?P<display_id>[^/?&#]+)-|iheartradio:)(?P<id>\d+)'
    _TEST = {
        'url': 'https://www.iheart.com/podcast/105-behind-the-bastards-29236323/episode/part-one-alexander-lukashenko-the-dictator-70346499/?embed=true',
        'md5': 'c8609c92c8688dcb69d8541042b8abca',
        'info_dict': {
            'id': '70346499',
            'ext': 'mp3',
            'title': 'Part One: Alexander Lukashenko: The Dictator of Belarus',
            'description': 'md5:96cc7297b3a5a9ebae28643801c96fae',
            'timestamp': 1597741200,
            'upload_date': '20200818',
        }
    }

    def _real_extract(self, url):
        episode_id = self._match_id(url)
        episode = self._call_api(
            'episodes/' + episode_id, episode_id)['episode']
        info = self._extract_episode(episode)
        info.update({
            'id': episode_id,
            'title': episode['title'],
            'url': clean_podcast_url(episode['mediaUrl']),
        })
        return info


class IHeartRadioPodcastIE(IHeartRadioBaseIE):
    """Whole podcast: yields a playlist of iheartradio: episode URLs."""
    IE_NAME = 'iheartradio:podcast'
    _VALID_URL = r'https?://(?:www\.)?iheart(?:podcastnetwork)?\.com/podcast/[^/?&#]+-(?P<id>\d+)/?(?:[?#&]|$)'
    _TESTS = [{
        'url': 'https://www.iheart.com/podcast/1119-it-could-happen-here-30717896/',
        'info_dict': {
            'id': '30717896',
            'title': 'It Could Happen Here',
            'description': 'md5:5842117412a967eb0b01f8088eb663e2',
        },
        'playlist_mincount': 11,
    }, {
        'url': 'https://www.iheartpodcastnetwork.com/podcast/105-stuff-you-should-know-26940277',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        podcast_id = self._match_id(url)
        path = 'podcasts/' + podcast_id
        episodes = self._call_api(
            path + '/episodes', podcast_id, query={'limit': 1000000000})['data']

        entries = []
        for episode in episodes:
            episode_id = str_or_none(episode.get('id'))
            if not episode_id:
                continue
            info = self._extract_episode(episode)
            info.update({
                '_type': 'url',
                'id': episode_id,
                'title': episode.get('title'),
                'url': 'iheartradio:' + episode_id,
                'ie_key': IHeartRadioIE.ie_key(),
            })
            entries.append(info)

        # non-fatal: playlist metadata is nice-to-have
        podcast = self._call_api(path, podcast_id, False) or {}

        return self.playlist_result(
            entries, podcast_id, podcast.get('title'),
            podcast.get('description'))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/traileraddict.py
youtube_dl/extractor/traileraddict.py
from __future__ import unicode_literals import re from .common import InfoExtractor class TrailerAddictIE(InfoExtractor): _WORKING = False _VALID_URL = r'(?:https?://)?(?:www\.)?traileraddict\.com/(?:trailer|clip)/(?P<movie>.+?)/(?P<trailer_name>.+)' _TEST = { 'url': 'http://www.traileraddict.com/trailer/prince-avalanche/trailer', 'md5': '41365557f3c8c397d091da510e73ceb4', 'info_dict': { 'id': '76184', 'ext': 'mp4', 'title': 'Prince Avalanche Trailer', 'description': 'Trailer for Prince Avalanche.\n\nTwo highway road workers spend the summer of 1988 away from their city lives. The isolated landscape becomes a place of misadventure as the men find themselves at odds with each other and the women they left behind.', } } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) name = mobj.group('movie') + '/' + mobj.group('trailer_name') webpage = self._download_webpage(url, name) title = self._search_regex(r'<title>(.+?)</title>', webpage, 'video title').replace(' - Trailer Addict', '') view_count_str = self._search_regex( r'<span class="views_n">([0-9,.]+)</span>', webpage, 'view count', fatal=False) view_count = ( None if view_count_str is None else int(view_count_str.replace(',', ''))) video_id = self._search_regex( r'<param\s+name="movie"\s+value="/emb/([0-9]+)"\s*/>', webpage, 'video id') # Presence of (no)watchplus function indicates HD quality is available if re.search(r'function (no)?watchplus()', webpage): fvar = 'fvarhd' else: fvar = 'fvar' info_url = 'http://www.traileraddict.com/%s.php?tid=%s' % (fvar, str(video_id)) info_webpage = self._download_webpage(info_url, video_id, 'Downloading the info webpage') final_url = self._search_regex(r'&fileurl=(.+)', info_webpage, 'Download url').replace('%3F', '?') thumbnail_url = self._search_regex(r'&image=(.+?)&', info_webpage, 'thumbnail url') description = self._html_search_regex( r'(?s)<div class="synopsis">.*?<div class="movie_label_info"[^>]*>(.*?)</div>', webpage, 'description', fatal=False) return 
{ 'id': video_id, 'url': final_url, 'title': title, 'thumbnail': thumbnail_url, 'description': description, 'view_count': view_count, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/mojvideo.py
youtube_dl/extractor/mojvideo.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, parse_duration, ) class MojvideoIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?mojvideo\.com/video-(?P<display_id>[^/]+)/(?P<id>[a-f0-9]+)' _TEST = { 'url': 'http://www.mojvideo.com/video-v-avtu-pred-mano-rdecelaska-alfi-nipic/3d1ed4497707730b2906', 'md5': 'f7fd662cc8ce2be107b0d4f2c0483ae7', 'info_dict': { 'id': '3d1ed4497707730b2906', 'display_id': 'v-avtu-pred-mano-rdecelaska-alfi-nipic', 'ext': 'mp4', 'title': 'V avtu pred mano rdečelaska - Alfi Nipič', 'thumbnail': r're:^http://.*\.jpg$', 'duration': 242, } } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') display_id = mobj.group('display_id') # XML is malformed playerapi = self._download_webpage( 'http://www.mojvideo.com/playerapi.php?v=%s&t=1' % video_id, display_id) if '<error>true</error>' in playerapi: error_desc = self._html_search_regex( r'<errordesc>([^<]*)</errordesc>', playerapi, 'error description', fatal=False) raise ExtractorError('%s said: %s' % (self.IE_NAME, error_desc), expected=True) title = self._html_search_regex( r'<title>([^<]+)</title>', playerapi, 'title') video_url = self._html_search_regex( r'<file>([^<]+)</file>', playerapi, 'video URL') thumbnail = self._html_search_regex( r'<preview>([^<]+)</preview>', playerapi, 'thumbnail', fatal=False) duration = parse_duration(self._html_search_regex( r'<duration>([^<]+)</duration>', playerapi, 'duration', fatal=False)) return { 'id': video_id, 'display_id': display_id, 'url': video_url, 'title': title, 'thumbnail': thumbnail, 'duration': duration, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/xboxclips.py
youtube_dl/extractor/xboxclips.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_parse_qs, compat_urllib_parse_urlparse, ) from ..utils import ( int_or_none, month_by_abbreviation, parse_filesize, ) class XboxClipsIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?(?:xboxclips\.com|gameclips\.io)/(?:video\.php\?.*vid=|[^/]+/)(?P<id>[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12})' _TESTS = [{ 'url': 'http://xboxclips.com/video.php?uid=2533274823424419&gamertag=Iabdulelah&vid=074a69a9-5faf-46aa-b93b-9909c1720325', 'md5': 'fbe1ec805e920aeb8eced3c3e657df5d', 'info_dict': { 'id': '074a69a9-5faf-46aa-b93b-9909c1720325', 'ext': 'mp4', 'title': 'iAbdulElah playing Titanfall', 'filesize_approx': 26800000, 'upload_date': '20140807', 'duration': 56, } }, { 'url': 'https://gameclips.io/iAbdulElah/074a69a9-5faf-46aa-b93b-9909c1720325', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) if '/video.php' in url: qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query) url = 'https://gameclips.io/%s/%s' % (qs['gamertag'][0], qs['vid'][0]) webpage = self._download_webpage(url, video_id) info = self._parse_html5_media_entries(url, webpage, video_id)[0] title = self._html_search_meta(['og:title', 'twitter:title'], webpage) upload_date = None mobj = re.search( r'>Recorded: (\d{2})-(Jan|Feb|Mar|Apr|May|Ju[nl]|Aug|Sep|Oct|Nov|Dec)-(\d{4})', webpage) if mobj: upload_date = '%s%.2d%s' % (mobj.group(3), month_by_abbreviation(mobj.group(2)), mobj.group(1)) filesize = parse_filesize(self._html_search_regex( r'>Size: ([^<]+)<', webpage, 'file size', fatal=False)) duration = int_or_none(self._html_search_regex( r'>Duration: (\d+) Seconds<', webpage, 'duration', fatal=False)) view_count = int_or_none(self._html_search_regex( r'>Views: (\d+)<', webpage, 'view count', fatal=False)) info.update({ 'id': video_id, 'title': title, 'upload_date': upload_date, 'filesize_approx': filesize, 'duration': duration, 
'view_count': view_count, }) return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/internetvideoarchive.py
youtube_dl/extractor/internetvideoarchive.py
from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..compat import ( compat_parse_qs, compat_urlparse, ) class InternetVideoArchiveIE(InfoExtractor): _VALID_URL = r'https?://video\.internetvideoarchive\.net/(?:player|flash/players)/.*?\?.*?publishedid.*?' _TEST = { 'url': 'http://video.internetvideoarchive.net/player/6/configuration.ashx?customerid=69249&publishedid=194487&reporttag=vdbetatitle&playerid=641&autolist=0&domain=www.videodetective.com&maxrate=high&minrate=low&socialplayer=false', 'info_dict': { 'id': '194487', 'ext': 'mp4', 'title': 'Kick-Ass 2', 'description': 'md5:c189d5b7280400630a1d3dd17eaa8d8a', }, 'params': { # m3u8 download 'skip_download': True, }, } @staticmethod def _build_json_url(query): return 'http://video.internetvideoarchive.net/player/6/configuration.ashx?' + query def _real_extract(self, url): query = compat_parse_qs(compat_urlparse.urlparse(url).query) video_id = query['publishedid'][0] data = self._download_json( 'https://video.internetvideoarchive.net/videojs7/videojs7.ivasettings.ashx', video_id, data=json.dumps({ 'customerid': query['customerid'][0], 'publishedid': video_id, }).encode()) title = data['Title'] formats = self._extract_m3u8_formats( data['VideoUrl'], video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) file_url = formats[0]['url'] if '.ism/' in file_url: replace_url = lambda x: re.sub(r'\.ism/[^?]+', '.ism/' + x, file_url) formats.extend(self._extract_f4m_formats( replace_url('.f4m'), video_id, f4m_id='hds', fatal=False)) formats.extend(self._extract_mpd_formats( replace_url('.mpd'), video_id, mpd_id='dash', fatal=False)) formats.extend(self._extract_ism_formats( replace_url('Manifest'), video_id, ism_id='mss', fatal=False)) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'formats': formats, 'thumbnail': data.get('PosterUrl'), 'description': data.get('Description'), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/cbssports.py
youtube_dl/extractor/cbssports.py
from __future__ import unicode_literals import re # from .cbs import CBSBaseIE from .common import InfoExtractor from ..utils import ( int_or_none, try_get, ) # class CBSSportsEmbedIE(CBSBaseIE): class CBSSportsEmbedIE(InfoExtractor): IE_NAME = 'cbssports:embed' _VALID_URL = r'''(?ix)https?://(?:(?:www\.)?cbs|embed\.247)sports\.com/player/embed.+? (?: ids%3D(?P<id>[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12})| pcid%3D(?P<pcid>\d+) )''' _TESTS = [{ 'url': 'https://www.cbssports.com/player/embed/?args=player_id%3Db56c03a6-231a-4bbe-9c55-af3c8a8e9636%26ids%3Db56c03a6-231a-4bbe-9c55-af3c8a8e9636%26resizable%3D1%26autoplay%3Dtrue%26domain%3Dcbssports.com%26comp_ads_enabled%3Dfalse%26watchAndRead%3D0%26startTime%3D0%26env%3Dprod', 'only_matching': True, }, { 'url': 'https://embed.247sports.com/player/embed/?args=%3fplayer_id%3d1827823171591%26channel%3dcollege-football-recruiting%26pcid%3d1827823171591%26width%3d640%26height%3d360%26autoplay%3dTrue%26comp_ads_enabled%3dFalse%26uvpc%3dhttps%253a%252f%252fwww.cbssports.com%252fapi%252fcontent%252fvideo%252fconfig%252f%253fcfg%253duvp_247sports_v4%2526partner%253d247%26uvpc_m%3dhttps%253a%252f%252fwww.cbssports.com%252fapi%252fcontent%252fvideo%252fconfig%252f%253fcfg%253duvp_247sports_m_v4%2526partner_m%253d247_mobile%26utag%3d247sportssite%26resizable%3dTrue', 'only_matching': True, }] # def _extract_video_info(self, filter_query, video_id): # return self._extract_feed_info('dJ5BDC', 'VxxJg8Ymh8sE', filter_query, video_id) def _real_extract(self, url): uuid, pcid = re.match(self._VALID_URL, url).groups() query = {'id': uuid} if uuid else {'pcid': pcid} video = self._download_json( 'https://www.cbssports.com/api/content/video/', uuid or pcid, query=query)[0] video_id = video['id'] title = video['title'] metadata = video.get('metaData') or {} # return self._extract_video_info('byId=%d' % metadata['mpxOutletId'], video_id) # return self._extract_video_info('byGuid=' + metadata['mpxRefId'], video_id) formats = 
self._extract_m3u8_formats( metadata['files'][0]['url'], video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) self._sort_formats(formats) image = video.get('image') thumbnails = None if image: image_path = image.get('path') if image_path: thumbnails = [{ 'url': image_path, 'width': int_or_none(image.get('width')), 'height': int_or_none(image.get('height')), 'filesize': int_or_none(image.get('size')), }] return { 'id': video_id, 'title': title, 'formats': formats, 'thumbnails': thumbnails, 'description': video.get('description'), 'timestamp': int_or_none(try_get(video, lambda x: x['dateCreated']['epoch'])), 'duration': int_or_none(metadata.get('duration')), } class CBSSportsBaseIE(InfoExtractor): def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) iframe_url = self._search_regex( r'<iframe[^>]+(?:data-)?src="(https?://[^/]+/player/embed[^"]+)"', webpage, 'embed url') return self.url_result(iframe_url, CBSSportsEmbedIE.ie_key()) class CBSSportsIE(CBSSportsBaseIE): IE_NAME = 'cbssports' _VALID_URL = r'https?://(?:www\.)?cbssports\.com/[^/]+/video/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://www.cbssports.com/college-football/video/cover-3-stanford-spring-gleaning/', 'info_dict': { 'id': 'b56c03a6-231a-4bbe-9c55-af3c8a8e9636', 'ext': 'mp4', 'title': 'Cover 3: Stanford Spring Gleaning', 'description': 'The Cover 3 crew break down everything you need to know about the Stanford Cardinal this spring.', 'timestamp': 1617218398, 'upload_date': '20210331', 'duration': 502, }, }] class TwentyFourSevenSportsIE(CBSSportsBaseIE): IE_NAME = '247sports' _VALID_URL = r'https?://(?:www\.)?247sports\.com/Video/(?:[^/?#&]+-)?(?P<id>\d+)' _TESTS = [{ 'url': 'https://247sports.com/Video/2021-QB-Jake-Garcia-senior-highlights-through-five-games-10084854/', 'info_dict': { 'id': '4f1265cb-c3b5-44a8-bb1d-1914119a0ccc', 'ext': 'mp4', 'title': '2021 QB Jake Garcia senior highlights through five games', 'description': 
'md5:8cb67ebed48e2e6adac1701e0ff6e45b', 'timestamp': 1607114223, 'upload_date': '20201204', 'duration': 208, }, }]
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/niconico.py
youtube_dl/extractor/niconico.py
# coding: utf-8 from __future__ import unicode_literals import datetime import itertools import json import re from .common import InfoExtractor, SearchInfoExtractor from ..postprocessor.ffmpeg import FFmpegPostProcessor from ..compat import ( compat_parse_qs, compat_str, compat_urllib_parse_urlparse, ) from ..utils import ( ExtractorError, dict_get, float_or_none, int_or_none, OnDemandPagedList, parse_duration, parse_iso8601, PostProcessingError, remove_start, str_or_none, try_get, unified_timestamp, urlencode_postdata, xpath_text, ) class NiconicoIE(InfoExtractor): IE_NAME = 'niconico' IE_DESC = 'ニコニコ動画' _TESTS = [{ 'url': 'http://www.nicovideo.jp/watch/sm22312215', 'md5': 'a5bad06f1347452102953f323c69da34s', 'info_dict': { 'id': 'sm22312215', 'ext': 'mp4', 'title': 'Big Buck Bunny', 'thumbnail': r're:https?://.*', 'uploader': 'takuya0301', 'uploader_id': '2698420', 'upload_date': '20131123', 'timestamp': int, # timestamp is unstable 'description': '(c) copyright 2008, Blender Foundation / www.bigbuckbunny.org', 'duration': 33, 'view_count': int, 'comment_count': int, }, 'skip': 'Requires an account', }, { # File downloaded with and without credentials are different, so omit # the md5 field 'url': 'http://www.nicovideo.jp/watch/nm14296458', 'info_dict': { 'id': 'nm14296458', 'ext': 'swf', 'title': '【鏡音リン】Dance on media【オリジナル】take2!', 'description': 'md5:689f066d74610b3b22e0f1739add0f58', 'thumbnail': r're:https?://.*', 'uploader': 'りょうた', 'uploader_id': '18822557', 'upload_date': '20110429', 'timestamp': 1304065916, 'duration': 209, }, 'skip': 'Requires an account', }, { # 'video exists but is marked as "deleted" # md5 is unstable 'url': 'http://www.nicovideo.jp/watch/sm10000', 'info_dict': { 'id': 'sm10000', 'ext': 'unknown_video', 'description': 'deleted', 'title': 'ドラえもんエターナル第3話「決戦第3新東京市」<前編>', 'thumbnail': r're:https?://.*', 'upload_date': '20071224', 'timestamp': int, # timestamp field has different value if logged in 'duration': 304, 'view_count': int, }, 
'skip': 'Requires an account', }, { 'url': 'http://www.nicovideo.jp/watch/so22543406', 'info_dict': { 'id': '1388129933', 'ext': 'mp4', 'title': '【第1回】RADIOアニメロミックス ラブライブ!~のぞえりRadio Garden~', 'description': 'md5:b27d224bb0ff53d3c8269e9f8b561cf1', 'thumbnail': r're:https?://.*', 'timestamp': 1388851200, 'upload_date': '20140104', 'uploader': 'アニメロチャンネル', 'uploader_id': '312', }, 'skip': 'The viewing period of the video you were searching for has expired.', }, { # video not available via `getflv`; "old" HTML5 video 'url': 'http://www.nicovideo.jp/watch/sm1151009', 'md5': '8fa81c364eb619d4085354eab075598a', 'info_dict': { 'id': 'sm1151009', 'ext': 'mp4', 'title': 'マスターシステム本体内蔵のスペハリのメインテーマ(PSG版)', 'description': 'md5:6ee077e0581ff5019773e2e714cdd0b7', 'thumbnail': r're:https?://.*', 'duration': 184, 'timestamp': 1190868283, 'upload_date': '20070927', 'uploader': 'denden2', 'uploader_id': '1392194', 'view_count': int, 'comment_count': int, }, 'skip': 'Requires an account', }, { # "New" HTML5 video # md5 is unstable 'url': 'http://www.nicovideo.jp/watch/sm31464864', 'info_dict': { 'id': 'sm31464864', 'ext': 'mp4', 'title': '新作TVアニメ「戦姫絶唱シンフォギアAXZ」PV 最高画質', 'description': 'md5:e52974af9a96e739196b2c1ca72b5feb', 'timestamp': 1498514060, 'upload_date': '20170626', 'uploader': 'ゲスト', 'uploader_id': '40826363', 'thumbnail': r're:https?://.*', 'duration': 198, 'view_count': int, 'comment_count': int, }, 'skip': 'Requires an account', }, { # Video without owner 'url': 'http://www.nicovideo.jp/watch/sm18238488', 'md5': 'd265680a1f92bdcbbd2a507fc9e78a9e', 'info_dict': { 'id': 'sm18238488', 'ext': 'mp4', 'title': '【実写版】ミュータントタートルズ', 'description': 'md5:15df8988e47a86f9e978af2064bf6d8e', 'timestamp': 1341160408, 'upload_date': '20120701', 'uploader': None, 'uploader_id': None, 'thumbnail': r're:https?://.*', 'duration': 5271, 'view_count': int, 'comment_count': int, }, 'skip': 'Requires an account', }, { 'url': 'http://sp.nicovideo.jp/watch/sm28964488?ss_pos=1&cp_in=wt_tg', 
'only_matching': True, }, { # DMC video with heartbeat 'url': 'https://www.nicovideo.jp/watch/sm34815188', 'md5': '9360c6e1f1519d7759e2fe8e1326ae83', 'info_dict': { 'id': 'sm34815188', 'ext': 'mp4', 'title': 'md5:aee93e9f3366db72f902f6cd5d389cb7', 'description': 'md5:7b9149fc7a00ab053cafaf5c19662704', 'thumbnail': r're:https?://.*', 'uploader': 'md5:2762e18fa74dbb40aa1ad27c6291ee32', 'uploader_id': '67449889', 'upload_date': '20190322', 'timestamp': int, # timestamp is unstable 'duration': 1082.0, 'view_count': int, 'comment_count': int, }, }] _VALID_URL = r'https?://(?:www\.|secure\.|sp\.)?nicovideo\.jp/watch/(?P<id>(?:[a-z]{2})?[0-9]+)' _NETRC_MACHINE = 'niconico' _API_HEADERS = { 'X-Frontend-ID': '6', 'X-Frontend-Version': '0' } def _real_initialize(self): self._login() def _login(self): username, password = self._get_login_info() # No authentication to be performed if not username: return True # Log in login_ok = True login_form_strs = { 'mail_tel': username, 'password': password, } urlh = self._request_webpage( 'https://account.nicovideo.jp/api/v1/login', None, note='Logging in', errnote='Unable to log in', data=urlencode_postdata(login_form_strs)) if urlh is False: login_ok = False else: parts = compat_urllib_parse_urlparse(urlh.geturl()) if compat_parse_qs(parts.query).get('message', [None])[0] == 'cant_login': login_ok = False if not login_ok: self._downloader.report_warning('unable to log in: bad username or password') return login_ok def _get_heartbeat_info(self, info_dict): video_id, video_src_id, audio_src_id = info_dict['url'].split(':')[1].split('/') api_data = ( info_dict.get('_api_data') or self._parse_json( self._html_search_regex( 'data-api-data="([^"]+)"', self._download_webpage('http://www.nicovideo.jp/watch/' + video_id, video_id), 'API data', default='{}'), video_id)) session_api_data = try_get(api_data, lambda x: x['media']['delivery']['movie']['session']) session_api_endpoint = try_get(session_api_data, lambda x: x['urls'][0]) def ping(): 
status = try_get( self._download_json( 'https://nvapi.nicovideo.jp/v1/2ab0cbaa/watch', video_id, query={'t': try_get(api_data, lambda x: x['media']['delivery']['trackingId'])}, note='Acquiring permission for downloading video', headers=self._API_HEADERS), lambda x: x['meta']['status']) if status != 200: self.report_warning('Failed to acquire permission for playing video. The video may not download.') yesno = lambda x: 'yes' if x else 'no' # m3u8 (encryption) if try_get(api_data, lambda x: x['media']['delivery']['encryption']) is not None: protocol = 'm3u8' encryption = self._parse_json(session_api_data['token'], video_id)['hls_encryption'] session_api_http_parameters = { 'parameters': { 'hls_parameters': { 'encryption': { encryption: { 'encrypted_key': try_get(api_data, lambda x: x['media']['delivery']['encryption']['encryptedKey']), 'key_uri': try_get(api_data, lambda x: x['media']['delivery']['encryption']['keyUri']) } }, 'transfer_preset': '', 'use_ssl': yesno(session_api_endpoint['isSsl']), 'use_well_known_port': yesno(session_api_endpoint['isWellKnownPort']), 'segment_duration': 6000, } } } # http else: protocol = 'http' session_api_http_parameters = { 'parameters': { 'http_output_download_parameters': { 'use_ssl': yesno(session_api_endpoint['isSsl']), 'use_well_known_port': yesno(session_api_endpoint['isWellKnownPort']), } } } session_response = self._download_json( session_api_endpoint['url'], video_id, query={'_format': 'json'}, headers={'Content-Type': 'application/json'}, note='Downloading JSON metadata for %s' % info_dict['format_id'], data=json.dumps({ 'session': { 'client_info': { 'player_id': session_api_data.get('playerId'), }, 'content_auth': { 'auth_type': try_get(session_api_data, lambda x: x['authTypes'][session_api_data['protocols'][0]]), 'content_key_timeout': session_api_data.get('contentKeyTimeout'), 'service_id': 'nicovideo', 'service_user_id': session_api_data.get('serviceUserId') }, 'content_id': session_api_data.get('contentId'), 
'content_src_id_sets': [{ 'content_src_ids': [{ 'src_id_to_mux': { 'audio_src_ids': [audio_src_id], 'video_src_ids': [video_src_id], } }] }], 'content_type': 'movie', 'content_uri': '', 'keep_method': { 'heartbeat': { 'lifetime': session_api_data.get('heartbeatLifetime') } }, 'priority': session_api_data.get('priority'), 'protocol': { 'name': 'http', 'parameters': { 'http_parameters': session_api_http_parameters } }, 'recipe_id': session_api_data.get('recipeId'), 'session_operation_auth': { 'session_operation_auth_by_signature': { 'signature': session_api_data.get('signature'), 'token': session_api_data.get('token'), } }, 'timing_constraint': 'unlimited' } }).encode()) info_dict['url'] = session_response['data']['session']['content_uri'] info_dict['protocol'] = protocol # get heartbeat info heartbeat_info_dict = { 'url': session_api_endpoint['url'] + '/' + session_response['data']['session']['id'] + '?_format=json&_method=PUT', 'data': json.dumps(session_response['data']), # interval, convert milliseconds to seconds, then halve to make a buffer. 'interval': float_or_none(session_api_data.get('heartbeatLifetime'), scale=3000), 'ping': ping } return info_dict, heartbeat_info_dict def _extract_format_for_quality(self, api_data, video_id, audio_quality, video_quality): def parse_format_id(id_code): mobj = re.match(r'''(?x) (?:archive_)? (?:(?P<codec>[^_]+)_)? (?:(?P<br>[\d]+)kbps_)? (?:(?P<res>[\d+]+)p_)? 
''', '%s_' % id_code) return mobj.groupdict() if mobj else {} protocol = 'niconico_dmc' format_id = '-'.join(map(lambda s: remove_start(s['id'], 'archive_'), [video_quality, audio_quality])) vdict = parse_format_id(video_quality['id']) adict = parse_format_id(audio_quality['id']) resolution = try_get(video_quality, lambda x: x['metadata']['resolution'], dict) or {'height': vdict.get('res')} vbr = try_get(video_quality, lambda x: x['metadata']['bitrate'], float) return { 'url': '%s:%s/%s/%s' % (protocol, video_id, video_quality['id'], audio_quality['id']), 'format_id': format_id, 'format_note': 'DMC %s' % try_get(video_quality, lambda x: x['metadata']['label'], compat_str), 'ext': 'mp4', # Session API are used in HTML5, which always serves mp4 'vcodec': vdict.get('codec'), 'acodec': adict.get('codec'), 'vbr': float_or_none(vbr, 1000) or float_or_none(vdict.get('br')), 'abr': float_or_none(audio_quality.get('bitrate'), 1000) or float_or_none(adict.get('br')), 'height': int_or_none(resolution.get('height', vdict.get('res'))), 'width': int_or_none(resolution.get('width')), 'quality': -2 if 'low' in format_id else -1, # Default quality value is -1 'protocol': protocol, 'http_headers': { 'Origin': 'https://www.nicovideo.jp', 'Referer': 'https://www.nicovideo.jp/watch/' + video_id, } } def _real_extract(self, url): video_id = self._match_id(url) # Get video webpage for API data. 
webpage, handle = self._download_webpage_handle( 'http://www.nicovideo.jp/watch/' + video_id, video_id) if video_id.startswith('so'): video_id = self._match_id(handle.geturl()) api_data = self._parse_json(self._html_search_regex( 'data-api-data="([^"]+)"', webpage, 'API data', default='{}'), video_id) def get_video_info_web(items): return dict_get(api_data['video'], items) # Get video info video_info_xml = self._download_xml( 'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id, video_id, note='Downloading video info page') def get_video_info_xml(items): if not isinstance(items, list): items = [items] for item in items: ret = xpath_text(video_info_xml, './/' + item) if ret: return ret if get_video_info_xml('error'): error_code = get_video_info_xml('code') if error_code == 'DELETED': raise ExtractorError('The video has been deleted.', expected=True) elif error_code == 'NOT_FOUND': raise ExtractorError('The video is not found.', expected=True) elif error_code == 'COMMUNITY': self.to_screen('%s: The video is community members only.' % video_id) else: raise ExtractorError('%s reports error: %s' % (self.IE_NAME, error_code)) # Start extracting video formats formats = [] # Get HTML5 videos info quality_info = try_get(api_data, lambda x: x['media']['delivery']['movie']) if not quality_info: raise ExtractorError('The video can\'t be downloaded', expected=True) for audio_quality in quality_info.get('audios') or {}: for video_quality in quality_info.get('videos') or {}: if not audio_quality.get('isAvailable') or not video_quality.get('isAvailable'): continue formats.append(self._extract_format_for_quality( api_data, video_id, audio_quality, video_quality)) # Get flv/swf info timestamp = None video_real_url = try_get(api_data, lambda x: x['video']['smileInfo']['url']) if video_real_url: is_economy = video_real_url.endswith('low') if is_economy: self.report_warning('Site is currently in economy mode! 
You will only have access to lower quality streams') # Invoking ffprobe to determine resolution pp = FFmpegPostProcessor(self._downloader) cookies = self._get_cookies('https://nicovideo.jp').output(header='', sep='; path=/; domain=nicovideo.jp;\n') self.to_screen('%s: %s' % (video_id, 'Checking smile format with ffprobe')) try: metadata = pp.get_metadata_object(video_real_url, ['-cookies', cookies]) except PostProcessingError as err: raise ExtractorError(err.msg, expected=True) v_stream = a_stream = {} # Some complex swf files doesn't have video stream (e.g. nm4809023) for stream in metadata['streams']: if stream['codec_type'] == 'video': v_stream = stream elif stream['codec_type'] == 'audio': a_stream = stream # Community restricted videos seem to have issues with the thumb API not returning anything at all filesize = int( (get_video_info_xml('size_high') if not is_economy else get_video_info_xml('size_low')) or metadata['format']['size'] ) extension = ( get_video_info_xml('movie_type') or 'mp4' if 'mp4' in metadata['format']['format_name'] else metadata['format']['format_name'] ) # 'creation_time' tag on video stream of re-encoded SMILEVIDEO mp4 files are '1970-01-01T00:00:00.000000Z'. timestamp = ( parse_iso8601(get_video_info_web('first_retrieve')) or unified_timestamp(get_video_info_web('postedDateTime')) ) metadata_timestamp = ( parse_iso8601(try_get(v_stream, lambda x: x['tags']['creation_time'])) or timestamp if extension != 'mp4' else 0 ) # According to compconf, smile videos from pre-2017 are always better quality than their DMC counterparts smile_threshold_timestamp = parse_iso8601('2016-12-08T00:00:00+09:00') is_source = timestamp < smile_threshold_timestamp or metadata_timestamp > 0 # If movie file size is unstable, old server movie is not source movie. 
if filesize > 1: formats.append({ 'url': video_real_url, 'format_id': 'smile' if not is_economy else 'smile_low', 'format_note': 'SMILEVIDEO source' if not is_economy else 'SMILEVIDEO low quality', 'ext': extension, 'container': extension, 'vcodec': v_stream.get('codec_name'), 'acodec': a_stream.get('codec_name'), # Some complex swf files doesn't have total bit rate metadata (e.g. nm6049209) 'tbr': int_or_none(metadata['format'].get('bit_rate'), scale=1000), 'vbr': int_or_none(v_stream.get('bit_rate'), scale=1000), 'abr': int_or_none(a_stream.get('bit_rate'), scale=1000), 'height': int_or_none(v_stream.get('height')), 'width': int_or_none(v_stream.get('width')), 'source_preference': 5 if not is_economy else -2, 'quality': 5 if is_source and not is_economy else None, 'filesize': filesize }) self._sort_formats(formats) # Start extracting information title = ( get_video_info_xml('title') # prefer to get the untranslated original title or get_video_info_web(['originalTitle', 'title']) or self._og_search_title(webpage, default=None) or self._html_search_regex( r'<span[^>]+class="videoHeaderTitle"[^>]*>([^<]+)</span>', webpage, 'video title')) watch_api_data_string = self._html_search_regex( r'<div[^>]+id="watchAPIDataContainer"[^>]+>([^<]+)</div>', webpage, 'watch api data', default=None) watch_api_data = self._parse_json(watch_api_data_string, video_id) if watch_api_data_string else {} video_detail = watch_api_data.get('videoDetail', {}) thumbnail = ( self._html_search_regex(r'<meta property="og:image" content="([^"]+)">', webpage, 'thumbnail data', default=None) or dict_get( # choose highest from 720p to 240p get_video_info_web('thumbnail'), ['ogp', 'player', 'largeUrl', 'middleUrl', 'url']) or self._html_search_meta('image', webpage, 'thumbnail', default=None) or video_detail.get('thumbnail')) description = get_video_info_web('description') if not timestamp: match = self._html_search_meta('datePublished', webpage, 'date published', default=None) if match: timestamp = 
parse_iso8601(match.replace('+', ':00+')) if not timestamp and video_detail.get('postedAt'): timestamp = parse_iso8601( video_detail['postedAt'].replace('/', '-'), delimiter=' ', timezone=datetime.timedelta(hours=9)) timestamp = timestamp or try_get(api_data, lambda x: parse_iso8601(x['video']['registeredAt'])) view_count = int_or_none(get_video_info_web(['view_counter', 'viewCount'])) if not view_count: match = self._html_search_regex( r'>Views: <strong[^>]*>([^<]+)</strong>', webpage, 'view count', default=None) if match: view_count = int_or_none(match.replace(',', '')) view_count = ( view_count or video_detail.get('viewCount') or try_get(api_data, lambda x: x['video']['count']['view'])) comment_count = ( int_or_none(get_video_info_web('comment_num')) or video_detail.get('commentCount') or try_get(api_data, lambda x: x['video']['count']['comment'])) if not comment_count: match = self._html_search_regex( r'>Comments: <strong[^>]*>([^<]+)</strong>', webpage, 'comment count', default=None) if match: comment_count = int_or_none(match.replace(',', '')) duration = (parse_duration( get_video_info_web('length') or self._html_search_meta( 'video:duration', webpage, 'video duration', default=None)) or video_detail.get('length') or get_video_info_web('duration')) webpage_url = get_video_info_web('watch_url') or url # for channel movie and community movie channel_id = try_get( api_data, (lambda x: x['channel']['globalId'], lambda x: x['community']['globalId'])) channel = try_get( api_data, (lambda x: x['channel']['name'], lambda x: x['community']['name'])) # Note: cannot use api_data.get('owner', {}) because owner may be set to "null" # in the JSON, which will cause None to be returned instead of {}. 
owner = try_get(api_data, lambda x: x.get('owner'), dict) or {} uploader_id = str_or_none( get_video_info_web(['ch_id', 'user_id']) or owner.get('id') or channel_id ) uploader = ( get_video_info_web(['ch_name', 'user_nickname']) or owner.get('nickname') or channel ) return { 'id': video_id, '_api_data': api_data, 'title': title, 'formats': formats, 'thumbnail': thumbnail, 'description': description, 'uploader': uploader, 'timestamp': timestamp, 'uploader_id': uploader_id, 'channel': channel, 'channel_id': channel_id, 'view_count': view_count, 'comment_count': comment_count, 'duration': duration, 'webpage_url': webpage_url, } class NiconicoPlaylistIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/(?:user/\d+/|my/)?mylist/(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.nicovideo.jp/mylist/27411728', 'info_dict': { 'id': '27411728', 'title': 'AKB48のオールナイトニッポン', 'description': 'md5:d89694c5ded4b6c693dea2db6e41aa08', 'uploader': 'のっく', 'uploader_id': '805442', }, 'playlist_mincount': 225, }, { 'url': 'https://www.nicovideo.jp/user/805442/mylist/27411728', 'only_matching': True, }] _API_HEADERS = { 'X-Frontend-ID': '6', 'X-Frontend-Version': '0' } def _real_extract(self, url): list_id = self._match_id(url) def get_page_data(pagenum, pagesize): return self._download_json( 'http://nvapi.nicovideo.jp/v2/mylists/' + list_id, list_id, query={'page': 1 + pagenum, 'pageSize': pagesize}, headers=self._API_HEADERS).get('data').get('mylist') data = get_page_data(0, 1) title = data.get('name') description = data.get('description') uploader = data.get('owner').get('name') uploader_id = data.get('owner').get('id') def pagefunc(pagenum): data = get_page_data(pagenum, 25) return ({ '_type': 'url', 'url': 'http://www.nicovideo.jp/watch/' + item.get('watchId'), } for item in data.get('items')) return { '_type': 'playlist', 'id': list_id, 'title': title, 'description': description, 'uploader': uploader, 'uploader_id': uploader_id, 'entries': OnDemandPagedList(pagefunc, 25), } 
class NicovideoSearchBaseIE(InfoExtractor): _MAX_RESULTS = float('inf') def _entries(self, url, item_id, query=None, note='Downloading page %(page)s'): query = query or {} pages = [query['page']] if 'page' in query else itertools.count(1) for page_num in pages: query['page'] = str(page_num) webpage = self._download_webpage(url, item_id, query=query, note=note % {'page': page_num}) results = re.findall(r'(?<=data-video-id=)["\']?(?P<videoid>.+?)(?=["\'])', webpage) for item in results: yield self.url_result('http://www.nicovideo.jp/watch/%s' % item, 'Niconico', item) if not results: break def _get_n_results(self, query, n): entries = self._entries(self._proto_relative_url('//www.nicovideo.jp/search/%s' % query), query) if n < self._MAX_RESULTS: entries = itertools.islice(entries, 0, n) return self.playlist_result(entries, query, query) class NicovideoSearchIE(NicovideoSearchBaseIE, SearchInfoExtractor): IE_DESC = 'Nico video search' IE_NAME = 'nicovideo:search' _SEARCH_KEY = 'nicosearch' def _search_results(self, query): return self._entries( self._proto_relative_url('//www.nicovideo.jp/search/%s' % query), query) class NicovideoSearchURLIE(NicovideoSearchBaseIE): IE_NAME = '%s_url' % NicovideoSearchIE.IE_NAME IE_DESC = 'Nico video search URLs' _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/search/(?P<id>[^?#&]+)?' 
_TESTS = [{ 'url': 'http://www.nicovideo.jp/search/sm9', 'info_dict': { 'id': 'sm9', 'title': 'sm9' }, 'playlist_mincount': 40, }, { 'url': 'https://www.nicovideo.jp/search/sm9?sort=h&order=d&end=2020-12-31&start=2020-01-01', 'info_dict': { 'id': 'sm9', 'title': 'sm9' }, 'playlist_count': 31, }] def _real_extract(self, url): query = self._match_id(url) return self.playlist_result(self._entries(url, query), query, query) class NicovideoSearchDateIE(NicovideoSearchBaseIE, SearchInfoExtractor): IE_DESC = 'Nico video search, newest first' IE_NAME = '%s:date' % NicovideoSearchIE.IE_NAME _SEARCH_KEY = 'nicosearchdate' _TESTS = [{ 'url': 'nicosearchdateall:a', 'info_dict': { 'id': 'a', 'title': 'a' }, 'playlist_mincount': 1610, }] _START_DATE = datetime.date(2007, 1, 1) _RESULTS_PER_PAGE = 32 _MAX_PAGES = 50 def _entries(self, url, item_id, start_date=None, end_date=None): start_date, end_date = start_date or self._START_DATE, end_date or datetime.datetime.now().date() # If the last page has a full page of videos, we need to break down the query interval further last_page_len = len(list(self._get_entries_for_date( url, item_id, start_date, end_date, self._MAX_PAGES, note='Checking number of videos from {0} to {1}'.format(start_date, end_date)))) if (last_page_len == self._RESULTS_PER_PAGE and start_date != end_date): midpoint = start_date + ((end_date - start_date) // 2) for entry in itertools.chain( iter(self._entries(url, item_id, midpoint, end_date)), iter(self._entries(url, item_id, start_date, midpoint))): yield entry else: self.to_screen('{0}: Downloading results from {1} to {2}'.format(item_id, start_date, end_date)) for entry in iter(self._get_entries_for_date( url, item_id, start_date, end_date, note=' Downloading page %(page)s')): yield entry def _get_entries_for_date(self, url, item_id, start_date, end_date=None, page_num=None, note=None): query = { 'start': compat_str(start_date), 'end': compat_str(end_date or start_date), 'sort': 'f', 'order': 'd', } if 
page_num: query['page'] = compat_str(page_num) for entry in iter(super(NicovideoSearchDateIE, self)._entries(url, item_id, query=query, note=note)): yield entry class NiconicoUserIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/user/(?P<id>\d+)/?(?:$|[#?])' _TEST = { 'url': 'https://www.nicovideo.jp/user/419948', 'info_dict': { 'id': '419948', }, 'playlist_mincount': 101, } _API_URL = "https://nvapi.nicovideo.jp/v1/users/%s/videos?sortKey=registeredAt&sortOrder=desc&pageSize=%s&page=%s" _PAGE_SIZE = 100 _API_HEADERS = { 'X-Frontend-ID': '6',
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
true
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/democracynow.py
youtube_dl/extractor/democracynow.py
# coding: utf-8 from __future__ import unicode_literals import re import os.path from .common import InfoExtractor from ..compat import compat_urlparse from ..utils import ( url_basename, remove_start, ) class DemocracynowIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?democracynow\.org/(?P<id>[^\?]*)' IE_NAME = 'democracynow' _TESTS = [{ 'url': 'http://www.democracynow.org/shows/2015/7/3', 'md5': '3757c182d3d84da68f5c8f506c18c196', 'info_dict': { 'id': '2015-0703-001', 'ext': 'mp4', 'title': 'Daily Show for July 03, 2015', 'description': 'md5:80eb927244d6749900de6072c7cc2c86', }, }, { 'url': 'http://www.democracynow.org/2015/7/3/this_flag_comes_down_today_bree', 'info_dict': { 'id': '2015-0703-001', 'ext': 'mp4', 'title': '"This Flag Comes Down Today": Bree Newsome Scales SC Capitol Flagpole, Takes Down Confederate Flag', 'description': 'md5:4d2bc4f0d29f5553c2210a4bc7761a21', }, 'params': { 'skip_download': True, }, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) json_data = self._parse_json(self._search_regex( r'<script[^>]+type="text/json"[^>]*>\s*({[^>]+})', webpage, 'json'), display_id) title = json_data['title'] formats = [] video_id = None for key in ('file', 'audio', 'video', 'high_res_video'): media_url = json_data.get(key, '') if not media_url: continue media_url = re.sub(r'\?.*', '', compat_urlparse.urljoin(url, media_url)) video_id = video_id or remove_start(os.path.splitext(url_basename(media_url))[0], 'dn') formats.append({ 'url': media_url, 'vcodec': 'none' if key == 'audio' else None, }) self._sort_formats(formats) default_lang = 'en' subtitles = {} def add_subtitle_item(lang, info_dict): if lang not in subtitles: subtitles[lang] = [] subtitles[lang].append(info_dict) # chapter_file are not subtitles if 'caption_file' in json_data: add_subtitle_item(default_lang, { 'url': compat_urlparse.urljoin(url, json_data['caption_file']), }) for subtitle_item in json_data.get('captions', 
[]): lang = subtitle_item.get('language', '').lower() or default_lang add_subtitle_item(lang, { 'url': compat_urlparse.urljoin(url, subtitle_item['url']), }) description = self._og_search_description(webpage, default=None) return { 'id': video_id or display_id, 'title': title, 'description': description, 'thumbnail': json_data.get('image'), 'subtitles': subtitles, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/sprout.py
youtube_dl/extractor/sprout.py
# coding: utf-8 from __future__ import unicode_literals from .adobepass import AdobePassIE from ..utils import ( int_or_none, smuggle_url, update_url_query, ) class SproutIE(AdobePassIE): _VALID_URL = r'https?://(?:www\.)?(?:sproutonline|universalkids)\.com/(?:watch|(?:[^/]+/)*videos)/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://www.universalkids.com/shows/remy-and-boo/season/1/videos/robot-bike-race', 'info_dict': { 'id': 'bm0foJFaTKqb', 'ext': 'mp4', 'title': 'Robot Bike Race', 'description': 'md5:436b1d97117cc437f54c383f4debc66d', 'timestamp': 1606148940, 'upload_date': '20201123', 'uploader': 'NBCU-MPAT', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.sproutonline.com/watch/cowboy-adventure', 'only_matching': True, }, { 'url': 'https://www.universalkids.com/watch/robot-bike-race', 'only_matching': True, }] _GEO_COUNTRIES = ['US'] def _real_extract(self, url): display_id = self._match_id(url) mpx_metadata = self._download_json( # http://nbcuunikidsprod.apps.nbcuni.com/networks/universalkids/content/videos/ 'https://www.universalkids.com/_api/videos/' + display_id, display_id)['mpxMetadata'] media_pid = mpx_metadata['mediaPid'] theplatform_url = 'https://link.theplatform.com/s/HNK2IC/' + media_pid query = { 'mbr': 'true', 'manifest': 'm3u', } if mpx_metadata.get('entitlement') == 'auth': query['auth'] = self._extract_mvpd_auth(url, media_pid, 'sprout', 'sprout') theplatform_url = smuggle_url( update_url_query(theplatform_url, query), { 'force_smil_url': True, 'geo_countries': self._GEO_COUNTRIES, }) return { '_type': 'url_transparent', 'id': media_pid, 'url': theplatform_url, 'series': mpx_metadata.get('seriesName'), 'season_number': int_or_none(mpx_metadata.get('seasonNumber')), 'episode_number': int_or_none(mpx_metadata.get('episodeNumber')), 'ie_key': 'ThePlatform', }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/bravotv.py
youtube_dl/extractor/bravotv.py
# coding: utf-8 from __future__ import unicode_literals import re from .adobepass import AdobePassIE from ..utils import ( smuggle_url, update_url_query, int_or_none, ) class BravoTVIE(AdobePassIE): _VALID_URL = r'https?://(?:www\.)?(?P<req_id>bravotv|oxygen)\.com/(?:[^/]+/)+(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://www.bravotv.com/top-chef/season-16/episode-15/videos/the-top-chef-season-16-winner-is', 'md5': 'e34684cfea2a96cd2ee1ef3a60909de9', 'info_dict': { 'id': 'epL0pmK1kQlT', 'ext': 'mp4', 'title': 'The Top Chef Season 16 Winner Is...', 'description': 'Find out who takes the title of Top Chef!', 'uploader': 'NBCU-BRAV', 'upload_date': '20190314', 'timestamp': 1552591860, } }, { 'url': 'http://www.bravotv.com/below-deck/season-3/ep-14-reunion-part-1', 'only_matching': True, }, { 'url': 'https://www.oxygen.com/in-ice-cold-blood/season-2/episode-16/videos/handling-the-horwitz-house-after-the-murder-season-2', 'only_matching': True, }] def _real_extract(self, url): site, display_id = re.match(self._VALID_URL, url).groups() webpage = self._download_webpage(url, display_id) settings = self._parse_json(self._search_regex( r'<script[^>]+data-drupal-selector="drupal-settings-json"[^>]*>({.+?})</script>', webpage, 'drupal settings'), display_id) info = {} query = { 'mbr': 'true', } account_pid, release_pid = [None] * 2 tve = settings.get('ls_tve') if tve: query['manifest'] = 'm3u' mobj = re.search(r'<[^>]+id="pdk-player"[^>]+data-url=["\']?(?:https?:)?//player\.theplatform\.com/p/([^/]+)/(?:[^/]+/)*select/([^?#&"\']+)', webpage) if mobj: account_pid, tp_path = mobj.groups() release_pid = tp_path.strip('/').split('/')[-1] else: account_pid = 'HNK2IC' tp_path = release_pid = tve['release_pid'] if tve.get('entitlement') == 'auth': adobe_pass = settings.get('tve_adobe_auth', {}) if site == 'bravotv': site = 'bravo' resource = self._get_mvpd_resource( adobe_pass.get('adobePassResourceId') or site, tve['title'], release_pid, tve.get('rating')) query['auth'] = 
self._extract_mvpd_auth( url, release_pid, adobe_pass.get('adobePassRequestorId') or site, resource) else: shared_playlist = settings['ls_playlist'] account_pid = shared_playlist['account_pid'] metadata = shared_playlist['video_metadata'][shared_playlist['default_clip']] tp_path = release_pid = metadata.get('release_pid') if not release_pid: release_pid = metadata['guid'] tp_path = 'media/guid/2140479951/' + release_pid info.update({ 'title': metadata['title'], 'description': metadata.get('description'), 'season_number': int_or_none(metadata.get('season_num')), 'episode_number': int_or_none(metadata.get('episode_num')), }) query['switch'] = 'progressive' info.update({ '_type': 'url_transparent', 'id': release_pid, 'url': smuggle_url(update_url_query( 'http://link.theplatform.com/s/%s/%s' % (account_pid, tp_path), query), {'force_smil_url': True}), 'ie_key': 'ThePlatform', }) return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/viidea.py
youtube_dl/extractor/viidea.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_HTTPError, compat_str, compat_urlparse, ) from ..utils import ( ExtractorError, js_to_json, parse_duration, parse_iso8601, ) class ViideaIE(InfoExtractor): _VALID_URL = r'''(?x)https?://(?:www\.)?(?: videolectures\.net| flexilearn\.viidea\.net| presentations\.ocwconsortium\.org| video\.travel-zoom\.si| video\.pomp-forum\.si| tv\.nil\.si| video\.hekovnik.com| video\.szko\.si| kpk\.viidea\.com| inside\.viidea\.net| video\.kiberpipa\.org| bvvideo\.si| kongres\.viidea\.net| edemokracija\.viidea\.com )(?:/lecture)?/(?P<id>[^/]+)(?:/video/(?P<part>\d+))?/*(?:[#?].*)?$''' _TESTS = [{ 'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/', 'info_dict': { 'id': '20171', 'display_id': 'promogram_igor_mekjavic_eng', 'ext': 'mp4', 'title': 'Automatics, robotics and biocybernetics', 'description': 'md5:815fc1deb6b3a2bff99de2d5325be482', 'thumbnail': r're:http://.*\.jpg', 'timestamp': 1372349289, 'upload_date': '20130627', 'duration': 565, }, 'params': { # m3u8 download 'skip_download': True, }, }, { # video with invalid direct format links (HTTP 403) 'url': 'http://videolectures.net/russir2010_filippova_nlp/', 'info_dict': { 'id': '14891', 'display_id': 'russir2010_filippova_nlp', 'ext': 'flv', 'title': 'NLP at Google', 'description': 'md5:fc7a6d9bf0302d7cc0e53f7ca23747b3', 'thumbnail': r're:http://.*\.jpg', 'timestamp': 1284375600, 'upload_date': '20100913', 'duration': 5352, }, 'params': { # rtmp download 'skip_download': True, }, }, { # event playlist 'url': 'http://videolectures.net/deeplearning2015_montreal/', 'info_dict': { 'id': '23181', 'title': 'Deep Learning Summer School, Montreal 2015', 'description': 'md5:0533a85e4bd918df52a01f0e1ebe87b7', 'thumbnail': r're:http://.*\.jpg', 'timestamp': 1438560000, }, 'playlist_count': 30, }, { # multi part lecture 'url': 'http://videolectures.net/mlss09uk_bishop_ibi/', 'info_dict': { 'id': '9737', 'display_id': 
'mlss09uk_bishop_ibi', 'title': 'Introduction To Bayesian Inference', 'thumbnail': r're:http://.*\.jpg', 'timestamp': 1251622800, }, 'playlist': [{ 'info_dict': { 'id': '9737_part1', 'display_id': 'mlss09uk_bishop_ibi_part1', 'ext': 'wmv', 'title': 'Introduction To Bayesian Inference (Part 1)', 'thumbnail': r're:http://.*\.jpg', 'duration': 4622, 'timestamp': 1251622800, 'upload_date': '20090830', }, }, { 'info_dict': { 'id': '9737_part2', 'display_id': 'mlss09uk_bishop_ibi_part2', 'ext': 'wmv', 'title': 'Introduction To Bayesian Inference (Part 2)', 'thumbnail': r're:http://.*\.jpg', 'duration': 5641, 'timestamp': 1251622800, 'upload_date': '20090830', }, }], 'playlist_count': 2, }] def _real_extract(self, url): lecture_slug, explicit_part_id = re.match(self._VALID_URL, url).groups() webpage = self._download_webpage(url, lecture_slug) cfg = self._parse_json(self._search_regex( [r'cfg\s*:\s*({.+?})\s*,\s*[\da-zA-Z_]+\s*:\s*\(?\s*function', r'cfg\s*:\s*({[^}]+})'], webpage, 'cfg'), lecture_slug, js_to_json) lecture_id = compat_str(cfg['obj_id']) base_url = self._proto_relative_url(cfg['livepipe'], 'http:') try: lecture_data = self._download_json( '%s/site/api/lecture/%s?format=json' % (base_url, lecture_id), lecture_id)['lecture'][0] except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403: msg = self._parse_json( e.cause.read().decode('utf-8'), lecture_id) raise ExtractorError(msg['detail'], expected=True) raise lecture_info = { 'id': lecture_id, 'display_id': lecture_slug, 'title': lecture_data['title'], 'timestamp': parse_iso8601(lecture_data.get('time')), 'description': lecture_data.get('description_wiki'), 'thumbnail': lecture_data.get('thumb'), } playlist_entries = [] lecture_type = lecture_data.get('type') parts = [compat_str(video) for video in cfg.get('videos', [])] if parts: multipart = len(parts) > 1 def extract_part(part_id): smil_url = '%s/%s/video/%s/smil.xml' % (base_url, lecture_slug, part_id) smil = 
self._download_smil(smil_url, lecture_id) info = self._parse_smil(smil, smil_url, lecture_id) self._sort_formats(info['formats']) info['id'] = lecture_id if not multipart else '%s_part%s' % (lecture_id, part_id) info['display_id'] = lecture_slug if not multipart else '%s_part%s' % (lecture_slug, part_id) if multipart: info['title'] += ' (Part %s)' % part_id switch = smil.find('.//switch') if switch is not None: info['duration'] = parse_duration(switch.attrib.get('dur')) item_info = lecture_info.copy() item_info.update(info) return item_info if explicit_part_id or not multipart: result = extract_part(explicit_part_id or parts[0]) else: result = { '_type': 'multi_video', 'entries': [extract_part(part) for part in parts], } result.update(lecture_info) # Immediately return explicitly requested part or non event item if explicit_part_id or lecture_type != 'evt': return result playlist_entries.append(result) # It's probably a playlist if not parts or lecture_type == 'evt': playlist_webpage = self._download_webpage( '%s/site/ajax/drilldown/?id=%s' % (base_url, lecture_id), lecture_id) entries = [ self.url_result(compat_urlparse.urljoin(url, video_url), 'Viidea') for _, video_url in re.findall( r'<a[^>]+href=(["\'])(.+?)\1[^>]+id=["\']lec=\d+', playlist_webpage)] playlist_entries.extend(entries) playlist = self.playlist_result(playlist_entries, lecture_id) playlist.update(lecture_info) return playlist
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/palcomp3.py
youtube_dl/extractor/palcomp3.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_str from ..utils import ( int_or_none, str_or_none, traverse_obj, ) class PalcoMP3BaseIE(InfoExtractor): _GQL_QUERY_TMPL = '''{ artist(slug: "%s") { %s } }''' _ARTIST_FIELDS_TMPL = '''music(slug: "%%s") { %s }''' _MUSIC_FIELDS = '''duration hls mp3File musicID plays title''' def _call_api(self, artist_slug, artist_fields): return self._download_json( 'https://www.palcomp3.com.br/graphql/', artist_slug, query={ 'query': self._GQL_QUERY_TMPL % (artist_slug, artist_fields), })['data'] def _parse_music(self, music): music_id = compat_str(music['musicID']) title = music['title'] formats = [] hls_url = music.get('hls') if hls_url: formats.append({ 'url': hls_url, 'protocol': 'm3u8_native', 'ext': 'mp4', }) mp3_file = music.get('mp3File') if mp3_file: formats.append({ 'url': mp3_file, }) return { 'id': music_id, 'title': title, 'formats': formats, 'duration': int_or_none(music.get('duration')), 'view_count': int_or_none(music.get('plays')), } def _real_initialize(self): self._ARTIST_FIELDS_TMPL = self._ARTIST_FIELDS_TMPL % self._MUSIC_FIELDS def _real_extract(self, url): artist_slug, music_slug = re.match(self._VALID_URL, url).groups() artist_fields = self._ARTIST_FIELDS_TMPL % music_slug music = self._call_api(artist_slug, artist_fields)['artist']['music'] return self._parse_music(music) class PalcoMP3IE(PalcoMP3BaseIE): IE_NAME = 'PalcoMP3:song' _VALID_URL = r'https?://(?:www\.)?palcomp3\.com(?:\.br)?/(?P<artist>[^/]+)/(?P<id>[^/?&#]+)' _TESTS = [{ 'url': 'https://www.palcomp3.com/maiaraemaraisaoficial/nossas-composicoes-cuida-bem-dela/', 'md5': '99fd6405b2d8fd589670f6db1ba3b358', 'info_dict': { 'id': '3162927', 'ext': 'mp3', 'title': 'Nossas Composições - CUIDA BEM DELA', 'duration': 210, 'view_count': int, } }] @classmethod def suitable(cls, url): return False if PalcoMP3VideoIE.suitable(url) else super(PalcoMP3IE, cls).suitable(url) class 
PalcoMP3ArtistIE(PalcoMP3BaseIE): IE_NAME = 'PalcoMP3:artist' _VALID_URL = r'https?://(?:www\.)?palcomp3\.com(?:\.br)?/(?P<id>[^/?&#]+)' _TESTS = [{ 'url': 'https://www.palcomp3.com.br/condedoforro/', 'info_dict': { 'id': '358396', 'title': 'Conde do Forró', }, 'playlist_mincount': 188, }] _ARTIST_FIELDS_TMPL = '''artistID musics { nodes { %s } } name''' @classmethod def suitable(cls, url): return False if re.match(PalcoMP3IE._VALID_URL, url) else super(PalcoMP3ArtistIE, cls).suitable(url) def _real_extract(self, url): artist_slug = self._match_id(url) artist = self._call_api(artist_slug, self._ARTIST_FIELDS_TMPL)['artist'] def entries(): for music in traverse_obj(artist, ( 'musics', 'nodes', lambda _, m: m['musicID'])): yield self._parse_music(music) return self.playlist_result( entries(), str_or_none(artist.get('artistID')), artist.get('name')) class PalcoMP3VideoIE(PalcoMP3BaseIE): IE_NAME = 'PalcoMP3:video' _VALID_URL = r'https?://(?:www\.)?palcomp3\.com(?:\.br)?/(?P<artist>[^/]+)/(?P<id>[^/?&#]+)/?#clipe' _TESTS = [{ 'url': 'https://www.palcomp3.com/maiaraemaraisaoficial/maiara-e-maraisa-voce-faz-falta-aqui-ao-vivo-em-vicosa-mg/#clipe', 'add_ie': ['Youtube'], 'info_dict': { 'id': '_pD1nR2qqPg', 'ext': 'mp4', 'title': 'Maiara e Maraisa - Você Faz Falta Aqui - DVD Ao Vivo Em Campo Grande', 'description': 'md5:7043342c09a224598e93546e98e49282', 'upload_date': '20161107', 'uploader_id': '@maiaramaraisaoficial', 'uploader': 'Maiara e Maraisa', } }] _MUSIC_FIELDS = 'youtubeID' def _parse_music(self, music): youtube_id = music['youtubeID'] return self.url_result(youtube_id, 'Youtube', youtube_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/dispeak.py
youtube_dl/extractor/dispeak.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( int_or_none, parse_duration, remove_end, xpath_element, xpath_text, ) class DigitallySpeakingIE(InfoExtractor): _VALID_URL = r'https?://(?:s?evt\.dispeak|events\.digitallyspeaking)\.com/(?:[^/]+/)+xml/(?P<id>[^.]+)\.xml' _TESTS = [{ # From http://gdcvault.com/play/1023460/Tenacious-Design-and-The-Interface 'url': 'http://evt.dispeak.com/ubm/gdc/sf16/xml/840376_BQRC.xml', 'md5': 'a8efb6c31ed06ca8739294960b2dbabd', 'info_dict': { 'id': '840376_BQRC', 'ext': 'mp4', 'title': 'Tenacious Design and The Interface of \'Destiny\'', }, }, { # From http://www.gdcvault.com/play/1014631/Classic-Game-Postmortem-PAC 'url': 'http://events.digitallyspeaking.com/gdc/sf11/xml/12396_1299111843500GMPX.xml', 'only_matching': True, }, { # From http://www.gdcvault.com/play/1013700/Advanced-Material 'url': 'http://sevt.dispeak.com/ubm/gdc/eur10/xml/11256_1282118587281VNIT.xml', 'only_matching': True, }, { # From https://gdcvault.com/play/1016624, empty speakerVideo 'url': 'https://sevt.dispeak.com/ubm/gdc/online12/xml/201210-822101_1349794556671DDDD.xml', 'info_dict': { 'id': '201210-822101_1349794556671DDDD', 'ext': 'flv', 'title': 'Pre-launch - Preparing to Take the Plunge', }, }, { # From http://www.gdcvault.com/play/1014846/Conference-Keynote-Shigeru, empty slideVideo 'url': 'http://events.digitallyspeaking.com/gdc/project25/xml/p25-miyamoto1999_1282467389849HSVB.xml', 'only_matching': True, }] def _parse_mp4(self, metadata): video_formats = [] video_root = None mp4_video = xpath_text(metadata, './mp4video', default=None) if mp4_video is not None: mobj = re.match(r'(?P<root>https?://.*?/).*', mp4_video) video_root = mobj.group('root') if video_root is None: http_host = xpath_text(metadata, 'httpHost', default=None) if http_host: video_root = 'http://%s/' % http_host if video_root is None: # Hard-coded in http://evt.dispeak.com/ubm/gdc/sf16/custom/player2.js # Works for GPUTechConf, 
too video_root = 'http://s3-2u.digitallyspeaking.com/' formats = metadata.findall('./MBRVideos/MBRVideo') if not formats: return None for a_format in formats: stream_name = xpath_text(a_format, 'streamName', fatal=True) video_path = re.match(r'mp4\:(?P<path>.*)', stream_name).group('path') url = video_root + video_path bitrate = xpath_text(a_format, 'bitrate') tbr = int_or_none(bitrate) vbr = int_or_none(self._search_regex( r'-(\d+)\.mp4', video_path, 'vbr', default=None)) abr = tbr - vbr if tbr and vbr else None video_formats.append({ 'format_id': bitrate, 'url': url, 'tbr': tbr, 'vbr': vbr, 'abr': abr, }) return video_formats def _parse_flv(self, metadata): formats = [] akamai_url = xpath_text(metadata, './akamaiHost', fatal=True) audios = metadata.findall('./audios/audio') for audio in audios: formats.append({ 'url': 'rtmp://%s/ondemand?ovpfv=1.1' % akamai_url, 'play_path': remove_end(audio.get('url'), '.flv'), 'ext': 'flv', 'vcodec': 'none', 'format_id': audio.get('code'), }) for video_key, format_id, preference in ( ('slide', 'slides', -2), ('speaker', 'speaker', -1)): video_path = xpath_text(metadata, './%sVideo' % video_key) if not video_path: continue formats.append({ 'url': 'rtmp://%s/ondemand?ovpfv=1.1' % akamai_url, 'play_path': remove_end(video_path, '.flv'), 'ext': 'flv', 'format_note': '%s video' % video_key, 'quality': preference, 'preference': preference, 'format_id': format_id, }) return formats def _real_extract(self, url): video_id = self._match_id(url) xml_description = self._download_xml(url, video_id) metadata = xpath_element(xml_description, 'metadata') video_formats = self._parse_mp4(metadata) if video_formats is None: video_formats = self._parse_flv(metadata) return { 'id': video_id, 'formats': video_formats, 'title': xpath_text(metadata, 'title', fatal=True), 'duration': parse_duration(xpath_text(metadata, 'endTime')), 'creator': xpath_text(metadata, 'speaker'), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/performgroup.py
youtube_dl/extractor/performgroup.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import int_or_none class PerformGroupIE(InfoExtractor): _VALID_URL = r'https?://player\.performgroup\.com/eplayer(?:/eplayer\.html|\.js)#/?(?P<id>[0-9a-f]{26})\.(?P<auth_token>[0-9a-z]{26})' _TESTS = [{ # http://www.faz.net/aktuell/sport/fussball/wm-2018-playoffs-schweiz-besiegt-nordirland-1-0-15286104.html 'url': 'http://player.performgroup.com/eplayer/eplayer.html#d478c41c5d192f56b9aa859de8.1w4crrej5w14e1ed4s1ce4ykab', 'md5': '259cb03d142e2e52471e8837ecacb29f', 'info_dict': { 'id': 'xgrwobuzumes1lwjxtcdpwgxd', 'ext': 'mp4', 'title': 'Liga MX: Keine Einsicht nach Horrorfoul', 'description': 'md5:7cd3b459c82725b021e046ab10bf1c5b', 'timestamp': 1511533477, 'upload_date': '20171124', } }] def _call_api(self, service, auth_token, content_id, referer_url): return self._download_json( 'http://ep3.performfeeds.com/ep%s/%s/%s/' % (service, auth_token, content_id), content_id, headers={ 'Referer': referer_url, 'Origin': 'http://player.performgroup.com', }, query={ '_fmt': 'json', }) def _real_extract(self, url): player_id, auth_token = re.search(self._VALID_URL, url).groups() bootstrap = self._call_api('bootstrap', auth_token, player_id, url) video = bootstrap['config']['dataSource']['sourceItems'][0]['videos'][0] video_id = video['uuid'] vod = self._call_api('vod', auth_token, video_id, url) media = vod['videos']['video'][0]['media'] formats = [] hls_url = media.get('hls', {}).get('url') if hls_url: formats.extend(self._extract_m3u8_formats(hls_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) hds_url = media.get('hds', {}).get('url') if hds_url: formats.extend(self._extract_f4m_formats(hds_url + '?hdcore', video_id, f4m_id='hds', fatal=False)) for c in media.get('content', []): c_url = c.get('url') if not c_url: continue tbr = int_or_none(c.get('bitrate'), 1000) format_id = 'http' if tbr: format_id += '-%d' % tbr formats.append({ 'format_id': 
format_id, 'url': c_url, 'tbr': tbr, 'width': int_or_none(c.get('width')), 'height': int_or_none(c.get('height')), 'filesize': int_or_none(c.get('fileSize')), 'vcodec': c.get('type'), 'fps': int_or_none(c.get('videoFrameRate')), 'vbr': int_or_none(c.get('videoRate'), 1000), 'abr': int_or_none(c.get('audioRate'), 1000), }) self._sort_formats(formats) return { 'id': video_id, 'title': video['title'], 'description': video.get('description'), 'thumbnail': video.get('poster'), 'duration': int_or_none(video.get('duration')), 'timestamp': int_or_none(video.get('publishedTime'), 1000), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/reverbnation.py
youtube_dl/extractor/reverbnation.py
from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( qualities, str_or_none, ) class ReverbNationIE(InfoExtractor): _VALID_URL = r'^https?://(?:www\.)?reverbnation\.com/.*?/song/(?P<id>\d+).*?$' _TESTS = [{ 'url': 'http://www.reverbnation.com/alkilados/song/16965047-mona-lisa', 'md5': 'c0aaf339bcee189495fdf5a8c8ba8645', 'info_dict': { 'id': '16965047', 'ext': 'mp3', 'title': 'MONA LISA', 'uploader': 'ALKILADOS', 'uploader_id': '216429', 'thumbnail': r're:^https?://.*\.jpg', }, }] def _real_extract(self, url): song_id = self._match_id(url) api_res = self._download_json( 'https://api.reverbnation.com/song/%s' % song_id, song_id, note='Downloading information of song %s' % song_id ) THUMBNAILS = ('thumbnail', 'image') quality = qualities(THUMBNAILS) thumbnails = [] for thumb_key in THUMBNAILS: if api_res.get(thumb_key): thumbnails.append({ 'url': api_res[thumb_key], 'preference': quality(thumb_key) }) return { 'id': song_id, 'title': api_res['name'], 'url': api_res['url'], 'uploader': api_res.get('artist', {}).get('name'), 'uploader_id': str_or_none(api_res.get('artist', {}).get('id')), 'thumbnails': thumbnails, 'ext': 'mp3', 'vcodec': 'none', }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/rumble.py
youtube_dl/extractor/rumble.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_str from ..utils import ( determine_ext, int_or_none, parse_iso8601, try_get, ) class RumbleEmbedIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?rumble\.com/embed/(?:[0-9a-z]+\.)?(?P<id>[0-9a-z]+)' _TESTS = [{ 'url': 'https://rumble.com/embed/v5pv5f', 'md5': '36a18a049856720189f30977ccbb2c34', 'info_dict': { 'id': 'v5pv5f', 'ext': 'mp4', 'title': 'WMAR 2 News Latest Headlines | October 20, 6pm', 'timestamp': 1571611968, 'upload_date': '20191020', } }, { 'url': 'https://rumble.com/embed/ufe9n.v5pv5f', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) video = self._download_json( 'https://rumble.com/embedJS/', video_id, query={'request': 'video', 'v': video_id}) title = video['title'] formats = [] for height, ua in (video.get('ua') or {}).items(): for i in range(2): f_url = try_get(ua, lambda x: x[i], compat_str) if f_url: ext = determine_ext(f_url) f = { 'ext': ext, 'format_id': '%s-%sp' % (ext, height), 'height': int_or_none(height), 'url': f_url, } bitrate = try_get(ua, lambda x: x[i + 2]['bitrate']) if bitrate: f['tbr'] = int_or_none(bitrate) formats.append(f) self._sort_formats(formats) author = video.get('author') or {} return { 'id': video_id, 'title': title, 'formats': formats, 'thumbnail': video.get('i'), 'timestamp': parse_iso8601(video.get('pubDate')), 'channel': author.get('name'), 'channel_url': author.get('url'), 'duration': int_or_none(video.get('duration')), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/urplay.py
youtube_dl/extractor/urplay.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( dict_get, ExtractorError, int_or_none, ISO639Utils, parse_age_limit, try_get, unified_timestamp, ) class URPlayIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?ur(?:play|skola)\.se/(?:program|Produkter)/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://urplay.se/program/203704-ur-samtiden-livet-universum-och-rymdens-markliga-musik-om-vetenskap-kritiskt-tankande-och-motstand', 'md5': 'ff5b0c89928f8083c74bbd5099c9292d', 'info_dict': { 'id': '203704', 'ext': 'mp4', 'title': 'UR Samtiden - Livet, universum och rymdens märkliga musik : Om vetenskap, kritiskt tänkande och motstånd', 'description': 'md5:5344508a52aa78c1ced6c1b8b9e44e9a', 'timestamp': 1513292400, 'upload_date': '20171214', 'series': 'UR Samtiden - Livet, universum och rymdens märkliga musik', 'duration': 2269, 'categories': ['Vetenskap & teknik'], 'tags': ['Kritiskt tänkande', 'Vetenskap', 'Vetenskaplig verksamhet'], 'episode': 'Om vetenskap, kritiskt tänkande och motstånd', 'age_limit': 15, }, }, { 'url': 'https://urskola.se/Produkter/190031-Tripp-Trapp-Trad-Sovkudde', 'info_dict': { 'id': '190031', 'ext': 'mp4', 'title': 'Tripp, Trapp, Träd : Sovkudde', 'description': 'md5:b86bffdae04a7e9379d1d7e5947df1d1', 'timestamp': 1440086400, 'upload_date': '20150820', 'series': 'Tripp, Trapp, Träd', 'duration': 865, 'tags': ['Sova'], 'episode': 'Sovkudde', }, }, { 'url': 'http://urskola.se/Produkter/155794-Smasagor-meankieli-Grodan-i-vida-varlden', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) url = url.replace('skola.se/Produkter', 'play.se/program') webpage = self._download_webpage(url, video_id) urplayer_data = self._search_regex( r'(?s)\bid\s*=\s*"__NEXT_DATA__"[^>]*>\s*({.+?})\s*</script', webpage, 'urplayer next data', fatal=False) or {} if urplayer_data: urplayer_data = self._parse_json(urplayer_data, video_id, fatal=False) urplayer_data = 
try_get(urplayer_data, lambda x: x['props']['pageProps']['program'], dict) if not urplayer_data: raise ExtractorError('Unable to parse __NEXT_DATA__') else: accessible_episodes = self._parse_json(self._html_search_regex( r'data-react-class="routes/Product/components/ProgramContainer/ProgramContainer"[^>]+data-react-props="({.+?})"', webpage, 'urplayer data'), video_id)['accessibleEpisodes'] urplayer_data = next(e for e in accessible_episodes if e.get('id') == int_or_none(video_id)) episode = urplayer_data['title'] raw_streaming_info = urplayer_data['streamingInfo']['raw'] host = self._download_json( 'http://streaming-loadbalancer.ur.se/loadbalancer.json', video_id)['redirect'] formats = [] for k, v in raw_streaming_info.items(): if not (k in ('sd', 'hd') and isinstance(v, dict)): continue file_http = v.get('location') if file_http: formats.extend(self._extract_wowza_formats( 'http://%s/%splaylist.m3u8' % (host, file_http), video_id, skip_protocols=['f4m', 'rtmp', 'rtsp'])) self._sort_formats(formats) subtitles = {} def parse_lang_code(code): "3-character language code or None (utils candidate)" if code is None: return lang = code.lower() if not ISO639Utils.long2short(lang): lang = ISO639Utils.short2long(lang) return lang or None for k, v in (urplayer_data['streamingInfo'].get('sweComplete') or {}).items(): if (k in ('sd', 'hd') or not isinstance(v, dict)): continue lang, sttl_url = (v.get(kk) for kk in ('language', 'location', )) if not sttl_url: continue lang = parse_lang_code(lang) if not lang: continue sttl = subtitles.get(lang) or [] sttl.append({'ext': k, 'url': sttl_url, }) subtitles[lang] = sttl image = urplayer_data.get('image') or {} thumbnails = [] for k, v in image.items(): t = { 'id': k, 'url': v, } wh = k.split('x') if len(wh) == 2: t.update({ 'width': int_or_none(wh[0]), 'height': int_or_none(wh[1]), }) thumbnails.append(t) series = urplayer_data.get('series') or {} series_title = dict_get(series, ('seriesTitle', 'title')) or dict_get(urplayer_data, 
('seriesTitle', 'mainTitle')) return { 'id': video_id, 'title': '%s : %s' % (series_title, episode) if series_title else episode, 'description': urplayer_data.get('description'), 'thumbnails': thumbnails, 'timestamp': unified_timestamp(urplayer_data.get('publishedAt')), 'series': series_title, 'formats': formats, 'duration': int_or_none(urplayer_data.get('duration')), 'categories': urplayer_data.get('categories'), 'tags': urplayer_data.get('keywords'), 'season': series.get('label'), 'episode': episode, 'episode_number': int_or_none(urplayer_data.get('episodeNumber')), 'age_limit': parse_age_limit(min(try_get(a, lambda x: x['from'], int) or 0 for a in urplayer_data.get('ageRanges', []))), 'subtitles': subtitles, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/webofstories.py
youtube_dl/extractor/webofstories.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( int_or_none, orderedSet, ) class WebOfStoriesIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?webofstories\.com/play/(?:[^/]+/)?(?P<id>[0-9]+)' _VIDEO_DOMAIN = 'http://eu-mobile.webofstories.com/' _GREAT_LIFE_STREAMER = 'rtmp://eu-cdn1.webofstories.com/cfx/st/' _USER_STREAMER = 'rtmp://eu-users.webofstories.com/cfx/st/' _TESTS = [{ 'url': 'http://www.webofstories.com/play/hans.bethe/71', 'md5': '373e4dd915f60cfe3116322642ddf364', 'info_dict': { 'id': '4536', 'ext': 'mp4', 'title': 'The temperature of the sun', 'thumbnail': r're:^https?://.*\.jpg$', 'description': 'Hans Bethe talks about calculating the temperature of the sun', 'duration': 238, } }, { 'url': 'http://www.webofstories.com/play/55908', 'md5': '2985a698e1fe3211022422c4b5ed962c', 'info_dict': { 'id': '55908', 'ext': 'mp4', 'title': 'The story of Gemmata obscuriglobus', 'thumbnail': r're:^https?://.*\.jpg$', 'description': 'Planctomycete talks about The story of Gemmata obscuriglobus', 'duration': 169, }, 'skip': 'notfound', }, { # malformed og:title meta 'url': 'http://www.webofstories.com/play/54215?o=MS', 'info_dict': { 'id': '54215', 'ext': 'mp4', 'title': '"A Leg to Stand On"', 'thumbnail': r're:^https?://.*\.jpg$', 'description': 'Oliver Sacks talks about the death and resurrection of a limb', 'duration': 97, }, 'params': { 'skip_download': True, }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) # Sometimes og:title meta is malformed title = self._og_search_title(webpage, default=None) or self._html_search_regex( r'(?s)<strong>Title:\s*</strong>(.+?)<', webpage, 'title') description = self._html_search_meta('description', webpage) thumbnail = self._og_search_thumbnail(webpage) embed_params = [s.strip(" \r\n\t'") for s in self._search_regex( r'(?s)\$\("#embedCode"\).html\(getEmbedCode\((.*?)\)', webpage, 'embed 
params').split(',')] ( _, speaker_id, story_id, story_duration, speaker_type, great_life, _thumbnail, _has_subtitles, story_filename, _story_order) = embed_params is_great_life_series = great_life == 'true' duration = int_or_none(story_duration) # URL building, see: http://www.webofstories.com/scripts/player.js ms_prefix = '' if speaker_type.lower() == 'ms': ms_prefix = 'mini_sites/' if is_great_life_series: mp4_url = '{0:}lives/{1:}/{2:}.mp4'.format( self._VIDEO_DOMAIN, speaker_id, story_filename) rtmp_ext = 'flv' streamer = self._GREAT_LIFE_STREAMER play_path = 'stories/{0:}/{1:}'.format( speaker_id, story_filename) else: mp4_url = '{0:}{1:}{2:}/{3:}.mp4'.format( self._VIDEO_DOMAIN, ms_prefix, speaker_id, story_filename) rtmp_ext = 'mp4' streamer = self._USER_STREAMER play_path = 'mp4:{0:}{1:}/{2}.mp4'.format( ms_prefix, speaker_id, story_filename) formats = [{ 'format_id': 'mp4_sd', 'url': mp4_url, }, { 'format_id': 'rtmp_sd', 'page_url': url, 'url': streamer, 'ext': rtmp_ext, 'play_path': play_path, }] self._sort_formats(formats) return { 'id': story_id, 'title': title, 'formats': formats, 'thumbnail': thumbnail, 'description': description, 'duration': duration, } class WebOfStoriesPlaylistIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?webofstories\.com/playAll/(?P<id>[^/]+)' _TEST = { 'url': 'http://www.webofstories.com/playAll/donald.knuth', 'info_dict': { 'id': 'donald.knuth', 'title': 'Donald Knuth (Scientist)', }, 'playlist_mincount': 97, } def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) entries = [ self.url_result( 'http://www.webofstories.com/play/%s' % video_id, 'WebOfStories', video_id=video_id) for video_id in orderedSet(re.findall(r'\bid=["\']td_(\d+)', webpage)) ] title = self._search_regex( r'<div id="speakerName">\s*<span>([^<]+)</span>', webpage, 'speaker', default=None) if title: field = self._search_regex( r'<span id="primaryField">([^<]+)</span>', webpage, 'field', 
default=None) if field: title += ' (%s)' % field if not title: title = self._search_regex( r'<title>Play\s+all\s+stories\s*-\s*([^<]+)\s*-\s*Web\s+of\s+Stories</title>', webpage, 'title') return self.playlist_result(entries, playlist_id, title)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/telequebec.py
youtube_dl/extractor/telequebec.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_str from ..utils import ( int_or_none, smuggle_url, try_get, unified_timestamp, ) class TeleQuebecBaseIE(InfoExtractor): BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s' @staticmethod def _brightcove_result(brightcove_id, player_id, account_id='6150020952001'): return { '_type': 'url_transparent', 'url': smuggle_url(TeleQuebecBaseIE.BRIGHTCOVE_URL_TEMPLATE % (account_id, player_id, brightcove_id), {'geo_countries': ['CA']}), 'ie_key': 'BrightcoveNew', } class TeleQuebecIE(TeleQuebecBaseIE): _VALID_URL = r'''(?x) https?:// (?: zonevideo\.telequebec\.tv/media| coucou\.telequebec\.tv/videos )/(?P<id>\d+) ''' _TESTS = [{ # available till 01.01.2023 'url': 'http://zonevideo.telequebec.tv/media/37578/un-petit-choc-et-puis-repart/un-chef-a-la-cabane', 'info_dict': { 'id': '6155972771001', 'ext': 'mp4', 'title': 'Un petit choc et puis repart!', 'description': 'md5:b04a7e6b3f74e32d7b294cffe8658374', 'timestamp': 1589262469, 'uploader_id': '6150020952001', 'upload_date': '20200512', }, 'params': { 'format': 'bestvideo', }, 'add_ie': ['BrightcoveNew'], }, { 'url': 'https://zonevideo.telequebec.tv/media/55267/le-soleil/passe-partout', 'info_dict': { 'id': '6167180337001', 'ext': 'mp4', 'title': 'Le soleil', 'description': 'md5:64289c922a8de2abbe99c354daffde02', 'uploader_id': '6150020952001', 'upload_date': '20200625', 'timestamp': 1593090307, }, 'params': { 'format': 'bestvideo', }, 'add_ie': ['BrightcoveNew'], }, { # no description 'url': 'http://zonevideo.telequebec.tv/media/30261', 'only_matching': True, }, { 'url': 'https://coucou.telequebec.tv/videos/41788/idee-de-genie/l-heure-du-bain', 'only_matching': True, }] def _real_extract(self, url): media_id = self._match_id(url) media = self._download_json( 'https://mnmedias.api.telequebec.tv/api/v3/media/' + media_id, media_id)['media'] source_id = 
next(source_info['sourceId'] for source_info in media['streamInfos'] if source_info.get('source') == 'Brightcove') info = self._brightcove_result(source_id, '22gPKdt7f') product = media.get('product') or {} season = product.get('season') or {} info.update({ 'description': try_get(media, lambda x: x['descriptions'][-1]['text'], compat_str), 'series': try_get(season, lambda x: x['serie']['titre']), 'season': season.get('name'), 'season_number': int_or_none(season.get('seasonNo')), 'episode': product.get('titre'), 'episode_number': int_or_none(product.get('episodeNo')), }) return info class TeleQuebecSquatIE(InfoExtractor): _VALID_URL = r'https://squat\.telequebec\.tv/videos/(?P<id>\d+)' _TESTS = [{ 'url': 'https://squat.telequebec.tv/videos/9314', 'info_dict': { 'id': 'd59ae78112d542e793d83cc9d3a5b530', 'ext': 'mp4', 'title': 'Poupeflekta', 'description': 'md5:2f0718f8d2f8fece1646ee25fb7bce75', 'duration': 1351, 'timestamp': 1569057600, 'upload_date': '20190921', 'series': 'Miraculous : Les Aventures de Ladybug et Chat Noir', 'season': 'Saison 3', 'season_number': 3, 'episode_number': 57, }, 'params': { 'skip_download': True, }, }] def _real_extract(self, url): video_id = self._match_id(url) video = self._download_json( 'https://squat.api.telequebec.tv/v1/videos/%s' % video_id, video_id) media_id = video['sourceId'] return { '_type': 'url_transparent', 'url': 'http://zonevideo.telequebec.tv/media/%s' % media_id, 'ie_key': TeleQuebecIE.ie_key(), 'id': media_id, 'title': video.get('titre'), 'description': video.get('description'), 'timestamp': unified_timestamp(video.get('datePublication')), 'series': video.get('container'), 'season': video.get('saison'), 'season_number': int_or_none(video.get('noSaison')), 'episode_number': int_or_none(video.get('episode')), } class TeleQuebecEmissionIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// (?: [^/]+\.telequebec\.tv/emissions/| (?:www\.)?telequebec\.tv/ ) (?P<id>[^?#&]+) ''' _TESTS = [{ 'url': 
'http://lindicemcsween.telequebec.tv/emissions/100430013/des-soins-esthetiques-a-377-d-interets-annuels-ca-vous-tente', 'info_dict': { 'id': '6154476028001', 'ext': 'mp4', 'title': 'Des soins esthétiques à 377 % d’intérêts annuels, ça vous tente?', 'description': 'md5:cb4d378e073fae6cce1f87c00f84ae9f', 'upload_date': '20200505', 'timestamp': 1588713424, 'uploader_id': '6150020952001', }, 'params': { 'format': 'bestvideo', }, }, { 'url': 'http://bancpublic.telequebec.tv/emissions/emission-49/31986/jeunes-meres-sous-pression', 'only_matching': True, }, { 'url': 'http://www.telequebec.tv/masha-et-michka/epi059masha-et-michka-3-053-078', 'only_matching': True, }, { 'url': 'http://www.telequebec.tv/documentaire/bebes-sur-mesure/', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) media_id = self._search_regex( r'mediaId\s*:\s*(?P<id>\d+)', webpage, 'media id') return self.url_result( 'http://zonevideo.telequebec.tv/media/' + media_id, TeleQuebecIE.ie_key()) class TeleQuebecLiveIE(TeleQuebecBaseIE): _VALID_URL = r'https?://zonevideo\.telequebec\.tv/(?P<id>endirect)' _TEST = { 'url': 'http://zonevideo.telequebec.tv/endirect/', 'info_dict': { 'id': '6159095684001', 'ext': 'mp4', 'title': 're:^Télé-Québec [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'is_live': True, 'description': 'Canal principal de Télé-Québec', 'uploader_id': '6150020952001', 'timestamp': 1590439901, 'upload_date': '20200525', }, 'params': { 'skip_download': True, }, } def _real_extract(self, url): return self._brightcove_result('6159095684001', 'skCsmi2Uw') class TeleQuebecVideoIE(TeleQuebecBaseIE): _VALID_URL = r'https?://video\.telequebec\.tv/player(?:-live)?/(?P<id>\d+)' _TESTS = [{ 'url': 'https://video.telequebec.tv/player/31110/stream', 'info_dict': { 'id': '6202570652001', 'ext': 'mp4', 'title': 'Le coût du véhicule le plus vendu au Canada / Tous les frais liés à la procréation assistée', 'description': 
'md5:685a7e4c450ba777c60adb6e71e41526', 'upload_date': '20201019', 'timestamp': 1603115930, 'uploader_id': '6101674910001', }, 'params': { 'format': 'bestvideo', }, }, { 'url': 'https://video.telequebec.tv/player-live/28527', 'only_matching': True, }] def _call_api(self, path, video_id): return self._download_json( 'http://beacon.playback.api.brightcove.com/telequebec/api/assets/' + path, video_id, query={'device_layout': 'web', 'device_type': 'web'})['data'] def _real_extract(self, url): asset_id = self._match_id(url) asset = self._call_api(asset_id, asset_id)['asset'] stream = self._call_api( asset_id + '/streams/' + asset['streams'][0]['id'], asset_id)['stream'] stream_url = stream['url'] account_id = try_get( stream, lambda x: x['video_provider_details']['account_id']) or '6101674910001' info = self._brightcove_result(stream_url, 'default', account_id) info.update({ 'description': asset.get('long_description') or asset.get('short_description'), 'series': asset.get('series_original_name'), 'season_number': int_or_none(asset.get('season_number')), 'episode': asset.get('original_name'), 'episode_number': int_or_none(asset.get('episode_number')), }) return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/xuite.py
youtube_dl/extractor/xuite.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( ExtractorError, float_or_none, get_element_by_attribute, parse_iso8601, remove_end, ) class XuiteIE(InfoExtractor): IE_DESC = '隨意窩Xuite影音' _REGEX_BASE64 = r'(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?' _VALID_URL = r'https?://vlog\.xuite\.net/(?:play|embed)/(?P<id>%s)' % _REGEX_BASE64 _TESTS = [{ # Audio 'url': 'http://vlog.xuite.net/play/RGkzc1ZULTM4NjA5MTQuZmx2', 'md5': 'e79284c87b371424885448d11f6398c8', 'info_dict': { 'id': '3860914', 'ext': 'mp3', 'title': '孤單南半球-歐德陽', 'description': '孤單南半球-歐德陽', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 247.246, 'timestamp': 1314932940, 'upload_date': '20110902', 'uploader': '阿能', 'uploader_id': '15973816', 'categories': ['個人短片'], }, }, { # Video with only one format 'url': 'http://vlog.xuite.net/play/WUxxR2xCLTI1OTI1MDk5LmZsdg==', 'md5': '21f7b39c009b5a4615b4463df6eb7a46', 'info_dict': { 'id': '25925099', 'ext': 'mp4', 'title': 'BigBuckBunny_320x180', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 596.458, 'timestamp': 1454242500, 'upload_date': '20160131', 'uploader': '屁姥', 'uploader_id': '12158353', 'categories': ['個人短片'], 'description': 'http://download.blender.org/peach/bigbuckbunny_movies/BigBuckBunny_320x180.mp4', }, }, { # Video with two formats 'url': 'http://vlog.xuite.net/play/bWo1N1pLLTIxMzAxMTcwLmZsdg==', 'md5': '1166e0f461efe55b62e26a2d2a68e6de', 'info_dict': { 'id': '21301170', 'ext': 'mp4', 'title': '暗殺教室 02', 'description': '字幕:【極影字幕社】', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 1384.907, 'timestamp': 1421481240, 'upload_date': '20150117', 'uploader': '我只是想認真點', 'uploader_id': '242127761', 'categories': ['電玩動漫'], }, 'skip': 'Video removed', }, { # Video with encoded media id # from http://forgetfulbc.blogspot.com/2016/06/date.html 'url': 'http://vlog.xuite.net/embed/cE1xbENoLTI3NDQ3MzM2LmZsdg==?ar=0&as=0', 'info_dict': { 'id': '27447336', 'ext': 'mp4', 
'title': '男女平權只是口號?專家解釋約會時男生是否該幫女生付錢 (中字)', 'description': 'md5:1223810fa123b179083a3aed53574706', 'timestamp': 1466160960, 'upload_date': '20160617', 'uploader': 'B.C. & Lowy', 'uploader_id': '232279340', }, }, { 'url': 'http://vlog.xuite.net/play/S1dDUjdyLTMyOTc3NjcuZmx2/%E5%AD%AB%E7%87%95%E5%A7%BF-%E7%9C%BC%E6%B7%9A%E6%88%90%E8%A9%A9', 'only_matching': True, }] def _real_extract(self, url): # /play/ URLs provide embedded video URL and more metadata url = url.replace('/embed/', '/play/') video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) error_msg = self._search_regex( r'<div id="error-message-content">([^<]+)', webpage, 'error message', default=None) if error_msg: raise ExtractorError( '%s returned error: %s' % (self.IE_NAME, error_msg), expected=True) media_info = self._parse_json(self._search_regex( r'var\s+mediaInfo\s*=\s*({.*});', webpage, 'media info'), video_id) video_id = media_info['MEDIA_ID'] formats = [] for key in ('html5Url', 'html5HQUrl'): video_url = media_info.get(key) if not video_url: continue format_id = self._search_regex( r'\bq=(.+?)\b', video_url, 'format id', default=None) formats.append({ 'url': video_url, 'ext': 'mp4' if format_id.isnumeric() else format_id, 'format_id': format_id, 'height': int(format_id) if format_id.isnumeric() else None, }) self._sort_formats(formats) timestamp = media_info.get('PUBLISH_DATETIME') if timestamp: timestamp = parse_iso8601(timestamp + ' +0800', ' ') category = media_info.get('catName') categories = [category] if category else [] uploader = media_info.get('NICKNAME') uploader_url = None author_div = get_element_by_attribute('itemprop', 'author', webpage) if author_div: uploader = uploader or self._html_search_meta('name', author_div) uploader_url = self._html_search_regex( r'<link[^>]+itemprop="url"[^>]+href="([^"]+)"', author_div, 'uploader URL', fatal=False) return { 'id': video_id, 'title': media_info['TITLE'], 'description': remove_end(media_info.get('metaDesc'), ' (Xuite 
影音)'), 'thumbnail': media_info.get('ogImageUrl'), 'timestamp': timestamp, 'uploader': uploader, 'uploader_id': media_info.get('MEMBER_ID'), 'uploader_url': uploader_url, 'duration': float_or_none(media_info.get('MEDIA_DURATION'), 1000000), 'categories': categories, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/spankwire.py
youtube_dl/extractor/spankwire.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( float_or_none, int_or_none, merge_dicts, str_or_none, str_to_int, url_or_none, ) class SpankwireIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// (?:www\.)?spankwire\.com/ (?: [^/]+/video| EmbedPlayer\.aspx/?\?.*?\bArticleId= ) (?P<id>\d+) ''' _TESTS = [{ # download URL pattern: */<height>P_<tbr>K_<video_id>.mp4 'url': 'http://www.spankwire.com/Buckcherry-s-X-Rated-Music-Video-Crazy-Bitch/video103545/', 'md5': '5aa0e4feef20aad82cbcae3aed7ab7cd', 'info_dict': { 'id': '103545', 'ext': 'mp4', 'title': 'Buckcherry`s X Rated Music Video Crazy Bitch', 'description': 'Crazy Bitch X rated music video.', 'duration': 222, 'uploader': 'oreusz', 'uploader_id': '124697', 'timestamp': 1178587885, 'upload_date': '20070508', 'average_rating': float, 'view_count': int, 'comment_count': int, 'age_limit': 18, 'categories': list, 'tags': list, }, }, { # download URL pattern: */mp4_<format_id>_<video_id>.mp4 'url': 'http://www.spankwire.com/Titcums-Compiloation-I/video1921551/', 'md5': '09b3c20833308b736ae8902db2f8d7e6', 'info_dict': { 'id': '1921551', 'ext': 'mp4', 'title': 'Titcums Compiloation I', 'description': 'cum on tits', 'uploader': 'dannyh78999', 'uploader_id': '3056053', 'upload_date': '20150822', 'age_limit': 18, }, 'params': { 'proxy': '127.0.0.1:8118' }, 'skip': 'removed', }, { 'url': 'https://www.spankwire.com/EmbedPlayer.aspx/?ArticleId=156156&autostart=true', 'only_matching': True, }] @staticmethod def _extract_urls(webpage): return re.findall( r'<iframe[^>]+\bsrc=["\']((?:https?:)?//(?:www\.)?spankwire\.com/EmbedPlayer\.aspx/?\?.*?\bArticleId=\d+)', webpage) def _real_extract(self, url): video_id = self._match_id(url) video = self._download_json( 'https://www.spankwire.com/api/video/%s.json' % video_id, video_id) title = video['title'] formats = [] videos = video.get('videos') if isinstance(videos, dict): for format_id, format_url in videos.items(): video_url = 
url_or_none(format_url) if not format_url: continue height = int_or_none(self._search_regex( r'(\d+)[pP]', format_id, 'height', default=None)) m = re.search( r'/(?P<height>\d+)[pP]_(?P<tbr>\d+)[kK]', video_url) if m: tbr = int(m.group('tbr')) height = height or int(m.group('height')) else: tbr = None formats.append({ 'url': video_url, 'format_id': '%dp' % height if height else format_id, 'height': height, 'tbr': tbr, }) m3u8_url = url_or_none(video.get('HLS')) if m3u8_url: formats.extend(self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) self._sort_formats(formats, ('height', 'tbr', 'width', 'format_id')) view_count = str_to_int(video.get('viewed')) thumbnails = [] for preference, t in enumerate(('', '2x'), start=0): thumbnail_url = url_or_none(video.get('poster%s' % t)) if not thumbnail_url: continue thumbnails.append({ 'url': thumbnail_url, 'preference': preference, }) def extract_names(key): entries_list = video.get(key) if not isinstance(entries_list, list): return entries = [] for entry in entries_list: name = str_or_none(entry.get('name')) if name: entries.append(name) return entries categories = extract_names('categories') tags = extract_names('tags') uploader = None info = {} webpage = self._download_webpage( 'https://www.spankwire.com/_/video%s/' % video_id, video_id, fatal=False) if webpage: info = self._search_json_ld(webpage, video_id, default={}) thumbnail_url = None if 'thumbnail' in info: thumbnail_url = url_or_none(info['thumbnail']) del info['thumbnail'] if not thumbnail_url: thumbnail_url = self._og_search_thumbnail(webpage) if thumbnail_url: thumbnails.append({ 'url': thumbnail_url, 'preference': 10, }) uploader = self._html_search_regex( r'(?s)by\s*<a[^>]+\bclass=["\']uploaded__by[^>]*>(.+?)</a>', webpage, 'uploader', fatal=False) if not view_count: view_count = str_to_int(self._search_regex( r'data-views=["\']([\d,.]+)', webpage, 'view count', fatal=False)) return merge_dicts({ 
'id': video_id, 'title': title, 'description': video.get('description'), 'duration': int_or_none(video.get('duration')), 'thumbnails': thumbnails, 'uploader': uploader, 'uploader_id': str_or_none(video.get('userId')), 'timestamp': int_or_none(video.get('time_approved_on')), 'average_rating': float_or_none(video.get('rating')), 'view_count': view_count, 'comment_count': int_or_none(video.get('comments')), 'age_limit': 18, 'categories': categories, 'tags': tags, 'formats': formats, }, info)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/thesun.py
youtube_dl/extractor/thesun.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import extract_attributes class TheSunIE(InfoExtractor): _VALID_URL = r'https://(?:www\.)?thesun\.co\.uk/[^/]+/(?P<id>\d+)' _TEST = { 'url': 'https://www.thesun.co.uk/tvandshowbiz/2261604/orlando-bloom-and-katy-perry-post-adorable-instagram-video-together-celebrating-thanksgiving-after-split-rumours/', 'info_dict': { 'id': '2261604', 'title': 'md5:cba22f48bad9218b64d5bbe0e16afddf', }, 'playlist_count': 2, } BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/default_default/index.html?videoId=%s' def _real_extract(self, url): article_id = self._match_id(url) webpage = self._download_webpage(url, article_id) entries = [] for video in re.findall( r'<video[^>]+data-video-id-pending=[^>]+>', webpage): attrs = extract_attributes(video) video_id = attrs['data-video-id-pending'] account_id = attrs.get('data-account', '5067014667001') entries.append(self.url_result( self.BRIGHTCOVE_URL_TEMPLATE % (account_id, video_id), 'BrightcoveNew', video_id)) return self.playlist_result( entries, article_id, self._og_search_title(webpage, fatal=False))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/einthusan.py
youtube_dl/extractor/einthusan.py
# coding: utf-8 from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..compat import ( compat_b64decode, compat_str, compat_urlparse, ) from ..utils import ( extract_attributes, ExtractorError, get_elements_by_class, urlencode_postdata, ) class EinthusanIE(InfoExtractor): _VALID_URL = r'https?://(?P<host>einthusan\.(?:tv|com|ca))/movie/watch/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://einthusan.tv/movie/watch/9097/', 'md5': 'ff0f7f2065031b8a2cf13a933731c035', 'info_dict': { 'id': '9097', 'ext': 'mp4', 'title': 'Ae Dil Hai Mushkil', 'description': 'md5:33ef934c82a671a94652a9b4e54d931b', 'thumbnail': r're:^https?://.*\.jpg$', } }, { 'url': 'https://einthusan.tv/movie/watch/51MZ/?lang=hindi', 'only_matching': True, }, { 'url': 'https://einthusan.com/movie/watch/9097/', 'only_matching': True, }, { 'url': 'https://einthusan.ca/movie/watch/4E9n/?lang=hindi', 'only_matching': True, }] # reversed from jsoncrypto.prototype.decrypt() in einthusan-PGMovieWatcher.js def _decrypt(self, encrypted_data, video_id): return self._parse_json(compat_b64decode(( encrypted_data[:10] + encrypted_data[-1] + encrypted_data[12:-1] )).decode('utf-8'), video_id) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) host = mobj.group('host') video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) title = self._html_search_regex(r'<h3>([^<]+)</h3>', webpage, 'title') player_params = extract_attributes(self._search_regex( r'(<section[^>]+id="UIVideoPlayer"[^>]+>)', webpage, 'player parameters')) page_id = self._html_search_regex( '<html[^>]+data-pageid="([^"]+)"', webpage, 'page ID') video_data = self._download_json( 'https://%s/ajax/movie/watch/%s/' % (host, video_id), video_id, data=urlencode_postdata({ 'xEvent': 'UIVideoPlayer.PingOutcome', 'xJson': json.dumps({ 'EJOutcomes': player_params['data-ejpingables'], 'NativeHLS': False }), 'arcVersion': 3, 'appVersion': 59, 'gorilla.csrf.Token': page_id, }))['Data'] 
if isinstance(video_data, compat_str) and video_data.startswith('/ratelimited/'): raise ExtractorError( 'Download rate reached. Please try again later.', expected=True) ej_links = self._decrypt(video_data['EJLinks'], video_id) formats = [] m3u8_url = ej_links.get('HLSLink') if m3u8_url: formats.extend(self._extract_m3u8_formats( m3u8_url, video_id, ext='mp4', entry_protocol='m3u8_native')) mp4_url = ej_links.get('MP4Link') if mp4_url: formats.append({ 'url': mp4_url, }) self._sort_formats(formats) description = get_elements_by_class('synopsis', webpage)[0] thumbnail = self._html_search_regex( r'''<img[^>]+src=(["'])(?P<url>(?!\1).+?/moviecovers/(?!\1).+?)\1''', webpage, 'thumbnail url', fatal=False, group='url') if thumbnail is not None: thumbnail = compat_urlparse.urljoin(url, thumbnail) return { 'id': video_id, 'title': title, 'formats': formats, 'thumbnail': thumbnail, 'description': description, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/videomore.py
youtube_dl/extractor/videomore.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_parse_qs, compat_str, compat_urllib_parse_urlparse, ) from ..utils import ( ExtractorError, int_or_none, ) class VideomoreBaseIE(InfoExtractor): _API_BASE_URL = 'https://more.tv/api/v3/web/' _VALID_URL_BASE = r'https?://(?:videomore\.ru|more\.tv)/' def _download_page_data(self, display_id): return self._download_json( self._API_BASE_URL + 'PageData', display_id, query={ 'url': '/' + display_id, })['attributes']['response']['data'] def _track_url_result(self, track): track_vod = track['trackVod'] video_url = track_vod.get('playerLink') or track_vod['link'] return self.url_result( video_url, VideomoreIE.ie_key(), track_vod.get('hubId')) class VideomoreIE(InfoExtractor): IE_NAME = 'videomore' _VALID_URL = r'''(?x) videomore:(?P<sid>\d+)$| https?:// (?: videomore\.ru/ (?: embed| [^/]+/[^/]+ )/| (?: (?:player\.)?videomore\.ru| siren\.more\.tv/player )/[^/]*\?.*?\btrack_id=| odysseus\.more.tv/player/(?P<partner_id>\d+)/ ) (?P<id>\d+) (?:[/?#&]|\.(?:xml|json)|$) ''' _TESTS = [{ 'url': 'http://videomore.ru/kino_v_detalayah/5_sezon/367617', 'md5': '44455a346edc0d509ac5b5a5b531dc35', 'info_dict': { 'id': '367617', 'ext': 'flv', 'title': 'Кино в деталях 5 сезон В гостях Алексей Чумаков и Юлия Ковальчук', 'series': 'Кино в деталях', 'episode': 'В гостях Алексей Чумаков и Юлия Ковальчук', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 2910, 'view_count': int, 'comment_count': int, 'age_limit': 16, }, 'skip': 'The video is not available for viewing.', }, { 'url': 'http://videomore.ru/embed/259974', 'info_dict': { 'id': '259974', 'ext': 'mp4', 'title': 'Молодежка 2 сезон 40 серия', 'series': 'Молодежка', 'season': '2 сезон', 'episode': '40 серия', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 2789, 'view_count': int, 'age_limit': 16, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://videomore.ru/molodezhka/sezon_promo/341073', 
'info_dict': { 'id': '341073', 'ext': 'flv', 'title': 'Промо Команда проиграла из-за Бакина?', 'episode': 'Команда проиграла из-за Бакина?', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 29, 'age_limit': 16, 'view_count': int, }, 'params': { 'skip_download': True, }, 'skip': 'The video is not available for viewing.', }, { 'url': 'http://videomore.ru/elki_3?track_id=364623', 'only_matching': True, }, { 'url': 'http://videomore.ru/embed/364623', 'only_matching': True, }, { 'url': 'http://videomore.ru/video/tracks/364623.xml', 'only_matching': True, }, { 'url': 'http://videomore.ru/video/tracks/364623.json', 'only_matching': True, }, { 'url': 'http://videomore.ru/video/tracks/158031/quotes/33248', 'only_matching': True, }, { 'url': 'videomore:367617', 'only_matching': True, }, { 'url': 'https://player.videomore.ru/?partner_id=97&track_id=736234&autoplay=0&userToken=', 'only_matching': True, }, { 'url': 'https://odysseus.more.tv/player/1788/352317', 'only_matching': True, }, { 'url': 'https://siren.more.tv/player/config?track_id=352317&partner_id=1788&user_token=', 'only_matching': True, }] _GEO_BYPASS = False @staticmethod def _extract_url(webpage): mobj = re.search( r'<object[^>]+data=(["\'])https?://videomore\.ru/player\.swf\?.*config=(?P<url>https?://videomore\.ru/(?:[^/]+/)+\d+\.xml).*\1', webpage) if not mobj: mobj = re.search( r'<iframe[^>]+src=([\'"])(?P<url>https?://videomore\.ru/embed/\d+)', webpage) if mobj: return mobj.group('url') def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('sid') or mobj.group('id') partner_id = mobj.group('partner_id') or compat_parse_qs(compat_urllib_parse_urlparse(url).query).get('partner_id', [None])[0] or '97' item = self._download_json( 'https://siren.more.tv/player/config', video_id, query={ 'partner_id': partner_id, 'track_id': video_id, })['data']['playlist']['items'][0] title = item.get('title') series = item.get('project_name') season = item.get('season_name') episode = 
item.get('episode_name') if not title: title = [] for v in (series, season, episode): if v: title.append(v) title = ' '.join(title) streams = item.get('streams') or [] for protocol in ('DASH', 'HLS'): stream_url = item.get(protocol.lower() + '_url') if stream_url: streams.append({'protocol': protocol, 'url': stream_url}) formats = [] for stream in streams: stream_url = stream.get('url') if not stream_url: continue protocol = stream.get('protocol') if protocol == 'DASH': formats.extend(self._extract_mpd_formats( stream_url, video_id, mpd_id='dash', fatal=False)) elif protocol == 'HLS': formats.extend(self._extract_m3u8_formats( stream_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) elif protocol == 'MSS': formats.extend(self._extract_ism_formats( stream_url, video_id, ism_id='mss', fatal=False)) if not formats: error = item.get('error') if error: if error in ('Данное видео недоступно для просмотра на территории этой страны', 'Данное видео доступно для просмотра только на территории России'): self.raise_geo_restricted(countries=['RU']) raise ExtractorError(error, expected=True) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'series': series, 'season': season, 'episode': episode, 'thumbnail': item.get('thumbnail_url'), 'duration': int_or_none(item.get('duration')), 'view_count': int_or_none(item.get('views')), 'age_limit': int_or_none(item.get('min_age')), 'formats': formats, } class VideomoreVideoIE(VideomoreBaseIE): IE_NAME = 'videomore:video' _VALID_URL = VideomoreBaseIE._VALID_URL_BASE + r'(?P<id>(?:(?:[^/]+/){2})?[^/?#&]+)(?:/*|[?#&].*?)$' _TESTS = [{ # single video with og:video:iframe 'url': 'http://videomore.ru/elki_3', 'info_dict': { 'id': '364623', 'ext': 'flv', 'title': 'Ёлки 3', 'description': '', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 5579, 'age_limit': 6, 'view_count': int, }, 'params': { 'skip_download': True, }, 'skip': 'Requires logging in', }, { # season single series with og:video:iframe 'url': 
'http://videomore.ru/poslednii_ment/1_sezon/14_seriya', 'info_dict': { 'id': '352317', 'ext': 'mp4', 'title': 'Последний мент 1 сезон 14 серия', 'series': 'Последний мент', 'season': '1 сезон', 'episode': '14 серия', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 2464, 'age_limit': 16, 'view_count': int, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://videomore.ru/sejchas_v_seti/serii_221-240/226_vypusk', 'only_matching': True, }, { # single video without og:video:iframe 'url': 'http://videomore.ru/marin_i_ego_druzya', 'info_dict': { 'id': '359073', 'ext': 'flv', 'title': '1 серия. Здравствуй, Аквавилль!', 'description': 'md5:c6003179538b5d353e7bcd5b1372b2d7', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 754, 'age_limit': 6, 'view_count': int, }, 'params': { 'skip_download': True, }, 'skip': 'redirects to https://more.tv/' }, { 'url': 'https://videomore.ru/molodezhka/6_sezon/29_seriya?utm_so', 'only_matching': True, }, { 'url': 'https://more.tv/poslednii_ment/1_sezon/14_seriya', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if VideomoreIE.suitable(url) else super(VideomoreVideoIE, cls).suitable(url) def _real_extract(self, url): display_id = self._match_id(url) return self._track_url_result(self._download_page_data(display_id)) class VideomoreSeasonIE(VideomoreBaseIE): IE_NAME = 'videomore:season' _VALID_URL = VideomoreBaseIE._VALID_URL_BASE + r'(?!embed)(?P<id>[^/]+/[^/?#&]+)(?:/*|[?#&].*?)$' _TESTS = [{ 'url': 'http://videomore.ru/molodezhka/film_o_filme', 'info_dict': { 'id': 'molodezhka/film_o_filme', 'title': 'Фильм о фильме', }, 'playlist_mincount': 3, }, { 'url': 'http://videomore.ru/molodezhka/sezon_promo?utm_so', 'only_matching': True, }, { 'url': 'https://more.tv/molodezhka/film_o_filme', 'only_matching': True, }] @classmethod def suitable(cls, url): return (False if (VideomoreIE.suitable(url) or VideomoreVideoIE.suitable(url)) else super(VideomoreSeasonIE, cls).suitable(url)) def _real_extract(self, 
url): display_id = self._match_id(url) season = self._download_page_data(display_id) season_id = compat_str(season['id']) tracks = self._download_json( self._API_BASE_URL + 'seasons/%s/tracks' % season_id, season_id)['data'] entries = [] for track in tracks: entries.append(self._track_url_result(track)) return self.playlist_result(entries, display_id, season.get('title'))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/podomatic.py
youtube_dl/extractor/podomatic.py
from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..utils import int_or_none class PodomaticIE(InfoExtractor): IE_NAME = 'podomatic' _VALID_URL = r'''(?x) (?P<proto>https?):// (?: (?P<channel>[^.]+)\.podomatic\.com/entry| (?:www\.)?podomatic\.com/podcasts/(?P<channel_2>[^/]+)/episodes )/ (?P<id>[^/?#&]+) ''' _TESTS = [{ 'url': 'http://scienceteachingtips.podomatic.com/entry/2009-01-02T16_03_35-08_00', 'md5': '84bb855fcf3429e6bf72460e1eed782d', 'info_dict': { 'id': '2009-01-02T16_03_35-08_00', 'ext': 'mp3', 'uploader': 'Science Teaching Tips', 'uploader_id': 'scienceteachingtips', 'title': '64. When the Moon Hits Your Eye', 'duration': 446, } }, { 'url': 'http://ostbahnhof.podomatic.com/entry/2013-11-15T16_31_21-08_00', 'md5': 'd2cf443931b6148e27638650e2638297', 'info_dict': { 'id': '2013-11-15T16_31_21-08_00', 'ext': 'mp3', 'uploader': 'Ostbahnhof / Techno Mix', 'uploader_id': 'ostbahnhof', 'title': 'Einunddreizig', 'duration': 3799, } }, { 'url': 'https://www.podomatic.com/podcasts/scienceteachingtips/episodes/2009-01-02T16_03_35-08_00', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') channel = mobj.group('channel') or mobj.group('channel_2') json_url = (('%s://%s.podomatic.com/entry/embed_params/%s' + '?permalink=true&rtmp=0') % (mobj.group('proto'), channel, video_id)) data_json = self._download_webpage( json_url, video_id, 'Downloading video info') data = json.loads(data_json) video_url = data['downloadLink'] if not video_url: video_url = '%s/%s' % (data['streamer'].replace('rtmp', 'http'), data['mediaLocation']) uploader = data['podcast'] title = data['title'] thumbnail = data['imageLocation'] duration = int_or_none(data.get('length'), 1000) return { 'id': video_id, 'url': video_url, 'title': title, 'uploader': uploader, 'uploader_id': channel, 'thumbnail': thumbnail, 'duration': duration, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/dlive.py
youtube_dl/extractor/dlive.py
from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..utils import int_or_none class DLiveVODIE(InfoExtractor): IE_NAME = 'dlive:vod' _VALID_URL = r'https?://(?:www\.)?dlive\.tv/p/(?P<uploader_id>.+?)\+(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://dlive.tv/p/pdp+3mTzOl4WR', 'info_dict': { 'id': '3mTzOl4WR', 'ext': 'mp4', 'title': 'Minecraft with james charles epic', 'upload_date': '20190701', 'timestamp': 1562011015, 'uploader_id': 'pdp', } }, { 'url': 'https://dlive.tv/p/pdpreplay+D-RD-xSZg', 'only_matching': True, }] def _real_extract(self, url): uploader_id, vod_id = re.match(self._VALID_URL, url).groups() broadcast = self._download_json( 'https://graphigo.prd.dlive.tv/', vod_id, data=json.dumps({'query': '''query { pastBroadcast(permlink:"%s+%s") { content createdAt length playbackUrl title thumbnailUrl viewCount } }''' % (uploader_id, vod_id)}).encode())['data']['pastBroadcast'] title = broadcast['title'] formats = self._extract_m3u8_formats( broadcast['playbackUrl'], vod_id, 'mp4', 'm3u8_native') self._sort_formats(formats) return { 'id': vod_id, 'title': title, 'uploader_id': uploader_id, 'formats': formats, 'description': broadcast.get('content'), 'thumbnail': broadcast.get('thumbnailUrl'), 'timestamp': int_or_none(broadcast.get('createdAt'), 1000), 'view_count': int_or_none(broadcast.get('viewCount')), } class DLiveStreamIE(InfoExtractor): IE_NAME = 'dlive:stream' _VALID_URL = r'https?://(?:www\.)?dlive\.tv/(?!p/)(?P<id>[\w.-]+)' def _real_extract(self, url): display_name = self._match_id(url) user = self._download_json( 'https://graphigo.prd.dlive.tv/', display_name, data=json.dumps({'query': '''query { userByDisplayName(displayname:"%s") { livestream { content createdAt title thumbnailUrl watchingCount } username } }''' % display_name}).encode())['data']['userByDisplayName'] livestream = user['livestream'] title = livestream['title'] username = user['username'] formats = self._extract_m3u8_formats( 
'https://live.prd.dlive.tv/hls/live/%s.m3u8' % username, display_name, 'mp4') self._sort_formats(formats) return { 'id': display_name, 'title': self._live_title(title), 'uploader': display_name, 'uploader_id': username, 'formats': formats, 'description': livestream.get('content'), 'thumbnail': livestream.get('thumbnailUrl'), 'is_live': True, 'timestamp': int_or_none(livestream.get('createdAt'), 1000), 'view_count': int_or_none(livestream.get('watchingCount')), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/yahoo.py
youtube_dl/extractor/yahoo.py
# coding: utf-8 from __future__ import unicode_literals import hashlib import itertools import re from .common import InfoExtractor, SearchInfoExtractor from ..compat import ( compat_str, compat_urllib_parse, ) from ..utils import ( clean_html, ExtractorError, int_or_none, mimetype2ext, parse_iso8601, smuggle_url, try_get, url_or_none, ) from .brightcove import BrightcoveNewIE class YahooIE(InfoExtractor): IE_DESC = 'Yahoo screen and movies' _VALID_URL = r'(?P<url>https?://(?:(?P<country>[a-zA-Z]{2}(?:-[a-zA-Z]{2})?|malaysia)\.)?(?:[\da-zA-Z_-]+\.)?yahoo\.com/(?:[^/]+/)*(?P<id>[^?&#]*-[0-9]+(?:-[a-z]+)?)\.html)' _TESTS = [{ 'url': 'http://screen.yahoo.com/julian-smith-travis-legg-watch-214727115.html', 'info_dict': { 'id': '2d25e626-2378-391f-ada0-ddaf1417e588', 'ext': 'mp4', 'title': 'Julian Smith & Travis Legg Watch Julian Smith', 'description': 'Julian and Travis watch Julian Smith', 'duration': 6863, 'timestamp': 1369812016, 'upload_date': '20130529', }, }, { 'url': 'https://screen.yahoo.com/community/community-sizzle-reel-203225340.html?format=embed', 'md5': '7993e572fac98e044588d0b5260f4352', 'info_dict': { 'id': '4fe78544-8d48-39d8-97cd-13f205d9fcdb', 'ext': 'mp4', 'title': "Yahoo Saves 'Community'", 'description': 'md5:4d4145af2fd3de00cbb6c1d664105053', 'duration': 170, 'timestamp': 1406838636, 'upload_date': '20140731', }, }, { 'url': 'https://uk.screen.yahoo.com/editor-picks/cute-raccoon-freed-drain-using-091756545.html', 'md5': '71298482f7c64cbb7fa064e4553ff1c1', 'info_dict': { 'id': 'b3affa53-2e14-3590-852b-0e0db6cd1a58', 'ext': 'webm', 'title': 'Cute Raccoon Freed From Drain\u00a0Using Angle Grinder', 'description': 'md5:f66c890e1490f4910a9953c941dee944', 'duration': 97, 'timestamp': 1414489862, 'upload_date': '20141028', } }, { 'url': 'http://news.yahoo.com/video/china-moses-crazy-blues-104538833.html', 'md5': '88e209b417f173d86186bef6e4d1f160', 'info_dict': { 'id': 'f885cf7f-43d4-3450-9fac-46ac30ece521', 'ext': 'mp4', 'title': 'China Moses Is Crazy 
About the Blues', 'description': 'md5:9900ab8cd5808175c7b3fe55b979bed0', 'duration': 128, 'timestamp': 1385722202, 'upload_date': '20131129', } }, { 'url': 'https://www.yahoo.com/movies/v/true-story-trailer-173000497.html', 'md5': '2a9752f74cb898af5d1083ea9f661b58', 'info_dict': { 'id': '071c4013-ce30-3a93-a5b2-e0413cd4a9d1', 'ext': 'mp4', 'title': '\'True Story\' Trailer', 'description': 'True Story', 'duration': 150, 'timestamp': 1418919206, 'upload_date': '20141218', }, }, { 'url': 'https://gma.yahoo.com/pizza-delivery-man-surprised-huge-tip-college-kids-195200785.html', 'only_matching': True, }, { 'note': 'NBC Sports embeds', 'url': 'http://sports.yahoo.com/blogs/ncaab-the-dagger/tyler-kalinoski-s-buzzer-beater-caps-davidson-s-comeback-win-185609842.html?guid=nbc_cbk_davidsonbuzzerbeater_150313', 'info_dict': { 'id': '9CsDKds0kvHI', 'ext': 'flv', 'description': 'md5:df390f70a9ba7c95ff1daace988f0d8d', 'title': 'Tyler Kalinoski hits buzzer-beater to lift Davidson', 'upload_date': '20150313', 'uploader': 'NBCU-SPORTS', 'timestamp': 1426270238, }, }, { 'url': 'https://tw.news.yahoo.com/-100120367.html', 'only_matching': True, }, { # Query result is embedded in webpage, but explicit request to video API fails with geo restriction 'url': 'https://screen.yahoo.com/community/communitary-community-episode-1-ladders-154501237.html', 'md5': '4fbafb9c9b6f07aa8f870629f6671b35', 'info_dict': { 'id': '1f32853c-a271-3eef-8cb6-f6d6872cb504', 'ext': 'mp4', 'title': 'Communitary - Community Episode 1: Ladders', 'description': 'md5:8fc39608213295748e1e289807838c97', 'duration': 1646, 'timestamp': 1440436550, 'upload_date': '20150824', 'series': 'Communitary', 'season_number': 6, 'episode_number': 1, }, }, { # ytwnews://cavideo/ 'url': 'https://tw.video.yahoo.com/movie-tw/單車天使-中文版預-092316541.html', 'info_dict': { 'id': 'ba133ff2-0793-3510-b636-59dfe9ff6cff', 'ext': 'mp4', 'title': '單車天使 - 中文版預', 'description': '中文版預', 'timestamp': 1476696196, 'upload_date': '20161017', }, 'params': 
{ 'skip_download': True, }, }, { # Contains both a Yahoo hosted video and multiple Youtube embeds 'url': 'https://www.yahoo.com/entertainment/gwen-stefani-reveals-the-pop-hit-she-passed-on-assigns-it-to-her-voice-contestant-instead-033045672.html', 'info_dict': { 'id': '46c5d95a-528f-3d03-b732-732fcadd51de', 'title': 'Gwen Stefani reveals the pop hit she passed on, assigns it to her \'Voice\' contestant instead', 'description': 'Gwen decided not to record this hit herself, but she decided it was the perfect fit for Kyndall Inskeep.', }, 'playlist': [{ 'info_dict': { 'id': '966d4262-4fd1-3aaa-b45b-049ca6e38ba6', 'ext': 'mp4', 'title': 'Gwen Stefani reveals she turned down one of Sia\'s best songs', 'description': 'On "The Voice" Tuesday, Gwen Stefani told Taylor Swift which Sia hit was almost hers.', 'timestamp': 1572406500, 'upload_date': '20191030', }, }, { 'info_dict': { 'id': '352CFDOQrKg', 'ext': 'mp4', 'title': 'Kyndal Inskeep "Performs the Hell Out of" Sia\'s "Elastic Heart" - The Voice Knockouts 2019', 'description': 'md5:35b61e94c2ae214bc965ff4245f80d11', 'uploader': 'The Voice', 'uploader_id': 'NBCTheVoice', 'upload_date': '20191029', }, }], 'params': { 'playlistend': 2, }, 'expected_warnings': ['HTTP Error 404'], }, { 'url': 'https://malaysia.news.yahoo.com/video/bystanders-help-ontario-policeman-bust-190932818.html', 'only_matching': True, }, { 'url': 'https://es-us.noticias.yahoo.com/es-la-puerta-irrompible-que-110539379.html', 'only_matching': True, }, { 'url': 'https://www.yahoo.com/entertainment/v/longtime-cbs-news-60-minutes-032036500-cbs.html', 'only_matching': True, }] def _extract_yahoo_video(self, video_id, country): video = self._download_json( 'https://%s.yahoo.com/_td/api/resource/VideoService.videos;view=full;video_ids=["%s"]' % (country, video_id), video_id, 'Downloading video JSON metadata')[0] title = video['title'] if country == 'malaysia': country = 'my' is_live = video.get('live_state') == 'live' fmts = ('m3u8',) if is_live else 
('webm', 'mp4') urls = [] formats = [] subtitles = {} for fmt in fmts: media_obj = self._download_json( 'https://video-api.yql.yahoo.com/v1/video/sapi/streams/' + video_id, video_id, 'Downloading %s JSON metadata' % fmt, headers=self.geo_verification_headers(), query={ 'format': fmt, 'region': country.upper(), })['query']['results']['mediaObj'][0] msg = media_obj.get('status', {}).get('msg') for s in media_obj.get('streams', []): host = s.get('host') path = s.get('path') if not host or not path: continue s_url = host + path if s.get('format') == 'm3u8': formats.extend(self._extract_m3u8_formats( s_url, video_id, 'mp4', m3u8_id='hls', fatal=False)) continue tbr = int_or_none(s.get('bitrate')) formats.append({ 'url': s_url, 'format_id': fmt + ('-%d' % tbr if tbr else ''), 'width': int_or_none(s.get('width')), 'height': int_or_none(s.get('height')), 'tbr': tbr, 'fps': int_or_none(s.get('framerate')), }) for cc in media_obj.get('closedcaptions', []): cc_url = cc.get('url') if not cc_url or cc_url in urls: continue urls.append(cc_url) subtitles.setdefault(cc.get('lang') or 'en-US', []).append({ 'url': cc_url, 'ext': mimetype2ext(cc.get('content_type')), }) streaming_url = video.get('streaming_url') if streaming_url and not is_live: formats.extend(self._extract_m3u8_formats( streaming_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) if not formats and msg == 'geo restricted': self.raise_geo_restricted() self._sort_formats(formats) thumbnails = [] for thumb in video.get('thumbnails', []): thumb_url = thumb.get('url') if not thumb_url: continue thumbnails.append({ 'id': thumb.get('tag'), 'url': thumb.get('url'), 'width': int_or_none(thumb.get('width')), 'height': int_or_none(thumb.get('height')), }) series_info = video.get('series_info') or {} return { 'id': video_id, 'title': self._live_title(title) if is_live else title, 'formats': formats, 'thumbnails': thumbnails, 'description': clean_html(video.get('description')), 'timestamp': 
parse_iso8601(video.get('publish_time')), 'subtitles': subtitles, 'duration': int_or_none(video.get('duration')), 'view_count': int_or_none(video.get('view_count')), 'is_live': is_live, 'series': video.get('show_name'), 'season_number': int_or_none(series_info.get('season_number')), 'episode_number': int_or_none(series_info.get('episode_number')), } def _real_extract(self, url): url, country, display_id = re.match(self._VALID_URL, url).groups() if not country: country = 'us' else: country = country.split('-')[0] item = self._download_json( 'https://%s.yahoo.com/caas/content/article' % country, display_id, 'Downloading content JSON metadata', query={ 'url': url })['items'][0]['data']['partnerData'] if item.get('type') != 'video': entries = [] cover = item.get('cover') or {} if cover.get('type') == 'yvideo': cover_url = cover.get('url') if cover_url: entries.append(self.url_result( cover_url, 'Yahoo', cover.get('uuid'))) for e in (item.get('body') or []): if e.get('type') == 'videoIframe': iframe_url = e.get('url') if not iframe_url: continue entries.append(self.url_result(iframe_url)) return self.playlist_result( entries, item.get('uuid'), item.get('title'), item.get('summary')) info = self._extract_yahoo_video(item['uuid'], country) info['display_id'] = display_id return info class YahooSearchIE(SearchInfoExtractor): IE_DESC = 'Yahoo screen search' _MAX_RESULTS = 1000 IE_NAME = 'screen.yahoo:search' _SEARCH_KEY = 'yvsearch' def _get_n_results(self, query, n): """Get a specified number of results for a query""" entries = [] for pagenum in itertools.count(0): result_url = 'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30) info = self._download_json(result_url, query, note='Downloading results page ' + str(pagenum + 1)) m = info['m'] results = info['results'] for (i, r) in enumerate(results): if (pagenum * 30) + i >= n: break mobj = re.search(r'(?P<url>screen\.yahoo\.com/.*?-\d*?\.html)"', r) e 
= self.url_result('http://' + mobj.group('url'), 'Yahoo') entries.append(e) if (pagenum * 30 + i >= n) or (m['last'] >= (m['total'] - 1)): break return { '_type': 'playlist', 'id': query, 'entries': entries, } class YahooGyaOPlayerIE(InfoExtractor): IE_NAME = 'yahoo:gyao:player' _VALID_URL = r'https?://(?:gyao\.yahoo\.co\.jp/(?:player|episode/[^/]+)|streaming\.yahoo\.co\.jp/c/y)/(?P<id>\d+/v\d+/v\d+|[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})' _TESTS = [{ 'url': 'https://gyao.yahoo.co.jp/player/00998/v00818/v0000000000000008564/', 'info_dict': { 'id': '5993125228001', 'ext': 'mp4', 'title': 'フューリー 【字幕版】', 'description': 'md5:21e691c798a15330eda4db17a8fe45a5', 'uploader_id': '4235717419001', 'upload_date': '20190124', 'timestamp': 1548294365, }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'https://streaming.yahoo.co.jp/c/y/01034/v00133/v0000000000000000706/', 'only_matching': True, }, { 'url': 'https://gyao.yahoo.co.jp/episode/%E3%81%8D%E3%81%AE%E3%81%86%E4%BD%95%E9%A3%9F%E3%81%B9%E3%81%9F%EF%BC%9F%20%E7%AC%AC2%E8%A9%B1%202019%2F4%2F12%E6%94%BE%E9%80%81%E5%88%86/5cb02352-b725-409e-9f8d-88f947a9f682', 'only_matching': True, }] _GEO_BYPASS = False def _real_extract(self, url): video_id = self._match_id(url).replace('/', ':') headers = self.geo_verification_headers() headers['Accept'] = 'application/json' resp = self._download_json( 'https://gyao.yahoo.co.jp/apis/playback/graphql', video_id, query={ 'appId': 'dj00aiZpPUNJeDh2cU1RazU3UCZzPWNvbnN1bWVyc2VjcmV0Jng9NTk-', 'query': '''{ content(parameter: {contentId: "%s", logicaAgent: PC_WEB}) { video { delivery { id } title } } }''' % video_id, }, headers=headers) content = resp['data']['content'] if not content: msg = resp['errors'][0]['message'] if msg == 'not in japan': self.raise_geo_restricted(countries=['JP']) raise ExtractorError(msg) video = content['video'] return { '_type': 'url_transparent', 'id': video_id, 'title': video['title'], 'url': smuggle_url( 
'http://players.brightcove.net/4235717419001/SyG5P0gjb_default/index.html?videoId=' + video['delivery']['id'], {'geo_countries': ['JP']}), 'ie_key': BrightcoveNewIE.ie_key(), } class YahooGyaOIE(InfoExtractor): IE_NAME = 'yahoo:gyao' _VALID_URL = r'https?://(?:gyao\.yahoo\.co\.jp/(?:p|title(?:/[^/]+)?)|streaming\.yahoo\.co\.jp/p/y)/(?P<id>\d+/v\d+|[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})' _TESTS = [{ 'url': 'https://gyao.yahoo.co.jp/p/00449/v03102/', 'info_dict': { 'id': '00449:v03102', }, 'playlist_count': 2, }, { 'url': 'https://streaming.yahoo.co.jp/p/y/01034/v00133/', 'only_matching': True, }, { 'url': 'https://gyao.yahoo.co.jp/title/%E3%81%97%E3%82%83%E3%81%B9%E3%81%8F%E3%82%8A007/5b025a49-b2e5-4dc7-945c-09c6634afacf', 'only_matching': True, }, { 'url': 'https://gyao.yahoo.co.jp/title/5b025a49-b2e5-4dc7-945c-09c6634afacf', 'only_matching': True, }] def _real_extract(self, url): program_id = self._match_id(url).replace('/', ':') videos = self._download_json( 'https://gyao.yahoo.co.jp/api/programs/%s/videos' % program_id, program_id)['videos'] entries = [] for video in videos: video_id = video.get('id') if not video_id: continue entries.append(self.url_result( 'https://gyao.yahoo.co.jp/player/%s/' % video_id.replace(':', '/'), YahooGyaOPlayerIE.ie_key(), video_id)) return self.playlist_result(entries, program_id) class YahooJapanNewsIE(InfoExtractor): IE_NAME = 'yahoo:japannews' IE_DESC = 'Yahoo! Japan News' _VALID_URL = r'https?://(?P<host>(?:news|headlines)\.yahoo\.co\.jp)[^\d]*(?P<id>\d[\d-]*\d)?' 
_GEO_COUNTRIES = ['JP'] _TESTS = [{ 'url': 'https://headlines.yahoo.co.jp/videonews/ann?a=20190716-00000071-ann-int', 'info_dict': { 'id': '1736242', 'ext': 'mp4', 'title': 'ムン大統領が対日批判を強化“現金化”効果は?(テレビ朝日系(ANN)) - Yahoo!ニュース', 'description': '韓国の元徴用工らを巡る裁判の原告が弁護士が差し押さえた三菱重工業の資産を売却して - Yahoo!ニュース(テレビ朝日系(ANN))', 'thumbnail': r're:^https?://.*\.[a-zA-Z\d]{3,4}$', }, 'params': { 'skip_download': True, }, }, { # geo restricted 'url': 'https://headlines.yahoo.co.jp/hl?a=20190721-00000001-oxv-l04', 'only_matching': True, }, { 'url': 'https://headlines.yahoo.co.jp/videonews/', 'only_matching': True, }, { 'url': 'https://news.yahoo.co.jp', 'only_matching': True, }, { 'url': 'https://news.yahoo.co.jp/byline/hashimotojunji/20190628-00131977/', 'only_matching': True, }, { 'url': 'https://news.yahoo.co.jp/feature/1356', 'only_matching': True }] def _extract_formats(self, json_data, content_id): formats = [] video_data = try_get( json_data, lambda x: x['ResultSet']['Result'][0]['VideoUrlSet']['VideoUrl'], list) for vid in video_data or []: delivery = vid.get('delivery') url = url_or_none(vid.get('Url')) if not delivery or not url: continue elif delivery == 'hls': formats.extend( self._extract_m3u8_formats( url, content_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) else: formats.append({ 'url': url, 'format_id': 'http-%s' % compat_str(vid.get('bitrate', '')), 'height': int_or_none(vid.get('height')), 'width': int_or_none(vid.get('width')), 'tbr': int_or_none(vid.get('bitrate')), }) self._remove_duplicate_formats(formats) self._sort_formats(formats) return formats def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) host = mobj.group('host') display_id = mobj.group('id') or host webpage = self._download_webpage(url, display_id) title = self._html_search_meta( ['og:title', 'twitter:title'], webpage, 'title', default=None ) or self._html_search_regex('<title>([^<]+)</title>', webpage, 'title') if display_id == host: # Headline page (w/ multiple BC playlists) 
('news.yahoo.co.jp', 'headlines.yahoo.co.jp/videonews/', ...) stream_plists = re.findall(r'plist=(\d+)', webpage) or re.findall(r'plist["\']:\s*["\']([^"\']+)', webpage) entries = [ self.url_result( smuggle_url( 'http://players.brightcove.net/5690807595001/HyZNerRl7_default/index.html?playlistId=%s' % plist_id, {'geo_countries': ['JP']}), ie='BrightcoveNew', video_id=plist_id) for plist_id in stream_plists] return self.playlist_result(entries, playlist_title=title) # Article page description = self._html_search_meta( ['og:description', 'description', 'twitter:description'], webpage, 'description', default=None) thumbnail = self._og_search_thumbnail( webpage, default=None) or self._html_search_meta( 'twitter:image', webpage, 'thumbnail', default=None) space_id = self._search_regex([ r'<script[^>]+class=["\']yvpub-player["\'][^>]+spaceid=([^&"\']+)', r'YAHOO\.JP\.srch\.\w+link\.onLoad[^;]+spaceID["\' ]*:["\' ]+([^"\']+)', r'<!--\s+SpaceID=(\d+)' ], webpage, 'spaceid') content_id = self._search_regex( r'<script[^>]+class=["\']yvpub-player["\'][^>]+contentid=(?P<contentid>[^&"\']+)', webpage, 'contentid', group='contentid') json_data = self._download_json( 'https://feapi-yvpub.yahooapis.jp/v1/content/%s' % content_id, content_id, query={ 'appid': 'dj0zaiZpPVZMTVFJR0FwZWpiMyZzPWNvbnN1bWVyc2VjcmV0Jng9YjU-', 'output': 'json', 'space_id': space_id, 'domain': host, 'ak': hashlib.md5('_'.join((space_id, host)).encode()).hexdigest(), 'device_type': '1100', }) formats = self._extract_formats(json_data, content_id) return { 'id': content_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/rice.py
youtube_dl/extractor/rice.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_parse_qs from ..utils import ( xpath_text, xpath_element, int_or_none, parse_iso8601, ExtractorError, ) class RICEIE(InfoExtractor): _VALID_URL = r'https?://mediahub\.rice\.edu/app/[Pp]ortal/video\.aspx\?(?P<query>.+)' _TEST = { 'url': 'https://mediahub.rice.edu/app/Portal/video.aspx?PortalID=25ffd62c-3d01-4b29-8c70-7c94270efb3e&DestinationID=66bc9434-03bd-4725-b47e-c659d8d809db&ContentID=YEWIvbhb40aqdjMD1ALSqw', 'md5': '9b83b4a2eead4912dc3b7fac7c449b6a', 'info_dict': { 'id': 'YEWIvbhb40aqdjMD1ALSqw', 'ext': 'mp4', 'title': 'Active Learning in Archeology', 'upload_date': '20140616', 'timestamp': 1402926346, } } _NS = 'http://schemas.datacontract.org/2004/07/ensembleVideo.Data.Service.Contracts.Models.Player.Config' def _real_extract(self, url): qs = compat_parse_qs(re.match(self._VALID_URL, url).group('query')) if not qs.get('PortalID') or not qs.get('DestinationID') or not qs.get('ContentID'): raise ExtractorError('Invalid URL', expected=True) portal_id = qs['PortalID'][0] playlist_id = qs['DestinationID'][0] content_id = qs['ContentID'][0] content_data = self._download_xml('https://mediahub.rice.edu/api/portal/GetContentTitle', content_id, query={ 'portalId': portal_id, 'playlistId': playlist_id, 'contentId': content_id }) metadata = xpath_element(content_data, './/metaData', fatal=True) title = xpath_text(metadata, 'primaryTitle', fatal=True) encodings = xpath_element(content_data, './/encodings', fatal=True) player_data = self._download_xml('https://mediahub.rice.edu/api/player/GetPlayerConfig', content_id, query={ 'temporaryLinkId': xpath_text(encodings, 'temporaryLinkId', fatal=True), 'contentId': content_id, }) common_fmt = {} dimensions = xpath_text(encodings, 'dimensions') if dimensions: wh = dimensions.split('x') if len(wh) == 2: common_fmt.update({ 'width': int_or_none(wh[0]), 'height': int_or_none(wh[1]), }) formats = [] 
rtsp_path = xpath_text(player_data, self._xpath_ns('RtspPath', self._NS)) if rtsp_path: fmt = { 'url': rtsp_path, 'format_id': 'rtsp', } fmt.update(common_fmt) formats.append(fmt) for source in player_data.findall(self._xpath_ns('.//Source', self._NS)): video_url = xpath_text(source, self._xpath_ns('File', self._NS)) if not video_url: continue if '.m3u8' in video_url: formats.extend(self._extract_m3u8_formats(video_url, content_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) else: fmt = { 'url': video_url, 'format_id': video_url.split(':')[0], } fmt.update(common_fmt) rtmp = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>.+))/(?P<playpath>mp4:.+)$', video_url) if rtmp: fmt.update({ 'url': rtmp.group('url'), 'play_path': rtmp.group('playpath'), 'app': rtmp.group('app'), 'ext': 'flv', }) formats.append(fmt) self._sort_formats(formats) thumbnails = [] for content_asset in content_data.findall('.//contentAssets'): asset_type = xpath_text(content_asset, 'type') if asset_type == 'image': image_url = xpath_text(content_asset, 'httpPath') if not image_url: continue thumbnails.append({ 'id': xpath_text(content_asset, 'ID'), 'url': image_url, }) return { 'id': content_id, 'title': title, 'description': xpath_text(metadata, 'abstract'), 'duration': int_or_none(xpath_text(metadata, 'duration')), 'timestamp': parse_iso8601(xpath_text(metadata, 'dateUpdated')), 'thumbnails': thumbnails, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/linkedin.py
youtube_dl/extractor/linkedin.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, float_or_none, int_or_none, urlencode_postdata, urljoin, ) class LinkedInLearningBaseIE(InfoExtractor): _NETRC_MACHINE = 'linkedin' _LOGIN_URL = 'https://www.linkedin.com/uas/login?trk=learning' def _call_api(self, course_slug, fields, video_slug=None, resolution=None): query = { 'courseSlug': course_slug, 'fields': fields, 'q': 'slugs', } sub = '' if video_slug: query.update({ 'videoSlug': video_slug, 'resolution': '_%s' % resolution, }) sub = ' %dp' % resolution api_url = 'https://www.linkedin.com/learning-api/detailedCourses' return self._download_json( api_url, video_slug, 'Downloading%s JSON metadata' % sub, headers={ 'Csrf-Token': self._get_cookies(api_url)['JSESSIONID'].value, }, query=query)['elements'][0] def _get_urn_id(self, video_data): urn = video_data.get('urn') if urn: mobj = re.search(r'urn:li:lyndaCourse:\d+,(\d+)', urn) if mobj: return mobj.group(1) def _get_video_id(self, video_data, course_slug, video_slug): return self._get_urn_id(video_data) or '%s/%s' % (course_slug, video_slug) def _real_initialize(self): email, password = self._get_login_info() if email is None: return login_page = self._download_webpage( self._LOGIN_URL, None, 'Downloading login page') action_url = urljoin(self._LOGIN_URL, self._search_regex( r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page, 'post url', default='https://www.linkedin.com/uas/login-submit', group='url')) data = self._hidden_inputs(login_page) data.update({ 'session_key': email, 'session_password': password, }) login_submit_page = self._download_webpage( action_url, None, 'Logging in', data=urlencode_postdata(data)) error = self._search_regex( r'<span[^>]+class="error"[^>]*>\s*(.+?)\s*</span>', login_submit_page, 'error', default=None) if error: raise ExtractorError(error, expected=True) class LinkedInLearningIE(LinkedInLearningBaseIE): IE_NAME = 'linkedin:learning' 
_VALID_URL = r'https?://(?:www\.)?linkedin\.com/learning/(?P<course_slug>[^/]+)/(?P<id>[^/?#]+)' _TEST = { 'url': 'https://www.linkedin.com/learning/programming-foundations-fundamentals/welcome?autoplay=true', 'md5': 'a1d74422ff0d5e66a792deb996693167', 'info_dict': { 'id': '90426', 'ext': 'mp4', 'title': 'Welcome', 'timestamp': 1430396150.82, 'upload_date': '20150430', }, } def _real_extract(self, url): course_slug, video_slug = re.match(self._VALID_URL, url).groups() video_data = None formats = [] for width, height in ((640, 360), (960, 540), (1280, 720)): video_data = self._call_api( course_slug, 'selectedVideo', video_slug, height)['selectedVideo'] video_url_data = video_data.get('url') or {} progressive_url = video_url_data.get('progressiveUrl') if progressive_url: formats.append({ 'format_id': 'progressive-%dp' % height, 'url': progressive_url, 'height': height, 'width': width, 'source_preference': 1, }) title = video_data['title'] audio_url = video_data.get('audio', {}).get('progressiveUrl') if audio_url: formats.append({ 'abr': 64, 'ext': 'm4a', 'format_id': 'audio', 'url': audio_url, 'vcodec': 'none', }) streaming_url = video_url_data.get('streamingUrl') if streaming_url: formats.extend(self._extract_m3u8_formats( streaming_url, video_slug, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) self._sort_formats(formats, ('width', 'height', 'source_preference', 'tbr', 'abr')) return { 'id': self._get_video_id(video_data, course_slug, video_slug), 'title': title, 'formats': formats, 'thumbnail': video_data.get('defaultThumbnail'), 'timestamp': float_or_none(video_data.get('publishedOn'), 1000), 'duration': int_or_none(video_data.get('durationInSeconds')), } class LinkedInLearningCourseIE(LinkedInLearningBaseIE): IE_NAME = 'linkedin:learning:course' _VALID_URL = r'https?://(?:www\.)?linkedin\.com/learning/(?P<id>[^/?#]+)' _TEST = { 'url': 'https://www.linkedin.com/learning/programming-foundations-fundamentals', 'info_dict': { 'id': 
'programming-foundations-fundamentals', 'title': 'Programming Foundations: Fundamentals', 'description': 'md5:76e580b017694eb89dc8e8923fff5c86', }, 'playlist_mincount': 61, } @classmethod def suitable(cls, url): return False if LinkedInLearningIE.suitable(url) else super(LinkedInLearningCourseIE, cls).suitable(url) def _real_extract(self, url): course_slug = self._match_id(url) course_data = self._call_api(course_slug, 'chapters,description,title') entries = [] for chapter_number, chapter in enumerate(course_data.get('chapters', []), 1): chapter_title = chapter.get('title') chapter_id = self._get_urn_id(chapter) for video in chapter.get('videos', []): video_slug = video.get('slug') if not video_slug: continue entries.append({ '_type': 'url_transparent', 'id': self._get_video_id(video, course_slug, video_slug), 'title': video.get('title'), 'url': 'https://www.linkedin.com/learning/%s/%s' % (course_slug, video_slug), 'chapter': chapter_title, 'chapter_number': chapter_number, 'chapter_id': chapter_id, 'ie_key': LinkedInLearningIE.ie_key(), }) return self.playlist_result( entries, course_slug, course_data.get('title'), course_data.get('description'))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/hketv.py
youtube_dl/extractor/hketv.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_str from ..utils import ( clean_html, ExtractorError, int_or_none, merge_dicts, parse_count, str_or_none, try_get, unified_strdate, urlencode_postdata, urljoin, ) class HKETVIE(InfoExtractor): IE_NAME = 'hketv' IE_DESC = '香港教育局教育電視 (HKETV) Educational Television, Hong Kong Educational Bureau' _GEO_BYPASS = False _GEO_COUNTRIES = ['HK'] _VALID_URL = r'https?://(?:www\.)?hkedcity\.net/etv/resource/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://www.hkedcity.net/etv/resource/2932360618', 'md5': 'f193712f5f7abb208ddef3c5ea6ed0b7', 'info_dict': { 'id': '2932360618', 'ext': 'mp4', 'title': '喜閱一生(共享閱讀樂) (中、英文字幕可供選擇)', 'description': 'md5:d5286d05219ef50e0613311cbe96e560', 'upload_date': '20181024', 'duration': 900, 'subtitles': 'count:2', }, 'skip': 'Geo restricted to HK', }, { 'url': 'https://www.hkedcity.net/etv/resource/972641418', 'md5': '1ed494c1c6cf7866a8290edad9b07dc9', 'info_dict': { 'id': '972641418', 'ext': 'mp4', 'title': '衣冠楚楚 (天使系列之一)', 'description': 'md5:10bb3d659421e74f58e5db5691627b0f', 'upload_date': '20070109', 'duration': 907, 'subtitles': {}, }, 'params': { 'geo_verification_proxy': '<HK proxy here>', }, 'skip': 'Geo restricted to HK', }] _CC_LANGS = { '中文(繁體中文)': 'zh-Hant', '中文(简体中文)': 'zh-Hans', 'English': 'en', 'Bahasa Indonesia': 'id', '\u0939\u093f\u0928\u094d\u0926\u0940': 'hi', '\u0928\u0947\u092a\u093e\u0932\u0940': 'ne', 'Tagalog': 'tl', '\u0e44\u0e17\u0e22': 'th', '\u0627\u0631\u062f\u0648': 'ur', } _FORMAT_HEIGHTS = { 'SD': 360, 'HD': 720, } _APPS_BASE_URL = 'https://apps.hkedcity.net' def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = ( self._html_search_meta( ('ed_title', 'search.ed_title'), webpage, default=None) or self._search_regex( r'data-favorite_title_(?:eng|chi)=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 'title', default=None, group='url') or 
self._html_search_regex( r'<h1>([^<]+)</h1>', webpage, 'title', default=None) or self._og_search_title(webpage) ) file_id = self._search_regex( r'post_var\[["\']file_id["\']\s*\]\s*=\s*(.+?);', webpage, 'file ID') curr_url = self._search_regex( r'post_var\[["\']curr_url["\']\s*\]\s*=\s*"(.+?)";', webpage, 'curr URL') data = { 'action': 'get_info', 'curr_url': curr_url, 'file_id': file_id, 'video_url': file_id, } response = self._download_json( self._APPS_BASE_URL + '/media/play/handler.php', video_id, data=urlencode_postdata(data), headers=merge_dicts({ 'Content-Type': 'application/x-www-form-urlencoded'}, self.geo_verification_headers())) result = response['result'] if not response.get('success') or not response.get('access'): error = clean_html(response.get('access_err_msg')) if 'Video streaming is not available in your country' in error: self.raise_geo_restricted( msg=error, countries=self._GEO_COUNTRIES) else: raise ExtractorError(error, expected=True) formats = [] width = int_or_none(result.get('width')) height = int_or_none(result.get('height')) playlist0 = result['playlist'][0] for fmt in playlist0['sources']: file_url = urljoin(self._APPS_BASE_URL, fmt.get('file')) if not file_url: continue # If we ever wanted to provide the final resolved URL that # does not require cookies, albeit with a shorter lifespan: # urlh = self._downloader.urlopen(file_url) # resolved_url = urlh.geturl() label = fmt.get('label') h = self._FORMAT_HEIGHTS.get(label) w = h * width // height if h and width and height else None formats.append({ 'format_id': label, 'ext': fmt.get('type'), 'url': file_url, 'width': w, 'height': h, }) self._sort_formats(formats) subtitles = {} tracks = try_get(playlist0, lambda x: x['tracks'], list) or [] for track in tracks: if not isinstance(track, dict): continue track_kind = str_or_none(track.get('kind')) if not track_kind or not isinstance(track_kind, compat_str): continue if track_kind.lower() not in ('captions', 'subtitles'): continue track_url = 
urljoin(self._APPS_BASE_URL, track.get('file')) if not track_url: continue track_label = track.get('label') subtitles.setdefault(self._CC_LANGS.get( track_label, track_label), []).append({ 'url': self._proto_relative_url(track_url), 'ext': 'srt', }) # Likes emotion = self._download_json( 'https://emocounter.hkedcity.net/handler.php', video_id, data=urlencode_postdata({ 'action': 'get_emotion', 'data[bucket_id]': 'etv', 'data[identifier]': video_id, }), headers={'Content-Type': 'application/x-www-form-urlencoded'}, fatal=False) or {} like_count = int_or_none(try_get( emotion, lambda x: x['data']['emotion_data'][0]['count'])) return { 'id': video_id, 'title': title, 'description': self._html_search_meta( 'description', webpage, fatal=False), 'upload_date': unified_strdate(self._html_search_meta( 'ed_date', webpage, fatal=False), day_first=False), 'duration': int_or_none(result.get('length')), 'formats': formats, 'subtitles': subtitles, 'thumbnail': urljoin(self._APPS_BASE_URL, result.get('image')), 'view_count': parse_count(result.get('view_count')), 'like_count': like_count, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/yandexdisk.py
youtube_dl/extractor/yandexdisk.py
# coding: utf-8 from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..utils import ( determine_ext, float_or_none, int_or_none, mimetype2ext, try_get, urljoin, ) class YandexDiskIE(InfoExtractor): _VALID_URL = r'''(?x)https?:// (?P<domain> yadi\.sk| disk\.yandex\. (?: az| by| co(?:m(?:\.(?:am|ge|tr))?|\.il)| ee| fr| k[gz]| l[tv]| md| t[jm]| u[az]| ru ) )/(?:[di]/|public.*?\bhash=)(?P<id>[^/?#&]+)''' _TESTS = [{ 'url': 'https://yadi.sk/i/VdOeDou8eZs6Y', 'md5': 'a4a8d52958c8fddcf9845935070402ae', 'info_dict': { 'id': 'VdOeDou8eZs6Y', 'ext': 'mp4', 'title': '4.mp4', 'duration': 168.6, 'uploader': 'y.botova', 'uploader_id': '300043621', 'view_count': int, }, 'expected_warnings': ['Unable to download JSON metadata'], }, { 'url': 'https://yadi.sk/d/h3WAXvDS3Li3Ce', 'only_matching': True, }, { 'url': 'https://yadi.sk/public?hash=5DZ296JK9GWCLp02f6jrObjnctjRxMs8L6%2B%2FuhNqk38%3D', 'only_matching': True, }] def _real_extract(self, url): domain, video_id = re.match(self._VALID_URL, url).groups() webpage = self._download_webpage(url, video_id) store = self._parse_json(self._search_regex( r'<script[^>]+id="store-prefetch"[^>]*>\s*({.+?})\s*</script>', webpage, 'store'), video_id) resource = store['resources'][store['rootResourceId']] title = resource['name'] meta = resource.get('meta') or {} public_url = meta.get('short_url') if public_url: video_id = self._match_id(public_url) source_url = (self._download_json( 'https://cloud-api.yandex.net/v1/disk/public/resources/download', video_id, query={'public_key': url}, fatal=False) or {}).get('href') video_streams = resource.get('videoStreams') or {} video_hash = resource.get('hash') or url environment = store.get('environment') or {} sk = environment.get('sk') yandexuid = environment.get('yandexuid') if sk and yandexuid and not (source_url and video_streams): self._set_cookie(domain, 'yandexuid', yandexuid) def call_api(action): return (self._download_json( urljoin(url, 
'/public/api/') + action, video_id, data=json.dumps({ 'hash': video_hash, 'sk': sk, }).encode(), headers={ 'Content-Type': 'text/plain', }, fatal=False) or {}).get('data') or {} if not source_url: # TODO: figure out how to detect if download limit has # been reached and then avoid unnecessary source format # extraction requests source_url = call_api('download-url').get('url') if not video_streams: video_streams = call_api('get-video-streams') formats = [] if source_url: formats.append({ 'url': source_url, 'format_id': 'source', 'ext': determine_ext(title, meta.get('ext') or mimetype2ext(meta.get('mime_type')) or 'mp4'), 'quality': 1, 'filesize': int_or_none(meta.get('size')) }) for video in (video_streams.get('videos') or []): format_url = video.get('url') if not format_url: continue if video.get('dimension') == 'adaptive': formats.extend(self._extract_m3u8_formats( format_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) else: size = video.get('size') or {} height = int_or_none(size.get('height')) format_id = 'hls' if height: format_id += '-%dp' % height formats.append({ 'ext': 'mp4', 'format_id': format_id, 'height': height, 'protocol': 'm3u8_native', 'url': format_url, 'width': int_or_none(size.get('width')), }) self._sort_formats(formats) uid = resource.get('uid') display_name = try_get(store, lambda x: x['users'][uid]['displayName']) return { 'id': video_id, 'title': title, 'duration': float_or_none(video_streams.get('duration'), 1000), 'uploader': display_name, 'uploader_id': uid, 'view_count': int_or_none(meta.get('views_counter')), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tv2.py
youtube_dl/extractor/tv2.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_HTTPError from ..utils import ( determine_ext, ExtractorError, int_or_none, float_or_none, js_to_json, parse_iso8601, remove_end, strip_or_none, try_get, ) class TV2IE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tv2\.no/v/(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.tv2.no/v/916509/', 'info_dict': { 'id': '916509', 'ext': 'flv', 'title': 'Se Frode Gryttens hyllest av Steven Gerrard', 'description': 'TV 2 Sportens huspoet tar avskjed med Liverpools kaptein Steven Gerrard.', 'timestamp': 1431715610, 'upload_date': '20150515', 'duration': 156.967, 'view_count': int, 'categories': list, }, }] _API_DOMAIN = 'sumo.tv2.no' _PROTOCOLS = ('HDS', 'HLS', 'DASH') _GEO_COUNTRIES = ['NO'] def _real_extract(self, url): video_id = self._match_id(url) api_base = 'http://%s/api/web/asset/%s' % (self._API_DOMAIN, video_id) asset = self._download_json( api_base + '.json', video_id, 'Downloading metadata JSON')['asset'] title = asset.get('subtitle') or asset['title'] is_live = asset.get('live') is True formats = [] format_urls = [] for protocol in self._PROTOCOLS: try: data = self._download_json( api_base + '/play.json?protocol=%s&videoFormat=SMIL+ISMUSP' % protocol, video_id, 'Downloading play JSON')['playback'] except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401: error = self._parse_json(e.cause.read().decode(), video_id)['error'] error_code = error.get('code') if error_code == 'ASSET_PLAYBACK_INVALID_GEO_LOCATION': self.raise_geo_restricted(countries=self._GEO_COUNTRIES) elif error_code == 'SESSION_NOT_AUTHENTICATED': self.raise_login_required() raise ExtractorError(error['description']) raise items = try_get(data, lambda x: x['items']['item']) if not items: continue if not isinstance(items, list): items = [items] for item in items: if not isinstance(item, dict): continue video_url = item.get('url') if not 
video_url or video_url in format_urls: continue format_id = '%s-%s' % (protocol.lower(), item.get('mediaFormat')) if not self._is_valid_url(video_url, video_id, format_id): continue format_urls.append(video_url) ext = determine_ext(video_url) if ext == 'f4m': formats.extend(self._extract_f4m_formats( video_url, video_id, f4m_id=format_id, fatal=False)) elif ext == 'm3u8': if not data.get('drmProtected'): formats.extend(self._extract_m3u8_formats( video_url, video_id, 'mp4', 'm3u8' if is_live else 'm3u8_native', m3u8_id=format_id, fatal=False)) elif ext == 'mpd': formats.extend(self._extract_mpd_formats( video_url, video_id, format_id, fatal=False)) elif ext == 'ism' or video_url.endswith('.ism/Manifest'): pass else: formats.append({ 'url': video_url, 'format_id': format_id, 'tbr': int_or_none(item.get('bitrate')), 'filesize': int_or_none(item.get('fileSize')), }) if not formats and data.get('drmProtected'): raise ExtractorError('This video is DRM protected.', expected=True) self._sort_formats(formats) thumbnails = [{ 'id': thumbnail.get('@type'), 'url': thumbnail.get('url'), } for _, thumbnail in (asset.get('imageVersions') or {}).items()] return { 'id': video_id, 'url': video_url, 'title': self._live_title(title) if is_live else title, 'description': strip_or_none(asset.get('description')), 'thumbnails': thumbnails, 'timestamp': parse_iso8601(asset.get('createTime')), 'duration': float_or_none(asset.get('accurateDuration') or asset.get('duration')), 'view_count': int_or_none(asset.get('views')), 'categories': asset.get('keywords', '').split(','), 'formats': formats, 'is_live': is_live, } class TV2ArticleIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tv2\.no/(?:a|\d{4}/\d{2}/\d{2}(/[^/]+)+)/(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.tv2.no/2015/05/16/nyheter/alesund/krim/pingvin/6930542', 'info_dict': { 'id': '6930542', 'title': 'Russen hetses etter pingvintyveri - innrømmer å ha åpnet luken på buret', 'description': 'De fire siktede nekter fortsatt for å ha 
stjålet pingvinbabyene, men innrømmer å ha åpnet luken til de små kyllingene.', }, 'playlist_count': 2, }, { 'url': 'http://www.tv2.no/a/6930542', 'only_matching': True, }] def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) # Old embed pattern (looks unused nowadays) assets = re.findall(r'data-assetid=["\'](\d+)', webpage) if not assets: # New embed pattern for v in re.findall(r'(?s)TV2ContentboxVideo\(({.+?})\)', webpage): video = self._parse_json( v, playlist_id, transform_source=js_to_json, fatal=False) if not video: continue asset = video.get('assetId') if asset: assets.append(asset) entries = [ self.url_result('http://www.tv2.no/v/%s' % asset_id, 'TV2') for asset_id in assets] title = remove_end(self._og_search_title(webpage), ' - TV2.no') description = remove_end(self._og_search_description(webpage), ' - TV2.no') return self.playlist_result(entries, playlist_id, title, description) class KatsomoIE(TV2IE): _VALID_URL = r'https?://(?:www\.)?(?:katsomo|mtv(uutiset)?)\.fi/(?:sarja/[0-9a-z-]+-\d+/[0-9a-z-]+-|(?:#!/)?jakso/(?:\d+/[^/]+/)?|video/prog)(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.mtv.fi/sarja/mtv-uutiset-live-33001002003/lahden-pelicans-teki-kovan-ratkaisun-ville-nieminen-pihalle-1181321', 'info_dict': { 'id': '1181321', 'ext': 'mp4', 'title': 'Lahden Pelicans teki kovan ratkaisun – Ville Nieminen pihalle', 'description': 'Päätöksen teki Pelicansin hallitus.', 'timestamp': 1575116484, 'upload_date': '20191130', 'duration': 37.12, 'view_count': int, 'categories': list, }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://www.katsomo.fi/#!/jakso/33001005/studio55-fi/658521/jukka-kuoppamaki-tekee-yha-lauluja-vaikka-lentokoneessa', 'only_matching': True, }, { 'url': 'https://www.mtvuutiset.fi/video/prog1311159', 'only_matching': True, }, { 'url': 'https://www.katsomo.fi/#!/jakso/1311159', 'only_matching': True, }] _API_DOMAIN = 'api.katsomo.fi' _PROTOCOLS = ('HLS', 
'MPD') _GEO_COUNTRIES = ['FI'] class MTVUutisetArticleIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)mtvuutiset\.fi/artikkeli/[^/]+/(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.mtvuutiset.fi/artikkeli/tallaisia-vaurioita-viking-amorellassa-on-useamman-osaston-alla-vetta/7931384', 'info_dict': { 'id': '1311159', 'ext': 'mp4', 'title': 'Viking Amorellan matkustajien evakuointi on alkanut – tältä operaatio näyttää laivalla', 'description': 'Viking Amorellan matkustajien evakuointi on alkanut – tältä operaatio näyttää laivalla', 'timestamp': 1600608966, 'upload_date': '20200920', 'duration': 153.7886666, 'view_count': int, 'categories': list, }, 'params': { # m3u8 download 'skip_download': True, }, }, { # multiple Youtube embeds 'url': 'https://www.mtvuutiset.fi/artikkeli/50-vuotta-subarun-vastaiskua/6070962', 'only_matching': True, }] def _real_extract(self, url): article_id = self._match_id(url) article = self._download_json( 'http://api.mtvuutiset.fi/mtvuutiset/api/json/' + article_id, article_id) def entries(): for video in (article.get('videos') or []): video_type = video.get('videotype') video_url = video.get('url') if not (video_url and video_type in ('katsomo', 'youtube')): continue yield self.url_result( video_url, video_type.capitalize(), video.get('video_id')) return self.playlist_result( entries(), article_id, article.get('title'), article.get('description'))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/azmedien.py
youtube_dl/extractor/azmedien.py
# coding: utf-8 from __future__ import unicode_literals import json import re from .common import InfoExtractor from .kaltura import KalturaIE class AZMedienIE(InfoExtractor): IE_DESC = 'AZ Medien videos' _VALID_URL = r'''(?x) https?:// (?:www\.)? (?P<host> telezueri\.ch| telebaern\.tv| telem1\.ch )/ [^/]+/ (?P<id> [^/]+-(?P<article_id>\d+) ) (?: \#video= (?P<kaltura_id> [_0-9a-z]+ ) )? ''' _TESTS = [{ 'url': 'https://www.telezueri.ch/sonntalk/bundesrats-vakanzen-eu-rahmenabkommen-133214569', 'info_dict': { 'id': '1_anruz3wy', 'ext': 'mp4', 'title': 'Bundesrats-Vakanzen / EU-Rahmenabkommen', 'uploader_id': 'TVOnline', 'upload_date': '20180930', 'timestamp': 1538328802, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.telebaern.tv/telebaern-news/montag-1-oktober-2018-ganze-sendung-133531189#video=0_7xjo9lf1', 'only_matching': True }] _API_TEMPL = 'https://www.%s/api/pub/gql/%s/NewsArticleTeaser/a4016f65fe62b81dc6664dd9f4910e4ab40383be' _PARTNER_ID = '1719221' def _real_extract(self, url): host, display_id, article_id, entry_id = re.match(self._VALID_URL, url).groups() if not entry_id: entry_id = self._download_json( self._API_TEMPL % (host, host.split('.')[0]), display_id, query={ 'variables': json.dumps({ 'contextId': 'NewsArticle:' + article_id, }), })['data']['context']['mainAsset']['video']['kaltura']['kalturaId'] return self.url_result( 'kaltura:%s:%s' % (self._PARTNER_ID, entry_id), ie=KalturaIE.ie_key(), video_id=entry_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/fifa.py
youtube_dl/extractor/fifa.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( int_or_none, traverse_obj, unified_timestamp, ) if not callable(getattr(InfoExtractor, '_match_valid_url', None)): BaseInfoExtractor = InfoExtractor import re class InfoExtractor(BaseInfoExtractor): @classmethod def _match_valid_url(cls, url): return re.match(cls._VALID_URL, url) class FifaIE(InfoExtractor): _VALID_URL = r'https?://www.fifa.com/fifaplus/(?P<locale>\w{2})/watch/([^#?]+/)?(?P<id>\w+)' _TESTS = [{ 'url': 'https://www.fifa.com/fifaplus/en/watch/7on10qPcnyLajDDU3ntg6y', 'info_dict': { 'id': '7on10qPcnyLajDDU3ntg6y', 'title': 'Italy v France | Final | 2006 FIFA World Cup Germany™ | Full Match Replay', 'description': 'md5:f4520d0ee80529c8ba4134a7d692ff8b', 'ext': 'mp4', 'categories': ['FIFA Tournaments'], 'thumbnail': 'https://digitalhub.fifa.com/transform/135e2656-3a51-407b-8810-6c34bec5b59b/FMR_2006_Italy_France_Final_Hero', 'duration': 8165, }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.fifa.com/fifaplus/pt/watch/1cg5r5Qt6Qt12ilkDgb1sV', 'info_dict': { 'id': '1cg5r5Qt6Qt12ilkDgb1sV', 'title': 'Brazil v Germany | Semi-finals | 2014 FIFA World Cup Brazil™ | Extended Highlights', 'description': 'md5:d908c74ee66322b804ae2e521b02a855', 'ext': 'mp4', 'categories': ['FIFA Tournaments', 'Highlights'], 'thumbnail': 'https://digitalhub.fifa.com/transform/d8fe6f61-276d-4a73-a7fe-6878a35fd082/FIFAPLS_100EXTHL_2014BRAvGER_TMB', 'duration': 902, 'release_timestamp': 1404777600, 'release_date': '20140708', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.fifa.com/fifaplus/fr/watch/3C6gQH9C2DLwzNx7BMRQdp', 'info_dict': { 'id': '3C6gQH9C2DLwzNx7BMRQdp', 'title': 'Josimar goal against Northern Ireland | Classic Goals', 'description': 'md5:cbe7e7bb52f603c9f1fe9a4780fe983b', 'ext': 'mp4', 'categories': ['FIFA Tournaments', 'Goal'], 'duration': 28, 'thumbnail': 
'https://digitalhub.fifa.com/transform/f9301391-f8d9-48b5-823e-c093ac5e3e11/CG_MEN_1986_JOSIMAR', }, 'params': {'skip_download': 'm3u8'}, }] def _real_extract(self, url): video_id, locale = self._match_valid_url(url).group('id', 'locale') webpage = self._download_webpage(url, video_id) preconnect_link = self._search_regex( r'<link\b[^>]+\brel\s*=\s*"preconnect"[^>]+href\s*=\s*"([^"]+)"', webpage, 'Preconnect Link') video_details = self._download_json( '{preconnect_link}/sections/videoDetails/{video_id}'.format(**locals()), video_id, 'Downloading Video Details', fatal=False) preplay_parameters = self._download_json( '{preconnect_link}/videoPlayerData/{video_id}'.format(**locals()), video_id, 'Downloading Preplay Parameters')['preplayParameters'] content_data = self._download_json( # 1. query string is expected to be sent as-is # 2. `sig` must be appended # 3. if absent, the call appears to work but the manifest is bad (404) 'https://content.uplynk.com/preplay/{contentId}/multiple.json?{queryStr}&sig={signature}'.format(**preplay_parameters), video_id, 'Downloading Content Data') # formats, subtitles = self._extract_m3u8_formats_and_subtitles(content_data['playURL'], video_id) formats, subtitles = self._extract_m3u8_formats(content_data['playURL'], video_id, ext='mp4', entry_protocol='m3u8_native'), None self._sort_formats(formats) return { 'id': video_id, 'title': video_details['title'], 'description': video_details.get('description'), 'duration': int_or_none(video_details.get('duration')), 'release_timestamp': unified_timestamp(video_details.get('dateOfRelease')), 'categories': traverse_obj(video_details, (('videoCategory', 'videoSubcategory'),)), 'thumbnail': traverse_obj(video_details, ('backgroundImage', 'src')), 'formats': formats, 'subtitles': subtitles, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/roosterteeth.py
youtube_dl/extractor/roosterteeth.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_HTTPError, compat_str, ) from ..utils import ( ExtractorError, int_or_none, str_or_none, urlencode_postdata, ) class RoosterTeethIE(InfoExtractor): _VALID_URL = r'https?://(?:.+?\.)?roosterteeth\.com/(?:episode|watch)/(?P<id>[^/?#&]+)' _NETRC_MACHINE = 'roosterteeth' _TESTS = [{ 'url': 'http://roosterteeth.com/episode/million-dollars-but-season-2-million-dollars-but-the-game-announcement', 'md5': 'e2bd7764732d785ef797700a2489f212', 'info_dict': { 'id': '9156', 'display_id': 'million-dollars-but-season-2-million-dollars-but-the-game-announcement', 'ext': 'mp4', 'title': 'Million Dollars, But... The Game Announcement', 'description': 'md5:168a54b40e228e79f4ddb141e89fe4f5', 'thumbnail': r're:^https?://.*\.png$', 'series': 'Million Dollars, But...', 'episode': 'Million Dollars, But... The Game Announcement', }, }, { 'url': 'http://achievementhunter.roosterteeth.com/episode/off-topic-the-achievement-hunter-podcast-2016-i-didn-t-think-it-would-pass-31', 'only_matching': True, }, { 'url': 'http://funhaus.roosterteeth.com/episode/funhaus-shorts-2016-austin-sucks-funhaus-shorts', 'only_matching': True, }, { 'url': 'http://screwattack.roosterteeth.com/episode/death-battle-season-3-mewtwo-vs-shadow', 'only_matching': True, }, { 'url': 'http://theknow.roosterteeth.com/episode/the-know-game-news-season-1-boring-steam-sales-are-better', 'only_matching': True, }, { # only available for FIRST members 'url': 'http://roosterteeth.com/episode/rt-docs-the-world-s-greatest-head-massage-the-world-s-greatest-head-massage-an-asmr-journey-part-one', 'only_matching': True, }, { 'url': 'https://roosterteeth.com/watch/million-dollars-but-season-2-million-dollars-but-the-game-announcement', 'only_matching': True, }] _EPISODE_BASE_URL = 'https://svod-be.roosterteeth.com/api/v1/episodes/' def _login(self): username, password = self._get_login_info() if username is None: return 
try: self._download_json( 'https://auth.roosterteeth.com/oauth/token', None, 'Logging in', data=urlencode_postdata({ 'client_id': '4338d2b4bdc8db1239360f28e72f0d9ddb1fd01e7a38fbb07b4b1f4ba4564cc5', 'grant_type': 'password', 'username': username, 'password': password, })) except ExtractorError as e: msg = 'Unable to login' if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401: resp = self._parse_json(e.cause.read().decode(), None, fatal=False) if resp: error = resp.get('extra_info') or resp.get('error_description') or resp.get('error') if error: msg += ': ' + error self.report_warning(msg) def _real_initialize(self): if self._get_cookies(self._EPISODE_BASE_URL).get('rt_access_token'): return self._login() def _real_extract(self, url): display_id = self._match_id(url) api_episode_url = self._EPISODE_BASE_URL + display_id try: m3u8_url = self._download_json( api_episode_url + '/videos', display_id, 'Downloading video JSON metadata')['data'][0]['attributes']['url'] except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403: if self._parse_json(e.cause.read().decode(), display_id).get('access') is False: self.raise_login_required( '%s is only available for FIRST members' % display_id) raise formats = self._extract_m3u8_formats( m3u8_url, display_id, 'mp4', 'm3u8_native', m3u8_id='hls') self._sort_formats(formats) episode = self._download_json( api_episode_url, display_id, 'Downloading episode JSON metadata')['data'][0] attributes = episode['attributes'] title = attributes.get('title') or attributes['display_title'] video_id = compat_str(episode['id']) thumbnails = [] for image in episode.get('included', {}).get('images', []): if image.get('type') == 'episode_image': img_attributes = image.get('attributes') or {} for k in ('thumb', 'small', 'medium', 'large'): img_url = img_attributes.get(k) if img_url: thumbnails.append({ 'id': k, 'url': img_url, }) return { 'id': video_id, 'display_id': display_id, 'title': title, 
'description': attributes.get('description') or attributes.get('caption'), 'thumbnails': thumbnails, 'series': attributes.get('show_title'), 'season_number': int_or_none(attributes.get('season_number')), 'season_id': attributes.get('season_id'), 'episode': title, 'episode_number': int_or_none(attributes.get('number')), 'episode_id': str_or_none(episode.get('uuid')), 'formats': formats, 'channel_id': attributes.get('channel_id'), 'duration': int_or_none(attributes.get('length')), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/konserthusetplay.py
youtube_dl/extractor/konserthusetplay.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( determine_ext, float_or_none, int_or_none, url_or_none, ) class KonserthusetPlayIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?(?:konserthusetplay|rspoplay)\.se/\?.*\bm=(?P<id>[^&]+)' _TESTS = [{ 'url': 'http://www.konserthusetplay.se/?m=CKDDnlCY-dhWAAqiMERd-A', 'md5': 'e3fd47bf44e864bd23c08e487abe1967', 'info_dict': { 'id': 'CKDDnlCY-dhWAAqiMERd-A', 'ext': 'mp4', 'title': 'Orkesterns instrument: Valthornen', 'description': 'md5:f10e1f0030202020396a4d712d2fa827', 'thumbnail': 're:^https?://.*$', 'duration': 398.76, }, }, { 'url': 'http://rspoplay.se/?m=elWuEH34SMKvaO4wO_cHBw', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) e = self._search_regex( r'https?://csp\.picsearch\.com/rest\?.*\be=(.+?)[&"\']', webpage, 'e') rest = self._download_json( 'http://csp.picsearch.com/rest?e=%s&containerId=mediaplayer&i=object' % e, video_id, transform_source=lambda s: s[s.index('{'):s.rindex('}') + 1]) media = rest['media'] player_config = media['playerconfig'] playlist = player_config['playlist'] source = next(f for f in playlist if f.get('bitrates') or f.get('provider')) FORMAT_ID_REGEX = r'_([^_]+)_h264m\.mp4' formats = [] m3u8_url = source.get('url') if m3u8_url and determine_ext(m3u8_url) == 'm3u8': formats.extend(self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) fallback_url = source.get('fallbackUrl') fallback_format_id = None if fallback_url: fallback_format_id = self._search_regex( FORMAT_ID_REGEX, fallback_url, 'format id', default=None) connection_url = (player_config.get('rtmp', {}).get( 'netConnectionUrl') or player_config.get( 'plugins', {}).get('bwcheck', {}).get('netConnectionUrl')) if connection_url: for f in source['bitrates']: video_url = f.get('url') if not video_url: continue format_id = 
self._search_regex( FORMAT_ID_REGEX, video_url, 'format id', default=None) f_common = { 'vbr': int_or_none(f.get('bitrate')), 'width': int_or_none(f.get('width')), 'height': int_or_none(f.get('height')), } f = f_common.copy() f.update({ 'url': connection_url, 'play_path': video_url, 'format_id': 'rtmp-%s' % format_id if format_id else 'rtmp', 'ext': 'flv', }) formats.append(f) if format_id and format_id == fallback_format_id: f = f_common.copy() f.update({ 'url': fallback_url, 'format_id': 'http-%s' % format_id if format_id else 'http', }) formats.append(f) if not formats and fallback_url: formats.append({ 'url': fallback_url, }) self._sort_formats(formats) title = player_config.get('title') or media['title'] description = player_config.get('mediaInfo', {}).get('description') thumbnail = media.get('image') duration = float_or_none(media.get('duration'), 1000) subtitles = {} captions = source.get('captionsAvailableLanguages') if isinstance(captions, dict): for lang, subtitle_url in captions.items(): subtitle_url = url_or_none(subtitle_url) if lang != 'none' and subtitle_url: subtitles.setdefault(lang, []).append({'url': subtitle_url}) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'formats': formats, 'subtitles': subtitles, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/carambatv.py
youtube_dl/extractor/carambatv.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_str from ..utils import ( float_or_none, int_or_none, try_get, ) from .videomore import VideomoreIE class CarambaTVIE(InfoExtractor): _VALID_URL = r'(?:carambatv:|https?://video1\.carambatv\.ru/v/)(?P<id>\d+)' _TESTS = [{ 'url': 'http://video1.carambatv.ru/v/191910501', 'md5': '2f4a81b7cfd5ab866ee2d7270cb34a2a', 'info_dict': { 'id': '191910501', 'ext': 'mp4', 'title': '[BadComedian] - Разборка в Маниле (Абсолютный обзор)', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 2678.31, }, }, { 'url': 'carambatv:191910501', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) video = self._download_json( 'http://video1.carambatv.ru/v/%s/videoinfo.js' % video_id, video_id) title = video['title'] base_url = video.get('video') or 'http://video1.carambatv.ru/v/%s/' % video_id formats = [{ 'url': base_url + f['fn'], 'height': int_or_none(f.get('height')), 'format_id': '%sp' % f['height'] if f.get('height') else None, } for f in video['qualities'] if f.get('fn')] self._sort_formats(formats) thumbnail = video.get('splash') duration = float_or_none(try_get( video, lambda x: x['annotations'][0]['end_time'], compat_str)) return { 'id': video_id, 'title': title, 'thumbnail': thumbnail, 'duration': duration, 'formats': formats, } class CarambaTVPageIE(InfoExtractor): _VALID_URL = r'https?://carambatv\.ru/(?:[^/]+/)+(?P<id>[^/?#&]+)' _TEST = { 'url': 'http://carambatv.ru/movie/bad-comedian/razborka-v-manile/', 'md5': 'a49fb0ec2ad66503eeb46aac237d3c86', 'info_dict': { 'id': '475222', 'ext': 'flv', 'title': '[BadComedian] - Разборка в Маниле (Абсолютный обзор)', 'thumbnail': r're:^https?://.*\.jpg', # duration reported by videomore is incorrect 'duration': int, }, 'add_ie': [VideomoreIE.ie_key()], } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) videomore_url = 
VideomoreIE._extract_url(webpage) if not videomore_url: videomore_id = self._search_regex( r'getVMCode\s*\(\s*["\']?(\d+)', webpage, 'videomore id', default=None) if videomore_id: videomore_url = 'videomore:%s' % videomore_id if videomore_url: title = self._og_search_title(webpage) return { '_type': 'url_transparent', 'url': videomore_url, 'ie_key': VideomoreIE.ie_key(), 'title': title, } video_url = self._og_search_property('video:iframe', webpage, default=None) if not video_url: video_id = self._search_regex( r'(?:video_id|crmb_vuid)\s*[:=]\s*["\']?(\d+)', webpage, 'video id') video_url = 'carambatv:%s' % video_id return self.url_result(video_url, CarambaTVIE.ie_key())
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tvland.py
youtube_dl/extractor/tvland.py
# coding: utf-8 from __future__ import unicode_literals from .spike import ParamountNetworkIE class TVLandIE(ParamountNetworkIE): IE_NAME = 'tvland.com' _VALID_URL = r'https?://(?:www\.)?tvland\.com/(?:video-clips|(?:full-)?episodes)/(?P<id>[^/?#.]+)' _FEED_URL = 'http://www.tvland.com/feeds/mrss/' _TESTS = [{ # Geo-restricted. Without a proxy metadata are still there. With a # proxy it redirects to http://m.tvland.com/app/ 'url': 'https://www.tvland.com/episodes/s04pzf/everybody-loves-raymond-the-dog-season-1-ep-19', 'info_dict': { 'description': 'md5:84928e7a8ad6649371fbf5da5e1ad75a', 'title': 'The Dog', }, 'playlist_mincount': 5, }, { 'url': 'https://www.tvland.com/video-clips/4n87f2/younger-a-first-look-at-younger-season-6', 'md5': 'e2c6389401cf485df26c79c247b08713', 'info_dict': { 'id': '891f7d3c-5b5b-4753-b879-b7ba1a601757', 'ext': 'mp4', 'title': 'Younger|April 30, 2019|6|NO-EPISODE#|A First Look at Younger Season 6', 'description': 'md5:595ea74578d3a888ae878dfd1c7d4ab2', 'upload_date': '20190430', 'timestamp': 1556658000, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.tvland.com/full-episodes/iu0hz6/younger-a-kiss-is-just-a-kiss-season-3-ep-301', 'only_matching': True, }]
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/orf.py
youtube_dl/extractor/orf.py
# coding: utf-8 from __future__ import unicode_literals import base64 import functools import re from .common import InfoExtractor from .youtube import YoutubeIE from ..utils import ( clean_html, determine_ext, ExtractorError, float_or_none, int_or_none, merge_dicts, mimetype2ext, parse_age_limit, parse_iso8601, strip_jsonp, txt_or_none, unified_strdate, update_url_query, url_or_none, ) from ..traversal import T, traverse_obj k_float_or_none = functools.partial(float_or_none, scale=1000) class ORFRadioBase(InfoExtractor): STATION_INFO = { 'fm4': ('fm4', 'fm4', 'orffm4'), 'noe': ('noe', 'oe2n', 'orfnoe'), 'wien': ('wie', 'oe2w', 'orfwie'), 'burgenland': ('bgl', 'oe2b', 'orfbgl'), 'ooe': ('ooe', 'oe2o', 'orfooe'), 'steiermark': ('stm', 'oe2st', 'orfstm'), 'kaernten': ('ktn', 'oe2k', 'orfktn'), 'salzburg': ('sbg', 'oe2s', 'orfsbg'), 'tirol': ('tir', 'oe2t', 'orftir'), 'vorarlberg': ('vbg', 'oe2v', 'orfvbg'), 'oe3': ('oe3', 'oe3', 'orfoe3'), 'oe1': ('oe1', 'oe1', 'orfoe1'), } _ID_NAMES = ('id', 'guid', 'program') @classmethod def _get_item_id(cls, data): return traverse_obj(data, *cls._ID_NAMES, expected_type=txt_or_none) @classmethod def _get_api_payload(cls, data, expected_id, in_payload=False): if expected_id not in traverse_obj(data, ('payload',)[:1 if in_payload else 0] + (cls._ID_NAMES, T(txt_or_none))): raise ExtractorError('Unexpected API data result', video_id=expected_id) return data['payload'] @staticmethod def _extract_podcast_upload(data): return traverse_obj(data, { 'url': ('enclosures', 0, 'url'), 'ext': ('enclosures', 0, 'type', T(mimetype2ext)), 'filesize': ('enclosures', 0, 'length', T(int_or_none)), 'title': ('title', T(txt_or_none)), 'description': ('description', T(clean_html)), 'timestamp': (('published', 'postDate'), T(parse_iso8601)), 'duration': ('duration', T(k_float_or_none)), 'series': ('podcast', 'title'), 'uploader': ((('podcast', 'author'), 'station'), T(txt_or_none)), 'uploader_id': ('podcast', 'channel', T(txt_or_none)), }, 
get_all=False) @classmethod def _entries(cls, data, station, item_type=None): if item_type in ('upload', 'podcast-episode'): yield merge_dicts({ 'id': cls._get_item_id(data), 'ext': 'mp3', 'vcodec': 'none', }, cls._extract_podcast_upload(data), rev=True) return loop_station = cls.STATION_INFO[station][1] for info in traverse_obj(data, ((('streams', Ellipsis), 'stream'), T(lambda v: v if v['loopStreamId'] else None))): item_id = info['loopStreamId'] host = info.get('host') or 'loopstream01.apa.at' yield merge_dicts({ 'id': item_id.replace('.mp3', ''), 'ext': 'mp3', 'url': update_url_query('https://{0}/'.format(host), { 'channel': loop_station, 'id': item_id, }), 'vcodec': 'none', # '_old_archive_ids': [make_archive_id(old_ie, video_id)], }, traverse_obj(data, { 'title': ('title', T(txt_or_none)), 'description': ('subtitle', T(clean_html)), 'uploader': 'station', 'series': ('programTitle', T(txt_or_none)), }), traverse_obj(info, { 'duration': (('duration', (None, T(lambda x: x['end'] - x['start']))), T(k_float_or_none), any), 'timestamp': (('start', 'startISO'), T(parse_iso8601), any), })) class ORFRadioIE(ORFRadioBase): IE_NAME = 'orf:sound' _STATION_RE = '|'.join(map(re.escape, ORFRadioBase.STATION_INFO.keys())) _VALID_URL = ( r'https?://sound\.orf\.at/radio/(?P<station>{0})/sendung/(?P<id>\d+)(?:/(?P<show>\w+))?'.format(_STATION_RE), r'https?://(?P<station>{0})\.orf\.at/player/(?P<date>\d{{8}})/(?P<id>\d+)'.format(_STATION_RE), ) _TESTS = [{ 'url': 'https://sound.orf.at/radio/ooe/sendung/37802/guten-morgen-oberoesterreich-am-feiertag', 'info_dict': { 'id': '37802', 'title': 'Guten Morgen Oberösterreich am Feiertag', 'description': 'Oberösterreichs meistgehörte regionale Frühsendung.\nRegionale Nachrichten zu jeder halben Stunde.\nModeration: Wolfgang Lehner\nNachrichten: Stephan Schnabl', }, 'playlist': [{ 'md5': 'f9ff8517dd681b642a2c900e2c9e6085', 'info_dict': { 'id': '2024-05-30_0559_tl_66_7DaysThu1_443862', 'ext': 'mp3', 'title': 'Guten Morgen Oberösterreich am 
Feiertag', 'description': 'Oberösterreichs meistgehörte regionale Frühsendung.\nRegionale Nachrichten zu jeder halben Stunde.\nModeration: Wolfgang Lehner\nNachrichten: Stephan Schnabl', 'timestamp': 1717041587, 'upload_date': '20240530', 'uploader': 'ooe', 'duration': 14413.0, } }], 'skip': 'Shows from ORF Sound are only available for 30 days.' }, { 'url': 'https://oe1.orf.at/player/20240531/758136', 'md5': '2397717aaf3ae9c22a4f090ee3b8d374', 'info_dict': { 'id': '2024-05-31_1905_tl_51_7DaysFri35_2413387', 'ext': 'mp3', 'title': '"Who Cares?"', 'description': 'Europas größte Netzkonferenz re:publica 2024', 'timestamp': 1717175100, 'upload_date': '20240531', 'uploader': 'oe1', 'duration': 1500, }, 'skip': 'Shows from ORF Sound are only available for 30 days.' }] def _real_extract(self, url): m = self._match_valid_url(url) station, show_id = m.group('station', 'id') api_station, _, _ = self.STATION_INFO[station] if 'date' in m.groupdict(): data = self._download_json( 'https://audioapi.orf.at/{0}/json/4.0/broadcast/{1}/{2}?_o={3}.orf.at'.format( api_station, show_id, m.group('date'), station), show_id) show_id = data['id'] else: data = self._download_json( 'https://audioapi.orf.at/{0}/api/json/5.0/broadcast/{1}?_o=sound.orf.at'.format( api_station, show_id), show_id) data = self._get_api_payload(data, show_id, in_payload=True) # site sends ISO8601 GMT date-times with separate TZ offset, ignored # TODO: should `..._date` be calculated relative to TZ? return merge_dicts( {'_type': 'multi_video'}, self.playlist_result( self._entries(data, station), show_id, txt_or_none(data.get('title')), clean_html(data.get('subtitle')))) class ORFRadioCollectionIE(ORFRadioBase): IE_NAME = 'orf:collection' _VALID_URL = r'https?://sound\.orf\.at/collection/(?P<coll_id>\d+)(?:/(?P<item_id>\d+))?' 
_TESTS = [{ 'url': 'https://sound.orf.at/collection/4/61908/was-das-uberschreiten-des-15-limits-bedeutet', 'info_dict': { 'id': '2577582', }, 'playlist': [{ 'md5': '5789cec7d75575ff58d19c0428c80eb3', 'info_dict': { 'id': '2024-06-06_1659_tl_54_7DaysThu6_153926', 'ext': 'mp3', 'title': 'Klimakrise: Was das Überschreiten des 1,5°-Limits bedeutet', 'timestamp': 1717686674, 'upload_date': '20240606', 'uploader': 'fm4', }, }], 'skip': 'Shows from ORF Sound are only available for 30 days.' }, { # persistent playlist (FM4 Highlights) 'url': 'https://sound.orf.at/collection/4/', 'info_dict': { 'id': '4', }, 'playlist_mincount': 10, 'playlist_maxcount': 13, }] def _real_extract(self, url): coll_id, item_id = self._match_valid_url(url).group('coll_id', 'item_id') data = self._download_json( 'https://collector.orf.at/api/frontend/collections/{0}?_o=sound.orf.at'.format( coll_id), coll_id) data = self._get_api_payload(data, coll_id, in_payload=True) def yield_items(): for item in traverse_obj(data, ( 'content', 'items', lambda _, v: any(k in v['target']['params'] for k in self._ID_NAMES))): if item_id is None or item_id == txt_or_none(item.get('id')): target = item['target'] typed_item_id = self._get_item_id(target['params']) station = target['params'].get('station') item_type = target.get('type') if typed_item_id and (station or item_type): yield station, typed_item_id, item_type if item_id is not None: break else: if item_id is not None: raise ExtractorError('Item not found in collection', video_id=coll_id, expected=True) def item_playlist(station, typed_item_id, item_type): if item_type == 'upload': item_data = self._download_json('https://audioapi.orf.at/radiothek/api/2.0/upload/{0}?_o=sound.orf.at'.format( typed_item_id), typed_item_id) elif item_type == 'podcast-episode': item_data = self._download_json('https://audioapi.orf.at/radiothek/api/2.0/episode/{0}?_o=sound.orf.at'.format( typed_item_id), typed_item_id) else: api_station, _, _ = self.STATION_INFO[station] 
item_data = self._download_json( 'https://audioapi.orf.at/{0}/api/json/5.0/{1}/{2}?_o=sound.orf.at'.format( api_station, item_type or 'broadcastitem', typed_item_id), typed_item_id) item_data = self._get_api_payload(item_data, typed_item_id, in_payload=True) return merge_dicts( {'_type': 'multi_video'}, self.playlist_result( self._entries(item_data, station, item_type), typed_item_id, txt_or_none(data.get('title')), clean_html(data.get('subtitle')))) def yield_item_entries(): for station, typed_id, item_type in yield_items(): yield item_playlist(station, typed_id, item_type) if item_id is not None: # coll_id = '/'.join((coll_id, item_id)) return next(yield_item_entries()) return self.playlist_result(yield_item_entries(), coll_id, data.get('title')) class ORFPodcastIE(ORFRadioBase): IE_NAME = 'orf:podcast' _STATION_RE = '|'.join(map(re.escape, (x[0] for x in ORFRadioBase.STATION_INFO.values()))) + '|tv' _VALID_URL = r'https?://sound\.orf\.at/podcast/(?P<station>{0})/(?P<show>[\w-]+)/(?P<id>[\w-]+)'.format(_STATION_RE) _TESTS = [{ 'url': 'https://sound.orf.at/podcast/stm/der-kraeutertipp-von-christine-lackner/rotklee', 'md5': '1f2bab2ba90c2ce0c2754196ea78b35f', 'info_dict': { 'id': 'der-kraeutertipp-von-christine-lackner/rotklee', 'ext': 'mp3', 'title': 'Rotklee', 'description': 'In der Natur weit verbreitet - in der Medizin längst anerkennt: Rotklee. Dieser Podcast begleitet die Sendung "Radio Steiermark am Vormittag", Radio Steiermark, 28. 
Mai 2024.', 'timestamp': 1716891761, 'upload_date': '20240528', 'uploader_id': 'stm_kraeutertipp', 'uploader': 'ORF Radio Steiermark', 'duration': 101, 'series': 'Der Kräutertipp von Christine Lackner', }, 'skip': 'ORF podcasts are only available for a limited time' }] _ID_NAMES = ('slug', 'guid') def _real_extract(self, url): station, show, show_id = self._match_valid_url(url).group('station', 'show', 'id') data = self._download_json( 'https://audioapi.orf.at/radiothek/api/2.0/podcast/{0}/{1}/{2}'.format( station, show, show_id), show_id) data = self._get_api_payload(data, show_id, in_payload=True) return merge_dicts({ 'id': '/'.join((show, show_id)), 'ext': 'mp3', 'vcodec': 'none', }, self._extract_podcast_upload(data), rev=True) class ORFIPTVBase(InfoExtractor): _TITLE_STRIP_RE = '' def _extract_video(self, video_id, webpage, fatal=False): data = self._download_json( 'http://bits.orf.at/filehandler/static-api/json/current/data.json?file=%s' % video_id, video_id)[0] video = traverse_obj(data, ( 'sources', ('default', 'q8c'), T(lambda x: x if x['loadBalancerUrl'] else None), any)) load_balancer_url = video['loadBalancerUrl'] try: rendition = self._download_json( load_balancer_url, video_id, transform_source=strip_jsonp) except ExtractorError: rendition = None if not rendition: rendition = { 'redirect': { 'smil': re.sub( r'(/)jsonp(/.+\.)mp4$', r'\1dash\2smil/manifest.mpd', load_balancer_url), }, } f = traverse_obj(video, { 'abr': ('audioBitrate', T(int_or_none)), 'vbr': ('bitrate', T(int_or_none)), 'fps': ('videoFps', T(int_or_none)), 'width': ('videoWidth', T(int_or_none)), 'height': ('videoHeight', T(int_or_none)), }) formats = [] for format_id, format_url in traverse_obj(rendition, ( 'redirect', T(dict.items), Ellipsis)): if format_id == 'rtmp': ff = f.copy() ff.update({ 'url': format_url, 'format_id': format_id, }) formats.append(ff) elif determine_ext(format_url) == 'f4m': formats.extend(self._extract_f4m_formats( format_url, video_id, f4m_id=format_id)) elif 
determine_ext(format_url) == 'm3u8': formats.extend(self._extract_m3u8_formats( format_url, video_id, 'mp4', m3u8_id=format_id, entry_protocol='m3u8_native')) elif determine_ext(format_url) == 'mpd': formats.extend(self._extract_mpd_formats( format_url, video_id, mpd_id=format_id)) if formats or fatal: self._sort_formats(formats) else: return return merge_dicts({ 'id': video_id, 'title': re.sub(self._TITLE_STRIP_RE, '', self._og_search_title(webpage)), 'description': self._og_search_description(webpage), 'upload_date': unified_strdate(self._html_search_meta( 'dc.date', webpage, 'upload date', fatal=False)), 'formats': formats, }, traverse_obj(data, { 'duration': ('duration', T(k_float_or_none)), 'thumbnail': ('sources', 'default', 'preview', T(url_or_none)), }), rev=True) class ORFIPTVIE(ORFIPTVBase): IE_NAME = 'orf:iptv' IE_DESC = 'iptv.ORF.at' _WORKING = False # URLs redirect to orf.at/ _VALID_URL = r'https?://iptv\.orf\.at/(?:#/)?stories/(?P<id>\d+)' _TITLE_STRIP_RE = r'\s+-\s+iptv\.ORF\.at\S*$' _TEST = { 'url': 'http://iptv.orf.at/stories/2275236/', 'md5': 'c8b22af4718a4b4af58342529453e3e5', 'info_dict': { 'id': '350612', 'ext': 'flv', 'title': 'Weitere Evakuierungen um Vulkan Calbuco', 'description': 'md5:d689c959bdbcf04efeddedbf2299d633', 'duration': 68.197, 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20150425', }, } def _real_extract(self, url): story_id = self._match_id(url) webpage = self._download_webpage( 'http://iptv.orf.at/stories/%s' % story_id, story_id) video_id = self._search_regex( r'data-video(?:id)?="(\d+)"', webpage, 'video id') return self._extract_video(video_id, webpage) class ORFFM4StoryIE(ORFIPTVBase): IE_NAME = 'orf:fm4:story' IE_DESC = 'fm4.orf.at stories' _VALID_URL = r'https?://fm4\.orf\.at/stories/(?P<id>\d+)' _TITLE_STRIP_RE = r'\s+-\s+fm4\.ORF\.at\s*$' _TESTS = [{ 'url': 'https://fm4.orf.at/stories/3041554/', 'add_ie': ['Youtube'], 'info_dict': { 'id': '3041554', 'title': 'Is The EU Green Deal In Mortal Danger?', }, 
'playlist_count': 4, 'params': { 'format': 'bestvideo', }, }, { 'url': 'http://fm4.orf.at/stories/2865738/', 'info_dict': { 'id': '2865738', 'title': 'Manu Delago und Inner Tongue live', }, 'playlist': [{ 'md5': 'e1c2c706c45c7b34cf478bbf409907ca', 'info_dict': { 'id': '547792', 'ext': 'flv', 'title': 'Manu Delago und Inner Tongue live', 'description': 'Manu Delago und Inner Tongue haben bei der FM4 Soundpark Session live alles gegeben. Hier gibt es Fotos und die gesamte Session als Video.', 'duration': 1748.52, 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20170913', }, }, { 'md5': 'c6dd2179731f86f4f55a7b49899d515f', 'info_dict': { 'id': '547798', 'ext': 'flv', 'title': 'Manu Delago und Inner Tongue https://vod-ww.mdn.ors.at/cms-worldwide_episodes_nas/_definst_/nas/cms-worldwide_episodes/online/14228823_0005.smil/chunklist_b992000_vo.m3u8live (2)', 'duration': 1504.08, 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20170913', 'description': 'Manu Delago und Inner Tongue haben bei der FM4 Soundpark Session live alles gegeben. 
Hier gibt es Fotos und die gesamte Session als Video.', }, }], 'skip': 'Videos gone', }] def _real_extract(self, url): story_id = self._match_id(url) webpage = self._download_webpage(url, story_id) entries = [] seen_ids = set() for idx, video_id in enumerate(re.findall(r'data-video(?:id)?="(\d+)"', webpage)): if video_id in seen_ids: continue seen_ids.add(video_id) entry = self._extract_video(video_id, webpage, fatal=False) if not entry: continue if idx >= 1: # Titles are duplicates, make them unique entry['title'] = '%s (%d)' % (entry['title'], idx) entries.append(entry) seen_ids = set() for yt_id in re.findall( r'data-id\s*=\s*["\']([\w-]+)[^>]+\bclass\s*=\s*["\']youtube\b', webpage): if yt_id in seen_ids: continue seen_ids.add(yt_id) if YoutubeIE.suitable(yt_id): entries.append(self.url_result(yt_id, ie='Youtube', video_id=yt_id)) return self.playlist_result( entries, story_id, re.sub(self._TITLE_STRIP_RE, '', self._og_search_title(webpage, default='') or None)) class ORFONBase(InfoExtractor): _ENC_PFX = '3dSlfek03nsLKdj4Jsd' _API_PATH = 'episode' def _call_api(self, video_id, **kwargs): encrypted_id = base64.b64encode('{0}{1}'.format( self._ENC_PFX, video_id).encode('utf-8')).decode('ascii') return self._download_json( 'https://api-tvthek.orf.at/api/v4.3/public/{0}/encrypted/{1}'.format( self._API_PATH, encrypted_id), video_id, **kwargs) @classmethod def _parse_metadata(cls, api_json): return traverse_obj(api_json, { 'id': ('id', T(int), T(txt_or_none)), 'age_limit': ('age_classification', T(parse_age_limit)), 'duration': ((('exact_duration', T(k_float_or_none)), ('duration_second', T(float_or_none))),), 'title': (('title', 'headline'), T(txt_or_none)), 'description': (('description', 'teaser_text'), T(txt_or_none)), # 'media_type': ('video_type', T(txt_or_none)), 'thumbnail': ('_embedded', 'image', 'public_urls', 'highlight_teaser', 'url', T(url_or_none)), 'timestamp': (('date', 'episode_date'), T(parse_iso8601)), 'release_timestamp': ('release_date', 
T(parse_iso8601)), # 'modified_timestamp': ('updated_at', T(parse_iso8601)), }, get_all=False) def _extract_video(self, video_id, segment_id): # Not a segmented episode: return single video # Segmented episode without valid segment id: return entire playlist # Segmented episode with valid segment id and yes-playlist: return entire playlist # Segmented episode with valid segment id and no-playlist: return single video corresponding to segment id # If a multi_video playlist would be returned, but an unsegmented source exists, that source is chosen instead. api_json = self._call_api(video_id) if traverse_obj(api_json, 'is_drm_protected'): self.report_drm(video_id) # updates formats, subtitles def extract_sources(src_json, video_id): for manifest_type in traverse_obj(src_json, ('sources', T(dict.keys), Ellipsis)): for manifest_url in traverse_obj(src_json, ('sources', manifest_type, Ellipsis, 'src', T(url_or_none))): if manifest_type == 'hls': fmts, subs = self._extract_m3u8_formats( manifest_url, video_id, fatal=False, m3u8_id='hls', ext='mp4', entry_protocol='m3u8_native'), {} for f in fmts: if '_vo.' 
in f['url']: f['acodec'] = 'none' elif manifest_type == 'dash': fmts, subs = self._extract_mpd_formats_and_subtitles( manifest_url, video_id, fatal=False, mpd_id='dash') else: continue formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) formats, subtitles = [], {} if segment_id is None: extract_sources(api_json, video_id) if not formats: segments = traverse_obj(api_json, ( '_embedded', 'segments', lambda _, v: v['id'])) if len(segments) > 1 and segment_id is not None: if not self._yes_playlist(video_id, segment_id, playlist_label='collection', video_label='segment'): segments = [next(s for s in segments if txt_or_none(s['id']) == segment_id)] entries = [] for seg in segments: formats, subtitles = [], {} extract_sources(seg, segment_id) self._sort_formats(formats) entries.append(merge_dicts({ 'formats': formats, 'subtitles': subtitles, }, self._parse_metadata(seg), rev=True)) result = merge_dicts( {'_type': 'multi_video' if len(entries) > 1 else 'playlist'}, self._parse_metadata(api_json), self.playlist_result(entries, video_id)) # not yet processed in core for playlist/multi self._downloader._fill_common_fields(result) return result else: self._sort_formats(formats) for sub_url in traverse_obj(api_json, ( '_embedded', 'subtitle', ('xml_url', 'sami_url', 'stl_url', 'ttml_url', 'srt_url', 'vtt_url'), T(url_or_none))): self._merge_subtitles({'de': [{'url': sub_url}]}, target=subtitles) return merge_dicts({ 'id': video_id, 'formats': formats, 'subtitles': subtitles, # '_old_archive_ids': [self._downloader._make_archive_id({'ie_key': 'ORFTVthek', 'id': video_id})], }, self._parse_metadata(api_json), rev=True) def _real_extract(self, url): video_id, segment_id = self._match_valid_url(url).group('id', 'segment') webpage = self._download_webpage(url, video_id) # ORF doesn't like 410 or 404 if self._search_regex(r'<div\b[^>]*>\s*(Nicht mehr verfügbar)\s*</div>', webpage, 'Availability', default=False): raise ExtractorError('Content is no longer available', 
expected=True, video_id=video_id) return merge_dicts({ 'id': video_id, 'title': self._html_search_meta(['og:title', 'twitter:title'], webpage, default=None), 'description': self._html_search_meta( ['description', 'og:description', 'twitter:description'], webpage, default=None), }, self._search_json_ld(webpage, video_id, default={}), self._extract_video(video_id, segment_id), rev=True) class ORFONIE(ORFONBase): IE_NAME = 'orf:on' _VALID_URL = r'https?://on\.orf\.at/video/(?P<id>\d+)(?:/(?P<segment>\d+))?' _TESTS = [{ 'url': 'https://on.orf.at/video/14210000/school-of-champions-48', 'info_dict': { 'id': '14210000', 'ext': 'mp4', 'duration': 2651.08, 'thumbnail': 'https://api-tvthek.orf.at/assets/segments/0167/98/thumb_16697671_segments_highlight_teaser.jpeg', 'title': 'School of Champions (4/8)', 'description': r're:(?s)Luca hat sein ganzes Leben in den Bergen Südtirols verbracht und ist bei seiner Mutter aufgewachsen, .{1029} Leo$', # 'media_type': 'episode', 'timestamp': 1706558922, 'upload_date': '20240129', 'release_timestamp': 1706472362, 'release_date': '20240128', # 'modified_timestamp': 1712756663, # 'modified_date': '20240410', # '_old_archive_ids': ['orftvthek 14210000'], }, 'params': { 'format': 'bestvideo', }, 'skip': 'Available until 2024-08-12', }, { 'url': 'https://on.orf.at/video/3220355', 'md5': '925a93b2b9a37da5c9b979d7cf71aa2e', 'info_dict': { 'id': '3220355', 'ext': 'mp4', 'duration': 445.04, 'thumbnail': 'https://api-tvthek.orf.at/assets/segments/0002/60/thumb_159573_segments_highlight_teaser.png', 'title': '50 Jahre Burgenland: Der Festumzug', 'description': r're:(?s)Aus allen Landesteilen zogen festlich geschmückte Wagen und Musikkapellen .{270} Jenakowitsch$', # 'media_type': 'episode', 'timestamp': 52916400, 'upload_date': '19710905', 'release_timestamp': 52916400, 'release_date': '19710905', # 'modified_timestamp': 1498536049, # 'modified_date': '20170627', # '_old_archive_ids': ['orftvthek 3220355'], }, }, { # Video with multiple segments 
selecting the second segment 'url': 'https://on.orf.at/video/14226549/15639808/jugendbande-einbrueche-aus-langeweile', 'md5': 'fc151bba8c05ea77ab5693617e4a33d3', 'info_dict': { 'id': '15639808', 'ext': 'mp4', 'duration': 97.707, 'thumbnail': 'https://api-tvthek.orf.at/assets/segments/0175/43/thumb_17442704_segments_highlight_teaser.jpg', 'title': 'Jugendbande: Einbrüche aus Langeweile', 'description': r're:Jugendbande: Einbrüche aus Langeweile \| Neuer Kinder- und .{259} Wanda$', # 'media_type': 'segment', 'timestamp': 1715792400, 'upload_date': '20240515', # 'modified_timestamp': 1715794394, # 'modified_date': '20240515', # '_old_archive_ids': ['orftvthek 15639808'], }, 'params': { 'noplaylist': True, 'format': 'bestvideo', }, 'skip': 'Available until 2024-06-14', }, { # Video with multiple segments and no combined version 'url': 'https://on.orf.at/video/14227864/formel-1-grosser-preis-von-monaco-2024', 'info_dict': { '_type': 'multi_video', 'id': '14227864', 'duration': 18410.52, 'thumbnail': 'https://api-tvthek.orf.at/assets/segments/0176/04/thumb_17503881_segments_highlight_teaser.jpg', 'title': 'Formel 1: Großer Preis von Monaco 2024', 'description': 'md5:aeeb010710ccf70ce28ccb4482243d4f', # 'media_type': 'episode', 'timestamp': 1716721200, 'upload_date': '20240526', 'release_timestamp': 1716721802, 'release_date': '20240526', # 'modified_timestamp': 1716884702, # 'modified_date': '20240528', }, 'playlist_count': 42, 'skip': 'Gone: Nicht mehr verfügbar', }, { # Video with multiple segments, but with combined version 'url': 'https://on.orf.at/video/14228172', 'info_dict': { 'id': '14228172', 'ext': 'mp4', 'duration': 3294.878, 'thumbnail': 'https://api-tvthek.orf.at/assets/segments/0176/29/thumb_17528242_segments_highlight_teaser.jpg', 'title': 'Willkommen Österreich mit Stermann & Grissemann', 'description': r're:Zum Saisonfinale freuen sich die urlaubsreifen Gastgeber Stermann und .{1863} Geschichten\.$', # 'media_type': 'episode', 'timestamp': 1716926584, 
'upload_date': '20240528', 'release_timestamp': 1716919202, 'release_date': '20240528', # 'modified_timestamp': 1716968045, # 'modified_date': '20240529', # '_old_archive_ids': ['orftvthek 14228172'], }, 'params': { 'format': 'bestvideo', }, 'skip': 'Gone: Nicht mehr verfügbar', }] class ORFONLiveIE(ORFONBase): _ENC_PFX = '8876324jshjd7293ktd' _API_PATH = 'livestream' _VALID_URL = r'https?://on\.orf\.at/livestream/(?P<id>\d+)(?:/(?P<segment>\d+))?' _TESTS = [{ 'url': 'https://on.orf.at/livestream/14320204/pressekonferenz-neos-zu-aktuellen-entwicklungen', 'info_dict': { 'id': '14320204', 'ext': 'mp4', 'title': 'Pressekonferenz: Neos zu aktuellen Entwicklungen', 'description': r're:(?s)Neos-Chefin Beate Meinl-Reisinger informi.{598}ng\."', 'timestamp': 1716886335, 'upload_date': '20240528', # 'modified_timestamp': 1712756663, # 'modified_date': '20240410', # '_old_archive_ids': ['orftvthek 14210000'], }, 'params': { 'format': 'bestvideo', }, }] @classmethod def _parse_metadata(cls, api_json): return merge_dicts( super(ORFONLiveIE, cls)._parse_metadata(api_json), traverse_obj(api_json, { 'timestamp': ('updated_at', T(parse_iso8601)), 'release_timestamp': ('start', T(parse_iso8601)), 'is_live': True, }))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/hitbox.py
youtube_dl/extractor/hitbox.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( clean_html, parse_iso8601, float_or_none, int_or_none, compat_str, determine_ext, ) class HitboxIE(InfoExtractor): IE_NAME = 'hitbox' _VALID_URL = r'https?://(?:www\.)?(?:hitbox|smashcast)\.tv/(?:[^/]+/)*videos?/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'http://www.hitbox.tv/video/203213', 'info_dict': { 'id': '203213', 'title': 'hitbox @ gamescom, Sub Button Hype extended, Giveaway - hitbox News Update with Oxy', 'alt_title': 'hitboxlive - Aug 9th #6', 'description': '', 'ext': 'mp4', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 215.1666, 'resolution': 'HD 720p', 'uploader': 'hitboxlive', 'view_count': int, 'timestamp': 1407576133, 'upload_date': '20140809', 'categories': ['Live Show'], }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'https://www.smashcast.tv/hitboxlive/videos/203213', 'only_matching': True, }] def _extract_metadata(self, url, video_id): thumb_base = 'https://edge.sf.hitbox.tv' metadata = self._download_json( '%s/%s' % (url, video_id), video_id, 'Downloading metadata JSON') date = 'media_live_since' media_type = 'livestream' if metadata.get('media_type') == 'video': media_type = 'video' date = 'media_date_added' video_meta = metadata.get(media_type, [])[0] title = video_meta.get('media_status') alt_title = video_meta.get('media_title') description = clean_html( video_meta.get('media_description') or video_meta.get('media_description_md')) duration = float_or_none(video_meta.get('media_duration')) uploader = video_meta.get('media_user_name') views = int_or_none(video_meta.get('media_views')) timestamp = parse_iso8601(video_meta.get(date), ' ') categories = [video_meta.get('category_name')] thumbs = [{ 'url': thumb_base + video_meta.get('media_thumbnail'), 'width': 320, 'height': 180 }, { 'url': thumb_base + video_meta.get('media_thumbnail_large'), 'width': 768, 'height': 432 }] return { 'id': video_id, 
'title': title, 'alt_title': alt_title, 'description': description, 'ext': 'mp4', 'thumbnails': thumbs, 'duration': duration, 'uploader': uploader, 'view_count': views, 'timestamp': timestamp, 'categories': categories, } def _real_extract(self, url): video_id = self._match_id(url) player_config = self._download_json( 'https://www.smashcast.tv/api/player/config/video/%s' % video_id, video_id, 'Downloading video JSON') formats = [] for video in player_config['clip']['bitrates']: label = video.get('label') if label == 'Auto': continue video_url = video.get('url') if not video_url: continue bitrate = int_or_none(video.get('bitrate')) if determine_ext(video_url) == 'm3u8': if not video_url.startswith('http'): continue formats.append({ 'url': video_url, 'ext': 'mp4', 'tbr': bitrate, 'format_note': label, 'protocol': 'm3u8_native', }) else: formats.append({ 'url': video_url, 'tbr': bitrate, 'format_note': label, }) self._sort_formats(formats) metadata = self._extract_metadata( 'https://www.smashcast.tv/api/media/video', video_id) metadata['formats'] = formats return metadata class HitboxLiveIE(HitboxIE): IE_NAME = 'hitbox:live' _VALID_URL = r'https?://(?:www\.)?(?:hitbox|smashcast)\.tv/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'http://www.hitbox.tv/dimak', 'info_dict': { 'id': 'dimak', 'ext': 'mp4', 'description': 'md5:c9f80fa4410bc588d7faa40003fc7d0e', 'timestamp': int, 'upload_date': compat_str, 'title': compat_str, 'uploader': 'Dimak', }, 'params': { # live 'skip_download': True, }, }, { 'url': 'https://www.smashcast.tv/dimak', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if HitboxIE.suitable(url) else super(HitboxLiveIE, cls).suitable(url) def _real_extract(self, url): video_id = self._match_id(url) player_config = self._download_json( 'https://www.smashcast.tv/api/player/config/live/%s' % video_id, video_id) formats = [] cdns = player_config.get('cdns') servers = [] for cdn in cdns: # Subscribe URLs are not playable if 
cdn.get('rtmpSubscribe') is True: continue base_url = cdn.get('netConnectionUrl') host = re.search(r'.+\.([^\.]+\.[^\./]+)/.+', base_url).group(1) if base_url not in servers: servers.append(base_url) for stream in cdn.get('bitrates'): label = stream.get('label') if label == 'Auto': continue stream_url = stream.get('url') if not stream_url: continue bitrate = int_or_none(stream.get('bitrate')) if stream.get('provider') == 'hls' or determine_ext(stream_url) == 'm3u8': if not stream_url.startswith('http'): continue formats.append({ 'url': stream_url, 'ext': 'mp4', 'tbr': bitrate, 'format_note': label, 'rtmp_live': True, }) else: formats.append({ 'url': '%s/%s' % (base_url, stream_url), 'ext': 'mp4', 'tbr': bitrate, 'rtmp_live': True, 'format_note': host, 'page_url': url, 'player_url': 'http://www.hitbox.tv/static/player/flowplayer/flowplayer.commercial-3.2.16.swf', }) self._sort_formats(formats) metadata = self._extract_metadata( 'https://www.smashcast.tv/api/media/live', video_id) metadata['formats'] = formats metadata['is_live'] = True metadata['title'] = self._live_title(metadata.get('title')) return metadata
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/eagleplatform.py
youtube_dl/extractor/eagleplatform.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_HTTPError from ..utils import ( ExtractorError, int_or_none, unsmuggle_url, url_or_none, ) class EaglePlatformIE(InfoExtractor): _VALID_URL = r'''(?x) (?: eagleplatform:(?P<custom_host>[^/]+):| https?://(?P<host>.+?\.media\.eagleplatform\.com)/index/player\?.*\brecord_id= ) (?P<id>\d+) ''' _TESTS = [{ # http://lenta.ru/news/2015/03/06/navalny/ 'url': 'http://lentaru.media.eagleplatform.com/index/player?player=new&record_id=227304&player_template_id=5201', # Not checking MD5 as sometimes the direct HTTP link results in 404 and HLS is used 'info_dict': { 'id': '227304', 'ext': 'mp4', 'title': 'Навальный вышел на свободу', 'description': 'md5:d97861ac9ae77377f3f20eaf9d04b4f5', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 87, 'view_count': int, 'age_limit': 0, }, }, { # http://muz-tv.ru/play/7129/ # http://media.clipyou.ru/index/player?record_id=12820&width=730&height=415&autoplay=true 'url': 'eagleplatform:media.clipyou.ru:12820', 'md5': '358597369cf8ba56675c1df15e7af624', 'info_dict': { 'id': '12820', 'ext': 'mp4', 'title': "'O Sole Mio", 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 216, 'view_count': int, }, 'skip': 'Georestricted', }, { # referrer protected video (https://tvrain.ru/lite/teleshow/kak_vse_nachinalos/namin-418921/) 'url': 'eagleplatform:tvrainru.media.eagleplatform.com:582306', 'only_matching': True, }] @staticmethod def _extract_url(webpage): # Regular iframe embedding mobj = re.search( r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//.+?\.media\.eagleplatform\.com/index/player\?.+?)\1', webpage) if mobj is not None: return mobj.group('url') PLAYER_JS_RE = r''' <script[^>]+ src=(?P<qjs>["\'])(?:https?:)?//(?P<host>(?:(?!(?P=qjs)).)+\.media\.eagleplatform\.com)/player/player\.js(?P=qjs) .+? 
''' # "Basic usage" embedding (see http://dultonmedia.github.io/eplayer/) mobj = re.search( r'''(?xs) %s <div[^>]+ class=(?P<qclass>["\'])eagleplayer(?P=qclass)[^>]+ data-id=["\'](?P<id>\d+) ''' % PLAYER_JS_RE, webpage) if mobj is not None: return 'eagleplatform:%(host)s:%(id)s' % mobj.groupdict() # Generalization of "Javascript code usage", "Combined usage" and # "Usage without attaching to DOM" embeddings (see # http://dultonmedia.github.io/eplayer/) mobj = re.search( r'''(?xs) %s <script> .+? new\s+EaglePlayer\( (?:[^,]+\s*,\s*)? { .+? \bid\s*:\s*["\']?(?P<id>\d+) .+? } \s*\) .+? </script> ''' % PLAYER_JS_RE, webpage) if mobj is not None: return 'eagleplatform:%(host)s:%(id)s' % mobj.groupdict() @staticmethod def _handle_error(response): status = int_or_none(response.get('status', 200)) if status != 200: raise ExtractorError(' '.join(response['errors']), expected=True) def _download_json(self, url_or_request, video_id, *args, **kwargs): try: response = super(EaglePlatformIE, self)._download_json( url_or_request, video_id, *args, **kwargs) except ExtractorError as ee: if isinstance(ee.cause, compat_HTTPError): response = self._parse_json(ee.cause.read().decode('utf-8'), video_id) self._handle_error(response) raise return response def _get_video_url(self, url_or_request, video_id, note='Downloading JSON metadata'): return self._download_json(url_or_request, video_id, note)['data'][0] def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) mobj = re.match(self._VALID_URL, url) host, video_id = mobj.group('custom_host') or mobj.group('host'), mobj.group('id') headers = {} query = { 'id': video_id, } referrer = smuggled_data.get('referrer') if referrer: headers['Referer'] = referrer query['referrer'] = referrer player_data = self._download_json( 'http://%s/api/player_data' % host, video_id, headers=headers, query=query) media = player_data['data']['playlist']['viewports'][0]['medialist'][0] title = media['title'] description = 
media.get('description') thumbnail = self._proto_relative_url(media.get('snapshot'), 'http:') duration = int_or_none(media.get('duration')) view_count = int_or_none(media.get('views')) age_restriction = media.get('age_restriction') age_limit = None if age_restriction: age_limit = 0 if age_restriction == 'allow_all' else 18 secure_m3u8 = self._proto_relative_url(media['sources']['secure_m3u8']['auto'], 'http:') formats = [] m3u8_url = self._get_video_url(secure_m3u8, video_id, 'Downloading m3u8 JSON') m3u8_formats = self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False) formats.extend(m3u8_formats) m3u8_formats_dict = {} for f in m3u8_formats: if f.get('height') is not None: m3u8_formats_dict[f['height']] = f mp4_data = self._download_json( # Secure mp4 URL is constructed according to Player.prototype.mp4 from # http://lentaru.media.eagleplatform.com/player/player.js re.sub(r'm3u8|hlsvod|hls|f4m', 'mp4s', secure_m3u8), video_id, 'Downloading mp4 JSON', fatal=False) if mp4_data: for format_id, format_url in mp4_data.get('data', {}).items(): if not url_or_none(format_url): continue height = int_or_none(format_id) if height is not None and m3u8_formats_dict.get(height): f = m3u8_formats_dict[height].copy() f.update({ 'format_id': f['format_id'].replace('hls', 'http'), 'protocol': 'http', }) else: f = { 'format_id': 'http-%s' % format_id, 'height': int_or_none(format_id), } f['url'] = format_url formats.append(f) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'view_count': view_count, 'age_limit': age_limit, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tele13.py
youtube_dl/extractor/tele13.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from .youtube import YoutubeIE from ..utils import ( js_to_json, qualities, determine_ext, ) class Tele13IE(InfoExtractor): _VALID_URL = r'^https?://(?:www\.)?t13\.cl/videos(?:/[^/]+)+/(?P<id>[\w-]+)' _TESTS = [ { 'url': 'http://www.t13.cl/videos/actualidad/el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda', 'md5': '4cb1fa38adcad8fea88487a078831755', 'info_dict': { 'id': 'el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda', 'ext': 'mp4', 'title': 'El círculo de hierro de Michelle Bachelet en su regreso a La Moneda', }, 'params': { # HTTP Error 404: Not Found 'skip_download': True, }, }, { 'url': 'http://www.t13.cl/videos/mundo/tendencias/video-captan-misteriosa-bola-fuego-cielos-bangkok', 'md5': '867adf6a3b3fef932c68a71d70b70946', 'info_dict': { 'id': 'rOoKv2OMpOw', 'ext': 'mp4', 'title': 'Shooting star seen on 7-Sep-2015', 'description': 'md5:7292ff2a34b2f673da77da222ae77e1e', 'uploader': 'Porjai Jaturongkhakun', 'upload_date': '20150906', 'uploader_id': 'UCnLY_3ezwNcDSC_Wc6suZxw', }, 'add_ie': ['Youtube'], } ] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) setup_js = self._search_regex( r"(?s)jwplayer\('player-vivo'\).setup\((\{.*?\})\)", webpage, 'setup code') sources = self._parse_json(self._search_regex( r'sources\s*:\s*(\[[^\]]+\])', setup_js, 'sources'), display_id, js_to_json) preference = qualities(['Móvil', 'SD', 'HD']) formats = [] urls = [] for f in sources: format_url = f['file'] if format_url and format_url not in urls: ext = determine_ext(format_url) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( format_url, display_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) elif YoutubeIE.suitable(format_url): return self.url_result(format_url, 'Youtube') else: formats.append({ 'url': format_url, 'format_id': f.get('label'), 'preference': 
preference(f.get('label')), 'ext': ext, }) urls.append(format_url) self._sort_formats(formats) return { 'id': display_id, 'title': self._search_regex( r'title\s*:\s*"([^"]+)"', setup_js, 'title'), 'description': self._html_search_meta( 'description', webpage, 'description'), 'thumbnail': self._search_regex( r'image\s*:\s*"([^"]+)"', setup_js, 'thumbnail', default=None), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/adn.py
youtube_dl/extractor/adn.py
# coding: utf-8 from __future__ import unicode_literals import base64 import binascii import json import os import random from .common import InfoExtractor from ..aes import aes_cbc_decrypt from ..compat import ( compat_HTTPError, compat_b64decode, compat_ord, ) from ..utils import ( bytes_to_intlist, bytes_to_long, ExtractorError, float_or_none, int_or_none, intlist_to_bytes, long_to_bytes, pkcs1pad, strip_or_none, try_get, unified_strdate, urlencode_postdata, ) class ADNIE(InfoExtractor): IE_DESC = 'Animation Digital Network' _VALID_URL = r'https?://(?:www\.)?(?:animation|anime)digitalnetwork\.fr/video/[^/]+/(?P<id>\d+)' _TESTS = [{ 'url': 'https://animationdigitalnetwork.fr/video/fruits-basket/9841-episode-1-a-ce-soir', 'md5': '1c9ef066ceb302c86f80c2b371615261', 'info_dict': { 'id': '9841', 'ext': 'mp4', 'title': 'Fruits Basket - Episode 1', 'description': 'md5:14be2f72c3c96809b0ca424b0097d336', 'series': 'Fruits Basket', 'duration': 1437, 'release_date': '20190405', 'comment_count': int, 'average_rating': float, 'season_number': 1, 'episode': 'À ce soir !', 'episode_number': 1, }, 'skip': 'Only available in region (FR, ...)', }, { 'url': 'http://animedigitalnetwork.fr/video/blue-exorcist-kyoto-saga/7778-episode-1-debut-des-hostilites', 'only_matching': True, }] _NETRC_MACHINE = 'animationdigitalnetwork' _BASE = 'animationdigitalnetwork.fr' _API_BASE_URL = 'https://gw.api.' 
+ _BASE + '/' _PLAYER_BASE_URL = _API_BASE_URL + 'player/' _HEADERS = {} _LOGIN_ERR_MESSAGE = 'Unable to log in' _RSA_KEY = (0x9B42B08905199A5CCE2026274399CA560ECB209EE9878A708B1C0812E1BB8CB5D1FB7441861147C1A1F2F3A0476DD63A9CAC20D3E983613346850AA6CB38F16DC7D720FD7D86FC6E5B3D5BBC72E14CD0BF9E869F2CEA2CCAD648F1DCE38F1FF916CEFB2D339B64AA0264372344BC775E265E8A852F88144AB0BD9AA06C1A4ABB, 65537) _POS_ALIGN_MAP = { 'start': 1, 'end': 3, } _LINE_ALIGN_MAP = { 'middle': 8, 'end': 4, } @staticmethod def _ass_subtitles_timecode(seconds): return '%01d:%02d:%02d.%02d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 100) def _get_subtitles(self, sub_url, video_id): if not sub_url: return None enc_subtitles = self._download_webpage( sub_url, video_id, 'Downloading subtitles location', fatal=False) or '{}' subtitle_location = (self._parse_json(enc_subtitles, video_id, fatal=False) or {}).get('location') if subtitle_location: enc_subtitles = self._download_webpage( subtitle_location, video_id, 'Downloading subtitles data', fatal=False, headers={'Origin': 'https://' + self._BASE}) if not enc_subtitles: return None # http://animationdigitalnetwork.fr/components/com_vodvideo/videojs/adn-vjs.min.js dec_subtitles = intlist_to_bytes(aes_cbc_decrypt( bytes_to_intlist(compat_b64decode(enc_subtitles[24:])), bytes_to_intlist(binascii.unhexlify(self._K + '7fac1178830cfe0c')), bytes_to_intlist(compat_b64decode(enc_subtitles[:24])) )) subtitles_json = self._parse_json( dec_subtitles[:-compat_ord(dec_subtitles[-1])].decode(), None, fatal=False) if not subtitles_json: return None subtitles = {} for sub_lang, sub in subtitles_json.items(): ssa = '''[Script Info] ScriptType:V4.00 [V4 Styles] Format: Name,Fontname,Fontsize,PrimaryColour,SecondaryColour,TertiaryColour,BackColour,Bold,Italic,BorderStyle,Outline,Shadow,Alignment,MarginL,MarginR,MarginV,AlphaLevel,Encoding Style: Default,Arial,18,16777215,16777215,16777215,0,-1,0,1,1,0,2,20,20,20,0,0 [Events] Format: 
Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text''' for current in sub: start, end, text, line_align, position_align = ( float_or_none(current.get('startTime')), float_or_none(current.get('endTime')), current.get('text'), current.get('lineAlign'), current.get('positionAlign')) if start is None or end is None or text is None: continue alignment = self._POS_ALIGN_MAP.get(position_align, 2) + self._LINE_ALIGN_MAP.get(line_align, 0) ssa += os.linesep + 'Dialogue: Marked=0,%s,%s,Default,,0,0,0,,%s%s' % ( self._ass_subtitles_timecode(start), self._ass_subtitles_timecode(end), '{\\a%d}' % alignment if alignment != 2 else '', text.replace('\n', '\\N').replace('<i>', '{\\i1}').replace('</i>', '{\\i0}')) if sub_lang == 'vostf': sub_lang = 'fr' subtitles.setdefault(sub_lang, []).extend([{ 'ext': 'json', 'data': json.dumps(sub), }, { 'ext': 'ssa', 'data': ssa, }]) return subtitles def _real_initialize(self): username, password = self._get_login_info() if not username: return try: url = self._API_BASE_URL + 'authentication/login' access_token = (self._download_json( url, None, 'Logging in', self._LOGIN_ERR_MESSAGE, fatal=False, data=urlencode_postdata({ 'password': password, 'rememberMe': False, 'source': 'Web', 'username': username, })) or {}).get('accessToken') if access_token: self._HEADERS = {'authorization': 'Bearer ' + access_token} except ExtractorError as e: message = None if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401: resp = self._parse_json( self._webpage_read_content(e.cause, url, username), username, fatal=False) or {} message = resp.get('message') or resp.get('code') self.report_warning(message or self._LOGIN_ERR_MESSAGE) def _real_extract(self, url): video_id = self._match_id(url) video_base_url = self._PLAYER_BASE_URL + 'video/%s/' % video_id player = self._download_json( video_base_url + 'configuration', video_id, 'Downloading player config JSON metadata', headers=self._HEADERS)['player'] options = player['options'] user = 
options['user'] if not user.get('hasAccess'): self.raise_login_required() token = self._download_json( user.get('refreshTokenUrl') or (self._PLAYER_BASE_URL + 'refresh/token'), video_id, 'Downloading access token', headers={ 'x-player-refresh-token': user['refreshToken'] }, data=b'')['token'] links_url = try_get(options, lambda x: x['video']['url']) or (video_base_url + 'link') self._K = ''.join([random.choice('0123456789abcdef') for _ in range(16)]) message = bytes_to_intlist(json.dumps({ 'k': self._K, 't': token, })) # Sometimes authentication fails for no good reason, retry with # a different random padding links_data = None for _ in range(3): padded_message = intlist_to_bytes(pkcs1pad(message, 128)) n, e = self._RSA_KEY encrypted_message = long_to_bytes(pow(bytes_to_long(padded_message), e, n)) authorization = base64.b64encode(encrypted_message).decode() try: links_data = self._download_json( links_url, video_id, 'Downloading links JSON metadata', headers={ 'X-Player-Token': authorization }, query={ 'freeWithAds': 'true', 'adaptive': 'false', 'withMetadata': 'true', 'source': 'Web' }) break except ExtractorError as e: if not isinstance(e.cause, compat_HTTPError): raise e if e.cause.code == 401: # This usually goes away with a different random pkcs1pad, so retry continue error = self._parse_json( self._webpage_read_content(e.cause, links_url, video_id), video_id, fatal=False) or {} message = error.get('message') if e.cause.code == 403 and error.get('code') == 'player-bad-geolocation-country': self.raise_geo_restricted(msg=message) raise ExtractorError(message) else: raise ExtractorError('Giving up retrying') links = links_data.get('links') or {} metas = links_data.get('metadata') or {} sub_url = (links.get('subtitles') or {}).get('all') video_info = links_data.get('video') or {} title = metas['title'] formats = [] for format_id, qualities in (links.get('streaming') or {}).items(): if not isinstance(qualities, dict): continue for quality, load_balancer_url in 
qualities.items(): load_balancer_data = self._download_json( load_balancer_url, video_id, 'Downloading %s %s JSON metadata' % (format_id, quality), fatal=False) or {} m3u8_url = load_balancer_data.get('location') if not m3u8_url: continue m3u8_formats = self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id=format_id, fatal=False) if format_id == 'vf': for f in m3u8_formats: f['language'] = 'fr' formats.extend(m3u8_formats) self._sort_formats(formats) video = (self._download_json( self._API_BASE_URL + 'video/%s' % video_id, video_id, 'Downloading additional video metadata', fatal=False) or {}).get('video') or {} show = video.get('show') or {} return { 'id': video_id, 'title': title, 'description': strip_or_none(metas.get('summary') or video.get('summary')), 'thumbnail': video_info.get('image') or player.get('image'), 'formats': formats, 'subtitles': self.extract_subtitles(sub_url, video_id), 'episode': metas.get('subtitle') or video.get('name'), 'episode_number': int_or_none(video.get('shortNumber')), 'series': show.get('title'), 'season_number': int_or_none(video.get('season')), 'duration': int_or_none(video_info.get('duration') or video.get('duration')), 'release_date': unified_strdate(video.get('releaseDate')), 'average_rating': float_or_none(video.get('rating') or metas.get('rating')), 'comment_count': int_or_none(video.get('commentsCount')), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/rtlnl.py
youtube_dl/extractor/rtlnl.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( int_or_none, parse_duration, ) class RtlNlIE(InfoExtractor): IE_NAME = 'rtl.nl' IE_DESC = 'rtl.nl and rtlxl.nl' _VALID_URL = r'''(?x) https?://(?:(?:www|static)\.)? (?: rtlxl\.nl/(?:[^\#]*\#!|programma)/[^/]+/| rtl\.nl/(?:(?:system/videoplayer/(?:[^/]+/)+(?:video_)?embed\.html|embed)\b.+?\buuid=|video/)| embed\.rtl\.nl/\#uuid= ) (?P<id>[0-9a-f-]+)''' _TESTS = [{ # new URL schema 'url': 'https://www.rtlxl.nl/programma/rtl-nieuws/0bd1384d-d970-3086-98bb-5c104e10c26f', 'md5': '490428f1187b60d714f34e1f2e3af0b6', 'info_dict': { 'id': '0bd1384d-d970-3086-98bb-5c104e10c26f', 'ext': 'mp4', 'title': 'RTL Nieuws', 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e', 'timestamp': 1593293400, 'upload_date': '20200627', 'duration': 661.08, }, }, { # old URL schema 'url': 'http://www.rtlxl.nl/#!/rtl-nieuws-132237/82b1aad1-4a14-3d7b-b554-b0aed1b2c416', 'md5': '473d1946c1fdd050b2c0161a4b13c373', 'info_dict': { 'id': '82b1aad1-4a14-3d7b-b554-b0aed1b2c416', 'ext': 'mp4', 'title': 'RTL Nieuws', 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e', 'timestamp': 1461951000, 'upload_date': '20160429', 'duration': 1167.96, }, 'skip': '404', }, { # best format available a3t 'url': 'http://www.rtl.nl/system/videoplayer/derden/rtlnieuws/video_embed.html#uuid=84ae5571-ac25-4225-ae0c-ef8d9efb2aed/autoplay=false', 'md5': 'dea7474214af1271d91ef332fb8be7ea', 'info_dict': { 'id': '84ae5571-ac25-4225-ae0c-ef8d9efb2aed', 'ext': 'mp4', 'timestamp': 1424039400, 'title': 'RTL Nieuws - Nieuwe beelden Kopenhagen: chaos direct na aanslag', 'thumbnail': r're:^https?://screenshots\.rtl\.nl/(?:[^/]+/)*sz=[0-9]+x[0-9]+/uuid=84ae5571-ac25-4225-ae0c-ef8d9efb2aed$', 'upload_date': '20150215', 'description': 'Er zijn nieuwe beelden vrijgegeven die vlak na de aanslag in Kopenhagen zijn gemaakt. 
Op de video is goed te zien hoe omstanders zich bekommeren om één van de slachtoffers, terwijl de eerste agenten ter plaatse komen.', } }, { # empty synopsis and missing episodes (see https://github.com/ytdl-org/youtube-dl/issues/6275) # best format available nettv 'url': 'http://www.rtl.nl/system/videoplayer/derden/rtlnieuws/video_embed.html#uuid=f536aac0-1dc3-4314-920e-3bd1c5b3811a/autoplay=false', 'info_dict': { 'id': 'f536aac0-1dc3-4314-920e-3bd1c5b3811a', 'ext': 'mp4', 'title': 'RTL Nieuws - Meer beelden van overval juwelier', 'thumbnail': r're:^https?://screenshots\.rtl\.nl/(?:[^/]+/)*sz=[0-9]+x[0-9]+/uuid=f536aac0-1dc3-4314-920e-3bd1c5b3811a$', 'timestamp': 1437233400, 'upload_date': '20150718', 'duration': 30.474, }, 'params': { 'skip_download': True, }, }, { # encrypted m3u8 streams, georestricted 'url': 'http://www.rtlxl.nl/#!/afl-2-257632/52a74543-c504-4cde-8aa8-ec66fe8d68a7', 'only_matching': True, }, { 'url': 'http://www.rtl.nl/system/videoplayer/derden/embed.html#!/uuid=bb0353b0-d6a4-1dad-90e9-18fe75b8d1f0', 'only_matching': True, }, { 'url': 'http://rtlxl.nl/?_ga=1.204735956.572365465.1466978370#!/rtl-nieuws-132237/3c487912-023b-49ac-903e-2c5d79f8410f', 'only_matching': True, }, { 'url': 'https://www.rtl.nl/video/c603c9c2-601d-4b5e-8175-64f1e942dc7d/', 'only_matching': True, }, { 'url': 'https://static.rtl.nl/embed/?uuid=1a2970fc-5c0b-43ff-9fdc-927e39e6d1bc&autoplay=false&publicatiepunt=rtlnieuwsnl', 'only_matching': True, }, { # new embed URL schema 'url': 'https://embed.rtl.nl/#uuid=84ae5571-ac25-4225-ae0c-ef8d9efb2aed/autoplay=false', 'only_matching': True, }] def _real_extract(self, url): uuid = self._match_id(url) info = self._download_json( 'http://www.rtl.nl/system/s4m/vfd/version=2/uuid=%s/fmt=adaptive/' % uuid, uuid) material = info['material'][0] title = info['abstracts'][0]['name'] subtitle = material.get('title') if subtitle: title += ' - %s' % subtitle description = material.get('synopsis') meta = info.get('meta', {}) videopath = 
material['videopath'] m3u8_url = meta.get('videohost', 'http://manifest.us.rtl.nl') + videopath formats = self._extract_m3u8_formats( m3u8_url, uuid, 'mp4', m3u8_id='hls', fatal=False) self._sort_formats(formats) thumbnails = [] for p in ('poster_base_url', '"thumb_base_url"'): if not meta.get(p): continue thumbnails.append({ 'url': self._proto_relative_url(meta[p] + uuid), 'width': int_or_none(self._search_regex( r'/sz=([0-9]+)', meta[p], 'thumbnail width', fatal=False)), 'height': int_or_none(self._search_regex( r'/sz=[0-9]+x([0-9]+)', meta[p], 'thumbnail height', fatal=False)) }) return { 'id': uuid, 'title': title, 'formats': formats, 'timestamp': material['original_date'], 'description': description, 'duration': parse_duration(material.get('duration')), 'thumbnails': thumbnails, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/bilibili.py
youtube_dl/extractor/bilibili.py
# coding: utf-8 from __future__ import unicode_literals import hashlib import re from .common import InfoExtractor from ..compat import ( compat_parse_qs, compat_urlparse, ) from ..utils import ( ExtractorError, int_or_none, float_or_none, parse_iso8601, smuggle_url, str_or_none, strip_jsonp, unified_timestamp, unsmuggle_url, urlencode_postdata, ) class BiliBiliIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// (?:(?:www|bangumi)\.)? bilibili\.(?:tv|com)/ (?: (?: video/[aA][vV]| anime/(?P<anime_id>\d+)/play\# )(?P<id_bv>\d+)| video/[bB][vV](?P<id>[^/?#&]+) ) ''' _TESTS = [{ 'url': 'http://www.bilibili.tv/video/av1074402/', 'md5': '5f7d29e1a2872f3df0cf76b1f87d3788', 'info_dict': { 'id': '1074402', 'ext': 'flv', 'title': '【金坷垃】金泡沫', 'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923', 'duration': 308.067, 'timestamp': 1398012678, 'upload_date': '20140420', 'thumbnail': r're:^https?://.+\.jpg', 'uploader': '菊子桑', 'uploader_id': '156160', }, }, { # Tested in BiliBiliBangumiIE 'url': 'http://bangumi.bilibili.com/anime/1869/play#40062', 'only_matching': True, }, { 'url': 'http://bangumi.bilibili.com/anime/5802/play#100643', 'md5': '3f721ad1e75030cc06faf73587cfec57', 'info_dict': { 'id': '100643', 'ext': 'mp4', 'title': 'CHAOS;CHILD', 'description': '如果你是神明,并且能够让妄想成为现实。那你会进行怎么样的妄想?是淫靡的世界?独裁社会?毁灭性的制裁?还是……2015年,涩谷。从6年前发生的大灾害“涩谷地震”之后复兴了的这个街区里新设立的私立高中...', }, 'skip': 'Geo-restricted to China', }, { # Title with double quotes 'url': 'http://www.bilibili.com/video/av8903802/', 'info_dict': { 'id': '8903802', 'title': '阿滴英文|英文歌分享#6 "Closer', 'description': '滴妹今天唱Closer給你聽! 有史以来,被推最多次也是最久的歌曲,其实歌词跟我原本想像差蛮多的,不过还是好听! 
微博@阿滴英文', }, 'playlist': [{ 'info_dict': { 'id': '8903802_part1', 'ext': 'flv', 'title': '阿滴英文|英文歌分享#6 "Closer', 'description': 'md5:3b1b9e25b78da4ef87e9b548b88ee76a', 'uploader': '阿滴英文', 'uploader_id': '65880958', 'timestamp': 1488382634, 'upload_date': '20170301', }, 'params': { 'skip_download': True, # Test metadata only }, }, { 'info_dict': { 'id': '8903802_part2', 'ext': 'flv', 'title': '阿滴英文|英文歌分享#6 "Closer', 'description': 'md5:3b1b9e25b78da4ef87e9b548b88ee76a', 'uploader': '阿滴英文', 'uploader_id': '65880958', 'timestamp': 1488382634, 'upload_date': '20170301', }, 'params': { 'skip_download': True, # Test metadata only }, }] }, { # new BV video id format 'url': 'https://www.bilibili.com/video/BV1JE411F741', 'only_matching': True, }] _APP_KEY = 'iVGUTjsxvpLeuDCf' _BILIBILI_KEY = 'aHRmhWMLkdeMuILqORnYZocwMBpMEOdt' def _report_error(self, result): if 'message' in result: raise ExtractorError('%s said: %s' % (self.IE_NAME, result['message']), expected=True) elif 'code' in result: raise ExtractorError('%s returns error %d' % (self.IE_NAME, result['code']), expected=True) else: raise ExtractorError('Can\'t extract Bangumi episode ID') def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') or mobj.group('id_bv') anime_id = mobj.group('anime_id') webpage = self._download_webpage(url, video_id) if 'anime/' not in url: cid = self._search_regex( r'\bcid(?:["\']:|=)(\d+)', webpage, 'cid', default=None ) or compat_parse_qs(self._search_regex( [r'EmbedPlayer\([^)]+,\s*"([^"]+)"\)', r'EmbedPlayer\([^)]+,\s*\\"([^"]+)\\"\)', r'<iframe[^>]+src="https://secure\.bilibili\.com/secure,([^"]+)"'], webpage, 'player parameters'))['cid'][0] else: if 'no_bangumi_tip' not in smuggled_data: self.to_screen('Downloading episode %s. 
To download all videos in anime %s, re-run youtube-dl with %s' % ( video_id, anime_id, compat_urlparse.urljoin(url, '//bangumi.bilibili.com/anime/%s' % anime_id))) headers = { 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', 'Referer': url } headers.update(self.geo_verification_headers()) js = self._download_json( 'http://bangumi.bilibili.com/web_api/get_source', video_id, data=urlencode_postdata({'episode_id': video_id}), headers=headers) if 'result' not in js: self._report_error(js) cid = js['result']['cid'] headers = { 'Accept': 'application/json', 'Referer': url } headers.update(self.geo_verification_headers()) entries = [] RENDITIONS = ('qn=80&quality=80&type=', 'quality=2&type=mp4') for num, rendition in enumerate(RENDITIONS, start=1): payload = 'appkey=%s&cid=%s&otype=json&%s' % (self._APP_KEY, cid, rendition) sign = hashlib.md5((payload + self._BILIBILI_KEY).encode('utf-8')).hexdigest() video_info = self._download_json( 'http://interface.bilibili.com/v2/playurl?%s&sign=%s' % (payload, sign), video_id, note='Downloading video info page', headers=headers, fatal=num == len(RENDITIONS)) if not video_info: continue if 'durl' not in video_info: if num < len(RENDITIONS): continue self._report_error(video_info) for idx, durl in enumerate(video_info['durl']): formats = [{ 'url': durl['url'], 'filesize': int_or_none(durl['size']), }] for backup_url in durl.get('backup_url', []): formats.append({ 'url': backup_url, # backup URLs have lower priorities 'preference': -2 if 'hd.mp4' in backup_url else -3, }) for a_format in formats: a_format.setdefault('http_headers', {}).update({ 'Referer': url, }) self._sort_formats(formats) entries.append({ 'id': '%s_part%s' % (video_id, idx), 'duration': float_or_none(durl.get('length'), 1000), 'formats': formats, }) break title = self._html_search_regex( ('<h1[^>]+\btitle=(["\'])(?P<title>(?:(?!\1).)+)\1', '(?s)<h1[^>]*>(?P<title>.+?)</h1>'), webpage, 'title', group='title') description = 
self._html_search_meta('description', webpage) timestamp = unified_timestamp(self._html_search_regex( r'<time[^>]+datetime="([^"]+)"', webpage, 'upload time', default=None) or self._html_search_meta( 'uploadDate', webpage, 'timestamp', default=None)) thumbnail = self._html_search_meta(['og:image', 'thumbnailUrl'], webpage) # TODO 'view_count' requires deobfuscating Javascript info = { 'id': video_id, 'title': title, 'description': description, 'timestamp': timestamp, 'thumbnail': thumbnail, 'duration': float_or_none(video_info.get('timelength'), scale=1000), } uploader_mobj = re.search( r'<a[^>]+href="(?:https?:)?//space\.bilibili\.com/(?P<id>\d+)"[^>]*>(?P<name>[^<]+)', webpage) if uploader_mobj: info.update({ 'uploader': uploader_mobj.group('name').strip(), 'uploader_id': uploader_mobj.group('id'), }) if not info.get('uploader'): info['uploader'] = self._html_search_meta( 'author', webpage, 'uploader', default=None) for entry in entries: entry.update(info) if len(entries) == 1: return entries[0] else: for idx, entry in enumerate(entries): entry['id'] = '%s_part%d' % (video_id, (idx + 1)) return { '_type': 'multi_video', 'id': video_id, 'title': title, 'description': description, 'entries': entries, } class BiliBiliBangumiIE(InfoExtractor): _VALID_URL = r'https?://bangumi\.bilibili\.com/anime/(?P<id>\d+)' IE_NAME = 'bangumi.bilibili.com' IE_DESC = 'BiliBili番剧' _TESTS = [{ 'url': 'http://bangumi.bilibili.com/anime/1869', 'info_dict': { 'id': '1869', 'title': '混沌武士', 'description': 'md5:6a9622b911565794c11f25f81d6a97d2', }, 'playlist_count': 26, }, { 'url': 'http://bangumi.bilibili.com/anime/1869', 'info_dict': { 'id': '1869', 'title': '混沌武士', 'description': 'md5:6a9622b911565794c11f25f81d6a97d2', }, 'playlist': [{ 'md5': '91da8621454dd58316851c27c68b0c13', 'info_dict': { 'id': '40062', 'ext': 'mp4', 'title': '混沌武士', 'description': '故事发生在日本的江户时代。风是一个小酒馆的打工女。一日,酒馆里来了一群恶霸,虽然他们的举动令风十分不满,但是毕竟风只是一届女流,无法对他们采取什么行动,只能在心里嘟哝。这时,酒家里又进来了个“不良份子...', 'timestamp': 1414538739, 
'upload_date': '20141028', 'episode': '疾风怒涛 Tempestuous Temperaments', 'episode_number': 1, }, }], 'params': { 'playlist_items': '1', }, }] @classmethod def suitable(cls, url): return False if BiliBiliIE.suitable(url) else super(BiliBiliBangumiIE, cls).suitable(url) def _real_extract(self, url): bangumi_id = self._match_id(url) # Sometimes this API returns a JSONP response season_info = self._download_json( 'http://bangumi.bilibili.com/jsonp/seasoninfo/%s.ver' % bangumi_id, bangumi_id, transform_source=strip_jsonp)['result'] entries = [{ '_type': 'url_transparent', 'url': smuggle_url(episode['webplay_url'], {'no_bangumi_tip': 1}), 'ie_key': BiliBiliIE.ie_key(), 'timestamp': parse_iso8601(episode.get('update_time'), delimiter=' '), 'episode': episode.get('index_title'), 'episode_number': int_or_none(episode.get('index')), } for episode in season_info['episodes']] entries = sorted(entries, key=lambda entry: entry.get('episode_number')) return self.playlist_result( entries, bangumi_id, season_info.get('bangumi_title'), season_info.get('evaluate')) class BilibiliAudioBaseIE(InfoExtractor): def _call_api(self, path, sid, query=None): if not query: query = {'sid': sid} return self._download_json( 'https://www.bilibili.com/audio/music-service-c/web/' + path, sid, query=query)['data'] class BilibiliAudioIE(BilibiliAudioBaseIE): _VALID_URL = r'https?://(?:www\.)?bilibili\.com/audio/au(?P<id>\d+)' _TEST = { 'url': 'https://www.bilibili.com/audio/au1003142', 'md5': 'fec4987014ec94ef9e666d4d158ad03b', 'info_dict': { 'id': '1003142', 'ext': 'm4a', 'title': '【tsukimi】YELLOW / 神山羊', 'artist': 'tsukimi', 'comment_count': int, 'description': 'YELLOW的mp3版!', 'duration': 183, 'subtitles': { 'origin': [{ 'ext': 'lrc', }], }, 'thumbnail': r're:^https?://.+\.jpg', 'timestamp': 1564836614, 'upload_date': '20190803', 'uploader': 'tsukimi-つきみぐー', 'view_count': int, }, } def _real_extract(self, url): au_id = self._match_id(url) play_data = self._call_api('url', au_id) formats = [{ 'url': 
play_data['cdns'][0], 'filesize': int_or_none(play_data.get('size')), }] for a_format in formats: a_format.setdefault('http_headers', {}).update({ 'Referer': url, }) song = self._call_api('song/info', au_id) title = song['title'] statistic = song.get('statistic') or {} subtitles = None lyric = song.get('lyric') if lyric: subtitles = { 'origin': [{ 'url': lyric, }] } return { 'id': au_id, 'title': title, 'formats': formats, 'artist': song.get('author'), 'comment_count': int_or_none(statistic.get('comment')), 'description': song.get('intro'), 'duration': int_or_none(song.get('duration')), 'subtitles': subtitles, 'thumbnail': song.get('cover'), 'timestamp': int_or_none(song.get('passtime')), 'uploader': song.get('uname'), 'view_count': int_or_none(statistic.get('play')), } class BilibiliAudioAlbumIE(BilibiliAudioBaseIE): _VALID_URL = r'https?://(?:www\.)?bilibili\.com/audio/am(?P<id>\d+)' _TEST = { 'url': 'https://www.bilibili.com/audio/am10624', 'info_dict': { 'id': '10624', 'title': '每日新曲推荐(每日11:00更新)', 'description': '每天11:00更新,为你推送最新音乐', }, 'playlist_count': 19, } def _real_extract(self, url): am_id = self._match_id(url) songs = self._call_api( 'song/of-menu', am_id, {'sid': am_id, 'pn': 1, 'ps': 100})['data'] entries = [] for song in songs: sid = str_or_none(song.get('id')) if not sid: continue entries.append(self.url_result( 'https://www.bilibili.com/audio/au' + sid, BilibiliAudioIE.ie_key(), sid)) if entries: album_data = self._call_api('menu/info', am_id) or {} album_title = album_data.get('title') if album_title: for entry in entries: entry['album'] = album_title return self.playlist_result( entries, am_id, album_title, album_data.get('intro')) return self.playlist_result(entries, am_id) class BiliBiliPlayerIE(InfoExtractor): _VALID_URL = r'https?://player\.bilibili\.com/player\.html\?.*?\baid=(?P<id>\d+)' _TEST = { 'url': 'http://player.bilibili.com/player.html?aid=92494333&cid=157926707&page=1', 'only_matching': True, } def _real_extract(self, url): 
video_id = self._match_id(url) return self.url_result( 'http://www.bilibili.tv/video/av%s/' % video_id, ie=BiliBiliIE.ie_key(), video_id=video_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/box.py
youtube_dl/extractor/box.py
# coding: utf-8 from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..utils import ( determine_ext, parse_iso8601, # try_get, update_url_query, ) class BoxIE(InfoExtractor): _VALID_URL = r'https?://(?:[^.]+\.)?app\.box\.com/s/(?P<shared_name>[^/]+)/file/(?P<id>\d+)' _TEST = { 'url': 'https://mlssoccer.app.box.com/s/0evd2o3e08l60lr4ygukepvnkord1o1x/file/510727257538', 'md5': '1f81b2fd3960f38a40a3b8823e5fcd43', 'info_dict': { 'id': '510727257538', 'ext': 'mp4', 'title': 'Garber St. Louis will be 28th MLS team +scarving.mp4', 'uploader': 'MLS Video', 'timestamp': 1566320259, 'upload_date': '20190820', 'uploader_id': '235196876', } } def _real_extract(self, url): shared_name, file_id = re.match(self._VALID_URL, url).groups() webpage = self._download_webpage(url, file_id) request_token = self._parse_json(self._search_regex( r'Box\.config\s*=\s*({.+?});', webpage, 'Box config'), file_id)['requestToken'] access_token = self._download_json( 'https://app.box.com/app-api/enduserapp/elements/tokens', file_id, 'Downloading token JSON metadata', data=json.dumps({'fileIDs': [file_id]}).encode(), headers={ 'Content-Type': 'application/json', 'X-Request-Token': request_token, 'X-Box-EndUser-API': 'sharedName=' + shared_name, })[file_id]['read'] shared_link = 'https://app.box.com/s/' + shared_name f = self._download_json( 'https://api.box.com/2.0/files/' + file_id, file_id, 'Downloading file JSON metadata', headers={ 'Authorization': 'Bearer ' + access_token, 'BoxApi': 'shared_link=' + shared_link, 'X-Rep-Hints': '[dash]', # TODO: extract `hls` formats }, query={ 'fields': 'authenticated_download_url,created_at,created_by,description,extension,is_download_available,name,representations,size' }) title = f['name'] query = { 'access_token': access_token, 'shared_link': shared_link } formats = [] # for entry in (try_get(f, lambda x: x['representations']['entries'], list) or []): # entry_url_template = try_get( # entry, lambda x: 
x['content']['url_template']) # if not entry_url_template: # continue # representation = entry.get('representation') # if representation == 'dash': # TODO: append query to every fragment URL # formats.extend(self._extract_mpd_formats( # entry_url_template.replace('{+asset_path}', 'manifest.mpd'), # file_id, query=query)) authenticated_download_url = f.get('authenticated_download_url') if authenticated_download_url and f.get('is_download_available'): formats.append({ 'ext': f.get('extension') or determine_ext(title), 'filesize': f.get('size'), 'format_id': 'download', 'url': update_url_query(authenticated_download_url, query), }) self._sort_formats(formats) creator = f.get('created_by') or {} return { 'id': file_id, 'title': title, 'formats': formats, 'description': f.get('description') or None, 'uploader': creator.get('name'), 'timestamp': parse_iso8601(f.get('created_at')), 'uploader_id': creator.get('id'), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/yinyuetai.py
youtube_dl/extractor/yinyuetai.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ExtractorError class YinYueTaiIE(InfoExtractor): IE_NAME = 'yinyuetai:video' IE_DESC = '音悦Tai' _VALID_URL = r'https?://v\.yinyuetai\.com/video(?:/h5)?/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'http://v.yinyuetai.com/video/2322376', 'md5': '6e3abe28d38e3a54b591f9f040595ce0', 'info_dict': { 'id': '2322376', 'ext': 'mp4', 'title': '少女时代_PARTY_Music Video Teaser', 'creator': '少女时代', 'duration': 25, 'thumbnail': r're:^https?://.*\.jpg$', }, }, { 'url': 'http://v.yinyuetai.com/video/h5/2322376', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) info = self._download_json( 'http://ext.yinyuetai.com/main/get-h-mv-info?json=true&videoId=%s' % video_id, video_id, 'Downloading mv info')['videoInfo']['coreVideoInfo'] if info['error']: raise ExtractorError(info['errorMsg'], expected=True) formats = [{ 'url': format_info['videoUrl'], 'format_id': format_info['qualityLevel'], 'format': format_info.get('qualityLevelName'), 'filesize': format_info.get('fileSize'), # though URLs ends with .flv, the downloaded files are in fact mp4 'ext': 'mp4', 'tbr': format_info.get('bitrate'), } for format_info in info['videoUrlModels']] self._sort_formats(formats) return { 'id': video_id, 'title': info['videoName'], 'thumbnail': info.get('bigHeadImage'), 'creator': info.get('artistNames'), 'duration': info.get('duration'), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/screencast.py
youtube_dl/extractor/screencast.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_parse_qs, compat_urllib_request, ) from ..utils import ( ExtractorError, ) class ScreencastIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?screencast\.com/t/(?P<id>[a-zA-Z0-9]+)' _TESTS = [{ 'url': 'http://www.screencast.com/t/3ZEjQXlT', 'md5': '917df1c13798a3e96211dd1561fded83', 'info_dict': { 'id': '3ZEjQXlT', 'ext': 'm4v', 'title': 'Color Measurement with Ocean Optics Spectrometers', 'description': 'md5:240369cde69d8bed61349a199c5fb153', 'thumbnail': r're:^https?://.*\.(?:gif|jpg)$', } }, { 'url': 'http://www.screencast.com/t/V2uXehPJa1ZI', 'md5': 'e8e4b375a7660a9e7e35c33973410d34', 'info_dict': { 'id': 'V2uXehPJa1ZI', 'ext': 'mov', 'title': 'The Amadeus Spectrometer', 'description': 're:^In this video, our friends at.*To learn more about Amadeus, visit', 'thumbnail': r're:^https?://.*\.(?:gif|jpg)$', } }, { 'url': 'http://www.screencast.com/t/aAB3iowa', 'md5': 'dedb2734ed00c9755761ccaee88527cd', 'info_dict': { 'id': 'aAB3iowa', 'ext': 'mp4', 'title': 'Google Earth Export', 'description': 'Provides a demo of a CommunityViz export to Google Earth, one of the 3D viewing options.', 'thumbnail': r're:^https?://.*\.(?:gif|jpg)$', } }, { 'url': 'http://www.screencast.com/t/X3ddTrYh', 'md5': '669ee55ff9c51988b4ebc0877cc8b159', 'info_dict': { 'id': 'X3ddTrYh', 'ext': 'wmv', 'title': 'Toolkit 6 User Group Webinar (2014-03-04) - Default Judgment and First Impression', 'description': 'md5:7b9f393bc92af02326a5c5889639eab0', 'thumbnail': r're:^https?://.*\.(?:gif|jpg)$', } }, { 'url': 'http://screencast.com/t/aAB3iowa', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) video_url = self._html_search_regex( r'<embed name="Video".*?src="([^"]+)"', webpage, 'QuickTime embed', default=None) if video_url is None: flash_vars_s = self._html_search_regex( r'<param 
name="flashVars" value="([^"]+)"', webpage, 'flash vars', default=None) if not flash_vars_s: flash_vars_s = self._html_search_regex( r'<param name="initParams" value="([^"]+)"', webpage, 'flash vars', default=None) if flash_vars_s: flash_vars_s = flash_vars_s.replace(',', '&') if flash_vars_s: flash_vars = compat_parse_qs(flash_vars_s) video_url_raw = compat_urllib_request.quote( flash_vars['content'][0]) video_url = video_url_raw.replace('http%3A', 'http:') if video_url is None: video_meta = self._html_search_meta( 'og:video', webpage, default=None) if video_meta: video_url = self._search_regex( r'src=(.*?)(?:$|&)', video_meta, 'meta tag video URL', default=None) if video_url is None: video_url = self._html_search_regex( r'MediaContentUrl["\']\s*:(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 'video url', default=None, group='url') if video_url is None: video_url = self._html_search_meta( 'og:video', webpage, default=None) if video_url is None: raise ExtractorError('Cannot find video') title = self._og_search_title(webpage, default=None) if title is None: title = self._html_search_regex( [r'<b>Title:</b> ([^<]+)</div>', r'class="tabSeperator">></span><span class="tabText">(.+?)<', r'<title>([^<]+)</title>'], webpage, 'title') thumbnail = self._og_search_thumbnail(webpage) description = self._og_search_description(webpage, default=None) if description is None: description = self._html_search_meta('description', webpage) return { 'id': video_id, 'url': video_url, 'title': title, 'description': description, 'thumbnail': thumbnail, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/hentaistigma.py
youtube_dl/extractor/hentaistigma.py
from __future__ import unicode_literals from .common import InfoExtractor class HentaiStigmaIE(InfoExtractor): _VALID_URL = r'^https?://hentai\.animestigma\.com/(?P<id>[^/]+)' _TEST = { 'url': 'http://hentai.animestigma.com/inyouchuu-etsu-bonus/', 'md5': '4e3d07422a68a4cc363d8f57c8bf0d23', 'info_dict': { 'id': 'inyouchuu-etsu-bonus', 'ext': 'mp4', 'title': 'Inyouchuu Etsu Bonus', 'age_limit': 18, } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._html_search_regex( r'<h2[^>]+class="posttitle"[^>]*><a[^>]*>([^<]+)</a>', webpage, 'title') wrap_url = self._html_search_regex( r'<iframe[^>]+src="([^"]+mp4)"', webpage, 'wrapper url') wrap_webpage = self._download_webpage(wrap_url, video_id) video_url = self._html_search_regex( r'file\s*:\s*"([^"]+)"', wrap_webpage, 'video url') return { 'id': video_id, 'url': video_url, 'title': title, 'age_limit': 18, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/bigo.py
youtube_dl/extractor/bigo.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ExtractorError, urlencode_postdata class BigoIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?bigo\.tv/(?:[a-z]{2,}/)?(?P<id>[^/]+)' _TESTS = [{ 'url': 'https://www.bigo.tv/ja/221338632', 'info_dict': { 'id': '6576287577575737440', 'title': '土よ〜💁‍♂️ 休憩室/REST room', 'thumbnail': r're:https?://.+', 'uploader': '✨Shin💫', 'uploader_id': '221338632', 'is_live': True, }, 'skip': 'livestream', }, { 'url': 'https://www.bigo.tv/th/Tarlerm1304', 'only_matching': True, }, { 'url': 'https://bigo.tv/115976881', 'only_matching': True, }] def _real_extract(self, url): user_id = self._match_id(url) info_raw = self._download_json( 'https://bigo.tv/studio/getInternalStudioInfo', user_id, data=urlencode_postdata({'siteId': user_id})) if not isinstance(info_raw, dict): raise ExtractorError('Received invalid JSON data') if info_raw.get('code'): raise ExtractorError( 'Bigo says: %s (code %s)' % (info_raw.get('msg'), info_raw.get('code')), expected=True) info = info_raw.get('data') or {} if not info.get('alive'): raise ExtractorError('This user is offline.', expected=True) return { 'id': info.get('roomId') or user_id, 'title': info.get('roomTopic') or info.get('nick_name') or user_id, 'formats': [{ 'url': info.get('hls_src'), 'ext': 'mp4', 'protocol': 'm3u8', }], 'thumbnail': info.get('snapshot'), 'uploader': info.get('nick_name'), 'uploader_id': user_id, 'is_live': True, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tonline.py
youtube_dl/extractor/tonline.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import int_or_none class TOnlineIE(InfoExtractor): IE_NAME = 't-online.de' _VALID_URL = r'https?://(?:www\.)?t-online\.de/tv/(?:[^/]+/)*id_(?P<id>\d+)' _TEST = { 'url': 'http://www.t-online.de/tv/sport/fussball/id_79166266/drittes-remis-zidane-es-muss-etwas-passieren-.html', 'md5': '7d94dbdde5f9d77c5accc73c39632c29', 'info_dict': { 'id': '79166266', 'ext': 'mp4', 'title': 'Drittes Remis! Zidane: "Es muss etwas passieren"', 'description': 'Es läuft nicht rund bei Real Madrid. Das 1:1 gegen den SD Eibar war das dritte Unentschieden in Folge in der Liga.', } } def _real_extract(self, url): video_id = self._match_id(url) video_data = self._download_json( 'http://www.t-online.de/tv/id_%s/tid_json_video' % video_id, video_id) title = video_data['subtitle'] formats = [] for asset in video_data.get('assets', []): asset_source = asset.get('source') or asset.get('source2') if not asset_source: continue formats_id = [] for field_key in ('type', 'profile'): field_value = asset.get(field_key) if field_value: formats_id.append(field_value) formats.append({ 'format_id': '-'.join(formats_id), 'url': asset_source, }) thumbnails = [] for image in video_data.get('images', []): image_source = image.get('source') if not image_source: continue thumbnails.append({ 'url': image_source, }) return { 'id': video_id, 'title': title, 'description': video_data.get('description'), 'duration': int_or_none(video_data.get('duration')), 'thumbnails': thumbnails, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/nextmedia.py
youtube_dl/extractor/nextmedia.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_urlparse from ..utils import ( clean_html, get_element_by_class, int_or_none, parse_iso8601, remove_start, unified_timestamp, ) class NextMediaIE(InfoExtractor): IE_DESC = '蘋果日報' _VALID_URL = r'https?://hk\.apple\.nextmedia\.com/[^/]+/[^/]+/(?P<date>\d+)/(?P<id>\d+)' _TESTS = [{ 'url': 'http://hk.apple.nextmedia.com/realtime/news/20141108/53109199', 'md5': 'dff9fad7009311c421176d1ac90bfe4f', 'info_dict': { 'id': '53109199', 'ext': 'mp4', 'title': '【佔領金鐘】50外國領事議員撐場 讚學生勇敢香港有希望', 'thumbnail': r're:^https?://.*\.jpg$', 'description': 'md5:28222b9912b6665a21011b034c70fcc7', 'timestamp': 1415456273, 'upload_date': '20141108', } }] _URL_PATTERN = r'\{ url: \'(.+)\' \}' def _real_extract(self, url): news_id = self._match_id(url) page = self._download_webpage(url, news_id) return self._extract_from_nextmedia_page(news_id, url, page) def _extract_from_nextmedia_page(self, news_id, url, page): redirection_url = self._search_regex( r'window\.location\.href\s*=\s*([\'"])(?P<url>(?!\1).+)\1', page, 'redirection URL', default=None, group='url') if redirection_url: return self.url_result(compat_urlparse.urljoin(url, redirection_url)) title = self._fetch_title(page) video_url = self._search_regex(self._URL_PATTERN, page, 'video url') attrs = { 'id': news_id, 'title': title, 'url': video_url, # ext can be inferred from url 'thumbnail': self._fetch_thumbnail(page), 'description': self._fetch_description(page), } timestamp = self._fetch_timestamp(page) if timestamp: attrs['timestamp'] = timestamp else: attrs['upload_date'] = self._fetch_upload_date(url) return attrs def _fetch_title(self, page): return self._og_search_title(page) def _fetch_thumbnail(self, page): return self._og_search_thumbnail(page) def _fetch_timestamp(self, page): dateCreated = self._search_regex('"dateCreated":"([^"]+)"', page, 'created time') return parse_iso8601(dateCreated) def 
_fetch_upload_date(self, url): return self._search_regex(self._VALID_URL, url, 'upload date', group='date') def _fetch_description(self, page): return self._og_search_property('description', page) class NextMediaActionNewsIE(NextMediaIE): IE_DESC = '蘋果日報 - 動新聞' _VALID_URL = r'https?://hk\.dv\.nextmedia\.com/actionnews/[^/]+/(?P<date>\d+)/(?P<id>\d+)/\d+' _TESTS = [{ 'url': 'http://hk.dv.nextmedia.com/actionnews/hit/20150121/19009428/20061460', 'md5': '05fce8ffeed7a5e00665d4b7cf0f9201', 'info_dict': { 'id': '19009428', 'ext': 'mp4', 'title': '【壹週刊】細10年男友偷食 50歲邵美琪再失戀', 'thumbnail': r're:^https?://.*\.jpg$', 'description': 'md5:cd802fad1f40fd9ea178c1e2af02d659', 'timestamp': 1421791200, 'upload_date': '20150120', } }] def _real_extract(self, url): news_id = self._match_id(url) actionnews_page = self._download_webpage(url, news_id) article_url = self._og_search_url(actionnews_page) article_page = self._download_webpage(article_url, news_id) return self._extract_from_nextmedia_page(news_id, url, article_page) class AppleDailyIE(NextMediaIE): IE_DESC = '臺灣蘋果日報' _VALID_URL = r'https?://(www|ent)\.appledaily\.com\.tw/[^/]+/[^/]+/[^/]+/(?P<date>\d+)/(?P<id>\d+)(/.*)?' 
_TESTS = [{ 'url': 'http://ent.appledaily.com.tw/enews/article/entertainment/20150128/36354694', 'md5': 'a843ab23d150977cc55ef94f1e2c1e4d', 'info_dict': { 'id': '36354694', 'ext': 'mp4', 'title': '周亭羽走過摩鐵陰霾2男陪吃 九把刀孤寒看醫生', 'thumbnail': r're:^https?://.*\.jpg$', 'description': 'md5:2acd430e59956dc47cd7f67cb3c003f4', 'upload_date': '20150128', } }, { 'url': 'http://www.appledaily.com.tw/realtimenews/article/strange/20150128/550549/%E4%B8%8D%E6%BB%BF%E8%A2%AB%E8%B8%A9%E8%85%B3%E3%80%80%E5%B1%B1%E6%9D%B1%E5%85%A9%E5%A4%A7%E5%AA%BD%E4%B8%80%E8%B7%AF%E6%89%93%E4%B8%8B%E8%BB%8A', 'md5': '86b4e9132d158279c7883822d94ccc49', 'info_dict': { 'id': '550549', 'ext': 'mp4', 'title': '不滿被踩腳 山東兩大媽一路打下車', 'thumbnail': r're:^https?://.*\.jpg$', 'description': 'md5:175b4260c1d7c085993474217e4ab1b4', 'upload_date': '20150128', } }, { 'url': 'http://www.appledaily.com.tw/animation/realtimenews/new/20150128/5003671', 'md5': '03df296d95dedc2d5886debbb80cb43f', 'info_dict': { 'id': '5003671', 'ext': 'mp4', 'title': '20正妹熱舞 《刀龍傳說Online》火辣上市', 'thumbnail': r're:^https?://.*\.jpg$', 'description': 'md5:23c0aac567dc08c9c16a3161a2c2e3cd', 'upload_date': '20150128', }, 'skip': 'redirect to http://www.appledaily.com.tw/animation/', }, { # No thumbnail 'url': 'http://www.appledaily.com.tw/animation/realtimenews/new/20150128/5003673/', 'md5': 'b06182cd386ea7bc6115ec7ff0f72aeb', 'info_dict': { 'id': '5003673', 'ext': 'mp4', 'title': '半夜尿尿 好像會看到___', 'description': 'md5:61d2da7fe117fede148706cdb85ac066', 'upload_date': '20150128', }, 'expected_warnings': [ 'video thumbnail', ], 'skip': 'redirect to http://www.appledaily.com.tw/animation/', }, { 'url': 'http://www.appledaily.com.tw/appledaily/article/supplement/20140417/35770334/', 'md5': 'eaa20e6b9df418c912d7f5dec2ba734d', 'info_dict': { 'id': '35770334', 'ext': 'mp4', 'title': '咖啡占卜測 XU裝熟指數', 'thumbnail': r're:^https?://.*\.jpg$', 'description': 'md5:7b859991a6a4fedbdf3dd3b66545c748', 'upload_date': '20140417', }, }, { 'url': 
'http://www.appledaily.com.tw/actionnews/appledaily/7/20161003/960588/', 'only_matching': True, }, { # Redirected from http://ent.appledaily.com.tw/enews/article/entertainment/20150128/36354694 'url': 'http://ent.appledaily.com.tw/section/article/headline/20150128/36354694', 'only_matching': True, }] _URL_PATTERN = r'\{url: \'(.+)\'\}' def _fetch_title(self, page): return (self._html_search_regex(r'<h1 id="h1">([^<>]+)</h1>', page, 'news title', default=None) or self._html_search_meta('description', page, 'news title')) def _fetch_thumbnail(self, page): return self._html_search_regex(r"setInitialImage\(\'([^']+)'\)", page, 'video thumbnail', fatal=False) def _fetch_timestamp(self, page): return None def _fetch_description(self, page): return self._html_search_meta('description', page, 'news description') class NextTVIE(InfoExtractor): IE_DESC = '壹電視' _VALID_URL = r'https?://(?:www\.)?nexttv\.com\.tw/(?:[^/]+/)+(?P<id>\d+)' _TEST = { 'url': 'http://www.nexttv.com.tw/news/realtime/politics/11779671', 'info_dict': { 'id': '11779671', 'ext': 'mp4', 'title': '「超收稅」近4千億! 藍議員籲發消費券', 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1484825400, 'upload_date': '20170119', 'view_count': int, }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._html_search_regex( r'<h1[^>]*>([^<]+)</h1>', webpage, 'title') data = self._hidden_inputs(webpage) video_url = data['ntt-vod-src-detailview'] date_str = get_element_by_class('date', webpage) timestamp = unified_timestamp(date_str + '+0800') if date_str else None view_count = int_or_none(remove_start( clean_html(get_element_by_class('click', webpage)), '點閱:')) return { 'id': video_id, 'title': title, 'url': video_url, 'thumbnail': data.get('ntt-vod-img-src'), 'timestamp': timestamp, 'view_count': view_count, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/lynda.py
youtube_dl/extractor/lynda.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_str, compat_urlparse, ) from ..utils import ( ExtractorError, int_or_none, urlencode_postdata, ) class LyndaBaseIE(InfoExtractor): _SIGNIN_URL = 'https://www.lynda.com/signin/lynda' _PASSWORD_URL = 'https://www.lynda.com/signin/password' _USER_URL = 'https://www.lynda.com/signin/user' _ACCOUNT_CREDENTIALS_HINT = 'Use --username and --password options to provide lynda.com account credentials.' _NETRC_MACHINE = 'lynda' def _real_initialize(self): self._login() @staticmethod def _check_error(json_string, key_or_keys): keys = [key_or_keys] if isinstance(key_or_keys, compat_str) else key_or_keys for key in keys: error = json_string.get(key) if error: raise ExtractorError('Unable to login: %s' % error, expected=True) def _login_step(self, form_html, fallback_action_url, extra_form_data, note, referrer_url): action_url = self._search_regex( r'<form[^>]+action=(["\'])(?P<url>.+?)\1', form_html, 'post url', default=fallback_action_url, group='url') if not action_url.startswith('http'): action_url = compat_urlparse.urljoin(self._SIGNIN_URL, action_url) form_data = self._hidden_inputs(form_html) form_data.update(extra_form_data) response = self._download_json( action_url, None, note, data=urlencode_postdata(form_data), headers={ 'Referer': referrer_url, 'X-Requested-With': 'XMLHttpRequest', }, expected_status=(418, 500, )) self._check_error(response, ('email', 'password', 'ErrorMessage')) return response, action_url def _login(self): username, password = self._get_login_info() if username is None: return # Step 1: download signin page signin_page = self._download_webpage( self._SIGNIN_URL, None, 'Downloading signin page') # Already logged in if any(re.search(p, signin_page) for p in ( r'isLoggedIn\s*:\s*true', r'logout\.aspx', r'>Log out<')): return # Step 2: submit email signin_form = self._search_regex( 
r'(?s)(<form[^>]+data-form-name=["\']signin["\'][^>]*>.+?</form>)', signin_page, 'signin form') signin_page, signin_url = self._login_step( signin_form, self._PASSWORD_URL, {'email': username}, 'Submitting email', self._SIGNIN_URL) # Step 3: submit password password_form = signin_page['body'] self._login_step( password_form, self._USER_URL, {'email': username, 'password': password}, 'Submitting password', signin_url) class LyndaIE(LyndaBaseIE): IE_NAME = 'lynda' IE_DESC = 'lynda.com videos' _VALID_URL = r'''(?x) https?:// (?:www\.)?(?:lynda\.com|educourse\.ga)/ (?: (?:[^/]+/){2,3}(?P<course_id>\d+)| player/embed )/ (?P<id>\d+) ''' _TIMECODE_REGEX = r'\[(?P<timecode>\d+:\d+:\d+[\.,]\d+)\]' _TESTS = [{ 'url': 'https://www.lynda.com/Bootstrap-tutorials/Using-exercise-files/110885/114408-4.html', # md5 is unstable 'info_dict': { 'id': '114408', 'ext': 'mp4', 'title': 'Using the exercise files', 'duration': 68 } }, { 'url': 'https://www.lynda.com/player/embed/133770?tr=foo=1;bar=g;fizz=rt&fs=0', 'only_matching': True, }, { 'url': 'https://educourse.ga/Bootstrap-tutorials/Using-exercise-files/110885/114408-4.html', 'only_matching': True, }, { 'url': 'https://www.lynda.com/de/Graphic-Design-tutorials/Willkommen-Grundlagen-guten-Gestaltung/393570/393572-4.html', 'only_matching': True, }, { # Status="NotFound", Message="Transcript not found" 'url': 'https://www.lynda.com/ASP-NET-tutorials/What-you-should-know/5034180/2811512-4.html', 'only_matching': True, }] def _raise_unavailable(self, video_id): self.raise_login_required( 'Video %s is only available for members' % video_id) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') course_id = mobj.group('course_id') query = { 'videoId': video_id, 'type': 'video', } video = self._download_json( 'https://www.lynda.com/ajax/player', video_id, 'Downloading video JSON', fatal=False, query=query) # Fallback scenario if not video: query['courseId'] = course_id play = self._download_json( 
'https://www.lynda.com/ajax/course/%s/%s/play' % (course_id, video_id), video_id, 'Downloading play JSON') if not play: self._raise_unavailable(video_id) formats = [] for formats_dict in play: urls = formats_dict.get('urls') if not isinstance(urls, dict): continue cdn = formats_dict.get('name') for format_id, format_url in urls.items(): if not format_url: continue formats.append({ 'url': format_url, 'format_id': '%s-%s' % (cdn, format_id) if cdn else format_id, 'height': int_or_none(format_id), }) self._sort_formats(formats) conviva = self._download_json( 'https://www.lynda.com/ajax/player/conviva', video_id, 'Downloading conviva JSON', query=query) return { 'id': video_id, 'title': conviva['VideoTitle'], 'description': conviva.get('VideoDescription'), 'release_year': int_or_none(conviva.get('ReleaseYear')), 'duration': int_or_none(conviva.get('Duration')), 'creator': conviva.get('Author'), 'formats': formats, } if 'Status' in video: raise ExtractorError( 'lynda returned error: %s' % video['Message'], expected=True) if video.get('HasAccess') is False: self._raise_unavailable(video_id) video_id = compat_str(video.get('ID') or video_id) duration = int_or_none(video.get('DurationInSeconds')) title = video['Title'] formats = [] fmts = video.get('Formats') if fmts: formats.extend([{ 'url': f['Url'], 'ext': f.get('Extension'), 'width': int_or_none(f.get('Width')), 'height': int_or_none(f.get('Height')), 'filesize': int_or_none(f.get('FileSize')), 'format_id': compat_str(f.get('Resolution')) if f.get('Resolution') else None, } for f in fmts if f.get('Url')]) prioritized_streams = video.get('PrioritizedStreams') if prioritized_streams: for prioritized_stream_id, prioritized_stream in prioritized_streams.items(): formats.extend([{ 'url': video_url, 'height': int_or_none(format_id), 'format_id': '%s-%s' % (prioritized_stream_id, format_id), } for format_id, video_url in prioritized_stream.items()]) self._check_formats(formats, video_id) self._sort_formats(formats) subtitles 
= self.extract_subtitles(video_id) return { 'id': video_id, 'title': title, 'duration': duration, 'subtitles': subtitles, 'formats': formats } def _fix_subtitles(self, subs): srt = '' seq_counter = 0 for pos in range(0, len(subs) - 1): seq_current = subs[pos] m_current = re.match(self._TIMECODE_REGEX, seq_current['Timecode']) if m_current is None: continue seq_next = subs[pos + 1] m_next = re.match(self._TIMECODE_REGEX, seq_next['Timecode']) if m_next is None: continue appear_time = m_current.group('timecode') disappear_time = m_next.group('timecode') text = seq_current['Caption'].strip() if text: seq_counter += 1 srt += '%s\r\n%s --> %s\r\n%s\r\n\r\n' % (seq_counter, appear_time, disappear_time, text) if srt: return srt def _get_subtitles(self, video_id): url = 'https://www.lynda.com/ajax/player?videoId=%s&type=transcript' % video_id subs = self._download_webpage( url, video_id, 'Downloading subtitles JSON', fatal=False) if not subs or 'Status="NotFound"' in subs: return {} subs = self._parse_json(subs, video_id, fatal=False) if not subs: return {} fixed_subs = self._fix_subtitles(subs) if fixed_subs: return {'en': [{'ext': 'srt', 'data': fixed_subs}]} return {} class LyndaCourseIE(LyndaBaseIE): IE_NAME = 'lynda:course' IE_DESC = 'lynda.com online courses' # Course link equals to welcome/introduction video link of same course # We will recognize it as course link _VALID_URL = r'https?://(?:www|m)\.(?:lynda\.com|educourse\.ga)/(?P<coursepath>(?:[^/]+/){2,3}(?P<courseid>\d+))-2\.html' _TESTS = [{ 'url': 'https://www.lynda.com/Graphic-Design-tutorials/Grundlagen-guten-Gestaltung/393570-2.html', 'only_matching': True, }, { 'url': 'https://www.lynda.com/de/Graphic-Design-tutorials/Grundlagen-guten-Gestaltung/393570-2.html', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) course_path = mobj.group('coursepath') course_id = mobj.group('courseid') item_template = 'https://www.lynda.com/%s/%%s-4.html' % course_path course = 
self._download_json( 'https://www.lynda.com/ajax/player?courseId=%s&type=course' % course_id, course_id, 'Downloading course JSON', fatal=False) if not course: webpage = self._download_webpage(url, course_id) entries = [ self.url_result( item_template % video_id, ie=LyndaIE.ie_key(), video_id=video_id) for video_id in re.findall( r'data-video-id=["\'](\d+)', webpage)] return self.playlist_result( entries, course_id, self._og_search_title(webpage, fatal=False), self._og_search_description(webpage)) if course.get('Status') == 'NotFound': raise ExtractorError( 'Course %s does not exist' % course_id, expected=True) unaccessible_videos = 0 entries = [] # Might want to extract videos right here from video['Formats'] as it seems 'Formats' is not provided # by single video API anymore for chapter in course['Chapters']: for video in chapter.get('Videos', []): if video.get('HasAccess') is False: unaccessible_videos += 1 continue video_id = video.get('ID') if video_id: entries.append({ '_type': 'url_transparent', 'url': item_template % video_id, 'ie_key': LyndaIE.ie_key(), 'chapter': chapter.get('Title'), 'chapter_number': int_or_none(chapter.get('ChapterIndex')), 'chapter_id': compat_str(chapter.get('ID')), }) if unaccessible_videos > 0: self._downloader.report_warning( '%s videos are only available for members (or paid members) and will not be downloaded. ' % unaccessible_videos + self._ACCOUNT_CREDENTIALS_HINT) course_title = course.get('Title') course_description = course.get('Description') return self.playlist_result(entries, course_id, course_title, course_description)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/imdb.py
youtube_dl/extractor/imdb.py
from __future__ import unicode_literals import base64 import json import re from .common import InfoExtractor from ..utils import ( determine_ext, mimetype2ext, parse_duration, qualities, try_get, url_or_none, ) class ImdbIE(InfoExtractor): IE_NAME = 'imdb' IE_DESC = 'Internet Movie Database trailers' _VALID_URL = r'https?://(?:www|m)\.imdb\.com/(?:video|title|list).*?[/-]vi(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.imdb.com/video/imdb/vi2524815897', 'info_dict': { 'id': '2524815897', 'ext': 'mp4', 'title': 'No. 2', 'description': 'md5:87bd0bdc61e351f21f20d2d7441cb4e7', 'duration': 152, } }, { 'url': 'http://www.imdb.com/video/_/vi2524815897', 'only_matching': True, }, { 'url': 'http://www.imdb.com/title/tt1667889/?ref_=ext_shr_eml_vi#lb-vi2524815897', 'only_matching': True, }, { 'url': 'http://www.imdb.com/title/tt1667889/#lb-vi2524815897', 'only_matching': True, }, { 'url': 'http://www.imdb.com/videoplayer/vi1562949145', 'only_matching': True, }, { 'url': 'http://www.imdb.com/title/tt4218696/videoplayer/vi2608641561', 'only_matching': True, }, { 'url': 'https://www.imdb.com/list/ls009921623/videoplayer/vi260482329', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) data = self._download_json( 'https://www.imdb.com/ve/data/VIDEO_PLAYBACK_DATA', video_id, query={ 'key': base64.b64encode(json.dumps({ 'type': 'VIDEO_PLAYER', 'subType': 'FORCE_LEGACY', 'id': 'vi%s' % video_id, }).encode()).decode(), })[0] quality = qualities(('SD', '480p', '720p', '1080p')) formats = [] for encoding in data['videoLegacyEncodings']: if not encoding or not isinstance(encoding, dict): continue video_url = url_or_none(encoding.get('url')) if not video_url: continue ext = mimetype2ext(encoding.get( 'mimeType')) or determine_ext(video_url) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( video_url, video_id, 'mp4', entry_protocol='m3u8_native', preference=1, m3u8_id='hls', fatal=False)) continue format_id = encoding.get('definition') 
formats.append({ 'format_id': format_id, 'url': video_url, 'ext': ext, 'quality': quality(format_id), }) self._sort_formats(formats) webpage = self._download_webpage( 'https://www.imdb.com/video/vi' + video_id, video_id) video_metadata = self._parse_json(self._search_regex( r'args\.push\(\s*({.+?})\s*\)\s*;', webpage, 'video metadata'), video_id) video_info = video_metadata.get('VIDEO_INFO') if video_info and isinstance(video_info, dict): info = try_get( video_info, lambda x: x[list(video_info.keys())[0]][0], dict) else: info = {} title = self._html_search_meta( ['og:title', 'twitter:title'], webpage) or self._html_search_regex( r'<title>(.+?)</title>', webpage, 'title', default=None) or info['videoTitle'] return { 'id': video_id, 'title': title, 'alt_title': info.get('videoSubTitle'), 'formats': formats, 'description': info.get('videoDescription'), 'thumbnail': url_or_none(try_get( video_metadata, lambda x: x['videoSlate']['source'])), 'duration': parse_duration(info.get('videoRuntime')), } class ImdbListIE(InfoExtractor): IE_NAME = 'imdb:list' IE_DESC = 'Internet Movie Database lists' _VALID_URL = r'https?://(?:www\.)?imdb\.com/list/ls(?P<id>\d{9})(?!/videoplayer/vi\d+)' _TEST = { 'url': 'https://www.imdb.com/list/ls009921623/', 'info_dict': { 'id': '009921623', 'title': 'The Bourne Legacy', 'description': 'A list of trailers, clips, and more from The Bourne Legacy, starring Jeremy Renner and Rachel Weisz.', }, 'playlist_count': 8, } def _real_extract(self, url): list_id = self._match_id(url) webpage = self._download_webpage(url, list_id) entries = [ self.url_result('http://www.imdb.com' + m, 'Imdb') for m in re.findall(r'href="(/list/ls%s/videoplayer/vi[^"]+)"' % list_id, webpage)] list_title = self._html_search_regex( r'<h1[^>]+class="[^"]*header[^"]*"[^>]*>(.*?)</h1>', webpage, 'list title') list_description = self._html_search_regex( r'<div[^>]+class="[^"]*list-description[^"]*"[^>]*><p>(.*?)</p>', webpage, 'list description') return 
self.playlist_result(entries, list_id, list_title, list_description)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/nick.py
youtube_dl/extractor/nick.py
# coding: utf-8 from __future__ import unicode_literals import re from .mtv import MTVServicesInfoExtractor from ..utils import update_url_query class NickIE(MTVServicesInfoExtractor): # None of videos on the website are still alive? IE_NAME = 'nick.com' _VALID_URL = r'https?://(?P<domain>(?:(?:www|beta)\.)?nick(?:jr)?\.com)/(?:[^/]+/)?(?:videos/clip|[^/]+/videos)/(?P<id>[^/?#.]+)' _FEED_URL = 'http://udat.mtvnservices.com/service1/dispatch.htm' _GEO_COUNTRIES = ['US'] _TESTS = [{ 'url': 'http://www.nick.com/videos/clip/alvinnn-and-the-chipmunks-112-full-episode.html', 'playlist': [ { 'md5': '6e5adc1e28253bbb1b28ab05403dd4d4', 'info_dict': { 'id': 'be6a17b0-412d-11e5-8ff7-0026b9414f30', 'ext': 'mp4', 'title': 'ALVINNN!!! and The Chipmunks: "Mojo Missing/Who\'s The Animal" S1', 'description': 'Alvin is convinced his mojo was in a cap he gave to a fan, and must find a way to get his hat back before the Chipmunks’ big concert.\nDuring a costume visit to the zoo, Alvin finds himself mistaken for the real Tasmanian devil.', } }, { 'md5': 'd7be441fc53a1d4882fa9508a1e5b3ce', 'info_dict': { 'id': 'be6b8f96-412d-11e5-8ff7-0026b9414f30', 'ext': 'mp4', 'title': 'ALVINNN!!! and The Chipmunks: "Mojo Missing/Who\'s The Animal" S2', 'description': 'Alvin is convinced his mojo was in a cap he gave to a fan, and must find a way to get his hat back before the Chipmunks’ big concert.\nDuring a costume visit to the zoo, Alvin finds himself mistaken for the real Tasmanian devil.', } }, { 'md5': 'efffe1728a234b2b0d2f2b343dd1946f', 'info_dict': { 'id': 'be6cf7e6-412d-11e5-8ff7-0026b9414f30', 'ext': 'mp4', 'title': 'ALVINNN!!! 
and The Chipmunks: "Mojo Missing/Who\'s The Animal" S3', 'description': 'Alvin is convinced his mojo was in a cap he gave to a fan, and must find a way to get his hat back before the Chipmunks’ big concert.\nDuring a costume visit to the zoo, Alvin finds himself mistaken for the real Tasmanian devil.', } }, { 'md5': '1ec6690733ab9f41709e274a1d5c7556', 'info_dict': { 'id': 'be6e3354-412d-11e5-8ff7-0026b9414f30', 'ext': 'mp4', 'title': 'ALVINNN!!! and The Chipmunks: "Mojo Missing/Who\'s The Animal" S4', 'description': 'Alvin is convinced his mojo was in a cap he gave to a fan, and must find a way to get his hat back before the Chipmunks’ big concert.\nDuring a costume visit to the zoo, Alvin finds himself mistaken for the real Tasmanian devil.', } }, ], }, { 'url': 'http://www.nickjr.com/paw-patrol/videos/pups-save-a-goldrush-s3-ep302-full-episode/', 'only_matching': True, }, { 'url': 'http://beta.nick.com/nicky-ricky-dicky-and-dawn/videos/nicky-ricky-dicky-dawn-301-full-episode/', 'only_matching': True, }] def _get_feed_query(self, uri): return { 'feed': 'nick_arc_player_prime', 'mgid': uri, } def _real_extract(self, url): domain, display_id = re.match(self._VALID_URL, url).groups() video_data = self._download_json( 'http://%s/data/video.endLevel.json' % domain, display_id, query={ 'urlKey': display_id, }) return self._get_videos_info(video_data['player'] + video_data['id']) class NickBrIE(MTVServicesInfoExtractor): IE_NAME = 'nickelodeon:br' _VALID_URL = r'''(?x) https?:// (?: (?P<domain>(?:www\.)?nickjr|mundonick\.uol)\.com\.br| (?:www\.)?nickjr\.[a-z]{2}| (?:www\.)?nickelodeonjunior\.fr ) /(?:programas/)?[^/]+/videos/(?:episodios/)?(?P<id>[^/?\#.]+) ''' _TESTS = [{ 'url': 'http://www.nickjr.com.br/patrulha-canina/videos/210-labirinto-de-pipoca/', 'only_matching': True, }, { 'url': 'http://mundonick.uol.com.br/programas/the-loud-house/videos/muitas-irmas/7ljo9j', 'only_matching': True, }, { 'url': 
'http://www.nickjr.nl/paw-patrol/videos/311-ge-wol-dig-om-terug-te-zijn/', 'only_matching': True, }, { 'url': 'http://www.nickjr.de/blaze-und-die-monster-maschinen/videos/f6caaf8f-e4e8-4cc1-b489-9380d6dcd059/', 'only_matching': True, }, { 'url': 'http://www.nickelodeonjunior.fr/paw-patrol-la-pat-patrouille/videos/episode-401-entier-paw-patrol/', 'only_matching': True, }] def _real_extract(self, url): domain, display_id = re.match(self._VALID_URL, url).groups() webpage = self._download_webpage(url, display_id) uri = self._search_regex( r'data-(?:contenturi|mgid)="([^"]+)', webpage, 'mgid') video_id = self._id_from_uri(uri) config = self._download_json( 'http://media.mtvnservices.com/pmt/e1/access/index.html', video_id, query={ 'uri': uri, 'configtype': 'edge', }, headers={ 'Referer': url, }) info_url = self._remove_template_parameter(config['feedWithQueryParams']) if info_url == 'None': if domain.startswith('www.'): domain = domain[4:] content_domain = { 'mundonick.uol': 'mundonick.com.br', 'nickjr': 'br.nickelodeonjunior.tv', }[domain] query = { 'mgid': uri, 'imageEp': content_domain, 'arcEp': content_domain, } if domain == 'nickjr.com.br': query['ep'] = 'c4b16088' info_url = update_url_query( 'http://feeds.mtvnservices.com/od/feed/intl-mrss-player-feed', query) return self._get_videos_info_from_url(info_url, video_id) class NickDeIE(MTVServicesInfoExtractor): IE_NAME = 'nick.de' _VALID_URL = r'https?://(?:www\.)?(?P<host>nick\.(?:de|com\.pl|ch)|nickelodeon\.(?:nl|be|at|dk|no|se))/[^/]+/(?:[^/]+/)*(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'http://www.nick.de/playlist/3773-top-videos/videos/episode/17306-zu-wasser-und-zu-land-rauchende-erdnusse', 'only_matching': True, }, { 'url': 'http://www.nick.de/shows/342-icarly', 'only_matching': True, }, { 'url': 'http://www.nickelodeon.nl/shows/474-spongebob/videos/17403-een-kijkje-in-de-keuken-met-sandy-van-binnenuit', 'only_matching': True, }, { 'url': 
'http://www.nickelodeon.at/playlist/3773-top-videos/videos/episode/77993-das-letzte-gefecht', 'only_matching': True, }, { 'url': 'http://www.nick.com.pl/seriale/474-spongebob-kanciastoporty/wideo/17412-teatr-to-jest-to-rodeo-oszolom', 'only_matching': True, }, { 'url': 'http://www.nickelodeon.no/program/2626-bulderhuset/videoer/90947-femteklasse-veronica-vs-vanzilla', 'only_matching': True, }, { 'url': 'http://www.nickelodeon.dk/serier/2626-hojs-hus/videoer/761-tissepause', 'only_matching': True, }, { 'url': 'http://www.nickelodeon.se/serier/2626-lugn-i-stormen/videos/998-', 'only_matching': True, }, { 'url': 'http://www.nick.ch/shows/2304-adventure-time-abenteuerzeit-mit-finn-und-jake', 'only_matching': True, }, { 'url': 'http://www.nickelodeon.be/afspeellijst/4530-top-videos/videos/episode/73917-inval-broodschapper-lariekoek-arie', 'only_matching': True, }] def _extract_mrss_url(self, webpage, host): return update_url_query(self._search_regex( r'data-mrss=(["\'])(?P<url>http.+?)\1', webpage, 'mrss url', group='url'), {'siteKey': host}) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') host = mobj.group('host') webpage = self._download_webpage(url, video_id) mrss_url = self._extract_mrss_url(webpage, host) return self._get_videos_info_from_url(mrss_url, video_id) class NickNightIE(NickDeIE): IE_NAME = 'nicknight' _VALID_URL = r'https?://(?:www\.)(?P<host>nicknight\.(?:de|at|tv))/(?:playlist|shows)/(?:[^/]+/)*(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'http://www.nicknight.at/shows/977-awkward/videos/85987-nimmer-beste-freunde', 'only_matching': True, }, { 'url': 'http://www.nicknight.at/shows/977-awkward', 'only_matching': True, }, { 'url': 'http://www.nicknight.at/shows/1900-faking-it', 'only_matching': True, }] def _extract_mrss_url(self, webpage, *args): return self._search_regex( r'mrss\s*:\s*(["\'])(?P<url>http.+?)\1', webpage, 'mrss url', group='url') class NickRuIE(MTVServicesInfoExtractor): IE_NAME = 'nickelodeonru' 
_VALID_URL = r'https?://(?:www\.)nickelodeon\.(?:ru|fr|es|pt|ro|hu|com\.tr)/[^/]+/(?:[^/]+/)*(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'http://www.nickelodeon.ru/shows/henrydanger/videos/episodes/3-sezon-15-seriya-licenziya-na-polyot/pmomfb#playlist/7airc6', 'only_matching': True, }, { 'url': 'http://www.nickelodeon.ru/videos/smotri-na-nickelodeon-v-iyule/g9hvh7', 'only_matching': True, }, { 'url': 'http://www.nickelodeon.fr/programmes/bob-l-eponge/videos/le-marathon-de-booh-kini-bottom-mardi-31-octobre/nfn7z0', 'only_matching': True, }, { 'url': 'http://www.nickelodeon.es/videos/nickelodeon-consejos-tortitas/f7w7xy', 'only_matching': True, }, { 'url': 'http://www.nickelodeon.pt/series/spongebob-squarepants/videos/a-bolha-de-tinta-gigante/xutq1b', 'only_matching': True, }, { 'url': 'http://www.nickelodeon.ro/emisiuni/shimmer-si-shine/video/nahal-din-bomboane/uw5u2k', 'only_matching': True, }, { 'url': 'http://www.nickelodeon.hu/musorok/spongyabob-kockanadrag/videok/episodes/buborekfujas-az-elszakadt-nadrag/q57iob#playlist/k6te4y', 'only_matching': True, }, { 'url': 'http://www.nickelodeon.com.tr/programlar/sunger-bob/videolar/kayip-yatak/mgqbjy', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) mgid = self._extract_mgid(webpage) return self.url_result('http://media.mtvnservices.com/embed/%s' % mgid)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/radiojavan.py
youtube_dl/extractor/radiojavan.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( parse_resolution, str_to_int, unified_strdate, urlencode_postdata, urljoin, ) class RadioJavanIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?radiojavan\.com/videos/video/(?P<id>[^/]+)/?' _TEST = { 'url': 'http://www.radiojavan.com/videos/video/chaartaar-ashoobam', 'md5': 'e85208ffa3ca8b83534fca9fe19af95b', 'info_dict': { 'id': 'chaartaar-ashoobam', 'ext': 'mp4', 'title': 'Chaartaar - Ashoobam', 'thumbnail': r're:^https?://.*\.jpe?g$', 'upload_date': '20150215', 'view_count': int, 'like_count': int, 'dislike_count': int, } } def _real_extract(self, url): video_id = self._match_id(url) download_host = self._download_json( 'https://www.radiojavan.com/videos/video_host', video_id, data=urlencode_postdata({'id': video_id}), headers={ 'Content-Type': 'application/x-www-form-urlencoded', 'Referer': url, }).get('host', 'https://host1.rjmusicmedia.com') webpage = self._download_webpage(url, video_id) formats = [] for format_id, _, video_path in re.findall( r'RJ\.video(?P<format_id>\d+[pPkK])\s*=\s*(["\'])(?P<url>(?:(?!\2).)+)\2', webpage): f = parse_resolution(format_id) f.update({ 'url': urljoin(download_host, video_path), 'format_id': format_id, }) formats.append(f) self._sort_formats(formats) title = self._og_search_title(webpage) thumbnail = self._og_search_thumbnail(webpage) upload_date = unified_strdate(self._search_regex( r'class="date_added">Date added: ([^<]+)<', webpage, 'upload date', fatal=False)) view_count = str_to_int(self._search_regex( r'class="views">Plays: ([\d,]+)', webpage, 'view count', fatal=False)) like_count = str_to_int(self._search_regex( r'class="rating">([\d,]+) likes', webpage, 'like count', fatal=False)) dislike_count = str_to_int(self._search_regex( r'class="rating">([\d,]+) dislikes', webpage, 'dislike count', fatal=False)) return { 'id': video_id, 'title': title, 'thumbnail': thumbnail, 'upload_date': upload_date, 'view_count': 
view_count, 'like_count': like_count, 'dislike_count': dislike_count, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/videa.py
youtube_dl/extractor/videa.py
# coding: utf-8 from __future__ import unicode_literals import random import re import string from .common import InfoExtractor from ..compat import ( compat_b64decode, compat_ord, compat_struct_pack, ) from ..utils import ( ExtractorError, int_or_none, mimetype2ext, parse_codecs, parse_qs, update_url_query, urljoin, xpath_element, xpath_text, ) def compat_random_choices(population, *args, **kwargs): # weights=None, *, cum_weights=None, k=1 # limited implementation needed here weights = args[0] if args else kwargs.get('weights') assert all(w is None for w in (weights, kwargs.get('cum_weights'))) k = kwargs.get('k', 1) return ''.join(random.choice(population) for _ in range(k)) class VideaIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// videa(?:kid)?\.hu/ (?: videok/(?:[^/]+/)*[^?#&]+-| (?:videojs_)?player\?.*?\bv=| player/v/ ) (?P<id>[^?#&]+) ''' _EMBED_REGEX = [r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//videa\.hu/player\?.*?\bv=.+?)\1'] _TESTS = [{ 'url': 'http://videa.hu/videok/allatok/az-orult-kigyasz-285-kigyot-kigyo-8YfIAjxwWGwT8HVQ', 'md5': '97a7af41faeaffd9f1fc864a7c7e7603', 'info_dict': { 'id': '8YfIAjxwWGwT8HVQ', 'ext': 'mp4', 'title': 'Az őrült kígyász 285 kígyót enged szabadon', 'thumbnail': r're:^https?://.*', 'duration': 21, 'age_limit': 0, }, }, { 'url': 'http://videa.hu/videok/origo/jarmuvek/supercars-elozes-jAHDWfWSJH5XuFhH', 'md5': 'd57ccd8812c7fd491d33b1eab8c99975', 'info_dict': { 'id': 'jAHDWfWSJH5XuFhH', 'ext': 'mp4', 'title': 'Supercars előzés', 'thumbnail': r're:^https?://.*', 'duration': 64, 'age_limit': 0, }, }, { 'url': 'http://videa.hu/player?v=8YfIAjxwWGwT8HVQ', 'md5': '97a7af41faeaffd9f1fc864a7c7e7603', 'info_dict': { 'id': '8YfIAjxwWGwT8HVQ', 'ext': 'mp4', 'title': 'Az őrült kígyász 285 kígyót enged szabadon', 'thumbnail': r're:^https?://.*', 'duration': 21, 'age_limit': 0, }, }, { 'url': 'http://videa.hu/player/v/8YfIAjxwWGwT8HVQ?autoplay=1', 'only_matching': True, }, { 'url': 
'https://videakid.hu/videok/origo/jarmuvek/supercars-elozes-jAHDWfWSJH5XuFhH', 'only_matching': True, }, { 'url': 'https://videakid.hu/player?v=8YfIAjxwWGwT8HVQ', 'only_matching': True, }, { 'url': 'https://videakid.hu/player/v/8YfIAjxwWGwT8HVQ?autoplay=1', 'only_matching': True, }] _STATIC_SECRET = 'xHb0ZvME5q8CBcoQi6AngerDu3FGO9fkUlwPmLVY_RTzj2hJIS4NasXWKy1td7p' @classmethod def _extract_urls(cls, webpage): def yield_urls(): for pattern in cls._EMBED_REGEX: for m in re.finditer(pattern, webpage): yield m.group('url') return list(yield_urls()) @staticmethod def rc4(cipher_text, key): res = b'' key_len = len(key) S = list(range(256)) j = 0 for i in range(256): j = (j + S[i] + ord(key[i % key_len])) % 256 S[i], S[j] = S[j], S[i] i = 0 j = 0 for m in range(len(cipher_text)): i = (i + 1) % 256 j = (j + S[i]) % 256 S[i], S[j] = S[j], S[i] k = S[(S[i] + S[j]) % 256] res += compat_struct_pack('B', k ^ compat_ord(cipher_text[m])) return res.decode('utf-8') def _real_extract(self, url): video_id = self._match_id(url) video_page = self._download_webpage(url, video_id) if 'videa.hu/player' in url: player_url = url player_page = video_page else: player_url = self._search_regex( r'<iframe.*?src="(/player\?[^"]+)"', video_page, 'player url') player_url = urljoin(url, player_url) player_page = self._download_webpage(player_url, video_id) nonce = self._search_regex( r'_xt\s*=\s*"([^"]+)"', player_page, 'nonce') l = nonce[:32] s = nonce[32:] result = '' for i in range(0, 32): result += s[i - (self._STATIC_SECRET.index(l[i]) - 31)] query = parse_qs(player_url) random_seed = ''.join(compat_random_choices(string.ascii_letters + string.digits, k=8)) query['_s'] = random_seed query['_t'] = result[:16] b64_info, handle = self._download_webpage_handle( 'http://videa.hu/player/xml', video_id, query=query) if b64_info.startswith('<?xml'): info = self._parse_xml(b64_info, video_id) else: key = result[16:] + random_seed + handle.headers['x-videa-xs'] info = self._parse_xml(self.rc4( 
compat_b64decode(b64_info), key), video_id) video = xpath_element(info, './video', 'video') if video is None: raise ExtractorError(xpath_element( info, './error', fatal=True), expected=True) sources = xpath_element( info, './video_sources', 'sources', fatal=True) hash_values = xpath_element( info, './hash_values', 'hash values', fatal=False) title = xpath_text(video, './title', fatal=True) formats = [] for source in sources.findall('./video_source'): source_url = source.text source_name = source.get('name') source_exp = source.get('exp') if not (source_url and source_name): continue hash_value = ( xpath_text(hash_values, 'hash_value_' + source_name) if hash_values is not None else None) if hash_value and source_exp: source_url = update_url_query(source_url, { 'md5': hash_value, 'expires': source_exp, }) f = parse_codecs(source.get('codecs')) f.update({ 'url': self._proto_relative_url(source_url), 'ext': mimetype2ext(source.get('mimetype')) or 'mp4', 'format_id': source.get('name'), 'width': int_or_none(source.get('width')), 'height': int_or_none(source.get('height')), }) formats.append(f) self._sort_formats(formats) thumbnail = self._proto_relative_url(xpath_text(video, './poster_src')) age_limit = None is_adult = xpath_text(video, './is_adult_content', default=None) if is_adult: age_limit = 18 if is_adult == '1' else 0 return { 'id': video_id, 'title': title, 'thumbnail': thumbnail, 'duration': int_or_none(xpath_text(video, './duration')), 'age_limit': age_limit, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/videodetective.py
youtube_dl/extractor/videodetective.py
from __future__ import unicode_literals from .common import InfoExtractor from .internetvideoarchive import InternetVideoArchiveIE class VideoDetectiveIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?videodetective\.com/[^/]+/[^/]+/(?P<id>\d+)' _TEST = { 'url': 'http://www.videodetective.com/movies/kick-ass-2/194487', 'info_dict': { 'id': '194487', 'ext': 'mp4', 'title': 'Kick-Ass 2', 'description': 'md5:c189d5b7280400630a1d3dd17eaa8d8a', }, 'params': { # m3u8 download 'skip_download': True, }, } def _real_extract(self, url): video_id = self._match_id(url) query = 'customerid=69249&publishedid=' + video_id return self.url_result( InternetVideoArchiveIE._build_json_url(query), ie=InternetVideoArchiveIE.ie_key())
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/rtbf.py
youtube_dl/extractor/rtbf.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, float_or_none, int_or_none, strip_or_none, ) class RTBFIE(InfoExtractor): _VALID_URL = r'''(?x) https?://(?:www\.)?rtbf\.be/ (?: video/[^?]+\?.*\bid=| ouftivi/(?:[^/]+/)*[^?]+\?.*\bvideoId=| auvio/[^/]+\?.*\b(?P<live>l)?id= )(?P<id>\d+)''' _TESTS = [{ 'url': 'https://www.rtbf.be/video/detail_les-diables-au-coeur-episode-2?id=1921274', 'md5': '8c876a1cceeb6cf31b476461ade72384', 'info_dict': { 'id': '1921274', 'ext': 'mp4', 'title': 'Les Diables au coeur (épisode 2)', 'description': '(du 25/04/2014)', 'duration': 3099.54, 'upload_date': '20140425', 'timestamp': 1398456300, } }, { # geo restricted 'url': 'http://www.rtbf.be/ouftivi/heros/detail_scooby-doo-mysteres-associes?id=1097&videoId=2057442', 'only_matching': True, }, { 'url': 'http://www.rtbf.be/ouftivi/niouzz?videoId=2055858', 'only_matching': True, }, { 'url': 'http://www.rtbf.be/auvio/detail_jeudi-en-prime-siegfried-bracke?id=2102996', 'only_matching': True, }, { # Live 'url': 'https://www.rtbf.be/auvio/direct_pure-fm?lid=134775', 'only_matching': True, }, { # Audio 'url': 'https://www.rtbf.be/auvio/detail_cinq-heures-cinema?id=2360811', 'only_matching': True, }, { # With Subtitle 'url': 'https://www.rtbf.be/auvio/detail_les-carnets-du-bourlingueur?id=2361588', 'only_matching': True, }] _IMAGE_HOST = 'http://ds1.ds.static.rtbf.be' _PROVIDERS = { 'YOUTUBE': 'Youtube', 'DAILYMOTION': 'Dailymotion', 'VIMEO': 'Vimeo', } _QUALITIES = [ ('mobile', 'SD'), ('web', 'MD'), ('high', 'HD'), ] def _real_extract(self, url): live, media_id = re.match(self._VALID_URL, url).groups() embed_page = self._download_webpage( 'https://www.rtbf.be/auvio/embed/' + ('direct' if live else 'media'), media_id, query={'id': media_id}) data = self._parse_json(self._html_search_regex( r'data-media="([^"]+)"', embed_page, 'media data'), media_id) error = data.get('error') if error: raise ExtractorError('%s 
said: %s' % (self.IE_NAME, error), expected=True) provider = data.get('provider') if provider in self._PROVIDERS: return self.url_result(data['url'], self._PROVIDERS[provider]) title = data['title'] is_live = data.get('isLive') if is_live: title = self._live_title(title) height_re = r'-(\d+)p\.' formats = [] m3u8_url = data.get('urlHlsAes128') or data.get('urlHls') if m3u8_url: formats.extend(self._extract_m3u8_formats( m3u8_url, media_id, 'mp4', m3u8_id='hls', fatal=False)) fix_url = lambda x: x.replace('//rtbf-vod.', '//rtbf.') if '/geo/drm/' in x else x http_url = data.get('url') if formats and http_url and re.search(height_re, http_url): http_url = fix_url(http_url) for m3u8_f in formats[:]: height = m3u8_f.get('height') if not height: continue f = m3u8_f.copy() del f['protocol'] f.update({ 'format_id': m3u8_f['format_id'].replace('hls-', 'http-'), 'url': re.sub(height_re, '-%dp.' % height, http_url), }) formats.append(f) else: sources = data.get('sources') or {} for key, format_id in self._QUALITIES: format_url = sources.get(key) if not format_url: continue height = int_or_none(self._search_regex( height_re, format_url, 'height', default=None)) formats.append({ 'format_id': format_id, 'url': fix_url(format_url), 'height': height, }) mpd_url = data.get('urlDash') if not data.get('drm') and mpd_url: formats.extend(self._extract_mpd_formats( mpd_url, media_id, mpd_id='dash', fatal=False)) audio_url = data.get('urlAudio') if audio_url: formats.append({ 'format_id': 'audio', 'url': audio_url, 'vcodec': 'none', }) self._sort_formats(formats) subtitles = {} for track in (data.get('tracks') or {}).values(): sub_url = track.get('url') if not sub_url: continue subtitles.setdefault(track.get('lang') or 'fr', []).append({ 'url': sub_url, }) return { 'id': media_id, 'formats': formats, 'title': title, 'description': strip_or_none(data.get('description')), 'thumbnail': data.get('thumbnail'), 'duration': float_or_none(data.get('realDuration')), 'timestamp': 
int_or_none(data.get('liveFrom')), 'series': data.get('programLabel'), 'subtitles': subtitles, 'is_live': is_live, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false