repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/kth.py
yt_dlp/extractor/kth.py
from .common import InfoExtractor from ..utils import smuggle_url class KTHIE(InfoExtractor): _VALID_URL = r'https?://play\.kth\.se/(?:[^/]+/)+(?P<id>[a-z0-9_]+)' _TEST = { 'url': 'https://play.kth.se/media/Lunch+breakA+De+nya+aff%C3%A4rerna+inom+Fordonsdalen/0_uoop6oz9', 'md5': 'd83ada6d00ca98b73243a88efe19e8a6', 'info_dict': { 'id': '0_uoop6oz9', 'ext': 'mp4', 'title': 'md5:bd1d6931facb6828762a33e6ce865f37', 'thumbnail': 're:https?://.+/thumbnail/.+', 'duration': 3516, 'timestamp': 1647345358, 'upload_date': '20220315', 'uploader_id': 'md5:0ec23e33a89e795a4512930c8102509f', }, } def _real_extract(self, url): video_id = self._match_id(url) return self.url_result( smuggle_url(f'kaltura:308:{video_id}', { 'service_url': 'https://api.kaltura.nordu.net'}), 'Kaltura')
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/appletrailers.py
yt_dlp/extractor/appletrailers.py
import json import re import urllib.parse from .common import InfoExtractor from ..utils import ( int_or_none, parse_duration, unified_strdate, ) class AppleTrailersIE(InfoExtractor): IE_NAME = 'appletrailers' _VALID_URL = r'https?://(?:www\.|movie)?trailers\.apple\.com/(?:trailers|ca)/(?P<company>[^/]+)/(?P<movie>[^/]+)' _TESTS = [{ 'url': 'http://trailers.apple.com/trailers/wb/manofsteel/', 'info_dict': { 'id': '5111', 'title': 'Man of Steel', }, 'playlist': [ { 'md5': 'd97a8e575432dbcb81b7c3acb741f8a8', 'info_dict': { 'id': 'manofsteel-trailer4', 'ext': 'mov', 'duration': 111, 'title': 'Trailer 4', 'upload_date': '20130523', 'uploader_id': 'wb', }, }, { 'md5': 'b8017b7131b721fb4e8d6f49e1df908c', 'info_dict': { 'id': 'manofsteel-trailer3', 'ext': 'mov', 'duration': 182, 'title': 'Trailer 3', 'upload_date': '20130417', 'uploader_id': 'wb', }, }, { 'md5': 'd0f1e1150989b9924679b441f3404d48', 'info_dict': { 'id': 'manofsteel-trailer', 'ext': 'mov', 'duration': 148, 'title': 'Trailer', 'upload_date': '20121212', 'uploader_id': 'wb', }, }, { 'md5': '5fe08795b943eb2e757fa95cb6def1cb', 'info_dict': { 'id': 'manofsteel-teaser', 'ext': 'mov', 'duration': 93, 'title': 'Teaser', 'upload_date': '20120721', 'uploader_id': 'wb', }, }, ], }, { 'url': 'http://trailers.apple.com/trailers/magnolia/blackthorn/', 'info_dict': { 'id': '4489', 'title': 'Blackthorn', }, 'playlist_mincount': 2, 'expected_warnings': ['Unable to download JSON metadata'], }, { # json data only available from http://trailers.apple.com/trailers/feeds/data/15881.json 'url': 'http://trailers.apple.com/trailers/fox/kungfupanda3/', 'info_dict': { 'id': '15881', 'title': 'Kung Fu Panda 3', }, 'playlist_mincount': 4, }, { 'url': 'http://trailers.apple.com/ca/metropole/autrui/', 'only_matching': True, }, { 'url': 'http://movietrailers.apple.com/trailers/focus_features/kuboandthetwostrings/', 'only_matching': True, }] _JSON_RE = r'iTunes.playURL\((.*?)\);' def _real_extract(self, url): mobj = 
self._match_valid_url(url) movie = mobj.group('movie') uploader_id = mobj.group('company') webpage = self._download_webpage(url, movie) film_id = self._search_regex(r"FilmId\s*=\s*'(\d+)'", webpage, 'film id') film_data = self._download_json( f'http://trailers.apple.com/trailers/feeds/data/{film_id}.json', film_id, fatal=False) if film_data: entries = [] for clip in film_data.get('clips', []): clip_title = clip['title'] formats = [] for version, version_data in clip.get('versions', {}).items(): for size, size_data in version_data.get('sizes', {}).items(): src = size_data.get('src') if not src: continue formats.append({ 'format_id': f'{version}-{size}', 'url': re.sub(r'_(\d+p\.mov)', r'_h\1', src), 'width': int_or_none(size_data.get('width')), 'height': int_or_none(size_data.get('height')), 'language': version[:2], }) entries.append({ 'id': movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', clip_title).lower(), 'formats': formats, 'title': clip_title, 'thumbnail': clip.get('screen') or clip.get('thumb'), 'duration': parse_duration(clip.get('runtime') or clip.get('faded')), 'upload_date': unified_strdate(clip.get('posted')), 'uploader_id': uploader_id, }) page_data = film_data.get('page', {}) return self.playlist_result(entries, film_id, page_data.get('movie_title')) playlist_url = urllib.parse.urljoin(url, 'includes/playlists/itunes.inc') def fix_html(s): s = re.sub(r'(?s)<script[^<]*?>.*?</script>', '', s) s = re.sub(r'<img ([^<]*?)/?>', r'<img \1/>', s) # The ' in the onClick attributes are not escaped, it couldn't be parsed # like: http://trailers.apple.com/trailers/wb/gravity/ def _clean_json(m): return 'iTunes.playURL({});'.format(m.group(1).replace('\'', '&#39;')) s = re.sub(self._JSON_RE, _clean_json, s) return f'<html>{s}</html>' doc = self._download_xml(playlist_url, movie, transform_source=fix_html) playlist = [] for li in doc.findall('./div/ul/li'): on_click = li.find('.//a').attrib['onClick'] trailer_info_json = self._search_regex(self._JSON_RE, on_click, 
'trailer info') trailer_info = json.loads(trailer_info_json) first_url = trailer_info.get('url') if not first_url: continue title = trailer_info['title'] video_id = movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', title).lower() thumbnail = li.find('.//img').attrib['src'] upload_date = trailer_info['posted'].replace('-', '') runtime = trailer_info['runtime'] m = re.search(r'(?P<minutes>[0-9]+):(?P<seconds>[0-9]{1,2})', runtime) duration = None if m: duration = 60 * int(m.group('minutes')) + int(m.group('seconds')) trailer_id = first_url.split('/')[-1].rpartition('_')[0].lower() settings_json_url = urllib.parse.urljoin(url, f'includes/settings/{trailer_id}.json') settings = self._download_json(settings_json_url, trailer_id, 'Downloading settings json') formats = [] for fmt in settings['metadata']['sizes']: # The src is a file pointing to the real video file format_url = re.sub(r'_(\d*p\.mov)', r'_h\1', fmt['src']) formats.append({ 'url': format_url, 'format': fmt['type'], 'width': int_or_none(fmt['width']), 'height': int_or_none(fmt['height']), }) playlist.append({ '_type': 'video', 'id': video_id, 'formats': formats, 'title': title, 'duration': duration, 'thumbnail': thumbnail, 'upload_date': upload_date, 'uploader_id': uploader_id, 'http_headers': { 'User-Agent': 'QuickTime compatible (yt-dlp)', }, }) return { '_type': 'playlist', 'id': movie, 'entries': playlist, } class AppleTrailersSectionIE(InfoExtractor): IE_NAME = 'appletrailers:section' _SECTIONS = { 'justadded': { 'feed_path': 'just_added', 'title': 'Just Added', }, 'exclusive': { 'feed_path': 'exclusive', 'title': 'Exclusive', }, 'justhd': { 'feed_path': 'just_hd', 'title': 'Just HD', }, 'mostpopular': { 'feed_path': 'most_pop', 'title': 'Most Popular', }, 'moviestudios': { 'feed_path': 'studios', 'title': 'Movie Studios', }, } _VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/#section=(?P<id>{})'.format('|'.join(_SECTIONS)) _TESTS = [{ 'url': 'http://trailers.apple.com/#section=justadded', 'info_dict': { 
'title': 'Just Added', 'id': 'justadded', }, 'playlist_mincount': 80, }, { 'url': 'http://trailers.apple.com/#section=exclusive', 'info_dict': { 'title': 'Exclusive', 'id': 'exclusive', }, 'playlist_mincount': 80, }, { 'url': 'http://trailers.apple.com/#section=justhd', 'info_dict': { 'title': 'Just HD', 'id': 'justhd', }, 'playlist_mincount': 80, }, { 'url': 'http://trailers.apple.com/#section=mostpopular', 'info_dict': { 'title': 'Most Popular', 'id': 'mostpopular', }, 'playlist_mincount': 30, }, { 'url': 'http://trailers.apple.com/#section=moviestudios', 'info_dict': { 'title': 'Movie Studios', 'id': 'moviestudios', }, 'playlist_mincount': 80, }] def _real_extract(self, url): section = self._match_id(url) section_data = self._download_json( 'http://trailers.apple.com/trailers/home/feeds/{}.json'.format(self._SECTIONS[section]['feed_path']), section) entries = [ self.url_result('http://trailers.apple.com' + e['location']) for e in section_data] return self.playlist_result(entries, section, self._SECTIONS[section]['title'])
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tva.py
yt_dlp/extractor/tva.py
import re from .brightcove import BrightcoveNewIE from .common import InfoExtractor from ..utils import float_or_none, int_or_none, smuggle_url, strip_or_none from ..utils.traversal import traverse_obj class TVAIE(InfoExtractor): IE_NAME = 'tvaplus' IE_DESC = 'TVA+' _VALID_URL = r'https?://(?:www\.)?tvaplus\.ca/(?:[^/?#]+/)*[\w-]+-(?P<id>\d+)(?:$|[#?])' _TESTS = [{ 'url': 'https://www.tvaplus.ca/tva/alerte-amber/saison-1/episode-01-1000036619', 'md5': '949490fd0e7aee11d0543777611fbd53', 'info_dict': { 'id': '6084352463001', 'ext': 'mp4', 'title': 'Mon dernier jour', 'uploader_id': '5481942443001', 'upload_date': '20190907', 'timestamp': 1567899756, 'description': 'md5:9c0d7fbb90939420c651fd977df90145', 'thumbnail': r're:https://.+\.jpg', 'episode': 'Mon dernier jour', 'episode_number': 1, 'tags': ['alerte amber', 'alerte amber saison 1', 'surdemande'], 'duration': 2625.963, 'season': 'Season 1', 'season_number': 1, 'series': 'Alerte Amber', 'channel': 'TVA', }, }, { 'url': 'https://www.tvaplus.ca/tva/le-baiser-du-barbu/le-baiser-du-barbu-886644190', 'info_dict': { 'id': '6354448043112', 'ext': 'mp4', 'title': 'Le Baiser du barbu', 'uploader_id': '5481942443001', 'upload_date': '20240606', 'timestamp': 1717694023, 'description': 'md5:025b1219086c1cbf4bc27e4e034e8b57', 'thumbnail': r're:https://.+\.jpg', 'episode': 'Le Baiser du barbu', 'tags': ['fullepisode', 'films'], 'duration': 6053.504, 'series': 'Le Baiser du barbu', 'channel': 'TVA', }, }] _BC_URL_TMPL = 'https://players.brightcove.net/5481942443001/default_default/index.html?videoId={}' def _real_extract(self, url): entity_id = self._match_id(url) webpage = self._download_webpage(url, entity_id) entity = self._search_nextjs_data(webpage, entity_id)['props']['pageProps']['staticEntity'] video_id = entity['videoId'] episode = strip_or_none(entity.get('name')) return { '_type': 'url_transparent', 'url': smuggle_url(self._BC_URL_TMPL.format(video_id), {'geo_countries': ['CA']}), 'ie_key': 
BrightcoveNewIE.ie_key(), 'id': video_id, 'title': episode, 'episode': episode, **traverse_obj(entity, { 'description': ('longDescription', {str}), 'duration': ('durationMillis', {float_or_none(scale=1000)}), 'channel': ('knownEntities', 'channel', 'name', {str}), 'series': ('knownEntities', 'videoShow', 'name', {str}), 'season_number': ('slug', {lambda x: re.search(r'/s(?:ai|ea)son-(\d+)/', x)}, 1, {int_or_none}), 'episode_number': ('episodeNumber', {int_or_none}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/elonet.py
yt_dlp/extractor/elonet.py
from .common import InfoExtractor from ..utils import determine_ext class ElonetIE(InfoExtractor): _VALID_URL = r'https?://elonet\.finna\.fi/Record/kavi\.elonet_elokuva_(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://elonet.finna.fi/Record/kavi.elonet_elokuva_107867', 'info_dict': { 'id': '107867', 'ext': 'mp4', 'title': 'Valkoinen peura', 'thumbnail': r're:^https?://elonet\.finna\.fi/Cover/Show\?id=kavi\.elonet_elokuva_107867.+', 'description': 'md5:bded4201c9677fab10854884fe8f7312', }, 'params': {'skip_download': 'dash'}, }, { # DASH with subtitles 'url': 'https://elonet.finna.fi/Record/kavi.elonet_elokuva_116539', 'info_dict': { 'id': '116539', 'ext': 'mp4', 'title': 'Minulla on tiikeri', 'thumbnail': r're:^https?://elonet\.finna\.fi/Cover/Show\?id=kavi\.elonet_elokuva_116539.+', 'description': 'md5:5ab72b3fe76d3414e46cc8f277104419', }, 'params': {'skip_download': 'dash'}, }, { # Page with multiple videos, download the main one 'url': 'https://elonet.finna.fi/Record/kavi.elonet_elokuva_117396', 'info_dict': { 'id': '117396', 'ext': 'mp4', 'title': 'Sampo', 'thumbnail': r're:^https?://elonet\.finna\.fi/Cover/Show\?id=kavi\.elonet_elokuva_117396.+', 'description': 'md5:ec69572a5b054d0ecafe8086b1fa96f7', }, 'params': {'skip_download': 'dash'}, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) src = self._parse_json(self._html_search_regex( r'id=\'video-data\'[^>]+data-video-sources="([^"]+)"', webpage, 'json'), video_id)[0]['src'] ext = determine_ext(src) if ext == 'm3u8': formats, subtitles = self._extract_m3u8_formats_and_subtitles(src, video_id, fatal=False) elif ext == 'mpd': formats, subtitles = self._extract_mpd_formats_and_subtitles(src, video_id, fatal=False) else: formats, subtitles = [], {} self.raise_no_formats(f'Unknown streaming format {ext}') return { 'id': video_id, 'title': self._og_search_title(webpage), 'description': self._og_search_description(webpage), 'thumbnail': 
self._og_search_thumbnail(webpage), 'formats': formats, 'subtitles': subtitles, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/startv.py
yt_dlp/extractor/startv.py
from .common import InfoExtractor from ..utils import ( ExtractorError, clean_html, int_or_none, traverse_obj, ) class StarTVIE(InfoExtractor): _VALID_URL = r'''(?x) https?://(?:www\.)?startv\.com\.tr/ (?: (?:dizi|program)/(?:[^/?#&]+)/(?:bolumler|fragmanlar|ekstralar)| video/arsiv/(?:dizi|program)/(?:[^/?#&]+) )/ (?P<id>[^/?#&]+) ''' IE_NAME = 'startv' _TESTS = [ { 'url': 'https://www.startv.com.tr/dizi/cocuk/bolumler/3-bolum', 'md5': '72381a32bcc2e2eb5841e8c8bf68f127', 'info_dict': { 'id': '904972', 'display_id': '3-bolum', 'ext': 'mp4', 'title': '3. Bölüm', 'description': 'md5:3a8049f05a75c2e8747116a673275de4', 'thumbnail': r're:^https?://.*\.jpg(?:\?.*?)?$', 'timestamp': 1569281400, 'upload_date': '20190923', }, }, { 'url': 'https://www.startv.com.tr/video/arsiv/dizi/avlu/44-bolum', 'only_matching': True, }, { 'url': 'https://www.startv.com.tr/dizi/cocuk/fragmanlar/5-bolum-fragmani', 'only_matching': True, }, { 'url': 'https://www.startv.com.tr/dizi/cocuk/ekstralar/5-bolumun-nefes-kesen-final-sahnesi', 'only_matching': True, }, { 'url': 'https://www.startv.com.tr/program/burcu-ile-haftasonu/bolumler/1-bolum', 'only_matching': True, }, { 'url': 'https://www.startv.com.tr/program/burcu-ile-haftasonu/fragmanlar/2-fragman', 'only_matching': True, }, { 'url': 'https://www.startv.com.tr/video/arsiv/program/buyukrisk/14-bolumde-hangi-unlu-ne-sordu-', 'only_matching': True, }, { 'url': 'https://www.startv.com.tr/video/arsiv/program/buyukrisk/buyuk-risk-334-bolum', 'only_matching': True, }, { 'url': 'https://www.startv.com.tr/video/arsiv/program/dada/dada-58-bolum', 'only_matching': True, }, ] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) info_url = self._search_regex( r'(["\'])videoUrl\1\s*:\s*\1(?P<url>(?:(?!\1).)+)\1\s*', webpage, 'video info url', group='url') info = traverse_obj(self._download_json(info_url, display_id), 'data', expected_type=dict) if not info: raise ExtractorError('Failed to 
extract API data') video_id = str(info.get('id')) title = info.get('title') or self._og_search_title(webpage) description = clean_html(info.get('description')) or self._og_search_description(webpage, default=None) thumbnail = self._proto_relative_url( self._og_search_thumbnail(webpage), scheme='http:') formats = self._extract_m3u8_formats( traverse_obj(info, ('flavors', 'hls')), video_id, entry_protocol='m3u8_native', m3u8_id='hls', fatal=False) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'timestamp': int_or_none(info.get('release_date')), 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/godresource.py
yt_dlp/extractor/godresource.py
from .common import InfoExtractor from ..utils import ( ExtractorError, determine_ext, str_or_none, unified_timestamp, url_or_none, ) from ..utils.traversal import traverse_obj class GodResourceIE(InfoExtractor): _VALID_URL = r'https?://new\.godresource\.com/video/(?P<id>\w+)' _TESTS = [{ # hls stream 'url': 'https://new.godresource.com/video/A01mTKjyf6w', 'info_dict': { 'id': 'A01mTKjyf6w', 'ext': 'mp4', 'view_count': int, 'timestamp': 1710978666, 'channel_id': '5', 'thumbnail': 'https://cdn-02.godresource.com/e42968ac-9e8b-4231-ab86-f4f9d775841f/thumbnail.jpg', 'channel': 'Stedfast Baptist Church', 'upload_date': '20240320', 'title': 'GodResource video #A01mTKjyf6w', }, }, { # mp4 link 'url': 'https://new.godresource.com/video/01DXmBbQv_X', 'md5': '0e8f72aa89a106b9d5c011ba6f8717b7', 'info_dict': { 'id': '01DXmBbQv_X', 'ext': 'mp4', 'channel_id': '12', 'view_count': int, 'timestamp': 1687996800, 'thumbnail': 'https://cdn-02.godresource.com/sodomitedeception/thumbnail.jpg', 'channel': 'Documentaries', 'title': 'The Sodomite Deception', 'upload_date': '20230629', }, }] def _real_extract(self, url): display_id = self._match_id(url) api_data = self._download_json( f'https://api.godresource.com/api/Streams/{display_id}', display_id) video_url = api_data['streamUrl'] is_live = api_data.get('isLive') or False if (ext := determine_ext(video_url)) == 'm3u8': formats, subtitles = self._extract_m3u8_formats_and_subtitles( video_url, display_id, live=is_live) elif ext == 'mp4': formats, subtitles = [{ 'url': video_url, 'ext': ext, }], {} else: raise ExtractorError(f'Unexpected video format {ext}') return { 'id': display_id, 'formats': formats, 'subtitles': subtitles, 'title': '', 'is_live': is_live, **traverse_obj(api_data, { 'title': ('title', {str}), 'thumbnail': ('thumbnail', {url_or_none}), 'view_count': ('views', {int}), 'channel': ('channelName', {str}), 'channel_id': ('channelId', {str_or_none}), 'timestamp': ('streamDateCreated', {unified_timestamp}), 
'modified_timestamp': ('streamDataModified', {unified_timestamp}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/radiocomercial.py
yt_dlp/extractor/radiocomercial.py
import itertools from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, extract_attributes, get_element_by_class, get_element_html_by_class, get_element_text_and_html_by_tag, get_elements_html_by_class, int_or_none, join_nonempty, try_call, unified_strdate, update_url, urljoin, ) from ..utils.traversal import traverse_obj class RadioComercialIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?radiocomercial\.pt/podcasts/[^/?#]+/t?(?P<season>\d+)/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://radiocomercial.pt/podcasts/o-homem-que-mordeu-o-cao/t6/taylor-swift-entranhando-se-que-nem-uma-espada-no-ventre-dos-fas#page-content-wrapper', 'md5': '5f4fe8e485b29d2e8fd495605bc2c7e4', 'info_dict': { 'id': 'taylor-swift-entranhando-se-que-nem-uma-espada-no-ventre-dos-fas', 'ext': 'mp3', 'title': 'Taylor Swift entranhando-se que nem uma espada no ventre dos fãs.', 'release_date': '20231025', 'thumbnail': r're:https://radiocomercial.pt/upload/[^.]+.jpg', 'season': 'Season 6', 'season_number': 6, }, }, { 'url': 'https://radiocomercial.pt/podcasts/convenca-me-num-minuto/t3/convenca-me-num-minuto-que-os-lobisomens-existem', 'md5': '47e96c273aef96a8eb160cd6cf46d782', 'info_dict': { 'id': 'convenca-me-num-minuto-que-os-lobisomens-existem', 'ext': 'mp3', 'title': 'Convença-me num minuto que os lobisomens existem', 'release_date': '20231026', 'thumbnail': r're:https://radiocomercial.pt/upload/[^.]+.jpg', 'season': 'Season 3', 'season_number': 3, }, }, { 'url': 'https://radiocomercial.pt/podcasts/inacreditavel-by-ines-castel-branco/t2/o-desastre-de-aviao', 'md5': '69be64255420fec23b7259955d771e54', 'info_dict': { 'id': 'o-desastre-de-aviao', 'ext': 'mp3', 'title': 'O desastre de avião', 'description': 'md5:8a82beeb372641614772baab7246245f', 'release_date': '20231101', 'thumbnail': r're:https://radiocomercial.pt/upload/[^.]+.jpg', 'season': 'Season 2', 'season_number': 2, }, 'params': { # inconsistant md5 'skip_download': True, 
}, }, { 'url': 'https://radiocomercial.pt/podcasts/tnt-todos-no-top/2023/t-n-t-29-de-outubro', 'md5': '91d32d4d4b1407272068b102730fc9fa', 'info_dict': { 'id': 't-n-t-29-de-outubro', 'ext': 'mp3', 'title': 'T.N.T 29 de outubro', 'release_date': '20231029', 'thumbnail': r're:https://radiocomercial.pt/upload/[^.]+.jpg', 'season': 'Season 2023', 'season_number': 2023, }, }] def _real_extract(self, url): video_id, season = self._match_valid_url(url).group('id', 'season') webpage = self._download_webpage(url, video_id) return { 'id': video_id, 'title': self._html_extract_title(webpage), 'description': self._og_search_description(webpage, default=None), 'release_date': unified_strdate(get_element_by_class( 'date', get_element_html_by_class('descriptions', webpage) or '')), 'thumbnail': self._og_search_thumbnail(webpage), 'season_number': int_or_none(season), 'url': extract_attributes(get_element_html_by_class('audiofile', webpage) or '').get('href'), } class RadioComercialPlaylistIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?radiocomercial\.pt/podcasts/(?P<id>[\w-]+)(?:/t?(?P<season>\d+))?/?(?:$|[?#])' _TESTS = [{ 'url': 'https://radiocomercial.pt/podcasts/convenca-me-num-minuto/t3', 'info_dict': { 'id': 'convenca-me-num-minuto_t3', 'title': 'Convença-me num Minuto - Temporada 3', }, 'playlist_mincount': 32, }, { 'url': 'https://radiocomercial.pt/podcasts/o-homem-que-mordeu-o-cao', 'info_dict': { 'id': 'o-homem-que-mordeu-o-cao', 'title': 'O Homem Que Mordeu o Cão', }, 'playlist_mincount': 19, }, { 'url': 'https://radiocomercial.pt/podcasts/as-minhas-coisas-favoritas', 'info_dict': { 'id': 'as-minhas-coisas-favoritas', 'title': 'As Minhas Coisas Favoritas', }, 'playlist_mincount': 131, }, { 'url': 'https://radiocomercial.pt/podcasts/tnt-todos-no-top/t2023', 'info_dict': { 'id': 'tnt-todos-no-top_t2023', 'title': 'TNT - Todos No Top - Temporada 2023', }, 'playlist_mincount': 39, }] def _entries(self, url, playlist_id): for page in itertools.count(1): try: webpage = 
self._download_webpage( f'{url}/{page}', playlist_id, f'Downloading page {page}') except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 404: break raise episodes = get_elements_html_by_class('tm-ouvir-podcast', webpage) if not episodes: break for url_path in traverse_obj(episodes, (..., {extract_attributes}, 'href')): episode_url = urljoin(url, url_path) if RadioComercialIE.suitable(episode_url): yield episode_url def _real_extract(self, url): podcast, season = self._match_valid_url(url).group('id', 'season') playlist_id = join_nonempty(podcast, season, delim='_t') url = update_url(url, query=None, fragment=None) webpage = self._download_webpage(url, playlist_id) name = try_call(lambda: get_element_text_and_html_by_tag('h1', webpage)[0]) title = name if name == season else join_nonempty(name, season, delim=' - Temporada ') return self.playlist_from_matches( self._entries(url, playlist_id), playlist_id, title, ie=RadioComercialIE)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/dropbox.py
yt_dlp/extractor/dropbox.py
import base64 import os.path import re import urllib.parse from .common import InfoExtractor from ..utils import ( ExtractorError, update_url, update_url_query, url_basename, urlencode_postdata, ) class DropboxIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?dropbox\.com/(?:(?:e/)?scl/f[io]|sh?)/(?P<id>\w+)' _TESTS = [ { 'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh/youtube-dl%20test%20video%20%27%C3%A4%22BaW_jenozKc.mp4?dl=0', 'info_dict': { 'id': 'nelirfsxnmcfbfh', 'ext': 'mp4', 'title': 'youtube-dl test video \'ä"BaW_jenozKc', }, }, { 'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh', 'only_matching': True, }, { 'url': 'https://www.dropbox.com/sh/2mgpiuq7kv8nqdf/AABy-fW4dkydT4GmWi2mdOUDa?dl=0&preview=Drone+Shot.mp4', 'only_matching': True, }, { 'url': 'https://www.dropbox.com/scl/fi/r2kd2skcy5ylbbta5y1pz/DJI_0003.MP4?dl=0&rlkey=wcdgqangn7t3lnmmv6li9mu9h', 'only_matching': True, }, { 'url': 'https://www.dropbox.com/e/scl/fi/r2kd2skcy5ylbbta5y1pz/DJI_0003.MP4?dl=0&rlkey=wcdgqangn7t3lnmmv6li9mu9h', 'only_matching': True, }, { 'url': 'https://www.dropbox.com/scl/fo/zjfqse5txqfd7twa8iewj/AOfZzSYWUSKle2HD7XF7kzQ/A-BEAT%20C.mp4?rlkey=6tg3jkp4tv6a5vt58a6dag0mm&dl=0', 'only_matching': True, }, ] def _yield_decoded_parts(self, webpage): for encoded in reversed(re.findall(r'registerStreamedPrefetch\s*\(\s*"[\w/+=]+"\s*,\s*"([\w/+=]+)"', webpage)): yield base64.b64decode(encoded).decode('utf-8', 'ignore') def _real_extract(self, url): mobj = self._match_valid_url(url) video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) fn = urllib.parse.unquote(url_basename(url)) title = os.path.splitext(fn)[0] content_id = None for part in self._yield_decoded_parts(webpage): if '/sm/password' in part: content_id = self._search_regex(r'content_id=([\w.+=/-]+)', part, 'content ID') break if content_id: password = self.get_param('videopassword') if not password: raise ExtractorError('Password protected video, use --video-password <password>', expected=True) 
response = self._download_json( 'https://www.dropbox.com/sm/auth', video_id, 'POSTing video password', data=urlencode_postdata({ 'is_xhr': 'true', 't': self._get_cookies('https://www.dropbox.com')['t'].value, 'content_id': content_id, 'password': password, 'url': update_url(url, scheme='', netloc=''), })) if response.get('status') != 'authed': raise ExtractorError('Invalid password', expected=True) webpage = self._download_webpage(url, video_id) formats, subtitles = [], {} has_anonymous_download = False thumbnail = None for part in self._yield_decoded_parts(webpage): if not has_anonymous_download: has_anonymous_download = self._search_regex( r'(anonymous:\tanonymous)', part, 'anonymous', default=False) transcode_url = self._search_regex( r'\n.?(https://[^\x03\x08\x12\n]+\.m3u8)', part, 'transcode url', default=None) if not transcode_url: continue formats, subtitles = self._extract_m3u8_formats_and_subtitles(transcode_url, video_id, 'mp4') thumbnail = self._search_regex( r'(https://www\.dropbox\.com/temp_thumb_from_token/[\w/?&=]+)', part, 'thumbnail', default=None) break # downloads enabled we can get the original file if has_anonymous_download: formats.append({ 'url': update_url_query(url, {'dl': '1'}), 'format_id': 'original', 'format_note': 'Original', 'quality': 1, }) return { 'id': video_id, 'title': title, 'formats': formats, 'subtitles': subtitles, 'thumbnail': thumbnail, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/younow.py
yt_dlp/extractor/younow.py
import itertools from .common import InfoExtractor from ..utils import ( ExtractorError, format_field, int_or_none, str_or_none, try_get, ) CDN_API_BASE = 'https://cdn.younow.com/php/api' MOMENT_URL_FORMAT = f'{CDN_API_BASE}/moment/fetch/id=%s' class YouNowLiveIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?younow\.com/(?P<id>[^/?#&]+)' _TEST = { 'url': 'https://www.younow.com/AmandaPadeezy', 'info_dict': { 'id': 'AmandaPadeezy', 'ext': 'mp4', 'is_live': True, 'title': 'March 26, 2017', 'thumbnail': r're:^https?://.*\.jpg$', 'tags': ['girls'], 'categories': ['girls'], 'uploader': 'AmandaPadeezy', 'uploader_id': '6716501', 'uploader_url': 'https://www.younow.com/AmandaPadeezy', 'creator': 'AmandaPadeezy', }, 'skip': True, } @classmethod def suitable(cls, url): return (False if YouNowChannelIE.suitable(url) or YouNowMomentIE.suitable(url) else super().suitable(url)) def _real_extract(self, url): username = self._match_id(url) data = self._download_json( f'https://api.younow.com/php/api/broadcast/info/curId=0/user={username}', username) if data.get('errorCode') != 0: raise ExtractorError(data['errorMsg'], expected=True) uploader = try_get( data, lambda x: x['user']['profileUrlString'], str) or username return { 'id': uploader, 'is_live': True, 'title': uploader, 'thumbnail': data.get('awsUrl'), 'tags': data.get('tags'), 'categories': data.get('tags'), 'uploader': uploader, 'uploader_id': data.get('userId'), 'uploader_url': f'https://www.younow.com/{username}', 'creator': uploader, 'view_count': int_or_none(data.get('viewers')), 'like_count': int_or_none(data.get('likes')), 'formats': [{ 'url': '{}/broadcast/videoPath/hls=1/broadcastId={}/channelId={}'.format(CDN_API_BASE, data['broadcastId'], data['userId']), 'ext': 'mp4', 'protocol': 'm3u8', }], } def _extract_moment(item, fatal=True): moment_id = item.get('momentId') if not moment_id: if not fatal: return raise ExtractorError('Unable to extract moment id') moment_id = str(moment_id) title = item.get('text') if 
not title: title = 'YouNow %s' % ( item.get('momentType') or item.get('titleType') or 'moment') uploader = try_get(item, lambda x: x['owner']['name'], str) uploader_id = try_get(item, lambda x: x['owner']['userId']) uploader_url = format_field(uploader, None, 'https://www.younow.com/%s') return { 'extractor_key': 'YouNowMoment', 'id': moment_id, 'title': title, 'view_count': int_or_none(item.get('views')), 'like_count': int_or_none(item.get('likes')), 'timestamp': int_or_none(item.get('created')), 'creator': uploader, 'uploader': uploader, 'uploader_id': str_or_none(uploader_id), 'uploader_url': uploader_url, 'formats': [{ 'url': f'https://hls.younow.com/momentsplaylists/live/{moment_id}/{moment_id}.m3u8', 'ext': 'mp4', 'protocol': 'm3u8_native', }], } class YouNowChannelIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?younow\.com/(?P<id>[^/]+)/channel' _TEST = { 'url': 'https://www.younow.com/its_Kateee_/channel', 'info_dict': { 'id': '14629760', 'title': 'its_Kateee_ moments', }, 'playlist_mincount': 8, } def _entries(self, username, channel_id): created_before = 0 for page_num in itertools.count(1): if created_before is None: break info = self._download_json( f'{CDN_API_BASE}/moment/profile/channelId={channel_id}/createdBefore={created_before}/records=20', username, note=f'Downloading moments page {page_num}') items = info.get('items') if not items or not isinstance(items, list): break for item in items: if not isinstance(item, dict): continue item_type = item.get('type') if item_type == 'moment': entry = _extract_moment(item, fatal=False) if entry: yield entry elif item_type == 'collection': moments = item.get('momentsIds') if isinstance(moments, list): for moment_id in moments: m = self._download_json( MOMENT_URL_FORMAT % moment_id, username, note=f'Downloading {moment_id} moment JSON', fatal=False) if m and isinstance(m, dict) and m.get('item'): entry = _extract_moment(m['item']) if entry: yield entry created_before = int_or_none(item.get('created')) def 
_real_extract(self, url): username = self._match_id(url) channel_id = str(self._download_json( f'https://api.younow.com/php/api/broadcast/info/curId=0/user={username}', username, note='Downloading user information')['userId']) return self.playlist_result( self._entries(username, channel_id), channel_id, f'{username} moments') class YouNowMomentIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?younow\.com/[^/]+/(?P<id>[^/?#&]+)' _TEST = { 'url': 'https://www.younow.com/GABO.../20712117/36319236/3b316doc/m', 'md5': 'a30c70eadb9fb39a1aa3c8c0d22a0807', 'info_dict': { 'id': '20712117', 'ext': 'mp4', 'title': 'YouNow capture', 'view_count': int, 'like_count': int, 'timestamp': 1490432040, 'upload_date': '20170325', 'uploader': 'GABO...', 'uploader_id': '35917228', }, } @classmethod def suitable(cls, url): return (False if YouNowChannelIE.suitable(url) else super().suitable(url)) def _real_extract(self, url): video_id = self._match_id(url) item = self._download_json(MOMENT_URL_FORMAT % video_id, video_id) return _extract_moment(item['item'])
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/epidemicsound.py
yt_dlp/extractor/epidemicsound.py
from .common import InfoExtractor from ..utils import ( float_or_none, int_or_none, join_nonempty, orderedSet, parse_iso8601, parse_qs, parse_resolution, str_or_none, traverse_obj, url_or_none, ) class EpidemicSoundIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?epidemicsound\.com/(?:(?P<sfx>sound-effects/tracks)|track)/(?P<id>[0-9a-zA-Z-]+)' _TESTS = [{ 'url': 'https://www.epidemicsound.com/track/yFfQVRpSPz/', 'md5': 'd98ff2ddb49e8acab9716541cbc9dfac', 'info_dict': { 'id': '45014', 'display_id': 'yFfQVRpSPz', 'ext': 'mp3', 'title': 'Door Knock Door 1', 'alt_title': 'Door Knock Door 1', 'tags': ['foley', 'door', 'knock', 'glass', 'window', 'glass door knock'], 'categories': ['Misc. Door'], 'duration': 1, 'thumbnail': 'https://cdn.epidemicsound.com/curation-assets/commercial-release-cover-images/default-sfx/3000x3000.jpg', 'timestamp': 1415320353, 'upload_date': '20141107', }, }, { 'url': 'https://www.epidemicsound.com/track/mj8GTTwsZd/', 'md5': 'c82b745890f9baf18dc2f8d568ee3830', 'info_dict': { 'id': '148700', 'display_id': 'mj8GTTwsZd', 'ext': 'mp3', 'title': 'Noplace', 'tags': ['liquid drum n bass', 'energetic'], 'categories': ['drum and bass'], 'duration': 237, 'timestamp': 1694426482, 'thumbnail': 'https://cdn.epidemicsound.com/curation-assets/commercial-release-cover-images/11138/3000x3000.jpg', 'upload_date': '20230911', 'release_timestamp': 1700535606, 'release_date': '20231121', }, }, { 'url': 'https://www.epidemicsound.com/sound-effects/tracks/2f02f54b-9faa-4daf-abac-1cfe9e9cef69/', 'md5': '35d7cf05bd8b614a84f0495a05de9388', 'info_dict': { 'id': '208931', 'ext': 'mp3', 'upload_date': '20240603', 'timestamp': 1717436529, 'categories': ['appliance'], 'display_id': '6b2NXLURPr', 'duration': 1.0, 'title': 'Oven, Grill, Door Open 01', 'thumbnail': 'https://cdn.epidemicsound.com/curation-assets/commercial-release-cover-images/default-sfx/3000x3000.jpg', }, }] @staticmethod def _epidemic_parse_thumbnail(url: str): if not url_or_none(url): return None return 
{ 'url': url, **(traverse_obj(url, ({parse_qs}, { 'width': ('width', 0, {int_or_none}), 'height': ('height', 0, {int_or_none}), })) or parse_resolution(url)), } @staticmethod def _epidemic_fmt_or_none(f): if not f.get('format'): f['format'] = f.get('format_id') elif not f.get('format_id'): f['format_id'] = f['format'] if not f['url'] or not f['format']: return None if f.get('format_note'): f['format_note'] = f'track ID {f["format_note"]}' if f['format'] != 'full': f['preference'] = -2 return f def _real_extract(self, url): video_id, is_sfx = self._match_valid_url(url).group('id', 'sfx') json_data = self._download_json(join_nonempty( 'https://www.epidemicsound.com/json/track', is_sfx and 'kosmos-id', video_id, delim='/'), video_id) thumbnails = traverse_obj(json_data, [('imageUrl', 'cover')]) thumb_base_url = traverse_obj(json_data, ('coverArt', 'baseUrl', {url_or_none})) if thumb_base_url: thumbnails.extend(traverse_obj(json_data, ( 'coverArt', 'sizes', ..., {thumb_base_url.__add__}))) return traverse_obj(json_data, { 'id': ('id', {str_or_none}), 'display_id': ('publicSlug', {str}), 'title': ('title', {str}), 'alt_title': ('oldTitle', {str}), 'duration': ('length', {float_or_none}), 'timestamp': ('added', {parse_iso8601}), 'release_timestamp': ('releaseDate', {parse_iso8601}), 'categories': ('genres', ..., 'tag', {str}), 'tags': ('metadataTags', ..., {str}), 'age_limit': ('isExplicit', {lambda b: 18 if b else None}), 'thumbnails': ({lambda _: thumbnails}, {orderedSet}, ..., {self._epidemic_parse_thumbnail}), 'formats': ('stems', {dict.items}, ..., { 'format': (0, {str_or_none}), 'format_note': (1, 's3TrackId', {str_or_none}), 'format_id': (1, 'stemType', {str}), 'url': (1, 'lqMp3Url', {url_or_none}), }, {self._epidemic_fmt_or_none}), })
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/lovehomeporn.py
yt_dlp/extractor/lovehomeporn.py
from .nuevo import NuevoBaseIE class LoveHomePornIE(NuevoBaseIE): _VALID_URL = r'https?://(?:www\.)?lovehomeporn\.com/video/(?P<id>\d+)(?:/(?P<display_id>[^/?#&]+))?' _TEST = { 'url': 'http://lovehomeporn.com/video/48483/stunning-busty-brunette-girlfriend-sucking-and-riding-a-big-dick#menu', 'info_dict': { 'id': '48483', 'display_id': 'stunning-busty-brunette-girlfriend-sucking-and-riding-a-big-dick', 'ext': 'mp4', 'title': 'Stunning busty brunette girlfriend sucking and riding a big dick', 'age_limit': 18, 'duration': 238.47, }, 'params': { 'skip_download': True, }, } def _real_extract(self, url): mobj = self._match_valid_url(url) video_id = mobj.group('id') display_id = mobj.group('display_id') info = self._extract_nuevo( f'http://lovehomeporn.com/media/nuevo/config.php?key={video_id}', video_id) info.update({ 'display_id': display_id, 'age_limit': 18, }) return info
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/jeuxvideo.py
yt_dlp/extractor/jeuxvideo.py
from .common import InfoExtractor class JeuxVideoIE(InfoExtractor): _WORKING = False _ENABLED = None # XXX: pass through to GenericIE _VALID_URL = r'https?://.*?\.jeuxvideo\.com/.*/(.*?)\.htm' _TESTS = [{ 'url': 'http://www.jeuxvideo.com/reportages-videos-jeux/0004/00046170/tearaway-playstation-vita-gc-2013-tearaway-nous-presente-ses-papiers-d-identite-00115182.htm', 'md5': '046e491afb32a8aaac1f44dd4ddd54ee', 'info_dict': { 'id': '114765', 'ext': 'mp4', 'title': 'Tearaway : GC 2013 : Tearaway nous présente ses papiers d\'identité', 'description': 'Lorsque les développeurs de LittleBigPlanet proposent un nouveau titre, on ne peut que s\'attendre à un résultat original et fort attrayant.', }, }, { 'url': 'http://www.jeuxvideo.com/videos/chroniques/434220/l-histoire-du-jeu-video-la-saturn.htm', 'only_matching': True, }] def _real_extract(self, url): mobj = self._match_valid_url(url) title = mobj.group(1) webpage = self._download_webpage(url, title) title = self._html_search_meta('name', webpage) or self._og_search_title(webpage) config_url = self._html_search_regex( r'data-src(?:set-video)?="(/contenu/medias/video\.php.*?)"', webpage, 'config URL') config_url = 'http://www.jeuxvideo.com' + config_url video_id = self._search_regex( r'id=(\d+)', config_url, 'video ID') config = self._download_json( config_url, title, 'Downloading JSON config') formats = [{ 'url': source['file'], 'format_id': source['label'], 'resolution': source['label'], } for source in reversed(config['sources'])] return { 'id': video_id, 'title': title, 'formats': formats, 'description': self._og_search_description(webpage), 'thumbnail': config.get('image'), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/reuters.py
yt_dlp/extractor/reuters.py
import re from .common import InfoExtractor from ..utils import ( int_or_none, js_to_json, unescapeHTML, ) class ReutersIE(InfoExtractor): _WORKING = False _VALID_URL = r'https?://(?:www\.)?reuters\.com/.*?\?.*?videoId=(?P<id>[0-9]+)' _TEST = { 'url': 'http://www.reuters.com/video/2016/05/20/san-francisco-police-chief-resigns?videoId=368575562', 'md5': '8015113643a0b12838f160b0b81cc2ee', 'info_dict': { 'id': '368575562', 'ext': 'mp4', 'title': 'San Francisco police chief resigns', }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( f'http://www.reuters.com/assets/iframe/yovideo?videoId={video_id}', video_id) video_data = js_to_json(self._search_regex( r'(?s)Reuters\.yovideo\.drawPlayer\(({.*?})\);', webpage, 'video data')) def get_json_value(key, fatal=False): return self._search_regex(rf'"{key}"\s*:\s*"([^"]+)"', video_data, key, fatal=fatal) title = unescapeHTML(get_json_value('title', fatal=True)) mmid, fid = re.search(r',/(\d+)\?f=(\d+)', get_json_value('flv', fatal=True)).groups() mas_data = self._download_json( f'http://mas-e.cds1.yospace.com/mas/{mmid}/{fid}?trans=json', video_id, transform_source=js_to_json) formats = [] for f in mas_data: f_url = f.get('url') if not f_url: continue method = f.get('method') if method == 'hls': formats.extend(self._extract_m3u8_formats( f_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) else: container = f.get('container') ext = '3gp' if method == 'mobile' else container formats.append({ 'format_id': ext, 'url': f_url, 'ext': ext, 'container': container if method != 'mobile' else None, }) return { 'id': video_id, 'title': title, 'thumbnail': get_json_value('thumb'), 'duration': int_or_none(get_json_value('seconds')), 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/dlf.py
yt_dlp/extractor/dlf.py
import re from .common import InfoExtractor from ..utils import ( determine_ext, extract_attributes, int_or_none, traverse_obj, url_or_none, ) class DLFBaseIE(InfoExtractor): _VALID_URL_BASE = r'https?://(?:www\.)?deutschlandfunk\.de/' _BUTTON_REGEX = r'(<button[^>]+alt="Anhören"[^>]+data-audio-diraid[^>]*>)' def _parse_button_attrs(self, button, audio_id=None): attrs = extract_attributes(button) audio_id = audio_id or attrs['data-audio-diraid'] url = traverse_obj( attrs, 'data-audio-download-src', 'data-audio', 'data-audioreference', 'data-audio-src', expected_type=url_or_none) ext = determine_ext(url) return { 'id': audio_id, 'extractor_key': DLFIE.ie_key(), 'extractor': DLFIE.IE_NAME, **traverse_obj(attrs, { 'title': (('data-audiotitle', 'data-audio-title', 'data-audio-download-tracking-title'), {str}), 'duration': (('data-audioduration', 'data-audio-duration'), {int_or_none}), 'thumbnail': ('data-audioimage', {url_or_none}), 'uploader': 'data-audio-producer', 'series': 'data-audio-series', 'channel': 'data-audio-origin-site-name', 'webpage_url': ('data-audio-download-tracking-path', {url_or_none}), }, get_all=False), 'formats': (self._extract_m3u8_formats(url, audio_id, fatal=False) if ext == 'm3u8' else [{'url': url, 'ext': ext, 'vcodec': 'none'}]), } class DLFIE(DLFBaseIE): IE_NAME = 'dlf' _VALID_URL = DLFBaseIE._VALID_URL_BASE + r'[\w-]+-dlf-(?P<id>[\da-f]{8})-100\.html' _TESTS = [ # Audio as an HLS stream { 'url': 'https://www.deutschlandfunk.de/tanz-der-saiteninstrumente-das-wild-strings-trio-aus-slowenien-dlf-03a3eb19-100.html', 'info_dict': { 'id': '03a3eb19', 'title': r're:Tanz der Saiteninstrumente [-/] Das Wild Strings Trio aus Slowenien', 'ext': 'm4a', 'duration': 3298, 'thumbnail': 'https://assets.deutschlandfunk.de/FALLBACK-IMAGE-AUDIO/512x512.png?t=1603714364673', 'uploader': 'Deutschlandfunk', 'series': 'On Stage', 'channel': 'deutschlandfunk', }, 'params': { 'skip_download': 'm3u8', }, 'skip': 'This webpage no longer exists', }, { 'url': 
'https://www.deutschlandfunk.de/russische-athleten-kehren-zurueck-auf-die-sportbuehne-ein-gefaehrlicher-tueroeffner-dlf-d9cc1856-100.html', 'info_dict': { 'id': 'd9cc1856', 'title': 'Russische Athleten kehren zurück auf die Sportbühne: Ein gefährlicher Türöffner', 'ext': 'mp3', 'duration': 291, 'thumbnail': 'https://assets.deutschlandfunk.de/FALLBACK-IMAGE-AUDIO/512x512.png?t=1603714364673', 'uploader': 'Deutschlandfunk', 'series': 'Kommentare und Themen der Woche', 'channel': 'deutschlandfunk', }, }, ] def _real_extract(self, url): audio_id = self._match_id(url) webpage = self._download_webpage(url, audio_id) return self._parse_button_attrs( self._search_regex(self._BUTTON_REGEX, webpage, 'button'), audio_id) class DLFCorpusIE(DLFBaseIE): IE_NAME = 'dlf:corpus' IE_DESC = 'DLF Multi-feed Archives' _VALID_URL = DLFBaseIE._VALID_URL_BASE + r'(?P<id>(?![\w-]+-dlf-[\da-f]{8})[\w-]+-\d+)\.html' _TESTS = [ # Recorded news broadcast with referrals to related broadcasts { 'url': 'https://www.deutschlandfunk.de/fechten-russland-belarus-ukraine-protest-100.html', 'info_dict': { 'id': 'fechten-russland-belarus-ukraine-protest-100', 'title': r're:Wiederzulassung als neutrale Athleten [-/] Was die Rückkehr russischer und belarussischer Sportler beim Fechten bedeutet', 'description': 'md5:91340aab29c71aa7518ad5be13d1e8ad', }, 'playlist_mincount': 5, 'playlist': [{ 'info_dict': { 'id': '1fc5d64a', 'title': r're:Wiederzulassung als neutrale Athleten [-/] Was die Rückkehr russischer und belarussischer Sportler beim Fechten bedeutet', 'ext': 'mp3', 'duration': 252, 'thumbnail': 'https://assets.deutschlandfunk.de/aad16241-6b76-4a09-958b-96d0ee1d6f57/512x512.jpg?t=1679480020313', 'uploader': 'Deutschlandfunk', 'series': 'Sport', 'channel': 'deutschlandfunk', }, }, { 'info_dict': { 'id': '2ada145f', 'title': r're:(?:Sportpolitik / )?Fechtverband votiert für Rückkehr russischer Athleten', 'ext': 'mp3', 'duration': 336, 'thumbnail': 
'https://assets.deutschlandfunk.de/FILE_93982766f7317df30409b8a184ac044a/512x512.jpg?t=1678547581005', 'uploader': 'Deutschlandfunk', 'series': 'Deutschlandfunk Nova', 'channel': 'deutschlandfunk-nova', }, }, { 'info_dict': { 'id': '5e55e8c9', 'title': r're:Wiederzulassung von Russland und Belarus [-/] "Herumlavieren" des Fechter-Bundes sorgt für Unverständnis', 'ext': 'mp3', 'duration': 187, 'thumbnail': 'https://assets.deutschlandfunk.de/a595989d-1ed1-4a2e-8370-b64d7f11d757/512x512.jpg?t=1679173825412', 'uploader': 'Deutschlandfunk', 'series': 'Sport am Samstag', 'channel': 'deutschlandfunk', }, }, { 'info_dict': { 'id': '47e1a096', 'title': r're:Rückkehr Russlands im Fechten [-/] "Fassungslos, dass es einfach so passiert ist"', 'ext': 'mp3', 'duration': 602, 'thumbnail': 'https://assets.deutschlandfunk.de/da4c494a-21cc-48b4-9cc7-40e09fd442c2/512x512.jpg?t=1678562155770', 'uploader': 'Deutschlandfunk', 'series': 'Sport am Samstag', 'channel': 'deutschlandfunk', }, }, { 'info_dict': { 'id': '5e55e8c9', 'title': r're:Wiederzulassung von Russland und Belarus [-/] "Herumlavieren" des Fechter-Bundes sorgt für Unverständnis', 'ext': 'mp3', 'duration': 187, 'thumbnail': 'https://assets.deutschlandfunk.de/a595989d-1ed1-4a2e-8370-b64d7f11d757/512x512.jpg?t=1679173825412', 'uploader': 'Deutschlandfunk', 'series': 'Sport am Samstag', 'channel': 'deutschlandfunk', }, }], }, # Podcast feed with tag buttons, playlist count fluctuates { 'url': 'https://www.deutschlandfunk.de/kommentare-und-themen-der-woche-100.html', 'info_dict': { 'id': 'kommentare-und-themen-der-woche-100', 'title': 'Meinung - Kommentare und Themen der Woche', 'description': 'md5:2901bbd65cd2d45e116d399a099ce5d5', }, 'playlist_mincount': 10, }, # Podcast feed with no description { 'url': 'https://www.deutschlandfunk.de/podcast-tolle-idee-100.html', 'info_dict': { 'id': 'podcast-tolle-idee-100', 'title': 'Wissenschaftspodcast - Tolle Idee! 
- Was wurde daraus?', }, 'playlist_mincount': 11, }, ] def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) return { '_type': 'playlist', 'id': playlist_id, 'description': self._html_search_meta( ['description', 'og:description', 'twitter:description'], webpage, default=None), 'title': self._html_search_meta( ['og:title', 'twitter:title'], webpage, default=None), 'entries': map(self._parse_button_attrs, re.findall(self._BUTTON_REGEX, webpage)), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/gopro.py
yt_dlp/extractor/gopro.py
from .common import InfoExtractor from ..utils import ( int_or_none, remove_end, str_or_none, try_get, unified_timestamp, url_or_none, ) class GoProIE(InfoExtractor): _VALID_URL = r'https?://(www\.)?gopro\.com/v/(?P<id>[A-Za-z0-9]+)' _TESTS = [{ 'url': 'https://gopro.com/v/ZNVvED8QDzR5V', 'info_dict': { 'id': 'ZNVvED8QDzR5V', 'title': 'My GoPro Adventure - 9/19/21', 'thumbnail': r're:https?://.+', 'ext': 'mp4', 'timestamp': 1632072947, 'upload_date': '20210919', 'uploader_id': 'fireydive30018', 'duration': 396062, }, }, { 'url': 'https://gopro.com/v/KRm6Vgp2peg4e', 'info_dict': { 'id': 'KRm6Vgp2peg4e', 'title': 'じゃがいも カリカリ オーブン焼き', 'thumbnail': r're:https?://.+', 'ext': 'mp4', 'timestamp': 1607231125, 'upload_date': '20201206', 'uploader_id': 'dc9bcb8b-47d2-47c6-afbc-4c48f9a3769e', 'duration': 45187, 'track': 'The Sky Machine', }, }, { 'url': 'https://gopro.com/v/kVrK9wlJvBMwn', 'info_dict': { 'id': 'kVrK9wlJvBMwn', 'title': 'DARKNESS', 'thumbnail': r're:https?://.+', 'ext': 'mp4', 'timestamp': 1594183735, 'upload_date': '20200708', 'uploader_id': '闇夜乃皇帝', 'duration': 313075, 'track': 'Battery (Live)', 'artist': 'Metallica', }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) metadata = self._search_json( r'window\.__reflectData\s*=', webpage, 'metadata', video_id) video_info = metadata['collectionMedia'][0] media_data = self._download_json( 'https://api.gopro.com/media/{}/download'.format(video_info['id']), video_id) formats = [] for fmt in try_get(media_data, lambda x: x['_embedded']['variations']) or []: format_url = url_or_none(fmt.get('url')) if not format_url: continue formats.append({ 'url': format_url, 'format_id': str_or_none(fmt.get('quality')), 'format_note': str_or_none(fmt.get('label')), 'ext': str_or_none(fmt.get('type')), 'width': int_or_none(fmt.get('width')), 'height': int_or_none(fmt.get('height')), }) title = str_or_none( try_get(metadata, lambda x: x['collection']['title']) or 
self._html_search_meta(['og:title', 'twitter:title'], webpage) or remove_end(self._html_search_regex( r'<title[^>]*>([^<]+)</title>', webpage, 'title', fatal=False), ' | GoPro')) if title: title = title.replace('\n', ' ') return { 'id': video_id, 'title': title, 'formats': formats, 'thumbnail': url_or_none( self._html_search_meta(['og:image', 'twitter:image'], webpage)), 'timestamp': unified_timestamp( try_get(metadata, lambda x: x['collection']['created_at'])), 'uploader_id': str_or_none( try_get(metadata, lambda x: x['account']['nickname'])), 'duration': int_or_none( video_info.get('source_duration')), 'artist': str_or_none( video_info.get('music_track_artist')) or None, 'track': str_or_none( video_info.get('music_track_name')) or None, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/deuxm.py
yt_dlp/extractor/deuxm.py
from .common import InfoExtractor from ..utils import url_or_none class DeuxMIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?2m\.ma/[^/]+/replay/single/(?P<id>([\w.]{1,24})+)' _TESTS = [{ 'url': 'https://2m.ma/fr/replay/single/6351d439b15e1a613b3debe8', 'md5': '5f761f04c9d686e553b685134dca5d32', 'info_dict': { 'id': '6351d439b15e1a613b3debe8', 'ext': 'mp4', 'title': 'Grand Angle : Jeudi 20 Octobre 2022', 'thumbnail': r're:^https?://2msoread-ww.amagi.tv/mediasfiles/videos/images/.*\.png$', }, }, { 'url': 'https://2m.ma/fr/replay/single/635c0aeab4eec832622356da', 'md5': 'ad6af2f5e4d5b2ad2194a84b6e890b4c', 'info_dict': { 'id': '635c0aeab4eec832622356da', 'ext': 'mp4', 'title': 'Journal Amazigh : Vendredi 28 Octobre 2022', 'thumbnail': r're:^https?://2msoread-ww.amagi.tv/mediasfiles/videos/images/.*\.png$', }, }] def _real_extract(self, url): video_id = self._match_id(url) video = self._download_json( f'https://2m.ma/api/watchDetail/{video_id}', video_id)['response']['News'] return { 'id': video_id, 'title': video.get('titre'), 'url': video['url'], 'description': video.get('description'), 'thumbnail': url_or_none(video.get('image')), } class DeuxMNewsIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?2m\.ma/(?P<lang>\w+)/news/(?P<id>[^/#?]+)' _TESTS = [{ 'url': 'https://2m.ma/fr/news/Kan-Ya-Mkan-d%C3%A9poussi%C3%A8re-l-histoire-du-phare-du-Cap-Beddouza-20221028', 'md5': '43d5e693a53fa0b71e8a5204c7d4542a', 'info_dict': { 'id': '635c5d1233b83834e35b282e', 'ext': 'mp4', 'title': 'Kan Ya Mkan d\u00e9poussi\u00e8re l\u2019histoire du phare du Cap Beddouza', 'description': 'md5:99dcf29b82f1d7f2a4acafed1d487527', 'thumbnail': r're:^https?://2msoread-ww.amagi.tv/mediasfiles/videos/images/.*\.png$', }, }, { 'url': 'https://2m.ma/fr/news/Interview-Casablanca-hors-des-sentiers-battus-avec-Abderrahim-KASSOU-Replay--20221017', 'md5': '7aca29f02230945ef635eb8290283c0c', 'info_dict': { 'id': '634d9e108b70d40bc51a844b', 'ext': 'mp4', 'title': 'Interview: Casablanca hors des 
sentiers battus avec Abderrahim KASSOU (Replay) ', 'description': 'md5:3b8e78111de9fcc6ef7f7dd6cff2430c', 'thumbnail': r're:^https?://2msoread-ww.amagi.tv/mediasfiles/videos/images/.*\.png$', }, }] def _real_extract(self, url): article_name, lang = self._match_valid_url(url).group('id', 'lang') video = self._download_json( f'https://2m.ma/api/articlesByUrl?lang={lang}&url=/news/{article_name}', article_name)['response']['article'][0] return { 'id': video['id'], 'title': video.get('title'), 'url': video['image'][0], 'description': video.get('content'), 'thumbnail': url_or_none(video.get('cover')), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tbsjp.py
yt_dlp/extractor/tbsjp.py
from .streaks import StreaksBaseIE from ..utils import ( clean_html, int_or_none, str_or_none, unified_timestamp, url_or_none, ) from ..utils.traversal import traverse_obj class TBSJPBaseIE(StreaksBaseIE): def _search_window_app_json(self, webpage, name, item_id, **kwargs): return self._search_json(r'window\.app\s*=', webpage, f'{name} info', item_id, **kwargs) class TBSJPEpisodeIE(TBSJPBaseIE): _VALID_URL = r'https?://cu\.tbs\.co\.jp/episode/(?P<id>[\d_]+)' _TESTS = [{ 'url': 'https://cu.tbs.co.jp/episode/14694_2094162_1000123656', 'skip': 'geo-blocked to japan + 7-day expiry', 'info_dict': { 'title': 'クロちゃん、寝て起きたら川のほとりにいてその向こう岸に亡くなった父親がいたら死の淵にいるかと思う説 ほか', 'id': '14694_2094162_1000123656', 'ext': 'mp4', 'display_id': 'ref:14694_2094162_1000123656', 'description': 'md5:1a82fcdeb5e2e82190544bb72721c46e', 'uploader': 'TBS', 'uploader_id': 'tbs', 'duration': 2752, 'thumbnail': 'md5:d8855c8c292683c95a84cafdb42300bc', 'categories': ['エンタメ', '水曜日のダウンタウン', 'ダウンタウン', '浜田雅功', '松本人志', '水ダウ', '動画', 'バラエティ'], 'cast': ['浜田 雅功', '藤本 敏史', 'ビビる 大木', '千原 ジュニア', '横澤 夏子', 'せいや', 'あの', '服部 潤'], 'genres': ['variety'], 'series': '水曜日のダウンタウン', 'series_id': '14694', 'episode': 'クロちゃん、寝て起きたら川のほとりにいてその向こう岸に亡くなった父親がいたら死の淵にいるかと思う説 ほか', 'episode_number': 341, 'episode_id': '14694_2094162_1000123656', 'timestamp': 1753778992, 'upload_date': '20250729', 'release_timestamp': 1753880402, 'release_date': '20250730', 'modified_timestamp': 1753880741, 'modified_date': '20250730', 'live_status': 'not_live', }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) meta = self._search_window_app_json(webpage, 'episode', video_id, fatal=False) episode = traverse_obj(meta, ('falcorCache', 'catalog', 'episode', video_id, 'value')) return { **self._extract_from_streaks_api( 'tbs', f'ref:{video_id}', headers={'Origin': 'https://cu.tbs.co.jp'}), **traverse_obj(episode, { 'title': ('title', ..., 'value', {str}, any), 'cast': ( 'credit', ..., 'name', 
..., 'value', {clean_html}, any, {lambda x: x.split(',')}, ..., {str.strip}, filter, all, filter), 'categories': ('keywords', ..., {str}, filter, all, filter), 'description': ('description', ..., 'value', {clean_html}, any), 'duration': ('tv_episode_info', 'duration', {int_or_none}), 'episode': ('title', lambda _, v: not v.get('is_phonetic'), 'value', {str}, any), 'episode_id': ('content_id', {str}), 'episode_number': ('tv_episode_info', 'episode_number', {int_or_none}), 'genres': ('genre', ..., {str}, filter, all, filter), 'release_timestamp': ('pub_date', {unified_timestamp}), 'series': ('custom_data', 'program_name', {str}), 'tags': ('tags', ..., {str}, filter, all, filter), 'thumbnail': ('artwork', ..., 'url', {url_or_none}, any), 'timestamp': ('created_at', {unified_timestamp}), 'uploader': ('tv_show_info', 'networks', ..., {str}, any), }), **traverse_obj(episode, ('tv_episode_info', { 'duration': ('duration', {int_or_none}), 'episode_number': ('episode_number', {int_or_none}), 'series_id': ('show_content_id', {str}), })), 'id': video_id, } class TBSJPProgramIE(TBSJPBaseIE): _VALID_URL = r'https?://cu\.tbs\.co\.jp/program/(?P<id>\d+)' _TESTS = [{ 'url': 'https://cu.tbs.co.jp/program/14694', 'playlist_mincount': 1, 'info_dict': { 'id': '14694', 'title': '水曜日のダウンタウン', 'description': 'md5:cf1d46c76c2755d7f87512498718b837', 'categories': ['エンタメ', '水曜日のダウンタウン', 'ダウンタウン', '浜田雅功', '松本人志', '水ダウ', '動画', 'バラエティ'], 'series': '水曜日のダウンタウン', }, }] def _real_extract(self, url): programme_id = self._match_id(url) webpage = self._download_webpage(url, programme_id) meta = self._search_window_app_json(webpage, 'programme', programme_id) programme = traverse_obj(meta, ('falcorCache', 'catalog', 'program', programme_id, 'false', 'value')) return { '_type': 'playlist', 'entries': [self.url_result(f'https://cu.tbs.co.jp/episode/{video_id}', TBSJPEpisodeIE, video_id) for video_id in traverse_obj(programme, ('custom_data', 'seriesList', 'episodeCode', ...))], 'id': programme_id, 
**traverse_obj(programme, { 'categories': ('keywords', ...), 'id': ('tv_episode_info', 'show_content_id', {str_or_none}), 'description': ('custom_data', 'program_description'), 'series': ('custom_data', 'program_name'), 'title': ('custom_data', 'program_name'), }), } class TBSJPPlaylistIE(TBSJPBaseIE): _VALID_URL = r'https?://cu\.tbs\.co\.jp/playlist/(?P<id>[\da-f]+)' _TESTS = [{ 'url': 'https://cu.tbs.co.jp/playlist/184f9970e7ba48e4915f1b252c55015e', 'playlist_mincount': 4, 'info_dict': { 'title': 'まもなく配信終了', 'id': '184f9970e7ba48e4915f1b252c55015e', }, }] def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) meta = self._search_window_app_json(webpage, 'playlist', playlist_id) playlist = traverse_obj(meta, ('falcorCache', 'playList', playlist_id)) def entries(): for entry in traverse_obj(playlist, ('catalogs', 'value', lambda _, v: v['content_id'])): # TODO: it's likely possible to get all metadata from the playlist page json instead content_id = entry['content_id'] content_type = entry.get('content_type') if content_type == 'tv_show': yield self.url_result( f'https://cu.tbs.co.jp/program/{content_id}', TBSJPProgramIE, content_id) elif content_type == 'tv_episode': yield self.url_result( f'https://cu.tbs.co.jp/episode/{content_id}', TBSJPEpisodeIE, content_id) else: self.report_warning(f'Skipping "{content_id}" with unsupported content_type "{content_type}"') return self.playlist_result(entries(), playlist_id, traverse_obj(playlist, ('display_name', 'value')))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/koo.py
yt_dlp/extractor/koo.py
from .common import InfoExtractor from ..utils import ( clean_html, try_get, ) class KooIE(InfoExtractor): _WORKING = False _VALID_URL = r'https?://(?:www\.)?kooapp\.com/koo/[^/]+/(?P<id>[^/&#$?]+)' _TESTS = [{ # Test for video in the comments 'url': 'https://www.kooapp.com/koo/ytdlpTestAccount/946c4189-bc2d-4524-b95b-43f641e2adde', 'info_dict': { 'id': '946c4189-bc2d-4524-b95b-43f641e2adde', 'ext': 'mp4', 'title': 'test for video in comment', 'description': 'md5:daa77dc214add4da8b6ea7d2226776e7', 'timestamp': 1632215195, 'uploader_id': 'ytdlpTestAccount', 'uploader': 'yt-dlpTestAccount', 'duration': 7000, 'upload_date': '20210921', }, 'params': {'skip_download': True}, }, { # Test for koo with long title 'url': 'https://www.kooapp.com/koo/laxman_kumarDBFEC/33decbf7-5e1e-4bb8-bfd7-04744a064361', 'info_dict': { 'id': '33decbf7-5e1e-4bb8-bfd7-04744a064361', 'ext': 'mp4', 'title': 'md5:47a71c2337295330c5a19a8af1bbf450', 'description': 'md5:06a6a84e9321499486dab541693d8425', 'timestamp': 1632106884, 'uploader_id': 'laxman_kumarDBFEC', 'uploader': 'Laxman Kumar 🇮🇳', 'duration': 46000, 'upload_date': '20210920', }, 'params': {'skip_download': True}, }, { # Test for audio 'url': 'https://www.kooapp.com/koo/ytdlpTestAccount/a2a9c88e-ce4b-4d2d-952f-d06361c5b602', 'info_dict': { 'id': 'a2a9c88e-ce4b-4d2d-952f-d06361c5b602', 'ext': 'mp4', 'title': 'Test for audio', 'description': 'md5:ecb9a2b6a5d34b736cecb53788cb11e8', 'timestamp': 1632211634, 'uploader_id': 'ytdlpTestAccount', 'uploader': 'yt-dlpTestAccount', 'duration': 214000, 'upload_date': '20210921', }, 'params': {'skip_download': True}, }, { # Test for video 'url': 'https://www.kooapp.com/koo/ytdlpTestAccount/a3e56c53-c1ed-4ac9-ac02-ed1630e6b1d1', 'info_dict': { 'id': 'a3e56c53-c1ed-4ac9-ac02-ed1630e6b1d1', 'ext': 'mp4', 'title': 'Test for video', 'description': 'md5:7afc4eb839074ddeb2beea5dd6fe9500', 'timestamp': 1632211468, 'uploader_id': 'ytdlpTestAccount', 'uploader': 'yt-dlpTestAccount', 'duration': 14000, 
'upload_date': '20210921', }, 'params': {'skip_download': True}, }, { # Test for link 'url': 'https://www.kooapp.com/koo/ytdlpTestAccount/01bf5b94-81a5-4d8e-a387-5f732022e15a', 'skip': 'No video/audio found at the provided url.', 'info_dict': { 'id': '01bf5b94-81a5-4d8e-a387-5f732022e15a', 'title': 'Test for link', 'ext': 'none', }, }, { # Test for images 'url': 'https://www.kooapp.com/koo/ytdlpTestAccount/dc05d9cd-a61d-45fd-bb07-e8019d8ca8cb', 'skip': 'No video/audio found at the provided url.', 'info_dict': { 'id': 'dc05d9cd-a61d-45fd-bb07-e8019d8ca8cb', 'title': 'Test for images', 'ext': 'none', }, }] def _real_extract(self, url): video_id = self._match_id(url) data_json = self._download_json( f'https://www.kooapp.com/apiV1/ku/{video_id}?limit=20&offset=0&showSimilarKoos=true', video_id)['parentContent'] item_json = next(content['items'][0] for content in data_json if try_get(content, lambda x: x['items'][0]['id']) == video_id) media_json = item_json['mediaMap'] formats = [] mp4_url = media_json.get('videoMp4') video_m3u8_url = media_json.get('videoHls') if mp4_url: formats.append({ 'url': mp4_url, 'ext': 'mp4', }) if video_m3u8_url: formats.extend(self._extract_m3u8_formats(video_m3u8_url, video_id, fatal=False, ext='mp4')) if not formats: self.raise_no_formats('No video/audio found at the provided url.', expected=True) return { 'id': video_id, 'title': clean_html(item_json.get('title')), 'description': f'{clean_html(item_json.get("title"))}\n\n{clean_html(item_json.get("enTransliteration"))}', 'timestamp': item_json.get('createdAt'), 'uploader_id': item_json.get('handle'), 'uploader': item_json.get('name'), 'duration': media_json.get('duration'), 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tv2dk.py
yt_dlp/extractor/tv2dk.py
import json import re from .common import InfoExtractor from .jwplatform import JWPlatformIE from ..utils import ( determine_ext, js_to_json, url_or_none, ) from ..utils.traversal import find_element, traverse_obj class TV2DKIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// (?:www\.)? (?: tvsyd| tv2ostjylland| tvmidtvest| tv2fyn| tv2east| tv2lorry| tv2nord| tv2kosmopol )\.dk/ (?:[^/?#]+/)* (?P<id>[^/?\#&]+) ''' _TESTS = [{ 'url': 'https://www.tvsyd.dk/nyheder/28-10-2019/1930/1930-28-okt-2019?autoplay=1#player', 'info_dict': { 'id': 'sPp5z21q', 'ext': 'mp4', 'title': '19:30 - 28. okt. 2019', 'description': '', 'thumbnail': 'https://cdn.jwplayer.com/v2/media/sPp5z21q/poster.jpg?width=720', 'timestamp': 1572287400, 'upload_date': '20191028', }, }, { 'url': 'https://www.tv2lorry.dk/gadekamp/gadekamp-6-hoejhuse-i-koebenhavn', 'info_dict': { 'id': 'oD9cyq0m', 'ext': 'mp4', 'title': 'Gadekamp #6 - Højhuse i København', 'description': '', 'thumbnail': 'https://cdn.jwplayer.com/v2/media/oD9cyq0m/poster.jpg?width=720', 'timestamp': 1635348600, 'upload_date': '20211027', }, }, { 'url': 'https://www.tvsyd.dk/haderslev/x-factor-brodre-fulde-af-selvtillid-er-igen-hjemme-hos-mor-vores-diagnoser-har-vaeret-en-fordel', 'info_dict': { 'id': 'x-factor-brodre-fulde-af-selvtillid-er-igen-hjemme-hos-mor-vores-diagnoser-har-vaeret-en-fordel', }, 'playlist_count': 2, }, { 'url': 'https://www.tv2ostjylland.dk/aarhus/dom-kan-fa-alvorlige-konsekvenser', 'info_dict': { 'id': 'dom-kan-fa-alvorlige-konsekvenser', }, 'playlist_count': 3, }, { 'url': 'https://www.tv2ostjylland.dk/artikel/minister-gaar-ind-i-sag-om-diabetes-teknologi', 'only_matching': True, }, { 'url': 'https://www.tv2ostjylland.dk/nyheder/28-10-2019/22/2200-nyhederne-mandag-d-28-oktober-2019?autoplay=1#player', 'only_matching': True, }, { 'url': 'https://www.tvmidtvest.dk/nyheder/27-10-2019/1930/1930-27-okt-2019', 'only_matching': True, }, { 'url': 
'https://www.tv2fyn.dk/artikel/fyn-kan-faa-landets-foerste-fabrik-til-groent-jetbraendstof', 'only_matching': True, }, { 'url': 'https://www.tv2east.dk/artikel/gods-faar-indleveret-tonsvis-af-aebler-100-kilo-aebler-gaar-til-en-aeblebrandy', 'only_matching': True, }, { 'url': 'https://www.tv2lorry.dk/koebenhavn/rasmus-paludan-evakueret-til-egen-demonstration#player', 'only_matching': True, }, { 'url': 'https://www.tv2nord.dk/artikel/dybt-uacceptabelt', 'only_matching': True, }, { 'url': 'https://www.tv2kosmopol.dk/metropolen/chaufforer-beordres-til-at-kore-videre-i-ulovlige-busser-med-rode-advarselslamper', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) search_space = traverse_obj(webpage, {find_element(tag='article')}) or webpage player_ids = traverse_obj( re.findall(r'x-data="(?:video_player|simple_player)\(({[^"]+})', search_space), (..., {js_to_json}, {json.loads}, ('jwpMediaId', 'videoId'), {str})) return self.playlist_from_matches( player_ids, video_id, getter=lambda x: f'jwplatform:{x}', ie=JWPlatformIE) class TV2DKBornholmPlayIE(InfoExtractor): _VALID_URL = r'https?://play\.tv2bornholm\.dk/\?.*?\bid=(?P<id>\d+)' _TEST = { 'url': 'http://play.tv2bornholm.dk/?area=specifikTV&id=781021', 'info_dict': { 'id': '781021', 'ext': 'mp4', 'title': '12Nyheder-27.11.19', }, 'params': { 'skip_download': True, }, } def _real_extract(self, url): video_id = self._match_id(url) video = self._download_json( 'https://play.tv2bornholm.dk/controls/AJAX.aspx/specifikVideo', video_id, data=json.dumps({ 'playlist_id': video_id, 'serienavn': '', }).encode(), headers={ 'X-Requested-With': 'XMLHttpRequest', 'Content-Type': 'application/json; charset=UTF-8', })['d'] # TODO: generalize flowplayer title = self._search_regex( r'title\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1', video, 'title', group='value') sources = self._parse_json(self._search_regex( r'(?s)sources:\s*(\[.+?\]),', video, 'sources'), 
video_id, js_to_json) formats = [] srcs = set() for source in sources: src = url_or_none(source.get('src')) if not src: continue if src in srcs: continue srcs.add(src) ext = determine_ext(src) src_type = source.get('type') if src_type == 'application/x-mpegurl' or ext == 'm3u8': formats.extend(self._extract_m3u8_formats( src, video_id, ext='mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) elif src_type == 'application/dash+xml' or ext == 'mpd': formats.extend(self._extract_mpd_formats( src, video_id, mpd_id='dash', fatal=False)) else: formats.append({ 'url': src, }) return { 'id': video_id, 'title': title, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/karaoketv.py
yt_dlp/extractor/karaoketv.py
from .common import InfoExtractor class KaraoketvIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?karaoketv\.co\.il/[^/]+/(?P<id>\d+)' _TEST = { 'url': 'http://www.karaoketv.co.il/%D7%A9%D7%99%D7%A8%D7%99_%D7%A7%D7%A8%D7%99%D7%95%D7%A7%D7%99/58356/%D7%90%D7%99%D7%96%D7%95%D7%9F', 'info_dict': { 'id': '58356', 'ext': 'flv', 'title': 'קריוקי של איזון', }, 'params': { # rtmp download 'skip_download': True, }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) api_page_url = self._search_regex( r'<iframe[^>]+src=(["\'])(?P<url>https?://www\.karaoke\.co\.il/api_play\.php\?.+?)\1', webpage, 'API play URL', group='url') api_page = self._download_webpage(api_page_url, video_id) video_cdn_url = self._search_regex( r'<iframe[^>]+src=(["\'])(?P<url>https?://www\.video-cdn\.com/embed/iframe/.+?)\1', api_page, 'video cdn URL', group='url') video_cdn = self._download_webpage(video_cdn_url, video_id) play_path = self._parse_json( self._search_regex( r'var\s+options\s*=\s*({.+?});', video_cdn, 'options'), video_id)['clip']['url'] settings = self._parse_json( self._search_regex( r'var\s+settings\s*=\s*({.+?});', video_cdn, 'servers', default='{}'), video_id, fatal=False) or {} servers = settings.get('servers') if not servers or not isinstance(servers, list): servers = ('wowzail.video-cdn.com:80/vodcdn', ) formats = [{ 'url': f'rtmp://{server}' if not server.startswith('rtmp') else server, 'play_path': play_path, 'app': 'vodcdn', 'page_url': video_cdn_url, 'player_url': 'http://www.video-cdn.com/assets/flowplayer/flowplayer.commercial-3.2.18.swf', 'rtmp_real_time': True, 'ext': 'flv', } for server in servers] return { 'id': video_id, 'title': self._og_search_title(webpage), 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/fox.py
yt_dlp/extractor/fox.py
import json import urllib.parse import uuid from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, int_or_none, parse_age_limit, parse_duration, traverse_obj, try_get, unified_timestamp, url_or_none, ) class FOXIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?fox(?:sports)?\.com/(?:watch|replay)/(?P<id>[\da-fA-F]+)' _TESTS = [{ # clip 'url': 'https://www.fox.com/watch/4b765a60490325103ea69888fb2bd4e8/', 'md5': 'ebd296fcc41dd4b19f8115d8461a3165', 'info_dict': { 'id': '4b765a60490325103ea69888fb2bd4e8', 'ext': 'mp4', 'title': 'Aftermath: Bruce Wayne Develops Into The Dark Knight', 'description': 'md5:549cd9c70d413adb32ce2a779b53b486', 'duration': 102, 'timestamp': 1504291893, 'upload_date': '20170901', 'creator': 'FOX', 'series': 'Gotham', 'age_limit': 14, 'episode': 'Aftermath: Bruce Wayne Develops Into The Dark Knight', 'thumbnail': r're:^https?://.*\.jpg$', }, 'params': { 'skip_download': True, }, }, { # episode, geo-restricted 'url': 'https://www.fox.com/watch/087036ca7f33c8eb79b08152b4dd75c1/', 'only_matching': True, }, { # sports event, geo-restricted 'url': 'https://www.fox.com/watch/b057484dade738d1f373b3e46216fa2c/', 'only_matching': True, }, { # fox sports replay, geo-restricted 'url': 'https://www.foxsports.com/replay/561f3e071347a24e5e877abc56b22e89', 'only_matching': True, }] _GEO_BYPASS = False _HOME_PAGE_URL = 'https://www.fox.com/' _API_KEY = '6E9S4bmcoNnZwVLOHywOv8PJEdu76cM9' _access_token = None _device_id = str(uuid.uuid4()) def _call_api(self, path, video_id, data=None): headers = { 'X-Api-Key': self._API_KEY, } if self._access_token: headers['Authorization'] = 'Bearer ' + self._access_token try: return self._download_json( 'https://api3.fox.com/v2.0/' + path, video_id, data=data, headers=headers) except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 403: entitlement_issues = self._parse_json( e.cause.response.read().decode(), 
video_id)['entitlementIssues'] for e in entitlement_issues: if e.get('errorCode') == 1005: raise ExtractorError( 'This video is only available via cable service provider ' 'subscription. You may want to use --cookies.', expected=True) messages = ', '.join([e['message'] for e in entitlement_issues]) raise ExtractorError(messages, expected=True) raise def _real_initialize(self): if not self._access_token: mvpd_auth = self._get_cookies(self._HOME_PAGE_URL).get('mvpd-auth') if mvpd_auth: self._access_token = (self._parse_json(urllib.parse.unquote( mvpd_auth.value), None, fatal=False) or {}).get('accessToken') if not self._access_token: self._access_token = self._call_api( 'login', None, json.dumps({ 'deviceId': self._device_id, }).encode())['accessToken'] def _real_extract(self, url): video_id = self._match_id(url) self._access_token = self._call_api( f'previewpassmvpd?device_id={self._device_id}&mvpd_id=TempPass_fbcfox_60min', video_id)['accessToken'] video = self._call_api('watch', video_id, data=json.dumps({ 'capabilities': ['drm/widevine', 'fsdk/yo'], 'deviceWidth': 1280, 'deviceHeight': 720, 'maxRes': '720p', 'os': 'macos', 'osv': '', 'provider': { 'freewheel': {'did': self._device_id}, 'vdms': {'rays': ''}, 'dmp': {'kuid': '', 'seg': ''}, }, 'playlist': '', 'privacy': {'us': '1---'}, 'siteSection': '', 'streamType': 'vod', 'streamId': video_id}).encode()) title = video['name'] release_url = video['url'] try: m3u8_url = self._download_json(release_url, video_id)['playURL'] except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 403: error = self._parse_json(e.cause.response.read().decode(), video_id) if error.get('exception') == 'GeoLocationBlocked': self.raise_geo_restricted(countries=['US']) raise ExtractorError(error['description'], expected=True) raise formats = self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls') data = try_get( video, lambda x: x['trackingData']['properties'], dict) 
or {} duration = int_or_none(video.get('durationInSeconds')) or int_or_none( video.get('duration')) or parse_duration(video.get('duration')) timestamp = unified_timestamp(video.get('datePublished')) creator = data.get('brand') or data.get('network') or video.get('network') series = video.get('seriesName') or data.get( 'seriesName') or data.get('show') subtitles = {} for doc_rel in video.get('documentReleases', []): rel_url = doc_rel.get('url') if not url or doc_rel.get('format') != 'SCC': continue subtitles['en'] = [{ 'url': rel_url, 'ext': 'scc', }] break return { 'id': video_id, 'title': title, 'formats': formats, 'description': video.get('description'), 'duration': duration, 'timestamp': timestamp, 'age_limit': parse_age_limit(video.get('contentRating')), 'creator': creator, 'series': series, 'season_number': int_or_none(video.get('seasonNumber')), 'episode': video.get('name'), 'episode_number': int_or_none(video.get('episodeNumber')), 'thumbnail': traverse_obj(video, ('images', 'still', 'raw'), expected_type=url_or_none), 'release_year': int_or_none(video.get('releaseYear')), 'subtitles': subtitles, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/idagio.py
yt_dlp/extractor/idagio.py
from .common import InfoExtractor from ..utils import int_or_none, unified_timestamp, url_or_none from ..utils.traversal import traverse_obj class IdagioTrackIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?app\.idagio\.com(?:/[a-z]{2})?/recordings/\d+\?(?:[^#]+&)?trackId=(?P<id>\d+)' _TESTS = [{ 'url': 'https://app.idagio.com/recordings/30576934?trackId=30576943', 'md5': '15148bd71804b2450a2508931a116b56', 'info_dict': { 'id': '30576943', 'ext': 'mp3', 'title': 'Theme. Andante', 'duration': 82, 'composers': ['Edward Elgar'], 'artists': ['Vasily Petrenko', 'Royal Liverpool Philharmonic Orchestra'], 'genres': ['Orchestral', 'Other Orchestral Music'], 'track': 'Theme. Andante', 'timestamp': 1554474370, 'upload_date': '20190405', }, }, { 'url': 'https://app.idagio.com/recordings/20514467?trackId=20514478&utm_source=pcl', 'md5': '3acef2ea0feadf889123b70e5a1e7fa7', 'info_dict': { 'id': '20514478', 'ext': 'mp3', 'title': 'I. Adagio sostenuto', 'duration': 316, 'composers': ['Ludwig van Beethoven'], 'genres': ['Keyboard', 'Sonata (Keyboard)'], 'track': 'I. 
Adagio sostenuto', 'timestamp': 1518076337, 'upload_date': '20180208', }, }, { 'url': 'https://app.idagio.com/de/recordings/20514467?trackId=20514478&utm_source=pcl', 'only_matching': True, }] def _real_extract(self, url): track_id = self._match_id(url) track_info = self._download_json( f'https://api.idagio.com/v2.0/metadata/tracks/{track_id}', track_id, fatal=False, expected_status=406) if traverse_obj(track_info, 'error_code') == 'idagio.error.blocked.location': self.raise_geo_restricted() content_info = self._download_json( f'https://api.idagio.com/v1.8/content/track/{track_id}', track_id, query={ 'quality': '0', 'format': '2', 'client_type': 'web-4', }) return { 'ext': 'mp3', 'vcodec': 'none', 'id': track_id, 'url': traverse_obj(content_info, ('url', {url_or_none})), **traverse_obj(track_info, ('result', { 'title': ('piece', 'title', {str}), 'timestamp': ('recording', 'created_at', {int_or_none(scale=1000)}), 'location': ('recording', 'location', {str}), 'duration': ('duration', {int_or_none}), 'track': ('piece', 'title', {str}), 'artists': ('recording', ('conductor', ('ensembles', ...), ('soloists', ...)), 'name', {str}, filter), 'composers': ('piece', 'workpart', 'work', 'composer', 'name', {str}, filter, all, filter), 'genres': ('piece', 'workpart', 'work', ('genre', 'subgenre'), 'title', {str}, filter), })), } class IdagioPlaylistBaseIE(InfoExtractor): """Subclasses must set _API_URL_TMPL and define _parse_playlist_metadata""" _PLAYLIST_ID_KEY = 'id' # vs. 
'display_id' def _entries(self, playlist_info): for track_data in traverse_obj(playlist_info, ('tracks', lambda _, v: v['id'] and v['recording']['id'])): track_id = track_data['id'] recording_id = track_data['recording']['id'] yield self.url_result( f'https://app.idagio.com/recordings/{recording_id}?trackId={track_id}', ie=IdagioTrackIE, video_id=track_id) def _real_extract(self, url): playlist_id = self._match_id(url) playlist_info = self._download_json( self._API_URL_TMPL.format(playlist_id), playlist_id)['result'] return { '_type': 'playlist', self._PLAYLIST_ID_KEY: playlist_id, 'entries': self._entries(playlist_info), **self._parse_playlist_metadata(playlist_info), } class IdagioRecordingIE(IdagioPlaylistBaseIE): _VALID_URL = r'https?://(?:www\.)?app\.idagio\.com(?:/[a-z]{2})?/recordings/(?P<id>\d+)(?![^#]*[&?]trackId=\d+)' _TESTS = [{ 'url': 'https://app.idagio.com/recordings/30576934', 'info_dict': { 'id': '30576934', 'title': 'Variations on an Original Theme op. 36', 'composers': ['Edward Elgar'], 'artists': ['Vasily Petrenko', 'Royal Liverpool Philharmonic Orchestra'], 'genres': ['Orchestral', 'Other Orchestral Music'], 'timestamp': 1554474370, 'modified_timestamp': 1554474370, 'modified_date': '20190405', 'upload_date': '20190405', }, 'playlist_count': 15, }, { 'url': 'https://app.idagio.com/de/recordings/20514467', 'info_dict': { 'id': '20514467', 'title': 'Sonata for Piano No. 14 in C sharp minor op. 
27/2', 'composers': ['Ludwig van Beethoven'], 'genres': ['Keyboard', 'Sonata (Keyboard)'], 'timestamp': 1518076337, 'upload_date': '20180208', 'modified_timestamp': 1518076337, 'modified_date': '20180208', }, 'playlist_count': 3, }] _API_URL_TMPL = 'https://api.idagio.com/v2.0/metadata/recordings/{}' def _parse_playlist_metadata(self, playlist_info): return traverse_obj(playlist_info, { 'title': ('work', 'title', {str}), 'timestamp': ('created_at', {int_or_none(scale=1000)}), 'modified_timestamp': ('created_at', {int_or_none(scale=1000)}), 'location': ('location', {str}), 'artists': (('conductor', ('ensembles', ...), ('soloists', ...)), 'name', {str}), 'composers': ('work', 'composer', 'name', {str}, all), 'genres': ('work', ('genre', 'subgenre'), 'title', {str}), 'tags': ('tags', ..., {str}), }) class IdagioAlbumIE(IdagioPlaylistBaseIE): _VALID_URL = r'https?://(?:www\.)?app\.idagio\.com(?:/[a-z]{2})?/albums/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://app.idagio.com/albums/elgar-enigma-variations-in-the-south-serenade-for-strings', 'info_dict': { 'id': 'a9f139b8-f70d-4b8a-a9a4-5fe8d35eaf9c', 'display_id': 'elgar-enigma-variations-in-the-south-serenade-for-strings', 'title': 'Elgar: Enigma Variations, In the South, Serenade for Strings', 'description': '', 'thumbnail': r're:https://.+/albums/880040420521/main\.jpg', 'artists': ['Vasily Petrenko', 'Royal Liverpool Philharmonic Orchestra', 'Edward Elgar'], 'timestamp': 1553817600, 'upload_date': '20190329', 'modified_timestamp': 1562566559.0, 'modified_date': '20190708', }, 'playlist_count': 19, }, { 'url': 'https://app.idagio.com/de/albums/brahms-ein-deutsches-requiem-3B403DF6-62D7-4A42-807B-47173F3E0192', 'info_dict': { 'id': '2862ad4e-4a61-45ad-9ce4-7fcf0c2626fe', 'display_id': 'brahms-ein-deutsches-requiem-3B403DF6-62D7-4A42-807B-47173F3E0192', 'title': 'Brahms: Ein deutsches Requiem', 'description': 'GRAMOPHONE CLASSICAL MUSIC AWARDS 2025 Recording of the Year & Choral', 'thumbnail': 
r're:https://.+/albums/3149020954522/main\.jpg', 'artists': ['Sabine Devieilhe', 'Stéphane Degout', 'Raphaël Pichon', 'Pygmalion', 'Johannes Brahms'], 'timestamp': 1760054400, 'upload_date': '20251010', 'modified_timestamp': 1760624868, 'modified_date': '20251016', 'tags': ['recommended', 'recent-release'], }, 'playlist_count': 7, }] _API_URL_TMPL = 'https://api.idagio.com/v2.0/metadata/albums/{}' _PLAYLIST_ID_KEY = 'display_id' def _parse_playlist_metadata(self, playlist_info): return traverse_obj(playlist_info, { 'id': ('id', {str}), 'title': ('title', {str}), 'timestamp': ('publishDate', {unified_timestamp}), 'modified_timestamp': ('lastModified', {unified_timestamp}), 'thumbnail': ('imageUrl', {url_or_none}), 'description': ('description', {str}), 'artists': ('participants', ..., 'name', {str}), 'tags': ('tags', ..., {str}), }) class IdagioPlaylistIE(IdagioPlaylistBaseIE): _VALID_URL = r'https?://(?:www\.)?app\.idagio\.com(?:/[a-z]{2})?/playlists/(?!personal/)(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://app.idagio.com/playlists/beethoven-the-most-beautiful-piano-music', 'info_dict': { 'id': '31652bec-8c5b-460e-a3f0-cf1f69817f53', 'display_id': 'beethoven-the-most-beautiful-piano-music', 'title': 'Beethoven: the most beautiful piano music', 'description': 'md5:d41bb04b8896bb69377f5c2cd9345ad1', 'thumbnail': r're:https://.+/playlists/31652bec-8c5b-460e-a3f0-cf1f69817f53/main\.jpg', 'creators': ['IDAGIO'], }, 'playlist_mincount': 16, # one entry is geo-restricted }, { 'url': 'https://app.idagio.com/de/playlists/piano-music-for-an-autumn-day', 'info_dict': { 'id': 'd70e9c7f-7080-4308-ae0f-f890dddeda82', 'display_id': 'piano-music-for-an-autumn-day', 'title': 'Piano Music for an Autumn Day', 'description': 'Get ready to snuggle up and enjoy all the musical colours of this cosy, autumnal playlist.', 'thumbnail': r're:https://.+/playlists/d70e9c7f-7080-4308-ae0f-f890dddeda82/main\.jpg', 'creators': ['IDAGIO'], }, 'playlist_count': 35, }] _API_URL_TMPL = 
'https://api.idagio.com/v2.0/playlists/{}' _PLAYLIST_ID_KEY = 'display_id' def _parse_playlist_metadata(self, playlist_info): return traverse_obj(playlist_info, { 'id': ('id', {str}), 'title': ('title', {str}), 'thumbnail': ('imageUrl', {url_or_none}), 'description': ('description', {str}), 'creators': ('curator', 'name', {str}, all), }) class IdagioPersonalPlaylistIE(IdagioPlaylistBaseIE): _VALID_URL = r'https?://(?:www\.)?app\.idagio\.com(?:/[a-z]{2})?/playlists/personal/(?P<id>[\da-f-]+)' _TESTS = [{ 'url': 'https://app.idagio.com/playlists/personal/99dad72e-7b3a-45a4-b216-867c08046ed8', 'info_dict': { 'id': '99dad72e-7b3a-45a4-b216-867c08046ed8', 'title': 'Test', 'creators': ['1a6f16a6-4514-4d0c-b481-3a9877835626'], 'thumbnail': r're:https://.+/artists/86371/main\.jpg', 'timestamp': 1602859138, 'modified_timestamp': 1755616667, 'upload_date': '20201016', 'modified_date': '20250819', }, 'playlist_count': 100, }, { 'url': 'https://app.idagio.com/de/playlists/personal/99dad72e-7b3a-45a4-b216-867c08046ed8', 'only_matching': True, }] _API_URL_TMPL = 'https://api.idagio.com/v1.0/personal-playlists/{}' def _parse_playlist_metadata(self, playlist_info): return traverse_obj(playlist_info, { 'title': ('title', {str}), 'thumbnail': ('image_url', {url_or_none}), 'creators': ('user_id', {str}, all), 'timestamp': ('created_at', {int_or_none(scale=1000)}), 'modified_timestamp': ('updated_at', {int_or_none(scale=1000)}), })
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/unsupported.py
yt_dlp/extractor/unsupported.py
from .common import InfoExtractor from ..utils import ExtractorError, classproperty, remove_start class UnsupportedInfoExtractor(InfoExtractor): IE_DESC = False URLS = () # Redefine in subclasses @classproperty def IE_NAME(cls): return remove_start(super().IE_NAME, 'Known') @classproperty def _VALID_URL(cls): return rf'https?://(?:www\.)?(?:{"|".join(cls.URLS)})' LF = '\n ' class KnownDRMIE(UnsupportedInfoExtractor): """Sites that are known to use DRM for all their videos Add to this list only if: * You are reasonably certain that the site uses DRM for ALL their videos * Multiple users have asked about this site on github/discord """ URLS = ( r'play\.hbomax\.com', r'channel(?:4|5)\.com', r'peacocktv\.com', r'(?:[\w.]+\.)?disneyplus\.com', r'open\.spotify\.com', r'tvnz\.co\.nz', r'oneplus\.ch', r'artstation\.com/learning/courses', r'philo\.com', r'(?:[\w.]+\.)?mech-plus\.com', r'aha\.video', r'mubi\.com', r'vootkids\.com', r'nowtv\.it/watch', r'tv\.apple\.com', r'primevideo\.com', r'hulu\.com', r'resource\.inkryptvideos\.com', r'joyn\.de', r'amazon\.(?:\w{2}\.)?\w+/gp/video', r'music\.amazon\.(?:\w{2}\.)?\w+', r'(?:watch|front)\.njpwworld\.com', r'qub\.ca/vrai', r'(?:beta\.)?crunchyroll\.com', r'viki\.com', r'deezer\.com', r'b-ch\.com', r'ctv\.ca', r'noovo\.ca', r'tsn\.ca', r'paramountplus\.com', r'(?:m\.)?(?:sony)?crackle\.com', r'cw(?:tv(?:pr)?|seed)\.com', r'6play\.fr', r'rtlplay\.be', r'play\.rtl\.hr', r'rtlmost\.hu', r'plus\.rtl\.de(?!/podcast/)', r'mediasetinfinity\.es', ) _TESTS = [{ # https://github.com/yt-dlp/yt-dlp/issues/4309 'url': 'https://peacocktv.com/watch/playback/vod/GMO_00000000073159_01/f9d03003-eb04-3c7f-a7b6-a83ab7eb55bc', 'only_matching': True, }, { # https://github.com/yt-dlp/yt-dlp/issues/1719, 'url': 'https://www.channel4.com/programmes/gurren-lagann/on-demand/69960-001', 'only_matching': True, }, { # https://github.com/yt-dlp/yt-dlp/issues/1548 'url': 'https://www.channel5.com/show/uk-s-strongest-man-2021/season-2021/episode-1', 
'only_matching': True, }, { 'url': r'https://hsesn.apps.disneyplus.com', 'only_matching': True, }, { 'url': r'https://www.disneyplus.com', 'only_matching': True, }, { 'url': 'https://open.spotify.com', 'only_matching': True, }, { # https://github.com/yt-dlp/yt-dlp/issues/4122 'url': 'https://www.tvnz.co.nz/shows/ice-airport-alaska/episodes/s1-e1', 'only_matching': True, }, { # https://github.com/yt-dlp/yt-dlp/issues/1922 'url': 'https://www.oneplus.ch/play/1008188', 'only_matching': True, }, { # https://github.com/yt-dlp/yt-dlp/issues/1140 'url': 'https://www.artstation.com/learning/courses/dqQ/character-design-masterclass-with-serge-birault/chapters/Rxn3/introduction', 'only_matching': True, }, { # https://github.com/yt-dlp/yt-dlp/issues/3544 'url': 'https://www.philo.com/player/player/vod/Vk9EOjYwODU0ODg5OTY0ODY0OTQ5NA', 'only_matching': True, }, { # https://github.com/yt-dlp/yt-dlp/issues/3533 'url': 'https://www.mech-plus.com/player/24892/stream?assetType=episodes&playlist_id=6', 'only_matching': True, }, { 'url': 'https://watch.mech-plus.com/details/25240?playlist_id=6', 'only_matching': True, }, { # https://github.com/yt-dlp/yt-dlp/issues/2934 'url': 'https://www.aha.video/player/movie/lucky-man', 'only_matching': True, }, { # https://github.com/yt-dlp/yt-dlp/issues/2743 'url': 'https://mubi.com/films/the-night-doctor', 'only_matching': True, }, { # https://github.com/yt-dlp/yt-dlp/issues/3287 'url': 'https://www.vootkids.com/movies/chhota-bheem-the-rise-of-kirmada/764459', 'only_matching': True, }, { # https://github.com/yt-dlp/yt-dlp/issues/2744 'url': 'https://www.nowtv.it/watch/home/asset/and-just-like-that/skyserie_f8fe979772e8437d8a61ab83b6d293e9/seasons/1/episodes/8/R_126182_HD', 'only_matching': True, }, { # https://github.com/yt-dlp/yt-dlp/issues/5557 'url': 'https://tv.apple.com/it/show/loot---una-fortuna/umc.cmc.5erbujil1mpazuerhr1udnk45?ctx_brand=tvs.sbd.4000', 'only_matching': True, }, { # https://github.com/yt-dlp/yt-dlp/issues/3072 'url': 
'https://www.joyn.de/play/serien/clannad/1-1-wo-die-kirschblueten-fallen', 'only_matching': True, }, { # https://github.com/yt-dlp/yt-dlp/issues/7323 'url': 'https://music.amazon.co.jp/albums/B088Y368TK', 'only_matching': True, }, { # https://github.com/yt-dlp/yt-dlp/issues/7323 'url': 'https://www.amazon.co.jp/gp/video/detail/B09X5HBYRS/', 'only_matching': True, }, { # https://github.com/yt-dlp/yt-dlp/issues/6125 'url': 'https://www.primevideo.com/region/eu/detail/0H3DDB4KBJFNDCKKLHNRLRLVKQ/ref=atv_br_def_r_br_c_unkc_1_10', 'only_matching': True, }, { # https://github.com/yt-dlp/yt-dlp/issues/5740 'url': 'https://resource.inkryptvideos.com/v2-a83ns52/iframe/index.html#video_id=7999ea0f6e03439eb40d056258c2d736&otp=xxx', 'only_matching': True, }, { # https://github.com/yt-dlp/yt-dlp/issues/5767 'url': 'https://www.hulu.com/movie/anthem-6b25fac9-da2b-45a3-8e09-e4156b0471cc', 'only_matching': True, }, { # https://github.com/yt-dlp/yt-dlp/pull/8570 'url': 'https://watch.njpwworld.com/player/36447/series?assetType=series', 'only_matching': True, }, { 'url': 'https://front.njpwworld.com/p/s_series_00563_16_bs', 'only_matching': True, }, { 'url': 'https://www.qub.ca/vrai/l-effet-bocuse-d-or/saison-1/l-effet-bocuse-d-or-saison-1-bande-annonce-1098225063', 'only_matching': True, }, { 'url': 'https://www.crunchyroll.com/watch/GY2P1Q98Y/to-the-future', 'only_matching': True, }, { 'url': 'https://beta.crunchyroll.com/pt-br/watch/G8WUN8VKP/the-ruler-of-conspiracy', 'only_matching': True, }, { 'url': 'https://www.viki.com/videos/1175236v-choosing-spouse-by-lottery-episode-1', 'only_matching': True, }, { 'url': 'http://www.deezer.com/playlist/176747451', 'only_matching': True, }, { 'url': 'https://www.b-ch.com/titles/8203/001', 'only_matching': True, }, { 'url': 'https://www.ctv.ca/shows/masterchef-53506/the-audition-battles-s15e1', 'only_matching': True, }, { 'url': 'https://www.noovo.ca/emissions/lamour-est-dans-le-pre/prets-pour-lamour-s10e1', 'only_matching': True, }, { 
'url': 'https://www.tsn.ca/video/relaxed-oilers-look-to-put-emotional-game-2-loss-in-the-rearview%7E3148747', 'only_matching': True, }, { 'url': 'https://www.paramountplus.com', 'only_matching': True, }, { 'url': 'https://www.crackle.com', 'only_matching': True, }, { 'url': 'https://m.sonycrackle.com', 'only_matching': True, }, { 'url': 'https://www.cwtv.com', 'only_matching': True, }, { 'url': 'https://www.cwseed.com', 'only_matching': True, }, { 'url': 'https://cwtvpr.com', 'only_matching': True, }, { 'url': 'https://www.6play.fr', 'only_matching': True, }, { 'url': 'https://www.rtlplay.be', 'only_matching': True, }, { 'url': 'https://play.rtl.hr', 'only_matching': True, }, { 'url': 'https://www.rtlmost.hu', 'only_matching': True, }, { 'url': 'https://plus.rtl.de/video-tv/', 'only_matching': True, }, { 'url': 'https://www.mediasetinfinity.es/', 'only_matching': True, }] def _real_extract(self, url): raise ExtractorError( f'The requested site is known to use DRM protection. ' f'It will {self._downloader._format_err("NOT", self._downloader.Styles.EMPHASIS)} be supported.{LF}' f'Please {self._downloader._format_err("DO NOT", self._downloader.Styles.ERROR)} open an issue, ' 'unless you have evidence that the video is not DRM protected', expected=True) class KnownPiracyIE(UnsupportedInfoExtractor): """Sites that have been deemed to be piracy In order for this to not end up being a catalog of piracy sites, only sites that were once supported should be added to this list """ URLS = ( r'dood\.(?:to|watch|so|pm|wf|re)', # Sites youtube-dl supports, but we won't r'viewsb\.com', r'filemoon\.sx', r'hentai\.animestigma\.com', r'thisav\.com', r'gounlimited\.to', r'highstream\.tv', r'uqload\.com', r'vedbam\.xyz', r'vadbam\.net' r'vidlo\.us', r'wolfstream\.tv', r'xvideosharing\.com', r'(?:\w+\.)?viidshar\.com', r'sxyprn\.com', r'jable\.tv', r'91porn\.com', r'einthusan\.(?:tv|com|ca)', r'yourupload\.com', r'xanimu\.com', ) _TESTS = [{ 'url': 'http://dood.to/e/5s1wmbdacezb', 
'only_matching': True, }, { 'url': 'https://thisav.com/en/terms', 'only_matching': True, }] def _real_extract(self, url): raise ExtractorError( f'This website is no longer supported since it has been determined to be primarily used for piracy.{LF}' f'{self._downloader._format_err("DO NOT", self._downloader.Styles.ERROR)} open issues for it', expected=True)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/itv.py
yt_dlp/extractor/itv.py
import json from .brightcove import BrightcoveNewIE from .common import InfoExtractor from ..utils import ( JSON_LD_RE, ExtractorError, base_url, clean_html, determine_ext, extract_attributes, get_element_by_class, merge_dicts, parse_duration, smuggle_url, try_get, url_basename, url_or_none, urljoin, ) from ..utils.traversal import traverse_obj class ITVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?itv\.com/hub/[^/]+/(?P<id>[0-9a-zA-Z]+)' _GEO_COUNTRIES = ['GB'] _TESTS = [{ 'url': 'https://www.itv.com/hub/plebs/2a1873a0002', 'info_dict': { 'id': '2a1873a0002', 'ext': 'mp4', 'title': 'Plebs - The Orgy', 'description': 'md5:4d7159af53ebd5b36e8b3ec82a41fdb4', 'series': 'Plebs', 'season_number': 1, 'episode_number': 1, 'thumbnail': r're:https?://hubimages\.itv\.com/episode/2_1873_0002', }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'https://www.itv.com/hub/the-jonathan-ross-show/2a1166a0209', 'info_dict': { 'id': '2a1166a0209', 'ext': 'mp4', 'title': 'The Jonathan Ross Show - Series 17 - Episode 8', 'description': 'md5:3023dcdd375db1bc9967186cdb3f1399', 'series': 'The Jonathan Ross Show', 'episode_number': 8, 'season_number': 17, 'thumbnail': r're:https?://hubimages\.itv\.com/episode/2_1873_0002', }, 'params': { # m3u8 download 'skip_download': True, }, }, { # unavailable via data-playlist-url 'url': 'https://www.itv.com/hub/through-the-keyhole/2a2271a0033', 'only_matching': True, }, { # InvalidVodcrid 'url': 'https://www.itv.com/hub/james-martins-saturday-morning/2a5159a0034', 'only_matching': True, }, { # ContentUnavailable 'url': 'https://www.itv.com/hub/whos-doing-the-dishes/2a2898a0024', 'only_matching': True, }] def _generate_api_headers(self, hmac): return merge_dicts({ 'Accept': 'application/vnd.itv.vod.playlist.v2+json', 'Content-Type': 'application/json', 'hmac': hmac.upper(), }, self.geo_verification_headers()) def _call_api(self, video_id, playlist_url, headers, platform_tag, featureset, fatal=True): return self._download_json( 
playlist_url, video_id, data=json.dumps({ 'user': { 'itvUserId': '', 'entitlements': [], 'token': '', }, 'device': { 'manufacturer': 'Safari', 'model': '5', 'os': { 'name': 'Windows NT', 'version': '6.1', 'type': 'desktop', }, }, 'client': { 'version': '4.1', 'id': 'browser', }, 'variantAvailability': { 'featureset': { 'min': featureset, 'max': featureset, }, 'platformTag': platform_tag, }, }).encode(), headers=headers, fatal=fatal) def _get_subtitles(self, video_id, variants, ios_playlist_url, headers, *args, **kwargs): subtitles = {} # Prefer last matching featureset # See: https://github.com/yt-dlp/yt-dlp/issues/986 platform_tag_subs, featureset_subs = next( ((platform_tag, featureset) for platform_tag, featuresets in reversed(list(variants.items())) for featureset in featuresets if try_get(featureset, lambda x: x[2]) == 'outband-webvtt'), (None, None)) if platform_tag_subs and featureset_subs: subs_playlist = self._call_api( video_id, ios_playlist_url, headers, platform_tag_subs, featureset_subs, fatal=False) subs = try_get(subs_playlist, lambda x: x['Playlist']['Video']['Subtitles'], list) or [] for sub in subs: if not isinstance(sub, dict): continue href = url_or_none(sub.get('Href')) if not href: continue subtitles.setdefault('en', []).append({'url': href}) return subtitles def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) params = extract_attributes(self._search_regex( r'(?s)(<[^>]+id="video"[^>]*>)', webpage, 'params')) variants = self._parse_json( try_get(params, lambda x: x['data-video-variants'], str) or '{}', video_id, fatal=False) # Prefer last matching featureset # See: https://github.com/yt-dlp/yt-dlp/issues/986 platform_tag_video, featureset_video = next( ((platform_tag, featureset) for platform_tag, featuresets in reversed(list(variants.items())) for featureset in featuresets if set(try_get(featureset, lambda x: x[:2]) or []) == {'aes', 'hls'}), (None, None)) if not platform_tag_video or 
not featureset_video: raise ExtractorError('No downloads available', expected=True, video_id=video_id) ios_playlist_url = params.get('data-video-playlist') or params['data-video-id'] headers = self._generate_api_headers(params['data-video-hmac']) ios_playlist = self._call_api( video_id, ios_playlist_url, headers, platform_tag_video, featureset_video) video_data = try_get(ios_playlist, lambda x: x['Playlist']['Video'], dict) or {} ios_base_url = video_data.get('Base') formats = [] for media_file in (video_data.get('MediaFiles') or []): href = media_file.get('Href') if not href: continue if ios_base_url: href = ios_base_url + href ext = determine_ext(href) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( href, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) else: formats.append({ 'url': href, }) info = self._search_json_ld(webpage, video_id, default={}) if not info: json_ld = self._parse_json(self._search_regex( JSON_LD_RE, webpage, 'JSON-LD', '{}', group='json_ld'), video_id, fatal=False) if json_ld and json_ld.get('@type') == 'BreadcrumbList': for ile in (json_ld.get('itemListElement:') or []): item = ile.get('item:') or {} if item.get('@type') == 'TVEpisode': item['@context'] = 'http://schema.org' info = self._json_ld(item, video_id, fatal=False) or {} break thumbnails = [] thumbnail_url = try_get(params, lambda x: x['data-video-posterframe'], str) if thumbnail_url: thumbnails.extend([{ 'url': thumbnail_url.format(width=1920, height=1080, quality=100, blur=0, bg='false'), 'width': 1920, 'height': 1080, }, { 'url': urljoin(base_url(thumbnail_url), url_basename(thumbnail_url)), 'preference': -2, }]) thumbnail_url = self._html_search_meta(['og:image', 'twitter:image'], webpage, default=None) if thumbnail_url: thumbnails.append({ 'url': thumbnail_url, }) self._remove_duplicate_formats(thumbnails) return merge_dicts({ 'id': video_id, 'title': self._html_search_meta(['og:title', 'twitter:title'], webpage), 'formats': formats, 
'subtitles': self.extract_subtitles(video_id, variants, ios_playlist_url, headers), 'duration': parse_duration(video_data.get('Duration')), 'description': clean_html(get_element_by_class('episode-info__synopsis', webpage)), 'thumbnails': thumbnails, }, info) class ITVBTCCIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?itv\.com/(?:news|btcc)/(?:[^/]+/)*(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://www.itv.com/btcc/articles/btcc-2019-brands-hatch-gp-race-action', 'info_dict': { 'id': 'btcc-2019-brands-hatch-gp-race-action', 'title': 'BTCC 2019: Brands Hatch GP race action', }, 'playlist_count': 12, }, { # news page, can have absent `data` field 'url': 'https://www.itv.com/news/2021-10-27/i-have-to-protect-the-country-says-rishi-sunak-as-uk-faces-interest-rate-hike', 'info_dict': { 'id': 'i-have-to-protect-the-country-says-rishi-sunak-as-uk-faces-interest-rate-hike', 'title': 'md5:6ef054dd9f069330db3dcc66cb772d32', }, 'playlist_count': 4, }] BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s' def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) json_map = try_get( self._search_nextjs_data(webpage, playlist_id), lambda x: x['props']['pageProps']['article']['body']['content']) or [] entries = [] for video in json_map: if not any(traverse_obj(video, ('data', attr)) == 'Brightcove' for attr in ('name', 'type')): continue video_id = video['data']['id'] account_id = video['data']['accountId'] player_id = video['data']['playerId'] entries.append(self.url_result( smuggle_url(self.BRIGHTCOVE_URL_TEMPLATE % (account_id, player_id, video_id), { # ITV does not like some GB IP ranges, so here are some # IP blocks it accepts 'geo_ip_blocks': [ '193.113.0.0/16', '54.36.162.0/23', '159.65.16.0/21', ], 'referrer': url, }), ie=BrightcoveNewIE.ie_key(), video_id=video_id)) title = self._og_search_title(webpage, fatal=False) return self.playlist_result(entries, playlist_id, title)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/biobiochiletv.py
yt_dlp/extractor/biobiochiletv.py
from .common import InfoExtractor from ..utils import ( ExtractorError, remove_end, ) class BioBioChileTVIE(InfoExtractor): _VALID_URL = r'https?://(?:tv|www)\.biobiochile\.cl/(?:notas|noticias)/(?:[^/]+/)+(?P<id>[^/]+)\.shtml' _TESTS = [{ 'url': 'http://tv.biobiochile.cl/notas/2015/10/21/sobre-camaras-y-camarillas-parlamentarias.shtml', 'md5': '26f51f03cf580265defefb4518faec09', 'info_dict': { 'id': 'sobre-camaras-y-camarillas-parlamentarias', 'ext': 'mp4', 'title': 'Sobre Cámaras y camarillas parlamentarias', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'Fernando Atria', }, 'skip': 'URL expired and redirected to http://www.biobiochile.cl/portada/bbtv/index.html', }, { # different uploader layout 'url': 'http://tv.biobiochile.cl/notas/2016/03/18/natalia-valdebenito-repasa-a-diputado-hasbun-paso-a-la-categoria-de-hablar-brutalidades.shtml', 'md5': 'edc2e6b58974c46d5b047dea3c539ff3', 'info_dict': { 'id': 'natalia-valdebenito-repasa-a-diputado-hasbun-paso-a-la-categoria-de-hablar-brutalidades', 'ext': 'mp4', 'title': 'Natalia Valdebenito repasa a diputado Hasbún: Pasó a la categoría de hablar brutalidades', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'Piangella Obrador', }, 'params': { 'skip_download': True, }, 'skip': 'URL expired and redirected to http://www.biobiochile.cl/portada/bbtv/index.html', }, { 'url': 'http://www.biobiochile.cl/noticias/bbtv/comentarios-bio-bio/2016/07/08/edecanes-del-congreso-figuras-decorativas-que-le-cuestan-muy-caro-a-los-chilenos.shtml', 'info_dict': { 'id': 'b4xd0LK3SK', 'ext': 'mp4', # TODO: fix url_transparent information overriding # 'uploader': 'Juan Pablo Echenique', 'title': 'Comentario Oscar Cáceres', }, 'params': { # empty m3u8 manifest 'skip_download': True, }, }, { 'url': 'http://tv.biobiochile.cl/notas/2015/10/22/ninos-transexuales-de-quien-es-la-decision.shtml', 'only_matching': True, }, { 'url': 
'http://tv.biobiochile.cl/notas/2015/10/21/exclusivo-hector-pinto-formador-de-chupete-revela-version-del-ex-delantero-albo.shtml', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) rudo_url = self._search_regex( r'<iframe[^>]+src=(?P<q1>[\'"])(?P<url>(?:https?:)?//rudo\.video/vod/[0-9a-zA-Z]+)(?P=q1)', webpage, 'embed URL', None, group='url') if not rudo_url: raise ExtractorError('No videos found') title = remove_end(self._og_search_title(webpage), ' - BioBioChile TV') thumbnail = self._og_search_thumbnail(webpage) uploader = self._html_search_regex( r'<a[^>]+href=["\'](?:https?://(?:busca|www)\.biobiochile\.cl)?/(?:lista/)?(?:author|autor)[^>]+>(.+?)</a>', webpage, 'uploader', fatal=False) return { '_type': 'url_transparent', 'url': rudo_url, 'id': video_id, 'title': title, 'thumbnail': thumbnail, 'uploader': uploader, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/vh1.py
yt_dlp/extractor/vh1.py
from .mtv import MTVServicesBaseIE class VH1IE(MTVServicesBaseIE): IE_NAME = 'vh1.com' _VALID_URL = r'https?://(?:www\.)?vh1\.com/(?:video-clips|episodes)/(?P<id>[\da-z]{6})' _TESTS = [{ 'url': 'https://www.vh1.com/episodes/d06ta1/barely-famous-barely-famous-season-1-ep-1', 'info_dict': { 'id': '4af4cf2c-a854-11e4-9596-0026b9414f30', 'ext': 'mp4', 'display_id': 'd06ta1', 'title': 'Barely Famous', 'description': 'md5:6da5c9d88012eba0a80fc731c99b5fed', 'channel': 'VH1', 'duration': 1280.0, 'thumbnail': r're:https://images\.paramount\.tech/uri/mgid:arc:imageassetref:', 'series': 'Barely Famous', 'season': 'Season 1', 'season_number': 1, 'episode': 'Episode 1', 'episode_number': 1, 'timestamp': 1426680000, 'upload_date': '20150318', 'release_timestamp': 1426680000, 'release_date': '20150318', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.vh1.com/video-clips/ryzt2n/love-hip-hop-miami-love-hip-hop-miami-season-5-recap', 'info_dict': { 'id': '59e62974-4a5c-4417-91c3-5044cb2f4ce2', 'ext': 'mp4', 'display_id': 'ryzt2n', 'title': 'Love & Hip Hop Miami - Season 5 Recap', 'description': 'md5:4e49c65d0007bfc8d06db555a6b76ef0', 'duration': 792.083, 'thumbnail': r're:https://images\.paramount\.tech/uri/mgid:arc:imageassetref:', 'series': 'Love & Hip Hop Miami', 'season': 'Season 6', 'season_number': 6, 'episode': 'Episode 0', 'episode_number': 0, 'timestamp': 1732597200, 'upload_date': '20241126', 'release_timestamp': 1732597200, 'release_date': '20241126', }, 'params': {'skip_download': 'm3u8'}, }]
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/fathom.py
yt_dlp/extractor/fathom.py
import json from .common import InfoExtractor from ..utils import ( extract_attributes, float_or_none, get_element_html_by_id, parse_iso8601, ) from ..utils.traversal import traverse_obj class FathomIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?fathom\.video/share/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://fathom.video/share/G9mkjkspnohVVZ_L5nrsoPycyWcB8y7s', 'md5': '0decd5343b8f30ae268625e79a02b60f', 'info_dict': { 'id': '47200596', 'ext': 'mp4', 'title': 'eCom Inucbator - Coaching Session', 'duration': 8125.380507, 'timestamp': 1699048914, 'upload_date': '20231103', }, }, { 'url': 'https://fathom.video/share/mEws3bybftHL2QLymxYEDeE21vtLxGVm', 'md5': '4f5cb382126c22d1aba8a939f9c49690', 'info_dict': { 'id': '46812957', 'ext': 'mp4', 'title': 'Jon, Lawrence, Neman chat about practice', 'duration': 3571.517847, 'timestamp': 1698933600, 'upload_date': '20231102', }, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) props = traverse_obj( get_element_html_by_id('app', webpage), ({extract_attributes}, 'data-page', {json.loads}, 'props')) video_id = str(props['call']['id']) return { 'id': video_id, 'formats': self._extract_m3u8_formats(props['call']['video_url'], video_id, 'mp4'), **traverse_obj(props, { 'title': ('head', 'title', {str}), 'duration': ('duration', {float_or_none}), 'timestamp': ('call', 'started_at', {parse_iso8601}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/dbtv.py
yt_dlp/extractor/dbtv.py
from .common import InfoExtractor class DBTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?dagbladet\.no/video/(?:(?:embed|(?P<display_id>[^/]+))/)?(?P<id>[0-9A-Za-z_-]{11}|[a-zA-Z0-9]{8})' _EMBED_REGEX = [r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dagbladet\.no/video/embed/(?:[0-9A-Za-z_-]{11}|[a-zA-Z0-9]{8}).*?)\1'] _TESTS = [{ 'url': 'https://www.dagbladet.no/video/PynxJnNWChE/', 'md5': 'b8f850ba1860adbda668d367f9b77699', 'info_dict': { 'id': 'PynxJnNWChE', 'ext': 'mp4', 'title': 'Skulle teste ut fornøyelsespark, men kollegaen var bare opptatt av bikinikroppen', 'description': 'md5:49cc8370e7d66e8a2ef15c3b4631fd3f', 'thumbnail': r're:https?://.+\.jpg', 'upload_date': '20160916', 'duration': 69, 'uploader_id': 'UCk5pvsyZJoYJBd7_oFPTlRQ', 'uploader': 'Dagbladet', }, 'skip': 'Invalid URL', }, { 'url': 'https://www.dagbladet.no/video/embed/xlGmyIeN9Jo/?autoplay=false', 'only_matching': True, }, { 'url': 'https://www.dagbladet.no/video/truer-iran-bor-passe-dere/PalfB2Cw', 'only_matching': True, }] _WEBPAGE_TESTS = [{ # FIXME: Embed detection 'url': 'https://www.dagbladet.no/nyheter/rekordstort-russisk-angrep/83325693', 'info_dict': { 'id': '1HW7fYry', 'ext': 'mp4', 'title': 'Putin taler - så skjer dette', 'description': 'md5:3e8bacee33de861a9663d9a3fcc54e5e', 'display_id': 'putin-taler-sa-skjer-dette', 'thumbnail': r're:https?://cdn\.jwplayer\.com/v2/media/.+', 'timestamp': 1751043600, 'upload_date': '20250627', }, }] def _real_extract(self, url): display_id, video_id = self._match_valid_url(url).groups() info = { '_type': 'url_transparent', 'id': video_id, 'display_id': display_id, } if len(video_id) == 11: info.update({ 'url': video_id, 'ie_key': 'Youtube', }) else: info.update({ 'url': 'jwplatform:' + video_id, 'ie_key': 'JWPlatform', }) return info
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ninenews.py
yt_dlp/extractor/ninenews.py
from .brightcove import BrightcoveNewIE from .common import InfoExtractor from ..utils import ExtractorError from ..utils.traversal import traverse_obj class NineNewsIE(InfoExtractor): IE_NAME = '9News' _VALID_URL = r'https?://(?:www\.)?9news\.com\.au/(?:[\w-]+/){2,3}(?P<id>[\w-]+)/?(?:$|[?#])' _TESTS = [{ 'url': 'https://www.9news.com.au/videos/national/fair-trading-pulls-dozens-of-toys-from-shelves/clqgc7dvj000y0jnvfism0w5m', 'md5': 'd1a65b2e9d126e5feb9bc5cb96e62c80', 'info_dict': { 'id': '6343717246112', 'ext': 'mp4', 'title': 'Fair Trading pulls dozens of toys from shelves', 'description': 'Fair Trading Australia have been forced to pull dozens of toys from shelves over hazard fears.', 'thumbnail': 'md5:bdbe44294e2323b762d97acf8843f66c', 'duration': 93.44, 'timestamp': 1703231748, 'upload_date': '20231222', 'uploader_id': '664969388001', 'tags': ['networkclip', 'aunews_aunationalninenews', 'christmas presents', 'toys', 'fair trading', 'au_news'], }, }, { 'url': 'https://www.9news.com.au/world/tape-reveals-donald-trump-pressured-michigan-officials-not-to-certify-2020-vote-a-new-report-says/0b8b880e-7d3c-41b9-b2bd-55bc7e492259', 'md5': 'a885c44d20898c3e70e9a53e8188cea1', 'info_dict': { 'id': '6343587450112', 'ext': 'mp4', 'title': 'Trump found ineligible to run for president by state court', 'description': 'md5:40e6e7db7a4ac6be0e960569a5af6066', 'thumbnail': 'md5:3e132c48c186039fd06c10787de9bff2', 'duration': 104.64, 'timestamp': 1703058034, 'upload_date': '20231220', 'uploader_id': '664969388001', 'tags': ['networkclip', 'aunews_aunationalninenews', 'ineligible', 'presidential candidate', 'donald trump', 'au_news'], }, }, { 'url': 'https://www.9news.com.au/national/outrage-as-parents-banned-from-giving-gifts-to-kindergarten-teachers/e19b49d4-a1a4-4533-9089-6e10e2d9386a', 'info_dict': { 'id': '6343716797112', 'ext': 'mp4', 'title': 'Outrage as parents banned from giving gifts to kindergarten teachers', 'description': 'md5:7a8b0ed2f9e08875fd9a3e86e462bc46', 
'thumbnail': 'md5:5ee4d66717bdd0dee9fc9a705ef041b8', 'duration': 91.307, 'timestamp': 1703229584, 'upload_date': '20231222', 'uploader_id': '664969388001', 'tags': ['networkclip', 'aunews_aunationalninenews', 'presents', 'teachers', 'kindergarten', 'au_news'], }, }] def _real_extract(self, url): article_id = self._match_id(url) webpage = self._download_webpage(url, article_id) initial_state = self._search_json( r'var\s+__INITIAL_STATE__\s*=', webpage, 'initial state', article_id) video_id = traverse_obj( initial_state, ('videoIndex', 'currentVideo', 'brightcoveId', {str}), ('article', ..., 'media', lambda _, v: v['type'] == 'video', 'urn', {str}), get_all=False) account = traverse_obj(initial_state, ( 'videoIndex', 'config', (None, 'video'), 'account', {str}), get_all=False) if not video_id or not account: raise ExtractorError('Unable to get the required video data') return self.url_result( f'https://players.brightcove.net/{account}/default_default/index.html?videoId={video_id}', BrightcoveNewIE, video_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tvnoe.py
yt_dlp/extractor/tvnoe.py
import re from .common import InfoExtractor from ..utils import ( clean_html, extract_attributes, js_to_json, mimetype2ext, unified_strdate, url_or_none, urljoin, ) from ..utils.traversal import find_element, traverse_obj class TVNoeIE(InfoExtractor): IE_NAME = 'tvnoe' IE_DESC = 'Televize Noe' _VALID_URL = r'https?://(?:www\.)?tvnoe\.cz/porad/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://www.tvnoe.cz/porad/43216-outdoor-films-s-mudr-tomasem-kempnym-pomahat-potrebnym-nejen-u-nas', 'info_dict': { 'id': '43216-outdoor-films-s-mudr-tomasem-kempnym-pomahat-potrebnym-nejen-u-nas', 'ext': 'mp4', 'title': 'Pomáhat potřebným nejen u nás', 'description': 'md5:78b538ee32f7e881ec23b9c278a0ff3a', 'release_date': '20250531', 'series': 'Outdoor Films s MUDr. Tomášem Kempným', 'thumbnail': r're:https?://www\.tvnoe\.cz/.+\.jpg', }, }, { 'url': 'https://www.tvnoe.cz/porad/43205-zamysleni-tomase-halika-7-nedele-velikonocni', 'info_dict': { 'id': '43205-zamysleni-tomase-halika-7-nedele-velikonocni', 'ext': 'mp4', 'title': '7. 
neděle velikonoční', 'description': 'md5:6bb9908efc59abe60e1c8c7c0e9bb6cd', 'release_date': '20250531', 'series': 'Zamyšlení Tomáše Halíka', 'thumbnail': r're:https?://www\.tvnoe\.cz/.+\.jpg', }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) player = self._search_json( r'var\s+INIT_PLAYER\s*=', webpage, 'init player', video_id, transform_source=js_to_json) formats = [] for source in traverse_obj(player, ('tracks', ..., lambda _, v: url_or_none(v['src']))): src_url = source['src'] ext = mimetype2ext(source.get('type')) if ext == 'm3u8': fmts = self._extract_m3u8_formats( src_url, video_id, 'mp4', m3u8_id='hls', fatal=False) elif ext == 'mpd': fmts = self._extract_mpd_formats( src_url, video_id, mpd_id='dash', fatal=False) else: self.report_warning(f'Unsupported stream type: {ext}') continue formats.extend(fmts) return { 'id': video_id, 'description': clean_html(self._search_regex( r'<p\s+class="">(.+?)</p>', webpage, 'description', default=None)), 'formats': formats, **traverse_obj(webpage, { 'title': ({find_element(tag='h2')}, {clean_html}), 'release_date': ( {clean_html}, {re.compile(r'Premiéra:\s*(\d{1,2}\.\d{1,2}\.\d{4})').findall}, ..., {str}, {unified_strdate}, any), 'series': ({find_element(tag='h1')}, {clean_html}), 'thumbnail': ( {find_element(id='player-live', html=True)}, {extract_attributes}, 'poster', {urljoin('https://www.tvnoe.cz/')}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/alura.py
yt_dlp/extractor/alura.py
import re import urllib.parse from .common import InfoExtractor from ..utils import ( ExtractorError, clean_html, int_or_none, urlencode_postdata, urljoin, ) class AluraIE(InfoExtractor): _VALID_URL = r'https?://(?:cursos\.)?alura\.com\.br/course/(?P<course_name>[^/]+)/task/(?P<id>\d+)' _LOGIN_URL = 'https://cursos.alura.com.br/loginForm?urlAfterLogin=/loginForm' _VIDEO_URL = 'https://cursos.alura.com.br/course/%s/task/%s/video' _NETRC_MACHINE = 'alura' _TESTS = [{ 'url': 'https://cursos.alura.com.br/course/clojure-mutabilidade-com-atoms-e-refs/task/60095', 'info_dict': { 'id': '60095', 'ext': 'mp4', 'title': 'Referências, ref-set e alter', }, 'skip': 'Requires alura account credentials'}, { # URL without video 'url': 'https://cursos.alura.com.br/course/clojure-mutabilidade-com-atoms-e-refs/task/60098', 'only_matching': True}, { 'url': 'https://cursos.alura.com.br/course/fundamentos-market-digital/task/55219', 'only_matching': True}, ] def _real_extract(self, url): course, video_id = self._match_valid_url(url).group('course_name', 'id') video_url = self._VIDEO_URL % (course, video_id) video_dict = self._download_json(video_url, video_id, 'Searching for videos') if video_dict: webpage = self._download_webpage(url, video_id) video_title = clean_html(self._search_regex( r'<span[^>]+class=(["\'])task-body-header-title-text\1[^>]*>(?P<title>[^<]+)', webpage, 'title', group='title')) formats = [] for video_obj in video_dict: video_url_m3u8 = video_obj.get('mp4') video_format = self._extract_m3u8_formats( video_url_m3u8, None, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False) for f in video_format: m = re.search(r'^[\w \W]*-(?P<res>\w*).mp4[\W \w]*', f['url']) if m: if not f.get('height'): f['height'] = int('720' if m.group('res') == 'hd' else '480') formats.extend(video_format) return { 'id': video_id, 'title': video_title, 'formats': formats, } def _perform_login(self, username, password): login_page = self._download_webpage( self._LOGIN_URL, None, 
'Downloading login popup') def is_logged(webpage): return any(re.search(p, webpage) for p in ( r'href=[\"|\']?/signout[\"|\']', r'>Logout<')) # already logged in if is_logged(login_page): return login_form = self._hidden_inputs(login_page) login_form.update({ 'username': username, 'password': password, }) post_url = self._search_regex( r'<form[^>]+class=["|\']signin-form["|\'] action=["|\'](?P<url>.+?)["|\']', login_page, 'post url', default=self._LOGIN_URL, group='url') if not post_url.startswith('http'): post_url = urllib.parse.urljoin(self._LOGIN_URL, post_url) response = self._download_webpage( post_url, None, 'Logging in', data=urlencode_postdata(login_form), headers={'Content-Type': 'application/x-www-form-urlencoded'}) if not is_logged(response): error = self._html_search_regex( r'(?s)<p[^>]+class="alert-message[^"]*">(.+?)</p>', response, 'error message', default=None) if error: raise ExtractorError(f'Unable to login: {error}', expected=True) raise ExtractorError('Unable to log in') class AluraCourseIE(AluraIE): # XXX: Do not subclass from concrete IE _VALID_URL = r'https?://(?:cursos\.)?alura\.com\.br/course/(?P<id>[^/]+)' _LOGIN_URL = 'https://cursos.alura.com.br/loginForm?urlAfterLogin=/loginForm' _NETRC_MACHINE = 'aluracourse' _TESTS = [{ 'url': 'https://cursos.alura.com.br/course/clojure-mutabilidade-com-atoms-e-refs', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if AluraIE.suitable(url) else super().suitable(url) def _real_extract(self, url): course_path = self._match_id(url) webpage = self._download_webpage(url, course_path) course_title = self._search_regex( r'<h1.*?>(.*?)<strong>(?P<course_title>.*?)</strong></h[0-9]>', webpage, 'course title', default=course_path, group='course_title') entries = [] if webpage: for path in re.findall(r'<a\b(?=[^>]* class="[^"]*(?<=[" ])courseSectionList-section[" ])(?=[^>]* href="([^"]*))', webpage): page_url = urljoin(url, path) section_path = self._download_webpage(page_url, 
course_path) for path_video in re.findall(r'<a\b(?=[^>]* class="[^"]*(?<=[" ])task-menu-nav-item-link-VIDEO[" ])(?=[^>]* href="([^"]*))', section_path): chapter = clean_html( self._search_regex( r'<h3[^>]+class=(["\'])task-menu-section-title-text\1[^>]*>(?P<chapter>[^<]+)', section_path, 'chapter', group='chapter')) chapter_number = int_or_none( self._search_regex( r'<span[^>]+class=(["\'])task-menu-section-title-number[^>]*>(.*?)<strong>(?P<chapter_number>[^<]+)</strong>', section_path, 'chapter number', group='chapter_number')) video_url = urljoin(url, path_video) entry = { '_type': 'url_transparent', 'id': self._match_id(video_url), 'url': video_url, 'id_key': self.ie_key(), 'chapter': chapter, 'chapter_number': chapter_number, } entries.append(entry) return self.playlist_result(entries, course_path, course_title)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tumblr.py
yt_dlp/extractor/tumblr.py
from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, traverse_obj, url_or_none, urlencode_postdata, ) class TumblrIE(InfoExtractor): _VALID_URL = r'https?://(?P<blog_name_1>[^/?#&]+)\.tumblr\.com/(?:post|video|(?P<blog_name_2>[a-zA-Z\d-]+))/(?P<id>[0-9]+)(?:$|[/?#])' _NETRC_MACHINE = 'tumblr' _LOGIN_URL = 'https://www.tumblr.com/login' _OAUTH_URL = 'https://www.tumblr.com/api/v2/oauth2/token' _TESTS = [{ 'url': 'http://tatianamaslanydaily.tumblr.com/post/54196191430/orphan-black-dvd-extra-behind-the-scenes', 'md5': '479bb068e5b16462f5176a6828829767', 'info_dict': { 'id': '54196191430', 'ext': 'mp4', 'title': 'md5:dfac39636969fe6bf1caa2d50405f069', 'timestamp': 1372531260, 'upload_date': '20130629', 'description': 'md5:390ab77358960235b6937ab3b8528956', 'uploader_id': 'tatianamaslanydaily', 'uploader_url': 'https://tatianamaslanydaily.tumblr.com/', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 127, 'like_count': int, 'repost_count': int, 'age_limit': 0, 'tags': ['Orphan Black', 'Tatiana Maslany', 'Interview', 'Video', 'OB S1 DVD Extras'], }, }, { 'note': 'multiple formats', 'url': 'https://maskofthedragon.tumblr.com/post/626907179849564160/mona-talking-in-english', 'md5': 'f43ff8a8861712b6cf0e0c2bd84cfc68', 'info_dict': { 'id': '626907179849564160', 'ext': 'mp4', 'title': 'Mona\xa0“talking” in\xa0“english”', 'description': 'md5:082a3a621530cb786ad2b7592a6d9e2c', 'timestamp': 1597865276, 'upload_date': '20200819', 'uploader_id': 'maskofthedragon', 'uploader_url': 'https://maskofthedragon.tumblr.com/', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 7, 'like_count': int, 'repost_count': int, 'age_limit': 0, 'tags': 'count:19', }, 'params': { 'format': 'hd', }, }, { 'note': 'non-iframe video (with related posts)', 'url': 'https://shieldfoss.tumblr.com/post/675519763813908480', 'md5': '12bdb75661ef443bffe5a4dac1dbf118', 'info_dict': { 'id': '675519763813908480', 'ext': 'mp4', 'title': 'Shieldfoss', 'uploader_id': 'nerviovago', 
'uploader_url': 'https://nerviovago.tumblr.com/', 'thumbnail': r're:^https?://.*\.jpg', 'like_count': int, 'repost_count': int, 'age_limit': 0, 'tags': [], }, 'skip': '404', }, { 'note': 'dashboard only (original post)', 'url': 'https://jujanon.tumblr.com/post/159704441298/my-baby-eating', 'md5': '029f7c91ab386701b211e3d494d2d95e', 'info_dict': { 'id': '159704441298', 'ext': 'mp4', 'title': 'md5:ba79365861101f4911452728d2950561', 'timestamp': 1492489550, 'upload_date': '20170418', 'description': 'md5:773738196cea76b6996ec71e285bdabc', 'uploader_id': 'jujanon', 'uploader_url': 'https://jujanon.tumblr.com/', 'thumbnail': r're:^https?://.*\.jpg', 'like_count': int, 'repost_count': int, 'age_limit': 0, 'tags': ['crabs', 'my video', 'my pets'], }, }, { 'note': 'dashboard only (reblog)', 'url': 'https://bartlebyshop.tumblr.com/post/180294460076/duality-of-bird', 'md5': '04334e7cadb1af680d162912559f51a5', 'info_dict': { 'id': '180294460076', 'ext': 'mp4', 'title': 'duality of bird', 'timestamp': 1542651819, 'upload_date': '20181119', 'description': 'duality of bird', 'uploader_id': 'todaysbird', 'uploader_url': 'https://todaysbird.tumblr.com/', 'thumbnail': r're:^https?://.*\.jpg', 'like_count': int, 'repost_count': int, 'age_limit': 0, }, }, { 'note': 'dashboard only (external)', 'url': 'https://afloweroutofstone.tumblr.com/post/675661759168823296/the-blues-remembers-everything-the-country-forgot', 'info_dict': { 'id': 'q67_fd7b8SU', 'ext': 'mp4', 'title': 'The Blues Remembers Everything the Country Forgot', 'alt_title': 'The Blues Remembers Everything the Country Forgot', 'description': 'md5:1a6b4097e451216835a24c1023707c79', 'creator': 'md5:c2239ba15430e87c3b971ba450773272', 'uploader': 'Moor Mother - Topic', 'upload_date': '20201223', 'uploader_id': 'UCxrMtFBRkFvQJ_vVM4il08w', 'uploader_url': 'http://www.youtube.com/channel/UCxrMtFBRkFvQJ_vVM4il08w', 'thumbnail': r're:^https?://i.ytimg.com/.*', 'channel': 'Moor Mother', 'channel_id': 'UCxrMtFBRkFvQJ_vVM4il08w', 
'channel_url': 'https://www.youtube.com/channel/UCxrMtFBRkFvQJ_vVM4il08w', 'channel_follower_count': int, 'duration': 181, 'view_count': int, 'like_count': int, 'age_limit': 0, 'categories': ['Music'], 'tags': 'count:7', 'live_status': 'not_live', 'playable_in_embed': True, 'availability': 'public', 'track': 'The Blues Remembers Everything the Country Forgot', 'artist': 'md5:c2239ba15430e87c3b971ba450773272', 'album': 'Brass', 'release_year': 2020, }, 'add_ie': ['Youtube'], 'skip': 'Video Unavailable', }, { 'url': 'https://prozdvoices.tumblr.com/post/673201091169681408/what-recording-voice-acting-sounds-like', 'md5': 'cb8328a6723c30556cef59e370202918', 'info_dict': { 'id': 'eomhW5MLGWA', 'ext': 'mp4', 'title': 'what recording voice acting sounds like', 'description': 'md5:1da3faa22d0e0b1d8b50216c284ee798', 'uploader': 'ProZD', 'upload_date': '20220112', 'uploader_id': '@ProZD', 'uploader_url': 'https://www.youtube.com/@ProZD', 'thumbnail': r're:^https?://i.ytimg.com/.*', 'channel': 'ProZD', 'channel_id': 'UC6MFZAOHXlKK1FI7V0XQVeA', 'channel_url': 'https://www.youtube.com/channel/UC6MFZAOHXlKK1FI7V0XQVeA', 'channel_follower_count': int, 'duration': 20, 'view_count': int, 'like_count': int, 'age_limit': 0, 'categories': ['Film & Animation'], 'tags': [], 'live_status': 'not_live', 'playable_in_embed': True, 'availability': 'public', 'heatmap': 'count:100', 'channel_is_verified': True, 'timestamp': 1642014562, 'comment_count': int, }, 'add_ie': ['Youtube'], }, { 'url': 'https://dominustempori.tumblr.com/post/673572712813297664/youtubes-all-right-for-some-pretty-cool', 'md5': '203e9eb8077e3f45bfaeb4c86c1467b8', 'info_dict': { 'id': '87816359', 'ext': 'mp4', 'title': 'Harold Ramis', 'description': 'md5:c99882405fcca0b1d348ad093f8f1672', 'uploader': 'Resolution Productions Group', 'uploader_id': 'resolutionproductions', 'uploader_url': 'https://vimeo.com/resolutionproductions', 'upload_date': '20140227', 'thumbnail': r're:^https?://i.vimeocdn.com/video/.*', 'timestamp': 
1393541719, 'duration': 291, 'comment_count': int, 'like_count': int, 'release_timestamp': 1393541719, 'release_date': '20140227', }, 'add_ie': ['Vimeo'], }, { 'url': 'https://silami.tumblr.com/post/84250043974/my-bad-river-flows-in-you-impression-on-maschine', 'md5': '3c92d7c3d867f14ccbeefa2119022277', 'info_dict': { 'id': 'nYtvtTPuTl', 'ext': 'mp4', 'title': 'Video by silbulterman', 'description': '#maschine', 'uploader_id': '242859024', 'thumbnail': r're:^https?://.*\.jpg', 'timestamp': 1398801174, 'like_count': int, 'uploader': 'Sil', 'channel': 'silbulterman', 'comment_count': int, 'upload_date': '20140429', }, 'add_ie': ['Instagram'], }, { 'note': 'new url scheme', 'url': 'https://www.tumblr.com/autumnsister/765162750456578048?source=share', 'info_dict': { 'id': '765162750456578048', 'ext': 'mp4', 'uploader_url': 'https://autumnsister.tumblr.com/', 'tags': ['autumn', 'food', 'curators on tumblr'], 'like_count': int, 'thumbnail': 'https://64.media.tumblr.com/tumblr_sklad89N3x1ygquow_frame1.jpg', 'title': '🪹', 'uploader_id': 'autumnsister', 'repost_count': int, 'age_limit': 0, }, }, { 'note': 'bandcamp album embed', 'url': 'https://patricia-taxxon.tumblr.com/post/704473755725004800/patricia-taxxon-agnes-hilda-patricia-taxxon', 'info_dict': { 'id': 'agnes-hilda', 'title': 'Agnes & Hilda', 'description': 'The inexplicable joy of an artist. Wash paws after listening.', 'uploader_id': 'patriciataxxon', }, 'playlist_count': 8, }, { 'note': 'bandcamp track embeds (many)', 'url': 'https://www.tumblr.com/felixcosm/730460905855467520/if-youre-looking-for-new-music-to-write-or', 'info_dict': { 'id': '730460905855467520', 'uploader_id': 'felixcosm', 'upload_date': '20231006', 'timestamp': 1696621805, 'repost_count': int, 'tags': 'count:15', 'description': 'md5:2eb3482a3c6987280cbefb6839068f32', 'like_count': int, 'age_limit': 0, 'title': 'If you\'re looking for new music to write or imagine scenerios to: STOP. 
This is for you.', 'uploader_url': 'https://felixcosm.tumblr.com/', }, 'playlist_count': 10, }, { 'note': 'soundcloud track embed', 'url': 'https://silverfoxstole.tumblr.com/post/765305403763556352/jamie-robertson-doctor-who-8th-doctor', 'info_dict': { 'id': '1218136399', 'ext': 'opus', 'comment_count': int, 'genres': [], 'repost_count': int, 'uploader': 'Jamie Robertson', 'title': 'Doctor Who - 8th doctor - Stranded Theme never released and used.', 'duration': 46.106, 'uploader_id': '2731064', 'thumbnail': 'https://i1.sndcdn.com/artworks-MVgcPm5jN42isC5M-6Dz22w-original.jpg', 'timestamp': 1645181261, 'uploader_url': 'https://soundcloud.com/jamierobertson', 'view_count': int, 'upload_date': '20220218', 'description': 'md5:ab924dd9994d0a7d64d6d31bf2af4625', 'license': 'all-rights-reserved', 'like_count': int, }, }, { 'note': 'soundcloud set embed', 'url': 'https://www.tumblr.com/beyourselfchulanmaria/703505323122638848/chu-lan-maria-the-playlist-%E5%BF%83%E7%9A%84%E5%91%BC%E5%96%9A-call-of-the', 'info_dict': { 'id': '691222680', 'title': '心的呼喚 Call of the heart I', 'description': 'md5:25952a8d178a3aa55e40fcbb646a38c3', }, 'playlist_mincount': 19, }, { 'note': 'dailymotion video embed', 'url': 'https://www.tumblr.com/funvibecentral/759390024460632064', 'info_dict': { 'id': 'x94cnnk', 'ext': 'mp4', 'description': 'Funny dailymotion shorts.\n#funny #fun#comedy #romantic #exciting', 'uploader': 'FunVibe Central', 'like_count': int, 'view_count': int, 'timestamp': 1724210553, 'title': 'Woman watching other Woman', 'tags': [], 'upload_date': '20240821', 'age_limit': 0, 'uploader_id': 'x32m6ye', 'duration': 20, 'thumbnail': r're:https://(?:s[12]\.)dmcdn\.net/v/Wtqh01cnxKNXLG1N8/x1080', }, }, { 'note': 'tiktok video embed', 'url': 'https://fansofcolor.tumblr.com/post/660637918605475840/blockquote-class-tiktok-embed', 'info_dict': { 'id': '7000937272010935558', 'ext': 'mp4', 'artists': ['Alicia Dreaming'], 'like_count': int, 'repost_count': int, 'thumbnail': 
r're:^https?://[\w\/\.\-]+(~[\w\-]+\.image)?', 'channel_id': 'MS4wLjABAAAAsJohwz_dU4KfAOc61cbGDAZ46-5hg2ANTXVQlRe1ipDhpX08PywR3PPiple1NTAo', 'uploader': 'aliciadreaming', 'description': 'huge casting news Greyworm will be #louisdulac #racebending #interviewwiththevampire', 'title': 'huge casting news Greyworm will be #louisdulac #racebending #interviewwiththevampire', 'channel_url': 'https://www.tiktok.com/@MS4wLjABAAAAsJohwz_dU4KfAOc61cbGDAZ46-5hg2ANTXVQlRe1ipDhpX08PywR3PPiple1NTAo', 'uploader_id': '7000478462196990982', 'uploader_url': 'https://www.tiktok.com/@aliciadreaming', 'timestamp': 1630032733, 'channel': 'Alicia Dreaming', 'track': 'original sound', 'upload_date': '20210827', 'view_count': int, 'comment_count': int, 'duration': 59, }, }, { 'note': 'tumblr video AND youtube embed', 'url': 'https://www.tumblr.com/anyaboz/765332564457209856/my-music-video-for-selkie-by-nobodys-wolf-child', 'info_dict': { 'id': '765332564457209856', 'timestamp': 1729878010, 'upload_date': '20241025', 'uploader_id': 'anyaboz', 'repost_count': int, 'age_limit': 0, 'uploader_url': 'https://anyaboz.tumblr.com/', 'description': 'md5:9a129cf6ce9d87a80ffd3c6dedd4d1e6', 'like_count': int, 'title': 'md5:b18a2ac9387681d20303e485db85c1b5', 'tags': ['music video', 'nobodys wolf child', 'selkie', 'Stop Motion Animation', 'stop Motion', 'room guardians', 'Youtube'], }, 'playlist_count': 2, }, { # twitch_live provider - error when linked account is not live 'url': 'https://www.tumblr.com/anarcho-skamunist/722224493650722816/hollow-knight-stream-right-now-going-to-fight', 'only_matching': True, }] _providers = { 'instagram': 'Instagram', 'vimeo': 'Vimeo', 'youtube': 'Youtube', 'dailymotion': 'Dailymotion', 'tiktok': 'TikTok', 'twitch_live': 'TwitchStream', 'bandcamp': None, 'soundcloud': None, } # known not to be supported _unsupported_providers = { # seems like podcasts can't be embedded 'spotify', } _ACCESS_TOKEN = None def _initialize_pre_login(self): login_page = self._download_webpage( 
self._LOGIN_URL, None, 'Downloading login page', fatal=False) if login_page: self._ACCESS_TOKEN = self._search_regex( r'"API_TOKEN":\s*"(\w+)"', login_page, 'API access token', fatal=False) if not self._ACCESS_TOKEN: self.report_warning('Failed to get access token; metadata will be missing and some videos may not work') def _perform_login(self, username, password): if not self._ACCESS_TOKEN: return data = { 'password': password, 'grant_type': 'password', 'username': username, } if self.get_param('twofactor'): data['tfa_token'] = self.get_param('twofactor') def _call_login(): return self._download_json( self._OAUTH_URL, None, 'Logging in', data=urlencode_postdata(data), headers={ 'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': f'Bearer {self._ACCESS_TOKEN}', }, errnote='Login failed', fatal=False, expected_status=lambda s: 400 <= s < 500) response = _call_login() if traverse_obj(response, 'error') == 'tfa_required': data['tfa_token'] = self._get_tfa_info() response = _call_login() if traverse_obj(response, 'error'): raise ExtractorError( f'API returned error {": ".join(traverse_obj(response, (("error", "error_description"), {str})))}') def _real_extract(self, url): blog_1, blog_2, video_id = self._match_valid_url(url).groups() blog = blog_2 or blog_1 url = f'http://{blog}.tumblr.com/post/{video_id}' webpage, urlh = self._download_webpage_handle( url, video_id, headers={'User-Agent': 'WhatsApp/2.0'}) # whatsapp ua bypasses problems redirect_url = urlh.url api_only = bool(self._search_regex( r'(tumblr.com|^)/(safe-mode|login_required|blog/view)', redirect_url, 'redirect', default=None)) if api_only and not self._ACCESS_TOKEN: raise ExtractorError('Cannot get data for dashboard-only post without access token') post_json = {} if self._ACCESS_TOKEN: post_json = traverse_obj( self._download_json( f'https://www.tumblr.com/api/v2/blog/{blog}/posts/{video_id}/permalink', video_id, headers={'Authorization': f'Bearer {self._ACCESS_TOKEN}'}, fatal=False), 
('response', 'timeline', 'elements', 0, {dict})) or {} content_json = traverse_obj(post_json, ((('trail', 0), None), 'content', ..., {dict})) # the url we're extracting from might be an original post or it might be a reblog. # if it's a reblog, og:description will be the reblogger's comment, not the uploader's. # content_json is always the op, so if it exists but has no text, there's no description if content_json: description = '\n\n'.join( item.get('text') for item in content_json if item.get('type') == 'text') or None else: description = self._og_search_description(webpage, default=None) uploader_id = traverse_obj(post_json, 'reblogged_root_name', 'blog_name') info_dict = { 'id': video_id, 'title': post_json.get('summary') or (blog if api_only else self._html_search_regex( r'(?s)<title>(?P<title>.*?)(?: \| Tumblr)?</title>', webpage, 'title', default=blog)), 'description': description, 'uploader_id': uploader_id, 'uploader_url': f'https://{uploader_id}.tumblr.com/' if uploader_id else None, **traverse_obj(post_json, { # Try oldest post in reblog chain, fall back to timestamp of the post itself 'timestamp': ((('trail', 0, 'post'), None), 'timestamp', {int_or_none}, any), 'like_count': ('like_count', {int_or_none}), 'repost_count': ('reblog_count', {int_or_none}), 'tags': ('tags', ..., {str}), }), 'age_limit': {True: 18, False: 0}.get(post_json.get('is_nsfw')), } # for tumblr's own video hosting fallback_format = None formats = [] video_url = self._og_search_video_url(webpage, default=None) # for external video hosts entries = [] ignored_providers = set() unknown_providers = set() for video_json in traverse_obj(content_json, lambda _, v: v['type'] in ('video', 'audio')): media_json = video_json.get('media') or {} if api_only and not media_json.get('url') and not video_json.get('url'): raise ExtractorError('Failed to find video data for dashboard-only post') provider = video_json.get('provider') if provider in ('tumblr', None): fallback_format = { 'url': 
media_json.get('url') or video_url, 'width': int_or_none( media_json.get('width') or self._og_search_property('video:width', webpage, default=None)), 'height': int_or_none( media_json.get('height') or self._og_search_property('video:height', webpage, default=None)), } continue elif provider in self._unsupported_providers: ignored_providers.add(provider) continue elif provider and provider not in self._providers: unknown_providers.add(provider) if video_json.get('url'): # external video host entries.append(self.url_result( video_json['url'], self._providers.get(provider))) duration = None # iframes can supply duration and sometimes additional formats, so check for one iframe_url = self._search_regex( fr'src=\'(https?://www\.tumblr\.com/video/{blog}/{video_id}/[^\']+)\'', webpage, 'iframe url', default=None) if iframe_url: iframe = self._download_webpage( iframe_url, video_id, 'Downloading iframe page', headers={'Referer': redirect_url}) options = self._parse_json( self._search_regex( r'data-crt-options=(["\'])(?P<options>.+?)\1', iframe, 'hd video url', default='', group='options'), video_id, fatal=False) if options: duration = int_or_none(options.get('duration')) hd_url = options.get('hdUrl') if hd_url: # there are multiple formats; extract them # ignore other sources of width/height data as they may be wrong sources = [] sd_url = self._search_regex( r'<source[^>]+src=(["\'])(?P<url>.+?)\1', iframe, 'sd video url', default=None, group='url') if sd_url: sources.append((sd_url, 'sd')) sources.append((hd_url, 'hd')) formats = [{ 'url': video_url, 'format_id': format_id, 'height': int_or_none(self._search_regex( r'_(\d+)\.\w+$', video_url, 'height', default=None)), 'quality': quality, } for quality, (video_url, format_id) in enumerate(sources)] if not formats and fallback_format: formats.append(fallback_format) if formats: # tumblr's own video is always above embeds entries.insert(0, { **info_dict, 'formats': formats, 'duration': duration, 'thumbnail': 
(traverse_obj(video_json, ('poster', 0, 'url', {url_or_none})) or self._og_search_thumbnail(webpage, default=None)), }) if ignored_providers: if not entries: raise ExtractorError(f'None of embed providers are supported: {", ".join(ignored_providers)!s}', video_id=video_id, expected=True) else: self.report_warning(f'Skipped embeds from unsupported providers: {", ".join(ignored_providers)!s}', video_id) if unknown_providers: self.report_warning(f'Unrecognized providers, please report: {", ".join(unknown_providers)!s}', video_id) if not entries: self.raise_no_formats('No video could be found in this post', expected=True, video_id=video_id) if len(entries) == 1: return { **info_dict, **entries[0], } return { **info_dict, '_type': 'playlist', 'entries': entries, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/microsoftstream.py
yt_dlp/extractor/microsoftstream.py
import base64 from .common import InfoExtractor from ..utils import ( merge_dicts, parse_duration, parse_iso8601, parse_resolution, try_get, url_basename, ) class MicrosoftStreamIE(InfoExtractor): IE_NAME = 'microsoftstream' IE_DESC = 'Microsoft Stream' _VALID_URL = r'https?://(?:web|www|msit)\.microsoftstream\.com/video/(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})' _TESTS = [{ 'url': 'https://web.microsoftstream.com/video/6e51d928-4f46-4f1c-b141-369925e37b62?list=user&userId=f5491e02-e8fe-4e34-b67c-ec2e79a6ecc0', 'only_matching': True, }, { 'url': 'https://msit.microsoftstream.com/video/b60f5987-aabd-4e1c-a42f-c559d138f2ca', 'only_matching': True, }] def _get_all_subtitles(self, api_url, video_id, headers): subtitles = {} automatic_captions = {} text_tracks = self._download_json( f'{api_url}/videos/{video_id}/texttracks', video_id, note='Downloading subtitles JSON', fatal=False, headers=headers, query={'api-version': '1.4-private'}).get('value') or [] for track in text_tracks: if not track.get('language') or not track.get('url'): continue sub_dict = automatic_captions if track.get('autoGenerated') else subtitles sub_dict.setdefault(track['language'], []).append({ 'ext': 'vtt', 'url': track.get('url'), }) return { 'subtitles': subtitles, 'automatic_captions': automatic_captions, } def extract_all_subtitles(self, *args, **kwargs): if (self.get_param('writesubtitles', False) or self.get_param('writeautomaticsub', False) or self.get_param('listsubtitles')): return self._get_all_subtitles(*args, **kwargs) return {} def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) if '<title>Microsoft Stream</title>' not in webpage: self.raise_login_required(method='cookies') access_token = self._html_search_regex(r'"AccessToken":"(.+?)"', webpage, 'access token') api_url = self._html_search_regex(r'"ApiGatewayUri":"(.+?)"', webpage, 'api url') headers = {'Authorization': f'Bearer {access_token}'} video_data 
= self._download_json( f'{api_url}/videos/{video_id}', video_id, headers=headers, query={ '$expand': 'creator,tokens,status,liveEvent,extensions', 'api-version': '1.4-private', }) video_id = video_data.get('id') or video_id language = video_data.get('language') thumbnails = [] for thumbnail_id in ('extraSmall', 'small', 'medium', 'large'): thumbnail_url = try_get(video_data, lambda x: x['posterImage'][thumbnail_id]['url'], str) if not thumbnail_url: continue thumb = { 'id': thumbnail_id, 'url': thumbnail_url, } thumb_name = url_basename(thumbnail_url) thumb_name = str(base64.b64decode(thumb_name + '=' * (-len(thumb_name) % 4))) thumb.update(parse_resolution(thumb_name)) thumbnails.append(thumb) formats = [] for playlist in video_data['playbackUrls']: if playlist['mimeType'] == 'application/vnd.apple.mpegurl': formats.extend(self._extract_m3u8_formats( playlist['playbackUrl'], video_id, ext='mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False, headers=headers)) elif playlist['mimeType'] == 'application/dash+xml': formats.extend(self._extract_mpd_formats( playlist['playbackUrl'], video_id, mpd_id='dash', fatal=False, headers=headers)) elif playlist['mimeType'] == 'application/vnd.ms-sstr+xml': formats.extend(self._extract_ism_formats( playlist['playbackUrl'], video_id, ism_id='mss', fatal=False, headers=headers)) formats = [merge_dicts(f, {'language': language}) for f in formats] return { 'id': video_id, 'title': video_data['name'], 'description': video_data.get('description'), 'uploader': try_get(video_data, lambda x: x['creator']['name'], str), 'uploader_id': try_get(video_data, (lambda x: x['creator']['mail'], lambda x: x['creator']['id']), str), 'thumbnails': thumbnails, **self.extract_all_subtitles(api_url, video_id, headers), 'timestamp': parse_iso8601(video_data.get('created')), 'duration': parse_duration(try_get(video_data, lambda x: x['media']['duration'])), 'webpage_url': f'https://web.microsoftstream.com/video/{video_id}', 'view_count': 
try_get(video_data, lambda x: x['metrics']['views'], int), 'like_count': try_get(video_data, lambda x: x['metrics']['likes'], int), 'comment_count': try_get(video_data, lambda x: x['metrics']['comments'], int), 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/pandatv.py
yt_dlp/extractor/pandatv.py
from .common import InfoExtractor from ..utils import ( ExtractorError, UserNotLive, filter_dict, int_or_none, join_nonempty, parse_iso8601, url_or_none, urlencode_postdata, ) from ..utils.traversal import traverse_obj class PandaTvIE(InfoExtractor): IE_DESC = 'pandalive.co.kr (팬더티비)' _VALID_URL = r'https?://(?:www\.|m\.)?pandalive\.co\.kr/play/(?P<id>\w+)' _TESTS = [{ 'url': 'https://www.pandalive.co.kr/play/bebenim', 'info_dict': { 'id': 'bebenim', 'ext': 'mp4', 'channel': '릴리ෆ', 'title': r're:앙앙❤ \d{4}-\d{2}-\d{2} \d{2}:\d{2}', 'thumbnail': r're:https://cdn\.pandalive\.co\.kr/ivs/v1/.+/thumb\.jpg', 'concurrent_view_count': int, 'like_count': int, 'live_status': 'is_live', 'upload_date': str, }, 'skip': 'The channel is not currently live', }] def _real_extract(self, url): channel_id = self._match_id(url) video_meta = self._download_json( 'https://api.pandalive.co.kr/v1/live/play', channel_id, 'Downloading video meta data', 'Unable to download video meta data', data=urlencode_postdata(filter_dict({ 'action': 'watch', 'userId': channel_id, 'password': self.get_param('videopassword'), })), expected_status=400) if error_code := traverse_obj(video_meta, ('errorData', 'code', {str})): if error_code == 'castEnd': raise UserNotLive(video_id=channel_id) elif error_code == 'needAdult': self.raise_login_required('Adult verification is required for this stream') elif error_code == 'needLogin': self.raise_login_required('Login is required for this stream') elif error_code == 'needCoinPurchase': raise ExtractorError('Coin purchase is required for this stream', expected=True) elif error_code == 'needUnlimitItem': raise ExtractorError('Ticket purchase is required for this stream', expected=True) elif error_code == 'needPw': raise ExtractorError('Password protected video, use --video-password <password>', expected=True) elif error_code == 'wrongPw': raise ExtractorError('Wrong password', expected=True) else: error_msg = video_meta.get('message') raise 
ExtractorError(join_nonempty( 'API returned error code', error_code, error_msg and 'with error message:', error_msg, delim=' ')) http_headers = {'Origin': 'https://www.pandalive.co.kr'} return { 'id': channel_id, 'is_live': True, 'formats': self._extract_m3u8_formats( video_meta['PlayList']['hls'][0]['url'], channel_id, 'mp4', headers=http_headers, live=True), 'http_headers': http_headers, **traverse_obj(video_meta, ('media', { 'title': ('title', {str}), 'release_timestamp': ('startTime', {parse_iso8601(delim=' ')}), 'thumbnail': ('ivsThumbnail', {url_or_none}), 'channel': ('userNick', {str}), 'concurrent_view_count': ('user', {int_or_none}), 'like_count': ('likeCnt', {int_or_none}), })), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/sevenplus.py
yt_dlp/extractor/sevenplus.py
import json
import re

from .brightcove import BrightcoveNewBaseIE
from ..networking.exceptions import HTTPError
from ..utils import (
    ExtractorError,
    try_get,
    update_url_query,
)


class SevenPlusIE(BrightcoveNewBaseIE):
    """Extractor for 7plus.com.au (Seven Network, Australia).

    Playback metadata comes from the SWM video service; the media itself
    is hosted on Brightcove, so parsing is delegated to the Brightcove
    base class.
    """

    IE_NAME = '7plus'
    _VALID_URL = r'https?://(?:www\.)?7plus\.com\.au/(?P<path>[^?]+\?.*?\bepisode-id=(?P<id>[^&#]+))'
    _TESTS = [{
        'url': 'https://7plus.com.au/MTYS?episode-id=MTYS7-003',
        'info_dict': {
            'id': 'MTYS7-003',
            'ext': 'mp4',
            'title': 'S7 E3 - Wind Surf',
            'description': 'md5:29c6a69f21accda7601278f81b46483d',
            'uploader_id': '5303576322001',
            'upload_date': '20171201',
            'timestamp': 1512106377,
            'series': 'Mighty Ships',
            'season_number': 7,
            'episode_number': 3,
            'episode': 'Wind Surf',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://7plus.com.au/UUUU?episode-id=AUMS43-001',
        'only_matching': True,
    }]

    def _real_initialize(self):
        """Best-effort login: exchange Gigya cookies for a 7plus bearer token.

        Every step is non-fatal; on any failure self.token stays None and
        extraction proceeds anonymously. The Gigya API key is recovered
        from the 'glt_<apikey>' login-token cookie name.
        """
        self.token = None

        cookies = self._get_cookies('https://7plus.com.au')
        api_key = next((x for x in cookies if x.startswith('glt_')), '')[4:]
        if not api_key:  # Cookies are signed out, skip login
            return

        login_resp = self._download_json(
            'https://login.7plus.com.au/accounts.getJWT', None, 'Logging in',
            fatal=False, query={
                'APIKey': api_key,
                'sdk': 'js_latest',
                'login_token': cookies[f'glt_{api_key}'].value,
                'authMode': 'cookie',
                'pageURL': 'https://7plus.com.au/',
                'sdkBuild': '12471',
                'format': 'json',
            }) or {}

        if 'errorMessage' in login_resp:
            self.report_warning(f'Unable to login: 7plus said: {login_resp["errorMessage"]}')
            return
        id_token = login_resp.get('id_token')
        if not id_token:
            self.report_warning('Unable to login: Could not extract id token')
            return

        # Trade the Gigya JWT for a 7plus platform token
        token_resp = self._download_json(
            'https://7plus.com.au/auth/token', None, 'Getting auth token',
            fatal=False, headers={'Content-Type': 'application/json'},
            data=json.dumps({
                'idToken': id_token,
                'platformId': 'web',
                'regSource': '7plus',
            }).encode()) or {}
        self.token = token_resp.get('token')
        if not self.token:
            self.report_warning('Unable to log in: Could not extract auth token')

    def _real_extract(self, url):
        """Fetch playback metadata, then enrich it from the content CDN.

        Raises a geo/entitlement error code verbatim on HTTP 403.
        """
        path, episode_id = self._match_valid_url(url).groups()

        headers = {}
        if self.token:
            headers['Authorization'] = f'Bearer {self.token}'

        try:
            media = self._download_json(
                'https://videoservice.swm.digital/playback', episode_id, query={
                    'appId': '7plus',
                    'deviceType': 'web',
                    'platformType': 'web',
                    'accountId': 5303576322001,
                    'referenceId': 'ref:' + episode_id,
                    'deliveryId': 'csai',
                    'videoType': 'vod',
                }, headers=headers)['media']
        except ExtractorError as e:
            if isinstance(e.cause, HTTPError) and e.cause.status == 403:
                # 403 body is a JSON list of error objects; surface the code
                raise ExtractorError(self._parse_json(
                    e.cause.response.read().decode(), episode_id)[0]['error_code'], expected=True)
            raise

        # Strip SSAI rules from each source URL so plain streams are returned
        for source in media.get('sources', {}):
            src = source.get('src')
            if not src:
                continue
            source['src'] = update_url_query(src, {'rule': ''})

        info = self._parse_brightcove_metadata(media, episode_id)

        # Optional editorial metadata (title/description/series) — non-fatal
        content = self._download_json(
            'https://component-cdn.swm.digital/content/' + path,
            episode_id, headers={
                'market-id': 4,
            }, fatal=False) or {}
        for item in content.get('items', {}):
            if item.get('componentData', {}).get('componentType') == 'infoPanel':
                for src_key, dst_key in [('title', 'title'), ('shortSynopsis', 'description')]:
                    value = item.get(src_key)
                    if value:
                        info[dst_key] = value
                info['series'] = try_get(
                    item, lambda x: x['seriesLogo']['name'], str)
                # Titles follow the pattern 'S<season> E<episode> - <name>'
                mobj = re.search(r'^S(\d+)\s+E(\d+)\s+-\s+(.+)$', info['title'])
                if mobj:
                    info.update({
                        'season_number': int(mobj.group(1)),
                        'episode_number': int(mobj.group(2)),
                        'episode': mobj.group(3),
                    })

        return info
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ntvde.py
yt_dlp/extractor/ntvde.py
import re

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    js_to_json,
    url_or_none,
)
from ..utils.traversal import traverse_obj


class NTVDeIE(InfoExtractor):
    """Extractor for videos in the n-tv.de media library."""

    IE_NAME = 'n-tv.de'
    _VALID_URL = r'https?://(?:www\.)?n-tv\.de/mediathek/(?:videos|magazine)/[^/?#]+/[^/?#]+-article(?P<id>[^/?#]+)\.html'

    _TESTS = [{
        'url': 'http://www.n-tv.de/mediathek/videos/panorama/Schnee-und-Glaette-fuehren-zu-zahlreichen-Unfaellen-und-Staus-article14438086.html',
        'md5': '6bcf2a6638cb83f45d5561659a1cb498',
        'info_dict': {
            'id': '14438086',
            'ext': 'mp4',
            'thumbnail': r're:^https?://.*\.jpg$',
            'title': 'Schnee und Glätte führen zu zahlreichen Unfällen und Staus',
            'alt_title': 'Winterchaos auf deutschen Straßen',
            'description': 'Schnee und Glätte sorgen deutschlandweit für einen chaotischen Start in die Woche: Auf den Straßen kommt es zu kilometerlangen Staus und Dutzenden Glätteunfällen. In Düsseldorf und München wirbelt der Schnee zudem den Flugplan durcheinander. Dutzende Flüge landen zu spät, einige fallen ganz aus.',
            'duration': 67,
            'timestamp': 1422892797,
            'upload_date': '20150202',
        },
    }, {
        'url': 'https://www.n-tv.de/mediathek/magazine/auslandsreport/Juedische-Siedler-wollten-Rache-die-wollten-nur-toeten-article24523089.html',
        'md5': 'c5c6014c014ccc3359470e1d34472bfd',
        'info_dict': {
            'id': '24523089',
            'ext': 'mp4',
            'thumbnail': r're:^https?://.*\.jpg$',
            'title': 'Jüdische Siedler "wollten Rache, die wollten nur töten"',
            'alt_title': 'Israelische Gewalt fern von Gaza',
            'description': 'Vier Tage nach dem Massaker der Hamas greifen jüdische Siedler das Haus einer palästinensischen Familie im Westjordanland an. Die Überlebenden berichten, sie waren unbewaffnet, die Angreifer seien nur auf "Rache und Töten" aus gewesen. Als die Toten beerdigt werden sollen, eröffnen die Siedler erneut das Feuer.',
            'duration': 326,
            'timestamp': 1699688294,
            'upload_date': '20231111',
        },
    }]

    def _real_extract(self, url):
        """Scrape article metadata and the embedded player configuration."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # Article JS object carries title/description/timestamp
        article = self._search_json(
            r'article:', webpage, 'info', video_id, transform_source=js_to_json)

        # Player config: strip the non-JSON 'ivw' tracking property before
        # converting the JS literal, then descend to the media sources
        player_source = self._search_json(
            r'\$\(\s*"#playerwrapper"\s*\)\s*\.data\(\s*"player",',
            webpage, 'player data', video_id,
            transform_source=lambda s: js_to_json(re.sub(r'ivw:[^},]+', '', s)))['setup']['source']

        fmts = []
        progressive_url = player_source.get('progressive')
        if progressive_url:
            fmts.append({
                'format_id': 'http',
                'url': progressive_url,
            })
        hls_url = player_source.get('hls')
        if hls_url:
            fmts.extend(self._extract_m3u8_formats(
                hls_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
        dash_url = player_source.get('dash')
        if dash_url:
            fmts.extend(self._extract_mpd_formats(
                dash_url, video_id, fatal=False, mpd_id='dash'))

        result = {'id': video_id}
        result.update(traverse_obj(article, {
            'title': 'headline',
            'description': 'intro',
            'alt_title': 'kicker',
            'timestamp': ('publishedDateAsUnixTimeStamp', {int_or_none}),
        }))
        result.update(traverse_obj(player_source, {
            'thumbnail': ('poster', {url_or_none}),
            'duration': ('length', {int_or_none}),
        }))
        result['formats'] = fmts
        return result
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/newgrounds.py
yt_dlp/extractor/newgrounds.py
import functools
import re

from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
    ExtractorError,
    OnDemandPagedList,
    clean_html,
    extract_attributes,
    get_element_by_id,
    int_or_none,
    parse_count,
    parse_duration,
    unified_timestamp,
    url_or_none,
    urlencode_postdata,
    urljoin,
)
from ..utils.traversal import traverse_obj


class NewgroundsIE(InfoExtractor):
    """Extractor for individual Newgrounds audio and video submissions."""

    _NETRC_MACHINE = 'newgrounds'
    _VALID_URL = r'https?://(?:www\.)?newgrounds\.com/(?:audio/listen|portal/view)/(?P<id>\d+)(?:/format/flash)?'
    _TESTS = [{
        'url': 'https://www.newgrounds.com/audio/listen/549479',
        'md5': 'fe6033d297591288fa1c1f780386f07a',
        'info_dict': {
            'id': '549479',
            'ext': 'mp3',
            'title': 'B7 - BusMode',
            'uploader': 'Burn7',
            'timestamp': 1378892945,
            'upload_date': '20130911',
            'duration': 143,
            'view_count': int,
            'description': 'md5:b8b3c2958875189f07d8e313462e8c4f',
            'age_limit': 0,
            'thumbnail': r're:^https://aicon\.ngfiles\.com/549/549479\.png',
        },
    }, {
        'url': 'https://www.newgrounds.com/portal/view/1',
        'md5': 'fbfb40e2dc765a7e830cb251d370d981',
        'info_dict': {
            'id': '1',
            'ext': 'mp4',
            'title': 'Scrotum 1',
            'uploader': 'Brian-Beaton',
            'timestamp': 955078533,
            'upload_date': '20000407',
            'view_count': int,
            'description': 'Scrotum plays "catch."',
            'age_limit': 17,
            'thumbnail': r're:^https://picon\.ngfiles\.com/0/flash_1_card\.png',
        },
    }, {
        # source format unavailable, additional mp4 formats
        'url': 'http://www.newgrounds.com/portal/view/689400',
        'info_dict': {
            'id': '689400',
            'ext': 'mp4',
            'title': 'ZTV News Episode 8',
            'uploader': 'ZONE-SAMA',
            'timestamp': 1487983183,
            'upload_date': '20170225',
            'view_count': int,
            'description': 'md5:aff9b330ec2e78ed93b1ad6d017accc6',
            'age_limit': 17,
            'thumbnail': r're:^https://picon\.ngfiles\.com/689000/flash_689400_card\.png',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://www.newgrounds.com/portal/view/297383',
        'md5': '2c11f5fd8cb6b433a63c89ba3141436c',
        'info_dict': {
            'id': '297383',
            'ext': 'mp4',
            'title': 'Metal Gear Awesome',
            'uploader': 'Egoraptor',
            'timestamp': 1140681292,
            'upload_date': '20060223',
            'view_count': int,
            'description': 'md5:9246c181614e23754571995104da92e0',
            'age_limit': 13,
            'thumbnail': r're:^https://picon\.ngfiles\.com/297000/flash_297383_card\.png',
        },
    }, {
        'url': 'https://www.newgrounds.com/portal/view/297383/format/flash',
        'md5': '5d05585a9a0caca059f5abfbd3865524',
        'info_dict': {
            'id': '297383',
            'ext': 'swf',
            'title': 'Metal Gear Awesome',
            'description': 'Metal Gear Awesome',
            'uploader': 'Egoraptor',
            'upload_date': '20060223',
            'timestamp': 1140681292,
            'view_count': int,
            'age_limit': 13,
            'thumbnail': r're:^https://picon\.ngfiles\.com/297000/flash_297383_card\.png',
        },
    }, {
        'url': 'https://www.newgrounds.com/portal/view/823109',
        'info_dict': {
            'id': '823109',
            'ext': 'mp4',
            'title': 'Rouge Futa Fleshlight Fuck',
            'description': 'I made a fleshlight model and I wanted to use it in an animation. Based on a video by CDNaturally.',
            'uploader': 'DefaultUser12',
            'upload_date': '20211122',
            'timestamp': 1637611540,
            'view_count': int,
            'age_limit': 18,
            'thumbnail': r're:^https://picon\.ngfiles\.com/823000/flash_823109_card\.png',
        },
    }]
    # Site rating letters (e/t/m/a) mapped to age limits
    _AGE_LIMIT = {
        'e': 0,
        't': 13,
        'm': 17,
        'a': 18,
    }
    _LOGIN_URL = 'https://www.newgrounds.com/passport'

    def _perform_login(self, username, password):
        """Log in via the Passport form; raises on API-reported errors."""
        login_webpage = self._download_webpage(self._LOGIN_URL, None, 'Downloading login page')
        login_url = urljoin(self._LOGIN_URL, self._search_regex(
            r'<form action="([^"]+)"', login_webpage, 'login endpoint', default=None))
        result = self._download_json(login_url, None, 'Logging in', headers={
            'Accept': 'application/json',
            'Referer': self._LOGIN_URL,
            'X-Requested-With': 'XMLHttpRequest',
        }, data=urlencode_postdata({
            # Hidden inputs carry the CSRF token and form state
            **self._hidden_inputs(login_webpage),
            'username': username,
            'password': password,
        }))
        if errors := traverse_obj(result, ('errors', ..., {str})):
            raise ExtractorError(', '.join(errors) or 'Unknown Error', expected=True)

    def _real_extract(self, url):
        """Extract a single submission.

        Two source paths exist: an embedController blob inside the page
        (single "source" format, used for audio and flash submissions), or
        the portal/video JSON API (multiple mp4 renditions).
        """
        media_id = self._match_id(url)
        try:
            webpage = self._download_webpage(url, media_id)
        except ExtractorError as error:
            # 401 means the submission is login-gated (e.g. adult content)
            if isinstance(error.cause, HTTPError) and error.cause.status == 401:
                self.raise_login_required()
            raise

        media_url_string = self._search_regex(
            r'embedController\(\[{"url"\s*:\s*("[^"]+"),', webpage, 'media url', default=None)
        if media_url_string:
            uploader = None
            formats = [{
                # The URL is a JSON-encoded string literal inside the page
                'url': self._parse_json(media_url_string, media_id),
                'format_id': 'source',
                'quality': 1,
            }]
        else:
            json_video = self._download_json(f'https://www.newgrounds.com/portal/video/{media_id}', media_id, headers={
                'Accept': 'application/json',
                'Referer': url,
                'X-Requested-With': 'XMLHttpRequest',
            })

            formats = []
            uploader = traverse_obj(json_video, ('author', {str}))
            for format_id, sources in traverse_obj(json_video, ('sources', {dict.items}, ...)):
                # format_id is e.g. '720p'; strip the trailing letter for quality
                quality = int_or_none(format_id[:-1])
                formats.extend({
                    'format_id': format_id,
                    'quality': quality,
                    'url': url,
                } for url in traverse_obj(sources, (..., 'src', {url_or_none})))

        # Fall back to scraping the uploader name from the page markup
        if not uploader:
            uploader = self._html_search_regex(
                (r'(?s)<h4[^>]*>(.+?)</h4>.*?<em>\s*(?:Author|Artist)\s*</em>',
                 r'(?:Author|Writer)\s*<a[^>]+>([^<]+)'), webpage, 'uploader',
                fatal=False)

        if len(formats) == 1:
            formats[0]['filesize'] = int_or_none(self._html_search_regex(
                r'"filesize"\s*:\s*["\']?([\d]+)["\']?,', webpage, 'filesize', default=None))

            # Mark audio submissions as video-less so format selection works
            video_type_description = self._html_search_regex(
                r'"description"\s*:\s*["\']?([^"\']+)["\']?,', webpage, 'media type', default=None)
            if video_type_description == 'Audio File':
                formats[0]['vcodec'] = 'none'

        self._check_formats(formats, media_id)

        return {
            'id': media_id,
            'title': self._html_extract_title(webpage),
            'uploader': uploader,
            'timestamp': unified_timestamp(self._search_regex(
                r'itemprop="(?:uploadDate|datePublished)"\s+content="([^"]+)"',
                webpage, 'timestamp', default=None)),
            'duration': parse_duration(self._html_search_regex(
                r'"duration"\s*:\s*["\']?(\d+)["\']?', webpage, 'duration', default=None)),
            'formats': formats,
            'thumbnail': self._og_search_thumbnail(webpage),
            'description': (
                clean_html(get_element_by_id('author_comments', webpage))
                or self._og_search_description(webpage)),
            'age_limit': self._AGE_LIMIT.get(self._html_search_regex(
                r'<h2\s+class=["\']rated-([etma])["\']', webpage, 'age_limit', default='e')),
            'view_count': parse_count(self._html_search_regex(
                r'(?s)<dt>\s*(?:Views|Listens)\s*</dt>\s*<dd>([\d\.,]+)</dd>',
                webpage, 'view count', default=None)),
        }


class NewgroundsPlaylistIE(InfoExtractor):
    """Extractor for Newgrounds collection and search-result pages."""

    IE_NAME = 'Newgrounds:playlist'
    _VALID_URL = r'https?://(?:www\.)?newgrounds\.com/(?:collection|[^/]+/search/[^/]+)/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://www.newgrounds.com/collection/cats',
        'info_dict': {
            'id': 'cats',
            'title': 'Cats',
        },
        'playlist_mincount': 45,
    }, {
        'url': 'https://www.newgrounds.com/collection/dogs',
        'info_dict': {
            'id': 'dogs',
            'title': 'Dogs',
        },
        'playlist_mincount': 26,
    }, {
        'url': 'http://www.newgrounds.com/audio/search/title/cats',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Collect submission links from the page's main column."""
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)

        title = self._html_extract_title(webpage, default=None)

        # cut left menu
        webpage = self._search_regex(
            r'(?s)<div[^>]+\bclass=["\']column wide(.+)',
            webpage, 'wide column', default=webpage)

        entries = []
        for a, path, media_id in re.findall(
                r'(<a[^>]+\bhref=["\'][^"\']+((?:portal/view|audio/listen)/(\d+))[^>]+>)',
                webpage):
            # Only actual submission cards, not other links to the same URLs
            a_class = extract_attributes(a).get('class')
            if a_class not in ('item-portalsubmission', 'item-audiosubmission'):
                continue
            entries.append(
                self.url_result(
                    f'https://www.newgrounds.com/{path}',
                    ie=NewgroundsIE.ie_key(), video_id=media_id))

        return self.playlist_result(entries, playlist_id, title)


class NewgroundsUserIE(InfoExtractor):
    """Extractor for a user's movies/audio listings (paged)."""

    IE_NAME = 'Newgrounds:user'
    _VALID_URL = r'https?://(?P<id>[^\.]+)\.newgrounds\.com/(?:movies|audio)/?(?:[#?]|$)'
    _TESTS = [{
        'url': 'https://burn7.newgrounds.com/audio',
        'info_dict': {
            'id': 'burn7',
        },
        'playlist_mincount': 150,
    }, {
        'url': 'https://burn7.newgrounds.com/movies',
        'info_dict': {
            'id': 'burn7',
        },
        'playlist_mincount': 2,
    }, {
        'url': 'https://brian-beaton.newgrounds.com/movies',
        'info_dict': {
            'id': 'brian-beaton',
        },
        'playlist_mincount': 10,
    }]
    _PAGE_SIZE = 30

    def _fetch_page(self, channel_id, url, page):
        """Yield url_results for one listing page (API pages are 1-based)."""
        page += 1
        posts_info = self._download_json(
            f'{url}?page={page}', channel_id,
            note=f'Downloading page {page}', headers={
                'Accept': 'application/json, text/javascript, */*; q = 0.01',
                'X-Requested-With': 'XMLHttpRequest',
            })
        for post in traverse_obj(posts_info, ('items', ..., ..., {str})):
            path, media_id = self._search_regex(
                r'<a[^>]+\bhref=["\'][^"\']+((?:portal/view|audio/listen)/(\d+))[^>]+>',
                post, 'url', group=(1, 2))
            yield self.url_result(f'https://www.newgrounds.com/{path}', NewgroundsIE.ie_key(), media_id)

    def _real_extract(self, url):
        channel_id = self._match_id(url)

        entries = OnDemandPagedList(functools.partial(
            self._fetch_page, channel_id, url), self._PAGE_SIZE)

        return self.playlist_result(entries, channel_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/redbulltv.py
yt_dlp/extractor/redbulltv.py
from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
    ExtractorError,
    float_or_none,
)


class RedBullTVIE(InfoExtractor):
    """Extractor for Red Bull TV videos addressed by AP- asset ids."""

    _VALID_URL = r'https?://(?:www\.)?redbull(?:\.tv|\.com(?:/[^/]+)?(?:/tv)?)(?:/events/[^/]+)?/(?:videos?|live|(?:film|episode)s)/(?P<id>AP-\w+)'
    _TESTS = [{
        # film
        'url': 'https://www.redbull.tv/video/AP-1Q6XCDTAN1W11',
        'md5': 'fb0445b98aa4394e504b413d98031d1f',
        'info_dict': {
            'id': 'AP-1Q6XCDTAN1W11',
            'ext': 'mp4',
            'title': 'ABC of... WRC - ABC of... S1E6',
            'description': 'md5:5c7ed8f4015c8492ecf64b6ab31e7d31',
            'duration': 1582.04,
        },
    }, {
        # episode
        'url': 'https://www.redbull.tv/video/AP-1PMHKJFCW1W11',
        'info_dict': {
            'id': 'AP-1PMHKJFCW1W11',
            'ext': 'mp4',
            'title': 'Grime - Hashtags S2E4',
            'description': 'md5:5546aa612958c08a98faaad4abce484d',
            'duration': 904,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://www.redbull.com/int-en/tv/video/AP-1UWHCAR9S1W11/rob-meets-sam-gaze?playlist=playlists::3f81040a-2f31-4832-8e2e-545b1d39d173',
        'only_matching': True,
    }, {
        'url': 'https://www.redbull.com/us-en/videos/AP-1YM9QCYE52111',
        'only_matching': True,
    }, {
        'url': 'https://www.redbull.com/us-en/events/AP-1XV2K61Q51W11/live/AP-1XUJ86FDH1W11',
        'only_matching': True,
    }, {
        'url': 'https://www.redbull.com/int-en/films/AP-1ZSMAW8FH2111',
        'only_matching': True,
    }, {
        'url': 'https://www.redbull.com/int-en/episodes/AP-1TQWK7XE11W11',
        'only_matching': True,
    }]

    def extract_info(self, video_id):
        """Resolve an AP- asset id via the v3 API to formats + metadata.

        Shared with RedBullEmbedIE, which resolves rrn: ids to asset ids
        first. A fresh session token is requested on every call.
        """
        session = self._download_json(
            'https://api.redbull.tv/v3/session', video_id,
            note='Downloading access token', query={
                'category': 'personal_computer',
                'os_family': 'http',
            })
        if session.get('code') == 'error':
            raise ExtractorError('{} said: {}'.format(
                self.IE_NAME, session['message']))
        token = session['token']

        try:
            video = self._download_json(
                'https://api.redbull.tv/v3/products/' + video_id,
                video_id, note='Downloading video information',
                headers={'Authorization': token},
            )
        except ExtractorError as e:
            if isinstance(e.cause, HTTPError) and e.cause.status == 404:
                error_message = self._parse_json(
                    e.cause.response.read().decode(), video_id)['error']
                raise ExtractorError(f'{self.IE_NAME} said: {error_message}', expected=True)
            raise

        title = video['title'].strip()

        formats, subtitles = self._extract_m3u8_formats_and_subtitles(
            f'https://dms.redbull.tv/v3/{video_id}/{token}/playlist.m3u8',
            video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls')

        # Resource names look like 'closed_caption_<ext>'; index 2 is the ext
        for resource in video.get('resources', []):
            if resource.startswith('closed_caption_'):
                splitted_resource = resource.split('_')
                if splitted_resource[2]:
                    subtitles.setdefault('en', []).append({
                        'url': f'https://resources.redbull.tv/{video_id}/{resource}',
                        'ext': splitted_resource[2],
                    })

        subheading = video.get('subheading')
        if subheading:
            title += f' - {subheading}'

        return {
            'id': video_id,
            'title': title,
            'description': video.get('long_description') or video.get(
                'short_description'),
            'duration': float_or_none(video.get('duration'), scale=1000),
            'formats': formats,
            'subtitles': subtitles,
        }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        return self.extract_info(video_id)


class RedBullEmbedIE(RedBullTVIE):  # XXX: Do not subclass from concrete IE
    """Resolves rrn:content ids to AP- asset ids via the GraphQL edge API."""

    _VALID_URL = r'https?://(?:www\.)?redbull\.com/embed/(?P<id>rrn:content:[^:]+:[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}:[a-z]{2}-[A-Z]{2,3})'
    _TESTS = [{
        # HLS manifest accessible only using assetId
        'url': 'https://www.redbull.com/embed/rrn:content:episode-videos:f3021f4f-3ed4-51ac-915a-11987126e405:en-INT',
        'only_matching': True,
    }]
    # GraphQL inline fragment; %s is the concrete resource type name
    _VIDEO_ESSENSE_TMPL = '''... on %s {
      videoEssence {
        attributes
      }
    }'''

    def _real_extract(self, url):
        rrn_id = self._match_id(url)
        # Query both possible resource types; only one fragment will match
        asset_id = self._download_json(
            'https://edge-graphql.crepo-production.redbullaws.com/v1/graphql',
            rrn_id, headers={
                'Accept': 'application/json',
                'API-KEY': 'e90a1ff11335423998b100c929ecc866',
            }, query={
                'query': '''{
  resource(id: "%s", enforceGeoBlocking: false) {
    %s
    %s
  }
}''' % (rrn_id, self._VIDEO_ESSENSE_TMPL % 'LiveVideo', self._VIDEO_ESSENSE_TMPL % 'VideoResource'),  # noqa: UP031
            })['data']['resource']['videoEssence']['attributes']['assetId']
        return self.extract_info(asset_id)


class RedBullTVRrnContentIE(InfoExtractor):
    """Redirects /tv/ URLs carrying bare rrn ids to RedBullEmbedIE."""

    _VALID_URL = r'https?://(?:www\.)?redbull\.com/(?P<region>[a-z]{2,3})-(?P<lang>[a-z]{2})/tv/(?:video|live|film)/(?P<id>rrn:content:[^:]+:[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
    _TESTS = [{
        'url': 'https://www.redbull.com/int-en/tv/video/rrn:content:live-videos:e3e6feb4-e95f-50b7-962a-c70f8fd13c73/mens-dh-finals-fort-william',
        'only_matching': True,
    }, {
        'url': 'https://www.redbull.com/int-en/tv/video/rrn:content:videos:a36a0f36-ff1b-5db8-a69d-ee11a14bf48b/tn-ts-style?playlist=rrn:content:event-profiles:83f05926-5de8-5389-b5e4-9bb312d715e8:extras',
        'only_matching': True,
    }, {
        'url': 'https://www.redbull.com/int-en/tv/film/rrn:content:films:d1f4d00e-4c04-5d19-b510-a805ffa2ab83/follow-me',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        region, lang, rrn_id = self._match_valid_url(url).groups()
        # Embed ids carry a locale suffix, e.g. ':en-INT'
        rrn_id += f':{lang}-{region.upper()}'
        return self.url_result(
            'https://www.redbull.com/embed/' + rrn_id,
            RedBullEmbedIE.ie_key(), rrn_id)


class RedBullIE(InfoExtractor):
    """Resolves slug-based redbull.com URLs to rrn ids, with locale fallback."""

    _VALID_URL = r'https?://(?:www\.)?redbull\.com/(?P<region>[a-z]{2,3})-(?P<lang>[a-z]{2})/(?P<type>(?:episode|film|(?:(?:recap|trailer)-)?video)s|live)/(?!AP-|rrn:content:)(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://www.redbull.com/int-en/episodes/grime-hashtags-s02-e04',
        'md5': 'db8271a7200d40053a1809ed0dd574ff',
        'info_dict': {
            'id': 'AA-1MT8DQWA91W14',
            'ext': 'mp4',
            'title': 'Grime - Hashtags S2E4',
            'description': 'md5:5546aa612958c08a98faaad4abce484d',
        },
    }, {
        'url': 'https://www.redbull.com/int-en/films/kilimanjaro-mountain-of-greatness',
        'only_matching': True,
    }, {
        'url': 'https://www.redbull.com/int-en/recap-videos/uci-mountain-bike-world-cup-2017-mens-xco-finals-from-vallnord',
        'only_matching': True,
    }, {
        'url': 'https://www.redbull.com/int-en/trailer-videos/kings-of-content',
        'only_matching': True,
    }, {
        'url': 'https://www.redbull.com/int-en/videos/tnts-style-red-bull-dance-your-style-s1-e12',
        'only_matching': True,
    }, {
        'url': 'https://www.redbull.com/int-en/live/mens-dh-finals-fort-william',
        'only_matching': True,
    }, {
        # only available on the int-en website so a fallback is need for the API
        # https://www.redbull.com/v3/api/graphql/v1/v3/query/en-GB>en-INT?filter[uriSlug]=fia-wrc-saturday-recap-estonia&rb3Schema=v1:hero
        'url': 'https://www.redbull.com/gb-en/live/fia-wrc-saturday-recap-estonia',
        'only_matching': True,
    }]
    # Languages for which an INT (international) fallback locale exists
    _INT_FALLBACK_LIST = ['de', 'en', 'es', 'fr']
    # Regions that fall back to the LAT (Latin America) locale
    _LAT_FALLBACK_MAP = ['ar', 'bo', 'car', 'cl', 'co', 'mx', 'pe']

    def _real_extract(self, url):
        region, lang, filter_type, display_id = self._match_valid_url(url).groups()
        # Normalize URL path segments to the API's filter[type] values
        if filter_type == 'episodes':
            filter_type = 'episode-videos'
        elif filter_type == 'live':
            filter_type = 'live-videos'

        # Build a '>'-joined locale fallback chain, e.g. 'en-GB>en-INT'
        regions = [region.upper()]
        if region != 'int':
            if region in self._LAT_FALLBACK_MAP:
                regions.append('LAT')
            if lang in self._INT_FALLBACK_LIST:
                regions.append('INT')
        locale = '>'.join([f'{lang}-{reg}' for reg in regions])

        rrn_id = self._download_json(
            'https://www.redbull.com/v3/api/graphql/v1/v3/query/' + locale,
            display_id, query={
                'filter[type]': filter_type,
                'filter[uriSlug]': display_id,
                'rb3Schema': 'v1:hero',
            })['data']['id']

        return self.url_result(
            'https://www.redbull.com/embed/' + rrn_id,
            RedBullEmbedIE.ie_key(), rrn_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/southpark.py
yt_dlp/extractor/southpark.py
from .mtv import MTVServicesBaseIE class SouthParkIE(MTVServicesBaseIE): IE_NAME = 'southpark.cc.com' _VALID_URL = r'https?://(?:www\.)?southpark(?:\.cc|studios)\.com/(?:video-clips|episodes|collections)/(?P<id>[^?#]+)' _TESTS = [{ 'url': 'https://southpark.cc.com/video-clips/d7wr06/south-park-you-all-agreed-to-counseling', 'info_dict': { 'id': '31929ad5-8269-11eb-8774-70df2f866ace', 'ext': 'mp4', 'display_id': 'd7wr06/south-park-you-all-agreed-to-counseling', 'title': 'You All Agreed to Counseling', 'description': 'md5:01f78fb306c7042f3f05f3c78edfc212', 'duration': 134.552, 'thumbnail': r're:https://images\.paramount\.tech/uri/mgid:arc:imageassetref:', 'series': 'South Park', 'season': 'Season 24', 'season_number': 24, 'episode': 'Episode 2', 'episode_number': 2, 'timestamp': 1615352400, 'upload_date': '20210310', 'release_timestamp': 1615352400, 'release_date': '20210310', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://southpark.cc.com/episodes/940f8z/south-park-cartman-gets-an-anal-probe-season-1-ep-1', 'info_dict': { 'id': '5fb8887e-ecfd-11e0-aca6-0026b9414f30', 'ext': 'mp4', 'display_id': '940f8z/south-park-cartman-gets-an-anal-probe-season-1-ep-1', 'title': 'Cartman Gets An Anal Probe', 'description': 'md5:964e1968c468545752feef102b140300', 'channel': 'Comedy Central', 'duration': 1319.0, 'thumbnail': r're:https://images\.paramount\.tech/uri/mgid:arc:imageassetref:', 'series': 'South Park', 'season': 'Season 1', 'season_number': 1, 'episode': 'Episode 1', 'episode_number': 1, 'timestamp': 871473600, 'upload_date': '19970813', 'release_timestamp': 871473600, 'release_date': '19970813', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://southpark.cc.com/collections/dejukt/south-park-best-of-mr-mackey/tphx9j', 'only_matching': True, }, { 'url': 'https://www.southparkstudios.com/episodes/h4o269/south-park-stunning-and-brave-season-19-ep-1', 'only_matching': True, }] class SouthParkEsIE(MTVServicesBaseIE): IE_NAME = 
'southpark.cc.com:español' _VALID_URL = r'https?://(?:www\.)?southpark\.cc\.com/es/episodios/(?P<id>[^?#]+)' _TESTS = [{ 'url': 'https://southpark.cc.com/es/episodios/er4a32/south-park-aumento-de-peso-4000-temporada-1-ep-2', 'info_dict': { 'id': '5fb94f0c-ecfd-11e0-aca6-0026b9414f30', 'ext': 'mp4', 'display_id': 'er4a32/south-park-aumento-de-peso-4000-temporada-1-ep-2', 'title': 'Aumento de peso 4000', 'description': 'md5:a939b4819ea74c245a0cde180de418c0', 'channel': 'Comedy Central', 'duration': 1320.0, 'thumbnail': r're:https://images\.paramount\.tech/uri/mgid:arc:imageassetref:', 'series': 'South Park', 'season': 'Season 1', 'season_number': 1, 'episode': 'Episode 2', 'episode_number': 2, 'timestamp': 872078400, 'upload_date': '19970820', 'release_timestamp': 872078400, 'release_date': '19970820', }, 'params': {'skip_download': 'm3u8'}, }] class SouthParkDeIE(MTVServicesBaseIE): IE_NAME = 'southpark.de' _VALID_URL = r'https?://(?:www\.)?southpark\.de/(?:en/)?(?:videoclip|collections|episodes|video-clips|folgen)/(?P<id>[^?#]+)' _GEO_COUNTRIES = ['DE'] _GEO_BYPASS = True _TESTS = [{ 'url': 'https://www.southpark.de/videoclip/rsribv/south-park-rueckzug-zum-gummibonbon-wald', 'only_matching': True, }, { 'url': 'https://www.southpark.de/folgen/jiru42/south-park-verkabelung-staffel-23-ep-9', 'only_matching': True, }, { 'url': 'https://www.southpark.de/collections/zzno5a/south-park-good-eats/7q26gp', 'only_matching': True, }, { # clip 'url': 'https://www.southpark.de/en/video-clips/ct46op/south-park-tooth-fairy-cartman', 'info_dict': { 'ext': 'mp4', 'id': 'e99d45ea-ed00-11e0-aca6-0026b9414f30', 'display_id': 'ct46op/south-park-tooth-fairy-cartman', 'title': 'Tooth Fairy Cartman', 'description': 'Cartman steals Butters\' tooth and gets four dollars for it.', 'duration': 93.26, 'thumbnail': r're:https://images\.paramount\.tech/uri/mgid:arc:imageassetref:', 'series': 'South Park', 'season': 'Season 4', 'season_number': 4, 'episode': 'Episode 1', 'episode_number': 1, 
'timestamp': 954990360, 'upload_date': '20000406', 'release_timestamp': 954990360, 'release_date': '20000406', }, 'params': {'skip_download': 'm3u8'}, }, { # episode 'url': 'https://www.southpark.de/en/episodes/yy0vjs/south-park-the-pandemic-special-season-24-ep-1', 'info_dict': { 'ext': 'mp4', 'id': '230a4f02-f583-11ea-834d-70df2f866ace', 'display_id': 'yy0vjs/south-park-the-pandemic-special-season-24-ep-1', 'title': 'The Pandemic Special', 'description': 'md5:ae0d875eff169dcbed16b21531857ac1', 'channel': 'Comedy Central', 'duration': 2724.0, 'thumbnail': r're:https://images\.paramount\.tech/uri/mgid:arc:imageassetref:', 'series': 'South Park', 'season': 'Season 24', 'season_number': 24, 'episode': 'Episode 1', 'episode_number': 1, 'timestamp': 1601932260, 'upload_date': '20201005', 'release_timestamp': 1601932270, 'release_date': '20201005', }, 'params': {'skip_download': 'm3u8'}, }, { # clip 'url': 'https://www.southpark.de/videoclip/ct46op/south-park-zahnfee-cartman', 'info_dict': { 'ext': 'mp4', 'id': 'e99d45ea-ed00-11e0-aca6-0026b9414f30', 'display_id': 'ct46op/south-park-zahnfee-cartman', 'title': 'Zahnfee Cartman', 'description': 'md5:b917eec991d388811d911fd1377671ac', 'duration': 93.26, 'thumbnail': r're:https://images\.paramount\.tech/uri/mgid:arc:imageassetref:', 'series': 'South Park', 'season': 'Season 4', 'season_number': 4, 'episode': 'Episode 1', 'episode_number': 1, 'timestamp': 954990360, 'upload_date': '20000406', 'release_timestamp': 954990360, 'release_date': '20000406', }, 'params': {'skip_download': 'm3u8'}, }, { # episode 'url': 'https://www.southpark.de/folgen/4r4367/south-park-katerstimmung-staffel-12-ep-3', 'info_dict': { 'ext': 'mp4', 'id': '68c79aa4-ecfd-11e0-aca6-0026b9414f30', 'display_id': '4r4367/south-park-katerstimmung-staffel-12-ep-3', 'title': 'Katerstimmung', 'description': 'md5:94e0e2cd568ffa635e0725518bb4b180', 'channel': 'Comedy Central', 'duration': 1320.0, 'thumbnail': 
r're:https://images\.paramount\.tech/uri/mgid:arc:imageassetref:', 'series': 'South Park', 'season': 'Season 12', 'season_number': 12, 'episode': 'Episode 3', 'episode_number': 3, 'timestamp': 1206504000, 'upload_date': '20080326', 'release_timestamp': 1206504000, 'release_date': '20080326', }, 'params': {'skip_download': 'm3u8'}, }] class SouthParkLatIE(MTVServicesBaseIE): IE_NAME = 'southpark.lat' _VALID_URL = r'https?://(?:www\.)?southpark\.lat/(?:en/)?(?:video-?clips?|collections|episod(?:e|io)s)/(?P<id>[^?#]+)' _GEO_COUNTRIES = ['MX'] _GEO_BYPASS = True _TESTS = [{ 'url': 'https://www.southpark.lat/en/video-clips/ct46op/south-park-tooth-fairy-cartman', 'info_dict': { 'ext': 'mp4', 'id': 'e99d45ea-ed00-11e0-aca6-0026b9414f30', 'display_id': 'ct46op/south-park-tooth-fairy-cartman', 'title': 'Tooth Fairy Cartman', 'description': 'Cartman steals Butters\' tooth and gets four dollars for it.', 'duration': 93.26, 'thumbnail': r're:https://images\.paramount\.tech/uri/mgid:arc:imageassetref:', 'series': 'South Park', 'season': 'Season 4', 'season_number': 4, 'episode': 'Episode 1', 'episode_number': 1, 'timestamp': 954990360, 'upload_date': '20000406', 'release_timestamp': 954990360, 'release_date': '20000406', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.southpark.lat/episodios/9h0qbg/south-park-orgia-gatuna-temporada-3-ep-7', 'info_dict': { 'ext': 'mp4', 'id': '600d273a-ecfd-11e0-aca6-0026b9414f30', 'display_id': '9h0qbg/south-park-orgia-gatuna-temporada-3-ep-7', 'title': 'Orgía Gatuna ', 'description': 'md5:73c6648413f5977026abb792a25c65d5', 'channel': 'Comedy Central', 'duration': 1319.0, 'thumbnail': r're:https://images\.paramount\.tech/uri/mgid:arc:imageassetref:', 'series': 'South Park', 'season': 'Season 3', 'season_number': 3, 'episode': 'Episode 7', 'episode_number': 7, 'timestamp': 931924800, 'upload_date': '19990714', 'release_timestamp': 931924800, 'release_date': '19990714', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 
'https://www.southpark.lat/en/collections/29ve08/south-park-heating-up/lydbrc', 'only_matching': True, }] class SouthParkDkIE(MTVServicesBaseIE): IE_NAME = 'southparkstudios.nu' _VALID_URL = r'https?://(?:www\.)?southparkstudios\.nu/(?:video-clips|episodes|collections)/(?P<id>[^?#]+)' _GEO_COUNTRIES = ['DK'] _GEO_BYPASS = True _TESTS = [{ 'url': 'https://www.southparkstudios.nu/episodes/y3uvvc/south-park-grounded-vindaloop-season-18-ep-7', 'info_dict': { 'ext': 'mp4', 'id': 'f60690a7-21a7-4ee7-8834-d7099a8707ab', 'display_id': 'y3uvvc/south-park-grounded-vindaloop-season-18-ep-7', 'title': 'Grounded Vindaloop', 'description': 'Butters is convinced he\'s living in a virtual reality.', 'channel': 'Comedy Central', 'duration': 1319.0, 'thumbnail': r're:https://images\.paramount\.tech/uri/mgid:arc:imageassetref:', 'series': 'South Park', 'season': 'Season 18', 'season_number': 18, 'episode': 'Episode 7', 'episode_number': 7, 'timestamp': 1415847600, 'upload_date': '20141113', 'release_timestamp': 1415768400, 'release_date': '20141112', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.southparkstudios.nu/collections/8dk7kr/south-park-best-of-south-park/sd5ean', 'only_matching': True, }, { 'url': 'https://www.southparkstudios.nu/video-clips/k42mrf/south-park-kick-the-baby', 'only_matching': True, }] class SouthParkComBrIE(MTVServicesBaseIE): IE_NAME = 'southparkstudios.com.br' _VALID_URL = r'https?://(?:www\.)?southparkstudios\.com\.br/(?:en/)?(?:video-clips|episodios|collections|episodes)/(?P<id>[^?#]+)' _GEO_COUNTRIES = ['BR'] _GEO_BYPASS = True _TESTS = [{ 'url': 'https://www.southparkstudios.com.br/video-clips/3vifo0/south-park-welcome-to-mar-a-lago7', 'info_dict': { 'ext': 'mp4', 'id': 'ccc3e952-7352-11f0-b405-16fff45bc035', 'display_id': '3vifo0/south-park-welcome-to-mar-a-lago7', 'title': 'Welcome to Mar-a-Lago', 'description': 'The President welcomes Mr. 
Mackey to Mar-a-Lago, a magical place where anything can happen.', 'duration': 139.223, 'thumbnail': r're:https://images\.paramount\.tech/uri/mgid:arc:imageassetref:', 'series': 'South Park', 'season': 'Season 27', 'season_number': 27, 'episode': 'Episode 2', 'episode_number': 2, 'timestamp': 1754546400, 'upload_date': '20250807', 'release_timestamp': 1754546400, 'release_date': '20250807', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.southparkstudios.com.br/episodios/940f8z/south-park-cartman-ganha-uma-sonda-anal-temporada-1-ep-1', 'only_matching': True, }, { 'url': 'https://www.southparkstudios.com.br/collections/8dk7kr/south-park-best-of-south-park/sd5ean', 'only_matching': True, }, { 'url': 'https://www.southparkstudios.com.br/en/episodes/5v0oap/south-park-south-park-the-25th-anniversary-concert-ep-1', 'only_matching': True, }] class SouthParkCoUkIE(MTVServicesBaseIE): IE_NAME = 'southparkstudios.co.uk' _VALID_URL = r'https?://(?:www\.)?southparkstudios\.co\.uk/(?:video-clips|collections|episodes)/(?P<id>[^?#]+)' _GEO_COUNTRIES = ['UK'] _GEO_BYPASS = True _TESTS = [{ 'url': 'https://www.southparkstudios.co.uk/video-clips/8kabfr/south-park-respectclydesauthority', 'info_dict': { 'ext': 'mp4', 'id': 'f6d9af23-734e-11f0-b405-16fff45bc035', 'display_id': '8kabfr/south-park-respectclydesauthority', 'title': '#RespectClydesAuthority', 'description': 'After learning about Clyde\'s Podcast, Cartman needs to see it for himself.', 'duration': 45.045, 'thumbnail': r're:https://images\.paramount\.tech/uri/mgid:arc:imageassetref:', 'series': 'South Park', 'season': 'Season 27', 'season_number': 27, 'episode': 'Episode 2', 'episode_number': 2, 'timestamp': 1754546400, 'upload_date': '20250807', 'release_timestamp': 1754546400, 'release_date': '20250807', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.southparkstudios.co.uk/episodes/e1yoxn/south-park-imaginationland-season-11-ep-10', 'only_matching': True, }, { 'url': 
'https://www.southparkstudios.co.uk/collections/8dk7kr/south-park-best-of-south-park/sd5ean', 'only_matching': True, }]
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/eurosport.py
yt_dlp/extractor/eurosport.py
from .common import InfoExtractor from ..utils import traverse_obj class EurosportIE(InfoExtractor): _VALID_URL = r'''(?x) https?://(?: (?:(?:www|espanol)\.)?eurosport\.(?:com(?:\.tr)?|de|dk|es|fr|hu|it|nl|no|ro)| eurosport\.tvn24\.pl )/[\w-]+/(?:[\w-]+/[\d-]+/)?[\w.-]+_(?P<id>vid\d+) ''' _TESTS = [{ 'url': 'https://www.eurosport.com/tennis/roland-garros/2022/highlights-rafael-nadal-brushes-aside-caper-ruud-to-win-record-extending-14th-french-open-title_vid1694147/video.shtml', 'info_dict': { 'id': '2480939', 'ext': 'mp4', 'title': 'Highlights: Rafael Nadal brushes aside Caper Ruud to win record-extending 14th French Open title', 'description': 'md5:b564db73ecfe4b14ebbd8e62a3692c76', 'thumbnail': 'https://imgresizer.eurosport.com/unsafe/1280x960/smart/filters:format(jpeg)/origin-imgresizer.eurosport.com/2022/06/05/3388285-69245968-2560-1440.png', 'duration': 195.0, 'display_id': 'vid1694147', 'timestamp': 1654446698, 'upload_date': '20220605', }, }, { 'url': 'https://www.eurosport.com/tennis/roland-garros/2022/watch-the-top-five-shots-from-men-s-final-as-rafael-nadal-beats-casper-ruud-to-seal-14th-french-open_vid1694283/video.shtml', 'info_dict': { 'id': '2481254', 'ext': 'mp4', 'title': 'md5:149dcc5dfb38ab7352acc008cc9fb071', 'duration': 130.0, 'thumbnail': 'https://imgresizer.eurosport.com/unsafe/1280x960/smart/filters:format(jpeg)/origin-imgresizer.eurosport.com/2022/06/05/3388422-69248708-2560-1440.png', 'description': 'md5:a0c8a7f6b285e48ae8ddbe7aa85cfee6', 'display_id': 'vid1694283', 'timestamp': 1654456090, 'upload_date': '20220605', }, }, { # geo-fence but can bypassed by xff 'url': 'https://www.eurosport.com/cycling/tour-de-france-femmes/2022/incredible-ride-marlen-reusser-storms-to-stage-4-win-at-tour-de-france-femmes_vid1722221/video.shtml', 'info_dict': { 'id': '2582552', 'ext': 'mp4', 'title': '‘Incredible ride!’ - Marlen Reusser storms to Stage 4 win at Tour de France Femmes', 'duration': 188.0, 'display_id': 'vid1722221', 'timestamp': 1658936167, 
'thumbnail': 'https://imgresizer.eurosport.com/unsafe/1280x960/smart/filters:format(jpeg)/origin-imgresizer.eurosport.com/2022/07/27/3423347-69852108-2560-1440.jpg', 'description': 'md5:32bbe3a773ac132c57fb1e8cca4b7c71', 'upload_date': '20220727', }, }, { 'url': 'https://www.eurosport.com/football/champions-league/2022-2023/pep-guardiola-emotionally-destroyed-after-manchester-city-win-over-bayern-munich-in-champions-league_vid1896254/video.shtml', 'info_dict': { 'id': '3096477', 'ext': 'mp4', 'title': 'md5:82edc17370124c7a19b3cf518517583b', 'duration': 84.0, 'description': 'md5:b3f44ef7f5b5b95b24a273b163083feb', 'thumbnail': 'https://imgresizer.eurosport.com/unsafe/1280x960/smart/filters:format(jpeg)/origin-imgresizer.eurosport.com/2023/04/12/3682873-74947393-2560-1440.jpg', 'timestamp': 1681292028, 'upload_date': '20230412', 'display_id': 'vid1896254', }, }, { 'url': 'https://www.eurosport.com/football/last-year-s-semi-final-pain-was-still-there-pep-guardiola-after-man-city-reach-cl-final_vid1914115/video.shtml', 'info_dict': { 'id': '3149108', 'ext': 'mp4', 'title': '\'Last year\'s semi-final pain was still there\' - Pep Guardiola after Man City reach CL final', 'description': 'md5:89ef142fe0170a66abab77fac2955d8e', 'display_id': 'vid1914115', 'timestamp': 1684403618, 'thumbnail': 'https://imgresizer.eurosport.com/unsafe/1280x960/smart/filters:format(jpeg)/origin-imgresizer.eurosport.com/2023/05/18/3707254-75435008-2560-1440.jpg', 'duration': 105.0, 'upload_date': '20230518', }, }, { 'url': 'https://www.eurosport.de/radsport/vuelta-a-espana/2024/vuelta-a-espana-2024-wout-van-aert-und-co.-verzweifeln-an-mcnulty-zeitfahr-krimi-in-lissabon_vid2219478/video.shtml', 'only_matching': True, }, { 'url': 'https://www.eurosport.dk/speedway/mikkel-michelsen-misser-finalen-i-cardiff-se-danskeren-i-semifinalen-her_vid2219363/video.shtml', 'only_matching': True, }, { 'url': 
'https://www.eurosport.nl/mixed-martial-arts/ufc/2022/ufc-305-respect-tussen-adesanya-en-du-plessis_vid2219650/video.shtml', 'only_matching': True, }, { 'url': 'https://www.eurosport.es/ciclismo/la-vuelta-2024-carlos-rodriguez-olvida-la-crono-y-ya-espera-que-llegue-la-montana-no-me-encontre-nada-comodo_vid2219682/video.shtml', 'only_matching': True, }, { 'url': 'https://www.eurosport.fr/football/supercoupe-d-europe/2024-2025/kylian-mbappe-vinicius-junior-eduardo-camavinga-touche.-extraits-de-l-entrainement-du-real-madrid-en-video_vid2216993/video.shtml', 'only_matching': True, }, { 'url': 'https://www.eurosport.it/calcio/serie-a/2024-2025/samardzic-a-bergamo-per-le-visite-mediche-con-l-atalanta_vid2219680/video.shtml', 'only_matching': True, }, { 'url': 'https://www.eurosport.hu/kerekpar/vuelta-a-espana/2024/dramai-harc-a-masodpercekert-meglepetesgyoztes-a-vuelta-nyitoszakaszan_vid2219481/video.shtml', 'only_matching': True, }, { 'url': 'https://www.eurosport.no/golf/fedex-st-jude-championship/2024/ligger-pa-andreplass-sa-skjer-dette-drama_vid30000618/video.shtml', 'only_matching': True, }, { 'url': 'https://www.eurosport.no/golf/fedex-st-jude-championship/2024/ligger-pa-andreplass-sa-skjer-dette-drama_vid2219531/video.shtml', 'only_matching': True, }, { 'url': 'https://www.eurosport.ro/tenis/western-southern-open-2/2024/rezumatul-partidei-dintre-zverev-si-shelton-de-la-cincinnati_vid2219657/video.shtml', 'only_matching': True, }, { 'url': 'https://www.eurosport.com.tr/hentbol/olympic-games-paris-2024/2024/paris-2024-denmark-ile-germany-olimpiyatlarin-onemli-anlari_vid2215836/video.shtml', 'only_matching': True, }, { 'url': 'https://eurosport.tvn24.pl/kolarstwo/tour-de-france-kobiet/2024/kasia-niewiadoma-przed-ostatnim-8.-etapem-tour-de-france-kobiet_vid2219765/video.shtml', 'only_matching': True, }] _TOKEN = None # actually defined in https://netsport.eurosport.io/?variables={"databaseId":<databaseId>,"playoutType":"VDP"}&extensions={"persistedQuery":{"version":1 
.. # but this method require to get sha256 hash _GEO_COUNTRIES = ['DE', 'NL', 'EU', 'IT', 'FR'] # Not complete list but it should work _GEO_BYPASS = False def _real_initialize(self): if EurosportIE._TOKEN is None: EurosportIE._TOKEN = self._download_json( 'https://eu3-prod-direct.eurosport.com/token?realm=eurosport', None, 'Trying to get token')['data']['attributes']['token'] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) json_data = self._download_json( f'https://eu3-prod-direct.eurosport.com/playback/v2/videoPlaybackInfo/sourceSystemId/eurosport-{display_id}', display_id, query={'usePreAuth': True}, headers={'Authorization': f'Bearer {EurosportIE._TOKEN}'})['data'] json_ld_data = self._search_json_ld(webpage, display_id) formats, subtitles = [], {} for stream_type in json_data['attributes']['streaming']: if stream_type == 'hls': fmts, subs = self._extract_m3u8_formats_and_subtitles( traverse_obj(json_data, ('attributes', 'streaming', stream_type, 'url')), display_id, ext='mp4', fatal=False) elif stream_type == 'dash': fmts, subs = self._extract_mpd_formats_and_subtitles( traverse_obj(json_data, ('attributes', 'streaming', stream_type, 'url')), display_id, fatal=False) elif stream_type == 'mss': fmts, subs = self._extract_ism_formats_and_subtitles( traverse_obj(json_data, ('attributes', 'streaming', stream_type, 'url')), display_id, fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) return { 'id': json_data['id'], 'title': json_ld_data.get('title') or self._og_search_title(webpage), 'display_id': display_id, 'formats': formats, 'subtitles': subtitles, 'thumbnails': json_ld_data.get('thumbnails'), 'description': (json_ld_data.get('description') or self._html_search_meta(['og:description', 'description'], webpage)), 'duration': json_ld_data.get('duration'), 'timestamp': json_ld_data.get('timestamp'), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/aol.py
yt_dlp/extractor/aol.py
import re from .yahoo import YahooIE from ..utils import ( ExtractorError, int_or_none, parse_qs, url_or_none, ) class AolIE(YahooIE): # XXX: Do not subclass from concrete IE _WORKING = False IE_NAME = 'aol.com' _VALID_URL = r'(?:aol-video:|https?://(?:www\.)?aol\.(?:com|ca|co\.uk|de|jp)/video/(?:[^/]+/)*)(?P<id>\d{9}|[0-9a-f]{24}|[0-9a-f]{8}-(?:[0-9a-f]{4}-){3}[0-9a-f]{12})' _TESTS = [{ # video with 5min ID 'url': 'https://www.aol.com/video/view/u-s--official-warns-of-largest-ever-irs-phone-scam/518167793/', 'md5': '18ef68f48740e86ae94b98da815eec42', 'info_dict': { 'id': '518167793', 'ext': 'mp4', 'title': 'U.S. Official Warns Of \'Largest Ever\' IRS Phone Scam', 'description': 'A major phone scam has cost thousands of taxpayers more than $1 million, with less than a month until income tax returns are due to the IRS.', 'timestamp': 1395405060, 'upload_date': '20140321', 'uploader': 'Newsy Studio', }, 'params': { # m3u8 download 'skip_download': True, }, }, { # video with vidible ID 'url': 'https://www.aol.com/video/view/netflix-is-raising-rates/5707d6b8e4b090497b04f706/', 'info_dict': { 'id': '5707d6b8e4b090497b04f706', 'ext': 'mp4', 'title': 'Netflix is Raising Rates', 'description': 'Netflix is rewarding millions of it’s long-standing members with an increase in cost. 
Veuer’s Carly Figueroa has more.', 'upload_date': '20160408', 'timestamp': 1460123280, 'uploader': 'Veuer', }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'https://www.aol.com/video/view/park-bench-season-2-trailer/559a1b9be4b0c3bfad3357a7/', 'only_matching': True, }, { 'url': 'https://www.aol.com/video/view/donald-trump-spokeswoman-tones-down-megyn-kelly-attacks/519442220/', 'only_matching': True, }, { 'url': 'aol-video:5707d6b8e4b090497b04f706', 'only_matching': True, }, { 'url': 'https://www.aol.com/video/playlist/PL8245/5ca79d19d21f1a04035db606/', 'only_matching': True, }, { 'url': 'https://www.aol.ca/video/view/u-s-woman-s-family-arrested-for-murder-first-pinned-on-panhandler-police/5c7ccf45bc03931fa04b2fe1/', 'only_matching': True, }, { 'url': 'https://www.aol.co.uk/video/view/-one-dead-and-22-hurt-in-bus-crash-/5cb3a6f3d21f1a072b457347/', 'only_matching': True, }, { 'url': 'https://www.aol.de/video/view/eva-braun-privataufnahmen-von-hitlers-geliebter-werden-digitalisiert/5cb2d49de98ab54c113d3d5d/', 'only_matching': True, }, { 'url': 'https://www.aol.jp/video/playlist/5a28e936a1334d000137da0c/5a28f3151e642219fde19831/', 'only_matching': True, }, { # Yahoo video 'url': 'https://www.aol.com/video/play/991e6700-ac02-11ea-99ff-357400036f61/24bbc846-3e30-3c46-915e-fe8ccd7fcc46/', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) if '-' in video_id: return self._extract_yahoo_video(video_id, 'us') response = self._download_json( f'https://feedapi.b2c.on.aol.com/v1.0/app/videos/aolon/{video_id}/details', video_id)['response'] if response['statusText'] != 'Ok': raise ExtractorError('{} said: {}'.format(self.IE_NAME, response['statusText']), expected=True) video_data = response['data'] formats = [] m3u8_url = url_or_none(video_data.get('videoMasterPlaylist')) if m3u8_url: formats.extend(self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', m3u8_id='hls', fatal=False)) for rendition in 
video_data.get('renditions', []): video_url = url_or_none(rendition.get('url')) if not video_url: continue ext = rendition.get('format') if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( video_url, video_id, 'mp4', m3u8_id='hls', fatal=False)) else: f = { 'url': video_url, 'format_id': rendition.get('quality'), } mobj = re.search(r'(\d+)x(\d+)', video_url) if mobj: f.update({ 'width': int(mobj.group(1)), 'height': int(mobj.group(2)), }) else: qs = parse_qs(video_url) f.update({ 'width': int_or_none(qs.get('w', [None])[0]), 'height': int_or_none(qs.get('h', [None])[0]), }) formats.append(f) return { 'id': video_id, 'title': video_data['title'], 'duration': int_or_none(video_data.get('duration')), 'timestamp': int_or_none(video_data.get('publishDate')), 'view_count': int_or_none(video_data.get('views')), 'description': video_data.get('description'), 'uploader': video_data.get('videoOwner'), 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/onsen.py
yt_dlp/extractor/onsen.py
import base64 import json from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, clean_html, int_or_none, parse_qs, str_or_none, strftime_or_none, update_url, update_url_query, url_or_none, ) from ..utils.traversal import traverse_obj class OnsenIE(InfoExtractor): IE_NAME = 'onsen' IE_DESC = 'インターネットラジオステーション<音泉>' _BASE_URL = 'https://www.onsen.ag' _HEADERS = {'Referer': f'{_BASE_URL}/'} _NETRC_MACHINE = 'onsen' _VALID_URL = r'https?://(?:(?:share|www)\.)onsen\.ag/program/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://share.onsen.ag/program/onsenking?p=90&c=MTA0NjI', 'info_dict': { 'id': '10462', 'ext': 'm4a', 'title': '第SP回', 'cast': 'count:3', 'description': 'md5:de62c80a41c4c8d84da53a1ee681ad18', 'display_id': 'MTA0NjI=', 'media_type': 'sound', 'section_start': 0, 'series': '音泉キング「下野紘」のラジオ きみはもちろん、<音泉>ファミリーだよね?', 'series_id': 'onsenking', 'tags': 'count:2', 'thumbnail': r're:https?://d3bzklg4lms4gh\.cloudfront\.net/program_info/image/default/production/.+', 'upload_date': '20220627', 'webpage_url': 'https://www.onsen.ag/program/onsenking?c=MTA0NjI=', }, }, { 'url': 'https://share.onsen.ag/program/girls-band-cry-radio?p=370&c=MTgwMDE', 'info_dict': { 'id': '18001', 'ext': 'mp4', 'title': '第4回', 'cast': 'count:5', 'description': 'md5:bbca8a389d99c90cbbce8f383c85fedd', 'display_id': 'MTgwMDE=', 'media_type': 'movie', 'section_start': 0, 'series': 'TVアニメ『ガールズバンドクライ』WEBラジオ「ガールズバンドクライ~ラジオにも全部ぶち込め。~」', 'series_id': 'girls-band-cry-radio', 'tags': 'count:3', 'thumbnail': r're:https?://d3bzklg4lms4gh\.cloudfront\.net/program_info/image/default/production/.+', 'upload_date': '20240425', 'webpage_url': 'https://www.onsen.ag/program/girls-band-cry-radio?c=MTgwMDE=', }, 'skip': 'Only available for premium supporters', }, { 'url': 'https://www.onsen.ag/program/uma', 'info_dict': { 'id': 'uma', 'title': 'UMA YELL RADIO', }, 'playlist_mincount': 35, }] @staticmethod def _get_encoded_id(program): return 
base64.urlsafe_b64encode(str(program['id']).encode()).decode() def _perform_login(self, username, password): sign_in = self._download_json( f'{self._BASE_URL}/web_api/signin', None, 'Logging in', headers={ 'Accept': 'application/json', 'Content-Type': 'application/json', }, data=json.dumps({ 'session': { 'email': username, 'password': password, }, }).encode(), expected_status=401) if sign_in.get('error'): raise ExtractorError('Invalid username or password', expected=True) def _real_extract(self, url): program_id = self._match_id(url) try: programs = self._download_json( f'{self._BASE_URL}/web_api/programs/{program_id}', program_id) except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 404: raise ExtractorError('Invalid URL', expected=True) raise query = {k: v[-1] for k, v in parse_qs(url).items() if v} if 'c' not in query: entries = [ self.url_result(update_url_query(url, {'c': self._get_encoded_id(program)}), OnsenIE) for program in traverse_obj(programs, ('contents', lambda _, v: v['id'])) ] return self.playlist_result( entries, program_id, traverse_obj(programs, ('program_info', 'title', {clean_html}))) raw_id = base64.urlsafe_b64decode(f'{query["c"]}===').decode() p_keys = ('contents', lambda _, v: v['id'] == int(raw_id)) program = traverse_obj(programs, (*p_keys, any)) if not program: raise ExtractorError( 'This program is no longer available', expected=True) m3u8_url = traverse_obj(program, ('streaming_url', {url_or_none})) if not m3u8_url: self.raise_login_required( 'This program is only available for premium supporters') display_id = self._get_encoded_id(program) date_str = self._search_regex( rf'{program_id}0?(\d{{6}})', m3u8_url, 'date string', default=None) return { 'display_id': display_id, 'formats': self._extract_m3u8_formats(m3u8_url, raw_id, headers=self._HEADERS), 'http_headers': self._HEADERS, 'section_start': int_or_none(query.get('t', 0)), 'upload_date': strftime_or_none(f'20{date_str}'), 'webpage_url': 
f'{self._BASE_URL}/program/{program_id}?c={display_id}', **traverse_obj(program, { 'id': ('id', {int}, {str_or_none}), 'title': ('title', {clean_html}), 'media_type': ('media_type', {str}), 'thumbnail': ('poster_image_url', {url_or_none}, {update_url(query=None)}), }), **traverse_obj(programs, { 'cast': (('performers', (*p_keys, 'guests')), ..., 'name', {str}, filter), 'series_id': ('directory_name', {str}), }), **traverse_obj(programs, ('program_info', { 'description': ('description', {clean_html}, filter), 'series': ('title', {clean_html}), 'tags': ('hashtag_list', ..., {str}, filter), })), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/academicearth.py
yt_dlp/extractor/academicearth.py
import re from .common import InfoExtractor class AcademicEarthCourseIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?academicearth\.org/playlists/(?P<id>[^?#/]+)' IE_NAME = 'AcademicEarth:Course' _TEST = { 'url': 'http://academicearth.org/playlists/laws-of-nature/', 'info_dict': { 'id': 'laws-of-nature', 'title': 'Laws of Nature', 'description': 'Introduce yourself to the laws of nature with these free online college lectures from Yale, Harvard, and MIT.', }, 'playlist_count': 3, } def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) title = self._html_search_regex( r'<h1 class="playlist-name"[^>]*?>(.*?)</h1>', webpage, 'title') description = self._html_search_regex( r'<p class="excerpt"[^>]*?>(.*?)</p>', webpage, 'description', fatal=False) urls = re.findall( r'<li class="lecture-preview">\s*?<a target="_blank" href="([^"]+)">', webpage) entries = [self.url_result(u) for u in urls] return { '_type': 'playlist', 'id': playlist_id, 'title': title, 'description': description, 'entries': entries, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nebula.py
yt_dlp/extractor/nebula.py
import itertools import json from .art19 import Art19IE from .common import InfoExtractor from ..networking import PATCHRequest from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, int_or_none, make_archive_id, parse_iso8601, smuggle_url, try_call, unsmuggle_url, update_url_query, url_or_none, urljoin, ) from ..utils.traversal import traverse_obj _BASE_URL_RE = r'https?://(?:www\.|beta\.)?(?:watchnebula\.com|nebula\.app|nebula\.tv)' class NebulaBaseIE(InfoExtractor): _NETRC_MACHINE = 'watchnebula' _token = _api_token = None def _perform_login(self, username, password): try: response = self._download_json( 'https://nebula.tv/auth/login/', None, 'Logging in to Nebula', 'Login failed', data=json.dumps({'email': username, 'password': password}).encode(), headers={'content-type': 'application/json'}) except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 400: raise ExtractorError('Login failed: Invalid username or password', expected=True) raise self._api_token = traverse_obj(response, ('key', {str})) if not self._api_token: raise ExtractorError('Login failed: No token') def _call_api(self, *args, **kwargs): if self._token: kwargs.setdefault('headers', {})['Authorization'] = f'Bearer {self._token}' try: return self._download_json(*args, **kwargs) except ExtractorError as e: if not isinstance(e.cause, HTTPError) or e.cause.status not in (401, 403): raise self.to_screen( f'Reauthorizing with Nebula and retrying, because last API call resulted in error {e.cause.status}') self._real_initialize() if self._token: kwargs.setdefault('headers', {})['Authorization'] = f'Bearer {self._token}' return self._download_json(*args, **kwargs) def _real_initialize(self): if not self._api_token: self._api_token = try_call( lambda: self._get_cookies('https://nebula.tv')['nebula_auth.apiToken'].value) self._token = self._download_json( 'https://users.api.nebula.app/api/v1/authorization/', None, headers={'Authorization': f'Token 
{self._api_token}'} if self._api_token else None, note='Authorizing to Nebula', data=b'')['token'] def _extract_formats(self, content_id, slug): for retry in (False, True): try: fmts, subs = self._extract_m3u8_formats_and_subtitles( f'https://content.api.nebula.app/{content_id.split(":")[0]}s/{content_id}/manifest.m3u8', slug, 'mp4', query={ 'token': self._token, 'app_version': '23.10.0', 'platform': 'ios', }) break except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 401: self.raise_login_required() if not retry and isinstance(e.cause, HTTPError) and e.cause.status == 403: self.to_screen('Reauthorizing with Nebula and retrying, because fetching video resulted in error') self._real_initialize() continue raise self.mark_watched(content_id, slug) return {'formats': fmts, 'subtitles': subs} def _extract_video_metadata(self, episode): channel_url = traverse_obj( episode, (('channel_slug', 'class_slug'), {urljoin('https://nebula.tv/')}), get_all=False) return { 'id': episode['id'].partition(':')[2], **traverse_obj(episode, { 'display_id': 'slug', 'title': 'title', 'description': 'description', 'timestamp': ('published_at', {parse_iso8601}), 'duration': ('duration', {int_or_none}), 'channel_id': 'channel_slug', 'uploader_id': 'channel_slug', 'channel': 'channel_title', 'uploader': 'channel_title', 'series': 'channel_title', 'creator': 'channel_title', 'thumbnail': ('images', 'thumbnail', 'src', {url_or_none}), 'episode_number': ('order', {int_or_none}), # Old code was wrongly setting extractor_key from NebulaSubscriptionsIE '_old_archive_ids': ('zype_id', {lambda x: [ make_archive_id(NebulaIE, x), make_archive_id(NebulaSubscriptionsIE, x)] if x else None}), }), 'channel_url': channel_url, 'uploader_url': channel_url, } def _mark_watched(self, content_id, slug): self._call_api( PATCHRequest(f'https://content.api.nebula.app/{content_id.split(":")[0]}s/{content_id}/progress/'), slug, 'Marking watched', 'Unable to mark watched', fatal=False, 
data=json.dumps({'completed': True}).encode(), headers={'content-type': 'application/json'}) class NebulaIE(NebulaBaseIE): IE_NAME = 'nebula:video' _VALID_URL = rf'{_BASE_URL_RE}/videos/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://nebula.tv/videos/that-time-disney-remade-beauty-and-the-beast', 'info_dict': { 'id': '84ed544d-4afd-4723-8cd5-2b95261f0abf', 'ext': 'mp4', 'title': 'That Time Disney Remade Beauty and the Beast', 'description': 'md5:2aae3c4cfc5ee09a1ecdff0909618cf4', 'upload_date': '20180731', 'timestamp': 1533009600, 'channel': 'Lindsay Ellis', 'channel_id': 'lindsayellis', 'uploader': 'Lindsay Ellis', 'uploader_id': 'lindsayellis', 'uploader_url': r're:https://nebula\.(tv|app)/lindsayellis', 'series': 'Lindsay Ellis', 'display_id': 'that-time-disney-remade-beauty-and-the-beast', 'channel_url': r're:https://nebula\.(tv|app)/lindsayellis', 'creator': 'Lindsay Ellis', 'duration': 2212, 'thumbnail': r're:https://\w+\.cloudfront\.net/[\w-]+', '_old_archive_ids': ['nebula 5c271b40b13fd613090034fd', 'nebulasubscriptions 5c271b40b13fd613090034fd'], }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://nebula.tv/videos/the-logistics-of-d-day-landing-craft-how-the-allies-got-ashore', 'md5': 'd05739cf6c38c09322422f696b569c23', 'info_dict': { 'id': '7e623145-1b44-4ca3-aa0b-ed25a247ea34', 'ext': 'mp4', 'title': 'Landing Craft - How The Allies Got Ashore', 'description': r're:^In this episode we explore the unsung heroes of D-Day, the landing craft.', 'upload_date': '20200327', 'timestamp': 1585348140, 'channel': 'Real Engineering — The Logistics of D-Day', 'channel_id': 'd-day', 'uploader': 'Real Engineering — The Logistics of D-Day', 'uploader_id': 'd-day', 'series': 'Real Engineering — The Logistics of D-Day', 'display_id': 'the-logistics-of-d-day-landing-craft-how-the-allies-got-ashore', 'creator': 'Real Engineering — The Logistics of D-Day', 'duration': 841, 'channel_url': 'https://nebula.tv/d-day', 'uploader_url': 'https://nebula.tv/d-day', 'thumbnail': 
r're:https://\w+\.cloudfront\.net/[\w-]+', '_old_archive_ids': ['nebula 5e7e78171aaf320001fbd6be', 'nebulasubscriptions 5e7e78171aaf320001fbd6be'], }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://nebula.tv/videos/money-episode-1-the-draw', 'md5': 'ebe28a7ad822b9ee172387d860487868', 'info_dict': { 'id': 'b96c5714-9e2b-4ec3-b3f1-20f6e89cc553', 'ext': 'mp4', 'title': 'Episode 1: The Draw', 'description': r'contains:There’s free money on offer… if the players can all work together.', 'upload_date': '20200323', 'timestamp': 1584980400, 'channel': 'Tom Scott Presents: Money', 'channel_id': 'tom-scott-presents-money', 'uploader': 'Tom Scott Presents: Money', 'uploader_id': 'tom-scott-presents-money', 'uploader_url': 'https://nebula.tv/tom-scott-presents-money', 'duration': 825, 'channel_url': 'https://nebula.tv/tom-scott-presents-money', 'series': 'Tom Scott Presents: Money', 'display_id': 'money-episode-1-the-draw', 'thumbnail': r're:https://\w+\.cloudfront\.net/[\w-]+', 'creator': 'Tom Scott Presents: Money', '_old_archive_ids': ['nebula 5e779ebdd157bc0001d1c75a', 'nebulasubscriptions 5e779ebdd157bc0001d1c75a'], }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://watchnebula.com/videos/money-episode-1-the-draw', 'only_matching': True, }, { 'url': 'https://nebula.tv/videos/tldrnewseu-did-the-us-really-blow-up-the-nordstream-pipelines', 'info_dict': { 'id': 'e389af9d-1dab-44f2-8788-ee24deb7ff0d', 'ext': 'mp4', 'display_id': 'tldrnewseu-did-the-us-really-blow-up-the-nordstream-pipelines', 'title': 'Did the US Really Blow Up the NordStream Pipelines?', 'description': 'md5:b4e2a14e3ff08f546a3209c75261e789', 'upload_date': '20230223', 'timestamp': 1677144070, 'channel': 'TLDR News EU', 'channel_id': 'tldrnewseu', 'uploader': 'TLDR News EU', 'uploader_id': 'tldrnewseu', 'uploader_url': r're:https://nebula\.(tv|app)/tldrnewseu', 'duration': 524, 'channel_url': r're:https://nebula\.(tv|app)/tldrnewseu', 'series': 'TLDR News EU', 'thumbnail': 
r're:https://\w+\.cloudfront\.net/[\w-]+', 'creator': 'TLDR News EU', '_old_archive_ids': ['nebula 63f64c74366fcd00017c1513', 'nebulasubscriptions 63f64c74366fcd00017c1513'], }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://beta.nebula.tv/videos/money-episode-1-the-draw', 'only_matching': True, }] def _real_extract(self, url): slug = self._match_id(url) url, smuggled_data = unsmuggle_url(url, {}) if smuggled_data.get('id'): return { 'id': smuggled_data['id'], 'display_id': slug, 'title': '', **self._extract_formats(smuggled_data['id'], slug), } metadata = self._call_api( f'https://content.api.nebula.app/content/videos/{slug}', slug, note='Fetching video metadata') return { **self._extract_video_metadata(metadata), **self._extract_formats(metadata['id'], slug), } class NebulaClassIE(NebulaBaseIE): IE_NAME = 'nebula:media' _VALID_URL = rf'{_BASE_URL_RE}/(?!(?:myshows|library|videos)/)(?P<id>[\w-]+)/(?P<ep>[\w-]+)/?(?:$|[?#])' _TESTS = [{ 'url': 'https://nebula.tv/copyright-for-fun-and-profit/14', 'info_dict': { 'id': 'd7432cdc-c608-474d-942c-f74345daed7b', 'ext': 'mp4', 'display_id': '14', 'channel_url': 'https://nebula.tv/copyright-for-fun-and-profit', 'episode_number': 14, 'thumbnail': 'https://dj423fildxgac.cloudfront.net/d533718d-9307-42d4-8fb0-e283285e99c9', 'uploader_url': 'https://nebula.tv/copyright-for-fun-and-profit', 'duration': 646, 'episode': 'Episode 14', 'title': 'Photos, Sculpture, and Video', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://nebula.tv/extremitiespodcast/pyramiden-the-high-arctic-soviet-ghost-town', 'info_dict': { 'ext': 'mp3', 'id': '018f65f0-0033-4021-8f87-2d132beb19aa', 'description': 'md5:05d2b23ab780c955e2511a2b9127acff', 'series_id': '335e8159-d663-491a-888f-1732285706ac', 'modified_timestamp': 1599091504, 'episode_id': '018f65f0-0033-4021-8f87-2d132beb19aa', 'series': 'Extremities', 'modified_date': '20200903', 'upload_date': '20200902', 'title': 'Pyramiden: The High-Arctic Soviet Ghost Town', 
'release_timestamp': 1571237958, 'thumbnail': r're:^https?://content\.production\.cdn\.art19\.com.*\.jpeg$', 'duration': 1546.05714, 'timestamp': 1599085608, 'release_date': '20191016', }, }, { 'url': 'https://nebula.tv/thelayover/the-layover-episode-1', 'info_dict': { 'ext': 'mp3', 'id': '9d74a762-00bb-45a8-9e8d-9ed47c04a1d0', 'episode_number': 1, 'thumbnail': r're:^https?://content\.production\.cdn\.art19\.com.*\.jpeg$', 'release_date': '20230304', 'modified_date': '20230403', 'series': 'The Layover', 'episode_id': '9d74a762-00bb-45a8-9e8d-9ed47c04a1d0', 'modified_timestamp': 1680554566, 'duration': 3130.46401, 'release_timestamp': 1677943800, 'title': 'The Layover — Episode 1', 'series_id': '874303a5-4900-4626-a4b6-2aacac34466a', 'upload_date': '20230303', 'episode': 'Episode 1', 'timestamp': 1677883672, 'description': 'md5:002cca89258e3bc7c268d5b8c24ba482', }, }] def _real_extract(self, url): slug, episode = self._match_valid_url(url).group('id', 'ep') url, smuggled_data = unsmuggle_url(url, {}) if smuggled_data.get('id'): return { 'id': smuggled_data['id'], 'display_id': slug, 'title': '', **self._extract_formats(smuggled_data['id'], slug), } metadata = self._call_api( f'https://content.api.nebula.app/content/{slug}/{episode}/?include=lessons', slug, note='Fetching class/podcast metadata') content_type = metadata.get('type') if content_type == 'lesson': return { **self._extract_video_metadata(metadata), **self._extract_formats(metadata['id'], slug), } elif content_type == 'podcast_episode': episode_url = metadata['episode_url'] if not episode_url and metadata.get('premium'): self.raise_login_required() self.mark_watched(metadata['id'], slug) if Art19IE.suitable(episode_url): return self.url_result(episode_url, Art19IE) return traverse_obj(metadata, { 'id': ('id', {str}), 'url': ('episode_url', {url_or_none}), 'title': ('title', {str}), 'description': ('description', {str}), 'timestamp': ('published_at', {parse_iso8601}), 'duration': ('duration', 
{int_or_none}), 'channel_id': ('channel_id', {str}), 'chnanel': ('channel_title', {str}), 'thumbnail': ('assets', 'regular', {url_or_none}), }) raise ExtractorError(f'Unexpected content type {content_type!r}') class NebulaSubscriptionsIE(NebulaBaseIE): IE_NAME = 'nebula:subscriptions' _VALID_URL = rf'{_BASE_URL_RE}/(?P<id>myshows|library/latest-videos)/?(?:$|[?#])' _TESTS = [{ 'url': 'https://nebula.tv/myshows', 'playlist_mincount': 1, 'info_dict': { 'id': 'myshows', }, }] def _generate_playlist_entries(self): next_url = update_url_query('https://content.api.nebula.app/video_episodes/', { 'following': 'true', 'include': 'engagement', 'ordering': '-published_at', }) for page_num in itertools.count(1): channel = self._call_api( next_url, 'myshows', note=f'Retrieving subscriptions page {page_num}') for episode in channel['results']: metadata = self._extract_video_metadata(episode) yield self.url_result(smuggle_url( f'https://nebula.tv/videos/{metadata["display_id"]}', {'id': episode['id']}), NebulaIE, url_transparent=True, **metadata) next_url = channel.get('next') if not next_url: return def _real_extract(self, url): return self.playlist_result(self._generate_playlist_entries(), 'myshows') class NebulaChannelIE(NebulaBaseIE): IE_NAME = 'nebula:channel' _VALID_URL = rf'{_BASE_URL_RE}/(?!myshows|library|videos)(?P<id>[\w-]+)/?(?:$|[?#])' _TESTS = [{ 'url': 'https://nebula.tv/tom-scott-presents-money', 'info_dict': { 'id': 'tom-scott-presents-money', 'title': 'Tom Scott Presents: Money', 'description': 'Tom Scott hosts a series all about trust, negotiation and money.', }, 'playlist_count': 5, }, { 'url': 'https://nebula.tv/lindsayellis', 'info_dict': { 'id': 'lindsayellis', 'title': 'Lindsay Ellis', 'description': 'Enjoy these hottest of takes on Disney, Transformers, and Musicals.', }, 'playlist_mincount': 2, }, { 'url': 'https://nebula.tv/johnnyharris', 'info_dict': { 'id': 'johnnyharris', 'title': 'Johnny Harris', 'description': 'I make videos about maps and many 
other things.', }, 'playlist_mincount': 90, }, { 'url': 'https://nebula.tv/copyright-for-fun-and-profit', 'info_dict': { 'id': 'copyright-for-fun-and-profit', 'title': 'Copyright for Fun and Profit', 'description': 'md5:6690248223eed044a9f11cd5a24f9742', }, 'playlist_count': 23, }, { 'url': 'https://nebula.tv/trussissuespodcast', 'info_dict': { 'id': 'trussissuespodcast', 'title': 'The TLDR News Podcast', 'description': 'md5:a08c4483bc0b705881d3e0199e721385', }, 'playlist_mincount': 80, }] def _generate_playlist_entries(self, collection_id, collection_slug): next_url = f'https://content.api.nebula.app/video_channels/{collection_id}/video_episodes/?ordering=-published_at' for page_num in itertools.count(1): episodes = self._call_api(next_url, collection_slug, note=f'Retrieving channel page {page_num}') for episode in episodes['results']: metadata = self._extract_video_metadata(episode) yield self.url_result(smuggle_url( episode.get('share_url') or f'https://nebula.tv/videos/{metadata["display_id"]}', {'id': episode['id']}), NebulaIE, url_transparent=True, **metadata) next_url = episodes.get('next') if not next_url: break def _generate_class_entries(self, channel): for lesson in channel['lessons']: metadata = self._extract_video_metadata(lesson) yield self.url_result(smuggle_url( lesson.get('share_url') or f'https://nebula.tv/{metadata["class_slug"]}/{metadata["slug"]}', {'id': lesson['id']}), NebulaClassIE, url_transparent=True, **metadata) def _generate_podcast_entries(self, collection_id, collection_slug): next_url = f'https://content.api.nebula.app/podcast_channels/{collection_id}/podcast_episodes/?ordering=-published_at&premium=true' for page_num in itertools.count(1): episodes = self._call_api(next_url, collection_slug, note=f'Retrieving podcast page {page_num}') for episode in traverse_obj(episodes, ('results', lambda _, v: url_or_none(v['share_url']))): yield self.url_result(episode['share_url'], NebulaClassIE) next_url = episodes.get('next') if not next_url: 
break def _real_extract(self, url): collection_slug = self._match_id(url) channel = self._call_api( f'https://content.api.nebula.app/content/{collection_slug}/?include=lessons', collection_slug, note='Retrieving channel') if channel.get('type') == 'class': entries = self._generate_class_entries(channel) elif channel.get('type') == 'podcast_channel': entries = self._generate_podcast_entries(channel['id'], collection_slug) else: entries = self._generate_playlist_entries(channel['id'], collection_slug) return self.playlist_result( entries=entries, playlist_id=collection_slug, playlist_title=channel.get('title'), playlist_description=channel.get('description')) class NebulaSeasonIE(NebulaBaseIE): IE_NAME = 'nebula:season' _VALID_URL = rf'{_BASE_URL_RE}/(?P<series>[\w-]+)/season/(?P<season_number>[\w-]+)' _TESTS = [{ 'url': 'https://nebula.tv/jetlag/season/15', 'info_dict': { 'id': 'jetlag_15', 'title': 'Tag: All Stars', 'description': 'md5:5aa5b8abf3de71756448dc44ffebb674', }, 'playlist_count': 8, }, { 'url': 'https://nebula.tv/jetlag/season/14', 'info_dict': { 'id': 'jetlag_14', 'title': 'Snake', 'description': 'md5:6da9040f1c2ac559579738bfb6919d1e', }, 'playlist_count': 8, }, { 'url': 'https://nebula.tv/jetlag/season/13-5', 'info_dict': { 'id': 'jetlag_13-5', 'title': 'Hide + Seek Across NYC', 'description': 'md5:5b87bb9acc6dcdff289bb4c71a2ad59f', }, 'playlist_count': 3, }] def _build_url_result(self, item): url = ( traverse_obj(item, ('share_url', {url_or_none})) or urljoin('https://nebula.tv/', item.get('app_path')) or f'https://nebula.tv/videos/{item["slug"]}') return self.url_result( smuggle_url(url, {'id': item['id']}), NebulaIE, url_transparent=True, **self._extract_video_metadata(item)) def _entries(self, data): for episode in traverse_obj(data, ('episodes', lambda _, v: v['video']['id'], 'video')): yield self._build_url_result(episode) for extra in traverse_obj(data, ('extras', ..., 'items', lambda _, v: v['id'])): yield self._build_url_result(extra) for 
trailer in traverse_obj(data, ('trailers', lambda _, v: v['id'])): yield self._build_url_result(trailer) def _real_extract(self, url): series, season_id = self._match_valid_url(url).group('series', 'season_number') playlist_id = f'{series}_{season_id}' data = self._call_api( f'https://content.api.nebula.app/content/{series}/season/{season_id}', playlist_id) return self.playlist_result( self._entries(data), playlist_id, **traverse_obj(data, { 'title': ('title', {str}), 'description': ('description', {str}), }))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/rtrfm.py
yt_dlp/extractor/rtrfm.py
from .common import InfoExtractor class RTRFMIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?rtrfm\.com\.au/(?:shows|show-episode)/(?P<id>[^/?\#&]+)' _TESTS = [ { 'url': 'https://rtrfm.com.au/shows/breakfast/', 'md5': '46168394d3a5ce237cf47e85d0745413', 'info_dict': { 'id': 'breakfast-2021-11-16', 'ext': 'mp3', 'series': 'Breakfast with Taylah', 'title': r're:^Breakfast with Taylah \d{4}-\d{2}-\d{2}$', 'description': 'md5:0979c3ab1febfbec3f1ccb743633c611', }, 'skip': 'ID and md5 changes daily', }, { 'url': 'https://rtrfm.com.au/show-episode/breakfast-2021-11-11/', 'md5': '396bedf1e40f96c62b30d4999202a790', 'info_dict': { 'id': 'breakfast-2021-11-11', 'ext': 'mp3', 'series': 'Breakfast with Taylah', 'title': 'Breakfast with Taylah 2021-11-11', 'description': 'md5:0979c3ab1febfbec3f1ccb743633c611', }, }, { 'url': 'https://rtrfm.com.au/show-episode/breakfast-2020-06-01/', 'md5': '594027f513ec36a24b15d65007a24dff', 'info_dict': { 'id': 'breakfast-2020-06-01', 'ext': 'mp3', 'series': 'Breakfast with Taylah', 'title': 'Breakfast with Taylah 2020-06-01', 'description': r're:^Breakfast with Taylah ', }, 'skip': 'This audio has expired', }, ] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) show, date, title = self._search_regex( r'''\.playShow(?:From)?\(['"](?P<show>[^'"]+)['"],\s*['"](?P<date>[0-9]{4}-[0-9]{2}-[0-9]{2})['"],\s*['"](?P<title>[^'"]+)['"]''', webpage, 'details', group=('show', 'date', 'title')) url = self._download_json( 'https://restreams.rtrfm.com.au/rzz', show, 'Downloading MP3 URL', query={'n': show, 'd': date})['u'] # This is the only indicator of an error until trying to download the URL and # downloads of mp4 URLs always fail (403 for current episodes, 404 for missing). 
if '.mp4' in url: url = None self.raise_no_formats('Expired or no episode on this date', expected=True) return { 'id': f'{show}-{date}', 'title': f'{title} {date}', 'series': title, 'url': url, 'release_date': date, 'description': self._og_search_description(webpage), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/fptplay.py
yt_dlp/extractor/fptplay.py
import hashlib import time import urllib.parse from .common import InfoExtractor from ..utils import ( clean_html, join_nonempty, strip_or_none, ) class FptplayIE(InfoExtractor): _VALID_URL = r'https?://fptplay\.vn/xem-video/[^/]+\-(?P<id>\w+)(?:/tap-(?P<episode>\d+)?/?(?:[?#]|$)|)' _GEO_COUNTRIES = ['VN'] IE_NAME = 'fptplay' IE_DESC = 'fptplay.vn' _TESTS = [{ 'url': 'https://fptplay.vn/xem-video/nhan-duyen-dai-nhan-xin-dung-buoc-621a123016f369ebbde55945', 'md5': 'ca0ee9bc63446c0c3e9a90186f7d6b33', 'info_dict': { 'id': '621a123016f369ebbde55945', 'ext': 'mp4', 'title': 'Nhân Duyên Đại Nhân Xin Dừng Bước - Tập 1A', 'description': 'md5:23cf7d1ce0ade8e21e76ae482e6a8c6c', }, }, { 'url': 'https://fptplay.vn/xem-video/ma-toi-la-dai-gia-61f3aa8a6b3b1d2e73c60eb5/tap-3', 'md5': 'b35be968c909b3e4e1e20ca45dd261b1', 'info_dict': { 'id': '61f3aa8a6b3b1d2e73c60eb5', 'ext': 'mp4', 'title': 'Má Tôi Là Đại Gia - Tập 3', 'description': 'md5:ff8ba62fb6e98ef8875c42edff641d1c', }, }, { 'url': 'https://fptplay.vn/xem-video/lap-toi-do-giam-under-the-skin-6222d9684ec7230fa6e627a2/tap-4', 'md5': 'bcb06c55ec14786d7d4eda07fa1ccbb9', 'info_dict': { 'id': '6222d9684ec7230fa6e627a2', 'ext': 'mp4', 'title': 'Lạp Tội Đồ Giám - Tập 2B', 'description': 'md5:e5a47e9d35fbf7e9479ca8a77204908b', }, }, { 'url': 'https://fptplay.vn/xem-video/nha-co-chuyen-hi-alls-well-ends-well-1997-6218995f6af792ee370459f0', 'only_matching': True, }] def _real_extract(self, url): video_id, slug_episode = self._match_valid_url(url).group('id', 'episode') webpage = self._download_webpage(url, video_id=video_id, fatal=False) or '' title = self._search_regex( r'(?s)<h4\s+class="mb-1 text-2xl text-white"[^>]*>(.+)</h4>', webpage, 'title', fatal=False) real_episode = slug_episode if not title else self._search_regex( r'<p.+title="(?P<episode>[^">]+)"\s+class="epi-title active"', webpage, 'episode', fatal=False) title = strip_or_none(title) or self._html_search_meta(('og:title', 'twitter:title'), webpage) info = 
self._download_json( self.get_api_with_st_token(video_id, int(slug_episode) - 1 if slug_episode else 0), video_id) formats, subtitles = self._extract_m3u8_formats_and_subtitles(info['data']['url'], video_id, 'mp4') return { 'id': video_id, 'title': join_nonempty(title, real_episode, delim=' - '), 'description': ( clean_html(self._search_regex(r'<p\s+class="overflow-hidden"[^>]*>(.+)</p>', webpage, 'description')) or self._html_search_meta(('og:description', 'twitter:description'), webpage)), 'formats': formats, 'subtitles': subtitles, } def get_api_with_st_token(self, video_id, episode): path = f'/api/v6.2_w/stream/vod/{video_id}/{episode}/auto_vip' timestamp = int(time.time()) + 10800 t = hashlib.md5(f'WEBv6Dkdsad90dasdjlALDDDS{timestamp}{path}'.encode()).hexdigest().upper() r = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' n = [int(f'0x{t[2 * o: 2 * o + 2]}', 16) for o in range(len(t) // 2)] def convert(e): t = '' n = 0 i = [0, 0, 0] a = [0, 0, 0, 0] s = len(e) c = 0 for _ in range(s, 0, -1): if n <= 3: i[n] = e[c] n += 1 c += 1 if 3 == n: a[0] = (252 & i[0]) >> 2 a[1] = ((3 & i[0]) << 4) + ((240 & i[1]) >> 4) a[2] = ((15 & i[1]) << 2) + ((192 & i[2]) >> 6) a[3] = (63 & i[2]) for v in range(4): t += r[a[v]] n = 0 if n: for o in range(n, 3): i[o] = 0 for o in range(n + 1): a[0] = (252 & i[0]) >> 2 a[1] = ((3 & i[0]) << 4) + ((240 & i[1]) >> 4) a[2] = ((15 & i[1]) << 2) + ((192 & i[2]) >> 6) a[3] = (63 & i[2]) t += r[a[o]] n += 1 while n < 3: t += '' n += 1 return t st_token = convert(n).replace('+', '-').replace('/', '_').replace('=', '') return f'https://api.fptplay.net{path}?{urllib.parse.urlencode({"st": st_token, "e": timestamp})}'
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ntvru.py
yt_dlp/extractor/ntvru.py
from .common import InfoExtractor from ..utils import ( int_or_none, parse_iso8601, unescapeHTML, url_or_none, xpath_text, ) from ..utils.traversal import traverse_obj class NTVRuIE(InfoExtractor): IE_NAME = 'ntv.ru' _VALID_URL = r'https?://(?:www\.)?ntv\.ru/(?:[^/#?]+/)*(?P<id>[^/?#&]+)' _TESTS = [{ # JSON Api is geo restricted 'url': 'https://www.ntv.ru/peredacha/svoya_igra/m58980/o818800', 'md5': '818962a1b52747d446db7cd5be43e142', 'info_dict': { 'id': '2520563', 'ext': 'mp4', 'title': 'Участники: Ирина Петрова, Сергей Коновалов, Кристина Кораблина', 'description': 'md5:fcbd21cd45238a940b95550f9e178e3e', 'thumbnail': r're:^http://.*\.jpg', 'duration': 2462, 'view_count': int, 'comment_count': int, 'tags': ['игры и игрушки'], 'timestamp': 1761821096, 'upload_date': '20251030', 'release_timestamp': 1761821096, 'release_date': '20251030', 'modified_timestamp': 1761821096, 'modified_date': '20251030', }, }, { 'url': 'http://www.ntv.ru/novosti/863142/', 'md5': 'ba7ea172a91cb83eb734cad18c10e723', 'info_dict': { 'id': '746000', 'ext': 'mp4', 'title': 'Командующий Черноморским флотом провел переговоры в штабе ВМС Украины', 'description': 'Командующий Черноморским флотом провел переговоры в штабе ВМС Украины', 'thumbnail': r're:^http://.*\.jpg', 'duration': 136, 'view_count': int, 'comment_count': int, 'tags': ['ВМС', 'захват', 'митинги', 'Севастополь', 'Украина'], 'timestamp': 1395222013, 'upload_date': '20140319', 'release_timestamp': 1395222013, 'release_date': '20140319', 'modified_timestamp': 1395222013, 'modified_date': '20140319', }, }, { # Requires unescapeHTML 'url': 'http://www.ntv.ru/peredacha/segodnya/m23700/o232416', 'md5': '82dbd49b38e3af1d00df16acbeab260c', 'info_dict': { 'id': '747480', 'ext': 'mp4', 'title': '"Сегодня". 21 марта 2014 года. 
16:00 ', 'description': 'md5:bed80745ca72af557433195f51a02785', 'thumbnail': r're:^http://.*\.jpg', 'duration': 1496, 'view_count': int, 'comment_count': int, 'tags': ['Брюссель', 'гражданство', 'ЕС', 'Крым', 'ОСАГО', 'саммит', 'санкции', 'события', 'чиновники', 'рейтинг'], 'timestamp': 1395406951, 'upload_date': '20140321', 'release_timestamp': 1395406951, 'release_date': '20140321', 'modified_timestamp': 1395406951, 'modified_date': '20140321', }, }, { 'url': 'https://www.ntv.ru/kino/Koma_film/m70281/o336036/video/', 'md5': 'e9c7cde24d9d3eaed545911a04e6d4f4', 'info_dict': { 'id': '1126480', 'ext': 'mp4', 'title': 'Остросюжетный фильм "Кома"', 'description': 'md5:e79ffd0887425a0f05a58885c408d7d8', 'thumbnail': r're:^http://.*\.jpg', 'duration': 5608, 'view_count': int, 'comment_count': int, 'tags': ['кино'], 'timestamp': 1432868572, 'upload_date': '20150529', 'release_timestamp': 1432868572, 'release_date': '20150529', 'modified_timestamp': 1432868572, 'modified_date': '20150529', }, }, { 'url': 'http://www.ntv.ru/serial/Delo_vrachey/m31760/o233916/', 'md5': '9320cd0e23f3ea59c330dc744e06ff3b', 'info_dict': { 'id': '751482', 'ext': 'mp4', 'title': '"Дело врачей": "Деревце жизни"', 'description': 'md5:d6fbf9193f880f50d9cbfbcc954161c1', 'thumbnail': r're:^http://.*\.jpg', 'duration': 2590, 'view_count': int, 'comment_count': int, 'tags': ['врачи', 'больницы'], 'timestamp': 1395882300, 'upload_date': '20140327', 'release_timestamp': 1395882300, 'release_date': '20140327', 'modified_timestamp': 1395882300, 'modified_date': '20140327', }, }, { # Schemeless file URL 'url': 'https://www.ntv.ru/video/1797442', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) video_id = self._html_search_regex( r'<meta property="ya:ovs:feed_url" content="https?://www\.ntv\.ru/(?:exp/)?video/(\d+)', webpage, 'video id') player = self._download_xml( f'http://www.ntv.ru/vi{video_id}/', video_id, 
'Downloading video XML') video = player.find('./data/video') formats = [] for format_id in ['', 'hi', 'webm']: video_url = url_or_none(xpath_text(video, f'./{format_id}file')) if not video_url: continue formats.append({ 'url': video_url, 'filesize': int_or_none(xpath_text(video, f'./{format_id}size')), }) hls_manifest = xpath_text(video, './playback/hls') if hls_manifest: formats.extend(self._extract_m3u8_formats( hls_manifest, video_id, m3u8_id='hls', fatal=False)) dash_manifest = xpath_text(video, './playback/dash') if dash_manifest: formats.extend(self._extract_mpd_formats( dash_manifest, video_id, mpd_id='dash', fatal=False)) metadata = self._download_xml( f'https://www.ntv.ru/exp/video/{video_id}', video_id, 'Downloading XML metadata', fatal=False) return { 'id': video_id, 'formats': formats, **traverse_obj(player, { 'title': ('data/title/text()', ..., {str}, {unescapeHTML}, any), 'description': ('data/description/text()', ..., {str}, {unescapeHTML}, any), 'duration': ('data/video/totaltime/text()', ..., {int_or_none}, any), 'view_count': ('data/video/views/text()', ..., {int_or_none}, any), 'thumbnail': ('data/video/splash/text()', ..., {url_or_none}, any), }), **traverse_obj(metadata, { 'title': ('{*}title/text()', ..., {str}, {unescapeHTML}, any), 'description': ('{*}description/text()', ..., {str}, {unescapeHTML}, any), 'duration': ('{*}duration/text()', ..., {int_or_none}, any), 'timestamp': ('{*}create_date/text()', ..., {parse_iso8601}, any), 'release_timestamp': ('{*}upload_date/text()', ..., {parse_iso8601}, any), 'modified_timestamp': ('{*}modify_date/text()', ..., {parse_iso8601}, any), 'tags': ('{*}tag/text()', ..., {str}, {lambda x: x.split(',')}, ..., {str.strip}, filter), 'view_count': ('{*}stats/views_total/text()', ..., {int_or_none}, any), 'comment_count': ('{*}stats/comments/text()', ..., {int_or_none}, any), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/kakao.py
yt_dlp/extractor/kakao.py
from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, int_or_none, str_or_none, strip_or_none, traverse_obj, unified_timestamp, ) class KakaoIE(InfoExtractor): _VALID_URL = r'https?://(?:play-)?tv\.kakao\.com/(?:channel/\d+|embed/player)/cliplink/(?P<id>\d+|[^?#&]+@my)' _API_BASE_TMPL = 'http://tv.kakao.com/api/v1/ft/playmeta/cliplink/%s/' _CDN_API = 'https://tv.kakao.com/katz/v1/ft/cliplink/%s/readyNplay?' _TESTS = [{ 'url': 'http://tv.kakao.com/channel/2671005/cliplink/301965083', 'md5': '702b2fbdeb51ad82f5c904e8c0766340', 'info_dict': { 'id': '301965083', 'ext': 'mp4', 'title': '乃木坂46 バナナマン 「3期生紹介コーナーが始動!顔高低差GPも!」 『乃木坂工事中』', 'description': '', 'uploader_id': '2671005', 'uploader': '그랑그랑이', 'timestamp': 1488160199, 'upload_date': '20170227', 'like_count': int, 'thumbnail': r're:http://.+/thumb\.png', 'tags': ['乃木坂'], 'view_count': int, 'duration': 1503, 'comment_count': int, }, }, { 'url': 'http://tv.kakao.com/channel/2653210/cliplink/300103180', 'md5': 'a8917742069a4dd442516b86e7d66529', 'info_dict': { 'id': '300103180', 'ext': 'mp4', 'description': '러블리즈 - Destiny (나의 지구) (Lovelyz - Destiny)\r\n\r\n[쇼! 음악중심] 20160611, 507회', 'title': '러블리즈 - Destiny (나의 지구) (Lovelyz - Destiny)', 'uploader_id': '2653210', 'uploader': '쇼! 
음악중심', 'timestamp': 1485684628, 'upload_date': '20170129', 'like_count': int, 'thumbnail': r're:http://.+/thumb\.png', 'tags': 'count:28', 'view_count': int, 'duration': 184, 'comment_count': int, }, }, { # geo restricted 'url': 'https://tv.kakao.com/channel/3643855/cliplink/412069491', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) api_base = self._API_BASE_TMPL % video_id cdn_api_base = self._CDN_API % video_id query = { 'player': 'monet_html5', 'referer': url, 'uuid': '', 'service': 'kakao_tv', 'section': '', 'dteType': 'PC', 'fields': ','.join([ '-*', 'tid', 'clipLink', 'displayTitle', 'clip', 'title', 'description', 'channelId', 'createTime', 'duration', 'playCount', 'likeCount', 'commentCount', 'tagList', 'channel', 'name', 'clipChapterThumbnailList', 'thumbnailUrl', 'timeInSec', 'isDefault', 'videoOutputList', 'width', 'height', 'kbps', 'profile', 'label']), } api_json = self._download_json( api_base, video_id, 'Downloading video info') clip_link = api_json['clipLink'] clip = clip_link['clip'] title = clip.get('title') or clip_link.get('displayTitle') formats = [] for fmt in clip.get('videoOutputList') or []: profile_name = fmt.get('profile') if not profile_name or profile_name == 'AUDIO': continue query.update({ 'profile': profile_name, 'fields': '-*,code,message,url', }) try: fmt_url_json = self._download_json( cdn_api_base, video_id, query=query, note=f'Downloading video URL for profile {profile_name}') except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 403: resp = self._parse_json(e.cause.response.read().decode(), video_id) if resp.get('code') == 'GeoBlocked': self.raise_geo_restricted() raise fmt_url = traverse_obj(fmt_url_json, ('videoLocation', 'url')) if not fmt_url: continue formats.append({ 'url': fmt_url, 'format_id': profile_name, 'width': int_or_none(fmt.get('width')), 'height': int_or_none(fmt.get('height')), 'format_note': fmt.get('label'), 'filesize': 
int_or_none(fmt.get('filesize')), 'tbr': int_or_none(fmt.get('kbps')), }) thumbs = [] for thumb in clip.get('clipChapterThumbnailList') or []: thumbs.append({ 'url': thumb.get('thumbnailUrl'), 'id': str(thumb.get('timeInSec')), 'preference': -1 if thumb.get('isDefault') else 0, }) top_thumbnail = clip.get('thumbnailUrl') if top_thumbnail: thumbs.append({ 'url': top_thumbnail, 'preference': 10, }) return { 'id': video_id, 'title': title, 'description': strip_or_none(clip.get('description')), 'uploader': traverse_obj(clip_link, ('channel', 'name')), 'uploader_id': str_or_none(clip_link.get('channelId')), 'thumbnails': thumbs, 'timestamp': unified_timestamp(clip_link.get('createTime')), 'duration': int_or_none(clip.get('duration')), 'view_count': int_or_none(clip.get('playCount')), 'like_count': int_or_none(clip.get('likeCount')), 'comment_count': int_or_none(clip.get('commentCount')), 'formats': formats, 'tags': clip.get('tagList'), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/manyvids.py
yt_dlp/extractor/manyvids.py
from .common import InfoExtractor from ..utils import ( clean_html, determine_ext, int_or_none, join_nonempty, parse_count, parse_duration, parse_iso8601, url_or_none, ) from ..utils.traversal import traverse_obj class ManyVidsIE(InfoExtractor): _VALID_URL = r'(?i)https?://(?:www\.)?manyvids\.com/video/(?P<id>\d+)' _TESTS = [{ # preview video 'url': 'https://www.manyvids.com/Video/530341/mv-tips-tricks', 'md5': '738dc723f7735ee9602f7ea352a6d058', 'info_dict': { 'id': '530341-preview', 'ext': 'mp4', 'title': 'MV Tips & Tricks (Preview)', 'description': r're:I will take you on a tour around .{1313}$', 'thumbnail': r're:https://cdn5\.manyvids\.com/php_uploads/video_images/DestinyDiaz/.+\.jpg', 'uploader': 'DestinyDiaz', 'view_count': int, 'like_count': int, 'release_timestamp': 1508419904, 'tags': ['AdultSchool', 'BBW', 'SFW', 'TeacherFetish'], 'release_date': '20171019', 'duration': 3167.0, }, 'expected_warnings': ['Only extracting preview'], }, { # full video 'url': 'https://www.manyvids.com/Video/935718/MY-FACE-REVEAL/', 'md5': 'bb47bab0e0802c2a60c24ef079dfe60f', 'info_dict': { 'id': '935718', 'ext': 'mp4', 'title': 'MY FACE REVEAL', 'description': r're:Today is the day!! 
I am finally taking off my mask .{445}$', 'thumbnail': r're:https://ods\.manyvids\.com/1001061960/3aa5397f2a723ec4597e344df66ab845/screenshots/.+\.jpg', 'uploader': 'Sarah Calanthe', 'view_count': int, 'like_count': int, 'release_date': '20181110', 'tags': ['EyeContact', 'Interviews', 'MaskFetish', 'MouthFetish', 'Redhead'], 'release_timestamp': 1541851200, 'duration': 224.0, }, }] _API_BASE = 'https://www.manyvids.com/bff/store/video' def _real_extract(self, url): video_id = self._match_id(url) video_data = self._download_json(f'{self._API_BASE}/{video_id}/private', video_id)['data'] formats, preview_only = [], True for format_id, path in [ ('preview', ['teaser', 'filepath']), ('transcoded', ['transcodedFilepath']), ('filepath', ['filepath']), ]: format_url = traverse_obj(video_data, (*path, {url_or_none})) if not format_url: continue if determine_ext(format_url) == 'm3u8': formats.extend(self._extract_m3u8_formats(format_url, video_id, 'mp4', m3u8_id=format_id)) else: formats.append({ 'url': format_url, 'format_id': format_id, 'preference': -10 if format_id == 'preview' else None, 'quality': 10 if format_id == 'filepath' else None, 'height': int_or_none( self._search_regex(r'_(\d{2,3}[02468])_', format_url, 'height', default=None)), }) if format_id != 'preview': preview_only = False metadata = traverse_obj( self._download_json(f'{self._API_BASE}/{video_id}', video_id, fatal=False), 'data') title = traverse_obj(metadata, ('title', {clean_html})) if preview_only: title = join_nonempty(title, '(Preview)', delim=' ') video_id += '-preview' self.report_warning( f'Only extracting preview. Video may be paid or subscription only. 
{self._login_hint()}') return { 'id': video_id, 'title': title, 'formats': formats, **traverse_obj(metadata, { 'description': ('description', {clean_html}), 'uploader': ('model', 'displayName', {clean_html}), 'thumbnail': (('screenshot', 'thumbnail'), {url_or_none}, any), 'view_count': ('views', {parse_count}), 'like_count': ('likes', {parse_count}), 'release_timestamp': ('launchDate', {parse_iso8601}), 'duration': ('videoDuration', {parse_duration}), 'tags': ('tagList', ..., 'label', {str}, filter, all, filter), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/la7.py
yt_dlp/extractor/la7.py
import re from .common import InfoExtractor from ..networking import HEADRequest from ..utils import float_or_none, int_or_none, parse_duration, unified_strdate class LA7IE(InfoExtractor): IE_NAME = 'la7.it' _VALID_URL = r'''(?x)https?://(?: (?:www\.)?la7\.it/([^/]+)/(?:rivedila7|video|news)/| tg\.la7\.it/repliche-tgla7\?id= )(?P<id>.+)''' _TESTS = [{ # single quality video 'url': 'http://www.la7.it/crozza/video/inccool8-02-10-2015-163722', 'md5': '8b613ffc0c4bf9b9e377169fc19c214c', 'info_dict': { 'id': 'inccool8-02-10-2015-163722', 'ext': 'mp4', 'title': 'Inc.Cool8', 'description': 'Benvenuti nell\'incredibile mondo della INC. COOL. 8. dove “INC.” sta per “Incorporated” “COOL” sta per “fashion” ed Eight sta per il gesto atletico', 'thumbnail': 're:^https?://.*', 'upload_date': '20151002', 'formats': 'count:4', }, }, { # multiple quality video 'url': 'https://www.la7.it/calcio-femminile/news/il-gol-di-lindsey-thomas-fiorentina-vs-milan-serie-a-calcio-femminile-26-11-2022-461736', 'md5': 'd2370e78f75e8d1238cb3a0db9a2eda3', 'info_dict': { 'id': 'il-gol-di-lindsey-thomas-fiorentina-vs-milan-serie-a-calcio-femminile-26-11-2022-461736', 'ext': 'mp4', 'title': 'Il gol di Lindsey Thomas | Fiorentina vs Milan | Serie A Calcio Femminile', 'description': 'Il gol di Lindsey Thomas | Fiorentina vs Milan | Serie A Calcio Femminile', 'thumbnail': 're:^https?://.*', 'upload_date': '20221126', 'formats': 'count:8', }, }, { 'url': 'http://www.la7.it/omnibus/rivedila7/omnibus-news-02-07-2016-189077', 'only_matching': True, }] _HOST = 'https://awsvodpkg.iltrovatore.it' def _generate_mp4_url(self, quality, m3u8_formats): for f in m3u8_formats: if f['vcodec'] != 'none' and quality in f['url']: http_url = f'{self._HOST}{quality}.mp4' urlh = self._request_webpage( HEADRequest(http_url), quality, note='Check filesize', fatal=False) if urlh: http_f = f.copy() del http_f['manifest_url'] http_f.update({ 'format_id': http_f['format_id'].replace('hls-', 'https-'), 'url': http_url, 'protocol': 
'https', 'filesize_approx': int_or_none(urlh.headers.get('Content-Length', None)), }) return http_f return None def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) if re.search(r'(?i)(drmsupport\s*:\s*true)\s*', webpage): self.report_drm(video_id) video_path = self._search_regex( r'(/content/[\w/,]+?)\.mp4(?:\.csmil)?/master\.m3u8', webpage, 'video_path') formats = self._extract_mpd_formats( f'{self._HOST}/local/dash/,{video_path}.mp4.urlset/manifest.mpd', video_id, mpd_id='dash', fatal=False) m3u8_formats = self._extract_m3u8_formats( f'{self._HOST}/local/hls/,{video_path}.mp4.urlset/master.m3u8', video_id, 'mp4', m3u8_id='hls', fatal=False) formats.extend(m3u8_formats) for q in filter(None, video_path.split(',')): http_f = self._generate_mp4_url(q, m3u8_formats) if http_f: formats.append(http_f) return { 'id': video_id, 'title': self._og_search_title(webpage, default=None), 'description': self._og_search_description(webpage, default=None), 'thumbnail': self._og_search_thumbnail(webpage, default=None), 'formats': formats, 'upload_date': unified_strdate(self._search_regex(r'datetime="(.+?)"', webpage, 'upload_date', fatal=False)), } class LA7PodcastEpisodeIE(InfoExtractor): IE_NAME = 'la7.it:pod:episode' _VALID_URL = r'https?://(?:www\.)?la7\.it/[^/]+/podcast/([^/]+-)?(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.la7.it/voicetown/podcast/la-carezza-delle-memoria-di-carlo-verdone-23-03-2021-371497', 'md5': '7737d4d79b3c1a34b3de3e16297119ed', 'info_dict': { 'id': '371497', 'ext': 'mp3', 'title': '"La carezza delle memoria" di Carlo Verdone', 'description': 'md5:5abf07c3c551a687db80af3f9ceb7d52', 'thumbnail': 'https://www.la7.it/sites/default/files/podcast/371497.jpg', 'upload_date': '20210323', }, }, { # embed url 'url': 'https://www.la7.it/embed/podcast/371497', 'only_matching': True, }, { # date already in the title 'url': 
'https://www.la7.it/propagandalive/podcast/lintervista-di-diego-bianchi-ad-annalisa-cuzzocrea-puntata-del-1932021-20-03-2021-371130', 'only_matching': True, }, { # title same as show_title 'url': 'https://www.la7.it/otto-e-mezzo/podcast/otto-e-mezzo-26-03-2021-372340', 'only_matching': True, }] def _extract_info(self, webpage, video_id=None, ppn=None): if not video_id: video_id = self._search_regex( r'data-nid=([\'"])(?P<vid>\d+)\1', webpage, 'video_id', group='vid') media_url = self._search_regex( (r'src\s*:\s*([\'"])(?P<url>\S+?mp3.+?)\1', r'data-podcast\s*=\s*([\'"])(?P<url>\S+?mp3.+?)\1'), webpage, 'media_url', group='url') formats = [{ 'url': media_url, 'format_id': 'http-mp3', 'ext': 'mp3', 'acodec': 'mp3', 'vcodec': 'none', }] title = self._html_search_regex( (r'<div class="title">(?P<title>.+?)</', r'<title>(?P<title>[^<]+)</title>', r'title:\s*([\'"])(?P<title>.+?)\1'), webpage, 'title', group='title') description = ( self._html_search_regex( (r'<div class="description">(.+?)</div>', r'<div class="description-mobile">(.+?)</div>', r'<div class="box-txt">([^<]+?)</div>', r'<div class="field-content"><p>(.+?)</p></div>'), webpage, 'description', default=None) or self._html_search_meta('description', webpage)) thumb = self._html_search_regex( (r'<div class="podcast-image"><img src="(.+?)"></div>', r'<div class="container-embed"[^<]+url\((.+?)\);">', r'<div class="field-content"><img src="(.+?)"'), webpage, 'thumbnail', fatal=False, default=None) duration = parse_duration(self._html_search_regex( r'<span class="(?:durata|duration)">([\d:]+)</span>', webpage, 'duration', fatal=False, default=None)) date = self._html_search_regex( r'class="data">\s*(?:<span>)?([\d\.]+)\s*</', webpage, 'date', default=None) date_alt = self._search_regex( r'(\d+[\./]\d+[\./]\d+)', title, 'date_alt', default=None) ppn = ppn or self._search_regex( r'ppN:\s*([\'"])(?P<ppn>.+?)\1', webpage, 'ppn', group='ppn', default=None) # if the date is not in the title # and title is the same as 
the show_title # add the date to the title if date and not date_alt and ppn and ppn.lower() == title.lower(): title = f'{title} del {date}' return { 'id': video_id, 'title': title, 'description': description, 'duration': float_or_none(duration), 'formats': formats, 'thumbnail': thumb, 'upload_date': unified_strdate(date), } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) return self._extract_info(webpage, video_id) class LA7PodcastIE(LA7PodcastEpisodeIE): # XXX: Do not subclass from concrete IE IE_NAME = 'la7.it:podcast' _VALID_URL = r'https?://(?:www\.)?la7\.it/(?P<id>[^/]+)/podcast/?(?:$|[#?])' _TESTS = [{ 'url': 'https://www.la7.it/propagandalive/podcast', 'info_dict': { 'id': 'propagandalive', 'title': 'Propaganda Live', }, 'playlist_mincount': 10, }] def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) title = ( self._html_search_regex( r'<h1.*?>(.+?)</h1>', webpage, 'title', fatal=False, default=None) or self._og_search_title(webpage)) ppn = self._search_regex( r'window\.ppN\s*=\s*([\'"])(?P<ppn>.+?)\1', webpage, 'ppn', group='ppn', default=None) entries = [] for episode in re.finditer( r'<div class="container-podcast-property">([\s\S]+?)(?:</div>\s*){3}', webpage): entries.append(self._extract_info(episode.group(1), ppn=ppn)) return self.playlist_result(entries, playlist_id, title)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/toutiao.py
yt_dlp/extractor/toutiao.py
import json import urllib.parse from .common import InfoExtractor from ..utils import ( float_or_none, int_or_none, str_or_none, try_call, url_or_none, ) from ..utils.traversal import find_element, traverse_obj class ToutiaoIE(InfoExtractor): IE_NAME = 'toutiao' IE_DESC = '今日头条' _VALID_URL = r'https?://www\.toutiao\.com/video/(?P<id>\d+)/?(?:[?#]|$)' _TESTS = [{ 'url': 'https://www.toutiao.com/video/7505382061495176511/', 'info_dict': { 'id': '7505382061495176511', 'ext': 'mp4', 'title': '新疆多地现不明飞行物,目击者称和月亮一样亮,几秒内突然加速消失,气象部门回应', 'comment_count': int, 'duration': 9.753, 'like_count': int, 'release_date': '20250517', 'release_timestamp': 1747483344, 'thumbnail': r're:https?://p\d+-sign\.toutiaoimg\.com/.+$', 'uploader': '极目新闻', 'uploader_id': 'MS4wLjABAAAAeateBb9Su8I3MJOZozmvyzWktmba5LMlliRDz1KffnM', 'view_count': int, }, }, { 'url': 'https://www.toutiao.com/video/7479446610359878153/', 'info_dict': { 'id': '7479446610359878153', 'ext': 'mp4', 'title': '小伙竟然利用两块磁铁制作成磁力减震器,简直太有创意了!', 'comment_count': int, 'duration': 118.374, 'like_count': int, 'release_date': '20250308', 'release_timestamp': 1741444368, 'thumbnail': r're:https?://p\d+-sign\.toutiaoimg\.com/.+$', 'uploader': '小莉创意发明', 'uploader_id': 'MS4wLjABAAAA4f7d4mwtApALtHIiq-QM20dwXqe32NUz0DeWF7wbHKw', 'view_count': int, }, }] def _real_initialize(self): if self._get_cookies('https://www.toutiao.com').get('ttwid'): return urlh = self._request_webpage( 'https://ttwid.bytedance.com/ttwid/union/register/', None, 'Fetching ttwid', 'Unable to fetch ttwid', headers={ 'Content-Type': 'application/json', }, data=json.dumps({ 'aid': 24, 'needFid': False, 'region': 'cn', 'service': 'www.toutiao.com', 'union': True, }).encode(), ) if ttwid := try_call(lambda: self._get_cookies(urlh.url)['ttwid'].value): self._set_cookie('.toutiao.com', 'ttwid', ttwid) return self.raise_login_required() def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) video_data = 
traverse_obj(webpage, ( {find_element(tag='script', id='RENDER_DATA')}, {urllib.parse.unquote}, {json.loads}, 'data', 'initialVideo', )) formats = [] for video in traverse_obj(video_data, ( 'videoPlayInfo', 'video_list', lambda _, v: v['main_url'], )): formats.append({ 'url': video['main_url'], **traverse_obj(video, ('video_meta', { 'acodec': ('audio_profile', {str}), 'asr': ('audio_sample_rate', {int_or_none}), 'audio_channels': ('audio_channels', {float_or_none}, {int_or_none}), 'ext': ('vtype', {str}), 'filesize': ('size', {int_or_none}), 'format_id': ('definition', {str}), 'fps': ('fps', {int_or_none}), 'height': ('vheight', {int_or_none}), 'tbr': ('real_bitrate', {float_or_none(scale=1000)}), 'vcodec': ('codec_type', {str}), 'width': ('vwidth', {int_or_none}), })), }) return { 'id': video_id, 'formats': formats, **traverse_obj(video_data, { 'comment_count': ('commentCount', {int_or_none}), 'duration': ('videoPlayInfo', 'video_duration', {float_or_none}), 'like_count': ('repinCount', {int_or_none}), 'release_timestamp': ('publishTime', {int_or_none}), 'thumbnail': (('poster', 'coverUrl'), {url_or_none}, any), 'title': ('title', {str}), 'uploader': ('userInfo', 'name', {str}), 'uploader_id': ('userInfo', 'userId', {str_or_none}), 'view_count': ('playCount', {int_or_none}), 'webpage_url': ('detailUrl', {url_or_none}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/bibeltv.py
yt_dlp/extractor/bibeltv.py
from .common import InfoExtractor from ..utils import ( ExtractorError, clean_html, determine_ext, format_field, int_or_none, js_to_json, orderedSet, parse_iso8601, traverse_obj, url_or_none, ) class BibelTVBaseIE(InfoExtractor): _GEO_COUNTRIES = ['AT', 'CH', 'DE'] _GEO_BYPASS = False API_URL = 'https://www.bibeltv.de/mediathek/api' AUTH_TOKEN = 'j88bRXY8DsEqJ9xmTdWhrByVi5Hm' def _extract_formats_and_subtitles(self, data, crn_id, *, is_live=False): formats = [] subtitles = {} for media_url in traverse_obj(data, (..., 'src', {url_or_none})): media_ext = determine_ext(media_url) if media_ext == 'm3u8': m3u8_formats, m3u8_subs = self._extract_m3u8_formats_and_subtitles( media_url, crn_id, live=is_live) formats.extend(m3u8_formats) subtitles.update(m3u8_subs) elif media_ext == 'mpd': mpd_formats, mpd_subs = self._extract_mpd_formats_and_subtitles(media_url, crn_id) formats.extend(mpd_formats) subtitles.update(mpd_subs) elif media_ext == 'mp4': formats.append({'url': media_url}) else: self.report_warning(f'Unknown format {media_ext!r}') return formats, subtitles @staticmethod def _extract_base_info(data): return { 'id': data['crn'], **traverse_obj(data, { 'title': 'title', 'description': 'description', 'duration': ('duration', {int_or_none(scale=1000)}), 'timestamp': ('schedulingStart', {parse_iso8601}), 'season_number': 'seasonNumber', 'episode_number': 'episodeNumber', 'view_count': 'viewCount', 'like_count': 'likeCount', }), 'thumbnails': orderedSet(traverse_obj(data, ('images', ..., { 'url': ('url', {url_or_none}), }))), } def _extract_url_info(self, data): return { '_type': 'url', 'url': format_field(data, 'slug', 'https://www.bibeltv.de/mediathek/videos/%s'), **self._extract_base_info(data), } def _extract_video_info(self, data): crn_id = data['crn'] if data.get('drm'): self.report_drm(crn_id) json_data = self._download_json( format_field(data, 'id', f'{self.API_URL}/video/%s'), crn_id, headers={'Authorization': self.AUTH_TOKEN}, fatal=False, errnote='No formats 
available') or {} formats, subtitles = self._extract_formats_and_subtitles( traverse_obj(json_data, ('video', 'videoUrls', ...)), crn_id) return { '_type': 'video', **self._extract_base_info(data), 'formats': formats, 'subtitles': subtitles, } class BibelTVVideoIE(BibelTVBaseIE): IE_DESC = 'BibelTV single video' _VALID_URL = r'https?://(?:www\.)?bibeltv\.de/mediathek/videos/(?P<id>\d+)[\w-]+' IE_NAME = 'bibeltv:video' _TESTS = [{ 'url': 'https://www.bibeltv.de/mediathek/videos/344436-alte-wege', 'md5': 'ec1c07efe54353780512e8a4103b612e', 'info_dict': { 'id': '344436', 'ext': 'mp4', 'title': 'Alte Wege', 'description': 'md5:2f4eb7294c9797a47b8fd13cccca22e9', 'timestamp': 1677877071, 'duration': 150.0, 'upload_date': '20230303', 'thumbnail': r're:https://bibeltv\.imgix\.net/[\w-]+\.jpg', 'episode': 'Episode 1', 'episode_number': 1, 'view_count': int, 'like_count': int, }, 'params': { 'format': '6', }, }] def _real_extract(self, url): crn_id = self._match_id(url) video_data = traverse_obj( self._search_nextjs_data(self._download_webpage(url, crn_id), crn_id), ('props', 'pageProps', 'videoPageData', 'videos', 0, {dict})) if not video_data: raise ExtractorError('Missing video data.') return self._extract_video_info(video_data) class BibelTVSeriesIE(BibelTVBaseIE): IE_DESC = 'BibelTV series playlist' _VALID_URL = r'https?://(?:www\.)?bibeltv\.de/mediathek/serien/(?P<id>\d+)[\w-]+' IE_NAME = 'bibeltv:series' _TESTS = [{ 'url': 'https://www.bibeltv.de/mediathek/serien/333485-ein-wunder-fuer-jeden-tag', 'playlist_mincount': 400, 'info_dict': { 'id': '333485', 'title': 'Ein Wunder für jeden Tag', 'description': 'Tägliche Kurzandacht mit Déborah Rosenkranz.', }, }] def _real_extract(self, url): crn_id = self._match_id(url) webpage = self._download_webpage(url, crn_id) nextjs_data = self._search_nextjs_data(webpage, crn_id) series_data = traverse_obj(nextjs_data, ('props', 'pageProps', 'seriePageData', {dict})) if not series_data: raise ExtractorError('Missing series data.') 
return self.playlist_result( traverse_obj(series_data, ('videos', ..., {dict}, {self._extract_url_info})), crn_id, series_data.get('title'), clean_html(series_data.get('description'))) class BibelTVLiveIE(BibelTVBaseIE): IE_DESC = 'BibelTV live program' _VALID_URL = r'https?://(?:www\.)?bibeltv\.de/livestreams/(?P<id>[\w-]+)' IE_NAME = 'bibeltv:live' _TESTS = [{ 'url': 'https://www.bibeltv.de/livestreams/bibeltv/', 'info_dict': { 'id': 'bibeltv', 'ext': 'mp4', 'title': 're:Bibel TV', 'live_status': 'is_live', 'thumbnail': 'https://streampreview.bibeltv.de/bibeltv.webp', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.bibeltv.de/livestreams/impuls/', 'only_matching': True, }] def _real_extract(self, url): stream_id = self._match_id(url) webpage = self._download_webpage(url, stream_id) stream_data = self._search_json( r'\\"video\\":', webpage, 'bibeltvData', stream_id, transform_source=lambda jstring: js_to_json(jstring.replace('\\"', '"'))) formats, subtitles = self._extract_formats_and_subtitles( traverse_obj(stream_data, ('src', ...)), stream_id, is_live=True) return { 'id': stream_id, 'title': stream_data.get('title'), 'thumbnail': stream_data.get('poster'), 'is_live': True, 'formats': formats, 'subtitles': subtitles, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/infoq.py
yt_dlp/extractor/infoq.py
import base64 import urllib.parse from .bokecc import BokeCCBaseIE from ..utils import ( ExtractorError, determine_ext, traverse_obj, update_url_query, ) class InfoQIE(BokeCCBaseIE): _VALID_URL = r'https?://(?:www\.)?infoq\.com/(?:[^/]+/)+(?P<id>[^/]+)' _TESTS = [{ 'url': 'http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things', 'md5': 'b5ca0e0a8c1fed93b0e65e48e462f9a2', 'info_dict': { 'id': 'A-Few-of-My-Favorite-Python-Things', 'ext': 'mp4', 'description': 'Mike Pirnat presents some tips and tricks, standard libraries and third party packages that make programming in Python a richer experience.', 'title': 'A Few of My Favorite [Python] Things', }, }, { 'url': 'http://www.infoq.com/fr/presentations/changez-avis-sur-javascript', 'only_matching': True, }, { 'url': 'http://www.infoq.com/cn/presentations/openstack-continued-delivery', 'md5': '4918d0cca1497f2244572caf626687ef', 'info_dict': { 'id': 'openstack-continued-delivery', 'title': 'OpenStack持续交付之路', 'ext': 'flv', 'description': 'md5:308d981fb28fa42f49f9568322c683ff', }, 'skip': 'Sorry, the page you visited does not exist', }, { 'url': 'https://www.infoq.com/presentations/Simple-Made-Easy', 'md5': '0e34642d4d9ef44bf86f66f6399672db', 'info_dict': { 'id': 'Simple-Made-Easy', 'title': 'Simple Made Easy', 'ext': 'mp3', 'description': 'md5:3e0e213a8bbd074796ef89ea35ada25b', }, 'params': { 'format': 'bestaudio', }, }] def _extract_rtmp_video(self, webpage): # The server URL is hardcoded video_url = 'rtmpe://videof.infoq.com/cfx/st/' # Extract video URL encoded_id = self._search_regex( r"jsclassref\s*=\s*'([^']*)'", webpage, 'encoded id', default=None) real_id = urllib.parse.unquote(base64.b64decode(encoded_id).decode('utf-8')) playpath = 'mp4:' + real_id return [{ 'format_id': 'rtmp_video', 'url': video_url, 'ext': determine_ext(playpath), 'play_path': playpath, }] def _extract_cf_auth(self, webpage): policy = self._search_regex(r'InfoQConstants\.scp\s*=\s*\'([^\']+)\'', webpage, 'policy') signature = 
self._search_regex(r'InfoQConstants\.scs\s*=\s*\'([^\']+)\'', webpage, 'signature') key_pair_id = self._search_regex(r'InfoQConstants\.sck\s*=\s*\'([^\']+)\'', webpage, 'key-pair-id') return { 'Policy': policy, 'Signature': signature, 'Key-Pair-Id': key_pair_id, } def _extract_http_video(self, webpage): http_video_url = self._search_regex(r'P\.s\s*=\s*\'([^\']+)\'', webpage, 'video URL') http_video_url = update_url_query(http_video_url, self._extract_cf_auth(webpage)) return [{ 'format_id': 'http_video', 'url': http_video_url, 'http_headers': {'Referer': 'https://www.infoq.com/'}, }] def _extract_http_audio(self, webpage, video_id): try: http_audio_url = traverse_obj(self._form_hidden_inputs('mp3Form', webpage), 'filename') except ExtractorError: http_audio_url = None if not http_audio_url: return [] # base URL is found in the Location header in the response returned by # GET https://www.infoq.com/mp3download.action?filename=... when logged in. http_audio_url = urllib.parse.urljoin('http://ress.infoq.com/downloads/mp3downloads/', http_audio_url) http_audio_url = update_url_query(http_audio_url, self._extract_cf_auth(webpage)) # audio file seem to be missing some times even if there is a download link # so probe URL to make sure if not self._is_valid_url(http_audio_url, video_id): return [] return [{ 'format_id': 'http_audio', 'url': http_audio_url, 'vcodec': 'none', }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) video_title = self._html_extract_title(webpage) video_description = self._html_search_meta('description', webpage, 'description') if '/cn/' in url: # for China videos, HTTP video URL exists but always fails with 403 formats = self._extract_bokecc_formats(webpage, video_id) else: formats = ( self._extract_rtmp_video(webpage) + self._extract_http_video(webpage) + self._extract_http_audio(webpage, video_id)) return { 'id': video_id, 'title': video_title, 'description': video_description, 
'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/faulio.py
yt_dlp/extractor/faulio.py
import re import urllib.parse from .common import InfoExtractor from ..utils import int_or_none, js_to_json, url_or_none from ..utils.traversal import traverse_obj class FaulioBaseIE(InfoExtractor): _DOMAINS = ( 'aloula.sba.sa', 'bahry.com', 'maraya.sba.net.ae', 'sat7plus.org', ) _LANGUAGES = ('ar', 'en', 'fa') _BASE_URL_RE = fr'https?://(?:{"|".join(map(re.escape, _DOMAINS))})/(?:(?:{"|".join(_LANGUAGES)})/)?' def _get_headers(self, url): parsed_url = urllib.parse.urlparse(url) return { 'Referer': url, 'Origin': f'{parsed_url.scheme}://{parsed_url.hostname}', } def _get_api_base(self, url, video_id): webpage = self._download_webpage(url, video_id) config_data = self._search_json( r'window\.__NUXT__\.config=', webpage, 'config', video_id, transform_source=js_to_json) return config_data['public']['TRANSLATIONS_API_URL'] class FaulioIE(FaulioBaseIE): _VALID_URL = fr'{FaulioBaseIE._BASE_URL_RE}(?:episode|media)/(?P<id>[a-zA-Z0-9-]+)' _TESTS = [{ 'url': 'https://aloula.sba.sa/en/episode/29102', 'info_dict': { 'id': 'aloula.faulio.com_29102', 'ext': 'mp4', 'display_id': 'هذا-مكانك-03-004-v-29102', 'title': 'الحلقة 4', 'episode': 'الحلقة 4', 'description': '', 'series': 'هذا مكانك', 'season': 'Season 3', 'season_number': 3, 'episode_number': 4, 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 4855, 'age_limit': 3, }, }, { 'url': 'https://bahry.com/en/media/1191', 'info_dict': { 'id': 'bahry.faulio.com_1191', 'ext': 'mp4', 'display_id': 'Episode-4-1191', 'title': 'Episode 4', 'episode': 'Episode 4', 'description': '', 'series': 'Wild Water', 'season': 'Season 1', 'season_number': 1, 'episode_number': 4, 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 1653, 'age_limit': 0, }, }, { 'url': 'https://maraya.sba.net.ae/episode/127735', 'info_dict': { 'id': 'maraya.faulio.com_127735', 'ext': 'mp4', 'display_id': 'عبدالله-الهاجري---عبدالرحمن-المطروشي-127735', 'title': 'عبدالله الهاجري - عبدالرحمن المطروشي', 'episode': 'عبدالله الهاجري - عبدالرحمن المطروشي', 'description': 
'md5:53de01face66d3d6303221e5a49388a0', 'series': 'أبناؤنا في الخارج', 'season': 'Season 3', 'season_number': 3, 'episode_number': 7, 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 1316, 'age_limit': 0, }, }, { 'url': 'https://sat7plus.org/episode/18165', 'info_dict': { 'id': 'sat7.faulio.com_18165', 'ext': 'mp4', 'display_id': 'ep-13-ADHD-18165', 'title': 'ADHD and creativity', 'episode': 'ADHD and creativity', 'description': '', 'series': 'ADHD Podcast', 'season': 'Season 1', 'season_number': 1, 'episode_number': 13, 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 2492, 'age_limit': 0, }, }, { 'url': 'https://aloula.sba.sa/en/episode/0', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) api_base = self._get_api_base(url, video_id) video_info = self._download_json(f'{api_base}/video/{video_id}', video_id, fatal=False) player_info = self._download_json(f'{api_base}/video/{video_id}/player', video_id) headers = self._get_headers(url) formats = [] subtitles = {} if hls_url := traverse_obj(player_info, ('settings', 'protocols', 'hls', {url_or_none})): fmts, subs = self._extract_m3u8_formats_and_subtitles( hls_url, video_id, 'mp4', m3u8_id='hls', fatal=False, headers=headers) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) if mpd_url := traverse_obj(player_info, ('settings', 'protocols', 'dash', {url_or_none})): fmts, subs = self._extract_mpd_formats_and_subtitles( mpd_url, video_id, mpd_id='dash', fatal=False, headers=headers) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) return { 'id': f'{urllib.parse.urlparse(api_base).hostname}_{video_id}', **traverse_obj(traverse_obj(video_info, ('blocks', 0)), { 'display_id': ('slug', {str}), 'title': ('title', {str}), 'episode': ('title', {str}), 'description': ('description', {str}), 'series': ('program_title', {str}), 'season_number': ('season_number', {int_or_none}), 'episode_number': ('episode', {int_or_none}), 'thumbnail': ('image', 
{url_or_none}), 'duration': ('duration', 'total', {int_or_none}), 'age_limit': ('age_rating', {int_or_none}), }), 'formats': formats, 'subtitles': subtitles, 'http_headers': headers, } class FaulioLiveIE(FaulioBaseIE): _VALID_URL = fr'{FaulioBaseIE._BASE_URL_RE}live/(?P<id>[a-zA-Z0-9-]+)' _TESTS = [{ 'url': 'https://aloula.sba.sa/live/saudiatv', 'info_dict': { 'id': 'aloula.faulio.com_saudiatv', 'title': str, 'description': str, 'ext': 'mp4', 'live_status': 'is_live', }, 'params': { 'skip_download': 'Livestream', }, }, { 'url': 'https://bahry.com/live/1', 'info_dict': { 'id': 'bahry.faulio.com_1', 'title': str, 'description': str, 'ext': 'mp4', 'live_status': 'is_live', }, 'params': { 'skip_download': 'Livestream', }, }, { 'url': 'https://maraya.sba.net.ae/live/1', 'info_dict': { 'id': 'maraya.faulio.com_1', 'title': str, 'description': str, 'ext': 'mp4', 'live_status': 'is_live', }, 'params': { 'skip_download': 'Livestream', }, }, { 'url': 'https://sat7plus.org/live/pars', 'info_dict': { 'id': 'sat7.faulio.com_pars', 'title': str, 'description': str, 'ext': 'mp4', 'live_status': 'is_live', }, 'params': { 'skip_download': 'Livestream', }, }, { 'url': 'https://sat7plus.org/fa/live/arabic', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) api_base = self._get_api_base(url, video_id) channel = traverse_obj( self._download_json(f'{api_base}/channels', video_id), (lambda k, v: v['url'] == video_id, any)) headers = self._get_headers(url) formats = [] subtitles = {} if hls_url := traverse_obj(channel, ('streams', 'hls', {url_or_none})): fmts, subs = self._extract_m3u8_formats_and_subtitles( hls_url, video_id, 'mp4', m3u8_id='hls', live=True, fatal=False, headers=headers) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) if mpd_url := traverse_obj(channel, ('streams', 'mpd', {url_or_none})): fmts, subs = self._extract_mpd_formats_and_subtitles( mpd_url, video_id, mpd_id='dash', fatal=False, headers=headers) 
formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) return { 'id': f'{urllib.parse.urlparse(api_base).hostname}_{video_id}', **traverse_obj(channel, { 'title': ('title', {str}), 'description': ('description', {str}), }), 'formats': formats, 'subtitles': subtitles, 'http_headers': headers, 'is_live': True, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/hollywoodreporter.py
yt_dlp/extractor/hollywoodreporter.py
import functools import re from .common import InfoExtractor from .jwplatform import JWPlatformIE from ..utils import ( ExtractorError, OnDemandPagedList, extract_attributes, get_element_by_class, get_element_html_by_class, ) class HollywoodReporterIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?hollywoodreporter\.com/video/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://www.hollywoodreporter.com/video/chris-pine-michelle-rodriguez-dungeons-dragons-cast-directors-on-what-it-took-to-make-film-sxsw-2023/', 'info_dict': { 'id': 'zH4jZaR5', 'ext': 'mp4', 'title': 'md5:a9a1c073770a32f178955997712c4bd9', 'description': 'The cast and directors of \'Dungeons & Dragons: Honor Among Thieves\' talk about their new film.', 'thumbnail': 'https://cdn.jwplayer.com/v2/media/zH4jZaR5/poster.jpg?width=720', 'upload_date': '20230312', 'timestamp': 1678586423, 'duration': 242.0, }, 'params': {'skip_download': 'm3u8'}, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) data = extract_attributes(get_element_html_by_class('vlanding-video-card__link', webpage) or '') video_id = data['data-video-showcase-trigger'] showcase_type = data['data-video-showcase-type'] if showcase_type == 'jwplayer': return self.url_result(f'jwplatform:{video_id}', JWPlatformIE) elif showcase_type == 'youtube': return self.url_result(video_id, 'Youtube') else: raise ExtractorError(f'Unsupported showcase type "{showcase_type}"') class HollywoodReporterPlaylistIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?hollywoodreporter\.com/vcategory/(?P<slug>[\w-]+)-(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.hollywoodreporter.com/vcategory/heat-vision-breakdown-57822/', 'playlist_mincount': 109, 'info_dict': { 'id': '57822', 'title': 'heat-vision-breakdown', }, }] def _fetch_page(self, slug, pl_id, page): page += 1 webpage = self._download_webpage( f'https://www.hollywoodreporter.com/vcategory/{slug}-{pl_id}/page/{page}/', pl_id, note=f'Downloading 
playlist page {page}') section = get_element_by_class('video-playlist-river', webpage) or '' for url in re.findall(r'<a[^>]+href="([^"]+)"[^>]+class="c-title__link', section): yield self.url_result(url, HollywoodReporterIE) def _real_extract(self, url): slug, pl_id = self._match_valid_url(url).group('slug', 'id') return self.playlist_result( OnDemandPagedList(functools.partial(self._fetch_page, slug, pl_id), 15), pl_id, slug)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nonktube.py
yt_dlp/extractor/nonktube.py
from .nuevo import NuevoBaseIE class NonkTubeIE(NuevoBaseIE): _VALID_URL = r'https?://(?:www\.)?nonktube\.com/(?:(?:video|embed)/|media/nuevo/embed\.php\?.*?\bid=)(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.nonktube.com/video/118636/sensual-wife-uncensored-fucked-in-hairy-pussy-and-facialized', 'info_dict': { 'id': '118636', 'ext': 'mp4', 'title': 'Sensual Wife Uncensored Fucked In Hairy Pussy And Facialized', 'age_limit': 18, 'duration': 1150.98, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.nonktube.com/embed/118636', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._og_search_title(webpage) info = self._parse_html5_media_entries(url, webpage, video_id)[0] info.update({ 'id': video_id, 'title': title, 'age_limit': 18, }) return info
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/bet.py
yt_dlp/extractor/bet.py
from .mtv import MTVServicesBaseIE class BetIE(MTVServicesBaseIE): _VALID_URL = r'https?://(?:www\.)?bet\.com/(?:video-clips|episodes)/(?P<id>[\da-z]{6})' _TESTS = [{ 'url': 'https://www.bet.com/video-clips/w9mk7v', 'info_dict': { 'id': '3022d121-d191-43fd-b5fb-b2c26f335497', 'ext': 'mp4', 'display_id': 'w9mk7v', 'title': 'New Normal', 'description': 'md5:d7898c124713b4646cecad9d16ff01f3', 'duration': 30.08, 'series': 'Tyler Perry\'s Sistas', 'season': 'Season 0', 'season_number': 0, 'episode': 'Episode 0', 'episode_number': 0, 'timestamp': 1755269073, 'upload_date': '20250815', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.bet.com/episodes/nmce72/tyler-perry-s-sistas-heavy-is-the-crown-season-9-ep-5', 'info_dict': { 'id': '6427562b-3029-11f0-b405-16fff45bc035', 'ext': 'mp4', 'display_id': 'nmce72', 'title': 'Heavy Is the Crown', 'description': 'md5:1ed345d3157a50572d2464afcc7a652a', 'channel': 'BET', 'duration': 2550.0, 'thumbnail': r're:https://images\.paramount\.tech/uri/mgid:arc:imageassetref', 'series': 'Tyler Perry\'s Sistas', 'season': 'Season 9', 'season_number': 9, 'episode': 'Episode 5', 'episode_number': 5, 'timestamp': 1755165600, 'upload_date': '20250814', 'release_timestamp': 1755129600, 'release_date': '20250814', }, 'params': {'skip_download': 'm3u8'}, 'skip': 'Requires provider sign-in', }]
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/abcnews.py
yt_dlp/extractor/abcnews.py
from .amp import AMPIE from .common import InfoExtractor from ..utils import ( parse_duration, parse_iso8601, try_get, ) class AbcNewsVideoIE(AMPIE): IE_NAME = 'abcnews:video' _VALID_URL = r'''(?x) https?:// (?: abcnews\.go\.com/ (?: (?:[^/]+/)*video/(?P<display_id>[0-9a-z-]+)-| video/(?:embed|itemfeed)\?.*?\bid= )| fivethirtyeight\.abcnews\.go\.com/video/embed/\d+/ ) (?P<id>\d+) ''' _TESTS = [{ 'url': 'http://abcnews.go.com/ThisWeek/video/week-exclusive-irans-foreign-minister-zarif-20411932', 'info_dict': { 'id': '20411932', 'ext': 'mp4', 'display_id': 'week-exclusive-irans-foreign-minister-zarif', 'title': '\'This Week\' Exclusive: Iran\'s Foreign Minister Zarif', 'description': 'George Stephanopoulos goes one-on-one with Iranian Foreign Minister Dr. Javad Zarif.', 'duration': 180, 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1380454200, 'upload_date': '20130929', }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://abcnews.go.com/video/embed?id=46979033', 'only_matching': True, }, { 'url': 'http://abcnews.go.com/2020/video/2020-husband-stands-teacher-jail-student-affairs-26119478', 'only_matching': True, }, { 'url': 'http://abcnews.go.com/video/itemfeed?id=46979033', 'only_matching': True, }, { 'url': 'https://abcnews.go.com/GMA/News/video/history-christmas-story-67894761', 'only_matching': True, }] def _real_extract(self, url): mobj = self._match_valid_url(url) display_id = mobj.group('display_id') video_id = mobj.group('id') info_dict = self._extract_feed_info( f'http://abcnews.go.com/video/itemfeed?id={video_id}') info_dict.update({ 'id': video_id, 'display_id': display_id, }) return info_dict class AbcNewsIE(InfoExtractor): IE_NAME = 'abcnews' _VALID_URL = r'https?://abcnews\.go\.com/(?:[^/]+/)+(?P<display_id>[0-9a-z-]+)/story\?id=(?P<id>\d+)' _TESTS = [{ # Youtube Embeds 'url': 'https://abcnews.go.com/Entertainment/peter-billingsley-child-actor-christmas-story-hollywood-power/story?id=51286501', 'info_dict': { 'id': 
'51286501', 'title': "Peter Billingsley: From child actor in 'A Christmas Story' to Hollywood power player", 'description': 'Billingsley went from a child actor to Hollywood power player.', }, 'playlist_count': 5, }, { 'url': 'http://abcnews.go.com/Entertainment/justin-timberlake-performs-stop-feeling-eurovision-2016/story?id=39125818', 'info_dict': { 'id': '38897857', 'ext': 'mp4', 'title': 'Justin Timberlake Drops Hints For Secret Single', 'description': 'Lara Spencer reports the buzziest stories of the day in "GMA" Pop News.', 'upload_date': '20160505', 'timestamp': 1462442280, }, 'params': { # m3u8 download 'skip_download': True, # The embedded YouTube video is blocked due to copyright issues 'playlist_items': '1', }, 'add_ie': ['AbcNewsVideo'], }, { 'url': 'http://abcnews.go.com/Technology/exclusive-apple-ceo-tim-cook-iphone-cracking-software/story?id=37173343', 'only_matching': True, }, { # inline.type == 'video' 'url': 'http://abcnews.go.com/Technology/exclusive-apple-ceo-tim-cook-iphone-cracking-software/story?id=37173343', 'only_matching': True, }] def _real_extract(self, url): story_id = self._match_id(url) webpage = self._download_webpage(url, story_id) story = self._parse_json(self._search_regex( r"window\['__abcnews__'\]\s*=\s*({.+?});", webpage, 'data'), story_id)['page']['content']['story']['everscroll'][0] article_contents = story.get('articleContents') or {} def entries(): featured_video = story.get('featuredVideo') or {} feed = try_get(featured_video, lambda x: x['video']['feed']) if feed: yield { '_type': 'url', 'id': featured_video.get('id'), 'title': featured_video.get('name'), 'url': feed, 'thumbnail': featured_video.get('images'), 'description': featured_video.get('description'), 'timestamp': parse_iso8601(featured_video.get('uploadDate')), 'duration': parse_duration(featured_video.get('duration')), 'ie_key': AbcNewsVideoIE.ie_key(), } for inline in (article_contents.get('inlines') or []): inline_type = inline.get('type') if inline_type == 
'iframe': iframe_url = try_get(inline, lambda x: x['attrs']['src']) if iframe_url: yield self.url_result(iframe_url) elif inline_type == 'video': video_id = inline.get('id') if video_id: yield { '_type': 'url', 'id': video_id, 'url': 'http://abcnews.go.com/video/embed?id=' + video_id, 'thumbnail': inline.get('imgSrc') or inline.get('imgDefault'), 'description': inline.get('description'), 'duration': parse_duration(inline.get('duration')), 'ie_key': AbcNewsVideoIE.ie_key(), } return self.playlist_result( entries(), story_id, article_contents.get('headline'), article_contents.get('subHead'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/huya.py
yt_dlp/extractor/huya.py
import base64 import hashlib import random import re import urllib.parse from .common import InfoExtractor from ..utils import ( ExtractorError, clean_html, int_or_none, parse_duration, str_or_none, try_get, unescapeHTML, update_url, update_url_query, url_or_none, ) from ..utils.traversal import traverse_obj class HuyaLiveIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.|m\.)?huya\.com/(?!(?:video/play/))(?P<id>[^/#?&]+)(?:\D|$)' IE_NAME = 'huya:live' IE_DESC = '虎牙直播' _TESTS = [{ 'url': 'https://www.huya.com/572329', 'info_dict': { 'id': '572329', 'title': str, 'ext': 'flv', 'description': str, 'is_live': True, 'view_count': int, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.huya.com/xiaoyugame', 'only_matching': True, }] _RESOLUTION = { '蓝光': { 'width': 1920, 'height': 1080, }, '超清': { 'width': 1280, 'height': 720, }, '流畅': { 'width': 800, 'height': 480, }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id=video_id) stream_data = self._search_json(r'stream:\s', webpage, 'stream', video_id=video_id, default=None) room_info = try_get(stream_data, lambda x: x['data'][0]['gameLiveInfo']) if not room_info: raise ExtractorError('Can not extract the room info', expected=True) title = room_info.get('roomName') or room_info.get('introduction') or self._html_extract_title(webpage) screen_type = room_info.get('screenType') live_source_type = room_info.get('liveSourceType') stream_info_list = stream_data['data'][0]['gameStreamInfoList'] if not stream_info_list: raise ExtractorError('Video is offline', expected=True) formats = [] for stream_info in stream_info_list: stream_url = stream_info.get('sFlvUrl') if not stream_url: continue stream_name = stream_info.get('sStreamName') re_secret = not screen_type and live_source_type in (0, 8, 13) params = dict(urllib.parse.parse_qsl(unescapeHTML(stream_info['sFlvAntiCode']))) fm, ss = '', '' if re_secret: fm, ss = self.encrypt(params, stream_info, 
stream_name) for si in stream_data.get('vMultiStreamInfo'): display_name, bitrate = re.fullmatch( r'(.+?)(?:(\d+)M)?', si.get('sDisplayName')).groups() rate = si.get('iBitRate') if rate: params['ratio'] = rate else: params.pop('ratio', None) if bitrate: rate = int(bitrate) * 1000 if re_secret: params['wsSecret'] = hashlib.md5( '_'.join([fm, params['u'], stream_name, ss, params['wsTime']])) formats.append({ 'ext': stream_info.get('sFlvUrlSuffix'), 'format_id': str_or_none(stream_info.get('iLineIndex')), 'tbr': rate, 'url': update_url_query(f'{stream_url}/{stream_name}.{stream_info.get("sFlvUrlSuffix")}', query=params), **self._RESOLUTION.get(display_name, {}), }) return { 'id': video_id, 'title': title, 'formats': formats, 'view_count': room_info.get('totalCount'), 'thumbnail': room_info.get('screenshot'), 'description': room_info.get('contentIntro'), 'http_headers': { 'Origin': 'https://www.huya.com', 'Referer': 'https://www.huya.com/', }, } def encrypt(self, params, stream_info, stream_name): ct = int_or_none(params.get('wsTime'), 16) + random.random() presenter_uid = stream_info['lPresenterUid'] if not stream_name.startswith(str(presenter_uid)): uid = presenter_uid else: uid = int_or_none(ct % 1e7 * 1e6 % 0xffffffff) u1 = uid & 0xffffffff00000000 u2 = uid & 0xffffffff u3 = uid & 0xffffff u = u1 | u2 >> 24 | u3 << 8 params.update({ 'u': str_or_none(u), 'seqid': str_or_none(int_or_none(ct * 1000) + uid), 'ver': '1', 'uuid': int_or_none(ct % 1e7 * 1e6 % 0xffffffff), 't': '100', }) fm = base64.b64decode(params['fm']).decode().split('_', 1)[0] ss = hashlib.md5('|'.join([params['seqid'], params['ctype'], params['t']])) return fm, ss class HuyaVideoIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?huya\.com/video/play/(?P<id>\d+)\.html' IE_NAME = 'huya:video' IE_DESC = '虎牙视频' _TESTS = [{ 'url': 'https://www.huya.com/video/play/1002412640.html', 'info_dict': { 'id': '1002412640', 'ext': 'mp4', 'title': '8月3日', 'categories': ['主机游戏'], 'duration': 14.0, 'uploader': 
'虎牙-ATS欧卡车队青木', 'uploader_id': '1564376151', 'upload_date': '20240803', 'view_count': int, 'comment_count': int, 'like_count': int, 'thumbnail': r're:https?://.+\.jpg', 'timestamp': 1722675433, }, }, { 'url': 'https://www.huya.com/video/play/556054543.html', 'info_dict': { 'id': '556054543', 'ext': 'mp4', 'title': '我不挑事 也不怕事', 'categories': ['英雄联盟'], 'description': 'md5:58184869687d18ce62dc7b4b2ad21201', 'duration': 1864.0, 'uploader': '卡尔', 'uploader_id': '367138632', 'upload_date': '20210811', 'view_count': int, 'comment_count': int, 'like_count': int, 'tags': 'count:4', 'thumbnail': r're:https?://.+\.jpg', 'timestamp': 1628675950, }, }, { # Only m3u8 available 'url': 'https://www.huya.com/video/play/1063345618.html', 'info_dict': { 'id': '1063345618', 'ext': 'mp4', 'title': '峡谷第一中!黑铁上钻石顶级教学对抗elo', 'categories': ['英雄联盟'], 'comment_count': int, 'duration': 21603.0, 'like_count': int, 'thumbnail': r're:https?://.+\.jpg', 'timestamp': 1749668803, 'upload_date': '20250611', 'uploader': '北枫CC', 'uploader_id': '2183525275', 'view_count': int, }, }] def _real_extract(self, url: str): video_id = self._match_id(url) moment = self._download_json( 'https://liveapi.huya.com/moment/getMomentContent', video_id, query={'videoId': video_id})['data']['moment'] formats = [] for definition in traverse_obj(moment, ( 'videoInfo', 'definitions', lambda _, v: url_or_none(v['m3u8']), )): fmts = self._extract_m3u8_formats(definition['m3u8'], video_id, 'mp4', fatal=False) for fmt in fmts: fmt.update(**traverse_obj(definition, { 'filesize': ('size', {int_or_none}), 'format_id': ('defName', {str}), 'height': ('height', {int_or_none}), 'quality': ('definition', {int_or_none}), 'width': ('width', {int_or_none}), })) formats.extend(fmts) return { 'id': video_id, 'formats': formats, **traverse_obj(moment, { 'comment_count': ('commentCount', {int_or_none}), 'description': ('content', {clean_html}, filter), 'like_count': ('favorCount', {int_or_none}), 'timestamp': ('cTime', {int_or_none}), }), 
**traverse_obj(moment, ('videoInfo', { 'title': ('videoTitle', {str}), 'categories': ('category', {str}, filter, all, filter), 'duration': ('videoDuration', {parse_duration}), 'tags': ('tags', ..., {str}, filter, all, filter), 'thumbnail': (('videoBigCover', 'videoCover'), {url_or_none}, {update_url(query=None)}, any), 'uploader': ('nickName', {str}), 'uploader_id': ('uid', {str_or_none}), 'view_count': ('videoPlayNum', {int_or_none}), })), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/googlepodcasts.py
yt_dlp/extractor/googlepodcasts.py
import json from .common import InfoExtractor from ..utils import ( clean_podcast_url, int_or_none, try_get, urlencode_postdata, ) class GooglePodcastsBaseIE(InfoExtractor): _VALID_URL_BASE = r'https?://podcasts\.google\.com/feed/' def _batch_execute(self, func_id, video_id, params): return json.loads(self._download_json( 'https://podcasts.google.com/_/PodcastsUi/data/batchexecute', video_id, data=urlencode_postdata({ 'f.req': json.dumps([[[func_id, json.dumps(params), None, '1']]]), }), transform_source=lambda x: self._search_regex(r'(?s)(\[.+\])', x, 'data'))[0][2]) def _extract_episode(self, episode): return { 'id': episode[4][3], 'title': episode[8], 'url': clean_podcast_url(episode[13]), 'thumbnail': episode[2], 'description': episode[9], 'creator': try_get(episode, lambda x: x[14]), 'timestamp': int_or_none(episode[11]), 'duration': int_or_none(episode[12]), 'series': episode[1], } class GooglePodcastsIE(GooglePodcastsBaseIE): IE_NAME = 'google:podcasts' _VALID_URL = GooglePodcastsBaseIE._VALID_URL_BASE + r'(?P<feed_url>[^/]+)/episode/(?P<id>[^/?&#]+)' _TEST = { 'url': 'https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5ucHIub3JnLzM0NDA5ODUzOS9wb2RjYXN0LnhtbA/episode/MzBlNWRlN2UtOWE4Yy00ODcwLTk2M2MtM2JlMmUyNmViOTRh', 'md5': 'fa56b2ee8bd0703e27e42d4b104c4766', 'info_dict': { 'id': '30e5de7e-9a8c-4870-963c-3be2e26eb94a', 'ext': 'mp3', 'title': 'WWDTM New Year 2021', 'description': 'We say goodbye to 2020 with Christine Baranksi, Doug Jones, Jonna Mendez, and Kellee Edwards.', 'upload_date': '20210102', 'timestamp': 1609606800, 'duration': 2901, 'series': "Wait Wait... 
Don't Tell Me!", }, } def _real_extract(self, url): b64_feed_url, b64_guid = self._match_valid_url(url).groups() episode = self._batch_execute( 'oNjqVe', b64_guid, [b64_feed_url, b64_guid])[1] return self._extract_episode(episode) class GooglePodcastsFeedIE(GooglePodcastsBaseIE): IE_NAME = 'google:podcasts:feed' _VALID_URL = GooglePodcastsBaseIE._VALID_URL_BASE + r'(?P<id>[^/?&#]+)/?(?:[?#&]|$)' _TEST = { 'url': 'https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5ucHIub3JnLzM0NDA5ODUzOS9wb2RjYXN0LnhtbA', 'info_dict': { 'title': "Wait Wait... Don't Tell Me!", 'description': "NPR's weekly current events quiz. Have a laugh and test your news knowledge while figuring out what's real and what we've made up.", }, 'playlist_mincount': 20, } def _real_extract(self, url): b64_feed_url = self._match_id(url) data = self._batch_execute('ncqJEe', b64_feed_url, [b64_feed_url]) entries = [] for episode in (try_get(data, lambda x: x[1][0]) or []): entries.append(self._extract_episode(episode)) feed = try_get(data, lambda x: x[3]) or [] return self.playlist_result( entries, playlist_title=try_get(feed, lambda x: x[0]), playlist_description=try_get(feed, lambda x: x[2]))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/telecinco.py
yt_dlp/extractor/telecinco.py
import json import re from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, clean_html, extract_attributes, int_or_none, join_nonempty, str_or_none, update_url, url_or_none, ) from ..utils.traversal import traverse_obj class TelecincoBaseIE(InfoExtractor): def _parse_content(self, content, url): video_id = content['dataMediaId'][1] config = self._download_json( content['dataConfig'][1], video_id, 'Downloading config JSON') services = config['services'] caronte = self._download_json(services['caronte'], video_id) if traverse_obj(caronte, ('dls', 0, 'drm', {bool})): self.report_drm(video_id) stream = caronte['dls'][0]['stream'] headers = { 'Referer': url, 'Origin': re.match(r'https?://[^/]+', url).group(0), } geo_headers = {**headers, **self.geo_verification_headers()} try: cdn = self._download_json( caronte['cerbero'], video_id, data=json.dumps({ 'bbx': caronte['bbx'], 'gbx': self._download_json(services['gbx'], video_id)['gbx'], }).encode(), headers={ 'Content-Type': 'application/json', **geo_headers, })['tokens']['1']['cdn'] except ExtractorError as error: if isinstance(error.cause, HTTPError) and error.cause.status == 403: error_code = traverse_obj( self._webpage_read_content(error.cause.response, caronte['cerbero'], video_id, fatal=False), ({json.loads}, 'code', {int})) if error_code in (4036, 4038, 40313): self.raise_geo_restricted(countries=['ES']) raise formats = self._extract_m3u8_formats( update_url(stream, query=cdn), video_id, 'mp4', m3u8_id='hls', headers=geo_headers) return { 'id': video_id, 'title': traverse_obj(config, ('info', 'title', {str})), 'formats': formats, 'thumbnail': (traverse_obj(content, ('dataPoster', 1, {url_or_none})) or traverse_obj(config, 'poster', 'imageUrl', expected_type=url_or_none)), 'duration': traverse_obj(content, ('dataDuration', 1, {int_or_none})), 'http_headers': headers, } class TelecincoIE(TelecincoBaseIE): IE_DESC = 'telecinco.es, cuatro.com and 
mediaset.es' _VALID_URL = r'https?://(?:www\.)?(?:telecinco\.es|cuatro\.com|mediaset\.es)/(?:[^/]+/)+(?P<id>.+?)\.html' _TESTS = [{ 'url': 'http://www.telecinco.es/robinfood/temporada-01/t01xp14/Bacalao-cocochas-pil-pil_0_1876350223.html', 'info_dict': { 'id': '1876350223', 'title': 'Bacalao con kokotxas al pil-pil', 'description': 'md5:716caf5601e25c3c5ab6605b1ae71529', }, 'playlist': [{ 'md5': '7ee56d665cfd241c0e6d80fd175068b0', 'info_dict': { 'id': 'JEA5ijCnF6p5W08A1rNKn7', 'ext': 'mp4', 'title': 'Con Martín Berasategui, hacer un bacalao al pil-pil es fácil y divertido', 'duration': 662, }, }], 'skip': 'HTTP Error 410 Gone', }, { 'url': 'http://www.cuatro.com/deportes/futbol/barcelona/Leo_Messi-Champions-Roma_2_2052780128.html', 'md5': 'c86fe0d99e3bdb46b7950d38bf6ef12a', 'info_dict': { 'id': 'jn24Od1zGLG4XUZcnUnZB6', 'ext': 'mp4', 'title': '¿Quién es este ex futbolista con el que hablan Leo Messi y Luis Suárez?', 'description': 'md5:a62ecb5f1934fc787107d7b9a2262805', 'duration': 79, }, 'skip': 'Redirects to main page', }, { 'url': 'http://www.mediaset.es/12meses/campanas/doylacara/conlatratanohaytrato/Ayudame-dar-cara-trata-trato_2_1986630220.html', 'md5': '5ce057f43f30b634fbaf0f18c71a140a', 'info_dict': { 'id': 'aywerkD2Sv1vGNqq9b85Q2', 'ext': 'mp4', 'title': '#DOYLACARA. 
Con la trata no hay trato', 'duration': 50, 'thumbnail': 'https://album.mediaset.es/eimg/2017/11/02/1tlQLO5Q3mtKT24f3EaC24.jpg', }, }, { # video in opening's content 'url': 'https://www.telecinco.es/vivalavida/fiorella-sobrina-edmundo-arrocet-entrevista_18_2907195140.html', 'info_dict': { 'id': '1691427', 'title': 'La surrealista entrevista a la sobrina de Edmundo Arrocet: "No puedes venir aquí y tomarnos por tontos"', 'description': r're:Fiorella, la sobrina de Edmundo Arrocet, concedió .{727}', }, 'playlist': [{ 'md5': 'adb28c37238b675dad0f042292f209a7', 'info_dict': { 'id': 'TpI2EttSDAReWpJ1o0NVh2', 'ext': 'mp4', 'title': 'La surrealista entrevista a la sobrina de Edmundo Arrocet: "No puedes venir aquí y tomarnos por tontos"', 'duration': 1015, 'thumbnail': 'https://album.mediaset.es/eimg/2020/02/29/5opaC37lUhKlZ7FoDhiVC.jpg', }, }], 'params': { 'skip_download': True, }, }, { 'url': 'http://www.telecinco.es/informativos/nacional/Pablo_Iglesias-Informativos_Telecinco-entrevista-Pedro_Piqueras_2_1945155182.html', 'only_matching': True, }, { 'url': 'http://www.telecinco.es/espanasinirmaslejos/Espana-gran-destino-turistico_2_1240605043.html', 'only_matching': True, }, { 'url': 'http://www.cuatro.com/chesterinlove/a-carta/chester-chester_in_love-chester_edu_2_2331030022.html', 'only_matching': True, }] _ASTRO_ISLAND_RE = re.compile(r'<astro-island\b[^>]+>') def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id, impersonate=True) props_list = traverse_obj(webpage, ( {self._ASTRO_ISLAND_RE.findall}, ..., {extract_attributes}, 'props', {json.loads})) description = traverse_obj(props_list, (..., 'leadParagraph', 1, {clean_html}, any, filter)) main_content = traverse_obj(props_list, (..., ('content', ('articleData', 1, 'opening')), 1, {dict}, any)) if traverse_obj(props_list, (..., 'editorialType', 1, {str}, any)) != 'VID': # e.g. 
'ART' entries = [] for p in traverse_obj(props_list, (..., 'articleData', 1, ('opening', ('body', 1, ...)), 1, {dict})): type_ = traverse_obj(p, ('type', 1, {str})) content = traverse_obj(p, ('content', 1, {str} if type_ == 'paragraph' else {dict})) if not content: continue if type_ == 'paragraph': description = join_nonempty(description, content, delim='') elif type_ == 'video': entries.append(self._parse_content(content, url)) else: self.report_warning( f'Skipping unsupported content type "{type_}"', display_id, only_once=True) return self.playlist_result( entries, traverse_obj(props_list, (..., 'id', 1, {int}, {str_or_none}, any)) or display_id, traverse_obj(main_content, ('dataTitle', 1, {str})), clean_html(description)) if not main_content: raise ExtractorError('Unable to extract main content from webpage') info = self._parse_content(main_content, url) info['description'] = description return info
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/expressen.py
yt_dlp/extractor/expressen.py
from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, unescapeHTML, unified_timestamp, ) class ExpressenIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// (?:www\.)?(?:expressen|di)\.se/ (?:(?:tvspelare/video|video-?player/embed)/)? (?:tv|nyheter)/(?:[^/?#]+/)* (?P<id>[^/?#&]+) ''' _EMBED_REGEX = [r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//(?:www\.)?(?:expressen|di)\.se/(?:tvspelare/video|videoplayer/embed)/tv/.+?)\1'] _TESTS = [{ 'url': 'https://www.expressen.se/tv/ledare/ledarsnack/ledarsnack-om-arbetslosheten-bland-kvinnor-i-speciellt-utsatta-omraden/', 'md5': 'deb2ca62e7b1dcd19fa18ba37523f66e', 'info_dict': { 'id': 'ba90f5a9-78d1-4511-aa02-c177b9c99136', 'display_id': 'ledarsnack-om-arbetslosheten-bland-kvinnor-i-speciellt-utsatta-omraden', 'ext': 'mp4', 'title': 'Ledarsnack: Om arbetslösheten bland kvinnor i speciellt utsatta områden', 'description': 'md5:f38c81ff69f3de4d269bbda012fcbbba', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 788, 'timestamp': 1526639109, 'upload_date': '20180518', }, }, { 'url': 'https://www.expressen.se/tv/kultur/kulturdebatt-med-expressens-karin-olsson/', 'only_matching': True, }, { 'url': 'https://www.expressen.se/tvspelare/video/tv/ditv/ekonomistudion/experterna-har-ar-fragorna-som-avgor-valet/?embed=true&external=true&autoplay=true&startVolume=0&partnerId=di', 'only_matching': True, }, { 'url': 'https://www.expressen.se/videoplayer/embed/tv/ditv/ekonomistudion/experterna-har-ar-fragorna-som-avgor-valet/?embed=true&external=true&autoplay=true&startVolume=0&partnerId=di', 'only_matching': True, }, { 'url': 'https://www.di.se/videoplayer/embed/tv/ditv/borsmorgon/implantica-rusar-70--under-borspremiaren-hor-styrelsemedlemmen/?embed=true&external=true&autoplay=true&startVolume=0&partnerId=di', 'only_matching': True, }, { 'url': 'https://www.expressen.se/video-player/embed/tv/nyheter/ekero-fodda-olof-gustafsson-forvaltar-knarkbaronen-pablo-escobars-namn', 'only_matching': True, }, { 'url': 
'https://www.expressen.se/nyheter/efter-egna-telefonbluffen-escobar-stammer-klarna/', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) def extract_data(name): return self._parse_json( self._search_regex( rf'data-{name}=(["\'])(?P<value>(?:(?!\1).)+)\1', webpage, 'info', group='value'), display_id, transform_source=unescapeHTML) info = extract_data('video-tracking-info') video_id = info['contentId'] data = extract_data('article-data') stream = data['stream'] if determine_ext(stream) == 'm3u8': formats = self._extract_m3u8_formats( stream, display_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls') else: formats = [{ 'url': stream, }] title = info.get('titleRaw') or data['title'] description = info.get('descriptionRaw') thumbnail = info.get('socialMediaImage') or data.get('image') duration = int_or_none(info.get('videoTotalSecondsDuration') or data.get('totalSecondsDuration')) timestamp = unified_timestamp(info.get('publishDate')) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'timestamp': timestamp, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/samplefocus.py
yt_dlp/extractor/samplefocus.py
import re from .common import InfoExtractor from ..utils import ( extract_attributes, get_element_by_attribute, int_or_none, ) class SampleFocusIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?samplefocus\.com/samples/(?P<id>[^/?&#]+)' _TESTS = [{ 'url': 'https://samplefocus.com/samples/lil-peep-sad-emo-guitar', 'md5': '48c8d62d60be467293912e0e619a5120', 'info_dict': { 'id': '40316', 'display_id': 'lil-peep-sad-emo-guitar', 'ext': 'mp3', 'title': 'Lil Peep Sad Emo Guitar', 'thumbnail': r're:^https?://.+\.png', 'license': 'Standard License', 'uploader': 'CapsCtrl', 'uploader_id': 'capsctrl', 'like_count': int, 'comment_count': int, 'categories': ['Samples', 'Guitar', 'Electric guitar'], }, }, { 'url': 'https://samplefocus.com/samples/dababy-style-bass-808', 'only_matching': True, }, { 'url': 'https://samplefocus.com/samples/young-chop-kick', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id, impersonate=True) sample_id = self._search_regex( r'<input[^>]+id=(["\'])sample_id\1[^>]+value=(?:["\'])(?P<id>\d+)', webpage, 'sample id', group='id') title = self._og_search_title(webpage, fatal=False) or self._html_search_regex( r'<h1>(.+?)</h1>', webpage, 'title') mp3_url = self._search_regex( r'<input[^>]+id=(["\'])sample_mp3\1[^>]+value=(["\'])(?P<url>(?:(?!\2).)+)', webpage, 'mp3', fatal=False, group='url') or extract_attributes(self._search_regex( r'<meta[^>]+itemprop=(["\'])contentUrl\1[^>]*>', webpage, 'mp3 url', group=0))['content'] thumbnail = self._og_search_thumbnail(webpage) or self._html_search_regex( r'<img[^>]+class=(?:["\'])waveform responsive-img[^>]+src=(["\'])(?P<url>(?:(?!\1).)+)', webpage, 'mp3', fatal=False, group='url') comments = [] for author_id, author, body in re.findall(r'(?s)<p[^>]+class="comment-author"><a[^>]+href="/users/([^"]+)">([^"]+)</a>.+?<p[^>]+class="comment-body">([^>]+)</p>', webpage): comments.append({ 'author': author, 'author_id': author_id, 
'text': body, }) uploader_id = uploader = None mobj = re.search(r'>By <a[^>]+href="/users/([^"]+)"[^>]*>([^<]+)', webpage) if mobj: uploader_id, uploader = mobj.groups() breadcrumb = get_element_by_attribute('typeof', 'BreadcrumbList', webpage) categories = [] if breadcrumb: for _, name in re.findall(r'<span[^>]+property=(["\'])name\1[^>]*>([^<]+)', breadcrumb): categories.append(name) def extract_count(klass): return int_or_none(self._html_search_regex( rf'<span[^>]+class=(?:["\'])?{klass}-count[^>]*>(\d+)', webpage, klass, fatal=False)) return { 'id': sample_id, 'title': title, 'formats': [{ 'url': mp3_url, 'ext': 'mp3', 'vcodec': 'none', 'acodec': 'mp3', 'http_headers': { 'Referer': url, }, }], 'display_id': display_id, 'thumbnail': thumbnail, 'uploader': uploader, 'license': self._html_search_regex( r'<a[^>]+href=(["\'])/license\1[^>]*>(?P<license>[^<]+)<', webpage, 'license', fatal=False, group='license'), 'uploader_id': uploader_id, 'like_count': extract_count(f'sample-{sample_id}-favorites'), 'comment_count': extract_count('comments'), 'comments': comments, 'categories': categories, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/prx.py
yt_dlp/extractor/prx.py
import itertools from .common import InfoExtractor, SearchInfoExtractor from ..utils import ( clean_html, int_or_none, mimetype2ext, str_or_none, traverse_obj, unified_timestamp, url_or_none, urljoin, ) class PRXBaseIE(InfoExtractor): PRX_BASE_URL_RE = r'https?://(?:(?:beta|listen)\.)?prx.org/%s' def _call_api(self, item_id, path, query=None, fatal=True, note='Downloading CMS API JSON'): return self._download_json( urljoin('https://cms.prx.org/api/v1/', path), item_id, query=query, fatal=fatal, note=note) @staticmethod def _get_prx_embed_response(response, section): return traverse_obj(response, ('_embedded', f'prx:{section}')) @staticmethod def _extract_file_link(response): return url_or_none(traverse_obj( response, ('_links', 'enclosure', 'href'), expected_type=str)) @classmethod def _extract_image(cls, image_response): if not isinstance(image_response, dict): return return { 'id': str_or_none(image_response.get('id')), 'filesize': image_response.get('size'), 'width': image_response.get('width'), 'height': image_response.get('height'), 'url': cls._extract_file_link(image_response), } @classmethod def _extract_base_info(cls, response): if not isinstance(response, dict): return item_id = str_or_none(response.get('id')) if not item_id: return thumbnail_dict = cls._extract_image(cls._get_prx_embed_response(response, 'image')) description = ( clean_html(response.get('description')) or response.get('shortDescription')) return { 'id': item_id, 'title': response.get('title') or item_id, 'thumbnails': [thumbnail_dict] if thumbnail_dict else None, 'description': description, 'release_timestamp': unified_timestamp(response.get('releasedAt')), 'timestamp': unified_timestamp(response.get('createdAt')), 'modified_timestamp': unified_timestamp(response.get('updatedAt')), 'duration': int_or_none(response.get('duration')), 'tags': response.get('tags'), 'episode_number': int_or_none(response.get('episodeIdentifier')), 'season_number': int_or_none(response.get('seasonIdentifier')), 
} @classmethod def _extract_series_info(cls, series_response): base_info = cls._extract_base_info(series_response) if not base_info: return account_info = cls._extract_account_info( cls._get_prx_embed_response(series_response, 'account')) or {} return { **base_info, 'channel_id': account_info.get('channel_id'), 'channel_url': account_info.get('channel_url'), 'channel': account_info.get('channel'), 'series': base_info.get('title'), 'series_id': base_info.get('id'), } @classmethod def _extract_account_info(cls, account_response): base_info = cls._extract_base_info(account_response) if not base_info: return name = account_response.get('name') return { **base_info, 'title': name, 'channel_id': base_info.get('id'), 'channel_url': 'https://beta.prx.org/accounts/{}'.format(base_info.get('id')), 'channel': name, } @classmethod def _extract_story_info(cls, story_response): base_info = cls._extract_base_info(story_response) if not base_info: return series = cls._extract_series_info( cls._get_prx_embed_response(story_response, 'series')) or {} account = cls._extract_account_info( cls._get_prx_embed_response(story_response, 'account')) or {} return { **base_info, 'series': series.get('series'), 'series_id': series.get('series_id'), 'channel_id': account.get('channel_id'), 'channel_url': account.get('channel_url'), 'channel': account.get('channel'), } def _entries(self, item_id, endpoint, entry_func, query=None): """ Extract entries from paginated list API @param entry_func: Function to generate entry from response item """ total = 0 for page in itertools.count(1): response = self._call_api(f'{item_id}: page {page}', endpoint, query={ **(query or {}), 'page': page, 'per': 100, }) items = self._get_prx_embed_response(response, 'items') if not response or not items: break yield from filter(None, map(entry_func, items)) total += response['count'] if total >= response['total']: break def _story_playlist_entry(self, response): story = self._extract_story_info(response) if not story: 
return story.update({ '_type': 'url', 'url': 'https://beta.prx.org/stories/{}'.format(story['id']), 'ie_key': PRXStoryIE.ie_key(), }) return story def _series_playlist_entry(self, response): series = self._extract_series_info(response) if not series: return series.update({ '_type': 'url', 'url': 'https://beta.prx.org/series/{}'.format(series['id']), 'ie_key': PRXSeriesIE.ie_key(), }) return series class PRXStoryIE(PRXBaseIE): _VALID_URL = PRXBaseIE.PRX_BASE_URL_RE % r'stories/(?P<id>\d+)' _TESTS = [ { # Story with season and episode details 'url': 'https://beta.prx.org/stories/399200', 'info_dict': { 'id': '399200', 'title': 'Fly Me To The Moon', 'description': 'md5:43230168390b95d3322048d8a56bf2bb', 'release_timestamp': 1640250000, 'timestamp': 1640208972, 'modified_timestamp': 1641318202, 'duration': 1004, 'tags': 'count:7', 'episode_number': 8, 'season_number': 5, 'series': 'AirSpace', 'series_id': '38057', 'channel_id': '220986', 'channel_url': 'https://beta.prx.org/accounts/220986', 'channel': 'Air and Space Museum', }, 'playlist': [{ 'info_dict': { 'id': '399200_part1', 'title': 'Fly Me To The Moon', 'description': 'md5:43230168390b95d3322048d8a56bf2bb', 'release_timestamp': 1640250000, 'timestamp': 1640208972, 'modified_timestamp': 1641318202, 'duration': 530, 'tags': 'count:7', 'episode_number': 8, 'season_number': 5, 'series': 'AirSpace', 'series_id': '38057', 'channel_id': '220986', 'channel_url': 'https://beta.prx.org/accounts/220986', 'channel': 'Air and Space Museum', 'ext': 'mp3', 'upload_date': '20211222', 'episode': 'Episode 8', 'release_date': '20211223', 'season': 'Season 5', 'modified_date': '20220104', }, }, { 'info_dict': { 'id': '399200_part2', 'title': 'Fly Me To The Moon', 'description': 'md5:43230168390b95d3322048d8a56bf2bb', 'release_timestamp': 1640250000, 'timestamp': 1640208972, 'modified_timestamp': 1641318202, 'duration': 474, 'tags': 'count:7', 'episode_number': 8, 'season_number': 5, 'series': 'AirSpace', 'series_id': '38057', 
'channel_id': '220986', 'channel_url': 'https://beta.prx.org/accounts/220986', 'channel': 'Air and Space Museum', 'ext': 'mp3', 'upload_date': '20211222', 'episode': 'Episode 8', 'release_date': '20211223', 'season': 'Season 5', 'modified_date': '20220104', }, }, ], }, { # Story with only split audio 'url': 'https://beta.prx.org/stories/326414', 'info_dict': { 'id': '326414', 'title': 'Massachusetts v EPA', 'description': 'md5:744fffba08f19f4deab69fa8d49d5816', 'timestamp': 1592509124, 'modified_timestamp': 1592510457, 'duration': 3088, 'tags': 'count:0', 'series': 'Outside/In', 'series_id': '36252', 'channel_id': '206', 'channel_url': 'https://beta.prx.org/accounts/206', 'channel': 'New Hampshire Public Radio', }, 'playlist_count': 4, }, { # Story with single combined audio 'url': 'https://beta.prx.org/stories/400404', 'info_dict': { 'id': '400404', 'title': 'Cafe Chill (Episode 2022-01)', 'thumbnails': 'count:1', 'description': 'md5:9f1b5a3cbd64fb159d08c3baa31f1539', 'timestamp': 1641233952, 'modified_timestamp': 1641234248, 'duration': 3540, 'series': 'Café Chill', 'series_id': '37762', 'channel_id': '5767', 'channel_url': 'https://beta.prx.org/accounts/5767', 'channel': 'C89.5 - KNHC Seattle', 'ext': 'mp3', 'tags': 'count:0', 'thumbnail': r're:https?://cms\.prx\.org/pub/\w+/0/web/story_image/767965/medium/Aurora_Over_Trees\.jpg', 'upload_date': '20220103', 'modified_date': '20220103', }, }, { 'url': 'https://listen.prx.org/stories/399200', 'only_matching': True, }, ] def _extract_audio_pieces(self, audio_response): return [{ 'format_id': str_or_none(piece_response.get('id')), 'format_note': str_or_none(piece_response.get('label')), 'filesize': int_or_none(piece_response.get('size')), 'duration': int_or_none(piece_response.get('duration')), 'ext': mimetype2ext(piece_response.get('contentType')), 'asr': int_or_none(piece_response.get('frequency'), scale=1000), 'abr': int_or_none(piece_response.get('bitRate')), 'url': self._extract_file_link(piece_response), 
'vcodec': 'none', } for piece_response in sorted( self._get_prx_embed_response(audio_response, 'items') or [], key=lambda p: int_or_none(p.get('position')))] def _extract_story(self, story_response): info = self._extract_story_info(story_response) if not info: return audio_pieces = self._extract_audio_pieces( self._get_prx_embed_response(story_response, 'audio')) if len(audio_pieces) == 1: return { 'formats': audio_pieces, **info, } entries = [{ **info, 'id': '{}_part{}'.format(info['id'], (idx + 1)), 'formats': [fmt], } for idx, fmt in enumerate(audio_pieces)] return { '_type': 'multi_video', 'entries': entries, **info, } def _real_extract(self, url): story_id = self._match_id(url) response = self._call_api(story_id, f'stories/{story_id}') return self._extract_story(response) class PRXSeriesIE(PRXBaseIE): _VALID_URL = PRXBaseIE.PRX_BASE_URL_RE % r'series/(?P<id>\d+)' _TESTS = [ { 'url': 'https://beta.prx.org/series/36252', 'info_dict': { 'id': '36252', 'title': 'Outside/In', 'thumbnails': 'count:1', 'description': 'md5:a6bedc5f810777bcb09ab30ff9059114', 'timestamp': 1470684964, 'modified_timestamp': 1582308830, 'channel_id': '206', 'channel_url': 'https://beta.prx.org/accounts/206', 'channel': 'New Hampshire Public Radio', 'series': 'Outside/In', 'series_id': '36252', }, 'playlist_mincount': 39, }, { # Blank series 'url': 'https://beta.prx.org/series/25038', 'info_dict': { 'id': '25038', 'title': '25038', 'timestamp': 1207612800, 'modified_timestamp': 1207612800, 'channel_id': '206', 'channel_url': 'https://beta.prx.org/accounts/206', 'channel': 'New Hampshire Public Radio', 'series': '25038', 'series_id': '25038', }, 'playlist_count': 0, }, ] def _extract_series(self, series_response): info = self._extract_series_info(series_response) return { '_type': 'playlist', 'entries': self._entries(info['id'], 'series/{}/stories'.format(info['id']), self._story_playlist_entry), **info, } def _real_extract(self, url): series_id = self._match_id(url) response = 
self._call_api(series_id, f'series/{series_id}') return self._extract_series(response) class PRXAccountIE(PRXBaseIE): _VALID_URL = PRXBaseIE.PRX_BASE_URL_RE % r'accounts/(?P<id>\d+)' _TESTS = [{ 'url': 'https://beta.prx.org/accounts/206', 'info_dict': { 'id': '206', 'title': 'New Hampshire Public Radio', 'description': 'md5:277f2395301d0aca563c80c70a18ee0a', 'channel_id': '206', 'channel_url': 'https://beta.prx.org/accounts/206', 'channel': 'New Hampshire Public Radio', 'thumbnails': 'count:1', }, 'playlist_mincount': 380, }] def _extract_account(self, account_response): info = self._extract_account_info(account_response) series = self._entries( info['id'], f'accounts/{info["id"]}/series', self._series_playlist_entry) stories = self._entries( info['id'], f'accounts/{info["id"]}/stories', self._story_playlist_entry) return { '_type': 'playlist', 'entries': itertools.chain(series, stories), **info, } def _real_extract(self, url): account_id = self._match_id(url) response = self._call_api(account_id, f'accounts/{account_id}') return self._extract_account(response) class PRXStoriesSearchIE(PRXBaseIE, SearchInfoExtractor): IE_DESC = 'PRX Stories Search' IE_NAME = 'prxstories:search' _SEARCH_KEY = 'prxstories' def _search_results(self, query): yield from self._entries( f'query {query}', 'stories/search', self._story_playlist_entry, query={'q': query}) class PRXSeriesSearchIE(PRXBaseIE, SearchInfoExtractor): IE_DESC = 'PRX Series Search' IE_NAME = 'prxseries:search' _SEARCH_KEY = 'prxseries' def _search_results(self, query): yield from self._entries( f'query {query}', 'series/search', self._series_playlist_entry, query={'q': query})
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/germanupa.py
yt_dlp/extractor/germanupa.py
from .common import InfoExtractor from .vimeo import VimeoIE from ..utils import ( parse_qs, traverse_obj, url_or_none, ) class GermanupaIE(InfoExtractor): IE_DESC = 'germanupa.de' _VALID_URL = r'https?://germanupa\.de/mediathek/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://germanupa.de/mediathek/4-figma-beratung-deine-sprechstunde-fuer-figma-fragen', 'info_dict': { 'id': '909179246', 'title': 'Tutorial: #4 Figma Beratung - Deine Sprechstunde für Figma-Fragen', 'ext': 'mp4', 'uploader': 'German UPA', 'uploader_id': 'germanupa', 'thumbnail': 'https://i.vimeocdn.com/video/1792564420-7415283ccef8bf8702dab8c6b7515555ceeb7a1c11371ffcc133b8e887dbf70e-d_1280', 'uploader_url': 'https://vimeo.com/germanupa', 'duration': 3987, }, 'expected_warnings': ['Failed to parse XML: not well-formed'], 'params': {'skip_download': 'm3u8'}, }, { 'note': 'audio, uses GenericIE', 'url': 'https://germanupa.de/mediathek/live-vom-ux-festival-neuigkeiten-von-figma-jobmarkt-agenturszene-interview-zu-sustainable', 'info_dict': { 'id': '1867346676', 'title': 'Live vom UX Festival: Neuigkeiten von Figma, Jobmarkt, Agenturszene & Interview zu Sustainable UX', 'ext': 'opus', 'timestamp': 1720545088, 'upload_date': '20240709', 'duration': 3910.557, 'like_count': int, 'description': 'md5:db2aed5ff131e177a7b33901e9a8db05', 'uploader': 'German UPA', 'repost_count': int, 'genres': ['Science'], 'license': 'all-rights-reserved', 'uploader_url': 'https://soundcloud.com/user-80097677', 'uploader_id': '471579486', 'view_count': int, 'comment_count': int, 'thumbnail': 'https://i1.sndcdn.com/artworks-oCti2e9GhaZFWBqY-48ybGw-original.jpg', }, }, { 'note': 'Nur für Mitglieder/Just for members', 'url': 'https://germanupa.de/mediathek/ux-festival-2024-usability-tests-und-ai', 'info_dict': { 'id': '986994430', 'title': 'UX Festival 2024 "Usability Tests und AI" von Lennart Weber', 'ext': 'mp4', 'release_date': '20240719', 'uploader_url': 'https://vimeo.com/germanupa', 'timestamp': 1721373980, 'license': 'by-sa', 
'like_count': int, 'thumbnail': 'https://i.vimeocdn.com/video/1904187064-2a672630c30f9ad787bd390bff3f51d7506a3e8416763ba6dbf465732b165c5c-d_1280', 'duration': 2146, 'release_timestamp': 1721373980, 'uploader': 'German UPA', 'uploader_id': 'germanupa', 'upload_date': '20240719', 'comment_count': int, }, 'expected_warnings': ['Failed to parse XML: not well-formed'], 'skip': 'login required', }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) param_url = traverse_obj( self._search_regex( r'<iframe[^>]+data-src\s*?=\s*?([\'"])(?P<url>https://germanupa\.de/media/oembed\?url=(?:(?!\1).)+)\1', webpage, 'embedded video', default=None, group='url'), ({parse_qs}, 'url', 0, {url_or_none})) if not param_url: if self._search_regex( r'<div[^>]+class\s*?=\s*?([\'"])(?:(?!\1).)*login-wrapper(?:(?!\1).)*\1', webpage, 'login wrapper', default=None): self.raise_login_required('This video is only available for members') return self.url_result(url, 'Generic') # Fall back to generic to extract audio real_url = param_url.replace('https://vimeo.com/', 'https://player.vimeo.com/video/') return self.url_result(VimeoIE._smuggle_referrer(real_url, url), VimeoIE, video_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/gofile.py
yt_dlp/extractor/gofile.py
import hashlib from .common import InfoExtractor from ..utils import ExtractorError, try_get class GofileIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?gofile\.io/d/(?P<id>[^/]+)' _TESTS = [{ 'url': 'https://gofile.io/d/AMZyDw', 'info_dict': { 'id': 'AMZyDw', }, 'playlist_mincount': 2, 'playlist': [{ 'info_dict': { 'id': 'de571ac1-5edc-42e2-8ec2-bdac83ad4a31', 'filesize': 928116, 'ext': 'mp4', 'title': 'nuuh', 'release_timestamp': 1638338704, 'release_date': '20211201', }, }], }, { 'url': 'https://gofile.io/d/is8lKr', 'info_dict': { 'id': 'TMjXd9', 'ext': 'mp4', }, 'playlist_count': 0, 'skip': 'No video/audio found at provided URL.', }, { 'url': 'https://gofile.io/d/TMjXd9', 'info_dict': { 'id': 'TMjXd9', }, 'playlist_count': 1, }, { 'url': 'https://gofile.io/d/gqOtRf', 'info_dict': { 'id': 'gqOtRf', }, 'playlist_mincount': 1, 'params': { 'videopassword': 'password', }, }] _STATIC_TOKEN = '4fd6sg89d7s6' # From https://gofile.io/dist/js/config.js _TOKEN = None def _real_initialize(self): token = self._get_cookies('https://gofile.io/').get('accountToken') if token: self._TOKEN = token.value return account_data = self._download_json( 'https://api.gofile.io/accounts', None, 'Getting a new guest account', data=b'{}') self._TOKEN = account_data['data']['token'] self._set_cookie('.gofile.io', 'accountToken', self._TOKEN) def _entries(self, file_id): query_params = {} if password := self.get_param('videopassword'): query_params['password'] = hashlib.sha256(password.encode()).hexdigest() files = self._download_json( f'https://api.gofile.io/contents/{file_id}', file_id, 'Getting filelist', query=query_params, headers={ 'Authorization': f'Bearer {self._TOKEN}', 'X-Website-Token': self._STATIC_TOKEN, }) status = files['status'] if status == 'error-passwordRequired': raise ExtractorError( 'This video is protected by a password, use the --video-password option', expected=True) elif status != 'ok': raise ExtractorError(f'{self.IE_NAME} said: status {status}', expected=True) 
found_files = False for file in (try_get(files, lambda x: x['data']['children'], dict) or {}).values(): file_type, file_format = file.get('mimetype').split('/', 1) if file_type not in ('video', 'audio') and file_format != 'vnd.mts': continue found_files = True file_url = file.get('link') if file_url: yield { 'id': file['id'], 'title': file['name'].rsplit('.', 1)[0], 'url': file_url, 'filesize': file.get('size'), 'release_timestamp': file.get('createTime'), } if not found_files: raise ExtractorError('No video/audio found at provided URL.', expected=True) def _real_extract(self, url): file_id = self._match_id(url) return self.playlist_result(self._entries(file_id), playlist_id=file_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/radiozet.py
yt_dlp/extractor/radiozet.py
from .common import InfoExtractor from ..utils import ( strip_or_none, traverse_obj, ) class RadioZetPodcastIE(InfoExtractor): _VALID_URL = r'https?://player\.radiozet\.pl\/Podcasty/.*?/(?P<id>.+)' _TEST = { 'url': 'https://player.radiozet.pl/Podcasty/Nie-Ma-Za-Co/O-przedmiotach-szkolnych-ktore-przydaja-sie-w-zyciu', 'md5': 'e03665c316b4fbc5f6a8f232948bbba3', 'info_dict': { 'id': '42154', 'display_id': 'O-przedmiotach-szkolnych-ktore-przydaja-sie-w-zyciu', 'title': 'O przedmiotach szkolnych, które przydają się w życiu', 'description': 'md5:fa72bed49da334b09e5b2f79851f185c', 'release_timestamp': 1592985480, 'ext': 'mp3', 'thumbnail': r're:^https?://.*\.png$', 'duration': 83, 'series': 'Nie Ma Za Co', 'creator': 'Katarzyna Pakosińska', }, } def _call_api(self, podcast_id, display_id): return self._download_json( f'https://player.radiozet.pl/api/podcasts/getPodcast/(node)/{podcast_id}/(station)/radiozet', display_id) def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) podcast_id = self._html_search_regex(r'<div.*?\sid="player".*?\sdata-id=[\'"]([^\'"]+)[\'"]', webpage, 'podcast id') data = self._call_api(podcast_id, display_id)['data'][0] return { 'id': podcast_id, 'display_id': display_id, 'title': strip_or_none(data.get('title')), 'description': strip_or_none(traverse_obj(data, ('program', 'desc'))), 'release_timestamp': data.get('published_date'), 'url': traverse_obj(data, ('player', 'stream')), 'thumbnail': traverse_obj(data, ('program', 'image', 'original')), 'duration': traverse_obj(data, ('player', 'duration')), 'series': strip_or_none(traverse_obj(data, ('program', 'title'))), 'creator': strip_or_none(traverse_obj(data, ('presenter', 0, 'title'))), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/aitube.py
yt_dlp/extractor/aitube.py
from .common import InfoExtractor from ..utils import int_or_none, merge_dicts class AitubeKZVideoIE(InfoExtractor): _VALID_URL = r'https?://aitube\.kz/(?:video|embed/)\?(?:[^\?]+)?id=(?P<id>[\w-]+)' _TESTS = [{ # id paramater as first parameter 'url': 'https://aitube.kz/video?id=9291d29b-c038-49a1-ad42-3da2051d353c&playlistId=d55b1f5f-ef2a-4f23-b646-2a86275b86b7&season=1', 'info_dict': { 'id': '9291d29b-c038-49a1-ad42-3da2051d353c', 'ext': 'mp4', 'duration': 2174.0, 'channel_id': '94962f73-013b-432c-8853-1bd78ca860fe', 'like_count': int, 'channel': 'ASTANA TV', 'comment_count': int, 'view_count': int, 'description': 'Смотреть любимые сериалы и видео, поделиться видео и сериалами с друзьями и близкими', 'thumbnail': 'https://cdn.static02.aitube.kz/kz.aitudala.aitube.staticaccess/files/ddf2a2ff-bee3-409b-b5f2-2a8202bba75b', 'upload_date': '20221102', 'timestamp': 1667370519, 'title': 'Ангел хранитель 1 серия', 'channel_follower_count': int, }, }, { # embed url 'url': 'https://aitube.kz/embed/?id=9291d29b-c038-49a1-ad42-3da2051d353c', 'only_matching': True, }, { # id parameter is not as first paramater 'url': 'https://aitube.kz/video?season=1&id=9291d29b-c038-49a1-ad42-3da2051d353c&playlistId=d55b1f5f-ef2a-4f23-b646-2a86275b86b7', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) nextjs_data = self._search_nextjs_data(webpage, video_id)['props']['pageProps']['videoInfo'] json_ld_data = self._search_json_ld(webpage, video_id) formats, subtitles = self._extract_m3u8_formats_and_subtitles( f'https://api-http.aitube.kz/kz.aitudala.aitube.staticaccess/video/{video_id}/video', video_id) return merge_dicts({ 'id': video_id, 'title': nextjs_data.get('title') or self._html_search_meta(['name', 'og:title'], webpage), 'description': nextjs_data.get('description'), 'formats': formats, 'subtitles': subtitles, 'view_count': (nextjs_data.get('viewCount') or 
int_or_none(self._html_search_meta('ya:ovs:views_total', webpage))), 'like_count': nextjs_data.get('likeCount'), 'channel': nextjs_data.get('channelTitle'), 'channel_id': nextjs_data.get('channelId'), 'thumbnail': nextjs_data.get('coverUrl'), 'comment_count': nextjs_data.get('commentCount'), 'channel_follower_count': int_or_none(nextjs_data.get('channelSubscriberCount')), }, json_ld_data)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/jiosaavn.py
yt_dlp/extractor/jiosaavn.py
import functools import itertools import math import re from .common import InfoExtractor from ..utils import ( InAdvancePagedList, ISO639Utils, OnDemandPagedList, clean_html, int_or_none, js_to_json, make_archive_id, orderedSet, smuggle_url, unified_strdate, unified_timestamp, unsmuggle_url, url_basename, url_or_none, urlencode_postdata, urljoin, variadic, ) from ..utils.traversal import traverse_obj class JioSaavnBaseIE(InfoExtractor): _URL_BASE_RE = r'https?://(?:www\.)?(?:jio)?saavn\.com' _API_URL = 'https://www.jiosaavn.com/api.php' _VALID_BITRATES = {'16', '32', '64', '128', '320'} @functools.cached_property def requested_bitrates(self): requested_bitrates = self._configuration_arg('bitrate', ['128', '320'], ie_key='JioSaavn') if invalid_bitrates := set(requested_bitrates) - self._VALID_BITRATES: raise ValueError( f'Invalid bitrate(s): {", ".join(invalid_bitrates)}. ' f'Valid bitrates are: {", ".join(sorted(self._VALID_BITRATES, key=int))}') return requested_bitrates def _extract_formats(self, item_data): # Show/episode JSON data has a slightly different structure than song JSON data if media_url := traverse_obj(item_data, ('more_info', 'encrypted_media_url', {str})): item_data.setdefault('encrypted_media_url', media_url) for bitrate in self.requested_bitrates: media_data = self._download_json( self._API_URL, item_data['id'], f'Downloading format info for {bitrate}', fatal=False, data=urlencode_postdata({ '__call': 'song.generateAuthToken', '_format': 'json', 'bitrate': bitrate, 'url': item_data['encrypted_media_url'], })) if not traverse_obj(media_data, ('auth_url', {url_or_none})): self.report_warning(f'Unable to extract format info for {bitrate}') continue ext = media_data.get('type') yield { 'url': media_data['auth_url'], 'ext': 'm4a' if ext == 'mp4' else ext, 'format_id': bitrate, 'abr': int(bitrate), 'vcodec': 'none', } def _call_api(self, type_, token, note='API', params={}): return self._download_json( self._API_URL, token, f'Downloading {note} JSON', 
f'Unable to download {note} JSON', query={ '__call': 'webapi.get', '_format': 'json', '_marker': '0', 'ctx': 'web6dot0', 'token': token, 'type': type_, **params, }) @staticmethod def _extract_song(song_data, url=None): info = traverse_obj(song_data, { 'id': ('id', {str}), 'title': (('song', 'title'), {clean_html}, any), 'album': ((None, 'more_info'), 'album', {clean_html}, any), 'duration': ((None, 'more_info'), 'duration', {int_or_none}, any), 'channel': ((None, 'more_info'), 'label', {str}, any), 'channel_id': ((None, 'more_info'), 'label_id', {str}, any), 'channel_url': ((None, 'more_info'), 'label_url', {urljoin('https://www.jiosaavn.com/')}, any), 'release_date': ((None, 'more_info'), 'release_date', {unified_strdate}, any), 'release_year': ('year', {int_or_none}), 'thumbnail': ('image', {url_or_none}, {lambda x: re.sub(r'-\d+x\d+\.', '-500x500.', x)}), 'view_count': ('play_count', {int_or_none}), 'language': ('language', {lambda x: ISO639Utils.short2long(x.casefold()) or 'und'}), 'webpage_url': ('perma_url', {url_or_none}), 'artists': ('more_info', 'artistMap', 'primary_artists', ..., 'name', {str}, filter, all), }) if webpage_url := info.get('webpage_url') or url: info['display_id'] = url_basename(webpage_url) info['_old_archive_ids'] = [make_archive_id(JioSaavnSongIE, info['display_id'])] if primary_artists := traverse_obj(song_data, ('primary_artists', {lambda x: x.split(', ') if x else None})): info['artists'].extend(primary_artists) if featured_artists := traverse_obj(song_data, ('featured_artists', {str}, filter)): info['artists'].extend(featured_artists.split(', ')) info['artists'] = orderedSet(info['artists']) or None return info @staticmethod def _extract_episode(episode_data, url=None): info = JioSaavnBaseIE._extract_song(episode_data, url) info.pop('_old_archive_ids', None) info.update(traverse_obj(episode_data, { 'description': ('more_info', 'description', {str}), 'timestamp': ('more_info', 'release_time', {unified_timestamp}), 'series': 
('more_info', 'show_title', {str}), 'series_id': ('more_info', 'show_id', {str}), 'season': ('more_info', 'season_title', {str}), 'season_number': ('more_info', 'season_no', {int_or_none}), 'season_id': ('more_info', 'season_id', {str}), 'episode_number': ('more_info', 'episode_number', {int_or_none}), 'cast': ('starring', {lambda x: x.split(', ') if x else None}), })) return info def _extract_jiosaavn_result(self, url, endpoint, response_key, parse_func): url, smuggled_data = unsmuggle_url(url) data = traverse_obj(smuggled_data, ({ 'id': ('id', {str}), 'encrypted_media_url': ('encrypted_media_url', {str}), })) if 'id' in data and 'encrypted_media_url' in data: result = {'id': data['id']} else: # only extract metadata if this is not a url_transparent result data = self._call_api(endpoint, self._match_id(url))[response_key][0] result = parse_func(data, url) result['formats'] = list(self._extract_formats(data)) return result def _yield_items(self, playlist_data, keys=None, parse_func=None): """Subclasses using this method must set _ENTRY_IE""" if parse_func is None: parse_func = self._extract_song for item_data in traverse_obj(playlist_data, ( *variadic(keys, (str, bytes, dict, set)), lambda _, v: v['id'] and v['perma_url'], )): info = parse_func(item_data) url = smuggle_url(info['webpage_url'], traverse_obj(item_data, { 'id': ('id', {str}), 'encrypted_media_url': ((None, 'more_info'), 'encrypted_media_url', {str}, any), })) yield self.url_result(url, self._ENTRY_IE, url_transparent=True, **info) class JioSaavnSongIE(JioSaavnBaseIE): IE_NAME = 'jiosaavn:song' _VALID_URL = JioSaavnBaseIE._URL_BASE_RE + r'(?:/song/[^/?#]+/|/s/song/(?:[^/?#]+/){3})(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://www.jiosaavn.com/song/leja-re/OQsEfQFVUXk', 'md5': '3b84396d15ed9e083c3106f1fa589c04', 'info_dict': { 'id': 'IcoLuefJ', 'display_id': 'OQsEfQFVUXk', 'ext': 'm4a', 'title': 'Leja Re', 'album': 'Leja Re', 'thumbnail': r're:https?://.+/.+\.jpg', 'duration': 205, 'view_count': int, 
'release_year': 2018, 'artists': ['Sandesh Shandilya', 'Dhvani Bhanushali', 'Tanishk Bagchi'], '_old_archive_ids': ['jiosaavnsong OQsEfQFVUXk'], 'channel': 'T-Series', 'language': 'hin', 'channel_id': '34297', 'channel_url': 'https://www.jiosaavn.com/label/t-series-albums/6DLuXO3VoTo_', 'release_date': '20181124', }, }, { 'url': 'https://www.jiosaavn.com/song/chuttamalle/P1FfWjZkQ0Q', 'md5': '96296c58d6ce488a417ef0728fd2d680', 'info_dict': { 'id': 'O94kBTtw', 'display_id': 'P1FfWjZkQ0Q', 'ext': 'm4a', 'title': 'Chuttamalle', 'album': 'Devara Part 1 - Telugu', 'thumbnail': r're:https?://.+/.+\.jpg', 'duration': 222, 'view_count': int, 'release_year': 2024, 'artists': 'count:3', '_old_archive_ids': ['jiosaavnsong P1FfWjZkQ0Q'], 'channel': 'T-Series', 'language': 'tel', 'channel_id': '34297', 'channel_url': 'https://www.jiosaavn.com/label/t-series-albums/6DLuXO3VoTo_', 'release_date': '20240926', }, }, { 'url': 'https://www.saavn.com/s/song/hindi/Saathiya/O-Humdum-Suniyo-Re/KAMiazoCblU', 'only_matching': True, }] def _real_extract(self, url): return self._extract_jiosaavn_result(url, 'song', 'songs', self._extract_song) class JioSaavnShowIE(JioSaavnBaseIE): IE_NAME = 'jiosaavn:show' _VALID_URL = JioSaavnBaseIE._URL_BASE_RE + r'/shows/[^/?#]+/(?P<id>[^/?#]{11,})/?(?:$|[?#])' _TESTS = [{ 'url': 'https://www.jiosaavn.com/shows/non-food-ways-to-boost-your-energy/XFMcKICOCgc_', 'md5': '0733cd254cfe74ef88bea1eaedcf1f4f', 'info_dict': { 'id': 'qqzh3RKZ', 'display_id': 'XFMcKICOCgc_', 'ext': 'mp3', 'title': 'Non-Food Ways To Boost Your Energy', 'description': 'md5:26e7129644b5c6aada32b8851c3997c8', 'episode': 'Episode 1', 'timestamp': 1640563200, 'series': 'Holistic Lifestyle With Neha Ranglani', 'series_id': '52397', 'season': 'Holistic Lifestyle With Neha Ranglani', 'season_number': 1, 'season_id': '61273', 'thumbnail': r're:https?://.+/.+\.jpg', 'duration': 311, 'view_count': int, 'release_year': 2021, 'language': 'eng', 'channel': 'Saavn OG', 'channel_id': '1953876', 
'episode_number': 1, 'upload_date': '20211227', 'release_date': '20211227', }, }, { 'url': 'https://www.jiosaavn.com/shows/himesh-reshammiya/Kr8fmfSN4vo_', 'only_matching': True, }] def _real_extract(self, url): return self._extract_jiosaavn_result(url, 'episode', 'episodes', self._extract_episode) class JioSaavnAlbumIE(JioSaavnBaseIE): IE_NAME = 'jiosaavn:album' _VALID_URL = JioSaavnBaseIE._URL_BASE_RE + r'/album/[^/?#]+/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://www.jiosaavn.com/album/96/buIOjYZDrNA_', 'info_dict': { 'id': 'buIOjYZDrNA_', 'title': '96', }, 'playlist_count': 10, }] _ENTRY_IE = JioSaavnSongIE def _real_extract(self, url): display_id = self._match_id(url) album_data = self._call_api('album', display_id) return self.playlist_result( self._yield_items(album_data, 'songs'), display_id, traverse_obj(album_data, ('title', {str}))) class JioSaavnPlaylistIE(JioSaavnBaseIE): IE_NAME = 'jiosaavn:playlist' _VALID_URL = JioSaavnBaseIE._URL_BASE_RE + r'/(?:s/playlist/(?:[^/?#]+/){2}|featured/[^/?#]+/)(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://www.jiosaavn.com/s/playlist/2279fbe391defa793ad7076929a2f5c9/mood-english/LlJ8ZWT1ibN5084vKHRj2Q__', 'info_dict': { 'id': 'LlJ8ZWT1ibN5084vKHRj2Q__', 'title': 'Mood English', }, 'playlist_mincount': 301, }, { 'url': 'https://www.jiosaavn.com/s/playlist/2279fbe391defa793ad7076929a2f5c9/mood-hindi/DVR,pFUOwyXqIp77B1JF,A__', 'info_dict': { 'id': 'DVR,pFUOwyXqIp77B1JF,A__', 'title': 'Mood Hindi', }, 'playlist_mincount': 750, }, { 'url': 'https://www.jiosaavn.com/featured/taaza-tunes/Me5RridRfDk_', 'info_dict': { 'id': 'Me5RridRfDk_', 'title': 'Taaza Tunes', }, 'playlist_mincount': 50, }] _ENTRY_IE = JioSaavnSongIE _PAGE_SIZE = 50 def _fetch_page(self, token, page): return self._call_api( 'playlist', token, f'playlist page {page}', {'p': page, 'n': self._PAGE_SIZE}) def _entries(self, token, first_page_data, page): page_data = first_page_data if not page else self._fetch_page(token, page + 1) yield from 
self._yield_items(page_data, 'songs') def _real_extract(self, url): display_id = self._match_id(url) playlist_data = self._fetch_page(display_id, 1) total_pages = math.ceil(int(playlist_data['list_count']) / self._PAGE_SIZE) return self.playlist_result(InAdvancePagedList( functools.partial(self._entries, display_id, playlist_data), total_pages, self._PAGE_SIZE), display_id, traverse_obj(playlist_data, ('listname', {str}))) class JioSaavnShowPlaylistIE(JioSaavnBaseIE): IE_NAME = 'jiosaavn:show:playlist' _VALID_URL = JioSaavnBaseIE._URL_BASE_RE + r'/shows/(?P<show>[^#/?]+)/(?P<season>\d+)/[^/?#]+' _TESTS = [{ 'url': 'https://www.jiosaavn.com/shows/talking-music/1/PjReFP-Sguk_', 'info_dict': { 'id': 'talking-music-1', 'title': 'Talking Music', }, 'playlist_mincount': 11, }] _ENTRY_IE = JioSaavnShowIE _PAGE_SIZE = 10 def _fetch_page(self, show_id, season_id, page): return self._call_api('show', show_id, f'show page {page}', { 'p': page, '__call': 'show.getAllEpisodes', 'show_id': show_id, 'season_number': season_id, 'api_version': '4', 'sort_order': 'desc', }) def _entries(self, show_id, season_id, page): page_data = self._fetch_page(show_id, season_id, page + 1) yield from self._yield_items(page_data, keys=None, parse_func=self._extract_episode) def _real_extract(self, url): show_slug, season_id = self._match_valid_url(url).group('show', 'season') playlist_id = f'{show_slug}-{season_id}' webpage = self._download_webpage(url, playlist_id) show_info = self._search_json( r'window\.__INITIAL_DATA__\s*=', webpage, 'initial data', playlist_id, transform_source=js_to_json)['showView'] show_id = show_info['current_id'] entries = OnDemandPagedList(functools.partial(self._entries, show_id, season_id), self._PAGE_SIZE) return self.playlist_result( entries, playlist_id, traverse_obj(show_info, ('show', 'title', 'text', {str}))) class JioSaavnArtistIE(JioSaavnBaseIE): IE_NAME = 'jiosaavn:artist' _VALID_URL = JioSaavnBaseIE._URL_BASE_RE + r'/artist/[^/?#]+/(?P<id>[^/?#]+)' _TESTS = 
[{ 'url': 'https://www.jiosaavn.com/artist/krsna-songs/rYLBEve2z3U_', 'info_dict': { 'id': 'rYLBEve2z3U_', 'title': 'KR$NA', }, 'playlist_mincount': 38, }, { 'url': 'https://www.jiosaavn.com/artist/sanam-puri-songs/SkNEv3qRhDE_', 'info_dict': { 'id': 'SkNEv3qRhDE_', 'title': 'Sanam Puri', }, 'playlist_mincount': 51, }] _ENTRY_IE = JioSaavnSongIE _PAGE_SIZE = 50 def _fetch_page(self, artist_id, page): return self._call_api('artist', artist_id, f'artist page {page + 1}', { 'p': page, 'n_song': self._PAGE_SIZE, 'n_album': self._PAGE_SIZE, 'sub_type': '', 'includeMetaTags': '', 'api_version': '4', 'category': 'alphabetical', 'sort_order': 'asc', }) def _entries(self, artist_id, first_page): for page in itertools.count(): playlist_data = first_page if not page else self._fetch_page(artist_id, page) if not traverse_obj(playlist_data, ('topSongs', ..., {dict})): break yield from self._yield_items(playlist_data, 'topSongs') def _real_extract(self, url): artist_id = self._match_id(url) first_page = self._fetch_page(artist_id, 0) return self.playlist_result( self._entries(artist_id, first_page), artist_id, traverse_obj(first_page, ('name', {str})))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/fczenit.py
yt_dlp/extractor/fczenit.py
from .common import InfoExtractor from ..utils import ( float_or_none, int_or_none, ) class FczenitIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?fc-zenit\.ru/video/(?P<id>[0-9]+)' _TEST = { 'url': 'http://fc-zenit.ru/video/41044/', 'md5': '0e3fab421b455e970fa1aa3891e57df0', 'info_dict': { 'id': '41044', 'ext': 'mp4', 'title': 'Так пишется история: казанский разгром ЦСКА на «Зенит-ТВ»', 'timestamp': 1462283735, 'upload_date': '20160503', }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) msi_id = self._search_regex( r"(?s)config\s*=\s*{.+?video_id\s*:\s*'([^']+)'", webpage, 'msi id') msi_data = self._download_json( 'http://player.fc-zenit.ru/msi/video', msi_id, query={ 'video': msi_id, })['data'] title = msi_data['name'] formats = [{ 'format_id': q.get('label'), 'url': q['url'], 'height': int_or_none(q.get('label')), } for q in msi_data['qualities'] if q.get('url')] tags = [tag['label'] for tag in msi_data.get('tags', []) if tag.get('label')] return { 'id': video_id, 'title': title, 'thumbnail': msi_data.get('preview'), 'formats': formats, 'duration': float_or_none(msi_data.get('duration')), 'timestamp': int_or_none(msi_data.get('date')), 'tags': tags, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/dangalplay.py
yt_dlp/extractor/dangalplay.py
import hashlib import json import re import time from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ExtractorError, int_or_none, join_nonempty, url_or_none from ..utils.traversal import traverse_obj class DangalPlayBaseIE(InfoExtractor): _NETRC_MACHINE = 'dangalplay' _REGION = 'IN' _OTV_USER_ID = None _LOGIN_HINT = ( 'Pass credentials as -u "token" -p "USER_ID" ' '(where USER_ID is the value of "otv_user_id" in your browser local storage). ' 'Your login region can be optionally suffixed to the username as @REGION ' '(where REGION is the two-letter "region" code found in your browser local storage), ' 'e.g.: -u "token@IN" -p "USER_ID"') _API_BASE = 'https://ottapi.dangalplay.com' _AUTH_TOKEN = 'jqeGWxRKK7FK5zEk3xCM' # from https://www.dangalplay.com/main.48ad19e24eb46acccef3.js _SECRET_KEY = 'f53d31a4377e4ef31fa0' # same as above def _perform_login(self, username, password): if self._OTV_USER_ID: return mobj = re.fullmatch(r'token(?:@(?P<region>[A-Z]{2}))?', username) if not mobj or not re.fullmatch(r'[\da-f]{32}', password): raise ExtractorError(self._LOGIN_HINT, expected=True) if region := mobj.group('region'): self._REGION = region self.write_debug(f'Setting login region to "{self._REGION}"') self._OTV_USER_ID = password def _real_initialize(self): if not self._OTV_USER_ID: self.raise_login_required(f'Login required. 
{self._LOGIN_HINT}', method=None) def _extract_episode_info(self, metadata, episode_slug, series_slug): return { 'display_id': episode_slug, 'episode_number': int_or_none(self._search_regex( r'ep-(?:number-)?(\d+)', episode_slug, 'episode number', default=None)), 'season_number': int_or_none(self._search_regex( r'season-(\d+)', series_slug, 'season number', default='1')), 'series': series_slug, **traverse_obj(metadata, { 'id': ('content_id', {str}), 'title': ('display_title', {str}), 'episode': ('title', {str}), 'series': ('show_name', {str}, filter), 'series_id': ('catalog_id', {str}), 'duration': ('duration', {int_or_none}), 'release_timestamp': ('release_date_uts', {int_or_none}), }), } def _call_api(self, path, display_id, note='Downloading JSON metadata', fatal=True, query={}): return self._download_json( f'{self._API_BASE}/{path}', display_id, note, fatal=fatal, headers={'Accept': 'application/json'}, query={ 'auth_token': self._AUTH_TOKEN, 'region': self._REGION, **query, }) class DangalPlayIE(DangalPlayBaseIE): IE_NAME = 'dangalplay' _VALID_URL = r'https?://(?:www\.)?dangalplay.com/shows/(?P<series>[^/?#]+)/(?P<id>(?!episodes)[^/?#]+)/?(?:$|[?#])' _TESTS = [{ 'url': 'https://www.dangalplay.com/shows/kitani-mohabbat-hai-season-2/kitani-mohabbat-hai-season-2-ep-number-01', 'info_dict': { 'id': '647c61dc1e7171310dcd49b4', 'ext': 'mp4', 'release_timestamp': 1262304000, 'episode_number': 1, 'episode': 'EP 1 | KITANI MOHABBAT HAI SEASON 2', 'series': 'kitani-mohabbat-hai-season-2', 'season_number': 2, 'title': 'EP 1 | KITANI MOHABBAT HAI SEASON 2', 'release_date': '20100101', 'duration': 2325, 'season': 'Season 2', 'display_id': 'kitani-mohabbat-hai-season-2-ep-number-01', 'series_id': '645c9ea41e717158ca574966', }, }, { 'url': 'https://www.dangalplay.com/shows/milke-bhi-hum-na-mile/milke-bhi-hum-na-mile-ep-number-01', 'info_dict': { 'id': '65d31d9ba73b9c3abd14a7f3', 'ext': 'mp4', 'episode': 'EP 1 | MILKE BHI HUM NA MILE', 'release_timestamp': 1708367411, 
'episode_number': 1, 'season': 'Season 1', 'title': 'EP 1 | MILKE BHI HUM NA MILE', 'duration': 156048, 'release_date': '20240219', 'season_number': 1, 'series': 'MILKE BHI HUM NA MILE', 'series_id': '645c9ea41e717158ca574966', 'display_id': 'milke-bhi-hum-na-mile-ep-number-01', }, }] def _generate_api_data(self, data): catalog_id = data['catalog_id'] content_id = data['content_id'] timestamp = str(int(time.time())) unhashed = ''.join((catalog_id, content_id, self._OTV_USER_ID, timestamp, self._SECRET_KEY)) return json.dumps({ 'catalog_id': catalog_id, 'content_id': content_id, 'category': '', 'region': self._REGION, 'auth_token': self._AUTH_TOKEN, 'id': self._OTV_USER_ID, 'md5': hashlib.md5(unhashed.encode()).hexdigest(), 'ts': timestamp, }, separators=(',', ':')).encode() def _real_extract(self, url): series_slug, episode_slug = self._match_valid_url(url).group('series', 'id') metadata = self._call_api( f'catalogs/shows/{series_slug}/episodes/{episode_slug}.gzip', episode_slug, query={'item_language': ''})['data'] try: details = self._download_json( f'{self._API_BASE}/v2/users/get_all_details.gzip', episode_slug, 'Downloading playback details JSON', headers={ 'Accept': 'application/json', 'Content-Type': 'application/json', }, data=self._generate_api_data(metadata))['data'] except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 422: error_info = traverse_obj(e.cause.response.read().decode(), ({json.loads}, 'error', {dict})) or {} error_code = error_info.get('code') if error_code == '1016': self.raise_login_required( f'Your token has expired or is invalid. {self._LOGIN_HINT}', method=None) elif error_code == '4028': self.raise_login_required( f'Your login region is unspecified or incorrect. 
{self._LOGIN_HINT}', method=None) raise ExtractorError(join_nonempty(error_code, error_info.get('message'), delim=': ')) raise m3u8_url = traverse_obj(details, ( ('adaptive_url', ('adaptive_urls', 'hd', 'hls', ..., 'playback_url')), {url_or_none}, any)) formats, subtitles = self._extract_m3u8_formats_and_subtitles(m3u8_url, episode_slug, 'mp4') return { 'formats': formats, 'subtitles': subtitles, **self._extract_episode_info(metadata, episode_slug, series_slug), } class DangalPlaySeasonIE(DangalPlayBaseIE): IE_NAME = 'dangalplay:season' _VALID_URL = r'https?://(?:www\.)?dangalplay.com/shows/(?P<id>[^/?#]+)(?:/(?P<sub>ep-[^/?#]+)/episodes)?/?(?:$|[?#])' _TESTS = [{ 'url': 'https://www.dangalplay.com/shows/kitani-mohabbat-hai-season-1', 'playlist_mincount': 170, 'info_dict': { 'id': 'kitani-mohabbat-hai-season-1', }, }, { 'url': 'https://www.dangalplay.com/shows/kitani-mohabbat-hai-season-1/ep-01-30-1/episodes', 'playlist_count': 30, 'info_dict': { 'id': 'kitani-mohabbat-hai-season-1-ep-01-30-1', }, }, { # 1 season only, series page is season page 'url': 'https://www.dangalplay.com/shows/milke-bhi-hum-na-mile', 'playlist_mincount': 15, 'info_dict': { 'id': 'milke-bhi-hum-na-mile', }, }] def _entries(self, subcategories, series_slug): for subcategory in subcategories: data = self._call_api( f'catalogs/shows/items/{series_slug}/subcategories/{subcategory}/episodes.gzip', series_slug, f'Downloading episodes JSON for {subcategory}', fatal=False, query={ 'order_by': 'asc', 'status': 'published', }) for ep in traverse_obj(data, ('data', 'items', lambda _, v: v['friendly_id'])): episode_slug = ep['friendly_id'] yield self.url_result( f'https://www.dangalplay.com/shows/{series_slug}/{episode_slug}', DangalPlayIE, **self._extract_episode_info(ep, episode_slug, series_slug)) def _real_extract(self, url): series_slug, subcategory = self._match_valid_url(url).group('id', 'sub') subcategories = [subcategory] if subcategory else traverse_obj( self._call_api( 
f'catalogs/shows/items/{series_slug}.gzip', series_slug, 'Downloading season info JSON', query={'item_language': ''}), ('data', 'subcategories', ..., 'friendly_id', {str})) return self.playlist_result( self._entries(subcategories, series_slug), join_nonempty(series_slug, subcategory))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/skyit.py
yt_dlp/extractor/skyit.py
import urllib.parse from .common import InfoExtractor from ..utils import ( clean_html, dict_get, int_or_none, parse_duration, unified_timestamp, url_or_none, urljoin, ) from ..utils.traversal import traverse_obj class SkyItBaseIE(InfoExtractor): _GEO_BYPASS = False _DOMAIN = 'sky' _PLAYER_TMPL = 'https://player.sky.it/player/external.html?id=%s&domain=%s' # http://static.sky.it/static/skyplayer/conf.json _TOKEN_MAP = { 'cielo': 'Hh9O7M8ks5yi6nSROL7bKYz933rdf3GhwZlTLMgvy4Q', 'hotclub': 'kW020K2jq2lk2eKRJD2vWEg832ncx2EivZlTLQput2C', 'mtv8': 'A5Nn9GGb326CI7vP5e27d7E4PIaQjota', 'salesforce': 'C6D585FD1615272C98DE38235F38BD86', 'sitocommerciale': 'VJwfFuSGnLKnd9Phe9y96WkXgYDCguPMJ2dLhGMb2RE', 'sky': 'F96WlOd8yoFmLQgiqv6fNQRvHZcsWk5jDaYnDvhbiJk', 'skyarte': 'LWk29hfiU39NNdq87ePeRach3nzTSV20o0lTv2001Cd', 'theupfront': 'PRSGmDMsg6QMGc04Obpoy7Vsbn7i2Whp', } def _player_url_result(self, video_id): return self.url_result( self._PLAYER_TMPL % (video_id, self._DOMAIN), SkyItPlayerIE.ie_key(), video_id) def _parse_video(self, video, video_id): is_live = video.get('type') == 'live' hls_url = video.get(('streaming' if is_live else 'hls') + '_url') if not hls_url and video.get('geoblock' if is_live else 'geob'): self.raise_geo_restricted(countries=['IT']) formats = self._extract_m3u8_formats(hls_url, video_id, 'mp4') return { 'id': video_id, 'title': video.get('title'), 'formats': formats, 'thumbnail': dict_get(video, ('video_still', 'video_still_medium', 'thumb')), 'description': video.get('short_desc') or None, 'timestamp': unified_timestamp(video.get('create_date')), 'duration': int_or_none(video.get('duration_sec')) or parse_duration(video.get('duration')), 'is_live': is_live, } class SkyItPlayerIE(SkyItBaseIE): IE_NAME = 'player.sky.it' _VALID_URL = r'https?://player\.sky\.it/player/(?:external|social)\.html\?.*?\bid=(?P<id>\d+)' def _real_extract(self, url): video_id = self._match_id(url) domain = urllib.parse.parse_qs(urllib.parse.urlparse( url).query).get('domain', 
[None])[0] token = dict_get(self._TOKEN_MAP, (domain, 'sky')) video = self._download_json( 'https://apid.sky.it/vdp/v1/getVideoData', video_id, query={ 'caller': 'sky', 'id': video_id, 'token': token, }, headers=self.geo_verification_headers()) return self._parse_video(video, video_id) class SkyItVideoIE(SkyItBaseIE): IE_NAME = 'video.sky.it' _VALID_URL = r'https?://(?:masterchef|video|xfactor)\.sky\.it(?:/[^/]+)*/video/[0-9a-z-]+-(?P<id>\d+)' _TESTS = [{ 'url': 'https://video.sky.it/news/mondo/video/uomo-ucciso-da-uno-squalo-in-australia-631227', 'md5': '5b858a62d9ffe2ab77b397553024184a', 'info_dict': { 'id': '631227', 'ext': 'mp4', 'title': 'Uomo ucciso da uno squalo in Australia', 'timestamp': 1606036192, 'upload_date': '20201122', 'duration': 26, 'thumbnail': 'https://video.sky.it/captures/thumbs/631227/631227_thumb_880x494.jpg', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://xfactor.sky.it/video/x-factor-2020-replay-audizioni-1-615820', 'only_matching': True, }, { 'url': 'https://masterchef.sky.it/video/masterchef-9-cosa-e-successo-nella-prima-puntata-562831', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) return self._player_url_result(video_id) class SkyItVideoLiveIE(SkyItBaseIE): IE_NAME = 'video.sky.it:live' _VALID_URL = r'https?://video\.sky\.it/diretta/(?P<id>[^/?&#]+)' _TEST = { 'url': 'https://video.sky.it/diretta/tg24', 'info_dict': { 'id': '1', 'ext': 'mp4', 'title': r're:Diretta TG24 \d{4}-\d{2}-\d{2} \d{2}:\d{2}', 'description': r're:(?:Clicca play e )?[Gg]uarda la diretta streaming di SkyTg24, segui con Sky tutti gli appuntamenti e gli speciali di Tg24\.', 'live_status': 'is_live', }, 'params': { # m3u8 download 'skip_download': True, }, } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) asset_id = str(self._search_nextjs_data(webpage, display_id)['props']['initialState']['livePage']['content']['asset_id']) livestream = 
self._download_json( 'https://apid.sky.it/vdp/v1/getLivestream', asset_id, query={'id': asset_id}) return self._parse_video(livestream, asset_id) class SkyItIE(SkyItBaseIE): IE_NAME = 'sky.it' _VALID_URL = r'https?://(?:sport|tg24)\.sky\.it(?:/[^/]+)*/\d{4}/\d{2}/\d{2}/(?P<id>[^/?&#]+)' _TESTS = [{ 'url': 'https://sport.sky.it/calcio/serie-a/2022/11/03/brozovic-inter-news', 'info_dict': { 'id': '789222', 'ext': 'mp4', 'title': 'Brozovic con il gruppo: verso convocazione per Juve-Inter', 'upload_date': '20221103', 'timestamp': 1667484130, 'duration': 22, 'thumbnail': 'https://videoplatform.sky.it/still/2022/11/03/1667480526353_brozovic_videostill_1.jpg', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://tg24.sky.it/mondo/2020/11/22/australia-squalo-uccide-uomo', 'md5': 'fe5c91e59a84a3437eaa0bca6e134ccd', 'info_dict': { 'id': '631227', 'ext': 'mp4', 'title': 'Uomo ucciso da uno squalo in Australia', 'timestamp': 1606036192, 'upload_date': '20201122', 'duration': 26, 'thumbnail': 'https://video.sky.it/captures/thumbs/631227/631227_thumb_880x494.jpg', }, 'params': {'skip_download': 'm3u8'}, }] _VIDEO_ID_REGEX = r'data-videoid="(\d+)"' def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) video_id = self._search_regex( self._VIDEO_ID_REGEX, webpage, 'video id') return self._player_url_result(video_id) class SkyItArteIE(SkyItIE): # XXX: Do not subclass from concrete IE IE_NAME = 'arte.sky.it' _VALID_URL = r'https?://arte\.sky\.it/video/(?P<id>[^/?&#]+)' _TESTS = [{ 'url': 'https://arte.sky.it/video/oliviero-toscani-torino-galleria-mazzoleni-788962', 'md5': '515aee97b87d7a018b6c80727d3e7e17', 'info_dict': { 'id': '788962', 'ext': 'mp4', 'title': 'La fotografia di Oliviero Toscani conquista Torino', 'upload_date': '20221102', 'timestamp': 1667399996, 'duration': 12, 'thumbnail': 'https://videoplatform.sky.it/still/2022/11/02/1667396388552_oliviero-toscani-torino-galleria-mazzoleni_videostill_1.jpg', }, 
'params': {'skip_download': 'm3u8'}, }] _DOMAIN = 'skyarte' _VIDEO_ID_REGEX = r'"embedUrl"\s*:\s*"(?:https:)?//player\.sky\.it/player/external\.html\?[^"]*\bid=(\d+)' class CieloTVItIE(SkyItIE): # XXX: Do not subclass from concrete IE IE_NAME = 'cielotv.it' _VALID_URL = r'https?://(?:www\.)?cielotv\.it/video/(?P<id>[^.]+)\.html' _TESTS = [{ 'url': 'https://www.cielotv.it/video/Il-lunedi-e-sempre-un-dramma.html', 'md5': 'c4deed77552ba901c2a0d9258320304b', 'info_dict': { 'id': '499240', 'ext': 'mp4', 'title': 'Il lunedì è sempre un dramma', 'upload_date': '20190329', 'timestamp': 1553862178, 'duration': 30, 'thumbnail': 'https://videoplatform.sky.it/still/2019/03/29/1553858575610_lunedi_dramma_mant_videostill_1.jpg', }, 'params': {'skip_download': 'm3u8'}, }] _DOMAIN = 'cielo' _VIDEO_ID_REGEX = r'videoId\s*=\s*"(\d+)"' class TV8ItIE(SkyItVideoIE): # XXX: Do not subclass from concrete IE IE_NAME = 'tv8.it' _VALID_URL = r'https?://(?:www\.)?tv8\.it/(?:show)?video/(?:[0-9a-z-]+-)?(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.tv8.it/video/ogni-mattina-ucciso-asino-di-andrea-lo-cicero-630529', 'md5': '9ab906a3f75ea342ed928442f9dabd21', 'info_dict': { 'id': '630529', 'ext': 'mp4', 'title': 'Ogni mattina - Ucciso asino di Andrea Lo Cicero', 'timestamp': 1605721374, 'upload_date': '20201118', 'duration': 114, 'thumbnail': 'https://videoplatform.sky.it/still/2020/11/18/1605717753954_ogni-mattina-ucciso-asino-di-andrea-lo-cicero_videostill_1.jpg', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.tv8.it/video/964361', 'md5': '1e58e807154658a16edc29e45be38107', 'info_dict': { 'id': '964361', 'ext': 'mp4', 'title': 'GialappaShow - S.4 Ep.2', 'description': 'md5:60bb4ff5af18bbeeaedabc1de5f9e1e2', 'duration': 8030, 'thumbnail': 'https://videoplatform.sky.it/captures/494/2024/11/06/964361/964361_1730888412914_thumb_494.jpg', 'timestamp': 1730821499, 'upload_date': '20241105', }, }] _DOMAIN = 'mtv8' class TV8ItLiveIE(SkyItBaseIE): IE_NAME = 'tv8.it:live' IE_DESC = 'TV8 
Live' _VALID_URL = r'https?://(?:www\.)?tv8\.it/streaming' _TESTS = [{ 'url': 'https://tv8.it/streaming', 'info_dict': { 'id': 'tv8', 'ext': 'mp4', 'title': str, 'description': str, 'is_live': True, 'live_status': 'is_live', }, }] def _real_extract(self, url): video_id = 'tv8' livestream = self._download_json( 'https://apid.sky.it/vdp/v1/getLivestream', video_id, 'Downloading manifest JSON', query={'id': '7'}) metadata = self._download_json('https://tv8.it/api/getStreaming', video_id, fatal=False) return { **self._parse_video(livestream, video_id), **traverse_obj(metadata, ('info', { 'title': ('title', 'text', {str}), 'description': ('description', 'html', {clean_html}), })), } class TV8ItPlaylistIE(InfoExtractor): IE_NAME = 'tv8.it:playlist' IE_DESC = 'TV8 Playlist' _VALID_URL = r'https?://(?:www\.)?tv8\.it/(?!video)[^/#?]+/(?P<id>[^/#?]+)' _TESTS = [{ 'url': 'https://tv8.it/intrattenimento/tv8-gialappas-night', 'playlist_mincount': 32, 'info_dict': { 'id': 'tv8-gialappas-night', 'title': 'Tv8 Gialappa\'s Night', 'description': 'md5:c876039d487d9cf40229b768872718ed', 'thumbnail': r're:https://static\.sky\.it/.+\.(png|jpe?g|webp)', }, }, { 'url': 'https://tv8.it/sport/uefa-europa-league', 'playlist_mincount': 11, 'info_dict': { 'id': 'uefa-europa-league', 'title': 'UEFA Europa League', 'description': 'md5:9ab1832b7a8b1705b1f590e13a36bc6a', 'thumbnail': r're:https://static\.sky\.it/.+\.(png|jpe?g|webp)', }, }] def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) data = self._search_nextjs_data(webpage, playlist_id)['props']['pageProps']['data'] entries = [self.url_result( urljoin('https://tv8.it', card['href']), ie=TV8ItIE, **traverse_obj(card, { 'description': ('extraData', 'videoDesc', {str}), 'id': ('extraData', 'asset_id', {str}), 'thumbnail': ('image', 'src', {url_or_none}), 'title': ('title', 'typography', 'text', {str}), })) for card in traverse_obj(data, ('lastContent', 'cards', lambda _, v: 
v['href']))] return self.playlist_result(entries, playlist_id, **traverse_obj(data, ('card', 'desktop', { 'description': ('description', 'html', {clean_html}), 'thumbnail': ('image', 'src', {url_or_none}), 'title': ('title', 'text', {str}), })))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nate.py
yt_dlp/extractor/nate.py
import itertools from .common import InfoExtractor from ..utils import ( int_or_none, str_or_none, traverse_obj, unified_strdate, ) class NateIE(InfoExtractor): _VALID_URL = r'https?://tv\.nate\.com/clip/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://tv.nate.com/clip/1848976', 'info_dict': { 'id': '1848976', 'ext': 'mp4', 'title': '[결승 오프닝 타이틀] 2018 LCK 서머 스플릿 결승전 kt Rolster VS Griffin', 'description': 'md5:e1b79a7dcf0d8d586443f11366f50e6f', 'thumbnail': r're:^https?://.*\.jpg', 'upload_date': '20180908', 'age_limit': 15, 'duration': 73, 'uploader': '2018 LCK 서머 스플릿(롤챔스)', 'channel': '2018 LCK 서머 스플릿(롤챔스)', 'channel_id': '3606', 'uploader_id': '3606', 'tags': 'count:59', }, 'params': {'skip_download': True}, }, { 'url': 'https://tv.nate.com/clip/4300566', 'info_dict': { 'id': '4300566', 'ext': 'mp4', 'title': '[심쿵엔딩] 이준호x이세영, 서로를 기억하며 끌어안는 두 사람!💕, MBC 211204 방송', 'description': 'md5:be1653502d9c13ce344ddf7828e089fa', 'thumbnail': r're:^https?://.*\.jpg', 'upload_date': '20211204', 'age_limit': 15, 'duration': 201, 'uploader': '옷소매 붉은 끝동', 'channel': '옷소매 붉은 끝동', 'channel_id': '27987', 'uploader_id': '27987', 'tags': 'count:20', }, 'params': {'skip_download': True}, }] _QUALITY = { '36': 2160, '35': 1080, '34': 720, '33': 480, '32': 360, '31': 270, } def _real_extract(self, url): video_id = self._match_id(url) video_data = self._download_json(f'https://tv.nate.com/api/v1/clip/{video_id}', video_id) formats = [{ 'format_id': f_url[-2:], 'url': f_url, 'height': self._QUALITY.get(f_url[-2:]), 'quality': int_or_none(f_url[-2:]), } for f_url in video_data.get('smcUriList') or []] return { 'id': video_id, 'title': video_data.get('clipTitle'), 'description': video_data.get('synopsis'), 'thumbnail': video_data.get('contentImg'), 'upload_date': unified_strdate(traverse_obj(video_data, 'broadDate', 'regDate')), 'age_limit': video_data.get('targetAge'), 'duration': video_data.get('playTime'), 'formats': formats, 'uploader': video_data.get('programTitle'), 'channel': 
video_data.get('programTitle'), 'channel_id': str_or_none(video_data.get('programSeq')), 'uploader_id': str_or_none(video_data.get('programSeq')), 'tags': video_data['hashTag'].split(',') if video_data.get('hashTag') else None, } class NateProgramIE(InfoExtractor): _VALID_URL = r'https?://tv\.nate\.com/program/clips/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://tv.nate.com/program/clips/27987', 'playlist_mincount': 191, 'info_dict': { 'id': '27987', }, }, { 'url': 'https://tv.nate.com/program/clips/3606', 'playlist_mincount': 15, 'info_dict': { 'id': '3606', }, }] def _entries(self, playlist_id): for page_num in itertools.count(1): program_data = self._download_json( f'https://tv.nate.com/api/v1/program/{playlist_id}/clip/ranking?size=20&page={page_num}', playlist_id, note=f'Downloading page {page_num}') for clip in program_data.get('content') or []: clip_id = clip.get('clipSeq') if clip_id: yield self.url_result( f'https://tv.nate.com/clip/{clip_id}', NateIE, playlist_id) if program_data.get('last'): break def _real_extract(self, url): playlist_id = self._match_id(url) return self.playlist_result(self._entries(playlist_id), playlist_id=playlist_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/detik.py
yt_dlp/extractor/detik.py
from .common import InfoExtractor from ..utils import int_or_none, merge_dicts, try_call, url_basename class DetikEmbedIE(InfoExtractor): _VALID_URL = False _WEBPAGE_TESTS = [{ # cnn embed 'url': 'https://www.cnnindonesia.com/embed/video/846189', 'info_dict': { 'id': '846189', 'ext': 'mp4', 'description': 'md5:ece7b003b3ee7d81c6a5cfede7d5397d', 'thumbnail': r're:https?://akcdn\.detik\.net\.id/visual/2022/09/11/thumbnail-video-1_169.jpeg', 'title': 'Video CNN Indonesia - VIDEO: Momen Charles Disambut Meriah usai Dilantik jadi Raja Inggris', 'age_limit': 0, 'tags': ['raja charles', ' raja charles iii', ' ratu elizabeth', ' ratu elizabeth meninggal dunia', ' raja inggris', ' inggris'], 'release_timestamp': 1662869995, 'release_date': '20220911', 'uploader': 'REUTERS', }, }, { # 20.detik 'url': 'https://20.detik.com/otobuzz/20220704-220704093/mulai-rp-10-jutaan-ini-skema-kredit-mitsubishi-pajero-sport', 'info_dict': { 'display_id': 'mulai-rp-10-jutaan-ini-skema-kredit-mitsubishi-pajero-sport', 'id': '220704093', 'ext': 'mp4', 'description': 'md5:9b2257341b6f375cdcf90106146d5ffb', 'thumbnail': r're:https?://cdnv\.detik\.com/videoservice/AdminTV/2022/07/04/5d6187e402ec4a91877755a5886ff5b6-20220704161859-0s.jpg', 'title': 'Mulai Rp 10 Jutaan! 
Ini Skema Kredit Mitsubishi Pajero Sport', 'timestamp': 1656951521, 'upload_date': '20220704', 'duration': 83.0, 'tags': ['cicilan mobil', 'mitsubishi pajero sport', 'mitsubishi', 'pajero sport'], 'release_timestamp': 1656926321, 'release_date': '20220704', 'age_limit': 0, 'uploader': 'Ridwan Arifin ', # TODO: strip trailling whitespace at uploader }, }, { # pasangmata.detik 'url': 'https://pasangmata.detik.com/contribution/366649', 'info_dict': { 'id': '366649', 'ext': 'mp4', 'title': 'Saling Dorong Aparat dan Pendemo di Aksi Tolak Kenaikan BBM', 'description': 'md5:7a6580876c8381c454679e028620bea7', 'age_limit': 0, 'tags': 'count:17', 'thumbnail': 'https://akcdn.detik.net.id/community/data/media/thumbs-pasangmata/2022/09/08/366649-16626229351533009620.mp4-03.jpg', }, }, { # insertlive embed 'url': 'https://www.insertlive.com/embed/video/290482', 'info_dict': { 'id': '290482', 'ext': 'mp4', 'release_timestamp': 1663063704, 'thumbnail': 'https://akcdn.detik.net.id/visual/2022/09/13/leonardo-dicaprio_169.png?w=600&q=90', 'age_limit': 0, 'description': 'Aktor Leonardo DiCaprio memang baru saja putus dari kekasihnya yang bernama Camilla Morrone.', 'release_date': '20220913', 'title': 'Diincar Leonardo DiCaprio, Gigi Hadid Ngaku Tertarik Tapi Belum Cinta', 'tags': ['leonardo dicaprio', ' gigi hadid', ' hollywood'], 'uploader': '!nsertlive', }, }, { # beautynesia embed 'url': 'https://www.beautynesia.id/embed/video/261636', 'info_dict': { 'id': '261636', 'ext': 'mp4', 'age_limit': 0, 'release_timestamp': 1662375600, 'description': 'Menurut ramalan astrologi, tiga zodiak ini bakal hoki sepanjang September 2022.', 'title': '3 Zodiak Paling Beruntung Selama September 2022', 'release_date': '20220905', 'tags': ['zodiac update', ' zodiak', ' ramalan bintang', ' zodiak beruntung 2022', ' zodiak hoki september 2022', ' zodiak beruntung september 2022'], 'thumbnail': 
'https://akcdn.detik.net.id/visual/2022/09/05/3-zodiak-paling-beruntung-selama-september-2022_169.jpeg?w=600&q=90', 'uploader': 'amh', }, }, { # cnbcindonesia embed 'url': 'https://www.cnbcindonesia.com/embed/video/371839', 'info_dict': { 'id': '371839', 'ext': 'mp4', 'title': 'Puluhan Pejabat Rusia Tuntut Putin Mundur', 'tags': ['putin'], 'age_limit': 0, 'thumbnail': 'https://awsimages.detik.net.id/visual/2022/09/13/cnbc-indonesia-tv-3_169.png?w=600&q=80', 'description': 'md5:8b9111e37555fcd95fe549a9b4ae6fdc', }, }, { # detik shortlink (we can get it from https://dtk.id/?<url>) 'url': 'https://dtk.id/NkISKr', 'info_dict': { 'id': '220914049', 'ext': 'mp4', 'release_timestamp': 1663114488, 'uploader': 'Tim 20Detik', 'title': 'Pakar Bicara soal Tim Khusus Jokowi dan Mereka yang Pro ke Bjorka', 'age_limit': 0, 'thumbnail': 'https://cdnv.detik.com/videoservice/AdminTV/2022/09/14/f15cae71d7b640c58e75b254ecbb1ce1-20220914071613-0s.jpg?w=400&q=80', 'display_id': 'pakar-bicara-soal-tim-khusus-jokowi-dan-mereka-yang-pro-ke-bjorka', 'upload_date': '20220914', 'release_date': '20220914', 'description': 'md5:5eb03225f7ee40207dd3a1e18a73f1ff', 'timestamp': 1663139688, 'duration': 213.0, 'tags': ['hacker bjorka', 'bjorka', 'hacker bjorka bocorkan data rahasia presiden jokowi', 'jokowi'], }, }] def _extract_from_webpage(self, url, webpage): player_type, video_data = self._search_regex( r'<script\s*[^>]+src="https?://(aws)?cdn\.detik\.net\.id/(?P<type>flowplayer|detikVideo)[^>]+>\s*(?P<video_data>{[^}]+})', webpage, 'playerjs', group=('type', 'video_data'), default=(None, '')) if not player_type: return display_id, extra_info_dict = url_basename(url), {} if player_type == 'flowplayer': video_json_data = self._parse_json(video_data.replace('\'', '"'), display_id) video_url = video_json_data['videoUrl'] extra_info_dict = { 'id': self._search_regex(r'identifier\s*:\s*\'([^\']+)', webpage, 'identifier'), 'thumbnail': video_json_data.get('imageUrl'), } elif player_type == 
'detikVideo': video_url = self._search_regex( r'videoUrl\s*:\s*[\'"]?([^"\']+)', video_data, 'videoUrl') extra_info_dict = { 'id': self._html_search_meta(['video_id', 'dtk:video_id'], webpage), 'thumbnail': self._search_regex(r'imageUrl\s*:\s*[\'"]?([^"\']+)', video_data, 'videoUrl'), 'duration': int_or_none(self._html_search_meta('duration', webpage, fatal=False, default=None)), 'release_timestamp': int_or_none(self._html_search_meta('dtk:publishdateunix', webpage, fatal=False, default=None), 1000), 'timestamp': int_or_none(self._html_search_meta('dtk:createdateunix', webpage, fatal=False, default=None), 1000), 'uploader': self._search_regex( r'([^-]+)', self._html_search_meta('dtk:author', webpage, default='').strip(), 'uploader', default=None), } formats, subtitles = self._extract_m3u8_formats_and_subtitles(video_url, display_id) json_ld_data = self._search_json_ld(webpage, display_id, default={}) yield merge_dicts(json_ld_data, extra_info_dict, { 'display_id': display_id, 'title': self._html_search_meta(['og:title', 'originalTitle'], webpage) or self._html_extract_title(webpage), 'description': self._html_search_meta(['og:description', 'twitter:description', 'description'], webpage), 'formats': formats, 'subtitles': subtitles, 'tags': try_call(lambda: self._html_search_meta( ['keywords', 'keyword', 'dtk:keywords'], webpage).split(',')), })
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/noice.py
yt_dlp/extractor/noice.py
from .common import InfoExtractor from ..utils import ( clean_html, determine_ext, int_or_none, parse_iso8601, traverse_obj, variadic, ) class NoicePodcastIE(InfoExtractor): _VALID_URL = r'https?://open\.noice\.id/content/(?P<id>[a-fA-F0-9-]+)' _TESTS = [{ 'url': 'https://open.noice.id/content/7694bb04-ff0f-40fa-a60b-5b39f29584b2', 'info_dict': { 'id': '7694bb04-ff0f-40fa-a60b-5b39f29584b2', 'ext': 'm4a', 'season': 'Season 1', 'description': 'md5:58d1274e6857b6fbbecf47075885380d', 'release_date': '20221115', 'timestamp': 1668496642, 'season_number': 1, 'upload_date': '20221115', 'release_timestamp': 1668496642, 'title': 'Eps 1. Belajar dari Wishnutama: Kreatif Bukan Followers! (bersama Wishnutama)', 'modified_date': '20221121', 'categories': ['Bisnis dan Keuangan'], 'duration': 3567, 'modified_timestamp': 1669030647, 'thumbnail': 'https://images.noiceid.cc/catalog/content-1668496302560', 'channel_id': '9dab1024-5b92-4265-ae1c-63da87359832', 'like_count': int, 'channel': 'Noice Space Talks', 'comment_count': int, 'dislike_count': int, 'channel_follower_count': int, }, }, { 'url': 'https://open.noice.id/content/222134e4-99f2-456f-b8a2-b8be404bf063', 'info_dict': { 'id': '222134e4-99f2-456f-b8a2-b8be404bf063', 'ext': 'm4a', 'release_timestamp': 1653488220, 'description': 'md5:35074f6190cef52b05dd133bb2ef460e', 'upload_date': '20220525', 'timestamp': 1653460637, 'release_date': '20220525', 'thumbnail': 'https://images.noiceid.cc/catalog/content-1653460337625', 'title': 'Eps 1: Dijodohin Sama Anak Pak RT', 'modified_timestamp': 1669030647, 'season_number': 1, 'modified_date': '20221121', 'categories': ['Cerita dan Drama'], 'duration': 1830, 'season': 'Season 1', 'channel_id': '60193f6b-d24d-4b23-913b-ceed5a731e74', 'dislike_count': int, 'like_count': int, 'comment_count': int, 'channel': 'Dear Jerome', 'channel_follower_count': int, }, }] def _get_formats_and_subtitles(self, media_url, video_id): formats, subtitles = [], {} for url in variadic(media_url): ext = 
determine_ext(url) if ext == 'm3u8': fmts, subs = self._extract_m3u8_formats_and_subtitles(url, video_id) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) else: formats.append({ 'url': url, 'ext': 'mp3', 'vcodec': 'none', 'acodec': 'mp3', }) return formats, subtitles def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) nextjs_data = self._search_nextjs_data(webpage, display_id)['props']['pageProps']['contentDetails'] media_url_list = traverse_obj(nextjs_data, (('rawContentUrl', 'url'), )) formats, subtitles = self._get_formats_and_subtitles(media_url_list, display_id) return { 'id': nextjs_data.get('id') or display_id, 'title': nextjs_data.get('title') or self._html_search_meta('og:title', webpage), 'formats': formats, 'subtitles': subtitles, 'description': (nextjs_data.get('description') or clean_html(nextjs_data.get('htmlDescription')) or self._html_search_meta(['description', 'og:description'], webpage)), 'thumbnail': nextjs_data.get('image') or self._html_search_meta('og:image', webpage), 'timestamp': parse_iso8601(nextjs_data.get('createdAt')), 'release_timestamp': parse_iso8601(nextjs_data.get('publishedAt')), 'modified_timestamp': parse_iso8601( nextjs_data.get('updatedAt') or self._html_search_meta('og:updated_time', webpage)), 'duration': int_or_none(nextjs_data.get('duration')), 'categories': traverse_obj(nextjs_data, ('genres', ..., 'name')), 'season': nextjs_data.get('seasonName'), 'season_number': int_or_none(nextjs_data.get('seasonNumber')), 'channel': traverse_obj(nextjs_data, ('catalog', 'title')), 'channel_id': traverse_obj(nextjs_data, ('catalog', 'id'), 'catalogId'), **traverse_obj(nextjs_data, ('meta', 'aggregations', { 'like_count': 'likes', 'dislike_count': 'dislikes', 'comment_count': 'comments', 'channel_follower_count': 'followers', })), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/mgtv.py
yt_dlp/extractor/mgtv.py
import base64 import time import uuid from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, int_or_none, parse_resolution, traverse_obj, try_get, url_or_none, urljoin, ) class MGTVIE(InfoExtractor): _VALID_URL = r'https?://(?:w(?:ww)?\.)?mgtv\.com/[bv]/(?:[^/]+/)*(?P<id>\d+)\.html' IE_DESC = '芒果TV' IE_NAME = 'MangoTV' _TESTS = [{ 'url': 'http://www.mgtv.com/v/1/290525/f/3116640.html', 'info_dict': { 'id': '3116640', 'ext': 'mp4', 'title': '我是歌手 第四季', 'description': '我是歌手第四季双年巅峰会', 'duration': 7461, 'thumbnail': r're:^https?://.*\.jpg$', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://w.mgtv.com/b/427837/15588271.html', 'info_dict': { 'id': '15588271', 'ext': 'mp4', 'title': '春日迟迟再出发 沉浸版第1期:陆莹结婚半年查出肾炎被离婚 吴雅婷把一半票根退给前夫', 'description': 'md5:a7a05a05b1aa87bd50cae619b19bbca6', 'thumbnail': r're:^https?://.+\.jpg', 'duration': 4026, }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://w.mgtv.com/b/333652/7329822.html', 'info_dict': { 'id': '7329822', 'ext': 'mp4', 'title': '拜托,请你爱我', 'description': 'md5:cd81be6499bafe32e4d143abd822bf9c', 'thumbnail': r're:^https?://.+\.jpg', 'duration': 2656, }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://w.mgtv.com/b/427837/15591647.html', 'only_matching': True, }, { 'url': 'https://w.mgtv.com/b/388252/15634192.html?fpa=33318&fpos=4&lastp=ch_home', 'only_matching': True, }, { 'url': 'http://www.mgtv.com/b/301817/3826653.html', 'only_matching': True, }, { 'url': 'https://w.mgtv.com/b/301817/3826653.html', 'only_matching': True, }] _RESOLUTIONS = { '标清': ('480p', '854x480'), '高清': ('540p', '960x540'), '超清': ('720p', '1280x720'), '蓝光': ('1080p', '1920x1080'), } def _real_extract(self, url): video_id = self._match_id(url) tk2 = base64.urlsafe_b64encode( f'did={uuid.uuid4()}|pno=1030|ver=0.3.0301|clit={int(time.time())}'.encode())[::-1] try: api_data = self._download_json( 'https://pcweb.api.mgtv.com/player/video', video_id, query={ 'tk2': tk2, 
'video_id': video_id, 'type': 'pch5', }, headers=self.geo_verification_headers())['data'] except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 401: error = self._parse_json(e.cause.response.read().decode(), None) if error.get('code') == 40005: self.raise_geo_restricted(countries=self._GEO_COUNTRIES) raise ExtractorError(error['msg'], expected=True) raise stream_data = self._download_json( 'https://pcweb.api.mgtv.com/player/getSource', video_id, query={ 'tk2': tk2, 'pm2': api_data['atc']['pm2'], 'video_id': video_id, 'type': 'pch5', 'src': 'intelmgtv', }, headers=self.geo_verification_headers())['data'] stream_domain = traverse_obj(stream_data, ('stream_domain', ..., {url_or_none}), get_all=False) formats = [] for idx, stream in enumerate(traverse_obj(stream_data, ('stream', lambda _, v: v['url']))): stream_name = traverse_obj(stream, 'name', 'standardName', 'barName', expected_type=str) resolution = traverse_obj( self._RESOLUTIONS, (stream_name, 1 if stream.get('scale') == '16:9' else 0)) format_url = traverse_obj(self._download_json( urljoin(stream_domain, stream['url']), video_id, fatal=False, note=f'Downloading video info for format {resolution or stream_name}'), ('info', {url_or_none})) if not format_url: continue tbr = int_or_none(stream.get('filebitrate') or self._search_regex( r'_(\d+)_mp4/', format_url, 'tbr', default=None)) formats.append({ 'format_id': str(tbr or idx), 'url': format_url, 'ext': 'mp4', 'tbr': tbr, 'vcodec': stream.get('videoFormat'), 'acodec': stream.get('audioFormat'), **parse_resolution(resolution), 'protocol': 'm3u8_native', 'http_headers': { 'Referer': url, }, 'format_note': stream_name, }) return { 'id': video_id, 'formats': formats, **traverse_obj(api_data, ('info', { 'title': ('title', {str.strip}), 'description': ('desc', {str}), 'duration': ('duration', {int_or_none}), 'thumbnail': ('thumb', {url_or_none}), })), 'subtitles': self.extract_subtitles(video_id, stream_domain), } def _get_subtitles(self, 
video_id, domain): info = self._download_json(f'https://pcweb.api.mgtv.com/video/title?videoId={video_id}', video_id, fatal=False) or {} subtitles = {} for sub in try_get(info, lambda x: x['data']['title']) or []: url_sub = sub.get('url') if not url_sub: continue locale = sub.get('captionSimpleName') or 'en' sub = self._download_json(f'{domain}{url_sub}', video_id, fatal=False, note=f'Download subtitle for locale {sub.get("name")} ({locale})') or {} sub_url = url_or_none(sub.get('info')) if not sub_url: continue subtitles.setdefault(locale.lower(), []).append({ 'url': sub_url, 'name': sub.get('name'), 'ext': 'srt', }) return subtitles
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/bitmovin.py
yt_dlp/extractor/bitmovin.py
import re from .common import InfoExtractor from ..utils.traversal import traverse_obj class BitmovinIE(InfoExtractor): _VALID_URL = r'https?://streams\.bitmovin\.com/(?P<id>\w+)' _EMBED_REGEX = [r'<iframe\b[^>]+\bsrc=["\'](?P<url>(?:https?:)?//streams\.bitmovin\.com/(?P<id>\w+)[^"\']+)'] _TESTS = [{ 'url': 'https://streams.bitmovin.com/cqkl1t5giv3lrce7pjbg/embed', 'info_dict': { 'id': 'cqkl1t5giv3lrce7pjbg', 'ext': 'mp4', 'title': 'Developing Osteopathic Residents as Faculty', 'thumbnail': 'https://streams.bitmovin.com/cqkl1t5giv3lrce7pjbg/poster', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://streams.bitmovin.com/cgl9rh94uvs51rqc8jhg/share', 'info_dict': { 'id': 'cgl9rh94uvs51rqc8jhg', 'ext': 'mp4', 'title': 'Big Buck Bunny (Streams Docs)', 'thumbnail': 'https://streams.bitmovin.com/cgl9rh94uvs51rqc8jhg/poster', }, 'params': {'skip_download': 'm3u8'}, }] _WEBPAGE_TESTS = [{ # bitmovin-stream web component 'url': 'https://www.institutionalinvestor.com/article/2bsw1in1l9k68mp9kritc/video-war-stories-over-board-games/best-case-i-get-fired-war-stories', 'info_dict': { 'id': 'cuiumeil6g115lc4li3g', 'ext': 'mp4', 'title': '[media] War Stories over Board Games: “Best Case: I Get Fired” ', 'thumbnail': 'https://streams.bitmovin.com/cuiumeil6g115lc4li3g/poster', }, 'params': {'skip_download': 'm3u8'}, }, { # iframe embed 'url': 'https://www.clearblueionizer.com/en/pool-ionizers/mineral-pool-vs-saltwater-pool/', 'info_dict': { 'id': 'cvpvfsm1pf7itg7cfvtg', 'ext': 'mp4', 'title': 'Pool Ionizer vs. 
Salt Chlorinator', 'thumbnail': 'https://streams.bitmovin.com/cvpvfsm1pf7itg7cfvtg/poster', }, 'params': {'skip_download': 'm3u8'}, }] @classmethod def _extract_embed_urls(cls, url, webpage): yield from super()._extract_embed_urls(url, webpage) for stream_id in re.findall(r'<bitmovin-stream\b[^>]*\bstream-id=["\'](?P<id>\w+)', webpage): yield f'https://streams.bitmovin.com/{stream_id}' def _real_extract(self, url): video_id = self._match_id(url) player_config = self._download_json( f'https://streams.bitmovin.com/{video_id}/config', video_id)['sources'] formats, subtitles = self._extract_m3u8_formats_and_subtitles( player_config['hls'], video_id, 'mp4') return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, **traverse_obj(player_config, { 'title': ('title', {str}), 'thumbnail': ('poster', {str}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ctsnews.py
yt_dlp/extractor/ctsnews.py
from .common import InfoExtractor from .youtube import YoutubeIE from ..utils import unified_timestamp class CtsNewsIE(InfoExtractor): IE_DESC = '華視新聞' _VALID_URL = r'https?://news\.cts\.com\.tw/[a-z]+/[a-z]+/\d+/(?P<id>\d+)\.html' _TESTS = [{ 'url': 'http://news.cts.com.tw/cts/international/201501/201501291578109.html', 'md5': 'a9875cb790252b08431186d741beaabe', 'info_dict': { 'id': '201501291578109', 'ext': 'mp4', 'title': '以色列.真主黨交火 3人死亡 - 華視新聞網', 'description': '以色列和黎巴嫩真主黨,爆發五年最嚴重衝突,雙方砲轟交火,兩名以軍死亡,還有一名西班牙籍的聯合國維和人員也不幸罹難。大陸陝西、河南、安徽、江蘇和湖北五個省份出現大暴雪,嚴重影響陸空交通,不過九華山卻出現...', 'timestamp': 1422528540, 'upload_date': '20150129', }, }, { # News count not appear on page but still available in database 'url': 'http://news.cts.com.tw/cts/international/201309/201309031304098.html', 'md5': '3aee7e0df7cdff94e43581f54c22619e', 'info_dict': { 'id': '201309031304098', 'ext': 'mp4', 'title': '韓國31歲童顏男 貌如十多歲小孩 - 華視新聞網', 'description': '越有年紀的人,越希望看起來年輕一點,而南韓卻有一位31歲的男子,看起來像是11、12歲的小孩,身...', 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1378205880, 'upload_date': '20130903', }, }, { # With Youtube embedded video 'url': 'http://news.cts.com.tw/cts/money/201501/201501291578003.html', 'md5': 'e4726b2ccd70ba2c319865e28f0a91d1', 'info_dict': { 'id': 'OVbfO7d0_hQ', 'ext': 'mp4', 'title': 'iPhone6熱銷 蘋果財報亮眼', 'description': 'md5:f395d4f485487bb0f992ed2c4b07aa7d', 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20150128', 'uploader_id': 'TBSCTS', 'uploader': '中華電視公司', }, 'add_ie': ['Youtube'], }] def _real_extract(self, url): news_id = self._match_id(url) page = self._download_webpage(url, news_id) news_id = self._hidden_inputs(page).get('get_id') if news_id: mp4_feed = self._download_json( 'http://news.cts.com.tw/action/test_mp4feed.php', news_id, note='Fetching feed', query={'news_id': news_id}) video_url = mp4_feed['source_url'] else: self.to_screen('Not CTSPlayer video, trying Youtube...') youtube_url = YoutubeIE._extract_url(page) return self.url_result(youtube_url, 
ie='Youtube') description = self._html_search_meta('description', page) title = self._html_search_meta('title', page, fatal=True) thumbnail = self._html_search_meta('image', page) datetime_str = self._html_search_regex( r'(\d{4}/\d{2}/\d{2} \d{2}:\d{2})', page, 'date and time', fatal=False) timestamp = None if datetime_str: timestamp = unified_timestamp(datetime_str) - 8 * 3600 return { 'id': news_id, 'url': video_url, 'title': title, 'description': description, 'thumbnail': thumbnail, 'timestamp': timestamp, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/kick.py
yt_dlp/extractor/kick.py
import functools import urllib.parse from .common import InfoExtractor from ..utils import ( UserNotLive, determine_ext, float_or_none, int_or_none, parse_iso8601, str_or_none, traverse_obj, unified_timestamp, url_or_none, ) class KickBaseIE(InfoExtractor): @functools.cached_property def _api_headers(self): token = traverse_obj( self._get_cookies('https://kick.com/'), ('session_token', 'value', {urllib.parse.unquote})) return {'Authorization': f'Bearer {token}'} if token else {} def _call_api(self, path, display_id, note='Downloading API JSON', headers={}, **kwargs): return self._download_json( f'https://kick.com/api/{path}', display_id, note=note, headers={**self._api_headers, **headers}, impersonate=True, **kwargs) class KickIE(KickBaseIE): IE_NAME = 'kick:live' _VALID_URL = r'https?://(?:www\.)?kick\.com/(?!(?:video|categories|search|auth)(?:[/?#]|$))(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://kick.com/buddha', 'info_dict': { 'id': '92722911-nopixel-40', 'ext': 'mp4', 'title': str, 'description': str, 'timestamp': int, 'thumbnail': r're:https?://.+\.jpg', 'categories': list, 'upload_date': str, 'channel': 'buddha', 'channel_id': '32807', 'uploader': 'Buddha', 'uploader_id': '33057', 'live_status': 'is_live', 'concurrent_view_count': int, 'release_timestamp': int, 'age_limit': 18, 'release_date': str, }, 'params': {'skip_download': 'livestream'}, # 'skip': 'livestream', }, { 'url': 'https://kick.com/xqc', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if (KickVODIE.suitable(url) or KickClipIE.suitable(url)) else super().suitable(url) def _real_extract(self, url): channel = self._match_id(url) response = self._call_api(f'v2/channels/{channel}', channel) if not traverse_obj(response, 'livestream', expected_type=dict): raise UserNotLive(video_id=channel) return { 'channel': channel, 'is_live': True, 'formats': self._extract_m3u8_formats(response['playback_url'], channel, 'mp4', live=True), **traverse_obj(response, { 'id': ('livestream', 
'slug', {str}), 'title': ('livestream', 'session_title', {str}), 'description': ('user', 'bio', {str}), 'channel_id': (('id', ('livestream', 'channel_id')), {int}, {str_or_none}, any), 'uploader': (('name', ('user', 'username')), {str}, any), 'uploader_id': (('user_id', ('user', 'id')), {int}, {str_or_none}, any), 'timestamp': ('livestream', 'created_at', {unified_timestamp}), 'release_timestamp': ('livestream', 'start_time', {unified_timestamp}), 'thumbnail': ('livestream', 'thumbnail', 'url', {url_or_none}), 'categories': ('recent_categories', ..., 'name', {str}), 'concurrent_view_count': ('livestream', 'viewer_count', {int_or_none}), 'age_limit': ('livestream', 'is_mature', {bool}, {lambda x: 18 if x else 0}), }), } class KickVODIE(KickBaseIE): IE_NAME = 'kick:vod' _VALID_URL = r'https?://(?:www\.)?kick\.com/[\w-]+/videos/(?P<id>[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12})' _TESTS = [{ # Regular VOD 'url': 'https://kick.com/xqc/videos/5c697a87-afce-4256-b01f-3c8fe71ef5cb', 'info_dict': { 'id': '5c697a87-afce-4256-b01f-3c8fe71ef5cb', 'ext': 'mp4', 'title': '🐗LIVE🐗CLICK🐗HERE🐗DRAMA🐗ALL DAY🐗NEWS🐗VIDEOS🐗CLIPS🐗GAMES🐗STUFF🐗WOW🐗IM HERE🐗LETS GO🐗COOL🐗VERY NICE🐗', 'description': 'THE BEST AT ABSOLUTELY EVERYTHING. THE JUICER. 
LEADER OF THE JUICERS.', 'uploader': 'xQc', 'uploader_id': '676', 'channel': 'xqc', 'channel_id': '668', 'view_count': int, 'age_limit': 18, 'duration': 22278.0, 'thumbnail': r're:^https?://.*\.jpg', 'categories': ['Deadlock'], 'timestamp': 1756082443, 'upload_date': '20250825', }, 'params': {'skip_download': 'm3u8'}, }, { # VOD of ongoing livestream (at the time of writing the test, ID rotates every two days) 'url': 'https://kick.com/a-log-burner/videos/5230df84-ea38-46e1-be4f-f5949ae55641', 'info_dict': { 'id': '5230df84-ea38-46e1-be4f-f5949ae55641', 'ext': 'mp4', 'title': r're:😴 Cozy Fireplace ASMR 🔥 | Relax, Focus, Sleep 💤', 'description': 'md5:080bc713eac0321a7b376a1b53816d1b', 'uploader': 'A_Log_Burner', 'uploader_id': '65114691', 'channel': 'a-log-burner', 'channel_id': '63967687', 'view_count': int, 'age_limit': 18, 'thumbnail': r're:^https?://.*\.jpg', 'categories': ['Other, Watch Party'], 'timestamp': int, 'upload_date': str, 'live_status': 'is_live', }, 'skip': 'live', }] def _real_extract(self, url): video_id = self._match_id(url) response = self._call_api(f'v1/video/{video_id}', video_id) return { 'id': video_id, 'formats': self._extract_m3u8_formats(response['source'], video_id, 'mp4'), **traverse_obj(response, { 'title': ('livestream', ('session_title', 'slug'), {str}, any), 'description': ('livestream', 'channel', 'user', 'bio', {str}), 'channel': ('livestream', 'channel', 'slug', {str}), 'channel_id': ('livestream', 'channel', 'id', {int}, {str_or_none}), 'uploader': ('livestream', 'channel', 'user', 'username', {str}), 'uploader_id': ('livestream', 'channel', 'user_id', {int}, {str_or_none}), 'timestamp': ('created_at', {parse_iso8601}), 'duration': ('livestream', 'duration', {float_or_none(scale=1000)}), 'thumbnail': ('livestream', 'thumbnail', {url_or_none}), 'categories': ('livestream', 'categories', ..., 'name', {str}), 'view_count': ('views', {int_or_none}), 'age_limit': ('livestream', 'is_mature', {bool}, {lambda x: 18 if x else 0}), 
'is_live': ('livestream', 'is_live', {bool}), }), } class KickClipIE(KickBaseIE): IE_NAME = 'kick:clips' _VALID_URL = r'https?://(?:www\.)?kick\.com/[\w-]+(?:/clips/|/?\?(?:[^#]+&)?clip=)(?P<id>clip_[\w-]+)' _TESTS = [{ 'url': 'https://kick.com/mxddy?clip=clip_01GYXVB5Y8PWAPWCWMSBCFB05X', 'info_dict': { 'id': 'clip_01GYXVB5Y8PWAPWCWMSBCFB05X', 'ext': 'mp4', 'title': 'Maddy detains Abd D:', 'channel': 'mxddy', 'channel_id': '133789', 'uploader': 'AbdCreates', 'uploader_id': '3309077', 'thumbnail': r're:^https?://.*\.jpeg', 'duration': 35, 'timestamp': 1682481453, 'upload_date': '20230426', 'view_count': int, 'like_count': int, 'categories': ['VALORANT'], 'age_limit': 18, }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://kick.com/destiny?clip=clip_01H9SKET879NE7N9RJRRDS98J3', 'info_dict': { 'id': 'clip_01H9SKET879NE7N9RJRRDS98J3', 'title': 'W jews', 'ext': 'mp4', 'channel': 'destiny', 'channel_id': '1772249', 'uploader': 'punished_furry', 'uploader_id': '2027722', 'duration': 49.0, 'upload_date': '20230908', 'timestamp': 1694150180, 'thumbnail': 'https://clips.kick.com/clips/j3/clip_01H9SKET879NE7N9RJRRDS98J3/thumbnail.png', 'view_count': int, 'like_count': int, 'categories': ['Just Chatting'], 'age_limit': 0, }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://kick.com/spreen/clips/clip_01J8RGZRKHXHXXKJEHGRM932A5', 'info_dict': { 'id': 'clip_01J8RGZRKHXHXXKJEHGRM932A5', 'ext': 'mp4', 'title': 'KLJASLDJKLJKASDLJKDAS', 'channel': 'spreen', 'channel_id': '5312671', 'uploader': 'AnormalBarraBaja', 'uploader_id': '26518262', 'duration': 43.0, 'upload_date': '20240927', 'timestamp': 1727399987, 'thumbnail': 'https://clips.kick.com/clips/f2/clip_01J8RGZRKHXHXXKJEHGRM932A5/thumbnail.webp', 'view_count': int, 'like_count': int, 'categories': ['Minecraft'], 'age_limit': 0, }, 'params': {'skip_download': 'm3u8'}, }] def _real_extract(self, url): clip_id = self._match_id(url) clip = self._call_api(f'v2/clips/{clip_id}/play', clip_id)['clip'] clip_url = 
clip['clip_url'] if determine_ext(clip_url) == 'm3u8': formats = self._extract_m3u8_formats(clip_url, clip_id, 'mp4') else: formats = [{'url': clip_url}] return { 'id': clip_id, 'formats': formats, **traverse_obj(clip, { 'title': ('title', {str}), 'channel': ('channel', 'slug', {str}), 'channel_id': ('channel', 'id', {int}, {str_or_none}), 'uploader': ('creator', 'username', {str}), 'uploader_id': ('creator', 'id', {int}, {str_or_none}), 'thumbnail': ('thumbnail_url', {url_or_none}), 'duration': ('duration', {float_or_none}), 'categories': ('category', 'name', {str}, all), 'timestamp': ('created_at', {parse_iso8601}), 'view_count': ('views', {int_or_none}), 'like_count': ('likes', {int_or_none}), 'age_limit': ('is_mature', {bool}, {lambda x: 18 if x else 0}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/skynewsau.py
yt_dlp/extractor/skynewsau.py
from .common import InfoExtractor from ..utils import ( try_get, unified_strdate, ) class SkyNewsAUIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?skynews\.com\.au/[^/]+/[^/]+/[^/]+/video/(?P<id>[a-z0-9]+)' _TESTS = [{ 'url': 'https://www.skynews.com.au/world-news/united-states/incredible-vision-shows-lava-overflowing-from-spains-la-palma-volcano/video/0f4c6243d6903502c01251f228b91a71', 'info_dict': { 'id': '6277184925001', 'ext': 'mp4', 'title': 'md5:60594f1ea6d5ae93e292900f4d34e9ae', 'description': 'md5:60594f1ea6d5ae93e292900f4d34e9ae', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 76.394, 'timestamp': 1634271300, 'uploader_id': '5348771529001', 'tags': ['fblink', 'msn', 'usa', 'world', 'yt'], 'upload_date': '20211015', }, 'params': {'skip_download': True, 'format': 'bv'}, }] _API_KEY = '6krsj3w249nk779d8fukqx9f' def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) embedcode = self._search_regex(r'embedcode\s?=\s?\"([^\"]+)\"', webpage, 'embedcode') data_json = self._download_json( f'https://content.api.news/v3/videos/brightcove/{embedcode}?api_key={self._API_KEY}', video_id)['content'] return { 'id': video_id, '_type': 'url_transparent', 'url': 'https://players.brightcove.net/{}/default_default/index.html?videoId={}'.format(*tuple(embedcode.split('-'))), 'ie_key': 'BrightcoveNew', 'title': data_json.get('caption'), 'upload_date': unified_strdate(try_get(data_json, lambda x: x['date']['created'])), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/cnbc.py
yt_dlp/extractor/cnbc.py
from .common import InfoExtractor from ..utils import int_or_none, parse_iso8601, str_or_none, url_or_none from ..utils.traversal import traverse_obj class CNBCVideoIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?cnbc\.com/video/(?:[^/?#]+/)+(?P<id>[^./?#&]+)\.html' _TESTS = [{ 'url': 'https://www.cnbc.com/video/2023/12/07/mcdonalds-just-unveiled-cosmcsits-new-spinoff-brand.html', 'info_dict': { 'ext': 'mp4', 'id': '107344774', 'display_id': 'mcdonalds-just-unveiled-cosmcsits-new-spinoff-brand', 'modified_timestamp': 1702053483, 'timestamp': 1701977810, 'channel': 'News Videos', 'upload_date': '20231207', 'description': 'md5:882c001d85cb43d7579b514307b3e78b', 'release_timestamp': 1701977375, 'modified_date': '20231208', 'release_date': '20231207', 'duration': 65, 'creators': ['Sean Conlon'], 'title': 'Here\'s a first look at McDonald\'s new spinoff brand, CosMc\'s', 'thumbnail': 'https://image.cnbcfm.com/api/v1/image/107344192-1701894812493-CosMcsskyHero_2336x1040_hero-desktop.jpg?v=1701894855', }, 'expected_warnings': ['Unable to download f4m manifest'], }, { 'url': 'https://www.cnbc.com/video/2023/12/08/jim-cramer-shares-his-take-on-seattles-tech-scene.html', 'info_dict': { 'creators': ['Jim Cramer'], 'channel': 'Mad Money with Jim Cramer', 'description': 'md5:72925be21b952e95eba51178dddf4e3e', 'duration': 299.0, 'ext': 'mp4', 'id': '107345451', 'display_id': 'jim-cramer-shares-his-take-on-seattles-tech-scene', 'thumbnail': 'https://image.cnbcfm.com/api/v1/image/107345481-1702079431MM-B-120823.jpg?v=1702079430', 'timestamp': 1702080139, 'title': 'Jim Cramer shares his take on Seattle\'s tech scene', 'release_date': '20231208', 'upload_date': '20231209', 'modified_timestamp': 1702080139, 'modified_date': '20231209', 'release_timestamp': 1702073551, }, 'expected_warnings': ['Unable to download f4m manifest'], }, { 'url': 'https://www.cnbc.com/video/2023/12/08/the-epicenter-of-ai-is-in-seattle-says-jim-cramer.html', 'info_dict': { 'creators': ['Jim Cramer'], 
'channel': 'Mad Money with Jim Cramer', 'description': 'md5:72925be21b952e95eba51178dddf4e3e', 'duration': 113.0, 'ext': 'mp4', 'id': '107345474', 'display_id': 'the-epicenter-of-ai-is-in-seattle-says-jim-cramer', 'thumbnail': 'https://image.cnbcfm.com/api/v1/image/107345486-Screenshot_2023-12-08_at_70339_PM.png?v=1702080248', 'timestamp': 1702080535, 'title': 'The epicenter of AI is in Seattle, says Jim Cramer', 'release_timestamp': 1702077347, 'modified_timestamp': 1702080535, 'release_date': '20231208', 'upload_date': '20231209', 'modified_date': '20231209', }, 'expected_warnings': ['Unable to download f4m manifest'], }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) data = self._search_json(r'window\.__s_data=', webpage, 'video data', display_id) player_data = traverse_obj(data, ( 'page', 'page', 'layout', ..., 'columns', ..., 'modules', lambda _, v: v['name'] == 'clipPlayer', 'data', {dict}), get_all=False) return { 'id': display_id, 'display_id': display_id, 'formats': self._extract_akamai_formats(player_data['playbackURL'], display_id), **self._search_json_ld(webpage, display_id, fatal=False), **traverse_obj(player_data, { 'id': ('id', {str_or_none}), 'title': ('title', {str}), 'description': ('description', {str}), 'creators': ('author', ..., 'name', {str}), 'timestamp': ('datePublished', {parse_iso8601}), 'release_timestamp': ('uploadDate', {parse_iso8601}), 'modified_timestamp': ('dateLastPublished', {parse_iso8601}), 'thumbnail': ('thumbnail', {url_or_none}), 'duration': ('duration', {int_or_none}), 'channel': ('section', 'title', {str}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/getcourseru.py
yt_dlp/extractor/getcourseru.py
import re import time import urllib.parse from .common import InfoExtractor from ..utils import ExtractorError, int_or_none, url_or_none, urlencode_postdata from ..utils.traversal import traverse_obj class GetCourseRuPlayerIE(InfoExtractor): _VALID_URL = r'https?://(?:player02\.getcourse\.ru|cf-api-2\.vhcdn\.com)/sign-player/?\?(?:[^#]+&)?json=[^#&]+' _EMBED_REGEX = [rf'<iframe[^>]+\bsrc=[\'"](?P<url>{_VALID_URL}[^\'"]*)'] _TESTS = [{ 'url': 'http://player02.getcourse.ru/sign-player/?json=eyJ2aWRlb19oYXNoIjoiMTkwYmRmOTNmMWIyOTczNTMwOTg1M2E3YTE5ZTI0YjMiLCJ1c2VyX2lkIjozNTk1MjUxODMsInN1Yl9sb2dpbl91c2VyX2lkIjpudWxsLCJsZXNzb25faWQiOm51bGwsImlwIjoiNDYuMTQyLjE4Mi4yNDciLCJnY19ob3N0IjoiYWNhZGVteW1lbC5vbmxpbmUiLCJ0aW1lIjoxNzA1NDQ5NjQyLCJwYXlsb2FkIjoidV8zNTk1MjUxODMiLCJ1aV9sYW5ndWFnZSI6InJ1IiwiaXNfaGF2ZV9jdXN0b21fc3R5bGUiOnRydWV9&s=354ad2c993d95d5ac629e3133d6cefea&vh-static-feature=zigzag', 'info_dict': { 'id': '513573381', 'title': '190bdf93f1b29735309853a7a19e24b3', 'ext': 'mp4', 'thumbnail': 'https://preview-htz.kinescopecdn.net/preview/190bdf93f1b29735309853a7a19e24b3/preview.jpg?version=1702370546&host=vh-80', 'duration': 1693, }, 'skip': 'JWT expired', }, { 'url': 'https://cf-api-2.vhcdn.com/sign-player/?json=example', 'info_dict': { 'id': '435735291', 'title': '8afd7c489952108e00f019590f3711f3', 'ext': 'mp4', 'thumbnail': 'https://preview-htz.vhcdn.com/preview/8afd7c489952108e00f019590f3711f3/preview.jpg?version=1682170973&host=vh-72', 'duration': 777, }, 'skip': 'JWT expired', }] def _real_extract(self, url): webpage = self._download_webpage(url, None, 'Downloading player page') window_configs = self._search_json( r'window\.configs\s*=', webpage, 'config', None) video_id = str(window_configs['gcFileId']) formats, subtitles = self._extract_m3u8_formats_and_subtitles( window_configs['masterPlaylistUrl'], video_id) return { **traverse_obj(window_configs, { 'title': ('videoHash', {str}), 'thumbnail': ('previewUrl', {url_or_none}), 'duration': ('videoDuration', 
{int_or_none}), }), 'id': video_id, 'formats': formats, 'subtitles': subtitles, } class GetCourseRuIE(InfoExtractor): _NETRC_MACHINE = 'getcourseru' _DOMAINS = [ 'academymel.online', 'marafon.mani-beauty.com', 'on.psbook.ru', ] _BASE_URL_RE = rf'https?://(?:(?!player02\.)[^.]+\.getcourse\.(?:ru|io)|{"|".join(map(re.escape, _DOMAINS))})' _VALID_URL = [ rf'{_BASE_URL_RE}/(?!pl/|teach/)(?P<id>[^?#]+)', rf'{_BASE_URL_RE}/(?:pl/)?teach/control/lesson/view\?(?:[^#]+&)?id=(?P<id>\d+)', ] _TESTS = [{ 'url': 'http://academymel.online/3video_1', 'info_dict': { 'id': '3059742', 'display_id': '3video_1', 'title': 'Промоуроки Академии МЕЛ', }, 'playlist_count': 1, 'playlist': [{ 'info_dict': { 'id': '513573381', 'ext': 'mp4', 'title': 'Промоуроки Академии МЕЛ', 'thumbnail': 'https://preview-htz.kinescopecdn.net/preview/190bdf93f1b29735309853a7a19e24b3/preview.jpg?version=1702370546&host=vh-80', 'duration': 1693, }, }], }, { 'url': 'https://academymel.getcourse.ru/3video_1', 'info_dict': { 'id': '3059742', 'display_id': '3video_1', 'title': 'Промоуроки Академии МЕЛ', }, 'playlist_count': 1, 'playlist': [{ 'info_dict': { 'id': '513573381', 'ext': 'mp4', 'title': 'Промоуроки Академии МЕЛ', 'thumbnail': 'https://preview-htz.kinescopecdn.net/preview/190bdf93f1b29735309853a7a19e24b3/preview.jpg?version=1702370546&host=vh-80', 'duration': 1693, }, }], }, { 'url': 'https://academymel.getcourse.ru/pl/teach/control/lesson/view?id=319141781&editMode=0', 'info_dict': { 'id': '319141781', 'title': '1. Разминка у стены', }, 'playlist_count': 1, 'playlist': [{ 'info_dict': { 'id': '4919601', 'ext': 'mp4', 'title': '1. 
Разминка у стены', 'thumbnail': 'https://preview-htz.vhcdn.com/preview/5a521788e7dc25b4f70c3dff6512d90e/preview.jpg?version=1703223532&host=vh-81', 'duration': 704, }, }], 'skip': 'paid lesson', }, { 'url': 'https://manibeauty.getcourse.ru/pl/teach/control/lesson/view?id=272499894', 'info_dict': { 'id': '272499894', 'title': 'Мотивация к тренировкам', }, 'playlist_count': 1, 'playlist': [{ 'info_dict': { 'id': '447479687', 'ext': 'mp4', 'title': 'Мотивация к тренировкам', 'thumbnail': 'https://preview-htz.vhcdn.com/preview/70ed5b9f489dd03b4aff55bfdff71a26/preview.jpg?version=1685115787&host=vh-71', 'duration': 30, }, }], 'skip': 'paid lesson', }, { 'url': 'https://gaismasmandalas.getcourse.io/ATLAUTSEVBUT', 'only_matching': True, }] _LOGIN_URL_PATH = '/cms/system/login' def _login(self, hostname, username, password): if self._get_cookies(f'https://{hostname}').get('PHPSESSID5'): return login_url = f'https://{hostname}{self._LOGIN_URL_PATH}' webpage = self._download_webpage(login_url, None) self._request_webpage( login_url, None, 'Logging in', 'Failed to log in', data=urlencode_postdata({ 'action': 'processXdget', 'xdgetId': self._html_search_regex( r'<form[^>]+\bclass="[^"]*\bstate-login[^"]*"[^>]+\bdata-xdget-id="([^"]+)"', webpage, 'xdgetId'), 'params[action]': 'login', 'params[url]': login_url, 'params[object_type]': 'cms_page', 'params[object_id]': -1, 'params[email]': username, 'params[password]': password, 'requestTime': int(time.time()), 'requestSimpleSign': self._html_search_regex( r'window.requestSimpleSign\s*=\s*"([\da-f]+)"', webpage, 'simple sign'), })) def _real_extract(self, url): hostname = urllib.parse.urlparse(url).hostname username, password = self._get_login_info(netrc_machine=hostname) if username: self._login(hostname, username, password) display_id = self._match_id(url) webpage, urlh = self._download_webpage_handle(url, display_id) if self._LOGIN_URL_PATH in urlh.url: raise ExtractorError( f'This video is only available for registered users. 
{self._login_hint("any", netrc=hostname)}', expected=True) playlist_id = self._search_regex( r'window\.(?:lessonId|gcsObjectId)\s*=\s*(\d+)', webpage, 'playlist id', default=display_id) title = self._og_search_title(webpage, default=None) or self._html_extract_title(webpage) return self.playlist_from_matches( re.findall(GetCourseRuPlayerIE._EMBED_REGEX[0], webpage), playlist_id, title, display_id=display_id, ie=GetCourseRuPlayerIE, video_kwargs={ 'url_transparent': True, 'title': title, })
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/newsy.py
yt_dlp/extractor/newsy.py
from .common import InfoExtractor from ..utils import ( js_to_json, merge_dicts, ) class NewsyIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?newsy\.com/stories/(?P<id>[^/?#$&]+)' _TESTS = [{ 'url': 'https://www.newsy.com/stories/nft-trend-leads-to-fraudulent-art-auctions/', 'info_dict': { 'id': '609d65125b086c24fb529312', 'ext': 'mp4', 'title': 'NFT Art Auctions Have A Piracy Problem', 'description': 'md5:971e52ab8bc97e50305475cde8284c83', 'display_id': 'nft-trend-leads-to-fraudulent-art-auctions', 'timestamp': 1621339200, 'duration': 339630, 'thumbnail': 'https://cdn.newsy.com/images/videos/x/1620927824_xyrrP4.jpg', 'upload_date': '20210518', }, 'params': {'skip_download': True}, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) data_json = self._parse_json(self._html_search_regex( r'data-video-player\s?=\s?"({[^"]+})">', webpage, 'data'), display_id, js_to_json) ld_json = self._search_json_ld(webpage, display_id, fatal=False) formats, subtitles = [], {} if data_json.get('stream'): fmts, subs = self._extract_m3u8_formats_and_subtitles(data_json['stream'], display_id) formats.extend(fmts) subtitles = self._merge_subtitles(subtitles, subs) return merge_dicts(ld_json, { 'id': data_json['id'], 'display_id': display_id, 'title': data_json.get('headline'), 'duration': data_json.get('duration'), 'thumbnail': data_json.get('image'), 'formats': formats, 'subtitles': subtitles, })
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/sharepoint.py
yt_dlp/extractor/sharepoint.py
import json import urllib.parse from .common import InfoExtractor from ..utils import determine_ext, int_or_none, url_or_none from ..utils.traversal import traverse_obj class SharePointIE(InfoExtractor): _BASE_URL_RE = r'https?://[\w-]+\.sharepoint\.com/' _VALID_URL = [ rf'{_BASE_URL_RE}:v:/[a-z]/(?:[^/?#]+/)*(?P<id>[^/?#]{{46}})/?(?:$|[?#])', rf'{_BASE_URL_RE}(?!:v:)(?:[^/?#]+/)*stream\.aspx\?(?:[^#]+&)?id=(?P<id>[^&#]+)', ] _TESTS = [{ 'url': 'https://lut-my.sharepoint.com/:v:/g/personal/juha_eerola_student_lab_fi/EUrAmrktb4ZMhUcY9J2PqMEBD_9x_l0DyYWVgAvp-TTOMw?e=ZpQOOw', 'md5': '2950821d0d4937a0a76373782093b435', 'info_dict': { 'id': '01EQRS7EKKYCNLSLLPQZGIKRYY6SOY7KGB', 'display_id': 'EUrAmrktb4ZMhUcY9J2PqMEBD_9x_l0DyYWVgAvp-TTOMw', 'ext': 'mp4', 'title': 'CmvpJST', 'duration': 54.567, 'thumbnail': r're:https://.+/thumbnail', 'uploader_id': '8dcec565-a956-4b91-95e5-bacfb8bc015f', }, }, { 'url': 'https://greaternyace.sharepoint.com/:v:/s/acementornydrive/ETski5eAfNVEoPRZUAyy1wEBpLgVFYWso5bjbZjfBLlPUg?e=PQUfVb', 'md5': 'c496a01644223273bff12e93e501afd1', 'info_dict': { 'id': '01QI4AVTZ3ESFZPAD42VCKB5CZKAGLFVYB', 'display_id': 'ETski5eAfNVEoPRZUAyy1wEBpLgVFYWso5bjbZjfBLlPUg', 'ext': 'mp4', 'title': '930103681233985536', 'duration': 3797.326, 'thumbnail': r're:https://.+/thumbnail', }, }, { 'url': 'https://lut-my.sharepoint.com/personal/juha_eerola_student_lab_fi/_layouts/15/stream.aspx?id=%2Fpersonal%2Fjuha_eerola_student_lab_fi%2FDocuments%2FM-DL%2FCmvpJST.mp4&ga=1&referrer=StreamWebApp.Web&referrerScenario=AddressBarCopied.view', 'info_dict': { 'id': '01EQRS7EKKYCNLSLLPQZGIKRYY6SOY7KGB', 'display_id': '/personal/juha_eerola_student_lab_fi/Documents/M-DL/CmvpJST.mp4', 'ext': 'mp4', 'title': 'CmvpJST', 'duration': 54.567, 'thumbnail': r're:https://.+/thumbnail', 'uploader_id': '8dcec565-a956-4b91-95e5-bacfb8bc015f', }, 'skip': 'Session cookies needed', }, { 'url': 'https://izoobasisschool.sharepoint.com/:v:/g/Eaqleq8COVBIvIPvod0U27oBypC6aWOkk8ptuDpmJ6arHw', 
'only_matching': True, }, { 'url': 'https://uskudaredutr-my.sharepoint.com/:v:/g/personal/songul_turkaydin_uskudar_edu_tr/EbTf-VRUIbtGuIN73tx1MuwBCHBOmNcWNqSLw61Fd2_o0g?e=n5Vkof', 'only_matching': True, }, { 'url': 'https://epam-my.sharepoint.com/:v:/p/dzmitry_tamashevich/Ec4ZOs-rATZHjFYZWVxjczEB649FCoYFKDV_x3RxZiWAGA?e=4hswgA', 'only_matching': True, }, { 'url': 'https://microsoft.sharepoint.com/:v:/t/MicrosoftSPARKRecordings-MSFTInternal/EWCyeqByVWBAt8wDvNZdV-UB0BvU5YVbKm0UHgdrUlI6dg?e=QbPck6', 'only_matching': True, }] def _real_extract(self, url): display_id = urllib.parse.unquote(self._match_id(url)) webpage, urlh = self._download_webpage_handle(url, display_id) if urllib.parse.urlparse(urlh.url).hostname == 'login.microsoftonline.com': self.raise_login_required( 'Session cookies are required for this URL and can be passed ' 'with the --cookies option. The --cookies-from-browser option will not work', method=None) video_data = self._search_json(r'g_fileInfo\s*=', webpage, 'player config', display_id) video_id = video_data['VroomItemId'] parsed_url = urllib.parse.urlparse(video_data['.transformUrl']) base_media_url = urllib.parse.urlunparse(parsed_url._replace( path=urllib.parse.urljoin(f'{parsed_url.path}/', '../videomanifest'), query=urllib.parse.urlencode({ **urllib.parse.parse_qs(parsed_url.query), 'cTag': video_data['.ctag'], 'action': 'Access', 'part': 'index', }, doseq=True))) # Web player adds more params to the format URLs but we still get all formats without them formats = self._extract_mpd_formats( base_media_url, video_id, mpd_id='dash', query={'format': 'dash'}, fatal=False) for hls_type in ('hls', 'hls-vnext'): formats.extend(self._extract_m3u8_formats( base_media_url, video_id, 'mp4', m3u8_id=hls_type, query={'format': hls_type}, fatal=False, quality=-2)) if video_url := traverse_obj(video_data, ('downloadUrl', {url_or_none})): formats.append({ 'url': video_url, 'ext': determine_ext(video_data.get('extension') or video_data.get('name')), 
'quality': 1, 'format_id': 'source', 'filesize': int_or_none(video_data.get('size')), 'vcodec': 'none' if video_data.get('isAudio') is True else None, }) return { 'id': video_id, 'formats': formats, 'title': video_data.get('title') or video_data.get('displayName'), 'display_id': display_id, 'uploader_id': video_data.get('authorId'), 'duration': traverse_obj(video_data, ( 'MediaServiceFastMetadata', {json.loads}, 'media', 'duration', {lambda x: x / 10000000})), 'thumbnail': url_or_none(video_data.get('thumbnailUrl')), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/zeenews.py
yt_dlp/extractor/zeenews.py
from .common import InfoExtractor from ..utils import ExtractorError, traverse_obj class ZeeNewsIE(InfoExtractor): _WORKING = False _ENABLED = None # XXX: pass through to GenericIE _VALID_URL = r'https?://zeenews\.india\.com/[^#?]+/video/(?P<display_id>[^#/?]+)/(?P<id>\d+)' _TESTS = [ { 'url': 'https://zeenews.india.com/hindi/india/delhi-ncr-haryana/delhi-ncr/video/greater-noida-video-viral-on-social-media-attackers-beat-businessman-and-his-son-oppose-market-closed-atdnh/1402138', 'info_dict': { 'id': '1402138', 'ext': 'mp4', 'title': 'Greater Noida Video: हमलावरों ने दिनदहाड़े दुकान में घुसकर की मारपीट, देखें वीडियो', 'display_id': 'greater-noida-video-viral-on-social-media-attackers-beat-businessman-and-his-son-oppose-market-closed-atdnh', 'upload_date': '20221019', 'thumbnail': r're:^https?://.*\.jpg*', 'timestamp': 1666174501, 'view_count': int, 'duration': 97, 'description': 'ग्रेटर नोएडा जारचा थाना क्षेत्र के प्याबली में दिनदहाड़े दुकान में घुसकर अज्ञात हमलावरों ने हमला कर', }, }, { 'url': 'https://zeenews.india.com/hindi/india/video/videsh-superfast-queen-elizabeth-iis-funeral-today/1357710', 'info_dict': { 'id': '1357710', 'ext': 'mp4', 'title': 'Videsh Superfast: महारानी के अंतिम संस्कार की तैयारी शुरू', 'display_id': 'videsh-superfast-queen-elizabeth-iis-funeral-today', 'upload_date': '20220919', 'thumbnail': r're:^https?://.*\.jpg*', 'timestamp': 1663556881, 'view_count': int, 'duration': 133, 'description': 'सेगमेंट विदेश सुपराफास्ट में देखिए देश और दुनिया की सभी बड़ी खबरें, वो भी हर खबर फटाफट अंदाज में.', }, }, ] def _real_extract(self, url): content_id, display_id = self._match_valid_url(url).group('id', 'display_id') webpage = self._download_webpage(url, content_id) json_ld_list = list(self._yield_json_ld(webpage, display_id)) embed_url = traverse_obj( json_ld_list, (lambda _, v: v['@type'] == 'VideoObject', 'embedUrl'), get_all=False) if not embed_url: raise ExtractorError('No video found', expected=True) formats = 
self._extract_m3u8_formats(embed_url, content_id, 'mp4') return { **self._json_ld(json_ld_list, display_id), 'id': content_id, 'display_id': display_id, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/alphaporno.py
yt_dlp/extractor/alphaporno.py
from .common import InfoExtractor
from ..utils import (
    int_or_none,
    parse_duration,
    parse_filesize,
    parse_iso8601,
)


class AlphaPornoIE(InfoExtractor):
    """Extractor for alphaporno.com video pages (metadata via HTML meta tags)."""
    _VALID_URL = r'https?://(?:www\.)?alphaporno\.com/videos/(?P<id>[^/]+)'
    _TEST = {
        'url': 'http://www.alphaporno.com/videos/sensual-striptease-porn-with-samantha-alexandra/',
        'md5': 'feb6d3bba8848cd54467a87ad34bd38e',
        'info_dict': {
            'id': '258807',
            'display_id': 'sensual-striptease-porn-with-samantha-alexandra',
            'ext': 'mp4',
            'title': 'Sensual striptease porn with Samantha Alexandra',
            'thumbnail': r're:https?://.*\.jpg$',
            'timestamp': 1418694611,
            'upload_date': '20141216',
            'duration': 387,
            'filesize_approx': 54120000,
            'tbr': 1145,
            'categories': list,
            'age_limit': 18,
        },
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        # Numeric ID is embedded in inline JS; may be absent (default=None).
        video_id = self._search_regex(
            r"video_id\s*:\s*'([^']+)'", webpage, 'video id', default=None)

        video_url = self._search_regex(
            r"video_url\s*:\s*'([^']+)'", webpage, 'video url')
        # encodingFormat looks like '.mp4' — [1:] strips the leading dot.
        ext = self._html_search_meta(
            'encodingFormat', webpage, 'ext', default='.mp4')[1:]

        title = self._search_regex(
            [r'<meta content="([^"]+)" itemprop="description">',
             r'class="title" itemprop="name">([^<]+)<'],
            webpage, 'title')
        thumbnail = self._html_search_meta('thumbnail', webpage, 'thumbnail')
        timestamp = parse_iso8601(self._html_search_meta(
            'uploadDate', webpage, 'upload date'))
        duration = parse_duration(self._html_search_meta(
            'duration', webpage, 'duration'))
        filesize_approx = parse_filesize(self._html_search_meta(
            'contentSize', webpage, 'file size'))
        bitrate = int_or_none(self._html_search_meta(
            'bitrate', webpage, 'bitrate'))
        categories = self._html_search_meta(
            'keywords', webpage, 'categories', default='').split(',')

        # RTA label check determines the age limit.
        age_limit = self._rta_search(webpage)

        return {
            'id': video_id,
            'display_id': display_id,
            'url': video_url,
            'ext': ext,
            'title': title,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'duration': duration,
            'filesize_approx': filesize_approx,
            'tbr': bitrate,
            'categories': categories,
            'age_limit': age_limit,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/cloudflarestream.py
yt_dlp/extractor/cloudflarestream.py
import base64

from .common import InfoExtractor


class CloudflareStreamIE(InfoExtractor):
    """Extractor for Cloudflare Stream videos.

    IDs are either a 32-char hex string or a signed JWT; for JWTs the real
    video ID is recovered from the token's ``sub`` claim. Formats come from
    the standard ``manifest/video.m3u8`` / ``manifest/video.mpd`` endpoints.
    """
    _SUBDOMAIN_RE = r'(?:(?:watch|iframe|customer-\w+)\.)?'
    _DOMAIN_RE = r'(?:cloudflarestream\.com|(?:videodelivery|bytehighway)\.net)'
    _EMBED_RE = rf'(?:embed\.|{_SUBDOMAIN_RE}){_DOMAIN_RE}/embed/[^/?#]+\.js\?(?:[^#]+&)?video='
    # Either a 32-hex-digit UID or a JWT (three dot-separated b64url parts).
    _ID_RE = r'[\da-f]{32}|eyJ[\w-]+\.[\w-]+\.[\w-]+'
    _VALID_URL = rf'https?://(?:{_SUBDOMAIN_RE}(?P<domain>{_DOMAIN_RE})/|{_EMBED_RE})(?P<id>{_ID_RE})'
    _EMBED_REGEX = [
        rf'<script[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//{_EMBED_RE}(?:{_ID_RE})(?:(?!\1).)*)\1',
        rf'<iframe[^>]+\bsrc=["\'](?P<url>https?://{_SUBDOMAIN_RE}{_DOMAIN_RE}/[\da-f]{{32}})',
    ]
    _TESTS = [{
        'url': 'https://embed.cloudflarestream.com/embed/we4g.fla9.latest.js?video=31c9291ab41fac05471db4e73aa11717',
        'info_dict': {
            'id': '31c9291ab41fac05471db4e73aa11717',
            'ext': 'mp4',
            'title': '31c9291ab41fac05471db4e73aa11717',
            'thumbnail': r're:https?://cloudflarestream\.com/.+\.jpg',
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://watch.cloudflarestream.com/embed/sdk-iframe-integration.fla9.latest.js?video=0e8e040aec776862e1d632a699edf59e',
        'info_dict': {
            'id': '0e8e040aec776862e1d632a699edf59e',
            'ext': 'mp4',
            'title': '0e8e040aec776862e1d632a699edf59e',
            'thumbnail': r're:https?://cloudflarestream\.com/.+\.jpg',
        },
    }, {
        'url': 'https://watch.cloudflarestream.com/9df17203414fd1db3e3ed74abbe936c1',
        'only_matching': True,
    }, {
        'url': 'https://cloudflarestream.com/31c9291ab41fac05471db4e73aa11717/manifest/video.mpd',
        'only_matching': True,
    }, {
        'url': 'https://embed.videodelivery.net/embed/r4xu.fla9.latest.js?video=81d80727f3022488598f68d323c1ad5e',
        'only_matching': True,
    }, {
        'url': 'https://customer-aw5py76sw8wyqzmh.cloudflarestream.com/2463f6d3e06fa29710a337f5f5389fd8/iframe',
        'only_matching': True,
    }, {
        'url': 'https://watch.cloudflarestream.com/eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJraWQiOiJmYTA0YjViMzQ2NDkwYTM5NWJiNzQ1NWFhZTA2YzYwZSIsInN1YiI6Ijg4ZDQxMDhhMzY0MjA3M2VhYmFhZjg3ZGExODJkMjYzIiwiZXhwIjoxNjAwNjA5MzE5fQ.xkRJwLGkt0nZ%5F0BlPiwU7iW4pqb4lKkznbKfAhGg0tGcxSS6ZBA3lcTUwu7W%2DyCFbnAl%2Dhqk3Fn%5FqeQS%5FQydP27qTHpB9iIFFsMtk1tqzGZV5v4yrYDnwLSKzEKvVd6QwJnfABtxH2JdpSNuWlMUiVXFxGWgjOw6QeTNDDklTQYXV%5FNLV7sErSn5CeOPeRRkdXb%2D8ip%5FVOcfk1nDsFoOo4fctFtGP0wYMyY5ae8nhhatydHwevuvJCcEvEfh%2D4qjq9mCZOodevmtSQ4YWmggf4BxtWnDWYrGW8Otp6oqezrR8oY4%2DbKdV6PaqBj49aJdcls6xK7PmM8%5Fvjy3xfm0Mg',
        'only_matching': True,
    }]
    _WEBPAGE_TESTS = [{
        'url': 'https://upride.cc/incident/shoulder-pass-at-light/',
        'info_dict': {
            'id': 'eaef9dea5159cf968be84241b5cedfe7',
            'ext': 'mp4',
            'title': 'eaef9dea5159cf968be84241b5cedfe7',
            'thumbnail': r're:https?://cloudflarestream\.com/.+\.jpg',
        },
        'params': {
            'extractor_args': {'generic': {'impersonate': ['chrome']}},
            'skip_download': 'm3u8',
        },
    }, {
        # FIXME: Embed detection
        'url': 'https://www.cloudflare.com/developer-platform/products/cloudflare-stream/',
        'info_dict': {
            'id': 'e7bd2dd67e0f8860b4ae81e33a966049',
            'ext': 'mp4',
            'title': 'e7bd2dd67e0f8860b4ae81e33a966049',
            'thumbnail': r're:https?://cloudflarestream\.com/.+\.jpg',
        },
    }]

    def _real_extract(self, url):
        video_id, domain = self._match_valid_url(url).group('id', 'domain')
        # Normalize every domain except bytehighway.net to the canonical one.
        if domain != 'bytehighway.net':
            domain = 'cloudflarestream.com'
        base_url = f'https://{domain}/{video_id}/'
        if '.' in video_id:
            # Signed URL: the ID is a JWT; decode its payload (second part,
            # padded for b64) and use the 'sub' claim as the real video ID.
            video_id = self._parse_json(base64.urlsafe_b64decode(
                video_id.split('.')[1] + '==='), video_id)['sub']
        manifest_base_url = base_url + 'manifest/video.'

        formats, subtitles = self._extract_m3u8_formats_and_subtitles(
            manifest_base_url + 'm3u8', video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)
        fmts, subs = self._extract_mpd_formats_and_subtitles(
            manifest_base_url + 'mpd', video_id, mpd_id='dash', fatal=False)
        formats.extend(fmts)
        self._merge_subtitles(subs, target=subtitles)

        return {
            'id': video_id,
            # No title metadata is available from the manifests; fall back to the ID.
            'title': video_id,
            'thumbnail': base_url + 'thumbnails/thumbnail.jpg',
            'formats': formats,
            'subtitles': subtitles,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/plvideo.py
yt_dlp/extractor/plvideo.py
from .common import InfoExtractor
from ..utils import (
    float_or_none,
    int_or_none,
    parse_iso8601,
    parse_resolution,
    url_or_none,
)
from ..utils.traversal import traverse_obj


class PlVideoIE(InfoExtractor):
    """Extractor for plvideo.ru (Платформа) videos and shorts.

    All metadata comes from the public JSON API endpoint
    ``/v1/videos/<id>``; per-quality HLS profiles become formats, and an
    optional ``livestream.url`` marks the entry as live.
    """
    IE_DESC = 'Платформа'
    _VALID_URL = r'https?://(?:www\.)?plvideo\.ru/(?:watch\?(?:[^#]+&)?v=|shorts/)(?P<id>[\w-]+)'
    _TESTS = [{
        'url': 'https://plvideo.ru/watch?v=Y5JzUzkcQTMK',
        'md5': 'fe8e18aca892b3b31f3bf492169f8a26',
        'info_dict': {
            'id': 'Y5JzUzkcQTMK',
            'ext': 'mp4',
            'thumbnail': 'https://img.plvideo.ru/images/fp-2024-images/v/cover/37/dd/37dd00a4c96c77436ab737e85947abd7/original663a4a3bb713e5.33151959.jpg',
            'title': 'Presidente de Cuba llega a Moscú en una visita de trabajo',
            'channel': 'RT en Español',
            'channel_id': 'ZH4EKqunVDvo',
            'media_type': 'video',
            'comment_count': int,
            'tags': ['rusia', 'cuba', 'russia', 'miguel díaz-canel'],
            'description': 'md5:a1a395d900d77a86542a91ee0826c115',
            'release_timestamp': 1715096124,
            'channel_is_verified': True,
            'like_count': int,
            'timestamp': 1715095911,
            'duration': 44320,
            'view_count': int,
            'dislike_count': int,
            'upload_date': '20240507',
            'modified_date': '20240701',
            'channel_follower_count': int,
            'modified_timestamp': 1719824073,
        },
    }, {
        'url': 'https://plvideo.ru/shorts/S3Uo9c-VLwFX',
        'md5': '7d8fa2279406c69d2fd2a6fc548a9805',
        'info_dict': {
            'id': 'S3Uo9c-VLwFX',
            'ext': 'mp4',
            'channel': 'Romaatom',
            'tags': 'count:22',
            'dislike_count': int,
            'upload_date': '20241130',
            'description': 'md5:452e6de219bf2f32bb95806c51c3b364',
            'duration': 58433,
            'modified_date': '20241130',
            'thumbnail': 'https://img.plvideo.ru/images/fp-2024-11-cover/S3Uo9c-VLwFX/f9318999-a941-482b-b700-2102a7049366.jpg',
            'media_type': 'shorts',
            'like_count': int,
            'modified_timestamp': 1732961458,
            'channel_is_verified': True,
            'channel_id': 'erJyyTIbmUd1',
            'timestamp': 1732961355,
            'comment_count': int,
            'title': 'Белоусов отменил приказы о кадровом резерве на гражданской службе',
            'channel_follower_count': int,
            'view_count': int,
            'release_timestamp': 1732961458,
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        video_data = self._download_json(
            f'https://api.g1.plvideo.ru/v1/videos/{video_id}?Aud=18', video_id)

        is_live = False
        formats = []
        subtitles = {}
        automatic_captions = {}
        # Each 'profiles' entry maps a quality label (e.g. '720p') to an HLS URL.
        for quality, data in traverse_obj(video_data, ('item', 'profiles', {dict.items}, lambda _, v: url_or_none(v[1]['hls']))):
            formats.append({
                'format_id': quality,
                'ext': 'mp4',
                'protocol': 'm3u8_native',
                **traverse_obj(data, {
                    'url': 'hls',
                    'fps': ('fps', {float_or_none}),
                    'aspect_ratio': ('aspectRatio', {float_or_none}),
                }),
                **parse_resolution(quality),
            })
        if livestream_url := traverse_obj(video_data, ('item', 'livestream', 'url', {url_or_none})):
            is_live = True
            formats.extend(self._extract_m3u8_formats(livestream_url, video_id, 'mp4', live=True))
        # NOTE: loop variable renamed from `url` to `sub_url` so it no longer
        # shadows (and clobbers) the `url` parameter of this method.
        for lang, sub_url in traverse_obj(video_data, ('item', 'subtitles', {dict.items}, lambda _, v: url_or_none(v[1]))):
            # Language keys ending in '-auto' are machine-generated captions.
            if lang.endswith('-auto'):
                automatic_captions.setdefault(lang[:-5], []).append({
                    'url': sub_url,
                })
            else:
                subtitles.setdefault(lang, []).append({
                    'url': sub_url,
                })

        return {
            'id': video_id,
            'formats': formats,
            'subtitles': subtitles,
            'automatic_captions': automatic_captions,
            'is_live': is_live,
            **traverse_obj(video_data, ('item', {
                'id': ('id', {str}),
                'title': ('title', {str}),
                'description': ('description', {str}),
                'thumbnail': ('cover', 'paths', 'original', 'src', {url_or_none}),
                'duration': ('uploadFile', 'videoDuration', {int_or_none}),
                'channel': ('channel', 'name', {str}),
                'channel_id': ('channel', 'id', {str}),
                'channel_follower_count': ('channel', 'stats', 'subscribers', {int_or_none}),
                'channel_is_verified': ('channel', 'verified', {bool}),
                'tags': ('tags', ..., {str}),
                'timestamp': ('createdAt', {parse_iso8601}),
                'release_timestamp': ('publishedAt', {parse_iso8601}),
                'modified_timestamp': ('updatedAt', {parse_iso8601}),
                'view_count': ('stats', 'viewTotalCount', {int_or_none}),
                'like_count': ('stats', 'likeCount', {int_or_none}),
                'dislike_count': ('stats', 'dislikeCount', {int_or_none}),
                'comment_count': ('stats', 'commentCount', {int_or_none}),
                'media_type': ('type', {str}),
            })),
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/axs.py
yt_dlp/extractor/axs.py
from .common import InfoExtractor
from ..utils import (
    float_or_none,
    js_to_json,
    parse_iso8601,
    traverse_obj,
    url_or_none,
)


class AxsIE(InfoExtractor):
    """Extractor for axs.tv video pages.

    The page embeds a JS ``mountObj`` with the video/company IDs, which are
    then resolved through the myspotlight.tv player API.
    """
    IE_NAME = 'axs.tv'
    _VALID_URL = r'https?://(?:www\.)?axs\.tv/(?:channel/(?:[^/?#]+/)+)?video/(?P<id>[^/?#]+)'

    _TESTS = [{
        'url': 'https://www.axs.tv/video/5f4dc776b70e4f1c194f22ef/',
        'md5': '8d97736ae8e50c64df528e5e676778cf',
        'info_dict': {
            'id': '5f4dc776b70e4f1c194f22ef',
            'title': 'Small Town',
            'ext': 'mp4',
            'description': 'md5:e314d28bfaa227a4d7ec965fae19997f',
            'upload_date': '20230602',
            'timestamp': 1685729564,
            'duration': 1284.216,
            'series': 'Rock & Roll Road Trip with Sammy Hagar',
            'season': 'Season 2',
            'season_number': 2,
            'episode': '3',
            'thumbnail': 'https://images.dotstudiopro.com/5f4e9d330a0c3b295a7e8394',
        },
    }, {
        'url': 'https://www.axs.tv/channel/rock-star-interview/video/daryl-hall',
        'md5': '300ae795cd8f9984652c0949734ffbdc',
        'info_dict': {
            'id': '5f488148b70e4f392572977c',
            'display_id': 'daryl-hall',
            'title': 'Daryl Hall',
            'ext': 'mp4',
            'description': 'md5:e54ecaa0f4b5683fc9259e9e4b196628',
            'upload_date': '20230214',
            'timestamp': 1676403615,
            'duration': 2570.668,
            'series': 'The Big Interview with Dan Rather',
            'season': 'Season 3',
            'season_number': 3,
            'episode': '5',
            'thumbnail': 'https://images.dotstudiopro.com/5f4d1901f340b50d937cec32',
        },
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        # 'mountObj = {...}' is a JS object, so it must go through js_to_json.
        webpage_json_data = self._search_json(
            r'mountObj\s*=', webpage, 'video ID data', display_id,
            transform_source=js_to_json)
        video_id = webpage_json_data['video_id']
        company_id = webpage_json_data['company_id']

        meta = self._download_json(
            f'https://api.myspotlight.tv/dotplayer/video/{company_id}/{video_id}',
            video_id, query={'device_type': 'desktop_web'})['video']

        formats = self._extract_m3u8_formats(
            meta['video_m3u8'], video_id, 'mp4', m3u8_id='hls')

        subtitles = {}
        # Only closed-caption entries with a valid srtPath URL are kept.
        for cc in traverse_obj(meta, ('closeCaption', lambda _, v: url_or_none(v['srtPath']))):
            subtitles.setdefault(cc.get('srtShortLang') or 'en', []).append(
                {'ext': cc.get('srtExt'), 'url': cc['srtPath']})

        return {
            'id': video_id,
            'display_id': display_id,
            'formats': formats,
            **traverse_obj(meta, {
                'title': ('title', {str}),
                'description': ('description', {str}),
                'series': ('seriestitle', {str}),
                'season_number': ('season', {int}),
                'episode': ('episode', {str}),
                'duration': ('duration', {float_or_none}),
                'timestamp': ('updated_at', {parse_iso8601}),
                'thumbnail': ('thumb', {url_or_none}),
            }),
            'subtitles': subtitles,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/screencastomatic.py
yt_dlp/extractor/screencastomatic.py
from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    get_element_by_class,
    int_or_none,
    remove_start,
    strip_or_none,
    unified_strdate,
    urlencode_postdata,
)


class ScreencastOMaticIE(InfoExtractor):
    """Extractor for screencast-o-matic.com recordings.

    Supports password-protected recordings via ``--video-password``; the
    media itself is parsed from the player page's HTML5 <video> element.
    """
    _VALID_URL = r'https?://screencast-o-matic\.com/(?:(?:watch|player)/|embed\?.*?\bsc=)(?P<id>[0-9a-zA-Z]+)'
    _TESTS = [{
        'url': 'http://screencast-o-matic.com/watch/c2lD3BeOPl',
        'md5': '483583cb80d92588f15ccbedd90f0c18',
        'info_dict': {
            'id': 'c2lD3BeOPl',
            'ext': 'mp4',
            'title': 'Welcome to 3-4 Philosophy @ DECV!',
            'thumbnail': r're:^https?://.*\.jpg$',
            'description': 'as the title says! also: some general info re 1) VCE philosophy and 2) distance learning.',
            'duration': 369,
            'upload_date': '20141216',
        },
    }, {
        'url': 'http://screencast-o-matic.com/player/c2lD3BeOPl',
        'only_matching': True,
    }, {
        'url': 'http://screencast-o-matic.com/embed?ff=true&sc=cbV2r4Q5TL&fromPH=true&a=1',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(
            'https://screencast-o-matic.com/player/' + video_id, video_id)

        # Password gate: re-fetch the player page after submitting the form.
        if (self._html_extract_title(webpage) == 'Protected Content'
                or 'This video is private and requires a password' in webpage):
            password = self.get_param('videopassword')

            if not password:
                raise ExtractorError('Password protected video, use --video-password <password>', expected=True)

            form = self._search_regex(
                r'(?is)<form[^>]*>(?P<form>.+?)</form>', webpage, 'login form', group='form')
            form_data = self._hidden_inputs(form)
            form_data.update({
                'scPassword': password,
            })

            webpage = self._download_webpage(
                'https://screencast-o-matic.com/player/password', video_id, 'Logging in',
                data=urlencode_postdata(form_data))

            if '<small class="text-danger">Invalid password</small>' in webpage:
                raise ExtractorError('Unable to login: Invalid password', expected=True)

        info = self._parse_html5_media_entries(url, webpage, video_id)[0]
        info.update({
            'id': video_id,
            'title': get_element_by_class('overlayTitle', webpage),
            'description': strip_or_none(get_element_by_class('overlayDescription', webpage)) or None,
            # Duration lives in the inline player JS, not in the markup.
            'duration': int_or_none(self._search_regex(
                r'player\.duration\s*=\s*function\(\)\s*{\s*return\s+(\d+);\s*};',
                webpage, 'duration', default=None)),
            'upload_date': unified_strdate(remove_start(
                get_element_by_class('overlayPublished', webpage), 'Published: ')),
        })
        return info
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/behindkink.py
yt_dlp/extractor/behindkink.py
from .common import InfoExtractor from ..utils import url_basename class BehindKinkIE(InfoExtractor): _WORKING = False _VALID_URL = r'https?://(?:www\.)?behindkink\.com/(?P<year>[0-9]{4})/(?P<month>[0-9]{2})/(?P<day>[0-9]{2})/(?P<id>[^/#?_]+)' _TEST = { 'url': 'http://www.behindkink.com/2014/12/05/what-are-you-passionate-about-marley-blaze/', 'md5': '507b57d8fdcd75a41a9a7bdb7989c762', 'info_dict': { 'id': '37127', 'ext': 'mp4', 'title': 'What are you passionate about – Marley Blaze', 'description': 'md5:aee8e9611b4ff70186f752975d9b94b4', 'upload_date': '20141205', 'thumbnail': 'http://www.behindkink.com/wp-content/uploads/2014/12/blaze-1.jpg', 'age_limit': 18, }, } def _real_extract(self, url): mobj = self._match_valid_url(url) display_id = mobj.group('id') webpage = self._download_webpage(url, display_id) video_url = self._search_regex( r'<source src="([^"]+)"', webpage, 'video URL') video_id = url_basename(video_url).split('_')[0] upload_date = mobj.group('year') + mobj.group('month') + mobj.group('day') return { 'id': video_id, 'display_id': display_id, 'url': video_url, 'title': self._og_search_title(webpage), 'thumbnail': self._og_search_thumbnail(webpage), 'description': self._og_search_description(webpage), 'upload_date': upload_date, 'age_limit': 18, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/huajiao.py
yt_dlp/extractor/huajiao.py
from .common import InfoExtractor from ..utils import ( parse_duration, parse_iso8601, ) class HuajiaoIE(InfoExtractor): IE_DESC = '花椒直播' _VALID_URL = r'https?://(?:www\.)?huajiao\.com/l/(?P<id>[0-9]+)' _TEST = { 'url': 'http://www.huajiao.com/l/38941232', 'md5': 'd08bf9ac98787d24d1e4c0283f2d372d', 'info_dict': { 'id': '38941232', 'ext': 'mp4', 'title': '#新人求关注#', 'description': 're:.*', 'duration': 2424.0, 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1475866459, 'upload_date': '20161007', 'uploader': 'Penny_余姿昀', 'uploader_id': '75206005', }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) feed_json = self._search_regex( r'var\s+feed\s*=\s*({.+})', webpage, 'feed json') feed = self._parse_json(feed_json, video_id) description = self._html_search_meta( 'description', webpage, 'description', fatal=False) def get(section, field): return feed.get(section, {}).get(field) return { 'id': video_id, 'title': feed['feed']['formated_title'], 'description': description, 'duration': parse_duration(get('feed', 'duration')), 'thumbnail': get('feed', 'image'), 'timestamp': parse_iso8601(feed.get('creatime'), ' '), 'uploader': get('author', 'nickname'), 'uploader_id': get('author', 'uid'), 'formats': self._extract_m3u8_formats( feed['feed']['m3u8'], video_id, 'mp4', 'm3u8_native'), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/holodex.py
yt_dlp/extractor/holodex.py
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..utils import traverse_obj


class HolodexIE(InfoExtractor):
    """Extractor for holodex.net watch and playlist URLs.

    Holodex is a YouTube index; individual videos are delegated to
    YoutubeIE and playlists are resolved via the Holodex v2 API.
    """
    _VALID_URL = r'''(?x)https?://(?:www\.|staging\.)?holodex\.net/(?:
            api/v2/playlist/(?P<playlist>\d+)|
            watch/(?P<id>[\w-]{11})(?:\?(?:[^#]+&)?playlist=(?P<playlist2>\d+))?
        )'''
    _TESTS = [{
        'url': 'https://holodex.net/watch/9kQ2GtvDV3s',
        'md5': 'be5ffce2f0feae8ba4c01553abc0f175',
        'info_dict': {
            'ext': 'mp4',
            'id': '9kQ2GtvDV3s',
            'title': '【おちゃめ機能】ホロライブが吹っ切れた【24人で歌ってみた】',
            'channel_id': 'UCJFZiqLMntJufDCHc6bQixg',
            'playable_in_embed': True,
            'tags': 'count:43',
            'age_limit': 0,
            'live_status': 'not_live',
            'description': 'md5:040e866c09dc4ab899b36479f4b7c7a2',
            'channel_url': 'https://www.youtube.com/channel/UCJFZiqLMntJufDCHc6bQixg',
            'upload_date': '20200406',
            'uploader_url': 'http://www.youtube.com/channel/UCJFZiqLMntJufDCHc6bQixg',
            'view_count': int,
            'channel': 'hololive ホロライブ - VTuber Group',
            'categories': ['Music'],
            'uploader': 'hololive ホロライブ - VTuber Group',
            'channel_follower_count': int,
            'uploader_id': 'UCJFZiqLMntJufDCHc6bQixg',
            'availability': 'public',
            'thumbnail': 'https://i.ytimg.com/vi_webp/9kQ2GtvDV3s/maxresdefault.webp',
            'duration': 263,
            'like_count': int,
        },
    }, {
        'url': 'https://holodex.net/api/v2/playlist/239',
        'info_dict': {
            'id': '239',
            'title': 'Songs/Videos that made fall into the rabbit hole (from my google activity history)',
        },
        'playlist_count': 14,
    }, {
        'url': 'https://holodex.net/watch/_m2mQyaofjI?foo=bar&playlist=69',
        'info_dict': {
            'id': '69',
            'title': '拿著金斧頭的藍髮大姊姊',
        },
        'playlist_count': 3,
    }, {
        'url': 'https://holodex.net/watch/_m2mQyaofjI?playlist=69',
        'info_dict': {
            'id': '_m2mQyaofjI',
            'ext': 'mp4',
            'playable_in_embed': True,
            'like_count': int,
            'uploader': 'Ernst / エンスト',
            'duration': 11,
            'uploader_url': 'http://www.youtube.com/channel/UCqSX4PPZY0cyetqKVY_wRVA',
            'categories': ['Entertainment'],
            'title': '【星街すいせい】星街向你獻上晚安',
            'upload_date': '20210705',
            'description': 'md5:8b8ffb157bae77f2d109021a0b577d4a',
            'channel': 'Ernst / エンスト',
            'channel_id': 'UCqSX4PPZY0cyetqKVY_wRVA',
            'channel_follower_count': int,
            'view_count': int,
            'tags': [],
            'live_status': 'not_live',
            'channel_url': 'https://www.youtube.com/channel/UCqSX4PPZY0cyetqKVY_wRVA',
            'availability': 'public',
            'thumbnail': 'https://i.ytimg.com/vi_webp/_m2mQyaofjI/maxresdefault.webp',
            'age_limit': 0,
            'uploader_id': 'UCqSX4PPZY0cyetqKVY_wRVA',
            'comment_count': int,
        },
        'params': {'noplaylist': True},
    }, {
        'url': 'https://staging.holodex.net/api/v2/playlist/125',
        'only_matching': True,
    }, {
        'url': 'https://staging.holodex.net/watch/rJJTJA_T_b0?playlist=25',
        'only_matching': True,
    }, {
        'url': 'https://staging.holodex.net/watch/s1ifBeukThg',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id, playlist_id, pl_id2 = self._match_valid_url(url).group('id', 'playlist', 'playlist2')
        # Playlist ID may come from either the API path or a ?playlist= param.
        playlist_id = playlist_id or pl_id2

        if not self._yes_playlist(playlist_id, video_id):
            # Single video: hand off directly to the YouTube extractor.
            return self.url_result(f'https://www.youtube.com/watch?v={video_id}', YoutubeIE)

        data = self._download_json(f'https://holodex.net/api/v2/playlist/{playlist_id}', playlist_id)
        return self.playlist_from_matches(
            traverse_obj(data, ('videos', ..., 'id')), playlist_id, data.get('name'), ie=YoutubeIE)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/baidu.py
yt_dlp/extractor/baidu.py
from .common import InfoExtractor from ..utils import unescapeHTML class BaiduVideoIE(InfoExtractor): IE_DESC = '百度视频' _VALID_URL = r'https?://v\.baidu\.com/(?P<type>[a-z]+)/(?P<id>\d+)\.htm' _TESTS = [{ 'url': 'http://v.baidu.com/comic/1069.htm?frp=bdbrand&q=%E4%B8%AD%E5%8D%8E%E5%B0%8F%E5%BD%93%E5%AE%B6', 'info_dict': { 'id': '1069', 'title': '中华小当家 TV版国语', 'description': 'md5:51be07afe461cf99fa61231421b5397c', }, 'playlist_count': 52, }, { 'url': 'http://v.baidu.com/show/11595.htm?frp=bdbrand', 'info_dict': { 'id': '11595', 'title': 're:^奔跑吧兄弟', 'description': 'md5:1bf88bad6d850930f542d51547c089b8', }, 'playlist_mincount': 12, }] def _call_api(self, path, category, playlist_id, note): return self._download_json( f'http://app.video.baidu.com/{path}/?worktype=adnative{category}&id={playlist_id}', playlist_id, note) def _real_extract(self, url): category, playlist_id = self._match_valid_url(url).groups() if category == 'show': category = 'tvshow' if category == 'tv': category = 'tvplay' playlist_detail = self._call_api( 'xqinfo', category, playlist_id, 'Download playlist JSON metadata') playlist_title = playlist_detail['title'] playlist_description = unescapeHTML(playlist_detail.get('intro')) episodes_detail = self._call_api( 'xqsingle', category, playlist_id, 'Download episodes JSON metadata') entries = [self.url_result( episode['url'], video_title=episode['title'], ) for episode in episodes_detail['videos']] return self.playlist_result( entries, playlist_id, playlist_title, playlist_description)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/rte.py
yt_dlp/extractor/rte.py
import re

from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
    ExtractorError,
    float_or_none,
    parse_iso8601,
    str_or_none,
    try_get,
    unescapeHTML,
    url_or_none,
)


class RteBaseIE(InfoExtractor):
    """Shared extraction logic for RTÉ TV and radio players.

    Tries two playlist API endpoints in order; metadata is taken from the
    first successful response and formats (RTMP, HLS, HDS) are accumulated
    across all responses.
    """

    def _real_extract(self, url):
        item_id = self._match_id(url)

        info_dict = {}
        formats = []

        ENDPOINTS = (
            'https://feeds.rasset.ie/rteavgen/player/playlist?type=iptv&format=json&showId=',
            'http://www.rte.ie/rteavgen/getplaylist/?type=web&format=json&id=',
        )

        for num, ep_url in enumerate(ENDPOINTS, start=1):
            try:
                data = self._download_json(ep_url + item_id, item_id)
            except ExtractorError as ee:
                # Only give up on the LAST endpoint and only if nothing was
                # collected so far; otherwise fall through to the next one.
                if num < len(ENDPOINTS) or formats:
                    continue
                if isinstance(ee.cause, HTTPError) and ee.cause.status == 404:
                    error_info = self._parse_json(ee.cause.response.read().decode(), item_id, fatal=False)
                    if error_info:
                        raise ExtractorError(
                            '{} said: {}'.format(self.IE_NAME, error_info['message']),
                            expected=True)
                raise

            # NB the string values in the JSON are stored using XML escaping(!)
            show = try_get(data, lambda x: x['shows'][0], dict)
            if not show:
                continue

            # Fill metadata only once, from the first endpoint that responds.
            if not info_dict:
                title = unescapeHTML(show['title'])
                description = unescapeHTML(show.get('description'))
                thumbnail = show.get('thumbnail')
                duration = float_or_none(show.get('duration'), 1000)
                timestamp = parse_iso8601(show.get('published'))
                info_dict = {
                    'id': item_id,
                    'title': title,
                    'description': description,
                    'thumbnail': thumbnail,
                    'timestamp': timestamp,
                    'duration': duration,
                }

            mg = try_get(show, lambda x: x['media:group'][0], dict)
            if not mg:
                continue

            if mg.get('url'):
                # Legacy RTMP(E) URL; split into server/app/playpath parts.
                m = re.match(r'(?P<url>rtmpe?://[^/]+)/(?P<app>.+)/(?P<playpath>mp4:.*)', mg['url'])
                if m:
                    m = m.groupdict()
                    formats.append({
                        'url': m['url'] + '/' + m['app'],
                        'app': m['app'],
                        'play_path': m['playpath'],
                        'player_url': url,
                        'ext': 'flv',
                        'format_id': 'rtmp',
                    })

            if mg.get('hls_server') and mg.get('hls_url'):
                formats.extend(self._extract_m3u8_formats(
                    mg['hls_server'] + mg['hls_url'], item_id, 'mp4',
                    entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))

            if mg.get('hds_server') and mg.get('hds_url'):
                formats.extend(self._extract_f4m_formats(
                    mg['hds_server'] + mg['hds_url'], item_id,
                    f4m_id='hds', fatal=False))

            mg_rte_server = str_or_none(mg.get('rte:server'))
            mg_url = str_or_none(mg.get('url'))
            if mg_rte_server and mg_url:
                hds_url = url_or_none(mg_rte_server + mg_url)
                if hds_url:
                    formats.extend(self._extract_f4m_formats(
                        hds_url, item_id, f4m_id='hds', fatal=False))

        info_dict['formats'] = formats
        return info_dict


class RteIE(RteBaseIE):
    """RTÉ TV player URLs (https://www.rte.ie/player/...)."""
    IE_NAME = 'rte'
    IE_DESC = 'Raidió Teilifís Éireann TV'
    _VALID_URL = r'https?://(?:www\.)?rte\.ie/player/[^/]{2,3}/show/[^/]+/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.rte.ie/player/ie/show/iwitness-862/10478715/',
        'md5': '4a76eb3396d98f697e6e8110563d2604',
        'info_dict': {
            'id': '10478715',
            'ext': 'mp4',
            'title': 'iWitness',
            'thumbnail': r're:^https?://.*\.jpg$',
            'description': 'The spirit of Ireland, one voice and one minute at a time.',
            'duration': 60.046,
            'upload_date': '20151012',
            'timestamp': 1444694160,
        },
    }


class RteRadioIE(RteBaseIE):
    """RTÉ radio player URLs (radioplayer fragment-based scheme)."""
    IE_NAME = 'rte:radio'
    IE_DESC = 'Raidió Teilifís Éireann radio'
    # Radioplayer URLs have two distinct specifier formats,
    # the old format #!rii=<channel_id>:<id>:<playable_item_id>:<date>:
    # the new format #!rii=b<channel_id>_<id>_<playable_item_id>_<date>_
    # where the IDs are int/empty, the date is DD-MM-YYYY, and the specifier may be truncated.
    # An <id> uniquely defines an individual recording, and is the only part we require.
    _VALID_URL = r'https?://(?:www\.)?rte\.ie/radio/utils/radioplayer/rteradioweb\.html#!rii=(?:b?[0-9]*)(?:%3A|:|%5F|_)(?P<id>[0-9]+)'

    _TESTS = [{
        # Old-style player URL; HLS and RTMPE formats
        'url': 'http://www.rte.ie/radio/utils/radioplayer/rteradioweb.html#!rii=16:10507902:2414:27-12-2015:',
        'md5': 'c79ccb2c195998440065456b69760411',
        'info_dict': {
            'id': '10507902',
            'ext': 'mp4',
            'title': 'Gloria',
            'thumbnail': r're:^https?://.*\.jpg$',
            'description': 'md5:9ce124a7fb41559ec68f06387cabddf0',
            'timestamp': 1451203200,
            'upload_date': '20151227',
            'duration': 7230.0,
        },
    }, {
        # New-style player URL; RTMPE formats only
        'url': 'http://rte.ie/radio/utils/radioplayer/rteradioweb.html#!rii=b16_3250678_8861_06-04-2012_',
        'info_dict': {
            'id': '3250678',
            'ext': 'flv',
            'title': 'The Lyric Concert with Paul Herriott',
            'thumbnail': r're:^https?://.*\.jpg$',
            'description': '',
            'timestamp': 1333742400,
            'upload_date': '20120406',
            'duration': 7199.016,
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }]
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/lbry.py
yt_dlp/extractor/lbry.py
import functools import json import re import urllib.parse from .common import InfoExtractor from ..networking import HEADRequest from ..utils import ( ExtractorError, OnDemandPagedList, UnsupportedError, determine_ext, int_or_none, mimetype2ext, parse_qs, traverse_obj, try_get, url_or_none, urlhandle_detect_ext, urljoin, ) class LBRYBaseIE(InfoExtractor): _BASE_URL_REGEX = r'(?x)(?:https?://(?:www\.)?(?:lbry\.tv|odysee\.com)/|lbry://)' _CLAIM_ID_REGEX = r'[0-9a-f]{1,40}' _OPT_CLAIM_ID = f'[^$@:/?#&]+(?:[:#]{_CLAIM_ID_REGEX})?' _SUPPORTED_STREAM_TYPES = ['video', 'audio'] _UNSUPPORTED_STREAM_TYPES = ['binary'] _PAGE_SIZE = 50 def _call_api_proxy(self, method, display_id, params, resource): headers = {'Content-Type': 'application/json-rpc'} token = try_get(self._get_cookies('https://odysee.com'), lambda x: x['auth_token'].value) if token: headers['x-lbry-auth-token'] = token response = self._download_json( 'https://api.lbry.tv/api/v1/proxy', display_id, f'Downloading {resource} JSON metadata', headers=headers, data=json.dumps({ 'method': method, 'params': params, }).encode()) err = response.get('error') if err: raise ExtractorError( f'{self.IE_NAME} said: {err.get("code")} - {err.get("message")}', expected=True) return response['result'] def _resolve_url(self, url, display_id, resource): return self._call_api_proxy( 'resolve', display_id, {'urls': url}, resource)[url] def _permanent_url(self, url, claim_name, claim_id): return urljoin( url.replace('lbry://', 'https://lbry.tv/'), f'/{claim_name}:{claim_id}') def _parse_stream(self, stream, url): stream_type = traverse_obj(stream, ('value', 'stream_type', {str})) info = traverse_obj(stream, { 'title': ('value', 'title', {str}), 'thumbnail': ('value', 'thumbnail', 'url', {url_or_none}), 'description': ('value', 'description', {str}), 'license': ('value', 'license', {str}), 'timestamp': ('timestamp', {int_or_none}), 'release_timestamp': ('value', 'release_time', {int_or_none}), 'tags': ('value', 'tags', ..., filter), 
'duration': ('value', stream_type, 'duration', {int_or_none}), 'channel': ('signing_channel', 'value', 'title', {str}), 'channel_id': ('signing_channel', 'claim_id', {str}), 'uploader_id': ('signing_channel', 'name', {str}), }) if info.get('uploader_id') and info.get('channel_id'): info['channel_url'] = self._permanent_url(url, info['uploader_id'], info['channel_id']) return info def _fetch_page(self, display_id, url, params, page): page += 1 page_params = { 'no_totals': True, 'page': page, 'page_size': self._PAGE_SIZE, **params, } result = self._call_api_proxy( 'claim_search', display_id, page_params, f'page {page}') for item in traverse_obj(result, ('items', lambda _, v: v['name'] and v['claim_id'])): yield { **self._parse_stream(item, url), '_type': 'url', 'id': item['claim_id'], 'url': self._permanent_url(url, item['name'], item['claim_id']), } def _playlist_entries(self, url, display_id, claim_param, metadata): qs = parse_qs(url) content = qs.get('content', [None])[0] params = { 'fee_amount': qs.get('fee_amount', ['>=0'])[0], 'order_by': { 'new': ['release_time'], 'top': ['effective_amount'], 'trending': ['trending_group', 'trending_mixed'], }[qs.get('order', ['new'])[0]], 'claim_type': 'stream', 'stream_types': [content] if content in ['audio', 'video'] else self._SUPPORTED_STREAM_TYPES, **claim_param, } duration = qs.get('duration', [None])[0] if duration: params['duration'] = { 'long': '>=1200', 'short': '<=240', }[duration] language = qs.get('language', ['all'])[0] if language != 'all': languages = [language] if language == 'en': languages.append('none') params['any_languages'] = languages entries = OnDemandPagedList( functools.partial(self._fetch_page, display_id, url, params), self._PAGE_SIZE) return self.playlist_result( entries, display_id, **traverse_obj(metadata, ('value', { 'title': 'title', 'description': 'description', }))) class LBRYIE(LBRYBaseIE): IE_NAME = 'lbry' IE_DESC = 'odysee.com' _VALID_URL = LBRYBaseIE._BASE_URL_REGEX + rf''' 
(?:\$/(?:download|embed)/)? (?P<id> [^$@:/?#]+/{LBRYBaseIE._CLAIM_ID_REGEX} |(?:@{LBRYBaseIE._OPT_CLAIM_ID}/)?{LBRYBaseIE._OPT_CLAIM_ID} )''' _TESTS = [{ # Video 'url': 'https://lbry.tv/@Mantega:1/First-day-LBRY:1', 'md5': '65bd7ec1f6744ada55da8e4c48a2edf9', 'info_dict': { 'id': '17f983b61f53091fb8ea58a9c56804e4ff8cff4d', 'ext': 'mp4', 'title': 'First day in LBRY? Start HERE!', 'description': 'md5:f6cb5c704b332d37f5119313c2c98f51', 'timestamp': 1595694354, 'upload_date': '20200725', 'release_timestamp': 1595340697, 'release_date': '20200721', 'width': 1280, 'height': 720, 'thumbnail': 'https://spee.ch/7/67f2d809c263288c.png', 'license': 'None', 'uploader_id': '@Mantega', 'duration': 346, 'channel': 'LBRY/Odysee rats united!!!', 'channel_id': '1c8ad6a2ab4e889a71146ae4deeb23bb92dab627', 'channel_url': 'https://lbry.tv/@Mantega:1c8ad6a2ab4e889a71146ae4deeb23bb92dab627', 'tags': [ 'first day in lbry', 'lbc', 'lbry', 'start', 'tutorial', ], }, }, { # Audio 'url': 'https://lbry.tv/@LBRYFoundation:0/Episode-1:e', 'md5': 'c94017d3eba9b49ce085a8fad6b98d00', 'info_dict': { 'id': 'e7d93d772bd87e2b62d5ab993c1c3ced86ebb396', 'ext': 'mp3', 'title': 'The LBRY Foundation Community Podcast Episode 1 - Introduction, Streaming on LBRY, Transcoding', 'description': 'md5:661ac4f1db09f31728931d7b88807a61', 'timestamp': 1591312601, 'upload_date': '20200604', 'release_timestamp': 1591312421, 'release_date': '20200604', 'tags': list, 'duration': 2570, 'channel': 'The LBRY Foundation', 'channel_id': '0ed629d2b9c601300cacf7eabe9da0be79010212', 'channel_url': 'https://lbry.tv/@LBRYFoundation:0ed629d2b9c601300cacf7eabe9da0be79010212', 'vcodec': 'none', 'thumbnail': 'https://spee.ch/d/0bc63b0e6bf1492d.png', 'license': 'None', 'uploader_id': '@LBRYFoundation', }, }, { 'url': 'https://odysee.com/@gardeningincanada:b/plants-i-will-never-grow-again.-the:e', 'md5': 'c35fac796f62a14274b4dc2addb5d0ba', 'info_dict': { 'id': 'e51671357333fe22ae88aad320bde2f6f96b1410', 'ext': 'mp4', 'title': 'PLANTS I 
WILL NEVER GROW AGAIN. THE BLACK LIST PLANTS FOR A CANADIAN GARDEN | Gardening in Canada 🍁', 'description': 'md5:9c539c6a03fb843956de61a4d5288d5e', 'timestamp': 1618254123, 'upload_date': '20210412', 'release_timestamp': 1618254002, 'release_date': '20210412', 'tags': list, 'duration': 554, 'channel': 'Gardening In Canada', 'channel_id': 'b8be0e93b423dad221abe29545fbe8ec36e806bc', 'channel_url': 'https://odysee.com/@gardeningincanada:b8be0e93b423dad221abe29545fbe8ec36e806bc', 'uploader_id': '@gardeningincanada', 'formats': 'mincount:3', 'thumbnail': 'https://thumbnails.lbry.com/AgHSc_HzrrE', 'license': 'Copyrighted (contact publisher)', }, }, { # HLS live stream (might expire) 'url': 'https://odysee.com/@RT:fd/livestream_RT:d', 'info_dict': { 'id': 'fdd11cb3ab75f95efb7b3bc2d726aa13ac915b66', 'ext': 'mp4', 'live_status': 'is_live', 'title': 'startswith:RT News | Livestream 24/7', 'description': 'md5:fe68d0056dfe79c1a6b8ce8c34d5f6fa', 'timestamp': int, 'upload_date': str, 'release_timestamp': int, 'release_date': str, 'tags': list, 'channel': 'RT', 'channel_id': 'fdd11cb3ab75f95efb7b3bc2d726aa13ac915b66', 'channel_url': 'https://odysee.com/@RT:fdd11cb3ab75f95efb7b3bc2d726aa13ac915b66', 'formats': 'mincount:1', 'thumbnail': 'startswith:https://thumb', 'license': 'None', 'uploader_id': '@RT', }, 'params': {'skip_download': True}, }, { # original quality format w/higher resolution than HLS formats 'url': 'https://odysee.com/@wickedtruths:2/Biotechnological-Invasion-of-Skin-(April-2023):4', 'md5': '305b0b3b369bde1b984961f005b67193', 'info_dict': { 'id': '41fbfe805eb73c8d3012c0c49faa0f563274f634', 'ext': 'mp4', 'title': 'Biotechnological Invasion of Skin (April 2023)', 'description': 'md5:fe28689db2cb7ba3436d819ac3ffc378', 'channel': 'Wicked Truths', 'channel_id': '23d2bbf856b0ceed5b1d7c5960bcc72da5a20cb0', 'channel_url': 'https://odysee.com/@wickedtruths:23d2bbf856b0ceed5b1d7c5960bcc72da5a20cb0', 'uploader_id': '@wickedtruths', 'timestamp': 1695114347, 'upload_date': 
'20230919', 'release_timestamp': 1685617473, 'release_date': '20230601', 'duration': 1063, 'thumbnail': 'https://thumbs.odycdn.com/4e6d39da4df0cfdad45f64e253a15959.webp', 'tags': ['smart skin surveillance', 'biotechnology invasion of skin', 'morgellons'], 'license': 'None', 'protocol': 'https', # test for direct mp4 download }, }, { 'url': 'https://odysee.com/@BrodieRobertson:5/apple-is-tracking-everything-you-do-on:e', 'only_matching': True, }, { 'url': 'https://odysee.com/@ScammerRevolts:b0/I-SYSKEY\'D-THE-SAME-SCAMMERS-3-TIMES!:b', 'only_matching': True, }, { 'url': 'https://lbry.tv/Episode-1:e7d93d772bd87e2b62d5ab993c1c3ced86ebb396', 'only_matching': True, }, { 'url': 'https://lbry.tv/$/embed/Episode-1/e7d93d772bd87e2b62d5ab993c1c3ced86ebb396', 'only_matching': True, }, { 'url': 'https://lbry.tv/Episode-1:e7', 'only_matching': True, }, { 'url': 'https://lbry.tv/@LBRYFoundation/Episode-1', 'only_matching': True, }, { 'url': 'https://lbry.tv/$/download/Episode-1/e7d93d772bd87e2b62d5ab993c1c3ced86ebb396', 'only_matching': True, }, { 'url': 'https://lbry.tv/@lacajadepandora:a/TRUMP-EST%C3%81-BIEN-PUESTO-con-Pilar-Baselga,-Carlos-Senra,-Luis-Palacios-(720p_30fps_H264-192kbit_AAC):1', 'only_matching': True, }, { 'url': 'lbry://@lbry#3f/odysee#7', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) if display_id.startswith('@'): display_id = display_id.replace(':', '#') else: display_id = display_id.replace('/', ':') display_id = urllib.parse.unquote(display_id) uri = 'lbry://' + display_id result = self._resolve_url(uri, display_id, 'stream') headers = {'Referer': 'https://odysee.com/'} formats = [] stream_type = traverse_obj(result, ('value', 'stream_type', {str})) if stream_type in self._SUPPORTED_STREAM_TYPES: claim_id, is_live = result['claim_id'], False streaming_url = self._call_api_proxy( 'get', claim_id, { 'uri': uri, **traverse_obj(parse_qs(url), { 'signature': ('signature', 0), 'signature_ts': ('signature_ts', 0), }), }, 
'streaming url')['streaming_url'] # GET request to v3 API returns original video/audio file if available direct_url = re.sub(r'/api/v\d+/', '/api/v3/', streaming_url) urlh = self._request_webpage( direct_url, display_id, 'Checking for original quality', headers=headers, fatal=False) if urlh and urlhandle_detect_ext(urlh) != 'm3u8': formats.append({ 'url': direct_url, 'format_id': 'original', 'quality': 1, **traverse_obj(result, ('value', { 'ext': ('source', (('name', {determine_ext}), ('media_type', {mimetype2ext}))), 'filesize': ('source', 'size', {int_or_none}), 'width': ('video', 'width', {int_or_none}), 'height': ('video', 'height', {int_or_none}), }), get_all=False), 'vcodec': 'none' if stream_type == 'audio' else None, }) final_url = None # HEAD request returns redirect response to m3u8 URL if available urlh = self._request_webpage( HEADRequest(streaming_url), display_id, headers=headers, note='Downloading streaming redirect url info', fatal=False) if urlh: final_url = urlh.url elif result.get('value_type') == 'stream' and stream_type not in self._UNSUPPORTED_STREAM_TYPES: claim_id, is_live = result['signing_channel']['claim_id'], True live_data = self._download_json( 'https://api.odysee.live/livestream/is_live', claim_id, query={'channel_claim_id': claim_id}, note='Downloading livestream JSON metadata')['data'] final_url = live_data.get('VideoURL') # Upcoming videos may still give VideoURL if not live_data.get('Live'): final_url = None self.raise_no_formats('This stream is not live', True, claim_id) else: raise UnsupportedError(url) if determine_ext(final_url) == 'm3u8': formats.extend(self._extract_m3u8_formats( final_url, display_id, 'mp4', m3u8_id='hls', live=is_live, headers=headers)) return { **self._parse_stream(result, url), 'id': claim_id, 'formats': formats, 'is_live': is_live, 'http_headers': headers, } class LBRYChannelIE(LBRYBaseIE): IE_NAME = 'lbry:channel' IE_DESC = 'odysee.com channels' _VALID_URL = LBRYBaseIE._BASE_URL_REGEX + 
rf'(?P<id>@{LBRYBaseIE._OPT_CLAIM_ID})/?(?:[?&]|$)' _TESTS = [{ 'url': 'https://lbry.tv/@LBRYFoundation:0', 'info_dict': { 'id': '0ed629d2b9c601300cacf7eabe9da0be79010212', 'title': 'The LBRY Foundation', 'description': 'Channel for the LBRY Foundation. Follow for updates and news.', }, 'playlist_mincount': 29, }, { 'url': 'https://lbry.tv/@LBRYFoundation', 'only_matching': True, }, { 'url': 'lbry://@lbry#3f', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url).replace(':', '#') result = self._resolve_url(f'lbry://{display_id}', display_id, 'channel') claim_id = result['claim_id'] return self._playlist_entries(url, claim_id, {'channel_ids': [claim_id]}, result) class LBRYPlaylistIE(LBRYBaseIE): IE_NAME = 'lbry:playlist' IE_DESC = 'odysee.com playlists' _VALID_URL = LBRYBaseIE._BASE_URL_REGEX + r'\$/(?:play)?list/(?P<id>[0-9a-f-]+)' _TESTS = [{ 'url': 'https://odysee.com/$/playlist/ffef782f27486f0ac138bde8777f72ebdd0548c2', 'info_dict': { 'id': 'ffef782f27486f0ac138bde8777f72ebdd0548c2', 'title': 'Théâtre Classique', 'description': 'Théâtre Classique', }, 'playlist_mincount': 4, }, { 'url': 'https://odysee.com/$/list/9c6658b3dd21e4f2a0602d523a13150e2b48b770', 'info_dict': { 'id': '9c6658b3dd21e4f2a0602d523a13150e2b48b770', 'title': 'Social Media Exposed', 'description': 'md5:98af97317aacd5b85d595775ea37d80e', }, 'playlist_mincount': 34, }, { 'url': 'https://odysee.com/$/playlist/938fb11d-215f-4d1c-ad64-723954df2184', 'info_dict': { 'id': '938fb11d-215f-4d1c-ad64-723954df2184', }, 'playlist_mincount': 1000, }] def _real_extract(self, url): display_id = self._match_id(url) result = traverse_obj(self._call_api_proxy('claim_search', display_id, { 'claim_ids': [display_id], 'no_totals': True, 'page': 1, 'page_size': self._PAGE_SIZE, }, 'playlist'), ('items', 0)) claim_param = {'claim_ids': traverse_obj(result, ('value', 'claims', ..., {str}))} return self._playlist_entries(url, display_id, claim_param, result)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/unistra.py
yt_dlp/extractor/unistra.py
import re from .common import InfoExtractor from ..utils import qualities class UnistraIE(InfoExtractor): _VALID_URL = r'https?://utv\.unistra\.fr/(?:index|video)\.php\?id_video\=(?P<id>\d+)' _TESTS = [ { 'url': 'http://utv.unistra.fr/video.php?id_video=154', 'md5': '736f605cfdc96724d55bb543ab3ced24', 'info_dict': { 'id': '154', 'ext': 'mp4', 'title': 'M!ss Yella', 'description': 'md5:104892c71bd48e55d70b902736b81bbf', }, }, { 'url': 'http://utv.unistra.fr/index.php?id_video=437', 'md5': '1ddddd6cccaae76f622ce29b8779636d', 'info_dict': { 'id': '437', 'ext': 'mp4', 'title': 'Prix Louise Weiss 2014', 'description': 'md5:cc3a8735f079f4fb6b0b570fc10c135a', }, }, ] def _real_extract(self, url): mobj = self._match_valid_url(url) video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) files = set(re.findall(r'file\s*:\s*"(/[^"]+)"', webpage)) quality = qualities(['SD', 'HD']) formats = [] for file_path in files: format_id = 'HD' if file_path.endswith('-HD.mp4') else 'SD' formats.append({ 'url': f'http://vod-flash.u-strasbg.fr:8080{file_path}', 'format_id': format_id, 'quality': quality(format_id), }) title = self._html_search_regex( r'<title>UTV - (.*?)</', webpage, 'title') description = self._html_search_regex( r'<meta name="Description" content="(.*?)"', webpage, 'description', flags=re.DOTALL) thumbnail = self._search_regex( r'image: "(.*?)"', webpage, 'thumbnail') return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/sapo.py
yt_dlp/extractor/sapo.py
import re from .common import InfoExtractor from ..utils import ( parse_duration, unified_strdate, ) class SapoIE(InfoExtractor): IE_DESC = 'SAPO Vídeos' _VALID_URL = r'https?://(?:(?:v2|www)\.)?videos\.sapo\.(?:pt|cv|ao|mz|tl)/(?P<id>[\da-zA-Z]{20})' _TESTS = [ { 'url': 'http://videos.sapo.pt/UBz95kOtiWYUMTA5Ghfi', 'md5': '79ee523f6ecb9233ac25075dee0eda83', 'note': 'SD video', 'info_dict': { 'id': 'UBz95kOtiWYUMTA5Ghfi', 'ext': 'mp4', 'title': 'Benfica - Marcas na Hitória', 'description': 'md5:c9082000a128c3fd57bf0299e1367f22', 'duration': 264, 'uploader': 'tiago_1988', 'upload_date': '20080229', 'categories': ['benfica', 'cabral', 'desporto', 'futebol', 'geovanni', 'hooijdonk', 'joao', 'karel', 'lisboa', 'miccoli'], }, }, { 'url': 'http://videos.sapo.pt/IyusNAZ791ZdoCY5H5IF', 'md5': '90a2f283cfb49193fe06e861613a72aa', 'note': 'HD video', 'info_dict': { 'id': 'IyusNAZ791ZdoCY5H5IF', 'ext': 'mp4', 'title': 'Codebits VII - Report', 'description': 'md5:6448d6fd81ce86feac05321f354dbdc8', 'duration': 144, 'uploader': 'codebits', 'upload_date': '20140427', 'categories': ['codebits', 'codebits2014'], }, }, { 'url': 'http://v2.videos.sapo.pt/yLqjzPtbTimsn2wWBKHz', 'md5': 'e5aa7cc0bdc6db9b33df1a48e49a15ac', 'note': 'v2 video', 'info_dict': { 'id': 'yLqjzPtbTimsn2wWBKHz', 'ext': 'mp4', 'title': 'Hipnose Condicionativa 4', 'description': 'md5:ef0481abf8fb4ae6f525088a6dadbc40', 'duration': 692, 'uploader': 'sapozen', 'upload_date': '20090609', 'categories': ['condicionativa', 'heloisa', 'hipnose', 'miranda', 'sapo', 'zen'], }, }, ] def _real_extract(self, url): mobj = self._match_valid_url(url) video_id = mobj.group('id') item = self._download_xml( f'http://rd3.videos.sapo.pt/{video_id}/rss2', video_id).find('./channel/item') title = item.find('./title').text description = item.find('./{http://videos.sapo.pt/mrss/}synopse').text thumbnail = item.find('./{http://search.yahoo.com/mrss/}content').get('url') duration = 
parse_duration(item.find('./{http://videos.sapo.pt/mrss/}time').text) uploader = item.find('./{http://videos.sapo.pt/mrss/}author').text upload_date = unified_strdate(item.find('./pubDate').text) view_count = int(item.find('./{http://videos.sapo.pt/mrss/}views').text) comment_count = int(item.find('./{http://videos.sapo.pt/mrss/}comment_count').text) tags = item.find('./{http://videos.sapo.pt/mrss/}tags').text categories = tags.split() if tags else [] age_limit = 18 if item.find('./{http://videos.sapo.pt/mrss/}m18').text == 'true' else 0 video_url = item.find('./{http://videos.sapo.pt/mrss/}videoFile').text video_size = item.find('./{http://videos.sapo.pt/mrss/}videoSize').text.split('x') formats = [{ 'url': video_url, 'ext': 'mp4', 'format_id': 'sd', 'width': int(video_size[0]), 'height': int(video_size[1]), }] if item.find('./{http://videos.sapo.pt/mrss/}HD').text == 'true': formats.append({ 'url': re.sub(r'/mov/1$', '/mov/39', video_url), 'ext': 'mp4', 'format_id': 'hd', 'width': 1280, 'height': 720, }) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'uploader': uploader, 'upload_date': upload_date, 'view_count': view_count, 'comment_count': comment_count, 'categories': categories, 'age_limit': age_limit, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/megaphone.py
yt_dlp/extractor/megaphone.py
from .common import InfoExtractor from ..utils import js_to_json class MegaphoneIE(InfoExtractor): IE_NAME = 'megaphone.fm' IE_DESC = 'megaphone.fm embedded players' _VALID_URL = r'https?://player\.megaphone\.fm/(?P<id>[A-Z0-9]+)' _EMBED_REGEX = [rf'<iframe[^>]*?\ssrc=["\'](?P<url>{_VALID_URL})'] _TEST = { 'url': 'https://player.megaphone.fm/GLT9749789991', 'md5': '4816a0de523eb3e972dc0dda2c191f96', 'info_dict': { 'id': 'GLT9749789991', 'ext': 'mp3', 'title': '#97 What Kind Of Idiot Gets Phished?', 'thumbnail': r're:^https://.*\.png.*$', 'duration': 1998.36, 'creators': ['Reply All'], }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._og_search_property('audio:title', webpage) author = self._og_search_property('audio:artist', webpage) thumbnail = self._og_search_thumbnail(webpage) episode_json = self._search_regex(r'(?s)var\s+episode\s*=\s*(\{.+?\});', webpage, 'episode JSON') episode_data = self._parse_json(episode_json, video_id, js_to_json) video_url = self._proto_relative_url(episode_data['mediaUrl'], 'https:') formats = [{ 'url': video_url, }] return { 'id': video_id, 'thumbnail': thumbnail, 'title': title, 'creators': [author] if author else None, 'duration': episode_data['duration'], 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/brainpop.py
yt_dlp/extractor/brainpop.py
import json import re from .common import InfoExtractor from ..utils import ( classproperty, int_or_none, traverse_obj, urljoin, ) class BrainPOPBaseIE(InfoExtractor): _NETRC_MACHINE = 'brainpop' _ORIGIN = '' # So that _VALID_URL doesn't crash _LOGIN_ERRORS = { 1502: 'The username and password you entered did not match.', # LOGIN_FAILED 1503: 'Payment method is expired.', # LOGIN_FAILED_ACCOUNT_NOT_ACTIVE 1506: 'Your BrainPOP plan has expired.', # LOGIN_FAILED_ACCOUNT_EXPIRED 1507: 'Terms not accepted.', # LOGIN_FAILED_TERMS_NOT_ACCEPTED 1508: 'Account not activated.', # LOGIN_FAILED_SUBSCRIPTION_NOT_ACTIVE 1512: 'The maximum number of devices permitted are logged in with your account right now.', # LOGIN_FAILED_LOGIN_LIMIT_REACHED 1513: 'You are trying to access your account from outside of its allowed IP range.', # LOGIN_FAILED_INVALID_IP 1514: 'Individual accounts are not included in your plan. Try again with your shared username and password.', # LOGIN_FAILED_MBP_DISABLED 1515: 'Account not activated.', # LOGIN_FAILED_TEACHER_NOT_ACTIVE 1523: 'That username and password won\'t work on this BrainPOP site.', # LOGIN_FAILED_NO_ACCESS 1524: 'You\'ll need to join a class before you can login.', # LOGIN_FAILED_STUDENT_NO_PERIOD 1526: 'Your account is locked. 
Reset your password, or ask a teacher or administrator for help.', # LOGIN_FAILED_ACCOUNT_LOCKED } @classproperty def _VALID_URL(cls): root = re.escape(cls._ORIGIN).replace(r'https:', r'https?:').replace(r'www\.', r'(?:www\.)?') return rf'{root}/(?P<slug>[^/]+/[^/]+/(?P<id>[^/?#&]+))' def _assemble_formats(self, slug, format_id, display_id, token='', extra_fields={}): formats = [] formats = self._extract_m3u8_formats( f'{urljoin(self._HLS_URL, slug)}.m3u8?{token}', display_id, 'mp4', m3u8_id=f'{format_id}-hls', fatal=False) formats.append({ 'format_id': format_id, 'url': f'{urljoin(self._VIDEO_URL, slug)}?{token}', }) for f in formats: f.update(extra_fields) return formats def _extract_adaptive_formats(self, data, token, display_id, key_format='%s', extra_fields={}): formats = [] additional_key_formats = { '%s': {}, 'ad_%s': { 'format_note': 'Audio description', 'source_preference': -2, }, } for additional_key_format, additional_key_fields in additional_key_formats.items(): for key_quality, key_index in enumerate(('high', 'low')): full_key_index = additional_key_format % (key_format % key_index) if data.get(full_key_index): formats.extend(self._assemble_formats(data[full_key_index], full_key_index, display_id, token, { 'quality': -1 - key_quality, **additional_key_fields, **extra_fields, })) return formats def _perform_login(self, username, password): login_res = self._download_json( 'https://api.brainpop.com/api/login', None, data=json.dumps({'username': username, 'password': password}).encode(), headers={ 'Content-Type': 'application/json', 'Referer': self._ORIGIN, }, note='Logging in', errnote='Unable to log in', expected_status=400) status_code = int_or_none(login_res['status_code']) if status_code != 1505: self.report_warning( f'Unable to login: {self._LOGIN_ERRORS.get(status_code) or login_res.get("message")}' or f'Got status code {status_code}') class BrainPOPIE(BrainPOPBaseIE): _ORIGIN = 'https://www.brainpop.com' _VIDEO_URL = 'https://svideos.brainpop.com' 
_HLS_URL = 'https://hls.brainpop.com' _CDN_URL = 'https://cdn.brainpop.com' _TESTS = [{ 'url': 'https://www.brainpop.com/health/conflictresolution/martinlutherkingjr/movie?ref=null', 'md5': '3ead374233ae74c7f1b0029a01c972f0', 'info_dict': { 'id': '1f3259fa457292b4', 'ext': 'mp4', 'title': 'Martin Luther King, Jr.', 'display_id': 'martinlutherkingjr', 'description': 'md5:f403dbb2bf3ccc7cf4c59d9e43e3c349', }, }, { 'url': 'https://www.brainpop.com/science/space/bigbang/', 'md5': '9a1ff0e77444dd9e437354eb669c87ec', 'info_dict': { 'id': 'acae52cd48c99acf', 'ext': 'mp4', 'title': 'Big Bang', 'display_id': 'bigbang', 'description': 'md5:3e53b766b0f116f631b13f4cae185d38', }, 'skip': 'Requires login', }] def _real_extract(self, url): slug, display_id = self._match_valid_url(url).group('slug', 'id') movie_data = self._download_json( f'https://api.brainpop.com/api/content/published/bp/en/{slug}/movie?full=1', display_id, 'Downloading movie data JSON', 'Unable to download movie data')['data'] topic_data = traverse_obj(self._download_json( f'https://api.brainpop.com/api/content/published/bp/en/{slug}?full=1', display_id, 'Downloading topic data JSON', 'Unable to download topic data', fatal=False), ('data', 'topic'), expected_type=dict) or movie_data['topic'] if not traverse_obj(movie_data, ('access', 'allow')): reason = traverse_obj(movie_data, ('access', 'reason')) if 'logged' in reason: self.raise_login_required(reason, metadata_available=True) else: self.raise_no_formats(reason, video_id=display_id) movie_feature = movie_data['feature'] movie_feature_data = movie_feature['data'] formats, subtitles = [], {} formats.extend(self._extract_adaptive_formats(movie_feature_data, movie_feature_data.get('token', ''), display_id, '%s_v2', { 'language': movie_feature.get('language') or 'en', 'language_preference': 10, })) for lang, localized_feature in traverse_obj(movie_feature, 'localization', default={}, expected_type=dict).items(): 
formats.extend(self._extract_adaptive_formats(localized_feature, localized_feature.get('token', ''), display_id, '%s_v2', { 'language': lang, 'language_preference': -10, })) # TODO: Do localization fields also have subtitles? for name, url in movie_feature_data.items(): lang = self._search_regex( r'^subtitles_(?P<lang>\w+)$', name, 'subtitle metadata', default=None) if lang and url: subtitles.setdefault(lang, []).append({ 'url': urljoin(self._CDN_URL, url), }) return { 'id': topic_data['topic_id'], 'display_id': display_id, 'title': topic_data.get('name'), 'description': topic_data.get('synopsis'), 'formats': formats, 'subtitles': subtitles, } class BrainPOPLegacyBaseIE(BrainPOPBaseIE): def _parse_js_topic_data(self, topic_data, display_id, token): movie_data = topic_data['movies'] # TODO: Are there non-burned subtitles? formats = self._extract_adaptive_formats(movie_data, token, display_id) return { 'id': topic_data['EntryID'], 'display_id': display_id, 'title': topic_data.get('name'), 'alt_title': topic_data.get('title'), 'description': topic_data.get('synopsis'), 'formats': formats, } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) topic_data = self._search_json( r'var\s+content\s*=\s*', webpage, 'content data', display_id, end_pattern=';')['category']['unit']['topic'] token = self._search_regex(r'ec_token\s*:\s*[\'"]([^\'"]+)', webpage, 'video token') return self._parse_js_topic_data(topic_data, display_id, token) class BrainPOPJrIE(BrainPOPLegacyBaseIE): _ORIGIN = 'https://jr.brainpop.com' _VIDEO_URL = 'https://svideos-jr.brainpop.com' _HLS_URL = 'https://hls-jr.brainpop.com' _CDN_URL = 'https://cdn-jr.brainpop.com' _TESTS = [{ 'url': 'https://jr.brainpop.com/health/feelingsandsel/emotions/', 'md5': '04e0561bb21770f305a0ce6cf0d869ab', 'info_dict': { 'id': '347', 'ext': 'mp4', 'title': 'Emotions', 'display_id': 'emotions', }, }, { 'url': 'https://jr.brainpop.com/science/habitats/arctichabitats/', 
'md5': 'b0ed063bbd1910df00220ee29340f5d6', 'info_dict': { 'id': '29', 'ext': 'mp4', 'title': 'Arctic Habitats', 'display_id': 'arctichabitats', }, 'skip': 'Requires login', }] class BrainPOPELLIE(BrainPOPLegacyBaseIE): _ORIGIN = 'https://ell.brainpop.com' _VIDEO_URL = 'https://svideos-esl.brainpop.com' _HLS_URL = 'https://hls-esl.brainpop.com' _CDN_URL = 'https://cdn-esl.brainpop.com' _TESTS = [{ 'url': 'https://ell.brainpop.com/level1/unit1/lesson1/', 'md5': 'a2012700cfb774acb7ad2e8834eed0d0', 'info_dict': { 'id': '1', 'ext': 'mp4', 'title': 'Lesson 1', 'display_id': 'lesson1', 'alt_title': 'Personal Pronouns', }, }, { 'url': 'https://ell.brainpop.com/level3/unit6/lesson5/', 'md5': 'be19c8292c87b24aacfb5fda2f3f8363', 'info_dict': { 'id': '101', 'ext': 'mp4', 'title': 'Lesson 5', 'display_id': 'lesson5', 'alt_title': 'Review: Unit 6', }, 'skip': 'Requires login', }] class BrainPOPEspIE(BrainPOPLegacyBaseIE): IE_DESC = 'BrainPOP Español' _ORIGIN = 'https://esp.brainpop.com' _VIDEO_URL = 'https://svideos.brainpop.com' _HLS_URL = 'https://hls.brainpop.com' _CDN_URL = 'https://cdn.brainpop.com/mx' _TESTS = [{ 'url': 'https://esp.brainpop.com/ciencia/la_diversidad_de_la_vida/ecosistemas/', 'md5': 'cb3f062db2b3c5240ddfcfde7108f8c9', 'info_dict': { 'id': '3893', 'ext': 'mp4', 'title': 'Ecosistemas', 'display_id': 'ecosistemas', 'description': 'md5:80fc55b07e241f8c8f2aa8d74deaf3c3', }, }, { 'url': 'https://esp.brainpop.com/espanol/la_escritura/emily_dickinson/', 'md5': '98c1b9559e0e33777209c425cda7dac4', 'info_dict': { 'id': '7146', 'ext': 'mp4', 'title': 'Emily Dickinson', 'display_id': 'emily_dickinson', 'description': 'md5:2795ad87b1d239c9711c1e92ab5a978b', }, 'skip': 'Requires login', }] class BrainPOPFrIE(BrainPOPLegacyBaseIE): IE_DESC = 'BrainPOP Français' _ORIGIN = 'https://fr.brainpop.com' _VIDEO_URL = 'https://svideos.brainpop.com' _HLS_URL = 'https://hls.brainpop.com' _CDN_URL = 'https://cdn.brainpop.com/fr' _TESTS = [{ 'url': 
'https://fr.brainpop.com/sciencesdelaterre/energie/sourcesdenergie/', 'md5': '97e7f48af8af93f8a2be11709f239371', 'info_dict': { 'id': '1651', 'ext': 'mp4', 'title': 'Sources d\'énergie', 'display_id': 'sourcesdenergie', 'description': 'md5:7eece350f019a21ef9f64d4088b2d857', }, }, { 'url': 'https://fr.brainpop.com/francais/ecrire/plagiat/', 'md5': '0cf2b4f89804d0dd4a360a51310d445a', 'info_dict': { 'id': '5803', 'ext': 'mp4', 'title': 'Plagiat', 'display_id': 'plagiat', 'description': 'md5:4496d87127ace28e8b1eda116e77cd2b', }, 'skip': 'Requires login', }] class BrainPOPIlIE(BrainPOPLegacyBaseIE): IE_DESC = 'BrainPOP Hebrew' _ORIGIN = 'https://il.brainpop.com' _VIDEO_URL = 'https://svideos.brainpop.com' _HLS_URL = 'https://hls.brainpop.com' _CDN_URL = 'https://cdn.brainpop.com/he' _TESTS = [{ 'url': 'https://il.brainpop.com/category_9/subcategory_150/subjects_3782/', 'md5': '9e4ea9dc60ecd385a6e5ca12ccf31641', 'info_dict': { 'id': '3782', 'ext': 'mp4', 'title': 'md5:e993632fcda0545d9205602ec314ad67', 'display_id': 'subjects_3782', 'description': 'md5:4cc084a8012beb01f037724423a4d4ed', }, }]
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false