repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/massengeschmacktv.py
youtube_dl/extractor/massengeschmacktv.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( clean_html, determine_ext, int_or_none, js_to_json, mimetype2ext, parse_filesize, ) class MassengeschmackTVIE(InfoExtractor): IE_NAME = 'massengeschmack.tv' _VALID_URL = r'https?://(?:www\.)?massengeschmack\.tv/play/(?P<id>[^?&#]+)' _TEST = { 'url': 'https://massengeschmack.tv/play/fktv202', 'md5': 'a9e054db9c2b5a08f0a0527cc201e8d3', 'info_dict': { 'id': 'fktv202', 'ext': 'mp4', 'title': 'Fernsehkritik-TV - Folge 202', }, } def _real_extract(self, url): episode = self._match_id(url) webpage = self._download_webpage(url, episode) title = clean_html(self._html_search_regex( '<h3>([^<]+)</h3>', webpage, 'title')) thumbnail = self._search_regex(r'POSTER\s*=\s*"([^"]+)', webpage, 'thumbnail', fatal=False) sources = self._parse_json(self._search_regex(r'(?s)MEDIA\s*=\s*(\[.+?\]);', webpage, 'media'), episode, js_to_json) formats = [] for source in sources: furl = source.get('src') if not furl: continue furl = self._proto_relative_url(furl) ext = determine_ext(furl) or mimetype2ext(source.get('type')) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( furl, episode, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) else: formats.append({ 'url': furl, 'format_id': determine_ext(furl), }) for (durl, format_id, width, height, filesize) in re.findall(r'''(?x) <a[^>]+?href="(?P<url>(?:https:)?//[^"]+)".*? <strong>(?P<format_id>.+?)</strong>.*? <small>(?:(?P<width>\d+)x(?P<height>\d+))?\s+?\((?P<filesize>[\d,]+\s*[GM]iB)\)</small> ''', webpage): formats.append({ 'url': durl, 'format_id': format_id, 'width': int_or_none(width), 'height': int_or_none(height), 'filesize': parse_filesize(filesize), 'vcodec': 'none' if format_id.startswith('Audio') else None, }) self._sort_formats(formats, ('width', 'height', 'filesize', 'tbr')) return { 'id': episode, 'title': title, 'formats': formats, 'thumbnail': thumbnail, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tv5mondeplus.py
youtube_dl/extractor/tv5mondeplus.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( determine_ext, extract_attributes, int_or_none, parse_duration, ) class TV5MondePlusIE(InfoExtractor): IE_DESC = 'TV5MONDE+' _VALID_URL = r'https?://(?:www\.)?(?:tv5mondeplus|revoir\.tv5monde)\.com/toutes-les-videos/[^/]+/(?P<id>[^/?#]+)' _TESTS = [{ # movie 'url': 'https://revoir.tv5monde.com/toutes-les-videos/cinema/rendez-vous-a-atlit', 'md5': '8cbde5ea7b296cf635073e27895e227f', 'info_dict': { 'id': '822a4756-0712-7329-1859-a13ac7fd1407', 'display_id': 'rendez-vous-a-atlit', 'ext': 'mp4', 'title': 'Rendez-vous à Atlit', 'description': 'md5:2893a4c5e1dbac3eedff2d87956e4efb', 'upload_date': '20200130', }, }, { # series episode 'url': 'https://revoir.tv5monde.com/toutes-les-videos/series-fictions/c-est-la-vie-ennemie-juree', 'info_dict': { 'id': '0df7007c-4900-3936-c601-87a13a93a068', 'display_id': 'c-est-la-vie-ennemie-juree', 'ext': 'mp4', 'title': "C'est la vie - Ennemie jurée", 'description': 'md5:dfb5c63087b6f35fe0cc0af4fe44287e', 'upload_date': '20200130', 'series': "C'est la vie", 'episode': 'Ennemie jurée', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://revoir.tv5monde.com/toutes-les-videos/series-fictions/neuf-jours-en-hiver-neuf-jours-en-hiver', 'only_matching': True, }, { 'url': 'https://revoir.tv5monde.com/toutes-les-videos/info-societe/le-journal-de-la-rts-edition-du-30-01-20-19h30', 'only_matching': True, }] _GEO_BYPASS = False def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) if ">Ce programme n'est malheureusement pas disponible pour votre zone géographique.<" in webpage: self.raise_geo_restricted(countries=['FR']) title = episode = self._html_search_regex(r'<h1>([^<]+)', webpage, 'title') vpl_data = extract_attributes(self._search_regex( r'(<[^>]+class="video_player_loader"[^>]+>)', webpage, 'video player loader')) video_files = self._parse_json( 
vpl_data['data-broadcast'], display_id).get('files', []) formats = [] for video_file in video_files: v_url = video_file.get('url') if not v_url: continue video_format = video_file.get('format') or determine_ext(v_url) if video_format == 'm3u8': formats.extend(self._extract_m3u8_formats( v_url, display_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) else: formats.append({ 'url': v_url, 'format_id': video_format, }) self._sort_formats(formats) description = self._html_search_regex( r'(?s)<div[^>]+class=["\']episode-texte[^>]+>(.+?)</div>', webpage, 'description', fatal=False) series = self._html_search_regex( r'<p[^>]+class=["\']episode-emission[^>]+>([^<]+)', webpage, 'series', default=None) if series and series != title: title = '%s - %s' % (series, title) upload_date = self._search_regex( r'(?:date_publication|publish_date)["\']\s*:\s*["\'](\d{4}_\d{2}_\d{2})', webpage, 'upload date', default=None) if upload_date: upload_date = upload_date.replace('_', '') video_id = self._search_regex( (r'data-guid=["\']([\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})', r'id_contenu["\']\s:\s*(\d+)'), webpage, 'video id', default=display_id) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': description, 'thumbnail': vpl_data.get('data-image'), 'duration': int_or_none(vpl_data.get('data-duration')) or parse_duration(self._html_search_meta('duration', webpage)), 'upload_date': upload_date, 'formats': formats, 'series': series, 'episode': episode, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/disney.py
youtube_dl/extractor/disney.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( int_or_none, unified_strdate, compat_str, determine_ext, ExtractorError, update_url_query, ) class DisneyIE(InfoExtractor): _VALID_URL = r'''(?x) https?://(?P<domain>(?:[^/]+\.)?(?:disney\.[a-z]{2,3}(?:\.[a-z]{2})?|disney(?:(?:me|latino)\.com|turkiye\.com\.tr|channel\.de)|(?:starwars|marvelkids)\.com))/(?:(?:embed/|(?:[^/]+/)+[\w-]+-)(?P<id>[a-z0-9]{24})|(?:[^/]+/)?(?P<display_id>[^/?#]+))''' _TESTS = [{ # Disney.EmbedVideo 'url': 'http://video.disney.com/watch/moana-trailer-545ed1857afee5a0ec239977', 'info_dict': { 'id': '545ed1857afee5a0ec239977', 'ext': 'mp4', 'title': 'Moana - Trailer', 'description': 'A fun adventure for the entire Family! Bring home Moana on Digital HD Feb 21 & Blu-ray March 7', 'upload_date': '20170112', }, 'params': { # m3u8 download 'skip_download': True, } }, { # Grill.burger 'url': 'http://www.starwars.com/video/rogue-one-a-star-wars-story-intro-featurette', 'info_dict': { 'id': '5454e9f4e9804a552e3524c8', 'ext': 'mp4', 'title': '"Intro" Featurette: Rogue One: A Star Wars Story', 'upload_date': '20170104', 'description': 'Go behind-the-scenes of Rogue One: A Star Wars Story in this featurette with Director Gareth Edwards and the cast of the film.', }, 'params': { # m3u8 download 'skip_download': True, } }, { 'url': 'http://videos.disneylatino.com/ver/spider-man-de-regreso-a-casa-primer-adelanto-543a33a1850bdcfcca13bae2', 'only_matching': True, }, { 'url': 'http://video.en.disneyme.com/watch/future-worm/robo-carp-2001-544b66002aa7353cdd3f5114', 'only_matching': True, }, { 'url': 'http://video.disneyturkiye.com.tr/izle/7c-7-cuceler/kimin-sesi-zaten-5456f3d015f6b36c8afdd0e2', 'only_matching': True, }, { 'url': 'http://disneyjunior.disney.com/embed/546a4798ddba3d1612e4005d', 'only_matching': True, }, { 'url': 'http://www.starwars.com/embed/54690d1e6c42e5f09a0fb097', 'only_matching': True, }, { 'url': 
'http://spiderman.marvelkids.com/embed/522900d2ced3c565e4cc0677', 'only_matching': True, }, { 'url': 'http://spiderman.marvelkids.com/videos/contest-of-champions-part-four-clip-1', 'only_matching': True, }, { 'url': 'http://disneyjunior.en.disneyme.com/dj/watch-my-friends-tigger-and-pooh-promo', 'only_matching': True, }, { 'url': 'http://disneychannel.de/sehen/soy-luna-folge-118-5518518987ba27f3cc729268', 'only_matching': True, }, { 'url': 'http://disneyjunior.disney.com/galactech-the-galactech-grab-galactech-an-admiral-rescue', 'only_matching': True, }] def _real_extract(self, url): domain, video_id, display_id = re.match(self._VALID_URL, url).groups() if not video_id: webpage = self._download_webpage(url, display_id) grill = re.sub(r'"\s*\+\s*"', '', self._search_regex( r'Grill\.burger\s*=\s*({.+})\s*:', webpage, 'grill data')) page_data = next(s for s in self._parse_json(grill, display_id)['stack'] if s.get('type') == 'video') video_data = page_data['data'][0] else: webpage = self._download_webpage( 'http://%s/embed/%s' % (domain, video_id), video_id) page_data = self._parse_json(self._search_regex( r'Disney\.EmbedVideo\s*=\s*({.+});', webpage, 'embed data'), video_id) video_data = page_data['video'] for external in video_data.get('externals', []): if external.get('source') == 'vevo': return self.url_result('vevo:' + external['data_id'], 'Vevo') video_id = video_data['id'] title = video_data['title'] formats = [] for flavor in video_data.get('flavors', []): flavor_format = flavor.get('format') flavor_url = flavor.get('url') if not flavor_url or not re.match(r'https?://', flavor_url) or flavor_format == 'mp4_access': continue tbr = int_or_none(flavor.get('bitrate')) if tbr == 99999: # wrong ks(Kaltura Signature) causes 404 Error flavor_url = update_url_query(flavor_url, {'ks': ''}) m3u8_formats = self._extract_m3u8_formats( flavor_url, video_id, 'mp4', m3u8_id=flavor_format, fatal=False) for f in m3u8_formats: # Apple FairPlay if '/fpshls/' in f['url']: continue 
formats.append(f) continue format_id = [] if flavor_format: format_id.append(flavor_format) if tbr: format_id.append(compat_str(tbr)) ext = determine_ext(flavor_url) if flavor_format == 'applehttp' or ext == 'm3u8': ext = 'mp4' width = int_or_none(flavor.get('width')) height = int_or_none(flavor.get('height')) formats.append({ 'format_id': '-'.join(format_id), 'url': flavor_url, 'width': width, 'height': height, 'tbr': tbr, 'ext': ext, 'vcodec': 'none' if (width == 0 and height == 0) else None, }) if not formats and video_data.get('expired'): raise ExtractorError( '%s said: %s' % (self.IE_NAME, page_data['translations']['video_expired']), expected=True) self._sort_formats(formats) subtitles = {} for caption in video_data.get('captions', []): caption_url = caption.get('url') caption_format = caption.get('format') if not caption_url or caption_format.startswith('unknown'): continue subtitles.setdefault(caption.get('language', 'en'), []).append({ 'url': caption_url, 'ext': { 'webvtt': 'vtt', }.get(caption_format, caption_format), }) return { 'id': video_id, 'title': title, 'description': video_data.get('description') or video_data.get('short_desc'), 'thumbnail': video_data.get('thumb') or video_data.get('thumb_secure'), 'duration': int_or_none(video_data.get('duration_sec')), 'upload_date': unified_strdate(video_data.get('publish_date')), 'formats': formats, 'subtitles': subtitles, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/senateisvp.py
youtube_dl/extractor/senateisvp.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, unsmuggle_url, ) from ..compat import ( compat_parse_qs, compat_urlparse, ) class SenateISVPIE(InfoExtractor): _COMM_MAP = [ ['ag', '76440', 'http://ag-f.akamaihd.net'], ['aging', '76442', 'http://aging-f.akamaihd.net'], ['approps', '76441', 'http://approps-f.akamaihd.net'], ['armed', '76445', 'http://armed-f.akamaihd.net'], ['banking', '76446', 'http://banking-f.akamaihd.net'], ['budget', '76447', 'http://budget-f.akamaihd.net'], ['cecc', '76486', 'http://srs-f.akamaihd.net'], ['commerce', '80177', 'http://commerce1-f.akamaihd.net'], ['csce', '75229', 'http://srs-f.akamaihd.net'], ['dpc', '76590', 'http://dpc-f.akamaihd.net'], ['energy', '76448', 'http://energy-f.akamaihd.net'], ['epw', '76478', 'http://epw-f.akamaihd.net'], ['ethics', '76449', 'http://ethics-f.akamaihd.net'], ['finance', '76450', 'http://finance-f.akamaihd.net'], ['foreign', '76451', 'http://foreign-f.akamaihd.net'], ['govtaff', '76453', 'http://govtaff-f.akamaihd.net'], ['help', '76452', 'http://help-f.akamaihd.net'], ['indian', '76455', 'http://indian-f.akamaihd.net'], ['intel', '76456', 'http://intel-f.akamaihd.net'], ['intlnarc', '76457', 'http://intlnarc-f.akamaihd.net'], ['jccic', '85180', 'http://jccic-f.akamaihd.net'], ['jec', '76458', 'http://jec-f.akamaihd.net'], ['judiciary', '76459', 'http://judiciary-f.akamaihd.net'], ['rpc', '76591', 'http://rpc-f.akamaihd.net'], ['rules', '76460', 'http://rules-f.akamaihd.net'], ['saa', '76489', 'http://srs-f.akamaihd.net'], ['smbiz', '76461', 'http://smbiz-f.akamaihd.net'], ['srs', '75229', 'http://srs-f.akamaihd.net'], ['uscc', '76487', 'http://srs-f.akamaihd.net'], ['vetaff', '76462', 'http://vetaff-f.akamaihd.net'], ['arch', '', 'http://ussenate-f.akamaihd.net/'] ] IE_NAME = 'senate.gov' _VALID_URL = r'https?://(?:www\.)?senate\.gov/isvp/?\?(?P<qs>.+)' _TESTS = [{ 'url': 
'http://www.senate.gov/isvp/?comm=judiciary&type=live&stt=&filename=judiciary031715&auto_play=false&wmode=transparent&poster=http%3A%2F%2Fwww.judiciary.senate.gov%2Fthemes%2Fjudiciary%2Fimages%2Fvideo-poster-flash-fit.png', 'info_dict': { 'id': 'judiciary031715', 'ext': 'mp4', 'title': 'Integrated Senate Video Player', 'thumbnail': r're:^https?://.*\.(?:jpg|png)$', }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://www.senate.gov/isvp/?type=live&comm=commerce&filename=commerce011514.mp4&auto_play=false', 'info_dict': { 'id': 'commerce011514', 'ext': 'mp4', 'title': 'Integrated Senate Video Player' }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://www.senate.gov/isvp/?type=arch&comm=intel&filename=intel090613&hc_location=ufi', # checksum differs each time 'info_dict': { 'id': 'intel090613', 'ext': 'mp4', 'title': 'Integrated Senate Video Player' } }, { # From http://www.c-span.org/video/?96791-1 'url': 'http://www.senate.gov/isvp?type=live&comm=banking&filename=banking012715', 'only_matching': True, }] @staticmethod def _search_iframe_url(webpage): mobj = re.search( r"<iframe[^>]+src=['\"](?P<url>https?://www\.senate\.gov/isvp/?\?[^'\"]+)['\"]", webpage) if mobj: return mobj.group('url') def _get_info_for_comm(self, committee): for entry in self._COMM_MAP: if entry[0] == committee: return entry[1:] def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) qs = compat_parse_qs(re.match(self._VALID_URL, url).group('qs')) if not qs.get('filename') or not qs.get('type') or not qs.get('comm'): raise ExtractorError('Invalid URL', expected=True) video_id = re.sub(r'.mp4$', '', qs['filename'][0]) webpage = self._download_webpage(url, video_id) if smuggled_data.get('force_title'): title = smuggled_data['force_title'] else: title = self._html_search_regex(r'<title>([^<]+)</title>', webpage, video_id) poster = qs.get('poster') thumbnail = poster[0] if poster else None video_type = qs['type'][0] committee = 
video_type if video_type == 'arch' else qs['comm'][0] stream_num, domain = self._get_info_for_comm(committee) formats = [] if video_type == 'arch': filename = video_id if '.' in video_id else video_id + '.mp4' formats = [{ # All parameters in the query string are necessary to prevent a 403 error 'url': compat_urlparse.urljoin(domain, filename) + '?v=3.1.0&fp=&r=&g=', }] else: hdcore_sign = 'hdcore=3.1.0' url_params = (domain, video_id, stream_num) f4m_url = '%s/z/%s_1@%s/manifest.f4m?' % url_params + hdcore_sign m3u8_url = '%s/i/%s_1@%s/master.m3u8' % url_params for entry in self._extract_f4m_formats(f4m_url, video_id, f4m_id='f4m'): # URLs without the extra param induce an 404 error entry.update({'extra_param_to_segment_url': hdcore_sign}) formats.append(entry) for entry in self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4', m3u8_id='m3u8'): mobj = re.search(r'(?P<tag>(?:-p|-b)).m3u8', entry['url']) if mobj: entry['format_id'] += mobj.group('tag') formats.append(entry) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'formats': formats, 'thumbnail': thumbnail, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/oktoberfesttv.py
youtube_dl/extractor/oktoberfesttv.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class OktoberfestTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?oktoberfest-tv\.de/[^/]+/[^/]+/video/(?P<id>[^/?#]+)' _TEST = { 'url': 'http://www.oktoberfest-tv.de/de/kameras/video/hb-zelt', 'info_dict': { 'id': 'hb-zelt', 'ext': 'mp4', 'title': 're:^Live-Kamera: Hofbräuzelt [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'thumbnail': r're:^https?://.*\.jpg$', 'is_live': True, }, 'params': { 'skip_download': True, } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._live_title(self._html_search_regex( r'<h1><strong>.*?</strong>(.*?)</h1>', webpage, 'title')) clip = self._search_regex( r"clip:\s*\{\s*url:\s*'([^']+)'", webpage, 'clip') ncurl = self._search_regex( r"netConnectionUrl:\s*'([^']+)'", webpage, 'rtmp base') video_url = ncurl + clip thumbnail = self._search_regex( r"canvas:\s*\{\s*backgroundImage:\s*'url\(([^)]+)\)'", webpage, 'thumbnail', fatal=False) return { 'id': video_id, 'title': title, 'url': video_url, 'ext': 'mp4', 'is_live': True, 'thumbnail': thumbnail, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/dailymail.py
youtube_dl/extractor/dailymail.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_str from ..utils import ( int_or_none, determine_protocol, try_get, unescapeHTML, ) class DailyMailIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?dailymail\.co\.uk/(?:video/[^/]+/video-|embed/video/)(?P<id>[0-9]+)' _TESTS = [{ 'url': 'http://www.dailymail.co.uk/video/tvshowbiz/video-1295863/The-Mountain-appears-sparkling-water-ad-Heavy-Bubbles.html', 'md5': 'f6129624562251f628296c3a9ffde124', 'info_dict': { 'id': '1295863', 'ext': 'mp4', 'title': 'The Mountain appears in sparkling water ad for \'Heavy Bubbles\'', 'description': 'md5:a93d74b6da172dd5dc4d973e0b766a84', } }, { 'url': 'http://www.dailymail.co.uk/embed/video/1295863.html', 'only_matching': True, }] @staticmethod def _extract_urls(webpage): return re.findall( r'<iframe\b[^>]+\bsrc=["\'](?P<url>(?:https?:)?//(?:www\.)?dailymail\.co\.uk/embed/video/\d+\.html)', webpage) def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) video_data = self._parse_json(self._search_regex( r"data-opts='({.+?})'", webpage, 'video data'), video_id) title = unescapeHTML(video_data['title']) sources_url = (try_get( video_data, (lambda x: x['plugins']['sources']['url'], lambda x: x['sources']['url']), compat_str) or 'http://www.dailymail.co.uk/api/player/%s/video-sources.json' % video_id) video_sources = self._download_json(sources_url, video_id) body = video_sources.get('body') if body: video_sources = body formats = [] for rendition in video_sources['renditions']: rendition_url = rendition.get('url') if not rendition_url: continue tbr = int_or_none(rendition.get('encodingRate'), 1000) container = rendition.get('videoContainer') is_hls = container == 'M2TS' protocol = 'm3u8_native' if is_hls else determine_protocol({'url': rendition_url}) formats.append({ 'format_id': ('hls' if is_hls else protocol) + ('-%d' % tbr if tbr else ''), 'url': 
rendition_url, 'width': int_or_none(rendition.get('frameWidth')), 'height': int_or_none(rendition.get('frameHeight')), 'tbr': tbr, 'vcodec': rendition.get('videoCodec'), 'container': container, 'protocol': protocol, 'ext': 'mp4' if is_hls else None, }) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': unescapeHTML(video_data.get('descr')), 'thumbnail': video_data.get('poster') or video_data.get('thumbnail'), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/svt.py
youtube_dl/extractor/svt.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_str from ..utils import ( determine_ext, dict_get, int_or_none, unified_timestamp, str_or_none, strip_or_none, try_get, ) class SVTBaseIE(InfoExtractor): _GEO_COUNTRIES = ['SE'] def _extract_video(self, video_info, video_id): is_live = dict_get(video_info, ('live', 'simulcast'), default=False) m3u8_protocol = 'm3u8' if is_live else 'm3u8_native' formats = [] for vr in video_info['videoReferences']: player_type = vr.get('playerType') or vr.get('format') vurl = vr['url'] ext = determine_ext(vurl) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( vurl, video_id, ext='mp4', entry_protocol=m3u8_protocol, m3u8_id=player_type, fatal=False)) elif ext == 'f4m': formats.extend(self._extract_f4m_formats( vurl + '?hdcore=3.3.0', video_id, f4m_id=player_type, fatal=False)) elif ext == 'mpd': if player_type == 'dashhbbtv': formats.extend(self._extract_mpd_formats( vurl, video_id, mpd_id=player_type, fatal=False)) else: formats.append({ 'format_id': player_type, 'url': vurl, }) rights = try_get(video_info, lambda x: x['rights'], dict) or {} if not formats and rights.get('geoBlockedSweden'): self.raise_geo_restricted( 'This video is only available in Sweden', countries=self._GEO_COUNTRIES) self._sort_formats(formats) subtitles = {} subtitle_references = dict_get(video_info, ('subtitles', 'subtitleReferences')) if isinstance(subtitle_references, list): for sr in subtitle_references: subtitle_url = sr.get('url') subtitle_lang = sr.get('language', 'sv') if subtitle_url: if determine_ext(subtitle_url) == 'm3u8': # TODO(yan12125): handle WebVTT in m3u8 manifests continue subtitles.setdefault(subtitle_lang, []).append({'url': subtitle_url}) title = video_info.get('title') series = video_info.get('programTitle') season_number = int_or_none(video_info.get('season')) episode = video_info.get('episodeTitle') episode_number = 
int_or_none(video_info.get('episodeNumber')) timestamp = unified_timestamp(rights.get('validFrom')) duration = int_or_none(dict_get(video_info, ('materialLength', 'contentDuration'))) age_limit = None adult = dict_get( video_info, ('inappropriateForChildren', 'blockedForChildren'), skip_false_values=False) if adult is not None: age_limit = 18 if adult else 0 return { 'id': video_id, 'title': title, 'formats': formats, 'subtitles': subtitles, 'duration': duration, 'timestamp': timestamp, 'age_limit': age_limit, 'series': series, 'season_number': season_number, 'episode': episode, 'episode_number': episode_number, 'is_live': is_live, } class SVTIE(SVTBaseIE): _VALID_URL = r'https?://(?:www\.)?svt\.se/wd\?(?:.*?&)?widgetId=(?P<widget_id>\d+)&.*?\barticleId=(?P<id>\d+)' _TEST = { 'url': 'http://www.svt.se/wd?widgetId=23991&sectionId=541&articleId=2900353&type=embed&contextSectionId=123&autostart=false', 'md5': '33e9a5d8f646523ce0868ecfb0eed77d', 'info_dict': { 'id': '2900353', 'ext': 'mp4', 'title': 'Stjärnorna skojar till det - under SVT-intervjun', 'duration': 27, 'age_limit': 0, }, } @staticmethod def _extract_url(webpage): mobj = re.search( r'(?:<iframe src|href)="(?P<url>%s[^"]*)"' % SVTIE._VALID_URL, webpage) if mobj: return mobj.group('url') def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) widget_id = mobj.group('widget_id') article_id = mobj.group('id') info = self._download_json( 'http://www.svt.se/wd?widgetId=%s&articleId=%s&format=json&type=embed&output=json' % (widget_id, article_id), article_id) info_dict = self._extract_video(info['video'], article_id) info_dict['title'] = info['context']['title'] return info_dict class SVTPlayBaseIE(SVTBaseIE): _SVTPLAY_RE = r'root\s*\[\s*(["\'])_*svtplay\1\s*\]\s*=\s*(?P<json>{.+?})\s*;\s*\n' class SVTPlayIE(SVTPlayBaseIE): IE_DESC = 'SVT Play and Öppet arkiv' _VALID_URL = r'''(?x) (?: (?: svt:| https?://(?:www\.)?svt\.se/barnkanalen/barnplay/[^/]+/ ) (?P<svt_id>[^/?#&]+)| 
https?://(?:www\.)?(?:svtplay|oppetarkiv)\.se/(?:video|klipp|kanaler)/(?P<id>[^/?#&]+) (?:.*?(?:modalId|id)=(?P<modal_id>[\da-zA-Z-]+))? ) ''' _TESTS = [{ 'url': 'https://www.svtplay.se/video/30479064', 'md5': '2382036fd6f8c994856c323fe51c426e', 'info_dict': { 'id': '8zVbDPA', 'ext': 'mp4', 'title': 'Designdrömmar i Stenungsund', 'timestamp': 1615770000, 'upload_date': '20210315', 'duration': 3519, 'thumbnail': r're:^https?://(?:.*[\.-]jpg|www.svtstatic.se/image/.*)$', 'age_limit': 0, 'subtitles': { 'sv': [{ 'ext': 'vtt', }] }, }, 'params': { 'format': 'bestvideo', # skip for now due to download test asserts that segment is > 10000 bytes and svt uses # init segments that are smaller # AssertionError: Expected test_SVTPlay_jNwpV9P.mp4 to be at least 9.77KiB, but it's only 864.00B 'skip_download': True, }, }, { 'url': 'https://www.svtplay.se/video/30479064/husdrommar/husdrommar-sasong-8-designdrommar-i-stenungsund?modalId=8zVbDPA', 'only_matching': True, }, { 'url': 'https://www.svtplay.se/video/30684086/rapport/rapport-24-apr-18-00-7?id=e72gVpa', 'only_matching': True, }, { # geo restricted to Sweden 'url': 'http://www.oppetarkiv.se/video/5219710/trollflojten', 'only_matching': True, }, { 'url': 'http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg', 'only_matching': True, }, { 'url': 'https://www.svtplay.se/kanaler/svt1', 'only_matching': True, }, { 'url': 'svt:1376446-003A', 'only_matching': True, }, { 'url': 'svt:14278044', 'only_matching': True, }, { 'url': 'https://www.svt.se/barnkanalen/barnplay/kar/eWv5MLX/', 'only_matching': True, }, { 'url': 'svt:eWv5MLX', 'only_matching': True, }] def _adjust_title(self, info): if info['is_live']: info['title'] = self._live_title(info['title']) def _extract_by_video_id(self, video_id, webpage=None): data = self._download_json( 'https://api.svt.se/videoplayer-api/video/%s' % video_id, video_id, headers=self.geo_verification_headers()) info_dict = self._extract_video(data, video_id) if not info_dict.get('title'): title 
= dict_get(info_dict, ('episode', 'series')) if not title and webpage: title = re.sub( r'\s*\|\s*.+?$', '', self._og_search_title(webpage)) if not title: title = video_id info_dict['title'] = title self._adjust_title(info_dict) return info_dict def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') svt_id = mobj.group('svt_id') or mobj.group('modal_id') if svt_id: return self._extract_by_video_id(svt_id) webpage = self._download_webpage(url, video_id) data = self._parse_json( self._search_regex( self._SVTPLAY_RE, webpage, 'embedded data', default='{}', group='json'), video_id, fatal=False) thumbnail = self._og_search_thumbnail(webpage) if data: video_info = try_get( data, lambda x: x['context']['dispatcher']['stores']['VideoTitlePageStore']['data']['video'], dict) if video_info: info_dict = self._extract_video(video_info, video_id) info_dict.update({ 'title': data['context']['dispatcher']['stores']['MetaStore']['title'], 'thumbnail': thumbnail, }) self._adjust_title(info_dict) return info_dict svt_id = try_get( data, lambda x: x['statistics']['dataLake']['content']['id'], compat_str) if not svt_id: svt_id = self._search_regex( (r'<video[^>]+data-video-id=["\']([\da-zA-Z-]+)', r'<[^>]+\bdata-rt=["\']top-area-play-button["\'][^>]+\bhref=["\'][^"\']*video/%s/[^"\']*\b(?:modalId|id)=([\da-zA-Z-]+)' % re.escape(video_id), r'["\']videoSvtId["\']\s*:\s*["\']([\da-zA-Z-]+)', r'["\']videoSvtId\\?["\']\s*:\s*\\?["\']([\da-zA-Z-]+)', r'"content"\s*:\s*{.*?"id"\s*:\s*"([\da-zA-Z-]+)"', r'["\']svtId["\']\s*:\s*["\']([\da-zA-Z-]+)', r'["\']svtId\\?["\']\s*:\s*\\?["\']([\da-zA-Z-]+)'), webpage, 'video id') info_dict = self._extract_by_video_id(svt_id, webpage) info_dict['thumbnail'] = thumbnail return info_dict class SVTSeriesIE(SVTPlayBaseIE): _VALID_URL = r'https?://(?:www\.)?svtplay\.se/(?P<id>[^/?&#]+)(?:.+?\btab=(?P<season_slug>[^&#]+))?' 
_TESTS = [{ 'url': 'https://www.svtplay.se/rederiet', 'info_dict': { 'id': '14445680', 'title': 'Rederiet', 'description': 'md5:d9fdfff17f5d8f73468176ecd2836039', }, 'playlist_mincount': 318, }, { 'url': 'https://www.svtplay.se/rederiet?tab=season-2-14445680', 'info_dict': { 'id': 'season-2-14445680', 'title': 'Rederiet - Säsong 2', 'description': 'md5:d9fdfff17f5d8f73468176ecd2836039', }, 'playlist_mincount': 12, }] @classmethod def suitable(cls, url): return False if SVTIE.suitable(url) or SVTPlayIE.suitable(url) else super(SVTSeriesIE, cls).suitable(url) def _real_extract(self, url): series_slug, season_id = re.match(self._VALID_URL, url).groups() series = self._download_json( 'https://api.svt.se/contento/graphql', series_slug, 'Downloading series page', query={ 'query': '''{ listablesBySlug(slugs: ["%s"]) { associatedContent(include: [productionPeriod, season]) { items { item { ... on Episode { videoSvtId } } } id name } id longDescription name shortDescription } }''' % series_slug, })['data']['listablesBySlug'][0] season_name = None entries = [] for season in series['associatedContent']: if not isinstance(season, dict): continue if season_id: if season.get('id') != season_id: continue season_name = season.get('name') items = season.get('items') if not isinstance(items, list): continue for item in items: video = item.get('item') or {} content_id = video.get('videoSvtId') if not content_id or not isinstance(content_id, compat_str): continue entries.append(self.url_result( 'svt:' + content_id, SVTPlayIE.ie_key(), content_id)) title = series.get('name') season_name = season_name or season_id if title and season_name: title = '%s - %s' % (title, season_name) elif season_id: title = season_id return self.playlist_result( entries, season_id or series.get('id'), title, dict_get(series, ('longDescription', 'shortDescription'))) class SVTPageIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?svt\.se/(?P<path>(?:[^/]+/)*(?P<id>[^/?&#]+))' _TESTS = [{ 'url': 
'https://www.svt.se/sport/ishockey/bakom-masken-lehners-kamp-mot-mental-ohalsa', 'info_dict': { 'id': '25298267', 'title': 'Bakom masken – Lehners kamp mot mental ohälsa', }, 'playlist_count': 4, }, { 'url': 'https://www.svt.se/nyheter/utrikes/svenska-andrea-ar-en-mil-fran-branderna-i-kalifornien', 'info_dict': { 'id': '24243746', 'title': 'Svenska Andrea redo att fly sitt hem i Kalifornien', }, 'playlist_count': 2, }, { # only programTitle 'url': 'http://www.svt.se/sport/ishockey/jagr-tacklar-giroux-under-intervjun', 'info_dict': { 'id': '8439V2K', 'ext': 'mp4', 'title': 'Stjärnorna skojar till det - under SVT-intervjun', 'duration': 27, 'age_limit': 0, }, }, { 'url': 'https://www.svt.se/nyheter/lokalt/vast/svt-testar-tar-nagon-upp-skrapet-1', 'only_matching': True, }, { 'url': 'https://www.svt.se/vader/manadskronikor/maj2018', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if SVTIE.suitable(url) or SVTPlayIE.suitable(url) else super(SVTPageIE, cls).suitable(url) def _real_extract(self, url): path, display_id = re.match(self._VALID_URL, url).groups() article = self._download_json( 'https://api.svt.se/nss-api/page/' + path, display_id, query={'q': 'articles'})['articles']['content'][0] entries = [] def _process_content(content): if content.get('_type') in ('VIDEOCLIP', 'VIDEOEPISODE'): video_id = compat_str(content['image']['svtId']) entries.append(self.url_result( 'svt:' + video_id, SVTPlayIE.ie_key(), video_id)) for media in article.get('media', []): _process_content(media) for obj in article.get('structuredBody', []): _process_content(obj.get('content') or {}) return self.playlist_result( entries, str_or_none(article.get('id')), strip_or_none(article.get('title')))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/drbonanza.py
youtube_dl/extractor/drbonanza.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( js_to_json, parse_duration, unescapeHTML, ) class DRBonanzaIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?dr\.dk/bonanza/[^/]+/\d+/[^/]+/(?P<id>\d+)/(?P<display_id>[^/?#&]+)' _TEST = { 'url': 'http://www.dr.dk/bonanza/serie/154/matador/40312/matador---0824-komme-fremmede-', 'info_dict': { 'id': '40312', 'display_id': 'matador---0824-komme-fremmede-', 'ext': 'mp4', 'title': 'MATADOR - 08:24. "Komme fremmede".', 'description': 'md5:77b4c1ac4d4c1b9d610ab4395212ff84', 'thumbnail': r're:^https?://.*\.(?:gif|jpg)$', 'duration': 4613, }, } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id, display_id = mobj.group('id', 'display_id') webpage = self._download_webpage(url, display_id) info = self._parse_html5_media_entries( url, webpage, display_id, m3u8_id='hls', m3u8_entry_protocol='m3u8_native')[0] self._sort_formats(info['formats']) asset = self._parse_json( self._search_regex( r'(?s)currentAsset\s*=\s*({.+?})\s*</script', webpage, 'asset'), display_id, transform_source=js_to_json) title = unescapeHTML(asset['AssetTitle']).strip() def extract(field): return self._search_regex( r'<div[^>]+>\s*<p>%s:<p>\s*</div>\s*<div[^>]+>\s*<p>([^<]+)</p>' % field, webpage, field, default=None) info.update({ 'id': asset.get('AssetId') or video_id, 'display_id': display_id, 'title': title, 'description': extract('Programinfo'), 'duration': parse_duration(extract('Tid')), 'thumbnail': asset.get('AssetImageUrl'), }) return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/washingtonpost.py
youtube_dl/extractor/washingtonpost.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor class WashingtonPostIE(InfoExtractor): IE_NAME = 'washingtonpost' _VALID_URL = r'(?:washingtonpost:|https?://(?:www\.)?washingtonpost\.com/(?:video|posttv)/(?:[^/]+/)*)(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})' _EMBED_URL = r'https?://(?:www\.)?washingtonpost\.com/video/c/embed/[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}' _TESTS = [{ 'url': 'https://www.washingtonpost.com/video/c/video/480ba4ee-1ec7-11e6-82c2-a7dcb313287d', 'md5': '6f537e1334b714eb15f9563bd4b9cdfa', 'info_dict': { 'id': '480ba4ee-1ec7-11e6-82c2-a7dcb313287d', 'ext': 'mp4', 'title': 'Egypt finds belongings, debris from plane crash', 'description': 'md5:a17ceee432f215a5371388c1f680bd86', 'upload_date': '20160520', 'timestamp': 1463775187, }, }, { 'url': 'https://www.washingtonpost.com/video/world/egypt-finds-belongings-debris-from-plane-crash/2016/05/20/480ba4ee-1ec7-11e6-82c2-a7dcb313287d_video.html', 'only_matching': True, }, { 'url': 'https://www.washingtonpost.com/posttv/world/iraq-to-track-down-antiquities-after-islamic-state-museum-rampage/2015/02/28/7c57e916-bf86-11e4-9dfb-03366e719af8_video.html', 'only_matching': True, }] @classmethod def _extract_urls(cls, webpage): return re.findall( r'<iframe[^>]+\bsrc=["\'](%s)' % cls._EMBED_URL, webpage) def _real_extract(self, url): video_id = self._match_id(url) return self.url_result( 'arcpublishing:wapo:' + video_id, 'ArcPublishing', video_id) class WashingtonPostArticleIE(InfoExtractor): IE_NAME = 'washingtonpost:article' _VALID_URL = r'https?://(?:www\.)?washingtonpost\.com/(?:[^/]+/)*(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'http://www.washingtonpost.com/sf/national/2014/03/22/sinkhole-of-bureaucracy/', 'info_dict': { 'id': 'sinkhole-of-bureaucracy', 'title': 'Sinkhole of bureaucracy', }, 'playlist': [{ 'md5': 'b9be794ceb56c7267d410a13f99d801a', 'info_dict': { 'id': 'fc433c38-b146-11e3-b8b3-44b1d1cd4c1f', 'ext': 'mp4', 
'title': 'Breaking Points: The Paper Mine', 'duration': 1290, 'description': 'Overly complicated paper pushing is nothing new to government bureaucracy. But the way federal retirement applications are filed may be the most outdated. David Fahrenthold explains.', 'timestamp': 1395440416, 'upload_date': '20140321', }, }, { 'md5': '1fff6a689d8770966df78c8cb6c8c17c', 'info_dict': { 'id': '41255e28-b14a-11e3-b8b3-44b1d1cd4c1f', 'ext': 'mp4', 'title': 'The town bureaucracy sustains', 'description': 'Underneath the friendly town of Boyers is a sea of government paperwork. In a disused limestone mine, hundreds of locals now track, file and process retirement applications for the federal government. We set out to find out what it\'s like to do paperwork 230 feet underground.', 'duration': 2220, 'timestamp': 1395441819, 'upload_date': '20140321', }, }], }, { 'url': 'http://www.washingtonpost.com/blogs/wonkblog/wp/2014/12/31/one-airline-figured-out-how-to-make-sure-its-airplanes-never-disappear/', 'info_dict': { 'id': 'one-airline-figured-out-how-to-make-sure-its-airplanes-never-disappear', 'title': 'One airline figured out how to make sure its airplanes never disappear', }, 'playlist': [{ 'md5': 'a7c1b5634ba5e57a6a82cdffa5b1e0d0', 'info_dict': { 'id': '0e4bb54c-9065-11e4-a66f-0ca5037a597d', 'ext': 'mp4', 'description': 'Washington Post transportation reporter Ashley Halsey III explains why a plane\'s black box needs to be recovered from a crash site instead of having its information streamed in real time throughout the flight.', 'upload_date': '20141230', 'timestamp': 1419972442, 'title': 'Why black boxes don’t transmit data in real time', } }] }] @classmethod def suitable(cls, url): return False if WashingtonPostIE.suitable(url) else super(WashingtonPostArticleIE, cls).suitable(url) def _real_extract(self, url): page_id = self._match_id(url) webpage = self._download_webpage(url, page_id) title = self._og_search_title(webpage) uuids = re.findall(r'''(?x) (?: 
<div\s+class="posttv-video-embed[^>]*?data-uuid=| data-video-uuid= )"([^"]+)"''', webpage) entries = [self.url_result('washingtonpost:%s' % uuid, 'WashingtonPost', uuid) for uuid in uuids] return { '_type': 'playlist', 'entries': entries, 'id': page_id, 'title': title, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/brightcove.py
youtube_dl/extractor/brightcove.py
# coding: utf-8 from __future__ import unicode_literals import base64 import re import struct from .adobepass import AdobePassIE from .common import InfoExtractor from ..compat import ( compat_etree_fromstring, compat_HTTPError, compat_parse_qs, compat_urllib_parse_urlparse, compat_urlparse, compat_xml_parse_error, ) from ..utils import ( clean_html, extract_attributes, ExtractorError, find_xpath_attr, fix_xml_ampersands, float_or_none, int_or_none, js_to_json, mimetype2ext, parse_iso8601, smuggle_url, str_or_none, try_get, unescapeHTML, unsmuggle_url, UnsupportedError, update_url_query, url_or_none, ) class BrightcoveLegacyIE(InfoExtractor): IE_NAME = 'brightcove:legacy' _VALID_URL = r'(?:https?://.*brightcove\.com/(services|viewer).*?\?|brightcove:)(?P<query>.*)' _TESTS = [ { # From http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/ 'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001', 'md5': '5423e113865d26e40624dce2e4b45d95', 'note': 'Test Brightcove downloads and detection in GenericIE', 'info_dict': { 'id': '2371591881001', 'ext': 'mp4', 'title': 'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”', 'uploader': '8TV', 'description': 'md5:a950cc4285c43e44d763d036710cd9cd', 'timestamp': 1368213670, 'upload_date': '20130510', 'uploader_id': '1589608506001', }, 'skip': 'The player has been deactivated by the content owner', }, { # From http://medianetwork.oracle.com/video/player/1785452137001 'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001', 'info_dict': { 'id': '1785452137001', 'ext': 'flv', 'title': 'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges', 'description': 'John Rose speaks at the JVM Language Summit, August 1, 2012.', 'uploader': 'Oracle', 'timestamp': 1344975024, 'upload_date': '20120814', 'uploader_id': '1460825906', 
}, 'skip': 'video not playable', }, { # From http://mashable.com/2013/10/26/thermoelectric-bracelet-lets-you-control-your-body-temperature/ 'url': 'http://c.brightcove.com/services/viewer/federated_f9?&playerID=1265504713001&publisherID=AQ%7E%7E%2CAAABBzUwv1E%7E%2CxP-xFHVUstiMFlNYfvF4G9yFnNaqCw_9&videoID=2750934548001', 'info_dict': { 'id': '2750934548001', 'ext': 'mp4', 'title': 'This Bracelet Acts as a Personal Thermostat', 'description': 'md5:547b78c64f4112766ccf4e151c20b6a0', # 'uploader': 'Mashable', 'timestamp': 1382041798, 'upload_date': '20131017', 'uploader_id': '1130468786001', }, }, { # test that the default referer works # from http://national.ballet.ca/interact/video/Lost_in_Motion_II/ 'url': 'http://link.brightcove.com/services/player/bcpid756015033001?bckey=AQ~~,AAAApYJi_Ck~,GxhXCegT1Dp39ilhXuxMJxasUhVNZiil&bctid=2878862109001', 'info_dict': { 'id': '2878862109001', 'ext': 'mp4', 'title': 'Lost in Motion II', 'description': 'md5:363109c02998fee92ec02211bd8000df', 'uploader': 'National Ballet of Canada', }, 'skip': 'Video gone', }, { # test flv videos served by akamaihd.net # From http://www.redbull.com/en/bike/stories/1331655643987/replay-uci-dh-world-cup-2014-from-fort-william 'url': 'http://c.brightcove.com/services/viewer/htmlFederated?%40videoPlayer=ref%3Aevent-stream-356&linkBaseURL=http%3A%2F%2Fwww.redbull.com%2Fen%2Fbike%2Fvideos%2F1331655630249%2Freplay-uci-fort-william-2014-dh&playerKey=AQ%7E%7E%2CAAAApYJ7UqE%7E%2Cxqr_zXk0I-zzNndy8NlHogrCb5QdyZRf&playerID=1398061561001#__youtubedl_smuggle=%7B%22Referer%22%3A+%22http%3A%2F%2Fwww.redbull.com%2Fen%2Fbike%2Fstories%2F1331655643987%2Freplay-uci-dh-world-cup-2014-from-fort-william%22%7D', # The md5 checksum changes on each download 'info_dict': { 'id': '3750436379001', 'ext': 'flv', 'title': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals', 'uploader': 'RBTV Old (do not use)', 'description': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals', 'timestamp': 1409122195, 
'upload_date': '20140827', 'uploader_id': '710858724001', }, 'skip': 'Video gone', }, { # playlist with 'videoList' # from http://support.brightcove.com/en/video-cloud/docs/playlist-support-single-video-players 'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=3550052898001&playerKey=AQ%7E%7E%2CAAABmA9XpXk%7E%2C-Kp7jNgisre1fG5OdqpAFUTcs0lP_ZoL', 'info_dict': { 'title': 'Sealife', 'id': '3550319591001', }, 'playlist_mincount': 7, 'skip': 'Unsupported URL', }, { # playlist with 'playlistTab' (https://github.com/ytdl-org/youtube-dl/issues/9965) 'url': 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=AQ%7E%7E,AAABXlLMdok%7E,NJ4EoMlZ4rZdx9eU1rkMVd8EaYPBBUlg', 'info_dict': { 'id': '1522758701001', 'title': 'Lesson 08', }, 'playlist_mincount': 10, 'skip': 'Unsupported URL', }, { # playerID inferred from bcpid # from http://www.un.org/chinese/News/story.asp?NewsID=27724 'url': 'https://link.brightcove.com/services/player/bcpid1722935254001/?bctid=5360463607001&autoStart=false&secureConnections=true&width=650&height=350', 'only_matching': True, # Tested in GenericIE } ] @classmethod def _build_brightcove_url(cls, object_str): """ Build a Brightcove url from a xml string containing <object class="BrightcoveExperience">{params}</object> """ # Fix up some stupid HTML, see https://github.com/ytdl-org/youtube-dl/issues/1553 object_str = re.sub(r'(<param(?:\s+[a-zA-Z0-9_]+="[^"]*")*)>', lambda m: m.group(1) + '/>', object_str) # Fix up some stupid XML, see https://github.com/ytdl-org/youtube-dl/issues/1608 object_str = object_str.replace('<--', '<!--') # remove namespace to simplify extraction object_str = re.sub(r'(<object[^>]*)(xmlns=".*?")', r'\1', object_str) object_str = fix_xml_ampersands(object_str) try: object_doc = compat_etree_fromstring(object_str.encode('utf-8')) except compat_xml_parse_error: return fv_el = find_xpath_attr(object_doc, './param', 'name', 'flashVars') if fv_el is not None: 
flashvars = dict( (k, v[0]) for k, v in compat_parse_qs(fv_el.attrib['value']).items()) else: flashvars = {} data_url = object_doc.attrib.get('data', '') data_url_params = compat_parse_qs(compat_urllib_parse_urlparse(data_url).query) def find_param(name): if name in flashvars: return flashvars[name] node = find_xpath_attr(object_doc, './param', 'name', name) if node is not None: return node.attrib['value'] return data_url_params.get(name) params = {} playerID = find_param('playerID') or find_param('playerId') if playerID is None: raise ExtractorError('Cannot find player ID') params['playerID'] = playerID playerKey = find_param('playerKey') # Not all pages define this value if playerKey is not None: params['playerKey'] = playerKey # These fields hold the id of the video videoPlayer = find_param('@videoPlayer') or find_param('videoId') or find_param('videoID') or find_param('@videoList') if videoPlayer is not None: if isinstance(videoPlayer, list): videoPlayer = videoPlayer[0] videoPlayer = videoPlayer.strip() # UUID is also possible for videoPlayer (e.g. # http://www.popcornflix.com/hoodies-vs-hooligans/7f2d2b87-bbf2-4623-acfb-ea942b4f01dd # or http://www8.hp.com/cn/zh/home.html) if not (re.match( r'^(?:\d+|[\da-fA-F]{8}-?[\da-fA-F]{4}-?[\da-fA-F]{4}-?[\da-fA-F]{4}-?[\da-fA-F]{12})$', videoPlayer) or videoPlayer.startswith('ref:')): return None params['@videoPlayer'] = videoPlayer linkBase = find_param('linkBaseURL') if linkBase is not None: params['linkBaseURL'] = linkBase return cls._make_brightcove_url(params) @classmethod def _build_brightcove_url_from_js(cls, object_js): # The layout of JS is as follows: # customBC.createVideo = function (width, height, playerID, playerKey, videoPlayer, VideoRandomID) { # // build Brightcove <object /> XML # } m = re.search( r'''(?x)customBC\.createVideo\( .*? 
# skipping width and height ["\'](?P<playerID>\d+)["\']\s*,\s* # playerID ["\'](?P<playerKey>AQ[^"\']{48})[^"\']*["\']\s*,\s* # playerKey begins with AQ and is 50 characters # in length, however it's appended to itself # in places, so truncate ["\'](?P<videoID>\d+)["\'] # @videoPlayer ''', object_js) if m: return cls._make_brightcove_url(m.groupdict()) @classmethod def _make_brightcove_url(cls, params): return update_url_query( 'http://c.brightcove.com/services/viewer/htmlFederated', params) @classmethod def _extract_brightcove_url(cls, webpage): """Try to extract the brightcove url from the webpage, returns None if it can't be found """ urls = cls._extract_brightcove_urls(webpage) return urls[0] if urls else None @classmethod def _extract_brightcove_urls(cls, webpage): """Return a list of all Brightcove URLs from the webpage """ url_m = re.search( r'''(?x) <meta\s+ (?:property|itemprop)=([\'"])(?:og:video|embedURL)\1[^>]+ content=([\'"])(?P<url>https?://(?:secure|c)\.brightcove.com/(?:(?!\2).)+)\2 ''', webpage) if url_m: url = unescapeHTML(url_m.group('url')) # Some sites don't add it, we can't download with this url, for example: # http://www.ktvu.com/videos/news/raw-video-caltrain-releases-video-of-man-almost/vCTZdY/ if 'playerKey' in url or 'videoId' in url or 'idVideo' in url: return [url] matches = re.findall( r'''(?sx)<object (?: [^>]+?class=[\'"][^>]*?BrightcoveExperience.*?[\'"] | [^>]*?>\s*<param\s+name="movie"\s+value="https?://[^/]*brightcove\.com/ ).+?>\s*</object>''', webpage) if matches: return list(filter(None, [cls._build_brightcove_url(m) for m in matches])) matches = re.findall(r'(customBC\.createVideo\(.+?\);)', webpage) if matches: return list(filter(None, [ cls._build_brightcove_url_from_js(custom_bc) for custom_bc in matches])) return [src for _, src in re.findall( r'<iframe[^>]+src=([\'"])((?:https?:)?//link\.brightcove\.com/services/player/(?!\1).+)\1', webpage)] def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) # 
Change the 'videoId' and others field to '@videoPlayer' url = re.sub(r'(?<=[?&])(videoI(d|D)|idVideo|bctid)', '%40videoPlayer', url) # Change bckey (used by bcove.me urls) to playerKey url = re.sub(r'(?<=[?&])bckey', 'playerKey', url) mobj = re.match(self._VALID_URL, url) query_str = mobj.group('query') query = compat_urlparse.parse_qs(query_str) videoPlayer = query.get('@videoPlayer') if videoPlayer: # We set the original url as the default 'Referer' header referer = query.get('linkBaseURL', [None])[0] or smuggled_data.get('Referer', url) video_id = videoPlayer[0] if 'playerID' not in query: mobj = re.search(r'/bcpid(\d+)', url) if mobj is not None: query['playerID'] = [mobj.group(1)] publisher_id = query.get('publisherId') if publisher_id and publisher_id[0].isdigit(): publisher_id = publisher_id[0] if not publisher_id: player_key = query.get('playerKey') if player_key and ',' in player_key[0]: player_key = player_key[0] else: player_id = query.get('playerID') if player_id and player_id[0].isdigit(): headers = {} if referer: headers['Referer'] = referer player_page = self._download_webpage( 'http://link.brightcove.com/services/player/bcpid' + player_id[0], video_id, headers=headers, fatal=False) if player_page: player_key = self._search_regex( r'<param\s+name="playerKey"\s+value="([\w~,-]+)"', player_page, 'player key', fatal=False) if player_key: enc_pub_id = player_key.split(',')[1].replace('~', '=') publisher_id = struct.unpack('>Q', base64.urlsafe_b64decode(enc_pub_id))[0] if publisher_id: brightcove_new_url = 'http://players.brightcove.net/%s/default_default/index.html?videoId=%s' % (publisher_id, video_id) if referer: brightcove_new_url = smuggle_url(brightcove_new_url, {'referrer': referer}) return self.url_result(brightcove_new_url, BrightcoveNewIE.ie_key(), video_id) # TODO: figure out if it's possible to extract playlistId from playerKey # elif 'playerKey' in query: # player_key = query['playerKey'] # return self._get_playlist_info(player_key[0]) raise 
UnsupportedError(url) class BrightcoveNewIE(AdobePassIE): IE_NAME = 'brightcove:new' _VALID_URL = r'https?://players\.brightcove\.net/(?P<account_id>\d+)/(?P<player_id>[^/]+)_(?P<embed>[^/]+)/index\.html\?.*(?P<content_type>video|playlist)Id=(?P<video_id>\d+|ref:[^&]+)' _TESTS = [{ 'url': 'http://players.brightcove.net/929656772001/e41d32dc-ec74-459e-a845-6c69f7b724ea_default/index.html?videoId=4463358922001', 'md5': 'c8100925723840d4b0d243f7025703be', 'info_dict': { 'id': '4463358922001', 'ext': 'mp4', 'title': 'Meet the man behind Popcorn Time', 'description': 'md5:eac376a4fe366edc70279bfb681aea16', 'duration': 165.768, 'timestamp': 1441391203, 'upload_date': '20150904', 'uploader_id': '929656772001', 'formats': 'mincount:20', }, }, { # with rtmp streams 'url': 'http://players.brightcove.net/4036320279001/5d112ed9-283f-485f-a7f9-33f42e8bc042_default/index.html?videoId=4279049078001', 'info_dict': { 'id': '4279049078001', 'ext': 'mp4', 'title': 'Titansgrave: Chapter 0', 'description': 'Titansgrave: Chapter 0', 'duration': 1242.058, 'timestamp': 1433556729, 'upload_date': '20150606', 'uploader_id': '4036320279001', 'formats': 'mincount:39', }, 'params': { # m3u8 download 'skip_download': True, } }, { # playlist stream 'url': 'https://players.brightcove.net/1752604059001/S13cJdUBz_default/index.html?playlistId=5718313430001', 'info_dict': { 'id': '5718313430001', 'title': 'No Audio Playlist', }, 'playlist_count': 7, 'params': { # m3u8 download 'skip_download': True, } }, { 'url': 'http://players.brightcove.net/5690807595001/HyZNerRl7_default/index.html?playlistId=5743160747001', 'only_matching': True, }, { # ref: prefixed video id 'url': 'http://players.brightcove.net/3910869709001/21519b5c-4b3b-4363-accb-bdc8f358f823_default/index.html?videoId=ref:7069442', 'only_matching': True, }, { # non numeric ref: prefixed video id 'url': 'http://players.brightcove.net/710858724001/default_default/index.html?videoId=ref:event-stream-356', 'only_matching': True, }, { # 
unavailable video without message but with error_code 'url': 'http://players.brightcove.net/1305187701/c832abfb-641b-44eb-9da0-2fe76786505f_default/index.html?videoId=4377407326001', 'only_matching': True, }] @staticmethod def _extract_url(ie, webpage): urls = BrightcoveNewIE._extract_urls(ie, webpage) return urls[0] if urls else None @staticmethod def _extract_urls(ie, webpage): # Reference: # 1. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#setvideoiniframe # 2. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#tag # 3. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#setvideousingjavascript # 4. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/in-page-embed-player-implementation.html # 5. https://support.brightcove.com/en/video-cloud/docs/dynamically-assigning-videos-player entries = [] # Look for iframe embeds [1] for _, url in re.findall( r'<iframe[^>]+src=(["\'])((?:https?:)?//players\.brightcove\.net/\d+/[^/]+/index\.html.+?)\1', webpage): entries.append(url if url.startswith('http') else 'http:' + url) # Look for <video> tags [2] and embed_in_page embeds [3] # [2] looks like: for video, script_tag, account_id, player_id, embed in re.findall( r'''(?isx) (<video(?:-js)?\s+[^>]*\bdata-video-id\s*=\s*['"]?[^>]+>) (?:.*? (<script[^>]+ src=["\'](?:https?:)?//players\.brightcove\.net/ (\d+)/([^/]+)_([^/]+)/index(?:\.min)?\.js ) )? 
''', webpage): attrs = extract_attributes(video) # According to examples from [4] it's unclear whether video id # may be optional and what to do when it is video_id = attrs.get('data-video-id') if not video_id: continue account_id = account_id or attrs.get('data-account') if not account_id: continue player_id = player_id or attrs.get('data-player') or 'default' embed = embed or attrs.get('data-embed') or 'default' bc_url = 'http://players.brightcove.net/%s/%s_%s/index.html?videoId=%s' % ( account_id, player_id, embed, video_id) # Some brightcove videos may be embedded with video tag only and # without script tag or any mentioning of brightcove at all. Such # embeds are considered ambiguous since they are matched based only # on data-video-id and data-account attributes and in the wild may # not be brightcove embeds at all. Let's check reconstructed # brightcove URLs in case of such embeds and only process valid # ones. By this we ensure there is indeed a brightcove embed. if not script_tag and not ie._is_valid_url( bc_url, video_id, 'possible brightcove video'): continue entries.append(bc_url) return entries def _parse_brightcove_metadata(self, json_data, video_id, headers={}): title = json_data['name'].strip() num_drm_sources = 0 formats = [] sources = json_data.get('sources') or [] for source in sources: container = source.get('container') ext = mimetype2ext(source.get('type')) src = source.get('src') # https://support.brightcove.com/playback-api-video-fields-reference#key_systems_object if container == 'WVM' or source.get('key_systems'): num_drm_sources += 1 continue elif ext == 'ism': continue elif ext == 'm3u8' or container == 'M2TS': if not src: continue formats.extend(self._extract_m3u8_formats( src, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) elif ext == 'mpd': if not src: continue formats.extend(self._extract_mpd_formats(src, video_id, 'dash', fatal=False)) else: streaming_src = source.get('streaming_src') stream_name, app_name = 
source.get('stream_name'), source.get('app_name') if not src and not streaming_src and (not stream_name or not app_name): continue tbr = float_or_none(source.get('avg_bitrate'), 1000) height = int_or_none(source.get('height')) width = int_or_none(source.get('width')) f = { 'tbr': tbr, 'filesize': int_or_none(source.get('size')), 'container': container, 'ext': ext or container.lower(), } if width == 0 and height == 0: f.update({ 'vcodec': 'none', }) else: f.update({ 'width': width, 'height': height, 'vcodec': source.get('codec'), }) def build_format_id(kind): format_id = kind if tbr: format_id += '-%dk' % int(tbr) if height: format_id += '-%dp' % height return format_id if src or streaming_src: f.update({ 'url': src or streaming_src, 'format_id': build_format_id('http' if src else 'http-streaming'), 'source_preference': 0 if src else -1, }) else: f.update({ 'url': app_name, 'play_path': stream_name, 'format_id': build_format_id('rtmp'), }) formats.append(f) if not formats: errors = json_data.get('errors') if errors: error = errors[0] raise ExtractorError( error.get('message') or error.get('error_subcode') or error['error_code'], expected=True) if sources and num_drm_sources == len(sources): raise ExtractorError('This video is DRM protected.', expected=True) self._sort_formats(formats) for f in formats: f.setdefault('http_headers', {}).update(headers) subtitles = {} for text_track in json_data.get('text_tracks', []): if text_track.get('kind') != 'captions': continue text_track_url = url_or_none(text_track.get('src')) if not text_track_url: continue lang = (str_or_none(text_track.get('srclang')) or str_or_none(text_track.get('label')) or 'en').lower() subtitles.setdefault(lang, []).append({ 'url': text_track_url, }) is_live = False duration = float_or_none(json_data.get('duration'), 1000) if duration is not None and duration <= 0: is_live = True return { 'id': video_id, 'title': self._live_title(title) if is_live else title, 'description': 
clean_html(json_data.get('description')), 'thumbnail': json_data.get('thumbnail') or json_data.get('poster'), 'duration': duration, 'timestamp': parse_iso8601(json_data.get('published_at')), 'uploader_id': json_data.get('account_id'), 'formats': formats, 'subtitles': subtitles, 'tags': json_data.get('tags', []), 'is_live': is_live, } def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) self._initialize_geo_bypass({ 'countries': smuggled_data.get('geo_countries'), 'ip_blocks': smuggled_data.get('geo_ip_blocks'), }) account_id, player_id, embed, content_type, video_id = re.match(self._VALID_URL, url).groups() policy_key_id = '%s_%s' % (account_id, player_id) policy_key = self._downloader.cache.load('brightcove', policy_key_id) policy_key_extracted = False store_pk = lambda x: self._downloader.cache.store('brightcove', policy_key_id, x) def extract_policy_key(): base_url = 'http://players.brightcove.net/%s/%s_%s/' % (account_id, player_id, embed) config = self._download_json( base_url + 'config.json', video_id, fatal=False) or {} policy_key = try_get( config, lambda x: x['video_cloud']['policy_key']) if not policy_key: webpage = self._download_webpage( base_url + 'index.min.js', video_id) catalog = self._search_regex( r'catalog\(({.+?})\);', webpage, 'catalog', default=None) if catalog: catalog = self._parse_json( js_to_json(catalog), video_id, fatal=False) if catalog: policy_key = catalog.get('policyKey') if not policy_key: policy_key = self._search_regex( r'policyKey\s*:\s*(["\'])(?P<pk>.+?)\1', webpage, 'policy key', group='pk') store_pk(policy_key) return policy_key api_url = 'https://edge.api.brightcove.com/playback/v1/accounts/%s/%ss/%s' % (account_id, content_type, video_id) headers = {} referrer = smuggled_data.get('referrer') if referrer: headers.update({ 'Referer': referrer, 'Origin': re.search(r'https?://[^/]+', referrer).group(0), }) for _ in range(2): if not policy_key: policy_key = extract_policy_key() policy_key_extracted = True 
headers['Accept'] = 'application/json;pk=%s' % policy_key try: json_data = self._download_json(api_url, video_id, headers=headers) break except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code in (401, 403): json_data = self._parse_json(e.cause.read().decode(), video_id)[0] message = json_data.get('message') or json_data['error_code'] if json_data.get('error_subcode') == 'CLIENT_GEO': self.raise_geo_restricted(msg=message) elif json_data.get('error_code') == 'INVALID_POLICY_KEY' and not policy_key_extracted: policy_key = None store_pk(None) continue raise ExtractorError(message, expected=True) raise errors = json_data.get('errors') if errors and errors[0].get('error_subcode') == 'TVE_AUTH': custom_fields = json_data['custom_fields'] tve_token = self._extract_mvpd_auth( smuggled_data['source_url'], video_id, custom_fields['bcadobepassrequestorid'], custom_fields['bcadobepassresourceid']) json_data = self._download_json( api_url, video_id, headers={ 'Accept': 'application/json;pk=%s' % policy_key }, query={ 'tveToken': tve_token, }) if content_type == 'playlist': return self.playlist_result( [self._parse_brightcove_metadata(vid, vid.get('id'), headers) for vid in json_data.get('videos', []) if vid.get('id')], json_data.get('id'), json_data.get('name'), json_data.get('description')) return self._parse_brightcove_metadata( json_data, video_id, headers=headers)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/packtpub.py
youtube_dl/extractor/packtpub.py
from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..compat import ( # compat_str, compat_HTTPError, ) from ..utils import ( clean_html, ExtractorError, # remove_end, str_or_none, strip_or_none, unified_timestamp, # urljoin, ) class PacktPubBaseIE(InfoExtractor): # _PACKT_BASE = 'https://www.packtpub.com' _STATIC_PRODUCTS_BASE = 'https://static.packt-cdn.com/products/' class PacktPubIE(PacktPubBaseIE): _VALID_URL = r'https?://(?:(?:www\.)?packtpub\.com/mapt|subscription\.packtpub\.com)/video/[^/]+/(?P<course_id>\d+)/(?P<chapter_id>[^/]+)/(?P<id>[^/]+)(?:/(?P<display_id>[^/?&#]+))?' _TESTS = [{ 'url': 'https://www.packtpub.com/mapt/video/web-development/9781787122215/20528/20530/Project+Intro', 'md5': '1e74bd6cfd45d7d07666f4684ef58f70', 'info_dict': { 'id': '20530', 'ext': 'mp4', 'title': 'Project Intro', 'thumbnail': r're:(?i)^https?://.*\.jpg', 'timestamp': 1490918400, 'upload_date': '20170331', }, }, { 'url': 'https://subscription.packtpub.com/video/web_development/9781787122215/20528/20530/project-intro', 'only_matching': True, }, { 'url': 'https://subscription.packtpub.com/video/programming/9781838988906/p1/video1_1/business-card-project', 'only_matching': True, }] _NETRC_MACHINE = 'packtpub' _TOKEN = None def _real_initialize(self): username, password = self._get_login_info() if username is None: return try: self._TOKEN = self._download_json( 'https://services.packtpub.com/auth-v1/users/tokens', None, 'Downloading Authorization Token', data=json.dumps({ 'username': username, 'password': password, }).encode())['data']['access'] except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code in (400, 401, 404): message = self._parse_json(e.cause.read().decode(), None)['message'] raise ExtractorError(message, expected=True) raise def _real_extract(self, url): course_id, chapter_id, video_id, display_id = re.match(self._VALID_URL, url).groups() headers = {} if self._TOKEN: 
headers['Authorization'] = 'Bearer ' + self._TOKEN try: video_url = self._download_json( 'https://services.packtpub.com/products-v1/products/%s/%s/%s' % (course_id, chapter_id, video_id), video_id, 'Downloading JSON video', headers=headers)['data'] except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400: self.raise_login_required('This video is locked') raise # TODO: find a better way to avoid duplicating course requests # metadata = self._download_json( # '%s/products/%s/chapters/%s/sections/%s/metadata' # % (self._MAPT_REST, course_id, chapter_id, video_id), # video_id)['data'] # title = metadata['pageTitle'] # course_title = metadata.get('title') # if course_title: # title = remove_end(title, ' - %s' % course_title) # timestamp = unified_timestamp(metadata.get('publicationDate')) # thumbnail = urljoin(self._PACKT_BASE, metadata.get('filepath')) return { 'id': video_id, 'url': video_url, 'title': display_id or video_id, # title, # 'thumbnail': thumbnail, # 'timestamp': timestamp, } class PacktPubCourseIE(PacktPubBaseIE): _VALID_URL = r'(?P<url>https?://(?:(?:www\.)?packtpub\.com/mapt|subscription\.packtpub\.com)/video/[^/]+/(?P<id>\d+))' _TESTS = [{ 'url': 'https://www.packtpub.com/mapt/video/web-development/9781787122215', 'info_dict': { 'id': '9781787122215', 'title': 'Learn Nodejs by building 12 projects [Video]', 'description': 'md5:489da8d953f416e51927b60a1c7db0aa', }, 'playlist_count': 90, }, { 'url': 'https://subscription.packtpub.com/video/web_development/9781787122215', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if PacktPubIE.suitable(url) else super( PacktPubCourseIE, cls).suitable(url) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) url, course_id = mobj.group('url', 'id') course = self._download_json( self._STATIC_PRODUCTS_BASE + '%s/toc' % course_id, course_id) metadata = self._download_json( self._STATIC_PRODUCTS_BASE + '%s/summary' % course_id, course_id, 
fatal=False) or {} entries = [] for chapter_num, chapter in enumerate(course['chapters'], 1): chapter_id = str_or_none(chapter.get('id')) sections = chapter.get('sections') if not chapter_id or not isinstance(sections, list): continue chapter_info = { 'chapter': chapter.get('title'), 'chapter_number': chapter_num, 'chapter_id': chapter_id, } for section in sections: section_id = str_or_none(section.get('id')) if not section_id or section.get('contentType') != 'video': continue entry = { '_type': 'url_transparent', 'url': '/'.join([url, chapter_id, section_id]), 'title': strip_or_none(section.get('title')), 'description': clean_html(section.get('summary')), 'thumbnail': metadata.get('coverImage'), 'timestamp': unified_timestamp(metadata.get('publicationDate')), 'ie_key': PacktPubIE.ie_key(), } entry.update(chapter_info) entries.append(entry) return self.playlist_result( entries, course_id, metadata.get('title'), clean_html(metadata.get('about')))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/radiobremen.py
youtube_dl/extractor/radiobremen.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import parse_duration class RadioBremenIE(InfoExtractor): _VALID_URL = r'http?://(?:www\.)?radiobremen\.de/mediathek/(?:index\.html)?\?id=(?P<id>[0-9]+)' IE_NAME = 'radiobremen' _TEST = { 'url': 'http://www.radiobremen.de/mediathek/?id=141876', 'info_dict': { 'id': '141876', 'ext': 'mp4', 'duration': 178, 'width': 512, 'title': 'Druck auf Patrick Öztürk', 'thumbnail': r're:https?://.*\.jpg$', 'description': 'Gegen den SPD-Bürgerschaftsabgeordneten Patrick Öztürk wird wegen Beihilfe zum gewerbsmäßigen Betrug ermittelt. Am Donnerstagabend sollte er dem Vorstand des SPD-Unterbezirks Bremerhaven dazu Rede und Antwort stehen.', }, } def _real_extract(self, url): video_id = self._match_id(url) meta_url = 'http://www.radiobremen.de/apps/php/mediathek/metadaten.php?id=%s' % video_id meta_doc = self._download_webpage( meta_url, video_id, 'Downloading metadata') title = self._html_search_regex( r'<h1.*>(?P<title>.+)</h1>', meta_doc, 'title') description = self._html_search_regex( r'<p>(?P<description>.*)</p>', meta_doc, 'description', fatal=False) duration = parse_duration(self._html_search_regex( r'L&auml;nge:</td>\s+<td>(?P<duration>[0-9]+:[0-9]+)</td>', meta_doc, 'duration', fatal=False)) page_doc = self._download_webpage( url, video_id, 'Downloading video information') mobj = re.search( r"ardformatplayerclassic\(\'playerbereich\',\'(?P<width>[0-9]+)\',\'.*\',\'(?P<video_id>[0-9]+)\',\'(?P<secret>[0-9]+)\',\'(?P<thumbnail>.+)\',\'\'\)", page_doc) video_url = ( "http://dl-ondemand.radiobremen.de/mediabase/%s/%s_%s_%s.mp4" % (video_id, video_id, mobj.group("secret"), mobj.group('width'))) formats = [{ 'url': video_url, 'ext': 'mp4', 'width': int(mobj.group('width')), }] return { 'id': video_id, 'title': title, 'description': description, 'duration': duration, 'formats': formats, 'thumbnail': mobj.group('thumbnail'), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/lnkgo.py
youtube_dl/extractor/lnkgo.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( clean_html, compat_str, int_or_none, parse_iso8601, ) class LnkGoIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?lnk(?:go)?\.(?:alfa\.)?lt/(?:visi-video/[^/]+|video)/(?P<id>[A-Za-z0-9-]+)(?:/(?P<episode_id>\d+))?' _TESTS = [{ 'url': 'http://www.lnkgo.lt/visi-video/aktualai-pratesimas/ziurek-putka-trys-klausimai', 'info_dict': { 'id': '10809', 'ext': 'mp4', 'title': "Put'ka: Trys Klausimai", 'upload_date': '20161216', 'description': 'Seniai matytas Put’ka užduoda tris klausimėlius. Pabandykime surasti atsakymus.', 'age_limit': 18, 'duration': 117, 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1481904000, }, 'params': { 'skip_download': True, # HLS download }, }, { 'url': 'http://lnkgo.alfa.lt/visi-video/aktualai-pratesimas/ziurek-nerdas-taiso-kompiuteri-2', 'info_dict': { 'id': '10467', 'ext': 'mp4', 'title': 'Nėrdas: Kompiuterio Valymas', 'upload_date': '20150113', 'description': 'md5:7352d113a242a808676ff17e69db6a69', 'age_limit': 18, 'duration': 346, 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1421164800, }, 'params': { 'skip_download': True, # HLS download }, }, { 'url': 'https://lnk.lt/video/neigalieji-tv-bokste/37413', 'only_matching': True, }] _AGE_LIMITS = { 'N-7': 7, 'N-14': 14, 'S': 18, } _M3U8_TEMPL = 'https://vod.lnk.lt/lnk_vod/lnk/lnk/%s:%s/playlist.m3u8%s' def _real_extract(self, url): display_id, video_id = re.match(self._VALID_URL, url).groups() video_info = self._download_json( 'https://lnk.lt/api/main/video-page/%s/%s/false' % (display_id, video_id or '0'), display_id)['videoConfig']['videoInfo'] video_id = compat_str(video_info['id']) title = video_info['title'] prefix = 'smil' if video_info.get('isQualityChangeAvailable') else 'mp4' formats = self._extract_m3u8_formats( self._M3U8_TEMPL % (prefix, video_info['videoUrl'], video_info.get('secureTokenParams') or ''), video_id, 'mp4', 'm3u8_native') 
self._sort_formats(formats) poster_image = video_info.get('posterImage') return { 'id': video_id, 'display_id': display_id, 'title': title, 'formats': formats, 'thumbnail': 'https://lnk.lt/all-images/' + poster_image if poster_image else None, 'duration': int_or_none(video_info.get('duration')), 'description': clean_html(video_info.get('htmlDescription')), 'age_limit': self._AGE_LIMITS.get(video_info.get('pgRating'), 0), 'timestamp': parse_iso8601(video_info.get('airDate')), 'view_count': int_or_none(video_info.get('viewsCount')), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/fourtube.py
youtube_dl/extractor/fourtube.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_b64decode, compat_str, compat_urllib_parse_unquote, compat_urlparse, ) from ..utils import ( int_or_none, parse_duration, parse_iso8601, str_or_none, str_to_int, try_get, unified_timestamp, url_or_none, ) class FourTubeBaseIE(InfoExtractor): def _extract_formats(self, url, video_id, media_id, sources): token_url = 'https://%s/%s/desktop/%s' % ( self._TKN_HOST, media_id, '+'.join(sources)) parsed_url = compat_urlparse.urlparse(url) tokens = self._download_json(token_url, video_id, data=b'', headers={ 'Origin': '%s://%s' % (parsed_url.scheme, parsed_url.hostname), 'Referer': url, }) formats = [{ 'url': tokens[format]['token'], 'format_id': format + 'p', 'resolution': format + 'p', 'quality': int(format), } for format in sources] self._sort_formats(formats) return formats def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) kind, video_id, display_id = mobj.group('kind', 'id', 'display_id') if kind == 'm' or not display_id: url = self._URL_TEMPLATE % video_id webpage = self._download_webpage(url, video_id) title = self._html_search_meta('name', webpage) timestamp = parse_iso8601(self._html_search_meta( 'uploadDate', webpage)) thumbnail = self._html_search_meta('thumbnailUrl', webpage) uploader_id = self._html_search_regex( r'<a class="item-to-subscribe" href="[^"]+/(?:channel|user)s?/([^/"]+)" title="Go to [^"]+ page">', webpage, 'uploader id', fatal=False) uploader = self._html_search_regex( r'<a class="item-to-subscribe" href="[^"]+/(?:channel|user)s?/[^/"]+" title="Go to ([^"]+) page">', webpage, 'uploader', fatal=False) categories_html = self._search_regex( r'(?s)><i class="icon icon-tag"></i>\s*Categories / Tags\s*.*?<ul class="[^"]*?list[^"]*?">(.*?)</ul>', webpage, 'categories', fatal=False) categories = None if categories_html: categories = [ c.strip() for c in re.findall( r'(?s)<li><a.*?>(.*?)</a>', categories_html)] view_count = 
str_to_int(self._search_regex( r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserPlays:([0-9,]+)">', webpage, 'view count', default=None)) like_count = str_to_int(self._search_regex( r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserLikes:([0-9,]+)">', webpage, 'like count', default=None)) duration = parse_duration(self._html_search_meta('duration', webpage)) media_id = self._search_regex( r'<button[^>]+data-id=(["\'])(?P<id>\d+)\1[^>]+data-quality=', webpage, 'media id', default=None, group='id') sources = [ quality for _, quality in re.findall(r'<button[^>]+data-quality=(["\'])(.+?)\1', webpage)] if not (media_id and sources): player_js = self._download_webpage( self._search_regex( r'<script[^>]id=(["\'])playerembed\1[^>]+src=(["\'])(?P<url>.+?)\2', webpage, 'player JS', group='url'), video_id, 'Downloading player JS') params_js = self._search_regex( r'\$\.ajax\(url,\ opts\);\s*\}\s*\}\)\(([0-9,\[\] ]+)\)', player_js, 'initialization parameters') params = self._parse_json('[%s]' % params_js, video_id) media_id = params[0] sources = ['%s' % p for p in params[2]] formats = self._extract_formats(url, video_id, media_id, sources) return { 'id': video_id, 'title': title, 'formats': formats, 'categories': categories, 'thumbnail': thumbnail, 'uploader': uploader, 'uploader_id': uploader_id, 'timestamp': timestamp, 'like_count': like_count, 'view_count': view_count, 'duration': duration, 'age_limit': 18, } class FourTubeIE(FourTubeBaseIE): IE_NAME = '4tube' _VALID_URL = r'https?://(?:(?P<kind>www|m)\.)?4tube\.com/(?:videos|embed)/(?P<id>\d+)(?:/(?P<display_id>[^/?#&]+))?' 
_URL_TEMPLATE = 'https://www.4tube.com/videos/%s/video' _TKN_HOST = 'token.4tube.com' _TESTS = [{ 'url': 'http://www.4tube.com/videos/209733/hot-babe-holly-michaels-gets-her-ass-stuffed-by-black', 'md5': '6516c8ac63b03de06bc8eac14362db4f', 'info_dict': { 'id': '209733', 'ext': 'mp4', 'title': 'Hot Babe Holly Michaels gets her ass stuffed by black', 'uploader': 'WCP Club', 'uploader_id': 'wcp-club', 'upload_date': '20131031', 'timestamp': 1383263892, 'duration': 583, 'view_count': int, 'like_count': int, 'categories': list, 'age_limit': 18, }, }, { 'url': 'http://www.4tube.com/embed/209733', 'only_matching': True, }, { 'url': 'http://m.4tube.com/videos/209733/hot-babe-holly-michaels-gets-her-ass-stuffed-by-black', 'only_matching': True, }] class FuxIE(FourTubeBaseIE): _VALID_URL = r'https?://(?:(?P<kind>www|m)\.)?fux\.com/(?:video|embed)/(?P<id>\d+)(?:/(?P<display_id>[^/?#&]+))?' _URL_TEMPLATE = 'https://www.fux.com/video/%s/video' _TKN_HOST = 'token.fux.com' _TESTS = [{ 'url': 'https://www.fux.com/video/195359/awesome-fucking-kitchen-ends-cum-swallow', 'info_dict': { 'id': '195359', 'ext': 'mp4', 'title': 'Awesome fucking in the kitchen ends with cum swallow', 'uploader': 'alenci2342', 'uploader_id': 'alenci2342', 'upload_date': '20131230', 'timestamp': 1388361660, 'duration': 289, 'view_count': int, 'like_count': int, 'categories': list, 'age_limit': 18, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.fux.com/embed/195359', 'only_matching': True, }, { 'url': 'https://www.fux.com/video/195359/awesome-fucking-kitchen-ends-cum-swallow', 'only_matching': True, }] class PornTubeIE(FourTubeBaseIE): _VALID_URL = r'https?://(?:(?P<kind>www|m)\.)?porntube\.com/(?:videos/(?P<display_id>[^/]+)_|embed/)(?P<id>\d+)' _URL_TEMPLATE = 'https://www.porntube.com/videos/video_%s' _TKN_HOST = 'tkn.porntube.com' _TESTS = [{ 'url': 'https://www.porntube.com/videos/teen-couple-doing-anal_7089759', 'info_dict': { 'id': '7089759', 'ext': 'mp4', 'title': 'Teen couple 
doing anal', 'uploader': 'Alexy', 'uploader_id': '91488', 'upload_date': '20150606', 'timestamp': 1433595647, 'duration': 5052, 'view_count': int, 'like_count': int, 'age_limit': 18, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.porntube.com/videos/squirting-teen-ballerina-ecg_1331406', 'info_dict': { 'id': '1331406', 'ext': 'mp4', 'title': 'Squirting Teen Ballerina on ECG', 'uploader': 'Exploited College Girls', 'uploader_id': '665', 'channel': 'Exploited College Girls', 'channel_id': '665', 'upload_date': '20130920', 'timestamp': 1379685485, 'duration': 851, 'view_count': int, 'like_count': int, 'age_limit': 18, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.porntube.com/embed/7089759', 'only_matching': True, }, { 'url': 'https://m.porntube.com/videos/teen-couple-doing-anal_7089759', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id, display_id = mobj.group('id', 'display_id') webpage = self._download_webpage(url, display_id) video = self._parse_json( self._search_regex( r'INITIALSTATE\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage, 'data', group='value'), video_id, transform_source=lambda x: compat_urllib_parse_unquote( compat_b64decode(x).decode('utf-8')))['page']['video'] title = video['title'] media_id = video['mediaId'] sources = [compat_str(e['height']) for e in video['encodings'] if e.get('height')] formats = self._extract_formats(url, video_id, media_id, sources) thumbnail = url_or_none(video.get('masterThumb')) uploader = try_get(video, lambda x: x['user']['username'], compat_str) uploader_id = str_or_none(try_get( video, lambda x: x['user']['id'], int)) channel = try_get(video, lambda x: x['channel']['name'], compat_str) channel_id = str_or_none(try_get( video, lambda x: x['channel']['id'], int)) like_count = int_or_none(video.get('likes')) dislike_count = int_or_none(video.get('dislikes')) view_count = int_or_none(video.get('playsQty')) duration = 
int_or_none(video.get('durationInSeconds')) timestamp = unified_timestamp(video.get('publishedAt')) return { 'id': video_id, 'title': title, 'formats': formats, 'thumbnail': thumbnail, 'uploader': uploader or channel, 'uploader_id': uploader_id or channel_id, 'channel': channel, 'channel_id': channel_id, 'timestamp': timestamp, 'like_count': like_count, 'dislike_count': dislike_count, 'view_count': view_count, 'duration': duration, 'age_limit': 18, } class PornerBrosIE(FourTubeBaseIE): _VALID_URL = r'https?://(?:(?P<kind>www|m)\.)?pornerbros\.com/(?:videos/(?P<display_id>[^/]+)_|embed/)(?P<id>\d+)' _URL_TEMPLATE = 'https://www.pornerbros.com/videos/video_%s' _TKN_HOST = 'token.pornerbros.com' _TESTS = [{ 'url': 'https://www.pornerbros.com/videos/skinny-brunette-takes-big-cock-down-her-anal-hole_181369', 'md5': '6516c8ac63b03de06bc8eac14362db4f', 'info_dict': { 'id': '181369', 'ext': 'mp4', 'title': 'Skinny brunette takes big cock down her anal hole', 'uploader': 'PornerBros HD', 'uploader_id': 'pornerbros-hd', 'upload_date': '20130130', 'timestamp': 1359527401, 'duration': 1224, 'view_count': int, 'categories': list, 'age_limit': 18, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.pornerbros.com/embed/181369', 'only_matching': True, }, { 'url': 'https://m.pornerbros.com/videos/skinny-brunette-takes-big-cock-down-her-anal-hole_181369', 'only_matching': True, }]
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/xminus.py
youtube_dl/extractor/xminus.py
# coding: utf-8 from __future__ import unicode_literals import re import time from .common import InfoExtractor from ..compat import ( compat_ord, ) from ..utils import ( int_or_none, parse_duration, ) class XMinusIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?x-minus\.org/track/(?P<id>[0-9]+)' _TEST = { 'url': 'http://x-minus.org/track/4542/%D0%BF%D0%B5%D1%81%D0%B5%D0%BD%D0%BA%D0%B0-%D1%88%D0%BE%D1%84%D0%B5%D1%80%D0%B0.html', 'md5': '401a15f2d2dcf6d592cb95528d72a2a8', 'info_dict': { 'id': '4542', 'ext': 'mp3', 'title': 'Леонид Агутин-Песенка шофёра', 'duration': 156, 'tbr': 320, 'filesize_approx': 5900000, 'view_count': int, 'description': 'md5:03238c5b663810bc79cf42ef3c03e371', } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) artist = self._html_search_regex( r'<a[^>]+href="/artist/\d+">([^<]+)</a>', webpage, 'artist') title = artist + '-' + self._html_search_regex( r'<span[^>]+class="minustrack-full-title(?:\s+[^"]+)?"[^>]*>([^<]+)', webpage, 'title') duration = parse_duration(self._html_search_regex( r'<span[^>]+class="player-duration(?:\s+[^"]+)?"[^>]*>([^<]+)', webpage, 'duration', fatal=False)) mobj = re.search( r'<div[^>]+class="dw-info(?:\s+[^"]+)?"[^>]*>(?P<tbr>\d+)\s*кбит/c\s+(?P<filesize>[0-9.]+)\s*мб</div>', webpage) tbr = filesize_approx = None if mobj: filesize_approx = float(mobj.group('filesize')) * 1000000 tbr = float(mobj.group('tbr')) view_count = int_or_none(self._html_search_regex( r'<span><[^>]+class="icon-chart-bar".*?>(\d+)</span>', webpage, 'view count', fatal=False)) description = self._html_search_regex( r'(?s)<pre[^>]+id="lyrics-original"[^>]*>(.*?)</pre>', webpage, 'song lyrics', fatal=False) if description: description = re.sub(' *\r *', '\n', description) k = self._search_regex( r'<div[^>]+id="player-bottom"[^>]+data-k="([^"]+)">', webpage, 'encoded data') h = time.time() / 3600 a = sum(map(int, [compat_ord(c) for c in k])) + int(video_id) + h video_url = 
'http://x-minus.me/dl/minus?id=%s&tkn2=%df%d' % (video_id, a, h) return { 'id': video_id, 'title': title, 'url': video_url, # The extension is unknown until actual downloading 'ext': 'mp3', 'duration': duration, 'filesize_approx': filesize_approx, 'tbr': tbr, 'view_count': view_count, 'description': description, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/lemonde.py
youtube_dl/extractor/lemonde.py
from __future__ import unicode_literals from .common import InfoExtractor class LemondeIE(InfoExtractor): _VALID_URL = r'https?://(?:.+?\.)?lemonde\.fr/(?:[^/]+/)*(?P<id>[^/]+)\.html' _TESTS = [{ 'url': 'http://www.lemonde.fr/police-justice/video/2016/01/19/comprendre-l-affaire-bygmalion-en-cinq-minutes_4849702_1653578.html', 'md5': 'da120c8722d8632eec6ced937536cc98', 'info_dict': { 'id': 'lqm3kl', 'ext': 'mp4', 'title': "Comprendre l'affaire Bygmalion en 5 minutes", 'thumbnail': r're:^https?://.*\.jpg', 'duration': 309, 'upload_date': '20160119', 'timestamp': 1453194778, 'uploader_id': '3pmkp', }, }, { # standard iframe embed 'url': 'http://www.lemonde.fr/les-decodeurs/article/2016/10/18/tout-comprendre-du-ceta-le-petit-cousin-du-traite-transatlantique_5015920_4355770.html', 'info_dict': { 'id': 'uzsxms', 'ext': 'mp4', 'title': "CETA : quelles suites pour l'accord commercial entre l'Europe et le Canada ?", 'thumbnail': r're:^https?://.*\.jpg', 'duration': 325, 'upload_date': '20161021', 'timestamp': 1477044540, 'uploader_id': '3pmkp', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://redaction.actu.lemonde.fr/societe/video/2016/01/18/calais-debut-des-travaux-de-defrichement-dans-la-jungle_4849233_3224.html', 'only_matching': True, }, { # YouTube embeds 'url': 'http://www.lemonde.fr/pixels/article/2016/12/09/pourquoi-pewdiepie-superstar-de-youtube-a-menace-de-fermer-sa-chaine_5046649_4408996.html', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) digiteka_url = self._proto_relative_url(self._search_regex( r'url\s*:\s*(["\'])(?P<url>(?:https?://)?//(?:www\.)?(?:digiteka\.net|ultimedia\.com)/deliver/.+?)\1', webpage, 'digiteka url', group='url', default=None)) if digiteka_url: return self.url_result(digiteka_url, 'Digiteka') return self.url_result(url, 'Generic')
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/pinterest.py
youtube_dl/extractor/pinterest.py
# coding: utf-8 from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..compat import compat_str from ..utils import ( determine_ext, float_or_none, int_or_none, try_get, unified_timestamp, url_or_none, ) class PinterestBaseIE(InfoExtractor): _VALID_URL_BASE = r'https?://(?:[^/]+\.)?pinterest\.(?:com|fr|de|ch|jp|cl|ca|it|co\.uk|nz|ru|com\.au|at|pt|co\.kr|es|com\.mx|dk|ph|th|com\.uy|co|nl|info|kr|ie|vn|com\.vn|ec|mx|in|pe|co\.at|hu|co\.in|co\.nz|id|com\.ec|com\.py|tw|be|uk|com\.bo|com\.pe)' def _call_api(self, resource, video_id, options): return self._download_json( 'https://www.pinterest.com/resource/%sResource/get/' % resource, video_id, 'Download %s JSON metadata' % resource, query={ 'data': json.dumps({'options': options}) })['resource_response'] def _extract_video(self, data, extract_formats=True): video_id = data['id'] title = (data.get('title') or data.get('grid_title') or video_id).strip() urls = [] formats = [] duration = None if extract_formats: for format_id, format_dict in data['videos']['video_list'].items(): if not isinstance(format_dict, dict): continue format_url = url_or_none(format_dict.get('url')) if not format_url or format_url in urls: continue urls.append(format_url) duration = float_or_none(format_dict.get('duration'), scale=1000) ext = determine_ext(format_url) if 'hls' in format_id.lower() or ext == 'm3u8': formats.extend(self._extract_m3u8_formats( format_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id=format_id, fatal=False)) else: formats.append({ 'url': format_url, 'format_id': format_id, 'width': int_or_none(format_dict.get('width')), 'height': int_or_none(format_dict.get('height')), 'duration': duration, }) self._sort_formats( formats, field_preference=('height', 'width', 'tbr', 'format_id')) description = data.get('description') or data.get('description_html') or data.get('seo_description') timestamp = unified_timestamp(data.get('created_at')) def _u(field): return 
try_get(data, lambda x: x['closeup_attribution'][field], compat_str) uploader = _u('full_name') uploader_id = _u('id') repost_count = int_or_none(data.get('repin_count')) comment_count = int_or_none(data.get('comment_count')) categories = try_get(data, lambda x: x['pin_join']['visual_annotation'], list) tags = data.get('hashtags') thumbnails = [] images = data.get('images') if isinstance(images, dict): for thumbnail_id, thumbnail in images.items(): if not isinstance(thumbnail, dict): continue thumbnail_url = url_or_none(thumbnail.get('url')) if not thumbnail_url: continue thumbnails.append({ 'url': thumbnail_url, 'width': int_or_none(thumbnail.get('width')), 'height': int_or_none(thumbnail.get('height')), }) return { 'id': video_id, 'title': title, 'description': description, 'duration': duration, 'timestamp': timestamp, 'thumbnails': thumbnails, 'uploader': uploader, 'uploader_id': uploader_id, 'repost_count': repost_count, 'comment_count': comment_count, 'categories': categories, 'tags': tags, 'formats': formats, 'extractor_key': PinterestIE.ie_key(), } class PinterestIE(PinterestBaseIE): _VALID_URL = r'%s/pin/(?P<id>\d+)' % PinterestBaseIE._VALID_URL_BASE _TESTS = [{ 'url': 'https://www.pinterest.com/pin/664281013778109217/', 'md5': '6550c2af85d6d9f3fe3b88954d1577fc', 'info_dict': { 'id': '664281013778109217', 'ext': 'mp4', 'title': 'Origami', 'description': 'md5:b9d90ddf7848e897882de9e73344f7dd', 'duration': 57.7, 'timestamp': 1593073622, 'upload_date': '20200625', 'uploader': 'Love origami -I am Dafei', 'uploader_id': '586523688879454212', 'repost_count': 50, 'comment_count': 0, 'categories': list, 'tags': list, }, }, { 'url': 'https://co.pinterest.com/pin/824721750502199491/', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) data = self._call_api( 'Pin', video_id, { 'field_set_key': 'unauth_react_main_pin', 'id': video_id, })['data'] return self._extract_video(data) class PinterestCollectionIE(PinterestBaseIE): _VALID_URL 
= r'%s/(?P<username>[^/]+)/(?P<id>[^/?#&]+)' % PinterestBaseIE._VALID_URL_BASE _TESTS = [{ 'url': 'https://www.pinterest.ca/mashal0407/cool-diys/', 'info_dict': { 'id': '585890301462791043', 'title': 'cool diys', }, 'playlist_count': 8, }, { 'url': 'https://www.pinterest.ca/fudohub/videos/', 'info_dict': { 'id': '682858430939307450', 'title': 'VIDEOS', }, 'playlist_mincount': 365, 'skip': 'Test with extract_formats=False', }] @classmethod def suitable(cls, url): return False if PinterestIE.suitable(url) else super( PinterestCollectionIE, cls).suitable(url) def _real_extract(self, url): username, slug = re.match(self._VALID_URL, url).groups() board = self._call_api( 'Board', slug, { 'slug': slug, 'username': username })['data'] board_id = board['id'] options = { 'board_id': board_id, 'page_size': 250, } bookmark = None entries = [] while True: if bookmark: options['bookmarks'] = [bookmark] board_feed = self._call_api('BoardFeed', board_id, options) for item in (board_feed.get('data') or []): if not isinstance(item, dict) or item.get('type') != 'pin': continue video_id = item.get('id') if video_id: # Some pins may not be available anonymously via pin URL # video = self._extract_video(item, extract_formats=False) # video.update({ # '_type': 'url_transparent', # 'url': 'https://www.pinterest.com/pin/%s/' % video_id, # }) # entries.append(video) entries.append(self._extract_video(item)) bookmark = board_feed.get('bookmark') if not bookmark: break return self.playlist_result( entries, playlist_id=board_id, playlist_title=board.get('name'))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/vidlii.py
youtube_dl/extractor/vidlii.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( float_or_none, get_element_by_id, int_or_none, strip_or_none, unified_strdate, urljoin, str_to_int, ) class VidLiiIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?vidlii\.com/(?:watch|embed)\?.*?\bv=(?P<id>[0-9A-Za-z_-]{11})' _TESTS = [{ 'url': 'https://www.vidlii.com/watch?v=tJluaH4BJ3v', 'md5': '9bf7d1e005dfa909b6efb0a1ff5175e2', 'info_dict': { 'id': 'tJluaH4BJ3v', 'ext': 'mp4', 'title': 'Vidlii is against me', 'description': 'md5:fa3f119287a2bfb922623b52b1856145', 'thumbnail': 're:https://.*.jpg', 'uploader': 'APPle5auc31995', 'uploader_url': 'https://www.vidlii.com/user/APPle5auc31995', 'upload_date': '20171107', 'duration': 212, 'view_count': int, 'comment_count': int, 'average_rating': float, 'categories': ['News & Politics'], 'tags': ['Vidlii', 'Jan', 'Videogames'], } }, { # HD 'url': 'https://www.vidlii.com/watch?v=2Ng8Abj2Fkl', 'md5': '450e7da379c884788c3a4fa02a3ce1a4', 'info_dict': { 'id': '2Ng8Abj2Fkl', 'ext': 'mp4', 'title': 'test', 'description': 'md5:cc55a86032a7b6b3cbfd0f6b155b52e9', 'thumbnail': 'https://www.vidlii.com/usfi/thmp/2Ng8Abj2Fkl.jpg', 'uploader': 'VidLii', 'uploader_url': 'https://www.vidlii.com/user/VidLii', 'upload_date': '20200927', 'duration': 5, 'view_count': int, 'comment_count': int, 'average_rating': float, 'categories': ['Film & Animation'], 'tags': list, }, }, { 'url': 'https://www.vidlii.com/embed?v=tJluaH4BJ3v&a=0', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( 'https://www.vidlii.com/watch?v=%s' % video_id, video_id) formats = [] def add_format(format_url, height=None): height = int(self._search_regex(r'(\d+)\.mp4', format_url, 'height', default=360)) formats.append({ 'url': format_url, 'format_id': '%dp' % height if height else None, 'height': height, }) sources = re.findall( 
r'src\s*:\s*(["\'])(?P<url>(?:https?://)?(?:(?!\1).)+)\1', webpage) formats = [] if len(sources) > 1: add_format(sources[1][1]) self._check_formats(formats, video_id) if len(sources) > 0: add_format(sources[0][1]) self._sort_formats(formats) title = self._html_search_regex( (r'<h1>([^<]+)</h1>', r'<title>([^<]+) - VidLii<'), webpage, 'title') description = self._html_search_meta( ('description', 'twitter:description'), webpage, default=None) or strip_or_none( get_element_by_id('des_text', webpage)) thumbnail = self._html_search_meta( 'twitter:image', webpage, default=None) if not thumbnail: thumbnail_path = self._search_regex( r'img\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 'thumbnail', fatal=False, group='url') if thumbnail_path: thumbnail = urljoin(url, thumbnail_path) uploader = self._search_regex( r'<div[^>]+class=["\']wt_person[^>]+>\s*<a[^>]+\bhref=["\']/user/[^>]+>([^<]+)', webpage, 'uploader', fatal=False) uploader_url = 'https://www.vidlii.com/user/%s' % uploader if uploader else None upload_date = unified_strdate(self._html_search_meta( 'datePublished', webpage, default=None) or self._search_regex( r'<date>([^<]+)', webpage, 'upload date', fatal=False)) duration = int_or_none(self._html_search_meta( 'video:duration', webpage, 'duration', default=None) or self._search_regex( r'duration\s*:\s*(\d+)', webpage, 'duration', fatal=False)) view_count = str_to_int(self._html_search_regex( (r'<strong>([\d,.]+)</strong> views', r'Views\s*:\s*<strong>([\d,.]+)</strong>'), webpage, 'view count', fatal=False)) comment_count = int_or_none(self._search_regex( (r'<span[^>]+id=["\']cmt_num[^>]+>(\d+)', r'Comments\s*:\s*<strong>(\d+)'), webpage, 'comment count', fatal=False)) average_rating = float_or_none(self._search_regex( r'rating\s*:\s*([\d.]+)', webpage, 'average rating', fatal=False)) category = self._html_search_regex( r'<div>Category\s*:\s*</div>\s*<div>\s*<a[^>]+>([^<]+)', webpage, 'category', fatal=False) categories = [category] if category else None tags 
= [ strip_or_none(tag) for tag in re.findall( r'<a[^>]+\bhref=["\']/results\?.*?q=[^>]*>([^<]+)', webpage) if strip_or_none(tag) ] or None return { 'id': video_id, 'formats': formats, 'title': title, 'description': description, 'thumbnail': thumbnail, 'uploader': uploader, 'uploader_url': uploader_url, 'upload_date': upload_date, 'duration': duration, 'view_count': view_count, 'comment_count': comment_count, 'average_rating': average_rating, 'categories': categories, 'tags': tags, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/daum.py
youtube_dl/extractor/daum.py
# coding: utf-8 from __future__ import unicode_literals import itertools from .common import InfoExtractor from ..compat import ( compat_parse_qs, compat_urllib_parse_unquote, compat_urlparse, ) class DaumBaseIE(InfoExtractor): _KAKAO_EMBED_BASE = 'http://tv.kakao.com/embed/player/cliplink/' class DaumIE(DaumBaseIE): _VALID_URL = r'https?://(?:(?:m\.)?tvpot\.daum\.net/v/|videofarm\.daum\.net/controller/player/VodPlayer\.swf\?vid=)(?P<id>[^?#&]+)' IE_NAME = 'daum.net' _TESTS = [{ 'url': 'http://tvpot.daum.net/v/vab4dyeDBysyBssyukBUjBz', 'info_dict': { 'id': 'vab4dyeDBysyBssyukBUjBz', 'ext': 'mp4', 'title': '마크 헌트 vs 안토니오 실바', 'description': 'Mark Hunt vs Antonio Silva', 'upload_date': '20131217', 'thumbnail': r're:^https?://.*\.(?:jpg|png)', 'duration': 2117, 'view_count': int, 'comment_count': int, 'uploader_id': 186139, 'uploader': '콘간지', 'timestamp': 1387310323, }, }, { 'url': 'http://m.tvpot.daum.net/v/65139429', 'info_dict': { 'id': '65139429', 'ext': 'mp4', 'title': '1297회, \'아빠 아들로 태어나길 잘 했어\' 민수, 감동의 눈물[아빠 어디가] 20150118', 'description': 'md5:79794514261164ff27e36a21ad229fc5', 'upload_date': '20150118', 'thumbnail': r're:^https?://.*\.(?:jpg|png)', 'duration': 154, 'view_count': int, 'comment_count': int, 'uploader': 'MBC 예능', 'uploader_id': 132251, 'timestamp': 1421604228, }, }, { 'url': 'http://tvpot.daum.net/v/07dXWRka62Y%24', 'only_matching': True, }, { 'url': 'http://videofarm.daum.net/controller/player/VodPlayer.swf?vid=vwIpVpCQsT8%24&ref=', 'info_dict': { 'id': 'vwIpVpCQsT8$', 'ext': 'flv', 'title': '01-Korean War ( Trouble on the horizon )', 'description': 'Korean War 01\r\nTrouble on the horizon\r\n전쟁의 먹구름', 'upload_date': '20080223', 'thumbnail': r're:^https?://.*\.(?:jpg|png)', 'duration': 249, 'view_count': int, 'comment_count': int, 'uploader': '까칠한 墮落始祖 황비홍님의', 'uploader_id': 560824, 'timestamp': 1203770745, }, }, { # Requires dte_type=WEB (#9972) 'url': 'http://tvpot.daum.net/v/s3794Uf1NZeZ1qMpGpeqeRU', 'md5': 
'a8917742069a4dd442516b86e7d66529', 'info_dict': { 'id': 's3794Uf1NZeZ1qMpGpeqeRU', 'ext': 'mp4', 'title': '러블리즈 - Destiny (나의 지구) (Lovelyz - Destiny)', 'description': '러블리즈 - Destiny (나의 지구) (Lovelyz - Destiny)\r\n\r\n[쇼! 음악중심] 20160611, 507회', 'upload_date': '20170129', 'uploader': '쇼! 음악중심', 'uploader_id': 2653210, 'timestamp': 1485684628, }, }] def _real_extract(self, url): video_id = compat_urllib_parse_unquote(self._match_id(url)) if not video_id.isdigit(): video_id += '@my' return self.url_result( self._KAKAO_EMBED_BASE + video_id, 'Kakao', video_id) class DaumClipIE(DaumBaseIE): _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/(?:clip/ClipView.(?:do|tv)|mypot/View.do)\?.*?clipid=(?P<id>\d+)' IE_NAME = 'daum.net:clip' _URL_TEMPLATE = 'http://tvpot.daum.net/clip/ClipView.do?clipid=%s' _TESTS = [{ 'url': 'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690', 'info_dict': { 'id': '52554690', 'ext': 'mp4', 'title': 'DOTA 2GETHER 시즌2 6회 - 2부', 'description': 'DOTA 2GETHER 시즌2 6회 - 2부', 'upload_date': '20130831', 'thumbnail': r're:^https?://.*\.(?:jpg|png)', 'duration': 3868, 'view_count': int, 'uploader': 'GOMeXP', 'uploader_id': 6667, 'timestamp': 1377911092, }, }, { 'url': 'http://m.tvpot.daum.net/clip/ClipView.tv?clipid=54999425', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if DaumPlaylistIE.suitable(url) or DaumUserIE.suitable(url) else super(DaumClipIE, cls).suitable(url) def _real_extract(self, url): video_id = self._match_id(url) return self.url_result( self._KAKAO_EMBED_BASE + video_id, 'Kakao', video_id) class DaumListIE(InfoExtractor): def _get_entries(self, list_id, list_id_type): name = None entries = [] for pagenum in itertools.count(1): list_info = self._download_json( 'http://tvpot.daum.net/mypot/json/GetClipInfo.do?size=48&init=true&order=date&page=%d&%s=%s' % ( pagenum, list_id_type, list_id), list_id, 'Downloading list info - %s' % pagenum) entries.extend([ self.url_result( 'http://tvpot.daum.net/v/%s' % 
clip['vid']) for clip in list_info['clip_list'] ]) if not name: name = list_info.get('playlist_bean', {}).get('name') or \ list_info.get('potInfo', {}).get('name') if not list_info.get('has_more'): break return name, entries def _check_clip(self, url, list_id): query_dict = compat_parse_qs(compat_urlparse.urlparse(url).query) if 'clipid' in query_dict: clip_id = query_dict['clipid'][0] if self._downloader.params.get('noplaylist'): self.to_screen('Downloading just video %s because of --no-playlist' % clip_id) return self.url_result(DaumClipIE._URL_TEMPLATE % clip_id, 'DaumClip') else: self.to_screen('Downloading playlist %s - add --no-playlist to just download video' % list_id) class DaumPlaylistIE(DaumListIE): _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/mypot/(?:View\.do|Top\.tv)\?.*?playlistid=(?P<id>[0-9]+)' IE_NAME = 'daum.net:playlist' _URL_TEMPLATE = 'http://tvpot.daum.net/mypot/View.do?playlistid=%s' _TESTS = [{ 'note': 'Playlist url with clipid', 'url': 'http://tvpot.daum.net/mypot/View.do?playlistid=6213966&clipid=73806844', 'info_dict': { 'id': '6213966', 'title': 'Woorissica Official', }, 'playlist_mincount': 181 }, { 'note': 'Playlist url with clipid - noplaylist', 'url': 'http://tvpot.daum.net/mypot/View.do?playlistid=6213966&clipid=73806844', 'info_dict': { 'id': '73806844', 'ext': 'mp4', 'title': '151017 Airport', 'upload_date': '20160117', }, 'params': { 'noplaylist': True, 'skip_download': True, } }] @classmethod def suitable(cls, url): return False if DaumUserIE.suitable(url) else super(DaumPlaylistIE, cls).suitable(url) def _real_extract(self, url): list_id = self._match_id(url) clip_result = self._check_clip(url, list_id) if clip_result: return clip_result name, entries = self._get_entries(list_id, 'playlistid') return self.playlist_result(entries, list_id, name) class DaumUserIE(DaumListIE): _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/mypot/(?:View|Top)\.(?:do|tv)\?.*?ownerid=(?P<id>[0-9a-zA-Z]+)' IE_NAME = 'daum.net:user' _TESTS = [{ 
'url': 'http://tvpot.daum.net/mypot/View.do?ownerid=o2scDLIVbHc0', 'info_dict': { 'id': 'o2scDLIVbHc0', 'title': '마이 리틀 텔레비전', }, 'playlist_mincount': 213 }, { 'url': 'http://tvpot.daum.net/mypot/View.do?ownerid=o2scDLIVbHc0&clipid=73801156', 'info_dict': { 'id': '73801156', 'ext': 'mp4', 'title': '[미공개] 김구라, 오만석이 부릅니다 \'오케피\' - 마이 리틀 텔레비전 20160116', 'upload_date': '20160117', 'description': 'md5:5e91d2d6747f53575badd24bd62b9f36' }, 'params': { 'noplaylist': True, 'skip_download': True, } }, { 'note': 'Playlist url has ownerid and playlistid, playlistid takes precedence', 'url': 'http://tvpot.daum.net/mypot/View.do?ownerid=o2scDLIVbHc0&playlistid=6196631', 'info_dict': { 'id': '6196631', 'title': '마이 리틀 텔레비전 - 20160109', }, 'playlist_count': 11 }, { 'url': 'http://tvpot.daum.net/mypot/Top.do?ownerid=o2scDLIVbHc0', 'only_matching': True, }, { 'url': 'http://m.tvpot.daum.net/mypot/Top.tv?ownerid=45x1okb1If50&playlistid=3569733', 'only_matching': True, }] def _real_extract(self, url): list_id = self._match_id(url) clip_result = self._check_clip(url, list_id) if clip_result: return clip_result query_dict = compat_parse_qs(compat_urlparse.urlparse(url).query) if 'playlistid' in query_dict: playlist_id = query_dict['playlistid'][0] return self.url_result(DaumPlaylistIE._URL_TEMPLATE % playlist_id, 'DaumPlaylist') name, entries = self._get_entries(list_id, 'ownerid') return self.playlist_result(entries, list_id, name)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/teachingchannel.py
youtube_dl/extractor/teachingchannel.py
from __future__ import unicode_literals from .common import InfoExtractor class TeachingChannelIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?teachingchannel\.org/videos?/(?P<id>[^/?&#]+)' _TEST = { 'url': 'https://www.teachingchannel.org/videos/teacher-teaming-evolution', 'info_dict': { 'id': '3swwlzkT', 'ext': 'mp4', 'title': 'A History of Teaming', 'description': 'md5:2a9033db8da81f2edffa4c99888140b3', 'duration': 422, 'upload_date': '20170316', 'timestamp': 1489691297, }, 'params': { 'skip_download': True, }, 'add_ie': ['JWPlatform'], } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) mid = self._search_regex( r'(?:data-mid=["\']|id=["\']jw-video-player-)([a-zA-Z0-9]{8})', webpage, 'media id') return self.url_result('jwplatform:' + mid, 'JWPlatform', mid)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/gamestar.py
youtube_dl/extractor/gamestar.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( int_or_none, remove_end, ) class GameStarIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?game(?P<site>pro|star)\.de/videos/.*,(?P<id>[0-9]+)\.html' _TESTS = [{ 'url': 'http://www.gamestar.de/videos/trailer,3/hobbit-3-die-schlacht-der-fuenf-heere,76110.html', 'md5': 'ee782f1f8050448c95c5cacd63bc851c', 'info_dict': { 'id': '76110', 'ext': 'mp4', 'title': 'Hobbit 3: Die Schlacht der Fünf Heere - Teaser-Trailer zum dritten Teil', 'description': 'Der Teaser-Trailer zu Hobbit 3: Die Schlacht der Fünf Heere zeigt einige Szenen aus dem dritten Teil der Saga und kündigt den...', 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1406542380, 'upload_date': '20140728', 'duration': 17, } }, { 'url': 'http://www.gamepro.de/videos/top-10-indie-spiele-fuer-nintendo-switch-video-tolle-nindies-games-zum-download,95316.html', 'only_matching': True, }, { 'url': 'http://www.gamestar.de/videos/top-10-indie-spiele-fuer-nintendo-switch-video-tolle-nindies-games-zum-download,95316.html', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) site = mobj.group('site') video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) # TODO: there are multiple ld+json objects in the webpage, # while _search_json_ld finds only the first one json_ld = self._parse_json(self._search_regex( r'(?s)<script[^>]+type=(["\'])application/ld\+json\1[^>]*>(?P<json_ld>[^<]+VideoObject[^<]+)</script>', webpage, 'JSON-LD', group='json_ld'), video_id) info_dict = self._json_ld(json_ld, video_id) info_dict['title'] = remove_end( info_dict['title'], ' - Game%s' % site.title()) view_count = int_or_none(json_ld.get('interactionCount')) comment_count = int_or_none(self._html_search_regex( r'<span>Kommentare</span>\s*<span[^>]+class=["\']count[^>]+>\s*\(\s*([0-9]+)', webpage, 'comment count', fatal=False)) info_dict.update({ 'id': 
video_id, 'url': 'http://gamestar.de/_misc/videos/portal/getVideoUrl.cfm?premium=0&videoId=' + video_id, 'ext': 'mp4', 'view_count': view_count, 'comment_count': comment_count }) return info_dict
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/ndr.py
youtube_dl/extractor/ndr.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_urllib_parse_urlparse from ..utils import ( determine_ext, ExtractorError, int_or_none, merge_dicts, parse_iso8601, qualities, try_get, urljoin, ) class NDRBaseIE(InfoExtractor): def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) display_id = next(group for group in mobj.groups() if group) webpage = self._download_webpage(url, display_id) return self._extract_embed(webpage, display_id, url) class NDRIE(NDRBaseIE): IE_NAME = 'ndr' IE_DESC = 'NDR.de - Norddeutscher Rundfunk' _VALID_URL = r'https?://(?:\w+\.)*ndr\.de/(?:[^/]+/)*(?P<id>[^/?#]+),[\da-z]+\.html' _TESTS = [{ # httpVideo, same content id 'url': 'http://www.ndr.de/fernsehen/Party-Poette-und-Parade,hafengeburtstag988.html', 'md5': '6515bc255dc5c5f8c85bbc38e035a659', 'info_dict': { 'id': 'hafengeburtstag988', 'display_id': 'Party-Poette-und-Parade', 'ext': 'mp4', 'title': 'Party, Pötte und Parade', 'description': 'md5:ad14f9d2f91d3040b6930c697e5f6b4c', 'uploader': 'ndrtv', 'timestamp': 1431255671, 'upload_date': '20150510', 'duration': 3498, }, 'params': { 'skip_download': True, }, 'expected_warnings': ['Unable to download f4m manifest'], }, { # httpVideo, different content id 'url': 'http://www.ndr.de/sport/fussball/40-Osnabrueck-spielt-sich-in-einen-Rausch,osna270.html', 'md5': '1043ff203eab307f0c51702ec49e9a71', 'info_dict': { 'id': 'osna272', 'display_id': '40-Osnabrueck-spielt-sich-in-einen-Rausch', 'ext': 'mp4', 'title': 'Osnabrück - Wehen Wiesbaden: Die Highlights', 'description': 'md5:32e9b800b3d2d4008103752682d5dc01', 'uploader': 'ndrtv', 'timestamp': 1442059200, 'upload_date': '20150912', 'duration': 510, }, 'params': { 'skip_download': True, }, 'skip': 'No longer available', }, { # httpAudio, same content id 'url': 'http://www.ndr.de/info/La-Valette-entgeht-der-Hinrichtung,audio51535.html', 'md5': 'bb3cd38e24fbcc866d13b50ca59307b8', 'info_dict': { 'id': 
'audio51535', 'display_id': 'La-Valette-entgeht-der-Hinrichtung', 'ext': 'mp3', 'title': 'La Valette entgeht der Hinrichtung', 'description': 'md5:22f9541913a40fe50091d5cdd7c9f536', 'uploader': 'ndrinfo', 'timestamp': 1631711863, 'upload_date': '20210915', 'duration': 884, }, 'params': { 'skip_download': True, }, }, { # with subtitles 'url': 'https://www.ndr.de/fernsehen/sendungen/extra_3/extra-3-Satiremagazin-mit-Christian-Ehring,sendung1091858.html', 'info_dict': { 'id': 'extra18674', 'display_id': 'extra-3-Satiremagazin-mit-Christian-Ehring', 'ext': 'mp4', 'title': 'Extra 3 vom 11.11.2020 mit Christian Ehring', 'description': 'md5:700f6de264010585012a72f97b0ac0c9', 'uploader': 'ndrtv', 'upload_date': '20201207', 'timestamp': 1614349457, 'duration': 1749, 'subtitles': { 'de': [{ 'ext': 'ttml', 'url': r're:^https://www\.ndr\.de.+', }], }, }, 'params': { 'skip_download': True, }, 'expected_warnings': ['Unable to download f4m manifest'], }, { 'url': 'https://www.ndr.de/Fettes-Brot-Ferris-MC-und-Thees-Uhlmann-live-on-stage,festivalsommer116.html', 'only_matching': True, }] def _extract_embed(self, webpage, display_id, url): embed_url = ( self._html_search_meta( 'embedURL', webpage, 'embed URL', default=None) or self._search_regex( r'\bembedUrl["\']\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 'embed URL', group='url', default=None) or self._search_regex( r'\bvar\s*sophoraID\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 'embed URL', group='url', default='')) # some more work needed if we only found sophoraID if re.match(r'^[a-z]+\d+$', embed_url): # get the initial part of the url path,. eg /panorama/archiv/2022/ parsed_url = compat_urllib_parse_urlparse(url) path = self._search_regex(r'(.+/)%s' % display_id, parsed_url.path or '', 'embed URL', default='') # find tell-tale image with the actual ID ndr_id = self._search_regex(r'%s([a-z]+\d+)(?!\.)\b' % (path, ), webpage, 'embed URL', default=None) # or try to use special knowledge! 
NDR_INFO_URL_TPL = 'https://www.ndr.de/info/%s-player.html' embed_url = 'ndr:%s' % (ndr_id, ) if ndr_id else NDR_INFO_URL_TPL % (embed_url, ) if not embed_url: raise ExtractorError('Unable to extract embedUrl') description = self._search_regex( r'<p[^>]+itemprop="description">([^<]+)</p>', webpage, 'description', default=None) or self._og_search_description(webpage) timestamp = parse_iso8601( self._search_regex( (r'<span[^>]+itemprop="(?:datePublished|uploadDate)"[^>]+content="(?P<cont>[^"]+)"', r'\bvar\s*pdt\s*=\s*(?P<q>["\'])(?P<cont>(?:(?!(?P=q)).)+)(?P=q)', ), webpage, 'upload date', group='cont', default=None)) info = self._search_json_ld(webpage, display_id, default={}) return merge_dicts({ '_type': 'url_transparent', 'url': embed_url, 'display_id': display_id, 'description': description, 'timestamp': timestamp, }, info) class NJoyIE(NDRBaseIE): IE_NAME = 'njoy' IE_DESC = 'N-JOY' _VALID_URL = r'https?://(?:www\.)?n-joy\.de/(?:[^/]+/)*(?:(?P<display_id>[^/?#]+),)?(?P<id>[\da-z]+)\.html' _TESTS = [{ # httpVideo, same content id 'url': 'http://www.n-joy.de/entertainment/comedy/comedy_contest/Benaissa-beim-NDR-Comedy-Contest,comedycontest2480.html', 'md5': 'cb63be60cd6f9dd75218803146d8dc67', 'info_dict': { 'id': 'comedycontest2480', 'display_id': 'Benaissa-beim-NDR-Comedy-Contest', 'ext': 'mp4', 'title': 'Benaissa beim NDR Comedy Contest', 'description': 'md5:f057a6c4e1c728b10d33b5ffd36ddc39', 'uploader': 'ndrtv', 'upload_date': '20141129', 'duration': 654, }, 'params': { 'skip_download': True, }, 'skip': 'No longer available', }, { # httpVideo, different content id 'url': 'http://www.n-joy.de/musik/Das-frueheste-DJ-Set-des-Nordens-live-mit-Felix-Jaehn-,felixjaehn168.html', 'md5': '417660fffa90e6df2fda19f1b40a64d8', 'info_dict': { 'id': 'livestream283', 'display_id': 'Das-frueheste-DJ-Set-des-Nordens-live-mit-Felix-Jaehn-', 'ext': 'mp3', 'title': 'Das frueheste DJ Set des Nordens live mit Felix Jaehn', 'description': 'md5:681698f527b8601e511e7b79edde7d2c', 
'uploader': 'njoy', 'upload_date': '20210830', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.n-joy.de/radio/webradio/morningshow209.html', 'only_matching': True, }] def _extract_embed(self, webpage, display_id, url=None): # find tell-tale URL with the actual ID, or ... video_id = self._search_regex( (r'''\bsrc\s*=\s*["']?(?:/\w+)+/([a-z]+\d+)(?!\.)\b''', r'<iframe[^>]+id="pp_([\da-z]+)"', ), webpage, 'NDR id', default=None) description = ( self._html_search_meta('description', webpage) or self._search_regex( r'<div[^>]+class="subline"[^>]*>[^<]+</div>\s*<p>([^<]+)</p>', webpage, 'description', fatal=False)) return { '_type': 'url_transparent', 'ie_key': 'NDREmbedBase', 'url': 'ndr:%s' % video_id, 'display_id': display_id, 'description': description, 'title': display_id.replace('-', ' ').strip(), } class NDREmbedBaseIE(InfoExtractor): IE_NAME = 'ndr:embed:base' _VALID_URL = r'(?:ndr:(?P<id_s>[\da-z]+)|https?://www\.ndr\.de/(?P<id>[\da-z]+)-ppjson\.json)' _TESTS = [{ 'url': 'ndr:soundcheck3366', 'only_matching': True, }, { 'url': 'http://www.ndr.de/soundcheck3366-ppjson.json', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') or mobj.group('id_s') ppjson = self._download_json( 'http://www.ndr.de/%s-ppjson.json' % video_id, video_id) playlist = ppjson['playlist'] formats = [] quality_key = qualities(('xs', 's', 'm', 'l', 'xl')) for format_id, f in playlist.items(): src = f.get('src') if not src: continue ext = determine_ext(src, None) if ext == 'f4m': formats.extend(self._extract_f4m_formats( src + '?hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id, f4m_id='hds', fatal=False)) elif ext == 'm3u8': formats.extend(self._extract_m3u8_formats( src, video_id, 'mp4', m3u8_id='hls', entry_protocol='m3u8_native', fatal=False)) else: quality = f.get('quality') ff = { 'url': src, 'format_id': quality or format_id, 'quality': quality_key(quality), } type_ = f.get('type') if type_ and 
type_.split('/')[0] == 'audio': ff['vcodec'] = 'none' ff['ext'] = ext or 'mp3' formats.append(ff) self._sort_formats(formats) config = playlist['config'] live = playlist.get('config', {}).get('streamType') in ['httpVideoLive', 'httpAudioLive'] title = config['title'] if live: title = self._live_title(title) uploader = ppjson.get('config', {}).get('branding') upload_date = ppjson.get('config', {}).get('publicationDate') duration = int_or_none(config.get('duration')) thumbnails = [] poster = try_get(config, lambda x: x['poster'], dict) or {} for thumbnail_id, thumbnail in poster.items(): thumbnail_url = urljoin(url, thumbnail.get('src')) if not thumbnail_url: continue thumbnails.append({ 'id': thumbnail.get('quality') or thumbnail_id, 'url': thumbnail_url, 'preference': quality_key(thumbnail.get('quality')), }) subtitles = {} tracks = config.get('tracks') if tracks and isinstance(tracks, list): for track in tracks: if not isinstance(track, dict): continue track_url = urljoin(url, track.get('src')) if not track_url: continue subtitles.setdefault(track.get('srclang') or 'de', []).append({ 'url': track_url, 'ext': 'ttml', }) return { 'id': video_id, 'title': title, 'is_live': live, 'uploader': uploader if uploader != '-' else None, 'upload_date': upload_date[0:8] if upload_date else None, 'duration': duration, 'thumbnails': thumbnails, 'formats': formats, 'subtitles': subtitles, } class NDREmbedIE(NDREmbedBaseIE): IE_NAME = 'ndr:embed' _VALID_URL = r'https?://(?:\w+\.)*ndr\.de/(?:[^/]+/)*(?P<id>[\da-z]+)-(?:(?:ard)?player|externalPlayer)\.html' _TESTS = [{ 'url': 'http://www.ndr.de/fernsehen/sendungen/ndr_aktuell/ndraktuell28488-player.html', 'md5': '8b9306142fe65bbdefb5ce24edb6b0a9', 'info_dict': { 'id': 'ndraktuell28488', 'ext': 'mp4', 'title': 'Norddeutschland begrüßt Flüchtlinge', 'is_live': False, 'uploader': 'ndrtv', 'upload_date': '20150907', 'duration': 132, }, 'skip': 'No longer available', }, { 'url': 
'http://www.ndr.de/ndr2/events/soundcheck/soundcheck3366-player.html', 'md5': '002085c44bae38802d94ae5802a36e78', 'info_dict': { 'id': 'soundcheck3366', 'ext': 'mp4', 'title': 'Ella Henderson braucht Vergleiche nicht zu scheuen', 'is_live': False, 'uploader': 'ndr2', 'upload_date': '20150912', 'duration': 3554, }, 'params': { 'skip_download': True, }, 'skip': 'No longer available', }, { 'url': 'http://www.ndr.de/info/audio51535-player.html', 'md5': 'bb3cd38e24fbcc866d13b50ca59307b8', 'info_dict': { 'id': 'audio51535', 'ext': 'mp3', 'title': 'La Valette entgeht der Hinrichtung', 'is_live': False, 'uploader': 'ndrinfo', 'upload_date': '20210915', 'duration': 884, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.ndr.de/fernsehen/sendungen/visite/visite11010-externalPlayer.html', 'md5': 'ae57f80511c1e1f2fd0d0d3d31aeae7c', 'info_dict': { 'id': 'visite11010', 'ext': 'mp4', 'title': 'Visite - die ganze Sendung', 'is_live': False, 'uploader': 'ndrtv', 'upload_date': '20150902', 'duration': 3525, }, 'params': { 'skip_download': True, }, 'skip': 'No longer available', }, { # httpVideoLive 'url': 'http://www.ndr.de/fernsehen/livestream/livestream217-externalPlayer.html', 'info_dict': { 'id': 'livestream217', 'ext': 'mp4', 'title': r're:^NDR Fernsehen Niedersachsen \d{4}-\d{2}-\d{2} \d{2}:\d{2}$', 'is_live': True, 'upload_date': '20210409', 'uploader': 'ndrtv', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.ndr.de/ndrkultur/audio255020-player.html', 'only_matching': True, }, { 'url': 'http://www.ndr.de/fernsehen/sendungen/nordtour/nordtour7124-player.html', 'only_matching': True, }, { 'url': 'http://www.ndr.de/kultur/film/videos/videoimport10424-player.html', 'only_matching': True, }, { 'url': 'http://www.ndr.de/fernsehen/sendungen/hamburg_journal/hamj43006-player.html', 'only_matching': True, }, { 'url': 'http://www.ndr.de/fernsehen/sendungen/weltbilder/weltbilder4518-player.html', 'only_matching': True, }, { 'url': 
'http://www.ndr.de/fernsehen/doku952-player.html', 'only_matching': True, }] class NJoyEmbedIE(NDREmbedBaseIE): IE_NAME = 'njoy:embed' _VALID_URL = r'https?://(?:www\.)?n-joy\.de/(?:[^/]+/)*(?P<id>[\da-z]+)-(?:player|externalPlayer)_[^/]+\.html' _TESTS = [{ # httpVideo 'url': 'http://www.n-joy.de/events/reeperbahnfestival/doku948-player_image-bc168e87-5263-4d6d-bd27-bb643005a6de_theme-n-joy.html', 'md5': '8483cbfe2320bd4d28a349d62d88bd74', 'info_dict': { 'id': 'doku948', 'ext': 'mp4', 'title': 'Zehn Jahre Reeperbahn Festival - die Doku', 'is_live': False, 'upload_date': '20200826', 'duration': 1011, }, 'expected_warnings': ['Unable to download f4m manifest'], }, { # httpAudio 'url': 'http://www.n-joy.de/news_wissen/stefanrichter100-player_image-d5e938b1-f21a-4b9a-86b8-aaba8bca3a13_theme-n-joy.html', 'md5': 'd989f80f28ac954430f7b8a48197188a', 'info_dict': { 'id': 'stefanrichter100', 'ext': 'mp3', 'title': 'Interview mit einem Augenzeugen', 'is_live': False, 'uploader': 'njoy', 'upload_date': '20150909', 'duration': 140, }, 'params': { 'skip_download': True, }, 'skip': 'No longer available', }, { # httpAudioLive, no explicit ext 'url': 'http://www.n-joy.de/news_wissen/webradioweltweit100-player_image-3fec0484-2244-4565-8fb8-ed25fd28b173_theme-n-joy.html', 'info_dict': { 'id': 'webradioweltweit100', 'ext': 'mp3', 'title': r're:^N-JOY Weltweit \d{4}-\d{2}-\d{2} \d{2}:\d{2}$', 'is_live': True, 'uploader': 'njoy', 'upload_date': '20210830', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.n-joy.de/musik/dockville882-player_image-3905259e-0803-4764-ac72-8b7de077d80a_theme-n-joy.html', 'only_matching': True, }, { 'url': 'http://www.n-joy.de/radio/sendungen/morningshow/urlaubsfotos190-player_image-066a5df1-5c95-49ec-a323-941d848718db_theme-n-joy.html', 'only_matching': True, }, { 'url': 'http://www.n-joy.de/entertainment/comedy/krudetv290-player_image-ab261bfe-51bf-4bf3-87ba-c5122ee35b3d_theme-n-joy.html', 'only_matching': True, }]
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/bild.py
youtube_dl/extractor/bild.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( int_or_none, unescapeHTML, ) class BildIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?bild\.de/(?:[^/]+/)+(?P<display_id>[^/]+)-(?P<id>\d+)(?:,auto=true)?\.bild\.html' IE_DESC = 'Bild.de' _TEST = { 'url': 'http://www.bild.de/video/clip/apple-ipad-air/das-koennen-die-neuen-ipads-38184146.bild.html', 'md5': 'dd495cbd99f2413502a1713a1156ac8a', 'info_dict': { 'id': '38184146', 'ext': 'mp4', 'title': 'Das können die neuen iPads', 'description': 'md5:a4058c4fa2a804ab59c00d7244bbf62f', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 196, } } def _real_extract(self, url): video_id = self._match_id(url) video_data = self._download_json( url.split('.bild.html')[0] + ',view=json.bild.html', video_id) return { 'id': video_id, 'title': unescapeHTML(video_data['title']).strip(), 'description': unescapeHTML(video_data.get('description')), 'url': video_data['clipList'][0]['srces'][0]['src'], 'thumbnail': video_data.get('poster'), 'duration': int_or_none(video_data.get('durationSec')), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/eyedotv.py
youtube_dl/extractor/eyedotv.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( xpath_text, parse_duration, ExtractorError, ) class EyedoTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?eyedo\.tv/[^/]+/(?:#!/)?Live/Detail/(?P<id>[0-9]+)' _TEST = { 'url': 'https://www.eyedo.tv/en-US/#!/Live/Detail/16301', 'md5': 'ba14f17995cdfc20c36ba40e21bf73f7', 'info_dict': { 'id': '16301', 'ext': 'mp4', 'title': 'Journée du conseil scientifique de l\'Afnic 2015', 'description': 'md5:4abe07293b2f73efc6e1c37028d58c98', 'uploader': 'Afnic Live', 'uploader_id': '8023', } } _ROOT_URL = 'http://live.eyedo.net:1935/' def _real_extract(self, url): video_id = self._match_id(url) video_data = self._download_xml('http://eyedo.tv/api/live/GetLive/%s' % video_id, video_id) def _add_ns(path): return self._xpath_ns(path, 'http://schemas.datacontract.org/2004/07/EyeDo.Core.Implementation.Web.ViewModels.Api') title = xpath_text(video_data, _add_ns('Titre'), 'title', True) state_live_code = xpath_text(video_data, _add_ns('StateLiveCode'), 'title', True) if state_live_code == 'avenir': raise ExtractorError( '%s said: We\'re sorry, but this video is not yet available.' 
% self.IE_NAME, expected=True) is_live = state_live_code == 'live' m3u8_url = None # http://eyedo.tv/Content/Html5/Scripts/html5view.js if is_live: if xpath_text(video_data, 'Cdn') == 'true': m3u8_url = 'http://rrr.sz.xlcdn.com/?account=eyedo&file=A%s&type=live&service=wowza&protocol=http&output=playlist.m3u8' % video_id else: m3u8_url = self._ROOT_URL + 'w/%s/eyedo_720p/playlist.m3u8' % video_id else: m3u8_url = self._ROOT_URL + 'replay-w/%s/mp4:%s.mp4/playlist.m3u8' % (video_id, video_id) return { 'id': video_id, 'title': title, 'formats': self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', 'm3u8_native'), 'description': xpath_text(video_data, _add_ns('Description')), 'duration': parse_duration(xpath_text(video_data, _add_ns('Duration'))), 'uploader': xpath_text(video_data, _add_ns('Createur')), 'uploader_id': xpath_text(video_data, _add_ns('CreateurId')), 'chapter': xpath_text(video_data, _add_ns('ChapitreTitre')), 'chapter_id': xpath_text(video_data, _add_ns('ChapitreId')), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/streamcz.py
youtube_dl/extractor/streamcz.py
# coding: utf-8 from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..utils import ( float_or_none, int_or_none, merge_dicts, parse_codecs, urljoin, ) class StreamCZIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?(?:stream|televizeseznam)\.cz/[^?#]+/(?P<display_id>[^?#]+)-(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://www.televizeseznam.cz/video/lajna/buh-57953890', 'md5': '40c41ade1464a390a0b447e333df4239', 'info_dict': { 'id': '57953890', 'ext': 'mp4', 'title': 'Bůh', 'display_id': 'buh', 'description': 'md5:8f5f09b9b7bc67df910486cdd88f7165', 'duration': 1369.6, 'view_count': int, } }, { 'url': 'https://www.stream.cz/kdo-to-mluvi/kdo-to-mluvi-velke-odhaleni-prinasi-novy-porad-uz-od-25-srpna-64087937', 'md5': '41fd358000086a1ccdb068c77809b158', 'info_dict': { 'id': '64087937', 'ext': 'mp4', 'title': 'Kdo to mluví? Velké odhalení přináší nový pořad už od 25. srpna', 'display_id': 'kdo-to-mluvi-velke-odhaleni-prinasi-novy-porad-uz-od-25-srpna', 'description': 'md5:97a811000a6460266029d6c1c2ebcd59', 'duration': 50.2, 'view_count': int, } }, { 'url': 'https://www.stream.cz/tajemno/znicehonic-jim-skrz-strechu-prolitnul-zahadny-predmet-badatele-vse-objasnili-64147267', 'md5': '3ee4d0be040e8f4a543e67e509d55e3f', 'info_dict': { 'id': '64147267', 'ext': 'mp4', 'title': 'Zničehonic jim skrz střechu prolítnul záhadný předmět. 
Badatelé vše objasnili', 'display_id': 'znicehonic-jim-skrz-strechu-prolitnul-zahadny-predmet-badatele-vse-objasnili', 'description': 'md5:4b8ada6718d34bb011c4e04ca4bc19bf', 'duration': 442.84, 'view_count': int, } }] def _extract_formats(self, spl_url, video): for ext, pref, streams in ( ('ts', -1, video.get('http_stream', {}).get('qualities', {})), ('mp4', 1, video.get('mp4'))): for format_id, stream in streams.items(): if not stream.get('url'): continue yield merge_dicts({ 'format_id': '-'.join((format_id, ext)), 'ext': ext, 'source_preference': pref, 'url': urljoin(spl_url, stream['url']), 'tbr': float_or_none(stream.get('bandwidth'), scale=1000), 'duration': float_or_none(stream.get('duration'), scale=1000), 'width': stream.get('resolution', 2 * [0])[0] or None, 'height': stream.get('resolution', 2 * [0])[1] or int_or_none(format_id.replace('p', '')), }, parse_codecs(stream.get('codec'))) def _real_extract(self, url): display_id, video_id = re.match(self._VALID_URL, url).groups() data = self._download_json( 'https://www.televizeseznam.cz/api/graphql', video_id, 'Downloading GraphQL result', data=json.dumps({ 'variables': {'urlName': video_id}, 'query': ''' query LoadEpisode($urlName : String){ episode(urlName: $urlName){ ...VideoDetailFragmentOnEpisode } } fragment VideoDetailFragmentOnEpisode on Episode { id spl urlName name perex duration views }''' }).encode('utf-8'), headers={'Content-Type': 'application/json;charset=UTF-8'} )['data']['episode'] spl_url = data['spl'] + 'spl2,3' metadata = self._download_json(spl_url, video_id, 'Downloading playlist') if 'Location' in metadata and 'data' not in metadata: spl_url = metadata['Location'] metadata = self._download_json(spl_url, video_id, 'Downloading redirected playlist') video = metadata['data'] subtitles = {} for subs in video.get('subtitles', {}).values(): if not subs.get('language'): continue for ext, sub_url in subs.get('urls').items(): subtitles.setdefault(subs['language'], []).append({ 'ext': ext, 'url': 
urljoin(spl_url, sub_url) }) formats = list(self._extract_formats(spl_url, video)) self._sort_formats(formats) return { 'id': video_id, 'display_id': display_id, 'title': data.get('name'), 'description': data.get('perex'), 'duration': float_or_none(data.get('duration')), 'view_count': int_or_none(data.get('views')), 'formats': formats, 'subtitles': subtitles, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/slutload.py
youtube_dl/extractor/slutload.py
from __future__ import unicode_literals from .common import InfoExtractor class SlutloadIE(InfoExtractor): _VALID_URL = r'https?://(?:\w+\.)?slutload\.com/(?:video/[^/]+|embed_player|watch)/(?P<id>[^/]+)' _TESTS = [{ 'url': 'http://www.slutload.com/video/virginie-baisee-en-cam/TD73btpBqSxc/', 'md5': '868309628ba00fd488cf516a113fd717', 'info_dict': { 'id': 'TD73btpBqSxc', 'ext': 'mp4', 'title': 'virginie baisee en cam', 'age_limit': 18, 'thumbnail': r're:https?://.*?\.jpg' }, }, { # mobile site 'url': 'http://mobile.slutload.com/video/masturbation-solo/fviFLmc6kzJ/', 'only_matching': True, }, { 'url': 'http://www.slutload.com/embed_player/TD73btpBqSxc/', 'only_matching': True, }, { 'url': 'http://www.slutload.com/watch/TD73btpBqSxc/Virginie-Baisee-En-Cam.html', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) embed_page = self._download_webpage( 'http://www.slutload.com/embed_player/%s' % video_id, video_id, 'Downloading embed page', fatal=False) if embed_page: def extract(what): return self._html_search_regex( r'data-video-%s=(["\'])(?P<url>(?:(?!\1).)+)\1' % what, embed_page, 'video %s' % what, default=None, group='url') video_url = extract('url') if video_url: title = self._html_search_regex( r'<title>([^<]+)', embed_page, 'title', default=video_id) return { 'id': video_id, 'url': video_url, 'title': title, 'thumbnail': extract('preview'), 'age_limit': 18 } webpage = self._download_webpage( 'http://www.slutload.com/video/_/%s/' % video_id, video_id) title = self._html_search_regex( r'<h1><strong>([^<]+)</strong>', webpage, 'title').strip() info = self._parse_html5_media_entries(url, webpage, video_id)[0] info.update({ 'id': video_id, 'title': title, 'age_limit': 18, }) return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/teletask.py
youtube_dl/extractor/teletask.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import unified_strdate class TeleTaskIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tele-task\.de/archive/video/html5/(?P<id>[0-9]+)' _TEST = { 'url': 'http://www.tele-task.de/archive/video/html5/26168/', 'info_dict': { 'id': '26168', 'title': 'Duplicate Detection', }, 'playlist': [{ 'md5': '290ef69fb2792e481169c3958dbfbd57', 'info_dict': { 'id': '26168-speaker', 'ext': 'mp4', 'title': 'Duplicate Detection', 'upload_date': '20141218', } }, { 'md5': 'e1e7218c5f0e4790015a437fcf6c71b4', 'info_dict': { 'id': '26168-slides', 'ext': 'mp4', 'title': 'Duplicate Detection', 'upload_date': '20141218', } }] } def _real_extract(self, url): lecture_id = self._match_id(url) webpage = self._download_webpage(url, lecture_id) title = self._html_search_regex( r'itemprop="name">([^<]+)</a>', webpage, 'title') upload_date = unified_strdate(self._html_search_regex( r'Date:</td><td>([^<]+)</td>', webpage, 'date', fatal=False)) entries = [{ 'id': '%s-%s' % (lecture_id, format_id), 'url': video_url, 'title': title, 'upload_date': upload_date, } for format_id, video_url in re.findall( r'<video class="([^"]+)"[^>]*>\s*<source src="([^"]+)"', webpage)] return self.playlist_result(entries, lecture_id, title)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/cultureunplugged.py
youtube_dl/extractor/cultureunplugged.py
from __future__ import unicode_literals import re import time from .common import InfoExtractor from ..utils import ( int_or_none, HEADRequest, ) class CultureUnpluggedIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?cultureunplugged\.com/documentary/watch-online/play/(?P<id>\d+)(?:/(?P<display_id>[^/]+))?' _TESTS = [{ 'url': 'http://www.cultureunplugged.com/documentary/watch-online/play/53662/The-Next--Best-West', 'md5': 'ac6c093b089f7d05e79934dcb3d228fc', 'info_dict': { 'id': '53662', 'display_id': 'The-Next--Best-West', 'ext': 'mp4', 'title': 'The Next, Best West', 'description': 'md5:0423cd00833dea1519cf014e9d0903b1', 'thumbnail': r're:^https?://.*\.jpg$', 'creator': 'Coldstream Creative', 'duration': 2203, 'view_count': int, } }, { 'url': 'http://www.cultureunplugged.com/documentary/watch-online/play/53662', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') display_id = mobj.group('display_id') or video_id # request setClientTimezone.php to get PHPSESSID cookie which is need to get valid json data in the next request self._request_webpage(HEADRequest( 'http://www.cultureunplugged.com/setClientTimezone.php?timeOffset=%d' % -(time.timezone / 3600)), display_id) movie_data = self._download_json( 'http://www.cultureunplugged.com/movie-data/cu-%s.json' % video_id, display_id) video_url = movie_data['url'] title = movie_data['title'] description = movie_data.get('synopsis') creator = movie_data.get('producer') duration = int_or_none(movie_data.get('duration')) view_count = int_or_none(movie_data.get('views')) thumbnails = [{ 'url': movie_data['%s_thumb' % size], 'id': size, 'preference': preference, } for preference, size in enumerate(( 'small', 'large')) if movie_data.get('%s_thumb' % size)] return { 'id': video_id, 'display_id': display_id, 'url': video_url, 'title': title, 'description': description, 'creator': creator, 'duration': duration, 'view_count': view_count, 'thumbnails': 
thumbnails, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/comedycentral.py
youtube_dl/extractor/comedycentral.py
from __future__ import unicode_literals from .mtv import MTVServicesInfoExtractor class ComedyCentralIE(MTVServicesInfoExtractor): _VALID_URL = r'https?://(?:www\.)?cc\.com/(?:episodes|video(?:-clips)?)/(?P<id>[0-9a-z]{6})' _FEED_URL = 'http://comedycentral.com/feeds/mrss/' _TESTS = [{ 'url': 'http://www.cc.com/video-clips/5ke9v2/the-daily-show-with-trevor-noah-doc-rivers-and-steve-ballmer---the-nba-player-strike', 'md5': 'b8acb347177c680ff18a292aa2166f80', 'info_dict': { 'id': '89ccc86e-1b02-4f83-b0c9-1d9592ecd025', 'ext': 'mp4', 'title': 'The Daily Show with Trevor Noah|August 28, 2020|25|25149|Doc Rivers and Steve Ballmer - The NBA Player Strike', 'description': 'md5:5334307c433892b85f4f5e5ac9ef7498', 'timestamp': 1598670000, 'upload_date': '20200829', }, }, { 'url': 'http://www.cc.com/episodes/pnzzci/drawn-together--american-idol--parody-clip-show-season-3-ep-314', 'only_matching': True, }, { 'url': 'https://www.cc.com/video/k3sdvm/the-daily-show-with-jon-stewart-exclusive-the-fourth-estate', 'only_matching': True, }] class ComedyCentralTVIE(MTVServicesInfoExtractor): _VALID_URL = r'https?://(?:www\.)?comedycentral\.tv/folgen/(?P<id>[0-9a-z]{6})' _TESTS = [{ 'url': 'https://www.comedycentral.tv/folgen/pxdpec/josh-investigates-klimawandel-staffel-1-ep-1', 'info_dict': { 'id': '15907dc3-ec3c-11e8-a442-0e40cf2fc285', 'ext': 'mp4', 'title': 'Josh Investigates', 'description': 'Steht uns das Ende der Welt bevor?', }, }] _FEED_URL = 'http://feeds.mtvnservices.com/od/feed/intl-mrss-player-feed' _GEO_COUNTRIES = ['DE'] def _get_feed_query(self, uri): return { 'accountOverride': 'intl.mtvi.com', 'arcEp': 'web.cc.tv', 'ep': 'b9032c3a', 'imageEp': 'web.cc.tv', 'mgid': uri, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/mgoon.py
youtube_dl/extractor/mgoon.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, qualities, unified_strdate, ) class MgoonIE(InfoExtractor): _VALID_URL = r'''(?x)https?://(?:www\.)? (?:(:?m\.)?mgoon\.com/(?:ch/(?:.+)/v|play/view)| video\.mgoon\.com)/(?P<id>[0-9]+)''' _API_URL = 'http://mpos.mgoon.com/player/video?id={0:}' _TESTS = [ { 'url': 'http://m.mgoon.com/ch/hi6618/v/5582148', 'md5': 'dd46bb66ab35cf6d51cc812fd82da79d', 'info_dict': { 'id': '5582148', 'uploader_id': 'hi6618', 'duration': 240.419, 'upload_date': '20131220', 'ext': 'mp4', 'title': 'md5:543aa4c27a4931d371c3f433e8cebebc', 'thumbnail': r're:^https?://.*\.jpg$', } }, { 'url': 'http://www.mgoon.com/play/view/5582148', 'only_matching': True, }, { 'url': 'http://video.mgoon.com/5582148', 'only_matching': True, }, ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') data = self._download_json(self._API_URL.format(video_id), video_id) if data.get('errorInfo', {}).get('code') != 'NONE': raise ExtractorError('%s encountered an error: %s' % ( self.IE_NAME, data['errorInfo']['message']), expected=True) v_info = data['videoInfo'] title = v_info.get('v_title') thumbnail = v_info.get('v_thumbnail') duration = v_info.get('v_duration') upload_date = unified_strdate(v_info.get('v_reg_date')) uploader_id = data.get('userInfo', {}).get('u_alias') if duration: duration /= 1000.0 age_limit = None if data.get('accessInfo', {}).get('code') == 'VIDEO_STATUS_ADULT': age_limit = 18 formats = [] get_quality = qualities(['360p', '480p', '720p', '1080p']) for fmt in data['videoFiles']: formats.append({ 'format_id': fmt['label'], 'quality': get_quality(fmt['label']), 'url': fmt['url'], 'ext': fmt['format'], }) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'formats': formats, 'thumbnail': thumbnail, 'duration': duration, 'upload_date': upload_date, 'uploader_id': uploader_id, 'age_limit': age_limit, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/ted.py
youtube_dl/extractor/ted.py
from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..compat import ( compat_str, compat_urlparse ) from ..utils import ( extract_attributes, float_or_none, int_or_none, try_get, url_or_none, ) class TEDIE(InfoExtractor): IE_NAME = 'ted' _VALID_URL = r'''(?x) (?P<proto>https?://) (?P<type>www|embed(?:-ssl)?)(?P<urlmain>\.ted\.com/ ( (?P<type_playlist>playlists(?:/(?P<playlist_id>\d+))?) # We have a playlist | ((?P<type_talk>talks)) # We have a simple talk | (?P<type_watch>watch)/[^/]+/[^/]+ ) (/lang/(.*?))? # The url may contain the language /(?P<name>[\w-]+) # Here goes the name and then ".html" .*)$ ''' _TESTS = [{ 'url': 'http://www.ted.com/talks/dan_dennett_on_our_consciousness.html', 'md5': 'b0ce2b05ca215042124fbc9e3886493a', 'info_dict': { 'id': '102', 'ext': 'mp4', 'title': 'The illusion of consciousness', 'description': ('Philosopher Dan Dennett makes a compelling ' 'argument that not only don\'t we understand our own ' 'consciousness, but that half the time our brains are ' 'actively fooling us.'), 'uploader': 'Dan Dennett', 'width': 853, 'duration': 1308, 'view_count': int, 'comment_count': int, 'tags': list, }, 'params': { 'skip_download': True, }, }, { # missing HTTP bitrates 'url': 'https://www.ted.com/talks/vishal_sikka_the_beauty_and_power_of_algorithms', 'info_dict': { 'id': '6069', 'ext': 'mp4', 'title': 'The beauty and power of algorithms', 'thumbnail': r're:^https?://.+\.jpg', 'description': 'md5:734e352710fb00d840ab87ae31aaf688', 'uploader': 'Vishal Sikka', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.ted.com/talks/gabby_giffords_and_mark_kelly_be_passionate_be_courageous_be_your_best', 'md5': 'e6b9617c01a7970ceac8bb2c92c346c0', 'info_dict': { 'id': '1972', 'ext': 'mp4', 'title': 'Be passionate. Be courageous. 
Be your best.', 'uploader': 'Gabby Giffords and Mark Kelly', 'description': 'md5:5174aed4d0f16021b704120360f72b92', 'duration': 1128, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.ted.com/playlists/who_are_the_hackers', 'info_dict': { 'id': '10', 'title': 'Who are the hackers?', 'description': 'md5:49a0dbe8fb76d81a0e64b4a80af7f15a' }, 'playlist_mincount': 6, }, { # contains a youtube video 'url': 'https://www.ted.com/talks/douglas_adams_parrots_the_universe_and_everything', 'add_ie': ['Youtube'], 'info_dict': { 'id': '_ZG8HBuDjgc', 'ext': 'webm', 'title': 'Douglas Adams: Parrots the Universe and Everything', 'description': 'md5:01ad1e199c49ac640cb1196c0e9016af', 'uploader': 'University of California Television (UCTV)', 'uploader_id': 'UCtelevision', 'upload_date': '20080522', }, 'params': { 'skip_download': True, }, }, { # no nativeDownloads 'url': 'https://www.ted.com/talks/tom_thum_the_orchestra_in_my_mouth', 'info_dict': { 'id': '1792', 'ext': 'mp4', 'title': 'The orchestra in my mouth', 'description': 'md5:5d1d78650e2f8dfcbb8ebee2951ac29a', 'uploader': 'Tom Thum', 'view_count': int, 'comment_count': int, 'tags': list, }, 'params': { 'skip_download': True, }, }, { # with own formats and private Youtube external 'url': 'https://www.ted.com/talks/spencer_wells_a_family_tree_for_humanity', 'only_matching': True, }] _NATIVE_FORMATS = { 'low': {'width': 320, 'height': 180}, 'medium': {'width': 512, 'height': 288}, 'high': {'width': 854, 'height': 480}, } def _extract_info(self, webpage): info_json = self._search_regex( r'(?s)q\(\s*"\w+.init"\s*,\s*({.+?})\)\s*</script>', webpage, 'info json') return json.loads(info_json) def _real_extract(self, url): m = re.match(self._VALID_URL, url, re.VERBOSE) if m.group('type').startswith('embed'): desktop_url = m.group('proto') + 'www' + m.group('urlmain') return self.url_result(desktop_url, 'TED') name = m.group('name') if m.group('type_talk'): return self._talk_info(url, name) elif m.group('type_watch'): 
return self._watch_info(url, name) else: return self._playlist_videos_info(url, name) def _playlist_videos_info(self, url, name): '''Returns the videos of the playlist''' webpage = self._download_webpage(url, name, 'Downloading playlist webpage') playlist_entries = [] for entry in re.findall(r'(?s)<[^>]+data-ga-context=["\']playlist["\'][^>]*>', webpage): attrs = extract_attributes(entry) entry_url = compat_urlparse.urljoin(url, attrs['href']) playlist_entries.append(self.url_result(entry_url, self.ie_key())) final_url = self._og_search_url(webpage, fatal=False) playlist_id = ( re.match(self._VALID_URL, final_url).group('playlist_id') if final_url else None) return self.playlist_result( playlist_entries, playlist_id=playlist_id, playlist_title=self._og_search_title(webpage, fatal=False), playlist_description=self._og_search_description(webpage)) def _talk_info(self, url, video_name): webpage = self._download_webpage(url, video_name) info = self._extract_info(webpage) data = try_get(info, lambda x: x['__INITIAL_DATA__'], dict) or info talk_info = data['talks'][0] title = talk_info['title'].strip() downloads = talk_info.get('downloads') or {} native_downloads = downloads.get('nativeDownloads') or talk_info.get('nativeDownloads') or {} formats = [{ 'url': format_url, 'format_id': format_id, } for (format_id, format_url) in native_downloads.items() if format_url is not None] subtitled_downloads = downloads.get('subtitledDownloads') or {} for lang, subtitled_download in subtitled_downloads.items(): for q in self._NATIVE_FORMATS: q_url = subtitled_download.get(q) if not q_url: continue formats.append({ 'url': q_url, 'format_id': '%s-%s' % (q, lang), 'language': lang, }) if formats: for f in formats: finfo = self._NATIVE_FORMATS.get(f['format_id'].split('-')[0]) if finfo: f.update(finfo) player_talk = talk_info['player_talks'][0] resources_ = player_talk.get('resources') or talk_info.get('resources') http_url = None for format_id, resources in resources_.items(): if 
format_id == 'hls': if not isinstance(resources, dict): continue stream_url = url_or_none(resources.get('stream')) if not stream_url: continue formats.extend(self._extract_m3u8_formats( stream_url, video_name, 'mp4', m3u8_id=format_id, fatal=False)) else: if not isinstance(resources, list): continue if format_id == 'h264': for resource in resources: h264_url = resource.get('file') if not h264_url: continue bitrate = int_or_none(resource.get('bitrate')) formats.append({ 'url': h264_url, 'format_id': '%s-%sk' % (format_id, bitrate), 'tbr': bitrate, }) if re.search(r'\d+k', h264_url): http_url = h264_url elif format_id == 'rtmp': streamer = talk_info.get('streamer') if not streamer: continue for resource in resources: formats.append({ 'format_id': '%s-%s' % (format_id, resource.get('name')), 'url': streamer, 'play_path': resource['file'], 'ext': 'flv', 'width': int_or_none(resource.get('width')), 'height': int_or_none(resource.get('height')), 'tbr': int_or_none(resource.get('bitrate')), }) m3u8_formats = list(filter( lambda f: f.get('protocol') == 'm3u8' and f.get('vcodec') != 'none', formats)) if http_url: for m3u8_format in m3u8_formats: bitrate = self._search_regex(r'(\d+k)', m3u8_format['url'], 'bitrate', default=None) if not bitrate: continue bitrate_url = re.sub(r'\d+k', bitrate, http_url) if not self._is_valid_url( bitrate_url, video_name, '%s bitrate' % bitrate): continue f = m3u8_format.copy() f.update({ 'url': bitrate_url, 'format_id': m3u8_format['format_id'].replace('hls', 'http'), 'protocol': 'http', }) if f.get('acodec') == 'none': del f['acodec'] formats.append(f) audio_download = talk_info.get('audioDownload') if audio_download: formats.append({ 'url': audio_download, 'format_id': 'audio', 'vcodec': 'none', }) if not formats: external = player_talk.get('external') if isinstance(external, dict): service = external.get('service') if isinstance(service, compat_str): ext_url = None if service.lower() == 'youtube': ext_url = external.get('code') return 
self.url_result(ext_url or external['uri']) self._sort_formats(formats) video_id = compat_str(talk_info['id']) return { 'id': video_id, 'title': title, 'uploader': player_talk.get('speaker') or talk_info.get('speaker'), 'thumbnail': player_talk.get('thumb') or talk_info.get('thumb'), 'description': self._og_search_description(webpage), 'subtitles': self._get_subtitles(video_id, talk_info), 'formats': formats, 'duration': float_or_none(talk_info.get('duration')), 'view_count': int_or_none(data.get('viewed_count')), 'comment_count': int_or_none( try_get(data, lambda x: x['comments']['count'])), 'tags': try_get(talk_info, lambda x: x['tags'], list), } def _get_subtitles(self, video_id, talk_info): sub_lang_list = {} for language in try_get( talk_info, (lambda x: x['downloads']['languages'], lambda x: x['languages']), list): lang_code = language.get('languageCode') or language.get('ianaCode') if not lang_code: continue sub_lang_list[lang_code] = [ { 'url': 'http://www.ted.com/talks/subtitles/id/%s/lang/%s/format/%s' % (video_id, lang_code, ext), 'ext': ext, } for ext in ['ted', 'srt'] ] return sub_lang_list def _watch_info(self, url, name): webpage = self._download_webpage(url, name) config_json = self._html_search_regex( r'"pages\.jwplayer"\s*,\s*({.+?})\s*\)\s*</script>', webpage, 'config', default=None) if not config_json: embed_url = self._search_regex( r"<iframe[^>]+class='pages-video-embed__video__object'[^>]+src='([^']+)'", webpage, 'embed url') return self.url_result(self._proto_relative_url(embed_url)) config = json.loads(config_json)['config'] video_url = config['video']['url'] thumbnail = config.get('image', {}).get('url') title = self._html_search_regex( r"(?s)<h1(?:\s+class='[^']+')?>(.+?)</h1>", webpage, 'title') description = self._html_search_regex( [ r'(?s)<h4 class="[^"]+" id="h3--about-this-talk">.*?</h4>(.*?)</div>', r'(?s)<p><strong>About this talk:</strong>\s+(.*?)</p>', ], webpage, 'description', fatal=False) return { 'id': name, 'url': 
video_url, 'title': title, 'thumbnail': thumbnail, 'description': description, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/platzi.py
youtube_dl/extractor/platzi.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_b64decode, compat_str, ) from ..utils import ( clean_html, ExtractorError, int_or_none, str_or_none, try_get, url_or_none, urlencode_postdata, urljoin, ) class PlatziBaseIE(InfoExtractor): _LOGIN_URL = 'https://platzi.com/login/' _NETRC_MACHINE = 'platzi' def _real_initialize(self): self._login() def _login(self): username, password = self._get_login_info() if username is None: return login_page = self._download_webpage( self._LOGIN_URL, None, 'Downloading login page') login_form = self._hidden_inputs(login_page) login_form.update({ 'email': username, 'password': password, }) urlh = self._request_webpage( self._LOGIN_URL, None, 'Logging in', data=urlencode_postdata(login_form), headers={'Referer': self._LOGIN_URL}) # login succeeded if 'platzi.com/login' not in urlh.geturl(): return login_error = self._webpage_read_content( urlh, self._LOGIN_URL, None, 'Downloading login error page') login = self._parse_json( self._search_regex( r'login\s*=\s*({.+?})(?:\s*;|\s*</script)', login_error, 'login'), None) for kind in ('error', 'password', 'nonFields'): error = str_or_none(login.get('%sError' % kind)) if error: raise ExtractorError( 'Unable to login: %s' % error, expected=True) raise ExtractorError('Unable to log in') class PlatziIE(PlatziBaseIE): _VALID_URL = r'''(?x) https?:// (?: platzi\.com/clases| # es version courses\.platzi\.com/classes # en version )/[^/]+/(?P<id>\d+)-[^/?\#&]+ ''' _TESTS = [{ 'url': 'https://platzi.com/clases/1311-next-js/12074-creando-nuestra-primera-pagina/', 'md5': '8f56448241005b561c10f11a595b37e3', 'info_dict': { 'id': '12074', 'ext': 'mp4', 'title': 'Creando nuestra primera página', 'description': 'md5:4c866e45034fc76412fbf6e60ae008bc', 'duration': 420, }, 'skip': 'Requires platzi account credentials', }, { 'url': 'https://courses.platzi.com/classes/1367-communication-codestream/13430-background/', 'info_dict': { 'id': 
'13430', 'ext': 'mp4', 'title': 'Background', 'description': 'md5:49c83c09404b15e6e71defaf87f6b305', 'duration': 360, }, 'skip': 'Requires platzi account credentials', 'params': { 'skip_download': True, }, }] def _real_extract(self, url): lecture_id = self._match_id(url) webpage = self._download_webpage(url, lecture_id) data = self._parse_json( self._search_regex( # client_data may contain "};" so that we have to try more # strict regex first (r'client_data\s*=\s*({.+?})\s*;\s*\n', r'client_data\s*=\s*({.+?})\s*;'), webpage, 'client data'), lecture_id) material = data['initialState']['material'] desc = material['description'] title = desc['title'] formats = [] for server_id, server in material['videos'].items(): if not isinstance(server, dict): continue for format_id in ('hls', 'dash'): format_url = url_or_none(server.get(format_id)) if not format_url: continue if format_id == 'hls': formats.extend(self._extract_m3u8_formats( format_url, lecture_id, 'mp4', entry_protocol='m3u8_native', m3u8_id=format_id, note='Downloading %s m3u8 information' % server_id, fatal=False)) elif format_id == 'dash': formats.extend(self._extract_mpd_formats( format_url, lecture_id, mpd_id=format_id, note='Downloading %s MPD manifest' % server_id, fatal=False)) self._sort_formats(formats) content = str_or_none(desc.get('content')) description = (clean_html(compat_b64decode(content).decode('utf-8')) if content else None) duration = int_or_none(material.get('duration'), invscale=60) return { 'id': lecture_id, 'title': title, 'description': description, 'duration': duration, 'formats': formats, } class PlatziCourseIE(PlatziBaseIE): _VALID_URL = r'''(?x) https?:// (?: platzi\.com/clases| # es version courses\.platzi\.com/classes # en version )/(?P<id>[^/?\#&]+) ''' _TESTS = [{ 'url': 'https://platzi.com/clases/next-js/', 'info_dict': { 'id': '1311', 'title': 'Curso de Next.js', }, 'playlist_count': 22, }, { 'url': 'https://courses.platzi.com/classes/communication-codestream/', 'info_dict': { 
'id': '1367', 'title': 'Codestream Course', }, 'playlist_count': 14, }] @classmethod def suitable(cls, url): return False if PlatziIE.suitable(url) else super(PlatziCourseIE, cls).suitable(url) def _real_extract(self, url): course_name = self._match_id(url) webpage = self._download_webpage(url, course_name) props = self._parse_json( self._search_regex(r'data\s*=\s*({.+?})\s*;', webpage, 'data'), course_name)['initialProps'] entries = [] for chapter_num, chapter in enumerate(props['concepts'], 1): if not isinstance(chapter, dict): continue materials = chapter.get('materials') if not materials or not isinstance(materials, list): continue chapter_title = chapter.get('title') chapter_id = str_or_none(chapter.get('id')) for material in materials: if not isinstance(material, dict): continue if material.get('material_type') != 'video': continue video_url = urljoin(url, material.get('url')) if not video_url: continue entries.append({ '_type': 'url_transparent', 'url': video_url, 'title': str_or_none(material.get('name')), 'id': str_or_none(material.get('id')), 'ie_key': PlatziIE.ie_key(), 'chapter': chapter_title, 'chapter_number': chapter_num, 'chapter_id': chapter_id, }) course_id = compat_str(try_get(props, lambda x: x['course']['id'])) course_title = try_get(props, lambda x: x['course']['name'], compat_str) return self.playlist_result(entries, course_id, course_title)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/yandexvideo.py
youtube_dl/extractor/yandexvideo.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, try_get, url_or_none, ) class YandexVideoIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// (?: yandex\.ru(?:/(?:portal/(?:video|efir)|efir))?/?\?.*?stream_id=| frontend\.vh\.yandex\.ru/player/ ) (?P<id>(?:[\da-f]{32}|[\w-]{12})) ''' _TESTS = [{ 'url': 'https://yandex.ru/portal/video?stream_id=4dbb36ec4e0526d58f9f2dc8f0ecf374', 'md5': 'e02a05bfaf0d9615ef07ae3a10f4faf4', 'info_dict': { 'id': '4dbb36ec4e0526d58f9f2dc8f0ecf374', 'ext': 'mp4', 'title': 'Русский Вудсток - главный рок-фест в истории СССР / вДудь', 'description': 'md5:7d6b8d4bc4a3b9a56499916c1ea5b5fa', 'thumbnail': r're:^https?://', 'timestamp': 1549972939, 'duration': 5575, 'age_limit': 18, 'upload_date': '20190212', 'view_count': int, 'like_count': int, 'dislike_count': int, }, }, { 'url': 'https://yandex.ru/portal/efir?stream_id=4dbb262b4fe5cf15a215de4f34eee34d&from=morda', 'only_matching': True, }, { 'url': 'https://yandex.ru/?stream_id=4dbb262b4fe5cf15a215de4f34eee34d', 'only_matching': True, }, { 'url': 'https://frontend.vh.yandex.ru/player/4dbb262b4fe5cf15a215de4f34eee34d?from=morda', 'only_matching': True, }, { # vod-episode, series episode 'url': 'https://yandex.ru/portal/video?stream_id=45b11db6e4b68797919c93751a938cee', 'only_matching': True, }, { # episode, sports 'url': 'https://yandex.ru/?stream_channel=1538487871&stream_id=4132a07f71fb0396be93d74b3477131d', 'only_matching': True, }, { # DASH with DRM 'url': 'https://yandex.ru/portal/video?from=morda&stream_id=485a92d94518d73a9d0ff778e13505f8', 'only_matching': True, }, { 'url': 'https://yandex.ru/efir?stream_active=watching&stream_id=v7a2dZ-v5mSI&from_block=efir_newtab', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) player = try_get((self._download_json( 'https://frontend.vh.yandex.ru/graphql', video_id, data=('''{ player(content_id: "%s") { computed_title 
content_url description dislikes duration likes program_title release_date release_date_ut release_year restriction_age season start_time streams thumbnail title views_count } }''' % video_id).encode(), fatal=False)), lambda x: x['player']['content']) if not player or player.get('error'): player = self._download_json( 'https://frontend.vh.yandex.ru/v23/player/%s.json' % video_id, video_id, query={ 'stream_options': 'hires', 'disable_trackings': 1, }) content = player['content'] title = content.get('title') or content['computed_title'] formats = [] streams = content.get('streams') or [] streams.append({'url': content.get('content_url')}) for stream in streams: content_url = url_or_none(stream.get('url')) if not content_url: continue ext = determine_ext(content_url) if ext == 'ismc': continue elif ext == 'm3u8': formats.extend(self._extract_m3u8_formats( content_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) elif ext == 'mpd': formats.extend(self._extract_mpd_formats( content_url, video_id, mpd_id='dash', fatal=False)) else: formats.append({'url': content_url}) self._sort_formats(formats) timestamp = (int_or_none(content.get('release_date')) or int_or_none(content.get('release_date_ut')) or int_or_none(content.get('start_time'))) season = content.get('season') or {} return { 'id': video_id, 'title': title, 'description': content.get('description'), 'thumbnail': content.get('thumbnail'), 'timestamp': timestamp, 'duration': int_or_none(content.get('duration')), 'series': content.get('program_title'), 'age_limit': int_or_none(content.get('restriction_age')), 'view_count': int_or_none(content.get('views_count')), 'like_count': int_or_none(content.get('likes')), 'dislike_count': int_or_none(content.get('dislikes')), 'season_number': int_or_none(season.get('season_number')), 'season_id': season.get('id'), 'release_year': int_or_none(content.get('release_year')), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/faz.py
youtube_dl/extractor/faz.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_etree_fromstring from ..utils import ( xpath_element, xpath_text, int_or_none, ) class FazIE(InfoExtractor): IE_NAME = 'faz.net' _VALID_URL = r'https?://(?:www\.)?faz\.net/(?:[^/]+/)*.*?-(?P<id>\d+)\.html' _TESTS = [{ 'url': 'http://www.faz.net/multimedia/videos/stockholm-chemie-nobelpreis-fuer-drei-amerikanische-forscher-12610585.html', 'info_dict': { 'id': '12610585', 'ext': 'mp4', 'title': 'Stockholm: Chemie-Nobelpreis für drei amerikanische Forscher', 'description': 'md5:1453fbf9a0d041d985a47306192ea253', }, }, { 'url': 'http://www.faz.net/aktuell/politik/berlin-gabriel-besteht-zerreissprobe-ueber-datenspeicherung-13659345.html', 'only_matching': True, }, { 'url': 'http://www.faz.net/berlin-gabriel-besteht-zerreissprobe-ueber-datenspeicherung-13659345.html', 'only_matching': True, }, { 'url': 'http://www.faz.net/-13659345.html', 'only_matching': True, }, { 'url': 'http://www.faz.net/aktuell/politik/-13659345.html', 'only_matching': True, }, { 'url': 'http://www.faz.net/foobarblafasel-13659345.html', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) description = self._og_search_description(webpage) media = self._html_search_regex( r"data-videojs-media='([^']+)", webpage, 'media') if media == 'extern': perform_url = self._search_regex( r"<iframe[^>]+?src='((?:http:)?//player\.performgroup\.com/eplayer/eplayer\.html#/?[0-9a-f]{26}\.[0-9a-z]{26})", webpage, 'perform url') return self.url_result(perform_url) config = compat_etree_fromstring(media) encodings = xpath_element(config, 'ENCODINGS', 'encodings', True) formats = [] for pref, code in enumerate(['LOW', 'HIGH', 'HQ']): encoding = xpath_element(encodings, code) if encoding is not None: encoding_url = xpath_text(encoding, 'FILENAME') if encoding_url: tbr = xpath_text(encoding, 'AVERAGEBITRATE', 1000) 
if tbr: tbr = int_or_none(tbr.replace(',', '.')) f = { 'url': encoding_url, 'format_id': code.lower(), 'quality': pref, 'tbr': tbr, 'vcodec': xpath_text(encoding, 'CODEC'), } mobj = re.search(r'(\d+)x(\d+)_(\d+)\.mp4', encoding_url) if mobj: f.update({ 'width': int(mobj.group(1)), 'height': int(mobj.group(2)), 'tbr': tbr or int(mobj.group(3)), }) formats.append(f) self._sort_formats(formats) return { 'id': video_id, 'title': self._og_search_title(webpage), 'formats': formats, 'description': description.strip() if description else None, 'thumbnail': xpath_text(config, 'STILL/STILL_BIG'), 'duration': int_or_none(xpath_text(config, 'DURATION')), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/threeqsdn.py
youtube_dl/extractor/threeqsdn.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_HTTPError from ..utils import ( determine_ext, ExtractorError, float_or_none, int_or_none, parse_iso8601, ) class ThreeQSDNIE(InfoExtractor): IE_NAME = '3qsdn' IE_DESC = '3Q SDN' _VALID_URL = r'https?://playout\.3qsdn\.com/(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})' _TESTS = [{ # https://player.3qsdn.com/demo.html 'url': 'https://playout.3qsdn.com/7201c779-6b3c-11e7-a40e-002590c750be', 'md5': '64a57396b16fa011b15e0ea60edce918', 'info_dict': { 'id': '7201c779-6b3c-11e7-a40e-002590c750be', 'ext': 'mp4', 'title': 'Video Ads', 'is_live': False, 'description': 'Video Ads Demo', 'timestamp': 1500334803, 'upload_date': '20170717', 'duration': 888.032, 'subtitles': { 'eng': 'count:1', }, }, 'expected_warnings': ['Unknown MIME type application/mp4 in DASH manifest'], }, { # live video stream 'url': 'https://playout.3qsdn.com/66e68995-11ca-11e8-9273-002590c750be', 'info_dict': { 'id': '66e68995-11ca-11e8-9273-002590c750be', 'ext': 'mp4', 'title': 're:^66e68995-11ca-11e8-9273-002590c750be [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'is_live': True, }, 'params': { 'skip_download': True, # m3u8 downloads }, }, { # live audio stream 'url': 'http://playout.3qsdn.com/9edf36e0-6bf2-11e2-a16a-9acf09e2db48', 'only_matching': True, }, { # live audio stream with some 404 URLs 'url': 'http://playout.3qsdn.com/ac5c3186-777a-11e2-9c30-9acf09e2db48', 'only_matching': True, }, { # geo restricted with 'This content is not available in your country' 'url': 'http://playout.3qsdn.com/d63a3ffe-75e8-11e2-9c30-9acf09e2db48', 'only_matching': True, }, { # geo restricted with 'playout.3qsdn.com/forbidden' 'url': 'http://playout.3qsdn.com/8e330f26-6ae2-11e2-a16a-9acf09e2db48', 'only_matching': True, }, { # live video with rtmp link 'url': 'https://playout.3qsdn.com/6092bb9e-8f72-11e4-a173-002590c750be', 'only_matching': True, }, { # ondemand from 
http://www.philharmonie.tv/veranstaltung/26/ 'url': 'http://playout.3qsdn.com/0280d6b9-1215-11e6-b427-0cc47a188158?protocol=http', 'only_matching': True, }, { # live video stream 'url': 'https://playout.3qsdn.com/d755d94b-4ab9-11e3-9162-0025907ad44f?js=true', 'only_matching': True, }] @staticmethod def _extract_url(webpage): mobj = re.search( r'<iframe[^>]+\b(?:data-)?src=(["\'])(?P<url>%s.*?)\1' % ThreeQSDNIE._VALID_URL, webpage) if mobj: return mobj.group('url') def _real_extract(self, url): video_id = self._match_id(url) try: config = self._download_json( url.replace('://playout.3qsdn.com/', '://playout.3qsdn.com/config/'), video_id) except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401: self.raise_geo_restricted() raise live = config.get('streamContent') == 'live' aspect = float_or_none(config.get('aspect')) formats = [] for source_type, source in (config.get('sources') or {}).items(): if not source: continue if source_type == 'dash': formats.extend(self._extract_mpd_formats( source, video_id, mpd_id='mpd', fatal=False)) elif source_type == 'hls': formats.extend(self._extract_m3u8_formats( source, video_id, 'mp4', 'm3u8' if live else 'm3u8_native', m3u8_id='hls', fatal=False)) elif source_type == 'progressive': for s in source: src = s.get('src') if not (src and self._is_valid_url(src, video_id)): continue width = None format_id = ['http'] ext = determine_ext(src) if ext: format_id.append(ext) height = int_or_none(s.get('height')) if height: format_id.append('%dp' % height) if aspect: width = int(height * aspect) formats.append({ 'ext': ext, 'format_id': '-'.join(format_id), 'height': height, 'source_preference': 0, 'url': src, 'vcodec': 'none' if height == 0 else None, 'width': width, }) for f in formats: if f.get('acodec') == 'none': f['preference'] = -40 elif f.get('vcodec') == 'none': f['preference'] = -50 self._sort_formats(formats, ('preference', 'width', 'height', 'source_preference', 'tbr', 'vbr', 'abr', 'ext', 
'format_id')) subtitles = {} for subtitle in (config.get('subtitles') or []): src = subtitle.get('src') if not src: continue subtitles.setdefault(subtitle.get('label') or 'eng', []).append({ 'url': src, }) title = config.get('title') or video_id return { 'id': video_id, 'title': self._live_title(title) if live else title, 'thumbnail': config.get('poster') or None, 'description': config.get('description') or None, 'timestamp': parse_iso8601(config.get('upload_date')), 'duration': float_or_none(config.get('vlength')) or None, 'is_live': live, 'formats': formats, 'subtitles': subtitles, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/nuvid.py
youtube_dl/extractor/nuvid.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_str from ..utils import ( parse_duration, int_or_none, try_get, url_or_none, ) import re class NuvidIE(InfoExtractor): _VALID_URL = r'https?://(?:www|m)\.nuvid\.com/video/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://www.nuvid.com/video/6513023/italian-babe', 'md5': '772d2f8288f3d3c5c45f7a41761c7844', 'info_dict': { 'id': '6513023', 'ext': 'mp4', 'title': 'italian babe', 'format_id': '360p', 'duration': 321.0, 'age_limit': 18, 'thumbnail': r're:https?://.+\.jpg', 'thumbnails': list, } }, { 'url': 'https://m.nuvid.com/video/6523263', 'md5': 'ebd22ce8e47e1d9a4d0756a15c67da52', 'info_dict': { 'id': '6523263', 'ext': 'mp4', 'title': 'Slut brunette college student anal dorm', 'format_id': '720p', 'duration': 421.0, 'age_limit': 18, 'thumbnail': r're:https?://.+\.jpg', 'thumbnails': list, } }, { 'url': 'http://m.nuvid.com/video/6415801/', 'md5': '638d5ececb138d5753593f751ae3f697', 'info_dict': { 'id': '6415801', 'ext': 'mp4', 'title': 'My best friend wanted to fuck my wife for a long time', 'format_id': '720p', 'duration': 1882, 'age_limit': 18, 'thumbnail': r're:https?://.+\.jpg', 'thumbnails': list, } }] def _real_extract(self, url): video_id = self._match_id(url) qualities = { 'lq': '360p', 'hq': '720p', } json_url = 'https://www.nuvid.com/player_config_json/?vid={video_id}&aid=0&domain_id=0&embed=0&check_speed=0'.format(**locals()) video_data = self._download_json( json_url, video_id, headers={ 'Accept': 'application/json, text/javascript, */*; q = 0.01', 'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8', }) or {} # nice to have, not required webpage = self._download_webpage( 'http://m.nuvid.com/video/%s' % (video_id, ), video_id, 'Downloading video page', fatal=False) or '' title = ( try_get(video_data, lambda x: x['title'], compat_str) or self._html_search_regex( 
(r'''<span\s[^>]*?\btitle\s*=\s*(?P<q>"|'|\b)(?P<title>[^"]+)(?P=q)\s*>''', r'''<div\s[^>]*?\bclass\s*=\s*(?P<q>"|'|\b)thumb-holder video(?P=q)>\s*<h5\b[^>]*>(?P<title>[^<]+)</h5''', r'''<span\s[^>]*?\bclass\s*=\s*(?P<q>"|'|\b)title_thumb(?P=q)>(?P<title>[^<]+)</span'''), webpage, 'title', group='title')).strip() formats = [{ 'url': source, 'format_id': qualities.get(quality), 'height': int_or_none(qualities.get(quality)[:-1]), } for quality, source in video_data.get('files').items() if source] self._check_formats(formats, video_id) self._sort_formats(formats) duration = parse_duration(video_data.get('duration') or video_data.get('duration_format')) thumbnails = [ {'url': thumb_url, } for thumb_url in ( url_or_none(src) for src in re.findall( r'<div\s+class\s*=\s*"video-tmb-wrap"\s*>\s*<img\s+src\s*=\s*"([^"]+)"\s*/>', webpage)) ] return { 'id': video_id, 'formats': formats, 'title': title, 'thumbnail': url_or_none(video_data.get('poster')), 'thumbnails': thumbnails, 'duration': duration, 'age_limit': 18, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/dailymotion.py
youtube_dl/extractor/dailymotion.py
# coding: utf-8 from __future__ import unicode_literals import functools import json import re from .common import InfoExtractor from ..compat import compat_HTTPError from ..utils import ( age_restricted, clean_html, ExtractorError, int_or_none, OnDemandPagedList, try_get, unescapeHTML, urlencode_postdata, ) class DailymotionBaseInfoExtractor(InfoExtractor): _FAMILY_FILTER = None _HEADERS = { 'Content-Type': 'application/json', 'Origin': 'https://www.dailymotion.com', } _NETRC_MACHINE = 'dailymotion' def _get_dailymotion_cookies(self): return self._get_cookies('https://www.dailymotion.com/') @staticmethod def _get_cookie_value(cookies, name): cookie = cookies.get(name) if cookie: return cookie.value def _set_dailymotion_cookie(self, name, value): self._set_cookie('www.dailymotion.com', name, value) def _real_initialize(self): cookies = self._get_dailymotion_cookies() ff = self._get_cookie_value(cookies, 'ff') self._FAMILY_FILTER = ff == 'on' if ff else age_restricted(18, self._downloader.params.get('age_limit')) self._set_dailymotion_cookie('ff', 'on' if self._FAMILY_FILTER else 'off') def _call_api(self, object_type, xid, object_fields, note, filter_extra=None): if not self._HEADERS.get('Authorization'): cookies = self._get_dailymotion_cookies() token = self._get_cookie_value(cookies, 'access_token') or self._get_cookie_value(cookies, 'client_token') if not token: data = { 'client_id': 'f1a362d288c1b98099c7', 'client_secret': 'eea605b96e01c796ff369935357eca920c5da4c5', } username, password = self._get_login_info() if username: data.update({ 'grant_type': 'password', 'password': password, 'username': username, }) else: data['grant_type'] = 'client_credentials' try: token = self._download_json( 'https://graphql.api.dailymotion.com/oauth/token', None, 'Downloading Access Token', data=urlencode_postdata(data))['access_token'] except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400: raise ExtractorError(self._parse_json( 
e.cause.read().decode(), xid)['error_description'], expected=True) raise self._set_dailymotion_cookie('access_token' if username else 'client_token', token) self._HEADERS['Authorization'] = 'Bearer ' + token resp = self._download_json( 'https://graphql.api.dailymotion.com/', xid, note, data=json.dumps({ 'query': '''{ %s(xid: "%s"%s) { %s } }''' % (object_type, xid, ', ' + filter_extra if filter_extra else '', object_fields), }).encode(), headers=self._HEADERS) obj = resp['data'][object_type] if not obj: raise ExtractorError(resp['errors'][0]['message'], expected=True) return obj class DailymotionIE(DailymotionBaseInfoExtractor): _VALID_URL = r'''(?ix) https?:// (?: (?:(?:www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(?:(?:embed|swf|\#)/)?video|swf)| (?:www\.)?lequipe\.fr/video ) /(?P<id>[^/?_]+)(?:.+?\bplaylist=(?P<playlist_id>x[0-9a-z]+))? ''' IE_NAME = 'dailymotion' _TESTS = [{ 'url': 'http://www.dailymotion.com/video/x5kesuj_office-christmas-party-review-jason-bateman-olivia-munn-t-j-miller_news', 'md5': '074b95bdee76b9e3654137aee9c79dfe', 'info_dict': { 'id': 'x5kesuj', 'ext': 'mp4', 'title': 'Office Christmas Party Review – Jason Bateman, Olivia Munn, T.J. Miller', 'description': 'Office Christmas Party Review - Jason Bateman, Olivia Munn, T.J. 
Miller', 'duration': 187, 'timestamp': 1493651285, 'upload_date': '20170501', 'uploader': 'Deadline', 'uploader_id': 'x1xm8ri', 'age_limit': 0, }, }, { 'url': 'https://www.dailymotion.com/video/x2iuewm_steam-machine-models-pricing-listed-on-steam-store-ign-news_videogames', 'md5': '2137c41a8e78554bb09225b8eb322406', 'info_dict': { 'id': 'x2iuewm', 'ext': 'mp4', 'title': 'Steam Machine Models, Pricing Listed on Steam Store - IGN News', 'description': 'Several come bundled with the Steam Controller.', 'thumbnail': r're:^https?:.*\.(?:jpg|png)$', 'duration': 74, 'timestamp': 1425657362, 'upload_date': '20150306', 'uploader': 'IGN', 'uploader_id': 'xijv66', 'age_limit': 0, 'view_count': int, }, 'skip': 'video gone', }, { # Vevo video 'url': 'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi', 'info_dict': { 'title': 'Roar (Official)', 'id': 'USUV71301934', 'ext': 'mp4', 'uploader': 'Katy Perry', 'upload_date': '20130905', }, 'params': { 'skip_download': True, }, 'skip': 'VEVO is only available in some countries', }, { # age-restricted video 'url': 'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband', 'md5': '0d667a7b9cebecc3c89ee93099c4159d', 'info_dict': { 'id': 'xyh2zz', 'ext': 'mp4', 'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]', 'uploader': 'HotWaves1012', 'age_limit': 18, }, 'skip': 'video gone', }, { # geo-restricted, player v5 'url': 'http://www.dailymotion.com/video/xhza0o', 'only_matching': True, }, { # with subtitles 'url': 'http://www.dailymotion.com/video/x20su5f_the-power-of-nightmares-1-the-rise-of-the-politics-of-fear-bbc-2004_news', 'only_matching': True, }, { 'url': 'http://www.dailymotion.com/swf/video/x3n92nf', 'only_matching': True, }, { 'url': 'http://www.dailymotion.com/swf/x3ss1m_funny-magic-trick-barry-and-stuart_fun', 'only_matching': True, }, { 'url': 'https://www.lequipe.fr/video/x791mem', 'only_matching': True, }, { 'url': 
'https://www.lequipe.fr/video/k7MtHciueyTcrFtFKA2', 'only_matching': True, }, { 'url': 'https://www.dailymotion.com/video/x3z49k?playlist=xv4bw', 'only_matching': True, }] _GEO_BYPASS = False _COMMON_MEDIA_FIELDS = '''description geoblockedCountries { allowed } xid''' @staticmethod def _extract_urls(webpage): urls = [] # Look for embedded Dailymotion player # https://developer.dailymotion.com/player#player-parameters for mobj in re.finditer( r'<(?:(?:embed|iframe)[^>]+?src=|input[^>]+id=[\'"]dmcloudUrlEmissionSelect[\'"][^>]+value=)(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.com/(?:embed|swf)/video/.+?)\1', webpage): urls.append(unescapeHTML(mobj.group('url'))) for mobj in re.finditer( r'(?s)DM\.player\([^,]+,\s*{.*?video[\'"]?\s*:\s*["\']?(?P<id>[0-9a-zA-Z]+).+?}\s*\);', webpage): urls.append('https://www.dailymotion.com/embed/video/' + mobj.group('id')) return urls def _real_extract(self, url): video_id, playlist_id = re.match(self._VALID_URL, url).groups() if playlist_id: if not self._downloader.params.get('noplaylist'): self.to_screen('Downloading playlist %s - add --no-playlist to just download video' % playlist_id) return self.url_result( 'http://www.dailymotion.com/playlist/' + playlist_id, 'DailymotionPlaylist', playlist_id) self.to_screen('Downloading just video %s because of --no-playlist' % video_id) password = self._downloader.params.get('videopassword') media = self._call_api( 'media', video_id, '''... on Video { %s stats { likes { total } views { total } } } ... 
on Live { %s audienceCount isOnAir }''' % (self._COMMON_MEDIA_FIELDS, self._COMMON_MEDIA_FIELDS), 'Downloading media JSON metadata', 'password: "%s"' % self._downloader.params.get('videopassword') if password else None) xid = media['xid'] metadata = self._download_json( 'https://www.dailymotion.com/player/metadata/video/' + xid, xid, 'Downloading metadata JSON', query={'app': 'com.dailymotion.neon'}) error = metadata.get('error') if error: title = error.get('title') or error['raw_message'] # See https://developer.dailymotion.com/api#access-error if error.get('code') == 'DM007': allowed_countries = try_get(media, lambda x: x['geoblockedCountries']['allowed'], list) self.raise_geo_restricted(msg=title, countries=allowed_countries) raise ExtractorError( '%s said: %s' % (self.IE_NAME, title), expected=True) title = metadata['title'] is_live = media.get('isOnAir') formats = [] for quality, media_list in metadata['qualities'].items(): for m in media_list: media_url = m.get('url') media_type = m.get('type') if not media_url or media_type == 'application/vnd.lumberjack.manifest': continue if media_type == 'application/x-mpegURL': formats.extend(self._extract_m3u8_formats( media_url, video_id, 'mp4', 'm3u8' if is_live else 'm3u8_native', m3u8_id='hls', fatal=False)) else: f = { 'url': media_url, 'format_id': 'http-' + quality, } m = re.search(r'/H264-(\d+)x(\d+)(?:-(60)/)?', media_url) if m: width, height, fps = map(int_or_none, m.groups()) f.update({ 'fps': fps, 'height': height, 'width': width, }) formats.append(f) for f in formats: f['url'] = f['url'].split('#')[0] if not f.get('fps') and f['format_id'].endswith('@60'): f['fps'] = 60 self._sort_formats(formats) subtitles = {} subtitles_data = try_get(metadata, lambda x: x['subtitles']['data'], dict) or {} for subtitle_lang, subtitle in subtitles_data.items(): subtitles[subtitle_lang] = [{ 'url': subtitle_url, } for subtitle_url in subtitle.get('urls', [])] thumbnails = [] for height, poster_url in metadata.get('posters', 
{}).items(): thumbnails.append({ 'height': int_or_none(height), 'id': height, 'url': poster_url, }) owner = metadata.get('owner') or {} stats = media.get('stats') or {} get_count = lambda x: int_or_none(try_get(stats, lambda y: y[x + 's']['total'])) return { 'id': video_id, 'title': self._live_title(title) if is_live else title, 'description': clean_html(media.get('description')), 'thumbnails': thumbnails, 'duration': int_or_none(metadata.get('duration')) or None, 'timestamp': int_or_none(metadata.get('created_time')), 'uploader': owner.get('screenname'), 'uploader_id': owner.get('id') or metadata.get('screenname'), 'age_limit': 18 if metadata.get('explicit') else 0, 'tags': metadata.get('tags'), 'view_count': get_count('view') or int_or_none(media.get('audienceCount')), 'like_count': get_count('like'), 'formats': formats, 'subtitles': subtitles, 'is_live': is_live, } class DailymotionPlaylistBaseIE(DailymotionBaseInfoExtractor): _PAGE_SIZE = 100 def _fetch_page(self, playlist_id, page): page += 1 videos = self._call_api( self._OBJECT_TYPE, playlist_id, '''videos(allowExplicit: %s, first: %d, page: %d) { edges { node { xid url } } }''' % ('false' if self._FAMILY_FILTER else 'true', self._PAGE_SIZE, page), 'Downloading page %d' % page)['videos'] for edge in videos['edges']: node = edge['node'] yield self.url_result( node['url'], DailymotionIE.ie_key(), node['xid']) def _real_extract(self, url): playlist_id = self._match_id(url) entries = OnDemandPagedList(functools.partial( self._fetch_page, playlist_id), self._PAGE_SIZE) return self.playlist_result( entries, playlist_id) class DailymotionPlaylistIE(DailymotionPlaylistBaseIE): IE_NAME = 'dailymotion:playlist' _VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>x[0-9a-z]+)' _TESTS = [{ 'url': 'http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q', 'info_dict': { 'id': 'xv4bw', }, 'playlist_mincount': 20, }] _OBJECT_TYPE = 'collection' class 
DailymotionUserIE(DailymotionPlaylistBaseIE): IE_NAME = 'dailymotion:user' _VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?!(?:embed|swf|#|video|playlist)/)(?:(?:old/)?user/)?(?P<id>[^/]+)' _TESTS = [{ 'url': 'https://www.dailymotion.com/user/nqtv', 'info_dict': { 'id': 'nqtv', }, 'playlist_mincount': 152, }, { 'url': 'http://www.dailymotion.com/user/UnderProject', 'info_dict': { 'id': 'UnderProject', }, 'playlist_mincount': 1000, 'skip': 'Takes too long time', }, { 'url': 'https://www.dailymotion.com/user/nqtv', 'info_dict': { 'id': 'nqtv', }, 'playlist_mincount': 148, 'params': { 'age_limit': 0, }, }] _OBJECT_TYPE = 'channel'
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tvanouvelles.py
youtube_dl/extractor/tvanouvelles.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from .brightcove import BrightcoveNewIE class TVANouvellesIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tvanouvelles\.ca/videos/(?P<id>\d+)' _TEST = { 'url': 'http://www.tvanouvelles.ca/videos/5117035533001', 'info_dict': { 'id': '5117035533001', 'ext': 'mp4', 'title': 'L’industrie du taxi dénonce l’entente entre Québec et Uber: explications', 'description': 'md5:479653b7c8cf115747bf5118066bd8b3', 'uploader_id': '1741764581', 'timestamp': 1473352030, 'upload_date': '20160908', }, 'add_ie': ['BrightcoveNew'], } BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1741764581/default_default/index.html?videoId=%s' def _real_extract(self, url): brightcove_id = self._match_id(url) return self.url_result( self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, BrightcoveNewIE.ie_key(), brightcove_id) class TVANouvellesArticleIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tvanouvelles\.ca/(?:[^/]+/)+(?P<id>[^/?#&]+)' _TEST = { 'url': 'http://www.tvanouvelles.ca/2016/11/17/des-policiers-qui-ont-la-meche-un-peu-courte', 'info_dict': { 'id': 'des-policiers-qui-ont-la-meche-un-peu-courte', 'title': 'Des policiers qui ont «la mèche un peu courte»?', 'description': 'md5:92d363c8eb0f0f030de9a4a84a90a3a0', }, 'playlist_mincount': 4, } @classmethod def suitable(cls, url): return False if TVANouvellesIE.suitable(url) else super(TVANouvellesArticleIE, cls).suitable(url) def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) entries = [ self.url_result( 'http://www.tvanouvelles.ca/videos/%s' % mobj.group('id'), ie=TVANouvellesIE.ie_key(), video_id=mobj.group('id')) for mobj in re.finditer( r'data-video-id=(["\'])?(?P<id>\d+)', webpage)] title = self._og_search_title(webpage, fatal=False) description = self._og_search_description(webpage) return self.playlist_result(entries, display_id, title, description)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/huffpost.py
youtube_dl/extractor/huffpost.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( determine_ext, parse_duration, unified_strdate, ) class HuffPostIE(InfoExtractor): IE_DESC = 'Huffington Post' _VALID_URL = r'''(?x) https?://(embed\.)?live\.huffingtonpost\.com/ (?: r/segment/[^/]+/| HPLEmbedPlayer/\?segmentId= ) (?P<id>[0-9a-f]+)''' _TEST = { 'url': 'http://live.huffingtonpost.com/r/segment/legalese-it/52dd3e4b02a7602131000677', 'md5': '55f5e8981c1c80a64706a44b74833de8', 'info_dict': { 'id': '52dd3e4b02a7602131000677', 'ext': 'mp4', 'title': 'Legalese It! with @MikeSacksHP', 'description': 'This week on Legalese It, Mike talks to David Bosco about his new book on the ICC, "Rough Justice," he also discusses the Virginia AG\'s historic stance on gay marriage, the execution of Edgar Tamayo, the ICC\'s delay of Kenya\'s President and more. ', 'duration': 1549, 'upload_date': '20140124', }, 'params': { # m3u8 download 'skip_download': True, }, 'expected_warnings': ['HTTP Error 404: Not Found'], } def _real_extract(self, url): video_id = self._match_id(url) api_url = 'http://embed.live.huffingtonpost.com/api/segments/%s.json' % video_id data = self._download_json(api_url, video_id)['data'] video_title = data['title'] duration = parse_duration(data.get('running_time')) upload_date = unified_strdate( data.get('schedule', {}).get('starts_at') or data.get('segment_start_date_time')) description = data.get('description') thumbnails = [] for url in filter(None, data['images'].values()): m = re.match(r'.*-([0-9]+x[0-9]+)\.', url) if not m: continue thumbnails.append({ 'url': url, 'resolution': m.group(1), }) formats = [] sources = data.get('sources', {}) live_sources = list(sources.get('live', {}).items()) + list(sources.get('live_again', {}).items()) for key, url in live_sources: ext = determine_ext(url) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( url, video_id, ext='mp4', m3u8_id='hls', fatal=False)) elif ext == 'f4m': 
formats.extend(self._extract_f4m_formats( url + '?hdcore=2.9.5', video_id, f4m_id='hds', fatal=False)) else: formats.append({ 'format': key, 'format_id': key.replace('/', '.'), 'ext': 'mp4', 'url': url, 'vcodec': 'none' if key.startswith('audio/') else None, }) if not formats and data.get('fivemin_id'): return self.url_result('5min:%s' % data['fivemin_id']) self._sort_formats(formats) return { 'id': video_id, 'title': video_title, 'description': description, 'formats': formats, 'duration': duration, 'upload_date': upload_date, 'thumbnails': thumbnails, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/glide.py
youtube_dl/extractor/glide.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class GlideIE(InfoExtractor): IE_DESC = 'Glide mobile video messages (glide.me)' _VALID_URL = r'https?://share\.glide\.me/(?P<id>[A-Za-z0-9\-=_+]+)' _TEST = { 'url': 'http://share.glide.me/UZF8zlmuQbe4mr+7dCiQ0w==', 'md5': '4466372687352851af2d131cfaa8a4c7', 'info_dict': { 'id': 'UZF8zlmuQbe4mr+7dCiQ0w==', 'ext': 'mp4', 'title': "Damon's Glide message", 'thumbnail': r're:^https?://.*?\.cloudfront\.net/.*\.jpg$', } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._html_search_regex( r'<title>(.+?)</title>', webpage, 'title', default=None) or self._og_search_title(webpage) video_url = self._proto_relative_url(self._search_regex( r'<source[^>]+src=(["\'])(?P<url>.+?)\1', webpage, 'video URL', default=None, group='url')) or self._og_search_video_url(webpage) thumbnail = self._proto_relative_url(self._search_regex( r'<img[^>]+id=["\']video-thumbnail["\'][^>]+src=(["\'])(?P<url>.+?)\1', webpage, 'thumbnail url', default=None, group='url')) or self._og_search_thumbnail(webpage) return { 'id': video_id, 'title': title, 'url': video_url, 'thumbnail': thumbnail, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/cinchcast.py
youtube_dl/extractor/cinchcast.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( unified_strdate, xpath_text, ) class CinchcastIE(InfoExtractor): _VALID_URL = r'https?://player\.cinchcast\.com/.*?(?:assetId|show_id)=(?P<id>[0-9]+)' _TESTS = [{ 'url': 'http://player.cinchcast.com/?show_id=5258197&platformId=1&assetType=single', 'info_dict': { 'id': '5258197', 'ext': 'mp3', 'title': 'Train Your Brain to Up Your Game with Coach Mandy', 'upload_date': '20130816', }, }, { # Actual test is run in generic, look for undergroundwellness 'url': 'http://player.cinchcast.com/?platformId=1&#038;assetType=single&#038;assetId=7141703', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) doc = self._download_xml( 'http://www.blogtalkradio.com/playerasset/mrss?assetType=single&assetId=%s' % video_id, video_id) item = doc.find('.//item') title = xpath_text(item, './title', fatal=True) date_str = xpath_text( item, './{http://developer.longtailvideo.com/trac/}date') upload_date = unified_strdate(date_str, day_first=False) # duration is present but wrong formats = [{ 'format_id': 'main', 'url': item.find('./{http://search.yahoo.com/mrss/}content').attrib['url'], }] backup_url = xpath_text( item, './{http://developer.longtailvideo.com/trac/}backupContent') if backup_url: formats.append({ 'preference': 2, # seems to be more reliable 'format_id': 'backup', 'url': backup_url, }) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'upload_date': upload_date, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/laola1tv.py
youtube_dl/extractor/laola1tv.py
# coding: utf-8 from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..utils import ( ExtractorError, unified_strdate, urlencode_postdata, xpath_element, xpath_text, update_url_query, js_to_json, ) class Laola1TvEmbedIE(InfoExtractor): IE_NAME = 'laola1tv:embed' _VALID_URL = r'https?://(?:www\.)?laola1\.tv/titanplayer\.php\?.*?\bvideoid=(?P<id>\d+)' _TESTS = [{ # flashvars.premium = "false"; 'url': 'https://www.laola1.tv/titanplayer.php?videoid=708065&type=V&lang=en&portal=int&customer=1024', 'info_dict': { 'id': '708065', 'ext': 'mp4', 'title': 'MA Long CHN - FAN Zhendong CHN', 'uploader': 'ITTF - International Table Tennis Federation', 'upload_date': '20161211', }, }] def _extract_token_url(self, stream_access_url, video_id, data): return self._download_json( self._proto_relative_url(stream_access_url, 'https:'), video_id, headers={ 'Content-Type': 'application/json', }, data=json.dumps(data).encode())['data']['stream-access'][0] def _extract_formats(self, token_url, video_id): token_doc = self._download_xml( token_url, video_id, 'Downloading token', headers=self.geo_verification_headers()) token_attrib = xpath_element(token_doc, './/token').attrib if token_attrib['status'] != '0': raise ExtractorError( 'Token error: %s' % token_attrib['comment'], expected=True) formats = self._extract_akamai_formats( '%s?hdnea=%s' % (token_attrib['url'], token_attrib['auth']), video_id) self._sort_formats(formats) return formats def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) flash_vars = self._search_regex( r'(?s)flashvars\s*=\s*({.+?});', webpage, 'flash vars') def get_flashvar(x, *args, **kwargs): flash_var = self._search_regex( r'%s\s*:\s*"([^"]+)"' % x, flash_vars, x, default=None) if not flash_var: flash_var = self._search_regex([ r'flashvars\.%s\s*=\s*"([^"]+)"' % x, r'%s\s*=\s*"([^"]+)"' % x], webpage, x, *args, **kwargs) return flash_var hd_doc = 
self._download_xml( 'http://www.laola1.tv/server/hd_video.php', video_id, query={ 'play': get_flashvar('streamid'), 'partner': get_flashvar('partnerid'), 'portal': get_flashvar('portalid'), 'lang': get_flashvar('sprache'), 'v5ident': '', }) _v = lambda x, **k: xpath_text(hd_doc, './/video/' + x, **k) title = _v('title', fatal=True) token_url = None premium = get_flashvar('premium', default=None) if premium: token_url = update_url_query( _v('url', fatal=True), { 'timestamp': get_flashvar('timestamp'), 'auth': get_flashvar('auth'), }) else: data_abo = urlencode_postdata( dict((i, v) for i, v in enumerate(_v('req_liga_abos').split(',')))) stream_access_url = update_url_query( 'https://club.laola1.tv/sp/laola1/api/v3/user/session/premium/player/stream-access', { 'videoId': _v('id'), 'target': self._search_regex(r'vs_target = (\d+);', webpage, 'vs target'), 'label': _v('label'), 'area': _v('area'), }) token_url = self._extract_token_url(stream_access_url, video_id, data_abo) formats = self._extract_formats(token_url, video_id) categories_str = _v('meta_sports') categories = categories_str.split(',') if categories_str else [] is_live = _v('islive') == 'true' return { 'id': video_id, 'title': self._live_title(title) if is_live else title, 'upload_date': unified_strdate(_v('time_date')), 'uploader': _v('meta_organisation'), 'categories': categories, 'is_live': is_live, 'formats': formats, } class Laola1TvBaseIE(Laola1TvEmbedIE): def _extract_video(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) if 'Dieser Livestream ist bereits beendet.' 
in webpage: raise ExtractorError('This live stream has already finished.', expected=True) conf = self._parse_json(self._search_regex( r'(?s)conf\s*=\s*({.+?});', webpage, 'conf'), display_id, transform_source=lambda s: js_to_json(re.sub(r'shareurl:.+,', '', s))) video_id = conf['videoid'] config = self._download_json(conf['configUrl'], video_id, query={ 'videoid': video_id, 'partnerid': conf['partnerid'], 'language': conf.get('language', ''), 'portal': conf.get('portalid', ''), }) error = config.get('error') if error: raise ExtractorError('%s said: %s' % (self.IE_NAME, error), expected=True) video_data = config['video'] title = video_data['title'] is_live = video_data.get('isLivestream') and video_data.get('isLive') meta = video_data.get('metaInformation') sports = meta.get('sports') categories = sports.split(',') if sports else [] token_url = self._extract_token_url( video_data['streamAccess'], video_id, video_data['abo']['required']) formats = self._extract_formats(token_url, video_id) return { 'id': video_id, 'display_id': display_id, 'title': self._live_title(title) if is_live else title, 'description': video_data.get('description'), 'thumbnail': video_data.get('image'), 'categories': categories, 'formats': formats, 'is_live': is_live, } class Laola1TvIE(Laola1TvBaseIE): IE_NAME = 'laola1tv' _VALID_URL = r'https?://(?:www\.)?laola1\.tv/[a-z]+-[a-z]+/[^/]+/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'http://www.laola1.tv/de-de/video/straubing-tigers-koelner-haie/227883.html', 'info_dict': { 'id': '227883', 'display_id': 'straubing-tigers-koelner-haie', 'ext': 'flv', 'title': 'Straubing Tigers - Kölner Haie', 'upload_date': '20140912', 'is_live': False, 'categories': ['Eishockey'], }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.laola1.tv/de-de/video/straubing-tigers-koelner-haie', 'info_dict': { 'id': '464602', 'display_id': 'straubing-tigers-koelner-haie', 'ext': 'flv', 'title': 'Straubing Tigers - Kölner Haie', 'upload_date': '20160129', 'is_live': 
False, 'categories': ['Eishockey'], }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.laola1.tv/de-de/livestream/2016-03-22-belogorie-belgorod-trentino-diatec-lde', 'info_dict': { 'id': '487850', 'display_id': '2016-03-22-belogorie-belgorod-trentino-diatec-lde', 'ext': 'flv', 'title': 'Belogorie BELGOROD - TRENTINO Diatec', 'upload_date': '20160322', 'uploader': 'CEV - Europäischer Volleyball Verband', 'is_live': True, 'categories': ['Volleyball'], }, 'params': { 'skip_download': True, }, 'skip': 'This live stream has already finished.', }] def _real_extract(self, url): return self._extract_video(url) class EHFTVIE(Laola1TvBaseIE): IE_NAME = 'ehftv' _VALID_URL = r'https?://(?:www\.)?ehftv\.com/[a-z]+(?:-[a-z]+)?/[^/]+/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://www.ehftv.com/int/video/paris-saint-germain-handball-pge-vive-kielce/1166761', 'info_dict': { 'id': '1166761', 'display_id': 'paris-saint-germain-handball-pge-vive-kielce', 'ext': 'mp4', 'title': 'Paris Saint-Germain Handball - PGE Vive Kielce', 'is_live': False, 'categories': ['Handball'], }, 'params': { 'skip_download': True, }, }] def _real_extract(self, url): return self._extract_video(url) class ITTFIE(InfoExtractor): _VALID_URL = r'https?://tv\.ittf\.com/video/[^/]+/(?P<id>\d+)' _TEST = { 'url': 'https://tv.ittf.com/video/peng-wang-wei-matsudaira-kenta/951802', 'only_matching': True, } def _real_extract(self, url): return self.url_result( update_url_query('https://www.laola1.tv/titanplayer.php', { 'videoid': self._match_id(url), 'type': 'V', 'lang': 'en', 'portal': 'int', 'customer': 1024, }), Laola1TvEmbedIE.ie_key())
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/cbc.py
youtube_dl/extractor/cbc.py
# coding: utf-8 from __future__ import unicode_literals import hashlib import json import re from xml.sax.saxutils import escape from .common import InfoExtractor from ..compat import ( compat_str, compat_HTTPError, ) from ..utils import ( js_to_json, smuggle_url, try_get, xpath_text, xpath_element, xpath_with_ns, find_xpath_attr, orderedSet, parse_duration, parse_iso8601, parse_age_limit, strip_or_none, int_or_none, ExtractorError, ) class CBCIE(InfoExtractor): IE_NAME = 'cbc.ca' _VALID_URL = r'https?://(?:www\.)?cbc\.ca/(?!player/)(?:[^/]+/)+(?P<id>[^/?#]+)' _TESTS = [{ # with mediaId 'url': 'http://www.cbc.ca/22minutes/videos/clips-season-23/don-cherry-play-offs', 'md5': '97e24d09672fc4cf56256d6faa6c25bc', 'info_dict': { 'id': '2682904050', 'ext': 'mp4', 'title': 'Don Cherry – All-Stars', 'description': 'Don Cherry has a bee in his bonnet about AHL player John Scott because that guy’s got heart.', 'timestamp': 1454463000, 'upload_date': '20160203', 'uploader': 'CBCC-NEW', }, 'skip': 'Geo-restricted to Canada', }, { # with clipId, feed available via tpfeed.cbc.ca and feed.theplatform.com 'url': 'http://www.cbc.ca/22minutes/videos/22-minutes-update/22-minutes-update-episode-4', 'md5': '162adfa070274b144f4fdc3c3b8207db', 'info_dict': { 'id': '2414435309', 'ext': 'mp4', 'title': '22 Minutes Update: What Not To Wear Quebec', 'description': "This week's latest Canadian top political story is What Not To Wear Quebec.", 'upload_date': '20131025', 'uploader': 'CBCC-NEW', 'timestamp': 1382717907, }, }, { # with clipId, feed only available via tpfeed.cbc.ca 'url': 'http://www.cbc.ca/archives/entry/1978-robin-williams-freestyles-on-90-minutes-live', 'md5': '0274a90b51a9b4971fe005c63f592f12', 'info_dict': { 'id': '2487345465', 'ext': 'mp4', 'title': 'Robin Williams freestyles on 90 Minutes Live', 'description': 'Wacky American comedian Robin Williams shows off his infamous "freestyle" comedic talents while being interviewed on CBC\'s 90 Minutes Live.', 'upload_date': 
'19780210', 'uploader': 'CBCC-NEW', 'timestamp': 255977160, }, }, { # multiple iframes 'url': 'http://www.cbc.ca/natureofthings/blog/birds-eye-view-from-vancouvers-burrard-street-bridge-how-we-got-the-shot', 'playlist': [{ 'md5': '377572d0b49c4ce0c9ad77470e0b96b4', 'info_dict': { 'id': '2680832926', 'ext': 'mp4', 'title': 'An Eagle\'s-Eye View Off Burrard Bridge', 'description': 'Hercules the eagle flies from Vancouver\'s Burrard Bridge down to a nearby park with a mini-camera strapped to his back.', 'upload_date': '20160201', 'timestamp': 1454342820, 'uploader': 'CBCC-NEW', }, }, { 'md5': '415a0e3f586113894174dfb31aa5bb1a', 'info_dict': { 'id': '2658915080', 'ext': 'mp4', 'title': 'Fly like an eagle!', 'description': 'Eagle equipped with a mini camera flies from the world\'s tallest tower', 'upload_date': '20150315', 'timestamp': 1426443984, 'uploader': 'CBCC-NEW', }, }], 'skip': 'Geo-restricted to Canada', }, { # multiple CBC.APP.Caffeine.initInstance(...) 'url': 'http://www.cbc.ca/news/canada/calgary/dog-indoor-exercise-winter-1.3928238', 'info_dict': { 'title': 'Keep Rover active during the deep freeze with doggie pushups and other fun indoor tasks', 'id': 'dog-indoor-exercise-winter-1.3928238', 'description': 'md5:c18552e41726ee95bd75210d1ca9194c', }, 'playlist_mincount': 6, }] @classmethod def suitable(cls, url): return False if CBCPlayerIE.suitable(url) else super(CBCIE, cls).suitable(url) def _extract_player_init(self, player_init, display_id): player_info = self._parse_json(player_init, display_id, js_to_json) media_id = player_info.get('mediaId') if not media_id: clip_id = player_info['clipId'] feed = self._download_json( 'http://tpfeed.cbc.ca/f/ExhSPC/vms_5akSXx4Ng_Zn?byCustomValue={:mpsReleases}{%s}' % clip_id, clip_id, fatal=False) if feed: media_id = try_get(feed, lambda x: x['entries'][0]['guid'], compat_str) if not media_id: media_id = self._download_json( 'http://feed.theplatform.com/f/h9dtGB/punlNGjMlc1F?fields=id&byContent=byReleases%3DbyId%253D' 
+ clip_id, clip_id)['entries'][0]['id'].split('/')[-1] return self.url_result('cbcplayer:%s' % media_id, 'CBCPlayer', media_id) def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) title = self._og_search_title(webpage, default=None) or self._html_search_meta( 'twitter:title', webpage, 'title', default=None) or self._html_search_regex( r'<title>([^<]+)</title>', webpage, 'title', fatal=False) entries = [ self._extract_player_init(player_init, display_id) for player_init in re.findall(r'CBC\.APP\.Caffeine\.initInstance\(({.+?})\);', webpage)] media_ids = [] for media_id_re in ( r'<iframe[^>]+src="[^"]+?mediaId=(\d+)"', r'<div[^>]+\bid=["\']player-(\d+)', r'guid["\']\s*:\s*["\'](\d+)'): media_ids.extend(re.findall(media_id_re, webpage)) entries.extend([ self.url_result('cbcplayer:%s' % media_id, 'CBCPlayer', media_id) for media_id in orderedSet(media_ids)]) return self.playlist_result( entries, display_id, strip_or_none(title), self._og_search_description(webpage)) class CBCPlayerIE(InfoExtractor): IE_NAME = 'cbc.ca:player' _VALID_URL = r'(?:cbcplayer:|https?://(?:www\.)?cbc\.ca/(?:player/play/|i/caffeine/syndicate/\?mediaId=))(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.cbc.ca/player/play/2683190193', 'md5': '64d25f841ddf4ddb28a235338af32e2c', 'info_dict': { 'id': '2683190193', 'ext': 'mp4', 'title': 'Gerry Runs a Sweat Shop', 'description': 'md5:b457e1c01e8ff408d9d801c1c2cd29b0', 'timestamp': 1455071400, 'upload_date': '20160210', 'uploader': 'CBCC-NEW', }, 'skip': 'Geo-restricted to Canada', }, { # Redirected from http://www.cbc.ca/player/AudioMobile/All%20in%20a%20Weekend%20Montreal/ID/2657632011/ 'url': 'http://www.cbc.ca/player/play/2657631896', 'md5': 'e5e708c34ae6fca156aafe17c43e8b75', 'info_dict': { 'id': '2657631896', 'ext': 'mp3', 'title': 'CBC Montreal is organizing its first ever community hackathon!', 'description': 'The modern technology we tend to depend on so heavily, is never without it\'s 
share of hiccups and headaches. Next weekend - CBC Montreal will be getting members of the public for its first Hackathon.', 'timestamp': 1425704400, 'upload_date': '20150307', 'uploader': 'CBCC-NEW', }, }, { 'url': 'http://www.cbc.ca/player/play/2164402062', 'md5': '33fcd8f6719b9dd60a5e73adcb83b9f6', 'info_dict': { 'id': '2164402062', 'ext': 'mp4', 'title': 'Cancer survivor four times over', 'description': 'Tim Mayer has beaten three different forms of cancer four times in five years.', 'timestamp': 1320410746, 'upload_date': '20111104', 'uploader': 'CBCC-NEW', }, }] def _real_extract(self, url): video_id = self._match_id(url) return { '_type': 'url_transparent', 'ie_key': 'ThePlatform', 'url': smuggle_url( 'http://link.theplatform.com/s/ExhSPC/media/guid/2655402169/%s?mbr=true&formats=MPEG4,FLV,MP3' % video_id, { 'force_smil_url': True }), 'id': video_id, } class CBCWatchBaseIE(InfoExtractor): _device_id = None _device_token = None _API_BASE_URL = 'https://api-cbc.cloud.clearleap.com/cloffice/client/' _NS_MAP = { 'media': 'http://search.yahoo.com/mrss/', 'clearleap': 'http://www.clearleap.com/namespace/clearleap/1.0/', } _GEO_COUNTRIES = ['CA'] _LOGIN_URL = 'https://api.loginradius.com/identity/v2/auth/login' _TOKEN_URL = 'https://cloud-api.loginradius.com/sso/jwt/api/token' _API_KEY = '3f4beddd-2061-49b0-ae80-6f1f2ed65b37' _NETRC_MACHINE = 'cbcwatch' def _signature(self, email, password): data = json.dumps({ 'email': email, 'password': password, }).encode() headers = {'content-type': 'application/json'} query = {'apikey': self._API_KEY} resp = self._download_json(self._LOGIN_URL, None, data=data, headers=headers, query=query) access_token = resp['access_token'] # token query = { 'access_token': access_token, 'apikey': self._API_KEY, 'jwtapp': 'jwt', } resp = self._download_json(self._TOKEN_URL, None, headers=headers, query=query) return resp['signature'] def _call_api(self, path, video_id): url = path if path.startswith('http') else self._API_BASE_URL + path for 
_ in range(2): try: result = self._download_xml(url, video_id, headers={ 'X-Clearleap-DeviceId': self._device_id, 'X-Clearleap-DeviceToken': self._device_token, }) except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401: # Device token has expired, re-acquiring device token self._register_device() continue raise error_message = xpath_text(result, 'userMessage') or xpath_text(result, 'systemMessage') if error_message: raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message)) return result def _real_initialize(self): if self._valid_device_token(): return device = self._downloader.cache.load( 'cbcwatch', self._cache_device_key()) or {} self._device_id, self._device_token = device.get('id'), device.get('token') if self._valid_device_token(): return self._register_device() def _valid_device_token(self): return self._device_id and self._device_token def _cache_device_key(self): email, _ = self._get_login_info() return '%s_device' % hashlib.sha256(email.encode()).hexdigest() if email else 'device' def _register_device(self): result = self._download_xml( self._API_BASE_URL + 'device/register', None, 'Acquiring device token', data=b'<device><type>web</type></device>') self._device_id = xpath_text(result, 'deviceId', fatal=True) email, password = self._get_login_info() if email and password: signature = self._signature(email, password) data = '<login><token>{0}</token><device><deviceId>{1}</deviceId><type>web</type></device></login>'.format( escape(signature), escape(self._device_id)).encode() url = self._API_BASE_URL + 'device/login' result = self._download_xml( url, None, data=data, headers={'content-type': 'application/xml'}) self._device_token = xpath_text(result, 'token', fatal=True) else: self._device_token = xpath_text(result, 'deviceToken', fatal=True) self._downloader.cache.store( 'cbcwatch', self._cache_device_key(), { 'id': self._device_id, 'token': self._device_token, }) def _parse_rss_feed(self, rss): channel = 
xpath_element(rss, 'channel', fatal=True) def _add_ns(path): return xpath_with_ns(path, self._NS_MAP) entries = [] for item in channel.findall('item'): guid = xpath_text(item, 'guid', fatal=True) title = xpath_text(item, 'title', fatal=True) media_group = xpath_element(item, _add_ns('media:group'), fatal=True) content = xpath_element(media_group, _add_ns('media:content'), fatal=True) content_url = content.attrib['url'] thumbnails = [] for thumbnail in media_group.findall(_add_ns('media:thumbnail')): thumbnail_url = thumbnail.get('url') if not thumbnail_url: continue thumbnails.append({ 'id': thumbnail.get('profile'), 'url': thumbnail_url, 'width': int_or_none(thumbnail.get('width')), 'height': int_or_none(thumbnail.get('height')), }) timestamp = None release_date = find_xpath_attr( item, _add_ns('media:credit'), 'role', 'releaseDate') if release_date is not None: timestamp = parse_iso8601(release_date.text) entries.append({ '_type': 'url_transparent', 'url': content_url, 'id': guid, 'title': title, 'description': xpath_text(item, 'description'), 'timestamp': timestamp, 'duration': int_or_none(content.get('duration')), 'age_limit': parse_age_limit(xpath_text(item, _add_ns('media:rating'))), 'episode': xpath_text(item, _add_ns('clearleap:episode')), 'episode_number': int_or_none(xpath_text(item, _add_ns('clearleap:episodeInSeason'))), 'series': xpath_text(item, _add_ns('clearleap:series')), 'season_number': int_or_none(xpath_text(item, _add_ns('clearleap:season'))), 'thumbnails': thumbnails, 'ie_key': 'CBCWatchVideo', }) return self.playlist_result( entries, xpath_text(channel, 'guid'), xpath_text(channel, 'title'), xpath_text(channel, 'description')) class CBCWatchVideoIE(CBCWatchBaseIE): IE_NAME = 'cbc.ca:watch:video' _VALID_URL = r'https?://api-cbc\.cloud\.clearleap\.com/cloffice/client/web/play/?\?.*?\bcontentId=(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})' _TEST = { # geo-restricted to Canada, bypassable 'url': 
'https://api-cbc.cloud.clearleap.com/cloffice/client/web/play/?contentId=3c84472a-1eea-4dee-9267-2655d5055dcf&categoryId=ebc258f5-ee40-4cca-b66b-ba6bd55b7235', 'only_matching': True, } def _real_extract(self, url): video_id = self._match_id(url) result = self._call_api(url, video_id) m3u8_url = xpath_text(result, 'url', fatal=True) formats = self._extract_m3u8_formats(re.sub(r'/([^/]+)/[^/?]+\.m3u8', r'/\1/\1.m3u8', m3u8_url), video_id, 'mp4', fatal=False) if len(formats) < 2: formats = self._extract_m3u8_formats(m3u8_url, video_id, 'mp4') for f in formats: format_id = f.get('format_id') if format_id.startswith('AAC'): f['acodec'] = 'aac' elif format_id.startswith('AC3'): f['acodec'] = 'ac-3' self._sort_formats(formats) info = { 'id': video_id, 'title': video_id, 'formats': formats, } rss = xpath_element(result, 'rss') if rss: info.update(self._parse_rss_feed(rss)['entries'][0]) del info['url'] del info['_type'] del info['ie_key'] return info class CBCWatchIE(CBCWatchBaseIE): IE_NAME = 'cbc.ca:watch' _VALID_URL = r'https?://(?:gem|watch)\.cbc\.ca/(?:[^/]+/)+(?P<id>[0-9a-f-]+)' _TESTS = [{ # geo-restricted to Canada, bypassable 'url': 'http://watch.cbc.ca/doc-zone/season-6/customer-disservice/38e815a-009e3ab12e4', 'info_dict': { 'id': '9673749a-5e77-484c-8b62-a1092a6b5168', 'ext': 'mp4', 'title': 'Customer (Dis)Service', 'description': 'md5:8bdd6913a0fe03d4b2a17ebe169c7c87', 'upload_date': '20160219', 'timestamp': 1455840000, }, 'params': { # m3u8 download 'skip_download': True, 'format': 'bestvideo', }, }, { # geo-restricted to Canada, bypassable 'url': 'http://watch.cbc.ca/arthur/all/1ed4b385-cd84-49cf-95f0-80f004680057', 'info_dict': { 'id': '1ed4b385-cd84-49cf-95f0-80f004680057', 'title': 'Arthur', 'description': 'Arthur, the sweetest 8-year-old aardvark, and his pals solve all kinds of problems with humour, kindness and teamwork.', }, 'playlist_mincount': 30, }, { 'url': 
'https://gem.cbc.ca/media/this-hour-has-22-minutes/season-26/episode-20/38e815a-0108c6c6a42', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) rss = self._call_api('web/browse/' + video_id, video_id) return self._parse_rss_feed(rss) class CBCOlympicsIE(InfoExtractor): IE_NAME = 'cbc.ca:olympics' _VALID_URL = r'https?://olympics\.cbc\.ca/video/[^/]+/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://olympics.cbc.ca/video/whats-on-tv/olympic-morning-featuring-the-opening-ceremony/', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) video_id = self._hidden_inputs(webpage)['videoId'] video_doc = self._download_xml( 'https://olympics.cbc.ca/videodata/%s.xml' % video_id, video_id) title = xpath_text(video_doc, 'title', fatal=True) is_live = xpath_text(video_doc, 'kind') == 'Live' if is_live: title = self._live_title(title) formats = [] for video_source in video_doc.findall('videoSources/videoSource'): uri = xpath_text(video_source, 'uri') if not uri: continue tokenize = self._download_json( 'https://olympics.cbc.ca/api/api-akamai/tokenize', video_id, data=json.dumps({ 'VideoSource': uri, }).encode(), headers={ 'Content-Type': 'application/json', 'Referer': url, # d3.VideoPlayer._init in https://olympics.cbc.ca/components/script/base.js 'Cookie': '_dvp=TK:C0ObxjerU', # AKAMAI CDN cookie }, fatal=False) if not tokenize: continue content_url = tokenize['ContentUrl'] video_source_format = video_source.get('format') if video_source_format == 'IIS': formats.extend(self._extract_ism_formats( content_url, video_id, ism_id=video_source_format, fatal=False)) else: formats.extend(self._extract_m3u8_formats( content_url, video_id, 'mp4', 'm3u8' if is_live else 'm3u8_native', m3u8_id=video_source_format, fatal=False)) self._sort_formats(formats) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': xpath_text(video_doc, 'description'), 
'thumbnail': xpath_text(video_doc, 'thumbnailUrl'), 'duration': parse_duration(xpath_text(video_doc, 'duration')), 'formats': formats, 'is_live': is_live, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/corus.py
youtube_dl/extractor/corus.py
# coding: utf-8 from __future__ import unicode_literals import re from .theplatform import ThePlatformFeedIE from ..utils import ( dict_get, ExtractorError, float_or_none, int_or_none, ) class CorusIE(ThePlatformFeedIE): _VALID_URL = r'''(?x) https?:// (?:www\.)? (?P<domain> (?: globaltv| etcanada| seriesplus| wnetwork| ytv )\.com| (?: hgtv| foodnetwork| slice| history| showcase| bigbrothercanada| abcspark| disney(?:channel|lachaine) )\.ca ) /(?:[^/]+/)* (?: video\.html\?.*?\bv=| videos?/(?:[^/]+/)*(?:[a-z0-9-]+-)? ) (?P<id> [\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}| (?:[A-Z]{4})?\d{12,20} ) ''' _TESTS = [{ 'url': 'http://www.hgtv.ca/shows/bryan-inc/videos/movie-night-popcorn-with-bryan-870923331648/', 'info_dict': { 'id': '870923331648', 'ext': 'mp4', 'title': 'Movie Night Popcorn with Bryan', 'description': 'Bryan whips up homemade popcorn, the old fashion way for Jojo and Lincoln.', 'upload_date': '20170206', 'timestamp': 1486392197, }, 'params': { 'format': 'bestvideo', 'skip_download': True, }, 'expected_warnings': ['Failed to parse JSON'], }, { 'url': 'http://www.foodnetwork.ca/shows/chopped/video/episode/chocolate-obsession/video.html?v=872683587753', 'only_matching': True, }, { 'url': 'http://etcanada.com/video/873675331955/meet-the-survivor-game-changers-castaways-part-2/', 'only_matching': True, }, { 'url': 'http://www.history.ca/the-world-without-canada/video/full-episodes/natural-resources/video.html?v=955054659646#video', 'only_matching': True, }, { 'url': 'http://www.showcase.ca/eyewitness/video/eyewitness++106/video.html?v=955070531919&p=1&s=da#video', 'only_matching': True, }, { 'url': 'http://www.bigbrothercanada.ca/video/1457812035894/', 'only_matching': True }, { 'url': 'https://www.bigbrothercanada.ca/video/big-brother-canada-704/1457812035894/', 'only_matching': True }, { 'url': 'https://www.seriesplus.com/emissions/dre-mary-mort-sur-ordonnance/videos/deux-coeurs-battant/SERP0055626330000200/', 'only_matching': True }, { 'url': 
'https://www.disneychannel.ca/shows/gabby-duran-the-unsittables/video/crybaby-duran-clip/2f557eec-0588-11ea-ae2b-e2c6776b770e/', 'only_matching': True }] _GEO_BYPASS = False _SITE_MAP = { 'globaltv': 'series', 'etcanada': 'series', 'foodnetwork': 'food', 'bigbrothercanada': 'series', 'disneychannel': 'disneyen', 'disneylachaine': 'disneyfr', } def _real_extract(self, url): domain, video_id = re.match(self._VALID_URL, url).groups() site = domain.split('.')[0] path = self._SITE_MAP.get(site, site) if path != 'series': path = 'migration/' + path video = self._download_json( 'https://globalcontent.corusappservices.com/templates/%s/playlist/' % path, video_id, query={'byId': video_id}, headers={'Accept': 'application/json'})[0] title = video['title'] formats = [] for source in video.get('sources', []): smil_url = source.get('file') if not smil_url: continue source_type = source.get('type') note = 'Downloading%s smil file' % (' ' + source_type if source_type else '') resp = self._download_webpage( smil_url, video_id, note, fatal=False, headers=self.geo_verification_headers()) if not resp: continue error = self._parse_json(resp, video_id, fatal=False) if error: if error.get('exception') == 'GeoLocationBlocked': self.raise_geo_restricted(countries=['CA']) raise ExtractorError(error['description']) smil = self._parse_xml(resp, video_id, fatal=False) if smil is None: continue namespace = self._parse_smil_namespace(smil) formats.extend(self._parse_smil_formats( smil, smil_url, video_id, namespace)) if not formats and video.get('drm'): raise ExtractorError('This video is DRM protected.', expected=True) self._sort_formats(formats) subtitles = {} for track in video.get('tracks', []): track_url = track.get('file') if not track_url: continue lang = 'fr' if site in ('disneylachaine', 'seriesplus') else 'en' subtitles.setdefault(lang, []).append({'url': track_url}) metadata = video.get('metadata') or {} get_number = lambda x: int_or_none(video.get('pl1$' + x) or metadata.get(x + 
'Number')) return { 'id': video_id, 'title': title, 'formats': formats, 'thumbnail': dict_get(video, ('defaultThumbnailUrl', 'thumbnail', 'image')), 'description': video.get('description'), 'timestamp': int_or_none(video.get('availableDate'), 1000), 'subtitles': subtitles, 'duration': float_or_none(metadata.get('duration')), 'series': dict_get(video, ('show', 'pl1$show')), 'season_number': get_number('season'), 'episode_number': get_number('episode'), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/vrv.py
youtube_dl/extractor/vrv.py
# coding: utf-8 from __future__ import unicode_literals import base64 import json import hashlib import hmac import random import string import time from .common import InfoExtractor from ..compat import ( compat_HTTPError, compat_urllib_parse_urlencode, compat_urllib_parse, ) from ..utils import ( ExtractorError, float_or_none, int_or_none, ) class VRVBaseIE(InfoExtractor): _API_DOMAIN = None _API_PARAMS = {} _CMS_SIGNING = {} _TOKEN = None _TOKEN_SECRET = '' def _call_api(self, path, video_id, note, data=None): # https://tools.ietf.org/html/rfc5849#section-3 base_url = self._API_DOMAIN + '/core/' + path query = [ ('oauth_consumer_key', self._API_PARAMS['oAuthKey']), ('oauth_nonce', ''.join([random.choice(string.ascii_letters) for _ in range(32)])), ('oauth_signature_method', 'HMAC-SHA1'), ('oauth_timestamp', int(time.time())), ] if self._TOKEN: query.append(('oauth_token', self._TOKEN)) encoded_query = compat_urllib_parse_urlencode(query) headers = self.geo_verification_headers() if data: data = json.dumps(data).encode() headers['Content-Type'] = 'application/json' base_string = '&'.join([ 'POST' if data else 'GET', compat_urllib_parse.quote(base_url, ''), compat_urllib_parse.quote(encoded_query, '')]) oauth_signature = base64.b64encode(hmac.new( (self._API_PARAMS['oAuthSecret'] + '&' + self._TOKEN_SECRET).encode('ascii'), base_string.encode(), hashlib.sha1).digest()).decode() encoded_query += '&oauth_signature=' + compat_urllib_parse.quote(oauth_signature, '') try: return self._download_json( '?'.join([base_url, encoded_query]), video_id, note='Downloading %s JSON metadata' % note, headers=headers, data=data) except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401: raise ExtractorError(json.loads(e.cause.read().decode())['message'], expected=True) raise def _call_cms(self, path, video_id, note): if not self._CMS_SIGNING: index = self._call_api('index', video_id, 'CMS Signing') self._CMS_SIGNING = index.get('cms_signing') or 
{} if not self._CMS_SIGNING: for signing_policy in index.get('signing_policies', []): signing_path = signing_policy.get('path') if signing_path and signing_path.startswith('/cms/'): name, value = signing_policy.get('name'), signing_policy.get('value') if name and value: self._CMS_SIGNING[name] = value return self._download_json( self._API_DOMAIN + path, video_id, query=self._CMS_SIGNING, note='Downloading %s JSON metadata' % note, headers=self.geo_verification_headers()) def _get_cms_resource(self, resource_key, video_id): return self._call_api( 'cms_resource', video_id, 'resource path', data={ 'resource_key': resource_key, })['__links__']['cms_resource']['href'] def _real_initialize(self): webpage = self._download_webpage( 'https://vrv.co/', None, headers=self.geo_verification_headers()) self._API_PARAMS = self._parse_json(self._search_regex( [ r'window\.__APP_CONFIG__\s*=\s*({.+?})(?:</script>|;)', r'window\.__APP_CONFIG__\s*=\s*({.+})' ], webpage, 'app config'), None)['cxApiParams'] self._API_DOMAIN = self._API_PARAMS.get('apiDomain', 'https://api.vrv.co') class VRVIE(VRVBaseIE): IE_NAME = 'vrv' _VALID_URL = r'https?://(?:www\.)?vrv\.co/watch/(?P<id>[A-Z0-9]+)' _TESTS = [{ 'url': 'https://vrv.co/watch/GR9PNZ396/Hidden-America-with-Jonah-Ray:BOSTON-WHERE-THE-PAST-IS-THE-PRESENT', 'info_dict': { 'id': 'GR9PNZ396', 'ext': 'mp4', 'title': 'BOSTON: WHERE THE PAST IS THE PRESENT', 'description': 'md5:4ec8844ac262ca2df9e67c0983c6b83f', 'uploader_id': 'seeso', }, 'params': { # m3u8 download 'skip_download': True, }, }, { # movie listing 'url': 'https://vrv.co/watch/G6NQXZ1J6/Lily-CAT', 'info_dict': { 'id': 'G6NQXZ1J6', 'title': 'Lily C.A.T', 'description': 'md5:988b031e7809a6aeb60968be4af7db07', }, 'playlist_count': 2, }] _NETRC_MACHINE = 'vrv' def _real_initialize(self): super(VRVIE, self)._real_initialize() email, password = self._get_login_info() if email is None: return token_credentials = self._call_api( 'authenticate/by:credentials', None, 'Token Credentials', 
data={ 'email': email, 'password': password, }) self._TOKEN = token_credentials['oauth_token'] self._TOKEN_SECRET = token_credentials['oauth_token_secret'] def _extract_vrv_formats(self, url, video_id, stream_format, audio_lang, hardsub_lang): if not url or stream_format not in ('hls', 'dash', 'adaptive_hls'): return [] stream_id_list = [] if audio_lang: stream_id_list.append('audio-%s' % audio_lang) if hardsub_lang: stream_id_list.append('hardsub-%s' % hardsub_lang) format_id = stream_format if stream_id_list: format_id += '-' + '-'.join(stream_id_list) if 'hls' in stream_format: adaptive_formats = self._extract_m3u8_formats( url, video_id, 'mp4', m3u8_id=format_id, note='Downloading %s information' % format_id, fatal=False) elif stream_format == 'dash': adaptive_formats = self._extract_mpd_formats( url, video_id, mpd_id=format_id, note='Downloading %s information' % format_id, fatal=False) if audio_lang: for f in adaptive_formats: if f.get('acodec') != 'none': f['language'] = audio_lang return adaptive_formats def _real_extract(self, url): video_id = self._match_id(url) object_data = self._call_cms(self._get_cms_resource( 'cms:/objects/' + video_id, video_id), video_id, 'object')['items'][0] resource_path = object_data['__links__']['resource']['href'] video_data = self._call_cms(resource_path, video_id, 'video') title = video_data['title'] description = video_data.get('description') if video_data.get('__class__') == 'movie_listing': items = self._call_cms( video_data['__links__']['movie_listing/movies']['href'], video_id, 'movie listing').get('items') or [] if len(items) != 1: entries = [] for item in items: item_id = item.get('id') if not item_id: continue entries.append(self.url_result( 'https://vrv.co/watch/' + item_id, self.ie_key(), item_id, item.get('title'))) return self.playlist_result(entries, video_id, title, description) video_data = items[0] streams_path = video_data['__links__'].get('streams', {}).get('href') if not streams_path: 
self.raise_login_required() streams_json = self._call_cms(streams_path, video_id, 'streams') audio_locale = streams_json.get('audio_locale') formats = [] for stream_type, streams in streams_json.get('streams', {}).items(): if stream_type in ('adaptive_hls', 'adaptive_dash'): for stream in streams.values(): formats.extend(self._extract_vrv_formats( stream.get('url'), video_id, stream_type.split('_')[1], audio_locale, stream.get('hardsub_locale'))) self._sort_formats(formats) subtitles = {} for k in ('captions', 'subtitles'): for subtitle in streams_json.get(k, {}).values(): subtitle_url = subtitle.get('url') if not subtitle_url: continue subtitles.setdefault(subtitle.get('locale', 'en-US'), []).append({ 'url': subtitle_url, 'ext': subtitle.get('format', 'ass'), }) thumbnails = [] for thumbnail in video_data.get('images', {}).get('thumbnails', []): thumbnail_url = thumbnail.get('source') if not thumbnail_url: continue thumbnails.append({ 'url': thumbnail_url, 'width': int_or_none(thumbnail.get('width')), 'height': int_or_none(thumbnail.get('height')), }) return { 'id': video_id, 'title': title, 'formats': formats, 'subtitles': subtitles, 'thumbnails': thumbnails, 'description': description, 'duration': float_or_none(video_data.get('duration_ms'), 1000), 'uploader_id': video_data.get('channel_id'), 'series': video_data.get('series_title'), 'season': video_data.get('season_title'), 'season_number': int_or_none(video_data.get('season_number')), 'season_id': video_data.get('season_id'), 'episode': title, 'episode_number': int_or_none(video_data.get('episode_number')), 'episode_id': video_data.get('production_episode_id'), } class VRVSeriesIE(VRVBaseIE): IE_NAME = 'vrv:series' _VALID_URL = r'https?://(?:www\.)?vrv\.co/series/(?P<id>[A-Z0-9]+)' _TEST = { 'url': 'https://vrv.co/series/G68VXG3G6/The-Perfect-Insider', 'info_dict': { 'id': 'G68VXG3G6', }, 'playlist_mincount': 11, } def _real_extract(self, url): series_id = self._match_id(url) seasons_path = 
self._get_cms_resource( 'cms:/seasons?series_id=' + series_id, series_id) seasons_data = self._call_cms(seasons_path, series_id, 'seasons') entries = [] for season in seasons_data.get('items', []): episodes_path = season['__links__']['season/episodes']['href'] episodes = self._call_cms(episodes_path, series_id, 'episodes') for episode in episodes.get('items', []): episode_id = episode['id'] entries.append(self.url_result( 'https://vrv.co/watch/' + episode_id, 'VRV', episode_id, episode.get('title'))) return self.playlist_result(entries, series_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/eporner.py
youtube_dl/extractor/eporner.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( encode_base_n, ExtractorError, int_or_none, merge_dicts, parse_duration, str_to_int, url_or_none, ) class EpornerIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?eporner\.com/(?:(?:hd-porn|embed)/|video-)(?P<id>\w+)(?:/(?P<display_id>[\w-]+))?' _TESTS = [{ 'url': 'http://www.eporner.com/hd-porn/95008/Infamous-Tiffany-Teen-Strip-Tease-Video/', 'md5': '39d486f046212d8e1b911c52ab4691f8', 'info_dict': { 'id': 'qlDUmNsj6VS', 'display_id': 'Infamous-Tiffany-Teen-Strip-Tease-Video', 'ext': 'mp4', 'title': 'Infamous Tiffany Teen Strip Tease Video', 'description': 'md5:764f39abf932daafa37485eb46efa152', 'timestamp': 1232520922, 'upload_date': '20090121', 'duration': 1838, 'view_count': int, 'age_limit': 18, }, 'params': { 'proxy': '127.0.0.1:8118' } }, { # New (May 2016) URL layout 'url': 'http://www.eporner.com/hd-porn/3YRUtzMcWn0/Star-Wars-XXX-Parody/', 'only_matching': True, }, { 'url': 'http://www.eporner.com/hd-porn/3YRUtzMcWn0', 'only_matching': True, }, { 'url': 'http://www.eporner.com/embed/3YRUtzMcWn0', 'only_matching': True, }, { 'url': 'https://www.eporner.com/video-FJsA19J3Y3H/one-of-the-greats/', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') display_id = mobj.group('display_id') or video_id webpage, urlh = self._download_webpage_handle(url, display_id) video_id = self._match_id(urlh.geturl()) hash = self._search_regex( r'hash\s*[:=]\s*["\']([\da-f]{32})', webpage, 'hash') title = self._og_search_title(webpage, default=None) or self._html_search_regex( r'<title>(.+?) 
- EPORNER', webpage, 'title') # Reverse engineered from vjs.js def calc_hash(s): return ''.join((encode_base_n(int(s[lb:lb + 8], 16), 36) for lb in range(0, 32, 8))) video = self._download_json( 'http://www.eporner.com/xhr/video/%s' % video_id, display_id, note='Downloading video JSON', query={ 'hash': calc_hash(hash), 'device': 'generic', 'domain': 'www.eporner.com', 'fallback': 'false', }) if video.get('available') is False: raise ExtractorError( '%s said: %s' % (self.IE_NAME, video['message']), expected=True) sources = video['sources'] formats = [] for kind, formats_dict in sources.items(): if not isinstance(formats_dict, dict): continue for format_id, format_dict in formats_dict.items(): if not isinstance(format_dict, dict): continue src = url_or_none(format_dict.get('src')) if not src or not src.startswith('http'): continue if kind == 'hls': formats.extend(self._extract_m3u8_formats( src, display_id, 'mp4', entry_protocol='m3u8_native', m3u8_id=kind, fatal=False)) else: height = int_or_none(self._search_regex( r'(\d+)[pP]', format_id, 'height', default=None)) fps = int_or_none(self._search_regex( r'(\d+)fps', format_id, 'fps', default=None)) formats.append({ 'url': src, 'format_id': format_id, 'height': height, 'fps': fps, }) self._sort_formats(formats) json_ld = self._search_json_ld(webpage, display_id, default={}) duration = parse_duration(self._html_search_meta( 'duration', webpage, default=None)) view_count = str_to_int(self._search_regex( r'id=["\']cinemaviews1["\'][^>]*>\s*([0-9,]+)', webpage, 'view count', default=None)) return merge_dicts(json_ld, { 'id': video_id, 'display_id': display_id, 'title': title, 'duration': duration, 'view_count': view_count, 'formats': formats, 'age_limit': 18, })
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/audioboom.py
youtube_dl/extractor/audioboom.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    clean_html,
    float_or_none,
)


class AudioBoomIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?audioboom\.com/(?:boos|posts)/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'https://audioboom.com/posts/7398103-asim-chaudhry',
        'md5': '7b00192e593ff227e6a315486979a42d',
        'info_dict': {
            'id': '7398103',
            'ext': 'mp3',
            'title': 'Asim Chaudhry',
            'description': 'md5:2f3fef17dacc2595b5362e1d7d3602fc',
            'duration': 4000.99,
            'uploader': 'Sue Perkins: An hour or so with...',
            'uploader_url': r're:https?://(?:www\.)?audioboom\.com/channel/perkins',
        }
    }, {
        'url': 'https://audioboom.com/posts/4279833-3-09-2016-czaban-hour-3?t=0',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Extract an AudioBoom clip, preferring the embedded JSON clip store
        and falling back to OpenGraph/meta tags for every field."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        clip = None

        clip_store = self._parse_json(
            self._html_search_regex(
                r'data-new-clip-store=(["\'])(?P<json>{.+?})\1',
                webpage, 'clip store', default='{}', group='json'),
            video_id, fatal=False)
        if clip_store:
            clips = clip_store.get('clips')
            if clips and isinstance(clips, list) and isinstance(clips[0], dict):
                clip = clips[0]

        def from_clip(field):
            # Returns None when no clip store was found.
            if clip:
                return clip.get(field)

        audio_url = from_clip('clipURLPriorToLoading') or self._og_search_property(
            'audio', webpage, 'audio url')
        title = from_clip('title') or self._html_search_meta(
            ['og:title', 'og:audio:title', 'audio_title'], webpage)
        description = from_clip('description') or clean_html(from_clip('formattedDescription')) or self._og_search_description(webpage)

        duration = float_or_none(from_clip('duration') or self._html_search_meta(
            'weibo:audio:duration', webpage))

        uploader = from_clip('author') or self._html_search_meta(
            ['og:audio:artist', 'twitter:audio:artist_name', 'audio_artist'], webpage, 'uploader')
        uploader_url = from_clip('author_url') or self._html_search_meta(
            'audioboo:channel', webpage, 'uploader url')

        return {
            'id': video_id,
            'url': audio_url,
            'title': title,
            'description': description,
            'duration': duration,
            'uploader': uploader,
            'uploader_url': uploader_url,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/ebaumsworld.py
youtube_dl/extractor/ebaumsworld.py
from __future__ import unicode_literals from .common import InfoExtractor class EbaumsWorldIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?ebaumsworld\.com/videos/[^/]+/(?P<id>\d+)' _TEST = { 'url': 'http://www.ebaumsworld.com/videos/a-giant-python-opens-the-door/83367677/', 'info_dict': { 'id': '83367677', 'ext': 'mp4', 'title': 'A Giant Python Opens The Door', 'description': 'This is how nightmares start...', 'uploader': 'jihadpizza', }, } def _real_extract(self, url): video_id = self._match_id(url) config = self._download_xml( 'http://www.ebaumsworld.com/video/player/%s' % video_id, video_id) video_url = config.find('file').text return { 'id': video_id, 'title': config.find('title').text, 'url': video_url, 'description': config.find('description').text, 'thumbnail': config.find('image').text, 'uploader': config.find('username').text, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/yesjapan.py
youtube_dl/extractor/yesjapan.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( HEADRequest, get_element_by_attribute, parse_iso8601, ) class YesJapanIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?yesjapan\.com/video/(?P<slug>[A-Za-z0-9\-]*)_(?P<id>[A-Za-z0-9]+)\.html' _TEST = { 'url': 'http://www.yesjapan.com/video/japanese-in-5-20-wa-and-ga-particle-usages_726497834.html', 'md5': 'f0be416314e5be21a12b499b330c21cf', 'info_dict': { 'id': '726497834', 'title': 'Japanese in 5! #20 - WA And GA Particle Usages', 'description': 'This should clear up some issues most students of Japanese encounter with WA and GA....', 'ext': 'mp4', 'timestamp': 1416391590, 'upload_date': '20141119', 'thumbnail': r're:^https?://.*\.jpg$', } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._og_search_title(webpage) video_url = self._og_search_video_url(webpage) description = self._og_search_description(webpage) thumbnail = self._og_search_thumbnail(webpage) timestamp = None submit_info = get_element_by_attribute('class', 'pm-submit-data', webpage) if submit_info: timestamp = parse_iso8601(self._search_regex( r'datetime="([^"]+)"', submit_info, 'upload date', fatal=False, default=None)) # attempt to resolve the final URL in order to get a proper extension redirect_req = HEADRequest(video_url) req = self._request_webpage( redirect_req, video_id, note='Resolving final URL', errnote='Could not resolve final URL', fatal=False) if req: video_url = req.geturl() formats = [{ 'format_id': 'sd', 'url': video_url, }] return { 'id': video_id, 'title': title, 'formats': formats, 'description': description, 'timestamp': timestamp, 'thumbnail': thumbnail, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/ndtv.py
youtube_dl/extractor/ndtv.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse_unquote_plus
)
from ..utils import (
    parse_duration,
    remove_end,
    unified_strdate,
    urljoin
)


class NDTVIE(InfoExtractor):
    _VALID_URL = r'https?://(?:[^/]+\.)?ndtv\.com/(?:[^/]+/)*videos?/?(?:[^/]+/)*[^/?^&]+-(?P<id>\d+)'

    _TESTS = [
        {
            'url': 'https://khabar.ndtv.com/video/show/prime-time/prime-time-ill-system-and-poor-education-468818',
            'md5': '78efcf3880ef3fd9b83d405ca94a38eb',
            'info_dict': {
                'id': '468818',
                'ext': 'mp4',
                'title': "प्राइम टाइम: सिस्टम बीमार, स्कूल बदहाल",
                'description': 'md5:f410512f1b49672e5695dea16ef2731d',
                'upload_date': '20170928',
                'duration': 2218,
                'thumbnail': r're:https?://.*\.jpg',
            }
        },
        {
            # __filename is url
            'url': 'http://movies.ndtv.com/videos/cracker-free-diwali-wishes-from-karan-johar-kriti-sanon-other-stars-470304',
            'md5': 'f1d709352305b44443515ac56b45aa46',
            'info_dict': {
                'id': '470304',
                'ext': 'mp4',
                'title': "Cracker-Free Diwali Wishes From Karan Johar, Kriti Sanon & Other Stars",
                'description': 'md5:f115bba1adf2f6433fa7c1ade5feb465',
                'upload_date': '20171019',
                'duration': 137,
                'thumbnail': r're:https?://.*\.jpg',
            }
        },
        {
            'url': 'https://www.ndtv.com/video/news/news/delhi-s-air-quality-status-report-after-diwali-is-very-poor-470372',
            'only_matching': True
        },
        {
            'url': 'https://auto.ndtv.com/videos/the-cnb-daily-october-13-2017-469935',
            'only_matching': True
        },
        {
            'url': 'https://sports.ndtv.com/cricket/videos/2nd-t20i-rock-thrown-at-australia-cricket-team-bus-after-win-over-india-469764',
            'only_matching': True
        },
        {
            'url': 'http://gadgets.ndtv.com/videos/uncharted-the-lost-legacy-review-465568',
            'only_matching': True
        },
        {
            'url': 'http://profit.ndtv.com/videos/news/video-indian-economy-on-very-solid-track-international-monetary-fund-chief-470040',
            'only_matching': True
        },
        {
            'url': 'http://food.ndtv.com/video-basil-seeds-coconut-porridge-419083',
            'only_matching': True
        },
        {
            'url': 'https://doctor.ndtv.com/videos/top-health-stories-of-the-week-467396',
            'only_matching': True
        },
        {
            'url': 'https://swirlster.ndtv.com/video/how-to-make-friends-at-work-469324',
            'only_matching': True
        }
    ]

    def _real_extract(self, url):
        """Extract an NDTV video across the various NDTV sub-sites, which
        expose metadata through slightly different page variables."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # '__title' does not contain extra words such as sub-site name, "Video" etc.
        title = compat_urllib_parse_unquote_plus(
            self._search_regex(r"__title\s*=\s*'([^']+)'", webpage, 'title', default=None)
            or self._og_search_title(webpage))

        filename = self._search_regex(
            r"(?:__)?filename\s*[:=]\s*'([^']+)'", webpage, 'video filename')
        # in "movies" sub-site pages, filename is URL
        video_url = urljoin('https://ndtvod.bc-ssl.cdn.bitgravity.com/23372/ndtv/', filename.lstrip('/'))

        # "doctor" sub-site has MM:SS format
        duration = parse_duration(self._search_regex(
            r"(?:__)?duration\s*[:=]\s*'([^']+)'", webpage, 'duration', fatal=False))

        # "sports", "doctor", "swirlster" sub-sites don't have 'publish-date'
        upload_date = unified_strdate(self._html_search_meta(
            'publish-date', webpage, 'upload date', default=None) or self._html_search_meta(
            'uploadDate', webpage, 'upload date', default=None) or self._search_regex(
            r'datePublished"\s*:\s*"([^"]+)"', webpage, 'upload date', fatal=False))

        description = remove_end(self._og_search_description(webpage), ' (Read more)')

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'description': description,
            'thumbnail': self._og_search_thumbnail(webpage),
            'duration': duration,
            'upload_date': upload_date,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/sportdeutschland.py
youtube_dl/extractor/sportdeutschland.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import (
    compat_parse_qs,
    compat_urllib_parse_urlparse,
)
from ..utils import (
    clean_html,
    float_or_none,
    int_or_none,
    parse_iso8601,
    strip_or_none,
    try_get,
)


class SportDeutschlandIE(InfoExtractor):
    _VALID_URL = r'https?://sportdeutschland\.tv/(?P<id>(?:[^/]+/)?[^?#/&]+)'
    _TESTS = [{
        'url': 'https://sportdeutschland.tv/badminton/re-live-deutsche-meisterschaften-2020-halbfinals?playlistId=0',
        'info_dict': {
            'id': '5318cac0275701382770543d7edaf0a0',
            'ext': 'mp4',
            'title': 'Re-live: Deutsche Meisterschaften 2020 - Halbfinals - Teil 1',
            'duration': 16106.36,
        },
        'params': {
            'noplaylist': True,
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'https://sportdeutschland.tv/badminton/re-live-deutsche-meisterschaften-2020-halbfinals?playlistId=0',
        'info_dict': {
            'id': 'c6e2fdd01f63013854c47054d2ab776f',
            'title': 'Re-live: Deutsche Meisterschaften 2020 - Halbfinals',
            'description': 'md5:5263ff4c31c04bb780c9f91130b48530',
            'duration': 31397,
        },
        'playlist_count': 2,
    }, {
        'url': 'https://sportdeutschland.tv/freeride-world-tour-2021-fieberbrunn-oesterreich',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Extract a sportdeutschland.tv asset: a single video, or a
        multi-video playlist when the asset carries several parts."""
        display_id = self._match_id(url)
        data = self._download_json(
            'https://backend.sportdeutschland.tv/api/permalinks/' + display_id,
            display_id, query={'access_token': 'true'})
        asset = data['asset']
        title = (asset.get('title') or asset['label']).strip()
        asset_id = asset.get('id') or asset.get('uuid')
        info = {
            'id': asset_id,
            'title': title,
            'description': clean_html(asset.get('body') or asset.get('description')) or asset.get('teaser'),
            'duration': int_or_none(asset.get('seconds')),
        }
        videos = asset.get('videos') or []
        if len(videos) > 1:
            # `playlistId` in the URL query selects a single part of the asset.
            playlist_id = compat_parse_qs(compat_urllib_parse_urlparse(url).query).get('playlistId', [None])[0]
            if playlist_id:
                if self._downloader.params.get('noplaylist'):
                    videos = [videos[int(playlist_id)]]
                    self.to_screen('Downloading just a single video because of --no-playlist')
                else:
                    self.to_screen('Downloading playlist %s - add --no-playlist to just download video' % asset_id)

            def entries():
                for i, video in enumerate(videos, 1):
                    video_id = video.get('uuid')
                    video_url = video.get('url')
                    if not (video_id and video_url):
                        continue
                    formats = self._extract_m3u8_formats(
                        video_url.replace('.smil', '.m3u8'), video_id, 'mp4', fatal=False)
                    if not formats:
                        continue
                    yield {
                        'id': video_id,
                        'formats': formats,
                        'title': title + ' - ' + (video.get('label') or 'Teil %d' % i),
                        'duration': float_or_none(video.get('duration')),
                    }
            info.update({
                '_type': 'multi_video',
                'entries': entries(),
            })
        else:
            formats = self._extract_m3u8_formats(
                videos[0]['url'].replace('.smil', '.m3u8'), asset_id, 'mp4')
            section_title = strip_or_none(try_get(data, lambda x: x['section']['title']))
            info.update({
                'formats': formats,
                'display_id': asset.get('permalink'),
                'thumbnail': try_get(asset, lambda x: x['images'][0]),
                'categories': [section_title] if section_title else None,
                'view_count': int_or_none(asset.get('views')),
                'is_live': asset.get('is_live') is True,
                'timestamp': parse_iso8601(asset.get('date') or asset.get('published_at')),
            })
        return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/dctp.py
youtube_dl/extractor/dctp.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    float_or_none,
    int_or_none,
    unified_timestamp,
    url_or_none,
)


class DctpTvIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?dctp\.tv/(?:#/)?filme/(?P<id>[^/?#&]+)'
    _TESTS = [{
        # 4x3
        'url': 'http://www.dctp.tv/filme/videoinstallation-fuer-eine-kaufhausfassade/',
        'md5': '3ffbd1556c3fe210724d7088fad723e3',
        'info_dict': {
            'id': '95eaa4f33dad413aa17b4ee613cccc6c',
            'display_id': 'videoinstallation-fuer-eine-kaufhausfassade',
            'ext': 'm4v',
            'title': 'Videoinstallation für eine Kaufhausfassade',
            'description': 'Kurzfilm',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 71.24,
            'timestamp': 1302172322,
            'upload_date': '20110407',
        },
    }, {
        # 16x9
        'url': 'http://www.dctp.tv/filme/sind-youtuber-die-besseren-lehrer/',
        'only_matching': True,
    }]

    _BASE_URL = 'http://dctp-ivms2-restapi.s3.amazonaws.com'

    def _real_extract(self, url):
        """Extract a dctp.tv film via its versioned REST API, building
        HLS/S3/HTTP format URLs from the media UUID."""
        display_id = self._match_id(url)

        version = self._download_json(
            '%s/version.json' % self._BASE_URL, display_id,
            'Downloading version JSON')

        restapi_base = '%s/%s/restapi' % (
            self._BASE_URL, version['version_name'])

        info = self._download_json(
            '%s/slugs/%s.json' % (restapi_base, display_id), display_id,
            'Downloading video info JSON')

        media = self._download_json(
            '%s/media/%s.json' % (restapi_base, compat_str(info['object_id'])),
            display_id, 'Downloading media JSON')

        uuid = media['uuid']
        title = media['title']
        is_wide = media.get('is_wide')
        formats = []

        def add_formats(suffix):
            # %%s stays a literal %s so the template can be filled with a host below.
            templ = 'https://%%s/%s_dctp_%s.m4v' % (uuid, suffix)
            formats.extend([{
                'format_id': 'hls-' + suffix,
                'url': templ % 'cdn-segments.dctp.tv' + '/playlist.m3u8',
                'protocol': 'm3u8_native',
            }, {
                'format_id': 's3-' + suffix,
                'url': templ % 'completed-media.s3.amazonaws.com',
            }, {
                'format_id': 'http-' + suffix,
                'url': templ % 'cdn-media.dctp.tv',
            }])

        add_formats('0500_' + ('16x9' if is_wide else '4x3'))
        if is_wide:
            add_formats('720p')

        thumbnails = []
        images = media.get('images')
        if isinstance(images, list):
            for image in images:
                if not isinstance(image, dict):
                    continue
                image_url = url_or_none(image.get('url'))
                if not image_url:
                    continue
                thumbnails.append({
                    'url': image_url,
                    'width': int_or_none(image.get('width')),
                    'height': int_or_none(image.get('height')),
                })

        return {
            'id': uuid,
            'display_id': display_id,
            'title': title,
            'alt_title': media.get('subtitle'),
            'description': media.get('description') or media.get('teaser'),
            'timestamp': unified_timestamp(media.get('created')),
            'duration': float_or_none(media.get('duration_in_ms'), scale=1000),
            'thumbnails': thumbnails,
            'formats': formats,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/vodplatform.py
youtube_dl/extractor/vodplatform.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import unescapeHTML class VODPlatformIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:www\.)?vod-platform\.net|embed\.kwikmotion\.com)/[eE]mbed/(?P<id>[^/?#]+)' _TESTS = [{ # from http://www.lbcgroup.tv/watch/chapter/29143/52844/%D8%A7%D9%84%D9%86%D8%B5%D8%B1%D8%A9-%D9%81%D9%8A-%D8%B6%D9%8A%D8%A7%D9%81%D8%A9-%D8%A7%D9%84%D9%80-cnn/ar 'url': 'http://vod-platform.net/embed/RufMcytHDolTH1MuKHY9Fw', 'md5': '1db2b7249ce383d6be96499006e951fc', 'info_dict': { 'id': 'RufMcytHDolTH1MuKHY9Fw', 'ext': 'mp4', 'title': 'LBCi News_ النصرة في ضيافة الـ "سي.أن.أن"', } }, { 'url': 'http://embed.kwikmotion.com/embed/RufMcytHDolTH1MuKHY9Fw', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = unescapeHTML(self._og_search_title(webpage)) hidden_inputs = self._hidden_inputs(webpage) formats = self._extract_wowza_formats( hidden_inputs.get('HiddenmyhHlsLink') or hidden_inputs['HiddenmyDashLink'], video_id, skip_protocols=['f4m', 'smil']) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'thumbnail': hidden_inputs.get('HiddenThumbnail') or self._og_search_thumbnail(webpage), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/__init__.py
youtube_dl/extractor/__init__.py
from __future__ import unicode_literals try: from .lazy_extractors import * from .lazy_extractors import _ALL_CLASSES _LAZY_LOADER = True except ImportError: _LAZY_LOADER = False from .extractors import * _ALL_CLASSES = [ klass for name, klass in globals().items() if name.endswith('IE') and name != 'GenericIE' ] _ALL_CLASSES.append(GenericIE) def gen_extractor_classes(): """ Return a list of supported extractors. The order does matter; the first extractor matched is the one handling the URL. """ return _ALL_CLASSES def gen_extractors(): """ Return a list of an instance of every supported extractor. The order does matter; the first extractor matched is the one handling the URL. """ return [klass() for klass in gen_extractor_classes()] def list_extractors(age_limit): """ Return a list of extractors that are suitable for the given age, sorted by extractor ID. """ return sorted( filter(lambda ie: ie.is_suitable(age_limit), gen_extractors()), key=lambda ie: ie.IE_NAME.lower()) def get_info_extractor(ie_name): """Returns the info extractor class with the given ie_name""" return globals()[ie_name + 'IE']
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/mofosex.py
youtube_dl/extractor/mofosex.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( int_or_none, str_to_int, unified_strdate, ) from .keezmovies import KeezMoviesIE class MofosexIE(KeezMoviesIE): _VALID_URL = r'https?://(?:www\.)?mofosex\.com/videos/(?P<id>\d+)/(?P<display_id>[^/?#&.]+)\.html' _TESTS = [{ 'url': 'http://www.mofosex.com/videos/318131/amateur-teen-playing-and-masturbating-318131.html', 'md5': '558fcdafbb63a87c019218d6e49daf8a', 'info_dict': { 'id': '318131', 'display_id': 'amateur-teen-playing-and-masturbating-318131', 'ext': 'mp4', 'title': 'amateur teen playing and masturbating', 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20121114', 'view_count': int, 'like_count': int, 'dislike_count': int, 'age_limit': 18, } }, { # This video is no longer available 'url': 'http://www.mofosex.com/videos/5018/japanese-teen-music-video.html', 'only_matching': True, }] def _real_extract(self, url): webpage, info = self._extract_info(url) view_count = str_to_int(self._search_regex( r'VIEWS:</span>\s*([\d,.]+)', webpage, 'view count', fatal=False)) like_count = int_or_none(self._search_regex( r'id=["\']amountLikes["\'][^>]*>(\d+)', webpage, 'like count', fatal=False)) dislike_count = int_or_none(self._search_regex( r'id=["\']amountDislikes["\'][^>]*>(\d+)', webpage, 'like count', fatal=False)) upload_date = unified_strdate(self._html_search_regex( r'Added:</span>([^<]+)', webpage, 'upload date', fatal=False)) info.update({ 'view_count': view_count, 'like_count': like_count, 'dislike_count': dislike_count, 'upload_date': upload_date, 'thumbnail': self._og_search_thumbnail(webpage), }) return info class MofosexEmbedIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?mofosex\.com/embed/?\?.*?\bvideoid=(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.mofosex.com/embed/?videoid=318131&referrer=KM', 'only_matching': True, }] @staticmethod def _extract_urls(webpage): return re.findall( 
r'<iframe[^>]+\bsrc=["\']((?:https?:)?//(?:www\.)?mofosex\.com/embed/?\?.*?\bvideoid=\d+)', webpage) def _real_extract(self, url): video_id = self._match_id(url) return self.url_result( 'http://www.mofosex.com/videos/{0}/{0}.html'.format(video_id), ie=MofosexIE.ie_key(), video_id=video_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/hotstar.py
youtube_dl/extractor/hotstar.py
# coding: utf-8 from __future__ import unicode_literals import hashlib import hmac import json import re import time import uuid from .common import InfoExtractor from ..compat import ( compat_HTTPError, compat_str, ) from ..utils import ( determine_ext, ExtractorError, int_or_none, str_or_none, try_get, url_or_none, ) class HotStarBaseIE(InfoExtractor): _AKAMAI_ENCRYPTION_KEY = b'\x05\xfc\x1a\x01\xca\xc9\x4b\xc4\x12\xfc\x53\x12\x07\x75\xf9\xee' def _call_api_impl(self, path, video_id, headers, query, data=None): st = int(time.time()) exp = st + 6000 auth = 'st=%d~exp=%d~acl=/*' % (st, exp) auth += '~hmac=' + hmac.new(self._AKAMAI_ENCRYPTION_KEY, auth.encode(), hashlib.sha256).hexdigest() h = {'hotstarauth': auth} h.update(headers) return self._download_json( 'https://api.hotstar.com/' + path, video_id, headers=h, query=query, data=data) def _call_api(self, path, video_id, query_name='contentId'): response = self._call_api_impl(path, video_id, { 'x-country-code': 'IN', 'x-platform-code': 'JIO', }, { query_name: video_id, 'tas': 10000, }) if response['statusCode'] != 'OK': raise ExtractorError( response['body']['message'], expected=True) return response['body']['results'] def _call_api_v2(self, path, video_id, headers, query=None, data=None): h = {'X-Request-Id': compat_str(uuid.uuid4())} h.update(headers) try: return self._call_api_impl( path, video_id, h, query, data) except ExtractorError as e: if isinstance(e.cause, compat_HTTPError): if e.cause.code == 402: self.raise_login_required() message = self._parse_json(e.cause.read().decode(), video_id)['message'] if message in ('Content not available in region', 'Country is not supported'): raise self.raise_geo_restricted(message) raise ExtractorError(message) raise e class HotStarIE(HotStarBaseIE): IE_NAME = 'hotstar' _VALID_URL = r'https?://(?:www\.)?hotstar\.com/(?:.+[/-])?(?P<id>\d{10})' _TESTS = [{ # contentData 'url': 'https://www.hotstar.com/can-you-not-spread-rumours/1000076273', 'info_dict': { 'id': 
'1000076273', 'ext': 'mp4', 'title': 'Can You Not Spread Rumours?', 'description': 'md5:c957d8868e9bc793ccb813691cc4c434', 'timestamp': 1447248600, 'upload_date': '20151111', 'duration': 381, }, 'params': { # m3u8 download 'skip_download': True, } }, { # contentDetail 'url': 'https://www.hotstar.com/movies/radha-gopalam/1000057157', 'only_matching': True, }, { 'url': 'http://www.hotstar.com/sports/cricket/rajitha-sizzles-on-debut-with-329/2001477583', 'only_matching': True, }, { 'url': 'http://www.hotstar.com/1000000515', 'only_matching': True, }, { # only available via api v2 'url': 'https://www.hotstar.com/tv/ek-bhram-sarvagun-sampanna/s-2116/janhvi-targets-suman/1000234847', 'only_matching': True, }, { 'url': 'https://www.hotstar.com/in/tv/start-music/1260005217/cooks-vs-comalis/1100039717', 'only_matching': True, }] _GEO_BYPASS = False _DEVICE_ID = None _USER_TOKEN = None def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) app_state = self._parse_json(self._search_regex( r'<script>window\.APP_STATE\s*=\s*({.+?})</script>', webpage, 'app state'), video_id) video_data = {} getters = list( lambda x, k=k: x['initialState']['content%s' % k]['content'] for k in ('Data', 'Detail') ) for v in app_state.values(): content = try_get(v, getters, dict) if content and content.get('contentId') == video_id: video_data = content break title = video_data['title'] if video_data.get('drmProtected'): raise ExtractorError('This video is DRM protected.', expected=True) headers = {'Referer': url} formats = [] geo_restricted = False if not self._USER_TOKEN: self._DEVICE_ID = compat_str(uuid.uuid4()) self._USER_TOKEN = self._call_api_v2('um/v3/users', video_id, { 'X-HS-Platform': 'PCTV', 'Content-Type': 'application/json', }, data=json.dumps({ 'device_ids': [{ 'id': self._DEVICE_ID, 'type': 'device_id', }], }).encode())['user_identity'] playback_sets = self._call_api_v2( 'play/v2/playback/content/' + video_id, video_id, { 
'X-HS-Platform': 'web', 'X-HS-AppVersion': '6.99.1', 'X-HS-UserToken': self._USER_TOKEN, }, query={ 'device-id': self._DEVICE_ID, 'desired-config': 'encryption:plain', 'os-name': 'Windows', 'os-version': '10', })['data']['playBackSets'] for playback_set in playback_sets: if not isinstance(playback_set, dict): continue format_url = url_or_none(playback_set.get('playbackUrl')) if not format_url: continue format_url = re.sub( r'(?<=//staragvod)(\d)', r'web\1', format_url) tags = str_or_none(playback_set.get('tagsCombination')) or '' if tags and 'encryption:plain' not in tags: continue ext = determine_ext(format_url) try: if 'package:hls' in tags or ext == 'm3u8': formats.extend(self._extract_m3u8_formats( format_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', headers=headers)) elif 'package:dash' in tags or ext == 'mpd': formats.extend(self._extract_mpd_formats( format_url, video_id, mpd_id='dash', headers=headers)) elif ext == 'f4m': # produce broken files pass else: formats.append({ 'url': format_url, 'width': int_or_none(playback_set.get('width')), 'height': int_or_none(playback_set.get('height')), }) except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403: geo_restricted = True continue if not formats and geo_restricted: self.raise_geo_restricted(countries=['IN']) self._sort_formats(formats) for f in formats: f.setdefault('http_headers', {}).update(headers) image = try_get(video_data, lambda x: x['image']['h'], compat_str) return { 'id': video_id, 'title': title, 'thumbnail': 'https://img1.hotstarext.com/image/upload/' + image if image else None, 'description': video_data.get('description'), 'duration': int_or_none(video_data.get('duration')), 'timestamp': int_or_none(video_data.get('broadcastDate') or video_data.get('startDate')), 'formats': formats, 'channel': video_data.get('channelName'), 'channel_id': str_or_none(video_data.get('channelId')), 'series': video_data.get('showName'), 'season': 
video_data.get('seasonName'), 'season_number': int_or_none(video_data.get('seasonNo')), 'season_id': str_or_none(video_data.get('seasonId')), 'episode': title, 'episode_number': int_or_none(video_data.get('episodeNo')), } class HotStarPlaylistIE(HotStarBaseIE): IE_NAME = 'hotstar:playlist' _VALID_URL = r'https?://(?:www\.)?hotstar\.com/(?:[a-z]{2}/)?tv/[^/]+/s-\w+/list/[^/]+/t-(?P<id>\w+)' _TESTS = [{ 'url': 'https://www.hotstar.com/tv/savdhaan-india/s-26/list/popular-clips/t-3_2_26', 'info_dict': { 'id': '3_2_26', }, 'playlist_mincount': 20, }, { 'url': 'https://www.hotstar.com/tv/savdhaan-india/s-26/list/extras/t-2480', 'only_matching': True, }, { 'url': 'https://www.hotstar.com/us/tv/masterchef-india/s-830/list/episodes/t-1_2_830', 'only_matching': True, }] def _real_extract(self, url): playlist_id = self._match_id(url) collection = self._call_api('o/v1/tray/find', playlist_id, 'uqId') entries = [ self.url_result( 'https://www.hotstar.com/%s' % video['contentId'], ie=HotStarIE.ie_key(), video_id=video['contentId']) for video in collection['assets']['items'] if video.get('contentId')] return self.playlist_result(entries, playlist_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tudou.py
youtube_dl/extractor/tudou.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class TudouPlaylistIE(InfoExtractor): IE_NAME = 'tudou:playlist' _VALID_URL = r'https?://(?:www\.)?tudou\.com/listplay/(?P<id>[\w-]{11})\.html' _TESTS = [{ 'url': 'http://www.tudou.com/listplay/zzdE77v6Mmo.html', 'info_dict': { 'id': 'zzdE77v6Mmo', }, 'playlist_mincount': 209, }] def _real_extract(self, url): playlist_id = self._match_id(url) playlist_data = self._download_json( 'http://www.tudou.com/tvp/plist.action?lcode=%s' % playlist_id, playlist_id) entries = [self.url_result( 'http://www.tudou.com/programs/view/%s' % item['icode'], 'Tudou', item['icode'], item['kw']) for item in playlist_data['items']] return self.playlist_result(entries, playlist_id) class TudouAlbumIE(InfoExtractor): IE_NAME = 'tudou:album' _VALID_URL = r'https?://(?:www\.)?tudou\.com/album(?:cover|play)/(?P<id>[\w-]{11})' _TESTS = [{ 'url': 'http://www.tudou.com/albumplay/v5qckFJvNJg.html', 'info_dict': { 'id': 'v5qckFJvNJg', }, 'playlist_mincount': 45, }] def _real_extract(self, url): album_id = self._match_id(url) album_data = self._download_json( 'http://www.tudou.com/tvp/alist.action?acode=%s' % album_id, album_id) entries = [self.url_result( 'http://www.tudou.com/programs/view/%s' % item['icode'], 'Tudou', item['icode'], item['kw']) for item in album_data['items']] return self.playlist_result(entries, album_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/dvtv.py
youtube_dl/extractor/dvtv.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( determine_ext, ExtractorError, int_or_none, js_to_json, mimetype2ext, try_get, unescapeHTML, parse_iso8601, ) class DVTVIE(InfoExtractor): IE_NAME = 'dvtv' IE_DESC = 'http://video.aktualne.cz/' _VALID_URL = r'https?://video\.aktualne\.cz/(?:[^/]+/)+r~(?P<id>[0-9a-f]{32})' _TESTS = [{ 'url': 'http://video.aktualne.cz/dvtv/vondra-o-ceskem-stoleti-pri-pohledu-na-havla-mi-bylo-trapne/r~e5efe9ca855511e4833a0025900fea04/', 'md5': '67cb83e4a955d36e1b5d31993134a0c2', 'info_dict': { 'id': 'dc0768de855511e49e4b0025900fea04', 'ext': 'mp4', 'title': 'Vondra o Českém století: Při pohledu na Havla mi bylo trapně', 'duration': 1484, 'upload_date': '20141217', 'timestamp': 1418792400, } }, { 'url': 'http://video.aktualne.cz/dvtv/dvtv-16-12-2014-utok-talibanu-boj-o-kliniku-uprchlici/r~973eb3bc854e11e498be002590604f2e/', 'info_dict': { 'title': r'DVTV 16. 12. 2014: útok Talibanu, boj o kliniku, uprchlíci', 'id': '973eb3bc854e11e498be002590604f2e', }, 'playlist': [{ 'md5': 'da7ca6be4935532241fa9520b3ad91e4', 'info_dict': { 'id': 'b0b40906854d11e4bdad0025900fea04', 'ext': 'mp4', 'title': 'Drtinová Veselovský TV 16. 12. 
2014: Témata dne', 'description': 'md5:0916925dea8e30fe84222582280b47a0', 'timestamp': 1418760010, 'upload_date': '20141216', } }, { 'md5': '5f7652a08b05009c1292317b449ffea2', 'info_dict': { 'id': '420ad9ec854a11e4bdad0025900fea04', 'ext': 'mp4', 'title': 'Školní masakr možná změní boj s Talibanem, říká novinářka', 'description': 'md5:ff2f9f6de73c73d7cef4f756c1c1af42', 'timestamp': 1418760010, 'upload_date': '20141216', } }, { 'md5': '498eb9dfa97169f409126c617e2a3d64', 'info_dict': { 'id': '95d35580846a11e4b6d20025900fea04', 'ext': 'mp4', 'title': 'Boj o kliniku: Veřejný zájem, nebo právo na majetek?', 'description': 'md5:889fe610a70fee5511dc3326a089188e', 'timestamp': 1418760010, 'upload_date': '20141216', } }, { 'md5': 'b8dc6b744844032dab6ba3781a7274b9', 'info_dict': { 'id': '6fe14d66853511e4833a0025900fea04', 'ext': 'mp4', 'title': 'Pánek: Odmítání syrských uprchlíků je ostudou české vlády', 'description': 'md5:544f86de6d20c4815bea11bf2ac3004f', 'timestamp': 1418760010, 'upload_date': '20141216', } }], }, { 'url': 'https://video.aktualne.cz/dvtv/zeman-si-jen-leci-mindraky-sobotku-nenavidi-a-babis-se-mu-te/r~960cdb3a365a11e7a83b0025900fea04/', 'md5': 'f8efe9656017da948369aa099788c8ea', 'info_dict': { 'id': '3c496fec365911e7a6500025900fea04', 'ext': 'mp4', 'title': 'Zeman si jen léčí mindráky, Sobotku nenávidí a Babiš se mu teď hodí, tvrdí Kmenta', 'duration': 1103, 'upload_date': '20170511', 'timestamp': 1494514200, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://video.aktualne.cz/v-cechach-poprve-zazni-zelenkova-zrestaurovana-mse/r~45b4b00483ec11e4883b002590604f2e/', 'only_matching': True, }, { # Test live stream video (liveStarter) parsing 'url': 'https://video.aktualne.cz/dvtv/zive-mistryne-sveta-eva-samkova-po-navratu-ze-sampionatu/r~182654c2288811e990fd0cc47ab5f122/', 'md5': '2e552e483f2414851ca50467054f9d5d', 'info_dict': { 'id': '8d116360288011e98c840cc47ab5f122', 'ext': 'mp4', 'title': 'Živě: Mistryně světa Eva Samková po návratu ze 
šampionátu', 'upload_date': '20190204', 'timestamp': 1549289591, }, 'params': { # Video content is no longer available 'skip_download': True, }, }] def _parse_video_metadata(self, js, video_id, timestamp): data = self._parse_json(js, video_id, transform_source=js_to_json) title = unescapeHTML(data['title']) live_starter = try_get(data, lambda x: x['plugins']['liveStarter'], dict) if live_starter: data.update(live_starter) formats = [] for tracks in data.get('tracks', {}).values(): for video in tracks: video_url = video.get('src') if not video_url: continue video_type = video.get('type') ext = determine_ext(video_url, mimetype2ext(video_type)) if video_type == 'application/vnd.apple.mpegurl' or ext == 'm3u8': formats.extend(self._extract_m3u8_formats( video_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) elif video_type == 'application/dash+xml' or ext == 'mpd': formats.extend(self._extract_mpd_formats( video_url, video_id, mpd_id='dash', fatal=False)) else: label = video.get('label') height = self._search_regex( r'^(\d+)[pP]', label or '', 'height', default=None) format_id = ['http'] for f in (ext, label): if f: format_id.append(f) formats.append({ 'url': video_url, 'format_id': '-'.join(format_id), 'height': int_or_none(height), }) self._sort_formats(formats) return { 'id': data.get('mediaid') or video_id, 'title': title, 'description': data.get('description'), 'thumbnail': data.get('image'), 'duration': int_or_none(data.get('duration')), 'timestamp': int_or_none(timestamp), 'formats': formats } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) timestamp = parse_iso8601(self._html_search_meta( 'article:published_time', webpage, 'published time', default=None)) items = re.findall(r'(?s)playlist\.push\(({.+?})\);', webpage) if items: return self.playlist_result( [self._parse_video_metadata(i, video_id, timestamp) for i in items], video_id, 
self._html_search_meta('twitter:title', webpage)) item = self._search_regex( r'(?s)BBXPlayer\.setup\((.+?)\);', webpage, 'video', default=None) if item: # remove function calls (ex. htmldeentitize) # TODO this should be fixed in a general way in the js_to_json item = re.sub(r'\w+?\((.+)\)', r'\1', item) return self._parse_video_metadata(item, video_id, timestamp) raise ExtractorError('Could not find neither video nor playlist')
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/livejournal.py
youtube_dl/extractor/livejournal.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_str from ..utils import int_or_none class LiveJournalIE(InfoExtractor): _VALID_URL = r'https?://(?:[^.]+\.)?livejournal\.com/video/album/\d+.+?\bid=(?P<id>\d+)' _TEST = { 'url': 'https://andrei-bt.livejournal.com/video/album/407/?mode=view&id=51272', 'md5': 'adaf018388572ced8a6f301ace49d4b2', 'info_dict': { 'id': '1263729', 'ext': 'mp4', 'title': 'Истребители против БПЛА', 'upload_date': '20190624', 'timestamp': 1561406715, } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) record = self._parse_json(self._search_regex( r'Site\.page\s*=\s*({.+?});', webpage, 'page data'), video_id)['video']['record'] storage_id = compat_str(record['storageid']) title = record.get('name') if title: # remove filename extension(.mp4, .mov, etc...) title = title.rsplit('.', 1)[0] return { '_type': 'url_transparent', 'id': video_id, 'title': title, 'thumbnail': record.get('thumbnail'), 'timestamp': int_or_none(record.get('timecreate')), 'url': 'eagleplatform:vc.videos.livejournal.com:' + storage_id, 'ie_key': 'EaglePlatform', }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/gaskrank.py
youtube_dl/extractor/gaskrank.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( float_or_none, int_or_none, unified_strdate, ) class GaskrankIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?gaskrank\.tv/tv/(?P<categories>[^/]+)/(?P<id>[^/]+)\.htm' _TESTS = [{ 'url': 'http://www.gaskrank.tv/tv/motorrad-fun/strike-einparken-durch-anfaenger-crash-mit-groesserem-flurschaden.htm', 'md5': '1ae88dbac97887d85ebd1157a95fc4f9', 'info_dict': { 'id': '201601/26955', 'ext': 'mp4', 'title': 'Strike! Einparken können nur Männer - Flurschaden hält sich in Grenzen *lol*', 'thumbnail': r're:^https?://.*\.jpg$', 'categories': ['motorrad-fun'], 'display_id': 'strike-einparken-durch-anfaenger-crash-mit-groesserem-flurschaden', 'uploader_id': 'Bikefun', 'upload_date': '20170110', 'uploader_url': None, } }, { 'url': 'http://www.gaskrank.tv/tv/racing/isle-of-man-tt-2011-michael-du-15920.htm', 'md5': 'c33ee32c711bc6c8224bfcbe62b23095', 'info_dict': { 'id': '201106/15920', 'ext': 'mp4', 'title': 'Isle of Man - Michael Dunlop vs Guy Martin - schwindelig kucken', 'thumbnail': r're:^https?://.*\.jpg$', 'categories': ['racing'], 'display_id': 'isle-of-man-tt-2011-michael-du-15920', 'uploader_id': 'IOM', 'upload_date': '20170523', 'uploader_url': 'www.iomtt.com', } }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) title = self._og_search_title( webpage, default=None) or self._html_search_meta( 'title', webpage, fatal=True) categories = [re.match(self._VALID_URL, url).group('categories')] mobj = re.search( r'Video von:\s*(?P<uploader_id>[^|]*?)\s*\|\s*vom:\s*(?P<upload_date>[0-9][0-9]\.[0-9][0-9]\.[0-9][0-9][0-9][0-9])', webpage) if mobj is not None: uploader_id = mobj.groupdict().get('uploader_id') upload_date = unified_strdate(mobj.groupdict().get('upload_date')) uploader_url = self._search_regex( r'Homepage:\s*<[^>]*>(?P<uploader_url>[^<]*)', webpage, 'uploader_url', default=None) 
tags = re.findall( r'/tv/tags/[^/]+/"\s*>(?P<tag>[^<]*?)<', webpage) view_count = self._search_regex( r'class\s*=\s*"gkRight"(?:[^>]*>\s*<[^>]*)*icon-eye-open(?:[^>]*>\s*<[^>]*)*>\s*(?P<view_count>[0-9\.]*)', webpage, 'view_count', default=None) if view_count: view_count = int_or_none(view_count.replace('.', '')) average_rating = self._search_regex( r'itemprop\s*=\s*"ratingValue"[^>]*>\s*(?P<average_rating>[0-9,]+)', webpage, 'average_rating') if average_rating: average_rating = float_or_none(average_rating.replace(',', '.')) video_id = self._search_regex( r'https?://movies\.gaskrank\.tv/([^-]*?)(-[^\.]*)?\.mp4', webpage, 'video id', default=display_id) entry = self._parse_html5_media_entries(url, webpage, video_id)[0] entry.update({ 'id': video_id, 'title': title, 'categories': categories, 'display_id': display_id, 'uploader_id': uploader_id, 'upload_date': upload_date, 'uploader_url': uploader_url, 'tags': tags, 'view_count': view_count, 'average_rating': average_rating, }) self._sort_formats(entry['formats']) return entry
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/playstuff.py
youtube_dl/extractor/playstuff.py
from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_str from ..utils import ( smuggle_url, try_get, ) class PlayStuffIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?play\.stuff\.co\.nz/details/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://play.stuff.co.nz/details/608778ac1de1c4001a3fa09a', 'md5': 'c82d3669e5247c64bc382577843e5bd0', 'info_dict': { 'id': '6250584958001', 'ext': 'mp4', 'title': 'Episode 1: Rotorua/Mt Maunganui/Tauranga', 'description': 'md5:c154bafb9f0dd02d01fd4100fb1c1913', 'uploader_id': '6005208634001', 'timestamp': 1619491027, 'upload_date': '20210427', }, 'add_ie': ['BrightcoveNew'], }, { # geo restricted, bypassable 'url': 'https://play.stuff.co.nz/details/_6155660351001', 'only_matching': True, }] BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s' def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) state = self._parse_json( self._search_regex( r'__INITIAL_STATE__\s*=\s*({.+?})\s*;', webpage, 'state'), video_id) account_id = try_get( state, lambda x: x['configurations']['accountId'], compat_str) or '6005208634001' player_id = try_get( state, lambda x: x['configurations']['playerId'], compat_str) or 'default' entries = [] for item_id, video in state['items'].items(): if not isinstance(video, dict): continue asset_id = try_get( video, lambda x: x['content']['attributes']['assetId'], compat_str) if not asset_id: continue entries.append(self.url_result( smuggle_url( self.BRIGHTCOVE_URL_TEMPLATE % (account_id, player_id, asset_id), {'geo_countries': ['NZ']}), 'BrightcoveNew', video_id)) return self.playlist_result(entries, video_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/ustream.py
youtube_dl/extractor/ustream.py
from __future__ import unicode_literals import random import re from .common import InfoExtractor from ..compat import ( compat_str, compat_urlparse, ) from ..utils import ( encode_data_uri, ExtractorError, int_or_none, float_or_none, mimetype2ext, str_or_none, ) class UstreamIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?(?:ustream\.tv|video\.ibm\.com)/(?P<type>recorded|embed|embed/recorded)/(?P<id>\d+)' IE_NAME = 'ustream' _TESTS = [{ 'url': 'http://www.ustream.tv/recorded/20274954', 'md5': '088f151799e8f572f84eb62f17d73e5c', 'info_dict': { 'id': '20274954', 'ext': 'flv', 'title': 'Young Americans for Liberty February 7, 2012 2:28 AM', 'description': 'Young Americans for Liberty February 7, 2012 2:28 AM', 'timestamp': 1328577035, 'upload_date': '20120207', 'uploader': 'yaliberty', 'uploader_id': '6780869', }, }, { # From http://sportscanada.tv/canadagames/index.php/week2/figure-skating/444 # Title and uploader available only from params JSON 'url': 'http://www.ustream.tv/embed/recorded/59307601?ub=ff0000&lc=ff0000&oc=ffffff&uc=ffffff&v=3&wmode=direct', 'md5': '5a2abf40babeac9812ed20ae12d34e10', 'info_dict': { 'id': '59307601', 'ext': 'flv', 'title': '-CG11- Canada Games Figure Skating', 'uploader': 'sportscanadatv', }, 'skip': 'This Pro Broadcaster has chosen to remove this video from the ustream.tv site.', }, { 'url': 'http://www.ustream.tv/embed/10299409', 'info_dict': { 'id': '10299409', }, 'playlist_count': 3, }, { 'url': 'http://www.ustream.tv/recorded/91343263', 'info_dict': { 'id': '91343263', 'ext': 'mp4', 'title': 'GitHub Universe - General Session - Day 1', 'upload_date': '20160914', 'description': 'GitHub Universe - General Session - Day 1', 'timestamp': 1473872730, 'uploader': 'wa0dnskeqkr', 'uploader_id': '38977840', }, 'params': { 'skip_download': True, # m3u8 download }, }, { 'url': 'https://video.ibm.com/embed/recorded/128240221?&autoplay=true&controls=true&volume=100', 'only_matching': True, }] @staticmethod def _extract_url(webpage): mobj 
= re.search( r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:www\.)?(?:ustream\.tv|video\.ibm\.com)/embed/.+?)\1', webpage) if mobj is not None: return mobj.group('url') def _get_stream_info(self, url, video_id, app_id_ver, extra_note=None): def num_to_hex(n): return hex(n)[2:] rnd = random.randrange if not extra_note: extra_note = '' conn_info = self._download_json( 'http://r%d-1-%s-recorded-lp-live.ums.ustream.tv/1/ustream' % (rnd(1e8), video_id), video_id, note='Downloading connection info' + extra_note, query={ 'type': 'viewer', 'appId': app_id_ver[0], 'appVersion': app_id_ver[1], 'rsid': '%s:%s' % (num_to_hex(rnd(1e8)), num_to_hex(rnd(1e8))), 'rpin': '_rpin.%d' % rnd(1e15), 'referrer': url, 'media': video_id, 'application': 'recorded', }) host = conn_info[0]['args'][0]['host'] connection_id = conn_info[0]['args'][0]['connectionId'] return self._download_json( 'http://%s/1/ustream?connectionId=%s' % (host, connection_id), video_id, note='Downloading stream info' + extra_note) def _get_streams(self, url, video_id, app_id_ver): # Sometimes the return dict does not have 'stream' for trial_count in range(3): stream_info = self._get_stream_info( url, video_id, app_id_ver, extra_note=' (try %d)' % (trial_count + 1) if trial_count > 0 else '') if 'stream' in stream_info[0]['args'][0]: return stream_info[0]['args'][0]['stream'] return [] def _parse_segmented_mp4(self, dash_stream_info): def resolve_dash_template(template, idx, chunk_hash): return template.replace('%', compat_str(idx), 1).replace('%', chunk_hash) formats = [] for stream in dash_stream_info['streams']: # Use only one provider to avoid too many formats provider = dash_stream_info['providers'][0] fragments = [{ 'url': resolve_dash_template( provider['url'] + stream['initUrl'], 0, dash_stream_info['hashes']['0']) }] for idx in range(dash_stream_info['videoLength'] // dash_stream_info['chunkTime']): fragments.append({ 'url': resolve_dash_template( provider['url'] + stream['segmentUrl'], idx, 
dash_stream_info['hashes'][compat_str(idx // 10 * 10)]) }) content_type = stream['contentType'] kind = content_type.split('/')[0] f = { 'format_id': '-'.join(filter(None, [ 'dash', kind, str_or_none(stream.get('bitrate'))])), 'protocol': 'http_dash_segments', # TODO: generate a MPD doc for external players? 'url': encode_data_uri(b'<MPD/>', 'text/xml'), 'ext': mimetype2ext(content_type), 'height': stream.get('height'), 'width': stream.get('width'), 'fragments': fragments, } if kind == 'video': f.update({ 'vcodec': stream.get('codec'), 'acodec': 'none', 'vbr': stream.get('bitrate'), }) else: f.update({ 'vcodec': 'none', 'acodec': stream.get('codec'), 'abr': stream.get('bitrate'), }) formats.append(f) return formats def _real_extract(self, url): m = re.match(self._VALID_URL, url) video_id = m.group('id') # some sites use this embed format (see: https://github.com/ytdl-org/youtube-dl/issues/2990) if m.group('type') == 'embed/recorded': video_id = m.group('id') desktop_url = 'http://www.ustream.tv/recorded/' + video_id return self.url_result(desktop_url, 'Ustream') if m.group('type') == 'embed': video_id = m.group('id') webpage = self._download_webpage(url, video_id) content_video_ids = self._parse_json(self._search_regex( r'ustream\.vars\.offAirContentVideoIds=([^;]+);', webpage, 'content video IDs'), video_id) return self.playlist_result( map(lambda u: self.url_result('http://www.ustream.tv/recorded/' + u, 'Ustream'), content_video_ids), video_id) params = self._download_json( 'https://api.ustream.tv/videos/%s.json' % video_id, video_id) error = params.get('error') if error: raise ExtractorError( '%s returned error: %s' % (self.IE_NAME, error), expected=True) video = params['video'] title = video['title'] filesize = float_or_none(video.get('file_size')) formats = [{ 'id': video_id, 'url': video_url, 'ext': format_id, 'filesize': filesize, } for format_id, video_url in video['media_urls'].items() if video_url] if not formats: hls_streams = self._get_streams(url, 
video_id, app_id_ver=(11, 2)) if hls_streams: # m3u8_native leads to intermittent ContentTooShortError formats.extend(self._extract_m3u8_formats( hls_streams[0]['url'], video_id, ext='mp4', m3u8_id='hls')) ''' # DASH streams handling is incomplete as 'url' is missing dash_streams = self._get_streams(url, video_id, app_id_ver=(3, 1)) if dash_streams: formats.extend(self._parse_segmented_mp4(dash_streams)) ''' self._sort_formats(formats) description = video.get('description') timestamp = int_or_none(video.get('created_at')) duration = float_or_none(video.get('length')) view_count = int_or_none(video.get('views')) uploader = video.get('owner', {}).get('username') uploader_id = video.get('owner', {}).get('id') thumbnails = [{ 'id': thumbnail_id, 'url': thumbnail_url, } for thumbnail_id, thumbnail_url in video.get('thumbnail', {}).items()] return { 'id': video_id, 'title': title, 'description': description, 'thumbnails': thumbnails, 'timestamp': timestamp, 'duration': duration, 'view_count': view_count, 'uploader': uploader, 'uploader_id': uploader_id, 'formats': formats, } class UstreamChannelIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?ustream\.tv/channel/(?P<slug>.+)' IE_NAME = 'ustream:channel' _TEST = { 'url': 'http://www.ustream.tv/channel/channeljapan', 'info_dict': { 'id': '10874166', }, 'playlist_mincount': 17, } def _real_extract(self, url): m = re.match(self._VALID_URL, url) display_id = m.group('slug') webpage = self._download_webpage(url, display_id) channel_id = self._html_search_meta('ustream:channel_id', webpage) BASE = 'http://www.ustream.tv' next_url = '/ajax/socialstream/videos/%s/1.json' % channel_id video_ids = [] while next_url: reply = self._download_json( compat_urlparse.urljoin(BASE, next_url), display_id, note='Downloading video information (next: %d)' % (len(video_ids) + 1)) video_ids.extend(re.findall(r'data-content-id="(\d.*)"', reply['data'])) next_url = reply['nextUrl'] entries = [ self.url_result('http://www.ustream.tv/recorded/' 
+ vid, 'Ustream') for vid in video_ids] return { '_type': 'playlist', 'id': channel_id, 'display_id': display_id, 'entries': entries, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/funimation.py
youtube_dl/extractor/funimation.py
# coding: utf-8 from __future__ import unicode_literals import random import string from .common import InfoExtractor from ..compat import compat_HTTPError from ..utils import ( determine_ext, int_or_none, js_to_json, ExtractorError, urlencode_postdata ) class FunimationIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?funimation(?:\.com|now\.uk)/(?:[^/]+/)?shows/[^/]+/(?P<id>[^/?#&]+)' _NETRC_MACHINE = 'funimation' _TOKEN = None _TESTS = [{ 'url': 'https://www.funimation.com/shows/hacksign/role-play/', 'info_dict': { 'id': '91144', 'display_id': 'role-play', 'ext': 'mp4', 'title': '.hack//SIGN - Role Play', 'description': 'md5:b602bdc15eef4c9bbb201bb6e6a4a2dd', 'thumbnail': r're:https?://.*\.jpg', }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'https://www.funimation.com/shows/attack-on-titan-junior-high/broadcast-dub-preview/', 'info_dict': { 'id': '210051', 'display_id': 'broadcast-dub-preview', 'ext': 'mp4', 'title': 'Attack on Titan: Junior High - Broadcast Dub Preview', 'thumbnail': r're:https?://.*\.(?:jpg|png)', }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'https://www.funimationnow.uk/shows/puzzle-dragons-x/drop-impact/simulcast/', 'only_matching': True, }, { # with lang code 'url': 'https://www.funimation.com/en/shows/hacksign/role-play/', 'only_matching': True, }] def _login(self): username, password = self._get_login_info() if username is None: return try: data = self._download_json( 'https://prod-api-funimationnow.dadcdigital.com/api/auth/login/', None, 'Logging in', data=urlencode_postdata({ 'username': username, 'password': password, })) self._TOKEN = data['token'] except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401: error = self._parse_json(e.cause.read().decode(), None)['error'] raise ExtractorError(error, expected=True) raise def _real_initialize(self): self._login() def _real_extract(self, url): display_id = self._match_id(url) webpage = 
self._download_webpage(url, display_id) def _search_kane(name): return self._search_regex( r"KANE_customdimensions\.%s\s*=\s*'([^']+)';" % name, webpage, name, default=None) title_data = self._parse_json(self._search_regex( r'TITLE_DATA\s*=\s*({[^}]+})', webpage, 'title data', default=''), display_id, js_to_json, fatal=False) or {} video_id = title_data.get('id') or self._search_regex([ r"KANE_customdimensions.videoID\s*=\s*'(\d+)';", r'<iframe[^>]+src="/player/(\d+)', ], webpage, 'video_id', default=None) if not video_id: player_url = self._html_search_meta([ 'al:web:url', 'og:video:url', 'og:video:secure_url', ], webpage, fatal=True) video_id = self._search_regex(r'/player/(\d+)', player_url, 'video id') title = episode = title_data.get('title') or _search_kane('videoTitle') or self._og_search_title(webpage) series = _search_kane('showName') if series: title = '%s - %s' % (series, title) description = self._html_search_meta(['description', 'og:description'], webpage, fatal=True) try: headers = {} if self._TOKEN: headers['Authorization'] = 'Token %s' % self._TOKEN sources = self._download_json( 'https://www.funimation.com/api/showexperience/%s/' % video_id, video_id, headers=headers, query={ 'pinst_id': ''.join([random.choice(string.digits + string.ascii_letters) for _ in range(8)]), })['items'] except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403: error = self._parse_json(e.cause.read(), video_id)['errors'][0] raise ExtractorError('%s said: %s' % ( self.IE_NAME, error.get('detail') or error.get('title')), expected=True) raise formats = [] for source in sources: source_url = source.get('src') if not source_url: continue source_type = source.get('videoType') or determine_ext(source_url) if source_type == 'm3u8': formats.extend(self._extract_m3u8_formats( source_url, video_id, 'mp4', m3u8_id='hls', fatal=False)) else: formats.append({ 'format_id': source_type, 'url': source_url, }) self._sort_formats(formats) return { 'id': 
video_id, 'display_id': display_id, 'title': title, 'description': description, 'thumbnail': self._og_search_thumbnail(webpage), 'series': series, 'season_number': int_or_none(title_data.get('seasonNum') or _search_kane('season')), 'episode_number': int_or_none(title_data.get('episodeNum')), 'episode': episode, 'season_id': title_data.get('seriesId'), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/vlive.py
youtube_dl/extractor/vlive.py
# coding: utf-8 from __future__ import unicode_literals import itertools import json from .naver import NaverBaseIE from ..compat import ( compat_HTTPError, compat_str, ) from ..utils import ( ExtractorError, int_or_none, merge_dicts, str_or_none, strip_or_none, try_get, urlencode_postdata, ) class VLiveBaseIE(NaverBaseIE): _APP_ID = '8c6cc7b45d2568fb668be6e05b6e5a3b' class VLiveIE(VLiveBaseIE): IE_NAME = 'vlive' _VALID_URL = r'https?://(?:(?:www|m)\.)?vlive\.tv/(?:video|embed)/(?P<id>[0-9]+)' _NETRC_MACHINE = 'vlive' _TESTS = [{ 'url': 'http://www.vlive.tv/video/1326', 'md5': 'cc7314812855ce56de70a06a27314983', 'info_dict': { 'id': '1326', 'ext': 'mp4', 'title': "Girl's Day's Broadcast", 'creator': "Girl's Day", 'view_count': int, 'uploader_id': 'muploader_a', }, }, { 'url': 'http://www.vlive.tv/video/16937', 'info_dict': { 'id': '16937', 'ext': 'mp4', 'title': '첸백시 걍방', 'creator': 'EXO', 'view_count': int, 'subtitles': 'mincount:12', 'uploader_id': 'muploader_j', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.vlive.tv/video/129100', 'md5': 'ca2569453b79d66e5b919e5d308bff6b', 'info_dict': { 'id': '129100', 'ext': 'mp4', 'title': '[V LIVE] [BTS+] Run BTS! 
2019 - EP.71 :: Behind the scene', 'creator': 'BTS+', 'view_count': int, 'subtitles': 'mincount:10', }, 'skip': 'This video is only available for CH+ subscribers', }, { 'url': 'https://www.vlive.tv/embed/1326', 'only_matching': True, }, { # works only with gcc=KR 'url': 'https://www.vlive.tv/video/225019', 'only_matching': True, }] def _real_initialize(self): self._login() def _login(self): email, password = self._get_login_info() if None in (email, password): return def is_logged_in(): login_info = self._download_json( 'https://www.vlive.tv/auth/loginInfo', None, note='Downloading login info', headers={'Referer': 'https://www.vlive.tv/home'}) return try_get( login_info, lambda x: x['message']['login'], bool) or False LOGIN_URL = 'https://www.vlive.tv/auth/email/login' self._request_webpage( LOGIN_URL, None, note='Downloading login cookies') self._download_webpage( LOGIN_URL, None, note='Logging in', data=urlencode_postdata({'email': email, 'pwd': password}), headers={ 'Referer': LOGIN_URL, 'Content-Type': 'application/x-www-form-urlencoded' }) if not is_logged_in(): raise ExtractorError('Unable to log in', expected=True) def _call_api(self, path_template, video_id, fields=None): query = {'appId': self._APP_ID, 'gcc': 'KR', 'platformType': 'PC'} if fields: query['fields'] = fields try: return self._download_json( 'https://www.vlive.tv/globalv-web/vam-web/' + path_template % video_id, video_id, 'Downloading %s JSON metadata' % path_template.split('/')[-1].split('-')[0], headers={'Referer': 'https://www.vlive.tv/'}, query=query) except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403: self.raise_login_required(json.loads(e.cause.read().decode('utf-8'))['message']) raise def _real_extract(self, url): video_id = self._match_id(url) post = self._call_api( 'post/v1.0/officialVideoPost-%s', video_id, 
'author{nickname},channel{channelCode,channelName},officialVideo{commentCount,exposeStatus,likeCount,playCount,playTime,status,title,type,vodId}') video = post['officialVideo'] def get_common_fields(): channel = post.get('channel') or {} return { 'title': video.get('title'), 'creator': post.get('author', {}).get('nickname'), 'channel': channel.get('channelName'), 'channel_id': channel.get('channelCode'), 'duration': int_or_none(video.get('playTime')), 'view_count': int_or_none(video.get('playCount')), 'like_count': int_or_none(video.get('likeCount')), 'comment_count': int_or_none(video.get('commentCount')), } video_type = video.get('type') if video_type == 'VOD': inkey = self._call_api('video/v1.0/vod/%s/inkey', video_id)['inkey'] vod_id = video['vodId'] return merge_dicts( get_common_fields(), self._extract_video_info(video_id, vod_id, inkey)) elif video_type == 'LIVE': status = video.get('status') if status == 'ON_AIR': stream_url = self._call_api( 'old/v3/live/%s/playInfo', video_id)['result']['adaptiveStreamUrl'] formats = self._extract_m3u8_formats(stream_url, video_id, 'mp4') self._sort_formats(formats) info = get_common_fields() info.update({ 'title': self._live_title(video['title']), 'id': video_id, 'formats': formats, 'is_live': True, }) return info elif status == 'ENDED': raise ExtractorError( 'Uploading for replay. Please wait...', expected=True) elif status == 'RESERVED': raise ExtractorError('Coming soon!', expected=True) elif video.get('exposeStatus') == 'CANCEL': raise ExtractorError( 'We are sorry, but the live broadcast has been canceled.', expected=True) else: raise ExtractorError('Unknown status ' + status) class VLivePostIE(VLiveIE): IE_NAME = 'vlive:post' _VALID_URL = r'https?://(?:(?:www|m)\.)?vlive\.tv/post/(?P<id>\d-\d+)' _TESTS = [{ # uploadType = SOS 'url': 'https://www.vlive.tv/post/1-20088044', 'info_dict': { 'id': '1-20088044', 'title': 'Hola estrellitas la tierra les dice hola (si era así no?) 
Ha...', 'description': 'md5:fab8a1e50e6e51608907f46c7fa4b407', }, 'playlist_count': 3, }, { # uploadType = V 'url': 'https://www.vlive.tv/post/1-20087926', 'info_dict': { 'id': '1-20087926', 'title': 'James Corden: And so, the baby becamos the Papa💜😭💪😭', }, 'playlist_count': 1, }] _FVIDEO_TMPL = 'fvideo/v1.0/fvideo-%%s/%s' _SOS_TMPL = _FVIDEO_TMPL % 'sosPlayInfo' _INKEY_TMPL = _FVIDEO_TMPL % 'inKey' def _real_extract(self, url): post_id = self._match_id(url) post = self._call_api( 'post/v1.0/post-%s', post_id, 'attachments{video},officialVideo{videoSeq},plainBody,title') video_seq = str_or_none(try_get( post, lambda x: x['officialVideo']['videoSeq'])) if video_seq: return self.url_result( 'http://www.vlive.tv/video/' + video_seq, VLiveIE.ie_key(), video_seq) title = post['title'] entries = [] for idx, video in enumerate(post['attachments']['video'].values()): video_id = video.get('videoId') if not video_id: continue upload_type = video.get('uploadType') upload_info = video.get('uploadInfo') or {} entry = None if upload_type == 'SOS': download = self._call_api( self._SOS_TMPL, video_id)['videoUrl']['download'] formats = [] for f_id, f_url in download.items(): formats.append({ 'format_id': f_id, 'url': f_url, 'height': int_or_none(f_id[:-1]), }) self._sort_formats(formats) entry = { 'formats': formats, 'id': video_id, 'thumbnail': upload_info.get('imageUrl'), } elif upload_type == 'V': vod_id = upload_info.get('videoId') if not vod_id: continue inkey = self._call_api(self._INKEY_TMPL, video_id)['inKey'] entry = self._extract_video_info(video_id, vod_id, inkey) if entry: entry['title'] = '%s_part%s' % (title, idx) entries.append(entry) return self.playlist_result( entries, post_id, title, strip_or_none(post.get('plainBody'))) class VLiveChannelIE(VLiveBaseIE): IE_NAME = 'vlive:channel' _VALID_URL = r'https?://(?:channels\.vlive\.tv|(?:(?:www|m)\.)?vlive\.tv/channel)/(?P<id>[0-9A-Z]+)' _TESTS = [{ 'url': 'http://channels.vlive.tv/FCD4B', 'info_dict': { 'id': 'FCD4B', 
'title': 'MAMAMOO', }, 'playlist_mincount': 110 }, { 'url': 'https://www.vlive.tv/channel/FCD4B', 'only_matching': True, }] def _call_api(self, path, channel_key_suffix, channel_value, note, query): q = { 'app_id': self._APP_ID, 'channel' + channel_key_suffix: channel_value, } q.update(query) return self._download_json( 'http://api.vfan.vlive.tv/vproxy/channelplus/' + path, channel_value, note='Downloading ' + note, query=q)['result'] def _real_extract(self, url): channel_code = self._match_id(url) channel_seq = self._call_api( 'decodeChannelCode', 'Code', channel_code, 'decode channel code', {})['channelSeq'] channel_name = None entries = [] for page_num in itertools.count(1): video_list = self._call_api( 'getChannelVideoList', 'Seq', channel_seq, 'channel list page #%d' % page_num, { # Large values of maxNumOfRows (~300 or above) may cause # empty responses (see [1]), e.g. this happens for [2] that # has more than 300 videos. # 1. https://github.com/ytdl-org/youtube-dl/issues/13830 # 2. http://channels.vlive.tv/EDBF. 'maxNumOfRows': 100, 'pageNo': page_num } ) if not channel_name: channel_name = try_get( video_list, lambda x: x['channelInfo']['channelName'], compat_str) videos = try_get( video_list, lambda x: x['videoList'], list) if not videos: break for video in videos: video_id = video.get('videoSeq') if not video_id: continue video_id = compat_str(video_id) entries.append( self.url_result( 'http://www.vlive.tv/video/%s' % video_id, ie=VLiveIE.ie_key(), video_id=video_id)) return self.playlist_result( entries, channel_code, channel_name)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/howstuffworks.py
youtube_dl/extractor/howstuffworks.py
from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( find_xpath_attr, int_or_none, js_to_json, unescapeHTML, determine_ext, ) class HowStuffWorksIE(InfoExtractor): _VALID_URL = r'https?://[\da-z-]+\.(?:howstuffworks|stuff(?:(?:youshould|theydontwantyouto)know|toblowyourmind|momnevertoldyou)|(?:brain|car)stuffshow|fwthinking|geniusstuff)\.com/(?:[^/]+/)*(?:\d+-)?(?P<id>.+?)-video\.htm' _TESTS = [ { 'url': 'http://www.stufftoblowyourmind.com/videos/optical-illusions-video.htm', 'md5': '76646a5acc0c92bf7cd66751ca5db94d', 'info_dict': { 'id': '855410', 'ext': 'mp4', 'title': 'Your Trickster Brain: Optical Illusions -- Science on the Web', 'description': 'md5:e374ff9561f6833ad076a8cc0a5ab2fb', }, }, { 'url': 'http://shows.howstuffworks.com/more-shows/why-does-balloon-stick-to-hair-video.htm', 'only_matching': True, } ] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) clip_js = self._search_regex( r'(?s)var clip = ({.*?});', webpage, 'clip info') clip_info = self._parse_json( clip_js, display_id, transform_source=js_to_json) video_id = clip_info['content_id'] formats = [] m3u8_url = clip_info.get('m3u8') if m3u8_url and determine_ext(m3u8_url) == 'm3u8': formats.extend(self._extract_m3u8_formats(m3u8_url, video_id, 'mp4', format_id='hls', fatal=True)) flv_url = clip_info.get('flv_url') if flv_url: formats.append({ 'url': flv_url, 'format_id': 'flv', }) for video in clip_info.get('mp4', []): formats.append({ 'url': video['src'], 'format_id': 'mp4-%s' % video['bitrate'], 'vbr': int_or_none(video['bitrate'].rstrip('k')), }) if not formats: smil = self._download_xml( 'http://services.media.howstuffworks.com/videos/%s/smil-service.smil' % video_id, video_id, 'Downloading video SMIL') http_base = find_xpath_attr( smil, './{0}head/{0}meta'.format('{http://www.w3.org/2001/SMIL20/Language}'), 'name', 'httpBase').get('content') URL_SUFFIX = '?v=2.11.3&fp=LNX 
11,2,202,356&r=A&g=A' for video in smil.findall( './{0}body/{0}switch/{0}video'.format('{http://www.w3.org/2001/SMIL20/Language}')): vbr = int_or_none(video.attrib['system-bitrate'], scale=1000) formats.append({ 'url': '%s/%s%s' % (http_base, video.attrib['src'], URL_SUFFIX), 'format_id': '%dk' % vbr, 'vbr': vbr, }) self._sort_formats(formats) return { 'id': '%s' % video_id, 'display_id': display_id, 'title': unescapeHTML(clip_info['clip_title']), 'description': unescapeHTML(clip_info.get('caption')), 'thumbnail': clip_info.get('video_still_url'), 'duration': int_or_none(clip_info.get('duration')), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/extractors.py
youtube_dl/extractor/extractors.py
# flake8: noqa from __future__ import unicode_literals from .abc import ( ABCIE, ABCIViewIE, ) from .abcnews import ( AbcNewsIE, AbcNewsVideoIE, ) from .abcotvs import ( ABCOTVSIE, ABCOTVSClipsIE, ) from .academicearth import AcademicEarthCourseIE from .acast import ( ACastIE, ACastChannelIE, ) from .adn import ADNIE from .adobeconnect import AdobeConnectIE from .adobetv import ( AdobeTVEmbedIE, AdobeTVIE, AdobeTVShowIE, AdobeTVChannelIE, AdobeTVVideoIE, ) from .adultswim import AdultSwimIE from .aenetworks import ( AENetworksIE, AENetworksCollectionIE, AENetworksShowIE, HistoryTopicIE, HistoryPlayerIE, BiographyIE, ) from .afreecatv import AfreecaTVIE from .airmozilla import AirMozillaIE from .aljazeera import AlJazeeraIE from .alphaporno import AlphaPornoIE from .amara import AmaraIE from .amcnetworks import AMCNetworksIE from .americastestkitchen import ( AmericasTestKitchenIE, AmericasTestKitchenSeasonIE, ) from .animeondemand import AnimeOnDemandIE from .anvato import AnvatoIE from .aol import AolIE from .allocine import AllocineIE from .aliexpress import AliExpressLiveIE from .alsace20tv import ( Alsace20TVIE, Alsace20TVEmbedIE, ) from .apa import APAIE from .aparat import AparatIE from .appleconnect import AppleConnectIE from .appletrailers import ( AppleTrailersIE, AppleTrailersSectionIE, ) from .applepodcasts import ApplePodcastsIE from .archiveorg import ArchiveOrgIE from .arcpublishing import ArcPublishingIE from .arkena import ArkenaIE from .ard import ( ARDBetaMediathekIE, ARDIE, ARDMediathekIE, ) from .arte import ( ArteTVIE, ArteTVEmbedIE, ArteTVPlaylistIE, ArteTVCategoryIE, ) from .arnes import ArnesIE from .asiancrush import ( AsianCrushIE, AsianCrushPlaylistIE, ) from .atresplayer import AtresPlayerIE from .atttechchannel import ATTTechChannelIE from .atvat import ATVAtIE from .audimedia import AudiMediaIE from .audioboom import AudioBoomIE from .audiomack import AudiomackIE, AudiomackAlbumIE from .awaan import ( AWAANIE, AWAANVideoIE, 
AWAANLiveIE, AWAANSeasonIE, ) from .azmedien import AZMedienIE from .baidu import BaiduVideoIE from .bandaichannel import BandaiChannelIE from .bandcamp import BandcampIE, BandcampAlbumIE, BandcampWeeklyIE from .bbc import ( BBCCoUkIE, BBCCoUkArticleIE, BBCCoUkIPlayerEpisodesIE, BBCCoUkIPlayerGroupIE, BBCCoUkPlaylistIE, BBCIE, ) from .beeg import BeegIE from .behindkink import BehindKinkIE from .bellmedia import BellMediaIE from .beatport import BeatportIE from .bet import BetIE from .bfi import BFIPlayerIE from .bfmtv import ( BFMTVIE, BFMTVLiveIE, BFMTVArticleIE, ) from .bibeltv import BibelTVIE from .bigflix import BigflixIE from .bigo import BigoIE from .bild import BildIE from .bilibili import ( BiliBiliIE, BiliBiliBangumiIE, BilibiliAudioIE, BilibiliAudioAlbumIE, BiliBiliPlayerIE, ) from .biobiochiletv import BioBioChileTVIE from .bitchute import ( BitChuteIE, BitChuteChannelIE, ) from .biqle import BIQLEIE from .bleacherreport import ( BleacherReportIE, BleacherReportCMSIE, ) from .blerp import BlerpIE from .bloomberg import BloombergIE from .bokecc import BokeCCIE from .bongacams import BongaCamsIE from .bostonglobe import BostonGlobeIE from .box import BoxIE from .bpb import BpbIE from .br import ( BRIE, BRMediathekIE, ) from .bravotv import BravoTVIE from .breakcom import BreakIE from .brightcove import ( BrightcoveLegacyIE, BrightcoveNewIE, ) from .businessinsider import BusinessInsiderIE from .buzzfeed import BuzzFeedIE from .byutv import BYUtvIE from .c56 import C56IE from .caffeine import CaffeineTVIE from .callin import CallinIE from .camdemy import ( CamdemyIE, CamdemyFolderIE ) from .cammodels import CamModelsIE from .camtube import CamTubeIE from .camwithher import CamWithHerIE from .canalplus import CanalplusIE from .canalc2 import Canalc2IE from .canvas import ( CanvasIE, CanvasEenIE, VrtNUIE, DagelijkseKostIE, ) from .carambatv import ( CarambaTVIE, CarambaTVPageIE, ) from .cartoonnetwork import CartoonNetworkIE from .cbc import ( CBCIE, 
CBCPlayerIE, CBCWatchVideoIE, CBCWatchIE, CBCOlympicsIE, ) from .cbs import CBSIE from .cbslocal import ( CBSLocalIE, CBSLocalArticleIE, ) from .cbsinteractive import CBSInteractiveIE from .cbsnews import ( CBSNewsEmbedIE, CBSNewsIE, CBSNewsLiveVideoIE, ) from .cbssports import ( CBSSportsEmbedIE, CBSSportsIE, TwentyFourSevenSportsIE, ) from .ccc import ( CCCIE, CCCPlaylistIE, ) from .ccma import CCMAIE from .cctv import CCTVIE from .cda import CDAIE from .ceskatelevize import CeskaTelevizeIE from .channel9 import Channel9IE from .charlierose import CharlieRoseIE from .chaturbate import ChaturbateIE from .chilloutzone import ChilloutzoneIE from .chirbit import ( ChirbitIE, ChirbitProfileIE, ) from .cinchcast import CinchcastIE from .cinemax import CinemaxIE from .ciscolive import ( CiscoLiveSessionIE, CiscoLiveSearchIE, ) from .cjsw import CJSWIE from .clipchamp import ClipchampIE from .cliphunter import CliphunterIE from .clippit import ClippitIE from .cliprs import ClipRsIE from .clipsyndicate import ClipsyndicateIE from .closertotruth import CloserToTruthIE from .cloudflarestream import CloudflareStreamIE from .cloudy import CloudyIE from .clubic import ClubicIE from .clyp import ClypIE from .cmt import CMTIE from .cnbc import ( CNBCIE, CNBCVideoIE, ) from .cnn import ( CNNIE, CNNBlogsIE, CNNArticleIE, ) from .coub import CoubIE from .comedycentral import ( ComedyCentralIE, ComedyCentralTVIE, ) from .commonmistakes import CommonMistakesIE, UnicodeBOMIE from .commonprotocols import ( MmsIE, RtmpIE, ) from .condenast import CondeNastIE from .contv import CONtvIE from .corus import CorusIE from .cpac import ( CPACIE, CPACPlaylistIE, ) from .cracked import CrackedIE from .crackle import CrackleIE from .crooksandliars import CrooksAndLiarsIE from .crunchyroll import ( CrunchyrollIE, CrunchyrollShowPlaylistIE ) from .cspan import CSpanIE from .ctsnews import CtsNewsIE from .ctv import CTVIE from .ctvnews import CTVNewsIE from .cultureunplugged import 
CultureUnpluggedIE from .curiositystream import ( CuriosityStreamIE, CuriosityStreamCollectionIE, ) from .cwtv import CWTVIE from .dailymail import DailyMailIE from .dailymotion import ( DailymotionIE, DailymotionPlaylistIE, DailymotionUserIE, ) from .daum import ( DaumIE, DaumClipIE, DaumPlaylistIE, DaumUserIE, ) from .dbtv import DBTVIE from .dctp import DctpTvIE from .deezer import DeezerPlaylistIE from .democracynow import DemocracynowIE from .dlf import ( DLFCorpusIE, DLFIE, ) from .dfb import DFBIE from .dhm import DHMIE from .digg import DiggIE from .dotsub import DotsubIE from .douyutv import ( DouyuShowIE, DouyuTVIE, ) from .dplay import ( DPlayIE, DiscoveryPlusIE, HGTVDeIE, ) from .dreisat import DreiSatIE from .drbonanza import DRBonanzaIE from .drtuber import DrTuberIE from .drtv import ( DRTVIE, DRTVLiveIE, ) from .dtube import DTubeIE from .dvtv import DVTVIE from .dumpert import DumpertIE from .defense import DefenseGouvFrIE from .discovery import DiscoveryIE from .discoverygo import ( DiscoveryGoIE, DiscoveryGoPlaylistIE, ) from .discoverynetworks import DiscoveryNetworksDeIE from .discoveryvr import DiscoveryVRIE from .disney import DisneyIE from .dispeak import DigitallySpeakingIE from .dropbox import DropboxIE from .dw import ( DWIE, DWArticleIE, ) from .eagleplatform import EaglePlatformIE from .ebaumsworld import EbaumsWorldIE from .echomsk import EchoMskIE from .egghead import ( EggheadCourseIE, EggheadLessonIE, ) from .ehow import EHowIE from .eighttracks import EightTracksIE from .einthusan import EinthusanIE from .eitb import EitbIE from .ellentube import ( EllenTubeIE, EllenTubeVideoIE, EllenTubePlaylistIE, ) from .elpais import ElPaisIE from .embedly import EmbedlyIE from .engadget import EngadgetIE from .epidemicsound import EpidemicSoundIE from .eporner import EpornerIE from .eroprofile import EroProfileIE from .escapist import EscapistIE from .espn import ( ESPNIE, ESPNArticleIE, FiveThirtyEightIE, ) from .esri import EsriVideoIE from 
.europa import EuropaIE from .expotv import ExpoTVIE from .expressen import ExpressenIE from .extremetube import ExtremeTubeIE from .eyedotv import EyedoTVIE from .facebook import ( FacebookIE, FacebookPluginsVideoIE, ) from .faz import FazIE from .fc2 import ( FC2IE, FC2EmbedIE, ) from .fczenit import FczenitIE from .fifa import FifaIE from .filmon import ( FilmOnIE, FilmOnChannelIE, ) from .filmweb import FilmwebIE from .firsttv import FirstTVIE from .fivemin import FiveMinIE from .fivetv import FiveTVIE from .flickr import FlickrIE from .folketinget import FolketingetIE from .footyroom import FootyRoomIE from .formula1 import Formula1IE from .fourtube import ( FourTubeIE, PornTubeIE, PornerBrosIE, FuxIE, ) from .fox import FOXIE from .fox9 import ( FOX9IE, FOX9NewsIE, ) from .foxgay import FoxgayIE from .foxnews import ( FoxNewsIE, FoxNewsArticleIE, ) from .foxsports import FoxSportsIE from .franceculture import FranceCultureIE from .franceinter import FranceInterIE from .francetv import ( FranceTVIE, FranceTVSiteIE, FranceTVEmbedIE, FranceTVInfoIE, FranceTVInfoSportIE, FranceTVJeunesseIE, GenerationWhatIE, CultureboxIE, ) from .freesound import FreesoundIE from .freespeech import FreespeechIE from .freshlive import FreshLiveIE from .frontendmasters import ( FrontendMastersIE, FrontendMastersLessonIE, FrontendMastersCourseIE ) from .fujitv import FujiTVFODPlus7IE from .funimation import FunimationIE from .funk import FunkIE from .fusion import FusionIE from .gaia import GaiaIE from .gameinformer import GameInformerIE from .gamespot import GameSpotIE from .gamestar import GameStarIE from .gaskrank import GaskrankIE from .gazeta import GazetaIE from .gbnews import GBNewsIE from .gdcvault import GDCVaultIE from .gedidigital import GediDigitalIE from .generic import GenericIE from .gfycat import GfycatIE from .giantbomb import GiantBombIE from .giga import GigaIE from .glide import GlideIE from .globalplayer import ( GlobalPlayerLiveIE, GlobalPlayerLivePlaylistIE, 
GlobalPlayerAudioIE, GlobalPlayerAudioEpisodeIE, GlobalPlayerVideoIE ) from .globo import ( GloboIE, GloboArticleIE, ) from .go import GoIE from .godtube import GodTubeIE from .golem import GolemIE from .googledrive import GoogleDriveIE from .googlepodcasts import ( GooglePodcastsIE, GooglePodcastsFeedIE, ) from .googlesearch import GoogleSearchIE from .goshgay import GoshgayIE from .gputechconf import GPUTechConfIE from .groupon import GrouponIE from .hbo import HBOIE from .hearthisat import HearThisAtIE from .heise import HeiseIE from .hellporno import HellPornoIE from .helsinki import HelsinkiIE from .hentaistigma import HentaiStigmaIE from .hgtv import HGTVComShowIE from .hketv import HKETVIE from .hidive import HiDiveIE from .historicfilms import HistoricFilmsIE from .hitbox import HitboxIE, HitboxLiveIE from .hitrecord import HitRecordIE from .hornbunny import HornBunnyIE from .hotnewhiphop import HotNewHipHopIE from .hotstar import ( HotStarIE, HotStarPlaylistIE, ) from .howcast import HowcastIE from .howstuffworks import HowStuffWorksIE from .hrfernsehen import HRFernsehenIE from .hrti import ( HRTiIE, HRTiPlaylistIE, ) from .huajiao import HuajiaoIE from .huffpost import HuffPostIE from .hungama import ( HungamaIE, HungamaSongIE, ) from .hypem import HypemIE from .ign import ( IGNIE, IGNVideoIE, IGNArticleIE, ) from .iheart import ( IHeartRadioIE, IHeartRadioPodcastIE, ) from .imdb import ( ImdbIE, ImdbListIE ) from .imgur import ( ImgurIE, ImgurAlbumIE, ImgurGalleryIE, ) from .ina import InaIE from .inc import IncIE from .indavideo import IndavideoEmbedIE from .infoq import InfoQIE from .instagram import ( InstagramIE, InstagramUserIE, InstagramTagIE, ) from .internazionale import InternazionaleIE from .internetvideoarchive import InternetVideoArchiveIE from .iprima import IPrimaIE from .iqiyi import IqiyiIE from .ir90tv import Ir90TvIE from .itv import ( ITVIE, ITVBTCCIE, ) from .ivi import ( IviIE, IviCompilationIE ) from .ivideon import IvideonIE from 
.iwara import IwaraIE from .izlesene import IzleseneIE from .jamendo import ( JamendoIE, JamendoAlbumIE, ) from .jeuxvideo import JeuxVideoIE from .jove import JoveIE from .joj import JojIE from .jwplatform import JWPlatformIE from .kakao import KakaoIE from .kaltura import KalturaIE from .kankan import KankanIE from .karaoketv import KaraoketvIE from .karrierevideos import KarriereVideosIE from .keezmovies import KeezMoviesIE from .ketnet import KetnetIE from .khanacademy import ( KhanAcademyIE, KhanAcademyUnitIE, ) from .kickstarter import KickStarterIE from .kinja import KinjaEmbedIE from .kinopoisk import KinoPoiskIE from .kommunetv import KommunetvIE from .konserthusetplay import KonserthusetPlayIE from .krasview import KrasViewIE from .kth import KTHIE from .ku6 import Ku6IE from .kusi import KUSIIE from .kuwo import ( KuwoIE, KuwoAlbumIE, KuwoChartIE, KuwoSingerIE, KuwoCategoryIE, KuwoMvIE, ) from .la7 import LA7IE from .laola1tv import ( Laola1TvEmbedIE, Laola1TvIE, EHFTVIE, ITTFIE, ) from .lbry import ( LBRYIE, LBRYChannelIE, ) from .lci import LCIIE from .lcp import ( LcpPlayIE, LcpIE, ) from .lecture2go import Lecture2GoIE from .lecturio import ( LecturioIE, LecturioCourseIE, LecturioDeCourseIE, ) from .leeco import ( LeIE, LePlaylistIE, LetvCloudIE, ) from .lego import LEGOIE from .lemonde import LemondeIE from .lenta import LentaIE from .libraryofcongress import LibraryOfCongressIE from .libsyn import LibsynIE from .lifenews import ( LifeNewsIE, LifeEmbedIE, ) from .limelight import ( LimelightMediaIE, LimelightChannelIE, LimelightChannelListIE, ) from .line import ( LineTVIE, LineLiveIE, LineLiveChannelIE, ) from .linkedin import ( LinkedInLearningIE, LinkedInLearningCourseIE, ) from .linuxacademy import LinuxAcademyIE from .litv import LiTVIE from .livejournal import LiveJournalIE from .livestream import ( LivestreamIE, LivestreamOriginalIE, LivestreamShortenerIE, ) from .lnkgo import LnkGoIE from .localnews8 import LocalNews8IE from .lovehomeporn 
import LoveHomePornIE from .lrt import LRTIE from .lynda import ( LyndaIE, LyndaCourseIE ) from .m6 import M6IE from .mailru import ( MailRuIE, MailRuMusicIE, MailRuMusicSearchIE, ) from .malltv import MallTVIE from .mangomolo import ( MangomoloVideoIE, MangomoloLiveIE, ) from .manyvids import ManyVidsIE from .maoritv import MaoriTVIE from .markiza import ( MarkizaIE, MarkizaPageIE, ) from .massengeschmacktv import MassengeschmackTVIE from .matchtv import MatchTVIE from .mdr import MDRIE from .medaltv import MedalTVIE from .mediaset import MediasetIE from .mediasite import ( MediasiteIE, MediasiteCatalogIE, MediasiteNamedCatalogIE, ) from .medici import MediciIE from .megaphone import MegaphoneIE from .meipai import MeipaiIE from .melonvod import MelonVODIE from .meta import METAIE from .metacafe import MetacafeIE from .metacritic import MetacriticIE from .mgoon import MgoonIE from .mgtv import MGTVIE from .miaopai import MiaoPaiIE from .microsoftvirtualacademy import ( MicrosoftVirtualAcademyIE, MicrosoftVirtualAcademyCourseIE, ) from .minds import ( MindsIE, MindsChannelIE, MindsGroupIE, ) from .ministrygrid import MinistryGridIE from .minoto import MinotoIE from .miomio import MioMioIE from .mit import TechTVMITIE, OCWMITIE from .mitele import MiTeleIE from .mixcloud import ( MixcloudIE, MixcloudUserIE, MixcloudPlaylistIE, ) from .mlb import ( MLBIE, MLBVideoIE, ) from .mnet import MnetIE from .moevideo import MoeVideoIE from .mofosex import ( MofosexIE, MofosexEmbedIE, ) from .mojvideo import MojvideoIE from .morningstar import MorningstarIE from .motherless import ( MotherlessIE, MotherlessGroupIE ) from .motorsport import MotorsportIE from .movieclips import MovieClipsIE from .moviezine import MoviezineIE from .movingimage import MovingImageIE from .msn import MSNIE from .mtv import ( MTVIE, MTVVideoIE, MTVServicesEmbeddedIE, MTVDEIE, MTVJapanIE, ) from .muenchentv import MuenchenTVIE from .mwave import MwaveIE, MwaveMeetGreetIE from .mychannels import 
MyChannelsIE from .myspace import MySpaceIE, MySpaceAlbumIE from .myspass import MySpassIE from .myvi import ( MyviIE, MyviEmbedIE, ) from .myvideoge import MyVideoGeIE from .myvidster import MyVidsterIE from .nationalgeographic import ( NationalGeographicVideoIE, NationalGeographicTVIE, ) from .naver import NaverIE from .nba import ( NBAWatchEmbedIE, NBAWatchIE, NBAWatchCollectionIE, NBAEmbedIE, NBAIE, NBAChannelIE, ) from .nbc import ( NBCIE, NBCNewsIE, NBCOlympicsIE, NBCOlympicsStreamIE, NBCSportsIE, NBCSportsStreamIE, NBCSportsVPlayerIE, ) from .ndr import ( NDRIE, NJoyIE, NDREmbedBaseIE, NDREmbedIE, NJoyEmbedIE, ) from .ndtv import NDTVIE from .netzkino import NetzkinoIE from .nerdcubed import NerdCubedFeedIE from .neteasemusic import ( NetEaseMusicIE, NetEaseMusicAlbumIE, NetEaseMusicSingerIE, NetEaseMusicListIE, NetEaseMusicMvIE, NetEaseMusicProgramIE, NetEaseMusicDjRadioIE, ) from .newgrounds import ( NewgroundsIE, NewgroundsPlaylistIE, ) from .newstube import NewstubeIE from .nextmedia import ( NextMediaIE, NextMediaActionNewsIE, AppleDailyIE, NextTVIE, ) from .nexx import ( NexxIE, NexxEmbedIE, ) from .nfl import ( NFLIE, NFLArticleIE, ) from .nhk import ( NhkVodIE, NhkVodProgramIE, ) from .nhl import NHLIE from .nick import ( NickIE, NickBrIE, NickDeIE, NickNightIE, NickRuIE, ) from .niconico import ( NiconicoIE, NiconicoPlaylistIE, NiconicoUserIE, NicovideoSearchIE, NicovideoSearchDateIE, NicovideoSearchURLIE, ) from .ninecninemedia import NineCNineMediaIE from .ninegag import NineGagIE from .ninenow import NineNowIE from .nintendo import NintendoIE from .njpwworld import NJPWWorldIE from .nobelprize import NobelPrizeIE from .nonktube import NonkTubeIE from .noovo import NoovoIE from .normalboots import NormalbootsIE from .nosvideo import NosVideoIE from .nova import ( NovaEmbedIE, NovaIE, ) from .nowness import ( NownessIE, NownessPlaylistIE, NownessSeriesIE, ) from .noz import NozIE from .npo import ( AndereTijdenIE, NPOIE, NPOLiveIE, NPORadioIE, 
NPORadioFragmentIE, SchoolTVIE, HetKlokhuisIE, VPROIE, WNLIE, ) from .npr import NprIE from .nrk import ( NRKIE, NRKPlaylistIE, NRKSkoleIE, NRKTVIE, NRKTVDirekteIE, NRKRadioPodkastIE, NRKTVEpisodeIE, NRKTVEpisodesIE, NRKTVSeasonIE, NRKTVSeriesIE, ) from .nrl import NRLTVIE from .ntvcojp import NTVCoJpCUIE from .ntvde import NTVDeIE from .ntvru import NTVRuIE from .nytimes import ( NYTimesIE, NYTimesArticleIE, NYTimesCookingIE, ) from .nuvid import NuvidIE from .nzz import NZZIE from .odatv import OdaTVIE from .odnoklassniki import OdnoklassnikiIE from .oktoberfesttv import OktoberfestTVIE from .ondemandkorea import OnDemandKoreaIE from .onet import ( OnetIE, OnetChannelIE, OnetMVPIE, OnetPlIE, ) from .onionstudios import OnionStudiosIE from .ooyala import ( OoyalaIE, OoyalaExternalIE, ) from .ora import OraTVIE from .orf import ( ORFONIE, ORFONLiveIE, ORFFM4StoryIE, ORFIPTVIE, ORFPodcastIE, ORFRadioIE, ORFRadioCollectionIE, ) from .outsidetv import OutsideTVIE from .packtpub import ( PacktPubIE, PacktPubCourseIE, ) from .palcomp3 import ( PalcoMP3IE, PalcoMP3ArtistIE, PalcoMP3VideoIE, ) from .pandoratv import PandoraTVIE from .parliamentliveuk import ParliamentLiveUKIE from .patreon import PatreonIE from .pbs import PBSIE from .pearvideo import PearVideoIE from .peekvids import ( PeekVidsIE, PlayVidsIE, ) from .peertube import PeerTubeIE from .people import PeopleIE from .performgroup import PerformGroupIE from .periscope import ( PeriscopeIE, PeriscopeUserIE, ) from .philharmoniedeparis import PhilharmonieDeParisIE from .phoenix import PhoenixIE from .photobucket import PhotobucketIE from .picarto import ( PicartoIE, PicartoVodIE, ) from .piksel import PikselIE from .pinkbike import PinkbikeIE from .pinterest import ( PinterestIE, PinterestCollectionIE, ) from .pladform import PladformIE from .platzi import ( PlatziIE, PlatziCourseIE, ) from .playfm import PlayFMIE from .playplustv import PlayPlusTVIE from .plays import PlaysTVIE from .playstuff import PlayStuffIE 
from .playtvak import PlaytvakIE from .playvid import PlayvidIE from .playwire import PlaywireIE from .pluralsight import ( PluralsightIE, PluralsightCourseIE, ) from .podomatic import PodomaticIE from .pokemon import PokemonIE from .polskieradio import ( PolskieRadioIE, PolskieRadioCategoryIE, ) from .popcorntimes import PopcorntimesIE from .popcorntv import PopcornTVIE from .porn91 import Porn91IE from .porncom import PornComIE from .pornhd import PornHdIE from .pornhub import ( PornHubIE, PornHubUserIE, PornHubPagedVideoListIE, PornHubUserVideosUploadIE, ) from .pornotube import PornotubeIE from .pornovoisines import PornoVoisinesIE from .pornoxo import PornoXOIE from .pr0gramm import ( Pr0grammIE, Pr0grammStaticIE, ) from .puhutv import ( PuhuTVIE, PuhuTVSerieIE, ) from .presstv import PressTVIE from .prosiebensat1 import ProSiebenSat1IE from .puls4 import Puls4IE from .pyvideo import PyvideoIE from .qqmusic import ( QQMusicIE, QQMusicSingerIE, QQMusicAlbumIE, QQMusicToplistIE, QQMusicPlaylistIE, ) from .r7 import ( R7IE, R7ArticleIE, ) from .radiocanada import ( RadioCanadaIE, RadioCanadaAudioVideoIE, ) from .radiode import RadioDeIE from .radiojavan import RadioJavanIE from .radiobremen import RadioBremenIE from .radiofrance import RadioFranceIE from .rai import ( RaiPlayIE, RaiPlayLiveIE, RaiPlayPlaylistIE, RaiIE, ) from .raywenderlich import ( RayWenderlichIE, RayWenderlichCourseIE, ) from .rbgtum import ( RbgTumIE, RbgTumCourseIE, ) from .rbmaradio import RBMARadioIE from .rds import RDSIE from .redbulltv import ( RedBullTVIE, RedBullEmbedIE, RedBullTVRrnContentIE, RedBullIE, ) from .reddit import ( RedditIE, RedditRIE, ) from .redtube import RedTubeIE from .regiotv import RegioTVIE from .rentv import ( RENTVIE, RENTVArticleIE, ) from .restudy import RestudyIE from .reuters import ReutersIE from .reverbnation import ReverbNationIE from .rice import RICEIE from .rmcdecouverte import RMCDecouverteIE from .ro220 import Ro220IE from .rockstargames import 
RockstarGamesIE from .roosterteeth import RoosterTeethIE from .rottentomatoes import RottenTomatoesIE from .roxwel import RoxwelIE from .rozhlas import RozhlasIE from .rtbf import RTBFIE from .rte import RteIE, RteRadioIE from .rtlnl import RtlNlIE from .rtl2 import ( RTL2IE, RTL2YouIE, RTL2YouSeriesIE, ) from .rtp import RTPIE from .rts import RTSIE from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE, RTVELiveIE, RTVETelevisionIE from .rtvnh import RTVNHIE from .rtvs import RTVSIE from .ruhd import RUHDIE from .rumble import RumbleEmbedIE from .rutube import ( RutubeIE, RutubeChannelIE, RutubeEmbedIE, RutubeMovieIE, RutubePersonIE, RutubePlaylistIE, ) from .rutv import RUTVIE from .ruutu import RuutuIE from .ruv import RuvIE from .s4c import ( S4CIE, S4CSeriesIE, ) from .safari import ( SafariIE, SafariApiIE, SafariCourseIE, ) from .samplefocus import SampleFocusIE from .sapo import SapoIE from .savefrom import SaveFromIE from .sbs import SBSIE from .screencast import ScreencastIE from .screencastomatic import ScreencastOMaticIE from .scrippsnetworks import ( ScrippsNetworksWatchIE, ScrippsNetworksIE, ) from .scte import ( SCTEIE, SCTECourseIE, ) from .seeker import SeekerIE from .senateisvp import SenateISVPIE from .sendtonews import SendtoNewsIE from .servus import ServusIE from .sevenplus import SevenPlusIE from .sexu import SexuIE from .seznamzpravy import ( SeznamZpravyIE, SeznamZpravyArticleIE, ) from .shahid import ( ShahidIE, ShahidShowIE, ) from .shared import ( SharedIE, VivoIE, ) from .showroomlive import ShowRoomLiveIE from .simplecast import ( SimplecastIE, SimplecastEpisodeIE, SimplecastPodcastIE, ) from .sina import SinaIE from .sixplay import SixPlayIE from .skyit import ( SkyItPlayerIE, SkyItVideoIE, SkyItVideoLiveIE, SkyItIE, SkyItAcademyIE, SkyItArteIE, CieloTVItIE, TV8ItIE, ) from .skylinewebcams import SkylineWebcamsIE from .skynewsarabia import ( SkyNewsArabiaIE, SkyNewsArabiaArticleIE, ) from .sky import ( SkyNewsIE, SkySportsIE, 
SkySportsNewsIE, ) from .slideshare import SlideshareIE from .slideslive import SlidesLiveIE from .slutload import SlutloadIE from .snotr import SnotrIE from .sohu import SohuIE from .sonyliv import SonyLIVIE from .soundcloud import ( SoundcloudEmbedIE, SoundcloudIE, SoundcloudSetIE, SoundcloudUserIE, SoundcloudTrackStationIE, SoundcloudPlaylistIE, SoundcloudSearchIE, ) from .soundgasm import ( SoundgasmIE, SoundgasmProfileIE ) from .southpark import ( SouthParkIE, SouthParkDeIE, SouthParkDkIE, SouthParkEsIE, SouthParkNlIE ) from .spankbang import ( SpankBangIE, SpankBangPlaylistIE, ) from .spankwire import SpankwireIE from .spiegel import SpiegelIE from .spike import ( BellatorIE, ParamountNetworkIE, ) from .stitcher import ( StitcherIE, StitcherShowIE, ) from .sport5 import Sport5IE from .sportbox import SportBoxIE from .sportdeutschland import SportDeutschlandIE from .spotify import ( SpotifyIE, SpotifyShowIE, ) from .spreaker import ( SpreakerIE, SpreakerPageIE, SpreakerShowIE, SpreakerShowPageIE, ) from .springboardplatform import SpringboardPlatformIE from .sprout import SproutIE from .srgssr import ( SRGSSRIE, SRGSSRPlayIE, ) from .srmediathek import SRMediathekIE from .stanfordoc import StanfordOpenClassroomIE from .steam import SteamIE from .storyfire import ( StoryFireIE, StoryFireUserIE, StoryFireSeriesIE, ) from .streamable import StreamableIE from .streamcloud import StreamcloudIE from .streamcz import StreamCZIE from .streamsb import StreamsbIE from .streetvoice import StreetVoiceIE from .stretchinternet import StretchInternetIE from .stv import STVPlayerIE from .sunporno import SunPornoIE from .sverigesradio import ( SverigesRadioEpisodeIE, SverigesRadioPublicationIE, ) from .svt import ( SVTIE, SVTPageIE, SVTPlayIE, SVTSeriesIE, ) from .swrmediathek import SWRMediathekIE from .syfy import SyfyIE from .sztvhu import SztvHuIE from .tagesschau import ( TagesschauPlayerIE, TagesschauIE, ) from .tass import TassIE from .tbs import TBSIE from .tdslifeway 
import TDSLifewayIE from .teachable import ( TeachableIE, TeachableCourseIE, ) from .teachertube import ( TeacherTubeIE, TeacherTubeUserIE, ) from .teachingchannel import TeachingChannelIE from .teamcoco import TeamcocoIE from .teamtreehouse import TeamTreeHouseIE from .techtalks import TechTalksIE from .ted import TEDIE from .tele5 import Tele5IE from .tele13 import Tele13IE from .telebruxelles import TeleBruxellesIE from .telecinco import TelecincoIE from .telegraaf import TelegraafIE from .telemb import TeleMBIE from .telequebec import ( TeleQuebecIE, TeleQuebecSquatIE, TeleQuebecEmissionIE, TeleQuebecLiveIE, TeleQuebecVideoIE, ) from .teletask import TeleTaskIE from .telewebion import TelewebionIE from .tennistv import TennisTVIE from .tenplay import TenPlayIE from .testurl import TestURLIE from .tf1 import TF1IE from .tfo import TFOIE from .theintercept import TheInterceptIE from .theplatform import ( ThePlatformIE, ThePlatformFeedIE, ) from .thescene import TheSceneIE from .thestar import TheStarIE from .thesun import TheSunIE from .theweatherchannel import TheWeatherChannelIE from .thisamericanlife import ThisAmericanLifeIE from .thisav import ThisAVIE from .thisoldhouse import ThisOldHouseIE from .thisvid import ( ThisVidIE, ThisVidMemberIE, ThisVidPlaylistIE, ) from .threeqsdn import ThreeQSDNIE from .tiktok import ( TikTokIE, TikTokUserIE, ) from .tinypic import TinyPicIE from .tmz import ( TMZIE, TMZArticleIE, ) from .tnaflix import ( TNAFlixNetworkEmbedIE, TNAFlixIE, EMPFlixIE, MovieFapIE, ) from .toggle import ( ToggleIE, MeWatchIE, ) from .tonline import TOnlineIE from .toongoggles import ToonGogglesIE from .toutv import TouTvIE from .toypics import ToypicsUserIE, ToypicsIE from .traileraddict import TrailerAddictIE from .trilulilu import TriluliluIE from .trovo import ( TrovoIE, TrovoVodIE, ) from .trunews import TruNewsIE from .trutv import TruTVIE from .tube8 import Tube8IE from .tubitv import TubiTvIE from .tumblr import TumblrIE from .tunein 
import ( TuneInClipIE, TuneInStationIE, TuneInProgramIE, TuneInTopicIE, TuneInShortenerIE, ) from .tunepk import TunePkIE from .turbo import TurboIE from .tv2 import ( TV2IE, TV2ArticleIE, KatsomoIE, MTVUutisetArticleIE, ) from .tv2dk import ( TV2DKIE, TV2DKBornholmPlayIE, ) from .tv2hu import TV2HuIE from .tv4 import TV4IE from .tv5mondeplus import TV5MondePlusIE from .tv5unis import ( TV5UnisVideoIE, TV5UnisIE, ) from .tva import ( TVAIE, QubIE, ) from .tvanouvelles import ( TVANouvellesIE, TVANouvellesArticleIE, ) from .tvc import ( TVCIE, TVCArticleIE, ) from .tver import TVerIE from .tvigle import TvigleIE from .tvland import TVLandIE from .tvn24 import TVN24IE from .tvnet import TVNetIE from .tvnoe import TVNoeIE from .tvnow import ( TVNowIE, TVNowNewIE, TVNowSeasonIE, TVNowAnnualIE, TVNowShowIE, ) from .tvp import ( TVPEmbedIE, TVPIE, TVPWebsiteIE, ) from .tvplay import ( TVPlayIE, ViafreeIE, TVPlayHomeIE, ) from .tvplayer import TVPlayerIE from .tweakers import TweakersIE from .twentyfourvideo import TwentyFourVideoIE from .twentymin import TwentyMinutenIE from .twentythreevideo import TwentyThreeVideoIE from .twitcasting import TwitCastingIE from .twitch import ( TwitchVodIE, TwitchCollectionIE, TwitchVideosIE, TwitchVideosClipsIE, TwitchVideosCollectionsIE, TwitchStreamIE, TwitchClipsIE, ) from .twitter import ( TwitterCardIE, TwitterIE, TwitterAmplifyIE, TwitterBroadcastIE, ) from .udemy import ( UdemyIE, UdemyCourseIE ) from .udn import UDNEmbedIE from .ufctv import ( UFCTVIE, UFCArabiaIE, ) from .uktvplay import UKTVPlayIE from .digiteka import DigitekaIE from .dlive import ( DLiveVODIE, DLiveStreamIE, ) from .umg import UMGDeIE from .unistra import UnistraIE from .unity import UnityIE from .uol import UOLIE from .uplynk import ( UplynkIE, UplynkPreplayIE, ) from .urort import UrortIE from .urplay import URPlayIE from .usanetwork import USANetworkIE from .usatoday import USATodayIE from .ustream import UstreamIE, UstreamChannelIE from .ustudio import ( 
UstudioIE, UstudioEmbedIE, ) from .varzesh3 import Varzesh3IE from .vbox7 import Vbox7IE from .veehd import VeeHDIE from .veoh import VeohIE from .vesti import VestiIE from .vevo import ( VevoIE, VevoPlaylistIE, ) from .vgtv import ( BTArticleIE, BTVestlendingenIE, VGTVIE, ) from .vh1 import VH1IE from .vice import ( ViceIE, ViceArticleIE, ViceShowIE, ) from .vidbit import VidbitIE from .viddler import ViddlerIE from .videa import VideaIE from .videodetective import VideoDetectiveIE from .videofyme import VideofyMeIE from .videomore import ( VideomoreIE, VideomoreVideoIE,
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
true
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/commonmistakes.py
youtube_dl/extractor/commonmistakes.py
from __future__ import unicode_literals import sys from .common import InfoExtractor from ..utils import ExtractorError class CommonMistakesIE(InfoExtractor): IE_DESC = False # Do not list _VALID_URL = r'''(?x) (?:url|URL)$ ''' _TESTS = [{ 'url': 'url', 'only_matching': True, }, { 'url': 'URL', 'only_matching': True, }] def _real_extract(self, url): msg = ( 'You\'ve asked youtube-dl to download the URL "%s". ' 'That doesn\'t make any sense. ' 'Simply remove the parameter in your command or configuration.' ) % url if not self._downloader.params.get('verbose'): msg += ' Add -v to the command line to see what arguments and configuration youtube-dl got.' raise ExtractorError(msg, expected=True) class UnicodeBOMIE(InfoExtractor): IE_DESC = False _VALID_URL = r'(?P<bom>\ufeff)(?P<id>.*)$' # Disable test for python 3.2 since BOM is broken in re in this version # (see https://github.com/ytdl-org/youtube-dl/issues/9751) _TESTS = [] if (3, 0) < sys.version_info <= (3, 3) else [{ 'url': '\ufeffhttp://www.youtube.com/watch?v=BaW_jenozKc', 'only_matching': True, }] def _real_extract(self, url): real_url = self._match_id(url) self.report_warning( 'Your URL starts with a Byte Order Mark (BOM). ' 'Removing the BOM and looking for "%s" ...' % real_url) return self.url_result(real_url)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/dfb.py
youtube_dl/extractor/dfb.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import unified_strdate class DFBIE(InfoExtractor): IE_NAME = 'tv.dfb.de' _VALID_URL = r'https?://tv\.dfb\.de/video/(?P<display_id>[^/]+)/(?P<id>\d+)' _TEST = { 'url': 'http://tv.dfb.de/video/u-19-em-stimmen-zum-spiel-gegen-russland/11633/', 'md5': 'ac0f98a52a330f700b4b3034ad240649', 'info_dict': { 'id': '11633', 'display_id': 'u-19-em-stimmen-zum-spiel-gegen-russland', 'ext': 'mp4', 'title': 'U 19-EM: Stimmen zum Spiel gegen Russland', 'upload_date': '20150714', }, } def _real_extract(self, url): display_id, video_id = re.match(self._VALID_URL, url).groups() player_info = self._download_xml( 'http://tv.dfb.de/server/hd_video.php?play=%s' % video_id, display_id) video_info = player_info.find('video') stream_access_url = self._proto_relative_url(video_info.find('url').text.strip()) formats = [] # see http://tv.dfb.de/player/js/ajax.js for the method to extract m3u8 formats for sa_url in (stream_access_url, stream_access_url + '&area=&format=iphone'): stream_access_info = self._download_xml(sa_url, display_id) token_el = stream_access_info.find('token') manifest_url = token_el.attrib['url'] + '?' + 'hdnea=' + token_el.attrib['auth'] if '.f4m' in manifest_url: formats.extend(self._extract_f4m_formats( manifest_url + '&hdcore=3.2.0', display_id, f4m_id='hds', fatal=False)) else: formats.extend(self._extract_m3u8_formats( manifest_url, display_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) self._sort_formats(formats) return { 'id': video_id, 'display_id': display_id, 'title': video_info.find('title').text, 'thumbnail': 'http://tv.dfb.de/images/%s_640x360.jpg' % video_id, 'upload_date': unified_strdate(video_info.find('time_date').text), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/sexu.py
youtube_dl/extractor/sexu.py
from __future__ import unicode_literals from .common import InfoExtractor class SexuIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?sexu\.com/(?P<id>\d+)' _TEST = { 'url': 'http://sexu.com/961791/', 'md5': 'ff615aca9691053c94f8f10d96cd7884', 'info_dict': { 'id': '961791', 'ext': 'mp4', 'title': 'md5:4d05a19a5fc049a63dbbaf05fb71d91b', 'description': 'md5:2b75327061310a3afb3fbd7d09e2e403', 'categories': list, # NSFW 'thumbnail': r're:https?://.*\.jpg$', 'age_limit': 18, } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) jwvideo = self._parse_json( self._search_regex(r'\.setup\(\s*({.+?})\s*\);', webpage, 'jwvideo'), video_id) sources = jwvideo['sources'] formats = [{ 'url': source['file'].replace('\\', ''), 'format_id': source.get('label'), 'height': int(self._search_regex( r'^(\d+)[pP]', source.get('label', ''), 'height', default=None)), } for source in sources if source.get('file')] self._sort_formats(formats) title = self._html_search_regex( r'<title>([^<]+)\s*-\s*Sexu\.Com</title>', webpage, 'title') description = self._html_search_meta( 'description', webpage, 'description') thumbnail = jwvideo.get('image') categories_str = self._html_search_meta( 'keywords', webpage, 'categories') categories = ( None if categories_str is None else categories_str.split(',')) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'categories': categories, 'formats': formats, 'age_limit': 18, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/soundcloud.py
youtube_dl/extractor/soundcloud.py
# coding: utf-8 from __future__ import unicode_literals import itertools import re from .common import ( InfoExtractor, SearchInfoExtractor ) from ..compat import ( compat_HTTPError, compat_kwargs, compat_str, compat_urlparse, ) from ..utils import ( error_to_compat_str, ExtractorError, float_or_none, HEADRequest, int_or_none, KNOWN_EXTENSIONS, mimetype2ext, str_or_none, try_get, unified_timestamp, update_url_query, url_or_none, urlhandle_detect_ext, ) class SoundcloudEmbedIE(InfoExtractor): _VALID_URL = r'https?://(?:w|player|p)\.soundcloud\.com/player/?.*?\burl=(?P<id>.+)' _TEST = { # from https://www.soundi.fi/uutiset/ennakkokuuntelussa-timo-kaukolammen-station-to-station-to-station-julkaisua-juhlitaan-tanaan-g-livelabissa/ 'url': 'https://w.soundcloud.com/player/?visual=true&url=https%3A%2F%2Fapi.soundcloud.com%2Fplaylists%2F922213810&show_artwork=true&maxwidth=640&maxheight=960&dnt=1&secret_token=s-ziYey', 'only_matching': True, } @staticmethod def _extract_urls(webpage): return [m.group('url') for m in re.finditer( r'<iframe[^>]+src=(["\'])(?P<url>(?:https?://)?(?:w\.)?soundcloud\.com/player.+?)\1', webpage)] def _real_extract(self, url): query = compat_urlparse.parse_qs( compat_urlparse.urlparse(url).query) api_url = query['url'][0] secret_token = query.get('secret_token') if secret_token: api_url = update_url_query(api_url, {'secret_token': secret_token[0]}) return self.url_result(api_url) class SoundcloudIE(InfoExtractor): """Information extractor for soundcloud.com To access the media, the uid of the song and a stream token must be extracted from the page source and the script must make a request to media.soundcloud.com/crossdomain.xml. Then the media can be grabbed by requesting from an url composed of the stream token and uid """ _VALID_URL = r'''(?x)^(?:https?://)? (?:(?:(?:www\.|m\.)?soundcloud\.com/ (?!stations/track) (?P<uploader>[\w\d-]+)/ (?!(?:tracks|albums|sets(?:/.+?)?|reposts|likes|spotlight)/?(?:$|[?#])) (?P<title>[\w\d-]+)/? 
(?P<token>[^?]+?)?(?:[?].*)?$) |(?:api(?:-v2)?\.soundcloud\.com/tracks/(?P<track_id>\d+) (?:/?\?secret_token=(?P<secret_token>[^&]+))?) ) ''' IE_NAME = 'soundcloud' _TESTS = [ { 'url': 'http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy', 'md5': 'ebef0a451b909710ed1d7787dddbf0d7', 'info_dict': { 'id': '62986583', 'ext': 'mp3', 'title': 'Lostin Powers - She so Heavy (SneakPreview) Adrian Ackers Blueprint 1', 'description': 'No Downloads untill we record the finished version this weekend, i was too pumped n i had to post it , earl is prolly gonna b hella p.o\'d', 'uploader': 'E.T. ExTerrestrial Music', 'uploader_id': '1571244', 'timestamp': 1349920598, 'upload_date': '20121011', 'duration': 143.216, 'license': 'all-rights-reserved', 'view_count': int, 'like_count': int, 'comment_count': int, 'repost_count': int, } }, # geo-restricted { 'url': 'https://soundcloud.com/the-concept-band/goldrushed-mastered?in=the-concept-band/sets/the-royal-concept-ep', 'info_dict': { 'id': '47127627', 'ext': 'mp3', 'title': 'Goldrushed', 'description': 'From Stockholm Sweden\r\nPovel / Magnus / Filip / David\r\nwww.theroyalconcept.com', 'uploader': 'The Royal Concept', 'uploader_id': '9615865', 'timestamp': 1337635207, 'upload_date': '20120521', 'duration': 227.155, 'license': 'all-rights-reserved', 'view_count': int, 'like_count': int, 'comment_count': int, 'repost_count': int, }, }, # private link { 'url': 'https://soundcloud.com/jaimemf/youtube-dl-test-video-a-y-baw/s-8Pjrp', 'md5': 'aa0dd32bfea9b0c5ef4f02aacd080604', 'info_dict': { 'id': '123998367', 'ext': 'mp3', 'title': 'Youtube - Dl Test Video \'\' Ä↭', 'description': 'test chars: \"\'/\\ä↭', 'uploader': 'jaimeMF', 'uploader_id': '69767071', 'timestamp': 1386604920, 'upload_date': '20131209', 'duration': 9.927, 'license': 'all-rights-reserved', 'view_count': int, 'like_count': int, 'comment_count': int, 'repost_count': int, }, }, # private link (alt format) { 'url': 
'https://api.soundcloud.com/tracks/123998367?secret_token=s-8Pjrp', 'md5': 'aa0dd32bfea9b0c5ef4f02aacd080604', 'info_dict': { 'id': '123998367', 'ext': 'mp3', 'title': 'Youtube - Dl Test Video \'\' Ä↭', 'description': 'test chars: \"\'/\\ä↭', 'uploader': 'jaimeMF', 'uploader_id': '69767071', 'timestamp': 1386604920, 'upload_date': '20131209', 'duration': 9.927, 'license': 'all-rights-reserved', 'view_count': int, 'like_count': int, 'comment_count': int, 'repost_count': int, }, }, # downloadable song { 'url': 'https://soundcloud.com/oddsamples/bus-brakes', 'md5': '7624f2351f8a3b2e7cd51522496e7631', 'info_dict': { 'id': '128590877', 'ext': 'mp3', 'title': 'Bus Brakes', 'description': 'md5:0053ca6396e8d2fd7b7e1595ef12ab66', 'uploader': 'oddsamples', 'uploader_id': '73680509', 'timestamp': 1389232924, 'upload_date': '20140109', 'duration': 17.346, 'license': 'cc-by-sa', 'view_count': int, 'like_count': int, 'comment_count': int, 'repost_count': int, }, }, # private link, downloadable format { 'url': 'https://soundcloud.com/oriuplift/uponly-238-no-talking-wav/s-AyZUd', 'md5': '64a60b16e617d41d0bef032b7f55441e', 'info_dict': { 'id': '340344461', 'ext': 'wav', 'title': 'Uplifting Only 238 [No Talking] (incl. Alex Feed Guestmix) (Aug 31, 2017) [wav]', 'description': 'md5:fa20ee0fca76a3d6df8c7e57f3715366', 'uploader': 'Ori Uplift Music', 'uploader_id': '12563093', 'timestamp': 1504206263, 'upload_date': '20170831', 'duration': 7449.096, 'license': 'all-rights-reserved', 'view_count': int, 'like_count': int, 'comment_count': int, 'repost_count': int, }, }, # no album art, use avatar pic for thumbnail { 'url': 'https://soundcloud.com/garyvee/sideways-prod-mad-real', 'md5': '59c7872bc44e5d99b7211891664760c2', 'info_dict': { 'id': '309699954', 'ext': 'mp3', 'title': 'Sideways (Prod. 
Mad Real)', 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e', 'uploader': 'garyvee', 'uploader_id': '2366352', 'timestamp': 1488152409, 'upload_date': '20170226', 'duration': 207.012, 'thumbnail': r're:https?://.*\.jpg', 'license': 'all-rights-reserved', 'view_count': int, 'like_count': int, 'comment_count': int, 'repost_count': int, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://soundcloud.com/giovannisarani/mezzo-valzer', 'md5': 'e22aecd2bc88e0e4e432d7dcc0a1abf7', 'info_dict': { 'id': '583011102', 'ext': 'mp3', 'title': 'Mezzo Valzer', 'description': 'md5:4138d582f81866a530317bae316e8b61', 'uploader': 'Micronie', 'uploader_id': '3352531', 'timestamp': 1551394171, 'upload_date': '20190228', 'duration': 180.157, 'thumbnail': r're:https?://.*\.jpg', 'license': 'all-rights-reserved', 'view_count': int, 'like_count': int, 'comment_count': int, 'repost_count': int, }, }, { # with AAC HQ format available via OAuth token 'url': 'https://soundcloud.com/wandw/the-chainsmokers-ft-daya-dont-let-me-down-ww-remix-1', 'only_matching': True, }, ] _API_V2_BASE = 'https://api-v2.soundcloud.com/' _BASE_URL = 'https://soundcloud.com/' _IMAGE_REPL_RE = r'-([0-9a-z]+)\.jpg' _ARTWORK_MAP = { 'mini': 16, 'tiny': 20, 'small': 32, 'badge': 47, 't67x67': 67, 'large': 100, 't300x300': 300, 'crop': 400, 't500x500': 500, 'original': 0, } def _store_client_id(self, client_id): self._downloader.cache.store('soundcloud', 'client_id', client_id) def _update_client_id(self): webpage = self._download_webpage('https://soundcloud.com/', None) for src in reversed(re.findall(r'<script[^>]+src="([^"]+)"', webpage)): script = self._download_webpage(src, None, fatal=False) if script: client_id = self._search_regex( r'client_id\s*:\s*"([0-9a-zA-Z]{32})"', script, 'client id', default=None) if client_id: self._CLIENT_ID = client_id self._store_client_id(client_id) return raise ExtractorError('Unable to extract client id') def _download_json(self, *args, **kwargs): non_fatal = 
kwargs.get('fatal') is False if non_fatal: del kwargs['fatal'] query = kwargs.get('query', {}).copy() for _ in range(2): query['client_id'] = self._CLIENT_ID kwargs['query'] = query try: return super(SoundcloudIE, self)._download_json(*args, **compat_kwargs(kwargs)) except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401: self._store_client_id(None) self._update_client_id() continue elif non_fatal: self._downloader.report_warning(error_to_compat_str(e)) return False raise def _real_initialize(self): self._CLIENT_ID = self._downloader.cache.load('soundcloud', 'client_id') or 'YUKXoArFcqrlQn9tfNHvvyfnDISj04zk' @classmethod def _resolv_url(cls, url): return SoundcloudIE._API_V2_BASE + 'resolve?url=' + url def _extract_info_dict(self, info, full_title=None, secret_token=None): track_id = compat_str(info['id']) title = info['title'] format_urls = set() formats = [] query = {'client_id': self._CLIENT_ID} if secret_token: query['secret_token'] = secret_token if info.get('downloadable') and info.get('has_downloads_left'): download_url = update_url_query( self._API_V2_BASE + 'tracks/' + track_id + '/download', query) redirect_url = (self._download_json(download_url, track_id, fatal=False) or {}).get('redirectUri') if redirect_url: urlh = self._request_webpage( HEADRequest(redirect_url), track_id, fatal=False) if urlh: format_url = urlh.geturl() format_urls.add(format_url) formats.append({ 'format_id': 'download', 'ext': urlhandle_detect_ext(urlh) or 'mp3', 'filesize': int_or_none(urlh.headers.get('Content-Length')), 'url': format_url, 'preference': 10, }) def invalid_url(url): return not url or url in format_urls def add_format(f, protocol, is_preview=False): mobj = re.search(r'\.(?P<abr>\d+)\.(?P<ext>[0-9a-z]{3,4})(?=[/?])', stream_url) if mobj: for k, v in mobj.groupdict().items(): if not f.get(k): f[k] = v format_id_list = [] if protocol: format_id_list.append(protocol) ext = f.get('ext') if ext == 'aac': f['abr'] = '256' for k in 
('ext', 'abr'): v = f.get(k) if v: format_id_list.append(v) preview = is_preview or re.search(r'/(?:preview|playlist)/0/30/', f['url']) if preview: format_id_list.append('preview') abr = f.get('abr') if abr: f['abr'] = int(abr) if protocol == 'hls': protocol = 'm3u8' if ext == 'aac' else 'm3u8_native' else: protocol = 'http' f.update({ 'format_id': '_'.join(format_id_list), 'protocol': protocol, 'preference': -10 if preview else None, }) formats.append(f) # New API transcodings = try_get( info, lambda x: x['media']['transcodings'], list) or [] for t in transcodings: if not isinstance(t, dict): continue format_url = url_or_none(t.get('url')) if not format_url: continue stream = self._download_json( format_url, track_id, query=query, fatal=False) if not isinstance(stream, dict): continue stream_url = url_or_none(stream.get('url')) if invalid_url(stream_url): continue format_urls.add(stream_url) stream_format = t.get('format') or {} protocol = stream_format.get('protocol') if protocol != 'hls' and '/hls' in format_url: protocol = 'hls' ext = None preset = str_or_none(t.get('preset')) if preset: ext = preset.split('_')[0] if ext not in KNOWN_EXTENSIONS: ext = mimetype2ext(stream_format.get('mime_type')) add_format({ 'url': stream_url, 'ext': ext, }, 'http' if protocol == 'progressive' else protocol, t.get('snipped') or '/preview/' in format_url) for f in formats: f['vcodec'] = 'none' if not formats and info.get('policy') == 'BLOCK': self.raise_geo_restricted() self._sort_formats(formats) user = info.get('user') or {} thumbnails = [] artwork_url = info.get('artwork_url') thumbnail = artwork_url or user.get('avatar_url') if isinstance(thumbnail, compat_str): if re.search(self._IMAGE_REPL_RE, thumbnail): for image_id, size in self._ARTWORK_MAP.items(): i = { 'id': image_id, 'url': re.sub(self._IMAGE_REPL_RE, '-%s.jpg' % image_id, thumbnail), } if image_id == 'tiny' and not artwork_url: size = 18 elif image_id == 'original': i['preference'] = 10 if size: i.update({ 
'width': size, 'height': size, }) thumbnails.append(i) else: thumbnails = [{'url': thumbnail}] def extract_count(key): return int_or_none(info.get('%s_count' % key)) return { 'id': track_id, 'uploader': user.get('username'), 'uploader_id': str_or_none(user.get('id')) or user.get('permalink'), 'uploader_url': user.get('permalink_url'), 'timestamp': unified_timestamp(info.get('created_at')), 'title': title, 'description': info.get('description'), 'thumbnails': thumbnails, 'duration': float_or_none(info.get('duration'), 1000), 'webpage_url': info.get('permalink_url'), 'license': info.get('license'), 'view_count': extract_count('playback'), 'like_count': extract_count('favoritings') or extract_count('likes'), 'comment_count': extract_count('comment'), 'repost_count': extract_count('reposts'), 'genre': info.get('genre'), 'formats': formats } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) track_id = mobj.group('track_id') query = {} if track_id: info_json_url = self._API_V2_BASE + 'tracks/' + track_id full_title = track_id token = mobj.group('secret_token') if token: query['secret_token'] = token else: full_title = resolve_title = '%s/%s' % mobj.group('uploader', 'title') token = mobj.group('token') if token: resolve_title += '/%s' % token info_json_url = self._resolv_url(self._BASE_URL + resolve_title) info = self._download_json( info_json_url, full_title, 'Downloading info JSON', query=query) return self._extract_info_dict(info, full_title, token) class SoundcloudPlaylistBaseIE(SoundcloudIE): def _extract_set(self, playlist, token=None): playlist_id = compat_str(playlist['id']) tracks = playlist.get('tracks') or [] if not all([t.get('permalink_url') for t in tracks]) and token: tracks = self._download_json( self._API_V2_BASE + 'tracks', playlist_id, 'Downloading tracks', query={ 'ids': ','.join([compat_str(t['id']) for t in tracks]), 'playlistId': playlist_id, 'playlistSecretToken': token, }) entries = [] for track in tracks: track_id = 
str_or_none(track.get('id')) url = track.get('permalink_url') if not url: if not track_id: continue url = self._API_V2_BASE + 'tracks/' + track_id if token: url += '?secret_token=' + token entries.append(self.url_result( url, SoundcloudIE.ie_key(), track_id)) return self.playlist_result( entries, playlist_id, playlist.get('title'), playlist.get('description')) class SoundcloudSetIE(SoundcloudPlaylistBaseIE): _VALID_URL = r'https?://(?:(?:www|m)\.)?soundcloud\.com/(?P<uploader>[\w\d-]+)/sets/(?P<slug_title>[\w\d-]+)(?:/(?P<token>[^?/]+))?' IE_NAME = 'soundcloud:set' _TESTS = [{ 'url': 'https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep', 'info_dict': { 'id': '2284613', 'title': 'The Royal Concept EP', 'description': 'md5:71d07087c7a449e8941a70a29e34671e', }, 'playlist_mincount': 5, }, { 'url': 'https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep/token', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) full_title = '%s/sets/%s' % mobj.group('uploader', 'slug_title') token = mobj.group('token') if token: full_title += '/' + token info = self._download_json(self._resolv_url( self._BASE_URL + full_title), full_title) if 'errors' in info: msgs = (compat_str(err['error_message']) for err in info['errors']) raise ExtractorError('unable to download video webpage: %s' % ','.join(msgs)) return self._extract_set(info, token) class SoundcloudPagedPlaylistBaseIE(SoundcloudIE): def _extract_playlist(self, base_url, playlist_id, playlist_title): # Per the SoundCloud documentation, the maximum limit for a linked partitioning query is 200. 
# https://developers.soundcloud.com/blog/offset-pagination-deprecated COMMON_QUERY = { 'limit': 200, 'linked_partitioning': '1', } query = COMMON_QUERY.copy() query['offset'] = 0 next_href = base_url entries = [] for i in itertools.count(): response = self._download_json( next_href, playlist_id, 'Downloading track page %s' % (i + 1), query=query) collection = response['collection'] if not isinstance(collection, list): collection = [] # Empty collection may be returned, in this case we proceed # straight to next_href def resolve_entry(candidates): for cand in candidates: if not isinstance(cand, dict): continue permalink_url = url_or_none(cand.get('permalink_url')) if not permalink_url: continue return self.url_result( permalink_url, SoundcloudIE.ie_key() if SoundcloudIE.suitable(permalink_url) else None, str_or_none(cand.get('id')), cand.get('title')) for e in collection: entry = resolve_entry((e, e.get('track'), e.get('playlist'))) if entry: entries.append(entry) next_href = response.get('next_href') if not next_href: break next_href = response['next_href'] parsed_next_href = compat_urlparse.urlparse(next_href) query = compat_urlparse.parse_qs(parsed_next_href.query) query.update(COMMON_QUERY) return { '_type': 'playlist', 'id': playlist_id, 'title': playlist_title, 'entries': entries, } class SoundcloudUserIE(SoundcloudPagedPlaylistBaseIE): _VALID_URL = r'''(?x) https?:// (?:(?:www|m)\.)?soundcloud\.com/ (?P<user>[^/]+) (?:/ (?P<rsrc>tracks|albums|sets|reposts|likes|spotlight) )? 
/?(?:[?#].*)?$ ''' IE_NAME = 'soundcloud:user' _TESTS = [{ 'url': 'https://soundcloud.com/soft-cell-official', 'info_dict': { 'id': '207965082', 'title': 'Soft Cell (All)', }, 'playlist_mincount': 28, }, { 'url': 'https://soundcloud.com/soft-cell-official/tracks', 'info_dict': { 'id': '207965082', 'title': 'Soft Cell (Tracks)', }, 'playlist_mincount': 27, }, { 'url': 'https://soundcloud.com/soft-cell-official/albums', 'info_dict': { 'id': '207965082', 'title': 'Soft Cell (Albums)', }, 'playlist_mincount': 1, }, { 'url': 'https://soundcloud.com/jcv246/sets', 'info_dict': { 'id': '12982173', 'title': 'Jordi / cv (Sets)', }, 'playlist_mincount': 2, }, { 'url': 'https://soundcloud.com/jcv246/reposts', 'info_dict': { 'id': '12982173', 'title': 'Jordi / cv (Reposts)', }, 'playlist_mincount': 6, }, { 'url': 'https://soundcloud.com/clalberg/likes', 'info_dict': { 'id': '11817582', 'title': 'clalberg (Likes)', }, 'playlist_mincount': 5, }, { 'url': 'https://soundcloud.com/grynpyret/spotlight', 'info_dict': { 'id': '7098329', 'title': 'Grynpyret (Spotlight)', }, 'playlist_mincount': 1, }] _BASE_URL_MAP = { 'all': 'stream/users/%s', 'tracks': 'users/%s/tracks', 'albums': 'users/%s/albums', 'sets': 'users/%s/playlists', 'reposts': 'stream/users/%s/reposts', 'likes': 'users/%s/likes', 'spotlight': 'users/%s/spotlight', } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) uploader = mobj.group('user') user = self._download_json( self._resolv_url(self._BASE_URL + uploader), uploader, 'Downloading user info') resource = mobj.group('rsrc') or 'all' return self._extract_playlist( self._API_V2_BASE + self._BASE_URL_MAP[resource] % user['id'], str_or_none(user.get('id')), '%s (%s)' % (user['username'], resource.capitalize())) class SoundcloudTrackStationIE(SoundcloudPagedPlaylistBaseIE): _VALID_URL = r'https?://(?:(?:www|m)\.)?soundcloud\.com/stations/track/[^/]+/(?P<id>[^/?#&]+)' IE_NAME = 'soundcloud:trackstation' _TESTS = [{ 'url': 
'https://soundcloud.com/stations/track/officialsundial/your-text', 'info_dict': { 'id': '286017854', 'title': 'Track station: your text', }, 'playlist_mincount': 47, }] def _real_extract(self, url): track_name = self._match_id(url) track = self._download_json(self._resolv_url(url), track_name) track_id = self._search_regex( r'soundcloud:track-stations:(\d+)', track['id'], 'track id') return self._extract_playlist( self._API_V2_BASE + 'stations/%s/tracks' % track['id'], track_id, 'Track station: %s' % track['title']) class SoundcloudPlaylistIE(SoundcloudPlaylistBaseIE): _VALID_URL = r'https?://api(?:-v2)?\.soundcloud\.com/playlists/(?P<id>[0-9]+)(?:/?\?secret_token=(?P<token>[^&]+?))?$' IE_NAME = 'soundcloud:playlist' _TESTS = [{ 'url': 'https://api.soundcloud.com/playlists/4110309', 'info_dict': { 'id': '4110309', 'title': 'TILT Brass - Bowery Poetry Club, August \'03 [Non-Site SCR 02]', 'description': 're:.*?TILT Brass - Bowery Poetry Club', }, 'playlist_count': 6, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) playlist_id = mobj.group('id') query = {} token = mobj.group('token') if token: query['secret_token'] = token data = self._download_json( self._API_V2_BASE + 'playlists/' + playlist_id, playlist_id, 'Downloading playlist', query=query) return self._extract_set(data, token) class SoundcloudSearchIE(SearchInfoExtractor, SoundcloudIE): IE_NAME = 'soundcloud:search' IE_DESC = 'Soundcloud search' _MAX_RESULTS = float('inf') _TESTS = [{ 'url': 'scsearch15:post-avant jazzcore', 'info_dict': { 'title': 'post-avant jazzcore', }, 'playlist_count': 15, }] _SEARCH_KEY = 'scsearch' _MAX_RESULTS_PER_PAGE = 200 _DEFAULT_RESULTS_PER_PAGE = 50 def _get_collection(self, endpoint, collection_id, **query): limit = min( query.get('limit', self._DEFAULT_RESULTS_PER_PAGE), self._MAX_RESULTS_PER_PAGE) query.update({ 'limit': limit, 'linked_partitioning': 1, 'offset': 0, }) next_url = update_url_query(self._API_V2_BASE + endpoint, query) collected_results = 
0 for i in itertools.count(1): response = self._download_json( next_url, collection_id, 'Downloading page {0}'.format(i), 'Unable to download API page') collection = response.get('collection', []) if not collection: break collection = list(filter(bool, collection)) collected_results += len(collection) for item in collection: yield self.url_result(item['uri'], SoundcloudIE.ie_key()) if not collection or collected_results >= limit: break next_url = response.get('next_href') if not next_url: break def _get_n_results(self, query, n): tracks = self._get_collection('search/tracks', query, limit=n, q=query) return self.playlist_result(tracks, playlist_title=query)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/redtube.py
youtube_dl/extractor/redtube.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( determine_ext, ExtractorError, int_or_none, merge_dicts, str_to_int, unified_strdate, url_or_none, ) class RedTubeIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:\w+\.)?redtube\.com/|embed\.redtube\.com/\?.*?\bid=)(?P<id>[0-9]+)' _TESTS = [{ 'url': 'http://www.redtube.com/66418', 'md5': 'fc08071233725f26b8f014dba9590005', 'info_dict': { 'id': '66418', 'ext': 'mp4', 'title': 'Sucked on a toilet', 'upload_date': '20110811', 'duration': 596, 'view_count': int, 'age_limit': 18, } }, { 'url': 'http://embed.redtube.com/?bgcolor=000000&id=1443286', 'only_matching': True, }, { 'url': 'http://it.redtube.com/66418', 'only_matching': True, }] @staticmethod def _extract_urls(webpage): return re.findall( r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//embed\.redtube\.com/\?.*?\bid=\d+)', webpage) def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( 'http://www.redtube.com/%s' % video_id, video_id) ERRORS = ( (('video-deleted-info', '>This video has been removed'), 'has been removed'), (('private_video_text', '>This video is private', '>Send a friend request to its owner to be able to view it'), 'is private'), ) for patterns, message in ERRORS: if any(p in webpage for p in patterns): raise ExtractorError( 'Video %s %s' % (video_id, message), expected=True) info = self._search_json_ld(webpage, video_id, default={}) if not info.get('title'): info['title'] = self._html_search_regex( (r'<h(\d)[^>]+class="(?:video_title_text|videoTitle|video_title)[^"]*">(?P<title>(?:(?!\1).)+)</h\1>', r'(?:videoTitle|title)\s*:\s*(["\'])(?P<title>(?:(?!\1).)+)\1',), webpage, 'title', group='title', default=None) or self._og_search_title(webpage) formats = [] sources = self._parse_json( self._search_regex( r'sources\s*:\s*({.+?})', webpage, 'source', default='{}'), video_id, fatal=False) if sources and isinstance(sources, dict): for format_id, format_url 
in sources.items(): if format_url: formats.append({ 'url': format_url, 'format_id': format_id, 'height': int_or_none(format_id), }) medias = self._parse_json( self._search_regex( r'mediaDefinition["\']?\s*:\s*(\[.+?}\s*\])', webpage, 'media definitions', default='{}'), video_id, fatal=False) if medias and isinstance(medias, list): for media in medias: format_url = url_or_none(media.get('videoUrl')) if not format_url: continue if media.get('format') == 'hls' or determine_ext(format_url) == 'm3u8': formats.extend(self._extract_m3u8_formats( format_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) continue format_id = media.get('quality') formats.append({ 'url': format_url, 'format_id': format_id, 'height': int_or_none(format_id), }) if not formats: video_url = self._html_search_regex( r'<source src="(.+?)" type="video/mp4">', webpage, 'video URL') formats.append({'url': video_url}) self._sort_formats(formats) thumbnail = self._og_search_thumbnail(webpage) upload_date = unified_strdate(self._search_regex( r'<span[^>]+>(?:ADDED|Published on) ([^<]+)<', webpage, 'upload date', default=None)) duration = int_or_none(self._og_search_property( 'video:duration', webpage, default=None) or self._search_regex( r'videoDuration\s*:\s*(\d+)', webpage, 'duration', default=None)) view_count = str_to_int(self._search_regex( (r'<div[^>]*>Views</div>\s*<div[^>]*>\s*([\d,.]+)', r'<span[^>]*>VIEWS</span>\s*</td>\s*<td>\s*([\d,.]+)', r'<span[^>]+\bclass=["\']video_view_count[^>]*>\s*([\d,.]+)'), webpage, 'view count', default=None)) # No self-labeling, but they describe themselves as # "Home of Videos Porno" age_limit = 18 return merge_dicts(info, { 'id': video_id, 'ext': 'mp4', 'thumbnail': thumbnail, 'upload_date': upload_date, 'duration': duration, 'view_count': view_count, 'age_limit': age_limit, 'formats': formats, })
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/mit.py
youtube_dl/extractor/mit.py
from __future__ import unicode_literals import re import json from .common import InfoExtractor from .youtube import YoutubeIE from ..utils import ( clean_html, ExtractorError, get_element_by_id, ) class TechTVMITIE(InfoExtractor): IE_NAME = 'techtv.mit.edu' _VALID_URL = r'https?://techtv\.mit\.edu/(?:videos|embeds)/(?P<id>\d+)' _TEST = { 'url': 'http://techtv.mit.edu/videos/25418-mit-dna-learning-center-set', 'md5': '00a3a27ee20d44bcaa0933ccec4a2cf7', 'info_dict': { 'id': '25418', 'ext': 'mp4', 'title': 'MIT DNA and Protein Sets', 'description': 'md5:46f5c69ce434f0a97e7c628cc142802d', }, } def _real_extract(self, url): video_id = self._match_id(url) raw_page = self._download_webpage( 'http://techtv.mit.edu/videos/%s' % video_id, video_id) clean_page = re.compile(r'<!--.*?-->', re.S).sub('', raw_page) base_url = self._proto_relative_url(self._search_regex( r'ipadUrl: \'(.+?cloudfront.net/)', raw_page, 'base url'), 'http:') formats_json = self._search_regex( r'bitrates: (\[.+?\])', raw_page, 'video formats') formats_mit = json.loads(formats_json) formats = [ { 'format_id': f['label'], 'url': base_url + f['url'].partition(':')[2], 'ext': f['url'].partition(':')[0], 'format': f['label'], 'width': f['width'], 'vbr': f['bitrate'], } for f in formats_mit ] title = get_element_by_id('edit-title', clean_page) description = clean_html(get_element_by_id('edit-description', clean_page)) thumbnail = self._search_regex( r'playlist:.*?url: \'(.+?)\'', raw_page, 'thumbnail', flags=re.DOTALL) return { 'id': video_id, 'title': title, 'formats': formats, 'description': description, 'thumbnail': thumbnail, } class OCWMITIE(InfoExtractor): IE_NAME = 'ocw.mit.edu' _VALID_URL = r'^https?://ocw\.mit\.edu/courses/(?P<topic>[a-z0-9\-]+)' _BASE_URL = 'http://ocw.mit.edu/' _TESTS = [ { 'url': 
'http://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-041-probabilistic-systems-analysis-and-applied-probability-fall-2010/video-lectures/lecture-7-multiple-variables-expectations-independence/', 'info_dict': { 'id': 'EObHWIEKGjA', 'ext': 'webm', 'title': 'Lecture 7: Multiple Discrete Random Variables: Expectations, Conditioning, Independence', 'description': 'In this lecture, the professor discussed multiple random variables, expectations, and binomial distribution.', 'upload_date': '20121109', 'uploader_id': 'MIT', 'uploader': 'MIT OpenCourseWare', } }, { 'url': 'http://ocw.mit.edu/courses/mathematics/18-01sc-single-variable-calculus-fall-2010/1.-differentiation/part-a-definition-and-basic-rules/session-1-introduction-to-derivatives/', 'info_dict': { 'id': '7K1sB05pE0A', 'ext': 'mp4', 'title': 'Session 1: Introduction to Derivatives', 'upload_date': '20090818', 'uploader_id': 'MIT', 'uploader': 'MIT OpenCourseWare', 'description': 'This section contains lecture video excerpts, lecture notes, an interactive mathlet with supporting documents, and problem solving videos.', } } ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) topic = mobj.group('topic') webpage = self._download_webpage(url, topic) title = self._html_search_meta('WT.cg_s', webpage) description = self._html_search_meta('Description', webpage) # search for call to ocw_embed_chapter_media(container_id, media_url, provider, page_url, image_url, start, stop, captions_file) embed_chapter_media = re.search(r'ocw_embed_chapter_media\((.+?)\)', webpage) if embed_chapter_media: metadata = re.sub(r'[\'"]', '', embed_chapter_media.group(1)) metadata = re.split(r', ?', metadata) yt = metadata[1] else: # search for call to ocw_embed_chapter_media(container_id, media_url, provider, page_url, image_url, captions_file) embed_media = re.search(r'ocw_embed_media\((.+?)\)', webpage) if embed_media: metadata = re.sub(r'[\'"]', '', embed_media.group(1)) metadata = re.split(r', ?', 
metadata) yt = metadata[1] else: raise ExtractorError('Unable to find embedded YouTube video.') video_id = YoutubeIE.extract_id(yt) return { '_type': 'url_transparent', 'id': video_id, 'title': title, 'description': description, 'url': yt, 'ie_key': 'Youtube', }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/globalplayer.py
youtube_dl/extractor/globalplayer.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( clean_html, join_nonempty, merge_dicts, parse_duration, str_or_none, T, traverse_obj, unified_strdate, unified_timestamp, urlhandle_detect_ext, ) class GlobalPlayerBaseIE(InfoExtractor): def _get_page_props(self, url, video_id): webpage = self._download_webpage(url, video_id) return self._search_nextjs_data(webpage, video_id)['props']['pageProps'] def _request_ext(self, url, video_id): return urlhandle_detect_ext(self._request_webpage( # Server rejects HEAD requests url, video_id, note='Determining source extension')) @staticmethod def _clean_desc(x): x = clean_html(x) if x: x = x.replace('\xa0', ' ') return x def _extract_audio(self, episode, series): return merge_dicts({ 'vcodec': 'none', }, traverse_obj(series, { 'series': 'title', 'series_id': 'id', 'thumbnail': 'imageUrl', 'uploader': 'itunesAuthor', # podcasts only }), traverse_obj(episode, { 'id': 'id', 'description': ('description', T(self._clean_desc)), 'duration': ('duration', T(parse_duration)), 'thumbnail': 'imageUrl', 'url': 'streamUrl', 'timestamp': (('pubDate', 'startDate'), T(unified_timestamp)), 'title': 'title', }, get_all=False), rev=True) class GlobalPlayerLiveIE(GlobalPlayerBaseIE): _VALID_URL = r'https?://www\.globalplayer\.com/live/(?P<id>\w+)/\w+' _TESTS = [{ 'url': 'https://www.globalplayer.com/live/smoothchill/uk/', 'info_dict': { 'id': '2mx1E', 'ext': 'aac', 'display_id': 'smoothchill-uk', 'title': 're:^Smooth Chill.+$', 'thumbnail': 'https://herald.musicradio.com/media/f296ade8-50c9-4f60-911f-924e96873620.png', 'description': 'Music To Chill To', # 'live_status': 'is_live', 'is_live': True, }, }, { # national station 'url': 'https://www.globalplayer.com/live/heart/uk/', 'info_dict': { 'id': '2mwx4', 'ext': 'aac', 'description': 'turn up the feel good!', 'thumbnail': 'https://herald.musicradio.com/media/49b9e8cb-15bf-4bf2-8c28-a4850cc6b0f3.png', # 'live_status': 'is_live', 
'is_live': True, 'title': 're:^Heart UK.+$', 'display_id': 'heart-uk', }, }, { # regional variation 'url': 'https://www.globalplayer.com/live/heart/london/', 'info_dict': { 'id': 'AMqg', 'ext': 'aac', 'thumbnail': 'https://herald.musicradio.com/media/49b9e8cb-15bf-4bf2-8c28-a4850cc6b0f3.png', 'title': 're:^Heart London.+$', # 'live_status': 'is_live', 'is_live': True, 'display_id': 'heart-london', 'description': 'turn up the feel good!', }, }] def _real_extract(self, url): video_id = self._match_id(url) station = self._get_page_props(url, video_id)['station'] stream_url = station['streamUrl'] return merge_dicts({ 'id': station['id'], 'display_id': ( join_nonempty('brandSlug', 'slug', from_dict=station) or station.get('legacyStationPrefix')), 'url': stream_url, 'ext': self._request_ext(stream_url, video_id), 'vcodec': 'none', 'is_live': True, }, { 'title': self._live_title(traverse_obj( station, (('name', 'brandName'), T(str_or_none)), get_all=False)), }, traverse_obj(station, { 'description': 'tagline', 'thumbnail': 'brandLogo', }), rev=True) class GlobalPlayerLivePlaylistIE(GlobalPlayerBaseIE): _VALID_URL = r'https?://www\.globalplayer\.com/playlists/(?P<id>\w+)' _TESTS = [{ # "live playlist" 'url': 'https://www.globalplayer.com/playlists/8bLk/', 'info_dict': { 'id': '8bLk', 'ext': 'aac', # 'live_status': 'is_live', 'is_live': True, 'description': r're:(?s).+\bclassical\b.+\bClassic FM Hall [oO]f Fame\b', 'thumbnail': 'https://images.globalplayer.com/images/551379?width=450&signature=oMLPZIoi5_dBSHnTMREW0Xg76mA=', 'title': 're:Classic FM Hall of Fame.+$' }, }] def _real_extract(self, url): video_id = self._match_id(url) station = self._get_page_props(url, video_id)['playlistData'] stream_url = station['streamUrl'] return merge_dicts({ 'id': video_id, 'url': stream_url, 'ext': self._request_ext(stream_url, video_id), 'vcodec': 'none', 'is_live': True, }, traverse_obj(station, { 'title': 'title', 'description': ('description', T(self._clean_desc)), 'thumbnail': 
'image', }), rev=True) class GlobalPlayerAudioIE(GlobalPlayerBaseIE): _VALID_URL = r'https?://www\.globalplayer\.com/(?:(?P<podcast>podcasts)/|catchup/\w+/\w+/)(?P<id>\w+)/?(?:$|[?#])' _TESTS = [{ # podcast 'url': 'https://www.globalplayer.com/podcasts/42KuaM/', 'playlist_mincount': 5, 'info_dict': { 'id': '42KuaM', 'title': 'Filthy Ritual', 'thumbnail': 'md5:60286e7d12d795bd1bbc9efc6cee643e', 'categories': ['Society & Culture', 'True Crime'], 'uploader': 'Global', 'description': r're:(?s).+\bscam\b.+?\bseries available now\b', }, }, { # radio catchup 'url': 'https://www.globalplayer.com/catchup/lbc/uk/46vyD7z/', 'playlist_mincount': 2, 'info_dict': { 'id': '46vyD7z', 'description': 'Nick Ferrari At Breakfast is Leading Britain\'s Conversation.', 'title': 'Nick Ferrari', 'thumbnail': 'md5:4df24d8a226f5b2508efbcc6ae874ebf', }, }] def _real_extract(self, url): video_id, podcast = self._match_valid_url(url).group('id', 'podcast') props = self._get_page_props(url, video_id) series = props['podcastInfo'] if podcast else props['catchupInfo'] return merge_dicts({ '_type': 'playlist', 'id': video_id, 'entries': [self._extract_audio(ep, series) for ep in traverse_obj( series, ('episodes', lambda _, v: v['id'] and v['streamUrl']))], 'categories': traverse_obj(series, ('categories', Ellipsis, 'name')) or None, }, traverse_obj(series, { 'description': ('description', T(self._clean_desc)), 'thumbnail': 'imageUrl', 'title': 'title', 'uploader': 'itunesAuthor', # podcasts only }), rev=True) class GlobalPlayerAudioEpisodeIE(GlobalPlayerBaseIE): _VALID_URL = r'https?://www\.globalplayer\.com/(?:(?P<podcast>podcasts)|catchup/\w+/\w+)/episodes/(?P<id>\w+)/?(?:$|[?#])' _TESTS = [{ # podcast 'url': 'https://www.globalplayer.com/podcasts/episodes/7DrfNnE/', 'info_dict': { 'id': '7DrfNnE', 'ext': 'mp3', 'title': 'Filthy Ritual - Trailer', 'description': 'md5:1f1562fd0f01b4773b590984f94223e0', 'thumbnail': 'md5:60286e7d12d795bd1bbc9efc6cee643e', 'duration': 225.0, 'timestamp': 1681254900, 
'series': 'Filthy Ritual', 'series_id': '42KuaM', 'upload_date': '20230411', 'uploader': 'Global', }, }, { # radio catchup 'url': 'https://www.globalplayer.com/catchup/lbc/uk/episodes/2zGq26Vcv1fCWhddC4JAwETXWe/', 'only_matching': True, # expired: refresh the details with a current show for a full test 'info_dict': { 'id': '2zGq26Vcv1fCWhddC4JAwETXWe', 'ext': 'm4a', 'timestamp': 1682056800, 'series': 'Nick Ferrari', 'thumbnail': 'md5:4df24d8a226f5b2508efbcc6ae874ebf', 'upload_date': '20230421', 'series_id': '46vyD7z', 'description': 'Nick Ferrari At Breakfast is Leading Britain\'s Conversation.', 'title': 'Nick Ferrari', 'duration': 10800.0, }, }] def _real_extract(self, url): video_id, podcast = self._match_valid_url(url).group('id', 'podcast') props = self._get_page_props(url, video_id) episode = props['podcastEpisode'] if podcast else props['catchupEpisode'] return self._extract_audio( episode, traverse_obj(episode, 'podcast', 'show', expected_type=dict) or {}) class GlobalPlayerVideoIE(GlobalPlayerBaseIE): _VALID_URL = r'https?://www\.globalplayer\.com/videos/(?P<id>\w+)' _TESTS = [{ 'url': 'https://www.globalplayer.com/videos/2JsSZ7Gm2uP/', 'info_dict': { 'id': '2JsSZ7Gm2uP', 'ext': 'mp4', 'description': 'md5:6a9f063c67c42f218e42eee7d0298bfd', 'thumbnail': 'md5:d4498af48e15aae4839ce77b97d39550', 'upload_date': '20230420', 'title': 'Treble Malakai Bayoh sings a sublime Handel aria at Classic FM Live', }, }] def _real_extract(self, url): video_id = self._match_id(url) meta = self._get_page_props(url, video_id)['videoData'] return merge_dicts({ 'id': video_id, }, traverse_obj(meta, { 'url': 'url', 'thumbnail': ('image', 'url'), 'title': 'title', 'upload_date': ('publish_date', T(unified_strdate)), 'description': 'description', }), rev=True)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/nytimes.py
youtube_dl/extractor/nytimes.py
# coding: utf-8 from __future__ import unicode_literals import hmac import hashlib import base64 from .common import InfoExtractor from ..utils import ( determine_ext, float_or_none, int_or_none, js_to_json, mimetype2ext, parse_iso8601, remove_start, ) class NYTimesBaseIE(InfoExtractor): _SECRET = b'pX(2MbU2);4N{7J8)>YwKRJ+/pQ3JkiU2Q^V>mFYv6g6gYvt6v' def _extract_video_from_id(self, video_id): # Authorization generation algorithm is reverse engineered from `signer` in # http://graphics8.nytimes.com/video/vhs/vhs-2.x.min.js path = '/svc/video/api/v3/video/' + video_id hm = hmac.new(self._SECRET, (path + ':vhs').encode(), hashlib.sha512).hexdigest() video_data = self._download_json('http://www.nytimes.com' + path, video_id, 'Downloading video JSON', headers={ 'Authorization': 'NYTV ' + base64.b64encode(hm.encode()).decode(), 'X-NYTV': 'vhs', }, fatal=False) if not video_data: video_data = self._download_json( 'http://www.nytimes.com/svc/video/api/v2/video/' + video_id, video_id, 'Downloading video JSON') title = video_data['headline'] def get_file_size(file_size): if isinstance(file_size, int): return file_size elif isinstance(file_size, dict): return int(file_size.get('value', 0)) else: return None urls = [] formats = [] for video in video_data.get('renditions', []): video_url = video.get('url') format_id = video.get('type') if not video_url or format_id == 'thumbs' or video_url in urls: continue urls.append(video_url) ext = mimetype2ext(video.get('mimetype')) or determine_ext(video_url) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( video_url, video_id, 'mp4', 'm3u8_native', m3u8_id=format_id or 'hls', fatal=False)) elif ext == 'mpd': continue # formats.extend(self._extract_mpd_formats( # video_url, video_id, format_id or 'dash', fatal=False)) else: formats.append({ 'url': video_url, 'format_id': format_id, 'vcodec': video.get('videoencoding') or video.get('video_codec'), 'width': int_or_none(video.get('width')), 'height': 
int_or_none(video.get('height')), 'filesize': get_file_size(video.get('file_size') or video.get('fileSize')), 'tbr': int_or_none(video.get('bitrate'), 1000) or None, 'ext': ext, }) self._sort_formats(formats, ('height', 'width', 'filesize', 'tbr', 'fps', 'format_id')) thumbnails = [] for image in video_data.get('images', []): image_url = image.get('url') if not image_url: continue thumbnails.append({ 'url': 'http://www.nytimes.com/' + image_url, 'width': int_or_none(image.get('width')), 'height': int_or_none(image.get('height')), }) publication_date = video_data.get('publication_date') timestamp = parse_iso8601(publication_date[:-8]) if publication_date else None return { 'id': video_id, 'title': title, 'description': video_data.get('summary'), 'timestamp': timestamp, 'uploader': video_data.get('byline'), 'duration': float_or_none(video_data.get('duration'), 1000), 'formats': formats, 'thumbnails': thumbnails, } class NYTimesIE(NYTimesBaseIE): _VALID_URL = r'https?://(?:(?:www\.)?nytimes\.com/video/(?:[^/]+/)+?|graphics8\.nytimes\.com/bcvideo/\d+(?:\.\d+)?/iframe/embed\.html\?videoId=)(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.nytimes.com/video/opinion/100000002847155/verbatim-what-is-a-photocopier.html?playlistId=100000001150263', 'md5': 'd665342765db043f7e225cff19df0f2d', 'info_dict': { 'id': '100000002847155', 'ext': 'mov', 'title': 'Verbatim: What Is a Photocopier?', 'description': 'md5:93603dada88ddbda9395632fdc5da260', 'timestamp': 1398631707, 'upload_date': '20140427', 'uploader': 'Brett Weiner', 'duration': 419, } }, { 'url': 'http://www.nytimes.com/video/travel/100000003550828/36-hours-in-dubai.html', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) return self._extract_video_from_id(video_id) class NYTimesArticleIE(NYTimesBaseIE): _VALID_URL = r'https?://(?:www\.)?nytimes\.com/(.(?<!video))*?/(?:[^/]+/)*(?P<id>[^.]+)(?:\.html)?' 
_TESTS = [{ 'url': 'http://www.nytimes.com/2015/04/14/business/owner-of-gravity-payments-a-credit-card-processor-is-setting-a-new-minimum-wage-70000-a-year.html?_r=0', 'md5': 'e2076d58b4da18e6a001d53fd56db3c9', 'info_dict': { 'id': '100000003628438', 'ext': 'mov', 'title': 'New Minimum Wage: $70,000 a Year', 'description': 'Dan Price, C.E.O. of Gravity Payments, surprised his 120-person staff by announcing that he planned over the next three years to raise the salary of every employee to $70,000 a year.', 'timestamp': 1429033037, 'upload_date': '20150414', 'uploader': 'Matthew Williams', } }, { 'url': 'http://www.nytimes.com/2016/10/14/podcasts/revelations-from-the-final-weeks.html', 'md5': 'e0d52040cafb07662acf3c9132db3575', 'info_dict': { 'id': '100000004709062', 'title': 'The Run-Up: ‘He Was Like an Octopus’', 'ext': 'mp3', 'description': 'md5:fb5c6b93b12efc51649b4847fe066ee4', 'series': 'The Run-Up', 'episode': '‘He Was Like an Octopus’', 'episode_number': 20, 'duration': 2130, } }, { 'url': 'http://www.nytimes.com/2016/10/16/books/review/inside-the-new-york-times-book-review-the-rise-of-hitler.html', 'info_dict': { 'id': '100000004709479', 'title': 'The Rise of Hitler', 'ext': 'mp3', 'description': 'md5:bce877fd9e3444990cb141875fab0028', 'creator': 'Pamela Paul', 'duration': 3475, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.nytimes.com/news/minute/2014/03/17/times-minute-whats-next-in-crimea/?_php=true&_type=blogs&_php=true&_type=blogs&_r=1', 'only_matching': True, }] def _extract_podcast_from_json(self, json, page_id, webpage): podcast_audio = self._parse_json( json, page_id, transform_source=js_to_json) audio_data = podcast_audio['data'] track = audio_data['track'] episode_title = track['title'] video_url = track['source'] description = track.get('description') or self._html_search_meta( ['og:description', 'twitter:description'], webpage) podcast_title = audio_data.get('podcast', {}).get('title') title = ('%s: %s' % (podcast_title, 
episode_title) if podcast_title else episode_title) episode = audio_data.get('podcast', {}).get('episode') or '' episode_number = int_or_none(self._search_regex( r'[Ee]pisode\s+(\d+)', episode, 'episode number', default=None)) return { 'id': remove_start(podcast_audio.get('target'), 'FT') or page_id, 'url': video_url, 'title': title, 'description': description, 'creator': track.get('credit'), 'series': podcast_title, 'episode': episode_title, 'episode_number': episode_number, 'duration': int_or_none(track.get('duration')), } def _real_extract(self, url): page_id = self._match_id(url) webpage = self._download_webpage(url, page_id) video_id = self._search_regex( r'data-videoid=["\'](\d+)', webpage, 'video id', default=None, fatal=False) if video_id is not None: return self._extract_video_from_id(video_id) podcast_data = self._search_regex( (r'NYTD\.FlexTypes\.push\s*\(\s*({.+?})\s*\)\s*;\s*</script', r'NYTD\.FlexTypes\.push\s*\(\s*({.+})\s*\)\s*;'), webpage, 'podcast data') return self._extract_podcast_from_json(podcast_data, page_id, webpage) class NYTimesCookingIE(NYTimesBaseIE): _VALID_URL = r'https?://cooking\.nytimes\.com/(?:guid|recip)es/(?P<id>\d+)' _TESTS = [{ 'url': 'https://cooking.nytimes.com/recipes/1017817-cranberry-curd-tart', 'md5': 'dab81fa2eaeb3f9ed47498bdcfcdc1d3', 'info_dict': { 'id': '100000004756089', 'ext': 'mov', 'timestamp': 1479383008, 'uploader': 'By SHAW LASH, ADAM SAEWITZ and JAMES HERRON', 'title': 'Cranberry Tart', 'upload_date': '20161117', 'description': 'If you are a fan of lemon curd or the classic French tarte au citron, you will love this cranberry version.', }, }, { 'url': 'https://cooking.nytimes.com/guides/13-how-to-cook-a-turkey', 'md5': '4b2e8c70530a89b8d905a2b572316eb8', 'info_dict': { 'id': '100000003951728', 'ext': 'mov', 'timestamp': 1445509539, 'description': 'Turkey guide', 'upload_date': '20151022', 'title': 'Turkey', } }] def _real_extract(self, url): page_id = self._match_id(url) webpage = self._download_webpage(url, 
page_id) video_id = self._search_regex( r'data-video-id=["\'](\d+)', webpage, 'video id') return self._extract_video_from_id(video_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/teamtreehouse.py
youtube_dl/extractor/teamtreehouse.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( clean_html, determine_ext, ExtractorError, float_or_none, get_element_by_class, get_element_by_id, parse_duration, remove_end, urlencode_postdata, urljoin, ) class TeamTreeHouseIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?teamtreehouse\.com/library/(?P<id>[^/]+)' _TESTS = [{ # Course 'url': 'https://teamtreehouse.com/library/introduction-to-user-authentication-in-php', 'info_dict': { 'id': 'introduction-to-user-authentication-in-php', 'title': 'Introduction to User Authentication in PHP', 'description': 'md5:405d7b4287a159b27ddf30ca72b5b053', }, 'playlist_mincount': 24, }, { # WorkShop 'url': 'https://teamtreehouse.com/library/deploying-a-react-app', 'info_dict': { 'id': 'deploying-a-react-app', 'title': 'Deploying a React App', 'description': 'md5:10a82e3ddff18c14ac13581c9b8e5921', }, 'playlist_mincount': 4, }, { # Video 'url': 'https://teamtreehouse.com/library/application-overview-2', 'info_dict': { 'id': 'application-overview-2', 'ext': 'mp4', 'title': 'Application Overview', 'description': 'md5:4b0a234385c27140a4378de5f1e15127', }, 'expected_warnings': ['This is just a preview'], }] _NETRC_MACHINE = 'teamtreehouse' def _real_initialize(self): email, password = self._get_login_info() if email is None: return signin_page = self._download_webpage( 'https://teamtreehouse.com/signin', None, 'Downloading signin page') data = self._form_hidden_inputs('new_user_session', signin_page) data.update({ 'user_session[email]': email, 'user_session[password]': password, }) error_message = get_element_by_class('error-message', self._download_webpage( 'https://teamtreehouse.com/person_session', None, 'Logging in', data=urlencode_postdata(data))) if error_message: raise ExtractorError(clean_html(error_message), expected=True) def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) title = 
self._html_search_meta(['og:title', 'twitter:title'], webpage) description = self._html_search_meta( ['description', 'og:description', 'twitter:description'], webpage) entries = self._parse_html5_media_entries(url, webpage, display_id) if entries: info = entries[0] for subtitles in info.get('subtitles', {}).values(): for subtitle in subtitles: subtitle['ext'] = determine_ext(subtitle['url'], 'srt') is_preview = 'data-preview="true"' in webpage if is_preview: self.report_warning( 'This is just a preview. You need to be signed in with a Basic account to download the entire video.', display_id) duration = 30 else: duration = float_or_none(self._search_regex( r'data-duration="(\d+)"', webpage, 'duration'), 1000) if not duration: duration = parse_duration(get_element_by_id( 'video-duration', webpage)) info.update({ 'id': display_id, 'title': title, 'description': description, 'duration': duration, }) return info else: def extract_urls(html, extract_info=None): for path in re.findall(r'<a[^>]+href="([^"]+)"', html): page_url = urljoin(url, path) entry = { '_type': 'url_transparent', 'id': self._match_id(page_url), 'url': page_url, 'id_key': self.ie_key(), } if extract_info: entry.update(extract_info) entries.append(entry) workshop_videos = self._search_regex( r'(?s)<ul[^>]+id="workshop-videos"[^>]*>(.+?)</ul>', webpage, 'workshop videos', default=None) if workshop_videos: extract_urls(workshop_videos) else: stages_path = self._search_regex( r'(?s)<div[^>]+id="syllabus-stages"[^>]+data-url="([^"]+)"', webpage, 'stages path') if stages_path: stages_page = self._download_webpage( urljoin(url, stages_path), display_id, 'Downloading stages page') for chapter_number, (chapter, steps_list) in enumerate(re.findall(r'(?s)<h2[^>]*>\s*(.+?)\s*</h2>.+?<ul[^>]*>(.+?)</ul>', stages_page), 1): extract_urls(steps_list, { 'chapter': chapter, 'chapter_number': chapter_number, }) title = remove_end(title, ' Course') return self.playlist_result( entries, display_id, title, description)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/limelight.py
youtube_dl/extractor/limelight.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_HTTPError from ..utils import ( determine_ext, float_or_none, int_or_none, smuggle_url, try_get, unsmuggle_url, ExtractorError, ) class LimelightBaseIE(InfoExtractor): _PLAYLIST_SERVICE_URL = 'http://production-ps.lvp.llnw.net/r/PlaylistService/%s/%s/%s' @classmethod def _extract_urls(cls, webpage, source_url): lm = { 'Media': 'media', 'Channel': 'channel', 'ChannelList': 'channel_list', } def smuggle(url): return smuggle_url(url, {'source_url': source_url}) entries = [] for kind, video_id in re.findall( r'LimelightPlayer\.doLoad(Media|Channel|ChannelList)\(["\'](?P<id>[a-z0-9]{32})', webpage): entries.append(cls.url_result( smuggle('limelight:%s:%s' % (lm[kind], video_id)), 'Limelight%s' % kind, video_id)) for mobj in re.finditer( # As per [1] class attribute should be exactly equal to # LimelightEmbeddedPlayerFlash but numerous examples seen # that don't exactly match it (e.g. [2]). # 1. http://support.3playmedia.com/hc/en-us/articles/227732408-Limelight-Embedding-the-Captions-Plugin-with-the-Limelight-Player-on-Your-Webpage # 2. http://www.sedona.com/FacilitatorTraining2017 r'''(?sx) <object[^>]+class=(["\'])(?:(?!\1).)*\bLimelightEmbeddedPlayerFlash\b(?:(?!\1).)*\1[^>]*>.*? 
<param[^>]+ name=(["\'])flashVars\2[^>]+ value=(["\'])(?:(?!\3).)*(?P<kind>media|channel(?:List)?)Id=(?P<id>[a-z0-9]{32}) ''', webpage): kind, video_id = mobj.group('kind'), mobj.group('id') entries.append(cls.url_result( smuggle('limelight:%s:%s' % (kind, video_id)), 'Limelight%s' % kind.capitalize(), video_id)) # http://support.3playmedia.com/hc/en-us/articles/115009517327-Limelight-Embedding-the-Audio-Description-Plugin-with-the-Limelight-Player-on-Your-Web-Page) for video_id in re.findall( r'(?s)LimelightPlayerUtil\.embed\s*\(\s*{.*?\bmediaId["\']\s*:\s*["\'](?P<id>[a-z0-9]{32})', webpage): entries.append(cls.url_result( smuggle('limelight:media:%s' % video_id), LimelightMediaIE.ie_key(), video_id)) return entries def _call_playlist_service(self, item_id, method, fatal=True, referer=None): headers = {} if referer: headers['Referer'] = referer try: return self._download_json( self._PLAYLIST_SERVICE_URL % (self._PLAYLIST_SERVICE_PATH, item_id, method), item_id, 'Downloading PlaylistService %s JSON' % method, fatal=fatal, headers=headers) except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403: error = self._parse_json(e.cause.read().decode(), item_id)['detail']['contentAccessPermission'] if error == 'CountryDisabled': self.raise_geo_restricted() raise ExtractorError(error, expected=True) raise def _extract(self, item_id, pc_method, mobile_method, referer=None): pc = self._call_playlist_service(item_id, pc_method, referer=referer) mobile = self._call_playlist_service( item_id, mobile_method, fatal=False, referer=referer) return pc, mobile def _extract_info(self, pc, mobile, i, referer): get_item = lambda x, y: try_get(x, lambda x: x[y][i], dict) or {} pc_item = get_item(pc, 'playlistItems') mobile_item = get_item(mobile, 'mediaList') video_id = pc_item.get('mediaId') or mobile_item['mediaId'] title = pc_item.get('title') or mobile_item['title'] formats = [] urls = [] for stream in pc_item.get('streams', []): stream_url = 
stream.get('url') if not stream_url or stream.get('drmProtected') or stream_url in urls: continue urls.append(stream_url) ext = determine_ext(stream_url) if ext == 'f4m': formats.extend(self._extract_f4m_formats( stream_url, video_id, f4m_id='hds', fatal=False)) else: fmt = { 'url': stream_url, 'abr': float_or_none(stream.get('audioBitRate')), 'fps': float_or_none(stream.get('videoFrameRate')), 'ext': ext, } width = int_or_none(stream.get('videoWidthInPixels')) height = int_or_none(stream.get('videoHeightInPixels')) vbr = float_or_none(stream.get('videoBitRate')) if width or height or vbr: fmt.update({ 'width': width, 'height': height, 'vbr': vbr, }) else: fmt['vcodec'] = 'none' rtmp = re.search(r'^(?P<url>rtmpe?://(?P<host>[^/]+)/(?P<app>.+))/(?P<playpath>mp[34]:.+)$', stream_url) if rtmp: format_id = 'rtmp' if stream.get('videoBitRate'): format_id += '-%d' % int_or_none(stream['videoBitRate']) http_format_id = format_id.replace('rtmp', 'http') CDN_HOSTS = ( ('delvenetworks.com', 'cpl.delvenetworks.com'), ('video.llnw.net', 's2.content.video.llnw.net'), ) for cdn_host, http_host in CDN_HOSTS: if cdn_host not in rtmp.group('host').lower(): continue http_url = 'http://%s/%s' % (http_host, rtmp.group('playpath')[4:]) urls.append(http_url) if self._is_valid_url(http_url, video_id, http_format_id): http_fmt = fmt.copy() http_fmt.update({ 'url': http_url, 'format_id': http_format_id, }) formats.append(http_fmt) break fmt.update({ 'url': rtmp.group('url'), 'play_path': rtmp.group('playpath'), 'app': rtmp.group('app'), 'ext': 'flv', 'format_id': format_id, }) formats.append(fmt) for mobile_url in mobile_item.get('mobileUrls', []): media_url = mobile_url.get('mobileUrl') format_id = mobile_url.get('targetMediaPlatform') if not media_url or format_id in ('Widevine', 'SmoothStreaming') or media_url in urls: continue urls.append(media_url) ext = determine_ext(media_url) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( media_url, video_id, 'mp4', 'm3u8_native', 
m3u8_id=format_id, fatal=False)) elif ext == 'f4m': formats.extend(self._extract_f4m_formats( stream_url, video_id, f4m_id=format_id, fatal=False)) else: formats.append({ 'url': media_url, 'format_id': format_id, 'preference': -1, 'ext': ext, }) self._sort_formats(formats) subtitles = {} for flag in mobile_item.get('flags'): if flag == 'ClosedCaptions': closed_captions = self._call_playlist_service( video_id, 'getClosedCaptionsDetailsByMediaId', False, referer) or [] for cc in closed_captions: cc_url = cc.get('webvttFileUrl') if not cc_url: continue lang = cc.get('languageCode') or self._search_regex(r'/[a-z]{2}\.vtt', cc_url, 'lang', default='en') subtitles.setdefault(lang, []).append({ 'url': cc_url, }) break get_meta = lambda x: pc_item.get(x) or mobile_item.get(x) return { 'id': video_id, 'title': title, 'description': get_meta('description'), 'formats': formats, 'duration': float_or_none(get_meta('durationInMilliseconds'), 1000), 'thumbnail': get_meta('previewImageUrl') or get_meta('thumbnailImageUrl'), 'subtitles': subtitles, } class LimelightMediaIE(LimelightBaseIE): IE_NAME = 'limelight' _VALID_URL = r'''(?x) (?: limelight:media:| https?:// (?: link\.videoplatform\.limelight\.com/media/| assets\.delvenetworks\.com/player/loader\.swf ) \?.*?\bmediaId= ) (?P<id>[a-z0-9]{32}) ''' _TESTS = [{ 'url': 'http://link.videoplatform.limelight.com/media/?mediaId=3ffd040b522b4485b6d84effc750cd86', 'info_dict': { 'id': '3ffd040b522b4485b6d84effc750cd86', 'ext': 'mp4', 'title': 'HaP and the HB Prince Trailer', 'description': 'md5:8005b944181778e313d95c1237ddb640', 'thumbnail': r're:^https?://.*\.jpeg$', 'duration': 144.23, }, 'params': { # m3u8 download 'skip_download': True, }, }, { # video with subtitles 'url': 'limelight:media:a3e00274d4564ec4a9b29b9466432335', 'md5': '2fa3bad9ac321e23860ca23bc2c69e3d', 'info_dict': { 'id': 'a3e00274d4564ec4a9b29b9466432335', 'ext': 'mp4', 'title': '3Play Media Overview Video', 'thumbnail': r're:^https?://.*\.jpeg$', 'duration': 
78.101, # TODO: extract all languages that were accessible via API # 'subtitles': 'mincount:9', 'subtitles': 'mincount:1', }, }, { 'url': 'https://assets.delvenetworks.com/player/loader.swf?mediaId=8018a574f08d416e95ceaccae4ba0452', 'only_matching': True, }] _PLAYLIST_SERVICE_PATH = 'media' def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) video_id = self._match_id(url) source_url = smuggled_data.get('source_url') self._initialize_geo_bypass({ 'countries': smuggled_data.get('geo_countries'), }) pc, mobile = self._extract( video_id, 'getPlaylistByMediaId', 'getMobilePlaylistByMediaId', source_url) return self._extract_info(pc, mobile, 0, source_url) class LimelightChannelIE(LimelightBaseIE): IE_NAME = 'limelight:channel' _VALID_URL = r'''(?x) (?: limelight:channel:| https?:// (?: link\.videoplatform\.limelight\.com/media/| assets\.delvenetworks\.com/player/loader\.swf ) \?.*?\bchannelId= ) (?P<id>[a-z0-9]{32}) ''' _TESTS = [{ 'url': 'http://link.videoplatform.limelight.com/media/?channelId=ab6a524c379342f9b23642917020c082', 'info_dict': { 'id': 'ab6a524c379342f9b23642917020c082', 'title': 'Javascript Sample Code', 'description': 'Javascript Sample Code - http://www.delvenetworks.com/sample-code/playerCode-demo.html', }, 'playlist_mincount': 3, }, { 'url': 'http://assets.delvenetworks.com/player/loader.swf?channelId=ab6a524c379342f9b23642917020c082', 'only_matching': True, }] _PLAYLIST_SERVICE_PATH = 'channel' def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) channel_id = self._match_id(url) source_url = smuggled_data.get('source_url') pc, mobile = self._extract( channel_id, 'getPlaylistByChannelId', 'getMobilePlaylistWithNItemsByChannelId?begin=0&count=-1', source_url) entries = [ self._extract_info(pc, mobile, i, source_url) for i in range(len(pc['playlistItems']))] return self.playlist_result( entries, channel_id, pc.get('title'), mobile.get('description')) class LimelightChannelListIE(LimelightBaseIE): IE_NAME = 
'limelight:channel_list' _VALID_URL = r'''(?x) (?: limelight:channel_list:| https?:// (?: link\.videoplatform\.limelight\.com/media/| assets\.delvenetworks\.com/player/loader\.swf ) \?.*?\bchannelListId= ) (?P<id>[a-z0-9]{32}) ''' _TESTS = [{ 'url': 'http://link.videoplatform.limelight.com/media/?channelListId=301b117890c4465c8179ede21fd92e2b', 'info_dict': { 'id': '301b117890c4465c8179ede21fd92e2b', 'title': 'Website - Hero Player', }, 'playlist_mincount': 2, }, { 'url': 'https://assets.delvenetworks.com/player/loader.swf?channelListId=301b117890c4465c8179ede21fd92e2b', 'only_matching': True, }] _PLAYLIST_SERVICE_PATH = 'channel_list' def _real_extract(self, url): channel_list_id = self._match_id(url) channel_list = self._call_playlist_service( channel_list_id, 'getMobileChannelListById') entries = [ self.url_result('limelight:channel:%s' % channel['id'], 'LimelightChannel') for channel in channel_list['channelList']] return self.playlist_result( entries, channel_list_id, channel_list['title'])
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/mychannels.py
youtube_dl/extractor/mychannels.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor class MyChannelsIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?mychannels\.com/.*(?P<id_type>video|production)_id=(?P<id>[0-9]+)' _TEST = { 'url': 'https://mychannels.com/missholland/miss-holland?production_id=3416', 'md5': 'b8993daad4262dd68d89d651c0c52c45', 'info_dict': { 'id': 'wUUDZZep6vQD', 'ext': 'mp4', 'title': 'Miss Holland joins VOTE LEAVE', 'description': 'Miss Holland | #13 Not a potato', 'uploader': 'Miss Holland', } } def _real_extract(self, url): id_type, url_id = re.match(self._VALID_URL, url).groups() webpage = self._download_webpage(url, url_id) video_data = self._html_search_regex(r'<div([^>]+data-%s-id="%s"[^>]+)>' % (id_type, url_id), webpage, 'video data') def extract_data_val(attr, fatal=False): return self._html_search_regex(r'data-%s\s*=\s*"([^"]+)"' % attr, video_data, attr, fatal=fatal) minoto_id = extract_data_val('minoto-id') or self._search_regex(r'/id/([a-zA-Z0-9]+)', extract_data_val('video-src', True), 'minoto id') return { '_type': 'url_transparent', 'url': 'minoto:%s' % minoto_id, 'id': url_id, 'title': extract_data_val('title', True), 'description': extract_data_val('description'), 'thumbnail': extract_data_val('image'), 'uploader': extract_data_val('channel'), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/vuclip.py
youtube_dl/extractor/vuclip.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_urllib_parse_urlparse, ) from ..utils import ( ExtractorError, parse_duration, remove_end, ) class VuClipIE(InfoExtractor): _VALID_URL = r'https?://(?:m\.)?vuclip\.com/w\?.*?cid=(?P<id>[0-9]+)' _TEST = { 'url': 'http://m.vuclip.com/w?cid=1129900602&bu=8589892792&frm=w&z=34801&op=0&oc=843169247&section=recommend', 'info_dict': { 'id': '1129900602', 'ext': '3gp', 'title': 'Top 10 TV Convicts', 'duration': 733, } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) ad_m = re.search( r'''value="No.*?" onClick="location.href='([^"']+)'"''', webpage) if ad_m: urlr = compat_urllib_parse_urlparse(url) adfree_url = urlr.scheme + '://' + urlr.netloc + ad_m.group(1) webpage = self._download_webpage( adfree_url, video_id, note='Download post-ad page') error_msg = self._html_search_regex( r'<p class="message">(.*?)</p>', webpage, 'error message', default=None) if error_msg: raise ExtractorError( '%s said: %s' % (self.IE_NAME, error_msg), expected=True) # These clowns alternate between two page types video_url = self._search_regex( r'<a[^>]+href="([^"]+)"[^>]*><img[^>]+src="[^"]*/play\.gif', webpage, 'video URL', default=None) if video_url: formats = [{ 'url': video_url, }] else: formats = self._parse_html5_media_entries(url, webpage, video_id)[0]['formats'] title = remove_end(self._html_search_regex( r'<title>(.*?)-\s*Vuclip</title>', webpage, 'title').strip(), ' - Video') duration = parse_duration(self._html_search_regex( r'[(>]([0-9]+:[0-9]+)(?:<span|\))', webpage, 'duration', fatal=False)) return { 'id': video_id, 'formats': formats, 'title': title, 'duration': duration, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/nationalgeographic.py
youtube_dl/extractor/nationalgeographic.py
from __future__ import unicode_literals from .common import InfoExtractor from .fox import FOXIE from ..utils import ( smuggle_url, url_basename, ) class NationalGeographicVideoIE(InfoExtractor): IE_NAME = 'natgeo:video' _VALID_URL = r'https?://video\.nationalgeographic\.com/.*?' _TESTS = [ { 'url': 'http://video.nationalgeographic.com/video/news/150210-news-crab-mating-vin?source=featuredvideo', 'md5': '730855d559abbad6b42c2be1fa584917', 'info_dict': { 'id': '0000014b-70a1-dd8c-af7f-f7b559330001', 'ext': 'mp4', 'title': 'Mating Crabs Busted by Sharks', 'description': 'md5:16f25aeffdeba55aaa8ec37e093ad8b3', 'timestamp': 1423523799, 'upload_date': '20150209', 'uploader': 'NAGS', }, 'add_ie': ['ThePlatform'], }, { 'url': 'http://video.nationalgeographic.com/wild/when-sharks-attack/the-real-jaws', 'md5': '6a3105eb448c070503b3105fb9b320b5', 'info_dict': { 'id': 'ngc-I0IauNSWznb_UV008GxSbwY35BZvgi2e', 'ext': 'mp4', 'title': 'The Real Jaws', 'description': 'md5:8d3e09d9d53a85cd397b4b21b2c77be6', 'timestamp': 1433772632, 'upload_date': '20150608', 'uploader': 'NAGS', }, 'add_ie': ['ThePlatform'], }, ] def _real_extract(self, url): name = url_basename(url) webpage = self._download_webpage(url, name) guid = self._search_regex( r'id="(?:videoPlayer|player-container)"[^>]+data-guid="([^"]+)"', webpage, 'guid') return { '_type': 'url_transparent', 'ie_key': 'ThePlatform', 'url': smuggle_url( 'http://link.theplatform.com/s/ngs/media/guid/2423130747/%s?mbr=true' % guid, {'force_smil_url': True}), 'id': guid, } class NationalGeographicTVIE(FOXIE): _VALID_URL = r'https?://(?:www\.)?nationalgeographic\.com/tv/watch/(?P<id>[\da-fA-F]+)' _TESTS = [{ 'url': 'https://www.nationalgeographic.com/tv/watch/6a875e6e734b479beda26438c9f21138/', 'info_dict': { 'id': '6a875e6e734b479beda26438c9f21138', 'ext': 'mp4', 'title': 'Why Nat Geo? 
Valley of the Boom', 'description': 'The lives of prominent figures in the tech world, including their friendships, rivalries, victories and failures.', 'timestamp': 1542662458, 'upload_date': '20181119', 'age_limit': 14, }, 'params': { 'skip_download': True, }, }] _HOME_PAGE_URL = 'https://www.nationalgeographic.com/tv/' _API_KEY = '238bb0a0c2aba67922c48709ce0c06fd'
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/sohu.py
youtube_dl/extractor/sohu.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_str, compat_urllib_parse_urlencode, ) from ..utils import ( ExtractorError, int_or_none, try_get, ) class SohuIE(InfoExtractor): _VALID_URL = r'https?://(?P<mytv>my\.)?tv\.sohu\.com/.+?/(?(mytv)|n)(?P<id>\d+)\.shtml.*?' # Sohu videos give different MD5 sums on Travis CI and my machine _TESTS = [{ 'note': 'This video is available only in Mainland China', 'url': 'http://tv.sohu.com/20130724/n382479172.shtml#super', 'info_dict': { 'id': '382479172', 'ext': 'mp4', 'title': 'MV:Far East Movement《The Illest》', }, 'skip': 'On available in China', }, { 'url': 'http://tv.sohu.com/20150305/n409385080.shtml', 'info_dict': { 'id': '409385080', 'ext': 'mp4', 'title': '《2015湖南卫视羊年元宵晚会》唐嫣《花好月圆》', } }, { 'url': 'http://my.tv.sohu.com/us/232799889/78693464.shtml', 'info_dict': { 'id': '78693464', 'ext': 'mp4', 'title': '【爱范品】第31期:MWC见不到的奇葩手机', } }, { 'note': 'Multipart video', 'url': 'http://my.tv.sohu.com/pl/8384802/78910339.shtml', 'info_dict': { 'id': '78910339', 'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆', }, 'playlist': [{ 'info_dict': { 'id': '78910339_part1', 'ext': 'mp4', 'duration': 294, 'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆', } }, { 'info_dict': { 'id': '78910339_part2', 'ext': 'mp4', 'duration': 300, 'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆', } }, { 'info_dict': { 'id': '78910339_part3', 'ext': 'mp4', 'duration': 150, 'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆', } }] }, { 'note': 'Video with title containing dash', 'url': 'http://my.tv.sohu.com/us/249884221/78932792.shtml', 'info_dict': { 'id': '78932792', 'ext': 'mp4', 'title': 'youtube-dl testing video', }, 'params': { 'skip_download': True } }] def _real_extract(self, url): def _fetch_data(vid_id, mytv=False): if mytv: base_data_url = 'http://my.tv.sohu.com/play/videonew.do?vid=' else: base_data_url = 'http://hot.vrs.sohu.com/vrs_flash.action?vid=' return self._download_json( base_data_url + vid_id, video_id, 
'Downloading JSON data for %s' % vid_id, headers=self.geo_verification_headers()) mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') mytv = mobj.group('mytv') is not None webpage = self._download_webpage(url, video_id) title = re.sub(r' - 搜狐视频$', '', self._og_search_title(webpage)) vid = self._html_search_regex( r'var vid ?= ?["\'](\d+)["\']', webpage, 'video path') vid_data = _fetch_data(vid, mytv) if vid_data['play'] != 1: if vid_data.get('status') == 12: raise ExtractorError( '%s said: There\'s something wrong in the video.' % self.IE_NAME, expected=True) else: self.raise_geo_restricted( '%s said: The video is only licensed to users in Mainland China.' % self.IE_NAME) formats_json = {} for format_id in ('nor', 'high', 'super', 'ori', 'h2644k', 'h2654k'): vid_id = vid_data['data'].get('%sVid' % format_id) if not vid_id: continue vid_id = compat_str(vid_id) formats_json[format_id] = vid_data if vid == vid_id else _fetch_data(vid_id, mytv) part_count = vid_data['data']['totalBlocks'] playlist = [] for i in range(part_count): formats = [] for format_id, format_data in formats_json.items(): allot = format_data['allot'] data = format_data['data'] clips_url = data['clipsURL'] su = data['su'] video_url = 'newflv.sohu.ccgslb.net' cdnId = None retries = 0 while 'newflv.sohu.ccgslb.net' in video_url: params = { 'prot': 9, 'file': clips_url[i], 'new': su[i], 'prod': 'flash', 'rb': 1, } if cdnId is not None: params['idc'] = cdnId download_note = 'Downloading %s video URL part %d of %d' % ( format_id, i + 1, part_count) if retries > 0: download_note += ' (retry #%d)' % retries part_info = self._parse_json(self._download_webpage( 'http://%s/?%s' % (allot, compat_urllib_parse_urlencode(params)), video_id, download_note), video_id) video_url = part_info['url'] cdnId = part_info.get('nid') retries += 1 if retries > 5: raise ExtractorError('Failed to get video URL') formats.append({ 'url': video_url, 'format_id': format_id, 'filesize': int_or_none( try_get(data, 
lambda x: x['clipsBytes'][i])), 'width': int_or_none(data.get('width')), 'height': int_or_none(data.get('height')), 'fps': int_or_none(data.get('fps')), }) self._sort_formats(formats) playlist.append({ 'id': '%s_part%d' % (video_id, i + 1), 'title': title, 'duration': vid_data['data']['clipsDuration'][i], 'formats': formats, }) if len(playlist) == 1: info = playlist[0] info['id'] = video_id else: info = { '_type': 'multi_video', 'entries': playlist, 'id': video_id, 'title': title, } return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/playwire.py
youtube_dl/extractor/playwire.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( dict_get, float_or_none, ) class PlaywireIE(InfoExtractor): _VALID_URL = r'https?://(?:config|cdn)\.playwire\.com(?:/v2)?/(?P<publisher_id>\d+)/(?:videos/v2|embed|config)/(?P<id>\d+)' _TESTS = [{ 'url': 'http://config.playwire.com/14907/videos/v2/3353705/player.json', 'md5': 'e6398701e3595888125729eaa2329ed9', 'info_dict': { 'id': '3353705', 'ext': 'mp4', 'title': 'S04_RM_UCL_Rus', 'thumbnail': r're:^https?://.*\.png$', 'duration': 145.94, }, }, { # m3u8 in f4m 'url': 'http://config.playwire.com/21772/videos/v2/4840492/zeus.json', 'info_dict': { 'id': '4840492', 'ext': 'mp4', 'title': 'ITV EL SHOW FULL', }, 'params': { # m3u8 download 'skip_download': True, }, }, { # Multiple resolutions while bitrates missing 'url': 'http://cdn.playwire.com/11625/embed/85228.html', 'only_matching': True, }, { 'url': 'http://config.playwire.com/12421/videos/v2/3389892/zeus.json', 'only_matching': True, }, { 'url': 'http://cdn.playwire.com/v2/12342/config/1532636.json', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) publisher_id, video_id = mobj.group('publisher_id'), mobj.group('id') player = self._download_json( 'http://config.playwire.com/%s/videos/v2/%s/zeus.json' % (publisher_id, video_id), video_id) title = player['settings']['title'] duration = float_or_none(player.get('duration'), 1000) content = player['content'] thumbnail = content.get('poster') src = content['media']['f4m'] formats = self._extract_f4m_formats(src, video_id, m3u8_id='hls') for a_format in formats: if not dict_get(a_format, ['tbr', 'width', 'height']): a_format['quality'] = 1 if '-hd.' in a_format['url'] else 0 self._sort_formats(formats) return { 'id': video_id, 'title': title, 'thumbnail': thumbnail, 'duration': duration, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/safari.py
youtube_dl/extractor/safari.py
# coding: utf-8 from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..compat import ( compat_parse_qs, compat_urlparse, ) from ..utils import ( ExtractorError, update_url_query, ) class SafariBaseIE(InfoExtractor): _LOGIN_URL = 'https://learning.oreilly.com/accounts/login/' _NETRC_MACHINE = 'safari' _API_BASE = 'https://learning.oreilly.com/api/v1' _API_FORMAT = 'json' LOGGED_IN = False def _real_initialize(self): self._login() def _login(self): username, password = self._get_login_info() if username is None: return _, urlh = self._download_webpage_handle( 'https://learning.oreilly.com/accounts/login-check/', None, 'Downloading login page') def is_logged(urlh): return 'learning.oreilly.com/home/' in urlh.geturl() if is_logged(urlh): self.LOGGED_IN = True return redirect_url = urlh.geturl() parsed_url = compat_urlparse.urlparse(redirect_url) qs = compat_parse_qs(parsed_url.query) next_uri = compat_urlparse.urljoin( 'https://api.oreilly.com', qs['next'][0]) auth, urlh = self._download_json_handle( 'https://www.oreilly.com/member/auth/login/', None, 'Logging in', data=json.dumps({ 'email': username, 'password': password, 'redirect_uri': next_uri, }).encode(), headers={ 'Content-Type': 'application/json', 'Referer': redirect_url, }, expected_status=400) credentials = auth.get('credentials') if (not auth.get('logged_in') and not auth.get('redirect_uri') and credentials): raise ExtractorError( 'Unable to login: %s' % credentials, expected=True) # oreilly serves two same instances of the following cookies # in Set-Cookie header and expects first one to be actually set for cookie in ('groot_sessionid', 'orm-jwt', 'orm-rt'): self._apply_first_set_cookie_header(urlh, cookie) _, urlh = self._download_webpage_handle( auth.get('redirect_uri') or next_uri, None, 'Completing login',) if is_logged(urlh): self.LOGGED_IN = True return raise ExtractorError('Unable to log in') class SafariIE(SafariBaseIE): IE_NAME = 'safari' IE_DESC = 
'safaribooksonline.com online video' _VALID_URL = r'''(?x) https?:// (?:www\.)?(?:safaribooksonline|(?:learning\.)?oreilly)\.com/ (?: library/view/[^/]+/(?P<course_id>[^/]+)/(?P<part>[^/?\#&]+)\.html| videos/[^/]+/[^/]+/(?P<reference_id>[^-]+-[^/?\#&]+) ) ''' _TESTS = [{ 'url': 'https://www.safaribooksonline.com/library/view/hadoop-fundamentals-livelessons/9780133392838/part00.html', 'md5': 'dcc5a425e79f2564148652616af1f2a3', 'info_dict': { 'id': '0_qbqx90ic', 'ext': 'mp4', 'title': 'Introduction to Hadoop Fundamentals LiveLessons', 'timestamp': 1437758058, 'upload_date': '20150724', 'uploader_id': 'stork', }, }, { # non-digits in course id 'url': 'https://www.safaribooksonline.com/library/view/create-a-nodejs/100000006A0210/part00.html', 'only_matching': True, }, { 'url': 'https://www.safaribooksonline.com/library/view/learning-path-red/9780134664057/RHCE_Introduction.html', 'only_matching': True, }, { 'url': 'https://www.safaribooksonline.com/videos/python-programming-language/9780134217314/9780134217314-PYMC_13_00', 'only_matching': True, }, { 'url': 'https://learning.oreilly.com/videos/hadoop-fundamentals-livelessons/9780133392838/9780133392838-00_SeriesIntro', 'only_matching': True, }, { 'url': 'https://www.oreilly.com/library/view/hadoop-fundamentals-livelessons/9780133392838/00_SeriesIntro.html', 'only_matching': True, }] _PARTNER_ID = '1926081' _UICONF_ID = '29375172' def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) reference_id = mobj.group('reference_id') if reference_id: video_id = reference_id partner_id = self._PARTNER_ID ui_id = self._UICONF_ID else: video_id = '%s-%s' % (mobj.group('course_id'), mobj.group('part')) webpage, urlh = self._download_webpage_handle(url, video_id) mobj = re.match(self._VALID_URL, urlh.geturl()) reference_id = mobj.group('reference_id') if not reference_id: reference_id = self._search_regex( r'data-reference-id=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 'kaltura reference id', group='id') partner_id = 
self._search_regex( r'data-partner-id=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 'kaltura widget id', default=self._PARTNER_ID, group='id') ui_id = self._search_regex( r'data-ui-id=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 'kaltura uiconf id', default=self._UICONF_ID, group='id') query = { 'wid': '_%s' % partner_id, 'uiconf_id': ui_id, 'flashvars[referenceId]': reference_id, } if self.LOGGED_IN: kaltura_session = self._download_json( '%s/player/kaltura_session/?reference_id=%s' % (self._API_BASE, reference_id), video_id, 'Downloading kaltura session JSON', 'Unable to download kaltura session JSON', fatal=False, headers={'Accept': 'application/json'}) if kaltura_session: session = kaltura_session.get('session') if session: query['flashvars[ks]'] = session return self.url_result(update_url_query( 'https://cdnapisec.kaltura.com/html5/html5lib/v2.37.1/mwEmbedFrame.php', query), 'Kaltura') class SafariApiIE(SafariBaseIE): IE_NAME = 'safari:api' _VALID_URL = r'https?://(?:www\.)?(?:safaribooksonline|(?:learning\.)?oreilly)\.com/api/v1/book/(?P<course_id>[^/]+)/chapter(?:-content)?/(?P<part>[^/?#&]+)\.html' _TESTS = [{ 'url': 'https://www.safaribooksonline.com/api/v1/book/9780133392838/chapter/part00.html', 'only_matching': True, }, { 'url': 'https://www.safaribooksonline.com/api/v1/book/9780134664057/chapter/RHCE_Introduction.html', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) part = self._download_json( url, '%s/%s' % (mobj.group('course_id'), mobj.group('part')), 'Downloading part JSON') return self.url_result(part['web_url'], SafariIE.ie_key()) class SafariCourseIE(SafariBaseIE): IE_NAME = 'safari:course' IE_DESC = 'safaribooksonline.com online courses' _VALID_URL = r'''(?x) https?:// (?: (?:www\.)?(?:safaribooksonline|(?:learning\.)?oreilly)\.com/ (?: library/view/[^/]+| api/v1/book| videos/[^/]+ )| techbus\.safaribooksonline\.com ) /(?P<id>[^/]+) ''' _TESTS = [{ 'url': 
'https://www.safaribooksonline.com/library/view/hadoop-fundamentals-livelessons/9780133392838/', 'info_dict': { 'id': '9780133392838', 'title': 'Hadoop Fundamentals LiveLessons', }, 'playlist_count': 22, 'skip': 'Requires safaribooksonline account credentials', }, { 'url': 'https://www.safaribooksonline.com/api/v1/book/9781449396459/?override_format=json', 'only_matching': True, }, { 'url': 'http://techbus.safaribooksonline.com/9780134426365', 'only_matching': True, }, { 'url': 'https://www.safaribooksonline.com/videos/python-programming-language/9780134217314', 'only_matching': True, }, { 'url': 'https://learning.oreilly.com/videos/hadoop-fundamentals-livelessons/9780133392838', 'only_matching': True, }, { 'url': 'https://www.oreilly.com/library/view/hadoop-fundamentals-livelessons/9780133392838/', 'only_matching': True, }] @classmethod def suitable(cls, url): return (False if SafariIE.suitable(url) or SafariApiIE.suitable(url) else super(SafariCourseIE, cls).suitable(url)) def _real_extract(self, url): course_id = self._match_id(url) course_json = self._download_json( '%s/book/%s/?override_format=%s' % (self._API_BASE, course_id, self._API_FORMAT), course_id, 'Downloading course JSON') if 'chapters' not in course_json: raise ExtractorError( 'No chapters found for course %s' % course_id, expected=True) entries = [ self.url_result(chapter, SafariApiIE.ie_key()) for chapter in course_json['chapters']] course_title = course_json['title'] return self.playlist_result(entries, course_id, course_title)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/folketinget.py
youtube_dl/extractor/folketinget.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_parse_qs from ..utils import ( int_or_none, parse_duration, parse_iso8601, xpath_text, ) class FolketingetIE(InfoExtractor): IE_DESC = 'Folketinget (ft.dk; Danish parliament)' _VALID_URL = r'https?://(?:www\.)?ft\.dk/webtv/video/[^?#]*?\.(?P<id>[0-9]+)\.aspx' _TEST = { 'url': 'http://www.ft.dk/webtv/video/20141/eru/td.1165642.aspx?as=1#player', 'md5': '6269e8626fa1a891bf5369b386ae996a', 'info_dict': { 'id': '1165642', 'ext': 'mp4', 'title': 'Åbent samråd i Erhvervsudvalget', 'description': 'Åbent samråd med erhvervs- og vækstministeren om regeringens politik på teleområdet', 'view_count': int, 'width': 768, 'height': 432, 'tbr': 928000, 'timestamp': 1416493800, 'upload_date': '20141120', 'duration': 3960, }, 'params': { # rtmp download 'skip_download': True, }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._og_search_title(webpage) description = self._html_search_regex( r'(?s)<div class="video-item-agenda"[^>]*>(.*?)<', webpage, 'description', fatal=False) player_params = compat_parse_qs(self._search_regex( r'<embed src="http://ft\.arkena\.tv/flash/ftplayer\.swf\?([^"]+)"', webpage, 'player params')) xml_url = player_params['xml'][0] doc = self._download_xml(xml_url, video_id) timestamp = parse_iso8601(xpath_text(doc, './/date')) duration = parse_duration(xpath_text(doc, './/duration')) width = int_or_none(xpath_text(doc, './/width')) height = int_or_none(xpath_text(doc, './/height')) view_count = int_or_none(xpath_text(doc, './/views')) formats = [{ 'format_id': n.attrib['bitrate'], 'url': xpath_text(n, './url', fatal=True), 'tbr': int_or_none(n.attrib['bitrate']), } for n in doc.findall('.//streams/stream')] self._sort_formats(formats) return { 'id': video_id, 'title': title, 'formats': formats, 'description': description, 'timestamp': timestamp, 'width': width, 
'height': height, 'duration': duration, 'view_count': view_count, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/spreaker.py
youtube_dl/extractor/spreaker.py
# coding: utf-8 from __future__ import unicode_literals import itertools from .common import InfoExtractor from ..compat import compat_str from ..utils import ( float_or_none, int_or_none, str_or_none, try_get, unified_timestamp, url_or_none, ) def _extract_episode(data, episode_id=None): title = data['title'] download_url = data['download_url'] series = try_get(data, lambda x: x['show']['title'], compat_str) uploader = try_get(data, lambda x: x['author']['fullname'], compat_str) thumbnails = [] for image in ('image_original', 'image_medium', 'image'): image_url = url_or_none(data.get('%s_url' % image)) if image_url: thumbnails.append({'url': image_url}) def stats(key): return int_or_none(try_get( data, (lambda x: x['%ss_count' % key], lambda x: x['stats']['%ss' % key]))) def duration(key): return float_or_none(data.get(key), scale=1000) return { 'id': compat_str(episode_id or data['episode_id']), 'url': download_url, 'display_id': data.get('permalink'), 'title': title, 'description': data.get('description'), 'timestamp': unified_timestamp(data.get('published_at')), 'uploader': uploader, 'uploader_id': str_or_none(data.get('author_id')), 'creator': uploader, 'duration': duration('duration') or duration('length'), 'view_count': stats('play'), 'like_count': stats('like'), 'comment_count': stats('message'), 'format': 'MPEG Layer 3', 'format_id': 'mp3', 'container': 'mp3', 'ext': 'mp3', 'thumbnails': thumbnails, 'series': series, 'extractor_key': SpreakerIE.ie_key(), } class SpreakerIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// api\.spreaker\.com/ (?: (?:download/)?episode| v2/episodes )/ (?P<id>\d+) ''' _TESTS = [{ 'url': 'https://api.spreaker.com/episode/12534508', 'info_dict': { 'id': '12534508', 'display_id': 'swm-ep15-how-to-market-your-music-part-2', 'ext': 'mp3', 'title': 'EP:15 | Music Marketing (Likes) - Part 2', 'description': 'md5:0588c43e27be46423e183076fa071177', 'timestamp': 1502250336, 'upload_date': '20170809', 'uploader': 'SWM', 'uploader_id': 
'9780658', 'duration': 1063.42, 'view_count': int, 'like_count': int, 'comment_count': int, 'series': 'Success With Music (SWM)', }, }, { 'url': 'https://api.spreaker.com/download/episode/12534508/swm_ep15_how_to_market_your_music_part_2.mp3', 'only_matching': True, }, { 'url': 'https://api.spreaker.com/v2/episodes/12534508?export=episode_segments', 'only_matching': True, }] def _real_extract(self, url): episode_id = self._match_id(url) data = self._download_json( 'https://api.spreaker.com/v2/episodes/%s' % episode_id, episode_id)['response']['episode'] return _extract_episode(data, episode_id) class SpreakerPageIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?spreaker\.com/user/[^/]+/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://www.spreaker.com/user/9780658/swm-ep15-how-to-market-your-music-part-2', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) episode_id = self._search_regex( (r'data-episode_id=["\'](?P<id>\d+)', r'episode_id\s*:\s*(?P<id>\d+)'), webpage, 'episode id') return self.url_result( 'https://api.spreaker.com/episode/%s' % episode_id, ie=SpreakerIE.ie_key(), video_id=episode_id) class SpreakerShowIE(InfoExtractor): _VALID_URL = r'https?://api\.spreaker\.com/show/(?P<id>\d+)' _TESTS = [{ 'url': 'https://api.spreaker.com/show/4652058', 'info_dict': { 'id': '4652058', }, 'playlist_mincount': 118, }] def _entries(self, show_id): for page_num in itertools.count(1): episodes = self._download_json( 'https://api.spreaker.com/show/%s/episodes' % show_id, show_id, note='Downloading JSON page %d' % page_num, query={ 'page': page_num, 'max_per_page': 100, }) pager = try_get(episodes, lambda x: x['response']['pager'], dict) if not pager: break results = pager.get('results') if not results or not isinstance(results, list): break for result in results: if not isinstance(result, dict): continue yield _extract_episode(result) if page_num == pager.get('last_page'): break def 
_real_extract(self, url): show_id = self._match_id(url) return self.playlist_result(self._entries(show_id), playlist_id=show_id) class SpreakerShowPageIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?spreaker\.com/show/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://www.spreaker.com/show/success-with-music', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) show_id = self._search_regex( r'show_id\s*:\s*(?P<id>\d+)', webpage, 'show id') return self.url_result( 'https://api.spreaker.com/show/%s' % show_id, ie=SpreakerShowIE.ie_key(), video_id=show_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/roxwel.py
youtube_dl/extractor/roxwel.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import unified_strdate, determine_ext class RoxwelIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?roxwel\.com/player/(?P<filename>.+?)(\.|\?|$)' _TEST = { 'url': 'http://www.roxwel.com/player/passionpittakeawalklive.html', 'info_dict': { 'id': 'passionpittakeawalklive', 'ext': 'flv', 'title': 'Take A Walk (live)', 'uploader': 'Passion Pit', 'uploader_id': 'passionpit', 'upload_date': '20120928', 'description': 'Passion Pit performs "Take A Walk\" live at The Backyard in Austin, Texas. ', }, 'params': { # rtmp download 'skip_download': True, } } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) filename = mobj.group('filename') info_url = 'http://www.roxwel.com/api/videos/%s' % filename info = self._download_json(info_url, filename) rtmp_rates = sorted([int(r.replace('flv_', '')) for r in info['media_rates'] if r.startswith('flv_')]) best_rate = rtmp_rates[-1] url_page_url = 'http://roxwel.com/pl_one_time.php?filename=%s&quality=%s' % (filename, best_rate) rtmp_url = self._download_webpage(url_page_url, filename, 'Downloading video url') ext = determine_ext(rtmp_url) if ext == 'f4v': rtmp_url = rtmp_url.replace(filename, 'mp4:%s' % filename) return { 'id': filename, 'title': info['title'], 'url': rtmp_url, 'ext': 'flv', 'description': info['description'], 'thumbnail': info.get('player_image_url') or info.get('image_url_large'), 'uploader': info['artist'], 'uploader_id': info['artistname'], 'upload_date': unified_strdate(info['dbdate']), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/mdr.py
youtube_dl/extractor/mdr.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_urlparse,
)
from ..utils import (
    determine_ext,
    int_or_none,
    parse_duration,
    parse_iso8601,
    url_or_none,
    xpath_text,
)


class MDRIE(InfoExtractor):
    IE_DESC = 'MDR.DE and KiKA'
    _VALID_URL = r'https?://(?:www\.)?(?:mdr|kika)\.de/(?:.*)/[a-z-]+-?(?P<id>\d+)(?:_.+?)?\.html'

    _GEO_COUNTRIES = ['DE']

    _TESTS = [{
        # MDR regularly deletes its videos
        'url': 'http://www.mdr.de/fakt/video189002.html',
        'only_matching': True,
    }, {
        # audio
        'url': 'http://www.mdr.de/kultur/audio1312272_zc-15948bad_zs-86171fdd.html',
        'md5': '64c4ee50f0a791deb9479cd7bbe9d2fa',
        'info_dict': {
            'id': '1312272',
            'ext': 'mp3',
            'title': 'Feuilleton vom 30. Oktober 2015',
            'duration': 250,
            'uploader': 'MITTELDEUTSCHER RUNDFUNK',
        },
        'skip': '404 not found',
    }, {
        'url': 'http://www.kika.de/baumhaus/videos/video19636.html',
        'md5': '4930515e36b06c111213e80d1e4aad0e',
        'info_dict': {
            'id': '19636',
            'ext': 'mp4',
            'title': 'Baumhaus vom 30. Oktober 2015',
            'duration': 134,
            'uploader': 'KIKA',
        },
        'skip': '404 not found',
    }, {
        'url': 'http://www.kika.de/sendungen/einzelsendungen/weihnachtsprogramm/videos/video8182.html',
        'md5': '5fe9c4dd7d71e3b238f04b8fdd588357',
        'info_dict': {
            'id': '8182',
            'ext': 'mp4',
            'title': 'Beutolomäus und der geheime Weihnachtswunsch',
            'description': 'md5:b69d32d7b2c55cbe86945ab309d39bbd',
            'timestamp': 1482541200,
            'upload_date': '20161224',
            'duration': 4628,
            'uploader': 'KIKA',
        },
    }, {
        # audio with alternative playerURL pattern
        'url': 'http://www.mdr.de/kultur/videos-und-audios/audio-radio/operation-mindfuck-robert-wilson100.html',
        'info_dict': {
            'id': '100',
            'ext': 'mp4',
            'title': 'Feature: Operation Mindfuck - Robert Anton Wilson',
            'duration': 3239,
            'uploader': 'MITTELDEUTSCHER RUNDFUNK',
        },
    }, {
        # empty bitrateVideo and bitrateAudio
        'url': 'https://www.kika.de/filme/sendung128372_zc-572e3f45_zs-1d9fb70e.html',
        'info_dict': {
            'id': '128372',
            'ext': 'mp4',
            'title': 'Der kleine Wichtel kehrt zurück',
            'description': 'md5:f77fafdff90f7aa1e9dca14f662c052a',
            'duration': 4876,
            'timestamp': 1607823300,
            'upload_date': '20201213',
            'uploader': 'ZDF',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.kika.de/baumhaus/sendungen/video19636_zc-fea7f8a0_zs-4bf89c60.html',
        'only_matching': True,
    }, {
        'url': 'http://www.kika.de/sendungen/einzelsendungen/weihnachtsprogramm/einzelsendung2534.html',
        'only_matching': True,
    }, {
        'url': 'http://www.mdr.de/mediathek/mdr-videos/a/video-1334.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # Extraction strategy: find the -avCustom.xml player manifest URL in
        # the page, then read title/formats/metadata out of that XML document.
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        # The manifest URL appears either as dataURL or playerXml; backslash
        # escapes in JS string literals are undone before joining.
        data_url = self._search_regex(
            r'(?:dataURL|playerXml(?:["\'])?)\s*:\s*(["\'])(?P<url>.+?-avCustom\.xml)\1',
            webpage, 'data url', group='url').replace(r'\/', '/')

        doc = self._download_xml(
            compat_urlparse.urljoin(url, data_url), video_id)

        title = xpath_text(doc, ['./title', './broadcast/broadcastName'], 'title', fatal=True)

        # type_ distinguishes audio-only items so they get vcodec 'none'.
        type_ = xpath_text(doc, './type', default=None)

        formats = []
        processed_urls = []  # de-duplicates URLs repeated across sources
        for asset in doc.findall('./assets/asset'):
            for source in (
                    'download',
                    'progressiveDownload',
                    'dynamicHttpStreamingRedirector',
                    'adaptiveHttpStreamingRedirector'):
                url_el = asset.find('./%sUrl' % source)
                if url_el is None:
                    continue

                video_url = url_or_none(url_el.text)
                if not video_url or video_url in processed_urls:
                    continue

                processed_urls.append(video_url)

                ext = determine_ext(video_url)
                if ext == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        video_url, video_id, 'mp4', entry_protocol='m3u8_native',
                        preference=0, m3u8_id='HLS', fatal=False))
                elif ext == 'f4m':
                    formats.extend(self._extract_f4m_formats(
                        video_url + '?hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id,
                        preference=0, f4m_id='HDS', fatal=False))
                else:
                    # Plain progressive file: bitrates come from the asset XML
                    # (in kbit/s, hence the scale factor of 1000).
                    media_type = xpath_text(asset, './mediaType', 'media type', default='MP4')
                    vbr = int_or_none(xpath_text(asset, './bitrateVideo', 'vbr'), 1000)
                    abr = int_or_none(xpath_text(asset, './bitrateAudio', 'abr'), 1000)
                    filesize = int_or_none(xpath_text(asset, './fileSize', 'file size'))

                    format_id = [media_type]
                    if vbr or abr:
                        format_id.append(compat_str(vbr or abr))

                    f = {
                        'url': video_url,
                        'format_id': '-'.join(format_id),
                        'filesize': filesize,
                        'abr': abr,
                        'vbr': vbr,
                    }

                    if vbr:
                        # Dimensions only make sense for video formats.
                        f.update({
                            'width': int_or_none(xpath_text(asset, './frameWidth', 'width')),
                            'height': int_or_none(xpath_text(asset, './frameHeight', 'height')),
                        })

                    if type_ == 'audio':
                        f['vcodec'] = 'none'

                    formats.append(f)

        self._sort_formats(formats)

        description = xpath_text(doc, './broadcast/broadcastDescription', 'description')
        # First of broadcastDate/StartDate/EndDate that parses wins.
        timestamp = parse_iso8601(
            xpath_text(
                doc, [
                    './broadcast/broadcastDate',
                    './broadcast/broadcastStartDate',
                    './broadcast/broadcastEndDate'],
                'timestamp', default=None))
        duration = parse_duration(xpath_text(doc, './duration', 'duration'))
        uploader = xpath_text(doc, './rights', 'uploader')

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'timestamp': timestamp,
            'duration': duration,
            'uploader': uploader,
            'formats': formats,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/ro220.py
youtube_dl/extractor/ro220.py
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote


class Ro220IE(InfoExtractor):
    IE_NAME = '220.ro'
    _VALID_URL = r'(?x)(?:https?://)?(?:www\.)?220\.ro/(?P<category>[^/]+)/(?P<shorttitle>[^/]+)/(?P<id>[^/]+)'
    _TEST = {
        'url': 'http://www.220.ro/sport/Luati-Le-Banii-Sez-4-Ep-1/LYV6doKo7f/',
        'md5': '03af18b73a07b4088753930db7a34add',
        'info_dict': {
            'id': 'LYV6doKo7f',
            'ext': 'mp4',
            'title': 'Luati-le Banii sez 4 ep 1',
            'description': r're:^Iata-ne reveniti dupa o binemeritata vacanta\. +Va astept si pe Facebook cu pareri si comentarii.$',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The clip URL sits percent-encoded inside an inline player config.
        video_url = compat_urllib_parse_unquote(self._search_regex(
            r'(?s)clip\s*:\s*{.*?url\s*:\s*\'([^\']+)\'',
            webpage, 'url'))

        return {
            'id': video_id,
            'formats': [{
                'format_id': 'sd',
                'url': video_url,
                'ext': 'mp4',
            }],
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/airmozilla.py
youtube_dl/extractor/airmozilla.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( int_or_none, parse_duration, parse_iso8601, ) class AirMozillaIE(InfoExtractor): _VALID_URL = r'https?://air\.mozilla\.org/(?P<id>[0-9a-z-]+)/?' _TEST = { 'url': 'https://air.mozilla.org/privacy-lab-a-meetup-for-privacy-minded-people-in-san-francisco/', 'md5': '8d02f53ee39cf006009180e21df1f3ba', 'info_dict': { 'id': '6x4q2w', 'ext': 'mp4', 'title': 'Privacy Lab - a meetup for privacy minded people in San Francisco', 'thumbnail': r're:https?://.*/poster\.jpg', 'description': 'Brings together privacy professionals and others interested in privacy at for-profits, non-profits, and NGOs in an effort to contribute to the state of the ecosystem...', 'timestamp': 1422487800, 'upload_date': '20150128', 'location': 'SFO Commons', 'duration': 3780, 'view_count': int, 'categories': ['Main', 'Privacy'], } } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) video_id = self._html_search_regex(r'//vid\.ly/(.*?)/embed', webpage, 'id') embed_script = self._download_webpage('https://vid.ly/{0}/embed'.format(video_id), video_id) jwconfig = self._parse_json(self._search_regex( r'initCallback\((.*)\);', embed_script, 'metadata'), video_id)['config'] info_dict = self._parse_jwplayer_data(jwconfig, video_id) view_count = int_or_none(self._html_search_regex( r'Views since archived: ([0-9]+)', webpage, 'view count', fatal=False)) timestamp = parse_iso8601(self._html_search_regex( r'<time datetime="(.*?)"', webpage, 'timestamp', fatal=False)) duration = parse_duration(self._search_regex( r'Duration:\s*(\d+\s*hours?\s*\d+\s*minutes?)', webpage, 'duration', fatal=False)) info_dict.update({ 'id': video_id, 'title': self._og_search_title(webpage), 'url': self._og_search_url(webpage), 'display_id': display_id, 'description': self._og_search_description(webpage), 'timestamp': timestamp, 'location': 
self._html_search_regex(r'Location: (.*)', webpage, 'location', default=None), 'duration': duration, 'view_count': view_count, 'categories': re.findall(r'<a href=".*?" class="channel">(.*?)</a>', webpage), }) return info_dict
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/espn.py
youtube_dl/extractor/espn.py
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from .once import OnceIE
from ..compat import compat_str
from ..utils import (
    determine_ext,
    int_or_none,
    unified_timestamp,
)


class ESPNIE(OnceIE):
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            (?:
                                (?:
                                    (?:(?:\w+\.)+)?espn\.go|
                                    (?:www\.)?espn
                                )\.com/
                                (?:
                                    (?:
                                        video/(?:clip|iframe/twitter)|
                                        watch/player
                                    )
                                    (?:
                                        .*?\?.*?\bid=|
                                        /_/id/
                                    )|
                                    [^/]+/video/
                                )
                            )|
                            (?:www\.)espnfc\.(?:com|us)/(?:video/)?[^/]+/\d+/video/
                        )
                        (?P<id>\d+)
                    '''

    _TESTS = [{
        'url': 'http://espn.go.com/video/clip?id=10365079',
        'info_dict': {
            'id': '10365079',
            'ext': 'mp4',
            'title': '30 for 30 Shorts: Judging Jewell',
            'description': 'md5:39370c2e016cb4ecf498ffe75bef7f0f',
            'timestamp': 1390936111,
            'upload_date': '20140128',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://broadband.espn.go.com/video/clip?id=18910086',
        'info_dict': {
            'id': '18910086',
            'ext': 'mp4',
            'title': 'Kyrie spins around defender for two',
            'description': 'md5:2b0f5bae9616d26fba8808350f0d2b9b',
            'timestamp': 1489539155,
            'upload_date': '20170315',
        },
        'params': {
            'skip_download': True,
        },
        'expected_warnings': ['Unable to download f4m manifest'],
    }, {
        'url': 'http://nonredline.sports.espn.go.com/video/clip?id=19744672',
        'only_matching': True,
    }, {
        'url': 'https://cdn.espn.go.com/video/clip/_/id/19771774',
        'only_matching': True,
    }, {
        'url': 'http://www.espn.com/watch/player?id=19141491',
        'only_matching': True,
    }, {
        'url': 'http://www.espn.com/watch/player?bucketId=257&id=19505875',
        'only_matching': True,
    }, {
        'url': 'http://www.espn.com/watch/player/_/id/19141491',
        'only_matching': True,
    }, {
        'url': 'http://www.espn.com/video/clip?id=10365079',
        'only_matching': True,
    }, {
        'url': 'http://www.espn.com/video/clip/_/id/17989860',
        'only_matching': True,
    }, {
        'url': 'https://espn.go.com/video/iframe/twitter/?cms=espn&id=10365079',
        'only_matching': True,
    }, {
        'url': 'http://www.espnfc.us/video/espn-fc-tv/86/video/3319154/nashville-unveiled-as-the-newest-club-in-mls',
        'only_matching': True,
    }, {
        'url': 'http://www.espnfc.com/english-premier-league/23/video/3324163/premier-league-in-90-seconds-golden-tweets',
        'only_matching': True,
    }, {
        'url': 'http://www.espn.com/espnw/video/26066627/arkansas-gibson-completes-hr-cycle-four-innings',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # Clip metadata (including all stream links) comes from the public
        # clips API; formats are then collected by recursively walking the
        # 'source' and 'mobile' link trees.
        video_id = self._match_id(url)

        clip = self._download_json(
            'http://api-app.espn.com/v1/video/clips/%s' % video_id,
            video_id)['videos'][0]

        title = clip['headline']

        format_urls = set()  # guards against the same URL in both trees
        formats = []

        def traverse_source(source, base_source_id=None):
            # Walk a nested dict of {source_id: url-or-subdict}; nested levels
            # concatenate their ids with '-' to build the format_id.
            for source_id, source in source.items():
                if source_id == 'alert':
                    continue
                elif isinstance(source, compat_str):
                    extract_source(source, base_source_id)
                elif isinstance(source, dict):
                    traverse_source(
                        source,
                        '%s-%s' % (base_source_id, source_id)
                        if base_source_id else source_id)

        def extract_source(source_url, source_id=None):
            # Dispatch one URL to the matching manifest/progressive handler.
            if source_url in format_urls:
                return
            format_urls.add(source_url)
            ext = determine_ext(source_url)
            if OnceIE.suitable(source_url):
                formats.extend(self._extract_once_formats(source_url))
            elif ext == 'smil':
                formats.extend(self._extract_smil_formats(
                    source_url, video_id, fatal=False))
            elif ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    source_url, video_id, f4m_id=source_id, fatal=False))
            elif ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    source_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id=source_id, fatal=False))
            else:
                f = {
                    'url': source_url,
                    'format_id': source_id,
                }
                # Progressive URLs encode height/fps/bitrate as e.g. 720p30_2896k.
                mobj = re.search(r'(\d+)p(\d+)_(\d+)k\.', source_url)
                if mobj:
                    f.update({
                        'height': int(mobj.group(1)),
                        'fps': int(mobj.group(2)),
                        'tbr': int(mobj.group(3)),
                    })
                if source_id == 'mezzanine':
                    # The mezzanine is the highest-quality source.
                    f['preference'] = 1
                formats.append(f)

        links = clip.get('links', {})
        traverse_source(links.get('source', {}))
        traverse_source(links.get('mobile', {}))
        self._sort_formats(formats)

        description = clip.get('caption') or clip.get('description')
        thumbnail = clip.get('thumbnail')
        duration = int_or_none(clip.get('duration'))
        timestamp = unified_timestamp(clip.get('originalPublishDate'))

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'duration': duration,
            'formats': formats,
        }


class ESPNArticleIE(InfoExtractor):
    _VALID_URL = r'https?://(?:espn\.go|(?:www\.)?espn)\.com/(?:[^/]+/)*(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'http://espn.go.com/nba/recap?gameId=400793786',
        'only_matching': True,
    }, {
        'url': 'http://espn.go.com/blog/golden-state-warriors/post/_/id/593/how-warriors-rapidly-regained-a-winning-edge',
        'only_matching': True,
    }, {
        'url': 'http://espn.go.com/sports/endurance/story/_/id/12893522/dzhokhar-tsarnaev-sentenced-role-boston-marathon-bombings',
        'only_matching': True,
    }, {
        'url': 'http://espn.go.com/nba/playoffs/2015/story/_/id/12887571/john-wall-washington-wizards-no-swelling-left-hand-wrist-game-5-return',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Defer to ESPNIE for direct video URLs; this IE handles articles only.
        return False if ESPNIE.suitable(url) else super(ESPNArticleIE, cls).suitable(url)

    def _real_extract(self, url):
        # Articles embed a clip id in the play button; delegate to ESPNIE.
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        video_id = self._search_regex(
            r'class=(["\']).*?video-play-button.*?\1[^>]+data-id=["\'](?P<id>\d+)',
            webpage, 'video id', group='id')

        return self.url_result(
            'http://espn.go.com/video/clip?id=%s' % video_id, ESPNIE.ie_key())


class FiveThirtyEightIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?fivethirtyeight\.com/features/(?P<id>[^/?#]+)'
    _TEST = {
        'url': 'http://fivethirtyeight.com/features/how-the-6-8-raiders-can-still-make-the-playoffs/',
        'info_dict': {
            'id': '56032156',
            'ext': 'flv',
            'title': 'FiveThirtyEight: The Raiders can still make the playoffs',
            'description': 'Neil Paine breaks down the simplest scenario that will put the Raiders into the playoffs at 8-8.',
        },
        'params': {
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        # FiveThirtyEight videos are hosted by ABC News; hand the embed over.
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        embed_url = self._search_regex(
            r'<iframe[^>]+src=["\'](https?://fivethirtyeight\.abcnews\.go\.com/video/embed/\d+/\d+)',
            webpage, 'embed url')

        return self.url_result(embed_url, 'AbcNewsVideo')
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/jamendo.py
youtube_dl/extractor/jamendo.py
# coding: utf-8
from __future__ import unicode_literals

import hashlib
import random

from ..compat import compat_str
from .common import InfoExtractor
from ..utils import (
    clean_html,
    int_or_none,
    try_get,
)


class JamendoIE(InfoExtractor):
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            licensing\.jamendo\.com/[^/]+|
                            (?:www\.)?jamendo\.com
                        )
                        /track/(?P<id>[0-9]+)(?:/(?P<display_id>[^/?#&]+))?
                    '''
    _TESTS = [{
        'url': 'https://www.jamendo.com/track/196219/stories-from-emona-i',
        'md5': '6e9e82ed6db98678f171c25a8ed09ffd',
        'info_dict': {
            'id': '196219',
            'display_id': 'stories-from-emona-i',
            'ext': 'flac',
            # 'title': 'Maya Filipič - Stories from Emona I',
            'title': 'Stories from Emona I',
            # 'artist': 'Maya Filipič',
            'track': 'Stories from Emona I',
            'duration': 210,
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1217438117,
            'upload_date': '20080730',
            'license': 'by-nc-nd',
            'view_count': int,
            'like_count': int,
            'average_rating': int,
            'tags': ['piano', 'peaceful', 'newage', 'strings', 'upbeat'],
        }
    }, {
        'url': 'https://licensing.jamendo.com/en/track/1496667/energetic-rock',
        'only_matching': True,
    }]

    def _call_api(self, resource, resource_id):
        # The site API requires an X-Jam-Call header: a SHA-1 of the request
        # path concatenated with a random nonce, followed by the nonce itself.
        path = '/api/%ss' % resource
        rand = compat_str(random.random())
        return self._download_json(
            'https://www.jamendo.com' + path, resource_id, query={
                'id[]': resource_id,
            }, headers={
                'X-Jam-Call': '$%s*%s~' % (hashlib.sha1((path + rand).encode()).hexdigest(), rand)
            })[0]

    def _real_extract(self, url):
        track_id, display_id = self._VALID_URL_RE.match(url).groups()
        # Previous approach scraped the track page; kept for reference.
        # webpage = self._download_webpage(
        #     'https://www.jamendo.com/track/' + track_id, track_id)
        # models = self._parse_json(self._html_search_regex(
        #     r"data-bundled-models='([^']+)",
        #     webpage, 'bundled models'), track_id)
        # track = models['track']['models'][0]
        track = self._call_api('track', track_id)
        title = track_name = track['name']
        # get_model = lambda x: try_get(models, lambda y: y[x]['models'][0], dict) or {}
        # artist = get_model('artist')
        # artist_name = artist.get('name')
        # if artist_name:
        #     title = '%s - %s' % (artist_name, title)
        # album = get_model('album')

        # Download endpoints follow a fixed pattern per encoding; quality is
        # just the position in this worst-to-best tuple.
        formats = [{
            'url': 'https://%s.jamendo.com/?trackid=%s&format=%s&from=app-97dab294'
                   % (sub_domain, track_id, format_id),
            'format_id': format_id,
            'ext': ext,
            'quality': quality,
        } for quality, (format_id, sub_domain, ext) in enumerate((
            ('mp31', 'mp3l', 'mp3'),
            ('mp32', 'mp3d', 'mp3'),
            ('ogg1', 'ogg', 'ogg'),
            ('flac', 'flac', 'flac'),
        ))]
        self._sort_formats(formats)

        urls = []  # de-duplicates cover URLs shared across size variants
        thumbnails = []
        for covers in (track.get('cover') or {}).values():
            for cover_id, cover_url in covers.items():
                if not cover_url or cover_url in urls:
                    continue
                urls.append(cover_url)
                # Cover ids look like 'size300'; covers are square.
                size = int_or_none(cover_id.lstrip('size'))
                thumbnails.append({
                    'id': cover_id,
                    'url': cover_url,
                    'width': size,
                    'height': size,
                })

        tags = []
        for tag in (track.get('tags') or []):
            tag_name = tag.get('name')
            if not tag_name:
                continue
            tags.append(tag_name)

        stats = track.get('stats') or {}
        license = track.get('licenseCC') or []

        return {
            'id': track_id,
            'display_id': display_id,
            'thumbnails': thumbnails,
            'title': title,
            'description': track.get('description'),
            'duration': int_or_none(track.get('duration')),
            # 'artist': artist_name,
            'track': track_name,
            # 'album': album.get('name'),
            'formats': formats,
            # licenseCC is a list of CC clause codes, e.g. ['by', 'nc', 'nd'].
            'license': '-'.join(license) if license else None,
            'timestamp': int_or_none(track.get('dateCreated')),
            'view_count': int_or_none(stats.get('listenedAll')),
            'like_count': int_or_none(stats.get('favorited')),
            'average_rating': int_or_none(stats.get('averageNote')),
            'tags': tags,
        }


class JamendoAlbumIE(JamendoIE):
    _VALID_URL = r'https?://(?:www\.)?jamendo\.com/album/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'https://www.jamendo.com/album/121486/duck-on-cover',
        'info_dict': {
            'id': '121486',
            'title': 'Duck On Cover',
            'description': 'md5:c2920eaeef07d7af5b96d7c64daf1239',
        },
        'playlist': [{
            'md5': 'e1a2fcb42bda30dfac990212924149a8',
            'info_dict': {
                'id': '1032333',
                'ext': 'flac',
                'title': 'Shearer - Warmachine',
                'artist': 'Shearer',
                'track': 'Warmachine',
                'timestamp': 1368089771,
                'upload_date': '20130509',
            }
        }, {
            'md5': '1f358d7b2f98edfe90fd55dac0799d50',
            'info_dict': {
                'id': '1032330',
                'ext': 'flac',
                'title': 'Shearer - Without Your Ghost',
                'artist': 'Shearer',
                'track': 'Without Your Ghost',
                'timestamp': 1368089771,
                'upload_date': '20130509',
            }
        }],
        'params': {
            'playlistend': 2
        }
    }]

    def _real_extract(self, url):
        # Fetch the album via the inherited API helper and emit one
        # url_transparent entry per track so JamendoIE does the heavy lifting.
        album_id = self._match_id(url)
        album = self._call_api('album', album_id)
        album_name = album.get('name')

        entries = []
        for track in (album.get('tracks') or []):
            track_id = track.get('id')
            if not track_id:
                continue
            track_id = compat_str(track_id)
            entries.append({
                '_type': 'url_transparent',
                'url': 'https://www.jamendo.com/track/' + track_id,
                'ie_key': JamendoIE.ie_key(),
                'id': track_id,
                'album': album_name,
            })

        return self.playlist_result(
            entries, album_id, album_name,
            clean_html(try_get(album, lambda x: x['description']['en'], compat_str)))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/ku6.py
youtube_dl/extractor/ku6.py
from __future__ import unicode_literals

from .common import InfoExtractor


class Ku6IE(InfoExtractor):
    _VALID_URL = r'https?://v\.ku6\.com/show/(?P<id>[a-zA-Z0-9\-\_]+)(?:\.)*html'
    _TEST = {
        'url': 'http://v.ku6.com/show/JG-8yS14xzBr4bCn1pu0xw...html',
        'md5': '01203549b9efbb45f4b87d55bdea1ed1',
        'info_dict': {
            'id': 'JG-8yS14xzBr4bCn1pu0xw',
            'ext': 'f4v',
            'title': 'techniques test',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        title = self._html_search_regex(
            r'<h1 title=.*>(.*?)</h1>', webpage, 'title')

        # fetchVideo4Player returns JSON with the direct download URL in data.f.
        data = self._download_json(
            'http://v.ku6.com/fetchVideo4Player/%s.html' % video_id, video_id)

        return {
            'id': video_id,
            'title': title,
            'url': data['data']['f']
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/outsidetv.py
youtube_dl/extractor/outsidetv.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor


class OutsideTVIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?outsidetv\.com/(?:[^/]+/)*?play/[a-zA-Z0-9]{8}/\d+/\d+/(?P<id>[a-zA-Z0-9]{8})'
    _TESTS = [{
        'url': 'http://www.outsidetv.com/category/snow/play/ZjQYboH6/1/10/Hdg0jukV/4',
        'md5': '192d968fedc10b2f70ec31865ffba0da',
        'info_dict': {
            'id': 'Hdg0jukV',
            'ext': 'mp4',
            'title': 'Home - Jackson Ep 1 | Arbor Snowboards',
            'description': 'md5:41a12e94f3db3ca253b04bb1e8d8f4cd',
            'upload_date': '20181225',
            'timestamp': 1545742800,
        }
    }, {
        'url': 'http://www.outsidetv.com/home/play/ZjQYboH6/1/10/Hdg0jukV/4',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # The last path component is a JW Platform media id; delegate to
        # the JWPlatform extractor for actual playback extraction.
        media_id = self._match_id(url)
        return self.url_result(
            'jwplatform:' + media_id, 'JWPlatform', media_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/xbef.py
youtube_dl/extractor/xbef.py
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote


class XBefIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?xbef\.com/video/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://xbef.com/video/5119-glamourous-lesbians-smoking-drinking-and-fucking',
        'md5': 'a478b565baff61634a98f5e5338be995',
        'info_dict': {
            'id': '5119',
            'ext': 'mp4',
            'title': 'md5:7358a9faef8b7b57acda7c04816f170e',
            'age_limit': 18,
            'thumbnail': r're:^http://.*\.jpg',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        title = self._html_search_regex(
            r'<h1[^>]*>(.*?)</h1>', webpage, 'title')

        # The site returns a percent-encoded URL pointing at the player
        # config XML, which in turn carries the file and thumbnail URLs.
        config_url = compat_urllib_parse_unquote(self._download_webpage(
            'http://xbef.com/Main/GetVideoURLEncoded/%s' % video_id,
            video_id, note='Retrieving config URL'))
        config = self._download_xml(
            config_url, video_id, note='Retrieving config')

        return {
            'id': video_id,
            'url': config.find('./file').text,
            'title': title,
            'thumbnail': config.find('./image').text,
            'age_limit': 18,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/scrippsnetworks.py
youtube_dl/extractor/scrippsnetworks.py
# coding: utf-8
from __future__ import unicode_literals

import json
import hashlib
import re

from .aws import AWSIE
from .anvato import AnvatoIE
from .common import InfoExtractor
from ..utils import (
    smuggle_url,
    urlencode_postdata,
    xpath_text,
)


class ScrippsNetworksWatchIE(AWSIE):
    IE_NAME = 'scrippsnetworks:watch'
    _VALID_URL = r'''(?x)
                    https?://
                        watch\.
                        (?P<site>geniuskitchen)\.com/
                        (?:
                            player\.[A-Z0-9]+\.html\#|
                            show/(?:[^/]+/){2}|
                            player/
                        )
                    (?P<id>\d+)
                    '''
    _TESTS = [{
        'url': 'http://watch.geniuskitchen.com/player/3787617/Ample-Hills-Ice-Cream-Bike/',
        'info_dict': {
            'id': '4194875',
            'ext': 'mp4',
            'title': 'Ample Hills Ice Cream Bike',
            'description': 'Courtney Rada churns up a signature GK Now ice cream with The Scoopmaster.',
            'uploader': 'ANV',
            'upload_date': '20171011',
            'timestamp': 1507698000,
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': [AnvatoIE.ie_key()],
    }]

    # Maps the site slug in the URL to the brand name used by the SNI API.
    _SNI_TABLE = {
        'geniuskitchen': 'genius',
    }

    _AWS_API_KEY = 'E7wSQmq0qK6xPrF13WmzKiHo4BQ7tip4pQcSXVl1'
    _AWS_PROXY_HOST = 'web.api.video.snidigital.com'

    _AWS_USER_AGENT = 'aws-sdk-js/2.80.0 callback'

    def _real_extract(self, url):
        # Auth flow: obtain an unauthenticated Cognito OpenID token, exchange
        # it at STS for temporary credentials, then call the SNI API (signed
        # by AWSIE._aws_execute_api) to resolve the Anvato mcpId.
        mobj = re.match(self._VALID_URL, url)
        site_id, video_id = mobj.group('site', 'id')

        aws_identity_id_json = json.dumps({
            'IdentityId': '%s:7655847c-0ae7-4d9b-80d6-56c062927eb3' % self._AWS_REGION
        }).encode('utf-8')
        token = self._download_json(
            'https://cognito-identity.%s.amazonaws.com/' % self._AWS_REGION, video_id,
            data=aws_identity_id_json,
            headers={
                'Accept': '*/*',
                'Content-Type': 'application/x-amz-json-1.1',
                'Referer': url,
                'X-Amz-Content-Sha256': hashlib.sha256(aws_identity_id_json).hexdigest(),
                'X-Amz-Target': 'AWSCognitoIdentityService.GetOpenIdToken',
                'X-Amz-User-Agent': self._AWS_USER_AGENT,
            })['Token']

        sts = self._download_xml(
            'https://sts.amazonaws.com/', video_id, data=urlencode_postdata({
                'Action': 'AssumeRoleWithWebIdentity',
                'RoleArn': 'arn:aws:iam::710330595350:role/Cognito_WebAPIUnauth_Role',
                'RoleSessionName': 'web-identity',
                'Version': '2011-06-15',
                'WebIdentityToken': token,
            }), headers={
                'Referer': url,
                'X-Amz-User-Agent': self._AWS_USER_AGENT,
                'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
            })

        def get(key):
            # Pull a namespaced credential element out of the STS XML response.
            return xpath_text(
                sts, './/{https://sts.amazonaws.com/doc/2011-06-15/}%s' % key,
                fatal=True)

        mcp_id = self._aws_execute_api({
            'uri': '/1/web/brands/%s/episodes/scrid/%s' % (self._SNI_TABLE[site_id], video_id),
            'access_key': get('AccessKeyId'),
            'secret_key': get('SecretAccessKey'),
            'session_token': get('SessionToken'),
        }, video_id)['results'][0]['mcpId']

        # Playback itself is handled by the Anvato extractor; smuggle the
        # geo restriction along with the URL.
        return self.url_result(
            smuggle_url(
                'anvato:anvato_scripps_app_web_prod_0837996dbe373629133857ae9eb72e740424d80a:%s' % mcp_id,
                {'geo_countries': ['US']}),
            AnvatoIE.ie_key(), video_id=mcp_id)


class ScrippsNetworksIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?(?P<site>cookingchanneltv|discovery|(?:diy|food)network|hgtv|travelchannel)\.com/videos/[0-9a-z-]+-(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.cookingchanneltv.com/videos/the-best-of-the-best-0260338',
        'info_dict': {
            'id': '0260338',
            'ext': 'mp4',
            'title': 'The Best of the Best',
            'description': 'Catch a new episode of MasterChef Canada Tuedsay at 9/8c.',
            'timestamp': 1475678834,
            'upload_date': '20161005',
            'uploader': 'SCNI-SCND',
        },
        'add_ie': ['ThePlatform'],
    }, {
        'url': 'https://www.diynetwork.com/videos/diy-barnwood-tablet-stand-0265790',
        'only_matching': True,
    }, {
        'url': 'https://www.foodnetwork.com/videos/chocolate-strawberry-cake-roll-7524591',
        'only_matching': True,
    }, {
        'url': 'https://www.hgtv.com/videos/cookie-decorating-101-0301929',
        'only_matching': True,
    }, {
        'url': 'https://www.travelchannel.com/videos/two-climates-one-bag-5302184',
        'only_matching': True,
    }, {
        'url': 'https://www.discovery.com/videos/guardians-of-the-glades-cooking-with-tom-cobb-5578368',
        'only_matching': True,
    }]
    # Maps each site slug to its ThePlatform account id.
    _ACCOUNT_MAP = {
        'cookingchanneltv': 2433005105,
        'discovery': 2706091867,
        'diynetwork': 2433004575,
        'foodnetwork': 2433005105,
        'hgtv': 2433004575,
        'travelchannel': 2433005739,
    }
    _TP_TEMPL = 'https://link.theplatform.com/s/ip77QC/media/guid/%d/%s?mbr=true'

    def _real_extract(self, url):
        # Build the ThePlatform media URL from the site's account id and the
        # media guid; the ThePlatform extractor does the rest.
        site, guid = re.match(self._VALID_URL, url).groups()
        return self.url_result(smuggle_url(
            self._TP_TEMPL % (self._ACCOUNT_MAP[site], guid),
            {'force_smil_url': True}), 'ThePlatform', guid)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/clippit.py
youtube_dl/extractor/clippit.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( parse_iso8601, qualities, ) import re class ClippitIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?clippituser\.tv/c/(?P<id>[a-z]+)' _TEST = { 'url': 'https://www.clippituser.tv/c/evmgm', 'md5': '963ae7a59a2ec4572ab8bf2f2d2c5f09', 'info_dict': { 'id': 'evmgm', 'ext': 'mp4', 'title': 'Bye bye Brutus. #BattleBots - Clippit', 'uploader': 'lizllove', 'uploader_url': 'https://www.clippituser.tv/p/lizllove', 'timestamp': 1472183818, 'upload_date': '20160826', 'description': 'BattleBots | ABC', 'thumbnail': r're:^https?://.*\.jpg$', } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._html_search_regex(r'<title.*>(.+?)</title>', webpage, 'title') FORMATS = ('sd', 'hd') quality = qualities(FORMATS) formats = [] for format_id in FORMATS: url = self._html_search_regex(r'data-%s-file="(.+?)"' % format_id, webpage, 'url', fatal=False) if not url: continue match = re.search(r'/(?P<height>\d+)\.mp4', url) formats.append({ 'url': url, 'format_id': format_id, 'quality': quality(format_id), 'height': int(match.group('height')) if match else None, }) uploader = self._html_search_regex(r'class="username".*>\s+(.+?)\n', webpage, 'uploader', fatal=False) uploader_url = ('https://www.clippituser.tv/p/' + uploader if uploader else None) timestamp = self._html_search_regex(r'datetime="(.+?)"', webpage, 'date', fatal=False) thumbnail = self._html_search_regex(r'data-image="(.+?)"', webpage, 'thumbnail', fatal=False) return { 'id': video_id, 'title': title, 'formats': formats, 'uploader': uploader, 'uploader_url': uploader_url, 'timestamp': parse_iso8601(timestamp), 'description': self._og_search_description(webpage), 'thumbnail': thumbnail, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/abc.py
youtube_dl/extractor/abc.py
from __future__ import unicode_literals import hashlib import hmac import re import time from .common import InfoExtractor from ..compat import compat_str from ..utils import ( ExtractorError, js_to_json, int_or_none, parse_iso8601, try_get, unescapeHTML, update_url_query, ) class ABCIE(InfoExtractor): IE_NAME = 'abc.net.au' _VALID_URL = r'https?://(?:www\.)?abc\.net\.au/news/(?:[^/]+/){1,2}(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334', 'md5': 'cb3dd03b18455a661071ee1e28344d9f', 'info_dict': { 'id': '5868334', 'ext': 'mp4', 'title': 'Australia to help staff Ebola treatment centre in Sierra Leone', 'description': 'md5:809ad29c67a05f54eb41f2a105693a67', }, 'skip': 'this video has expired', }, { 'url': 'http://www.abc.net.au/news/2015-08-17/warren-entsch-introduces-same-sex-marriage-bill/6702326', 'md5': 'db2a5369238b51f9811ad815b69dc086', 'info_dict': { 'id': 'NvqvPeNZsHU', 'ext': 'mp4', 'upload_date': '20150816', 'uploader': 'ABC News (Australia)', 'description': 'Government backbencher Warren Entsch introduces a cross-party sponsored bill to legalise same-sex marriage, saying the bill is designed to promote "an inclusive Australia, not a divided one.". 
Read more here: http://ab.co/1Mwc6ef', 'uploader_id': 'NewsOnABC', 'title': 'Marriage Equality: Warren Entsch introduces same sex marriage bill', }, 'add_ie': ['Youtube'], 'skip': 'Not accessible from Travis CI server', }, { 'url': 'http://www.abc.net.au/news/2015-10-23/nab-lifts-interest-rates-following-westpac-and-cba/6880080', 'md5': 'b96eee7c9edf4fc5a358a0252881cc1f', 'info_dict': { 'id': '6880080', 'ext': 'mp3', 'title': 'NAB lifts interest rates, following Westpac and CBA', 'description': 'md5:f13d8edc81e462fce4a0437c7dc04728', }, }, { 'url': 'http://www.abc.net.au/news/2015-10-19/6866214', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) mobj = re.search( r'inline(?P<type>Video|Audio|YouTube)Data\.push\((?P<json_data>[^)]+)\);', webpage) if mobj is None: expired = self._html_search_regex(r'(?s)class="expired-(?:video|audio)".+?<span>(.+?)</span>', webpage, 'expired', None) if expired: raise ExtractorError('%s said: %s' % (self.IE_NAME, expired), expected=True) raise ExtractorError('Unable to extract video urls') urls_info = self._parse_json( mobj.group('json_data'), video_id, transform_source=js_to_json) if not isinstance(urls_info, list): urls_info = [urls_info] if mobj.group('type') == 'YouTube': return self.playlist_result([ self.url_result(url_info['url']) for url_info in urls_info]) formats = [{ 'url': url_info['url'], 'vcodec': url_info.get('codec') if mobj.group('type') == 'Video' else 'none', 'width': int_or_none(url_info.get('width')), 'height': int_or_none(url_info.get('height')), 'tbr': int_or_none(url_info.get('bitrate')), 'filesize': int_or_none(url_info.get('filesize')), } for url_info in urls_info] self._sort_formats(formats) return { 'id': video_id, 'title': self._og_search_title(webpage), 'formats': formats, 'description': self._og_search_description(webpage), 'thumbnail': self._og_search_thumbnail(webpage), } class ABCIViewIE(InfoExtractor): IE_NAME = 
'abc.net.au:iview' _VALID_URL = r'https?://iview\.abc\.net\.au/(?:[^/]+/)*video/(?P<id>[^/?#]+)' _GEO_COUNTRIES = ['AU'] # ABC iview programs are normally available for 14 days only. _TESTS = [{ 'url': 'https://iview.abc.net.au/show/gruen/series/11/video/LE1927H001S00', 'md5': '67715ce3c78426b11ba167d875ac6abf', 'info_dict': { 'id': 'LE1927H001S00', 'ext': 'mp4', 'title': "Series 11 Ep 1", 'series': "Gruen", 'description': 'md5:52cc744ad35045baf6aded2ce7287f67', 'upload_date': '20190925', 'uploader_id': 'abc1', 'timestamp': 1569445289, }, 'params': { 'skip_download': True, }, }] def _real_extract(self, url): video_id = self._match_id(url) video_params = self._download_json( 'https://iview.abc.net.au/api/programs/' + video_id, video_id) title = unescapeHTML(video_params.get('title') or video_params['seriesTitle']) stream = next(s for s in video_params['playlist'] if s.get('type') in ('program', 'livestream')) house_number = video_params.get('episodeHouseNumber') or video_id path = '/auth/hls/sign?ts={0}&hn={1}&d=android-tablet'.format( int(time.time()), house_number) sig = hmac.new( b'android.content.res.Resources', path.encode('utf-8'), hashlib.sha256).hexdigest() token = self._download_webpage( 'http://iview.abc.net.au{0}&sig={1}'.format(path, sig), video_id) def tokenize_url(url, token): return update_url_query(url, { 'hdnea': token, }) for sd in ('720', 'sd', 'sd-low'): sd_url = try_get( stream, lambda x: x['streams']['hls'][sd], compat_str) if not sd_url: continue formats = self._extract_m3u8_formats( tokenize_url(sd_url, token), video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False) if formats: break self._sort_formats(formats) subtitles = {} src_vtt = stream.get('captions', {}).get('src-vtt') if src_vtt: subtitles['en'] = [{ 'url': src_vtt, 'ext': 'vtt', }] is_live = video_params.get('livestream') == '1' if is_live: title = self._live_title(title) return { 'id': video_id, 'title': title, 'description': video_params.get('description'), 
'thumbnail': video_params.get('thumbnail'), 'duration': int_or_none(video_params.get('eventDuration')), 'timestamp': parse_iso8601(video_params.get('pubDate'), ' '), 'series': unescapeHTML(video_params.get('seriesTitle')), 'series_id': video_params.get('seriesHouseNumber') or video_id[:7], 'season_number': int_or_none(self._search_regex( r'\bSeries\s+(\d+)\b', title, 'season number', default=None)), 'episode_number': int_or_none(self._search_regex( r'\bEp\s+(\d+)\b', title, 'episode number', default=None)), 'episode_id': house_number, 'uploader_id': video_params.get('channel'), 'formats': formats, 'subtitles': subtitles, 'is_live': is_live, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/digg.py
youtube_dl/extractor/digg.py
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import js_to_json


class DiggIE(InfoExtractor):
    """Extractor for digg.com video pages; delegates to the real provider."""

    _VALID_URL = r'https?://(?:www\.)?digg\.com/video/(?P<id>[^/?#&]+)'
    _TESTS = [{
        # JWPlatform via provider
        'url': 'http://digg.com/video/sci-fi-short-jonah-daniel-kaluuya-get-out',
        'info_dict': {
            'id': 'LcqvmS0b',
            'ext': 'mp4',
            'title': "'Get Out' Star Daniel Kaluuya Goes On 'Moby Dick'-Like Journey In Sci-Fi Short 'Jonah'",
            'description': 'md5:541bb847648b6ee3d6514bc84b82efda',
            'upload_date': '20180109',
            'timestamp': 1515530551,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # Youtube via provider
        'url': 'http://digg.com/video/dog-boat-seal-play',
        'only_matching': True,
    }, {
        # vimeo as regular embed
        'url': 'http://digg.com/video/dream-girl-short-film',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        # Page-level JS object naming the upstream provider and its video id.
        info = self._parse_json(
            self._search_regex(
                r'(?s)video_info\s*=\s*({.+?});\n', webpage, 'video info',
                default='{}'), display_id, transform_source=js_to_json,
            fatal=False)

        video_id = info.get('video_id')

        if video_id:
            provider = info.get('provider_name')
            if provider == 'youtube':
                return self.url_result(
                    video_id, ie='Youtube', video_id=video_id)
            elif provider == 'jwplayer':
                return self.url_result(
                    'jwplatform:%s' % video_id, ie='JWPlatform',
                    video_id=video_id)

        # Unknown provider: fall back to generic embed detection.
        return self.url_result(url, 'Generic')
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/aparat.py
youtube_dl/extractor/aparat.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    get_element_by_id,
    int_or_none,
    merge_dicts,
    mimetype2ext,
    url_or_none,
)


class AparatIE(InfoExtractor):
    """Extractor for videos on aparat.com."""

    _VALID_URL = r'https?://(?:www\.)?aparat\.com/(?:v/|video/video/embed/videohash/)(?P<id>[a-zA-Z0-9]+)'

    _TESTS = [{
        'url': 'http://www.aparat.com/v/wP8On',
        'md5': '131aca2e14fe7c4dcb3c4877ba300c89',
        'info_dict': {
            'id': 'wP8On',
            'ext': 'mp4',
            'title': 'تیم گلکسی 11 - زومیت',
            'description': 'md5:096bdabcdcc4569f2b8a5e903a3b3028',
            'duration': 231,
            'timestamp': 1387394859,
            'upload_date': '20131218',
            'view_count': int,
        },
    }, {
        # multiple formats
        'url': 'https://www.aparat.com/v/8dflw/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Provides more metadata
        webpage = self._download_webpage(url, video_id, fatal=False)

        if not webpage:
            # Fall back to the embed frame when the watch page is unavailable.
            webpage = self._download_webpage(
                'http://www.aparat.com/video/video/embed/vt/frame/showvideo/yes/videohash/'
                + video_id, video_id)

        options = self._parse_json(self._search_regex(
            r'options\s*=\s*({.+?})\s*;', webpage, 'options'), video_id)

        formats = []
        for sources in (options.get('multiSRC') or []):
            for item in sources:
                if not isinstance(item, dict):
                    continue
                file_url = url_or_none(item.get('src'))
                if not file_url:
                    continue
                item_type = item.get('type')
                if item_type == 'application/vnd.apple.mpegurl':
                    formats.extend(self._extract_m3u8_formats(
                        file_url, video_id, 'mp4',
                        entry_protocol='m3u8_native', m3u8_id='hls',
                        fatal=False))
                else:
                    ext = mimetype2ext(item.get('type'))
                    label = item.get('label')
                    formats.append({
                        'url': file_url,
                        'ext': ext,
                        'format_id': 'http-%s' % (label or ext),
                        'height': int_or_none(self._search_regex(
                            r'(\d+)[pP]', label or '', 'height',
                            default=None)),
                    })
        self._sort_formats(
            formats, field_preference=('height', 'width', 'tbr', 'format_id'))

        info = self._search_json_ld(webpage, video_id, default={})

        if not info.get('title'):
            info['title'] = get_element_by_id('videoTitle', webpage) or \
                self._html_search_meta(
                    ['og:title', 'twitter:title', 'DC.Title', 'title'],
                    webpage, fatal=True)

        return merge_dicts(info, {
            'id': video_id,
            'thumbnail': url_or_none(options.get('poster')),
            'duration': int_or_none(options.get('duration')),
            'formats': formats,
        })
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/people.py
youtube_dl/extractor/people.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor


class PeopleIE(InfoExtractor):
    """Extractor for people.com videos (delegates to Brightcove)."""

    _VALID_URL = r'https?://(?:www\.)?people\.com/people/videos/0,,(?P<id>\d+),00\.html'

    _TEST = {
        'url': 'http://www.people.com/people/videos/0,,20995451,00.html',
        'info_dict': {
            'id': 'ref:20995451',
            'ext': 'mp4',
            'title': 'Astronaut Love Triangle Victim Speaks Out: “The Crime in 2007 Hasn’t Defined Us”',
            'description': 'Colleen Shipman speaks to PEOPLE for the first time about life after the attack',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 246.318,
            'timestamp': 1458720585,
            'upload_date': '20160323',
            'uploader_id': '416418724',
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': ['BrightcoveNew'],
    }

    def _real_extract(self, url):
        # All content lives on Brightcove; hand the reference id over.
        return self.url_result(
            'http://players.brightcove.net/416418724/default_default/index.html?videoId=ref:%s'
            % self._match_id(url), 'BrightcoveNew')
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/iwara.py
youtube_dl/extractor/iwara.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_urllib_parse_urlparse
from ..utils import (
    int_or_none,
    mimetype2ext,
    remove_end,
    url_or_none,
)


class IwaraIE(InfoExtractor):
    """Extractor for videos on iwara.tv (and its ecchi subdomain)."""

    _VALID_URL = r'https?://(?:www\.|ecchi\.)?iwara\.tv/videos/(?P<id>[a-zA-Z0-9]+)'
    _TESTS = [{
        'url': 'http://iwara.tv/videos/amVwUl1EHpAD9RD',
        # md5 is unstable
        'info_dict': {
            'id': 'amVwUl1EHpAD9RD',
            'ext': 'mp4',
            'title': '【MMD R-18】ガールフレンド carry_me_off',
            'age_limit': 18,
        },
    }, {
        'url': 'http://ecchi.iwara.tv/videos/Vb4yf2yZspkzkBO',
        'md5': '7e5f1f359cd51a027ba4a7b7710a50f0',
        'info_dict': {
            'id': '0B1LvuHnL-sRFNXB1WHNqbGw4SXc',
            'ext': 'mp4',
            'title': '[3D Hentai] Kyonyu × Genkai × Emaki Shinobi Girls.mp4',
            'age_limit': 18,
        },
        'add_ie': ['GoogleDrive'],
    }, {
        'url': 'http://www.iwara.tv/videos/nawkaumd6ilezzgq',
        # md5 is unstable
        'info_dict': {
            'id': '6liAP9s2Ojc',
            'ext': 'mp4',
            'age_limit': 18,
            'title': '[MMD] Do It Again Ver.2 [1080p 60FPS] (Motion,Camera,Wav+DL)',
            'description': 'md5:590c12c0df1443d833fbebe05da8c47a',
            'upload_date': '20160910',
            'uploader': 'aMMDsork',
            'uploader_id': 'UCVOFyOSCyFkXTYYHITtqB7A',
        },
        'add_ie': ['Youtube'],
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage, urlh = self._download_webpage_handle(url, video_id)

        hostname = compat_urllib_parse_urlparse(urlh.geturl()).hostname
        # ecchi is 'sexy' in Japanese
        age_limit = 18 if hostname.split('.')[0] == 'ecchi' else 0

        video_data = self._download_json(
            'http://www.iwara.tv/api/video/%s' % video_id, video_id)

        if not video_data:
            # No API data: the clip is hosted externally; hand off the iframe.
            iframe_url = self._html_search_regex(
                r'<iframe[^>]+src=([\'"])(?P<url>[^\'"]+)\1',
                webpage, 'iframe URL', group='url')
            return {
                '_type': 'url_transparent',
                'url': iframe_url,
                'age_limit': age_limit,
            }

        title = remove_end(self._html_search_regex(
            r'<title>([^<]+)</title>', webpage, 'title'), ' | Iwara')

        formats = []
        for a_format in video_data:
            format_uri = url_or_none(a_format.get('uri'))
            if not format_uri:
                continue
            format_id = a_format.get('resolution')
            height = int_or_none(self._search_regex(
                r'(\d+)p', format_id, 'height', default=None))
            formats.append({
                'url': self._proto_relative_url(format_uri, 'https:'),
                'format_id': format_id,
                'ext': mimetype2ext(a_format.get('mime')) or 'mp4',
                'height': height,
                # Assume 16:9 when only the height is known.
                'width': int_or_none(height / 9.0 * 16.0 if height else None),
                'quality': 1 if format_id == 'Source' else 0,
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'age_limit': age_limit,
            'formats': formats,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false