repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/puls4.py
youtube_dl/extractor/puls4.py
# coding: utf-8
from __future__ import unicode_literals

from .prosiebensat1 import ProSiebenSat1BaseIE
from ..utils import (
    unified_strdate,
    parse_duration,
    compat_str,
)


class Puls4IE(ProSiebenSat1BaseIE):
    """Extractor for puls4.com; delegates format extraction to the
    ProSiebenSat1 base class via _extract_video_info."""

    _VALID_URL = r'https?://(?:www\.)?puls4\.com/(?P<id>[^?#&]+)'
    _TESTS = [{
        'url': 'http://www.puls4.com/2-minuten-2-millionen/staffel-3/videos/2min2miotalk/Tobias-Homberger-von-myclubs-im-2min2miotalk-118118',
        'md5': 'fd3c6b0903ac72c9d004f04bc6bb3e03',
        'info_dict': {
            'id': '118118',
            'ext': 'flv',
            'title': 'Tobias Homberger von myclubs im #2min2miotalk',
            'description': 'md5:f9def7c5e8745d6026d8885487d91955',
            'upload_date': '20160830',
            'uploader': 'PULS_4',
        },
    }, {
        'url': 'http://www.puls4.com/pro-und-contra/wer-wird-prasident/Ganze-Folgen/Wer-wird-Praesident.-Norbert-Hofer',
        'only_matching': True,
    }, {
        'url': 'http://www.puls4.com/pro-und-contra/wer-wird-prasident/Ganze-Folgen/Wer-wird-Praesident-Analyse-des-Interviews-mit-Norbert-Hofer-416598',
        'only_matching': True,
    }]
    # Credentials consumed by the ProSiebenSat1 base extractor.
    _TOKEN = 'puls4'
    _SALT = '01!kaNgaiNgah1Ie4AeSha'
    _CLIENT_NAME = ''

    def _real_extract(self, url):
        page_path = self._match_id(url)
        # First API hop: resolve the page path to its content endpoint.
        content_path = self._download_json(
            'http://www.puls4.com/api/json-fe/page/' + page_path,
            page_path)['content'][0]['url']
        # Second hop: fetch the media description for the current item.
        media = self._download_json(
            'http://www.puls4.com' + content_path,
            content_path)['mediaCurrent']
        player_content = media['playerContent']
        # Formats/base metadata come from the ProSiebenSat1 machinery.
        info = self._extract_video_info(url, player_content['id'])
        info.update({
            'id': compat_str(media['objectId']),
            'title': player_content['title'],
            'description': media.get('description'),
            'thumbnail': media.get('previewLink'),
            'upload_date': unified_strdate(media.get('date')),
            'duration': parse_duration(player_content.get('duration')),
            'episode': player_content.get('episodePartName'),
            'show': media.get('channel'),
            'season_id': player_content.get('seasonId'),
            'uploader': player_content.get('sourceCompany'),
        })
        return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/cspan.py
youtube_dl/extractor/cspan.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( determine_ext, ExtractorError, extract_attributes, find_xpath_attr, get_element_by_attribute, get_element_by_class, int_or_none, js_to_json, merge_dicts, parse_iso8601, smuggle_url, str_to_int, unescapeHTML, ) from .senateisvp import SenateISVPIE from .ustream import UstreamIE class CSpanIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?c-span\.org/video/\?(?P<id>[0-9a-f]+)' IE_DESC = 'C-SPAN' _TESTS = [{ 'url': 'http://www.c-span.org/video/?313572-1/HolderonV', 'md5': '94b29a4f131ff03d23471dd6f60b6a1d', 'info_dict': { 'id': '315139', 'title': 'Attorney General Eric Holder on Voting Rights Act Decision', }, 'playlist_mincount': 2, 'skip': 'Regularly fails on travis, for unknown reasons', }, { 'url': 'http://www.c-span.org/video/?c4486943/cspan-international-health-care-models', # md5 is unstable 'info_dict': { 'id': 'c4486943', 'ext': 'mp4', 'title': 'CSPAN - International Health Care Models', 'description': 'md5:7a985a2d595dba00af3d9c9f0783c967', } }, { 'url': 'http://www.c-span.org/video/?318608-1/gm-ignition-switch-recall', 'info_dict': { 'id': '342759', 'title': 'General Motors Ignition Switch Recall', }, 'playlist_mincount': 6, }, { # Video from senate.gov 'url': 'http://www.c-span.org/video/?104517-1/immigration-reforms-needed-protect-skilled-american-workers', 'info_dict': { 'id': 'judiciary031715', 'ext': 'mp4', 'title': 'Immigration Reforms Needed to Protect Skilled American Workers', }, 'params': { 'skip_download': True, # m3u8 downloads } }, { # Ustream embedded video 'url': 'https://www.c-span.org/video/?114917-1/armed-services', 'info_dict': { 'id': '58428542', 'ext': 'flv', 'title': 'USHR07 Armed Services Committee', 'description': 'hsas00-2118-20150204-1000et-07\n\n\nUSHR07 Armed Services Committee', 'timestamp': 1423060374, 'upload_date': '20150204', 'uploader': 'HouseCommittee', 'uploader_id': '12987475', }, }, { # Audio Only 'url': 
'https://www.c-span.org/video/?437336-1/judiciary-antitrust-competition-policy-consumer-rights', 'only_matching': True, }] BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_%s/index.html?videoId=%s' def _real_extract(self, url): video_id = self._match_id(url) video_type = None webpage = self._download_webpage(url, video_id) ustream_url = UstreamIE._extract_url(webpage) if ustream_url: return self.url_result(ustream_url, UstreamIE.ie_key()) if '&vod' not in url: bc = self._search_regex( r"(<[^>]+id='brightcove-player-embed'[^>]+>)", webpage, 'brightcove embed', default=None) if bc: bc_attr = extract_attributes(bc) bc_url = self.BRIGHTCOVE_URL_TEMPLATE % ( bc_attr.get('data-bcaccountid', '3162030207001'), bc_attr.get('data-noprebcplayerid', 'SyGGpuJy3g'), bc_attr.get('data-newbcplayerid', 'default'), bc_attr['data-bcid']) return self.url_result(smuggle_url(bc_url, {'source_url': url})) def add_referer(formats): for f in formats: f.setdefault('http_headers', {})['Referer'] = url # As of 01.12.2020 this path looks to cover all cases making the rest # of the code unnecessary jwsetup = self._parse_json( self._search_regex( r'(?s)jwsetup\s*=\s*({.+?})\s*;', webpage, 'jwsetup', default='{}'), video_id, transform_source=js_to_json, fatal=False) if jwsetup: info = self._parse_jwplayer_data( jwsetup, video_id, require_title=False, m3u8_id='hls', base_url=url) add_referer(info['formats']) for subtitles in info['subtitles'].values(): for subtitle in subtitles: ext = determine_ext(subtitle['url']) if ext == 'php': ext = 'vtt' subtitle['ext'] = ext ld_info = self._search_json_ld(webpage, video_id, default={}) title = get_element_by_class('video-page-title', webpage) or \ self._og_search_title(webpage) description = get_element_by_attribute('itemprop', 'description', webpage) or \ self._html_search_meta(['og:description', 'description'], webpage) return merge_dicts(info, ld_info, { 'title': title, 'thumbnail': get_element_by_attribute('itemprop', 'thumbnailUrl', 
webpage), 'description': description, 'timestamp': parse_iso8601(get_element_by_attribute('itemprop', 'uploadDate', webpage)), 'location': get_element_by_attribute('itemprop', 'contentLocation', webpage), 'duration': int_or_none(self._search_regex( r'jwsetup\.seclength\s*=\s*(\d+);', webpage, 'duration', fatal=False)), 'view_count': str_to_int(self._search_regex( r"<span[^>]+class='views'[^>]*>([\d,]+)\s+Views</span>", webpage, 'views', fatal=False)), }) # Obsolete # We first look for clipid, because clipprog always appears before patterns = [r'id=\'clip(%s)\'\s*value=\'([0-9]+)\'' % t for t in ('id', 'prog')] results = list(filter(None, (re.search(p, webpage) for p in patterns))) if results: matches = results[0] video_type, video_id = matches.groups() video_type = 'clip' if video_type == 'id' else 'program' else: m = re.search(r'data-(?P<type>clip|prog)id=["\'](?P<id>\d+)', webpage) if m: video_id = m.group('id') video_type = 'program' if m.group('type') == 'prog' else 'clip' else: senate_isvp_url = SenateISVPIE._search_iframe_url(webpage) if senate_isvp_url: title = self._og_search_title(webpage) surl = smuggle_url(senate_isvp_url, {'force_title': title}) return self.url_result(surl, 'SenateISVP', video_id, title) video_id = self._search_regex( r'jwsetup\.clipprog\s*=\s*(\d+);', webpage, 'jwsetup program id', default=None) if video_id: video_type = 'program' if video_type is None or video_id is None: error_message = get_element_by_class('VLplayer-error-message', webpage) if error_message: raise ExtractorError(error_message) raise ExtractorError('unable to find video id and type') def get_text_attr(d, attr): return d.get(attr, {}).get('#text') data = self._download_json( 'http://www.c-span.org/assets/player/ajax-player.php?os=android&html5=%s&id=%s' % (video_type, video_id), video_id)['video'] if data['@status'] != 'Success': raise ExtractorError('%s said: %s' % (self.IE_NAME, get_text_attr(data, 'error')), expected=True) doc = self._download_xml( 
'http://www.c-span.org/common/services/flashXml.php?%sid=%s' % (video_type, video_id), video_id) description = self._html_search_meta('description', webpage) title = find_xpath_attr(doc, './/string', 'name', 'title').text thumbnail = find_xpath_attr(doc, './/string', 'name', 'poster').text files = data['files'] capfile = get_text_attr(data, 'capfile') entries = [] for partnum, f in enumerate(files): formats = [] for quality in f.get('qualities', []): formats.append({ 'format_id': '%s-%sp' % (get_text_attr(quality, 'bitrate'), get_text_attr(quality, 'height')), 'url': unescapeHTML(get_text_attr(quality, 'file')), 'height': int_or_none(get_text_attr(quality, 'height')), 'tbr': int_or_none(get_text_attr(quality, 'bitrate')), }) if not formats: path = unescapeHTML(get_text_attr(f, 'path')) if not path: continue formats = self._extract_m3u8_formats( path, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls') if determine_ext(path) == 'm3u8' else [{'url': path, }] add_referer(formats) self._sort_formats(formats) entries.append({ 'id': '%s_%d' % (video_id, partnum + 1), 'title': ( title if len(files) == 1 else '%s part %d' % (title, partnum + 1)), 'formats': formats, 'description': description, 'thumbnail': thumbnail, 'duration': int_or_none(get_text_attr(f, 'length')), 'subtitles': { 'en': [{ 'url': capfile, 'ext': determine_ext(capfile, 'dfxp') }], } if capfile else None, }) if len(entries) == 1: entry = dict(entries[0]) entry['id'] = 'c' + video_id if video_type == 'clip' else video_id return entry else: return { '_type': 'playlist', 'entries': entries, 'title': title, 'id': 'c' + video_id if video_type == 'clip' else video_id, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/vyborymos.py
youtube_dl/extractor/vyborymos.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_str


class VyboryMosIE(InfoExtractor):
    """Live webcam streams of Moscow polling stations (vybory.mos.ru)."""

    _VALID_URL = r'https?://vybory\.mos\.ru/(?:#precinct/|account/channels\?.*?\bstation_id=)(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://vybory.mos.ru/#precinct/13636',
        'info_dict': {
            'id': '13636',
            'ext': 'mp4',
            'title': 're:^Участковая избирательная комиссия №2231 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'description': 'Россия, Москва, улица Введенского, 32А',
            'is_live': True,
        },
        'params': {
            'skip_download': True,
        }
    }, {
        'url': 'http://vybory.mos.ru/account/channels?station_id=13636',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        station_id = self._match_id(url)

        # Each channel row is [sid, hosts, name, ...]; every host mirrors
        # the same camera stream.
        channels = self._download_json(
            'http://vybory.mos.ru/account/channels?station_id=%s' % station_id,
            station_id, 'Downloading channels JSON')

        formats = []
        for cam_num, (sid, hosts, name, _) in enumerate(channels, 1):
            for num, host in enumerate(hosts, 1):
                formats.append({
                    'url': 'http://%s/master.m3u8?sid=%s' % (host, sid),
                    'ext': 'mp4',
                    'format_id': 'camera%d-host%d' % (cam_num, num),
                    'format_note': '%s, %s' % (name, host),
                })

        # Station metadata is optional (fatal=False): on failure info is a
        # falsy non-dict, so every access below must be guarded.
        info = self._download_json(
            'http://vybory.mos.ru/json/voting_stations/%s/%s.json' % (compat_str(station_id)[:3], station_id),
            station_id, 'Downloading station JSON', fatal=False)

        return {
            'id': station_id,
            'title': self._live_title(info['name'] if info else station_id),
            # BUGFIX: the original called info.get('address') unconditionally,
            # which raised AttributeError when the optional download failed.
            'description': info.get('address') if info else None,
            'is_live': True,
            'formats': formats,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/usatoday.py
youtube_dl/extractor/usatoday.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    get_element_by_attribute,
    parse_duration,
    try_get,
    update_url_query,
)
from ..compat import compat_str


class USATodayIE(InfoExtractor):
    """usatoday.com articles: resolve the embedded Brightcove player URL."""

    _VALID_URL = r'https?://(?:www\.)?usatoday\.com/(?:[^/]+/)*(?P<id>[^?/#]+)'
    _TESTS = [{
        # Brightcove Partner ID = 29906170001
        'url': 'http://www.usatoday.com/media/cinematic/video/81729424/us-france-warn-syrian-regime-ahead-of-new-peace-talks/',
        'md5': '033587d2529dc3411a1ab3644c3b8827',
        'info_dict': {
            'id': '4799374959001',
            'ext': 'mp4',
            'title': 'US, France warn Syrian regime ahead of new peace talks',
            'timestamp': 1457891045,
            'description': 'md5:7e50464fdf2126b0f533748d3c78d58f',
            'uploader_id': '29906170001',
            'upload_date': '20160313',
        }
    }, {
        # ui-video-data[asset_metadata][items][brightcoveaccount] = 28911775001
        'url': 'https://www.usatoday.com/story/tech/science/2018/08/21/yellowstone-supervolcano-eruption-stop-worrying-its-blow/973633002/',
        'info_dict': {
            'id': '5824495846001',
            'ext': 'mp4',
            'title': 'Yellowstone more likely to crack rather than explode',
            'timestamp': 1534790612,
            'description': 'md5:3715e7927639a4f16b474e9391687c62',
            'uploader_id': '28911775001',
            'upload_date': '20180820',
        }
    }]
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/default_default/index.html?videoId=%s'

    def _real_extract(self, url):
        display_id = self._match_id(url)
        # The ajax=true variant of the page serves the embedded player data.
        ajax_url = update_url_query(url, {'ajax': 'true'})
        webpage = self._download_webpage(ajax_url, display_id)
        ui_video_data = get_element_by_attribute('class', 'ui-video-data', webpage)
        if not ui_video_data:
            raise ExtractorError('no video on the webpage', expected=True)
        video_data = self._parse_json(ui_video_data, display_id)
        item = try_get(
            video_data, lambda x: x['asset_metadata']['items'], dict) or {}
        # Account/video ids may live in asset_metadata; fall back to the
        # legacy partner account and top-level brightcove_id.
        account = item.get('brightcoveaccount', '29906170001')
        brightcove_id = item.get('brightcoveid') or video_data['brightcove_id']
        return {
            '_type': 'url_transparent',
            'url': self.BRIGHTCOVE_URL_TEMPLATE % (account, brightcove_id),
            'id': compat_str(video_data['id']),
            'title': video_data['title'],
            'thumbnail': video_data.get('thumbnail'),
            'description': video_data.get('description'),
            'duration': parse_duration(video_data.get('length')),
            'ie_key': 'BrightcoveNew',
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/globo.py
youtube_dl/extractor/globo.py
# coding: utf-8 from __future__ import unicode_literals import base64 import hashlib import json import random import re from .common import InfoExtractor from ..compat import ( compat_HTTPError, compat_str, ) from ..utils import ( ExtractorError, float_or_none, int_or_none, orderedSet, str_or_none, ) class GloboIE(InfoExtractor): _VALID_URL = r'(?:globo:|https?://.+?\.globo\.com/(?:[^/]+/)*(?:v/(?:[^/]+/)?|videos/))(?P<id>\d{7,})' _NETRC_MACHINE = 'globo' _TESTS = [{ 'url': 'http://g1.globo.com/carros/autoesporte/videos/t/exclusivos-do-g1/v/mercedes-benz-gla-passa-por-teste-de-colisao-na-europa/3607726/', 'md5': 'b3ccc801f75cd04a914d51dadb83a78d', 'info_dict': { 'id': '3607726', 'ext': 'mp4', 'title': 'Mercedes-Benz GLA passa por teste de colisão na Europa', 'duration': 103.204, 'uploader': 'Globo.com', 'uploader_id': '265', }, }, { 'url': 'http://globoplay.globo.com/v/4581987/', 'md5': 'f36a1ecd6a50da1577eee6dd17f67eff', 'info_dict': { 'id': '4581987', 'ext': 'mp4', 'title': 'Acidentes de trânsito estão entre as maiores causas de queda de energia em SP', 'duration': 137.973, 'uploader': 'Rede Globo', 'uploader_id': '196', }, }, { 'url': 'http://canalbrasil.globo.com/programas/sangue-latino/videos/3928201.html', 'only_matching': True, }, { 'url': 'http://globosatplay.globo.com/globonews/v/4472924/', 'only_matching': True, }, { 'url': 'http://globotv.globo.com/t/programa/v/clipe-sexo-e-as-negas-adeus/3836166/', 'only_matching': True, }, { 'url': 'http://globotv.globo.com/canal-brasil/sangue-latino/t/todos-os-videos/v/ator-e-diretor-argentino-ricado-darin-fala-sobre-utopias-e-suas-perdas/3928201/', 'only_matching': True, }, { 'url': 'http://canaloff.globo.com/programas/desejar-profundo/videos/4518560.html', 'only_matching': True, }, { 'url': 'globo:3607726', 'only_matching': True, }] def _real_initialize(self): email, password = self._get_login_info() if email is None: return try: glb_id = (self._download_json( 'https://login.globo.com/api/authentication', None, 
data=json.dumps({ 'payload': { 'email': email, 'password': password, 'serviceId': 4654, }, }).encode(), headers={ 'Content-Type': 'application/json; charset=utf-8', }) or {}).get('glbId') if glb_id: self._set_cookie('.globo.com', 'GLBID', glb_id) except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401: resp = self._parse_json(e.cause.read(), None) raise ExtractorError(resp.get('userMessage') or resp['id'], expected=True) raise def _real_extract(self, url): video_id = self._match_id(url) video = self._download_json( 'http://api.globovideos.com/videos/%s/playlist' % video_id, video_id)['videos'][0] if video.get('encrypted') is True: raise ExtractorError('This video is DRM protected.', expected=True) title = video['title'] formats = [] subtitles = {} for resource in video['resources']: resource_id = resource.get('_id') resource_url = resource.get('url') resource_type = resource.get('type') if not resource_url or (resource_type == 'media' and not resource_id) or resource_type not in ('subtitle', 'media'): continue if resource_type == 'subtitle': subtitles.setdefault(resource.get('language') or 'por', []).append({ 'url': resource_url, }) continue security = self._download_json( 'http://security.video.globo.com/videos/%s/hash' % video_id, video_id, 'Downloading security hash for %s' % resource_id, query={ 'player': 'desktop', 'version': '5.19.1', 'resource_id': resource_id, }) security_hash = security.get('hash') if not security_hash: message = security.get('message') if message: raise ExtractorError( '%s returned error: %s' % (self.IE_NAME, message), expected=True) continue hash_code = security_hash[:2] padding = '%010d' % random.randint(1, 10000000000) if hash_code in ('04', '14'): received_time = security_hash[3:13] received_md5 = security_hash[24:] hash_prefix = security_hash[:23] elif hash_code in ('02', '12', '03', '13'): received_time = security_hash[2:12] received_md5 = security_hash[22:] padding += '1' hash_prefix = '05' + 
security_hash[:22] padded_sign_time = compat_str(int(received_time) + 86400) + padding md5_data = (received_md5 + padded_sign_time + '0xAC10FD').encode() signed_md5 = base64.urlsafe_b64encode(hashlib.md5(md5_data).digest()).decode().strip('=') signed_hash = hash_prefix + padded_sign_time + signed_md5 signed_url = '%s?h=%s&k=html5&a=%s&u=%s' % (resource_url, signed_hash, 'F' if video.get('subscriber_only') else 'A', security.get('user') or '') if resource_id.endswith('m3u8') or resource_url.endswith('.m3u8'): formats.extend(self._extract_m3u8_formats( signed_url, resource_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) elif resource_id.endswith('mpd') or resource_url.endswith('.mpd'): formats.extend(self._extract_mpd_formats( signed_url, resource_id, mpd_id='dash', fatal=False)) elif resource_id.endswith('manifest') or resource_url.endswith('/manifest'): formats.extend(self._extract_ism_formats( signed_url, resource_id, ism_id='mss', fatal=False)) else: formats.append({ 'url': signed_url, 'format_id': 'http-%s' % resource_id, 'height': int_or_none(resource.get('height')), }) self._sort_formats(formats) duration = float_or_none(video.get('duration'), 1000) uploader = video.get('channel') uploader_id = str_or_none(video.get('channel_id')) return { 'id': video_id, 'title': title, 'duration': duration, 'uploader': uploader, 'uploader_id': uploader_id, 'formats': formats, 'subtitles': subtitles, } class GloboArticleIE(InfoExtractor): _VALID_URL = r'https?://.+?\.globo\.com/(?:[^/]+/)*(?P<id>[^/.]+)(?:\.html)?' 
_VIDEOID_REGEXES = [ r'\bdata-video-id=["\'](\d{7,})', r'\bdata-player-videosids=["\'](\d{7,})', r'\bvideosIDs\s*:\s*["\']?(\d{7,})', r'\bdata-id=["\'](\d{7,})', r'<div[^>]+\bid=["\'](\d{7,})', ] _TESTS = [{ 'url': 'http://g1.globo.com/jornal-nacional/noticia/2014/09/novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes.html', 'info_dict': { 'id': 'novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes', 'title': 'Novidade na fiscalização de bagagem pela Receita provoca discussões', 'description': 'md5:c3c4b4d4c30c32fce460040b1ac46b12', }, 'playlist_count': 1, }, { 'url': 'http://g1.globo.com/pr/parana/noticia/2016/09/mpf-denuncia-lula-marisa-e-mais-seis-na-operacao-lava-jato.html', 'info_dict': { 'id': 'mpf-denuncia-lula-marisa-e-mais-seis-na-operacao-lava-jato', 'title': "Lula era o 'comandante máximo' do esquema da Lava Jato, diz MPF", 'description': 'md5:8aa7cc8beda4dc71cc8553e00b77c54c', }, 'playlist_count': 6, }, { 'url': 'http://gq.globo.com/Prazeres/Poder/noticia/2015/10/all-o-desafio-assista-ao-segundo-capitulo-da-serie.html', 'only_matching': True, }, { 'url': 'http://gshow.globo.com/programas/tv-xuxa/O-Programa/noticia/2014/01/xuxa-e-junno-namoram-muuuito-em-luau-de-zeze-di-camargo-e-luciano.html', 'only_matching': True, }, { 'url': 'http://oglobo.globo.com/rio/a-amizade-entre-um-entregador-de-farmacia-um-piano-19946271', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if GloboIE.suitable(url) else super(GloboArticleIE, cls).suitable(url) def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) video_ids = [] for video_regex in self._VIDEOID_REGEXES: video_ids.extend(re.findall(video_regex, webpage)) entries = [ self.url_result('globo:%s' % video_id, GloboIE.ie_key()) for video_id in orderedSet(video_ids)] title = self._og_search_title(webpage, fatal=False) description = self._html_search_meta('description', webpage) return 
self.playlist_result(entries, display_id, title, description)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/biqle.py
youtube_dl/extractor/biqle.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from .vk import VKIE
from ..compat import (
    compat_b64decode,
    compat_urllib_parse_unquote,
)
from ..utils import int_or_none


class BIQLEIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?biqle\.(?:com|org|ru)/watch/(?P<id>-?\d+_\d+)'
    _TESTS = [{
        # Youtube embed
        'url': 'https://biqle.ru/watch/-115995369_456239081',
        'md5': '97af5a06ee4c29bbf9c001bdb1cf5c06',
        'info_dict': {
            'id': '8v4f-avW-VI',
            'ext': 'mp4',
            'title': "PASSE-PARTOUT - L'ete c'est fait pour jouer",
            'description': 'Passe-Partout',
            'uploader_id': 'mrsimpsonstef3',
            'uploader': 'Phanolito',
            'upload_date': '20120822',
        },
    }, {
        'url': 'http://biqle.org/watch/-44781847_168547604',
        'md5': '7f24e72af1db0edf7c1aaba513174f97',
        'info_dict': {
            'id': '-44781847_168547604',
            'ext': 'mp4',
            'title': 'Ребенок в шоке от автоматической мойки',
            'timestamp': 1396633454,
            'uploader': 'Dmitry Kotov',
            'upload_date': '20140404',
            'uploader_id': '47850140',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        embed_url = self._proto_relative_url(self._search_regex(
            r'<iframe.+?src="((?:https?:)?//(?:daxab\.com|dxb\.to|[^/]+/player)/[^"]+)".*?></iframe>',
            webpage, 'embed url'))
        # VK-hosted embeds are handled by the VK extractor directly.
        if VKIE.suitable(embed_url):
            return self.url_result(embed_url, VKIE.ie_key(), video_id)

        embed_page = self._download_webpage(
            embed_url, video_id, headers={'Referer': url})
        # video_ext carries "<id>:<sig>:<?>:<access_token>", either URL-encoded
        # in a cookie or base64-encoded in the embed page.
        video_ext = self._get_cookies(embed_url).get('video_ext')
        if video_ext:
            video_ext = compat_urllib_parse_unquote(video_ext.value)
        if not video_ext:
            video_ext = compat_b64decode(self._search_regex(
                r'video_ext\s*:\s*[\'"]([A-Za-z0-9+/=]+)',
                embed_page, 'video_ext')).decode()
        video_id, sig, _, access_token = video_ext.split(':')
        item = self._download_json(
            'https://api.vk.com/method/video.get', video_id,
            headers={'User-Agent': 'okhttp/3.4.1'}, query={
                'access_token': access_token,
                'sig': sig,
                'v': 5.44,
                'videos': video_id,
            })['response']['items'][0]
        title = item['title']

        formats = []
        for f_id, f_url in item.get('files', {}).items():
            if f_id == 'external':
                return self.url_result(f_url)
            # File keys look like "mp4_720".
            ext, height = f_id.split('_')
            formats.append({
                'format_id': height + 'p',
                'url': f_url,
                'height': int_or_none(height),
                'ext': ext,
            })
        self._sort_formats(formats)

        thumbnails = []
        for key, thumb_url in item.items():
            if key.startswith('photo_') and thumb_url:
                width = key.replace('photo_', '')
                thumbnails.append({
                    'id': width,
                    'url': thumb_url,
                    'width': int_or_none(width),
                })

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'comment_count': int_or_none(item.get('comments')),
            'description': item.get('description'),
            'duration': int_or_none(item.get('duration')),
            'thumbnails': thumbnails,
            'timestamp': int_or_none(item.get('date')),
            'uploader': item.get('owner_id'),
            'view_count': int_or_none(item.get('views')),
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/flickr.py
youtube_dl/extractor/flickr.py
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_urllib_parse_urlencode,
)
from ..utils import (
    ExtractorError,
    int_or_none,
    qualities,
)


class FlickrIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.|secure\.)?flickr\.com/photos/[\w\-_@]+/(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.flickr.com/photos/forestwander-nature-pictures/5645318632/in/photostream/',
        'md5': '164fe3fa6c22e18d448d4d5af2330f31',
        'info_dict': {
            'id': '5645318632',
            'ext': 'mpg',
            'description': 'Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.',
            'title': 'Dark Hollow Waterfalls',
            'duration': 19,
            'timestamp': 1303528740,
            'upload_date': '20110423',
            'uploader_id': '10922353@N03',
            'uploader': 'Forest Wander',
            'uploader_url': 'https://www.flickr.com/photos/forestwander-nature-pictures/',
            'comment_count': int,
            'view_count': int,
            'tags': list,
            'license': 'Attribution-ShareAlike',
        }
    }
    _API_BASE_URL = 'https://api.flickr.com/services/rest?'
    # https://help.yahoo.com/kb/flickr/SLN25525.html
    _LICENSES = {
        '0': 'All Rights Reserved',
        '1': 'Attribution-NonCommercial-ShareAlike',
        '2': 'Attribution-NonCommercial',
        '3': 'Attribution-NonCommercial-NoDerivs',
        '4': 'Attribution',
        '5': 'Attribution-ShareAlike',
        '6': 'Attribution-NoDerivs',
        '7': 'No known copyright restrictions',
        '8': 'United States government work',
        '9': 'Public Domain Dedication (CC0)',
        '10': 'Public Domain Work',
    }

    def _call_api(self, method, video_id, api_key, note, secret=None):
        """Issue a Flickr REST call and return the decoded JSON payload."""
        query = {
            'photo_id': video_id,
            'method': 'flickr.%s' % method,
            'api_key': api_key,
            'format': 'json',
            'nojsoncallback': 1,
        }
        if secret:
            query['secret'] = secret
        data = self._download_json(
            self._API_BASE_URL + compat_urllib_parse_urlencode(query),
            video_id, note)
        if data['stat'] != 'ok':
            raise ExtractorError(data['message'])
        return data

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # An anonymous site key is enough for the read-only calls below.
        api_key = self._download_json(
            'https://www.flickr.com/hermes_error_beacon.gne', video_id,
            'Downloading api key')['site_key']

        video_info = self._call_api(
            'photos.getInfo', video_id, api_key,
            'Downloading video info')['photo']
        if video_info['media'] != 'video':
            raise ExtractorError('not a video', expected=True)

        streams = self._call_api(
            'video.getStreamInfo', video_id, api_key,
            'Downloading streams info', video_info['secret'])['streams']

        preference = qualities(
            ['288p', 'iphone_wifi', '100', '300', '700', '360p', 'appletv', '720p', '1080p', 'orig'])

        formats = []
        for stream in streams['stream']:
            stream_type = compat_str(stream.get('type'))
            formats.append({
                'format_id': stream_type,
                'url': stream['_content'],
                'preference': preference(stream_type),
            })
        self._sort_formats(formats)

        owner = video_info.get('owner', {})
        uploader_id = owner.get('nsid')
        uploader_path = owner.get('path_alias') or uploader_id
        uploader_url = 'https://www.flickr.com/photos/%s/' % uploader_path if uploader_path else None

        return {
            'id': video_id,
            'title': video_info['title']['_content'],
            'description': video_info.get('description', {}).get('_content'),
            'formats': formats,
            'timestamp': int_or_none(video_info.get('dateuploaded')),
            'duration': int_or_none(video_info.get('video', {}).get('duration')),
            'uploader_id': uploader_id,
            'uploader': owner.get('realname'),
            'uploader_url': uploader_url,
            'comment_count': int_or_none(video_info.get('comments', {}).get('_content')),
            'view_count': int_or_none(video_info.get('views')),
            'tags': [tag.get('_content') for tag in video_info.get('tags', {}).get('tag', [])],
            'license': self._LICENSES.get(video_info.get('license')),
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/nrk.py
youtube_dl/extractor/nrk.py
# coding: utf-8 from __future__ import unicode_literals import itertools import random import re from .common import InfoExtractor from ..compat import compat_str from ..utils import ( determine_ext, ExtractorError, int_or_none, parse_duration, str_or_none, try_get, urljoin, url_or_none, ) class NRKBaseIE(InfoExtractor): _GEO_COUNTRIES = ['NO'] _CDN_REPL_REGEX = r'''(?x):// (?: nrkod\d{1,2}-httpcache0-47115-cacheod0\.dna\.ip-only\.net/47115-cacheod0| nrk-od-no\.telenorcdn\.net| minicdn-od\.nrk\.no/od/nrkhd-osl-rr\.netwerk\.no/no )/''' def _extract_nrk_formats(self, asset_url, video_id): if re.match(r'https?://[^/]+\.akamaihd\.net/i/', asset_url): return self._extract_akamai_formats(asset_url, video_id) asset_url = re.sub(r'(?:bw_(?:low|high)=\d+|no_audio_only)&?', '', asset_url) formats = self._extract_m3u8_formats( asset_url, video_id, 'mp4', 'm3u8_native', fatal=False) if not formats and re.search(self._CDN_REPL_REGEX, asset_url): formats = self._extract_m3u8_formats( re.sub(self._CDN_REPL_REGEX, '://nrk-od-%02d.akamaized.net/no/' % random.randint(0, 99), asset_url), video_id, 'mp4', 'm3u8_native', fatal=False) return formats def _raise_error(self, data): MESSAGES = { 'ProgramRightsAreNotReady': 'Du kan dessverre ikke se eller høre programmet', 'ProgramRightsHasExpired': 'Programmet har gått ut', 'NoProgramRights': 'Ikke tilgjengelig', 'ProgramIsGeoBlocked': 'NRK har ikke rettigheter til å vise dette programmet utenfor Norge', } message_type = data.get('messageType', '') # Can be ProgramIsGeoBlocked or ChannelIsGeoBlocked* if 'IsGeoBlocked' in message_type or try_get(data, lambda x: x['usageRights']['isGeoBlocked']) is True: self.raise_geo_restricted( msg=MESSAGES.get('ProgramIsGeoBlocked'), countries=self._GEO_COUNTRIES) message = data.get('endUserMessage') or MESSAGES.get(message_type, message_type) raise ExtractorError('%s said: %s' % (self.IE_NAME, message), expected=True) def _call_api(self, path, video_id, item=None, note=None, fatal=True, query=None): 
return self._download_json( urljoin('https://psapi.nrk.no/', path), video_id, note or 'Downloading %s JSON' % item, fatal=fatal, query=query) class NRKIE(NRKBaseIE): _VALID_URL = r'''(?x) (?: nrk:| https?:// (?: (?:www\.)?nrk\.no/video/(?:PS\*|[^_]+_)| v8[-.]psapi\.nrk\.no/mediaelement/ ) ) (?P<id>[^?\#&]+) ''' _TESTS = [{ # video 'url': 'http://www.nrk.no/video/PS*150533', 'md5': 'f46be075326e23ad0e524edfcb06aeb6', 'info_dict': { 'id': '150533', 'ext': 'mp4', 'title': 'Dompap og andre fugler i Piip-Show', 'description': 'md5:d9261ba34c43b61c812cb6b0269a5c8f', 'duration': 262, } }, { # audio 'url': 'http://www.nrk.no/video/PS*154915', # MD5 is unstable 'info_dict': { 'id': '154915', 'ext': 'mp4', 'title': 'Slik høres internett ut når du er blind', 'description': 'md5:a621f5cc1bd75c8d5104cb048c6b8568', 'duration': 20, } }, { 'url': 'nrk:ecc1b952-96dc-4a98-81b9-5296dc7a98d9', 'only_matching': True, }, { 'url': 'nrk:clip/7707d5a3-ebe7-434a-87d5-a3ebe7a34a70', 'only_matching': True, }, { 'url': 'https://v8-psapi.nrk.no/mediaelement/ecc1b952-96dc-4a98-81b9-5296dc7a98d9', 'only_matching': True, }, { 'url': 'https://www.nrk.no/video/dompap-og-andre-fugler-i-piip-show_150533', 'only_matching': True, }, { 'url': 'https://www.nrk.no/video/humor/kommentatorboksen-reiser-til-sjos_d1fda11f-a4ad-437a-a374-0398bc84e999', 'only_matching': True, }, { # podcast 'url': 'nrk:l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8', 'only_matching': True, }, { 'url': 'nrk:podcast/l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8', 'only_matching': True, }, { # clip 'url': 'nrk:150533', 'only_matching': True, }, { 'url': 'nrk:clip/150533', 'only_matching': True, }, { # program 'url': 'nrk:MDDP12000117', 'only_matching': True, }, { 'url': 'nrk:program/ENRK10100318', 'only_matching': True, }, { # direkte 'url': 'nrk:nrk1', 'only_matching': True, }, { 'url': 'nrk:channel/nrk1', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url).split('/')[-1] path_templ = 'playback/%s/' + video_id 
def call_playback_api(item, query=None): return self._call_api(path_templ % item, video_id, item, query=query) # known values for preferredCdn: akamai, iponly, minicdn and telenor manifest = call_playback_api('manifest', {'preferredCdn': 'akamai'}) video_id = try_get(manifest, lambda x: x['id'], compat_str) or video_id if manifest.get('playability') == 'nonPlayable': self._raise_error(manifest['nonPlayable']) playable = manifest['playable'] formats = [] for asset in playable['assets']: if not isinstance(asset, dict): continue if asset.get('encrypted'): continue format_url = url_or_none(asset.get('url')) if not format_url: continue asset_format = (asset.get('format') or '').lower() if asset_format == 'hls' or determine_ext(format_url) == 'm3u8': formats.extend(self._extract_nrk_formats(format_url, video_id)) elif asset_format == 'mp3': formats.append({ 'url': format_url, 'format_id': asset_format, 'vcodec': 'none', }) self._sort_formats(formats) data = call_playback_api('metadata') preplay = data['preplay'] titles = preplay['titles'] title = titles['title'] alt_title = titles.get('subtitle') description = preplay.get('description') duration = parse_duration(playable.get('duration')) or parse_duration(data.get('duration')) thumbnails = [] for image in try_get( preplay, lambda x: x['poster']['images'], list) or []: if not isinstance(image, dict): continue image_url = url_or_none(image.get('url')) if not image_url: continue thumbnails.append({ 'url': image_url, 'width': int_or_none(image.get('pixelWidth')), 'height': int_or_none(image.get('pixelHeight')), }) subtitles = {} for sub in try_get(playable, lambda x: x['subtitles'], list) or []: if not isinstance(sub, dict): continue sub_url = url_or_none(sub.get('webVtt')) if not sub_url: continue sub_key = str_or_none(sub.get('language')) or 'nb' sub_type = str_or_none(sub.get('type')) if sub_type: sub_key += '-%s' % sub_type subtitles.setdefault(sub_key, []).append({ 'url': sub_url, }) legal_age = try_get( data, lambda x: 
x['legalAge']['body']['rating']['code'], compat_str) # https://en.wikipedia.org/wiki/Norwegian_Media_Authority age_limit = None if legal_age: if legal_age == 'A': age_limit = 0 elif legal_age.isdigit(): age_limit = int_or_none(legal_age) is_series = try_get(data, lambda x: x['_links']['series']['name']) == 'series' info = { 'id': video_id, 'title': title, 'alt_title': alt_title, 'description': description, 'duration': duration, 'thumbnails': thumbnails, 'age_limit': age_limit, 'formats': formats, 'subtitles': subtitles, } if is_series: series = season_id = season_number = episode = episode_number = None programs = self._call_api( 'programs/%s' % video_id, video_id, 'programs', fatal=False) if programs and isinstance(programs, dict): series = str_or_none(programs.get('seriesTitle')) season_id = str_or_none(programs.get('seasonId')) season_number = int_or_none(programs.get('seasonNumber')) episode = str_or_none(programs.get('episodeTitle')) episode_number = int_or_none(programs.get('episodeNumber')) if not series: series = title if alt_title: title += ' - %s' % alt_title if not season_number: season_number = int_or_none(self._search_regex( r'Sesong\s+(\d+)', description or '', 'season number', default=None)) if not episode: episode = alt_title if is_series else None if not episode_number: episode_number = int_or_none(self._search_regex( r'^(\d+)\.', episode or '', 'episode number', default=None)) if not episode_number: episode_number = int_or_none(self._search_regex( r'\((\d+)\s*:\s*\d+\)', description or '', 'episode number', default=None)) info.update({ 'title': title, 'series': series, 'season_id': season_id, 'season_number': season_number, 'episode': episode, 'episode_number': episode_number, }) return info class NRKTVIE(InfoExtractor): IE_DESC = 'NRK TV and NRK Radio' _EPISODE_RE = r'(?P<id>[a-zA-Z]{4}\d{8})' _VALID_URL = r'https?://(?:tv|radio)\.nrk(?:super)?\.no/(?:[^/]+/)*%s' % _EPISODE_RE _TESTS = [{ 'url': 'https://tv.nrk.no/program/MDDP12000117', 'md5': 
'c4a5960f1b00b40d47db65c1064e0ab1', 'info_dict': { 'id': 'MDDP12000117', 'ext': 'mp4', 'title': 'Alarm Trolltunga', 'description': 'md5:46923a6e6510eefcce23d5ef2a58f2ce', 'duration': 2223.44, 'age_limit': 6, 'subtitles': { 'nb-nor': [{ 'ext': 'vtt', }], 'nb-ttv': [{ 'ext': 'vtt', }] }, }, }, { 'url': 'https://tv.nrk.no/serie/20-spoersmaal-tv/MUHH48000314/23-05-2014', 'md5': '8d40dab61cea8ab0114e090b029a0565', 'info_dict': { 'id': 'MUHH48000314', 'ext': 'mp4', 'title': '20 spørsmål - 23. mai 2014', 'alt_title': '23. mai 2014', 'description': 'md5:bdea103bc35494c143c6a9acdd84887a', 'duration': 1741, 'series': '20 spørsmål', 'episode': '23. mai 2014', 'age_limit': 0, }, }, { 'url': 'https://tv.nrk.no/program/mdfp15000514', 'info_dict': { 'id': 'MDFP15000514', 'ext': 'mp4', 'title': 'Kunnskapskanalen - Grunnlovsjubiléet - Stor ståhei for ingenting', 'description': 'md5:89290c5ccde1b3a24bb8050ab67fe1db', 'duration': 4605.08, 'series': 'Kunnskapskanalen', 'episode': 'Grunnlovsjubiléet - Stor ståhei for ingenting', 'age_limit': 0, }, 'params': { 'skip_download': True, }, }, { # single playlist video 'url': 'https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015#del=2', 'info_dict': { 'id': 'MSPO40010515', 'ext': 'mp4', 'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015', 'description': 'md5:c03aba1e917561eface5214020551b7a', 'age_limit': 0, }, 'params': { 'skip_download': True, }, 'expected_warnings': ['Failed to download m3u8 information'], 'skip': 'particular part is not supported currently', }, { 'url': 'https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015', 'info_dict': { 'id': 'MSPO40010515', 'ext': 'mp4', 'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015', 'description': 'md5:c03aba1e917561eface5214020551b7a', 'age_limit': 0, }, 'expected_warnings': ['Failed to download m3u8 information'], 'skip': 'Ikke tilgjengelig utenfor Norge', }, { 'url': 'https://tv.nrk.no/serie/anno/KMTE50001317/sesong-3/episode-13', 'info_dict': { 'id': 
'KMTE50001317', 'ext': 'mp4', 'title': 'Anno - 13. episode', 'description': 'md5:11d9613661a8dbe6f9bef54e3a4cbbfa', 'duration': 2340, 'series': 'Anno', 'episode': '13. episode', 'season_number': 3, 'episode_number': 13, 'age_limit': 0, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://tv.nrk.no/serie/nytt-paa-nytt/MUHH46000317/27-01-2017', 'info_dict': { 'id': 'MUHH46000317', 'ext': 'mp4', 'title': 'Nytt på Nytt 27.01.2017', 'description': 'md5:5358d6388fba0ea6f0b6d11c48b9eb4b', 'duration': 1796, 'series': 'Nytt på nytt', 'episode': '27.01.2017', 'age_limit': 0, }, 'params': { 'skip_download': True, }, 'skip': 'ProgramRightsHasExpired', }, { 'url': 'https://radio.nrk.no/serie/dagsnytt/NPUB21019315/12-07-2015#', 'only_matching': True, }, { 'url': 'https://tv.nrk.no/serie/lindmo/2018/MUHU11006318/avspiller', 'only_matching': True, }, { 'url': 'https://radio.nrk.no/serie/dagsnytt/sesong/201507/NPUB21019315', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) return self.url_result( 'nrk:%s' % video_id, ie=NRKIE.ie_key(), video_id=video_id) class NRKTVEpisodeIE(InfoExtractor): _VALID_URL = r'https?://tv\.nrk\.no/serie/(?P<id>[^/]+/sesong/(?P<season_number>\d+)/episode/(?P<episode_number>\d+))' _TESTS = [{ 'url': 'https://tv.nrk.no/serie/hellums-kro/sesong/1/episode/2', 'info_dict': { 'id': 'MUHH36005220', 'ext': 'mp4', 'title': 'Hellums kro - 2. Kro, krig og kjærlighet', 'description': 'md5:ad92ddffc04cea8ce14b415deef81787', 'duration': 1563.92, 'series': 'Hellums kro', 'season_number': 1, 'episode_number': 2, 'episode': '2. Kro, krig og kjærlighet', 'age_limit': 6, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://tv.nrk.no/serie/backstage/sesong/1/episode/8', 'info_dict': { 'id': 'MSUI14000816', 'ext': 'mp4', 'title': 'Backstage - 8. episode', 'description': 'md5:de6ca5d5a2d56849e4021f2bf2850df4', 'duration': 1320, 'series': 'Backstage', 'season_number': 1, 'episode_number': 8, 'episode': '8. 
episode', 'age_limit': 0, }, 'params': { 'skip_download': True, }, 'skip': 'ProgramRightsHasExpired', }] def _real_extract(self, url): display_id, season_number, episode_number = re.match(self._VALID_URL, url).groups() webpage = self._download_webpage(url, display_id) info = self._search_json_ld(webpage, display_id, default={}) nrk_id = info.get('@id') or self._html_search_meta( 'nrk:program-id', webpage, default=None) or self._search_regex( r'data-program-id=["\'](%s)' % NRKTVIE._EPISODE_RE, webpage, 'nrk id') assert re.match(NRKTVIE._EPISODE_RE, nrk_id) info.update({ '_type': 'url', 'id': nrk_id, 'url': 'nrk:%s' % nrk_id, 'ie_key': NRKIE.ie_key(), 'season_number': int(season_number), 'episode_number': int(episode_number), }) return info class NRKTVSerieBaseIE(NRKBaseIE): def _extract_entries(self, entry_list): if not isinstance(entry_list, list): return [] entries = [] for episode in entry_list: nrk_id = episode.get('prfId') or episode.get('episodeId') if not nrk_id or not isinstance(nrk_id, compat_str): continue entries.append(self.url_result( 'nrk:%s' % nrk_id, ie=NRKIE.ie_key(), video_id=nrk_id)) return entries _ASSETS_KEYS = ('episodes', 'instalments',) def _extract_assets_key(self, embedded): for asset_key in self._ASSETS_KEYS: if embedded.get(asset_key): return asset_key @staticmethod def _catalog_name(serie_kind): return 'podcast' if serie_kind in ('podcast', 'podkast') else 'series' def _entries(self, data, display_id): for page_num in itertools.count(1): embedded = data.get('_embedded') or data if not isinstance(embedded, dict): break assets_key = self._extract_assets_key(embedded) if not assets_key: break # Extract entries entries = try_get( embedded, (lambda x: x[assets_key]['_embedded'][assets_key], lambda x: x[assets_key]), list) for e in self._extract_entries(entries): yield e # Find next URL next_url_path = try_get( data, (lambda x: x['_links']['next']['href'], lambda x: x['_embedded'][assets_key]['_links']['next']['href']), compat_str) if not 
next_url_path: break data = self._call_api( next_url_path, display_id, note='Downloading %s JSON page %d' % (assets_key, page_num), fatal=False) if not data: break class NRKTVSeasonIE(NRKTVSerieBaseIE): _VALID_URL = r'''(?x) https?:// (?P<domain>tv|radio)\.nrk\.no/ (?P<serie_kind>serie|pod[ck]ast)/ (?P<serie>[^/]+)/ (?: (?:sesong/)?(?P<id>\d+)| sesong/(?P<id_2>[^/?#&]+) ) ''' _TESTS = [{ 'url': 'https://tv.nrk.no/serie/backstage/sesong/1', 'info_dict': { 'id': 'backstage/1', 'title': 'Sesong 1', }, 'playlist_mincount': 30, }, { # no /sesong/ in path 'url': 'https://tv.nrk.no/serie/lindmo/2016', 'info_dict': { 'id': 'lindmo/2016', 'title': '2016', }, 'playlist_mincount': 29, }, { # weird nested _embedded in catalog JSON response 'url': 'https://radio.nrk.no/serie/dickie-dick-dickens/sesong/1', 'info_dict': { 'id': 'dickie-dick-dickens/1', 'title': 'Sesong 1', }, 'playlist_mincount': 11, }, { # 841 entries, multi page 'url': 'https://radio.nrk.no/serie/dagsnytt/sesong/201509', 'info_dict': { 'id': 'dagsnytt/201509', 'title': 'September 2015', }, 'playlist_mincount': 841, }, { # 180 entries, single page 'url': 'https://tv.nrk.no/serie/spangas/sesong/1', 'only_matching': True, }, { 'url': 'https://radio.nrk.no/podkast/hele_historien/sesong/diagnose-kverulant', 'info_dict': { 'id': 'hele_historien/diagnose-kverulant', 'title': 'Diagnose kverulant', }, 'playlist_mincount': 3, }, { 'url': 'https://radio.nrk.no/podkast/loerdagsraadet/sesong/202101', 'only_matching': True, }] @classmethod def suitable(cls, url): return (False if NRKTVIE.suitable(url) or NRKTVEpisodeIE.suitable(url) or NRKRadioPodkastIE.suitable(url) else super(NRKTVSeasonIE, cls).suitable(url)) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) domain = mobj.group('domain') serie_kind = mobj.group('serie_kind') serie = mobj.group('serie') season_id = mobj.group('id') or mobj.group('id_2') display_id = '%s/%s' % (serie, season_id) data = self._call_api( '%s/catalog/%s/%s/seasons/%s' % 
(domain, self._catalog_name(serie_kind), serie, season_id), display_id, 'season', query={'pageSize': 50}) title = try_get(data, lambda x: x['titles']['title'], compat_str) or display_id return self.playlist_result( self._entries(data, display_id), display_id, title) class NRKTVSeriesIE(NRKTVSerieBaseIE): _VALID_URL = r'https?://(?P<domain>(?:tv|radio)\.nrk|(?:tv\.)?nrksuper)\.no/(?P<serie_kind>serie|pod[ck]ast)/(?P<id>[^/]+)' _TESTS = [{ # new layout, instalments 'url': 'https://tv.nrk.no/serie/groenn-glede', 'info_dict': { 'id': 'groenn-glede', 'title': 'Grønn glede', 'description': 'md5:7576e92ae7f65da6993cf90ee29e4608', }, 'playlist_mincount': 90, }, { # new layout, instalments, more entries 'url': 'https://tv.nrk.no/serie/lindmo', 'only_matching': True, }, { 'url': 'https://tv.nrk.no/serie/blank', 'info_dict': { 'id': 'blank', 'title': 'Blank', 'description': 'md5:7664b4e7e77dc6810cd3bca367c25b6e', }, 'playlist_mincount': 30, }, { # new layout, seasons 'url': 'https://tv.nrk.no/serie/backstage', 'info_dict': { 'id': 'backstage', 'title': 'Backstage', 'description': 'md5:63692ceb96813d9a207e9910483d948b', }, 'playlist_mincount': 60, }, { # old layout 'url': 'https://tv.nrksuper.no/serie/labyrint', 'info_dict': { 'id': 'labyrint', 'title': 'Labyrint', 'description': 'I Daidalos sin undersjøiske Labyrint venter spennende oppgaver, skumle robotskapninger og slim.', }, 'playlist_mincount': 3, }, { 'url': 'https://tv.nrk.no/serie/broedrene-dal-og-spektralsteinene', 'only_matching': True, }, { 'url': 'https://tv.nrk.no/serie/saving-the-human-race', 'only_matching': True, }, { 'url': 'https://tv.nrk.no/serie/postmann-pat', 'only_matching': True, }, { 'url': 'https://radio.nrk.no/serie/dickie-dick-dickens', 'info_dict': { 'id': 'dickie-dick-dickens', 'title': 'Dickie Dick Dickens', 'description': 'md5:19e67411ffe57f7dce08a943d7a0b91f', }, 'playlist_mincount': 8, }, { 'url': 'https://nrksuper.no/serie/labyrint', 'only_matching': True, }, { 'url': 
'https://radio.nrk.no/podkast/ulrikkes_univers', 'info_dict': { 'id': 'ulrikkes_univers', }, 'playlist_mincount': 10, }, { 'url': 'https://radio.nrk.no/podkast/ulrikkes_univers/nrkno-poddkast-26588-134079-05042018030000', 'only_matching': True, }] @classmethod def suitable(cls, url): return ( False if any(ie.suitable(url) for ie in (NRKTVIE, NRKTVEpisodeIE, NRKRadioPodkastIE, NRKTVSeasonIE)) else super(NRKTVSeriesIE, cls).suitable(url)) def _real_extract(self, url): site, serie_kind, series_id = re.match(self._VALID_URL, url).groups() is_radio = site == 'radio.nrk' domain = 'radio' if is_radio else 'tv' size_prefix = 'p' if is_radio else 'embeddedInstalmentsP' series = self._call_api( '%s/catalog/%s/%s' % (domain, self._catalog_name(serie_kind), series_id), series_id, 'serie', query={size_prefix + 'ageSize': 50}) titles = try_get(series, [ lambda x: x['titles'], lambda x: x[x['type']]['titles'], lambda x: x[x['seriesType']]['titles'], ]) or {} entries = [] entries.extend(self._entries(series, series_id)) embedded = series.get('_embedded') or {} linked_seasons = try_get(series, lambda x: x['_links']['seasons']) or [] embedded_seasons = embedded.get('seasons') or [] if len(linked_seasons) > len(embedded_seasons): for season in linked_seasons: season_url = urljoin(url, season.get('href')) if not season_url: season_name = season.get('name') if season_name and isinstance(season_name, compat_str): season_url = 'https://%s.nrk.no/serie/%s/sesong/%s' % (domain, series_id, season_name) if season_url: entries.append(self.url_result( season_url, ie=NRKTVSeasonIE.ie_key(), video_title=season.get('title'))) else: for season in embedded_seasons: entries.extend(self._entries(season, series_id)) entries.extend(self._entries( embedded.get('extraMaterial') or {}, series_id)) return self.playlist_result( entries, series_id, titles.get('title'), titles.get('subtitle')) class NRKTVDirekteIE(NRKTVIE): IE_DESC = 'NRK TV Direkte and NRK Radio Direkte' _VALID_URL = 
r'https?://(?:tv|radio)\.nrk\.no/direkte/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://tv.nrk.no/direkte/nrk1', 'only_matching': True, }, { 'url': 'https://radio.nrk.no/direkte/p1_oslo_akershus', 'only_matching': True, }] class NRKRadioPodkastIE(InfoExtractor): _VALID_URL = r'https?://radio\.nrk\.no/pod[ck]ast/(?:[^/]+/)+(?P<id>l_[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})' _TESTS = [{ 'url': 'https://radio.nrk.no/podkast/ulrikkes_univers/l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8', 'md5': '8d40dab61cea8ab0114e090b029a0565', 'info_dict': { 'id': 'MUHH48000314AA', 'ext': 'mp4', 'title': '20 spørsmål 23.05.2014', 'description': 'md5:bdea103bc35494c143c6a9acdd84887a', 'duration': 1741, 'series': '20 spørsmål', 'episode': '23.05.2014', }, }, { 'url': 'https://radio.nrk.no/podcast/ulrikkes_univers/l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8', 'only_matching': True, }, { 'url': 'https://radio.nrk.no/podkast/ulrikkes_univers/sesong/1/l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8', 'only_matching': True, }, { 'url': 'https://radio.nrk.no/podkast/hele_historien/sesong/bortfoert-i-bergen/l_774d1a2c-7aa7-4965-8d1a-2c7aa7d9652c', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) return self.url_result( 'nrk:%s' % video_id, ie=NRKIE.ie_key(), video_id=video_id) class NRKPlaylistBaseIE(InfoExtractor): def _extract_description(self, webpage): pass def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) entries = [ self.url_result('nrk:%s' % video_id, NRKIE.ie_key()) for video_id in re.findall(self._ITEM_RE, webpage) ] playlist_title = self. 
_extract_title(webpage) playlist_description = self._extract_description(webpage) return self.playlist_result( entries, playlist_id, playlist_title, playlist_description) class NRKPlaylistIE(NRKPlaylistBaseIE): _VALID_URL = r'https?://(?:www\.)?nrk\.no/(?!video|skole)(?:[^/]+/)+(?P<id>[^/]+)' _ITEM_RE = r'class="[^"]*\brich\b[^"]*"[^>]+data-video-id="([^"]+)"' _TESTS = [{ 'url': 'http://www.nrk.no/troms/gjenopplev-den-historiske-solformorkelsen-1.12270763', 'info_dict': { 'id': 'gjenopplev-den-historiske-solformorkelsen-1.12270763', 'title': 'Gjenopplev den historiske solformørkelsen', 'description': 'md5:c2df8ea3bac5654a26fc2834a542feed', }, 'playlist_count': 2, }, { 'url': 'http://www.nrk.no/kultur/bok/rivertonprisen-til-karin-fossum-1.12266449', 'info_dict': { 'id': 'rivertonprisen-til-karin-fossum-1.12266449', 'title': 'Rivertonprisen til Karin Fossum', 'description': 'Første kvinne på 15 år til å vinne krimlitteraturprisen.', }, 'playlist_count': 2, }] def _extract_title(self, webpage): return self._og_search_title(webpage, fatal=False) def _extract_description(self, webpage): return self._og_search_description(webpage) class NRKTVEpisodesIE(NRKPlaylistBaseIE): _VALID_URL = r'https?://tv\.nrk\.no/program/[Ee]pisodes/[^/]+/(?P<id>\d+)' _ITEM_RE = r'data-episode=["\']%s' % NRKTVIE._EPISODE_RE _TESTS = [{ 'url': 'https://tv.nrk.no/program/episodes/nytt-paa-nytt/69031', 'info_dict': { 'id': '69031', 'title': 'Nytt på nytt, sesong: 201210', }, 'playlist_count': 4, }] def _extract_title(self, webpage): return self._html_search_regex( r'<h1>([^<]+)</h1>', webpage, 'title', fatal=False) class NRKSkoleIE(InfoExtractor): IE_DESC = 'NRK Skole' _VALID_URL = r'https?://(?:www\.)?nrk\.no/skole/?\?.*\bmediaId=(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.nrk.no/skole/?page=search&q=&mediaId=14099', 'md5': '18c12c3d071953c3bf8d54ef6b2587b7', 'info_dict': { 'id': '6021', 'ext': 'mp4', 'title': 'Genetikk og eneggede tvillinger', 'description': 
'md5:3aca25dcf38ec30f0363428d2b265f8d', 'duration': 399, }, }, { 'url': 'https://www.nrk.no/skole/?page=objectives&subject=naturfag&objective=K15114&mediaId=19355', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) nrk_id = self._download_json( 'https://nrkno-skole-prod.kube.nrk.no/skole/api/media/%s' % video_id, video_id)['psId'] return self.url_result('nrk:%s' % nrk_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/foxgay.py
youtube_dl/extractor/foxgay.py
from __future__ import unicode_literals import itertools from .common import InfoExtractor from ..utils import ( get_element_by_id, int_or_none, remove_end, ) class FoxgayIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?foxgay\.com/videos/(?:\S+-)?(?P<id>\d+)\.shtml' _TEST = { 'url': 'http://foxgay.com/videos/fuck-turkish-style-2582.shtml', 'md5': '344558ccfea74d33b7adbce22e577f54', 'info_dict': { 'id': '2582', 'ext': 'mp4', 'title': 'Fuck Turkish-style', 'description': 'md5:6ae2d9486921891efe89231ace13ffdf', 'age_limit': 18, 'thumbnail': r're:https?://.*\.jpg$', }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = remove_end(self._html_search_regex( r'<title>([^<]+)</title>', webpage, 'title'), ' - Foxgay.com') description = get_element_by_id('inf_tit', webpage) # The default user-agent with foxgay cookies leads to pages without videos self._downloader.cookiejar.clear('.foxgay.com') # Find the URL for the iFrame which contains the actual video. iframe_url = self._html_search_regex( r'<iframe[^>]+src=([\'"])(?P<url>[^\'"]+)\1', webpage, 'video frame', group='url') iframe = self._download_webpage( iframe_url, video_id, headers={'User-Agent': 'curl/7.50.1'}, note='Downloading video frame') video_data = self._parse_json(self._search_regex( r'video_data\s*=\s*([^;]+);', iframe, 'video data'), video_id) formats = [{ 'url': source, 'height': int_or_none(resolution), } for source, resolution in zip( video_data['sources'], video_data.get('resolutions', itertools.repeat(None)))] self._sort_formats(formats) return { 'id': video_id, 'title': title, 'formats': formats, 'description': description, 'thumbnail': video_data.get('act_vid', {}).get('thumb'), 'age_limit': 18, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/patreon.py
youtube_dl/extractor/patreon.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( clean_html, determine_ext, int_or_none, KNOWN_EXTENSIONS, mimetype2ext, parse_iso8601, str_or_none, try_get, ) class PatreonIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?patreon\.com/(?:creation\?hid=|posts/(?:[\w-]+-)?)(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.patreon.com/creation?hid=743933', 'md5': 'e25505eec1053a6e6813b8ed369875cc', 'info_dict': { 'id': '743933', 'ext': 'mp3', 'title': 'Episode 166: David Smalley of Dogma Debate', 'description': 'md5:713b08b772cd6271b9f3906683cfacdf', 'uploader': 'Cognitive Dissonance Podcast', 'thumbnail': 're:^https?://.*$', 'timestamp': 1406473987, 'upload_date': '20140727', 'uploader_id': '87145', }, }, { 'url': 'http://www.patreon.com/creation?hid=754133', 'md5': '3eb09345bf44bf60451b8b0b81759d0a', 'info_dict': { 'id': '754133', 'ext': 'mp3', 'title': 'CD 167 Extra', 'uploader': 'Cognitive Dissonance Podcast', 'thumbnail': 're:^https?://.*$', }, 'skip': 'Patron-only content', }, { 'url': 'https://www.patreon.com/creation?hid=1682498', 'info_dict': { 'id': 'SU4fj_aEMVw', 'ext': 'mp4', 'title': 'I\'m on Patreon!', 'uploader': 'TraciJHines', 'thumbnail': 're:^https?://.*$', 'upload_date': '20150211', 'description': 'md5:c5a706b1f687817a3de09db1eb93acd4', 'uploader_id': 'TraciJHines', }, 'params': { 'noplaylist': True, 'skip_download': True, } }, { 'url': 'https://www.patreon.com/posts/episode-166-of-743933', 'only_matching': True, }, { 'url': 'https://www.patreon.com/posts/743933', 'only_matching': True, }] # Currently Patreon exposes download URL via hidden CSS, so login is not # needed. Keeping this commented for when this inevitably changes. 
''' def _login(self): username, password = self._get_login_info() if username is None: return login_form = { 'redirectUrl': 'http://www.patreon.com/', 'email': username, 'password': password, } request = sanitized_Request( 'https://www.patreon.com/processLogin', compat_urllib_parse_urlencode(login_form).encode('utf-8') ) login_page = self._download_webpage(request, None, note='Logging in') if re.search(r'onLoginFailed', login_page): raise ExtractorError('Unable to login, incorrect username and/or password', expected=True) def _real_initialize(self): self._login() ''' def _real_extract(self, url): video_id = self._match_id(url) post = self._download_json( 'https://www.patreon.com/api/posts/' + video_id, video_id, query={ 'fields[media]': 'download_url,mimetype,size_bytes', 'fields[post]': 'comment_count,content,embed,image,like_count,post_file,published_at,title', 'fields[user]': 'full_name,url', 'json-api-use-default-includes': 'false', 'include': 'media,user', }) attributes = post['data']['attributes'] title = attributes['title'].strip() image = attributes.get('image') or {} info = { 'id': video_id, 'title': title, 'description': clean_html(attributes.get('content')), 'thumbnail': image.get('large_url') or image.get('url'), 'timestamp': parse_iso8601(attributes.get('published_at')), 'like_count': int_or_none(attributes.get('like_count')), 'comment_count': int_or_none(attributes.get('comment_count')), } for i in post.get('included', []): i_type = i.get('type') if i_type == 'media': media_attributes = i.get('attributes') or {} download_url = media_attributes.get('download_url') ext = mimetype2ext(media_attributes.get('mimetype')) if download_url and ext in KNOWN_EXTENSIONS: info.update({ 'ext': ext, 'filesize': int_or_none(media_attributes.get('size_bytes')), 'url': download_url, }) elif i_type == 'user': user_attributes = i.get('attributes') if user_attributes: info.update({ 'uploader': user_attributes.get('full_name'), 'uploader_id': str_or_none(i.get('id')), 
'uploader_url': user_attributes.get('url'), }) if not info.get('url'): embed_url = try_get(attributes, lambda x: x['embed']['url']) if embed_url: info.update({ '_type': 'url', 'url': embed_url, }) if not info.get('url'): post_file = attributes['post_file'] ext = determine_ext(post_file.get('name')) if ext in KNOWN_EXTENSIONS: info.update({ 'ext': ext, 'url': post_file['url'], }) return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/pokemon.py
youtube_dl/extractor/pokemon.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( extract_attributes, int_or_none, ) class PokemonIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?pokemon\.com/[a-z]{2}(?:.*?play=(?P<id>[a-z0-9]{32})|/(?:[^/]+/)+(?P<display_id>[^/?#&]+))' _TESTS = [{ 'url': 'https://www.pokemon.com/us/pokemon-episodes/20_30-the-ol-raise-and-switch/', 'md5': '2fe8eaec69768b25ef898cda9c43062e', 'info_dict': { 'id': 'afe22e30f01c41f49d4f1d9eab5cd9a4', 'ext': 'mp4', 'title': 'The Ol’ Raise and Switch!', 'description': 'md5:7db77f7107f98ba88401d3adc80ff7af', }, 'add_id': ['LimelightMedia'], }, { # no data-video-title 'url': 'https://www.pokemon.com/fr/episodes-pokemon/films-pokemon/pokemon-lascension-de-darkrai-2008', 'info_dict': { 'id': 'dfbaf830d7e54e179837c50c0c6cc0e1', 'ext': 'mp4', 'title': "Pokémon : L'ascension de Darkrai", 'description': 'md5:d1dbc9e206070c3e14a06ff557659fb5', }, 'add_id': ['LimelightMedia'], 'params': { 'skip_download': True, }, }, { 'url': 'http://www.pokemon.com/uk/pokemon-episodes/?play=2e8b5c761f1d4a9286165d7748c1ece2', 'only_matching': True, }, { 'url': 'http://www.pokemon.com/fr/episodes-pokemon/18_09-un-hiver-inattendu/', 'only_matching': True, }, { 'url': 'http://www.pokemon.com/de/pokemon-folgen/01_20-bye-bye-smettbo/', 'only_matching': True, }] def _real_extract(self, url): video_id, display_id = re.match(self._VALID_URL, url).groups() webpage = self._download_webpage(url, video_id or display_id) video_data = extract_attributes(self._search_regex( r'(<[^>]+data-video-id="%s"[^>]*>)' % (video_id if video_id else '[a-z0-9]{32}'), webpage, 'video data element')) video_id = video_data['data-video-id'] title = video_data.get('data-video-title') or self._html_search_meta( 'pkm-title', webpage, ' title', default=None) or self._search_regex( r'<h1[^>]+\bclass=["\']us-title[^>]+>([^<]+)', webpage, 'title') return { '_type': 'url_transparent', 'id': video_id, 'url': 
'limelight:media:%s' % video_id, 'title': title, 'description': video_data.get('data-video-summary'), 'thumbnail': video_data.get('data-video-poster'), 'series': 'Pokémon', 'season_number': int_or_none(video_data.get('data-video-season')), 'episode': title, 'episode_number': int_or_none(video_data.get('data-video-episode')), 'ie_key': 'LimelightMedia', }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/lecturio.py
youtube_dl/extractor/lecturio.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( clean_html, determine_ext, ExtractorError, float_or_none, int_or_none, str_or_none, url_or_none, urlencode_postdata, urljoin, ) class LecturioBaseIE(InfoExtractor): _API_BASE_URL = 'https://app.lecturio.com/api/en/latest/html5/' _LOGIN_URL = 'https://app.lecturio.com/en/login' _NETRC_MACHINE = 'lecturio' def _real_initialize(self): self._login() def _login(self): username, password = self._get_login_info() if username is None: return # Sets some cookies _, urlh = self._download_webpage_handle( self._LOGIN_URL, None, 'Downloading login popup') def is_logged(url_handle): return self._LOGIN_URL not in url_handle.geturl() # Already logged in if is_logged(urlh): return login_form = { 'signin[email]': username, 'signin[password]': password, 'signin[remember]': 'on', } response, urlh = self._download_webpage_handle( self._LOGIN_URL, None, 'Logging in', data=urlencode_postdata(login_form)) # Logged in successfully if is_logged(urlh): return errors = self._html_search_regex( r'(?s)<ul[^>]+class=["\']error_list[^>]+>(.+?)</ul>', response, 'errors', default=None) if errors: raise ExtractorError('Unable to login: %s' % errors, expected=True) raise ExtractorError('Unable to log in') class LecturioIE(LecturioBaseIE): _VALID_URL = r'''(?x) https:// (?: app\.lecturio\.com/([^/]+/(?P<nt>[^/?#&]+)\.lecture|(?:\#/)?lecture/c/\d+/(?P<id>\d+))| (?:www\.)?lecturio\.de/[^/]+/(?P<nt_de>[^/?#&]+)\.vortrag ) ''' _TESTS = [{ 'url': 'https://app.lecturio.com/medical-courses/important-concepts-and-terms-introduction-to-microbiology.lecture#tab/videos', 'md5': '9a42cf1d8282a6311bf7211bbde26fde', 'info_dict': { 'id': '39634', 'ext': 'mp4', 'title': 'Important Concepts and Terms — Introduction to Microbiology', }, 'skip': 'Requires lecturio account credentials', }, { 'url': 'https://www.lecturio.de/jura/oeffentliches-recht-staatsexamen.vortrag', 'only_matching': True, }, { 
'url': 'https://app.lecturio.com/#/lecture/c/6434/39634', 'only_matching': True, }] _CC_LANGS = { 'Arabic': 'ar', 'Bulgarian': 'bg', 'German': 'de', 'English': 'en', 'Spanish': 'es', 'Persian': 'fa', 'French': 'fr', 'Japanese': 'ja', 'Polish': 'pl', 'Pashto': 'ps', 'Russian': 'ru', } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) nt = mobj.group('nt') or mobj.group('nt_de') lecture_id = mobj.group('id') display_id = nt or lecture_id api_path = 'lectures/' + lecture_id if lecture_id else 'lecture/' + nt + '.json' video = self._download_json( self._API_BASE_URL + api_path, display_id) title = video['title'].strip() if not lecture_id: pid = video.get('productId') or video.get('uid') if pid: spid = pid.split('_') if spid and len(spid) == 2: lecture_id = spid[1] formats = [] for format_ in video['content']['media']: if not isinstance(format_, dict): continue file_ = format_.get('file') if not file_: continue ext = determine_ext(file_) if ext == 'smil': # smil contains only broken RTMP formats anyway continue file_url = url_or_none(file_) if not file_url: continue label = str_or_none(format_.get('label')) filesize = int_or_none(format_.get('fileSize')) f = { 'url': file_url, 'format_id': label, 'filesize': float_or_none(filesize, invscale=1000) } if label: mobj = re.match(r'(\d+)p\s*\(([^)]+)\)', label) if mobj: f.update({ 'format_id': mobj.group(2), 'height': int(mobj.group(1)), }) formats.append(f) self._sort_formats(formats) subtitles = {} automatic_captions = {} captions = video.get('captions') or [] for cc in captions: cc_url = cc.get('url') if not cc_url: continue cc_label = cc.get('translatedCode') lang = cc.get('languageCode') or self._search_regex( r'/([a-z]{2})_', cc_url, 'lang', default=cc_label.split()[0] if cc_label else 'en') original_lang = self._search_regex( r'/[a-z]{2}_([a-z]{2})_', cc_url, 'original lang', default=None) sub_dict = (automatic_captions if 'auto-translated' in cc_label or original_lang else subtitles) 
sub_dict.setdefault(self._CC_LANGS.get(lang, lang), []).append({ 'url': cc_url, }) return { 'id': lecture_id or nt, 'title': title, 'formats': formats, 'subtitles': subtitles, 'automatic_captions': automatic_captions, } class LecturioCourseIE(LecturioBaseIE): _VALID_URL = r'https://app\.lecturio\.com/(?:[^/]+/(?P<nt>[^/?#&]+)\.course|(?:#/)?course/c/(?P<id>\d+))' _TESTS = [{ 'url': 'https://app.lecturio.com/medical-courses/microbiology-introduction.course#/', 'info_dict': { 'id': 'microbiology-introduction', 'title': 'Microbiology: Introduction', 'description': 'md5:13da8500c25880c6016ae1e6d78c386a', }, 'playlist_count': 45, 'skip': 'Requires lecturio account credentials', }, { 'url': 'https://app.lecturio.com/#/course/c/6434', 'only_matching': True, }] def _real_extract(self, url): nt, course_id = re.match(self._VALID_URL, url).groups() display_id = nt or course_id api_path = 'courses/' + course_id if course_id else 'course/content/' + nt + '.json' course = self._download_json( self._API_BASE_URL + api_path, display_id) entries = [] for lecture in course.get('lectures', []): lecture_id = str_or_none(lecture.get('id')) lecture_url = lecture.get('url') if lecture_url: lecture_url = urljoin(url, lecture_url) else: lecture_url = 'https://app.lecturio.com/#/lecture/c/%s/%s' % (course_id, lecture_id) entries.append(self.url_result( lecture_url, ie=LecturioIE.ie_key(), video_id=lecture_id)) return self.playlist_result( entries, display_id, course.get('title'), clean_html(course.get('description'))) class LecturioDeCourseIE(LecturioBaseIE): _VALID_URL = r'https://(?:www\.)?lecturio\.de/[^/]+/(?P<id>[^/?#&]+)\.kurs' _TEST = { 'url': 'https://www.lecturio.de/jura/grundrechte.kurs', 'only_matching': True, } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) entries = [] for mobj in re.finditer( r'(?s)<td[^>]+\bdata-lecture-id=["\'](?P<id>\d+).+?\bhref=(["\'])(?P<url>(?:(?!\2).)+\.vortrag)\b[^>]+>', webpage): 
lecture_url = urljoin(url, mobj.group('url')) lecture_id = mobj.group('id') entries.append(self.url_result( lecture_url, ie=LecturioIE.ie_key(), video_id=lecture_id)) title = self._search_regex( r'<h1[^>]*>([^<]+)', webpage, 'title', default=None) return self.playlist_result(entries, display_id, title)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/umg.py
youtube_dl/extractor/umg.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( int_or_none, parse_filesize, parse_iso8601, ) class UMGDeIE(InfoExtractor): IE_NAME = 'umg:de' IE_DESC = 'Universal Music Deutschland' _VALID_URL = r'https?://(?:www\.)?universal-music\.de/[^/]+/videos/[^/?#]+-(?P<id>\d+)' _TEST = { 'url': 'https://www.universal-music.de/sido/videos/jedes-wort-ist-gold-wert-457803', 'md5': 'ebd90f48c80dcc82f77251eb1902634f', 'info_dict': { 'id': '457803', 'ext': 'mp4', 'title': 'Jedes Wort ist Gold wert', 'timestamp': 1513591800, 'upload_date': '20171218', } } def _real_extract(self, url): video_id = self._match_id(url) video_data = self._download_json( 'https://graphql.universal-music.de/', video_id, query={ 'query': '''{ universalMusic(channel:16) { video(id:%s) { headline formats { formatId url type width height mimeType fileSize } duration createdDate } } }''' % video_id})['data']['universalMusic']['video'] title = video_data['headline'] hls_url_template = 'http://mediadelivery.universal-music-services.de/vod/mp4:autofill/storage/' + '/'.join(list(video_id)) + '/content/%s/file/playlist.m3u8' thumbnails = [] formats = [] def add_m3u8_format(format_id): formats.extend(self._extract_m3u8_formats( hls_url_template % format_id, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) for f in video_data.get('formats', []): f_url = f.get('url') mime_type = f.get('mimeType') if not f_url or mime_type == 'application/mxf': continue fmt = { 'url': f_url, 'width': int_or_none(f.get('width')), 'height': int_or_none(f.get('height')), 'filesize': parse_filesize(f.get('fileSize')), } f_type = f.get('type') if f_type == 'Image': thumbnails.append(fmt) elif f_type == 'Video': format_id = f.get('formatId') if format_id: fmt['format_id'] = format_id if mime_type == 'video/mp4': add_m3u8_format(format_id) urlh = self._request_webpage(f_url, video_id, fatal=False) if urlh: first_byte = urlh.read(1) if first_byte not in (b'F', 
b'\x00'): continue formats.append(fmt) if not formats: for format_id in (867, 836, 940): add_m3u8_format(format_id) self._sort_formats(formats, ('width', 'height', 'filesize', 'tbr')) return { 'id': video_id, 'title': title, 'duration': int_or_none(video_data.get('duration')), 'timestamp': parse_iso8601(video_data.get('createdDate'), ' '), 'thumbnails': thumbnails, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/toggle.py
youtube_dl/extractor/toggle.py
# coding: utf-8 from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..utils import ( determine_ext, ExtractorError, float_or_none, int_or_none, parse_iso8601, strip_or_none, ) class ToggleIE(InfoExtractor): IE_NAME = 'toggle' _VALID_URL = r'(?:https?://(?:(?:www\.)?mewatch|video\.toggle)\.sg/(?:en|zh)/(?:[^/]+/){2,}|toggle:)(?P<id>[0-9]+)' _TESTS = [{ 'url': 'http://www.mewatch.sg/en/series/lion-moms-tif/trailers/lion-moms-premier/343115', 'info_dict': { 'id': '343115', 'ext': 'mp4', 'title': 'Lion Moms Premiere', 'description': 'md5:aea1149404bff4d7f7b6da11fafd8e6b', 'upload_date': '20150910', 'timestamp': 1441858274, }, 'params': { 'skip_download': 'm3u8 download', } }, { 'note': 'DRM-protected video', 'url': 'http://www.mewatch.sg/en/movies/dug-s-special-mission/341413', 'info_dict': { 'id': '341413', 'ext': 'wvm', 'title': 'Dug\'s Special Mission', 'description': 'md5:e86c6f4458214905c1772398fabc93e0', 'upload_date': '20150827', 'timestamp': 1440644006, }, 'params': { 'skip_download': 'DRM-protected wvm download', } }, { # this also tests correct video id extraction 'note': 'm3u8 links are geo-restricted, but Android/mp4 is okay', 'url': 'http://www.mewatch.sg/en/series/28th-sea-games-5-show/28th-sea-games-5-show-ep11/332861', 'info_dict': { 'id': '332861', 'ext': 'mp4', 'title': '28th SEA Games (5 Show) - Episode 11', 'description': 'md5:3cd4f5f56c7c3b1340c50a863f896faa', 'upload_date': '20150605', 'timestamp': 1433480166, }, 'params': { 'skip_download': 'DRM-protected wvm download', }, 'skip': 'm3u8 links are geo-restricted' }, { 'url': 'http://video.toggle.sg/en/clips/seraph-sun-aloysius-will-suddenly-sing-some-old-songs-in-high-pitch-on-set/343331', 'only_matching': True, }, { 'url': 'http://www.mewatch.sg/en/clips/seraph-sun-aloysius-will-suddenly-sing-some-old-songs-in-high-pitch-on-set/343331', 'only_matching': True, }, { 'url': 'http://www.mewatch.sg/zh/series/zero-calling-s2-hd/ep13/336367', 
'only_matching': True, }, { 'url': 'http://www.mewatch.sg/en/series/vetri-s2/webisodes/jeeva-is-an-orphan-vetri-s2-webisode-7/342302', 'only_matching': True, }, { 'url': 'http://www.mewatch.sg/en/movies/seven-days/321936', 'only_matching': True, }, { 'url': 'https://www.mewatch.sg/en/tv-show/news/may-2017-cna-singapore-tonight/fri-19-may-2017/512456', 'only_matching': True, }, { 'url': 'http://www.mewatch.sg/en/channels/eleven-plus/401585', 'only_matching': True, }] _API_USER = 'tvpapi_147' _API_PASS = '11111' def _real_extract(self, url): video_id = self._match_id(url) params = { 'initObj': { 'Locale': { 'LocaleLanguage': '', 'LocaleCountry': '', 'LocaleDevice': '', 'LocaleUserState': 0 }, 'Platform': 0, 'SiteGuid': 0, 'DomainID': '0', 'UDID': '', 'ApiUser': self._API_USER, 'ApiPass': self._API_PASS }, 'MediaID': video_id, 'mediaType': 0, } info = self._download_json( 'http://tvpapi.as.tvinci.com/v2_9/gateways/jsonpostgw.aspx?m=GetMediaInfo', video_id, 'Downloading video info json', data=json.dumps(params).encode('utf-8')) title = info['MediaName'] formats = [] for video_file in info.get('Files', []): video_url, vid_format = video_file.get('URL'), video_file.get('Format') if not video_url or video_url == 'NA' or not vid_format: continue ext = determine_ext(video_url) vid_format = vid_format.replace(' ', '') # if geo-restricted, m3u8 is inaccessible, but mp4 is okay if ext == 'm3u8': m3u8_formats = self._extract_m3u8_formats( video_url, video_id, ext='mp4', m3u8_id=vid_format, note='Downloading %s m3u8 information' % vid_format, errnote='Failed to download %s m3u8 information' % vid_format, fatal=False) for f in m3u8_formats: # Apple FairPlay Streaming if '/fpshls/' in f['url']: continue formats.append(f) elif ext == 'mpd': formats.extend(self._extract_mpd_formats( video_url, video_id, mpd_id=vid_format, note='Downloading %s MPD manifest' % vid_format, errnote='Failed to download %s MPD manifest' % vid_format, fatal=False)) elif ext == 'ism': 
formats.extend(self._extract_ism_formats( video_url, video_id, ism_id=vid_format, note='Downloading %s ISM manifest' % vid_format, errnote='Failed to download %s ISM manifest' % vid_format, fatal=False)) elif ext == 'mp4': formats.append({ 'ext': ext, 'url': video_url, 'format_id': vid_format, }) if not formats: for meta in (info.get('Metas') or []): if meta.get('Key') == 'Encryption' and meta.get('Value') == '1': raise ExtractorError( 'This video is DRM protected.', expected=True) # Most likely because geo-blocked raise ExtractorError('No downloadable videos found', expected=True) self._sort_formats(formats) thumbnails = [] for picture in info.get('Pictures', []): if not isinstance(picture, dict): continue pic_url = picture.get('URL') if not pic_url: continue thumbnail = { 'url': pic_url, } pic_size = picture.get('PicSize', '') m = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', pic_size) if m: thumbnail.update({ 'width': int(m.group('width')), 'height': int(m.group('height')), }) thumbnails.append(thumbnail) def counter(prefix): return int_or_none( info.get(prefix + 'Counter') or info.get(prefix.lower() + '_counter')) return { 'id': video_id, 'title': title, 'description': strip_or_none(info.get('Description')), 'duration': int_or_none(info.get('Duration')), 'timestamp': parse_iso8601(info.get('CreationDate') or None), 'average_rating': float_or_none(info.get('Rating')), 'view_count': counter('View'), 'like_count': counter('Like'), 'thumbnails': thumbnails, 'formats': formats, } class MeWatchIE(InfoExtractor): IE_NAME = 'mewatch' _VALID_URL = r'https?://(?:(?:www|live)\.)?mewatch\.sg/watch/[^/?#&]+-(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://www.mewatch.sg/watch/Recipe-Of-Life-E1-179371', 'info_dict': { 'id': '1008625', 'ext': 'mp4', 'title': 'Recipe Of Life 味之道', 'timestamp': 1603306526, 'description': 'md5:6e88cde8af2068444fc8e1bc3ebf257c', 'upload_date': '20201021', }, 'params': { 'skip_download': 'm3u8 download', }, }, { 'url': 
'https://www.mewatch.sg/watch/Little-Red-Dot-Detectives-S2-搜密。打卡。小红点-S2-E1-176232', 'only_matching': True, }, { 'url': 'https://www.mewatch.sg/watch/Little-Red-Dot-Detectives-S2-%E6%90%9C%E5%AF%86%E3%80%82%E6%89%93%E5%8D%A1%E3%80%82%E5%B0%8F%E7%BA%A2%E7%82%B9-S2-E1-176232', 'only_matching': True, }, { 'url': 'https://live.mewatch.sg/watch/Recipe-Of-Life-E41-189759', 'only_matching': True, }] def _real_extract(self, url): item_id = self._match_id(url) custom_id = self._download_json( 'https://cdn.mewatch.sg/api/items/' + item_id, item_id, query={'segments': 'all'})['customId'] return self.url_result( 'toggle:' + custom_id, ToggleIE.ie_key(), custom_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/expotv.py
youtube_dl/extractor/expotv.py
from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( int_or_none, unified_strdate, ) class ExpoTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?expotv\.com/videos/[^?#]*/(?P<id>[0-9]+)($|[?#])' _TEST = { 'url': 'http://www.expotv.com/videos/reviews/3/40/NYX-Butter-lipstick/667916', 'md5': 'fe1d728c3a813ff78f595bc8b7a707a8', 'info_dict': { 'id': '667916', 'ext': 'mp4', 'title': 'NYX Butter Lipstick Little Susie', 'description': 'Goes on like butter, but looks better!', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'Stephanie S.', 'upload_date': '20150520', 'view_count': int, } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) player_key = self._search_regex( r'<param name="playerKey" value="([^"]+)"', webpage, 'player key') config = self._download_json( 'http://client.expotv.com/video/config/%s/%s' % (video_id, player_key), video_id, 'Downloading video configuration') formats = [] for fcfg in config['sources']: media_url = fcfg.get('file') if not media_url: continue if fcfg.get('type') == 'm3u8': formats.extend(self._extract_m3u8_formats( media_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls')) else: formats.append({ 'url': media_url, 'height': int_or_none(fcfg.get('height')), 'format_id': fcfg.get('label'), 'ext': self._search_regex( r'filename=.*\.([a-z0-9_A-Z]+)&', media_url, 'file extension', default=None) or fcfg.get('type'), }) self._sort_formats(formats) title = self._og_search_title(webpage) description = self._og_search_description(webpage) thumbnail = config.get('image') view_count = int_or_none(self._search_regex( r'<h5>Plays: ([0-9]+)</h5>', webpage, 'view counts')) uploader = self._search_regex( r'<div class="reviewer">\s*<img alt="([^"]+)"', webpage, 'uploader', fatal=False) upload_date = unified_strdate(self._search_regex( r'<h5>Reviewed on ([0-9/.]+)</h5>', webpage, 'upload date', fatal=False), day_first=False) return { 
'id': video_id, 'formats': formats, 'title': title, 'description': description, 'view_count': view_count, 'thumbnail': thumbnail, 'uploader': uploader, 'upload_date': upload_date, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/dreisat.py
youtube_dl/extractor/dreisat.py
from __future__ import unicode_literals from .zdf import ZDFIE class DreiSatIE(ZDFIE): IE_NAME = '3sat' _VALID_URL = r'https?://(?:www\.)?3sat\.de/(?:[^/]+/)*(?P<id>[^/?#&]+)\.html' _TESTS = [{ # Same as https://www.zdf.de/dokumentation/ab-18/10-wochen-sommer-102.html 'url': 'https://www.3sat.de/film/ab-18/10-wochen-sommer-108.html', 'md5': '0aff3e7bc72c8813f5e0fae333316a1d', 'info_dict': { 'id': '141007_ab18_10wochensommer_film', 'ext': 'mp4', 'title': 'Ab 18! - 10 Wochen Sommer', 'description': 'md5:8253f41dc99ce2c3ff892dac2d65fe26', 'duration': 2660, 'timestamp': 1608604200, 'upload_date': '20201222', }, }, { 'url': 'https://www.3sat.de/gesellschaft/schweizweit/waidmannsheil-100.html', 'info_dict': { 'id': '140913_sendung_schweizweit', 'ext': 'mp4', 'title': 'Waidmannsheil', 'description': 'md5:cce00ca1d70e21425e72c86a98a56817', 'timestamp': 1410623100, 'upload_date': '20140913' }, 'params': { 'skip_download': True, } }, { # Same as https://www.zdf.de/filme/filme-sonstige/der-hauptmann-112.html 'url': 'https://www.3sat.de/film/spielfilm/der-hauptmann-100.html', 'only_matching': True, }, { # Same as https://www.zdf.de/wissen/nano/nano-21-mai-2019-102.html, equal media ids 'url': 'https://www.3sat.de/wissen/nano/nano-21-mai-2019-102.html', 'only_matching': True, }]
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/imggaming.py
youtube_dl/extractor/imggaming.py
# coding: utf-8 from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..compat import compat_HTTPError from ..utils import ( ExtractorError, int_or_none, str_or_none, try_get, ) class ImgGamingBaseIE(InfoExtractor): _API_BASE = 'https://dce-frontoffice.imggaming.com/api/v2/' _API_KEY = '857a1e5d-e35e-4fdf-805b-a87b6f8364bf' _HEADERS = None _MANIFEST_HEADERS = {'Accept-Encoding': 'identity'} _REALM = None _VALID_URL_TEMPL = r'https?://(?P<domain>%s)/(?P<type>live|playlist|video)/(?P<id>\d+)(?:\?.*?\bplaylistId=(?P<playlist_id>\d+))?' def _real_initialize(self): self._HEADERS = { 'Realm': 'dce.' + self._REALM, 'x-api-key': self._API_KEY, } email, password = self._get_login_info() if email is None: self.raise_login_required() p_headers = self._HEADERS.copy() p_headers['Content-Type'] = 'application/json' self._HEADERS['Authorization'] = 'Bearer ' + self._download_json( self._API_BASE + 'login', None, 'Logging in', data=json.dumps({ 'id': email, 'secret': password, }).encode(), headers=p_headers)['authorisationToken'] def _call_api(self, path, media_id): return self._download_json( self._API_BASE + path + media_id, media_id, headers=self._HEADERS) def _extract_dve_api_url(self, media_id, media_type): stream_path = 'stream' if media_type == 'video': stream_path += '/vod/' else: stream_path += '?eventId=' try: return self._call_api( stream_path, media_id)['playerUrlCallback'] except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403: raise ExtractorError( self._parse_json(e.cause.read().decode(), media_id)['messages'][0], expected=True) raise def _real_extract(self, url): domain, media_type, media_id, playlist_id = re.match(self._VALID_URL, url).groups() if playlist_id: if self._downloader.params.get('noplaylist'): self.to_screen('Downloading just video %s because of --no-playlist' % media_id) else: self.to_screen('Downloading playlist %s - add --no-playlist to just download video' % 
playlist_id) media_type, media_id = 'playlist', playlist_id if media_type == 'playlist': playlist = self._call_api('vod/playlist/', media_id) entries = [] for video in try_get(playlist, lambda x: x['videos']['vods']) or []: video_id = str_or_none(video.get('id')) if not video_id: continue entries.append(self.url_result( 'https://%s/video/%s' % (domain, video_id), self.ie_key(), video_id)) return self.playlist_result( entries, media_id, playlist.get('title'), playlist.get('description')) dve_api_url = self._extract_dve_api_url(media_id, media_type) video_data = self._download_json(dve_api_url, media_id) is_live = media_type == 'live' if is_live: title = self._live_title(self._call_api('event/', media_id)['title']) else: title = video_data['name'] formats = [] for proto in ('hls', 'dash'): media_url = video_data.get(proto + 'Url') or try_get(video_data, lambda x: x[proto]['url']) if not media_url: continue if proto == 'hls': m3u8_formats = self._extract_m3u8_formats( media_url, media_id, 'mp4', 'm3u8' if is_live else 'm3u8_native', m3u8_id='hls', fatal=False, headers=self._MANIFEST_HEADERS) for f in m3u8_formats: f.setdefault('http_headers', {}).update(self._MANIFEST_HEADERS) formats.append(f) else: formats.extend(self._extract_mpd_formats( media_url, media_id, mpd_id='dash', fatal=False, headers=self._MANIFEST_HEADERS)) self._sort_formats(formats) subtitles = {} for subtitle in video_data.get('subtitles', []): subtitle_url = subtitle.get('url') if not subtitle_url: continue subtitles.setdefault(subtitle.get('lang', 'en_US'), []).append({ 'url': subtitle_url, }) return { 'id': media_id, 'title': title, 'formats': formats, 'thumbnail': video_data.get('thumbnailUrl'), 'description': video_data.get('description'), 'duration': int_or_none(video_data.get('duration')), 'tags': video_data.get('tags'), 'is_live': is_live, 'subtitles': subtitles, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/hgtv.py
youtube_dl/extractor/hgtv.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class HGTVComShowIE(InfoExtractor): IE_NAME = 'hgtv.com:show' _VALID_URL = r'https?://(?:www\.)?hgtv\.com/shows/[^/]+/(?P<id>[^/?#&]+)' _TESTS = [{ # data-module="video" 'url': 'http://www.hgtv.com/shows/flip-or-flop/flip-or-flop-full-episodes-season-4-videos', 'info_dict': { 'id': 'flip-or-flop-full-episodes-season-4-videos', 'title': 'Flip or Flop Full Episodes', }, 'playlist_mincount': 15, }, { # data-deferred-module="video" 'url': 'http://www.hgtv.com/shows/good-bones/episodes/an-old-victorian-house-gets-a-new-facelift', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) config = self._parse_json( self._search_regex( r'(?s)data-(?:deferred-)?module=["\']video["\'][^>]*>.*?<script[^>]+type=["\']text/x-config["\'][^>]*>(.+?)</script', webpage, 'video config'), display_id)['channels'][0] entries = [ self.url_result(video['releaseUrl']) for video in config['videos'] if video.get('releaseUrl')] return self.playlist_result( entries, display_id, config.get('title'), config.get('description'))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/foxnews.py
youtube_dl/extractor/foxnews.py
from __future__ import unicode_literals import re from .amp import AMPIE from .common import InfoExtractor class FoxNewsIE(AMPIE): IE_NAME = 'foxnews' IE_DESC = 'Fox News and Fox Business Video' _VALID_URL = r'https?://(?P<host>video\.(?:insider\.)?fox(?:news|business)\.com)/v/(?:video-embed\.html\?video_id=)?(?P<id>\d+)' _TESTS = [ { 'url': 'http://video.foxnews.com/v/3937480/frozen-in-time/#sp=show-clips', 'md5': '32aaded6ba3ef0d1c04e238d01031e5e', 'info_dict': { 'id': '3937480', 'ext': 'flv', 'title': 'Frozen in Time', 'description': '16-year-old girl is size of toddler', 'duration': 265, 'timestamp': 1304411491, 'upload_date': '20110503', 'thumbnail': r're:^https?://.*\.jpg$', }, }, { 'url': 'http://video.foxnews.com/v/3922535568001/rep-luis-gutierrez-on-if-obamas-immigration-plan-is-legal/#sp=show-clips', 'md5': '5846c64a1ea05ec78175421b8323e2df', 'info_dict': { 'id': '3922535568001', 'ext': 'mp4', 'title': "Rep. Luis Gutierrez on if Obama's immigration plan is legal", 'description': "Congressman discusses president's plan", 'duration': 292, 'timestamp': 1417662047, 'upload_date': '20141204', 'thumbnail': r're:^https?://.*\.jpg$', }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://video.foxnews.com/v/video-embed.html?video_id=3937480&d=video.foxnews.com', 'only_matching': True, }, { 'url': 'http://video.foxbusiness.com/v/4442309889001', 'only_matching': True, }, { # From http://insider.foxnews.com/2016/08/25/univ-wisconsin-student-group-pushing-silence-certain-words 'url': 'http://video.insider.foxnews.com/v/video-embed.html?video_id=5099377331001&autoplay=true&share_url=http://insider.foxnews.com/2016/08/25/univ-wisconsin-student-group-pushing-silence-certain-words&share_title=Student%20Group:%20Saying%20%27Politically%20Correct,%27%20%27Trash%27%20and%20%27Lame%27%20Is%20Offensive&share=true', 'only_matching': True, }, ] @staticmethod def _extract_urls(webpage): return [ mobj.group('url') for mobj in re.finditer( 
r'<(?:amp-)?iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//video\.foxnews\.com/v/video-embed\.html?.*?\bvideo_id=\d+.*?)\1', webpage)] def _real_extract(self, url): host, video_id = re.match(self._VALID_URL, url).groups() info = self._extract_feed_info( 'http://%s/v/feed/video/%s.js?template=fox' % (host, video_id)) info['id'] = video_id return info class FoxNewsArticleIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?(?:insider\.)?foxnews\.com/(?!v)([^/]+/)+(?P<id>[a-z-]+)' IE_NAME = 'foxnews:article' _TESTS = [{ # data-video-id 'url': 'http://www.foxnews.com/politics/2016/09/08/buzz-about-bud-clinton-camp-denies-claims-wore-earpiece-at-forum.html', 'md5': '83d44e1aff1433e7a29a7b537d1700b5', 'info_dict': { 'id': '5116295019001', 'ext': 'mp4', 'title': 'Trump and Clinton asked to defend positions on Iraq War', 'description': 'Veterans react on \'The Kelly File\'', 'timestamp': 1473301045, 'upload_date': '20160908', }, }, { # iframe embed 'url': 'http://www.foxnews.com/us/2018/03/09/parkland-survivor-kyle-kashuv-on-meeting-trump-his-app-to-prevent-another-school-shooting.amp.html?__twitter_impression=true', 'info_dict': { 'id': '5748266721001', 'ext': 'flv', 'title': 'Kyle Kashuv has a positive message for the Trump White House', 'description': 'Marjory Stoneman Douglas student disagrees with classmates.', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 229, 'timestamp': 1520594670, 'upload_date': '20180309', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://insider.foxnews.com/2016/08/25/univ-wisconsin-student-group-pushing-silence-certain-words', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) video_id = self._html_search_regex( r'data-video-id=([\'"])(?P<id>[^\'"]+)\1', webpage, 'video ID', group='id', default=None) if video_id: return self.url_result( 'http://video.foxnews.com/v/' + video_id, FoxNewsIE.ie_key()) return self.url_result( 
FoxNewsIE._extract_urls(webpage)[0], FoxNewsIE.ie_key())
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/dhm.py
youtube_dl/extractor/dhm.py
from __future__ import unicode_literals from .common import InfoExtractor from ..utils import parse_duration class DHMIE(InfoExtractor): IE_DESC = 'Filmarchiv - Deutsches Historisches Museum' _VALID_URL = r'https?://(?:www\.)?dhm\.de/filmarchiv/(?:[^/]+/)+(?P<id>[^/]+)' _TESTS = [{ 'url': 'http://www.dhm.de/filmarchiv/die-filme/the-marshallplan-at-work-in-west-germany/', 'md5': '11c475f670209bf6acca0b2b7ef51827', 'info_dict': { 'id': 'the-marshallplan-at-work-in-west-germany', 'ext': 'flv', 'title': 'MARSHALL PLAN AT WORK IN WESTERN GERMANY, THE', 'description': 'md5:1fabd480c153f97b07add61c44407c82', 'duration': 660, 'thumbnail': r're:^https?://.*\.jpg$', }, }, { 'url': 'http://www.dhm.de/filmarchiv/02-mapping-the-wall/peter-g/rolle-1/', 'md5': '09890226332476a3e3f6f2cb74734aa5', 'info_dict': { 'id': 'rolle-1', 'ext': 'flv', 'title': 'ROLLE 1', 'thumbnail': r're:^https?://.*\.jpg$', }, }] def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) playlist_url = self._search_regex( r"file\s*:\s*'([^']+)'", webpage, 'playlist url') entries = self._extract_xspf_playlist(playlist_url, playlist_id) title = self._search_regex( [r'dc:title="([^"]+)"', r'<title> &raquo;([^<]+)</title>'], webpage, 'title').strip() description = self._html_search_regex( r'<p><strong>Description:</strong>(.+?)</p>', webpage, 'description', default=None) duration = parse_duration(self._search_regex( r'<em>Length\s*</em>\s*:\s*</strong>([^<]+)', webpage, 'duration', default=None)) entries[0].update({ 'title': title, 'description': description, 'duration': duration, }) return self.playlist_result(entries, playlist_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/nrl.py
youtube_dl/extractor/nrl.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class NRLTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?nrl\.com/tv(/[^/]+)*/(?P<id>[^/?&#]+)' _TEST = { 'url': 'https://www.nrl.com/tv/news/match-highlights-titans-v-knights-862805/', 'info_dict': { 'id': 'YyNnFuaDE6kPJqlDhG4CGQ_w89mKTau4', 'ext': 'mp4', 'title': 'Match Highlights: Titans v Knights', }, 'params': { # m3u8 download 'skip_download': True, 'format': 'bestvideo', }, } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) q_data = self._parse_json(self._html_search_regex( r'(?s)q-data="({.+?})"', webpage, 'player data'), display_id) ooyala_id = q_data['videoId'] return self.url_result( 'ooyala:' + ooyala_id, 'Ooyala', ooyala_id, q_data.get('title'))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/beatport.py
youtube_dl/extractor/beatport.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_str from ..utils import int_or_none class BeatportIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.|pro\.)?beatport\.com/track/(?P<display_id>[^/]+)/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://beatport.com/track/synesthesia-original-mix/5379371', 'md5': 'b3c34d8639a2f6a7f734382358478887', 'info_dict': { 'id': '5379371', 'display_id': 'synesthesia-original-mix', 'ext': 'mp4', 'title': 'Froxic - Synesthesia (Original Mix)', }, }, { 'url': 'https://beatport.com/track/love-and-war-original-mix/3756896', 'md5': 'e44c3025dfa38c6577fbaeb43da43514', 'info_dict': { 'id': '3756896', 'display_id': 'love-and-war-original-mix', 'ext': 'mp3', 'title': 'Wolfgang Gartner - Love & War (Original Mix)', }, }, { 'url': 'https://beatport.com/track/birds-original-mix/4991738', 'md5': 'a1fd8e8046de3950fd039304c186c05f', 'info_dict': { 'id': '4991738', 'display_id': 'birds-original-mix', 'ext': 'mp4', 'title': "Tos, Middle Milk, Mumblin' Johnsson - Birds (Original Mix)", } }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) track_id = mobj.group('id') display_id = mobj.group('display_id') webpage = self._download_webpage(url, display_id) playables = self._parse_json( self._search_regex( r'window\.Playables\s*=\s*({.+?});', webpage, 'playables info', flags=re.DOTALL), track_id) track = next(t for t in playables['tracks'] if t['id'] == int(track_id)) title = ', '.join((a['name'] for a in track['artists'])) + ' - ' + track['name'] if track['mix']: title += ' (' + track['mix'] + ')' formats = [] for ext, info in track['preview'].items(): if not info['url']: continue fmt = { 'url': info['url'], 'ext': ext, 'format_id': ext, 'vcodec': 'none', } if ext == 'mp3': fmt['preference'] = 0 fmt['acodec'] = 'mp3' fmt['abr'] = 96 fmt['asr'] = 44100 elif ext == 'mp4': fmt['preference'] = 1 fmt['acodec'] = 'aac' fmt['abr'] = 96 fmt['asr'] = 44100 
formats.append(fmt) self._sort_formats(formats) images = [] for name, info in track['images'].items(): image_url = info.get('url') if name == 'dynamic' or not image_url: continue image = { 'id': name, 'url': image_url, 'height': int_or_none(info.get('height')), 'width': int_or_none(info.get('width')), } images.append(image) return { 'id': compat_str(track.get('id')) or track_id, 'display_id': track.get('slug') or display_id, 'title': title, 'formats': formats, 'thumbnails': images, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/vesti.py
youtube_dl/extractor/vesti.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ExtractorError from .rutv import RUTVIE class VestiIE(InfoExtractor): IE_DESC = 'Вести.Ru' _VALID_URL = r'https?://(?:.+?\.)?vesti\.ru/(?P<id>.+)' _TESTS = [ { 'url': 'http://www.vesti.ru/videos?vid=575582&cid=1', 'info_dict': { 'id': '765035', 'ext': 'mp4', 'title': 'Вести.net: биткоины в России не являются законными', 'description': 'md5:d4bb3859dc1177b28a94c5014c35a36b', 'duration': 302, }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://www.vesti.ru/doc.html?id=1349233', 'info_dict': { 'id': '773865', 'ext': 'mp4', 'title': 'Участники митинга штурмуют Донецкую областную администрацию', 'description': 'md5:1a160e98b3195379b4c849f2f4958009', 'duration': 210, }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://www.vesti.ru/only_video.html?vid=576180', 'info_dict': { 'id': '766048', 'ext': 'mp4', 'title': 'США заморозило, Британию затопило', 'description': 'md5:f0ed0695ec05aed27c56a70a58dc4cc1', 'duration': 87, }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://hitech.vesti.ru/news/view/id/4000', 'info_dict': { 'id': '766888', 'ext': 'mp4', 'title': 'Вести.net: интернет-гиганты начали перетягивание программных "одеял"', 'description': 'md5:65ddd47f9830c4f42ed6475f8730c995', 'duration': 279, }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://sochi2014.vesti.ru/video/index/video_id/766403', 'info_dict': { 'id': '766403', 'ext': 'mp4', 'title': 'XXII зимние Олимпийские игры. Российские хоккеисты стартовали на Олимпиаде с победы', 'description': 'md5:55805dfd35763a890ff50fa9e35e31b3', 'duration': 271, }, 'params': { # m3u8 download 'skip_download': True, }, 'skip': 'Blocked outside Russia', }, { 'url': 'http://sochi2014.vesti.ru/live/play/live_id/301', 'info_dict': { 'id': '51499', 'ext': 'flv', 'title': 'Сочи-2014. Биатлон. Индивидуальная гонка. 
Мужчины ', 'description': 'md5:9e0ed5c9d2fa1efbfdfed90c9a6d179c', }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'Translation has finished' }, ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') page = self._download_webpage(url, video_id, 'Downloading page') mobj = re.search( r'<meta[^>]+?property="og:video"[^>]+?content="http://www\.vesti\.ru/i/flvplayer_videoHost\.swf\?vid=(?P<id>\d+)', page) if mobj: video_id = mobj.group('id') page = self._download_webpage('http://www.vesti.ru/only_video.html?vid=%s' % video_id, video_id, 'Downloading video page') rutv_url = RUTVIE._extract_url(page) if rutv_url: return self.url_result(rutv_url, 'RUTV') raise ExtractorError('No video found', expected=True)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/kankan.py
youtube_dl/extractor/kankan.py
from __future__ import unicode_literals import re import hashlib from .common import InfoExtractor _md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest() class KankanIE(InfoExtractor): _VALID_URL = r'https?://(?:.*?\.)?kankan\.com/.+?/(?P<id>\d+)\.shtml' _TEST = { 'url': 'http://yinyue.kankan.com/vod/48/48863.shtml', 'md5': '29aca1e47ae68fc28804aca89f29507e', 'info_dict': { 'id': '48863', 'ext': 'flv', 'title': 'Ready To Go', }, 'skip': 'Only available from China', } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._search_regex(r'(?:G_TITLE=|G_MOVIE_TITLE = )[\'"](.+?)[\'"]', webpage, 'video title') surls = re.search(r'surls:\[\'.+?\'\]|lurl:\'.+?\.flv\'', webpage).group(0) gcids = re.findall(r'http://.+?/.+?/(.+?)/', surls) gcid = gcids[-1] info_url = 'http://p2s.cl.kankan.com/getCdnresource_flv?gcid=%s' % gcid video_info_page = self._download_webpage( info_url, video_id, 'Downloading video url info') ip = self._search_regex(r'ip:"(.+?)"', video_info_page, 'video url ip') path = self._search_regex(r'path:"(.+?)"', video_info_page, 'video url path') param1 = self._search_regex(r'param1:(\d+)', video_info_page, 'param1') param2 = self._search_regex(r'param2:(\d+)', video_info_page, 'param2') key = _md5('xl_mp43651' + param1 + param2) video_url = 'http://%s%s?key=%s&key1=%s' % (ip, path, key, param2) return { 'id': video_id, 'title': title, 'url': video_url, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/rutube.py
youtube_dl/extractor/rutube.py
# coding: utf-8 from __future__ import unicode_literals import re import itertools from .common import InfoExtractor from ..compat import ( compat_str, compat_parse_qs, compat_urllib_parse_urlparse, ) from ..utils import ( determine_ext, bool_or_none, int_or_none, try_get, unified_timestamp, url_or_none, ) class RutubeBaseIE(InfoExtractor): def _download_api_info(self, video_id, query=None): if not query: query = {} query['format'] = 'json' return self._download_json( 'http://rutube.ru/api/video/%s/' % video_id, video_id, 'Downloading video JSON', 'Unable to download video JSON', query=query) @staticmethod def _extract_info(video, video_id=None, require_title=True): title = video['title'] if require_title else video.get('title') age_limit = video.get('is_adult') if age_limit is not None: age_limit = 18 if age_limit is True else 0 uploader_id = try_get(video, lambda x: x['author']['id']) category = try_get(video, lambda x: x['category']['name']) return { 'id': video.get('id') or video_id if video_id else video['id'], 'title': title, 'description': video.get('description'), 'thumbnail': video.get('thumbnail_url'), 'duration': int_or_none(video.get('duration')), 'uploader': try_get(video, lambda x: x['author']['name']), 'uploader_id': compat_str(uploader_id) if uploader_id else None, 'timestamp': unified_timestamp(video.get('created_ts')), 'category': [category] if category else None, 'age_limit': age_limit, 'view_count': int_or_none(video.get('hits')), 'comment_count': int_or_none(video.get('comments_count')), 'is_live': bool_or_none(video.get('is_livestream')), } def _download_and_extract_info(self, video_id, query=None): return self._extract_info( self._download_api_info(video_id, query=query), video_id) def _download_api_options(self, video_id, query=None): if not query: query = {} query['format'] = 'json' return self._download_json( 'http://rutube.ru/api/play/options/%s/' % video_id, video_id, 'Downloading options JSON', 'Unable to download options JSON', 
headers=self.geo_verification_headers(), query=query) def _extract_formats(self, options, video_id): formats = [] for format_id, format_url in options['video_balancer'].items(): ext = determine_ext(format_url) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( format_url, video_id, 'mp4', m3u8_id=format_id, fatal=False)) elif ext == 'f4m': formats.extend(self._extract_f4m_formats( format_url, video_id, f4m_id=format_id, fatal=False)) else: formats.append({ 'url': format_url, 'format_id': format_id, }) self._sort_formats(formats) return formats def _download_and_extract_formats(self, video_id, query=None): return self._extract_formats( self._download_api_options(video_id, query=query), video_id) class RutubeIE(RutubeBaseIE): IE_NAME = 'rutube' IE_DESC = 'Rutube videos' _VALID_URL = r'https?://rutube\.ru/(?:video|(?:play/)?embed)/(?P<id>[\da-z]{32})' _TESTS = [{ 'url': 'http://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/', 'md5': '1d24f180fac7a02f3900712e5a5764d6', 'info_dict': { 'id': '3eac3b4561676c17df9132a9a1e62e3e', 'ext': 'mp4', 'title': 'Раненный кенгуру забежал в аптеку', 'description': 'http://www.ntdtv.ru ', 'duration': 81, 'uploader': 'NTDRussian', 'uploader_id': '29790', 'timestamp': 1381943602, 'upload_date': '20131016', 'age_limit': 0, }, }, { 'url': 'http://rutube.ru/play/embed/a10e53b86e8f349080f718582ce4c661', 'only_matching': True, }, { 'url': 'http://rutube.ru/embed/a10e53b86e8f349080f718582ce4c661', 'only_matching': True, }, { 'url': 'http://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/?pl_id=4252', 'only_matching': True, }, { 'url': 'https://rutube.ru/video/10b3a03fc01d5bbcc632a2f3514e8aab/?pl_type=source', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if RutubePlaylistIE.suitable(url) else super(RutubeIE, cls).suitable(url) @staticmethod def _extract_urls(webpage): return [mobj.group('url') for mobj in re.finditer( 
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//rutube\.ru/embed/[\da-z]{32}.*?)\1', webpage)] def _real_extract(self, url): video_id = self._match_id(url) info = self._download_and_extract_info(video_id) info['formats'] = self._download_and_extract_formats(video_id) return info class RutubeEmbedIE(RutubeBaseIE): IE_NAME = 'rutube:embed' IE_DESC = 'Rutube embedded videos' _VALID_URL = r'https?://rutube\.ru/(?:video|play)/embed/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'http://rutube.ru/video/embed/6722881?vk_puid37=&vk_puid38=', 'info_dict': { 'id': 'a10e53b86e8f349080f718582ce4c661', 'ext': 'mp4', 'timestamp': 1387830582, 'upload_date': '20131223', 'uploader_id': '297833', 'description': 'Видео группы ★http://vk.com/foxkidsreset★ музей Fox Kids и Jetix<br/><br/> восстановлено и сделано в шикоформате subziro89 http://vk.com/subziro89', 'uploader': 'subziro89 ILya', 'title': 'Мистический городок Эйри в Индиан 5 серия озвучка subziro89', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://rutube.ru/play/embed/8083783', 'only_matching': True, }, { # private video 'url': 'https://rutube.ru/play/embed/10631925?p=IbAigKqWd1do4mjaM5XLIQ', 'only_matching': True, }] def _real_extract(self, url): embed_id = self._match_id(url) # Query may contain private videos token and should be passed to API # requests (see #19163) query = compat_parse_qs(compat_urllib_parse_urlparse(url).query) options = self._download_api_options(embed_id, query) video_id = options['effective_video'] formats = self._extract_formats(options, video_id) info = self._download_and_extract_info(video_id, query) info.update({ 'extractor_key': 'Rutube', 'formats': formats, }) return info class RutubePlaylistBaseIE(RutubeBaseIE): def _next_page_url(self, page_num, playlist_id, *args, **kwargs): return self._PAGE_TEMPLATE % (playlist_id, page_num) def _entries(self, playlist_id, *args, **kwargs): next_page_url = None for pagenum in itertools.count(1): page = self._download_json( next_page_url or 
self._next_page_url( pagenum, playlist_id, *args, **kwargs), playlist_id, 'Downloading page %s' % pagenum) results = page.get('results') if not results or not isinstance(results, list): break for result in results: video_url = url_or_none(result.get('video_url')) if not video_url: continue entry = self._extract_info(result, require_title=False) entry.update({ '_type': 'url', 'url': video_url, 'ie_key': RutubeIE.ie_key(), }) yield entry next_page_url = page.get('next') if not next_page_url or not page.get('has_next'): break def _extract_playlist(self, playlist_id, *args, **kwargs): return self.playlist_result( self._entries(playlist_id, *args, **kwargs), playlist_id, kwargs.get('playlist_name')) def _real_extract(self, url): return self._extract_playlist(self._match_id(url)) class RutubeChannelIE(RutubePlaylistBaseIE): IE_NAME = 'rutube:channel' IE_DESC = 'Rutube channels' _VALID_URL = r'https?://rutube\.ru/tags/video/(?P<id>\d+)' _TESTS = [{ 'url': 'http://rutube.ru/tags/video/1800/', 'info_dict': { 'id': '1800', }, 'playlist_mincount': 68, }] _PAGE_TEMPLATE = 'http://rutube.ru/api/tags/video/%s/?page=%s&format=json' class RutubeMovieIE(RutubePlaylistBaseIE): IE_NAME = 'rutube:movie' IE_DESC = 'Rutube movies' _VALID_URL = r'https?://rutube\.ru/metainfo/tv/(?P<id>\d+)' _TESTS = [] _MOVIE_TEMPLATE = 'http://rutube.ru/api/metainfo/tv/%s/?format=json' _PAGE_TEMPLATE = 'http://rutube.ru/api/metainfo/tv/%s/video?page=%s&format=json' def _real_extract(self, url): movie_id = self._match_id(url) movie = self._download_json( self._MOVIE_TEMPLATE % movie_id, movie_id, 'Downloading movie JSON') return self._extract_playlist( movie_id, playlist_name=movie.get('name')) class RutubePersonIE(RutubePlaylistBaseIE): IE_NAME = 'rutube:person' IE_DESC = 'Rutube person videos' _VALID_URL = r'https?://rutube\.ru/video/person/(?P<id>\d+)' _TESTS = [{ 'url': 'http://rutube.ru/video/person/313878/', 'info_dict': { 'id': '313878', }, 'playlist_mincount': 37, }] _PAGE_TEMPLATE = 
'http://rutube.ru/api/video/person/%s/?page=%s&format=json' class RutubePlaylistIE(RutubePlaylistBaseIE): IE_NAME = 'rutube:playlist' IE_DESC = 'Rutube playlists' _VALID_URL = r'https?://rutube\.ru/(?:video|(?:play/)?embed)/[\da-z]{32}/\?.*?\bpl_id=(?P<id>\d+)' _TESTS = [{ 'url': 'https://rutube.ru/video/cecd58ed7d531fc0f3d795d51cee9026/?pl_id=3097&pl_type=tag', 'info_dict': { 'id': '3097', }, 'playlist_count': 27, }, { 'url': 'https://rutube.ru/video/10b3a03fc01d5bbcc632a2f3514e8aab/?pl_id=4252&pl_type=source', 'only_matching': True, }] _PAGE_TEMPLATE = 'http://rutube.ru/api/playlist/%s/%s/?page=%s&format=json' @classmethod def suitable(cls, url): if not super(RutubePlaylistIE, cls).suitable(url): return False params = compat_parse_qs(compat_urllib_parse_urlparse(url).query) return params.get('pl_type', [None])[0] and int_or_none(params.get('pl_id', [None])[0]) def _next_page_url(self, page_num, playlist_id, item_kind): return self._PAGE_TEMPLATE % (item_kind, playlist_id, page_num) def _real_extract(self, url): qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query) playlist_kind = qs['pl_type'][0] playlist_id = qs['pl_id'][0] return self._extract_playlist(playlist_id, item_kind=playlist_kind)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/cnn.py
youtube_dl/extractor/cnn.py
from __future__ import unicode_literals import re from .common import InfoExtractor from .turner import TurnerBaseIE from ..utils import url_basename class CNNIE(TurnerBaseIE): _VALID_URL = r'''(?x)https?://(?:(?P<sub_domain>edition|www|money)\.)?cnn\.com/(?:video/(?:data/.+?|\?)/)?videos?/ (?P<path>.+?/(?P<title>[^/]+?)(?:\.(?:[a-z\-]+)|(?=&)))''' _TESTS = [{ 'url': 'http://edition.cnn.com/video/?/video/sports/2013/06/09/nadal-1-on-1.cnn', 'md5': '3e6121ea48df7e2259fe73a0628605c4', 'info_dict': { 'id': 'sports/2013/06/09/nadal-1-on-1.cnn', 'ext': 'mp4', 'title': 'Nadal wins 8th French Open title', 'description': 'World Sport\'s Amanda Davies chats with 2013 French Open champion Rafael Nadal.', 'duration': 135, 'upload_date': '20130609', }, 'expected_warnings': ['Failed to download m3u8 information'], }, { 'url': 'http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29', 'md5': 'b5cc60c60a3477d185af8f19a2a26f4e', 'info_dict': { 'id': 'us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology', 'ext': 'mp4', 'title': "Student's epic speech stuns new freshmen", 'description': "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"", 'upload_date': '20130821', }, 'expected_warnings': ['Failed to download m3u8 information'], }, { 'url': 'http://www.cnn.com/video/data/2.0/video/living/2014/12/22/growing-america-nashville-salemtown-board-episode-1.hln.html', 'md5': 'f14d02ebd264df951feb2400e2c25a1b', 'info_dict': { 'id': 'living/2014/12/22/growing-america-nashville-salemtown-board-episode-1.hln', 'ext': 'mp4', 'title': 'Nashville Ep. 
1: Hand crafted skateboards', 'description': 'md5:e7223a503315c9f150acac52e76de086', 'upload_date': '20141222', }, 'expected_warnings': ['Failed to download m3u8 information'], }, { 'url': 'http://money.cnn.com/video/news/2016/08/19/netflix-stunning-stats.cnnmoney/index.html', 'md5': '52a515dc1b0f001cd82e4ceda32be9d1', 'info_dict': { 'id': '/video/news/2016/08/19/netflix-stunning-stats.cnnmoney', 'ext': 'mp4', 'title': '5 stunning stats about Netflix', 'description': 'Did you know that Netflix has more than 80 million members? Here are five facts about the online video distributor that you probably didn\'t know.', 'upload_date': '20160819', }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://cnn.com/video/?/video/politics/2015/03/27/pkg-arizona-senator-church-attendance-mandatory.ktvk', 'only_matching': True, }, { 'url': 'http://cnn.com/video/?/video/us/2015/04/06/dnt-baker-refuses-anti-gay-order.wkmg', 'only_matching': True, }, { 'url': 'http://edition.cnn.com/videos/arts/2016/04/21/olympic-games-cultural-a-z-brazil.cnn', 'only_matching': True, }] _CONFIG = { # http://edition.cnn.com/.element/apps/cvp/3.0/cfg/spider/cnn/expansion/config.xml 'edition': { 'data_src': 'http://edition.cnn.com/video/data/3.0/video/%s/index.xml', 'media_src': 'http://pmd.cdn.turner.com/cnn/big', }, # http://money.cnn.com/.element/apps/cvp2/cfg/config.xml 'money': { 'data_src': 'http://money.cnn.com/video/data/4.0/video/%s.xml', 'media_src': 'http://ht3.cdn.turner.com/money/big', }, } def _extract_timestamp(self, video_data): # TODO: fix timestamp extraction return None def _real_extract(self, url): sub_domain, path, page_title = re.match(self._VALID_URL, url).groups() if sub_domain not in ('money', 'edition'): sub_domain = 'edition' config = self._CONFIG[sub_domain] return self._extract_cvp_info( config['data_src'] % path, page_title, { 'default': { 'media_src': config['media_src'], }, 'f4m': { 'host': 'cnn-vh.akamaihd.net', }, }) class 
CNNBlogsIE(InfoExtractor): _VALID_URL = r'https?://[^\.]+\.blogs\.cnn\.com/.+' _TEST = { 'url': 'http://reliablesources.blogs.cnn.com/2014/02/09/criminalizing-journalism/', 'md5': '3e56f97b0b6ffb4b79f4ea0749551084', 'info_dict': { 'id': 'bestoftv/2014/02/09/criminalizing-journalism.cnn', 'ext': 'mp4', 'title': 'Criminalizing journalism?', 'description': 'Glenn Greenwald responds to comments made this week on Capitol Hill that journalists could be criminal accessories.', 'upload_date': '20140209', }, 'expected_warnings': ['Failed to download m3u8 information'], 'add_ie': ['CNN'], } def _real_extract(self, url): webpage = self._download_webpage(url, url_basename(url)) cnn_url = self._html_search_regex(r'data-url="(.+?)"', webpage, 'cnn url') return self.url_result(cnn_url, CNNIE.ie_key()) class CNNArticleIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:edition|www)\.)?cnn\.com/(?!videos?/)' _TEST = { 'url': 'http://www.cnn.com/2014/12/21/politics/obama-north-koreas-hack-not-war-but-cyber-vandalism/', 'md5': '689034c2a3d9c6dc4aa72d65a81efd01', 'info_dict': { 'id': 'bestoftv/2014/12/21/ip-north-korea-obama.cnn', 'ext': 'mp4', 'title': 'Obama: Cyberattack not an act of war', 'description': 'md5:0a802a40d2376f60e6b04c8d5bcebc4b', 'upload_date': '20141221', }, 'expected_warnings': ['Failed to download m3u8 information'], 'add_ie': ['CNN'], } def _real_extract(self, url): webpage = self._download_webpage(url, url_basename(url)) cnn_url = self._html_search_regex(r"video:\s*'([^']+)'", webpage, 'cnn url') return self.url_result('http://cnn.com/video/?/video/' + cnn_url, CNNIE.ie_key())
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/philharmoniedeparis.py
youtube_dl/extractor/philharmoniedeparis.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_str from ..utils import ( try_get, urljoin, ) class PhilharmonieDeParisIE(InfoExtractor): IE_DESC = 'Philharmonie de Paris' _VALID_URL = r'''(?x) https?:// (?: live\.philharmoniedeparis\.fr/(?:[Cc]oncert/|embed(?:app)?/|misc/Playlist\.ashx\?id=)| pad\.philharmoniedeparis\.fr/doc/CIMU/ ) (?P<id>\d+) ''' _TESTS = [{ 'url': 'http://pad.philharmoniedeparis.fr/doc/CIMU/1086697/jazz-a-la-villette-knower', 'md5': 'a0a4b195f544645073631cbec166a2c2', 'info_dict': { 'id': '1086697', 'ext': 'mp4', 'title': 'Jazz à la Villette : Knower', }, }, { 'url': 'http://live.philharmoniedeparis.fr/concert/1032066.html', 'info_dict': { 'id': '1032066', 'title': 'md5:0a031b81807b3593cffa3c9a87a167a0', }, 'playlist_mincount': 2, }, { 'url': 'http://live.philharmoniedeparis.fr/Concert/1030324.html', 'only_matching': True, }, { 'url': 'http://live.philharmoniedeparis.fr/misc/Playlist.ashx?id=1030324&track=&lang=fr', 'only_matching': True, }, { 'url': 'https://live.philharmoniedeparis.fr/embedapp/1098406/berlioz-fantastique-lelio-les-siecles-national-youth-choir-of.html?lang=fr-FR', 'only_matching': True, }, { 'url': 'https://live.philharmoniedeparis.fr/embed/1098406/berlioz-fantastique-lelio-les-siecles-national-youth-choir-of.html?lang=fr-FR', 'only_matching': True, }] _LIVE_URL = 'https://live.philharmoniedeparis.fr' def _real_extract(self, url): video_id = self._match_id(url) config = self._download_json( '%s/otoPlayer/config.ashx' % self._LIVE_URL, video_id, query={ 'id': video_id, 'lang': 'fr-FR', }) def extract_entry(source): if not isinstance(source, dict): return title = source.get('title') if not title: return files = source.get('files') if not isinstance(files, dict): return format_urls = set() formats = [] for format_id in ('mobile', 'desktop'): format_url = try_get( files, lambda x: x[format_id]['file'], compat_str) if not format_url or format_url in format_urls: 
continue format_urls.add(format_url) m3u8_url = urljoin(self._LIVE_URL, format_url) formats.extend(self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) if not formats: return self._sort_formats(formats) return { 'title': title, 'formats': formats, } thumbnail = urljoin(self._LIVE_URL, config.get('image')) info = extract_entry(config) if info: info.update({ 'id': video_id, 'thumbnail': thumbnail, }) return info entries = [] for num, chapter in enumerate(config['chapters'], start=1): entry = extract_entry(chapter) entry['id'] = '%s-%d' % (video_id, num) entries.append(entry) return self.playlist_result(entries, video_id, config.get('title'))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/curiositystream.py
youtube_dl/extractor/curiositystream.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( int_or_none, urlencode_postdata, compat_str, ExtractorError, ) class CuriosityStreamBaseIE(InfoExtractor): _NETRC_MACHINE = 'curiositystream' _auth_token = None _API_BASE_URL = 'https://api.curiositystream.com/v1/' def _handle_errors(self, result): error = result.get('error', {}).get('message') if error: if isinstance(error, dict): error = ', '.join(error.values()) raise ExtractorError( '%s said: %s' % (self.IE_NAME, error), expected=True) def _call_api(self, path, video_id, query=None): headers = {} if self._auth_token: headers['X-Auth-Token'] = self._auth_token result = self._download_json( self._API_BASE_URL + path, video_id, headers=headers, query=query) self._handle_errors(result) return result['data'] def _real_initialize(self): email, password = self._get_login_info() if email is None: return result = self._download_json( self._API_BASE_URL + 'login', None, data=urlencode_postdata({ 'email': email, 'password': password, })) self._handle_errors(result) self._auth_token = result['message']['auth_token'] class CuriosityStreamIE(CuriosityStreamBaseIE): IE_NAME = 'curiositystream' _VALID_URL = r'https?://(?:app\.)?curiositystream\.com/video/(?P<id>\d+)' _TEST = { 'url': 'https://app.curiositystream.com/video/2', 'info_dict': { 'id': '2', 'ext': 'mp4', 'title': 'How Did You Develop The Internet?', 'description': 'Vint Cerf, Google\'s Chief Internet Evangelist, describes how he and Bob Kahn created the internet.', }, 'params': { 'format': 'bestvideo', # m3u8 download 'skip_download': True, }, } def _real_extract(self, url): video_id = self._match_id(url) formats = [] for encoding_format in ('m3u8', 'mpd'): media = self._call_api('media/' + video_id, video_id, query={ 'encodingsNew': 'true', 'encodingsFormat': encoding_format, }) for encoding in media.get('encodings', []): playlist_url = encoding.get('master_playlist_url') if encoding_format == 
'm3u8': # use `m3u8` entry_protocol until EXT-X-MAP is properly supported by `m3u8_native` entry_protocol formats.extend(self._extract_m3u8_formats( playlist_url, video_id, 'mp4', m3u8_id='hls', fatal=False)) elif encoding_format == 'mpd': formats.extend(self._extract_mpd_formats( playlist_url, video_id, mpd_id='dash', fatal=False)) encoding_url = encoding.get('url') file_url = encoding.get('file_url') if not encoding_url and not file_url: continue f = { 'width': int_or_none(encoding.get('width')), 'height': int_or_none(encoding.get('height')), 'vbr': int_or_none(encoding.get('video_bitrate')), 'abr': int_or_none(encoding.get('audio_bitrate')), 'filesize': int_or_none(encoding.get('size_in_bytes')), 'vcodec': encoding.get('video_codec'), 'acodec': encoding.get('audio_codec'), 'container': encoding.get('container_type'), } for f_url in (encoding_url, file_url): if not f_url: continue fmt = f.copy() rtmp = re.search(r'^(?P<url>rtmpe?://(?P<host>[^/]+)/(?P<app>.+))/(?P<playpath>mp[34]:.+)$', f_url) if rtmp: fmt.update({ 'url': rtmp.group('url'), 'play_path': rtmp.group('playpath'), 'app': rtmp.group('app'), 'ext': 'flv', 'format_id': 'rtmp', }) else: fmt.update({ 'url': f_url, 'format_id': 'http', }) formats.append(fmt) self._sort_formats(formats) title = media['title'] subtitles = {} for closed_caption in media.get('closed_captions', []): sub_url = closed_caption.get('file') if not sub_url: continue lang = closed_caption.get('code') or closed_caption.get('language') or 'en' subtitles.setdefault(lang, []).append({ 'url': sub_url, }) return { 'id': video_id, 'formats': formats, 'title': title, 'description': media.get('description'), 'thumbnail': media.get('image_large') or media.get('image_medium') or media.get('image_small'), 'duration': int_or_none(media.get('duration')), 'tags': media.get('tags'), 'subtitles': subtitles, } class CuriosityStreamCollectionIE(CuriosityStreamBaseIE): IE_NAME = 'curiositystream:collection' _VALID_URL = 
r'https?://(?:app\.)?curiositystream\.com/(?:collections?|series)/(?P<id>\d+)' _TESTS = [{ 'url': 'https://app.curiositystream.com/collection/2', 'info_dict': { 'id': '2', 'title': 'Curious Minds: The Internet', 'description': 'How is the internet shaping our lives in the 21st Century?', }, 'playlist_mincount': 16, }, { 'url': 'https://curiositystream.com/series/2', 'only_matching': True, }, { 'url': 'https://curiositystream.com/collections/36', 'only_matching': True, }] def _real_extract(self, url): collection_id = self._match_id(url) collection = self._call_api( 'collections/' + collection_id, collection_id) entries = [] for media in collection.get('media', []): media_id = compat_str(media.get('id')) entries.append(self.url_result( 'https://curiositystream.com/video/' + media_id, CuriosityStreamIE.ie_key(), media_id)) return self.playlist_result( entries, collection_id, collection.get('title'), collection.get('description'))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/zype.py
youtube_dl/extractor/zype.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_HTTPError from ..utils import ( dict_get, ExtractorError, int_or_none, js_to_json, parse_iso8601, ) class ZypeIE(InfoExtractor): _ID_RE = r'[\da-fA-F]+' _COMMON_RE = r'//player\.zype\.com/embed/%s\.(?:js|json|html)\?.*?(?:access_token|(?:ap[ip]|player)_key)=' _VALID_URL = r'https?:%s[^&]+' % (_COMMON_RE % ('(?P<id>%s)' % _ID_RE)) _TEST = { 'url': 'https://player.zype.com/embed/5b400b834b32992a310622b9.js?api_key=jZ9GUhRmxcPvX7M3SlfejB6Hle9jyHTdk2jVxG7wOHPLODgncEKVdPYBhuz9iWXQ&autoplay=false&controls=true&da=false', 'md5': 'eaee31d474c76a955bdaba02a505c595', 'info_dict': { 'id': '5b400b834b32992a310622b9', 'ext': 'mp4', 'title': 'Smoky Barbecue Favorites', 'thumbnail': r're:^https?://.*\.jpe?g', 'description': 'md5:5ff01e76316bd8d46508af26dc86023b', 'timestamp': 1504915200, 'upload_date': '20170909', }, } @staticmethod def _extract_urls(webpage): return [ mobj.group('url') for mobj in re.finditer( r'<script[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?%s.+?)\1' % (ZypeIE._COMMON_RE % ZypeIE._ID_RE), webpage)] def _real_extract(self, url): video_id = self._match_id(url) try: response = self._download_json(re.sub( r'\.(?:js|html)\?', '.json?', url), video_id)['response'] except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code in (400, 401, 403): raise ExtractorError(self._parse_json( e.cause.read().decode(), video_id)['message'], expected=True) raise body = response['body'] video = response['video'] title = video['title'] if isinstance(body, dict): formats = [] for output in body.get('outputs', []): output_url = output.get('url') if not output_url: continue name = output.get('name') if name == 'm3u8': formats = self._extract_m3u8_formats( output_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) else: f = { 'format_id': name, 'tbr': int_or_none(output.get('bitrate')), 'url': output_url, } if name in ('m4a', 
'mp3'): f['vcodec'] = 'none' else: f.update({ 'height': int_or_none(output.get('height')), 'width': int_or_none(output.get('width')), }) formats.append(f) text_tracks = body.get('subtitles') or [] else: m3u8_url = self._search_regex( r'(["\'])(?P<url>(?:(?!\1).)+\.m3u8(?:(?!\1).)*)\1', body, 'm3u8 url', group='url', default=None) if not m3u8_url: source = self._search_regex( r'(?s)sources\s*:\s*\[\s*({.+?})\s*\]', body, 'source') def get_attr(key): return self._search_regex( r'\b%s\s*:\s*([\'"])(?P<val>(?:(?!\1).)+)\1' % key, source, key, group='val') if get_attr('integration') == 'verizon-media': m3u8_url = 'https://content.uplynk.com/%s.m3u8' % get_attr('id') formats = self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls') text_tracks = self._search_regex( r'textTracks\s*:\s*(\[[^]]+\])', body, 'text tracks', default=None) if text_tracks: text_tracks = self._parse_json( text_tracks, video_id, js_to_json, False) self._sort_formats(formats) subtitles = {} if text_tracks: for text_track in text_tracks: tt_url = dict_get(text_track, ('file', 'src')) if not tt_url: continue subtitles.setdefault(text_track.get('label') or 'English', []).append({ 'url': tt_url, }) thumbnails = [] for thumbnail in video.get('thumbnails', []): thumbnail_url = thumbnail.get('url') if not thumbnail_url: continue thumbnails.append({ 'url': thumbnail_url, 'width': int_or_none(thumbnail.get('width')), 'height': int_or_none(thumbnail.get('height')), }) return { 'id': video_id, 'display_id': video.get('friendly_title'), 'title': title, 'thumbnails': thumbnails, 'description': dict_get(video, ('description', 'ott_description', 'short_description')), 'timestamp': parse_iso8601(video.get('published_at')), 'duration': int_or_none(video.get('duration')), 'view_count': int_or_none(video.get('request_count')), 'average_rating': int_or_none(video.get('rating')), 'season_number': int_or_none(video.get('season')), 'episode_number': int_or_none(video.get('episode')), 
'formats': formats, 'subtitles': subtitles, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/echomsk.py
youtube_dl/extractor/echomsk.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor class EchoMskIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?echo\.msk\.ru/sounds/(?P<id>\d+)' _TEST = { 'url': 'http://www.echo.msk.ru/sounds/1464134.html', 'md5': '2e44b3b78daff5b458e4dbc37f191f7c', 'info_dict': { 'id': '1464134', 'ext': 'mp3', 'title': 'Особое мнение - 29 декабря 2014, 19:08', }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) audio_url = self._search_regex( r'<a rel="mp3" href="([^"]+)">', webpage, 'audio URL') title = self._html_search_regex( r'<a href="/programs/[^"]+" target="_blank">([^<]+)</a>', webpage, 'title') air_date = self._html_search_regex( r'(?s)<div class="date">(.+?)</div>', webpage, 'date', fatal=False, default=None) if air_date: air_date = re.sub(r'(\s)\1+', r'\1', air_date) if air_date: title = '%s - %s' % (title, air_date) return { 'id': video_id, 'url': audio_url, 'title': title, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/thisoldhouse.py
youtube_dl/extractor/thisoldhouse.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class ThisOldHouseIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?thisoldhouse\.com/(?:watch|how-to|tv-episode|(?:[^/]+/)?\d+)/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://www.thisoldhouse.com/how-to/how-to-build-storage-bench', 'info_dict': { 'id': '5dcdddf673c3f956ef5db202', 'ext': 'mp4', 'title': 'How to Build a Storage Bench', 'description': 'In the workshop, Tom Silva and Kevin O\'Connor build a storage bench for an entryway.', 'timestamp': 1442548800, 'upload_date': '20150918', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.thisoldhouse.com/watch/arlington-arts-crafts-arts-and-crafts-class-begins', 'only_matching': True, }, { 'url': 'https://www.thisoldhouse.com/tv-episode/ask-toh-shelf-rough-electric', 'only_matching': True, }, { 'url': 'https://www.thisoldhouse.com/furniture/21017078/how-to-build-a-storage-bench', 'only_matching': True, }, { 'url': 'https://www.thisoldhouse.com/21113884/s41-e13-paradise-lost', 'only_matching': True, }, { # iframe www.thisoldhouse.com 'url': 'https://www.thisoldhouse.com/21083431/seaside-transformation-the-westerly-project', 'only_matching': True, }] _ZYPE_TMPL = 'https://player.zype.com/embed/%s.html?api_key=hsOk_yMSPYNrT22e9pu8hihLXjaZf0JW5jsOWv4ZqyHJFvkJn6rtToHl09tbbsbe' def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) video_id = self._search_regex( r'<iframe[^>]+src=[\'"](?:https?:)?//(?:www\.)?thisoldhouse\.(?:chorus\.build|com)/videos/zype/([0-9a-f]{24})', webpage, 'video id') return self.url_result(self._ZYPE_TMPL % video_id, 'Zype', video_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/vier.py
youtube_dl/extractor/vier.py
# coding: utf-8 from __future__ import unicode_literals import re import itertools from .common import InfoExtractor from ..utils import ( urlencode_postdata, int_or_none, unified_strdate, ) class VierIE(InfoExtractor): IE_NAME = 'vier' IE_DESC = 'vier.be and vijf.be' _VALID_URL = r'''(?x) https?:// (?:www\.)?(?P<site>vier|vijf)\.be/ (?: (?: [^/]+/videos| video(?:/[^/]+)* )/ (?P<display_id>[^/]+)(?:/(?P<id>\d+))?| (?: video/v3/embed| embed/video/public )/(?P<embed_id>\d+) ) ''' _NETRC_MACHINE = 'vier' _TESTS = [{ 'url': 'http://www.vier.be/planb/videos/het-wordt-warm-de-moestuin/16129', 'md5': 'e4ae2054a6b040ef1e289e20d111b46e', 'info_dict': { 'id': '16129', 'display_id': 'het-wordt-warm-de-moestuin', 'ext': 'mp4', 'title': 'Het wordt warm in De Moestuin', 'description': 'De vele uren werk eisen hun tol. Wim droomt van assistentie...', 'upload_date': '20121025', 'series': 'Plan B', 'tags': ['De Moestuin', 'Moestuin', 'meisjes', 'Tomaat', 'Wim', 'Droom'], }, }, { 'url': 'http://www.vijf.be/temptationisland/videos/zo-grappig-temptation-island-hosts-moeten-kiezen-tussen-onmogelijke-dilemmas/2561614', 'info_dict': { 'id': '2561614', 'display_id': 'zo-grappig-temptation-island-hosts-moeten-kiezen-tussen-onmogelijke-dilemmas', 'ext': 'mp4', 'title': 'md5:84f45fe48b8c1fa296a7f6d208d080a7', 'description': 'md5:0356d4981e58b8cbee19355cbd51a8fe', 'upload_date': '20170228', 'series': 'Temptation Island', 'tags': list, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.vier.be/janigaat/videos/jani-gaat-naar-tokio-aflevering-4/2674839', 'info_dict': { 'id': '2674839', 'display_id': 'jani-gaat-naar-tokio-aflevering-4', 'ext': 'mp4', 'title': 'Jani gaat naar Tokio - Aflevering 4', 'description': 'md5:aa8d611541db6ae9e863125704511f88', 'upload_date': '20170501', 'series': 'Jani gaat', 'episode_number': 4, 'tags': ['Jani Gaat', 'Volledige Aflevering'], }, 'params': { 'skip_download': True, }, 'skip': 'Requires account credentials', }, { # Requires account credentials 
but bypassed extraction via v3/embed page # without metadata 'url': 'http://www.vier.be/janigaat/videos/jani-gaat-naar-tokio-aflevering-4/2674839', 'info_dict': { 'id': '2674839', 'display_id': 'jani-gaat-naar-tokio-aflevering-4', 'ext': 'mp4', 'title': 'jani-gaat-naar-tokio-aflevering-4', }, 'params': { 'skip_download': True, }, 'expected_warnings': ['Log in to extract metadata'], }, { # Without video id in URL 'url': 'http://www.vier.be/planb/videos/dit-najaar-plan-b', 'only_matching': True, }, { 'url': 'http://www.vier.be/video/v3/embed/16129', 'only_matching': True, }, { 'url': 'https://www.vijf.be/embed/video/public/4093', 'only_matching': True, }, { 'url': 'https://www.vier.be/video/blockbusters/in-juli-en-augustus-summer-classics', 'only_matching': True, }, { 'url': 'https://www.vier.be/video/achter-de-rug/2017/achter-de-rug-seizoen-1-aflevering-6', 'only_matching': True, }] def _real_initialize(self): self._logged_in = False def _login(self, site): username, password = self._get_login_info() if username is None or password is None: return login_page = self._download_webpage( 'http://www.%s.be/user/login' % site, None, note='Logging in', errnote='Unable to log in', data=urlencode_postdata({ 'form_id': 'user_login', 'name': username, 'pass': password, }), headers={'Content-Type': 'application/x-www-form-urlencoded'}) login_error = self._html_search_regex( r'(?s)<div class="messages error">\s*<div>\s*<h2.+?</h2>(.+?)<', login_page, 'login error', default=None) if login_error: self.report_warning('Unable to log in: %s' % login_error) else: self._logged_in = True def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) embed_id = mobj.group('embed_id') display_id = mobj.group('display_id') or embed_id video_id = mobj.group('id') or embed_id site = mobj.group('site') if not self._logged_in: self._login(site) webpage = self._download_webpage(url, display_id) if r'id="user-login"' in webpage: self.report_warning( 'Log in to extract metadata', 
video_id=display_id) webpage = self._download_webpage( 'http://www.%s.be/video/v3/embed/%s' % (site, video_id), display_id) video_id = self._search_regex( [r'data-nid="(\d+)"', r'"nid"\s*:\s*"(\d+)"'], webpage, 'video id', default=video_id or display_id) playlist_url = self._search_regex( r'data-file=(["\'])(?P<url>(?:https?:)?//[^/]+/.+?\.m3u8.*?)\1', webpage, 'm3u8 url', default=None, group='url') if not playlist_url: application = self._search_regex( [r'data-application="([^"]+)"', r'"application"\s*:\s*"([^"]+)"'], webpage, 'application', default=site + '_vod') filename = self._search_regex( [r'data-filename="([^"]+)"', r'"filename"\s*:\s*"([^"]+)"'], webpage, 'filename') playlist_url = 'http://vod.streamcloud.be/%s/_definst_/mp4:%s.mp4/playlist.m3u8' % (application, filename) formats = self._extract_wowza_formats( playlist_url, display_id, skip_protocols=['dash']) self._sort_formats(formats) title = self._og_search_title(webpage, default=display_id) description = self._html_search_regex( r'(?s)<div\b[^>]+\bclass=(["\'])[^>]*?\bfield-type-text-with-summary\b[^>]*?\1[^>]*>.*?<p>(?P<value>.+?)</p>', webpage, 'description', default=None, group='value') thumbnail = self._og_search_thumbnail(webpage, default=None) upload_date = unified_strdate(self._html_search_regex( r'(?s)<div\b[^>]+\bclass=(["\'])[^>]*?\bfield-name-post-date\b[^>]*?\1[^>]*>.*?(?P<value>\d{2}/\d{2}/\d{4})', webpage, 'upload date', default=None, group='value')) series = self._search_regex( r'data-program=(["\'])(?P<value>(?:(?!\1).)+)\1', webpage, 'series', default=None, group='value') episode_number = int_or_none(self._search_regex( r'(?i)aflevering (\d+)', title, 'episode number', default=None)) tags = re.findall(r'<a\b[^>]+\bhref=["\']/tags/[^>]+>([^<]+)<', webpage) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'upload_date': upload_date, 'series': series, 'episode_number': episode_number, 'tags': tags, 'formats': formats, 
} class VierVideosIE(InfoExtractor): IE_NAME = 'vier:videos' _VALID_URL = r'https?://(?:www\.)?(?P<site>vier|vijf)\.be/(?P<program>[^/]+)/videos(?:\?.*\bpage=(?P<page>\d+)|$)' _TESTS = [{ 'url': 'http://www.vier.be/demoestuin/videos', 'info_dict': { 'id': 'demoestuin', }, 'playlist_mincount': 153, }, { 'url': 'http://www.vijf.be/temptationisland/videos', 'info_dict': { 'id': 'temptationisland', }, 'playlist_mincount': 159, }, { 'url': 'http://www.vier.be/demoestuin/videos?page=6', 'info_dict': { 'id': 'demoestuin-page6', }, 'playlist_mincount': 20, }, { 'url': 'http://www.vier.be/demoestuin/videos?page=7', 'info_dict': { 'id': 'demoestuin-page7', }, 'playlist_mincount': 13, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) program = mobj.group('program') site = mobj.group('site') page_id = mobj.group('page') if page_id: page_id = int(page_id) start_page = page_id playlist_id = '%s-page%d' % (program, page_id) else: start_page = 0 playlist_id = program entries = [] for current_page_id in itertools.count(start_page): current_page = self._download_webpage( 'http://www.%s.be/%s/videos?page=%d' % (site, program, current_page_id), program, 'Downloading page %d' % (current_page_id + 1)) page_entries = [ self.url_result('http://www.' + site + '.be' + video_url, 'Vier') for video_url in re.findall( r'<h[23]><a href="(/[^/]+/videos/[^/]+(?:/\d+)?)">', current_page)] entries.extend(page_entries) if page_id or '>Meer<' not in current_page: break return self.playlist_result(entries, playlist_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tiktok.py
youtube_dl/extractor/tiktok.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( compat_str, ExtractorError, float_or_none, int_or_none, str_or_none, try_get, url_or_none, ) class TikTokBaseIE(InfoExtractor): def _extract_video(self, data, video_id=None): video = data['video'] description = str_or_none(try_get(data, lambda x: x['desc'])) width = int_or_none(try_get(data, lambda x: video['width'])) height = int_or_none(try_get(data, lambda x: video['height'])) format_urls = set() formats = [] for format_id in ('download', 'play'): format_url = url_or_none(video.get('%sAddr' % format_id)) if not format_url: continue if format_url in format_urls: continue format_urls.add(format_url) formats.append({ 'url': format_url, 'ext': 'mp4', 'height': height, 'width': width, 'http_headers': { 'Referer': 'https://www.tiktok.com/', } }) self._sort_formats(formats) thumbnail = url_or_none(video.get('cover')) duration = float_or_none(video.get('duration')) uploader = try_get(data, lambda x: x['author']['nickname'], compat_str) uploader_id = try_get(data, lambda x: x['author']['id'], compat_str) timestamp = int_or_none(data.get('createTime')) def stats(key): return int_or_none(try_get( data, lambda x: x['stats']['%sCount' % key])) view_count = stats('play') like_count = stats('digg') comment_count = stats('comment') repost_count = stats('share') aweme_id = data.get('id') or video_id return { 'id': aweme_id, 'title': uploader or aweme_id, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'uploader': uploader, 'uploader_id': uploader_id, 'timestamp': timestamp, 'view_count': view_count, 'like_count': like_count, 'comment_count': comment_count, 'repost_count': repost_count, 'formats': formats, } class TikTokIE(TikTokBaseIE): _VALID_URL = r'https?://(?:www\.)?tiktok\.com/@[^/]+/video/(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.tiktok.com/@zureeal/video/6606727368545406213', 'md5': '163ceff303bb52de60e6887fe399e6cd', 'info_dict': { 
'id': '6606727368545406213', 'ext': 'mp4', 'title': 'Zureeal', 'description': '#bowsette#mario#cosplay#uk#lgbt#gaming#asian#bowsettecosplay', 'thumbnail': r're:^https?://.*', 'duration': 15, 'uploader': 'Zureeal', 'uploader_id': '188294915489964032', 'timestamp': 1538248586, 'upload_date': '20180929', 'view_count': int, 'like_count': int, 'comment_count': int, 'repost_count': int, } }] def _real_initialize(self): # Setup session (will set necessary cookies) self._request_webpage( 'https://www.tiktok.com/', None, note='Setting up session') def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) page_props = self._parse_json(self._search_regex( r'<script[^>]+\bid=["\']__NEXT_DATA__[^>]+>\s*({.+?})\s*</script', webpage, 'data'), video_id)['props']['pageProps'] data = try_get(page_props, lambda x: x['itemInfo']['itemStruct'], dict) if not data and page_props.get('statusCode') == 10216: raise ExtractorError('This video is private', expected=True) return self._extract_video(data, video_id) class TikTokUserIE(TikTokBaseIE): _VALID_URL = r'https://(?:www\.)?tiktok\.com/@(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://www.tiktok.com/@zureeal', 'info_dict': { 'id': '188294915489964032', }, 'playlist_mincount': 24, }] _WORKING = False @classmethod def suitable(cls, url): return False if TikTokIE.suitable(url) else super(TikTokUserIE, cls).suitable(url) def _real_extract(self, url): user_id = self._match_id(url) data = self._download_json( 'https://m.tiktok.com/h5/share/usr/list/%s/' % user_id, user_id, query={'_signature': '_'}) entries = [] for aweme in data['aweme_list']: try: entry = self._extract_video(aweme) except ExtractorError: continue entry['extractor_key'] = TikTokIE.ie_key() entries.append(entry) return self.playlist_result(entries, user_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/discoveryvr.py
youtube_dl/extractor/discoveryvr.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import parse_duration class DiscoveryVRIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?discoveryvr\.com/watch/(?P<id>[^/?#]+)' _TEST = { 'url': 'http://www.discoveryvr.com/watch/discovery-vr-an-introduction', 'md5': '32b1929798c464a54356378b7912eca4', 'info_dict': { 'id': 'discovery-vr-an-introduction', 'ext': 'mp4', 'title': 'Discovery VR - An Introduction', 'description': 'md5:80d418a10efb8899d9403e61d8790f06', } } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) bootstrap_data = self._search_regex( r'root\.DVR\.bootstrapData\s+=\s+"({.+?})";', webpage, 'bootstrap data') bootstrap_data = self._parse_json( bootstrap_data.encode('utf-8').decode('unicode_escape'), display_id) videos = self._parse_json(bootstrap_data['videos'], display_id)['allVideos'] video_data = next(video for video in videos if video.get('slug') == display_id) series = video_data.get('showTitle') title = episode = video_data.get('title') or series if series and series != title: title = '%s - %s' % (series, title) formats = [] for f, format_id in (('cdnUriM3U8', 'mobi'), ('webVideoUrlSd', 'sd'), ('webVideoUrlHd', 'hd')): f_url = video_data.get(f) if not f_url: continue formats.append({ 'format_id': format_id, 'url': f_url, }) return { 'id': display_id, 'display_id': display_id, 'title': title, 'description': video_data.get('description'), 'thumbnail': video_data.get('thumbnail'), 'duration': parse_duration(video_data.get('runTime')), 'formats': formats, 'episode': episode, 'series': series, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/radiocanada.py
youtube_dl/extractor/radiocanada.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_HTTPError from ..utils import ( determine_ext, ExtractorError, int_or_none, unified_strdate, ) class RadioCanadaIE(InfoExtractor): IE_NAME = 'radiocanada' _VALID_URL = r'(?:radiocanada:|https?://ici\.radio-canada\.ca/widgets/mediaconsole/)(?P<app_code>[^:/]+)[:/](?P<id>[0-9]+)' _TESTS = [ { 'url': 'http://ici.radio-canada.ca/widgets/mediaconsole/medianet/7184272', 'info_dict': { 'id': '7184272', 'ext': 'mp4', 'title': 'Le parcours du tireur capté sur vidéo', 'description': 'Images des caméras de surveillance fournies par la GRC montrant le parcours du tireur d\'Ottawa', 'upload_date': '20141023', }, 'params': { # m3u8 download 'skip_download': True, } }, { # empty Title 'url': 'http://ici.radio-canada.ca/widgets/mediaconsole/medianet/7754998/', 'info_dict': { 'id': '7754998', 'ext': 'mp4', 'title': 'letelejournal22h', 'description': 'INTEGRALE WEB 22H-TJ', 'upload_date': '20170720', }, 'params': { # m3u8 download 'skip_download': True, }, }, { # with protectionType but not actually DRM protected 'url': 'radiocanada:toutv:140872', 'info_dict': { 'id': '140872', 'title': 'Épisode 1', 'series': 'District 31', }, 'only_matching': True, } ] _GEO_COUNTRIES = ['CA'] _access_token = None _claims = None def _call_api(self, path, video_id=None, app_code=None, query=None): if not query: query = {} query.update({ 'client_key': '773aea60-0e80-41bb-9c7f-e6d7c3ad17fb', 'output': 'json', }) if video_id: query.update({ 'appCode': app_code, 'idMedia': video_id, }) if self._access_token: query['access_token'] = self._access_token try: return self._download_json( 'https://services.radio-canada.ca/media/' + path, video_id, query=query) except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code in (401, 422): data = self._parse_json(e.cause.read().decode(), None) error = data.get('error_description') or data['errorMessage']['text'] 
raise ExtractorError(error, expected=True) raise def _extract_info(self, app_code, video_id): metas = self._call_api('meta/v1/index.ashx', video_id, app_code)['Metas'] def get_meta(name): for meta in metas: if meta.get('name') == name: text = meta.get('text') if text: return text # protectionType does not necessarily mean the video is DRM protected (see # https://github.com/ytdl-org/youtube-dl/pull/18609). if get_meta('protectionType'): self.report_warning('This video is probably DRM protected.') query = { 'connectionType': 'hd', 'deviceType': 'ipad', 'multibitrate': 'true', } if self._claims: query['claims'] = self._claims v_data = self._call_api('validation/v2/', video_id, app_code, query) v_url = v_data.get('url') if not v_url: error = v_data['message'] if error == "Le contenu sélectionné n'est pas disponible dans votre pays": raise self.raise_geo_restricted(error, self._GEO_COUNTRIES) if error == 'Le contenu sélectionné est disponible seulement en premium': self.raise_login_required(error) raise ExtractorError( '%s said: %s' % (self.IE_NAME, error), expected=True) formats = self._extract_m3u8_formats(v_url, video_id, 'mp4') self._sort_formats(formats) subtitles = {} closed_caption_url = get_meta('closedCaption') or get_meta('closedCaptionHTML5') if closed_caption_url: subtitles['fr'] = [{ 'url': closed_caption_url, 'ext': determine_ext(closed_caption_url, 'vtt'), }] return { 'id': video_id, 'title': get_meta('Title') or get_meta('AV-nomEmission'), 'description': get_meta('Description') or get_meta('ShortDescription'), 'thumbnail': get_meta('imageHR') or get_meta('imageMR') or get_meta('imageBR'), 'duration': int_or_none(get_meta('length')), 'series': get_meta('Emission'), 'season_number': int_or_none('SrcSaison'), 'episode_number': int_or_none('SrcEpisode'), 'upload_date': unified_strdate(get_meta('Date')), 'subtitles': subtitles, 'formats': formats, } def _real_extract(self, url): return self._extract_info(*re.match(self._VALID_URL, url).groups()) class 
RadioCanadaAudioVideoIE(InfoExtractor): IE_NAME = 'radiocanada:audiovideo' _VALID_URL = r'https?://ici\.radio-canada\.ca/([^/]+/)*media-(?P<id>[0-9]+)' _TESTS = [{ 'url': 'http://ici.radio-canada.ca/audio-video/media-7527184/barack-obama-au-vietnam', 'info_dict': { 'id': '7527184', 'ext': 'mp4', 'title': 'Barack Obama au Vietnam', 'description': 'Les États-Unis lèvent l\'embargo sur la vente d\'armes qui datait de la guerre du Vietnam', 'upload_date': '20160523', }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'https://ici.radio-canada.ca/info/videos/media-7527184/barack-obama-au-vietnam', 'only_matching': True, }] def _real_extract(self, url): return self.url_result('radiocanada:medianet:%s' % self._match_id(url))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/myvi.py
youtube_dl/extractor/myvi.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from .vimple import SprutoBaseIE class MyviIE(SprutoBaseIE): _VALID_URL = r'''(?x) (?: https?:// (?:www\.)? myvi\. (?: (?:ru/player|tv)/ (?: (?: embed/html| flash| api/Video/Get )/| content/preloader\.swf\?.*\bid= )| ru/watch/ )| myvi: ) (?P<id>[\da-zA-Z_-]+) ''' _TESTS = [{ 'url': 'http://myvi.ru/player/embed/html/oOy4euHA6LVwNNAjhD9_Jq5Ha2Qf0rtVMVFMAZav8wObeRTZaCATzucDQIDph8hQU0', 'md5': '571bbdfba9f9ed229dc6d34cc0f335bf', 'info_dict': { 'id': 'f16b2bbd-cde8-481c-a981-7cd48605df43', 'ext': 'mp4', 'title': 'хозяин жизни', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 25, }, }, { 'url': 'http://myvi.ru/player/content/preloader.swf?id=oOy4euHA6LVwNNAjhD9_Jq5Ha2Qf0rtVMVFMAZav8wOYf1WFpPfc_bWTKGVf_Zafr0', 'only_matching': True, }, { 'url': 'http://myvi.ru/player/api/Video/Get/oOy4euHA6LVwNNAjhD9_Jq5Ha2Qf0rtVMVFMAZav8wObeRTZaCATzucDQIDph8hQU0', 'only_matching': True, }, { 'url': 'http://myvi.tv/embed/html/oTGTNWdyz4Zwy_u1nraolwZ1odenTd9WkTnRfIL9y8VOgHYqOHApE575x4_xxS9Vn0?ap=0', 'only_matching': True, }, { 'url': 'http://myvi.ru/player/flash/ocp2qZrHI-eZnHKQBK4cZV60hslH8LALnk0uBfKsB-Q4WnY26SeGoYPi8HWHxu0O30', 'only_matching': True, }, { 'url': 'https://www.myvi.ru/watch/YwbqszQynUaHPn_s82sx0Q2', 'only_matching': True, }, { 'url': 'myvi:YwbqszQynUaHPn_s82sx0Q2', 'only_matching': True, }] @classmethod def _extract_url(cls, webpage): mobj = re.search( r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//myvi\.(?:ru/player|tv)/(?:embed/html|flash)/[^"]+)\1', webpage) if mobj: return mobj.group('url') def _real_extract(self, url): video_id = self._match_id(url) spruto = self._download_json( 'http://myvi.ru/player/api/Video/Get/%s?sig' % video_id, video_id)['sprutoData'] return self._extract_spruto(spruto, video_id) class MyviEmbedIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?myvi\.tv/(?:[^?]+\?.*?\bv=|embed/)(?P<id>[\da-z]+)' _TESTS = [{ 'url': 
'https://www.myvi.tv/embed/ccdqic3wgkqwpb36x9sxg43t4r', 'info_dict': { 'id': 'b3ea0663-3234-469d-873e-7fecf36b31d1', 'ext': 'mp4', 'title': 'Твоя (original song).mp4', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 277, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.myvi.tv/idmi6o?v=ccdqic3wgkqwpb36x9sxg43t4r#watch', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if MyviIE.suitable(url) else super(MyviEmbedIE, cls).suitable(url) def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( 'https://www.myvi.tv/embed/%s' % video_id, video_id) myvi_id = self._search_regex( r'CreatePlayer\s*\(\s*["\'].*?\bv=([\da-zA-Z_]+)', webpage, 'video id') return self.url_result('myvi:%s' % myvi_id, ie=MyviIE.ie_key())
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/rds.py
youtube_dl/extractor/rds.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( parse_duration, parse_iso8601, js_to_json, ) from ..compat import compat_str class RDSIE(InfoExtractor): IE_DESC = 'RDS.ca' _VALID_URL = r'https?://(?:www\.)?rds\.ca/vid(?:[eé]|%C3%A9)os/(?:[^/]+/)*(?P<id>[^/]+)-\d+\.\d+' _TESTS = [{ # has two 9c9media ContentPackages, the web player selects the first ContentPackage 'url': 'https://www.rds.ca/videos/Hockey/NationalHockeyLeague/teams/9/forum-du-5-a-7-jesperi-kotkaniemi-de-retour-de-finlande-3.1377606', 'info_dict': { 'id': '2083309', 'display_id': 'forum-du-5-a-7-jesperi-kotkaniemi-de-retour-de-finlande', 'ext': 'flv', 'title': 'Forum du 5 à 7 : Kotkaniemi de retour de Finlande', 'description': 'md5:83fa38ecc4a79b19e433433254077f25', 'timestamp': 1606129030, 'upload_date': '20201123', 'duration': 773.039, } }, { 'url': 'http://www.rds.ca/vid%C3%A9os/un-voyage-positif-3.877934', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) item = self._parse_json(self._search_regex(r'(?s)itemToPush\s*=\s*({.+?});', webpage, 'item'), display_id, js_to_json) video_id = compat_str(item['id']) title = item.get('title') or self._og_search_title(webpage) or self._html_search_meta( 'title', webpage, 'title', fatal=True) description = self._og_search_description(webpage) or self._html_search_meta( 'description', webpage, 'description') thumbnail = item.get('urlImageBig') or self._og_search_thumbnail(webpage) or self._search_regex( [r'<link[^>]+itemprop="thumbnailUrl"[^>]+href="([^"]+)"', r'<span[^>]+itemprop="thumbnailUrl"[^>]+content="([^"]+)"'], webpage, 'thumbnail', fatal=False) timestamp = parse_iso8601(self._search_regex( r'<span[^>]+itemprop="uploadDate"[^>]+content="([^"]+)"', webpage, 'upload date', fatal=False)) duration = parse_duration(self._search_regex( r'<span[^>]+itemprop="duration"[^>]+content="([^"]+)"', webpage, 
'duration', fatal=False)) age_limit = self._family_friendly_search(webpage) return { '_type': 'url_transparent', 'id': video_id, 'display_id': display_id, 'url': '9c9media:rds_web:%s' % video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'timestamp': timestamp, 'duration': duration, 'age_limit': age_limit, 'ie_key': 'NineCNineMedia', }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/vshare.py
youtube_dl/extractor/vshare.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_chr from ..utils import ( decode_packed_codes, ExtractorError, ) class VShareIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?vshare\.io/[dv]/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://vshare.io/d/0f64ce6', 'md5': '17b39f55b5497ae8b59f5fbce8e35886', 'info_dict': { 'id': '0f64ce6', 'title': 'vl14062007715967', 'ext': 'mp4', } }, { 'url': 'https://vshare.io/v/0f64ce6/width-650/height-430/1', 'only_matching': True, }] @staticmethod def _extract_urls(webpage): return re.findall( r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?vshare\.io/v/[^/?#&]+)', webpage) def _extract_packed(self, webpage): packed = self._search_regex( r'(eval\(function.+)', webpage, 'packed code') unpacked = decode_packed_codes(packed) digits = self._search_regex(r'\[((?:\d+,?)+)\]', unpacked, 'digits') digits = [int(digit) for digit in digits.split(',')] key_digit = self._search_regex( r'fromCharCode\(.+?(\d+)\)}', unpacked, 'key digit') chars = [compat_chr(d - int(key_digit)) for d in digits] return ''.join(chars) def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( 'https://vshare.io/v/%s/width-650/height-430/1' % video_id, video_id, headers={'Referer': url}) title = self._html_search_regex( r'<title>([^<]+)</title>', webpage, 'title') title = title.split(' - ')[0] error = self._html_search_regex( r'(?s)<div[^>]+\bclass=["\']xxx-error[^>]+>(.+?)</div', webpage, 'error', default=None) if error: raise ExtractorError(error, expected=True) info = self._parse_html5_media_entries( url, '<video>%s</video>' % self._extract_packed(webpage), video_id)[0] self._sort_formats(info['formats']) info.update({ 'id': video_id, 'title': title, }) return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/alsace20tv.py
youtube_dl/extractor/alsace20tv.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( clean_html, dict_get, get_element_by_class, int_or_none, unified_strdate, url_or_none, ) class Alsace20TVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?alsace20\.tv/(?:[\w-]+/)+[\w-]+-(?P<id>[\w]+)' _TESTS = [{ 'url': 'https://www.alsace20.tv/VOD/Actu/JT/Votre-JT-jeudi-3-fevrier-lyNHCXpYJh.html', # 'md5': 'd91851bf9af73c0ad9b2cdf76c127fbb', 'info_dict': { 'id': 'lyNHCXpYJh', 'ext': 'mp4', 'description': 'md5:fc0bc4a0692d3d2dba4524053de4c7b7', 'title': 'Votre JT du jeudi 3 février', 'upload_date': '20220203', 'thumbnail': r're:https?://.+\.jpg', 'duration': 1073, 'view_count': int, }, 'params': { 'format': 'bestvideo', }, }] def _extract_video(self, video_id, url=None): info = self._download_json( 'https://www.alsace20.tv/visionneuse/visio_v9_js.php?key=%s&habillage=0&mode=html' % (video_id, ), video_id) or {} title = info['titre'] formats = [] for res, fmt_url in (info.get('files') or {}).items(): formats.extend( self._extract_smil_formats(fmt_url, video_id, fatal=False) if '/smil:_' in fmt_url else self._extract_mpd_formats(fmt_url, video_id, mpd_id=res, fatal=False)) self._sort_formats(formats) webpage = (url and self._download_webpage(url, video_id, fatal=False)) or '' thumbnail = url_or_none(dict_get(info, ('image', 'preview', )) or self._og_search_thumbnail(webpage)) upload_date = self._search_regex(r'/(\d{6})_', thumbnail, 'upload_date', default=None) upload_date = unified_strdate('20%s-%s-%s' % (upload_date[:2], upload_date[2:4], upload_date[4:])) if upload_date else None return { 'id': video_id, 'title': title, 'formats': formats, 'description': clean_html(get_element_by_class('wysiwyg', webpage)), 'upload_date': upload_date, 'thumbnail': thumbnail, 'duration': int_or_none(self._og_search_property('video:duration', webpage) if webpage else None), 'view_count': int_or_none(info.get('nb_vues')), } def _real_extract(self, url): video_id = 
self._match_id(url) return self._extract_video(video_id, url) class Alsace20TVEmbedIE(Alsace20TVIE): _VALID_URL = r'https?://(?:www\.)?alsace20\.tv/emb/(?P<id>[\w]+)' _TESTS = [{ 'url': 'https://www.alsace20.tv/emb/lyNHCXpYJh', # 'md5': 'd91851bf9af73c0ad9b2cdf76c127fbb', 'info_dict': { 'id': 'lyNHCXpYJh', 'ext': 'mp4', 'title': 'Votre JT du jeudi 3 février', 'upload_date': '20220203', 'thumbnail': r're:https?://.+\.jpg', 'view_count': int, }, 'params': { 'format': 'bestvideo', }, }] def _real_extract(self, url): video_id = self._match_id(url) return self._extract_video(video_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/streamcloud.py
youtube_dl/extractor/streamcloud.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, urlencode_postdata, ) class StreamcloudIE(InfoExtractor): IE_NAME = 'streamcloud.eu' _VALID_URL = r'https?://streamcloud\.eu/(?P<id>[a-zA-Z0-9_-]+)(?:/(?P<fname>[^#?]*)\.html)?' _TESTS = [{ 'url': 'http://streamcloud.eu/skp9j99s4bpz/youtube-dl_test_video_____________-BaW_jenozKc.mp4.html', 'md5': '6bea4c7fa5daaacc2a946b7146286686', 'info_dict': { 'id': 'skp9j99s4bpz', 'ext': 'mp4', 'title': 'youtube-dl test video \'/\\ ä ↭', }, 'skip': 'Only available from the EU' }, { 'url': 'http://streamcloud.eu/ua8cmfh1nbe6/NSHIP-148--KUC-NG--H264-.mp4.html', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) url = 'http://streamcloud.eu/%s' % video_id orig_webpage = self._download_webpage(url, video_id) if '>File Not Found<' in orig_webpage: raise ExtractorError( 'Video %s does not exist' % video_id, expected=True) fields = re.findall(r'''(?x)<input\s+ type="(?:hidden|submit)"\s+ name="([^"]+)"\s+ (?:id="[^"]+"\s+)? value="([^"]*)" ''', orig_webpage) self._sleep(6, video_id) webpage = self._download_webpage( url, video_id, data=urlencode_postdata(fields), headers={ b'Content-Type': b'application/x-www-form-urlencoded', }) try: title = self._html_search_regex( r'<h1[^>]*>([^<]+)<', webpage, 'title') video_url = self._search_regex( r'file:\s*"([^"]+)"', webpage, 'video URL') except ExtractorError: message = self._html_search_regex( r'(?s)<div[^>]+class=(["\']).*?msgboxinfo.*?\1[^>]*>(?P<message>.+?)</div>', webpage, 'message', default=None, group='message') if message: raise ExtractorError('%s said: %s' % (self.IE_NAME, message), expected=True) raise thumbnail = self._search_regex( r'image:\s*"([^"]+)"', webpage, 'thumbnail URL', fatal=False) return { 'id': video_id, 'title': title, 'url': video_url, 'thumbnail': thumbnail, 'http_headers': { 'Referer': url, }, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/piksel.py
youtube_dl/extractor/piksel.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_str from ..utils import ( dict_get, ExtractorError, int_or_none, parse_iso8601, try_get, unescapeHTML, ) class PikselIE(InfoExtractor): _VALID_URL = r'''(?x)https?:// (?: (?: player\. (?: olympusattelecom| vibebyvista )| (?:api|player)\.multicastmedia| (?:api-ovp|player)\.piksel )\.com| (?: mz-edge\.stream\.co| movie-s\.nhk\.or )\.jp| vidego\.baltimorecity\.gov )/v/(?:refid/(?P<refid>[^/]+)/prefid/)?(?P<id>[\w-]+)''' _TESTS = [ { 'url': 'http://player.piksel.com/v/ums2867l', 'md5': '34e34c8d89dc2559976a6079db531e85', 'info_dict': { 'id': 'ums2867l', 'ext': 'mp4', 'title': 'GX-005 with Caption', 'timestamp': 1481335659, 'upload_date': '20161210' } }, { # Original source: http://www.uscourts.gov/cameras-courts/state-washington-vs-donald-j-trump-et-al 'url': 'https://player.piksel.com/v/v80kqp41', 'md5': '753ddcd8cc8e4fa2dda4b7be0e77744d', 'info_dict': { 'id': 'v80kqp41', 'ext': 'mp4', 'title': 'WAW- State of Washington vs. Donald J. Trump, et al', 'description': 'State of Washington vs. Donald J. Trump, et al, Case Number 17-CV-00141-JLR, TRO Hearing, Civil Rights Case, 02/3/2017, 1:00 PM (PST), Seattle Federal Courthouse, Seattle, WA, Judge James L. 
Robart presiding.', 'timestamp': 1486171129, 'upload_date': '20170204' } }, { # https://www3.nhk.or.jp/nhkworld/en/ondemand/video/2019240/ 'url': 'http://player.piksel.com/v/refid/nhkworld/prefid/nw_vod_v_en_2019_240_20190823233000_02_1566873477', 'only_matching': True, } ] @staticmethod def _extract_url(webpage): mobj = re.search( r'<iframe[^>]+src=["\'](?P<url>(?:https?:)?//player\.piksel\.com/v/[a-z0-9]+)', webpage) if mobj: return mobj.group('url') def _call_api(self, app_token, resource, display_id, query, fatal=True): response = (self._download_json( 'http://player.piksel.com/ws/ws_%s/api/%s/mode/json/apiv/5' % (resource, app_token), display_id, query=query, fatal=fatal) or {}).get('response') failure = try_get(response, lambda x: x['failure']['reason']) if failure: if fatal: raise ExtractorError(failure, expected=True) self.report_warning(failure) return response def _real_extract(self, url): ref_id, display_id = re.match(self._VALID_URL, url).groups() webpage = self._download_webpage(url, display_id) app_token = self._search_regex([ r'clientAPI\s*:\s*"([^"]+)"', r'data-de-api-key\s*=\s*"([^"]+)"' ], webpage, 'app token') query = {'refid': ref_id, 'prefid': display_id} if ref_id else {'v': display_id} program = self._call_api( app_token, 'program', display_id, query)['WsProgramResponse']['program'] video_id = program['uuid'] video_data = program['asset'] title = video_data['title'] asset_type = dict_get(video_data, ['assetType', 'asset_type']) formats = [] def process_asset_file(asset_file): if not asset_file: return # TODO: extract rtmp formats http_url = asset_file.get('http_url') if not http_url: return tbr = None vbr = int_or_none(asset_file.get('videoBitrate'), 1024) abr = int_or_none(asset_file.get('audioBitrate'), 1024) if asset_type == 'video': tbr = vbr + abr elif asset_type == 'audio': tbr = abr format_id = ['http'] if tbr: format_id.append(compat_str(tbr)) formats.append({ 'format_id': '-'.join(format_id), 'url': unescapeHTML(http_url), 'vbr': 
vbr, 'abr': abr, 'width': int_or_none(asset_file.get('videoWidth')), 'height': int_or_none(asset_file.get('videoHeight')), 'filesize': int_or_none(asset_file.get('filesize')), 'tbr': tbr, }) def process_asset_files(asset_files): for asset_file in (asset_files or []): process_asset_file(asset_file) process_asset_files(video_data.get('assetFiles')) process_asset_file(video_data.get('referenceFile')) if not formats: asset_id = video_data.get('assetid') or program.get('assetid') if asset_id: process_asset_files(try_get(self._call_api( app_token, 'asset_file', display_id, { 'assetid': asset_id, }, False), lambda x: x['WsAssetFileResponse']['AssetFiles'])) m3u8_url = dict_get(video_data, [ 'm3u8iPadURL', 'ipadM3u8Url', 'm3u8AndroidURL', 'm3u8iPhoneURL', 'iphoneM3u8Url']) if m3u8_url: formats.extend(self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) smil_url = dict_get(video_data, ['httpSmil', 'hdSmil', 'rtmpSmil']) if smil_url: transform_source = None if ref_id == 'nhkworld': # TODO: figure out if this is something to be fixed in urljoin, # _parse_smil_formats or keep it here transform_source = lambda x: x.replace('src="/', 'src="').replace('/media"', '/media/"') formats.extend(self._extract_smil_formats( re.sub(r'/od/[^/]+/', '/od/http/', smil_url), video_id, transform_source=transform_source, fatal=False)) self._sort_formats(formats) subtitles = {} for caption in video_data.get('captions', []): caption_url = caption.get('url') if caption_url: subtitles.setdefault(caption.get('locale', 'en'), []).append({ 'url': caption_url}) return { 'id': video_id, 'title': title, 'description': video_data.get('description'), 'thumbnail': video_data.get('thumbnailUrl'), 'timestamp': parse_iso8601(video_data.get('dateadd')), 'formats': formats, 'subtitles': subtitles, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/esri.py
youtube_dl/extractor/esri.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_urlparse from ..utils import ( int_or_none, parse_filesize, unified_strdate, ) class EsriVideoIE(InfoExtractor): _VALID_URL = r'https?://video\.esri\.com/watch/(?P<id>[0-9]+)' _TEST = { 'url': 'https://video.esri.com/watch/1124/arcgis-online-_dash_-developing-applications', 'md5': 'd4aaf1408b221f1b38227a9bbaeb95bc', 'info_dict': { 'id': '1124', 'ext': 'mp4', 'title': 'ArcGIS Online - Developing Applications', 'description': 'Jeremy Bartley demonstrates how to develop applications with ArcGIS Online.', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 185, 'upload_date': '20120419', } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) formats = [] for width, height, content in re.findall( r'(?s)<li><strong>(\d+)x(\d+):</strong>(.+?)</li>', webpage): for video_url, ext, filesize in re.findall( r'<a[^>]+href="([^"]+)">([^<]+)&nbsp;\(([^<]+)\)</a>', content): formats.append({ 'url': compat_urlparse.urljoin(url, video_url), 'ext': ext.lower(), 'format_id': '%s-%s' % (ext.lower(), height), 'width': int(width), 'height': int(height), 'filesize_approx': parse_filesize(filesize), }) self._sort_formats(formats) title = self._html_search_meta('title', webpage, 'title') description = self._html_search_meta( 'description', webpage, 'description', fatal=False) thumbnail = self._html_search_meta('thumbnail', webpage, 'thumbnail', fatal=False) if thumbnail: thumbnail = re.sub(r'_[st]\.jpg$', '_x.jpg', thumbnail) duration = int_or_none(self._search_regex( [r'var\s+videoSeconds\s*=\s*(\d+)', r"'duration'\s*:\s*(\d+)"], webpage, 'duration', fatal=False)) upload_date = unified_strdate(self._html_search_meta( 'last-modified', webpage, 'upload date', fatal=False)) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'upload_date': 
upload_date, 'formats': formats }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/keezmovies.py
youtube_dl/extractor/keezmovies.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..aes import aes_decrypt_text from ..compat import compat_urllib_parse_unquote from ..utils import ( determine_ext, ExtractorError, int_or_none, str_to_int, strip_or_none, url_or_none, ) class KeezMoviesIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?keezmovies\.com/video/(?:(?P<display_id>[^/]+)-)?(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.keezmovies.com/video/arab-wife-want-it-so-bad-i-see-she-thirsty-and-has-tiny-money-18070681', 'md5': '2ac69cdb882055f71d82db4311732a1a', 'info_dict': { 'id': '18070681', 'display_id': 'arab-wife-want-it-so-bad-i-see-she-thirsty-and-has-tiny-money', 'ext': 'mp4', 'title': 'Arab wife want it so bad I see she thirsty and has tiny money.', 'thumbnail': None, 'view_count': int, 'age_limit': 18, } }, { 'url': 'http://www.keezmovies.com/video/18070681', 'only_matching': True, }] def _extract_info(self, url, fatal=True): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') display_id = (mobj.group('display_id') if 'display_id' in mobj.groupdict() else None) or mobj.group('id') webpage = self._download_webpage( url, display_id, headers={'Cookie': 'age_verified=1'}) formats = [] format_urls = set() title = None thumbnail = None duration = None encrypted = False def extract_format(format_url, height=None): format_url = url_or_none(format_url) if not format_url or not format_url.startswith(('http', '//')): return if format_url in format_urls: return format_urls.add(format_url) tbr = int_or_none(self._search_regex( r'[/_](\d+)[kK][/_]', format_url, 'tbr', default=None)) if not height: height = int_or_none(self._search_regex( r'[/_](\d+)[pP][/_]', format_url, 'height', default=None)) if encrypted: format_url = aes_decrypt_text( video_url, title, 32).decode('utf-8') formats.append({ 'url': format_url, 'format_id': '%dp' % height if height else None, 'height': height, 'tbr': tbr, }) flashvars = self._parse_json( self._search_regex( 
r'flashvars\s*=\s*({.+?});', webpage, 'flashvars', default='{}'), display_id, fatal=False) if flashvars: title = flashvars.get('video_title') thumbnail = flashvars.get('image_url') duration = int_or_none(flashvars.get('video_duration')) encrypted = flashvars.get('encrypted') is True for key, value in flashvars.items(): mobj = re.search(r'quality_(\d+)[pP]', key) if mobj: extract_format(value, int(mobj.group(1))) video_url = flashvars.get('video_url') if video_url and determine_ext(video_url, None): extract_format(video_url) video_url = self._html_search_regex( r'flashvars\.video_url\s*=\s*(["\'])(?P<url>http.+?)\1', webpage, 'video url', default=None, group='url') if video_url: extract_format(compat_urllib_parse_unquote(video_url)) if not formats: if 'title="This video is no longer available"' in webpage: raise ExtractorError( 'Video %s is no longer available' % video_id, expected=True) try: self._sort_formats(formats) except ExtractorError: if fatal: raise if not title: title = self._html_search_regex( r'<h1[^>]*>([^<]+)', webpage, 'title') return webpage, { 'id': video_id, 'display_id': display_id, 'title': strip_or_none(title), 'thumbnail': thumbnail, 'duration': duration, 'age_limit': 18, 'formats': formats, } def _real_extract(self, url): webpage, info = self._extract_info(url, fatal=False) if not info['formats']: return self.url_result(url, 'Generic') info['view_count'] = str_to_int(self._search_regex( r'<b>([\d,.]+)</b> Views?', webpage, 'view count', fatal=False)) return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/abcotvs.py
youtube_dl/extractor/abcotvs.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_str from ..utils import ( dict_get, int_or_none, try_get, ) class ABCOTVSIE(InfoExtractor): IE_NAME = 'abcotvs' IE_DESC = 'ABC Owned Television Stations' _VALID_URL = r'https?://(?P<site>abc(?:7(?:news|ny|chicago)?|11|13|30)|6abc)\.com(?:(?:/[^/]+)*/(?P<display_id>[^/]+))?/(?P<id>\d+)' _TESTS = [ { 'url': 'http://abc7news.com/entertainment/east-bay-museum-celebrates-vintage-synthesizers/472581/', 'info_dict': { 'id': '472548', 'display_id': 'east-bay-museum-celebrates-vintage-synthesizers', 'ext': 'mp4', 'title': 'East Bay museum celebrates synthesized music', 'description': 'md5:24ed2bd527096ec2a5c67b9d5a9005f3', 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1421118520, 'upload_date': '20150113', }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://abc7news.com/472581', 'only_matching': True, }, { 'url': 'https://6abc.com/man-75-killed-after-being-struck-by-vehicle-in-chester/5725182/', 'only_matching': True, }, ] _SITE_MAP = { '6abc': 'wpvi', 'abc11': 'wtvd', 'abc13': 'ktrk', 'abc30': 'kfsn', 'abc7': 'kabc', 'abc7chicago': 'wls', 'abc7news': 'kgo', 'abc7ny': 'wabc', } def _real_extract(self, url): site, display_id, video_id = re.match(self._VALID_URL, url).groups() display_id = display_id or video_id station = self._SITE_MAP[site] data = self._download_json( 'https://api.abcotvs.com/v2/content', display_id, query={ 'id': video_id, 'key': 'otv.web.%s.story' % station, 'station': station, })['data'] video = try_get(data, lambda x: x['featuredMedia']['video'], dict) or data video_id = compat_str(dict_get(video, ('id', 'publishedKey'), video_id)) title = video.get('title') or video['linkText'] formats = [] m3u8_url = video.get('m3u8') if m3u8_url: formats = self._extract_m3u8_formats( video['m3u8'].split('?')[0], display_id, 'mp4', m3u8_id='hls', fatal=False) mp4_url = video.get('mp4') if mp4_url: 
formats.append({ 'abr': 128, 'format_id': 'https', 'height': 360, 'url': mp4_url, 'width': 640, }) self._sort_formats(formats) image = video.get('image') or {} return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': dict_get(video, ('description', 'caption'), try_get(video, lambda x: x['meta']['description'])), 'thumbnail': dict_get(image, ('source', 'dynamicSource')), 'timestamp': int_or_none(video.get('date')), 'duration': int_or_none(video.get('length')), 'formats': formats, } class ABCOTVSClipsIE(InfoExtractor): IE_NAME = 'abcotvs:clips' _VALID_URL = r'https?://clips\.abcotvs\.com/(?:[^/]+/)*video/(?P<id>\d+)' _TEST = { 'url': 'https://clips.abcotvs.com/kabc/video/214814', 'info_dict': { 'id': '214814', 'ext': 'mp4', 'title': 'SpaceX launch pad explosion destroys rocket, satellite', 'description': 'md5:9f186e5ad8f490f65409965ee9c7be1b', 'upload_date': '20160901', 'timestamp': 1472756695, }, 'params': { # m3u8 download 'skip_download': True, }, } def _real_extract(self, url): video_id = self._match_id(url) video_data = self._download_json('https://clips.abcotvs.com/vogo/video/getByIds?ids=' + video_id, video_id)['results'][0] title = video_data['title'] formats = self._extract_m3u8_formats( video_data['videoURL'].split('?')[0], video_id, 'mp4') self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': video_data.get('description'), 'thumbnail': video_data.get('thumbnailURL'), 'duration': int_or_none(video_data.get('duration')), 'timestamp': int_or_none(video_data.get('pubDate')), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/pandoratv.py
youtube_dl/extractor/pandoratv.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_str, compat_urlparse, ) from ..utils import ( ExtractorError, float_or_none, parse_duration, str_to_int, urlencode_postdata, ) class PandoraTVIE(InfoExtractor): IE_NAME = 'pandora.tv' IE_DESC = '판도라TV' _VALID_URL = r'''(?x) https?:// (?: (?:www\.)?pandora\.tv/view/(?P<user_id>[^/]+)/(?P<id>\d+)| # new format (?:.+?\.)?channel\.pandora\.tv/channel/video\.ptv\?| # old format m\.pandora\.tv/?\? # mobile ) ''' _TESTS = [{ 'url': 'http://jp.channel.pandora.tv/channel/video.ptv?c1=&prgid=53294230&ch_userid=mikakim&ref=main&lot=cate_01_2', 'info_dict': { 'id': '53294230', 'ext': 'flv', 'title': '頭を撫でてくれる?', 'description': '頭を撫でてくれる?', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 39, 'upload_date': '20151218', 'uploader': 'カワイイ動物まとめ', 'uploader_id': 'mikakim', 'view_count': int, 'like_count': int, } }, { 'url': 'http://channel.pandora.tv/channel/video.ptv?ch_userid=gogoucc&prgid=54721744', 'info_dict': { 'id': '54721744', 'ext': 'flv', 'title': '[HD] JAPAN COUNTDOWN 170423', 'description': '[HD] JAPAN COUNTDOWN 170423', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 1704.9, 'upload_date': '20170423', 'uploader': 'GOGO_UCC', 'uploader_id': 'gogoucc', 'view_count': int, 'like_count': int, }, 'params': { # Test metadata only 'skip_download': True, }, }, { 'url': 'http://www.pandora.tv/view/mikakim/53294230#36797454_new', 'only_matching': True, }, { 'url': 'http://m.pandora.tv/?c=view&ch_userid=mikakim&prgid=54600346', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) user_id = mobj.group('user_id') video_id = mobj.group('id') if not user_id or not video_id: qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query) video_id = qs.get('prgid', [None])[0] user_id = qs.get('ch_userid', [None])[0] if any(not f for f in (video_id, user_id,)): raise ExtractorError('Invalid URL', expected=True) 
data = self._download_json( 'http://m.pandora.tv/?c=view&m=viewJsonApi&ch_userid=%s&prgid=%s' % (user_id, video_id), video_id) info = data['data']['rows']['vod_play_info']['result'] formats = [] for format_id, format_url in info.items(): if not format_url: continue height = self._search_regex( r'^v(\d+)[Uu]rl$', format_id, 'height', default=None) if not height: continue play_url = self._download_json( 'http://m.pandora.tv/?c=api&m=play_url', video_id, data=urlencode_postdata({ 'prgid': video_id, 'runtime': info.get('runtime'), 'vod_url': format_url, }), headers={ 'Origin': url, 'Content-Type': 'application/x-www-form-urlencoded', }) format_url = play_url.get('url') if not format_url: continue formats.append({ 'format_id': '%sp' % height, 'url': format_url, 'height': int(height), }) self._sort_formats(formats) return { 'id': video_id, 'title': info['subject'], 'description': info.get('body'), 'thumbnail': info.get('thumbnail') or info.get('poster'), 'duration': float_or_none(info.get('runtime'), 1000) or parse_duration(info.get('time')), 'upload_date': info['fid'].split('/')[-1][:8] if isinstance(info.get('fid'), compat_str) else None, 'uploader': info.get('nickname'), 'uploader_id': info.get('upload_userid'), 'view_count': str_to_int(info.get('hit')), 'like_count': str_to_int(info.get('likecnt')), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/bfi.py
youtube_dl/extractor/bfi.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import extract_attributes class BFIPlayerIE(InfoExtractor): IE_NAME = 'bfi:player' _VALID_URL = r'https?://player\.bfi\.org\.uk/[^/]+/film/watch-(?P<id>[\w-]+)-online' _TEST = { 'url': 'https://player.bfi.org.uk/free/film/watch-computer-doctor-1974-online', 'md5': 'e8783ebd8e061ec4bc6e9501ed547de8', 'info_dict': { 'id': 'htNnhlZjE60C9VySkQEIBtU-cNV1Xx63', 'ext': 'mp4', 'title': 'Computer Doctor', 'description': 'md5:fb6c240d40c4dbe40428bdd62f78203b', }, 'skip': 'BFI Player films cannot be played outside of the UK', } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) entries = [] for player_el in re.findall(r'(?s)<[^>]+class="player"[^>]*>', webpage): player_attr = extract_attributes(player_el) ooyala_id = player_attr.get('data-video-id') if not ooyala_id: continue entries.append(self.url_result( 'ooyala:' + ooyala_id, 'Ooyala', ooyala_id, player_attr.get('data-label'))) return self.playlist_result(entries)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/cliprs.py
youtube_dl/extractor/cliprs.py
# coding: utf-8 from __future__ import unicode_literals from .onet import OnetBaseIE class ClipRsIE(OnetBaseIE): _VALID_URL = r'https?://(?:www\.)?clip\.rs/(?P<id>[^/]+)/\d+' _TEST = { 'url': 'http://www.clip.rs/premijera-frajle-predstavljaju-novi-spot-za-pesmu-moli-me-moli/3732', 'md5': 'c412d57815ba07b56f9edc7b5d6a14e5', 'info_dict': { 'id': '1488842.1399140381', 'ext': 'mp4', 'title': 'PREMIJERA Frajle predstavljaju novi spot za pesmu Moli me, moli', 'description': 'md5:56ce2c3b4ab31c5a2e0b17cb9a453026', 'duration': 229, 'timestamp': 1459850243, 'upload_date': '20160405', } } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) mvp_id = self._search_mvp_id(webpage) info_dict = self._extract_from_id(mvp_id, webpage) info_dict['display_id'] = display_id return info_dict
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/shared.py
youtube_dl/extractor/shared.py
from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_b64decode, compat_urllib_parse_unquote_plus, ) from ..utils import ( determine_ext, ExtractorError, int_or_none, js_to_json, KNOWN_EXTENSIONS, parse_filesize, rot47, url_or_none, urlencode_postdata, ) class SharedBaseIE(InfoExtractor): def _real_extract(self, url): video_id = self._match_id(url) webpage, urlh = self._download_webpage_handle(url, video_id) if self._FILE_NOT_FOUND in webpage: raise ExtractorError( 'Video %s does not exist' % video_id, expected=True) video_url = self._extract_video_url(webpage, video_id, url) title = self._extract_title(webpage) filesize = int_or_none(self._extract_filesize(webpage)) return { 'id': video_id, 'url': video_url, 'ext': 'mp4', 'filesize': filesize, 'title': title, } def _extract_title(self, webpage): return compat_b64decode(self._html_search_meta( 'full:title', webpage, 'title')).decode('utf-8') def _extract_filesize(self, webpage): return self._html_search_meta( 'full:size', webpage, 'file size', fatal=False) class SharedIE(SharedBaseIE): IE_DESC = 'shared.sx' _VALID_URL = r'https?://shared\.sx/(?P<id>[\da-z]{10})' _FILE_NOT_FOUND = '>File does not exist<' _TEST = { 'url': 'http://shared.sx/0060718775', 'md5': '106fefed92a8a2adb8c98e6a0652f49b', 'info_dict': { 'id': '0060718775', 'ext': 'mp4', 'title': 'Bmp4', 'filesize': 1720110, }, } def _extract_video_url(self, webpage, video_id, url): download_form = self._hidden_inputs(webpage) video_page = self._download_webpage( url, video_id, 'Downloading video page', data=urlencode_postdata(download_form), headers={ 'Content-Type': 'application/x-www-form-urlencoded', 'Referer': url, }) video_url = self._html_search_regex( r'data-url=(["\'])(?P<url>(?:(?!\1).)+)\1', video_page, 'video URL', group='url') return video_url class VivoIE(SharedBaseIE): IE_DESC = 'vivo.sx' _VALID_URL = r'https?://vivo\.s[xt]/(?P<id>[\da-z]{10})' _FILE_NOT_FOUND = '>The file you have requested does 
not exists or has been removed' _TESTS = [{ 'url': 'http://vivo.sx/d7ddda0e78', 'md5': '15b3af41be0b4fe01f4df075c2678b2c', 'info_dict': { 'id': 'd7ddda0e78', 'ext': 'mp4', 'title': 'Chicken', 'filesize': 515659, }, }, { 'url': 'http://vivo.st/d7ddda0e78', 'only_matching': True, }] def _extract_title(self, webpage): title = self._html_search_regex( r'data-name\s*=\s*(["\'])(?P<title>(?:(?!\1).)+)\1', webpage, 'title', default=None, group='title') if title: ext = determine_ext(title) if ext.lower() in KNOWN_EXTENSIONS: title = title.rpartition('.' + ext)[0] return title return self._og_search_title(webpage) def _extract_filesize(self, webpage): return parse_filesize(self._search_regex( r'data-type=["\']video["\'][^>]*>Watch.*?<strong>\s*\((.+?)\)', webpage, 'filesize', fatal=False)) def _extract_video_url(self, webpage, video_id, url): def decode_url_old(encoded_url): return compat_b64decode(encoded_url).decode('utf-8') stream_url = self._search_regex( r'data-stream\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 'stream url', default=None, group='url') if stream_url: stream_url = url_or_none(decode_url_old(stream_url)) if stream_url: return stream_url def decode_url(encoded_url): return rot47(compat_urllib_parse_unquote_plus(encoded_url)) return decode_url(self._parse_json( self._search_regex( r'(?s)InitializeStream\s*\(\s*({.+?})\s*\)\s*;', webpage, 'stream'), video_id, transform_source=js_to_json)['source'])
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/rts.py
youtube_dl/extractor/rts.py
# coding: utf-8 from __future__ import unicode_literals import re from .srgssr import SRGSSRIE from ..compat import compat_str from ..utils import ( determine_ext, int_or_none, parse_duration, parse_iso8601, unescapeHTML, urljoin, ) class RTSIE(SRGSSRIE): IE_DESC = 'RTS.ch' _VALID_URL = r'rts:(?P<rts_id>\d+)|https?://(?:.+?\.)?rts\.ch/(?:[^/]+/){2,}(?P<id>[0-9]+)-(?P<display_id>.+?)\.html' _TESTS = [ { 'url': 'http://www.rts.ch/archives/tv/divers/3449373-les-enfants-terribles.html', 'md5': '753b877968ad8afaeddccc374d4256a5', 'info_dict': { 'id': '3449373', 'display_id': 'les-enfants-terribles', 'ext': 'mp4', 'duration': 1488, 'title': 'Les Enfants Terribles', 'description': 'France Pommier et sa soeur Luce Feral, les deux filles de ce groupe de 5.', 'uploader': 'Divers', 'upload_date': '19680921', 'timestamp': -40280400, 'thumbnail': r're:^https?://.*\.image', 'view_count': int, }, 'expected_warnings': ['Unable to download f4m manifest', 'Failed to download m3u8 information'], }, { 'url': 'http://www.rts.ch/emissions/passe-moi-les-jumelles/5624067-entre-ciel-et-mer.html', 'info_dict': { 'id': '5624065', 'title': 'Passe-moi les jumelles', }, 'playlist_mincount': 4, }, { 'url': 'http://www.rts.ch/video/sport/hockey/5745975-1-2-kloten-fribourg-5-2-second-but-pour-gotteron-par-kwiatowski.html', 'info_dict': { 'id': '5745975', 'display_id': '1-2-kloten-fribourg-5-2-second-but-pour-gotteron-par-kwiatowski', 'ext': 'mp4', 'duration': 48, 'title': '1/2, Kloten - Fribourg (5-2): second but pour Gottéron par Kwiatowski', 'description': 'Hockey - Playoff', 'uploader': 'Hockey', 'upload_date': '20140403', 'timestamp': 1396556882, 'thumbnail': r're:^https?://.*\.image', 'view_count': int, }, 'params': { # m3u8 download 'skip_download': True, }, 'expected_warnings': ['Unable to download f4m manifest', 'Failed to download m3u8 information'], 'skip': 'Blocked outside Switzerland', }, { 'url': 
'http://www.rts.ch/video/info/journal-continu/5745356-londres-cachee-par-un-epais-smog.html', 'md5': '9bb06503773c07ce83d3cbd793cebb91', 'info_dict': { 'id': '5745356', 'display_id': 'londres-cachee-par-un-epais-smog', 'ext': 'mp4', 'duration': 33, 'title': 'Londres cachée par un épais smog', 'description': 'Un important voile de smog recouvre Londres depuis mercredi, provoqué par la pollution et du sable du Sahara.', 'uploader': 'L\'actu en vidéo', 'upload_date': '20140403', 'timestamp': 1396537322, 'thumbnail': r're:^https?://.*\.image', 'view_count': int, }, 'expected_warnings': ['Unable to download f4m manifest', 'Failed to download m3u8 information'], }, { 'url': 'http://www.rts.ch/audio/couleur3/programmes/la-belle-video-de-stephane-laurenceau/5706148-urban-hippie-de-damien-krisl-03-04-2014.html', 'md5': 'dd8ef6a22dff163d063e2a52bc8adcae', 'info_dict': { 'id': '5706148', 'display_id': 'urban-hippie-de-damien-krisl-03-04-2014', 'ext': 'mp3', 'duration': 123, 'title': '"Urban Hippie", de Damien Krisl', 'description': 'Des Hippies super glam.', 'upload_date': '20140403', 'timestamp': 1396551600, }, }, { # article with videos on rhs 'url': 'http://www.rts.ch/sport/hockey/6693917-hockey-davos-decroche-son-31e-titre-de-champion-de-suisse.html', 'info_dict': { 'id': '6693917', 'title': 'Hockey: Davos décroche son 31e titre de champion de Suisse', }, 'playlist_mincount': 5, }, { 'url': 'http://pages.rts.ch/emissions/passe-moi-les-jumelles/5624065-entre-ciel-et-mer.html', 'only_matching': True, } ] def _real_extract(self, url): m = re.match(self._VALID_URL, url) media_id = m.group('rts_id') or m.group('id') display_id = m.group('display_id') or media_id def download_json(internal_id): return self._download_json( 'http://www.rts.ch/a/%s.html?f=json/article' % internal_id, display_id) all_info = download_json(media_id) # media_id extracted out of URL is not always a real id if 'video' not in all_info and 'audio' not in all_info: entries = [] for item in 
all_info.get('items', []): item_url = item.get('url') if not item_url: continue entries.append(self.url_result(item_url, 'RTS')) if not entries: page, urlh = self._download_webpage_handle(url, display_id) if re.match(self._VALID_URL, urlh.geturl()).group('id') != media_id: return self.url_result(urlh.geturl(), 'RTS') # article with videos on rhs videos = re.findall( r'<article[^>]+class="content-item"[^>]*>\s*<a[^>]+data-video-urn="urn:([^"]+)"', page) if not videos: videos = re.findall( r'(?s)<iframe[^>]+class="srg-player"[^>]+src="[^"]+urn:([^"]+)"', page) if videos: entries = [self.url_result('srgssr:%s' % video_urn, 'SRGSSR') for video_urn in videos] if entries: return self.playlist_result(entries, media_id, all_info.get('title')) internal_id = self._html_search_regex( r'<(?:video|audio) data-id="([0-9]+)"', page, 'internal video id') all_info = download_json(internal_id) media_type = 'video' if 'video' in all_info else 'audio' # check for errors self._get_media_data('rts', media_type, media_id) info = all_info['video']['JSONinfo'] if 'video' in all_info else all_info['audio'] title = info['title'] def extract_bitrate(url): return int_or_none(self._search_regex( r'-([0-9]+)k\.', url, 'bitrate', default=None)) formats = [] streams = info.get('streams', {}) for format_id, format_url in streams.items(): if format_id == 'hds_sd' and 'hds' in streams: continue if format_id == 'hls_sd' and 'hls' in streams: continue ext = determine_ext(format_url) if ext in ('m3u8', 'f4m'): format_url = self._get_tokenized_src(format_url, media_id, format_id) if ext == 'f4m': formats.extend(self._extract_f4m_formats( format_url + ('?' if '?' 
not in format_url else '&') + 'hdcore=3.4.0', media_id, f4m_id=format_id, fatal=False)) else: formats.extend(self._extract_m3u8_formats( format_url, media_id, 'mp4', 'm3u8_native', m3u8_id=format_id, fatal=False)) else: formats.append({ 'format_id': format_id, 'url': format_url, 'tbr': extract_bitrate(format_url), }) download_base = 'http://rtsww%s-d.rts.ch/' % ('-a' if media_type == 'audio' else '') for media in info.get('media', []): media_url = media.get('url') if not media_url or re.match(r'https?://', media_url): continue rate = media.get('rate') ext = media.get('ext') or determine_ext(media_url, 'mp4') format_id = ext if rate: format_id += '-%dk' % rate formats.append({ 'format_id': format_id, 'url': urljoin(download_base, media_url), 'tbr': rate or extract_bitrate(media_url), }) self._check_formats(formats, media_id) self._sort_formats(formats) duration = info.get('duration') or info.get('cutout') or info.get('cutduration') if isinstance(duration, compat_str): duration = parse_duration(duration) return { 'id': media_id, 'display_id': display_id, 'formats': formats, 'title': title, 'description': info.get('intro'), 'duration': duration, 'view_count': int_or_none(info.get('plays')), 'uploader': info.get('programName'), 'timestamp': parse_iso8601(info.get('broadcast_date')), 'thumbnail': unescapeHTML(info.get('preview_image_url')), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/s4c.py
youtube_dl/extractor/s4c.py
# coding: utf-8 from __future__ import unicode_literals from functools import partial as partial_f from .common import InfoExtractor from ..utils import ( float_or_none, merge_dicts, T, traverse_obj, txt_or_none, url_or_none, ) class S4CIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?s4c\.cymru/clic/programme/(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.s4c.cymru/clic/programme/861362209', 'info_dict': { 'id': '861362209', 'ext': 'mp4', 'title': 'Y Swn', 'description': 'md5:f7681a30e4955b250b3224aa9fe70cf0', 'duration': 5340, 'thumbnail': 'https://www.s4c.cymru/amg/1920x1080/Y_Swn_2023S4C_099_ii.jpg', }, }, { 'url': 'https://www.s4c.cymru/clic/programme/856636948', 'info_dict': { 'id': '856636948', 'ext': 'mp4', 'title': 'Am Dro', 'duration': 2880, 'description': 'md5:100d8686fc9a632a0cb2db52a3433ffe', 'thumbnail': 'https://www.s4c.cymru/amg/1920x1080/Am_Dro_2022-23S4C_P6_4005.jpg', }, }] def _real_extract(self, url): video_id = self._match_id(url) details = self._download_json( 'https://www.s4c.cymru/df/full_prog_details', video_id, query={ 'lang': 'e', 'programme_id': video_id, }, fatal=False) player_config = self._download_json( 'https://player-api.s4c-cdn.co.uk/player-configuration/prod', video_id, query={ 'programme_id': video_id, 'signed': '0', 'lang': 'en', 'mode': 'od', 'appId': 'clic', 'streamName': '', }, note='Downloading player config JSON') m3u8_url = self._download_json( 'https://player-api.s4c-cdn.co.uk/streaming-urls/prod', video_id, query={ 'mode': 'od', 'application': 'clic', 'region': 'WW', 'extra': 'false', 'thirdParty': 'false', 'filename': player_config['filename'], }, note='Downloading streaming urls JSON')['hls'] formats = self._extract_m3u8_formats(m3u8_url, video_id, 'mp4', m3u8_id='hls', entry_protocol='m3u8_native') self._sort_formats(formats) subtitles = {} for sub in traverse_obj(player_config, ('subtitles', lambda _, v: url_or_none(v['0']))): subtitles.setdefault(sub.get('3', 'en'), []).append({ 'url': sub['0'], 'name': 
sub.get('1'), }) return merge_dicts({ 'id': video_id, 'formats': formats, 'subtitles': subtitles, 'thumbnail': url_or_none(player_config.get('poster')), }, traverse_obj(details, ('full_prog_details', 0, { 'title': (('programme_title', 'series_title'), T(txt_or_none)), 'description': ('full_billing', T(txt_or_none)), 'duration': ('duration', T(partial_f(float_or_none, invscale=60))), }), get_all=False), rev=True) class S4CSeriesIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?s4c\.cymru/clic/series/(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.s4c.cymru/clic/series/864982911', 'playlist_mincount': 6, 'info_dict': { 'id': '864982911', 'title': 'Iaith ar Daith', }, }, { 'url': 'https://www.s4c.cymru/clic/series/866852587', 'playlist_mincount': 8, 'info_dict': { 'id': '866852587', 'title': 'FFIT Cymru', }, }] def _real_extract(self, url): series_id = self._match_id(url) series_details = self._download_json( 'https://www.s4c.cymru/df/series_details', series_id, query={ 'lang': 'e', 'series_id': series_id, 'show_prog_in_series': 'Y' }, note='Downloading series details JSON') return self.playlist_result( (self.url_result('https://www.s4c.cymru/clic/programme/' + episode_id, S4CIE, episode_id) for episode_id in traverse_obj(series_details, ('other_progs_in_series', Ellipsis, 'id'))), playlist_id=series_id, playlist_title=traverse_obj( series_details, ('full_prog_details', 0, 'series_title', T(txt_or_none))))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/wdr.py
youtube_dl/extractor/wdr.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_str, compat_urlparse, ) from ..utils import ( determine_ext, dict_get, ExtractorError, js_to_json, strip_jsonp, try_get, unified_strdate, update_url_query, urlhandle_detect_ext, url_or_none, ) class WDRIE(InfoExtractor): __API_URL_TPL = '//deviceids-medp.wdr.de/ondemand/%s/%s' _VALID_URL = (r'(?:https?:' + __API_URL_TPL) % (r'\d+', r'(?=\d+\.js)|wdr:)(?P<id>\d{6,})') _GEO_COUNTRIES = ['DE'] _TESTS = [{ 'url': 'http://deviceids-medp.wdr.de/ondemand/155/1557833.js', 'info_dict': { 'id': 'mdb-1557833', 'ext': 'mp4', 'title': 'Biathlon-Staffel verpasst Podest bei Olympia-Generalprobe', 'upload_date': '20180112', }, }, ] def _asset_url(self, wdr_id): id_len = max(len(wdr_id), 5) return ''.join(('https:', self.__API_URL_TPL % (wdr_id[:id_len - 4], wdr_id, ), '.js')) def _real_extract(self, url): video_id = self._match_id(url) if url.startswith('wdr:'): video_id = url[4:] url = self._asset_url(video_id) metadata = self._download_json( url, video_id, transform_source=strip_jsonp) is_live = metadata.get('mediaType') == 'live' tracker_data = metadata['trackerData'] title = tracker_data['trackerClipTitle'] media_resource = metadata['mediaResource'] formats = [] # check if the metadata contains a direct URL to a file for kind, media in media_resource.items(): if not isinstance(media, dict): continue if kind not in ('dflt', 'alt'): continue for tag_name, medium_url in media.items(): if tag_name not in ('videoURL', 'audioURL'): continue ext = determine_ext(medium_url) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( medium_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls')) elif ext == 'f4m': manifest_url = update_url_query( medium_url, {'hdcore': '3.2.0', 'plugin': 'aasp-3.2.0.77.18'}) formats.extend(self._extract_f4m_formats( manifest_url, video_id, f4m_id='hds', fatal=False)) elif ext == 'smil': 
formats.extend(self._extract_smil_formats( medium_url, 'stream', fatal=False)) else: a_format = { 'url': medium_url } if ext == 'unknown_video': urlh = self._request_webpage( medium_url, video_id, note='Determining extension') ext = urlhandle_detect_ext(urlh) a_format['ext'] = ext formats.append(a_format) self._sort_formats(formats) subtitles = {} caption_url = media_resource.get('captionURL') if caption_url: subtitles['de'] = [{ 'url': caption_url, 'ext': 'ttml', }] captions_hash = media_resource.get('captionsHash') if isinstance(captions_hash, dict): for ext, format_url in captions_hash.items(): format_url = url_or_none(format_url) if not format_url: continue subtitles.setdefault('de', []).append({ 'url': format_url, 'ext': determine_ext(format_url, None) or ext, }) return { 'id': tracker_data.get('trackerClipId', video_id), 'title': self._live_title(title) if is_live else title, 'alt_title': tracker_data.get('trackerClipSubcategory'), 'formats': formats, 'subtitles': subtitles, 'upload_date': unified_strdate(tracker_data.get('trackerClipAirTime')), 'is_live': is_live, } class WDRPageIE(WDRIE): _MAUS_REGEX = r'https?://(?:www\.)wdrmaus.de/(?:[^/]+/)*?(?P<maus_id>[^/?#.]+)(?:/?|/index\.php5|\.php5)$' _PAGE_REGEX = r'/(?:mediathek/)?(?:[^/]+/)*(?P<display_id>[^/]+)\.html' _VALID_URL = r'https?://(?:www\d?\.)?(?:(?:kinder\.)?wdr\d?|sportschau)\.de' + _PAGE_REGEX + '|' + _MAUS_REGEX _TESTS = [ { 'url': 'http://www1.wdr.de/mediathek/video/sendungen/doku-am-freitag/video-geheimnis-aachener-dom-100.html', # HDS download, MD5 is unstable 'info_dict': { 'id': 'mdb-1058683', 'ext': 'flv', 'display_id': 'doku-am-freitag/video-geheimnis-aachener-dom-100', 'title': 'Geheimnis Aachener Dom', 'alt_title': 'Doku am Freitag', 'upload_date': '20160304', 'description': 'md5:87be8ff14d8dfd7a7ee46f0299b52318', 'is_live': False, 'subtitles': {'de': [{ 'url': 'http://ondemand-ww.wdr.de/medp/fsk0/105/1058683/1058683_12220974.xml', 'ext': 'ttml', }]}, }, 'skip': 'HTTP Error 404: Not 
Found', }, { 'url': 'http://www1.wdr.de/mediathek/audio/wdr3/wdr3-gespraech-am-samstag/audio-schriftstellerin-juli-zeh-100.html', 'md5': 'f4c1f96d01cf285240f53ea4309663d8', 'info_dict': { 'id': 'mdb-1072000', 'ext': 'mp3', 'display_id': 'wdr3-gespraech-am-samstag/audio-schriftstellerin-juli-zeh-100', 'title': 'Schriftstellerin Juli Zeh', 'alt_title': 'WDR 3 Gespräch am Samstag', 'upload_date': '20160312', 'description': 'md5:e127d320bc2b1f149be697ce044a3dd7', 'is_live': False, 'subtitles': {} }, 'skip': 'HTTP Error 404: Not Found', }, { 'url': 'http://www1.wdr.de/mediathek/video/live/index.html', 'info_dict': { 'id': 'mdb-2296252', 'ext': 'mp4', 'title': r're:^WDR Fernsehen im Livestream (?:\(nur in Deutschland erreichbar\) )?[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'alt_title': 'WDR Fernsehen Live', 'upload_date': '20201112', 'is_live': True, }, 'params': { 'skip_download': True, # m3u8 download }, }, { 'url': 'http://www1.wdr.de/mediathek/video/sendungen/aktuelle-stunde/aktuelle-stunde-120.html', 'playlist_mincount': 6, 'info_dict': { 'id': 'aktuelle-stunde-120', }, }, { 'url': 'http://www.wdrmaus.de/aktuelle-sendung/index.php5', 'info_dict': { 'id': 'mdb-2627637', 'ext': 'mp4', 'upload_date': 're:^[0-9]{8}$', 'title': 're:^Die Sendung (?:mit der Maus )?vom [0-9.]{10}$', }, 'skip': 'The id changes from week to week because of the new episode' }, { 'url': 'http://www.wdrmaus.de/filme/sachgeschichten/achterbahn.php5', 'md5': '803138901f6368ee497b4d195bb164f2', 'info_dict': { 'id': 'mdb-186083', 'ext': 'mp4', 'upload_date': '20130919', 'title': 'Sachgeschichte - Achterbahn ', }, 'skip': 'HTTP Error 404: Not Found', }, { 'url': 'http://www1.wdr.de/radio/player/radioplayer116~_layout-popupVersion.html', # Live stream, MD5 unstable 'info_dict': { 'id': 'mdb-869971', 'ext': 'mp4', 'title': r're:^COSMO Livestream [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'upload_date': '20160101', }, 'params': { 'skip_download': True, # m3u8 download } }, { 'url': 
'http://www.sportschau.de/handballem2018/handball-nationalmannschaft-em-stolperstein-vorrunde-100.html', 'info_dict': { 'id': 'mdb-1556012', 'ext': 'mp4', 'title': 'DHB-Vizepräsident Bob Hanning - "Die Weltspitze ist extrem breit"', 'upload_date': '20180111', }, 'params': { 'skip_download': True, }, 'skip': 'HTTP Error 404: Not Found', }, { 'url': 'http://www.sportschau.de/handballem2018/audio-vorschau---die-handball-em-startet-mit-grossem-favoritenfeld-100.html', 'only_matching': True, }, { 'url': 'https://kinder.wdr.de/tv/die-sendung-mit-dem-elefanten/av/video-folge---astronaut-100.html', 'only_matching': True, }, ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) display_id = dict_get(mobj.groupdict(), ('display_id', 'maus_id'), 'wdrmaus') webpage = self._download_webpage(url, display_id) entries = [] # Article with several videos # for wdr.de the data-extension is in a tag with the class "mediaLink" # for wdr.de radio players, in a tag with the class "wdrrPlayerPlayBtn" # for wdrmaus, in a tag with the class "videoButton" (previously a link # to the page in a multiline "videoLink"-tag) for mobj in re.finditer( r'''(?sx)class= (?: (["\'])(?:mediaLink|wdrrPlayerPlayBtn|videoButton)\b.*?\1[^>]+| (["\'])videoLink\b.*?\2[\s]*>\n[^\n]* )data-extension=(["\'])(?P<data>(?:(?!\3).)+)\3 ''', webpage): media_link_obj = self._parse_json( mobj.group('data'), display_id, transform_source=js_to_json, fatal=False) if not media_link_obj: continue jsonp_url = try_get( media_link_obj, lambda x: x['mediaObj']['url'], compat_str) if jsonp_url: # metadata, or player JS with ['ref'] giving WDR id, or just media, perhaps clip_id = media_link_obj['mediaObj'].get('ref') if jsonp_url.endswith('.assetjsonp'): asset = self._download_json( jsonp_url, display_id, fatal=False, transform_source=strip_jsonp) clip_id = try_get(asset, lambda x: x['trackerData']['trackerClipId'], compat_str) if clip_id: jsonp_url = self._asset_url(clip_id[4:]) 
entries.append(self.url_result(jsonp_url, ie=WDRIE.ie_key())) # Playlist (e.g. https://www1.wdr.de/mediathek/video/sendungen/aktuelle-stunde/aktuelle-stunde-120.html) if not entries: entries = [ self.url_result( compat_urlparse.urljoin(url, mobj.group('href')), ie=WDRPageIE.ie_key()) for mobj in re.finditer( r'<a[^>]+\bhref=(["\'])(?P<href>(?:(?!\1).)+)\1[^>]+\bdata-extension=', webpage) if re.match(self._PAGE_REGEX, mobj.group('href')) ] return self.playlist_result(entries, playlist_id=display_id) class WDRElefantIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)wdrmaus\.de/elefantenseite/#(?P<id>.+)' _TEST = { 'url': 'http://www.wdrmaus.de/elefantenseite/#elefantenkino_wippe', # adaptive stream: unstable file MD5 'info_dict': { 'title': 'Wippe', 'id': 'mdb-1198320', 'ext': 'mp4', 'age_limit': None, 'upload_date': '20071003' }, } def _real_extract(self, url): display_id = self._match_id(url) # Table of Contents seems to always be at this address, so fetch it directly. # The website fetches configurationJS.php5, which links to tableOfContentsJS.php5. table_of_contents = self._download_json( 'https://www.wdrmaus.de/elefantenseite/data/tableOfContentsJS.php5', display_id) if display_id not in table_of_contents: raise ExtractorError( 'No entry in site\'s table of contents for this URL. 
' 'Is the fragment part of the URL (after the #) correct?', expected=True) xml_metadata_path = table_of_contents[display_id]['xmlPath'] xml_metadata = self._download_xml( 'https://www.wdrmaus.de/elefantenseite/' + xml_metadata_path, display_id) zmdb_url_element = xml_metadata.find('./movie/zmdb_url') if zmdb_url_element is None: raise ExtractorError( '%s is not a video' % display_id, expected=True) return self.url_result(zmdb_url_element.text, ie=WDRIE.ie_key()) class WDRMobileIE(InfoExtractor): _VALID_URL = r'''(?x) https?://mobile-ondemand\.wdr\.de/ .*?/fsk(?P<age_limit>[0-9]+) /[0-9]+/[0-9]+/ (?P<id>[0-9]+)_(?P<title>[0-9]+)''' IE_NAME = 'wdr:mobile' _WORKING = False # no such domain _TEST = { 'url': 'http://mobile-ondemand.wdr.de/CMS2010/mdb/ondemand/weltweit/fsk0/42/421735/421735_4283021.mp4', 'info_dict': { 'title': '4283021', 'id': '421735', 'ext': 'mp4', 'age_limit': 0, }, 'skip': 'Problems with loading data.' } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) return { 'id': mobj.group('id'), 'title': mobj.group('title'), 'age_limit': int(mobj.group('age_limit')), 'url': url, 'http_headers': { 'User-Agent': 'mobile', }, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/jwplatform.py
youtube_dl/extractor/jwplatform.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import unsmuggle_url class JWPlatformIE(InfoExtractor): _VALID_URL = r'(?:https?://(?:content\.jwplatform|cdn\.jwplayer)\.com/(?:(?:feed|player|thumb|preview)s|jw6|v2/media)/|jwplatform:)(?P<id>[a-zA-Z0-9]{8})' _TESTS = [{ 'url': 'http://content.jwplatform.com/players/nPripu9l-ALJ3XQCI.js', 'md5': 'fa8899fa601eb7c83a64e9d568bdf325', 'info_dict': { 'id': 'nPripu9l', 'ext': 'mov', 'title': 'Big Buck Bunny Trailer', 'description': 'Big Buck Bunny is a short animated film by the Blender Institute. It is made using free and open source software.', 'upload_date': '20081127', 'timestamp': 1227796140, } }, { 'url': 'https://cdn.jwplayer.com/players/nPripu9l-ALJ3XQCI.js', 'only_matching': True, }] @staticmethod def _extract_url(webpage): urls = JWPlatformIE._extract_urls(webpage) return urls[0] if urls else None @staticmethod def _extract_urls(webpage): return re.findall( r'<(?:script|iframe)[^>]+?src=["\']((?:https?:)?//(?:content\.jwplatform|cdn\.jwplayer)\.com/players/[a-zA-Z0-9]{8})', webpage) def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) self._initialize_geo_bypass({ 'countries': smuggled_data.get('geo_countries'), }) video_id = self._match_id(url) json_data = self._download_json('https://cdn.jwplayer.com/v2/media/' + video_id, video_id) return self._parse_jwplayer_data(json_data, video_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/zdf.py
youtube_dl/extractor/zdf.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_str from ..utils import ( determine_ext, ExtractorError, extract_attributes, float_or_none, int_or_none, merge_dicts, NO_DEFAULT, parse_codecs, qualities, str_or_none, try_get, unified_timestamp, update_url_query, url_or_none, urljoin, ) class ZDFBaseIE(InfoExtractor): _GEO_COUNTRIES = ['DE'] _QUALITIES = ('auto', 'low', 'med', 'high', 'veryhigh', 'hd') def _call_api(self, url, video_id, item, api_token=None, referrer=None): headers = {} if api_token: headers['Api-Auth'] = 'Bearer %s' % api_token if referrer: headers['Referer'] = referrer return self._download_json( url, video_id, 'Downloading JSON %s' % item, headers=headers) @staticmethod def _extract_subtitles(src): subtitles = {} for caption in try_get(src, lambda x: x['captions'], list) or []: subtitle_url = url_or_none(caption.get('uri')) if subtitle_url: lang = caption.get('language', 'deu') subtitles.setdefault(lang, []).append({ 'url': subtitle_url, }) return subtitles def _extract_format(self, video_id, formats, format_urls, meta): format_url = url_or_none(meta.get('url')) if not format_url: return if format_url in format_urls: return format_urls.add(format_url) mime_type = meta.get('mimeType') ext = determine_ext(format_url) join_nonempty = lambda s, l: s.join(filter(None, l)) meta_map = lambda t: map(lambda x: str_or_none(meta.get(x)), t) if mime_type == 'application/x-mpegURL' or ext == 'm3u8': new_formats = self._extract_m3u8_formats( format_url, video_id, 'mp4', m3u8_id='hls', entry_protocol='m3u8_native', fatal=False) elif mime_type == 'application/f4m+xml' or ext == 'f4m': new_formats = self._extract_f4m_formats( update_url_query(format_url, {'hdcore': '3.7.0'}), video_id, f4m_id='hds', fatal=False) else: f = parse_codecs(meta.get('mimeCodec')) if not f: data = meta.get('type', '').split('_') if try_get(data, lambda x: x[2]) == ext: f = dict(zip(('vcodec', 'acodec'), 
data[1])) format_id = ['http'] format_id.extend(join_nonempty('-', meta_map(('type', 'quality')))) f.update({ 'url': format_url, 'format_id': '-'.join(format_id), 'tbr': int_or_none(self._search_regex(r'_(\d+)k_', format_url, 'tbr', default=None)) }) new_formats = [f] formats.extend(merge_dicts(f, { 'format_note': join_nonempty(',', meta_map(('quality', 'class'))), 'language': meta.get('language'), 'language_preference': 10 if meta.get('class') == 'main' else -10 if meta.get('class') == 'ad' else -1, 'quality': qualities(self._QUALITIES)(meta.get('quality')), }) for f in new_formats) def _extract_ptmd(self, ptmd_url, video_id, api_token, referrer): ptmd = self._call_api( ptmd_url, video_id, 'metadata', api_token, referrer) content_id = ptmd.get('basename') or ptmd_url.split('/')[-1] formats = [] track_uris = set() for p in ptmd['priorityList']: formitaeten = p.get('formitaeten') if not isinstance(formitaeten, list): continue for f in formitaeten: f_qualities = f.get('qualities') if not isinstance(f_qualities, list): continue for quality in f_qualities: tracks = try_get(quality, lambda x: x['audio']['tracks'], list) if not tracks: continue for track in tracks: self._extract_format( content_id, formats, track_uris, { 'url': track.get('uri'), 'type': f.get('type'), 'mimeType': f.get('mimeType'), 'quality': quality.get('quality'), 'class': track.get('class'), 'language': track.get('language'), }) self._sort_formats(formats) duration = float_or_none(try_get( ptmd, lambda x: x['attributes']['duration']['value']), scale=1000) return { 'extractor_key': ZDFIE.ie_key(), 'id': content_id, 'duration': duration, 'formats': formats, 'subtitles': self._extract_subtitles(ptmd), } def _extract_player(self, webpage, video_id, fatal=True): return self._parse_json( self._search_regex( r'(?s)data-zdfplayer-jsb=(["\'])(?P<json>{.+?})\1', webpage, 'player JSON', default='{}' if not fatal else NO_DEFAULT, group='json'), video_id) class ZDFIE(ZDFBaseIE): _VALID_URL = 
r'https?://www\.zdf\.de/(?:[^/]+/)*(?P<id>[^/?#&]+)\.html' _TESTS = [{ # Same as https://www.phoenix.de/sendungen/ereignisse/corona-nachgehakt/wohin-fuehrt-der-protest-in-der-pandemie-a-2050630.html 'url': 'https://www.zdf.de/politik/phoenix-sendungen/wohin-fuehrt-der-protest-in-der-pandemie-100.html', 'md5': '34ec321e7eb34231fd88616c65c92db0', 'info_dict': { 'id': '210222_phx_nachgehakt_corona_protest', 'ext': 'mp4', 'title': 'Wohin führt der Protest in der Pandemie?', 'description': 'md5:7d643fe7f565e53a24aac036b2122fbd', 'duration': 1691, 'timestamp': 1613948400, 'upload_date': '20210221', }, 'skip': 'No longer available: "Diese Seite wurde leider nicht gefunden"', }, { # Same as https://www.3sat.de/film/ab-18/10-wochen-sommer-108.html 'url': 'https://www.zdf.de/dokumentation/ab-18/10-wochen-sommer-102.html', 'md5': '0aff3e7bc72c8813f5e0fae333316a1d', 'info_dict': { 'id': '141007_ab18_10wochensommer_film', 'ext': 'mp4', 'title': 'Ab 18! - 10 Wochen Sommer', 'description': 'md5:8253f41dc99ce2c3ff892dac2d65fe26', 'duration': 2660, 'timestamp': 1608604200, 'upload_date': '20201222', }, 'skip': 'No longer available: "Diese Seite wurde leider nicht gefunden"', }, { 'url': 'https://www.zdf.de/dokumentation/terra-x/die-magie-der-farben-von-koenigspurpur-und-jeansblau-100.html', 'info_dict': { 'id': '151025_magie_farben2_tex', 'ext': 'mp4', 'title': 'Die Magie der Farben (2/2)', 'description': 'md5:a89da10c928c6235401066b60a6d5c1a', 'duration': 2615, 'timestamp': 1465021200, 'upload_date': '20160604', 'thumbnail': 'https://www.zdf.de/assets/mauve-im-labor-100~768x432?cb=1464909117806', }, }, { 'url': 'https://www.zdf.de/funk/druck-11790/funk-alles-ist-verzaubert-102.html', 'md5': '1b93bdec7d02fc0b703c5e7687461628', 'info_dict': { 'ext': 'mp4', 'id': 'video_funk_1770473', 'duration': 1278, 'description': 'Die Neue an der Schule verdreht Ismail den Kopf.', 'title': 'Alles ist verzaubert', 'timestamp': 1635520560, 'upload_date': '20211029', 'thumbnail': 
'https://www.zdf.de/assets/teaser-funk-alles-ist-verzaubert-100~1920x1080?cb=1636466431799', }, }, { # Same as https://www.phoenix.de/sendungen/dokumentationen/gesten-der-maechtigen-i-a-89468.html?ref=suche 'url': 'https://www.zdf.de/politik/phoenix-sendungen/die-gesten-der-maechtigen-100.html', 'only_matching': True, }, { # Same as https://www.3sat.de/film/spielfilm/der-hauptmann-100.html 'url': 'https://www.zdf.de/filme/filme-sonstige/der-hauptmann-112.html', 'only_matching': True, }, { # Same as https://www.3sat.de/wissen/nano/nano-21-mai-2019-102.html, equal media ids 'url': 'https://www.zdf.de/wissen/nano/nano-21-mai-2019-102.html', 'only_matching': True, }, { 'url': 'https://www.zdf.de/service-und-hilfe/die-neue-zdf-mediathek/zdfmediathek-trailer-100.html', 'only_matching': True, }, { 'url': 'https://www.zdf.de/filme/taunuskrimi/die-lebenden-und-die-toten-1---ein-taunuskrimi-100.html', 'only_matching': True, }, { 'url': 'https://www.zdf.de/dokumentation/planet-e/planet-e-uebersichtsseite-weitere-dokumentationen-von-planet-e-100.html', 'only_matching': True, }, { 'url': 'https://www.zdf.de/arte/todliche-flucht/page-video-artede-toedliche-flucht-16-100.html', 'info_dict': { 'id': 'video_artede_083871-001-A', 'ext': 'mp4', 'title': 'Tödliche Flucht (1/6)', 'description': 'md5:e34f96a9a5f8abd839ccfcebad3d5315', 'duration': 3193.0, 'timestamp': 1641355200, 'upload_date': '20220105', }, 'skip': 'No longer available "Diese Seite wurde leider nicht gefunden"' }, { 'url': 'https://www.zdf.de/serien/soko-stuttgart/das-geld-anderer-leute-100.html', 'info_dict': { 'id': '191205_1800_sendung_sok8', 'ext': 'mp4', 'title': 'Das Geld anderer Leute', 'description': 'md5:cb6f660850dc5eb7d1ab776ea094959d', 'duration': 2581.0, 'timestamp': 1654790700, 'upload_date': '20220609', 'thumbnail': 'https://epg-image.zdf.de/fotobase-webdelivery/images/e2d7e55a-09f0-424e-ac73-6cac4dd65f35?layout=2400x1350', }, }] def _extract_entry(self, url, player, content, video_id): title = 
content.get('title') or content['teaserHeadline'] t = content['mainVideoContent']['http://zdf.de/rels/target'] def get_ptmd_path(d): return ( d.get('http://zdf.de/rels/streams/ptmd') or d.get('http://zdf.de/rels/streams/ptmd-template', '').replace('{playerId}', 'ngplayer_2_4')) ptmd_path = get_ptmd_path(try_get(t, lambda x: x['streams']['default'], dict) or {}) if not ptmd_path: ptmd_path = get_ptmd_path(t) if not ptmd_path: raise ExtractorError('Could not extract ptmd_path') info = self._extract_ptmd( urljoin(url, ptmd_path), video_id, player['apiToken'], url) thumbnails = [] layouts = try_get( content, lambda x: x['teaserImageRef']['layouts'], dict) if layouts: for layout_key, layout_url in layouts.items(): layout_url = url_or_none(layout_url) if not layout_url: continue thumbnail = { 'url': layout_url, 'format_id': layout_key, } mobj = re.search(r'(?P<width>\d+)x(?P<height>\d+)', layout_key) if mobj: thumbnail.update({ 'width': int(mobj.group('width')), 'height': int(mobj.group('height')), }) thumbnails.append(thumbnail) return merge_dicts(info, { 'title': title, 'description': content.get('leadParagraph') or content.get('teasertext'), 'duration': int_or_none(t.get('duration')), 'timestamp': unified_timestamp(content.get('editorialDate')), 'thumbnails': thumbnails, }) def _extract_regular(self, url, player, video_id): content = self._call_api( player['content'], video_id, 'content', player['apiToken'], url) return self._extract_entry(player['content'], player, content, video_id) def _extract_mobile(self, video_id): video = self._download_json( 'https://zdf-cdn.live.cellular.de/mediathekV2/document/%s' % video_id, video_id) formats = [] formitaeten = try_get(video, lambda x: x['document']['formitaeten'], list) document = formitaeten and video['document'] if formitaeten: title = document['titel'] content_id = document['basename'] format_urls = set() for f in formitaeten or []: self._extract_format(content_id, formats, format_urls, f) self._sort_formats(formats) 
thumbnails = [] teaser_bild = document.get('teaserBild') if isinstance(teaser_bild, dict): for thumbnail_key, thumbnail in teaser_bild.items(): thumbnail_url = try_get( thumbnail, lambda x: x['url'], compat_str) if thumbnail_url: thumbnails.append({ 'url': thumbnail_url, 'id': thumbnail_key, 'width': int_or_none(thumbnail.get('width')), 'height': int_or_none(thumbnail.get('height')), }) return { 'id': content_id, 'title': title, 'description': document.get('beschreibung'), 'duration': int_or_none(document.get('length')), 'timestamp': unified_timestamp(document.get('date')) or unified_timestamp( try_get(video, lambda x: x['meta']['editorialDate'], compat_str)), 'thumbnails': thumbnails, 'subtitles': self._extract_subtitles(document), 'formats': formats, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id, fatal=False) if webpage: player = self._extract_player(webpage, url, fatal=False) if player: return self._extract_regular(url, player, video_id) return self._extract_mobile(video_id) class ZDFChannelIE(ZDFBaseIE): _VALID_URL = r'https?://www\.zdf\.de/(?:[^/]+/)*(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://www.zdf.de/sport/das-aktuelle-sportstudio', 'info_dict': { 'id': 'das-aktuelle-sportstudio', 'title': 'das aktuelle sportstudio', }, 'playlist_mincount': 18, }, { 'url': 'https://www.zdf.de/dokumentation/planet-e', 'info_dict': { 'id': 'planet-e', 'title': 'planet e.', }, 'playlist_mincount': 50, }, { 'url': 'https://www.zdf.de/gesellschaft/aktenzeichen-xy-ungeloest', 'info_dict': { 'id': 'aktenzeichen-xy-ungeloest', 'title': 'Aktenzeichen XY... 
ungelöst', 'entries': "lambda x: not any('xy580-fall1-kindermoerder-gesucht-100' in e['url'] for e in x)", }, 'playlist_mincount': 2, }, { 'url': 'https://www.zdf.de/filme/taunuskrimi/', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if ZDFIE.suitable(url) else super(ZDFChannelIE, cls).suitable(url) def _og_search_title(self, webpage, fatal=False): title = super(ZDFChannelIE, self)._og_search_title(webpage, fatal=fatal) return re.split(r'\s+[-|]\s+ZDF(?:mediathek)?$', title or '')[0] or None def _real_extract(self, url): channel_id = self._match_id(url) webpage = self._download_webpage(url, channel_id) matches = re.finditer( r'''<div\b[^>]*?\sdata-plusbar-id\s*=\s*(["'])(?P<p_id>[\w-]+)\1[^>]*?\sdata-plusbar-url=\1(?P<url>%s)\1''' % ZDFIE._VALID_URL, webpage) if self._downloader.params.get('noplaylist', False): entry = next( (self.url_result(m.group('url'), ie=ZDFIE.ie_key()) for m in matches), None) self.to_screen('Downloading just the main video because of --no-playlist') if entry: return entry else: self.to_screen('Downloading playlist %s - add --no-playlist to download just the main video' % (channel_id, )) def check_video(m): v_ref = self._search_regex( r'''(<a\b[^>]*?\shref\s*=[^>]+?\sdata-target-id\s*=\s*(["'])%s\2[^>]*>)''' % (m.group('p_id'), ), webpage, 'check id', default='') v_ref = extract_attributes(v_ref) return v_ref.get('data-target-video-type') != 'novideo' return self.playlist_from_matches( (m.group('url') for m in matches if check_video(m)), channel_id, self._og_search_title(webpage, fatal=False))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/vube.py
youtube_dl/extractor/vube.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_str, ) from ..utils import ( int_or_none, ExtractorError, ) class VubeIE(InfoExtractor): IE_NAME = 'vube' IE_DESC = 'Vube.com' _VALID_URL = r'https?://vube\.com/(?:[^/]+/)+(?P<id>[\da-zA-Z]{10})\b' _TESTS = [ { 'url': 'http://vube.com/trending/William+Wei/Y8NUZ69Tf7?t=s', 'md5': 'e7aabe1f8f1aa826b9e4735e1f9cee42', 'info_dict': { 'id': 'Y8NUZ69Tf7', 'ext': 'mp4', 'title': 'Best Drummer Ever [HD]', 'description': 'md5:2d63c4b277b85c2277761c2cf7337d71', 'thumbnail': r're:^https?://.*\.jpg', 'uploader': 'William', 'timestamp': 1406876915, 'upload_date': '20140801', 'duration': 258.051, 'like_count': int, 'dislike_count': int, 'comment_count': int, 'categories': ['amazing', 'hd', 'best drummer ever', 'william wei', 'bucket drumming', 'street drummer', 'epic street drumming'], }, 'skip': 'Not accessible from Travis CI server', }, { 'url': 'http://vube.com/Chiara+Grispo+Video+Channel/YL2qNPkqon', 'md5': 'db7aba89d4603dadd627e9d1973946fe', 'info_dict': { 'id': 'YL2qNPkqon', 'ext': 'mp4', 'title': 'Chiara Grispo - Price Tag by Jessie J', 'description': 'md5:8ea652a1f36818352428cb5134933313', 'thumbnail': r're:^http://frame\.thestaticvube\.com/snap/[0-9x]+/102e7e63057-5ebc-4f5c-4065-6ce4ebde131f\.jpg$', 'uploader': 'Chiara.Grispo', 'timestamp': 1388743358, 'upload_date': '20140103', 'duration': 170.56, 'like_count': int, 'dislike_count': int, 'comment_count': int, 'categories': ['pop', 'music', 'cover', 'singing', 'jessie j', 'price tag', 'chiara grispo'], }, 'skip': 'Removed due to DMCA', }, { 'url': 'http://vube.com/SerainaMusic/my-7-year-old-sister-and-i-singing-alive-by-krewella/UeBhTudbfS?t=s&n=1', 'md5': '5d4a52492d76f72712117ce6b0d98d08', 'info_dict': { 'id': 'UeBhTudbfS', 'ext': 'mp4', 'title': 'My 7 year old Sister and I singing "Alive" by Krewella', 'description': 'md5:40bcacb97796339f1690642c21d56f4a', 'thumbnail': 
r're:^http://frame\.thestaticvube\.com/snap/[0-9x]+/102265d5a9f-0f17-4f6b-5753-adf08484ee1e\.jpg$', 'uploader': 'Seraina', 'timestamp': 1396492438, 'upload_date': '20140403', 'duration': 240.107, 'like_count': int, 'dislike_count': int, 'comment_count': int, 'categories': ['seraina', 'jessica', 'krewella', 'alive'], }, 'skip': 'Removed due to DMCA', }, { 'url': 'http://vube.com/vote/Siren+Gene/0nmsMY5vEq?n=2&t=s', 'md5': '0584fc13b50f887127d9d1007589d27f', 'info_dict': { 'id': '0nmsMY5vEq', 'ext': 'mp4', 'title': 'Frozen - Let It Go Cover by Siren Gene', 'description': 'My rendition of "Let It Go" originally sung by Idina Menzel.', 'thumbnail': r're:^http://frame\.thestaticvube\.com/snap/[0-9x]+/10283ab622a-86c9-4681-51f2-30d1f65774af\.jpg$', 'uploader': 'Siren', 'timestamp': 1395448018, 'upload_date': '20140322', 'duration': 221.788, 'like_count': int, 'dislike_count': int, 'comment_count': int, 'categories': ['let it go', 'cover', 'idina menzel', 'frozen', 'singing', 'disney', 'siren gene'], }, 'skip': 'Removed due to DMCA', } ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') video = self._download_json( 'http://vube.com/t-api/v1/video/%s' % video_id, video_id, 'Downloading video JSON') public_id = video['public_id'] formats = [] for media in video['media'].get('video', []) + video['media'].get('audio', []): if media['transcoding_status'] != 'processed': continue fmt = { 'url': 'http://video.thestaticvube.com/video/%s/%s.mp4' % (media['media_resolution_id'], public_id), 'abr': int(media['audio_bitrate']), 'format_id': compat_str(media['media_resolution_id']), } vbr = int(media['video_bitrate']) if vbr: fmt.update({ 'vbr': vbr, 'height': int(media['height']), }) formats.append(fmt) self._sort_formats(formats) if not formats and video.get('vst') == 'dmca': raise ExtractorError( 'This video has been removed in response to a complaint received under the US Digital Millennium Copyright Act.', expected=True) title = 
video['title'] description = video.get('description') thumbnail = self._proto_relative_url(video.get('thumbnail_src'), scheme='http:') uploader = video.get('user_alias') or video.get('channel') timestamp = int_or_none(video.get('upload_time')) duration = video['duration'] view_count = video.get('raw_view_count') like_count = video.get('total_likes') dislike_count = video.get('total_hates') comments = video.get('comments') comment_count = None if comments is None: comment_data = self._download_json( 'http://vube.com/api/video/%s/comment' % video_id, video_id, 'Downloading video comment JSON', fatal=False) if comment_data is not None: comment_count = int_or_none(comment_data.get('total')) else: comment_count = len(comments) categories = [tag['text'] for tag in video['tags']] return { 'id': video_id, 'formats': formats, 'title': title, 'description': description, 'thumbnail': thumbnail, 'uploader': uploader, 'timestamp': timestamp, 'duration': duration, 'view_count': view_count, 'like_count': like_count, 'dislike_count': dislike_count, 'comment_count': comment_count, 'categories': categories, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/trunews.py
youtube_dl/extractor/trunews.py
from __future__ import unicode_literals from .common import InfoExtractor class TruNewsIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?trunews\.com/stream/(?P<id>[^/?#&]+)' _TEST = { 'url': 'https://www.trunews.com/stream/will-democrats-stage-a-circus-during-president-trump-s-state-of-the-union-speech', 'info_dict': { 'id': '5c5a21e65d3c196e1c0020cc', 'display_id': 'will-democrats-stage-a-circus-during-president-trump-s-state-of-the-union-speech', 'ext': 'mp4', 'title': "Will Democrats Stage a Circus During President Trump's State of the Union Speech?", 'description': 'md5:c583b72147cc92cf21f56a31aff7a670', 'duration': 3685, 'timestamp': 1549411440, 'upload_date': '20190206', }, 'add_ie': ['Zype'], } _ZYPE_TEMPL = 'https://player.zype.com/embed/%s.js?api_key=X5XnahkjCwJrT_l5zUqypnaLEObotyvtUKJWWlONxDoHVjP8vqxlArLV8llxMbyt' def _real_extract(self, url): display_id = self._match_id(url) zype_id = self._download_json( 'https://api.zype.com/videos', display_id, query={ 'app_key': 'PUVKp9WgGUb3-JUw6EqafLx8tFVP6VKZTWbUOR-HOm__g4fNDt1bCsm_LgYf_k9H', 'per_page': 1, 'active': 'true', 'friendly_title': display_id, })['response'][0]['_id'] return self.url_result(self._ZYPE_TEMPL % zype_id, 'Zype', zype_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/adobeconnect.py
youtube_dl/extractor/adobeconnect.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_parse_qs, compat_urlparse, ) class AdobeConnectIE(InfoExtractor): _VALID_URL = r'https?://\w+\.adobeconnect\.com/(?P<id>[\w-]+)' def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._html_search_regex(r'<title>(.+?)</title>', webpage, 'title') qs = compat_parse_qs(self._search_regex(r"swfUrl\s*=\s*'([^']+)'", webpage, 'swf url').split('?')[1]) is_live = qs.get('isLive', ['false'])[0] == 'true' formats = [] for con_string in qs['conStrings'][0].split(','): formats.append({ 'format_id': con_string.split('://')[0], 'app': compat_urlparse.quote('?' + con_string.split('?')[1] + 'flvplayerapp/' + qs['appInstance'][0]), 'ext': 'flv', 'play_path': 'mp4:' + qs['streamName'][0], 'rtmp_conn': 'S:' + qs['ticket'][0], 'rtmp_live': is_live, 'url': con_string, }) return { 'id': video_id, 'title': self._live_title(title) if is_live else title, 'formats': formats, 'is_live': is_live, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/allocine.py
youtube_dl/extractor/allocine.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_str from ..utils import ( int_or_none, qualities, remove_end, try_get, unified_timestamp, url_basename, ) class AllocineIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?allocine\.fr/(?:article|video|film)/(?:fichearticle_gen_carticle=|player_gen_cmedia=|fichefilm_gen_cfilm=|video-)(?P<id>[0-9]+)(?:\.html)?' _TESTS = [{ 'url': 'http://www.allocine.fr/article/fichearticle_gen_carticle=18635087.html', 'md5': '0c9fcf59a841f65635fa300ac43d8269', 'info_dict': { 'id': '19546517', 'display_id': '18635087', 'ext': 'mp4', 'title': 'Astérix - Le Domaine des Dieux Teaser VF', 'description': 'md5:4a754271d9c6f16c72629a8a993ee884', 'thumbnail': r're:http://.*\.jpg', 'duration': 39, 'timestamp': 1404273600, 'upload_date': '20140702', 'view_count': int, }, }, { 'url': 'http://www.allocine.fr/video/player_gen_cmedia=19540403&cfilm=222257.html', 'md5': 'd0cdce5d2b9522ce279fdfec07ff16e0', 'info_dict': { 'id': '19540403', 'display_id': '19540403', 'ext': 'mp4', 'title': 'Planes 2 Bande-annonce VF', 'description': 'Regardez la bande annonce du film Planes 2 (Planes 2 Bande-annonce VF). 
Planes 2, un film de Roberts Gannaway', 'thumbnail': r're:http://.*\.jpg', 'duration': 69, 'timestamp': 1385659800, 'upload_date': '20131128', 'view_count': int, }, }, { 'url': 'http://www.allocine.fr/video/player_gen_cmedia=19544709&cfilm=181290.html', 'md5': '101250fb127ef9ca3d73186ff22a47ce', 'info_dict': { 'id': '19544709', 'display_id': '19544709', 'ext': 'mp4', 'title': 'Dragons 2 - Bande annonce finale VF', 'description': 'md5:6cdd2d7c2687d4c6aafe80a35e17267a', 'thumbnail': r're:http://.*\.jpg', 'duration': 144, 'timestamp': 1397589900, 'upload_date': '20140415', 'view_count': int, }, }, { 'url': 'http://www.allocine.fr/video/video-19550147/', 'md5': '3566c0668c0235e2d224fd8edb389f67', 'info_dict': { 'id': '19550147', 'ext': 'mp4', 'title': 'Faux Raccord N°123 - Les gaffes de Cliffhanger', 'description': 'md5:bc734b83ffa2d8a12188d9eb48bb6354', 'thumbnail': r're:http://.*\.jpg', }, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) formats = [] quality = qualities(['ld', 'md', 'hd']) model = self._html_search_regex( r'data-model="([^"]+)"', webpage, 'data model', default=None) if model: model_data = self._parse_json(model, display_id) video = model_data['videos'][0] title = video['title'] for video_url in video['sources'].values(): video_id, format_id = url_basename(video_url).split('_')[:2] formats.append({ 'format_id': format_id, 'quality': quality(format_id), 'url': video_url, }) duration = int_or_none(video.get('duration')) view_count = int_or_none(video.get('view_count')) timestamp = unified_timestamp(try_get( video, lambda x: x['added_at']['date'], compat_str)) else: video_id = display_id media_data = self._download_json( 'http://www.allocine.fr/ws/AcVisiondataV5.ashx?media=%s' % video_id, display_id) title = remove_end( self._html_search_regex( r'(?s)<title>(.+?)</title>', webpage, 'title').strip(), ' - AlloCiné') for key, value in media_data['video'].items(): if not key.endswith('Path'): 
continue format_id = key[:-len('Path')] formats.append({ 'format_id': format_id, 'quality': quality(format_id), 'url': value, }) duration, view_count, timestamp = [None] * 3 self._sort_formats(formats) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': self._og_search_description(webpage), 'thumbnail': self._og_search_thumbnail(webpage), 'duration': duration, 'timestamp': timestamp, 'view_count': view_count, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/rbgtum.py
youtube_dl/extractor/rbgtum.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor class RbgTumIE(InfoExtractor): _VALID_URL = r'https://live\.rbg\.tum\.de/w/(?P<id>.+)' _TESTS = [{ # Combined view 'url': 'https://live.rbg.tum.de/w/cpp/22128', 'md5': '53a5e7b3e07128e33bbf36687fe1c08f', 'info_dict': { 'id': 'cpp/22128', 'ext': 'mp4', 'title': 'Lecture: October 18. 2022', 'series': 'Concepts of C++ programming (IN2377)', } }, { # Presentation only 'url': 'https://live.rbg.tum.de/w/I2DL/12349/PRES', 'md5': '36c584272179f3e56b0db5d880639cba', 'info_dict': { 'id': 'I2DL/12349/PRES', 'ext': 'mp4', 'title': 'Lecture 3: Introduction to Neural Networks', 'series': 'Introduction to Deep Learning (IN2346)', } }, { # Camera only 'url': 'https://live.rbg.tum.de/w/fvv-info/16130/CAM', 'md5': 'e04189d92ff2f56aedf5cede65d37aad', 'info_dict': { 'id': 'fvv-info/16130/CAM', 'ext': 'mp4', 'title': 'Fachschaftsvollversammlung', 'series': 'Fachschaftsvollversammlung Informatik', } }, ] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) m3u8 = self._html_search_regex(r'(https://.+?\.m3u8)', webpage, 'm3u8') lecture_title = self._html_search_regex(r'(?si)<h1.*?>(.*)</h1>', webpage, 'title') lecture_series_title = self._html_search_regex( r'(?s)<title\b[^>]*>\s*(?:TUM-Live\s\|\s?)?([^:]+):?.*?</title>', webpage, 'series') formats = self._extract_m3u8_formats(m3u8, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls') self._sort_formats(formats) return { 'id': video_id, 'title': lecture_title, 'series': lecture_series_title, 'formats': formats, } class RbgTumCourseIE(InfoExtractor): _VALID_URL = r'https://live\.rbg\.tum\.de/course/(?P<id>.+)' _TESTS = [{ 'url': 'https://live.rbg.tum.de/course/2022/S/fpv', 'info_dict': { 'title': 'Funktionale Programmierung und Verifikation (IN0003)', 'id': '2022/S/fpv', }, 'params': { 'noplaylist': False, }, 'playlist_count': 13, }, { 'url': 
'https://live.rbg.tum.de/course/2022/W/set', 'info_dict': { 'title': 'SET FSMPIC', 'id': '2022/W/set', }, 'params': { 'noplaylist': False, }, 'playlist_count': 6, }, ] def _real_extract(self, url): course_id = self._match_id(url) webpage = self._download_webpage(url, course_id) lecture_series_title = self._html_search_regex(r'(?si)<h1.*?>(.*)</h1>', webpage, 'title') lecture_urls = [] for lecture_url in re.findall(r'(?i)href="/w/(.+)(?<!/cam)(?<!/pres)(?<!/chat)"', webpage): lecture_urls.append(self.url_result('https://live.rbg.tum.de/w/' + lecture_url, ie=RbgTumIE.ie_key())) return self.playlist_result(lecture_urls, course_id, lecture_series_title)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/viddler.py
youtube_dl/extractor/viddler.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( float_or_none, int_or_none, ) class ViddlerIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?viddler\.com/(?:v|embed|player)/(?P<id>[a-z0-9]+)(?:.+?\bsecret=(\d+))?' _TESTS = [{ 'url': 'http://www.viddler.com/v/43903784', 'md5': '9eee21161d2c7f5b39690c3e325fab2f', 'info_dict': { 'id': '43903784', 'ext': 'mov', 'title': 'Video Made Easy', 'description': 'md5:6a697ebd844ff3093bd2e82c37b409cd', 'uploader': 'viddler', 'timestamp': 1335371429, 'upload_date': '20120425', 'duration': 100.89, 'thumbnail': r're:^https?://.*\.jpg$', 'view_count': int, 'comment_count': int, 'categories': ['video content', 'high quality video', 'video made easy', 'how to produce video with limited resources', 'viddler'], } }, { 'url': 'http://www.viddler.com/v/4d03aad9/', 'md5': 'f12c5a7fa839c47a79363bfdf69404fb', 'info_dict': { 'id': '4d03aad9', 'ext': 'ts', 'title': 'WALL-TO-GORTAT', 'upload_date': '20150126', 'uploader': 'deadspin', 'timestamp': 1422285291, 'view_count': int, 'comment_count': int, } }, { 'url': 'http://www.viddler.com/player/221ebbbd/0/', 'md5': '740511f61d3d1bb71dc14a0fe01a1c10', 'info_dict': { 'id': '221ebbbd', 'ext': 'mov', 'title': 'LETeens-Grammar-snack-third-conditional', 'description': ' ', 'upload_date': '20140929', 'uploader': 'BCLETeens', 'timestamp': 1411997190, 'view_count': int, 'comment_count': int, } }, { # secret protected 'url': 'http://www.viddler.com/v/890c0985?secret=34051570', 'info_dict': { 'id': '890c0985', 'ext': 'mp4', 'title': 'Complete Property Training - Traineeships', 'description': ' ', 'upload_date': '20130606', 'uploader': 'TiffanyBowtell', 'timestamp': 1370496993, 'view_count': int, 'comment_count': int, }, 'params': { 'skip_download': True, }, }] def _real_extract(self, url): video_id, secret = re.match(self._VALID_URL, url).groups() query = { 'video_id': video_id, 'key': 'v0vhrt7bg2xq1vyxhkct', } if secret: query['secret'] = secret 
data = self._download_json( 'http://api.viddler.com/api/v2/viddler.videos.getPlaybackDetails.json', video_id, headers={'Referer': url}, query=query)['video'] formats = [] for filed in data['files']: if filed.get('status', 'ready') != 'ready': continue format_id = filed.get('profile_id') or filed['profile_name'] f = { 'format_id': format_id, 'format_note': filed['profile_name'], 'url': self._proto_relative_url(filed['url']), 'width': int_or_none(filed.get('width')), 'height': int_or_none(filed.get('height')), 'filesize': int_or_none(filed.get('size')), 'ext': filed.get('ext'), 'source_preference': -1, } formats.append(f) if filed.get('cdn_url'): f = f.copy() f['url'] = self._proto_relative_url(filed['cdn_url'], 'http:') f['format_id'] = format_id + '-cdn' f['source_preference'] = 1 formats.append(f) if filed.get('html5_video_source'): f = f.copy() f['url'] = self._proto_relative_url(filed['html5_video_source']) f['format_id'] = format_id + '-html5' f['source_preference'] = 0 formats.append(f) self._sort_formats(formats) categories = [ t.get('text') for t in data.get('tags', []) if 'text' in t] return { 'id': video_id, 'title': data['title'], 'formats': formats, 'description': data.get('description'), 'timestamp': int_or_none(data.get('upload_time')), 'thumbnail': self._proto_relative_url(data.get('thumbnail_url')), 'uploader': data.get('author'), 'duration': float_or_none(data.get('length')), 'view_count': int_or_none(data.get('view_count')), 'comment_count': int_or_none(data.get('comment_count')), 'categories': categories, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/kommunetv.py
youtube_dl/extractor/kommunetv.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import update_url class KommunetvIE(InfoExtractor): _VALID_URL = r'https://(\w+).kommunetv.no/archive/(?P<id>\w+)' _TEST = { 'url': 'https://oslo.kommunetv.no/archive/921', 'md5': '5f102be308ee759be1e12b63d5da4bbc', 'info_dict': { 'id': '921', 'title': 'Bystyremøte', 'ext': 'mp4' } } def _real_extract(self, url): video_id = self._match_id(url) headers = { 'Accept': 'application/json' } data = self._download_json('https://oslo.kommunetv.no/api/streams?streamType=1&id=%s' % video_id, video_id, headers=headers) title = data['stream']['title'] file = data['playlist'][0]['playlist'][0]['file'] url = update_url(file, query=None, fragment=None) formats = self._extract_m3u8_formats(url, video_id, ext='mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False) self._sort_formats(formats) return { 'id': video_id, 'formats': formats, 'title': title }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/bostonglobe.py
youtube_dl/extractor/bostonglobe.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( extract_attributes, ) class BostonGlobeIE(InfoExtractor): _VALID_URL = r'(?i)https?://(?:www\.)?bostonglobe\.com/.*/(?P<id>[^/]+)/\w+(?:\.html)?' _TESTS = [ { 'url': 'http://www.bostonglobe.com/metro/2017/02/11/tree-finally-succumbs-disease-leaving-hole-neighborhood/h1b4lviqzMTIn9sVy8F3gP/story.html', 'md5': '0a62181079c85c2d2b618c9a738aedaf', 'info_dict': { 'title': 'A tree finally succumbs to disease, leaving a hole in a neighborhood', 'id': '5320421710001', 'ext': 'mp4', 'description': 'It arrived as a sapling when the Back Bay was in its infancy, a spindly American elm tamped down into a square of dirt cut into the brick sidewalk of 1880s Marlborough Street, no higher than the first bay window of the new brownstone behind it.', 'timestamp': 1486877593, 'upload_date': '20170212', 'uploader_id': '245991542', }, }, { # Embedded youtube video; we hand it off to the Generic extractor. 
'url': 'https://www.bostonglobe.com/lifestyle/names/2017/02/17/does-ben-affleck-play-matt-damon-favorite-version-batman/ruqkc9VxKBYmh5txn1XhSI/story.html', 'md5': '582b40327089d5c0c949b3c54b13c24b', 'info_dict': { 'title': "Who Is Matt Damon's Favorite Batman?", 'id': 'ZW1QCnlA6Qc', 'ext': 'mp4', 'upload_date': '20170217', 'description': 'md5:3b3dccb9375867e0b4d527ed87d307cb', 'uploader': 'The Late Late Show with James Corden', 'uploader_id': 'TheLateLateShow', }, 'expected_warnings': ['404'], }, ] def _real_extract(self, url): page_id = self._match_id(url) webpage = self._download_webpage(url, page_id) page_title = self._og_search_title(webpage, default=None) # <video data-brightcove-video-id="5320421710001" data-account="245991542" data-player="SJWAiyYWg" data-embed="default" class="video-js" controls itemscope itemtype="http://schema.org/VideoObject"> entries = [] for video in re.findall(r'(?i)(<video[^>]+>)', webpage): attrs = extract_attributes(video) video_id = attrs.get('data-brightcove-video-id') account_id = attrs.get('data-account') player_id = attrs.get('data-player') embed = attrs.get('data-embed') if video_id and account_id and player_id and embed: entries.append( 'http://players.brightcove.net/%s/%s_%s/index.html?videoId=%s' % (account_id, player_id, embed, video_id)) if len(entries) == 0: return self.url_result(url, 'Generic') elif len(entries) == 1: return self.url_result(entries[0], 'BrightcoveNew') else: return self.playlist_from_matches(entries, page_id, page_title, ie='BrightcoveNew')
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/neteasemusic.py
youtube_dl/extractor/neteasemusic.py
# coding: utf-8 from __future__ import unicode_literals from base64 import b64encode from binascii import hexlify from datetime import datetime from hashlib import md5 from random import randint import json import re import time from .common import InfoExtractor from ..aes import aes_ecb_encrypt, pkcs7_padding from ..compat import ( compat_urllib_parse_urlencode, compat_str, compat_itertools_count, ) from ..utils import ( ExtractorError, bytes_to_intlist, error_to_compat_str, float_or_none, int_or_none, intlist_to_bytes, sanitized_Request, std_headers, try_get, ) class NetEaseMusicBaseIE(InfoExtractor): _FORMATS = ['bMusic', 'mMusic', 'hMusic'] _NETEASE_SALT = '3go8&$8*3*3h0k(2)2' _API_BASE = 'http://music.163.com/api/' @classmethod def _encrypt(cls, dfsid): salt_bytes = bytearray(cls._NETEASE_SALT.encode('utf-8')) string_bytes = bytearray(compat_str(dfsid).encode('ascii')) salt_len = len(salt_bytes) for i in range(len(string_bytes)): string_bytes[i] = string_bytes[i] ^ salt_bytes[i % salt_len] m = md5() m.update(bytes(string_bytes)) result = b64encode(m.digest()).decode('ascii') return result.replace('/', '_').replace('+', '-') @classmethod def make_player_api_request_data_and_headers(cls, song_id, bitrate): KEY = b'e82ckenh8dichen8' URL = '/api/song/enhance/player/url' now = int(time.time() * 1000) rand = randint(0, 1000) cookie = { 'osver': None, 'deviceId': None, 'appver': '8.0.0', 'versioncode': '140', 'mobilename': None, 'buildver': '1623435496', 'resolution': '1920x1080', '__csrf': '', 'os': 'pc', 'channel': None, 'requestId': '{0}_{1:04}'.format(now, rand), } request_text = json.dumps( {'ids': '[{0}]'.format(song_id), 'br': bitrate, 'header': cookie}, separators=(',', ':')) message = 'nobody{0}use{1}md5forencrypt'.format( URL, request_text).encode('latin1') msg_digest = md5(message).hexdigest() data = '{0}-36cd479b6b5-{1}-36cd479b6b5-{2}'.format( URL, request_text, msg_digest) data = pkcs7_padding(bytes_to_intlist(data)) encrypted = 
intlist_to_bytes(aes_ecb_encrypt(data, bytes_to_intlist(KEY))) encrypted_params = hexlify(encrypted).decode('ascii').upper() cookie = '; '.join( ['{0}={1}'.format(k, v if v is not None else 'undefined') for [k, v] in cookie.items()]) headers = { 'User-Agent': std_headers['User-Agent'], 'Content-Type': 'application/x-www-form-urlencoded', 'Referer': 'https://music.163.com', 'Cookie': cookie, } return ('params={0}'.format(encrypted_params), headers) def _call_player_api(self, song_id, bitrate): url = 'https://interface3.music.163.com/eapi/song/enhance/player/url' data, headers = self.make_player_api_request_data_and_headers(song_id, bitrate) try: msg = 'empty result' result = self._download_json( url, song_id, data=data.encode('ascii'), headers=headers) if result: return result except ExtractorError as e: if type(e.cause) in (ValueError, TypeError): # JSON load failure raise except Exception as e: msg = error_to_compat_str(e) self.report_warning('%s API call (%s) failed: %s' % ( song_id, bitrate, msg)) return {} def extract_formats(self, info): err = 0 formats = [] song_id = info['id'] for song_format in self._FORMATS: details = info.get(song_format) if not details: continue bitrate = int_or_none(details.get('bitrate')) or 999000 data = self._call_player_api(song_id, bitrate) for song in try_get(data, lambda x: x['data'], list) or []: song_url = try_get(song, lambda x: x['url']) if not song_url: continue if self._is_valid_url(song_url, info['id'], 'song'): formats.append({ 'url': song_url, 'ext': details.get('extension'), 'abr': float_or_none(song.get('br'), scale=1000), 'format_id': song_format, 'filesize': int_or_none(song.get('size')), 'asr': int_or_none(details.get('sr')), }) elif err == 0: err = try_get(song, lambda x: x['code'], int) if not formats: msg = 'No media links found' if err != 0 and (err < 200 or err >= 400): raise ExtractorError( '%s (site code %d)' % (msg, err, ), expected=True) else: self.raise_geo_restricted( msg + ': probably this video is not 
available from your location due to geo restriction.', countries=['CN']) return formats @classmethod def convert_milliseconds(cls, ms): return int(round(ms / 1000.0)) def query_api(self, endpoint, video_id, note): req = sanitized_Request('%s%s' % (self._API_BASE, endpoint)) req.add_header('Referer', self._API_BASE) return self._download_json(req, video_id, note) class NetEaseMusicIE(NetEaseMusicBaseIE): IE_NAME = 'netease:song' IE_DESC = '网易云音乐' _VALID_URL = r'https?://(y\.)?music\.163\.com/(?:[#m]/)?song\?.*?\bid=(?P<id>[0-9]+)' _TESTS = [{ 'url': 'http://music.163.com/#/song?id=32102397', 'md5': '3e909614ce09b1ccef4a3eb205441190', 'info_dict': { 'id': '32102397', 'ext': 'mp3', 'title': 'Bad Blood', 'creator': 'Taylor Swift / Kendrick Lamar', 'upload_date': '20150516', 'timestamp': 1431792000, 'description': 'md5:25fc5f27e47aad975aa6d36382c7833c', }, }, { 'note': 'No lyrics.', 'url': 'http://music.163.com/song?id=17241424', 'info_dict': { 'id': '17241424', 'ext': 'mp3', 'title': 'Opus 28', 'creator': 'Dustin O\'Halloran', 'upload_date': '20080211', 'description': 'md5:f12945b0f6e0365e3b73c5032e1b0ff4', 'timestamp': 1202745600, }, }, { 'note': 'Has translated name.', 'url': 'http://music.163.com/#/song?id=22735043', 'info_dict': { 'id': '22735043', 'ext': 'mp3', 'title': '소원을 말해봐 (Genie)', 'creator': '少女时代', 'description': 'md5:79d99cc560e4ca97e0c4d86800ee4184', 'upload_date': '20100127', 'timestamp': 1264608000, 'alt_title': '说出愿望吧(Genie)', }, }, { 'url': 'https://y.music.163.com/m/song?app_version=8.8.45&id=95670&uct2=sKnvS4+0YStsWkqsPhFijw%3D%3D&dlt=0846', 'md5': '95826c73ea50b1c288b22180ec9e754d', 'info_dict': { 'id': '95670', 'ext': 'mp3', 'title': '国际歌', 'creator': '马备', 'upload_date': '19911130', 'timestamp': 691516800, 'description': 'md5:1ba2f911a2b0aa398479f595224f2141', }, }] def _process_lyrics(self, lyrics_info): original = lyrics_info.get('lrc', {}).get('lyric') translated = lyrics_info.get('tlyric', {}).get('lyric') if not translated: return original 
lyrics_expr = r'(\[[0-9]{2}:[0-9]{2}\.[0-9]{2,}\])([^\n]+)' original_ts_texts = re.findall(lyrics_expr, original) translation_ts_dict = dict( (time_stamp, text) for time_stamp, text in re.findall(lyrics_expr, translated) ) lyrics = '\n'.join([ '%s%s / %s' % (time_stamp, text, translation_ts_dict.get(time_stamp, '')) for time_stamp, text in original_ts_texts ]) return lyrics def _real_extract(self, url): song_id = self._match_id(url) params = { 'id': song_id, 'ids': '[%s]' % song_id } info = self.query_api( 'song/detail?' + compat_urllib_parse_urlencode(params), song_id, 'Downloading song info')['songs'][0] formats = self.extract_formats(info) self._sort_formats(formats) lyrics_info = self.query_api( 'song/lyric?id=%s&lv=-1&tv=-1' % song_id, song_id, 'Downloading lyrics data') lyrics = self._process_lyrics(lyrics_info) alt_title = None if info.get('transNames'): alt_title = '/'.join(info.get('transNames')) return { 'id': song_id, 'title': info['name'], 'alt_title': alt_title, 'creator': ' / '.join([artist['name'] for artist in info.get('artists', [])]), 'timestamp': self.convert_milliseconds(info.get('album', {}).get('publishTime')), 'thumbnail': info.get('album', {}).get('picUrl'), 'duration': self.convert_milliseconds(info.get('duration', 0)), 'description': lyrics, 'formats': formats, } class NetEaseMusicAlbumIE(NetEaseMusicBaseIE): IE_NAME = 'netease:album' IE_DESC = '网易云音乐 - 专辑' _VALID_URL = r'https?://music\.163\.com/(#/)?album\?id=(?P<id>[0-9]+)' _TEST = { 'url': 'http://music.163.com/#/album?id=220780', 'info_dict': { 'id': '220780', 'title': 'B\'day', }, 'playlist_count': 23, 'skip': 'Blocked outside Mainland China', } def _real_extract(self, url): album_id = self._match_id(url) info = self.query_api( 'album/%s?id=%s' % (album_id, album_id), album_id, 'Downloading album data')['album'] name = info['name'] desc = info.get('description') entries = [ self.url_result('http://music.163.com/#/song?id=%s' % song['id'], 'NetEaseMusic', song['id']) for song in 
info['songs'] ] return self.playlist_result(entries, album_id, name, desc) class NetEaseMusicSingerIE(NetEaseMusicBaseIE): IE_NAME = 'netease:singer' IE_DESC = '网易云音乐 - 歌手' _VALID_URL = r'https?://music\.163\.com/(#/)?artist\?id=(?P<id>[0-9]+)' _TESTS = [{ 'note': 'Singer has aliases.', 'url': 'http://music.163.com/#/artist?id=10559', 'info_dict': { 'id': '10559', 'title': '张惠妹 - aMEI;阿密特', }, 'playlist_count': 50, 'skip': 'Blocked outside Mainland China', }, { 'note': 'Singer has translated name.', 'url': 'http://music.163.com/#/artist?id=124098', 'info_dict': { 'id': '124098', 'title': '李昇基 - 이승기', }, 'playlist_count': 50, 'skip': 'Blocked outside Mainland China', }] def _real_extract(self, url): singer_id = self._match_id(url) info = self.query_api( 'artist/%s?id=%s' % (singer_id, singer_id), singer_id, 'Downloading singer data') name = info['artist']['name'] if info['artist']['trans']: name = '%s - %s' % (name, info['artist']['trans']) if info['artist']['alias']: name = '%s - %s' % (name, ';'.join(info['artist']['alias'])) entries = [ self.url_result('http://music.163.com/#/song?id=%s' % song['id'], 'NetEaseMusic', song['id']) for song in info['hotSongs'] ] return self.playlist_result(entries, singer_id, name) class NetEaseMusicListIE(NetEaseMusicBaseIE): IE_NAME = 'netease:playlist' IE_DESC = '网易云音乐 - 歌单' _VALID_URL = r'https?://music\.163\.com/(#/)?(playlist|discover/toplist)\?id=(?P<id>[0-9]+)' _TESTS = [{ 'url': 'http://music.163.com/#/playlist?id=79177352', 'info_dict': { 'id': '79177352', 'title': 'Billboard 2007 Top 100', 'description': 'md5:12fd0819cab2965b9583ace0f8b7b022' }, 'playlist_count': 99, 'skip': 'Blocked outside Mainland China', }, { 'note': 'Toplist/Charts sample', 'url': 'http://music.163.com/#/discover/toplist?id=3733003', 'info_dict': { 'id': '3733003', 'title': 're:韩国Melon排行榜周榜 [0-9]{4}-[0-9]{2}-[0-9]{2}', 'description': 'md5:73ec782a612711cadc7872d9c1e134fc', }, 'playlist_count': 50, 'skip': 'Blocked outside Mainland China', }] def 
_real_extract(self, url): list_id = self._match_id(url) info = self.query_api( 'playlist/detail?id=%s&lv=-1&tv=-1' % list_id, list_id, 'Downloading playlist data')['result'] name = info['name'] desc = info.get('description') if info.get('specialType') == 10: # is a chart/toplist datestamp = datetime.fromtimestamp( self.convert_milliseconds(info['updateTime'])).strftime('%Y-%m-%d') name = '%s %s' % (name, datestamp) entries = [ self.url_result('http://music.163.com/#/song?id=%s' % song['id'], 'NetEaseMusic', song['id']) for song in info['tracks'] ] return self.playlist_result(entries, list_id, name, desc) class NetEaseMusicMvIE(NetEaseMusicBaseIE): IE_NAME = 'netease:mv' IE_DESC = '网易云音乐 - MV' _VALID_URL = r'https?://music\.163\.com/(#/)?mv\?id=(?P<id>[0-9]+)' _TEST = { 'url': 'http://music.163.com/#/mv?id=415350', 'info_dict': { 'id': '415350', 'ext': 'mp4', 'title': '이럴거면 그러지말지', 'description': '白雅言自作曲唱甜蜜爱情', 'creator': '白雅言', 'upload_date': '20150520', }, 'skip': 'Blocked outside Mainland China', } def _real_extract(self, url): mv_id = self._match_id(url) info = self.query_api( 'mv/detail?id=%s&type=mp4' % mv_id, mv_id, 'Downloading mv info')['data'] formats = [ {'url': mv_url, 'ext': 'mp4', 'format_id': '%sp' % brs, 'height': int(brs)} for brs, mv_url in info['brs'].items() ] self._sort_formats(formats) return { 'id': mv_id, 'title': info['name'], 'description': info.get('desc') or info.get('briefDesc'), 'creator': info['artistName'], 'upload_date': info['publishTime'].replace('-', ''), 'formats': formats, 'thumbnail': info.get('cover'), 'duration': self.convert_milliseconds(info.get('duration', 0)), } class NetEaseMusicProgramIE(NetEaseMusicBaseIE): IE_NAME = 'netease:program' IE_DESC = '网易云音乐 - 电台节目' _VALID_URL = r'https?://music\.163\.com/(#/?)program\?id=(?P<id>[0-9]+)' _TESTS = [{ 'url': 'http://music.163.com/#/program?id=10109055', 'info_dict': { 'id': '10109055', 'ext': 'mp3', 'title': '不丹足球背后的故事', 'description': '喜马拉雅人的足球梦 ...', 'creator': '大话西藏', 
'timestamp': 1434179342, 'upload_date': '20150613', 'duration': 900, }, 'skip': 'Blocked outside Mainland China', }, { 'note': 'This program has accompanying songs.', 'url': 'http://music.163.com/#/program?id=10141022', 'info_dict': { 'id': '10141022', 'title': '25岁,你是自在如风的少年<27°C>', 'description': 'md5:8d594db46cc3e6509107ede70a4aaa3b', }, 'playlist_count': 4, 'skip': 'Blocked outside Mainland China', }, { 'note': 'This program has accompanying songs.', 'url': 'http://music.163.com/#/program?id=10141022', 'info_dict': { 'id': '10141022', 'ext': 'mp3', 'title': '25岁,你是自在如风的少年<27°C>', 'description': 'md5:8d594db46cc3e6509107ede70a4aaa3b', 'timestamp': 1434450841, 'upload_date': '20150616', }, 'params': { 'noplaylist': True }, 'skip': 'Blocked outside Mainland China', }] def _real_extract(self, url): program_id = self._match_id(url) info = self.query_api( 'dj/program/detail?id=%s' % program_id, program_id, 'Downloading program info')['program'] name = info['name'] description = info['description'] if not info['songs'] or self._downloader.params.get('noplaylist'): if info['songs']: self.to_screen( 'Downloading just the main audio %s because of --no-playlist' % info['mainSong']['id']) formats = self.extract_formats(info['mainSong']) self._sort_formats(formats) return { 'id': program_id, 'title': name, 'description': description, 'creator': info['dj']['brand'], 'timestamp': self.convert_milliseconds(info['createTime']), 'thumbnail': info['coverUrl'], 'duration': self.convert_milliseconds(info.get('duration', 0)), 'formats': formats, } self.to_screen( 'Downloading playlist %s - add --no-playlist to just download the main audio %s' % (program_id, info['mainSong']['id'])) song_ids = [info['mainSong']['id']] song_ids.extend([song['id'] for song in info['songs']]) entries = [ self.url_result('http://music.163.com/#/song?id=%s' % song_id, 'NetEaseMusic', song_id) for song_id in song_ids ] return self.playlist_result(entries, program_id, name, description) class 
NetEaseMusicDjRadioIE(NetEaseMusicBaseIE): IE_NAME = 'netease:djradio' IE_DESC = '网易云音乐 - 电台' _VALID_URL = r'https?://music\.163\.com/(#/)?djradio\?id=(?P<id>[0-9]+)' _TEST = { 'url': 'http://music.163.com/#/djradio?id=42', 'info_dict': { 'id': '42', 'title': '声音蔓延', 'description': 'md5:766220985cbd16fdd552f64c578a6b15' }, 'playlist_mincount': 40, 'skip': 'Blocked outside Mainland China', } _PAGE_SIZE = 1000 def _real_extract(self, url): dj_id = self._match_id(url) name = None desc = None entries = [] for offset in compat_itertools_count(start=0, step=self._PAGE_SIZE): info = self.query_api( 'dj/program/byradio?asc=false&limit=%d&radioId=%s&offset=%d' % (self._PAGE_SIZE, dj_id, offset), dj_id, 'Downloading dj programs - %d' % offset) entries.extend([ self.url_result( 'http://music.163.com/#/program?id=%s' % program['id'], 'NetEaseMusicProgram', program['id']) for program in info['programs'] ]) if name is None: radio = info['programs'][0]['radio'] name = radio['name'] desc = radio['desc'] if not info['more']: break return self.playlist_result(entries, dj_id, name, desc)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/rai.py
youtube_dl/extractor/rai.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_str, compat_urlparse, ) from ..utils import ( determine_ext, ExtractorError, find_xpath_attr, fix_xml_ampersands, GeoRestrictedError, HEADRequest, int_or_none, parse_duration, remove_start, strip_or_none, try_get, unified_strdate, unified_timestamp, update_url_query, urljoin, xpath_text, ) class RaiBaseIE(InfoExtractor): _UUID_RE = r'[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}' _GEO_COUNTRIES = ['IT'] _GEO_BYPASS = False def _extract_relinker_info(self, relinker_url, video_id): if not re.match(r'https?://', relinker_url): return {'formats': [{'url': relinker_url}]} formats = [] geoprotection = None is_live = None duration = None for platform in ('mon', 'flash', 'native'): relinker = self._download_xml( relinker_url, video_id, note='Downloading XML metadata for platform %s' % platform, transform_source=fix_xml_ampersands, query={'output': 45, 'pl': platform}, headers=self.geo_verification_headers()) if not geoprotection: geoprotection = xpath_text( relinker, './geoprotection', default=None) == 'Y' if not is_live: is_live = xpath_text( relinker, './is_live', default=None) == 'Y' if not duration: duration = parse_duration(xpath_text( relinker, './duration', default=None)) url_elem = find_xpath_attr(relinker, './url', 'type', 'content') if url_elem is None: continue media_url = url_elem.text # This does not imply geo restriction (e.g. 
# http://www.raisport.rai.it/dl/raiSport/media/rassegna-stampa-04a9f4bd-b563-40cf-82a6-aad3529cb4a9.html) if '/video_no_available.mp4' in media_url: continue ext = determine_ext(media_url) if (ext == 'm3u8' and platform != 'mon') or (ext == 'f4m' and platform != 'flash'): continue if ext == 'm3u8' or 'format=m3u8' in media_url or platform == 'mon': formats.extend(self._extract_m3u8_formats( media_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) elif ext == 'f4m' or platform == 'flash': manifest_url = update_url_query( media_url.replace('manifest#live_hds.f4m', 'manifest.f4m'), {'hdcore': '3.7.0', 'plugin': 'aasp-3.7.0.39.44'}) formats.extend(self._extract_f4m_formats( manifest_url, video_id, f4m_id='hds', fatal=False)) else: bitrate = int_or_none(xpath_text(relinker, 'bitrate')) formats.append({ 'url': media_url, 'tbr': bitrate if bitrate > 0 else None, 'format_id': 'http-%d' % bitrate if bitrate > 0 else 'http', }) if not formats and geoprotection is True: self.raise_geo_restricted(countries=self._GEO_COUNTRIES) formats.extend(self._create_http_urls(relinker_url, formats)) return dict((k, v) for k, v in { 'is_live': is_live, 'duration': duration, 'formats': formats, }.items() if v is not None) def _create_http_urls(self, relinker_url, fmts): _RELINKER_REG = r'https?://(?P<host>[^/]+?)/(?:i/)?(?P<extra>[^/]+?)/(?P<path>.+?)/(?P<id>\w+)(?:_(?P<quality>[\d\,]+))?(?:\.mp4|/playlist\.m3u8).+?' 
_MP4_TMPL = '%s&overrideUserAgentRule=mp4-%s' _QUALITY = { # tbr: w, h '250': [352, 198], '400': [512, 288], '700': [512, 288], '800': [700, 394], '1200': [736, 414], '1800': [1024, 576], '2400': [1280, 720], '3200': [1440, 810], '3600': [1440, 810], '5000': [1920, 1080], '10000': [1920, 1080], } def test_url(url): resp = self._request_webpage( HEADRequest(url), None, headers={'User-Agent': 'Rai'}, fatal=False, errnote=False, note=False) if resp is False: return False if resp.code == 200: return False if resp.url == url else resp.url return None def get_format_info(tbr): import math br = int_or_none(tbr) if len(fmts) == 1 and not br: br = fmts[0].get('tbr') if br > 300: tbr = compat_str(math.floor(br / 100) * 100) else: tbr = '250' # try extracting info from available m3u8 formats format_copy = None for f in fmts: if f.get('tbr'): br_limit = math.floor(br / 100) if br_limit - 1 <= math.floor(f['tbr'] / 100) <= br_limit + 1: format_copy = f.copy() return { 'width': format_copy.get('width'), 'height': format_copy.get('height'), 'tbr': format_copy.get('tbr'), 'vcodec': format_copy.get('vcodec'), 'acodec': format_copy.get('acodec'), 'fps': format_copy.get('fps'), 'format_id': 'https-%s' % tbr, } if format_copy else { 'width': _QUALITY[tbr][0], 'height': _QUALITY[tbr][1], 'format_id': 'https-%s' % tbr, 'tbr': int(tbr), } loc = test_url(_MP4_TMPL % (relinker_url, '*')) if not isinstance(loc, compat_str): return [] mobj = re.match( _RELINKER_REG, test_url(relinker_url) or '') if not mobj: return [] available_qualities = mobj.group('quality').split(',') if mobj.group('quality') else ['*'] available_qualities = [i for i in available_qualities if i] formats = [] for q in available_qualities: fmt = { 'url': _MP4_TMPL % (relinker_url, q), 'protocol': 'https', 'ext': 'mp4', } fmt.update(get_format_info(q)) formats.append(fmt) return formats @staticmethod def _extract_subtitles(url, video_data): STL_EXT = 'stl' SRT_EXT = 'srt' subtitles = {} subtitles_array = 
video_data.get('subtitlesArray') or [] for k in ('subtitles', 'subtitlesUrl'): subtitles_array.append({'url': video_data.get(k)}) for subtitle in subtitles_array: sub_url = subtitle.get('url') if sub_url and isinstance(sub_url, compat_str): sub_lang = subtitle.get('language') or 'it' sub_url = urljoin(url, sub_url) sub_ext = determine_ext(sub_url, SRT_EXT) subtitles.setdefault(sub_lang, []).append({ 'ext': sub_ext, 'url': sub_url, }) if STL_EXT == sub_ext: subtitles[sub_lang].append({ 'ext': SRT_EXT, 'url': sub_url[:-len(STL_EXT)] + SRT_EXT, }) return subtitles class RaiPlayIE(RaiBaseIE): _VALID_URL = r'(?P<base>https?://(?:www\.)?raiplay\.it/.+?-(?P<id>%s))\.(?:html|json)' % RaiBaseIE._UUID_RE _TESTS = [{ 'url': 'http://www.raiplay.it/video/2014/04/Report-del-07042014-cb27157f-9dd0-4aee-b788-b1f67643a391.html', 'md5': '8970abf8caf8aef4696e7b1f2adfc696', 'info_dict': { 'id': 'cb27157f-9dd0-4aee-b788-b1f67643a391', 'ext': 'mp4', 'title': 'Report del 07/04/2014', 'alt_title': 'St 2013/14 - Espresso nel caffè - 07/04/2014', 'description': 'md5:d730c168a58f4bb35600fc2f881ec04e', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'Rai Gulp', 'duration': 6160, 'series': 'Report', 'season': '2013/14', 'subtitles': { 'it': 'count:2', }, }, 'params': { 'skip_download': True, }, }, { # 1080p direct mp4 url 'url': 'https://www.raiplay.it/video/2021/03/Leonardo-S1E1-b5703b02-82ee-475a-85b6-c9e4a8adf642.html', 'md5': '2e501e8651d72f05ffe8f5d286ad560b', 'info_dict': { 'id': 'b5703b02-82ee-475a-85b6-c9e4a8adf642', 'ext': 'mp4', 'title': 'Leonardo - S1E1', 'alt_title': 'St 1 Ep 1 - Episodio 1', 'description': 'md5:f5360cd267d2de146e4e3879a5a47d31', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'Rai 1', 'duration': 3229, 'series': 'Leonardo', 'season': 'Season 1', }, }, { 'url': 'http://www.raiplay.it/video/2016/11/gazebotraindesi-efebe701-969c-4593-92f3-285f0d1ce750.html?', 'only_matching': True, }, { # subtitles at 'subtitlesArray' key (see #27698) 'url': 
'https://www.raiplay.it/video/2020/12/Report---04-01-2021-2e90f1de-8eee-4de4-ac0e-78d21db5b600.html', 'only_matching': True, }, { # DRM protected 'url': 'https://www.raiplay.it/video/2020/09/Lo-straordinario-mondo-di-Zoey-S1E1-Lo-straordinario-potere-di-Zoey-ed493918-1d32-44b7-8454-862e473d00ff.html', 'only_matching': True, }] def _real_extract(self, url): base, video_id = re.match(self._VALID_URL, url).groups() media = self._download_json( base + '.json', video_id, 'Downloading video JSON') if try_get( media, (lambda x: x['rights_management']['rights']['drm'], lambda x: x['program_info']['rights_management']['rights']['drm']), dict): raise ExtractorError('This video is DRM protected.', expected=True) title = media['name'] video = media['video'] relinker_info = self._extract_relinker_info(video['content_url'], video_id) self._sort_formats(relinker_info['formats']) thumbnails = [] for _, value in media.get('images', {}).items(): if value: thumbnails.append({ 'url': urljoin(url, value), }) date_published = media.get('date_published') time_published = media.get('time_published') if date_published and time_published: date_published += ' ' + time_published subtitles = self._extract_subtitles(url, video) program_info = media.get('program_info') or {} season = media.get('season') info = { 'id': remove_start(media.get('id'), 'ContentItem-') or video_id, 'display_id': video_id, 'title': self._live_title(title) if relinker_info.get( 'is_live') else title, 'alt_title': strip_or_none(media.get('subtitle')), 'description': media.get('description'), 'uploader': strip_or_none(media.get('channel')), 'creator': strip_or_none(media.get('editor') or None), 'duration': parse_duration(video.get('duration')), 'timestamp': unified_timestamp(date_published), 'thumbnails': thumbnails, 'series': program_info.get('name'), 'season_number': int_or_none(season), 'season': season if (season and not season.isdigit()) else None, 'episode': media.get('episode_title'), 'episode_number': 
int_or_none(media.get('episode')), 'subtitles': subtitles, } info.update(relinker_info) return info class RaiPlayLiveIE(RaiPlayIE): _VALID_URL = r'(?P<base>https?://(?:www\.)?raiplay\.it/dirette/(?P<id>[^/?#&]+))' _TESTS = [{ 'url': 'http://www.raiplay.it/dirette/rainews24', 'info_dict': { 'id': 'd784ad40-e0ae-4a69-aa76-37519d238a9c', 'display_id': 'rainews24', 'ext': 'mp4', 'title': 're:^Diretta di Rai News 24 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'description': 'md5:4d00bcf6dc98b27c6ec480de329d1497', 'uploader': 'Rai News 24', 'creator': 'Rai News 24', 'is_live': True, }, 'params': { 'skip_download': True, }, }] class RaiPlayPlaylistIE(InfoExtractor): _VALID_URL = r'(?P<base>https?://(?:www\.)?raiplay\.it/programmi/(?P<id>[^/?#&]+))' _TESTS = [{ 'url': 'http://www.raiplay.it/programmi/nondirloalmiocapo/', 'info_dict': { 'id': 'nondirloalmiocapo', 'title': 'Non dirlo al mio capo', 'description': 'md5:98ab6b98f7f44c2843fd7d6f045f153b', }, 'playlist_mincount': 12, }] def _real_extract(self, url): base, playlist_id = re.match(self._VALID_URL, url).groups() program = self._download_json( base + '.json', playlist_id, 'Downloading program JSON') entries = [] for b in (program.get('blocks') or []): for s in (b.get('sets') or []): s_id = s.get('id') if not s_id: continue medias = self._download_json( '%s/%s.json' % (base, s_id), s_id, 'Downloading content set JSON', fatal=False) if not medias: continue for m in (medias.get('items') or []): path_id = m.get('path_id') if not path_id: continue video_url = urljoin(url, path_id) entries.append(self.url_result( video_url, ie=RaiPlayIE.ie_key(), video_id=RaiPlayIE._match_id(video_url))) return self.playlist_result( entries, playlist_id, program.get('name'), try_get(program, lambda x: x['program_info']['description'])) class RaiIE(RaiBaseIE): _VALID_URL = r'https?://[^/]+\.(?:rai\.(?:it|tv)|rainews\.it)/.+?-(?P<id>%s)(?:-.+?)?\.html' % RaiBaseIE._UUID_RE _TESTS = [{ # var uniquename = "ContentItem-..." 
# data-id="ContentItem-..." 'url': 'http://www.raisport.rai.it/dl/raiSport/media/rassegna-stampa-04a9f4bd-b563-40cf-82a6-aad3529cb4a9.html', 'info_dict': { 'id': '04a9f4bd-b563-40cf-82a6-aad3529cb4a9', 'ext': 'mp4', 'title': 'TG PRIMO TEMPO', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 1758, 'upload_date': '20140612', }, 'skip': 'This content is available only in Italy', }, { # with ContentItem in many metas 'url': 'http://www.rainews.it/dl/rainews/media/Weekend-al-cinema-da-Hollywood-arriva-il-thriller-di-Tate-Taylor-La-ragazza-del-treno-1632c009-c843-4836-bb65-80c33084a64b.html', 'info_dict': { 'id': '1632c009-c843-4836-bb65-80c33084a64b', 'ext': 'mp4', 'title': 'Weekend al cinema, da Hollywood arriva il thriller di Tate Taylor "La ragazza del treno"', 'description': 'I film in uscita questa settimana.', 'thumbnail': r're:^https?://.*\.png$', 'duration': 833, 'upload_date': '20161103', } }, { # with ContentItem in og:url 'url': 'http://www.rai.it/dl/RaiTV/programmi/media/ContentItem-efb17665-691c-45d5-a60c-5301333cbb0c.html', 'md5': '06345bd97c932f19ffb129973d07a020', 'info_dict': { 'id': 'efb17665-691c-45d5-a60c-5301333cbb0c', 'ext': 'mp4', 'title': 'TG1 ore 20:00 del 03/11/2016', 'description': 'TG1 edizione integrale ore 20:00 del giorno 03/11/2016', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 2214, 'upload_date': '20161103', } }, { # initEdizione('ContentItem-...' 
'url': 'http://www.tg1.rai.it/dl/tg1/2010/edizioni/ContentSet-9b6e0cba-4bef-4aef-8cf0-9f7f665b7dfb-tg1.html?item=undefined', 'info_dict': { 'id': 'c2187016-8484-4e3a-8ac8-35e475b07303', 'ext': 'mp4', 'title': r're:TG1 ore \d{2}:\d{2} del \d{2}/\d{2}/\d{4}', 'duration': 2274, 'upload_date': '20170401', }, 'skip': 'Changes daily', }, { # HLS live stream with ContentItem in og:url 'url': 'http://www.rainews.it/dl/rainews/live/ContentItem-3156f2f2-dc70-4953-8e2f-70d7489d4ce9.html', 'info_dict': { 'id': '3156f2f2-dc70-4953-8e2f-70d7489d4ce9', 'ext': 'mp4', 'title': 'La diretta di Rainews24', }, 'params': { 'skip_download': True, }, }, { # ContentItem in iframe (see #12652) and subtitle at 'subtitlesUrl' key 'url': 'http://www.presadiretta.rai.it/dl/portali/site/puntata/ContentItem-3ed19d13-26c2-46ff-a551-b10828262f1b.html', 'info_dict': { 'id': '1ad6dc64-444a-42a4-9bea-e5419ad2f5fd', 'ext': 'mp4', 'title': 'Partiti acchiappavoti - Presa diretta del 13/09/2015', 'description': 'md5:d291b03407ec505f95f27970c0b025f4', 'upload_date': '20150913', 'subtitles': { 'it': 'count:2', }, }, 'params': { 'skip_download': True, }, }, { # Direct MMS URL 'url': 'http://www.rai.it/dl/RaiTV/programmi/media/ContentItem-b63a4089-ac28-48cf-bca5-9f5b5bc46df5.html', 'only_matching': True, }, { 'url': 'https://www.rainews.it/tgr/marche/notiziari/video/2019/02/ContentItem-6ba945a2-889c-4a80-bdeb-8489c70a8db9.html', 'only_matching': True, }] def _extract_from_content_id(self, content_id, url): media = self._download_json( 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-%s.html?json' % content_id, content_id, 'Downloading video JSON') title = media['name'].strip() media_type = media['type'] if 'Audio' in media_type: relinker_info = { 'formats': [{ 'format_id': media.get('formatoAudio'), 'url': media['audioUrl'], 'ext': media.get('formatoAudio'), }] } elif 'Video' in media_type: relinker_info = self._extract_relinker_info(media['mediaUri'], content_id) else: raise ExtractorError('not a 
media file') self._sort_formats(relinker_info['formats']) thumbnails = [] for image_type in ('image', 'image_medium', 'image_300'): thumbnail_url = media.get(image_type) if thumbnail_url: thumbnails.append({ 'url': compat_urlparse.urljoin(url, thumbnail_url), }) subtitles = self._extract_subtitles(url, media) info = { 'id': content_id, 'title': title, 'description': strip_or_none(media.get('desc')), 'thumbnails': thumbnails, 'uploader': media.get('author'), 'upload_date': unified_strdate(media.get('date')), 'duration': parse_duration(media.get('length')), 'subtitles': subtitles, } info.update(relinker_info) return info def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) content_item_id = None content_item_url = self._html_search_meta( ('og:url', 'og:video', 'og:video:secure_url', 'twitter:url', 'twitter:player', 'jsonlink'), webpage, default=None) if content_item_url: content_item_id = self._search_regex( r'ContentItem-(%s)' % self._UUID_RE, content_item_url, 'content item id', default=None) if not content_item_id: content_item_id = self._search_regex( r'''(?x) (?: (?:initEdizione|drawMediaRaiTV)\(| <(?:[^>]+\bdata-id|var\s+uniquename)=| <iframe[^>]+\bsrc= ) (["\']) (?:(?!\1).)*\bContentItem-(?P<id>%s) ''' % self._UUID_RE, webpage, 'content item id', default=None, group='id') content_item_ids = set() if content_item_id: content_item_ids.add(content_item_id) if video_id not in content_item_ids: content_item_ids.add(video_id) for content_item_id in content_item_ids: try: return self._extract_from_content_id(content_item_id, url) except GeoRestrictedError: raise except ExtractorError: pass relinker_url = self._proto_relative_url(self._search_regex( r'''(?x) (?: var\s+videoURL| mediaInfo\.mediaUri )\s*=\s* ([\'"]) (?P<url> (?:https?:)? //mediapolis(?:vod)?\.rai\.it/relinker/relinkerServlet\.htm\? 
(?:(?!\1).)*\bcont=(?:(?!\1).)+)\1 ''', webpage, 'relinker URL', group='url')) relinker_info = self._extract_relinker_info( urljoin(url, relinker_url), video_id) self._sort_formats(relinker_info['formats']) title = self._search_regex( r'var\s+videoTitolo\s*=\s*([\'"])(?P<title>[^\'"]+)\1', webpage, 'title', group='title', default=None) or self._og_search_title(webpage) info = { 'id': video_id, 'title': title, } info.update(relinker_info) return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/streamable.py
youtube_dl/extractor/streamable.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, float_or_none, int_or_none, ) class StreamableIE(InfoExtractor): _VALID_URL = r'https?://streamable\.com/(?:[es]/)?(?P<id>\w+)' _TESTS = [ { 'url': 'https://streamable.com/dnd1', 'md5': '3e3bc5ca088b48c2d436529b64397fef', 'info_dict': { 'id': 'dnd1', 'ext': 'mp4', 'title': 'Mikel Oiarzabal scores to make it 0-3 for La Real against Espanyol', 'thumbnail': r're:https?://.*\.jpg$', 'uploader': 'teabaker', 'timestamp': 1454964157.35115, 'upload_date': '20160208', 'duration': 61.516, 'view_count': int, } }, # older video without bitrate, width/height, etc. info { 'url': 'https://streamable.com/moo', 'md5': '2cf6923639b87fba3279ad0df3a64e73', 'info_dict': { 'id': 'moo', 'ext': 'mp4', 'title': '"Please don\'t eat me!"', 'thumbnail': r're:https?://.*\.jpg$', 'timestamp': 1426115495, 'upload_date': '20150311', 'duration': 12, 'view_count': int, } }, { 'url': 'https://streamable.com/e/dnd1', 'only_matching': True, }, { 'url': 'https://streamable.com/s/okkqk/drxjds', 'only_matching': True, } ] @staticmethod def _extract_url(webpage): mobj = re.search( r'<iframe[^>]+src=(?P<q1>[\'"])(?P<src>(?:https?:)?//streamable\.com/(?:(?!\1).+))(?P=q1)', webpage) if mobj: return mobj.group('src') def _real_extract(self, url): video_id = self._match_id(url) # Note: Using the ajax API, as the public Streamable API doesn't seem # to return video info like the title properly sometimes, and doesn't # include info like the video duration video = self._download_json( 'https://ajax.streamable.com/videos/%s' % video_id, video_id) # Format IDs: # 0 The video is being uploaded # 1 The video is being processed # 2 The video has at least one file ready # 3 The video is unavailable due to an error status = video.get('status') if status != 2: raise ExtractorError( 'This video is currently unavailable. 
It may still be uploading or processing.', expected=True) title = video.get('reddit_title') or video['title'] formats = [] for key, info in video['files'].items(): if not info.get('url'): continue formats.append({ 'format_id': key, 'url': self._proto_relative_url(info['url']), 'width': int_or_none(info.get('width')), 'height': int_or_none(info.get('height')), 'filesize': int_or_none(info.get('size')), 'fps': int_or_none(info.get('framerate')), 'vbr': float_or_none(info.get('bitrate'), 1000) }) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': video.get('description'), 'thumbnail': self._proto_relative_url(video.get('thumbnail_url')), 'uploader': video.get('owner', {}).get('user_name'), 'timestamp': float_or_none(video.get('date_added')), 'duration': float_or_none(video.get('duration')), 'view_count': int_or_none(video.get('plays')), 'formats': formats }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/arcpublishing.py
youtube_dl/extractor/arcpublishing.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( extract_attributes, int_or_none, parse_iso8601, try_get, ) class ArcPublishingIE(InfoExtractor): _UUID_REGEX = r'[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12}' _VALID_URL = r'arcpublishing:(?P<org>[a-z]+):(?P<id>%s)' % _UUID_REGEX _TESTS = [{ # https://www.adn.com/politics/2020/11/02/video-senate-candidates-campaign-in-anchorage-on-eve-of-election-day/ 'url': 'arcpublishing:adn:8c99cb6e-b29c-4bc9-9173-7bf9979225ab', 'only_matching': True, }, { # https://www.bostonglobe.com/video/2020/12/30/metro/footage-released-showing-officer-talking-about-striking-protesters-with-car/ 'url': 'arcpublishing:bostonglobe:232b7ae6-7d73-432d-bc0a-85dbf0119ab1', 'only_matching': True, }, { # https://www.actionnewsjax.com/video/live-stream/ 'url': 'arcpublishing:cmg:cfb1cf1b-3ab5-4d1b-86c5-a5515d311f2a', 'only_matching': True, }, { # https://elcomercio.pe/videos/deportes/deporte-total-futbol-peruano-seleccion-peruana-la-valorizacion-de-los-peruanos-en-el-exterior-tras-un-2020-atipico-nnav-vr-video-noticia/ 'url': 'arcpublishing:elcomercio:27a7e1f8-2ec7-4177-874f-a4feed2885b3', 'only_matching': True, }, { # https://www.clickondetroit.com/video/community/2020/05/15/events-surrounding-woodward-dream-cruise-being-canceled/ 'url': 'arcpublishing:gmg:c8793fb2-8d44-4242-881e-2db31da2d9fe', 'only_matching': True, }, { # https://www.wabi.tv/video/2020/12/30/trenton-company-making-equipment-pfizer-covid-vaccine/ 'url': 'arcpublishing:gray:0b0ba30e-032a-4598-8810-901d70e6033e', 'only_matching': True, }, { # https://www.lateja.cr/el-mundo/video-china-aprueba-con-condiciones-su-primera/dfcbfa57-527f-45ff-a69b-35fe71054143/video/ 'url': 'arcpublishing:gruponacion:dfcbfa57-527f-45ff-a69b-35fe71054143', 'only_matching': True, }, { # https://www.fifthdomain.com/video/2018/03/09/is-america-vulnerable-to-a-cyber-attack/ 'url': 'arcpublishing:mco:aa0ca6fe-1127-46d4-b32c-be0d6fdb8055', 
'only_matching': True, }, { # https://www.vl.no/kultur/2020/12/09/en-melding-fra-en-lytter-endret-julelista-til-lewi-bergrud/ 'url': 'arcpublishing:mentormedier:47a12084-650b-4011-bfd0-3699b6947b2d', 'only_matching': True, }, { # https://www.14news.com/2020/12/30/whiskey-theft-caught-camera-henderson-liquor-store/ 'url': 'arcpublishing:raycom:b89f61f8-79fa-4c09-8255-e64237119bf7', 'only_matching': True, }, { # https://www.theglobeandmail.com/world/video-ethiopian-woman-who-became-symbol-of-integration-in-italy-killed-on/ 'url': 'arcpublishing:tgam:411b34c1-8701-4036-9831-26964711664b', 'only_matching': True, }, { # https://www.pilotonline.com/460f2931-8130-4719-8ea1-ffcb2d7cb685-132.html 'url': 'arcpublishing:tronc:460f2931-8130-4719-8ea1-ffcb2d7cb685', 'only_matching': True, }] _POWA_DEFAULTS = [ (['cmg', 'prisa'], '%s-config-prod.api.cdn.arcpublishing.com/video'), ([ 'adn', 'advancelocal', 'answers', 'bonnier', 'bostonglobe', 'demo', 'gmg', 'gruponacion', 'infobae', 'mco', 'nzme', 'pmn', 'raycom', 'spectator', 'tbt', 'tgam', 'tronc', 'wapo', 'wweek', ], 'video-api-cdn.%s.arcpublishing.com/api'), ] @staticmethod def _extract_urls(webpage): entries = [] # https://arcpublishing.atlassian.net/wiki/spaces/POWA/overview for powa_el in re.findall(r'(<div[^>]+class="[^"]*\bpowa\b[^"]*"[^>]+data-uuid="%s"[^>]*>)' % ArcPublishingIE._UUID_REGEX, webpage): powa = extract_attributes(powa_el) or {} org = powa.get('data-org') uuid = powa.get('data-uuid') if org and uuid: entries.append('arcpublishing:%s:%s' % (org, uuid)) return entries def _real_extract(self, url): org, uuid = re.match(self._VALID_URL, url).groups() for orgs, tmpl in self._POWA_DEFAULTS: if org in orgs: base_api_tmpl = tmpl break else: base_api_tmpl = '%s-prod-cdn.video-api.arcpublishing.com/api' if org == 'wapo': org = 'washpost' video = self._download_json( 'https://%s/v1/ansvideos/findByUuid' % (base_api_tmpl % org), uuid, query={'uuid': uuid})[0] title = video['headlines']['basic'] is_live = 
video.get('status') == 'live' urls = [] formats = [] for s in video.get('streams', []): s_url = s.get('url') if not s_url or s_url in urls: continue urls.append(s_url) stream_type = s.get('stream_type') if stream_type == 'smil': smil_formats = self._extract_smil_formats( s_url, uuid, fatal=False) for f in smil_formats: if f['url'].endswith('/cfx/st'): f['app'] = 'cfx/st' if not f['play_path'].startswith('mp4:'): f['play_path'] = 'mp4:' + f['play_path'] if isinstance(f['tbr'], float): f['vbr'] = f['tbr'] * 1000 del f['tbr'] f['format_id'] = 'rtmp-%d' % f['vbr'] formats.extend(smil_formats) elif stream_type in ('ts', 'hls'): m3u8_formats = self._extract_m3u8_formats( s_url, uuid, 'mp4', 'm3u8' if is_live else 'm3u8_native', m3u8_id='hls', fatal=False) if all([f.get('acodec') == 'none' for f in m3u8_formats]): continue for f in m3u8_formats: if f.get('acodec') == 'none': f['preference'] = -40 elif f.get('vcodec') == 'none': f['preference'] = -50 height = f.get('height') if not height: continue vbr = self._search_regex( r'[_x]%d[_-](\d+)' % height, f['url'], 'vbr', default=None) if vbr: f['vbr'] = int(vbr) formats.extend(m3u8_formats) else: vbr = int_or_none(s.get('bitrate')) formats.append({ 'format_id': '%s-%d' % (stream_type, vbr) if vbr else stream_type, 'vbr': vbr, 'width': int_or_none(s.get('width')), 'height': int_or_none(s.get('height')), 'filesize': int_or_none(s.get('filesize')), 'url': s_url, 'preference': -1, }) self._sort_formats( formats, ('preference', 'width', 'height', 'vbr', 'filesize', 'tbr', 'ext', 'format_id')) subtitles = {} for subtitle in (try_get(video, lambda x: x['subtitles']['urls'], list) or []): subtitle_url = subtitle.get('url') if subtitle_url: subtitles.setdefault('en', []).append({'url': subtitle_url}) return { 'id': uuid, 'title': self._live_title(title) if is_live else title, 'thumbnail': try_get(video, lambda x: x['promo_image']['url']), 'description': try_get(video, lambda x: x['subheadlines']['basic']), 'formats': formats, 
'duration': int_or_none(video.get('duration'), 100), 'timestamp': parse_iso8601(video.get('created_date')), 'subtitles': subtitles, 'is_live': is_live, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/gigya.py
youtube_dl/extractor/gigya.py
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    urlencode_postdata,
)


class GigyaBaseIE(InfoExtractor):
    """Mixin for extractors that authenticate through the Gigya accounts API."""

    def _gigya_login(self, auth_data):
        # POST the credentials to Gigya's EU login endpoint and return the
        # decoded JSON account info on success.
        response = self._download_json(
            'https://accounts.eu1.gigya.com/accounts.login', None,
            note='Logging in', errnote='Unable to log in',
            data=urlencode_postdata(auth_data))

        # Gigya reports failures via errorDetails, falling back to
        # errorMessage; a truthy value in either means the login failed.
        for key in ('errorDetails', 'errorMessage'):
            err = response.get(key)
            if err:
                raise ExtractorError(
                    'Unable to login: %s' % err, expected=True)

        return response
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/telebruxelles.py
youtube_dl/extractor/telebruxelles.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor class TeleBruxellesIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?(?:telebruxelles|bx1)\.be/(?:[^/]+/)*(?P<id>[^/#?]+)' _TESTS = [{ 'url': 'http://bx1.be/news/que-risque-lauteur-dune-fausse-alerte-a-la-bombe/', 'md5': 'a2a67a5b1c3e8c9d33109b902f474fd9', 'info_dict': { 'id': '158856', 'display_id': 'que-risque-lauteur-dune-fausse-alerte-a-la-bombe', 'ext': 'mp4', 'title': 'Que risque l’auteur d’une fausse alerte à la bombe ?', 'description': 'md5:3cf8df235d44ebc5426373050840e466', }, }, { 'url': 'http://bx1.be/sport/futsal-schaerbeek-sincline-5-3-a-thulin/', 'md5': 'dfe07ecc9c153ceba8582ac912687675', 'info_dict': { 'id': '158433', 'display_id': 'futsal-schaerbeek-sincline-5-3-a-thulin', 'ext': 'mp4', 'title': 'Futsal : Schaerbeek s’incline 5-3 à Thulin', 'description': 'md5:fd013f1488d5e2dceb9cebe39e2d569b', }, }, { 'url': 'http://bx1.be/emission/bxenf1-gastronomie/', 'only_matching': True, }, { 'url': 'https://bx1.be/berchem-sainte-agathe/personnel-carrefour-de-berchem-sainte-agathe-inquiet/', 'only_matching': True, }, { 'url': 'https://bx1.be/dernier-jt/', 'only_matching': True, }, { # live stream 'url': 'https://bx1.be/lives/direct-tv/', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) article_id = self._html_search_regex( r'<article[^>]+\bid=["\']post-(\d+)', webpage, 'article ID', default=None) title = self._html_search_regex( r'<h1[^>]*>(.+?)</h1>', webpage, 'title', default=None) or self._og_search_title(webpage) description = self._og_search_description(webpage, default=None) rtmp_url = self._html_search_regex( r'file["\']?\s*:\s*"(r(?:tm|mt)ps?://[^/]+/(?:vod/mp4:"\s*\+\s*"[^"]+"\s*\+\s*"\.mp4|stream/live))"', webpage, 'RTMP url') # Yes, they have a typo in scheme name for live stream URLs (e.g. 
# https://bx1.be/lives/direct-tv/) rtmp_url = re.sub(r'^rmtp', 'rtmp', rtmp_url) rtmp_url = re.sub(r'"\s*\+\s*"', '', rtmp_url) formats = self._extract_wowza_formats(rtmp_url, article_id or display_id) self._sort_formats(formats) is_live = 'stream/live' in rtmp_url return { 'id': article_id or display_id, 'display_id': display_id, 'title': self._live_title(title) if is_live else title, 'description': description, 'formats': formats, 'is_live': is_live, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/playfm.py
youtube_dl/extractor/playfm.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_str from ..utils import ( ExtractorError, int_or_none, parse_iso8601, ) class PlayFMIE(InfoExtractor): IE_NAME = 'play.fm' _VALID_URL = r'https?://(?:www\.)?play\.fm/(?P<slug>(?:[^/]+/)+(?P<id>[^/]+))/?(?:$|[?#])' _TEST = { 'url': 'https://www.play.fm/dan-drastic/sven-tasnadi-leipzig-electronic-music-batofar-paris-fr-2014-07-12', 'md5': 'c505f8307825a245d0c7ad1850001f22', 'info_dict': { 'id': '71276', 'ext': 'mp3', 'title': 'Sven Tasnadi - LEIPZIG ELECTRONIC MUSIC @ Batofar (Paris,FR) - 2014-07-12', 'description': '', 'duration': 5627, 'timestamp': 1406033781, 'upload_date': '20140722', 'uploader': 'Dan Drastic', 'uploader_id': '71170', 'view_count': int, 'comment_count': int, }, } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') slug = mobj.group('slug') recordings = self._download_json( 'http://v2api.play.fm/recordings/slug/%s' % slug, video_id) error = recordings.get('error') if isinstance(error, dict): raise ExtractorError( '%s returned error: %s' % (self.IE_NAME, error.get('message')), expected=True) audio_url = recordings['audio'] video_id = compat_str(recordings.get('id') or video_id) title = recordings['title'] description = recordings.get('description') duration = int_or_none(recordings.get('recordingDuration')) timestamp = parse_iso8601(recordings.get('created_at')) uploader = recordings.get('page', {}).get('title') uploader_id = compat_str(recordings.get('page', {}).get('id')) view_count = int_or_none(recordings.get('playCount')) comment_count = int_or_none(recordings.get('commentCount')) categories = [tag['name'] for tag in recordings.get('tags', []) if tag.get('name')] return { 'id': video_id, 'url': audio_url, 'title': title, 'description': description, 'duration': duration, 'timestamp': timestamp, 'uploader': uploader, 'uploader_id': uploader_id, 'view_count': view_count, 
'comment_count': comment_count, 'categories': categories, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/lcp.py
youtube_dl/extractor/lcp.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from .arkena import ArkenaIE


class LcpPlayIE(ArkenaIE):
    _VALID_URL = r'https?://play\.lcp\.fr/embed/(?P<id>[^/]+)/(?P<account_id>[^/]+)/[^/]+/[^/]+'
    _TESTS = [{
        'url': 'http://play.lcp.fr/embed/327336/131064/darkmatter/0',
        'md5': 'b8bd9298542929c06c1c15788b1f277a',
        'info_dict': {
            'id': '327336',
            'ext': 'mp4',
            'title': '327336',
            'timestamp': 1456391602,
            'upload_date': '20160225',
        },
        'params': {
            'skip_download': True,
        },
    }]


class LcpIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?lcp\.fr/(?:[^/]+/)*(?P<id>[^/]+)'
    _TESTS = [{
        # arkena embed
        'url': 'http://www.lcp.fr/la-politique-en-video/schwartzenberg-prg-preconise-francois-hollande-de-participer-une-primaire',
        'md5': 'b8bd9298542929c06c1c15788b1f277a',
        'info_dict': {
            'id': 'd56d03e9',
            'ext': 'mp4',
            'title': 'Schwartzenberg (PRG) préconise à François Hollande de participer à une primaire à gauche',
            'description': 'md5:96ad55009548da9dea19f4120c6c16a8',
            'timestamp': 1456488895,
            'upload_date': '20160226',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # dailymotion live stream
        'url': 'http://www.lcp.fr/le-direct',
        'info_dict': {
            'id': 'xji3qy',
            'ext': 'mp4',
            'title': 'La Chaine Parlementaire (LCP), Live TNT',
            'description': 'md5:5c69593f2de0f38bd9a949f2c95e870b',
            'uploader': 'LCP',
            'uploader_id': 'xbz33d',
            'timestamp': 1308923058,
            'upload_date': '20110624',
        },
        'params': {
            # m3u8 live stream
            'skip_download': True,
        },
    }, {
        'url': 'http://www.lcp.fr/emissions/277792-les-volontaires',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        # Look for an embedded play.lcp.fr (Arkena) player iframe.
        embed_url = self._search_regex(
            r'<iframe[^>]+src=(["\'])(?P<url>%s?(?:(?!\1).)*)\1' % LcpPlayIE._VALID_URL,
            webpage, 'play iframe', default=None, group='url')

        if not embed_url:
            # No Arkena player found; let the generic extractor take over.
            return self.url_result(url, 'Generic')

        title = self._og_search_title(webpage, default=None)
        if not title:
            title = self._html_search_meta('twitter:title', webpage, fatal=True)

        return {
            '_type': 'url_transparent',
            'ie_key': LcpPlayIE.ie_key(),
            'url': embed_url,
            'display_id': display_id,
            'title': title,
            'description': self._html_search_meta(
                ('description', 'twitter:description'), webpage),
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/generic.py
youtube_dl/extractor/generic.py
# coding: utf-8 from __future__ import unicode_literals import os import re import sys from .common import InfoExtractor from .youtube import YoutubeIE from ..compat import ( compat_etree_fromstring, compat_str, compat_urllib_parse_unquote, compat_urlparse, compat_xml_parse_error, ) from ..utils import ( determine_ext, ExtractorError, float_or_none, HEADRequest, int_or_none, is_html, js_to_json, KNOWN_EXTENSIONS, merge_dicts, mimetype2ext, orderedSet, parse_duration, parse_resolution, sanitized_Request, smuggle_url, unescapeHTML, unified_timestamp, unsmuggle_url, UnsupportedError, url_or_none, urljoin, xpath_attr, xpath_text, xpath_with_ns, ) from .commonprotocols import RtmpIE from .brightcove import ( BrightcoveLegacyIE, BrightcoveNewIE, ) from .nexx import ( NexxIE, NexxEmbedIE, ) from .nbc import NBCSportsVPlayerIE from .ooyala import OoyalaIE from .rutv import RUTVIE from .tvc import TVCIE from .sportbox import SportBoxIE from .myvi import MyviIE from .condenast import CondeNastIE from .udn import UDNEmbedIE from .senateisvp import SenateISVPIE from .svt import SVTIE from .pornhub import PornHubIE from .xhamster import XHamsterEmbedIE from .tnaflix import TNAFlixNetworkEmbedIE from .drtuber import DrTuberIE from .redtube import RedTubeIE from .tube8 import Tube8IE from .mofosex import MofosexEmbedIE from .spankwire import SpankwireIE from .youporn import YouPornIE from .vimeo import ( VimeoIE, VHXEmbedIE, ) from .dailymotion import DailymotionIE from .dailymail import DailyMailIE from .onionstudios import OnionStudiosIE from .viewlift import ViewLiftEmbedIE from .mtv import MTVServicesEmbeddedIE from .pladform import PladformIE from .videomore import VideomoreIE from .webcaster import WebcasterFeedIE from .googledrive import GoogleDriveIE from .jwplatform import JWPlatformIE from .digiteka import DigitekaIE from .arkena import ArkenaIE from .instagram import InstagramIE from .threeqsdn import ThreeQSDNIE from .theplatform import ThePlatformIE from .kaltura 
import KalturaIE from .eagleplatform import EaglePlatformIE from .facebook import FacebookIE from .soundcloud import SoundcloudEmbedIE from .tunein import TuneInBaseIE from .vbox7 import Vbox7IE from .dbtv import DBTVIE from .piksel import PikselIE from .videa import VideaIE from .twentymin import TwentyMinutenIE from .ustream import UstreamIE from .arte import ArteTVEmbedIE from .videopress import VideoPressIE from .rutube import RutubeIE from .limelight import LimelightBaseIE from .anvato import AnvatoIE from .washingtonpost import WashingtonPostIE from .wistia import WistiaIE from .mediaset import MediasetIE from .joj import JojIE from .megaphone import MegaphoneIE from .vzaar import VzaarIE from .channel9 import Channel9IE from .vshare import VShareIE from .mediasite import MediasiteIE from .springboardplatform import SpringboardPlatformIE from .yapfiles import YapFilesIE from .vice import ViceIE from .xfileshare import XFileShareIE from .cloudflarestream import CloudflareStreamIE from .peertube import PeerTubeIE from .teachable import TeachableIE from .indavideo import IndavideoEmbedIE from .apa import APAIE from .foxnews import FoxNewsIE from .viqeo import ViqeoIE from .expressen import ExpressenIE from .zype import ZypeIE from .odnoklassniki import OdnoklassnikiIE from .vk import VKIE from .kinja import KinjaEmbedIE from .arcpublishing import ArcPublishingIE from .medialaan import MedialaanIE from .simplecast import SimplecastIE class GenericIE(InfoExtractor): IE_DESC = 'Generic downloader that works on some sites' _VALID_URL = r'.*' IE_NAME = 'generic' _TESTS = [ # Direct link to a video { 'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4', 'md5': '67d406c2bcb6af27fa886f31aa934bbe', 'info_dict': { 'id': 'trailer', 'ext': 'mp4', 'title': 'trailer', 'upload_date': '20100513', } }, # Direct link to media delivered compressed (until Accept-Encoding is *) { 'url': 'http://calimero.tk/muzik/FictionJunction-Parallel_Hearts.flac', 'md5': 
'128c42e68b13950268b648275386fc74', 'info_dict': { 'id': 'FictionJunction-Parallel_Hearts', 'ext': 'flac', 'title': 'FictionJunction-Parallel_Hearts', 'upload_date': '20140522', }, 'expected_warnings': [ 'URL could be a direct video link, returning it as such.' ], 'skip': 'URL invalid', }, # Direct download with broken HEAD { 'url': 'http://ai-radio.org:8000/radio.opus', 'info_dict': { 'id': 'radio', 'ext': 'opus', 'title': 'radio', }, 'params': { 'skip_download': True, # infinite live stream }, 'expected_warnings': [ r'501.*Not Implemented', r'400.*Bad Request', ], }, # Direct link with incorrect MIME type { 'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm', 'md5': '4ccbebe5f36706d85221f204d7eb5913', 'info_dict': { 'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm', 'id': '5_Lennart_Poettering_-_Systemd', 'ext': 'webm', 'title': '5_Lennart_Poettering_-_Systemd', 'upload_date': '20141120', }, 'expected_warnings': [ 'URL could be a direct video link, returning it as such.' 
] }, # RSS feed { 'url': 'http://phihag.de/2014/youtube-dl/rss2.xml', 'info_dict': { 'id': 'http://phihag.de/2014/youtube-dl/rss2.xml', 'title': 'Zero Punctuation', 'description': 're:.*groundbreaking video review series.*' }, 'playlist_mincount': 11, }, # RSS feed with enclosure { 'url': 'http://podcastfeeds.nbcnews.com/audio/podcast/MSNBC-MADDOW-NETCAST-M4V.xml', 'info_dict': { 'id': 'http://podcastfeeds.nbcnews.com/nbcnews/video/podcast/MSNBC-MADDOW-NETCAST-M4V.xml', 'title': 'MSNBC Rachel Maddow (video)', 'description': 're:.*her unique approach to storytelling.*', }, 'playlist': [{ 'info_dict': { 'ext': 'mov', 'id': 'pdv_maddow_netcast_mov-12-04-2020-224335', 'title': 're:MSNBC Rachel Maddow', 'description': 're:.*her unique approach to storytelling.*', 'timestamp': int, 'upload_date': compat_str, 'duration': float, }, }], }, # RSS feed with item with description and thumbnails { 'url': 'https://anchor.fm/s/dd00e14/podcast/rss', 'info_dict': { 'id': 'https://anchor.fm/s/dd00e14/podcast/rss', 'title': 're:.*100% Hydrogen.*', 'description': 're:.*In this episode.*', }, 'playlist': [{ 'info_dict': { 'ext': 'm4a', 'id': 'c1c879525ce2cb640b344507e682c36d', 'title': 're:Hydrogen!', 'description': 're:.*In this episode we are going.*', 'timestamp': 1567977776, 'upload_date': '20190908', 'duration': 459, 'thumbnail': r're:^https?://.*\.jpg$', 'episode_number': 1, 'season_number': 1, 'age_limit': 0, }, }], 'params': { 'skip_download': True, }, }, # RSS feed with enclosures and unsupported link URLs { 'url': 'http://www.hellointernet.fm/podcast?format=rss', 'info_dict': { 'id': 'http://www.hellointernet.fm/podcast?format=rss', 'description': 'CGP Grey and Brady Haran talk about YouTube, life, work, whatever.', 'title': 'Hello Internet', }, 'playlist_mincount': 100, }, # SMIL from http://videolectures.net/promogram_igor_mekjavic_eng { 'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/video/1/smil.xml', 'info_dict': { 'id': 'smil', 'ext': 'mp4', 'title': 
'Automatics, robotics and biocybernetics', 'description': 'md5:815fc1deb6b3a2bff99de2d5325be482', 'upload_date': '20130627', 'formats': 'mincount:16', 'subtitles': 'mincount:1', }, 'params': { 'force_generic_extractor': True, 'skip_download': True, }, }, # SMIL from http://www1.wdr.de/mediathek/video/livestream/index.html { 'url': 'http://metafilegenerator.de/WDR/WDR_FS/hds/hds.smil', 'info_dict': { 'id': 'hds', 'ext': 'flv', 'title': 'hds', 'formats': 'mincount:1', }, 'params': { 'skip_download': True, }, }, # SMIL from https://www.restudy.dk/video/play/id/1637 { 'url': 'https://www.restudy.dk/awsmedia/SmilDirectory/video_1637.xml', 'info_dict': { 'id': 'video_1637', 'ext': 'flv', 'title': 'video_1637', 'formats': 'mincount:3', }, 'params': { 'skip_download': True, }, }, # SMIL from http://adventure.howstuffworks.com/5266-cool-jobs-iditarod-musher-video.htm { 'url': 'http://services.media.howstuffworks.com/videos/450221/smil-service.smil', 'info_dict': { 'id': 'smil-service', 'ext': 'flv', 'title': 'smil-service', 'formats': 'mincount:1', }, 'params': { 'skip_download': True, }, }, # SMIL from http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370 { 'url': 'http://api.new.livestream.com/accounts/1570303/events/1585861/videos/4719370.smil', 'info_dict': { 'id': '4719370', 'ext': 'mp4', 'title': '571de1fd-47bc-48db-abf9-238872a58d1f', 'formats': 'mincount:3', }, 'params': { 'skip_download': True, }, }, # XSPF playlist from http://www.telegraaf.nl/tv/nieuws/binnenland/24353229/__Tikibad_ontruimd_wegens_brand__.html { 'url': 'http://www.telegraaf.nl/xml/playlist/2015/8/7/mZlp2ctYIUEB.xspf', 'info_dict': { 'id': 'mZlp2ctYIUEB', 'ext': 'mp4', 'title': 'Tikibad ontruimd wegens brand', 'description': 'md5:05ca046ff47b931f9b04855015e163a4', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 33, }, 'params': { 'skip_download': True, }, }, # MPD from http://dash-mse-test.appspot.com/media.html { 'url': 
'http://yt-dash-mse-test.commondatastorage.googleapis.com/media/car-20120827-manifest.mpd', 'md5': '4b57baab2e30d6eb3a6a09f0ba57ef53', 'info_dict': { 'id': 'car-20120827-manifest', 'ext': 'mp4', 'title': 'car-20120827-manifest', 'formats': 'mincount:9', 'upload_date': '20130904', }, 'params': { 'format': 'bestvideo', }, }, # m3u8 served with Content-Type: audio/x-mpegURL; charset=utf-8 { 'url': 'http://once.unicornmedia.com/now/master/playlist/bb0b18ba-64f5-4b1b-a29f-0ac252f06b68/77a785f3-5188-4806-b788-0893a61634ed/93677179-2d99-4ef4-9e17-fe70d49abfbf/content.m3u8', 'info_dict': { 'id': 'content', 'ext': 'mp4', 'title': 'content', 'formats': 'mincount:8', }, 'params': { # m3u8 downloads 'skip_download': True, }, 'skip': 'video gone', }, # m3u8 served with Content-Type: text/plain { 'url': 'http://www.nacentapps.com/m3u8/index.m3u8', 'info_dict': { 'id': 'index', 'ext': 'mp4', 'title': 'index', 'upload_date': '20140720', 'formats': 'mincount:11', }, 'params': { # m3u8 downloads 'skip_download': True, }, 'skip': 'video gone', }, # google redirect { 'url': 'http://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&ved=0CCUQtwIwAA&url=http%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DcmQHVoWB5FY&ei=F-sNU-LLCaXk4QT52ICQBQ&usg=AFQjCNEw4hL29zgOohLXvpJ-Bdh2bils1Q&bvm=bv.61965928,d.bGE', 'info_dict': { 'id': 'cmQHVoWB5FY', 'ext': 'mp4', 'upload_date': '20130224', 'uploader_id': 'TheVerge', 'description': r're:^Chris Ziegler takes a look at the\.*', 'uploader': 'The Verge', 'title': 'First Firefox OS phones side-by-side', }, 'params': { 'skip_download': False, } }, { # redirect in Refresh HTTP header 'url': 'https://www.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DpO8h3EaFRdo&h=TAQHsoToz&enc=AZN16h-b6o4Zq9pZkCCdOLNKMN96BbGMNtcFwHSaazus4JHT_MFYkAA-WARTX2kvsCIdlAIyHZjl6d33ILIJU7Jzwk_K3mcenAXoAzBNoZDI_Q7EXGDJnIhrGkLXo_LJ_pAa2Jzbx17UHMd3jAs--6j2zaeto5w9RTn8T_1kKg3fdC5WPX9Dbb18vzH7YFX0eSJmoa6SP114rvlkw6pkS1-T&s=1', 'info_dict': { 'id': 'pO8h3EaFRdo', 
'ext': 'mp4', 'title': 'Tripeo Boiler Room x Dekmantel Festival DJ Set', 'description': 'md5:6294cc1af09c4049e0652b51a2df10d5', 'upload_date': '20150917', 'uploader_id': 'brtvofficial', 'uploader': 'Boiler Room', }, 'params': { 'skip_download': False, }, }, { 'url': 'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html', 'md5': '85b90ccc9d73b4acd9138d3af4c27f89', 'info_dict': { 'id': '13601338388002', 'ext': 'mp4', 'uploader': 'www.hodiho.fr', 'title': 'R\u00e9gis plante sa Jeep', } }, # bandcamp page with custom domain { 'add_ie': ['Bandcamp'], 'url': 'http://bronyrock.com/track/the-pony-mash', 'info_dict': { 'id': '3235767654', 'ext': 'mp3', 'title': 'The Pony Mash', 'uploader': 'M_Pallante', }, 'skip': 'There is a limit of 200 free downloads / month for the test song', }, { # embedded brightcove video # it also tests brightcove videos that need to set the 'Referer' # in the http requests 'add_ie': ['BrightcoveLegacy'], 'url': 'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/', 'info_dict': { 'id': '2765128793001', 'ext': 'mp4', 'title': 'Le cours de bourse : l’analyse technique', 'description': 'md5:7e9ad046e968cb2d1114004aba466fd9', 'uploader': 'BFM BUSINESS', }, 'params': { 'skip_download': True, }, }, { # embedded with itemprop embedURL and video id spelled as `idVideo` 'add_id': ['BrightcoveLegacy'], 'url': 'http://bfmbusiness.bfmtv.com/mediaplayer/chroniques/olivier-delamarche/', 'info_dict': { 'id': '5255628253001', 'ext': 'mp4', 'title': 'md5:37c519b1128915607601e75a87995fc0', 'description': 'md5:37f7f888b434bb8f8cc8dbd4f7a4cf26', 'uploader': 'BFM BUSINESS', 'uploader_id': '876450612001', 'timestamp': 1482255315, 'upload_date': '20161220', }, 'params': { 'skip_download': True, }, }, { # https://github.com/ytdl-org/youtube-dl/issues/2253 'url': 'http://bcove.me/i6nfkrc3', 'md5': '0ba9446db037002366bab3b3eb30c88c', 'info_dict': { 'id': '3101154703001', 'ext': 'mp4', 'title': 'Still no power', 'uploader': 
'thestar.com', 'description': 'Mississauga resident David Farmer is still out of power as a result of the ice storm a month ago. To keep the house warm, Farmer cuts wood from his property for a wood burning stove downstairs.', }, 'add_ie': ['BrightcoveLegacy'], 'skip': 'video gone', }, { 'url': 'http://www.championat.com/video/football/v/87/87499.html', 'md5': 'fb973ecf6e4a78a67453647444222983', 'info_dict': { 'id': '3414141473001', 'ext': 'mp4', 'title': 'Видео. Удаление Дзагоева (ЦСКА)', 'description': 'Онлайн-трансляция матча ЦСКА - "Волга"', 'uploader': 'Championat', }, }, { # https://github.com/ytdl-org/youtube-dl/issues/3541 'add_ie': ['BrightcoveLegacy'], 'url': 'http://www.kijk.nl/sbs6/leermijvrouwenkennen/videos/jqMiXKAYan2S/aflevering-1', 'info_dict': { 'id': '3866516442001', 'ext': 'mp4', 'title': 'Leer mij vrouwen kennen: Aflevering 1', 'description': 'Leer mij vrouwen kennen: Aflevering 1', 'uploader': 'SBS Broadcasting', }, 'skip': 'Restricted to Netherlands', 'params': { 'skip_download': True, # m3u8 download }, }, { # Brightcove video in <iframe> 'url': 'http://www.un.org/chinese/News/story.asp?NewsID=27724', 'md5': '36d74ef5e37c8b4a2ce92880d208b968', 'info_dict': { 'id': '5360463607001', 'ext': 'mp4', 'title': '叙利亚失明儿童在废墟上演唱《心跳》 呼吁获得正常童年生活', 'description': '联合国儿童基金会中东和北非区域大使、作曲家扎德·迪拉尼(Zade Dirani)在3月15日叙利亚冲突爆发7周年纪念日之际发布了为叙利亚谱写的歌曲《心跳》(HEARTBEAT),为受到六年冲突影响的叙利亚儿童发出强烈呐喊,呼吁世界做出共同努力,使叙利亚儿童重新获得享有正常童年生活的权利。', 'uploader': 'United Nations', 'uploader_id': '1362235914001', 'timestamp': 1489593889, 'upload_date': '20170315', }, 'add_ie': ['BrightcoveLegacy'], }, { # Brightcove with alternative playerID key 'url': 'http://www.nature.com/nmeth/journal/v9/n7/fig_tab/nmeth.2062_SV1.html', 'info_dict': { 'id': 'nmeth.2062_SV1', 'title': 'Simultaneous multiview imaging of the Drosophila syncytial blastoderm : Quantitative high-speed imaging of entire developing embryos with simultaneous multiview light-sheet microscopy : Nature Methods : Nature Research', }, 
'playlist': [{ 'info_dict': { 'id': '2228375078001', 'ext': 'mp4', 'title': 'nmeth.2062-sv1', 'description': 'nmeth.2062-sv1', 'timestamp': 1363357591, 'upload_date': '20130315', 'uploader': 'Nature Publishing Group', 'uploader_id': '1964492299001', }, }], }, { # Brightcove with UUID in videoPlayer 'url': 'http://www8.hp.com/cn/zh/home.html', 'info_dict': { 'id': '5255815316001', 'ext': 'mp4', 'title': 'Sprocket Video - China', 'description': 'Sprocket Video - China', 'uploader': 'HP-Video Gallery', 'timestamp': 1482263210, 'upload_date': '20161220', 'uploader_id': '1107601872001', }, 'params': { 'skip_download': True, # m3u8 download }, 'skip': 'video rotates...weekly?', }, { # Brightcove:new type [2]. 'url': 'http://www.delawaresportszone.com/video-st-thomas-more-earns-first-trip-to-basketball-semis', 'md5': '2b35148fcf48da41c9fb4591650784f3', 'info_dict': { 'id': '5348741021001', 'ext': 'mp4', 'upload_date': '20170306', 'uploader_id': '4191638492001', 'timestamp': 1488769918, 'title': 'VIDEO: St. 
Thomas More earns first trip to basketball semis', }, }, { # Alternative brightcove <video> attributes 'url': 'http://www.programme-tv.net/videos/extraits/81095-guillaume-canet-evoque-les-rumeurs-d-infidelite-de-marion-cotillard-avec-brad-pitt-dans-vivement-dimanche/', 'info_dict': { 'id': '81095-guillaume-canet-evoque-les-rumeurs-d-infidelite-de-marion-cotillard-avec-brad-pitt-dans-vivement-dimanche', 'title': "Guillaume Canet évoque les rumeurs d'infidélité de Marion Cotillard avec Brad Pitt dans Vivement Dimanche, Extraits : toutes les vidéos avec Télé-Loisirs", }, 'playlist': [{ 'md5': '732d22ba3d33f2f3fc253c39f8f36523', 'info_dict': { 'id': '5311302538001', 'ext': 'mp4', 'title': "Guillaume Canet évoque les rumeurs d'infidélité de Marion Cotillard avec Brad Pitt dans Vivement Dimanche", 'description': "Guillaume Canet évoque les rumeurs d'infidélité de Marion Cotillard avec Brad Pitt dans Vivement Dimanche (France 2, 5 février 2017)", 'timestamp': 1486321708, 'upload_date': '20170205', 'uploader_id': '800000640001', }, 'only_matching': True, }], }, { # Brightcove with UUID in videoPlayer 'url': 'http://www8.hp.com/cn/zh/home.html', 'info_dict': { 'id': '5255815316001', 'ext': 'mp4', 'title': 'Sprocket Video - China', 'description': 'Sprocket Video - China', 'uploader': 'HP-Video Gallery', 'timestamp': 1482263210, 'upload_date': '20161220', 'uploader_id': '1107601872001', }, 'params': { 'skip_download': True, # m3u8 download }, }, # ooyala video { 'url': 'http://www.rollingstone.com/music/videos/norwegian-dj-cashmere-cat-goes-spartan-on-with-me-premiere-20131219', 'md5': '166dd577b433b4d4ebfee10b0824d8ff', 'info_dict': { 'id': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ', 'ext': 'mp4', 'title': '2cc213299525360.mov', # that's what we get 'duration': 238.231, }, 'add_ie': ['Ooyala'], }, { # ooyala video embedded with http://player.ooyala.com/iframe.js 'url': 'http://www.macrumors.com/2015/07/24/steve-jobs-the-man-in-the-machine-first-trailer/', 'info_dict': { 'id': 
'p0MGJndjoG5SOKqO_hZJuZFPB-Tr5VgB', 'ext': 'mp4', 'title': '"Steve Jobs: Man in the Machine" trailer', 'description': 'The first trailer for the Alex Gibney documentary "Steve Jobs: Man in the Machine."', 'duration': 135.427, }, 'params': { 'skip_download': True, }, 'skip': 'movie expired', }, # ooyala video embedded with http://player.ooyala.com/static/v4/production/latest/core.min.js { 'url': 'http://wnep.com/2017/07/22/steampunk-fest-comes-to-honesdale/', 'info_dict': { 'id': 'lwYWYxYzE6V5uJMjNGyKtwwiw9ZJD7t2', 'ext': 'mp4', 'title': 'Steampunk Fest Comes to Honesdale', 'duration': 43.276, }, 'params': { 'skip_download': True, } }, # embed.ly video { 'url': 'http://www.tested.com/science/weird/460206-tested-grinding-coffee-2000-frames-second/', 'info_dict': { 'id': '9ODmcdjQcHQ', 'ext': 'mp4', 'title': 'Tested: Grinding Coffee at 2000 Frames Per Second', 'upload_date': '20140225', 'description': 'md5:06a40fbf30b220468f1e0957c0f558ff', 'uploader': 'Tested', 'uploader_id': 'testedcom', }, # No need to test YoutubeIE here 'params': { 'skip_download': True, }, }, # funnyordie embed { 'url': 'http://www.theguardian.com/world/2014/mar/11/obama-zach-galifianakis-between-two-ferns', 'info_dict': { 'id': '18e820ec3f', 'ext': 'mp4', 'title': 'Between Two Ferns with Zach Galifianakis: President Barack Obama', 'description': 'Episode 18: President Barack Obama sits down with Zach Galifianakis for his most memorable interview yet.', }, # HEAD requests lead to endless 301, while GET is OK 'expected_warnings': ['301'], }, # RUTV embed { 'url': 'http://www.rg.ru/2014/03/15/reg-dfo/anklav-anons.html', 'info_dict': { 'id': '776940', 'ext': 'mp4', 'title': 'Охотское море стало целиком российским', 'description': 'md5:5ed62483b14663e2a95ebbe115eb8f43', }, 'params': { # m3u8 download 'skip_download': True, }, }, # TVC embed { 'url': 
'http://sch1298sz.mskobr.ru/dou_edu/karamel_ki/filial_galleries/video/iframe_src_http_tvc_ru_video_iframe_id_55304_isplay_false_acc_video_id_channel_brand_id_11_show_episodes_episode_id_32307_frameb/', 'info_dict': { 'id': '55304', 'ext': 'mp4', 'title': 'Дошкольное воспитание', }, }, # SportBox embed { 'url': 'http://www.vestifinance.ru/articles/25753', 'info_dict': { 'id': '25753', 'title': 'Прямые трансляции с Форума-выставки "Госзаказ-2013"', }, 'playlist': [{ 'info_dict': { 'id': '370908', 'title': 'Госзаказ. День 3', 'ext': 'mp4', } }, { 'info_dict': { 'id': '370905', 'title': 'Госзаказ. День 2', 'ext': 'mp4', } }, { 'info_dict': { 'id': '370902', 'title': 'Госзаказ. День 1', 'ext': 'mp4', } }], 'params': { # m3u8 download 'skip_download': True, }, }, # Myvi.ru embed { 'url': 'http://www.kinomyvi.tv/news/detail/Pervij-dublirovannij-trejler--Uzhastikov-_nOw1', 'info_dict': { 'id': 'f4dafcad-ff21-423d-89b5-146cfd89fa1e', 'ext': 'mp4', 'title': 'Ужастики, русский трейлер (2015)', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 153, } }, # XHamster embed { 'url': 'http://www.numisc.com/forum/showthread.php?11696-FM15-which-pumiscer-was-this-%28-vid-%29-%28-alfa-as-fuck-srx-%29&s=711f5db534502e22260dec8c5e2d66d8', 'info_dict': { 'id': 'showthread', 'title': '[NSFL] [FM15] which pumiscer was this ( vid ) ( alfa as fuck srx )', }, 'playlist_mincount': 7, # This forum does not allow <iframe> syntaxes anymore # Now HTML tags are displayed as-is 'skip': 'No videos on this page', }, # Embedded TED video { 'url': 'http://en.support.wordpress.com/videos/ted-talks/', 'md5': '65fdff94098e4a607385a60c5177c638', 'info_dict': { 'id': '1969', 'ext': 'mp4', 'title': 'Hidden miracles of the natural world', 'uploader': 'Louie Schwartzberg', 'description': 'md5:8145d19d320ff3e52f28401f4c4283b9', } }, # nowvideo embed hidden behind percent encoding { 'url': 'http://www.waoanime.tv/the-super-dimension-fortress-macross-episode-1/', 'md5': '2baf4ddd70f697d94b1c18cf796d5107', 
'info_dict': { 'id': '06e53103ca9aa', 'ext': 'flv', 'title': 'Macross Episode 001 Watch Macross Episode 001 onl', 'description': 'No description', }, }, # arte embed { 'url': 'http://www.tv-replay.fr/redirection/20-03-14/x-enius-arte-10753389.html', 'md5': '7653032cbb25bf6c80d80f217055fa43', 'info_dict': { 'id': '048195-004_PLUS7-F', 'ext': 'flv', 'title': 'X:enius', 'description': 'md5:d5fdf32ef6613cdbfd516ae658abf168', 'upload_date': '20140320', }, 'params': { 'skip_download': 'Requires rtmpdump' }, 'skip': 'video gone', }, # francetv embed { 'url': 'http://www.tsprod.com/replay-du-concert-alcaline-de-calogero', 'info_dict': { 'id': 'EV_30231', 'ext': 'mp4', 'title': 'Alcaline, le concert avec Calogero', 'description': 'md5:61f08036dcc8f47e9cfc33aed08ffaff', 'upload_date': '20150226', 'timestamp': 1424989860, 'duration': 5400, }, 'params': { # m3u8 downloads 'skip_download': True, }, 'expected_warnings': [ 'Forbidden' ] }, # Condé Nast embed {
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
true
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/nintendo.py
youtube_dl/extractor/nintendo.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from .ooyala import OoyalaIE class NintendoIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?nintendo\.com/(?:games/detail|nintendo-direct)/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://www.nintendo.com/games/detail/duck-hunt-wii-u/', 'info_dict': { 'id': 'MzMmticjp0VPzO3CCj4rmFOuohEuEWoW', 'ext': 'flv', 'title': 'Duck Hunt Wii U VC NES - Trailer', 'duration': 60.326, }, 'params': { 'skip_download': True, }, 'add_ie': ['Ooyala'], }, { 'url': 'http://www.nintendo.com/games/detail/tokyo-mirage-sessions-fe-wii-u', 'info_dict': { 'id': 'tokyo-mirage-sessions-fe-wii-u', 'title': 'Tokyo Mirage Sessions ♯FE', }, 'playlist_count': 4, }, { 'url': 'https://www.nintendo.com/nintendo-direct/09-04-2019/', 'info_dict': { 'id': 'J2bXdmaTE6fe3dWJTPcc7m23FNbc_A1V', 'ext': 'mp4', 'title': 'Switch_ROS_ND0904-H264.mov', 'duration': 2324.758, }, 'params': { 'skip_download': True, }, 'add_ie': ['Ooyala'], }] def _real_extract(self, url): page_id = self._match_id(url) webpage = self._download_webpage(url, page_id) entries = [ OoyalaIE._build_url_result(m.group('code')) for m in re.finditer( r'data-(?:video-id|directVideoId)=(["\'])(?P<code>(?:(?!\1).)+)\1', webpage)] title = self._html_search_regex( r'(?s)<(?:span|div)[^>]+class="(?:title|wrapper)"[^>]*>.*?<h1>(.+?)</h1>', webpage, 'title', fatal=False) return self.playlist_result( entries, page_id, title)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tvplay.py
youtube_dl/extractor/tvplay.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_HTTPError, compat_urlparse, ) from ..utils import ( determine_ext, ExtractorError, int_or_none, parse_duration, parse_iso8601, qualities, try_get, update_url_query, url_or_none, urljoin, ) class TVPlayIE(InfoExtractor): IE_NAME = 'mtg' IE_DESC = 'MTG services' _VALID_URL = r'''(?x) (?: mtg:| https?:// (?:www\.)? (?: tvplay(?:\.skaties)?\.lv(?:/parraides)?| (?:tv3play|play\.tv3)\.lt(?:/programos)?| tv3play(?:\.tv3)?\.ee/sisu| (?:tv(?:3|6|8|10)play|viafree)\.se/program| (?:(?:tv3play|viasat4play|tv6play|viafree)\.no|(?:tv3play|viafree)\.dk)/programmer| play\.nova(?:tv)?\.bg/programi ) /(?:[^/]+/)+ ) (?P<id>\d+) ''' _TESTS = [ { 'url': 'http://www.tvplay.lv/parraides/vinas-melo-labak/418113?autostart=true', 'md5': 'a1612fe0849455423ad8718fe049be21', 'info_dict': { 'id': '418113', 'ext': 'mp4', 'title': 'Kādi ir īri? - Viņas melo labāk', 'description': 'Baiba apsmej īrus, kādi tie ir un ko viņi dara.', 'series': 'Viņas melo labāk', 'season': '2.sezona', 'season_number': 2, 'duration': 25, 'timestamp': 1406097056, 'upload_date': '20140723', }, }, { 'url': 'http://play.tv3.lt/programos/moterys-meluoja-geriau/409229?autostart=true', 'info_dict': { 'id': '409229', 'ext': 'flv', 'title': 'Moterys meluoja geriau', 'description': 'md5:9aec0fc68e2cbc992d2a140bd41fa89e', 'series': 'Moterys meluoja geriau', 'episode_number': 47, 'season': '1 sezonas', 'season_number': 1, 'duration': 1330, 'timestamp': 1403769181, 'upload_date': '20140626', }, 'params': { # rtmp download 'skip_download': True, }, }, { 'url': 'http://www.tv3play.ee/sisu/kodu-keset-linna/238551?autostart=true', 'info_dict': { 'id': '238551', 'ext': 'flv', 'title': 'Kodu keset linna 398537', 'description': 'md5:7df175e3c94db9e47c0d81ffa5d68701', 'duration': 1257, 'timestamp': 1292449761, 'upload_date': '20101215', }, 'params': { # rtmp download 'skip_download': True, }, }, { 'url': 
'http://www.tv3play.se/program/husraddarna/395385?autostart=true', 'info_dict': { 'id': '395385', 'ext': 'mp4', 'title': 'Husräddarna S02E07', 'description': 'md5:f210c6c89f42d4fc39faa551be813777', 'duration': 2574, 'timestamp': 1400596321, 'upload_date': '20140520', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.tv6play.se/program/den-sista-dokusapan/266636?autostart=true', 'info_dict': { 'id': '266636', 'ext': 'mp4', 'title': 'Den sista dokusåpan S01E08', 'description': 'md5:295be39c872520221b933830f660b110', 'duration': 1492, 'timestamp': 1330522854, 'upload_date': '20120229', 'age_limit': 18, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.tv8play.se/program/antikjakten/282756?autostart=true', 'info_dict': { 'id': '282756', 'ext': 'mp4', 'title': 'Antikjakten S01E10', 'description': 'md5:1b201169beabd97e20c5ad0ad67b13b8', 'duration': 2646, 'timestamp': 1348575868, 'upload_date': '20120925', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.tv3play.no/programmer/anna-anka-soker-assistent/230898?autostart=true', 'info_dict': { 'id': '230898', 'ext': 'mp4', 'title': 'Anna Anka søker assistent - Ep. 8', 'description': 'md5:f80916bf5bbe1c5f760d127f8dd71474', 'duration': 2656, 'timestamp': 1277720005, 'upload_date': '20100628', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.viasat4play.no/programmer/budbringerne/21873?autostart=true', 'info_dict': { 'id': '21873', 'ext': 'mp4', 'title': 'Budbringerne program 10', 'description': 'md5:4db78dc4ec8a85bb04fd322a3ee5092d', 'duration': 1297, 'timestamp': 1254205102, 'upload_date': '20090929', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.tv6play.no/programmer/hotelinspektor-alex-polizzi/361883?autostart=true', 'info_dict': { 'id': '361883', 'ext': 'mp4', 'title': 'Hotelinspektør Alex Polizzi - Ep. 
10', 'description': 'md5:3ecf808db9ec96c862c8ecb3a7fdaf81', 'duration': 2594, 'timestamp': 1393236292, 'upload_date': '20140224', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://play.novatv.bg/programi/zdravei-bulgariya/624952?autostart=true', 'info_dict': { 'id': '624952', 'ext': 'flv', 'title': 'Здравей, България (12.06.2015 г.) ', 'description': 'md5:99f3700451ac5bb71a260268b8daefd7', 'duration': 8838, 'timestamp': 1434100372, 'upload_date': '20150612', }, 'params': { # rtmp download 'skip_download': True, }, }, { 'url': 'https://play.nova.bg/programi/zdravei-bulgariya/764300?autostart=true', 'only_matching': True, }, { 'url': 'http://tvplay.skaties.lv/parraides/vinas-melo-labak/418113?autostart=true', 'only_matching': True, }, { 'url': 'https://tvplay.skaties.lv/vinas-melo-labak/418113/?autostart=true', 'only_matching': True, }, { # views is null 'url': 'http://tvplay.skaties.lv/parraides/tv3-zinas/760183', 'only_matching': True, }, { 'url': 'http://tv3play.tv3.ee/sisu/kodu-keset-linna/238551?autostart=true', 'only_matching': True, }, { 'url': 'http://www.viafree.se/program/underhallning/i-like-radio-live/sasong-1/676869', 'only_matching': True, }, { 'url': 'mtg:418113', 'only_matching': True, } ] def _real_extract(self, url): video_id = self._match_id(url) geo_country = self._search_regex( r'https?://[^/]+\.([a-z]{2})', url, 'geo country', default=None) if geo_country: self._initialize_geo_bypass({'countries': [geo_country.upper()]}) video = self._download_json( 'http://playapi.mtgx.tv/v3/videos/%s' % video_id, video_id, 'Downloading video JSON') title = video['title'] try: streams = self._download_json( 'http://playapi.mtgx.tv/v3/videos/stream/%s' % video_id, video_id, 'Downloading streams JSON') except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403: msg = self._parse_json(e.cause.read().decode('utf-8'), video_id) raise ExtractorError(msg['msg'], expected=True) raise quality = qualities(['hls', 'medium', 
'high']) formats = [] for format_id, video_url in streams.get('streams', {}).items(): video_url = url_or_none(video_url) if not video_url: continue ext = determine_ext(video_url) if ext == 'f4m': formats.extend(self._extract_f4m_formats( update_url_query(video_url, { 'hdcore': '3.5.0', 'plugin': 'aasp-3.5.0.151.81' }), video_id, f4m_id='hds', fatal=False)) elif ext == 'm3u8': formats.extend(self._extract_m3u8_formats( video_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) else: fmt = { 'format_id': format_id, 'quality': quality(format_id), 'ext': ext, } if video_url.startswith('rtmp'): m = re.search( r'^(?P<url>rtmp://[^/]+/(?P<app>[^/]+))/(?P<playpath>.+)$', video_url) if not m: continue fmt.update({ 'ext': 'flv', 'url': m.group('url'), 'app': m.group('app'), 'play_path': m.group('playpath'), 'preference': -1, }) else: fmt.update({ 'url': video_url, }) formats.append(fmt) if not formats and video.get('is_geo_blocked'): self.raise_geo_restricted( 'This content might not be available in your country due to copyright reasons') self._sort_formats(formats) # TODO: webvtt in m3u8 subtitles = {} sami_path = video.get('sami_path') if sami_path: lang = self._search_regex( r'_([a-z]{2})\.xml', sami_path, 'lang', default=compat_urlparse.urlparse(url).netloc.rsplit('.', 1)[-1]) subtitles[lang] = [{ 'url': sami_path, }] series = video.get('format_title') episode_number = int_or_none(video.get('format_position', {}).get('episode')) season = video.get('_embedded', {}).get('season', {}).get('title') season_number = int_or_none(video.get('format_position', {}).get('season')) return { 'id': video_id, 'title': title, 'description': video.get('description'), 'series': series, 'episode_number': episode_number, 'season': season, 'season_number': season_number, 'duration': int_or_none(video.get('duration')), 'timestamp': parse_iso8601(video.get('created_at')), 'view_count': try_get(video, lambda x: x['views']['total'], int), 'age_limit': 
int_or_none(video.get('age_limit', 0)), 'formats': formats, 'subtitles': subtitles, } class ViafreeIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// (?:www\.)? viafree\.(?P<country>dk|no|se) /(?P<id>program(?:mer)?/(?:[^/]+/)+[^/?#&]+) ''' _TESTS = [{ 'url': 'http://www.viafree.no/programmer/underholdning/det-beste-vorspielet/sesong-2/episode-1', 'info_dict': { 'id': '757786', 'ext': 'mp4', 'title': 'Det beste vorspielet - Sesong 2 - Episode 1', 'description': 'md5:b632cb848331404ccacd8cd03e83b4c3', 'series': 'Det beste vorspielet', 'season_number': 2, 'duration': 1116, 'timestamp': 1471200600, 'upload_date': '20160814', }, 'params': { 'skip_download': True, }, }, { # with relatedClips 'url': 'http://www.viafree.se/program/reality/sommaren-med-youtube-stjarnorna/sasong-1/avsnitt-1', 'only_matching': True, }, { # Different og:image URL schema 'url': 'http://www.viafree.se/program/reality/sommaren-med-youtube-stjarnorna/sasong-1/avsnitt-2', 'only_matching': True, }, { 'url': 'http://www.viafree.se/program/livsstil/husraddarna/sasong-2/avsnitt-2', 'only_matching': True, }, { 'url': 'http://www.viafree.dk/programmer/reality/paradise-hotel/saeson-7/episode-5', 'only_matching': True, }] _GEO_BYPASS = False @classmethod def suitable(cls, url): return False if TVPlayIE.suitable(url) else super(ViafreeIE, cls).suitable(url) def _real_extract(self, url): country, path = re.match(self._VALID_URL, url).groups() content = self._download_json( 'https://viafree-content.mtg-api.com/viafree-content/v1/%s/path/%s' % (country, path), path) program = content['_embedded']['viafreeBlocks'][0]['_embedded']['program'] guid = program['guid'] meta = content['meta'] title = meta['title'] try: stream_href = self._download_json( program['_links']['streamLink']['href'], guid, headers=self.geo_verification_headers())['embedded']['prioritizedStreams'][0]['links']['stream']['href'] except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403: 
self.raise_geo_restricted(countries=[country]) raise formats = self._extract_m3u8_formats(stream_href, guid, 'mp4') self._sort_formats(formats) episode = program.get('episode') or {} return { 'id': guid, 'title': title, 'thumbnail': meta.get('image'), 'description': meta.get('description'), 'series': episode.get('seriesTitle'), 'episode_number': int_or_none(episode.get('episodeNumber')), 'season_number': int_or_none(episode.get('seasonNumber')), 'duration': int_or_none(try_get(program, lambda x: x['video']['duration']['milliseconds']), 1000), 'timestamp': parse_iso8601(try_get(program, lambda x: x['availability']['start'])), 'formats': formats, } class TVPlayHomeIE(InfoExtractor): _VALID_URL = r'https?://(?:tv3?)?play\.(?:tv3\.lt|skaties\.lv|tv3\.ee)/(?:[^/]+/)*[^/?#&]+-(?P<id>\d+)' _TESTS = [{ 'url': 'https://tvplay.tv3.lt/aferistai-n-7/aferistai-10047125/', 'info_dict': { 'id': '366367', 'ext': 'mp4', 'title': 'Aferistai', 'description': 'Aferistai. Kalėdinė pasaka.', 'series': 'Aferistai [N-7]', 'season': '1 sezonas', 'season_number': 1, 'duration': 464, 'timestamp': 1394209658, 'upload_date': '20140307', 'age_limit': 18, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://tvplay.skaties.lv/vinas-melo-labak/vinas-melo-labak-10280317/', 'only_matching': True, }, { 'url': 'https://tvplay.tv3.ee/cool-d-ga-mehhikosse/cool-d-ga-mehhikosse-10044354/', 'only_matching': True, }, { 'url': 'https://play.tv3.lt/aferistai-10047125', 'only_matching': True, }, { 'url': 'https://tv3play.skaties.lv/vinas-melo-labak-10280317', 'only_matching': True, }, { 'url': 'https://play.tv3.ee/cool-d-ga-mehhikosse-10044354', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) asset = self._download_json( urljoin(url, '/sb/public/asset/' + video_id), video_id) m3u8_url = asset['movie']['contentUrl'] video_id = asset['assetId'] asset_title = asset['title'] title = asset_title['title'] formats = self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', 
'm3u8_native', m3u8_id='hls') self._sort_formats(formats) thumbnails = None image_url = asset.get('imageUrl') if image_url: thumbnails = [{ 'url': urljoin(url, image_url), 'ext': 'jpg', }] metadata = asset.get('metadata') or {} return { 'id': video_id, 'title': title, 'description': asset_title.get('summaryLong') or asset_title.get('summaryShort'), 'thumbnails': thumbnails, 'duration': parse_duration(asset_title.get('runTime')), 'series': asset.get('tvSeriesTitle'), 'season': asset.get('tvSeasonTitle'), 'season_number': int_or_none(metadata.get('seasonNumber')), 'episode': asset_title.get('titleBrief'), 'episode_number': int_or_none(metadata.get('episodeNumber')), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/ctv.py
youtube_dl/extractor/ctv.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class CTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?ctv\.ca/(?P<id>(?:show|movie)s/[^/]+/[^/?#&]+)' _TESTS = [{ 'url': 'https://www.ctv.ca/shows/your-morning/wednesday-december-23-2020-s5e88', 'info_dict': { 'id': '2102249', 'ext': 'flv', 'title': 'Wednesday, December 23, 2020', 'thumbnail': r're:^https?://.*\.jpg$', 'description': 'Your Morning delivers original perspectives and unique insights into the headlines of the day.', 'timestamp': 1608732000, 'upload_date': '20201223', 'series': 'Your Morning', 'season': '2020-2021', 'season_number': 5, 'episode_number': 88, 'tags': ['Your Morning'], 'categories': ['Talk Show'], 'duration': 7467.126, }, }, { 'url': 'https://www.ctv.ca/movies/adam-sandlers-eight-crazy-nights/adam-sandlers-eight-crazy-nights', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) content = self._download_json( 'https://www.ctv.ca/space-graphql/graphql', display_id, query={ 'query': '''{ resolvedPath(path: "/%s") { lastSegment { content { ... on AxisContent { axisId videoPlayerDestCode } } } } }''' % display_id, })['data']['resolvedPath']['lastSegment']['content'] video_id = content['axisId'] return self.url_result( '9c9media:%s:%s' % (content['videoPlayerDestCode'], video_id), 'NineCNineMedia', video_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/giantbomb.py
youtube_dl/extractor/giantbomb.py
from __future__ import unicode_literals import re import json from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, qualities, unescapeHTML, ) class GiantBombIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?giantbomb\.com/(?:videos|shows)/(?P<display_id>[^/]+)/(?P<id>\d+-\d+)' _TESTS = [{ 'url': 'http://www.giantbomb.com/videos/quick-look-destiny-the-dark-below/2300-9782/', 'md5': '132f5a803e7e0ab0e274d84bda1e77ae', 'info_dict': { 'id': '2300-9782', 'display_id': 'quick-look-destiny-the-dark-below', 'ext': 'mp4', 'title': 'Quick Look: Destiny: The Dark Below', 'description': 'md5:0aa3aaf2772a41b91d44c63f30dfad24', 'duration': 2399, 'thumbnail': r're:^https?://.*\.jpg$', } }, { 'url': 'https://www.giantbomb.com/shows/ben-stranding/2970-20212', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') display_id = mobj.group('display_id') webpage = self._download_webpage(url, display_id) title = self._og_search_title(webpage) description = self._og_search_description(webpage) thumbnail = self._og_search_thumbnail(webpage) video = json.loads(unescapeHTML(self._search_regex( r'data-video="([^"]+)"', webpage, 'data-video'))) duration = int_or_none(video.get('lengthSeconds')) quality = qualities([ 'f4m_low', 'progressive_low', 'f4m_high', 'progressive_high', 'f4m_hd', 'progressive_hd']) formats = [] for format_id, video_url in video['videoStreams'].items(): if format_id == 'f4m_stream': continue ext = determine_ext(video_url) if ext == 'f4m': f4m_formats = self._extract_f4m_formats(video_url + '?hdcore=3.3.1', display_id) if f4m_formats: f4m_formats[0]['quality'] = quality(format_id) formats.extend(f4m_formats) elif ext == 'm3u8': formats.extend(self._extract_m3u8_formats( video_url, display_id, ext='mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) else: formats.append({ 'url': video_url, 'format_id': format_id, 'quality': quality(format_id), }) if not 
formats: youtube_id = video.get('youtubeID') if youtube_id: return self.url_result(youtube_id, 'Youtube') self._sort_formats(formats) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/iprima.py
youtube_dl/extractor/iprima.py
# coding: utf-8 from __future__ import unicode_literals import re import time from .common import InfoExtractor from ..utils import ( determine_ext, js_to_json, ) class IPrimaIE(InfoExtractor): _VALID_URL = r'https?://(?:[^/]+)\.iprima\.cz/(?:[^/]+/)*(?P<id>[^/?#&]+)' _GEO_BYPASS = False _TESTS = [{ 'url': 'https://prima.iprima.cz/particka/92-epizoda', 'info_dict': { 'id': 'p51388', 'ext': 'mp4', 'title': 'Partička (92)', 'description': 'md5:859d53beae4609e6dd7796413f1b6cac', }, 'params': { 'skip_download': True, # m3u8 download }, }, { 'url': 'https://cnn.iprima.cz/videa/70-epizoda', 'info_dict': { 'id': 'p681554', 'ext': 'mp4', 'title': 'HLAVNÍ ZPRÁVY 3.5.2020', }, 'params': { 'skip_download': True, # m3u8 download }, }, { 'url': 'http://play.iprima.cz/particka/particka-92', 'only_matching': True, }, { # geo restricted 'url': 'http://play.iprima.cz/closer-nove-pripady/closer-nove-pripady-iv-1', 'only_matching': True, }, { # iframe api.play-backend.iprima.cz 'url': 'https://prima.iprima.cz/my-little-pony/mapa-znameni-2-2', 'only_matching': True, }, { # iframe prima.iprima.cz 'url': 'https://prima.iprima.cz/porady/jak-se-stavi-sen/rodina-rathousova-praha', 'only_matching': True, }, { 'url': 'http://www.iprima.cz/filmy/desne-rande', 'only_matching': True, }, { 'url': 'https://zoom.iprima.cz/10-nejvetsich-tajemstvi-zahad/posvatna-mista-a-stavby', 'only_matching': True, }, { 'url': 'https://krimi.iprima.cz/mraz-0/sebevrazdy', 'only_matching': True, }, { 'url': 'https://cool.iprima.cz/derava-silnice-nevadi', 'only_matching': True, }, { 'url': 'https://love.iprima.cz/laska-az-za-hrob/slib-dany-bratrovi', 'only_matching': True, }, { 'url': 'https://autosalon.iprima.cz/motorsport/7-epizoda-1', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) self._set_cookie('play.iprima.cz', 'ott_adult_confirmed', '1') webpage = self._download_webpage(url, video_id) title = self._og_search_title( webpage, default=None) or self._search_regex( 
r'<h1>([^<]+)', webpage, 'title') video_id = self._search_regex( (r'<iframe[^>]+\bsrc=["\'](?:https?:)?//(?:api\.play-backend\.iprima\.cz/prehravac/embedded|prima\.iprima\.cz/[^/]+/[^/]+)\?.*?\bid=(p\d+)', r'data-product="([^"]+)">', r'id=["\']player-(p\d+)"', r'playerId\s*:\s*["\']player-(p\d+)', r'\bvideos\s*=\s*["\'](p\d+)'), webpage, 'real id') playerpage = self._download_webpage( 'http://play.iprima.cz/prehravac/init', video_id, note='Downloading player', query={ '_infuse': 1, '_ts': round(time.time()), 'productId': video_id, }, headers={'Referer': url}) formats = [] def extract_formats(format_url, format_key=None, lang=None): ext = determine_ext(format_url) new_formats = [] if format_key == 'hls' or ext == 'm3u8': new_formats = self._extract_m3u8_formats( format_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False) elif format_key == 'dash' or ext == 'mpd': return new_formats = self._extract_mpd_formats( format_url, video_id, mpd_id='dash', fatal=False) if lang: for f in new_formats: if not f.get('language'): f['language'] = lang formats.extend(new_formats) options = self._parse_json( self._search_regex( r'(?s)(?:TDIPlayerOptions|playerOptions)\s*=\s*({.+?});\s*\]\]', playerpage, 'player options', default='{}'), video_id, transform_source=js_to_json, fatal=False) if options: for key, tracks in options.get('tracks', {}).items(): if not isinstance(tracks, list): continue for track in tracks: src = track.get('src') if src: extract_formats(src, key.lower(), track.get('lang')) if not formats: for _, src in re.findall(r'src["\']\s*:\s*(["\'])(.+?)\1', playerpage): extract_formats(src) if not formats and '>GEO_IP_NOT_ALLOWED<' in playerpage: self.raise_geo_restricted(countries=['CZ']) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'thumbnail': self._og_search_thumbnail(webpage, default=None), 'formats': formats, 'description': self._og_search_description(webpage, default=None), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/srmediathek.py
youtube_dl/extractor/srmediathek.py
# coding: utf-8 from __future__ import unicode_literals from .ard import ARDMediathekBaseIE from ..utils import ( ExtractorError, get_element_by_attribute, ) class SRMediathekIE(ARDMediathekBaseIE): IE_NAME = 'sr:mediathek' IE_DESC = 'Saarländischer Rundfunk' _VALID_URL = r'https?://sr-mediathek(?:\.sr-online)?\.de/index\.php\?.*?&id=(?P<id>[0-9]+)' _TESTS = [{ 'url': 'http://sr-mediathek.sr-online.de/index.php?seite=7&id=28455', 'info_dict': { 'id': '28455', 'ext': 'mp4', 'title': 'sportarena (26.10.2014)', 'description': 'Ringen: KSV Köllerbach gegen Aachen-Walheim; Frauen-Fußball: 1. FC Saarbrücken gegen Sindelfingen; Motorsport: Rallye in Losheim; dazu: Interview mit Timo Bernhard; Turnen: TG Saar; Reitsport: Deutscher Voltigier-Pokal; Badminton: Interview mit Michael Fuchs ', 'thumbnail': r're:^https?://.*\.jpg$', }, 'skip': 'no longer available', }, { 'url': 'http://sr-mediathek.sr-online.de/index.php?seite=7&id=37682', 'info_dict': { 'id': '37682', 'ext': 'mp4', 'title': 'Love, Cakes and Rock\'n\'Roll', 'description': 'md5:18bf9763631c7d326c22603681e1123d', }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://sr-mediathek.de/index.php?seite=7&id=7480', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) if '>Der gew&uuml;nschte Beitrag ist leider nicht mehr verf&uuml;gbar.<' in webpage: raise ExtractorError('Video %s is no longer available' % video_id, expected=True) media_collection_url = self._search_regex( r'data-mediacollection-ardplayer="([^"]+)"', webpage, 'media collection url') info = self._extract_media_info(media_collection_url, webpage, video_id) info.update({ 'id': video_id, 'title': get_element_by_attribute('class', 'ardplayer-title', webpage), 'description': self._og_search_description(webpage), 'thumbnail': self._og_search_thumbnail(webpage), }) return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/gputechconf.py
youtube_dl/extractor/gputechconf.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class GPUTechConfIE(InfoExtractor): _VALID_URL = r'https?://on-demand\.gputechconf\.com/gtc/2015/video/S(?P<id>\d+)\.html' _TEST = { 'url': 'http://on-demand.gputechconf.com/gtc/2015/video/S5156.html', 'md5': 'a8862a00a0fd65b8b43acc5b8e33f798', 'info_dict': { 'id': '5156', 'ext': 'mp4', 'title': 'Coordinating More Than 3 Million CUDA Threads for Social Network Analysis', 'duration': 1219, } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) root_path = self._search_regex( r'var\s+rootPath\s*=\s*"([^"]+)', webpage, 'root path', default='http://evt.dispeak.com/nvidia/events/gtc15/') xml_file_id = self._search_regex( r'var\s+xmlFileId\s*=\s*"([^"]+)', webpage, 'xml file id') return { '_type': 'url_transparent', 'id': video_id, 'url': '%sxml/%s.xml' % (root_path, xml_file_id), 'ie_key': 'DigitallySpeaking', }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/lecture2go.py
youtube_dl/extractor/lecture2go.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( determine_ext, determine_protocol, parse_duration, int_or_none, ) class Lecture2GoIE(InfoExtractor): _VALID_URL = r'https?://lecture2go\.uni-hamburg\.de/veranstaltungen/-/v/(?P<id>\d+)' _TEST = { 'url': 'https://lecture2go.uni-hamburg.de/veranstaltungen/-/v/17473', 'md5': 'ac02b570883020d208d405d5a3fd2f7f', 'info_dict': { 'id': '17473', 'ext': 'mp4', 'title': '2 - Endliche Automaten und reguläre Sprachen', 'creator': 'Frank Heitmann', 'duration': 5220, }, 'params': { # m3u8 download 'skip_download': True, } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._html_search_regex(r'<em[^>]+class="title">(.+)</em>', webpage, 'title') formats = [] for url in set(re.findall(r'var\s+playerUri\d+\s*=\s*"([^"]+)"', webpage)): ext = determine_ext(url) protocol = determine_protocol({'url': url}) if ext == 'f4m': formats.extend(self._extract_f4m_formats(url, video_id, f4m_id='hds')) elif ext == 'm3u8': formats.extend(self._extract_m3u8_formats(url, video_id, ext='mp4', m3u8_id='hls')) else: if protocol == 'rtmp': continue # XXX: currently broken formats.append({ 'format_id': protocol, 'url': url, }) self._sort_formats(formats) creator = self._html_search_regex( r'<div[^>]+id="description">([^<]+)</div>', webpage, 'creator', fatal=False) duration = parse_duration(self._html_search_regex( r'Duration:\s*</em>\s*<em[^>]*>([^<]+)</em>', webpage, 'duration', fatal=False)) view_count = int_or_none(self._html_search_regex( r'Views:\s*</em>\s*<em[^>]+>(\d+)</em>', webpage, 'view count', fatal=False)) return { 'id': video_id, 'title': title, 'formats': formats, 'creator': creator, 'duration': duration, 'view_count': view_count, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/nexx.py
youtube_dl/extractor/nexx.py
# coding: utf-8 from __future__ import unicode_literals import hashlib import random import re import time from .common import InfoExtractor from ..compat import compat_str from ..utils import ( ExtractorError, int_or_none, parse_duration, try_get, urlencode_postdata, ) class NexxIE(InfoExtractor): _VALID_URL = r'''(?x) (?: https?://api\.nexx(?:\.cloud|cdn\.com)/v3/(?P<domain_id>\d+)/videos/byid/| nexx:(?:(?P<domain_id_s>\d+):)?| https?://arc\.nexx\.cloud/api/video/ ) (?P<id>\d+) ''' _TESTS = [{ # movie 'url': 'https://api.nexx.cloud/v3/748/videos/byid/128907', 'md5': '31899fd683de49ad46f4ee67e53e83fe', 'info_dict': { 'id': '128907', 'ext': 'mp4', 'title': 'Stiftung Warentest', 'alt_title': 'Wie ein Test abläuft', 'description': 'md5:d1ddb1ef63de721132abd38639cc2fd2', 'creator': 'SPIEGEL TV', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 2509, 'timestamp': 1384264416, 'upload_date': '20131112', }, }, { # episode 'url': 'https://api.nexx.cloud/v3/741/videos/byid/247858', 'info_dict': { 'id': '247858', 'ext': 'mp4', 'title': 'Return of the Golden Child (OV)', 'description': 'md5:5d969537509a92b733de21bae249dc63', 'release_year': 2017, 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 1397, 'timestamp': 1495033267, 'upload_date': '20170517', 'episode_number': 2, 'season_number': 2, }, 'params': { 'skip_download': True, }, 'skip': 'HTTP Error 404: Not Found', }, { # does not work via arc 'url': 'nexx:741:1269984', 'md5': 'c714b5b238b2958dc8d5642addba6886', 'info_dict': { 'id': '1269984', 'ext': 'mp4', 'title': '1 TAG ohne KLO... wortwörtlich! 😑', 'alt_title': '1 TAG ohne KLO... wortwörtlich! 
😑', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 607, 'timestamp': 1518614955, 'upload_date': '20180214', }, }, { # free cdn from http://www.spiegel.de/video/eifel-zoo-aufregung-um-ausgebrochene-raubtiere-video-99018031.html 'url': 'nexx:747:1533779', 'md5': '6bf6883912b82b7069fb86c2297e9893', 'info_dict': { 'id': '1533779', 'ext': 'mp4', 'title': 'Aufregung um ausgebrochene Raubtiere', 'alt_title': 'Eifel-Zoo', 'description': 'md5:f21375c91c74ad741dcb164c427999d2', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 111, 'timestamp': 1527874460, 'upload_date': '20180601', }, }, { 'url': 'https://api.nexxcdn.com/v3/748/videos/byid/128907', 'only_matching': True, }, { 'url': 'nexx:748:128907', 'only_matching': True, }, { 'url': 'nexx:128907', 'only_matching': True, }, { 'url': 'https://arc.nexx.cloud/api/video/128907.json', 'only_matching': True, }] @staticmethod def _extract_domain_id(webpage): mobj = re.search( r'<script\b[^>]+\bsrc=["\'](?:https?:)?//(?:require|arc)\.nexx(?:\.cloud|cdn\.com)/(?:sdk/)?(?P<id>\d+)', webpage) return mobj.group('id') if mobj else None @staticmethod def _extract_urls(webpage): # Reference: # 1. 
https://nx-s.akamaized.net/files/201510/44.pdf entries = [] # JavaScript Integration domain_id = NexxIE._extract_domain_id(webpage) if domain_id: for video_id in re.findall( r'(?is)onPLAYReady.+?_play\.(?:init|(?:control\.)?addPlayer)\s*\(.+?\s*,\s*["\']?(\d+)', webpage): entries.append( 'https://api.nexx.cloud/v3/%s/videos/byid/%s' % (domain_id, video_id)) # TODO: support more embed formats return entries @staticmethod def _extract_url(webpage): return NexxIE._extract_urls(webpage)[0] def _handle_error(self, response): status = int_or_none(try_get( response, lambda x: x['metadata']['status']) or 200) if 200 <= status < 300: return raise ExtractorError( '%s said: %s' % (self.IE_NAME, response['metadata']['errorhint']), expected=True) def _call_api(self, domain_id, path, video_id, data=None, headers={}): headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8' result = self._download_json( 'https://api.nexx.cloud/v3/%s/%s' % (domain_id, path), video_id, 'Downloading %s JSON' % path, data=urlencode_postdata(data), headers=headers) self._handle_error(result) return result['result'] def _extract_free_formats(self, video, video_id): stream_data = video['streamdata'] cdn = stream_data['cdnType'] assert cdn == 'free' hash = video['general']['hash'] ps = compat_str(stream_data['originalDomain']) if stream_data['applyFolderHierarchy'] == 1: s = ('%04d' % int(video_id))[::-1] ps += '/%s/%s' % (s[0:2], s[2:4]) ps += '/%s/%s_' % (video_id, hash) t = 'http://%s' + ps fd = stream_data['azureFileDistribution'].split(',') cdn_provider = stream_data['cdnProvider'] def p0(p): return '_%s' % p if stream_data['applyAzureStructure'] == 1 else '' formats = [] if cdn_provider == 'ak': t += ',' for i in fd: p = i.split(':') t += p[1] + p0(int(p[0])) + ',' t += '.mp4.csmil/master.%s' elif cdn_provider == 'ce': k = t.split('/') h = k.pop() http_base = t = '/'.join(k) http_base = http_base % stream_data['cdnPathHTTP'] t += 
'/asset.ism/manifest.%s?dcp_ver=aos4&videostream=' for i in fd: p = i.split(':') tbr = int(p[0]) filename = '%s%s%s.mp4' % (h, p[1], p0(tbr)) f = { 'url': http_base + '/' + filename, 'format_id': '%s-http-%d' % (cdn, tbr), 'tbr': tbr, } width_height = p[1].split('x') if len(width_height) == 2: f.update({ 'width': int_or_none(width_height[0]), 'height': int_or_none(width_height[1]), }) formats.append(f) a = filename + ':%s' % (tbr * 1000) t += a + ',' t = t[:-1] + '&audiostream=' + a.split(':')[0] else: assert False if cdn_provider == 'ce': formats.extend(self._extract_mpd_formats( t % (stream_data['cdnPathDASH'], 'mpd'), video_id, mpd_id='%s-dash' % cdn, fatal=False)) formats.extend(self._extract_m3u8_formats( t % (stream_data['cdnPathHLS'], 'm3u8'), video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='%s-hls' % cdn, fatal=False)) return formats def _extract_azure_formats(self, video, video_id): stream_data = video['streamdata'] cdn = stream_data['cdnType'] assert cdn == 'azure' azure_locator = stream_data['azureLocator'] def get_cdn_shield_base(shield_type='', static=False): for secure in ('', 's'): cdn_shield = stream_data.get('cdnShield%sHTTP%s' % (shield_type, secure.upper())) if cdn_shield: return 'http%s://%s' % (secure, cdn_shield) else: if 'fb' in stream_data['azureAccount']: prefix = 'df' if static else 'f' else: prefix = 'd' if static else 'p' account = int(stream_data['azureAccount'].replace('nexxplayplus', '').replace('nexxplayfb', '')) return 'http://nx-%s%02d.akamaized.net/' % (prefix, account) language = video['general'].get('language_raw') or '' azure_stream_base = get_cdn_shield_base() is_ml = ',' in language azure_manifest_url = '%s%s/%s_src%s.ism/Manifest' % ( azure_stream_base, azure_locator, video_id, ('_manifest' if is_ml else '')) + '%s' protection_token = try_get( video, lambda x: x['protectiondata']['token'], compat_str) if protection_token: azure_manifest_url += '?hdnts=%s' % protection_token formats = self._extract_m3u8_formats( 
azure_manifest_url % '(format=m3u8-aapl)', video_id, 'mp4', 'm3u8_native', m3u8_id='%s-hls' % cdn, fatal=False) formats.extend(self._extract_mpd_formats( azure_manifest_url % '(format=mpd-time-csf)', video_id, mpd_id='%s-dash' % cdn, fatal=False)) formats.extend(self._extract_ism_formats( azure_manifest_url % '', video_id, ism_id='%s-mss' % cdn, fatal=False)) azure_progressive_base = get_cdn_shield_base('Prog', True) azure_file_distribution = stream_data.get('azureFileDistribution') if azure_file_distribution: fds = azure_file_distribution.split(',') if fds: for fd in fds: ss = fd.split(':') if len(ss) == 2: tbr = int_or_none(ss[0]) if tbr: f = { 'url': '%s%s/%s_src_%s_%d.mp4' % ( azure_progressive_base, azure_locator, video_id, ss[1], tbr), 'format_id': '%s-http-%d' % (cdn, tbr), 'tbr': tbr, } width_height = ss[1].split('x') if len(width_height) == 2: f.update({ 'width': int_or_none(width_height[0]), 'height': int_or_none(width_height[1]), }) formats.append(f) return formats def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) domain_id = mobj.group('domain_id') or mobj.group('domain_id_s') video_id = mobj.group('id') video = None def find_video(result): if isinstance(result, dict): return result elif isinstance(result, list): vid = int(video_id) for v in result: if try_get(v, lambda x: x['general']['ID'], int) == vid: return v return None response = self._download_json( 'https://arc.nexx.cloud/api/video/%s.json' % video_id, video_id, fatal=False) if response and isinstance(response, dict): result = response.get('result') if result: video = find_video(result) # not all videos work via arc, e.g. 
nexx:741:1269984 if not video: # Reverse engineered from JS code (see getDeviceID function) device_id = '%d:%d:%d%d' % ( random.randint(1, 4), int(time.time()), random.randint(1e4, 99999), random.randint(1, 9)) result = self._call_api(domain_id, 'session/init', video_id, data={ 'nxp_devh': device_id, 'nxp_userh': '', 'precid': '0', 'playlicense': '0', 'screenx': '1920', 'screeny': '1080', 'playerversion': '6.0.00', 'gateway': 'html5', 'adGateway': '', 'explicitlanguage': 'en-US', 'addTextTemplates': '1', 'addDomainData': '1', 'addAdModel': '1', }, headers={ 'X-Request-Enable-Auth-Fallback': '1', }) cid = result['general']['cid'] # As described in [1] X-Request-Token generation algorithm is # as follows: # md5( operation + domain_id + domain_secret ) # where domain_secret is a static value that will be given by nexx.tv # as per [1]. Here is how this "secret" is generated (reversed # from _play.api.init function, search for clienttoken). So it's # actually not static and not that much of a secret. # 1. 
https://nexxtvstorage.blob.core.windows.net/files/201610/27.pdf secret = result['device']['clienttoken'][int(device_id[0]):] secret = secret[0:len(secret) - int(device_id[-1])] op = 'byid' # Reversed from JS code for _play.api.call function (search for # X-Request-Token) request_token = hashlib.md5( ''.join((op, domain_id, secret)).encode('utf-8')).hexdigest() result = self._call_api( domain_id, 'videos/%s/%s' % (op, video_id), video_id, data={ 'additionalfields': 'language,channel,actors,studio,licenseby,slug,subtitle,teaser,description', 'addInteractionOptions': '1', 'addStatusDetails': '1', 'addStreamDetails': '1', 'addCaptions': '1', 'addScenes': '1', 'addHotSpots': '1', 'addBumpers': '1', 'captionFormat': 'data', }, headers={ 'X-Request-CID': cid, 'X-Request-Token': request_token, }) video = find_video(result) general = video['general'] title = general['title'] cdn = video['streamdata']['cdnType'] if cdn == 'azure': formats = self._extract_azure_formats(video, video_id) elif cdn == 'free': formats = self._extract_free_formats(video, video_id) else: # TODO: reverse more cdns assert False self._sort_formats(formats) return { 'id': video_id, 'title': title, 'alt_title': general.get('subtitle'), 'description': general.get('description'), 'release_year': int_or_none(general.get('year')), 'creator': general.get('studio') or general.get('studio_adref'), 'thumbnail': try_get( video, lambda x: x['imagedata']['thumb'], compat_str), 'duration': parse_duration(general.get('runtime')), 'timestamp': int_or_none(general.get('uploaded')), 'episode_number': int_or_none(try_get( video, lambda x: x['episodedata']['episode'])), 'season_number': int_or_none(try_get( video, lambda x: x['episodedata']['season'])), 'formats': formats, } class NexxEmbedIE(InfoExtractor): _VALID_URL = r'https?://embed\.nexx(?:\.cloud|cdn\.com)/\d+/(?:video/)?(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'http://embed.nexx.cloud/748/KC1614647Z27Y7T?autoplay=1', 'md5': '16746bfc28c42049492385c989b26c4a', 
'info_dict': { 'id': '161464', 'ext': 'mp4', 'title': 'Nervenkitzel Achterbahn', 'alt_title': 'Karussellbauer in Deutschland', 'description': 'md5:ffe7b1cc59a01f585e0569949aef73cc', 'creator': 'SPIEGEL TV', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 2761, 'timestamp': 1394021479, 'upload_date': '20140305', }, 'params': { 'format': 'bestvideo', 'skip_download': True, }, }, { 'url': 'https://embed.nexx.cloud/11888/video/DSRTO7UVOX06S7', 'only_matching': True, }] @staticmethod def _extract_urls(webpage): # Reference: # 1. https://nx-s.akamaized.net/files/201510/44.pdf # iFrame Embed Integration return [mobj.group('url') for mobj in re.finditer( r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//embed\.nexx(?:\.cloud|cdn\.com)/\d+/(?:(?!\1).)+)\1', webpage)] def _real_extract(self, url): embed_id = self._match_id(url) webpage = self._download_webpage(url, embed_id) return self.url_result(NexxIE._extract_url(webpage), ie=NexxIE.ie_key())
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/kusi.py
youtube_dl/extractor/kusi.py
# coding: utf-8 from __future__ import unicode_literals import random import re from .common import InfoExtractor from ..compat import compat_urllib_parse_unquote_plus from ..utils import ( int_or_none, float_or_none, timeconvert, update_url_query, xpath_text, ) class KUSIIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?kusi\.com/(?P<path>story/.+|video\?clipId=(?P<clipId>\d+))' _TESTS = [{ 'url': 'http://www.kusi.com/story/32849881/turko-files-refused-to-help-it-aint-right', 'md5': '4e76ce8e53660ce9697d06c0ba6fc47d', 'info_dict': { 'id': '12689020', 'ext': 'mp4', 'title': "Turko Files: Refused to Help, It Ain't Right!", 'duration': 223.586, 'upload_date': '20160826', 'timestamp': 1472233118, 'thumbnail': r're:^https?://.*\.jpg$' }, }, { 'url': 'http://kusi.com/video?clipId=12203019', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) clip_id = mobj.group('clipId') video_id = clip_id or mobj.group('path') webpage = self._download_webpage(url, video_id) if clip_id is None: video_id = clip_id = self._html_search_regex( r'"clipId"\s*,\s*"(\d+)"', webpage, 'clip id') affiliate_id = self._search_regex( r'affiliateId\s*:\s*\'([^\']+)\'', webpage, 'affiliate id') # See __Packages/worldnow/model/GalleryModel.as of WNGallery.swf xml_url = update_url_query('http://www.kusi.com/build.asp', { 'buildtype': 'buildfeaturexmlrequest', 'featureType': 'Clip', 'featureid': clip_id, 'affiliateno': affiliate_id, 'clientgroupid': '1', 'rnd': int(round(random.random() * 1000000)), }) doc = self._download_xml(xml_url, video_id) video_title = xpath_text(doc, 'HEADLINE', fatal=True) duration = float_or_none(xpath_text(doc, 'DURATION'), scale=1000) description = xpath_text(doc, 'ABSTRACT') thumbnail = xpath_text(doc, './THUMBNAILIMAGE/FILENAME') creation_time = timeconvert(xpath_text(doc, 'rfc822creationdate')) quality_options = doc.find('{http://search.yahoo.com/mrss/}group').findall('{http://search.yahoo.com/mrss/}content') formats = [] for 
quality in quality_options: formats.append({ 'url': compat_urllib_parse_unquote_plus(quality.attrib['url']), 'height': int_or_none(quality.attrib.get('height')), 'width': int_or_none(quality.attrib.get('width')), 'vbr': float_or_none(quality.attrib.get('bitratebits'), scale=1000), }) self._sort_formats(formats) return { 'id': video_id, 'title': video_title, 'description': description, 'duration': duration, 'formats': formats, 'thumbnail': thumbnail, 'timestamp': creation_time, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/xxxymovies.py
youtube_dl/extractor/xxxymovies.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( parse_duration, int_or_none, ) class XXXYMoviesIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?xxxymovies\.com/videos/(?P<id>\d+)/(?P<display_id>[^/]+)' _TEST = { 'url': 'http://xxxymovies.com/videos/138669/ecstatic-orgasm-sofcore/', 'md5': '810b1bdbbffff89dd13bdb369fe7be4b', 'info_dict': { 'id': '138669', 'display_id': 'ecstatic-orgasm-sofcore', 'ext': 'mp4', 'title': 'Ecstatic Orgasm Sofcore', 'duration': 931, 'categories': list, 'view_count': int, 'like_count': int, 'dislike_count': int, 'age_limit': 18, } } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') display_id = mobj.group('display_id') webpage = self._download_webpage(url, display_id) video_url = self._search_regex( r"video_url\s*:\s*'([^']+)'", webpage, 'video URL') title = self._html_search_regex( [r'<div[^>]+\bclass="block_header"[^>]*>\s*<h1>([^<]+)<', r'<title>(.*?)\s*-\s*(?:XXXYMovies\.com|XXX\s+Movies)</title>'], webpage, 'title') thumbnail = self._search_regex( r"preview_url\s*:\s*'([^']+)'", webpage, 'thumbnail', fatal=False) categories = self._html_search_meta( 'keywords', webpage, 'categories', default='').split(',') duration = parse_duration(self._search_regex( r'<span>Duration:</span>\s*(\d+:\d+)', webpage, 'duration', fatal=False)) view_count = int_or_none(self._html_search_regex( r'<div class="video_views">\s*(\d+)', webpage, 'view count', fatal=False)) like_count = int_or_none(self._search_regex( r'>\s*Likes? 
<b>\((\d+)\)', webpage, 'like count', fatal=False)) dislike_count = int_or_none(self._search_regex( r'>\s*Dislike <b>\((\d+)\)</b>', webpage, 'dislike count', fatal=False)) age_limit = self._rta_search(webpage) return { 'id': video_id, 'display_id': display_id, 'url': video_url, 'title': title, 'thumbnail': thumbnail, 'categories': categories, 'duration': duration, 'view_count': view_count, 'like_count': like_count, 'dislike_count': dislike_count, 'age_limit': age_limit, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/livestream.py
youtube_dl/extractor/livestream.py
from __future__ import unicode_literals import re import itertools from .common import InfoExtractor from ..compat import ( compat_str, compat_urlparse, ) from ..utils import ( find_xpath_attr, xpath_attr, xpath_with_ns, xpath_text, orderedSet, update_url_query, int_or_none, float_or_none, parse_iso8601, determine_ext, ) class LivestreamIE(InfoExtractor): IE_NAME = 'livestream' _VALID_URL = r'https?://(?:new\.)?livestream\.com/(?:accounts/(?P<account_id>\d+)|(?P<account_name>[^/]+))/(?:events/(?P<event_id>\d+)|(?P<event_name>[^/]+))(?:/videos/(?P<id>\d+))?' _TESTS = [{ 'url': 'http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370', 'md5': '53274c76ba7754fb0e8d072716f2292b', 'info_dict': { 'id': '4719370', 'ext': 'mp4', 'title': 'Live from Webster Hall NYC', 'timestamp': 1350008072, 'upload_date': '20121012', 'duration': 5968.0, 'like_count': int, 'view_count': int, 'thumbnail': r're:^http://.*\.jpg$' } }, { 'url': 'http://new.livestream.com/tedx/cityenglish', 'info_dict': { 'title': 'TEDCity2.0 (English)', 'id': '2245590', }, 'playlist_mincount': 4, }, { 'url': 'http://new.livestream.com/chess24/tatasteelchess', 'info_dict': { 'title': 'Tata Steel Chess', 'id': '3705884', }, 'playlist_mincount': 60, }, { 'url': 'https://new.livestream.com/accounts/362/events/3557232/videos/67864563/player?autoPlay=false&height=360&mute=false&width=640', 'only_matching': True, }, { 'url': 'http://livestream.com/bsww/concacafbeachsoccercampeonato2015', 'only_matching': True, }] _API_URL_TEMPLATE = 'http://livestream.com/api/accounts/%s/events/%s' def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None): base_ele = find_xpath_attr( smil, self._xpath_ns('.//meta', namespace), 'name', 'httpBase') base = base_ele.get('content') if base_ele is not None else 'http://livestreamvod-f.akamaihd.net/' formats = [] video_nodes = smil.findall(self._xpath_ns('.//video', namespace)) for vn in video_nodes: tbr = 
int_or_none(vn.attrib.get('system-bitrate'), 1000) furl = ( update_url_query(compat_urlparse.urljoin(base, vn.attrib['src']), { 'v': '3.0.3', 'fp': 'WIN% 14,0,0,145', })) if 'clipBegin' in vn.attrib: furl += '&ssek=' + vn.attrib['clipBegin'] formats.append({ 'url': furl, 'format_id': 'smil_%d' % tbr, 'ext': 'flv', 'tbr': tbr, 'preference': -1000, }) return formats def _extract_video_info(self, video_data): video_id = compat_str(video_data['id']) FORMAT_KEYS = ( ('sd', 'progressive_url'), ('hd', 'progressive_url_hd'), ) formats = [] for format_id, key in FORMAT_KEYS: video_url = video_data.get(key) if video_url: ext = determine_ext(video_url) if ext == 'm3u8': continue bitrate = int_or_none(self._search_regex( r'(\d+)\.%s' % ext, video_url, 'bitrate', default=None)) formats.append({ 'url': video_url, 'format_id': format_id, 'tbr': bitrate, 'ext': ext, }) smil_url = video_data.get('smil_url') if smil_url: formats.extend(self._extract_smil_formats(smil_url, video_id, fatal=False)) m3u8_url = video_data.get('m3u8_url') if m3u8_url: formats.extend(self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) f4m_url = video_data.get('f4m_url') if f4m_url: formats.extend(self._extract_f4m_formats( f4m_url, video_id, f4m_id='hds', fatal=False)) self._sort_formats(formats) comments = [{ 'author_id': comment.get('author_id'), 'author': comment.get('author', {}).get('full_name'), 'id': comment.get('id'), 'text': comment['text'], 'timestamp': parse_iso8601(comment.get('created_at')), } for comment in video_data.get('comments', {}).get('data', [])] return { 'id': video_id, 'formats': formats, 'title': video_data['caption'], 'description': video_data.get('description'), 'thumbnail': video_data.get('thumbnail_url'), 'duration': float_or_none(video_data.get('duration'), 1000), 'timestamp': parse_iso8601(video_data.get('publish_at')), 'like_count': video_data.get('likes', {}).get('total'), 'comment_count': video_data.get('comments', 
{}).get('total'), 'view_count': video_data.get('views'), 'comments': comments, } def _extract_stream_info(self, stream_info): broadcast_id = compat_str(stream_info['broadcast_id']) is_live = stream_info.get('is_live') formats = [] smil_url = stream_info.get('play_url') if smil_url: formats.extend(self._extract_smil_formats(smil_url, broadcast_id)) m3u8_url = stream_info.get('m3u8_url') if m3u8_url: formats.extend(self._extract_m3u8_formats( m3u8_url, broadcast_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) rtsp_url = stream_info.get('rtsp_url') if rtsp_url: formats.append({ 'url': rtsp_url, 'format_id': 'rtsp', }) self._sort_formats(formats) return { 'id': broadcast_id, 'formats': formats, 'title': self._live_title(stream_info['stream_title']) if is_live else stream_info['stream_title'], 'thumbnail': stream_info.get('thumbnail_url'), 'is_live': is_live, } def _extract_event(self, event_data): event_id = compat_str(event_data['id']) account_id = compat_str(event_data['owner_account_id']) feed_root_url = self._API_URL_TEMPLATE % (account_id, event_id) + '/feed.json' stream_info = event_data.get('stream_info') if stream_info: return self._extract_stream_info(stream_info) last_video = None entries = [] for i in itertools.count(1): if last_video is None: info_url = feed_root_url else: info_url = '{root}?&id={id}&newer=-1&type=video'.format( root=feed_root_url, id=last_video) videos_info = self._download_json( info_url, event_id, 'Downloading page {0}'.format(i))['data'] videos_info = [v['data'] for v in videos_info if v['type'] == 'video'] if not videos_info: break for v in videos_info: v_id = compat_str(v['id']) entries.append(self.url_result( 'http://livestream.com/accounts/%s/events/%s/videos/%s' % (account_id, event_id, v_id), 'Livestream', v_id, v.get('caption'))) last_video = videos_info[-1]['id'] return self.playlist_result(entries, event_id, event_data['full_name']) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = 
mobj.group('id') event = mobj.group('event_id') or mobj.group('event_name') account = mobj.group('account_id') or mobj.group('account_name') api_url = self._API_URL_TEMPLATE % (account, event) if video_id: video_data = self._download_json( api_url + '/videos/%s' % video_id, video_id) return self._extract_video_info(video_data) else: event_data = self._download_json(api_url, video_id) return self._extract_event(event_data) # The original version of Livestream uses a different system class LivestreamOriginalIE(InfoExtractor): IE_NAME = 'livestream:original' _VALID_URL = r'''(?x)https?://original\.livestream\.com/ (?P<user>[^/\?#]+)(?:/(?P<type>video|folder) (?:(?:\?.*?Id=|/)(?P<id>.*?)(&|$))?)? ''' _TESTS = [{ 'url': 'http://original.livestream.com/dealbook/video?clipId=pla_8aa4a3f1-ba15-46a4-893b-902210e138fb', 'info_dict': { 'id': 'pla_8aa4a3f1-ba15-46a4-893b-902210e138fb', 'ext': 'mp4', 'title': 'Spark 1 (BitCoin) with Cameron Winklevoss & Tyler Winklevoss of Winklevoss Capital', 'duration': 771.301, 'view_count': int, }, }, { 'url': 'https://original.livestream.com/newplay/folder?dirId=a07bf706-d0e4-4e75-a747-b021d84f2fd3', 'info_dict': { 'id': 'a07bf706-d0e4-4e75-a747-b021d84f2fd3', }, 'playlist_mincount': 4, }, { # live stream 'url': 'http://original.livestream.com/znsbahamas', 'only_matching': True, }] def _extract_video_info(self, user, video_id): api_url = 'http://x%sx.api.channel.livestream.com/2.0/clipdetails?extendedInfo=true&id=%s' % (user, video_id) info = self._download_xml(api_url, video_id) item = info.find('channel').find('item') title = xpath_text(item, 'title') media_ns = {'media': 'http://search.yahoo.com/mrss'} thumbnail_url = xpath_attr( item, xpath_with_ns('media:thumbnail', media_ns), 'url') duration = float_or_none(xpath_attr( item, xpath_with_ns('media:content', media_ns), 'duration')) ls_ns = {'ls': 'http://api.channel.livestream.com/2.0'} view_count = int_or_none(xpath_text( item, xpath_with_ns('ls:viewsCount', ls_ns))) return { 'id': 
video_id, 'title': title, 'thumbnail': thumbnail_url, 'duration': duration, 'view_count': view_count, } def _extract_video_formats(self, video_data, video_id): formats = [] progressive_url = video_data.get('progressiveUrl') if progressive_url: formats.append({ 'url': progressive_url, 'format_id': 'http', }) m3u8_url = video_data.get('httpUrl') if m3u8_url: formats.extend(self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) rtsp_url = video_data.get('rtspUrl') if rtsp_url: formats.append({ 'url': rtsp_url, 'format_id': 'rtsp', }) self._sort_formats(formats) return formats def _extract_folder(self, url, folder_id): webpage = self._download_webpage(url, folder_id) paths = orderedSet(re.findall( r'''(?x)(?: <li\s+class="folder">\s*<a\s+href="| <a\s+href="(?=https?://livestre\.am/) )([^"]+)"''', webpage)) entries = [{ '_type': 'url', 'url': compat_urlparse.urljoin(url, p), } for p in paths] return self.playlist_result(entries, folder_id) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) user = mobj.group('user') url_type = mobj.group('type') content_id = mobj.group('id') if url_type == 'folder': return self._extract_folder(url, content_id) else: # this url is used on mobile devices stream_url = 'http://x%sx.api.channel.livestream.com/3.0/getstream.json' % user info = {} if content_id: stream_url += '?id=%s' % content_id info = self._extract_video_info(user, content_id) else: content_id = user webpage = self._download_webpage(url, content_id) info = { 'title': self._og_search_title(webpage), 'description': self._og_search_description(webpage), 'thumbnail': self._search_regex(r'channelLogo\.src\s*=\s*"([^"]+)"', webpage, 'thumbnail', None), } video_data = self._download_json(stream_url, content_id) is_live = video_data.get('isLive') info.update({ 'id': content_id, 'title': self._live_title(info['title']) if is_live else info['title'], 'formats': self._extract_video_formats(video_data, content_id), 'is_live': 
is_live, }) return info # The server doesn't support HEAD request, the generic extractor can't detect # the redirection class LivestreamShortenerIE(InfoExtractor): IE_NAME = 'livestream:shortener' IE_DESC = False # Do not list _VALID_URL = r'https?://livestre\.am/(?P<id>.+)' def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) id = mobj.group('id') webpage = self._download_webpage(url, id) return self.url_result(self._og_search_url(webpage))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/streamsb.py
youtube_dl/extractor/streamsb.py
# coding: utf-8 from __future__ import unicode_literals import binascii import random import re import string from .common import InfoExtractor from ..utils import urljoin, url_basename def to_ascii_hex(str1): return binascii.hexlify(str1.encode('utf-8')).decode('ascii') def generate_random_string(length): return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length)) class StreamsbIE(InfoExtractor): _DOMAINS = ('viewsb.com', ) _VALID_URL = r'https://(?P<domain>%s)/(?P<id>.+)' % '|'.join(_DOMAINS) _TEST = { 'url': 'https://viewsb.com/dxfvlu4qanjx', 'md5': '488d111a63415369bf90ea83adc8a325', 'info_dict': { 'id': 'dxfvlu4qanjx', 'ext': 'mp4', 'title': 'Sintel' } } def _real_extract(self, url): domain, video_id = re.match(self._VALID_URL, url).group('domain', 'id') webpage = self._download_webpage(url, video_id) iframe_rel_url = self._search_regex(r'''(?i)<iframe\b[^>]+\bsrc\s*=\s*('|")(?P<path>/.*\.html)\1''', webpage, 'iframe', group='path') iframe_url = urljoin('https://' + domain, iframe_rel_url) iframe_data = self._download_webpage(iframe_url, video_id) app_version = self._search_regex(r'''<script\b[^>]+\bsrc\s*=\s*["|'].*/app\.min\.(\d+)\.js''', iframe_data, 'app version', fatal=False) or '50' video_code = url_basename(iframe_url).rsplit('.')[0] length = 12 req = '||'.join((generate_random_string(length), video_code, generate_random_string(length), 'streamsb')) ereq = 'https://{0}/sources{1}/{2}'.format(domain, app_version, to_ascii_hex(req)) video_data = self._download_webpage(ereq, video_id, headers={ 'Referer': iframe_url, 'watchsb': 'sbstream', }) player_data = self._parse_json(video_data, video_id) title = player_data['stream_data']['title'] formats = self._extract_m3u8_formats(player_data['stream_data']['file'], video_id, ext='mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False) return { 'id': video_id, 'formats': formats, 'title': title, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/vk.py
youtube_dl/extractor/vk.py
# coding: utf-8 from __future__ import unicode_literals import collections import functools import re from .common import InfoExtractor from ..compat import compat_urlparse from ..utils import ( clean_html, ExtractorError, get_element_by_class, int_or_none, OnDemandPagedList, orderedSet, str_or_none, str_to_int, unescapeHTML, unified_timestamp, url_or_none, urlencode_postdata, ) from .dailymotion import DailymotionIE from .odnoklassniki import OdnoklassnikiIE from .pladform import PladformIE from .vimeo import VimeoIE from .youtube import YoutubeIE class VKBaseIE(InfoExtractor): _NETRC_MACHINE = 'vk' def _login(self): username, password = self._get_login_info() if username is None: return login_page, url_handle = self._download_webpage_handle( 'https://vk.com', None, 'Downloading login page') login_form = self._hidden_inputs(login_page) login_form.update({ 'email': username.encode('cp1251'), 'pass': password.encode('cp1251'), }) # vk serves two same remixlhk cookies in Set-Cookie header and expects # first one to be actually set self._apply_first_set_cookie_header(url_handle, 'remixlhk') login_page = self._download_webpage( 'https://login.vk.com/?act=login', None, note='Logging in', data=urlencode_postdata(login_form)) if re.search(r'onLoginFailed', login_page): raise ExtractorError( 'Unable to login, incorrect username and/or password', expected=True) def _real_initialize(self): self._login() def _download_payload(self, path, video_id, data, fatal=True): data['al'] = 1 code, payload = self._download_json( 'https://vk.com/%s.php' % path, video_id, data=urlencode_postdata(data), fatal=fatal, headers={'X-Requested-With': 'XMLHttpRequest'})['payload'] if code == '3': self.raise_login_required() elif code == '8': raise ExtractorError(clean_html(payload[0][1:-1]), expected=True) return payload class VKIE(VKBaseIE): IE_NAME = 'vk' IE_DESC = 'VK' _VALID_URL = r'''(?x) https?:// (?: (?: (?:(?:m|new)\.)?vk\.com/video_| (?:www\.)?daxab.com/ ) 
ext\.php\?(?P<embed_query>.*?\boid=(?P<oid>-?\d+).*?\bid=(?P<id>\d+).*)| (?: (?:(?:m|new)\.)?vk\.com/(?:.+?\?.*?z=)?video| (?:www\.)?daxab.com/embed/ ) (?P<videoid>-?\d+_\d+)(?:.*\blist=(?P<list_id>[\da-f]+))? ) ''' _TESTS = [ { 'url': 'http://vk.com/videos-77521?z=video-77521_162222515%2Fclub77521', 'md5': '7babad3b85ea2e91948005b1b8b0cb84', 'info_dict': { 'id': '-77521_162222515', 'ext': 'mp4', 'title': 'ProtivoGunz - Хуёвая песня', 'uploader': 're:(?:Noize MC|Alexander Ilyashenko).*', 'uploader_id': '-77521', 'duration': 195, 'timestamp': 1329049880, 'upload_date': '20120212', }, }, { 'url': 'http://vk.com/video205387401_165548505', 'info_dict': { 'id': '205387401_165548505', 'ext': 'mp4', 'title': 'No name', 'uploader': 'Tom Cruise', 'uploader_id': '205387401', 'duration': 9, 'timestamp': 1374364108, 'upload_date': '20130720', } }, { 'note': 'Embedded video', 'url': 'https://vk.com/video_ext.php?oid=-77521&id=162222515&hash=87b046504ccd8bfa', 'md5': '7babad3b85ea2e91948005b1b8b0cb84', 'info_dict': { 'id': '-77521_162222515', 'ext': 'mp4', 'uploader': 're:(?:Noize MC|Alexander Ilyashenko).*', 'title': 'ProtivoGunz - Хуёвая песня', 'duration': 195, 'upload_date': '20120212', 'timestamp': 1329049880, 'uploader_id': '-77521', }, }, { # VIDEO NOW REMOVED # please update if you find a video whose URL follows the same pattern 'url': 'http://vk.com/video-8871596_164049491', 'md5': 'a590bcaf3d543576c9bd162812387666', 'note': 'Only available for registered users', 'info_dict': { 'id': '-8871596_164049491', 'ext': 'mp4', 'uploader': 'Триллеры', 'title': '► Бойцовский клуб / Fight Club 1999 [HD 720]', 'duration': 8352, 'upload_date': '20121218', 'view_count': int, }, 'skip': 'Removed', }, { 'url': 'http://vk.com/hd_kino_mania?z=video-43215063_168067957%2F15c66b9b533119788d', 'info_dict': { 'id': '-43215063_168067957', 'ext': 'mp4', 'uploader': 'Bro Mazter', 'title': ' ', 'duration': 7291, 'upload_date': '20140328', 'uploader_id': '223413403', 'timestamp': 1396018030, }, 
'skip': 'Requires vk account credentials', }, { 'url': 'http://m.vk.com/video-43215063_169084319?list=125c627d1aa1cebb83&from=wall-43215063_2566540', 'md5': '0c45586baa71b7cb1d0784ee3f4e00a6', 'note': 'ivi.ru embed', 'info_dict': { 'id': '-43215063_169084319', 'ext': 'mp4', 'title': 'Книга Илая', 'duration': 6771, 'upload_date': '20140626', 'view_count': int, }, 'skip': 'Removed', }, { # video (removed?) only available with list id 'url': 'https://vk.com/video30481095_171201961?list=8764ae2d21f14088d4', 'md5': '091287af5402239a1051c37ec7b92913', 'info_dict': { 'id': '30481095_171201961', 'ext': 'mp4', 'title': 'ТюменцевВВ_09.07.2015', 'uploader': 'Anton Ivanov', 'duration': 109, 'upload_date': '20150709', 'view_count': int, }, 'skip': 'Removed', }, { # youtube embed 'url': 'https://vk.com/video276849682_170681728', 'info_dict': { 'id': 'V3K4mi0SYkc', 'ext': 'mp4', 'title': "DSWD Awards 'Children's Joy Foundation, Inc.' Certificate of Registration and License to Operate", 'description': 'md5:bf9c26cfa4acdfb146362682edd3827a', 'duration': 178, 'upload_date': '20130116', 'uploader': "Children's Joy Foundation Inc.", 'uploader_id': 'thecjf', 'view_count': int, }, }, { # dailymotion embed 'url': 'https://vk.com/video-37468416_456239855', 'info_dict': { 'id': 'k3lz2cmXyRuJQSjGHUv', 'ext': 'mp4', 'title': 'md5:d52606645c20b0ddbb21655adaa4f56f', 'description': 'md5:424b8e88cc873217f520e582ba28bb36', 'uploader': 'AniLibria.Tv', 'upload_date': '20160914', 'uploader_id': 'x1p5vl5', 'timestamp': 1473877246, }, 'params': { 'skip_download': True, }, }, { # video key is extra_data not url\d+ 'url': 'http://vk.com/video-110305615_171782105', 'md5': 'e13fcda136f99764872e739d13fac1d1', 'info_dict': { 'id': '-110305615_171782105', 'ext': 'mp4', 'title': 'S-Dance, репетиции к The way show', 'uploader': 'THE WAY SHOW | 17 апреля', 'uploader_id': '-110305615', 'timestamp': 1454859345, 'upload_date': '20160207', }, 'params': { 'skip_download': True, }, }, { # finished live stream, 
postlive_mp4 'url': 'https://vk.com/videos-387766?z=video-387766_456242764%2Fpl_-387766_-2', 'info_dict': { 'id': '-387766_456242764', 'ext': 'mp4', 'title': 'ИгроМир 2016 День 1 — Игромания Утром', 'uploader': 'Игромания', 'duration': 5239, # TODO: use act=show to extract view_count # 'view_count': int, 'upload_date': '20160929', 'uploader_id': '-387766', 'timestamp': 1475137527, }, 'params': { 'skip_download': True, }, }, { # live stream, hls and rtmp links, most likely already finished live # stream by the time you are reading this comment 'url': 'https://vk.com/video-140332_456239111', 'only_matching': True, }, { # removed video, just testing that we match the pattern 'url': 'http://vk.com/feed?z=video-43215063_166094326%2Fbb50cacd3177146d7a', 'only_matching': True, }, { # age restricted video, requires vk account credentials 'url': 'https://vk.com/video205387401_164765225', 'only_matching': True, }, { # pladform embed 'url': 'https://vk.com/video-76116461_171554880', 'only_matching': True, }, { 'url': 'http://new.vk.com/video205387401_165548505', 'only_matching': True, }, { # This video is no longer available, because its author has been blocked. 'url': 'https://vk.com/video-10639516_456240611', 'only_matching': True, }, { # The video is not available in your region. 'url': 'https://vk.com/video-51812607_171445436', 'only_matching': True, }] @staticmethod def _extract_sibnet_urls(webpage): # https://help.sibnet.ru/?sibnet_video_embed return [unescapeHTML(mobj.group('url')) for mobj in re.finditer( r'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//video\.sibnet\.ru/shell\.php\?.*?\bvideoid=\d+.*?)\1', webpage)] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('videoid') mv_data = {} if video_id: data = { 'act': 'show_inline', 'video': video_id, } # Some videos (removed?) 
can only be downloaded with list id specified list_id = mobj.group('list_id') if list_id: data['list'] = list_id payload = self._download_payload('al_video', video_id, data) info_page = payload[1] opts = payload[-1] mv_data = opts.get('mvData') or {} player = opts.get('player') or {} else: video_id = '%s_%s' % (mobj.group('oid'), mobj.group('id')) info_page = self._download_webpage( 'http://vk.com/video_ext.php?' + mobj.group('embed_query'), video_id) error_message = self._html_search_regex( [r'(?s)<!><div[^>]+class="video_layer_message"[^>]*>(.+?)</div>', r'(?s)<div[^>]+id="video_ext_msg"[^>]*>(.+?)</div>'], info_page, 'error message', default=None) if error_message: raise ExtractorError(error_message, expected=True) if re.search(r'<!>/login\.php\?.*\bact=security_check', info_page): raise ExtractorError( 'You are trying to log in from an unusual location. You should confirm ownership at vk.com to log in with this IP.', expected=True) ERROR_COPYRIGHT = 'Video %s has been removed from public access due to rightholder complaint.' ERRORS = { r'>Видеозапись .*? была изъята из публичного доступа в связи с обращением правообладателя.<': ERROR_COPYRIGHT, r'>The video .*? 
was removed from public access by request of the copyright holder.<': ERROR_COPYRIGHT, r'<!>Please log in or <': 'Video %s is only available for registered users, ' 'use --username and --password options to provide account credentials.', r'<!>Unknown error': 'Video %s does not exist.', r'<!>Видео временно недоступно': 'Video %s is temporarily unavailable.', r'<!>Access denied': 'Access denied to video %s.', r'<!>Видеозапись недоступна, так как её автор был заблокирован.': 'Video %s is no longer available, because its author has been blocked.', r'<!>This video is no longer available, because its author has been blocked.': 'Video %s is no longer available, because its author has been blocked.', r'<!>This video is no longer available, because it has been deleted.': 'Video %s is no longer available, because it has been deleted.', r'<!>The video .+? is not available in your region.': 'Video %s is not available in your region.', } for error_re, error_msg in ERRORS.items(): if re.search(error_re, info_page): raise ExtractorError(error_msg % video_id, expected=True) player = self._parse_json(self._search_regex( r'var\s+playerParams\s*=\s*({.+?})\s*;\s*\n', info_page, 'player params'), video_id) youtube_url = YoutubeIE._extract_url(info_page) if youtube_url: return self.url_result(youtube_url, YoutubeIE.ie_key()) vimeo_url = VimeoIE._extract_url(url, info_page) if vimeo_url is not None: return self.url_result(vimeo_url, VimeoIE.ie_key()) pladform_url = PladformIE._extract_url(info_page) if pladform_url: return self.url_result(pladform_url, PladformIE.ie_key()) m_rutube = re.search( r'\ssrc="((?:https?:)?//rutube\.ru\\?/(?:video|play)\\?/embed(?:.*?))\\?"', info_page) if m_rutube is not None: rutube_url = self._proto_relative_url( m_rutube.group(1).replace('\\', '')) return self.url_result(rutube_url) dailymotion_urls = DailymotionIE._extract_urls(info_page) if dailymotion_urls: return self.url_result(dailymotion_urls[0], DailymotionIE.ie_key()) odnoklassniki_url = 
OdnoklassnikiIE._extract_url(info_page) if odnoklassniki_url: return self.url_result(odnoklassniki_url, OdnoklassnikiIE.ie_key()) sibnet_urls = self._extract_sibnet_urls(info_page) if sibnet_urls: return self.url_result(sibnet_urls[0]) m_opts = re.search(r'(?s)var\s+opts\s*=\s*({.+?});', info_page) if m_opts: m_opts_url = re.search(r"url\s*:\s*'((?!/\b)[^']+)", m_opts.group(1)) if m_opts_url: opts_url = m_opts_url.group(1) if opts_url.startswith('//'): opts_url = 'http:' + opts_url return self.url_result(opts_url) data = player['params'][0] title = unescapeHTML(data['md_title']) # 2 = live # 3 = post live (finished live) is_live = data.get('live') == 2 if is_live: title = self._live_title(title) timestamp = unified_timestamp(self._html_search_regex( r'class=["\']mv_info_date[^>]+>([^<]+)(?:<|from)', info_page, 'upload date', default=None)) or int_or_none(data.get('date')) view_count = str_to_int(self._search_regex( r'class=["\']mv_views_count[^>]+>\s*([\d,.]+)', info_page, 'view count', default=None)) formats = [] for format_id, format_url in data.items(): format_url = url_or_none(format_url) if not format_url or not format_url.startswith(('http', '//', 'rtmp')): continue if (format_id.startswith(('url', 'cache')) or format_id in ('extra_data', 'live_mp4', 'postlive_mp4')): height = int_or_none(self._search_regex( r'^(?:url|cache)(\d+)', format_id, 'height', default=None)) formats.append({ 'format_id': format_id, 'url': format_url, 'height': height, }) elif format_id == 'hls': formats.extend(self._extract_m3u8_formats( format_url, video_id, 'mp4', 'm3u8_native', m3u8_id=format_id, fatal=False, live=is_live)) elif format_id == 'rtmp': formats.append({ 'format_id': format_id, 'url': format_url, 'ext': 'flv', }) self._sort_formats(formats) return { 'id': video_id, 'formats': formats, 'title': title, 'thumbnail': data.get('jpg'), 'uploader': data.get('md_author'), 'uploader_id': str_or_none(data.get('author_id') or mv_data.get('authorId')), 'duration': 
int_or_none(data.get('duration') or mv_data.get('duration')), 'timestamp': timestamp, 'view_count': view_count, 'like_count': int_or_none(mv_data.get('likes')), 'comment_count': int_or_none(mv_data.get('commcount')), 'is_live': is_live, } class VKUserVideosIE(VKBaseIE): IE_NAME = 'vk:uservideos' IE_DESC = "VK - User's Videos" _VALID_URL = r'https?://(?:(?:m|new)\.)?vk\.com/videos(?P<id>-?[0-9]+)(?!\?.*\bz=video)(?:[/?#&](?:.*?\bsection=(?P<section>\w+))?|$)' _TEMPLATE_URL = 'https://vk.com/videos' _TESTS = [{ 'url': 'https://vk.com/videos-767561', 'info_dict': { 'id': '-767561_all', }, 'playlist_mincount': 1150, }, { 'url': 'https://vk.com/videos-767561?section=uploaded', 'info_dict': { 'id': '-767561_uploaded', }, 'playlist_mincount': 425, }, { 'url': 'http://vk.com/videos205387401', 'only_matching': True, }, { 'url': 'http://vk.com/videos-77521', 'only_matching': True, }, { 'url': 'http://vk.com/videos-97664626?section=all', 'only_matching': True, }, { 'url': 'http://m.vk.com/videos205387401', 'only_matching': True, }, { 'url': 'http://new.vk.com/videos205387401', 'only_matching': True, }] _PAGE_SIZE = 1000 _VIDEO = collections.namedtuple('Video', ['owner_id', 'id']) def _fetch_page(self, page_id, section, page): l = self._download_payload('al_video', page_id, { 'act': 'load_videos_silent', 'offset': page * self._PAGE_SIZE, 'oid': page_id, 'section': section, })[0][section]['list'] for video in l: v = self._VIDEO._make(video[:2]) video_id = '%d_%d' % (v.owner_id, v.id) yield self.url_result( 'http://vk.com/video' + video_id, VKIE.ie_key(), video_id) def _real_extract(self, url): page_id, section = re.match(self._VALID_URL, url).groups() if not section: section = 'all' entries = OnDemandPagedList( functools.partial(self._fetch_page, page_id, section), self._PAGE_SIZE) return self.playlist_result(entries, '%s_%s' % (page_id, section)) class VKWallPostIE(VKBaseIE): IE_NAME = 'vk:wallpost' _VALID_URL = 
r'https?://(?:(?:(?:(?:m|new)\.)?vk\.com/(?:[^?]+\?.*\bw=)?wall(?P<id>-?\d+_\d+)))' _TESTS = [{ # public page URL, audio playlist 'url': 'https://vk.com/bs.official?w=wall-23538238_35', 'info_dict': { 'id': '-23538238_35', 'title': 'Black Shadow - Wall post -23538238_35', 'description': 'md5:3f84b9c4f9ef499731cf1ced9998cc0c', }, 'playlist': [{ 'md5': '5ba93864ec5b85f7ce19a9af4af080f6', 'info_dict': { 'id': '135220665_111806521', 'ext': 'mp4', 'title': 'Black Shadow - Слепое Верование', 'duration': 370, 'uploader': 'Black Shadow', 'artist': 'Black Shadow', 'track': 'Слепое Верование', }, }, { 'md5': '4cc7e804579122b17ea95af7834c9233', 'info_dict': { 'id': '135220665_111802303', 'ext': 'mp4', 'title': 'Black Shadow - Война - Негасимое Бездны Пламя!', 'duration': 423, 'uploader': 'Black Shadow', 'artist': 'Black Shadow', 'track': 'Война - Негасимое Бездны Пламя!', }, }], 'params': { 'skip_download': True, 'usenetrc': True, }, 'skip': 'Requires vk account credentials', }, { # single YouTube embed, no leading - 'url': 'https://vk.com/wall85155021_6319', 'info_dict': { 'id': '85155021_6319', 'title': 'Сергей Горбунов - Wall post 85155021_6319', }, 'playlist_count': 1, 'params': { 'usenetrc': True, }, 'skip': 'Requires vk account credentials', }, { # wall page URL 'url': 'https://vk.com/wall-23538238_35', 'only_matching': True, }, { # mobile wall page URL 'url': 'https://m.vk.com/wall-23538238_35', 'only_matching': True, }] _BASE64_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN0PQRSTUVWXYZO123456789+/=' _AUDIO = collections.namedtuple('Audio', ['id', 'owner_id', 'url', 'title', 'performer', 'duration', 'album_id', 'unk', 'author_link', 'lyrics', 'flags', 'context', 'extra', 'hashes', 'cover_url', 'ads']) def _decode(self, enc): dec = '' e = n = 0 for c in enc: r = self._BASE64_CHARS.index(c) cond = n % 4 e = 64 * e + r if cond else r n += 1 if cond: dec += chr(255 & e >> (-2 * n & 6)) return dec def _unmask_url(self, mask_url, vk_id): if 'audio_api_unavailable' in 
mask_url: extra = mask_url.split('?extra=')[1].split('#') func, base = self._decode(extra[1]).split(chr(11)) mask_url = list(self._decode(extra[0])) url_len = len(mask_url) indexes = [None] * url_len index = int(base) ^ vk_id for n in range(url_len - 1, -1, -1): index = (url_len * (n + 1) ^ index + n) % url_len indexes[n] = index for n in range(1, url_len): c = mask_url[n] index = indexes[url_len - 1 - n] mask_url[n] = mask_url[index] mask_url[index] = c mask_url = ''.join(mask_url) return mask_url def _real_extract(self, url): post_id = self._match_id(url) webpage = self._download_payload('wkview', post_id, { 'act': 'show', 'w': 'wall' + post_id, })[1] description = clean_html(get_element_by_class('wall_post_text', webpage)) uploader = clean_html(get_element_by_class('author', webpage)) entries = [] for audio in re.findall(r'data-audio="([^"]+)', webpage): audio = self._parse_json(unescapeHTML(audio), post_id) a = self._AUDIO._make(audio[:16]) if not a.url: continue title = unescapeHTML(a.title) performer = unescapeHTML(a.performer) entries.append({ 'id': '%s_%s' % (a.owner_id, a.id), 'url': self._unmask_url(a.url, a.ads['vk_id']), 'title': '%s - %s' % (performer, title) if performer else title, 'thumbnails': [{'url': c_url} for c_url in a.cover_url.split(',')] if a.cover_url else None, 'duration': int_or_none(a.duration), 'uploader': uploader, 'artist': performer, 'track': title, 'ext': 'mp4', 'protocol': 'm3u8', }) for video in re.finditer( r'<a[^>]+href=(["\'])(?P<url>/video(?:-?[\d_]+).*?)\1', webpage): entries.append(self.url_result( compat_urlparse.urljoin(url, video.group('url')), VKIE.ie_key())) title = 'Wall post %s' % post_id return self.playlist_result( orderedSet(entries), post_id, '%s - %s' % (uploader, title) if uploader else title, description)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/weiqitv.py
youtube_dl/extractor/weiqitv.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class WeiqiTVIE(InfoExtractor): IE_DESC = 'WQTV' _VALID_URL = r'https?://(?:www\.)?weiqitv\.com/index/video_play\?videoId=(?P<id>[A-Za-z0-9]+)' _TESTS = [{ 'url': 'http://www.weiqitv.com/index/video_play?videoId=53c744f09874f0e76a8b46f3', 'md5': '26450599afd64c513bc77030ad15db44', 'info_dict': { 'id': '53c744f09874f0e76a8b46f3', 'ext': 'mp4', 'title': '2013年度盘点', }, }, { 'url': 'http://www.weiqitv.com/index/video_play?videoId=567379a2d4c36cca518b4569', 'info_dict': { 'id': '567379a2d4c36cca518b4569', 'ext': 'mp4', 'title': '民国围棋史', }, }, { 'url': 'http://www.weiqitv.com/index/video_play?videoId=5430220a9874f088658b4567', 'info_dict': { 'id': '5430220a9874f088658b4567', 'ext': 'mp4', 'title': '二路托过的手段和运用', }, }] def _real_extract(self, url): media_id = self._match_id(url) page = self._download_webpage(url, media_id) info_json_str = self._search_regex( r'var\s+video\s*=\s*(.+});', page, 'info json str') info_json = self._parse_json(info_json_str, media_id) letvcloud_url = self._search_regex( r'var\s+letvurl\s*=\s*"([^"]+)', page, 'letvcloud url') return { '_type': 'url_transparent', 'ie_key': 'LetvCloud', 'url': letvcloud_url, 'title': info_json['name'], 'id': media_id, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/stitcher.py
youtube_dl/extractor/stitcher.py
from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_str from ..utils import ( clean_html, clean_podcast_url, ExtractorError, int_or_none, str_or_none, try_get, url_or_none, ) class StitcherBaseIE(InfoExtractor): _VALID_URL_BASE = r'https?://(?:www\.)?stitcher\.com/(?:podcast|show)/' def _call_api(self, path, video_id, query): resp = self._download_json( 'https://api.prod.stitcher.com/' + path, video_id, query=query) error_massage = try_get(resp, lambda x: x['errors'][0]['message']) if error_massage: raise ExtractorError(error_massage, expected=True) return resp['data'] def _extract_description(self, data): return clean_html(data.get('html_description') or data.get('description')) def _extract_audio_url(self, episode): return url_or_none(episode.get('audio_url') or episode.get('guid')) def _extract_show_info(self, show): return { 'thumbnail': show.get('image_base_url'), 'series': show.get('title'), } def _extract_episode(self, episode, audio_url, show_info): info = { 'id': compat_str(episode['id']), 'display_id': episode.get('slug'), 'title': episode['title'].strip(), 'description': self._extract_description(episode), 'duration': int_or_none(episode.get('duration')), 'url': clean_podcast_url(audio_url), 'vcodec': 'none', 'timestamp': int_or_none(episode.get('date_published')), 'season_number': int_or_none(episode.get('season')), 'season_id': str_or_none(episode.get('season_id')), } info.update(show_info) return info class StitcherIE(StitcherBaseIE): _VALID_URL = StitcherBaseIE._VALID_URL_BASE + r'(?:[^/]+/)+e(?:pisode)?/(?:[^/#?&]+-)?(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.stitcher.com/podcast/the-talking-machines/e/40789481?autoplay=true', 'md5': 'e9635098e0da10b21a0e2b85585530f6', 'info_dict': { 'id': '40789481', 'ext': 'mp3', 'title': 'Machine Learning Mastery and Cancer Clusters', 'description': 'md5:547adb4081864be114ae3831b4c2b42f', 'duration': 1604, 'thumbnail': r're:^https?://.*\.jpg', 'upload_date': 
'20151008', 'timestamp': 1444285800, 'series': 'Talking Machines', }, }, { 'url': 'http://www.stitcher.com/podcast/panoply/vulture-tv/e/the-rare-hourlong-comedy-plus-40846275?autoplay=true', 'info_dict': { 'id': '40846275', 'display_id': 'the-rare-hourlong-comedy-plus', 'ext': 'mp3', 'title': "The CW's 'Crazy Ex-Girlfriend'", 'description': 'md5:04f1e2f98eb3f5cbb094cea0f9e19b17', 'duration': 2235, 'thumbnail': r're:^https?://.*\.jpg', }, 'params': { 'skip_download': True, }, 'skip': 'Page Not Found', }, { # escaped title 'url': 'http://www.stitcher.com/podcast/marketplace-on-stitcher/e/40910226?autoplay=true', 'only_matching': True, }, { 'url': 'http://www.stitcher.com/podcast/panoply/getting-in/e/episode-2a-how-many-extracurriculars-should-i-have-40876278?autoplay=true', 'only_matching': True, }, { 'url': 'https://www.stitcher.com/show/threedom/episode/circles-on-a-stick-200212584', 'only_matching': True, }] def _real_extract(self, url): audio_id = self._match_id(url) data = self._call_api( 'shows/episodes', audio_id, {'episode_ids': audio_id}) episode = data['episodes'][0] audio_url = self._extract_audio_url(episode) if not audio_url: self.raise_login_required() show = try_get(data, lambda x: x['shows'][0], dict) or {} return self._extract_episode( episode, audio_url, self._extract_show_info(show)) class StitcherShowIE(StitcherBaseIE): _VALID_URL = StitcherBaseIE._VALID_URL_BASE + r'(?P<id>[^/#?&]+)/?(?:[?#&]|$)' _TESTS = [{ 'url': 'http://www.stitcher.com/podcast/the-talking-machines', 'info_dict': { 'id': 'the-talking-machines', 'title': 'Talking Machines', 'description': 'md5:831f0995e40f26c10231af39cf1ebf0b', }, 'playlist_mincount': 106, }, { 'url': 'https://www.stitcher.com/show/the-talking-machines', 'only_matching': True, }] def _real_extract(self, url): show_slug = self._match_id(url) data = self._call_api( 'search/show/%s/allEpisodes' % show_slug, show_slug, {'count': 10000}) show = try_get(data, lambda x: x['shows'][0], dict) or {} show_info = 
self._extract_show_info(show) entries = [] for episode in (data.get('episodes') or []): audio_url = self._extract_audio_url(episode) if not audio_url: continue entries.append(self._extract_episode(episode, audio_url, show_info)) return self.playlist_result( entries, show_slug, show.get('title'), self._extract_description(show))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tunein.py
youtube_dl/extractor/tunein.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ExtractorError from ..compat import compat_urlparse class TuneInBaseIE(InfoExtractor): _API_BASE_URL = 'http://tunein.com/tuner/tune/' @staticmethod def _extract_urls(webpage): return re.findall( r'<iframe[^>]+src=["\'](?P<url>(?:https?://)?tunein\.com/embed/player/[pst]\d+)', webpage) def _real_extract(self, url): content_id = self._match_id(url) content_info = self._download_json( self._API_BASE_URL + self._API_URL_QUERY % content_id, content_id, note='Downloading JSON metadata') title = content_info['Title'] thumbnail = content_info.get('Logo') location = content_info.get('Location') streams_url = content_info.get('StreamUrl') if not streams_url: raise ExtractorError('No downloadable streams found', expected=True) if not streams_url.startswith('http://'): streams_url = compat_urlparse.urljoin(url, streams_url) streams = self._download_json( streams_url, content_id, note='Downloading stream data', transform_source=lambda s: re.sub(r'^\s*\((.*)\);\s*$', r'\1', s))['Streams'] is_live = None formats = [] for stream in streams: if stream.get('Type') == 'Live': is_live = True reliability = stream.get('Reliability') format_note = ( 'Reliability: %d%%' % reliability if reliability is not None else None) formats.append({ 'preference': ( 0 if reliability is None or reliability > 90 else 1), 'abr': stream.get('Bandwidth'), 'ext': stream.get('MediaType').lower(), 'acodec': stream.get('MediaType'), 'vcodec': 'none', 'url': stream.get('Url'), 'source_preference': reliability, 'format_note': format_note, }) self._sort_formats(formats) return { 'id': content_id, 'title': self._live_title(title) if is_live else title, 'formats': formats, 'thumbnail': thumbnail, 'location': location, 'is_live': is_live, } class TuneInClipIE(TuneInBaseIE): IE_NAME = 'tunein:clip' _VALID_URL = r'https?://(?:www\.)?tunein\.com/station/.*?audioClipId\=(?P<id>\d+)' _API_URL_QUERY = 
'?tuneType=AudioClip&audioclipId=%s' _TESTS = [{ 'url': 'http://tunein.com/station/?stationId=246119&audioClipId=816', 'md5': '99f00d772db70efc804385c6b47f4e77', 'info_dict': { 'id': '816', 'title': '32m', 'ext': 'mp3', }, }] class TuneInStationIE(TuneInBaseIE): IE_NAME = 'tunein:station' _VALID_URL = r'https?://(?:www\.)?tunein\.com/(?:radio/.*?-s|station/.*?StationId=|embed/player/s)(?P<id>\d+)' _API_URL_QUERY = '?tuneType=Station&stationId=%s' @classmethod def suitable(cls, url): return False if TuneInClipIE.suitable(url) else super(TuneInStationIE, cls).suitable(url) _TESTS = [{ 'url': 'http://tunein.com/radio/Jazz24-885-s34682/', 'info_dict': { 'id': '34682', 'title': 'Jazz 24 on 88.5 Jazz24 - KPLU-HD2', 'ext': 'mp3', 'location': 'Tacoma, WA', }, 'params': { 'skip_download': True, # live stream }, }, { 'url': 'http://tunein.com/embed/player/s6404/', 'only_matching': True, }] class TuneInProgramIE(TuneInBaseIE): IE_NAME = 'tunein:program' _VALID_URL = r'https?://(?:www\.)?tunein\.com/(?:radio/.*?-p|program/.*?ProgramId=|embed/player/p)(?P<id>\d+)' _API_URL_QUERY = '?tuneType=Program&programId=%s' _TESTS = [{ 'url': 'http://tunein.com/radio/Jazz-24-p2506/', 'info_dict': { 'id': '2506', 'title': 'Jazz 24 on 91.3 WUKY-HD3', 'ext': 'mp3', 'location': 'Lexington, KY', }, 'params': { 'skip_download': True, # live stream }, }, { 'url': 'http://tunein.com/embed/player/p191660/', 'only_matching': True, }] class TuneInTopicIE(TuneInBaseIE): IE_NAME = 'tunein:topic' _VALID_URL = r'https?://(?:www\.)?tunein\.com/(?:topic/.*?TopicId=|embed/player/t)(?P<id>\d+)' _API_URL_QUERY = '?tuneType=Topic&topicId=%s' _TESTS = [{ 'url': 'http://tunein.com/topic/?TopicId=101830576', 'md5': 'c31a39e6f988d188252eae7af0ef09c9', 'info_dict': { 'id': '101830576', 'title': 'Votez pour moi du 29 octobre 2015 (29/10/15)', 'ext': 'mp3', 'location': 'Belgium', }, }, { 'url': 'http://tunein.com/embed/player/t101830576/', 'only_matching': True, }] class TuneInShortenerIE(InfoExtractor): IE_NAME = 
'tunein:shortener' IE_DESC = False # Do not list _VALID_URL = r'https?://tun\.in/(?P<id>[A-Za-z0-9]+)' _TEST = { # test redirection 'url': 'http://tun.in/ser7s', 'info_dict': { 'id': '34682', 'title': 'Jazz 24 on 88.5 Jazz24 - KPLU-HD2', 'ext': 'mp3', 'location': 'Tacoma, WA', }, 'params': { 'skip_download': True, # live stream }, } def _real_extract(self, url): redirect_id = self._match_id(url) # The server doesn't support HEAD requests urlh = self._request_webpage( url, redirect_id, note='Downloading redirect page') url = urlh.geturl() self.to_screen('Following redirect: %s' % url) return self.url_result(url)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/franceculture.py
youtube_dl/extractor/franceculture.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( determine_ext, extract_attributes, int_or_none, ) class FranceCultureIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?franceculture\.fr/emissions/(?:[^/]+/)*(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'http://www.franceculture.fr/emissions/carnet-nomade/rendez-vous-au-pays-des-geeks', 'info_dict': { 'id': 'rendez-vous-au-pays-des-geeks', 'display_id': 'rendez-vous-au-pays-des-geeks', 'ext': 'mp3', 'title': 'Rendez-vous au pays des geeks', 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20140301', 'timestamp': 1393700400, 'vcodec': 'none', } }, { # no thumbnail 'url': 'https://www.franceculture.fr/emissions/la-recherche-montre-en-main/la-recherche-montre-en-main-du-mercredi-10-octobre-2018', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) video_data = extract_attributes(self._search_regex( r'''(?sx) (?: </h1>| <div[^>]+class="[^"]*?(?:title-zone-diffusion|heading-zone-(?:wrapper|player-button))[^"]*?"[^>]*> ).*? 
(<button[^>]+data-(?:url|asset-source)="[^"]+"[^>]+>) ''', webpage, 'video data')) video_url = video_data.get('data-url') or video_data['data-asset-source'] title = video_data.get('data-asset-title') or video_data.get('data-diffusion-title') or self._og_search_title(webpage) description = self._html_search_regex( r'(?s)<div[^>]+class="intro"[^>]*>.*?<h2>(.+?)</h2>', webpage, 'description', default=None) thumbnail = self._search_regex( r'(?s)<figure[^>]+itemtype="https://schema.org/ImageObject"[^>]*>.*?<img[^>]+(?:data-dejavu-)?src="([^"]+)"', webpage, 'thumbnail', default=None) uploader = self._html_search_regex( r'(?s)<span class="author">(.*?)</span>', webpage, 'uploader', default=None) ext = determine_ext(video_url.lower()) return { 'id': display_id, 'display_id': display_id, 'url': video_url, 'title': title, 'description': description, 'thumbnail': thumbnail, 'ext': ext, 'vcodec': 'none' if ext == 'mp3' else None, 'uploader': uploader, 'timestamp': int_or_none(video_data.get('data-start-time')) or int_or_none(video_data.get('data-asset-created-date')), 'duration': int_or_none(video_data.get('data-duration')), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/mangomolo.py
youtube_dl/extractor/mangomolo.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_b64decode, compat_urllib_parse_unquote, ) from ..utils import int_or_none class MangomoloBaseIE(InfoExtractor): _BASE_REGEX = r'https?://(?:admin\.mangomolo\.com/analytics/index\.php/customers/embed/|player\.mangomolo\.com/v1/)' def _get_real_id(self, page_id): return page_id def _real_extract(self, url): page_id = self._get_real_id(self._match_id(url)) webpage = self._download_webpage( 'https://player.mangomolo.com/v1/%s?%s' % (self._TYPE, url.split('?')[1]), page_id) hidden_inputs = self._hidden_inputs(webpage) m3u8_entry_protocol = 'm3u8' if self._IS_LIVE else 'm3u8_native' format_url = self._html_search_regex( [ r'(?:file|src)\s*:\s*"(https?://[^"]+?/playlist\.m3u8)', r'<a[^>]+href="(rtsp://[^"]+)"' ], webpage, 'format url') formats = self._extract_wowza_formats( format_url, page_id, m3u8_entry_protocol, ['smil']) self._sort_formats(formats) return { 'id': page_id, 'title': self._live_title(page_id) if self._IS_LIVE else page_id, 'uploader_id': hidden_inputs.get('userid'), 'duration': int_or_none(hidden_inputs.get('duration')), 'is_live': self._IS_LIVE, 'formats': formats, } class MangomoloVideoIE(MangomoloBaseIE): _TYPE = 'video' IE_NAME = 'mangomolo:' + _TYPE _VALID_URL = MangomoloBaseIE._BASE_REGEX + r'video\?.*?\bid=(?P<id>\d+)' _IS_LIVE = False class MangomoloLiveIE(MangomoloBaseIE): _TYPE = 'live' IE_NAME = 'mangomolo:' + _TYPE _VALID_URL = MangomoloBaseIE._BASE_REGEX + r'(live|index)\?.*?\bchannelid=(?P<id>(?:[A-Za-z0-9+/=]|%2B|%2F|%3D)+)' _IS_LIVE = True def _get_real_id(self, page_id): return compat_b64decode(compat_urllib_parse_unquote(page_id)).decode()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/sverigesradio.py
youtube_dl/extractor/sverigesradio.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, str_or_none, ) class SverigesRadioBaseIE(InfoExtractor): _BASE_URL = 'https://sverigesradio.se/sida/playerajax/' _QUALITIES = ['low', 'medium', 'high'] _EXT_TO_CODEC_MAP = { 'mp3': 'mp3', 'm4a': 'aac', } _CODING_FORMAT_TO_ABR_MAP = { 5: 128, 11: 192, 12: 32, 13: 96, } def _real_extract(self, url): audio_id = self._match_id(url) query = { 'id': audio_id, 'type': self._AUDIO_TYPE, } item = self._download_json( self._BASE_URL + 'audiometadata', audio_id, 'Downloading audio JSON metadata', query=query)['items'][0] title = item['subtitle'] query['format'] = 'iis' urls = [] formats = [] for quality in self._QUALITIES: query['quality'] = quality audio_url_data = self._download_json( self._BASE_URL + 'getaudiourl', audio_id, 'Downloading %s format JSON metadata' % quality, fatal=False, query=query) or {} audio_url = audio_url_data.get('audioUrl') if not audio_url or audio_url in urls: continue urls.append(audio_url) ext = determine_ext(audio_url) coding_format = audio_url_data.get('codingFormat') abr = int_or_none(self._search_regex( r'_a(\d+)\.m4a', audio_url, 'audio bitrate', default=None)) or self._CODING_FORMAT_TO_ABR_MAP.get(coding_format) formats.append({ 'abr': abr, 'acodec': self._EXT_TO_CODEC_MAP.get(ext), 'ext': ext, 'format_id': str_or_none(coding_format), 'vcodec': 'none', 'url': audio_url, }) self._sort_formats(formats) return { 'id': audio_id, 'title': title, 'formats': formats, 'series': item.get('title'), 'duration': int_or_none(item.get('duration')), 'thumbnail': item.get('displayimageurl'), 'description': item.get('description'), } class SverigesRadioPublicationIE(SverigesRadioBaseIE): IE_NAME = 'sverigesradio:publication' _VALID_URL = r'https?://(?:www\.)?sverigesradio\.se/sida/(?:artikel|gruppsida)\.aspx\?.*?\bartikel=(?P<id>[0-9]+)' _TESTS = [{ 'url': 
'https://sverigesradio.se/sida/artikel.aspx?programid=83&artikel=7038546', 'md5': '6a4917e1923fccb080e5a206a5afa542', 'info_dict': { 'id': '7038546', 'ext': 'm4a', 'duration': 132, 'series': 'Nyheter (Ekot)', 'title': 'Esa Teittinen: Sanningen har inte kommit fram', 'description': 'md5:daf7ce66a8f0a53d5465a5984d3839df', 'thumbnail': r're:^https?://.*\.jpg', }, }, { 'url': 'https://sverigesradio.se/sida/gruppsida.aspx?programid=3304&grupp=6247&artikel=7146887', 'only_matching': True, }] _AUDIO_TYPE = 'publication' class SverigesRadioEpisodeIE(SverigesRadioBaseIE): IE_NAME = 'sverigesradio:episode' _VALID_URL = r'https?://(?:www\.)?sverigesradio\.se/(?:sida/)?avsnitt/(?P<id>[0-9]+)' _TEST = { 'url': 'https://sverigesradio.se/avsnitt/1140922?programid=1300', 'md5': '20dc4d8db24228f846be390b0c59a07c', 'info_dict': { 'id': '1140922', 'ext': 'mp3', 'duration': 3307, 'series': 'Konflikt', 'title': 'Metoo och valen', 'description': 'md5:fcb5c1f667f00badcc702b196f10a27e', 'thumbnail': r're:^https?://.*\.jpg', } } _AUDIO_TYPE = 'episode'
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/chilloutzone.py
youtube_dl/extractor/chilloutzone.py
from __future__ import unicode_literals import re import json from .common import InfoExtractor from .youtube import YoutubeIE from ..compat import compat_b64decode from ..utils import ( clean_html, ExtractorError ) class ChilloutzoneIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?chilloutzone\.net/video/(?P<id>[\w|-]+)\.html' _TESTS = [{ 'url': 'http://www.chilloutzone.net/video/enemene-meck-alle-katzen-weg.html', 'md5': 'a76f3457e813ea0037e5244f509e66d1', 'info_dict': { 'id': 'enemene-meck-alle-katzen-weg', 'ext': 'mp4', 'title': 'Enemene Meck - Alle Katzen weg', 'description': 'Ist das der Umkehrschluss des Niesenden Panda-Babys?', }, }, { 'note': 'Video hosted at YouTube', 'url': 'http://www.chilloutzone.net/video/eine-sekunde-bevor.html', 'info_dict': { 'id': '1YVQaAgHyRU', 'ext': 'mp4', 'title': '16 Photos Taken 1 Second Before Disaster', 'description': 'md5:58a8fcf6a459fe0a08f54140f0ad1814', 'uploader': 'BuzzFeedVideo', 'uploader_id': 'BuzzFeedVideo', 'upload_date': '20131105', }, }, { 'note': 'Video hosted at Vimeo', 'url': 'http://www.chilloutzone.net/video/icon-blending.html', 'md5': '2645c678b8dc4fefcc0e1b60db18dac1', 'info_dict': { 'id': '85523671', 'ext': 'mp4', 'title': 'The Sunday Times - Icons', 'description': 're:(?s)^Watch the making of - makingoficons.com.{300,}', 'uploader': 'Us', 'uploader_id': 'usfilms', 'upload_date': '20140131' }, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) base64_video_info = self._html_search_regex( r'var cozVidData = "(.+?)";', webpage, 'video data') decoded_video_info = compat_b64decode(base64_video_info).decode('utf-8') video_info_dict = json.loads(decoded_video_info) # get video information from dict video_url = video_info_dict['mediaUrl'] description = clean_html(video_info_dict.get('description')) title = video_info_dict['title'] native_platform = video_info_dict['nativePlatform'] native_video_id = 
video_info_dict['nativeVideoId'] source_priority = video_info_dict['sourcePriority'] # If nativePlatform is None a fallback mechanism is used (i.e. youtube embed) if native_platform is None: youtube_url = YoutubeIE._extract_url(webpage) if youtube_url: return self.url_result(youtube_url, ie=YoutubeIE.ie_key()) # Non Fallback: Decide to use native source (e.g. youtube or vimeo) or # the own CDN if source_priority == 'native': if native_platform == 'youtube': return self.url_result(native_video_id, ie='Youtube') if native_platform == 'vimeo': return self.url_result( 'http://vimeo.com/' + native_video_id, ie='Vimeo') if not video_url: raise ExtractorError('No video found') return { 'id': video_id, 'url': video_url, 'ext': 'mp4', 'title': title, 'description': description, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/ciscolive.py
youtube_dl/extractor/ciscolive.py
# coding: utf-8 from __future__ import unicode_literals import itertools from .common import InfoExtractor from ..compat import ( compat_parse_qs, compat_urllib_parse_urlparse, ) from ..utils import ( clean_html, float_or_none, int_or_none, try_get, urlencode_postdata, ) class CiscoLiveBaseIE(InfoExtractor): # These appear to be constant across all Cisco Live presentations # and are not tied to any user session or event RAINFOCUS_API_URL = 'https://events.rainfocus.com/api/%s' RAINFOCUS_API_PROFILE_ID = 'Na3vqYdAlJFSxhYTYQGuMbpafMqftalz' RAINFOCUS_WIDGET_ID = 'n6l4Lo05R8fiy3RpUBm447dZN8uNWoye' BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/5647924234001/SyK2FdqjM_default/index.html?videoId=%s' HEADERS = { 'Origin': 'https://ciscolive.cisco.com', 'rfApiProfileId': RAINFOCUS_API_PROFILE_ID, 'rfWidgetId': RAINFOCUS_WIDGET_ID, } def _call_api(self, ep, rf_id, query, referrer, note=None): headers = self.HEADERS.copy() headers['Referer'] = referrer return self._download_json( self.RAINFOCUS_API_URL % ep, rf_id, note=note, data=urlencode_postdata(query), headers=headers) def _parse_rf_item(self, rf_item): event_name = rf_item.get('eventName') title = rf_item['title'] description = clean_html(rf_item.get('abstract')) presenter_name = try_get(rf_item, lambda x: x['participants'][0]['fullName']) bc_id = rf_item['videos'][0]['url'] bc_url = self.BRIGHTCOVE_URL_TEMPLATE % bc_id duration = float_or_none(try_get(rf_item, lambda x: x['times'][0]['length'])) location = try_get(rf_item, lambda x: x['times'][0]['room']) if duration: duration = duration * 60 return { '_type': 'url_transparent', 'url': bc_url, 'ie_key': 'BrightcoveNew', 'title': title, 'description': description, 'duration': duration, 'creator': presenter_name, 'location': location, 'series': event_name, } class CiscoLiveSessionIE(CiscoLiveBaseIE): _VALID_URL = r'https?://(?:www\.)?ciscolive(?:\.cisco)?\.com/[^#]*#/session/(?P<id>[^/?&]+)' _TESTS = [{ 'url': 
'https://ciscolive.cisco.com/on-demand-library/?#/session/1423353499155001FoSs', 'md5': 'c98acf395ed9c9f766941c70f5352e22', 'info_dict': { 'id': '5803694304001', 'ext': 'mp4', 'title': '13 Smart Automations to Monitor Your Cisco IOS Network', 'description': 'md5:ec4a436019e09a918dec17714803f7cc', 'timestamp': 1530305395, 'upload_date': '20180629', 'uploader_id': '5647924234001', 'location': '16B Mezz.', }, }, { 'url': 'https://www.ciscolive.com/global/on-demand-library.html?search.event=ciscoliveemea2019#/session/15361595531500013WOU', 'only_matching': True, }, { 'url': 'https://www.ciscolive.com/global/on-demand-library.html?#/session/1490051371645001kNaS', 'only_matching': True, }] def _real_extract(self, url): rf_id = self._match_id(url) rf_result = self._call_api('session', rf_id, {'id': rf_id}, url) return self._parse_rf_item(rf_result['items'][0]) class CiscoLiveSearchIE(CiscoLiveBaseIE): _VALID_URL = r'https?://(?:www\.)?ciscolive(?:\.cisco)?\.com/(?:global/)?on-demand-library(?:\.html|/)' _TESTS = [{ 'url': 'https://ciscolive.cisco.com/on-demand-library/?search.event=ciscoliveus2018&search.technicallevel=scpsSkillLevel_aintroductory&search.focus=scpsSessionFocus_designAndDeployment#/', 'info_dict': { 'title': 'Search query', }, 'playlist_count': 5, }, { 'url': 'https://ciscolive.cisco.com/on-demand-library/?search.technology=scpsTechnology_applicationDevelopment&search.technology=scpsTechnology_ipv6&search.focus=scpsSessionFocus_troubleshootingTroubleshooting#/', 'only_matching': True, }, { 'url': 'https://www.ciscolive.com/global/on-demand-library.html?search.technicallevel=scpsSkillLevel_aintroductory&search.event=ciscoliveemea2019&search.technology=scpsTechnology_dataCenter&search.focus=scpsSessionFocus_bestPractices#/', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if CiscoLiveSessionIE.suitable(url) else super(CiscoLiveSearchIE, cls).suitable(url) @staticmethod def _check_bc_id_exists(rf_item): return 
int_or_none(try_get(rf_item, lambda x: x['videos'][0]['url'])) is not None def _entries(self, query, url): query['size'] = 50 query['from'] = 0 for page_num in itertools.count(1): results = self._call_api( 'search', None, query, url, 'Downloading search JSON page %d' % page_num) sl = try_get(results, lambda x: x['sectionList'][0], dict) if sl: results = sl items = results.get('items') if not items or not isinstance(items, list): break for item in items: if not isinstance(item, dict): continue if not self._check_bc_id_exists(item): continue yield self._parse_rf_item(item) size = int_or_none(results.get('size')) if size is not None: query['size'] = size total = int_or_none(results.get('total')) if total is not None and query['from'] + query['size'] > total: break query['from'] += query['size'] def _real_extract(self, url): query = compat_parse_qs(compat_urllib_parse_urlparse(url).query) query['type'] = 'session' return self.playlist_result( self._entries(query, url), playlist_title='Search query')
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/medici.py
youtube_dl/extractor/medici.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( unified_strdate, update_url_query, urlencode_postdata, ) class MediciIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?medici\.tv/#!/(?P<id>[^?#&]+)' _TEST = { 'url': 'http://www.medici.tv/#!/daniel-harding-frans-helmerson-verbier-festival-music-camp', 'md5': '004c21bb0a57248085b6ff3fec72719d', 'info_dict': { 'id': '3059', 'ext': 'flv', 'title': 'Daniel Harding conducts the Verbier Festival Music Camp \u2013 With Frans Helmerson', 'description': 'md5:322a1e952bafb725174fd8c1a8212f58', 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20170408', }, } def _real_extract(self, url): video_id = self._match_id(url) # Sets csrftoken cookie self._download_webpage(url, video_id) MEDICI_URL = 'http://www.medici.tv/' data = self._download_json( MEDICI_URL, video_id, data=urlencode_postdata({ 'json': 'true', 'page': '/%s' % video_id, 'timezone_offset': -420, }), headers={ 'X-CSRFToken': self._get_cookies(url)['csrftoken'].value, 'X-Requested-With': 'XMLHttpRequest', 'Referer': MEDICI_URL, 'Content-Type': 'application/x-www-form-urlencoded', }) video = data['video']['videos']['video1'] title = video.get('nom') or data['title'] video_id = video.get('id') or video_id formats = self._extract_f4m_formats( update_url_query(video['url_akamai'], { 'hdcore': '3.1.0', 'plugin=aasp': '3.1.0.43.124', }), video_id, f4m_id='hds') description = data.get('meta_description') thumbnail = video.get('url_thumbnail') or data.get('main_image') upload_date = unified_strdate(data['video'].get('date')) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'upload_date': upload_date, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/ufctv.py
youtube_dl/extractor/ufctv.py
# coding: utf-8 from __future__ import unicode_literals from .imggaming import ImgGamingBaseIE class UFCTVIE(ImgGamingBaseIE): _VALID_URL = ImgGamingBaseIE._VALID_URL_TEMPL % r'(?:(?:app|www)\.)?(?:ufc\.tv|(?:ufc)?fightpass\.com)|ufcfightpass\.img(?:dge|gaming)\.com' _NETRC_MACHINE = 'ufctv' _REALM = 'ufc' class UFCArabiaIE(ImgGamingBaseIE): _VALID_URL = ImgGamingBaseIE._VALID_URL_TEMPL % r'(?:(?:app|www)\.)?ufcarabia\.(?:ae|com)' _NETRC_MACHINE = 'ufcarabia' _REALM = 'admufc'
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/skylinewebcams.py
youtube_dl/extractor/skylinewebcams.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class SkylineWebcamsIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?skylinewebcams\.com/[^/]+/webcam/(?:[^/]+/)+(?P<id>[^/]+)\.html' _TEST = { 'url': 'https://www.skylinewebcams.com/it/webcam/italia/lazio/roma/scalinata-piazza-di-spagna-barcaccia.html', 'info_dict': { 'id': 'scalinata-piazza-di-spagna-barcaccia', 'ext': 'mp4', 'title': 're:^Live Webcam Scalinata di Piazza di Spagna - La Barcaccia [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'description': 'Roma, veduta sulla Scalinata di Piazza di Spagna e sulla Barcaccia', 'is_live': True, }, 'params': { 'skip_download': True, } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) stream_url = self._search_regex( r'(?:url|source)\s*:\s*(["\'])(?P<url>(?:https?:)?//.+?\.m3u8.*?)\1', webpage, 'stream url', group='url') title = self._og_search_title(webpage) description = self._og_search_description(webpage) return { 'id': video_id, 'url': stream_url, 'ext': 'mp4', 'title': self._live_title(title), 'description': description, 'is_live': True, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/hrti.py
youtube_dl/extractor/hrti.py
# coding: utf-8 from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..compat import compat_HTTPError from ..utils import ( clean_html, ExtractorError, int_or_none, parse_age_limit, sanitized_Request, try_get, ) class HRTiBaseIE(InfoExtractor): """ Base Information Extractor for Croatian Radiotelevision video on demand site https://hrti.hrt.hr Reverse engineered from the JavaScript app in app.min.js """ _NETRC_MACHINE = 'hrti' _APP_LANGUAGE = 'hr' _APP_VERSION = '1.1' _APP_PUBLICATION_ID = 'all_in_one' _API_URL = 'http://clientapi.hrt.hr/client_api.php/config/identify/format/json' def _initialize_api(self): init_data = { 'application_publication_id': self._APP_PUBLICATION_ID } uuid = self._download_json( self._API_URL, None, note='Downloading uuid', errnote='Unable to download uuid', data=json.dumps(init_data).encode('utf-8'))['uuid'] app_data = { 'uuid': uuid, 'application_publication_id': self._APP_PUBLICATION_ID, 'application_version': self._APP_VERSION } req = sanitized_Request(self._API_URL, data=json.dumps(app_data).encode('utf-8')) req.get_method = lambda: 'PUT' resources = self._download_json( req, None, note='Downloading session information', errnote='Unable to download session information') self._session_id = resources['session_id'] modules = resources['modules'] self._search_url = modules['vod_catalog']['resources']['search']['uri'].format( language=self._APP_LANGUAGE, application_id=self._APP_PUBLICATION_ID) self._login_url = (modules['user']['resources']['login']['uri'] + '/format/json').format(session_id=self._session_id) self._logout_url = modules['user']['resources']['logout']['uri'] def _login(self): username, password = self._get_login_info() # TODO: figure out authentication with cookies if username is None or password is None: self.raise_login_required() auth_data = { 'username': username, 'password': password, } try: auth_info = self._download_json( self._login_url, None, note='Logging in', 
errnote='Unable to log in', data=json.dumps(auth_data).encode('utf-8')) except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 406: auth_info = self._parse_json(e.cause.read().encode('utf-8'), None) else: raise error_message = auth_info.get('error', {}).get('message') if error_message: raise ExtractorError( '%s said: %s' % (self.IE_NAME, error_message), expected=True) self._token = auth_info['secure_streaming_token'] def _real_initialize(self): self._initialize_api() self._login() class HRTiIE(HRTiBaseIE): _VALID_URL = r'''(?x) (?: hrti:(?P<short_id>[0-9]+)| https?:// hrti\.hrt\.hr/(?:\#/)?video/show/(?P<id>[0-9]+)/(?P<display_id>[^/]+)? ) ''' _TESTS = [{ 'url': 'https://hrti.hrt.hr/#/video/show/2181385/republika-dokumentarna-serija-16-hd', 'info_dict': { 'id': '2181385', 'display_id': 'republika-dokumentarna-serija-16-hd', 'ext': 'mp4', 'title': 'REPUBLIKA, dokumentarna serija (1/6) (HD)', 'description': 'md5:48af85f620e8e0e1df4096270568544f', 'duration': 2922, 'view_count': int, 'average_rating': int, 'episode_number': int, 'season_number': int, 'age_limit': 12, }, 'skip': 'Requires account credentials', }, { 'url': 'https://hrti.hrt.hr/#/video/show/2181385/', 'only_matching': True, }, { 'url': 'hrti:2181385', 'only_matching': True, }, { 'url': 'https://hrti.hrt.hr/video/show/3873068/cuvar-dvorca-dramska-serija-14', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('short_id') or mobj.group('id') display_id = mobj.group('display_id') or video_id video = self._download_json( '%s/video_id/%s/format/json' % (self._search_url, video_id), display_id, 'Downloading video metadata JSON')['video'][0] title_info = video['title'] title = title_info['title_long'] movie = video['video_assets']['movie'][0] m3u8_url = movie['url'].format(TOKEN=self._token) formats = self._extract_m3u8_formats( m3u8_url, display_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls') 
self._sort_formats(formats) description = clean_html(title_info.get('summary_long')) age_limit = parse_age_limit(video.get('parental_control', {}).get('rating')) view_count = int_or_none(video.get('views')) average_rating = int_or_none(video.get('user_rating')) duration = int_or_none(movie.get('duration')) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': description, 'duration': duration, 'view_count': view_count, 'average_rating': average_rating, 'age_limit': age_limit, 'formats': formats, } class HRTiPlaylistIE(HRTiBaseIE): _VALID_URL = r'https?://hrti\.hrt\.hr/(?:#/)?video/list/category/(?P<id>[0-9]+)/(?P<display_id>[^/]+)?' _TESTS = [{ 'url': 'https://hrti.hrt.hr/#/video/list/category/212/ekumena', 'info_dict': { 'id': '212', 'title': 'ekumena', }, 'playlist_mincount': 8, 'skip': 'Requires account credentials', }, { 'url': 'https://hrti.hrt.hr/#/video/list/category/212/', 'only_matching': True, }, { 'url': 'https://hrti.hrt.hr/video/list/category/212/ekumena', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) category_id = mobj.group('id') display_id = mobj.group('display_id') or category_id response = self._download_json( '%s/category_id/%s/format/json' % (self._search_url, category_id), display_id, 'Downloading video metadata JSON') video_ids = try_get( response, lambda x: x['video_listings'][0]['alternatives'][0]['list'], list) or [video['id'] for video in response.get('videos', []) if video.get('id')] entries = [self.url_result('hrti:%s' % video_id) for video_id in video_ids] return self.playlist_result(entries, category_id, display_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/cctv.py
youtube_dl/extractor/cctv.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_str from ..utils import ( float_or_none, try_get, unified_timestamp, ) class CCTVIE(InfoExtractor): IE_DESC = '央视网' _VALID_URL = r'https?://(?:(?:[^/]+)\.(?:cntv|cctv)\.(?:com|cn)|(?:www\.)?ncpa-classic\.com)/(?:[^/]+/)*?(?P<id>[^/?#&]+?)(?:/index)?(?:\.s?html|[?#&]|$)' _TESTS = [{ # fo.addVariable("videoCenterId","id") 'url': 'http://sports.cntv.cn/2016/02/12/ARTIaBRxv4rTT1yWf1frW2wi160212.shtml', 'md5': 'd61ec00a493e09da810bf406a078f691', 'info_dict': { 'id': '5ecdbeab623f4973b40ff25f18b174e8', 'ext': 'mp4', 'title': '[NBA]二少联手砍下46分 雷霆主场击败鹈鹕(快讯)', 'description': 'md5:7e14a5328dc5eb3d1cd6afbbe0574e95', 'duration': 98, 'uploader': 'songjunjie', 'timestamp': 1455279956, 'upload_date': '20160212', }, }, { # var guid = "id" 'url': 'http://tv.cctv.com/2016/02/05/VIDEUS7apq3lKrHG9Dncm03B160205.shtml', 'info_dict': { 'id': 'efc5d49e5b3b4ab2b34f3a502b73d3ae', 'ext': 'mp4', 'title': '[赛车]“车王”舒马赫恢复情况成谜(快讯)', 'description': '2月4日,蒙特泽莫罗透露了关于“车王”舒马赫恢复情况,但情况是否属实遭到了质疑。', 'duration': 37, 'uploader': 'shujun', 'timestamp': 1454677291, 'upload_date': '20160205', }, 'params': { 'skip_download': True, }, }, { # changePlayer('id') 'url': 'http://english.cntv.cn/special/four_comprehensives/index.shtml', 'info_dict': { 'id': '4bb9bb4db7a6471ba85fdeda5af0381e', 'ext': 'mp4', 'title': 'NHnews008 ANNUAL POLITICAL SEASON', 'description': 'Four Comprehensives', 'duration': 60, 'uploader': 'zhangyunlei', 'timestamp': 1425385521, 'upload_date': '20150303', }, 'params': { 'skip_download': True, }, }, { # loadvideo('id') 'url': 'http://cctv.cntv.cn/lm/tvseries_russian/yilugesanghua/index.shtml', 'info_dict': { 'id': 'b15f009ff45c43968b9af583fc2e04b2', 'ext': 'mp4', 'title': 'Путь,усыпанный космеями Серия 1', 'description': 'Путь, усыпанный космеями', 'duration': 2645, 'uploader': 'renxue', 'timestamp': 1477479241, 'upload_date': '20161026', }, 'params': { 
'skip_download': True, }, }, { # var initMyAray = 'id' 'url': 'http://www.ncpa-classic.com/2013/05/22/VIDE1369219508996867.shtml', 'info_dict': { 'id': 'a194cfa7f18c426b823d876668325946', 'ext': 'mp4', 'title': '小泽征尔音乐塾 音乐梦想无国界', 'duration': 2173, 'timestamp': 1369248264, 'upload_date': '20130522', }, 'params': { 'skip_download': True, }, }, { # var ids = ["id"] 'url': 'http://www.ncpa-classic.com/clt/more/416/index.shtml', 'info_dict': { 'id': 'a8606119a4884588a79d81c02abecc16', 'ext': 'mp3', 'title': '来自维也纳的新年贺礼', 'description': 'md5:f13764ae8dd484e84dd4b39d5bcba2a7', 'duration': 1578, 'uploader': 'djy', 'timestamp': 1482942419, 'upload_date': '20161228', }, 'params': { 'skip_download': True, }, 'expected_warnings': ['Failed to download m3u8 information'], }, { 'url': 'http://ent.cntv.cn/2016/01/18/ARTIjprSSJH8DryTVr5Bx8Wb160118.shtml', 'only_matching': True, }, { 'url': 'http://tv.cntv.cn/video/C39296/e0210d949f113ddfb38d31f00a4e5c44', 'only_matching': True, }, { 'url': 'http://english.cntv.cn/2016/09/03/VIDEhnkB5y9AgHyIEVphCEz1160903.shtml', 'only_matching': True, }, { 'url': 'http://tv.cctv.com/2016/09/07/VIDE5C1FnlX5bUywlrjhxXOV160907.shtml', 'only_matching': True, }, { 'url': 'http://tv.cntv.cn/video/C39296/95cfac44cabd3ddc4a9438780a4e5c44', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) video_id = self._search_regex( [r'var\s+guid\s*=\s*["\']([\da-fA-F]+)', r'videoCenterId["\']\s*,\s*["\']([\da-fA-F]+)', r'changePlayer\s*\(\s*["\']([\da-fA-F]+)', r'load[Vv]ideo\s*\(\s*["\']([\da-fA-F]+)', r'var\s+initMyAray\s*=\s*["\']([\da-fA-F]+)', r'var\s+ids\s*=\s*\[["\']([\da-fA-F]+)'], webpage, 'video id') data = self._download_json( 'http://vdn.apps.cntv.cn/api/getHttpVideoInfo.do', video_id, query={ 'pid': video_id, 'url': url, 'idl': 32, 'idlr': 32, 'modifyed': 'false', }) title = data['title'] formats = [] video = data.get('video') if isinstance(video, dict): for quality, 
chapters_key in enumerate(('lowChapters', 'chapters')): video_url = try_get( video, lambda x: x[chapters_key][0]['url'], compat_str) if video_url: formats.append({ 'url': video_url, 'format_id': 'http', 'quality': quality, 'preference': -1, }) hls_url = try_get(data, lambda x: x['hls_url'], compat_str) if hls_url: hls_url = re.sub(r'maxbr=\d+&?', '', hls_url) formats.extend(self._extract_m3u8_formats( hls_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) self._sort_formats(formats) uploader = data.get('editer_name') description = self._html_search_meta( 'description', webpage, default=None) timestamp = unified_timestamp(data.get('f_pgmtime')) duration = float_or_none(try_get(video, lambda x: x['totalLength'])) return { 'id': video_id, 'title': title, 'description': description, 'uploader': uploader, 'timestamp': timestamp, 'duration': duration, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/sixplay.py
youtube_dl/extractor/sixplay.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_parse_qs, compat_str, compat_urllib_parse_urlparse, ) from ..utils import ( determine_ext, int_or_none, try_get, qualities, ) class SixPlayIE(InfoExtractor): IE_NAME = '6play' _VALID_URL = r'(?:6play:|https?://(?:www\.)?(?P<domain>6play\.fr|rtlplay\.be|play\.rtl\.hr|rtlmost\.hu)/.+?-c_)(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://www.6play.fr/minute-par-minute-p_9533/le-but-qui-a-marque-lhistoire-du-football-francais-c_12041051', 'md5': '31fcd112637baa0c2ab92c4fcd8baf27', 'info_dict': { 'id': '12041051', 'ext': 'mp4', 'title': 'Le but qui a marqué l\'histoire du football français !', 'description': 'md5:b59e7e841d646ef1eb42a7868eb6a851', }, }, { 'url': 'https://www.rtlplay.be/rtl-info-13h-p_8551/les-titres-du-rtlinfo-13h-c_12045869', 'only_matching': True, }, { 'url': 'https://play.rtl.hr/pj-masks-p_9455/epizoda-34-sezona-1-catboyevo-cudo-na-dva-kotaca-c_11984989', 'only_matching': True, }, { 'url': 'https://www.rtlmost.hu/megtorve-p_14167/megtorve-6-resz-c_12397787', 'only_matching': True, }] def _real_extract(self, url): domain, video_id = re.search(self._VALID_URL, url).groups() service, consumer_name = { '6play.fr': ('6play', 'm6web'), 'rtlplay.be': ('rtlbe_rtl_play', 'rtlbe'), 'play.rtl.hr': ('rtlhr_rtl_play', 'rtlhr'), 'rtlmost.hu': ('rtlhu_rtl_most', 'rtlhu'), }.get(domain, ('6play', 'm6web')) data = self._download_json( 'https://pc.middleware.6play.fr/6play/v2/platforms/m6group_web/services/%s/videos/clip_%s' % (service, video_id), video_id, headers={ 'x-customer-name': consumer_name }, query={ 'csa': 5, 'with': 'clips', }) clip_data = data['clips'][0] title = clip_data['title'] urls = [] quality_key = qualities(['lq', 'sd', 'hq', 'hd']) formats = [] subtitles = {} assets = clip_data.get('assets') or [] for asset in assets: asset_url = asset.get('full_physical_path') protocol = asset.get('protocol') if not asset_url or ((protocol 
== 'primetime' or asset.get('type') == 'usp_hlsfp_h264') and not ('_drmnp.ism/' in asset_url or '_unpnp.ism/' in asset_url)) or asset_url in urls: continue urls.append(asset_url) container = asset.get('video_container') ext = determine_ext(asset_url) if protocol == 'http_subtitle' or ext == 'vtt': subtitles.setdefault('fr', []).append({'url': asset_url}) continue if container == 'm3u8' or ext == 'm3u8': if protocol == 'usp': if compat_parse_qs(compat_urllib_parse_urlparse(asset_url).query).get('token', [None])[0]: urlh = self._request_webpage( asset_url, video_id, fatal=False, headers=self.geo_verification_headers()) if not urlh: continue asset_url = urlh.geturl() asset_url = asset_url.replace('_drmnp.ism/', '_unpnp.ism/') for i in range(3, 0, -1): asset_url = asset_url = asset_url.replace('_sd1/', '_sd%d/' % i) m3u8_formats = self._extract_m3u8_formats( asset_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) formats.extend(m3u8_formats) formats.extend(self._extract_mpd_formats( asset_url.replace('.m3u8', '.mpd'), video_id, mpd_id='dash', fatal=False)) if m3u8_formats: break else: formats.extend(self._extract_m3u8_formats( asset_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) elif container == 'mp4' or ext == 'mp4': quality = asset.get('video_quality') formats.append({ 'url': asset_url, 'format_id': quality, 'quality': quality_key(quality), 'ext': ext, }) self._sort_formats(formats) def get(getter): for src in (data, clip_data): v = try_get(src, getter, compat_str) if v: return v return { 'id': video_id, 'title': title, 'description': get(lambda x: x['description']), 'duration': int_or_none(clip_data.get('duration')), 'series': get(lambda x: x['program']['title']), 'formats': formats, 'subtitles': subtitles, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/slideslive.py
youtube_dl/extractor/slideslive.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( bool_or_none, smuggle_url, try_get, url_or_none, ) class SlidesLiveIE(InfoExtractor): _VALID_URL = r'https?://slideslive\.com/(?P<id>[0-9]+)' _TESTS = [{ # video_service_name = YOUTUBE 'url': 'https://slideslive.com/38902413/gcc-ia16-backend', 'md5': 'b29fcd6c6952d0c79c5079b0e7a07e6f', 'info_dict': { 'id': 'LMtgR8ba0b0', 'ext': 'mp4', 'title': 'GCC IA16 backend', 'description': 'Watch full version of this video at https://slideslive.com/38902413.', 'uploader': 'SlidesLive Videos - A', 'uploader_id': 'UC62SdArr41t_-_fX40QCLRw', 'timestamp': 1597615266, 'upload_date': '20170925', } }, { # video_service_name = yoda 'url': 'https://slideslive.com/38935785', 'md5': '575cd7a6c0acc6e28422fe76dd4bcb1a', 'info_dict': { 'id': 'RMraDYN5ozA_', 'ext': 'mp4', 'title': 'Offline Reinforcement Learning: From Algorithms to Practical Challenges', }, 'params': { 'format': 'bestvideo', }, }, { # video_service_name = youtube 'url': 'https://slideslive.com/38903721/magic-a-scientific-resurrection-of-an-esoteric-legend', 'only_matching': True, }, { # video_service_name = url 'url': 'https://slideslive.com/38922070/learning-transferable-skills-1', 'only_matching': True, }, { # video_service_name = vimeo 'url': 'https://slideslive.com/38921896/retrospectives-a-venue-for-selfreflection-in-ml-research-3', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) video_data = self._download_json( 'https://ben.slideslive.com/player/' + video_id, video_id) service_name = video_data['video_service_name'].lower() assert service_name in ('url', 'yoda', 'vimeo', 'youtube') service_id = video_data['video_service_id'] subtitles = {} for sub in try_get(video_data, lambda x: x['subtitles'], list) or []: if not isinstance(sub, dict): continue webvtt_url = url_or_none(sub.get('webvtt_url')) if not webvtt_url: continue lang = sub.get('language') or 'en' 
subtitles.setdefault(lang, []).append({ 'url': webvtt_url, }) info = { 'id': video_id, 'thumbnail': video_data.get('thumbnail'), 'is_live': bool_or_none(video_data.get('is_live')), 'subtitles': subtitles, } if service_name in ('url', 'yoda'): info['title'] = video_data['title'] if service_name == 'url': info['url'] = service_id else: formats = [] _MANIFEST_PATTERN = 'https://01.cdn.yoda.slideslive.com/%s/master.%s' # use `m3u8` entry_protocol until EXT-X-MAP is properly supported by `m3u8_native` entry_protocol formats.extend(self._extract_m3u8_formats( _MANIFEST_PATTERN % (service_id, 'm3u8'), service_id, 'mp4', m3u8_id='hls', fatal=False)) formats.extend(self._extract_mpd_formats( _MANIFEST_PATTERN % (service_id, 'mpd'), service_id, mpd_id='dash', fatal=False)) self._sort_formats(formats) info.update({ 'id': service_id, 'formats': formats, }) else: info.update({ '_type': 'url_transparent', 'url': service_id, 'ie_key': service_name.capitalize(), 'title': video_data.get('title'), }) if service_name == 'vimeo': info['url'] = smuggle_url( 'https://player.vimeo.com/video/' + service_id, {'http_headers': {'Referer': url}}) return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/clyp.py
youtube_dl/extractor/clyp.py
from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_parse_qs, compat_urllib_parse_urlparse, ) from ..utils import ( float_or_none, unified_timestamp, ) class ClypIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?clyp\.it/(?P<id>[a-z0-9]+)' _TESTS = [{ 'url': 'https://clyp.it/ojz2wfah', 'md5': '1d4961036c41247ecfdcc439c0cddcbb', 'info_dict': { 'id': 'ojz2wfah', 'ext': 'mp3', 'title': 'Krisson80 - bits wip wip', 'description': '#Krisson80BitsWipWip #chiptune\n#wip', 'duration': 263.21, 'timestamp': 1443515251, 'upload_date': '20150929', }, }, { 'url': 'https://clyp.it/b04p1odi?token=b0078e077e15835845c528a44417719d', 'info_dict': { 'id': 'b04p1odi', 'ext': 'mp3', 'title': 'GJ! (Reward Edit)', 'description': 'Metal Resistance (THE ONE edition)', 'duration': 177.789, 'timestamp': 1528241278, 'upload_date': '20180605', }, 'params': { 'skip_download': True, }, }] def _real_extract(self, url): audio_id = self._match_id(url) qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query) token = qs.get('token', [None])[0] query = {} if token: query['token'] = token metadata = self._download_json( 'https://api.clyp.it/%s' % audio_id, audio_id, query=query) formats = [] for secure in ('', 'Secure'): for ext in ('Ogg', 'Mp3'): format_id = '%s%s' % (secure, ext) format_url = metadata.get('%sUrl' % format_id) if format_url: formats.append({ 'url': format_url, 'format_id': format_id, 'vcodec': 'none', }) self._sort_formats(formats) title = metadata['Title'] description = metadata.get('Description') duration = float_or_none(metadata.get('Duration')) timestamp = unified_timestamp(metadata.get('DateCreated')) return { 'id': audio_id, 'title': title, 'description': description, 'duration': duration, 'timestamp': timestamp, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false