repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nrk.py | yt_dlp/extractor/nrk.py | import itertools
import random
import re
from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
determine_ext,
int_or_none,
parse_duration,
parse_iso8601,
str_or_none,
try_get,
update_url_query,
url_or_none,
urljoin,
)
class NRKBaseIE(InfoExtractor):
    """Shared helpers for the NRK extractors: HLS format extraction with a
    CDN fallback, playback-error reporting and PSAPI JSON requests."""

    _GEO_COUNTRIES = ['NO']
    # Legacy/regional CDN hosts that frequently serve dead streams; when an
    # m3u8 on one of these fails, _extract_nrk_formats retries the same path
    # on a randomly numbered akamaized.net mirror.
    _CDN_REPL_REGEX = r'''(?x)://
        (?:
            nrkod\d{1,2}-httpcache0-47115-cacheod0\.dna\.ip-only\.net/47115-cacheod0|
            nrk-od-no\.telenorcdn\.net|
            minicdn-od\.nrk\.no/od/nrkhd-osl-rr\.netwerk\.no/no
        )/'''

    def _extract_nrk_formats(self, asset_url, video_id):
        """Extract formats for an NRK asset URL (Akamai HDS/HLS or plain m3u8)."""
        asset_url = update_url_query(asset_url, {
            # Remove 'adap' to return all streams (known values are: small, large, small_h265, large_h265)
            'adap': [],
            # Disable subtitles since they are fetched separately
            's': 0,
        })
        if re.match(r'https?://[^/]+\.akamaihd\.net/i/', asset_url):
            return self._extract_akamai_formats(asset_url, video_id)
        # Strip bitrate-limiting / audio-only query parameters before fetching
        asset_url = re.sub(r'(?:bw_(?:low|high)=\d+|no_audio_only)&?', '', asset_url)
        formats = self._extract_m3u8_formats(
            asset_url, video_id, 'mp4', 'm3u8_native', fatal=False)
        if not formats and re.search(self._CDN_REPL_REGEX, asset_url):
            # Retry on an akamaized.net mirror when the original CDN host failed
            formats = self._extract_m3u8_formats(
                re.sub(self._CDN_REPL_REGEX, '://nrk-od-%02d.akamaized.net/no/' % random.randint(0, 99), asset_url),
                video_id, 'mp4', 'm3u8_native', fatal=False)
        return formats

    def _raise_error(self, data):
        """Raise a descriptive ExtractorError (or geo-restriction) from a
        PSAPI 'nonPlayable' payload."""
        # Known messageType values mapped to NRK's Norwegian end-user messages
        MESSAGES = {
            'ProgramRightsAreNotReady': 'Du kan dessverre ikke se eller høre programmet',
            'ProgramRightsHasExpired': 'Programmet har gått ut',
            'NoProgramRights': 'Ikke tilgjengelig',
            'ProgramIsGeoBlocked': 'NRK har ikke rettigheter til å vise dette programmet utenfor Norge',
        }
        message_type = data.get('messageType', '')
        # Can be ProgramIsGeoBlocked or ChannelIsGeoBlocked*
        if 'IsGeoBlocked' in message_type or try_get(data, lambda x: x['usageRights']['isGeoBlocked']) is True:
            self.raise_geo_restricted(
                msg=MESSAGES.get('ProgramIsGeoBlocked'),
                countries=self._GEO_COUNTRIES)
        message = data.get('endUserMessage') or MESSAGES.get(message_type, message_type)
        raise ExtractorError(f'{self.IE_NAME} said: {message}', expected=True)

    def _call_api(self, path, video_id, item=None, note=None, fatal=True, query=None):
        """Download JSON from psapi.nrk.no for the given path."""
        return self._download_json(
            urljoin('https://psapi.nrk.no/', path),
            video_id, note or f'Downloading {item} JSON',
            fatal=fatal, query=query, headers={
                # Needed for working stream URLs, see https://github.com/yt-dlp/yt-dlp/issues/12192
                'Accept': 'application/vnd.nrk.psapi+json; version=9; player=tv-player; device=player-core',
            })
class NRKIE(NRKBaseIE):
    """Core NRK extractor: resolves nrk: IDs and nrk.no/psapi media URLs via
    the PSAPI playback endpoints."""

    _VALID_URL = r'''(?x)
                        (?:
                            nrk:|
                            https?://
                                (?:
                                    (?:www\.)?nrk\.no/video/(?:PS\*|[^_]+_)|
                                    v8[-.]psapi\.nrk\.no/mediaelement/
                                )
                        )
                        (?P<id>[^?\#&]+)
                        '''

    _TESTS = [{
        # video
        'url': 'http://www.nrk.no/video/PS*150533',
        'md5': '2b88a652ad2e275591e61cf550887eec',
        'info_dict': {
            'id': '150533',
            'ext': 'mp4',
            'title': 'Dompap og andre fugler i Piip-Show',
            'description': 'md5:d9261ba34c43b61c812cb6b0269a5c8f',
            'duration': 262,
            'upload_date': '20140325',
            'thumbnail': r're:^https?://gfx\.nrk\.no/.*$',
            'timestamp': 1395751833,
            'alt_title': 'md5:d9261ba34c43b61c812cb6b0269a5c8f',
        },
    }, {
        # audio
        'url': 'http://www.nrk.no/video/PS*154915',
        # MD5 is unstable
        'info_dict': {
            'id': '154915',
            'ext': 'mp4',
            'title': 'Slik høres internett ut når du er blind',
            'description': 'md5:a621f5cc1bd75c8d5104cb048c6b8568',
            'duration': 20,
            'timestamp': 1398429565,
            'alt_title': 'Cathrine Lie Wathne er blind, og bruker hurtigtaster for å navigere seg rundt på ulike nettsider.',
            'thumbnail': 'https://gfx.nrk.no/urxQMSXF-WnbfjBH5ke2igLGyN27EdJVWZ6FOsEAclhA',
            'upload_date': '20140425',
        },
    }, {
        'url': 'nrk:ecc1b952-96dc-4a98-81b9-5296dc7a98d9',
        'only_matching': True,
    }, {
        'url': 'nrk:clip/7707d5a3-ebe7-434a-87d5-a3ebe7a34a70',
        'only_matching': True,
    }, {
        'url': 'https://v8-psapi.nrk.no/mediaelement/ecc1b952-96dc-4a98-81b9-5296dc7a98d9',
        'only_matching': True,
    }, {
        'url': 'https://www.nrk.no/video/dompap-og-andre-fugler-i-piip-show_150533',
        'only_matching': True,
    }, {
        'url': 'https://www.nrk.no/video/humor/kommentatorboksen-reiser-til-sjos_d1fda11f-a4ad-437a-a374-0398bc84e999',
        'only_matching': True,
    }, {
        # podcast
        'url': 'nrk:l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8',
        'only_matching': True,
    }, {
        'url': 'nrk:podcast/l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8',
        'only_matching': True,
    }, {
        # clip
        'url': 'nrk:150533',
        'only_matching': True,
    }, {
        'url': 'nrk:clip/150533',
        'only_matching': True,
    }, {
        # program
        'url': 'nrk:MDDP12000117',
        'only_matching': True,
    }, {
        'url': 'nrk:program/ENRK10100318',
        'only_matching': True,
    }, {
        # direkte
        'url': 'nrk:nrk1',
        'only_matching': True,
    }, {
        'url': 'nrk:channel/nrk1',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # IDs may come prefixed with a kind segment (clip/, program/, ...);
        # only the last path component is the actual media ID
        video_id = self._match_id(url).split('/')[-1]

        def call_playback_api(item, query=None):
            # Try the 'program' endpoint first; fall back to the generic
            # playback endpoint when the API rejects the ID with HTTP 400
            try:
                return self._call_api(f'playback/{item}/program/{video_id}', video_id, item, query=query)
            except ExtractorError as e:
                if isinstance(e.cause, HTTPError) and e.cause.status == 400:
                    return self._call_api(f'playback/{item}/{video_id}', video_id, item, query=query)
                raise

        # known values for preferredCdn: akamai, globalconnect and telenor
        manifest = call_playback_api('manifest', {'preferredCdn': 'akamai'})

        video_id = try_get(manifest, lambda x: x['id'], str) or video_id

        if manifest.get('playability') == 'nonPlayable':
            self._raise_error(manifest['nonPlayable'])

        playable = manifest['playable']

        formats = []
        for asset in playable['assets']:
            if not isinstance(asset, dict):
                continue
            if asset.get('encrypted'):
                continue
            format_url = url_or_none(asset.get('url'))
            if not format_url:
                continue
            asset_format = (asset.get('format') or '').lower()
            if asset_format == 'hls' or determine_ext(format_url) == 'm3u8':
                formats.extend(self._extract_nrk_formats(format_url, video_id))
            elif asset_format == 'mp3':
                formats.append({
                    'url': format_url,
                    'format_id': asset_format,
                    'vcodec': 'none',
                })

        data = call_playback_api('metadata')

        preplay = data['preplay']
        titles = preplay['titles']
        title = titles['title']
        alt_title = titles.get('subtitle')

        description = try_get(preplay, lambda x: x['description'].replace('\r', '\n'))
        duration = parse_duration(playable.get('duration')) or parse_duration(data.get('duration'))

        thumbnails = []
        for image in try_get(
                preplay, lambda x: x['poster']['images'], list) or []:
            if not isinstance(image, dict):
                continue
            image_url = url_or_none(image.get('url'))
            if not image_url:
                continue
            thumbnails.append({
                'url': image_url,
                'width': int_or_none(image.get('pixelWidth')),
                'height': int_or_none(image.get('pixelHeight')),
            })

        subtitles = {}
        for sub in try_get(playable, lambda x: x['subtitles'], list) or []:
            if not isinstance(sub, dict):
                continue
            sub_url = url_or_none(sub.get('webVtt'))
            if not sub_url:
                continue
            # Subtitle key is language (default 'nb') plus the optional
            # subtitle type, e.g. 'nb-ttv'
            sub_key = str_or_none(sub.get('language')) or 'nb'
            sub_type = str_or_none(sub.get('type'))
            if sub_type:
                sub_key += f'-{sub_type}'
            subtitles.setdefault(sub_key, []).append({
                'url': sub_url,
            })

        legal_age = try_get(
            data, lambda x: x['legalAge']['body']['rating']['code'], str)
        # https://en.wikipedia.org/wiki/Norwegian_Media_Authority
        age_limit = None
        if legal_age:
            if legal_age == 'A':
                age_limit = 0
            elif legal_age.isdigit():
                age_limit = int_or_none(legal_age)

        is_series = try_get(data, lambda x: x['_links']['series']['name']) == 'series'

        info = {
            'id': video_id,
            'title': title,
            'alt_title': alt_title,
            'description': description,
            'duration': duration,
            'thumbnails': thumbnails,
            'age_limit': age_limit,
            'formats': formats,
            'subtitles': subtitles,
            'timestamp': parse_iso8601(try_get(manifest, lambda x: x['availability']['onDemand']['from'], str)),
        }

        if is_series:
            # Enrich with series/season/episode metadata; fall back to
            # heuristics on title/description when the programs API lacks data
            series = season_id = season_number = episode = episode_number = None
            programs = self._call_api(
                f'programs/{video_id}', video_id, 'programs', fatal=False)
            if programs and isinstance(programs, dict):
                series = str_or_none(programs.get('seriesTitle'))
                season_id = str_or_none(programs.get('seasonId'))
                season_number = int_or_none(programs.get('seasonNumber'))
                episode = str_or_none(programs.get('episodeTitle'))
                episode_number = int_or_none(programs.get('episodeNumber'))
            if not series:
                series = title
            if alt_title:
                title += f' - {alt_title}'
            if not season_number:
                season_number = int_or_none(self._search_regex(
                    r'Sesong\s+(\d+)', description or '', 'season number',
                    default=None))
            if not episode:
                episode = alt_title if is_series else None
            if not episode_number:
                # e.g. '8. episode' -> 8
                episode_number = int_or_none(self._search_regex(
                    r'^(\d+)\.', episode or '', 'episode number',
                    default=None))
            if not episode_number:
                # e.g. '(2:6)' -> 2
                episode_number = int_or_none(self._search_regex(
                    r'\((\d+)\s*:\s*\d+\)', description or '',
                    'episode number', default=None))
            info.update({
                'title': title,
                'series': series,
                'season_id': season_id,
                'season_number': season_number,
                'episode': episode,
                'episode_number': episode_number,
            })

        return info
class NRKTVIE(InfoExtractor):
    """Extractor for tv.nrk.no / radio.nrk.no programme pages; delegates the
    actual extraction to NRKIE via an nrk: URL."""

    IE_DESC = 'NRK TV and NRK Radio'
    # NRK programme IDs: four letters followed by eight digits
    _EPISODE_RE = r'(?P<id>[a-zA-Z]{4}\d{8})'
    _VALID_URL = rf'https?://(?:tv|radio)\.nrk(?:super)?\.no/(?:[^/]+/)*{_EPISODE_RE}'
    _TESTS = [{
        'url': 'https://tv.nrk.no/program/MDDP12000117',
        'md5': 'c4a5960f1b00b40d47db65c1064e0ab1',
        'info_dict': {
            'id': 'MDDP12000117',
            'ext': 'mp4',
            'title': 'Alarm Trolltunga',
            'description': 'md5:46923a6e6510eefcce23d5ef2a58f2ce',
            'duration': 2223.44,
            'age_limit': 6,
            'subtitles': {
                'nb-nor': [{
                    'ext': 'vtt',
                }],
                'nb-ttv': [{
                    'ext': 'vtt',
                }],
            },
            'upload_date': '20170627',
            'timestamp': 1498591822,
            'thumbnail': 'https://gfx.nrk.no/myRSc4vuFlahB60P3n6swwRTQUZI1LqJZl9B7icZFgzA',
            'alt_title': 'md5:46923a6e6510eefcce23d5ef2a58f2ce',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://tv.nrk.no/serie/20-spoersmaal-tv/MUHH48000314/23-05-2014',
        'md5': '8d40dab61cea8ab0114e090b029a0565',
        'info_dict': {
            'id': 'MUHH48000314',
            'ext': 'mp4',
            'title': '20 spørsmål - 23. mai 2014',
            'alt_title': '23. mai 2014',
            'description': 'md5:bdea103bc35494c143c6a9acdd84887a',
            'duration': 1741,
            'series': '20 spørsmål',
            'episode': '23. mai 2014',
            'age_limit': 0,
            'timestamp': 1584593700,
            'thumbnail': 'https://gfx.nrk.no/u7uCe79SEfPVGRAGVp2_uAZnNc4mfz_kjXg6Bgek8lMQ',
            'season_id': '126936',
            'upload_date': '20200319',
            'season': 'Season 2014',
            'season_number': 2014,
            'episode_number': 3,
        },
    }, {
        'url': 'https://tv.nrk.no/program/mdfp15000514',
        'info_dict': {
            'id': 'MDFP15000514',
            'ext': 'mp4',
            'title': 'Kunnskapskanalen - Grunnlovsjubiléet - Stor ståhei for ingenting',
            'description': 'md5:89290c5ccde1b3a24bb8050ab67fe1db',
            'duration': 4605.08,
            'series': 'Kunnskapskanalen',
            'episode': 'Grunnlovsjubiléet - Stor ståhei for ingenting',
            'age_limit': 0,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # single playlist video
        'url': 'https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015#del=2',
        'info_dict': {
            'id': 'MSPO40010515',
            'ext': 'mp4',
            'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015',
            'description': 'md5:c03aba1e917561eface5214020551b7a',
            'age_limit': 0,
        },
        'params': {
            'skip_download': True,
        },
        'expected_warnings': ['Failed to download m3u8 information'],
        'skip': 'particular part is not supported currently',
    }, {
        'url': 'https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015',
        'info_dict': {
            'id': 'MSPO40010515',
            'ext': 'mp4',
            'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015',
            'description': 'md5:c03aba1e917561eface5214020551b7a',
            'age_limit': 0,
        },
        'expected_warnings': ['Failed to download m3u8 information'],
        'skip': 'Ikke tilgjengelig utenfor Norge',
    }, {
        'url': 'https://tv.nrk.no/serie/anno/KMTE50001317/sesong-3/episode-13',
        'info_dict': {
            'id': 'KMTE50001317',
            'ext': 'mp4',
            'title': 'Anno - 13. episode',
            'description': 'md5:11d9613661a8dbe6f9bef54e3a4cbbfa',
            'duration': 2340,
            'series': 'Anno',
            'episode': '13. episode',
            'season_number': 3,
            'episode_number': 13,
            'age_limit': 0,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://tv.nrk.no/serie/nytt-paa-nytt/MUHH46000317/27-01-2017',
        'info_dict': {
            'id': 'MUHH46000317',
            'ext': 'mp4',
            'title': 'Nytt på Nytt 27.01.2017',
            'description': 'md5:5358d6388fba0ea6f0b6d11c48b9eb4b',
            'duration': 1796,
            'series': 'Nytt på nytt',
            'episode': '27.01.2017',
            'age_limit': 0,
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'ProgramRightsHasExpired',
    }, {
        'url': 'https://radio.nrk.no/serie/dagsnytt/NPUB21019315/12-07-2015#',
        'only_matching': True,
    }, {
        'url': 'https://tv.nrk.no/serie/lindmo/2018/MUHU11006318/avspiller',
        'only_matching': True,
    }, {
        'url': 'https://radio.nrk.no/serie/dagsnytt/sesong/201507/NPUB21019315',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # All metadata/format extraction happens in NRKIE; just forward the ID
        video_id = self._match_id(url)
        return self.url_result(
            f'nrk:{video_id}', ie=NRKIE.ie_key(), video_id=video_id)
class NRKTVEpisodeIE(InfoExtractor):
    """Resolves tv.nrk.no /serie/<name>/sesong/<n>/episode/<m> URLs to the
    underlying NRK programme ID and hands off to NRKIE."""

    _VALID_URL = r'https?://tv\.nrk\.no/serie/(?P<id>[^/?#]+/sesong/(?P<season_number>\d+)/episode/(?P<episode_number>\d+))'
    _TESTS = [{
        'url': 'https://tv.nrk.no/serie/hellums-kro/sesong/1/episode/2',
        'add_ie': [NRKIE.ie_key()],
        'info_dict': {
            'id': 'MUHH36005220',
            'ext': 'mp4',
            'title': 'Hellums kro - 2. Kro, krig og kjærlighet',
            'description': 'md5:ad92ddffc04cea8ce14b415deef81787',
            'duration': 1563.92,
            'series': 'Hellums kro',
            'season_number': 1,
            'episode_number': 2,
            'episode': '2. Kro, krig og kjærlighet',
            'age_limit': 6,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://tv.nrk.no/serie/backstage/sesong/1/episode/8',
        'info_dict': {
            'id': 'MSUI14000816',
            'ext': 'mp4',
            'title': 'Backstage - 8. episode',
            'description': 'md5:de6ca5d5a2d56849e4021f2bf2850df4',
            'duration': 1320,
            'series': 'Backstage',
            'season_number': 1,
            'episode_number': 8,
            'episode': '8. episode',
            'age_limit': 0,
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'ProgramRightsHasExpired',
    }]

    def _real_extract(self, url):
        display_id, season_number, episode_number = self._match_valid_url(url).group(
            'id', 'season_number', 'episode_number')

        webpage, urlh = self._download_webpage_handle(url, display_id)

        # The page may redirect straight to a programme URL handled by
        # NRKTVIE; otherwise dig the programme ID out of the embedded
        # "pageData" JSON blob
        if NRKTVIE.suitable(urlh.url):
            nrk_id = NRKTVIE._match_id(urlh.url)
        else:
            nrk_id = self._search_json(
                r'<script\b[^>]+\bid="pageData"[^>]*>', webpage,
                'page data', display_id)['initialState']['selectedEpisodePrfId']
        # Sanity-check the ID shape before constructing the nrk: URL
        if not re.fullmatch(NRKTVIE._EPISODE_RE, nrk_id):
            raise ExtractorError('Unable to extract NRK ID')

        return self.url_result(
            f'nrk:{nrk_id}', NRKIE, nrk_id,
            season_number=int(season_number),
            episode_number=int(episode_number))
class NRKTVSerieBaseIE(NRKBaseIE):
    """Shared logic for season/series extractors: mapping catalog episode
    lists to nrk: URL results and paginating through the catalog API."""

    def _extract_entries(self, entry_list):
        """Turn a list of episode dicts into url_result entries, skipping
        items without a usable string ID."""
        if not isinstance(entry_list, list):
            return []
        results = []
        for item in entry_list:
            prf_id = item.get('prfId') or item.get('episodeId')
            if isinstance(prf_id, str) and prf_id:
                results.append(self.url_result(
                    f'nrk:{prf_id}', ie=NRKIE.ie_key(), video_id=prf_id))
        return results

    # Keys under which the catalog API may list a season's items
    _ASSETS_KEYS = ('episodes', 'instalments')

    def _extract_assets_key(self, embedded):
        """Return the first assets key with a non-empty value, else None."""
        return next(
            (key for key in self._ASSETS_KEYS if embedded.get(key)), None)

    @staticmethod
    def _catalog_name(serie_kind):
        """Map the URL path kind to the catalog API segment."""
        if serie_kind in ('podcast', 'podkast'):
            return 'podcast'
        return 'series'

    def _entries(self, data, display_id):
        """Yield entries from a catalog response, following pagination links
        until none remain."""
        page_num = 0
        while True:
            page_num += 1
            container = data.get('_embedded') or data
            if not isinstance(container, dict):
                return
            assets_key = self._extract_assets_key(container)
            if not assets_key:
                return
            # Items may sit directly under the assets key or nested one
            # level deeper inside another _embedded wrapper
            episode_list = try_get(
                container,
                (lambda x: x[assets_key]['_embedded'][assets_key],
                 lambda x: x[assets_key]),
                list)
            yield from self._extract_entries(episode_list)
            # Follow the next-page link if the API provides one
            next_href = try_get(
                data,
                (lambda x: x['_links']['next']['href'],
                 lambda x: x['_embedded'][assets_key]['_links']['next']['href']),
                str)
            if not next_href:
                return
            data = self._call_api(
                next_href, display_id,
                note=f'Downloading {assets_key} JSON page {page_num}',
                fatal=False)
            if not data:
                return
class NRKTVSeasonIE(NRKTVSerieBaseIE):
    """Playlist extractor for a single season of an NRK TV/radio series or
    podcast."""

    _VALID_URL = r'''(?x)
                    https?://
                        (?P<domain>tv|radio)\.nrk\.no/
                        (?P<serie_kind>serie|pod[ck]ast)/
                        (?P<serie>[^/]+)/
                        (?:
                            (?:sesong/)?(?P<id>\d+)|
                            sesong/(?P<id_2>[^/?#&]+)
                        )
                    '''
    _TESTS = [{
        'url': 'https://tv.nrk.no/serie/backstage/sesong/1',
        'info_dict': {
            'id': 'backstage/1',
            'title': 'Sesong 1',
        },
        'playlist_mincount': 30,
    }, {
        # no /sesong/ in path
        'url': 'https://tv.nrk.no/serie/lindmo/2016',
        'info_dict': {
            'id': 'lindmo/2016',
            'title': '2016',
        },
        'playlist_mincount': 29,
    }, {
        # weird nested _embedded in catalog JSON response
        'url': 'https://radio.nrk.no/serie/dickie-dick-dickens/sesong/1',
        'info_dict': {
            'id': 'dickie-dick-dickens/1',
            'title': 'Sesong 1',
        },
        'playlist_mincount': 11,
    }, {
        # 841 entries, multi page
        'url': 'https://radio.nrk.no/serie/dagsnytt/sesong/201509',
        'info_dict': {
            'id': 'dagsnytt/201509',
            'title': 'September 2015',
        },
        'playlist_mincount': 841,
    }, {
        # 180 entries, single page
        'url': 'https://tv.nrk.no/serie/spangas/sesong/1',
        'only_matching': True,
    }, {
        'url': 'https://radio.nrk.no/podkast/hele_historien/sesong/diagnose-kverulant',
        'info_dict': {
            'id': 'hele_historien/diagnose-kverulant',
            'title': 'Diagnose kverulant',
        },
        'playlist_mincount': 3,
    }, {
        'url': 'https://radio.nrk.no/podkast/loerdagsraadet/sesong/202101',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Defer to the more specific episode/programme/podcast extractors
        return (False if NRKTVIE.suitable(url) or NRKTVEpisodeIE.suitable(url) or NRKRadioPodkastIE.suitable(url)
                else super().suitable(url))

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        domain = mobj.group('domain')
        serie_kind = mobj.group('serie_kind')
        serie = mobj.group('serie')
        # The season ID is either numeric ('id') or a named season ('id_2')
        season_id = mobj.group('id') or mobj.group('id_2')
        display_id = f'{serie}/{season_id}'

        data = self._call_api(
            f'{domain}/catalog/{self._catalog_name(serie_kind)}/{serie}/seasons/{season_id}',
            display_id, 'season', query={'pageSize': 50})

        title = try_get(data, lambda x: x['titles']['title'], str) or display_id
        return self.playlist_result(
            self._entries(data, display_id),
            display_id, title)
class NRKTVSeriesIE(NRKTVSerieBaseIE):
    """Playlist extractor for a whole NRK series or podcast, collecting all
    seasons, instalments and extra material."""

    _VALID_URL = r'https?://(?P<domain>(?:tv|radio)\.nrk|(?:tv\.)?nrksuper)\.no/(?P<serie_kind>serie|pod[ck]ast)/(?P<id>[^/]+)'
    _TESTS = [{
        # new layout, instalments
        'url': 'https://tv.nrk.no/serie/groenn-glede',
        'info_dict': {
            'id': 'groenn-glede',
            'title': 'Grønn glede',
            'description': 'md5:7576e92ae7f65da6993cf90ee29e4608',
        },
        'playlist_mincount': 90,
    }, {
        # new layout, instalments, more entries
        'url': 'https://tv.nrk.no/serie/lindmo',
        'only_matching': True,
    }, {
        'url': 'https://tv.nrk.no/serie/blank',
        'info_dict': {
            'id': 'blank',
            'title': 'Blank',
            'description': 'md5:7664b4e7e77dc6810cd3bca367c25b6e',
        },
        'playlist_mincount': 30,
    }, {
        # new layout, seasons
        'url': 'https://tv.nrk.no/serie/backstage',
        'info_dict': {
            'id': 'backstage',
            'title': 'Backstage',
            'description': 'md5:63692ceb96813d9a207e9910483d948b',
        },
        'playlist_mincount': 60,
    }, {
        # old layout
        'url': 'https://tv.nrksuper.no/serie/labyrint',
        'info_dict': {
            'id': 'labyrint',
            'title': 'Labyrint',
            'description': 'I Daidalos sin undersjøiske Labyrint venter spennende oppgaver, skumle robotskapninger og slim.',
        },
        'playlist_mincount': 3,
    }, {
        'url': 'https://tv.nrk.no/serie/broedrene-dal-og-spektralsteinene',
        'only_matching': True,
    }, {
        'url': 'https://tv.nrk.no/serie/saving-the-human-race',
        'only_matching': True,
    }, {
        'url': 'https://tv.nrk.no/serie/postmann-pat',
        'only_matching': True,
    }, {
        'url': 'https://radio.nrk.no/serie/dickie-dick-dickens',
        'info_dict': {
            'id': 'dickie-dick-dickens',
            'title': 'Dickie Dick Dickens',
            'description': 'md5:19e67411ffe57f7dce08a943d7a0b91f',
        },
        'playlist_mincount': 8,
    }, {
        'url': 'https://nrksuper.no/serie/labyrint',
        'only_matching': True,
    }, {
        'url': 'https://radio.nrk.no/podkast/ulrikkes_univers',
        'info_dict': {
            'id': 'ulrikkes_univers',
        },
        'playlist_mincount': 10,
    }, {
        'url': 'https://radio.nrk.no/podkast/ulrikkes_univers/nrkno-poddkast-26588-134079-05042018030000',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Defer to the more specific episode/programme/podcast/season extractors
        return (
            False if any(ie.suitable(url)
                         for ie in (NRKTVIE, NRKTVEpisodeIE, NRKRadioPodkastIE, NRKTVSeasonIE))
            else super().suitable(url))

    def _real_extract(self, url):
        site, serie_kind, series_id = self._match_valid_url(url).groups()
        is_radio = site == 'radio.nrk'
        domain = 'radio' if is_radio else 'tv'

        # The radio and TV catalogs use different page-size query parameters
        size_prefix = 'p' if is_radio else 'embeddedInstalmentsP'
        series = self._call_api(
            f'{domain}/catalog/{self._catalog_name(serie_kind)}/{series_id}',
            series_id, 'serie', query={size_prefix + 'ageSize': 50})
        # Title location varies across catalog response layouts
        titles = try_get(series, [
            lambda x: x['titles'],
            lambda x: x[x['type']]['titles'],
            lambda x: x[x['seriesType']]['titles'],
        ]) or {}

        entries = []
        entries.extend(self._entries(series, series_id))
        embedded = series.get('_embedded') or {}
        linked_seasons = try_get(series, lambda x: x['_links']['seasons']) or []
        embedded_seasons = embedded.get('seasons') or []
        if len(linked_seasons) > len(embedded_seasons):
            # Not all seasons are embedded: enumerate them via the season
            # links and delegate each to NRKTVSeasonIE
            for season in linked_seasons:
                season_url = urljoin(url, season.get('href'))
                if not season_url:
                    season_name = season.get('name')
                    if season_name and isinstance(season_name, str):
                        season_url = f'https://{domain}.nrk.no/serie/{series_id}/sesong/{season_name}'
                if season_url:
                    entries.append(self.url_result(
                        season_url, ie=NRKTVSeasonIE.ie_key(),
                        video_title=season.get('title')))
        else:
            # All seasons are embedded; extract their entries directly
            for season in embedded_seasons:
                entries.extend(self._entries(season, series_id))
        entries.extend(self._entries(
            embedded.get('extraMaterial') or {}, series_id))

        return self.playlist_result(
            entries, series_id, titles.get('title'), titles.get('subtitle'))
class NRKTVDirekteIE(NRKTVIE):  # XXX: Do not subclass from concrete IE
    """Live-stream URLs; inherits the nrk:-forwarding _real_extract from
    NRKTVIE unchanged, only the URL pattern differs."""

    IE_DESC = 'NRK TV Direkte and NRK Radio Direkte'
    _VALID_URL = r'https?://(?:tv|radio)\.nrk\.no/direkte/(?P<id>[^/?#&]+)'

    _TESTS = [{
        'url': 'https://tv.nrk.no/direkte/nrk1',
        'only_matching': True,
    }, {
        'url': 'https://radio.nrk.no/direkte/p1_oslo_akershus',
        'only_matching': True,
    }]
class NRKRadioPodkastIE(InfoExtractor):
    """Podcast episode URLs on radio.nrk.no; forwards the UUID-style episode
    ID to NRKIE."""

    _VALID_URL = r'https?://radio\.nrk\.no/pod[ck]ast/(?:[^/]+/)+(?P<id>l_[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'

    _TESTS = [{
        'url': 'https://radio.nrk.no/podkast/ulrikkes_univers/l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8',
        'md5': '8d40dab61cea8ab0114e090b029a0565',
        'info_dict': {
            'id': 'MUHH48000314AA',
            'ext': 'mp4',
            'title': '20 spørsmål 23.05.2014',
            'description': 'md5:bdea103bc35494c143c6a9acdd84887a',
            'duration': 1741,
            'series': '20 spørsmål',
            'episode': '23.05.2014',
        },
    }, {
        'url': 'https://radio.nrk.no/podcast/ulrikkes_univers/l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8',
        'only_matching': True,
    }, {
        'url': 'https://radio.nrk.no/podkast/ulrikkes_univers/sesong/1/l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8',
        'only_matching': True,
    }, {
        'url': 'https://radio.nrk.no/podkast/hele_historien/sesong/bortfoert-i-bergen/l_774d1a2c-7aa7-4965-8d1a-2c7aa7d9652c',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # All metadata/format extraction happens in NRKIE; just forward the ID
        video_id = self._match_id(url)
        return self.url_result(
            f'nrk:{video_id}', ie=NRKIE.ie_key(), video_id=video_id)
class NRKPlaylistBaseIE(InfoExtractor):
    """Scaffolding for NRK playlist pages. Subclasses provide _ITEM_RE (to
    find embedded video IDs) and _extract_title; they may also override
    _extract_description."""

    def _extract_description(self, webpage):
        # Default implementation: playlists have no description
        pass

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)

        # Each ID found in the page becomes an nrk: entry handled by NRKIE
        entries = []
        for video_id in re.findall(self._ITEM_RE, webpage):
            entries.append(self.url_result(f'nrk:{video_id}', NRKIE.ie_key()))

        return self.playlist_result(
            entries, playlist_id,
            self._extract_title(webpage),
            self._extract_description(webpage))
class NRKPlaylistIE(NRKPlaylistBaseIE):
    """Article pages on www.nrk.no containing one or more embedded videos."""

    # Exclude /video/ and /skole/ paths, which have dedicated extractors
    _VALID_URL = r'https?://(?:www\.)?nrk\.no/(?!video|skole)(?:[^/]+/)+(?P<id>[^/]+)'
    # Embedded players carry the media ID in a data-video-id attribute
    _ITEM_RE = r'class="[^"]*\brich\b[^"]*"[^>]+data-video-id="([^"]+)"'
    _TESTS = [{
        'url': 'http://www.nrk.no/troms/gjenopplev-den-historiske-solformorkelsen-1.12270763',
        'info_dict': {
            'id': 'gjenopplev-den-historiske-solformorkelsen-1.12270763',
            'title': 'Gjenopplev den historiske solformørkelsen',
            'description': 'md5:c2df8ea3bac5654a26fc2834a542feed',
        },
        'playlist_count': 2,
    }, {
        'url': 'http://www.nrk.no/kultur/bok/rivertonprisen-til-karin-fossum-1.12266449',
        'info_dict': {
            'id': 'rivertonprisen-til-karin-fossum-1.12266449',
            'title': 'Rivertonprisen til Karin Fossum',
            'description': 'Første kvinne på 15 år til å vinne krimlitteraturprisen.',
        },
        'playlist_count': 2,
    }]

    def _extract_title(self, webpage):
        return self._og_search_title(webpage, fatal=False)

    def _extract_description(self, webpage):
        return self._og_search_description(webpage)
class NRKTVEpisodesIE(NRKPlaylistBaseIE):
    """Legacy episode-listing pages on tv.nrk.no/program/[Ee]pisodes/."""

    _VALID_URL = r'https?://tv\.nrk\.no/program/[Ee]pisodes/[^/]+/(?P<id>\d+)'
    # Episode IDs appear as data-episode attributes matching the NRKTVIE ID shape
    _ITEM_RE = rf'data-episode=["\']{NRKTVIE._EPISODE_RE}'
    _TESTS = [{
        'url': 'https://tv.nrk.no/program/episodes/nytt-paa-nytt/69031',
        'info_dict': {
            'id': '69031',
            'title': 'Nytt på nytt, sesong: 201210',
        },
        'playlist_count': 4,
    }]

    def _extract_title(self, webpage):
        return self._html_search_regex(
            r'<h1>([^<]+)</h1>', webpage, 'title', fatal=False)
class NRKSkoleIE(InfoExtractor):
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | true |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/patreon.py | yt_dlp/extractor/patreon.py | import functools
import itertools
import urllib.parse
from .common import InfoExtractor
from .sproutvideo import VidsIoIE
from .vimeo import VimeoIE
from ..networking.exceptions import HTTPError
from ..utils import (
KNOWN_EXTENSIONS,
ExtractorError,
clean_html,
determine_ext,
int_or_none,
mimetype2ext,
parse_iso8601,
smuggle_url,
str_or_none,
url_or_none,
urljoin,
)
from ..utils.traversal import require, traverse_obj, value
class PatreonBaseIE(InfoExtractor):
    """Shared helpers for the Patreon extractors: the mobile User-Agent and
    a JSON API wrapper that surfaces Patreon's own error messages."""

    @functools.cached_property
    def patreon_user_agent(self):
        # Patreon mobile UA is needed to avoid triggering Cloudflare anti-bot protection.
        # Newer UA yields higher res m3u8 formats for locked posts, but gives 401 if not logged-in
        logged_in = self._get_cookies('https://www.patreon.com/').get('session_id')
        return ('Patreon/72.2.28 (Android; Android 14; Scale/2.10)' if logged_in
                else 'Patreon/7.6.28 (Android; Android 11; Scale/2.10)')

    def _call_api(self, ep, item_id, query=None, headers=None, fatal=True, note=None):
        """Download JSON from the Patreon API endpoint *ep*; on a JSON error
        response, re-raise with Patreon's error detail."""
        headers = {} if headers is None else headers
        if 'User-Agent' not in headers:
            headers['User-Agent'] = self.patreon_user_agent
        if query:
            query.update({'json-api-version': 1.0})

        try:
            return self._download_json(
                f'https://www.patreon.com/api/{ep}',
                item_id, note=note or 'Downloading API JSON',
                query=query, fatal=fatal, headers=headers)
        except ExtractorError as e:
            # Only unwrap errors where the server actually returned JSON
            got_json_error = (
                isinstance(e.cause, HTTPError)
                and mimetype2ext(e.cause.response.headers.get('Content-Type')) == 'json')
            if not got_json_error:
                raise
            err_json = self._parse_json(
                self._webpage_read_content(e.cause.response, None, item_id),
                item_id, fatal=False)
            err_message = traverse_obj(err_json, ('errors', ..., 'detail'), get_all=False)
            if err_message:
                raise ExtractorError(f'Patreon said: {err_message}', expected=True)
            raise
class PatreonIE(PatreonBaseIE):
IE_NAME = 'patreon'
_VALID_URL = r'https?://(?:www\.)?patreon\.com/(?:creation\?hid=|posts/(?:[\w-]+-)?)(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.patreon.com/creation?hid=743933',
'md5': 'e25505eec1053a6e6813b8ed369875cc',
'info_dict': {
'id': '743933',
'ext': 'mp3',
'alt_title': 'cd166.mp3',
'title': 'Episode 166: David Smalley of Dogma Debate',
'description': 'md5:34d207dd29aa90e24f1b3f58841b81c7',
'uploader': 'Cognitive Dissonance Podcast',
'thumbnail': 're:^https?://.*$',
'timestamp': 1406473987,
'upload_date': '20140727',
'uploader_id': '87145',
'like_count': int,
'comment_count': int,
'uploader_url': 'https://www.patreon.com/dissonancepod',
'channel_id': '80642',
'channel_url': 'https://www.patreon.com/dissonancepod',
'channel_follower_count': int,
},
}, {
'url': 'http://www.patreon.com/creation?hid=754133',
'md5': '3eb09345bf44bf60451b8b0b81759d0a',
'info_dict': {
'id': '754133',
'ext': 'mp3',
'title': 'CD 167 Extra',
'uploader': 'Cognitive Dissonance Podcast',
'thumbnail': 're:^https?://.*$',
'like_count': int,
'comment_count': int,
'uploader_url': 'https://www.patreon.com/dissonancepod',
},
'skip': 'Patron-only content',
}, {
'url': 'https://www.patreon.com/creation?hid=1682498',
'info_dict': {
'id': 'SU4fj_aEMVw',
'ext': 'mp4',
'title': 'I\'m on Patreon!',
'uploader': 'TraciJHines',
'thumbnail': 're:^https?://.*$',
'upload_date': '20150211',
'description': 'md5:8af6425f50bd46fbf29f3db0fc3a8364',
'uploader_id': '@TraciHinesMusic',
'categories': ['Entertainment'],
'duration': 282,
'view_count': int,
'tags': 'count:39',
'age_limit': 0,
'channel': 'TraciJHines',
'channel_url': 'https://www.youtube.com/channel/UCGLim4T2loE5rwCMdpCIPVg',
'live_status': 'not_live',
'like_count': int,
'channel_id': 'UCGLim4T2loE5rwCMdpCIPVg',
'availability': 'public',
'channel_follower_count': int,
'playable_in_embed': True,
'uploader_url': 'https://www.youtube.com/@TraciHinesMusic',
'comment_count': int,
'channel_is_verified': True,
'chapters': 'count:4',
'timestamp': 1423689666,
},
'params': {
'noplaylist': True,
'skip_download': True,
},
}, {
'url': 'https://www.patreon.com/posts/episode-166-of-743933',
'only_matching': True,
}, {
'url': 'https://www.patreon.com/posts/743933',
'only_matching': True,
}, {
'url': 'https://www.patreon.com/posts/kitchen-as-seen-51706779',
'md5': '96656690071f6d64895866008484251b',
'info_dict': {
'id': '555089736',
'ext': 'mp4',
'title': 'KITCHEN AS SEEN ON DEEZ NUTS EXTENDED!',
'uploader': 'Cold Ones',
'thumbnail': 're:^https?://.*$',
'upload_date': '20210526',
'description': 'md5:557a409bd79d3898689419094934ba79',
'uploader_id': '14936315',
},
'skip': 'Patron-only content',
}, {
# m3u8 video (https://github.com/yt-dlp/yt-dlp/issues/2277)
'url': 'https://www.patreon.com/posts/video-sketchbook-32452882',
'info_dict': {
'id': '32452882',
'ext': 'mp4',
'comment_count': int,
'uploader_id': '4301314',
'like_count': int,
'timestamp': 1576696962,
'upload_date': '20191218',
'thumbnail': r're:^https?://.*$',
'uploader_url': 'https://www.patreon.com/loish',
'description': 'md5:e2693e97ee299c8ece47ffdb67e7d9d2',
'title': 'VIDEO // sketchbook flipthrough',
'uploader': 'Loish ',
'tags': ['sketchbook', 'video'],
'channel_id': '1641751',
'channel_url': 'https://www.patreon.com/loish',
'channel_follower_count': int,
},
}, {
# bad videos under media (if media is included). Real one is under post_file
'url': 'https://www.patreon.com/posts/premium-access-70282931',
'info_dict': {
'id': '70282931',
'ext': 'mp4',
'title': '[Premium Access + Uncut] The Office - 2x6 The Fight - Group Reaction',
'channel_url': 'https://www.patreon.com/thenormies',
'channel_id': '573397',
'uploader_id': '2929435',
'uploader': 'The Normies',
'description': 'md5:79c9fd8778e2cef84049a94c058a5e23',
'comment_count': int,
'upload_date': '20220809',
'thumbnail': r're:^https?://.*$',
'channel_follower_count': int,
'like_count': int,
'timestamp': 1660052820,
'tags': ['The Office', 'early access', 'uncut'],
'uploader_url': 'https://www.patreon.com/thenormies',
},
'skip': 'Patron-only content',
}, {
# dead vimeo and embed URLs, need to extract post_file
'url': 'https://www.patreon.com/posts/hunter-x-hunter-34007913',
'info_dict': {
'id': '34007913',
'ext': 'mp4',
'title': 'Hunter x Hunter | Kurapika DESTROYS Uvogin!!!',
'like_count': int,
'uploader': 'YaBoyRoshi',
'timestamp': 1581636833,
'channel_url': 'https://www.patreon.com/yaboyroshi',
'thumbnail': r're:^https?://.*$',
'tags': ['Hunter x Hunter'],
'uploader_id': '14264111',
'comment_count': int,
'channel_follower_count': int,
'description': 'Kurapika is a walking cheat code!',
'upload_date': '20200213',
'channel_id': '2147162',
'uploader_url': 'https://www.patreon.com/yaboyroshi',
},
}, {
# NSFW vimeo embed URL
'url': 'https://www.patreon.com/posts/4k-spiderman-4k-96414599',
'info_dict': {
'id': '902250943',
'ext': 'mp4',
'title': '❤️(4K) Spiderman Girl Yeonhwa’s Gift ❤️(4K) 스파이더맨걸 연화의 선물',
'description': '❤️(4K) Spiderman Girl Yeonhwa’s Gift \n❤️(4K) 스파이더맨걸 연화의 선물',
'uploader': 'Npickyeonhwa',
'uploader_id': '90574422',
'uploader_url': 'https://www.patreon.com/Yeonhwa726',
'channel_id': '10237902',
'channel_url': 'https://www.patreon.com/Yeonhwa726',
'duration': 70,
'timestamp': 1705150153,
'upload_date': '20240113',
'comment_count': int,
'like_count': int,
'thumbnail': r're:^https?://.+',
},
'params': {'skip_download': 'm3u8'},
'expected_warnings': ['Failed to parse XML: not well-formed'],
}, {
# multiple attachments/embeds
'url': 'https://www.patreon.com/posts/holy-wars-solos-100601977',
'playlist_count': 3,
'info_dict': {
'id': '100601977',
'title': '"Holy Wars" (Megadeth) Solos Transcription & Lesson/Analysis',
'description': 'md5:d099ab976edfce6de2a65c2b169a88d3',
'uploader': 'Bradley Hall',
'uploader_id': '24401883',
'uploader_url': 'https://www.patreon.com/bradleyhallguitar',
'channel_id': '3193932',
'channel_url': 'https://www.patreon.com/bradleyhallguitar',
'channel_follower_count': int,
'timestamp': 1710777855,
'upload_date': '20240318',
'like_count': int,
'comment_count': int,
'thumbnail': r're:^https?://.+',
},
'skip': 'Patron-only content',
}, {
# Contains a comment reply in the 'included' section
'url': 'https://www.patreon.com/posts/114721679',
'info_dict': {
'id': '114721679',
'ext': 'mp4',
'upload_date': '20241025',
'uploader': 'Japanalysis',
'like_count': int,
'thumbnail': r're:^https?://.+',
'comment_count': int,
'title': 'Karasawa Part 2',
'description': 'Part 2 of this video https://www.youtube.com/watch?v=Azms2-VTASk',
'uploader_url': 'https://www.patreon.com/japanalysis',
'uploader_id': '80504268',
'channel_url': 'https://www.patreon.com/japanalysis',
'channel_follower_count': int,
'timestamp': 1729897015,
'channel_id': '9346307',
},
'params': {'getcomments': True},
}]
_RETURN_TYPE = 'video'
def _real_extract(self, url):
    """Extract a single Patreon post.

    A post may carry several kinds of media at once: direct file
    attachments, an embedded third-party player (Vimeo, vids.io, generic)
    and/or the post's own video file. Each becomes an entry; one entry is
    merged into the post metadata, several entries become a playlist.
    """
    video_id = self._match_id(url)
    post = self._call_api(
        f'posts/{video_id}', video_id, query={
            'fields[media]': 'download_url,mimetype,size_bytes,file_name',
            'fields[post]': 'comment_count,content,embed,image,like_count,post_file,published_at,title,current_user_can_view',
            'fields[user]': 'full_name,url',
            'fields[post_tag]': 'value',
            'fields[campaign]': 'url,name,patron_count',
            'json-api-use-default-includes': 'false',
            'include': 'audio,user,user_defined_tags,campaign,attachments_media',
        })
    attributes = post['data']['attributes']
    # Post-level metadata shared by every extracted entry
    info = traverse_obj(attributes, {
        'title': ('title', {str.strip}),
        'description': ('content', {clean_html}),
        'thumbnail': ('image', ('large_url', 'url'), {url_or_none}, any),
        'timestamp': ('published_at', {parse_iso8601}),
        'like_count': ('like_count', {int_or_none}),
        'comment_count': ('comment_count', {int_or_none}),
    })

    entries = []
    idx = 0
    # Related resources (media files, author, tags, campaign) arrive in 'included'
    for include in traverse_obj(post, ('included', lambda _, v: v['type'])):
        include_type = include['type']
        if include_type == 'media':
            media_attributes = traverse_obj(include, ('attributes', {dict})) or {}
            download_url = url_or_none(media_attributes.get('download_url'))
            ext = mimetype2ext(media_attributes.get('mimetype'))
            # if size_bytes is None, this media file is likely unavailable
            # See: https://github.com/yt-dlp/yt-dlp/issues/4608
            size_bytes = int_or_none(media_attributes.get('size_bytes'))
            if download_url and ext in KNOWN_EXTENSIONS and size_bytes is not None:
                # 1-based suffix keeps IDs unique for multi-attachment posts
                idx += 1
                entries.append({
                    'id': f'{video_id}-{idx}',
                    'ext': ext,
                    'filesize': size_bytes,
                    'url': download_url,
                    'alt_title': traverse_obj(media_attributes, ('file_name', {str})),
                })
        elif include_type == 'user':
            info.update(traverse_obj(include, {
                'uploader': ('attributes', 'full_name', {str}),
                'uploader_id': ('id', {str_or_none}),
                'uploader_url': ('attributes', 'url', {url_or_none}),
            }))
        elif include_type == 'post_tag':
            if post_tag := traverse_obj(include, ('attributes', 'value', {str})):
                info.setdefault('tags', []).append(post_tag)
        elif include_type == 'campaign':
            info.update(traverse_obj(include, {
                'channel': ('attributes', 'title', {str}),
                'channel_id': ('id', {str_or_none}),
                'channel_url': ('attributes', 'url', {url_or_none}),
                'channel_follower_count': ('attributes', 'patron_count', {int_or_none}),
            }))

    # Must be all-lowercase 'referer' so we can smuggle it to Generic, SproutVideo, and Vimeo.
    # patreon.com URLs redirect to www.patreon.com; this matters when requesting mux.com m3u8s
    headers = {'referer': 'https://www.patreon.com/'}

    # handle Vimeo embeds
    if traverse_obj(attributes, ('embed', 'provider')) == 'Vimeo':
        v_url = urllib.parse.unquote(self._html_search_regex(
            r'(https(?:%3A%2F%2F|://)player\.vimeo\.com.+app_id(?:=|%3D)+\d+)',
            traverse_obj(attributes, ('embed', 'html', {str})), 'vimeo url', fatal=False) or '')
        if url_or_none(v_url) and self._request_webpage(
                v_url, video_id, 'Checking Vimeo embed URL', headers=headers,
                fatal=False, errnote=False, expected_status=429):  # 429 is TLS fingerprint rejection
            entries.append(self.url_result(
                VimeoIE._smuggle_referrer(v_url, headers['referer']),
                VimeoIE, url_transparent=True))

    embed_url = traverse_obj(attributes, ('embed', 'url', {url_or_none}))
    if embed_url and (urlh := self._request_webpage(
            embed_url, video_id, 'Checking embed URL', headers=headers,
            fatal=False, errnote=False, expected_status=403)):
        # Vimeo's Cloudflare anti-bot protection will return HTTP status 200 for 404, so we need
        # to check for "Sorry, we couldn&rsquo;t find that page" in the meta description tag
        meta_description = clean_html(self._html_search_meta(
            'description', self._webpage_read_content(urlh, embed_url, video_id, fatal=False), default=None))
        # Password-protected vids.io embeds return 403 errors w/o --video-password or session cookie
        if ((urlh.status != 403 and meta_description != 'Sorry, we couldn’t find that page')
                or VidsIoIE.suitable(embed_url)):
            entries.append(self.url_result(smuggle_url(embed_url, headers)))

    # The post's own media file: either a direct download or an HLS playlist
    post_file = traverse_obj(attributes, ('post_file', {dict}))
    if post_file:
        name = post_file.get('name')
        ext = determine_ext(name)
        if ext in KNOWN_EXTENSIONS:
            entries.append({
                'id': video_id,
                'ext': ext,
                'url': post_file['url'],
            })
        elif name == 'video' or determine_ext(post_file.get('url')) == 'm3u8':
            formats, subtitles = self._extract_m3u8_formats_and_subtitles(
                post_file['url'], video_id, headers=headers)
            entries.append({
                'id': video_id,
                'formats': formats,
                'subtitles': subtitles,
                'http_headers': headers,
            })

    can_view_post = traverse_obj(attributes, 'current_user_can_view')
    comments = None
    if can_view_post and info.get('comment_count'):
        comments = self.extract_comments(video_id)

    if not entries and can_view_post is False:
        self.raise_no_formats('You do not have access to this post', video_id=video_id, expected=True)
    elif not entries:
        self.raise_no_formats('No supported media found in this post', video_id=video_id, expected=True)
    elif len(entries) == 1:
        # A single entry inherits the post metadata directly
        info.update(entries[0])
    else:
        for entry in entries:
            entry.update(info)
        return self.playlist_result(entries, video_id, **info, __post_extractor=comments)

    info['id'] = video_id
    info['__post_extractor'] = comments
    return info
def _get_comments(self, post_id):
    """Yield comment dicts for a post, paginating the comments API newest-first.

    Pagination is cursor-based: the cursor for the next page is the ID of the
    last top-level comment on the current page, and iteration stops once the
    number of comments seen reaches the total reported in 'meta.count'.
    """
    cursor = None
    count = 0
    params = {
        'page[count]': 50,
        'include': 'parent.commenter.campaign,parent.post.user,parent.post.campaign.creator,parent.replies.parent,parent.replies.commenter.campaign,parent.replies.post.user,parent.replies.post.campaign.creator,commenter.campaign,post.user,post.campaign.creator,replies.parent,replies.commenter.campaign,replies.post.user,replies.post.campaign.creator,on_behalf_of_campaign',
        'fields[comment]': 'body,created,is_by_creator',
        'fields[user]': 'image_url,full_name,url',
        'filter[flair]': 'image_tiny_url,name',
        'sort': '-created',
        'json-api-version': 1.0,
        'json-api-use-default-includes': 'false',
    }
    for page in itertools.count(1):
        params.update({'page[cursor]': cursor} if cursor else {})
        response = self._call_api(
            f'posts/{post_id}/comments', post_id, query=params, note=f'Downloading comments page {page}')
        cursor = None
        # Comment replies can appear in 'included' rather than 'data', so scan both
        for comment in traverse_obj(response, (('data', 'included'), lambda _, v: v['type'] == 'comment' and v['id'])):
            count += 1
            author_id = traverse_obj(comment, ('relationships', 'commenter', 'data', 'id'))
            yield {
                **traverse_obj(comment, {
                    'id': ('id', {str_or_none}),
                    'text': ('attributes', 'body', {str}),
                    'timestamp': ('attributes', 'created', {parse_iso8601}),
                    # Top-level comments get parent 'root' per yt-dlp convention
                    'parent': ('relationships', 'parent', 'data', ('id', {value('root')}), {str}, any),
                    'author_is_uploader': ('attributes', 'is_by_creator', {bool}),
                }),
                # Author details live in the sibling 'included' user resource
                **traverse_obj(response, (
                    'included', lambda _, v: v['id'] == author_id and v['type'] == 'user', 'attributes', {
                        'author': ('full_name', {str}),
                        'author_thumbnail': ('image_url', {url_or_none}),
                    }), get_all=False),
                'author_id': author_id,
            }
        if count < traverse_obj(response, ('meta', 'count')):
            cursor = traverse_obj(response, ('data', -1, 'id'))
        if cursor is None:
            break
class PatreonCampaignIE(PatreonBaseIE):
    """Extract all posts of a Patreon campaign as a playlist."""
    IE_NAME = 'patreon:campaign'
    # Matches /m/<id>, /api/campaigns/<id>, /c/<vanity>, /cw/<vanity> or a bare
    # vanity slug; 'creation', 'posts' and 'rss' are excluded as reserved paths
    _VALID_URL = r'''(?x)
        https?://(?:www\.)?patreon\.com/(?:
            (?:m|api/campaigns)/(?P<campaign_id>\d+)|
            (?:cw?/)?(?P<vanity>(?!creation[?/]|posts/|rss[?/])[\w-]+)
        )(?:/posts)?/?(?:$|[?#])'''
    _TESTS = [{
        'url': 'https://www.patreon.com/dissonancepod/',
        'info_dict': {
            'title': 'Cognitive Dissonance Podcast',
            'channel_url': 'https://www.patreon.com/dissonancepod',
            'id': '80642',
            'description': r're:(?s).*We produce a weekly news podcast focusing on stories that deal with skepticism and religion.*',
            'channel_id': '80642',
            'channel': 'Cognitive Dissonance Podcast',
            'age_limit': 0,
            'channel_follower_count': int,
            'uploader_id': '87145',
            'uploader_url': 'https://www.patreon.com/dissonancepod',
            'uploader': 'Cognitive Dissonance Podcast',
            'thumbnail': r're:^https?://.*$',
        },
        'playlist_mincount': 68,
    }, {
        'url': 'https://www.patreon.com/m/4767637/posts',
        'info_dict': {
            'title': 'Not Just Bikes',
            'id': '4767637',
            'channel_id': '4767637',
            'channel_url': 'https://www.patreon.com/notjustbikes',
            'description': r're:(?s).*Not Just Bikes started as a way to explain why we chose to live in the Netherlands.*',
            'age_limit': 0,
            'channel': 'Not Just Bikes',
            'uploader_url': 'https://www.patreon.com/notjustbikes',
            'uploader': 'Jason',
            'uploader_id': '37306634',
            'thumbnail': r're:^https?://.*$',
        },
        'playlist_mincount': 71,
    }, {
        'url': 'https://www.patreon.com/api/campaigns/4243769/posts',
        'info_dict': {
            'title': 'Second Thought',
            'channel_follower_count': int,
            'id': '4243769',
            'channel_id': '4243769',
            'channel_url': 'https://www.patreon.com/secondthought',
            'description': r're:(?s).*Second Thought is an educational YouTube channel.*',
            'age_limit': 0,
            'channel': 'Second Thought',
            'uploader_url': 'https://www.patreon.com/secondthought',
            'uploader': 'JT Chapman',
            'uploader_id': '32718287',
            'thumbnail': r're:^https?://.*$',
        },
        'playlist_mincount': 201,
    }, {
        'url': 'https://www.patreon.com/c/OgSog',
        'info_dict': {
            'id': '8504388',
            'title': 'OGSoG',
            'description': r're:(?s)Hello and welcome to our Patreon page. We are Mari, Lasercorn, .+',
            'channel': 'OGSoG',
            'channel_id': '8504388',
            'channel_url': 'https://www.patreon.com/OgSog',
            'uploader_url': 'https://www.patreon.com/OgSog',
            'uploader_id': '72323575',
            'uploader': 'David Moss',
            'thumbnail': r're:https?://.+/.+',
            'channel_follower_count': int,
            'age_limit': 0,
        },
        'playlist_mincount': 331,
        'skip': 'Channel removed',
    }, {
        # next.js v13 data, see https://github.com/yt-dlp/yt-dlp/issues/13622
        'url': 'https://www.patreon.com/c/anythingelse/posts',
        'info_dict': {
            'id': '9631148',
            'title': 'Anything Else?',
            'description': 'md5:2ee1db4aed2f9460c2b295825a24aa08',
            'uploader': 'dan ',
            'uploader_id': '13852412',
            'uploader_url': 'https://www.patreon.com/anythingelse',
            'channel': 'Anything Else?',
            'channel_id': '9631148',
            'channel_url': 'https://www.patreon.com/anythingelse',
            'channel_follower_count': int,
            'age_limit': 0,
            'thumbnail': r're:https?://.+/.+',
        },
        'playlist_mincount': 151,
    }, {
        'url': 'https://www.patreon.com/cw/anythingelse',
        'only_matching': True,
    }, {
        'url': 'https://www.patreon.com/c/OgSog/posts',
        'only_matching': True,
    }, {
        'url': 'https://www.patreon.com/dissonancepod/posts',
        'only_matching': True,
    }, {
        'url': 'https://www.patreon.com/m/5932659',
        'only_matching': True,
    }, {
        'url': 'https://www.patreon.com/api/campaigns/4243769',
        'only_matching': True,
    }]

    def _entries(self, campaign_id):
        """Yield url_results for every non-draft post of the campaign, newest first."""
        cursor = None
        params = {
            'fields[post]': 'patreon_url,url',
            'filter[campaign_id]': campaign_id,
            'filter[is_draft]': 'false',
            'sort': '-published_at',
            'json-api-use-default-includes': 'false',
        }
        for page in itertools.count(1):
            params.update({'page[cursor]': cursor} if cursor else {})
            posts_json = self._call_api('posts', campaign_id, query=params, note=f'Downloading posts page {page}')
            cursor = traverse_obj(posts_json, ('meta', 'pagination', 'cursors', 'next'))
            for post_url in traverse_obj(posts_json, ('data', ..., 'attributes', 'patreon_url')):
                yield self.url_result(urljoin('https://www.patreon.com/', post_url), PatreonIE)
            if cursor is None:
                break

    def _real_extract(self, url):
        campaign_id, vanity = self._match_valid_url(url).group('campaign_id', 'vanity')
        if campaign_id is None:
            # Vanity URL: scrape the numeric campaign ID out of the page's Next.js payload
            webpage = self._download_webpage(url, vanity, headers={'User-Agent': self.patreon_user_agent})
            campaign_id = traverse_obj(self._search_nextjs_data(webpage, vanity, default=None), (
                'props', 'pageProps', 'bootstrapEnvelope', 'pageBootstrap', 'campaign', 'data', 'id', {str}))
            if not campaign_id:
                # next.js v13 layout, see https://github.com/yt-dlp/yt-dlp/issues/13622
                campaign_id = traverse_obj(self._search_nextjs_v13_data(webpage, vanity), (
                    ((..., 'value', 'campaign', 'data'), lambda _, v: v['type'] == 'campaign'),
                    'id', {str}, any, {require('campaign ID')}))

        params = {
            'json-api-use-default-includes': 'false',
            'fields[user]': 'full_name,url',
            'fields[campaign]': 'name,summary,url,patron_count,creation_count,is_nsfw,avatar_photo_url',
            'include': 'creator',
        }
        # fatal=False: the playlist is still usable without campaign metadata
        campaign_response = self._call_api(
            f'campaigns/{campaign_id}', campaign_id,
            note='Downloading campaign info', fatal=False,
            query=params) or {}

        campaign_info = campaign_response.get('data') or {}
        channel_name = traverse_obj(campaign_info, ('attributes', 'name'))
        user_info = traverse_obj(
            campaign_response, ('included', lambda _, v: v['type'] == 'user'),
            default={}, expected_type=dict, get_all=False)

        return {
            '_type': 'playlist',
            'id': campaign_id,
            'title': channel_name,
            'entries': self._entries(campaign_id),
            'description': clean_html(traverse_obj(campaign_info, ('attributes', 'summary'))),
            'channel_url': traverse_obj(campaign_info, ('attributes', 'url')),
            'channel_follower_count': int_or_none(traverse_obj(campaign_info, ('attributes', 'patron_count'))),
            'channel_id': campaign_id,
            'channel': channel_name,
            'uploader_url': traverse_obj(user_info, ('attributes', 'url')),
            'uploader_id': str_or_none(user_info.get('id')),
            'uploader': traverse_obj(user_info, ('attributes', 'full_name')),
            'playlist_count': traverse_obj(campaign_info, ('attributes', 'creation_count')),
            'age_limit': 18 if traverse_obj(campaign_info, ('attributes', 'is_nsfw')) else 0,
            'thumbnail': url_or_none(traverse_obj(campaign_info, ('attributes', 'avatar_photo_url'))),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/pokergo.py | yt_dlp/extractor/pokergo.py | import base64
from .common import InfoExtractor
from ..utils import (
ExtractorError,
try_get,
)
from ..utils.traversal import traverse_obj
class PokerGoBaseIE(InfoExtractor):
    """Shared authentication handling for PokerGO extractors."""
    _NETRC_MACHINE = 'pokergo'
    _AUTH_TOKEN = None
    _PROPERTY_ID = '1dfb3940-7d53-4980-b0b0-f28b369a000d'

    def _perform_login(self, username, password):
        # The token is cached on the class, so later instances skip the login round-trip
        if self._AUTH_TOKEN:
            return
        self.report_login()
        credentials = base64.b64encode(f'{username}:{password}'.encode()).decode()
        response = self._download_json(
            f'https://subscription.pokergo.com/properties/{self._PROPERTY_ID}/sign-in', None,
            headers={'authorization': f'Basic {credentials}'}, data=b'')
        PokerGoBaseIE._AUTH_TOKEN = response['meta']['token']
        if not self._AUTH_TOKEN:
            raise ExtractorError('Unable to get Auth Token.', expected=True)

    def _real_initialize(self):
        if self._AUTH_TOKEN:
            return
        self.raise_login_required(method='password')
class PokerGoIE(PokerGoBaseIE):
    """Extract a single PokerGO video, delegating playback to its JW Player media."""
    _VALID_URL = r'https?://(?:www\.)?pokergo\.com/videos/(?P<id>[^&$#/?]+)'
    _TESTS = [{
        'url': 'https://www.pokergo.com/videos/2a70ec4e-4a80-414b-97ec-725d9b72a7dc',
        'info_dict': {
            'id': 'aVLOxDzY',
            'ext': 'mp4',
            'title': 'Poker After Dark | Season 12 (2020) | Cry Me a River | Episode 2',
            'description': 'md5:c7a8c29556cbfb6eb3c0d5d622251b71',
            'thumbnail': 'https://cdn.jwplayer.com/v2/media/aVLOxDzY/poster.jpg?width=720',
            'timestamp': 1608085715,
            'duration': 2700.12,
            'season_number': 12,
            'episode_number': 2,
            'series': 'poker after dark',
            'upload_date': '20201216',
            'season': 'Season 12',
            'episode': 'Episode 2',
            'display_id': '2a70ec4e-4a80-414b-97ec-725d9b72a7dc',
        },
        'params': {'skip_download': True},
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        metadata = self._download_json(
            f'https://api.pokergo.com/v2/properties/{self._PROPERTY_ID}/videos/{video_id}', video_id,
            headers={'authorization': f'Bearer {self._AUTH_TOKEN}'})['data']
        media_id = metadata['source']

        thumbnails = []
        for image in metadata.get('images') or []:
            if not image.get('url'):
                continue
            thumbnails.append({
                'url': image['url'],
                'id': image.get('label'),
                'width': image.get('width'),
                'height': image.get('height'),
            })

        # Episode/season info is attached via a show tag matching this video
        series_json = traverse_obj(metadata, ('show_tags', lambda _, v: v['video_id'] == video_id, any)) or {}

        return {
            '_type': 'url_transparent',
            'display_id': video_id,
            'title': metadata.get('title'),
            'description': metadata.get('description'),
            'duration': metadata.get('duration'),
            'thumbnails': thumbnails,
            'season_number': series_json.get('season'),
            'episode_number': series_json.get('episode_number'),
            'series': try_get(series_json, lambda x: x['tag']['name']),
            'url': f'https://cdn.jwplayer.com/v2/media/{media_id}',
        }
class PokerGoCollectionIE(PokerGoBaseIE):
    """Expand a PokerGO collection into a playlist of PokerGoIE entries."""
    _VALID_URL = r'https?://(?:www\.)?pokergo\.com/collections/(?P<id>[^&$#/?]+)'
    _TESTS = [{
        'url': 'https://www.pokergo.com/collections/19ffe481-5dae-481a-8869-75cc0e3c4700',
        'playlist_mincount': 13,
        'info_dict': {
            'id': '19ffe481-5dae-481a-8869-75cc0e3c4700',
        },
    }]

    def _entries(self, playlist_id):
        collection = self._download_json(
            f'https://api.pokergo.com/v2/properties/{self._PROPERTY_ID}/collections/{playlist_id}?include=entities',
            playlist_id, headers={'authorization': f'Bearer {self._AUTH_TOKEN}'})['data']
        for video in collection.get('collection_video') or []:
            if video_id := video.get('id'):
                yield self.url_result(
                    f'https://www.pokergo.com/videos/{video_id}',
                    ie=PokerGoIE.ie_key(), video_id=video_id)

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        return self.playlist_result(self._entries(playlist_id), playlist_id=playlist_id)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/magentamusik.py | yt_dlp/extractor/magentamusik.py | from .common import InfoExtractor
from ..utils import ExtractorError, int_or_none, join_nonempty, url_or_none
from ..utils.traversal import traverse_obj
class MagentaMusikIE(InfoExtractor):
    """Extractor for concert VODs on magentamusik.de (Telekom)."""
    _VALID_URL = r'https?://(?:www\.)?magentamusik\.de/(?P<id>[^/?#]+)'
    _TESTS = [{
        'url': 'https://www.magentamusik.de/marty-friedman-woa-2023-9208205928595409235',
        'md5': 'd82dd4748f55fc91957094546aaf8584',
        'info_dict': {
            'id': '9208205928595409235',
            'display_id': 'marty-friedman-woa-2023-9208205928595409235',
            'ext': 'mp4',
            'title': 'Marty Friedman: W:O:A 2023',
            'alt_title': 'Konzert vom: 05.08.2023 13:00',
            'duration': 2760,
            'categories': ['Musikkonzert'],
            'release_year': 2023,
            'location': 'Deutschland',
        },
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        player_config = self._search_json(
            r'data-js-element="o-video-player__config">', webpage, 'player config', display_id, fatal=False)
        if not player_config:
            raise ExtractorError('No video found', expected=True)

        # The page-level asset ID must first be resolved to the partner video ID
        asset_id = player_config['assetId']
        asset_details = self._download_json(
            f'https://wcps.t-online.de/cvss/magentamusic/vodclient/v2/assetdetails/58938/{asset_id}',
            display_id, note='Downloading asset details')
        video_id = traverse_obj(
            asset_details, ('content', 'partnerInformation', ..., 'reference', {str}), get_all=False)
        if not video_id:
            raise ExtractorError('Unable to extract video id')

        vod_data = self._download_json(
            f'https://wcps.t-online.de/cvss/magentamusic/vodclient/v2/player/58935/{video_id}/Main%20Movie', video_id)
        smil_url = traverse_obj(
            vod_data, ('content', 'feature', 'representations', ...,
                       'contentPackages', ..., 'media', 'href', {url_or_none}), get_all=False)

        result = {
            'id': video_id,
            'display_id': display_id,
            'formats': self._extract_smil_formats(smil_url, video_id),
        }
        result.update(traverse_obj(vod_data, ('content', 'feature', 'metadata', {
            'title': 'title',
            'alt_title': 'originalTitle',
            'description': 'longDescription',
            'duration': ('runtimeInSeconds', {int_or_none}),
            'location': ('countriesOfProduction', {list}, {lambda x: join_nonempty(*x, delim=', ')}),
            'release_year': ('yearOfProduction', {int_or_none}),
            'categories': ('mainGenre', {str}, all, filter),
        })))
        return result
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/lecturio.py | yt_dlp/extractor/lecturio.py | import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
clean_html,
determine_ext,
float_or_none,
int_or_none,
str_or_none,
url_or_none,
urlencode_postdata,
urljoin,
)
class LecturioBaseIE(InfoExtractor):
    """Common login handling for Lecturio extractors."""
    _API_BASE_URL = 'https://app.lecturio.com/api/en/latest/html5/'
    _LOGIN_URL = 'https://app.lecturio.com/en/login'
    _NETRC_MACHINE = 'lecturio'

    def _perform_login(self, username, password):
        # A successful session redirects away from the login URL
        # Sets some cookies
        _, urlh = self._download_webpage_handle(
            self._LOGIN_URL, None, 'Downloading login popup')
        # Already logged in via an existing cookie session
        if self._LOGIN_URL not in urlh.url:
            return

        response, urlh = self._download_webpage_handle(
            self._LOGIN_URL, None, 'Logging in',
            data=urlencode_postdata({
                'signin[email]': username,
                'signin[password]': password,
                'signin[remember]': 'on',
            }))
        # Logged in successfully
        if self._LOGIN_URL not in urlh.url:
            return

        errors = self._html_search_regex(
            r'(?s)<ul[^>]+class=["\']error_list[^>]+>(.+?)</ul>', response,
            'errors', default=None)
        if errors:
            raise ExtractorError(f'Unable to login: {errors}', expected=True)
        raise ExtractorError('Unable to log in')
class LecturioIE(LecturioBaseIE):
    """Extract a single Lecturio lecture (lecturio.com / lecturio.de)."""
    _VALID_URL = r'''(?x)
        https://
            (?:
                app\.lecturio\.com/([^/?#]+/(?P<nt>[^/?#&]+)\.lecture|(?:\#/)?lecture/c/\d+/(?P<id>\d+))|
                (?:www\.)?lecturio\.de/(?:[^/?#]+/)+(?P<nt_de>[^/?#&]+)\.vortrag
            )
    '''
    _TESTS = [{
        'url': 'https://app.lecturio.com/medical-courses/important-concepts-and-terms-introduction-to-microbiology.lecture#tab/videos',
        'md5': '9a42cf1d8282a6311bf7211bbde26fde',
        'info_dict': {
            'id': '39634',
            'ext': 'mp4',
            'title': 'Important Concepts and Terms — Introduction to Microbiology',
        },
        'skip': 'Requires lecturio account credentials',
    }, {
        'url': 'https://www.lecturio.de/jura/oeffentliches-recht-staatsexamen.vortrag',
        'only_matching': True,
    }, {
        'url': 'https://www.lecturio.de/jura/oeffentliches-recht-at-1-staatsexamen/oeffentliches-recht-staatsexamen.vortrag',
        'only_matching': True,
    }, {
        'url': 'https://app.lecturio.com/#/lecture/c/6434/39634',
        'only_matching': True,
    }]
    # Maps the language names used by the captions API to ISO 639-1 codes
    _CC_LANGS = {
        'Arabic': 'ar',
        'Bulgarian': 'bg',
        'German': 'de',
        'English': 'en',
        'Spanish': 'es',
        'Persian': 'fa',
        'French': 'fr',
        'Japanese': 'ja',
        'Polish': 'pl',
        'Pashto': 'ps',
        'Russian': 'ru',
    }

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        nt = mobj.group('nt') or mobj.group('nt_de')
        lecture_id = mobj.group('id')
        display_id = nt or lecture_id
        api_path = 'lectures/' + lecture_id if lecture_id else 'lecture/' + nt + '.json'
        video = self._download_json(
            self._API_BASE_URL + api_path, display_id)
        title = video['title'].strip()
        if not lecture_id:
            # Derive the numeric lecture ID from 'XX_12345'-style product/uid values
            pid = video.get('productId') or video.get('uid')
            if pid:
                spid = pid.split('_')
                if spid and len(spid) == 2:
                    lecture_id = spid[1]

        formats = []
        for format_ in video['content']['media']:
            if not isinstance(format_, dict):
                continue
            file_ = format_.get('file')
            if not file_:
                continue
            ext = determine_ext(file_)
            if ext == 'smil':
                # smil contains only broken RTMP formats anyway
                continue
            file_url = url_or_none(file_)
            if not file_url:
                continue
            label = str_or_none(format_.get('label'))
            filesize = int_or_none(format_.get('fileSize'))
            f = {
                'url': file_url,
                'format_id': label,
                'filesize': float_or_none(filesize, invscale=1000),
            }
            if label:
                # Labels like '720p (1.2 Mbps)' carry both height and a better format ID
                mobj = re.match(r'(\d+)p\s*\(([^)]+)\)', label)
                if mobj:
                    f.update({
                        'format_id': mobj.group(2),
                        'height': int(mobj.group(1)),
                    })
            formats.append(f)

        subtitles = {}
        automatic_captions = {}
        captions = video.get('captions') or []
        for cc in captions:
            cc_url = cc.get('url')
            if not cc_url:
                continue
            cc_label = cc.get('translatedCode')
            lang = cc.get('languageCode') or self._search_regex(
                r'/([a-z]{2})_', cc_url, 'lang',
                default=cc_label.split()[0] if cc_label else 'en')
            original_lang = self._search_regex(
                r'/[a-z]{2}_([a-z]{2})_', cc_url, 'original lang',
                default=None)
            # cc_label may be None (see the `lang` fallback above), so guard the
            # membership test — `'auto-translated' in None` would raise TypeError
            sub_dict = (automatic_captions
                        if (cc_label and 'auto-translated' in cc_label) or original_lang
                        else subtitles)
            sub_dict.setdefault(self._CC_LANGS.get(lang, lang), []).append({
                'url': cc_url,
            })

        return {
            'id': lecture_id or nt,
            'title': title,
            'formats': formats,
            'subtitles': subtitles,
            'automatic_captions': automatic_captions,
        }
class LecturioCourseIE(LecturioBaseIE):
    """Expand a lecturio.com course into a playlist of its lectures."""
    _VALID_URL = r'https?://app\.lecturio\.com/(?:[^/]+/(?P<nt>[^/?#&]+)\.course|(?:#/)?course/c/(?P<id>\d+))'
    _TESTS = [{
        'url': 'https://app.lecturio.com/medical-courses/microbiology-introduction.course#/',
        'info_dict': {
            'id': 'microbiology-introduction',
            'title': 'Microbiology: Introduction',
            'description': 'md5:13da8500c25880c6016ae1e6d78c386a',
        },
        'playlist_count': 45,
        'skip': 'Requires lecturio account credentials',
    }, {
        'url': 'https://app.lecturio.com/#/course/c/6434',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        nt, course_id = self._match_valid_url(url).groups()
        display_id = nt or course_id
        if course_id:
            api_path = f'courses/{course_id}'
        else:
            api_path = f'course/content/{nt}.json'
        course = self._download_json(self._API_BASE_URL + api_path, display_id)

        entries = []
        for lecture in course.get('lectures', []):
            lecture_id = str_or_none(lecture.get('id'))
            lecture_url = lecture.get('url')
            if lecture_url:
                lecture_url = urljoin(url, lecture_url)
            else:
                # Fall back to the canonical app URL when no direct link is given
                lecture_url = f'https://app.lecturio.com/#/lecture/c/{course_id}/{lecture_id}'
            entries.append(self.url_result(
                lecture_url, ie=LecturioIE.ie_key(), video_id=lecture_id))

        return self.playlist_result(
            entries, display_id, course.get('title'),
            clean_html(course.get('description')))
class LecturioDeCourseIE(LecturioBaseIE):
    """Expand a lecturio.de course page into a playlist of its lectures."""
    _VALID_URL = r'https?://(?:www\.)?lecturio\.de/[^/]+/(?P<id>[^/?#&]+)\.kurs'
    _TEST = {
        'url': 'https://www.lecturio.de/jura/grundrechte.kurs',
        'only_matching': True,
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        # Each lecture row carries a data-lecture-id plus a .vortrag link
        lecture_re = r'(?s)<td[^>]+\bdata-lecture-id=["\'](?P<id>\d+).+?\bhref=(["\'])(?P<url>(?:(?!\2).)+\.vortrag)\b[^>]+>'
        entries = [
            self.url_result(
                urljoin(url, mobj.group('url')),
                ie=LecturioIE.ie_key(), video_id=mobj.group('id'))
            for mobj in re.finditer(lecture_re, webpage)
        ]

        title = self._search_regex(
            r'<h1[^>]*>([^<]+)', webpage, 'title', default=None)
        return self.playlist_result(entries, display_id, title)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/umg.py | yt_dlp/extractor/umg.py | from .common import InfoExtractor
from ..utils import clean_html
from ..utils.traversal import find_element, traverse_obj
class UMGDeIE(InfoExtractor):
    """Extractor for music videos hosted on universal-music.de."""
    IE_NAME = 'umg:de'
    IE_DESC = 'Universal Music Deutschland'
    _VALID_URL = r'https?://(?:www\.)?universal-music\.de/[^/?#]+/videos/(?P<slug>[^/?#]+-(?P<id>\d+))'
    _TESTS = [{
        'url': 'https://www.universal-music.de/sido/videos/jedes-wort-ist-gold-wert-457803',
        'info_dict': {
            'id': '457803',
            'ext': 'mp4',
            'title': 'Jedes Wort ist Gold wert',
            'artists': ['Sido'],
            'description': 'md5:df2dbffcff1a74e0a7c9bef4b497aeec',
            'display_id': 'jedes-wort-ist-gold-wert-457803',
            'duration': 210.0,
            'thumbnail': r're:https?://images\.universal-music\.de/img/assets/.+\.jpg',
            'timestamp': 1513591800,
            'upload_date': '20171218',
            'view_count': int,
        },
    }, {
        'url': 'https://www.universal-music.de/alexander-eder/videos/der-doktor-hat-gesagt-609533',
        'info_dict': {
            'id': '609533',
            'ext': 'mp4',
            'title': 'Der Doktor hat gesagt',
            'artists': ['Alexander Eder'],
            'display_id': 'der-doktor-hat-gesagt-609533',
            'duration': 146.0,
            'thumbnail': r're:https?://images\.universal-music\.de/img/assets/.+\.jpg',
            'timestamp': 1742982100,
            'upload_date': '20250326',
        },
    }]

    def _real_extract(self, url):
        display_id, video_id = self._match_valid_url(url).group('slug', 'id')
        webpage = self._download_webpage(url, display_id)

        # Start from the page's JSON-LD, then override/add the fields below
        info = self._search_json_ld(webpage, display_id)
        info.update({
            'id': video_id,
            'artists': traverse_obj(self._html_search_meta('umg-artist-screenname', webpage), (filter, all)),
            # The JSON LD description duplicates the title
            'description': traverse_obj(webpage, ({find_element(cls='_3Y0Lj')}, {clean_html})),
            'display_id': display_id,
            'formats': self._extract_m3u8_formats(
                'https://hls.universal-music.de/get', display_id, 'mp4', query={'id': video_id}),
        })
        return info
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/toggle.py | yt_dlp/extractor/toggle.py | import json
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
float_or_none,
int_or_none,
parse_iso8601,
strip_or_none,
)
class ToggleIE(InfoExtractor):
    """Extractor for mewatch.sg (formerly toggle.sg) via the tvinci TVP API."""
    IE_NAME = 'toggle'
    _VALID_URL = r'(?:https?://(?:(?:www\.)?mewatch|video\.toggle)\.sg/(?:en|zh)/(?:[^/]+/){2,}|toggle:)(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://www.mewatch.sg/en/series/lion-moms-tif/trailers/lion-moms-premier/343115',
        'info_dict': {
            'id': '343115',
            'ext': 'mp4',
            'title': 'Lion Moms Premiere',
            'description': 'md5:aea1149404bff4d7f7b6da11fafd8e6b',
            'upload_date': '20150910',
            'timestamp': 1441858274,
        },
        'params': {
            'skip_download': 'm3u8 download',
        },
    }, {
        'url': 'http://www.mewatch.sg/en/movies/dug-s-special-mission/341413',
        'only_matching': True,
    }, {
        'url': 'http://www.mewatch.sg/en/series/28th-sea-games-5-show/28th-sea-games-5-show-ep11/332861',
        'only_matching': True,
    }, {
        'url': 'http://video.toggle.sg/en/clips/seraph-sun-aloysius-will-suddenly-sing-some-old-songs-in-high-pitch-on-set/343331',
        'only_matching': True,
    }, {
        'url': 'http://www.mewatch.sg/en/clips/seraph-sun-aloysius-will-suddenly-sing-some-old-songs-in-high-pitch-on-set/343331',
        'only_matching': True,
    }, {
        'url': 'http://www.mewatch.sg/zh/series/zero-calling-s2-hd/ep13/336367',
        'only_matching': True,
    }, {
        'url': 'http://www.mewatch.sg/en/series/vetri-s2/webisodes/jeeva-is-an-orphan-vetri-s2-webisode-7/342302',
        'only_matching': True,
    }, {
        'url': 'http://www.mewatch.sg/en/movies/seven-days/321936',
        'only_matching': True,
    }, {
        'url': 'https://www.mewatch.sg/en/tv-show/news/may-2017-cna-singapore-tonight/fri-19-may-2017/512456',
        'only_matching': True,
    }, {
        'url': 'http://www.mewatch.sg/en/channels/eleven-plus/401585',
        'only_matching': True,
    }]
    # Hard-coded public API credentials used in the GetMediaInfo request below
    _API_USER = 'tvpapi_147'
    _API_PASS = '11111'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # JSON request body expected by the tvpapi GetMediaInfo endpoint
        params = {
            'initObj': {
                'Locale': {
                    'LocaleLanguage': '',
                    'LocaleCountry': '',
                    'LocaleDevice': '',
                    'LocaleUserState': 0,
                },
                'Platform': 0,
                'SiteGuid': 0,
                'DomainID': '0',
                'UDID': '',
                'ApiUser': self._API_USER,
                'ApiPass': self._API_PASS,
            },
            'MediaID': video_id,
            'mediaType': 0,
        }
        info = self._download_json(
            'http://tvpapi.as.tvinci.com/v2_9/gateways/jsonpostgw.aspx?m=GetMediaInfo',
            video_id, 'Downloading video info json', data=json.dumps(params).encode())
        title = info['MediaName']

        # Each 'Files' item may be an HLS/DASH/ISM manifest or a direct MP4
        formats = []
        for video_file in info.get('Files', []):
            video_url, vid_format = video_file.get('URL'), video_file.get('Format')
            if not video_url or video_url == 'NA' or not vid_format:
                continue
            ext = determine_ext(video_url)
            vid_format = vid_format.replace(' ', '')
            # if geo-restricted, m3u8 is inaccessible, but mp4 is okay
            if ext == 'm3u8':
                m3u8_formats = self._extract_m3u8_formats(
                    video_url, video_id, ext='mp4', m3u8_id=vid_format,
                    note=f'Downloading {vid_format} m3u8 information',
                    errnote=f'Failed to download {vid_format} m3u8 information',
                    fatal=False)
                for f in m3u8_formats:
                    # Apple FairPlay Streaming
                    if '/fpshls/' in f['url']:
                        continue
                    formats.append(f)
            elif ext == 'mpd':
                formats.extend(self._extract_mpd_formats(
                    video_url, video_id, mpd_id=vid_format,
                    note=f'Downloading {vid_format} MPD manifest',
                    errnote=f'Failed to download {vid_format} MPD manifest',
                    fatal=False))
            elif ext == 'ism':
                formats.extend(self._extract_ism_formats(
                    video_url, video_id, ism_id=vid_format,
                    note=f'Downloading {vid_format} ISM manifest',
                    errnote=f'Failed to download {vid_format} ISM manifest',
                    fatal=False))
            elif ext == 'mp4':
                formats.append({
                    'ext': ext,
                    'url': video_url,
                    'format_id': vid_format,
                })
        if not formats:
            # No formats at all: check the metadata for a DRM flag
            for meta in (info.get('Metas') or []):
                if (not self.get_param('allow_unplayable_formats')
                        and meta.get('Key') == 'Encryption' and meta.get('Value') == '1'):
                    self.report_drm(video_id)
            # Most likely because geo-blocked if no formats and no DRM

        thumbnails = []
        for picture in info.get('Pictures', []):
            if not isinstance(picture, dict):
                continue
            pic_url = picture.get('URL')
            if not pic_url:
                continue
            thumbnail = {
                'url': pic_url,
            }
            # PicSize is e.g. '1920x1080'
            pic_size = picture.get('PicSize', '')
            m = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', pic_size)
            if m:
                thumbnail.update({
                    'width': int(m.group('width')),
                    'height': int(m.group('height')),
                })
            thumbnails.append(thumbnail)

        def counter(prefix):
            # The API is inconsistent about key casing, e.g. 'ViewCounter' vs 'view_counter'
            return int_or_none(
                info.get(prefix + 'Counter') or info.get(prefix.lower() + '_counter'))

        return {
            'id': video_id,
            'title': title,
            'description': strip_or_none(info.get('Description')),
            'duration': int_or_none(info.get('Duration')),
            'timestamp': parse_iso8601(info.get('CreationDate') or None),
            'average_rating': float_or_none(info.get('Rating')),
            'view_count': counter('View'),
            'like_count': counter('Like'),
            'thumbnails': thumbnails,
            'formats': formats,
        }
class MeWatchIE(InfoExtractor):
    IE_NAME = 'mewatch'
    _VALID_URL = r'https?://(?:(?:www|live)\.)?mewatch\.sg/watch/[^/?#&]+-(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'https://www.mewatch.sg/watch/Recipe-Of-Life-E1-179371',
        'info_dict': {
            'id': '1008625',
            'ext': 'mp4',
            'title': 'Recipe Of Life 味之道',
            'timestamp': 1603306526,
            'description': 'md5:6e88cde8af2068444fc8e1bc3ebf257c',
            'upload_date': '20201021',
        },
        'params': {
            'skip_download': 'm3u8 download',
        },
    }, {
        'url': 'https://www.mewatch.sg/watch/Little-Red-Dot-Detectives-S2-搜密。打卡。小红点-S2-E1-176232',
        'only_matching': True,
    }, {
        'url': 'https://www.mewatch.sg/watch/Little-Red-Dot-Detectives-S2-%E6%90%9C%E5%AF%86%E3%80%82%E6%89%93%E5%8D%A1%E3%80%82%E5%B0%8F%E7%BA%A2%E7%82%B9-S2-E1-176232',
        'only_matching': True,
    }, {
        'url': 'https://live.mewatch.sg/watch/Recipe-Of-Life-E41-189759',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # meWATCH pages carry a numeric item ID; the CDN item endpoint maps
        # it to the legacy Toggle "customId", which ToggleIE can extract.
        item_id = self._match_id(url)
        item = self._download_json(
            f'https://cdn.mewatch.sg/api/items/{item_id}',
            item_id, query={'segments': 'all'})
        custom_id = item['customId']
        return self.url_result(
            f'toggle:{custom_id}', ToggleIE.ie_key(), custom_id)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/redge.py | yt_dlp/extractor/redge.py |
from .common import InfoExtractor
from ..networking import HEADRequest
from ..utils import (
float_or_none,
int_or_none,
join_nonempty,
parse_qs,
update_url_query,
)
from ..utils.traversal import traverse_obj
class RedCDNLivxIE(InfoExtractor):
    # Recorded transmissions served from the RedCDN "livx" platform
    # (used e.g. by the Polish Sejm/Senat streaming services).
    _VALID_URL = r'https?://[^.]+\.(?:dcs\.redcdn|atmcdn)\.pl/(?:live(?:dash|hls|ss)|nvr)/o2/(?P<tenant>[^/?#]+)/(?P<id>[^?#]+)\.livx'
    IE_NAME = 'redcdnlivx'

    _TESTS = [{
        'url': 'https://r.dcs.redcdn.pl/livedash/o2/senat/ENC02/channel.livx?indexMode=true&startTime=638272860000&stopTime=638292544000',
        'info_dict': {
            'id': 'ENC02-638272860000-638292544000',
            'ext': 'mp4',
            'title': 'ENC02',
            'duration': 19683.982,
            'live_status': 'was_live',
        },
    }, {
        'url': 'https://r.dcs.redcdn.pl/livedash/o2/sejm/ENC18/live.livx?indexMode=true&startTime=722333096000&stopTime=722335562000',
        'info_dict': {
            'id': 'ENC18-722333096000-722335562000',
            'ext': 'mp4',
            'title': 'ENC18',
            'duration': 2463.995,
            'live_status': 'was_live',
        },
    }, {
        'url': 'https://r.dcs.redcdn.pl/livehls/o2/sportevolution/live/triathlon2018/warsaw.livx/playlist.m3u8?startTime=550305000000&stopTime=550327620000',
        'info_dict': {
            'id': 'triathlon2018-warsaw-550305000000-550327620000',
            'ext': 'mp4',
            'title': 'triathlon2018/warsaw',
            'duration': 22619.98,
            'live_status': 'was_live',
        },
    }, {
        'url': 'https://n-25-12.dcs.redcdn.pl/nvr/o2/sejm/Migacz-ENC01/1.livx?startTime=722347200000&stopTime=722367345000',
        'only_matching': True,
    }, {
        'url': 'https://redir.atmcdn.pl/nvr/o2/sejm/ENC08/1.livx?startTime=503831270000&stopTime=503840040000',
        'only_matching': True,
    }]

    '''
    Known methods (first in url path):
    - `livedash` - DASH MPD
    - `livehls` - HTTP Live Streaming
    - `livess` - IIS Smooth Streaming
    - `nvr` - CCTV mode, directly returns a file, typically flv, avc1, aac
    - `sc` - shoutcast/icecast (audio streams, like radio)
    '''

    def _real_extract(self, url):
        tenant, path = self._match_valid_url(url).group('tenant', 'id')
        qs = parse_qs(url)
        # Optional recording window taken from the original URL's query
        start_time = traverse_obj(qs, ('startTime', 0, {int_or_none}))
        stop_time = traverse_obj(qs, ('stopTime', 0, {int_or_none}))

        def livx_mode(mode):
            # Build the canonical URL for one of the known delivery methods
            # (see the class-level note), re-attaching the time window
            suffix = ''
            if mode == 'livess':
                suffix = '/manifest'
            elif mode == 'livehls':
                suffix = '/playlist.m3u8'
            file_qs = {}
            if start_time:
                file_qs['startTime'] = start_time
            if stop_time:
                file_qs['stopTime'] = stop_time
            if mode == 'nvr':
                file_qs['nolimit'] = 1
            elif mode != 'sc':
                file_qs['indexMode'] = 'true'
            return update_url_query(f'https://r.dcs.redcdn.pl/{mode}/o2/{tenant}/{path}.livx{suffix}', file_qs)

        # no id or title for a transmission. making ones up.
        title = path \
            .replace('/live', '').replace('live/', '') \
            .replace('/channel', '').replace('channel/', '') \
            .strip('/')
        video_id = join_nonempty(title.replace('/', '-'), start_time, stop_time)

        formats = []
        # downloading the manifest separately here instead of _extract_ism_formats to also get some stream metadata
        ism_res = self._download_xml_handle(
            livx_mode('livess'), video_id,
            note='Downloading ISM manifest',
            errnote='Failed to download ISM manifest',
            fatal=False)
        ism_doc = None
        if ism_res is not False:
            ism_doc, ism_urlh = ism_res
            formats, _ = self._parse_ism_formats_and_subtitles(ism_doc, ism_urlh.url, 'ss')

        # `nvr` redirects to a plain file; a HEAD request follows the
        # redirect without fetching the payload
        nvr_urlh = self._request_webpage(
            HEADRequest(livx_mode('nvr')), video_id, 'Follow flv file redirect', fatal=False,
            expected_status=lambda _: True)
        if nvr_urlh and nvr_urlh.status == 200:
            formats.append({
                'url': nvr_urlh.url,
                'ext': 'flv',
                'format_id': 'direct-0',
                'preference': -1,  # might be slow
            })
        formats.extend(self._extract_mpd_formats(livx_mode('livedash'), video_id, mpd_id='dash', fatal=False))
        formats.extend(self._extract_m3u8_formats(
            livx_mode('livehls'), video_id, m3u8_id='hls', ext='mp4', fatal=False))

        # ISM @Duration is expressed in @TimeScale units (100ns ticks by default)
        time_scale = traverse_obj(ism_doc, ('@TimeScale', {int_or_none})) or 10000000
        duration = traverse_obj(
            ism_doc, ('@Duration', {float_or_none(scale=time_scale)})) or None

        live_status = None
        if traverse_obj(ism_doc, '@IsLive') == 'TRUE':
            live_status = 'is_live'
        elif duration:
            live_status = 'was_live'

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'duration': duration,
            'live_status': live_status,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/dreisat.py | yt_dlp/extractor/dreisat.py | from .zdf import ZDFBaseIE
from ..utils import (
int_or_none,
merge_dicts,
parse_iso8601,
)
from ..utils.traversal import require, traverse_obj
class DreiSatIE(ZDFBaseIE):
    IE_NAME = '3sat'
    _VALID_URL = r'https?://(?:www\.)?3sat\.de/(?:[^/?#]+/)*(?P<id>[^/?#&]+)\.html'
    _TESTS = [{
        'url': 'https://www.3sat.de/dokumentation/reise/traumziele-suedostasiens-die-philippinen-und-vietnam-102.html',
        'info_dict': {
            'id': '231124_traumziele_philippinen_und_vietnam_dokreise',
            'ext': 'mp4',
            'title': 'Traumziele Südostasiens (1/2): Die Philippinen und Vietnam',
            'description': 'md5:26329ce5197775b596773b939354079d',
            'duration': 2625.0,
            'thumbnail': 'https://www.3sat.de/assets/traumziele-suedostasiens-die-philippinen-und-vietnam-100~original?cb=1699870351148',
            'episode': 'Traumziele Südostasiens (1/2): Die Philippinen und Vietnam',
            'episode_id': 'POS_cc7ff51c-98cf-4d12-b99d-f7a551de1c95',
            'timestamp': 1747920900,
            'upload_date': '20250522',
        },
    }, {
        'url': 'https://www.3sat.de/film/ab-18/ab-18---mein-fremdes-ich-100.html',
        'md5': 'f92638413a11d759bdae95c9d8ec165c',
        'info_dict': {
            'id': '221128_mein_fremdes_ich2_ab18',
            'ext': 'mp4',
            'title': 'Ab 18! - Mein fremdes Ich',
            'description': 'md5:cae0c0b27b7426d62ca0dda181738bf0',
            'duration': 2625.0,
            'thumbnail': 'https://www.3sat.de/assets/ab-18---mein-fremdes-ich-106~original?cb=1666081865812',
            'episode': 'Ab 18! - Mein fremdes Ich',
            'episode_id': 'POS_6225d1ca-a0d5-45e3-870b-e783ee6c8a3f',
            'timestamp': 1695081600,
            'upload_date': '20230919',
        },
    }, {
        'url': 'https://www.3sat.de/gesellschaft/37-grad-leben/aus-dem-leben-gerissen-102.html',
        'md5': 'a903eaf8d1fd635bd3317cd2ad87ec84',
        'info_dict': {
            'id': '250323_0903_sendung_sgl',
            'ext': 'mp4',
            'title': 'Plötzlich ohne dich',
            'description': 'md5:380cc10659289dd91510ad8fa717c66b',
            'duration': 1620.0,
            'thumbnail': 'https://www.3sat.de/assets/37-grad-leben-106~original?cb=1645537156810',
            'episode': 'Plötzlich ohne dich',
            'episode_id': 'POS_faa7a93c-c0f2-4d51-823f-ce2ac3ee191b',
            'timestamp': 1743162540,
            'upload_date': '20250328',
        },
    }, {
        # Video with chapters
        'url': 'https://www.3sat.de/kultur/buchmesse/dein-buch-das-beste-von-der-leipziger-buchmesse-2025-teil-1-100.html',
        'md5': '6b95790ce52e75f0d050adcdd2711ee6',
        'info_dict': {
            'id': '250330_dein_buch1_bum',
            'ext': 'mp4',
            'title': 'dein buch - Das Beste von der Leipziger Buchmesse 2025 - Teil 1',
            'description': 'md5:bae51bfc22f15563ce3acbf97d2e8844',
            'duration': 5399.0,
            'thumbnail': 'https://www.3sat.de/assets/buchmesse-kerkeling-100~original?cb=1747256996338',
            'chapters': 'count:24',
            'episode': 'dein buch - Das Beste von der Leipziger Buchmesse 2025 - Teil 1',
            'episode_id': 'POS_1ef236cc-b390-401e-acd0-4fb4b04315fb',
            'timestamp': 1743327000,
            'upload_date': '20250330',
        },
    }, {
        # Same as https://www.zdf.de/filme/filme-sonstige/der-hauptmann-112.html
        'url': 'https://www.3sat.de/film/spielfilm/der-hauptmann-100.html',
        'only_matching': True,
    }, {
        # Same as https://www.zdf.de/wissen/nano/nano-21-mai-2019-102.html, equal media ids
        'url': 'https://www.3sat.de/wissen/nano/nano-21-mai-2019-102.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Embedded player config carries both the API bearer token and the
        # frontend-API content URL
        player = self._search_json(
            r'data-zdfplayer-jsb=(["\'])', webpage, 'player JSON', video_id)
        player_url = player['content']
        api_token = f'Bearer {player["apiToken"]}'

        content = self._call_api(player_url, video_id, 'video metadata', api_token)

        video_target = content['mainVideoContent']['http://zdf.de/rels/target']
        # Accept either a ready ptmd path or a template that is expanded below
        ptmd_path = traverse_obj(video_target, (
            (('streams', 'default'), None),
            ('http://zdf.de/rels/streams/ptmd', 'http://zdf.de/rels/streams/ptmd-template'),
            {str}, any, {require('ptmd path')}))
        ptmd_url = self._expand_ptmd_template(player_url, ptmd_path)
        aspect_ratio = self._parse_aspect_ratio(video_target.get('aspectRatio'))
        info = self._extract_ptmd(ptmd_url, video_id, api_token, aspect_ratio)

        # Formats/subtitles from the ptmd document, enriched with page metadata
        return merge_dicts(info, {
            **traverse_obj(content, {
                'title': (('title', 'teaserHeadline'), {str}, any),
                'episode': (('title', 'teaserHeadline'), {str}, any),
                'description': (('leadParagraph', 'teasertext'), {str}, any),
                'timestamp': ('editorialDate', {parse_iso8601}),
            }),
            **traverse_obj(video_target, {
                'duration': ('duration', {int_or_none}),
                'chapters': ('streamAnchorTag', {self._extract_chapters}),
            }),
            'thumbnails': self._extract_thumbnails(traverse_obj(content, ('teaserImageRef', 'layouts', {dict}))),
            **traverse_obj(content, ('programmeItem', 0, 'http://zdf.de/rels/target', {
                'series_id': ('http://zdf.de/rels/cmdm/series', 'seriesUuid', {str}),
                'series': ('http://zdf.de/rels/cmdm/series', 'seriesTitle', {str}),
                'season': ('http://zdf.de/rels/cmdm/season', 'seasonTitle', {str}),
                'season_number': ('http://zdf.de/rels/cmdm/season', 'seasonNumber', {int_or_none}),
                'season_id': ('http://zdf.de/rels/cmdm/season', 'seasonUuid', {str}),
                'episode_number': ('episodeNumber', {int_or_none}),
                'episode_id': ('contentId', {str}),
            })),
        })
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/imggaming.py | yt_dlp/extractor/imggaming.py | import json
from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
int_or_none,
str_or_none,
try_get,
)
class ImgGamingBaseIE(InfoExtractor):
    _API_BASE = 'https://dce-frontoffice.imggaming.com/api/v2/'
    _API_KEY = '857a1e5d-e35e-4fdf-805b-a87b6f8364bf'
    _HEADERS = None  # populated in _initialize_pre_login; gains 'Authorization' after login
    _MANIFEST_HEADERS = {'Accept-Encoding': 'identity'}
    _REALM = None  # subclasses must set the DCE realm name
    _VALID_URL_TEMPL = r'https?://(?P<domain>%s)/(?P<type>live|playlist|video)/(?P<id>\d+)(?:\?.*?\bplaylistId=(?P<playlist_id>\d+))?'

    def _initialize_pre_login(self):
        # Base headers sent with every API request for this realm
        self._HEADERS = {
            'Realm': 'dce.' + self._REALM,
            'x-api-key': self._API_KEY,
        }

    def _perform_login(self, username, password):
        # Exchange credentials for a bearer token and remember it for all
        # subsequent API calls
        p_headers = self._HEADERS.copy()
        p_headers['Content-Type'] = 'application/json'
        self._HEADERS['Authorization'] = 'Bearer ' + self._download_json(
            self._API_BASE + 'login',
            None, 'Logging in', data=json.dumps({
                'id': username,
                'secret': password,
            }).encode(), headers=p_headers)['authorisationToken']

    def _real_initialize(self):
        # Extraction always requires an authenticated session
        if not self._HEADERS.get('Authorization'):
            self.raise_login_required(method='password')

    def _call_api(self, path, media_id):
        """Fetch JSON from the front-office API using the session headers."""
        return self._download_json(
            self._API_BASE + path + media_id, media_id, headers=self._HEADERS)

    def _extract_dve_api_url(self, media_id, media_type):
        # VOD and live events use different stream endpoints; both return a
        # callback URL pointing at the actual player metadata
        stream_path = 'stream'
        if media_type == 'video':
            stream_path += '/vod/'
        else:
            stream_path += '?eventId='
        try:
            return self._call_api(
                stream_path, media_id)['playerUrlCallback']
        except ExtractorError as e:
            # 403 responses carry a human-readable reason in the JSON body
            if isinstance(e.cause, HTTPError) and e.cause.status == 403:
                raise ExtractorError(
                    self._parse_json(e.cause.response.read().decode(), media_id)['messages'][0],
                    expected=True)
            raise

    def _real_extract(self, url):
        domain, media_type, media_id, playlist_id = self._match_valid_url(url).groups()

        if playlist_id:
            if self._yes_playlist(playlist_id, media_id):
                media_type, media_id = 'playlist', playlist_id

        if media_type == 'playlist':
            # Expand the playlist into per-video url_results handled by this IE
            playlist = self._call_api('vod/playlist/', media_id)
            entries = []
            for video in try_get(playlist, lambda x: x['videos']['vods']) or []:
                video_id = str_or_none(video.get('id'))
                if not video_id:
                    continue
                entries.append(self.url_result(
                    f'https://{domain}/video/{video_id}',
                    self.ie_key(), video_id))
            return self.playlist_result(
                entries, media_id, playlist.get('title'),
                playlist.get('description'))

        dve_api_url = self._extract_dve_api_url(media_id, media_type)
        video_data = self._download_json(dve_api_url, media_id)
        is_live = media_type == 'live'
        if is_live:
            title = self._call_api('event/', media_id)['title']
        else:
            title = video_data['name']

        formats = []
        for proto in ('hls', 'dash'):
            # Stream URL may be flat ('hlsUrl') or nested ({'hls': {'url': ...}})
            media_url = video_data.get(proto + 'Url') or try_get(video_data, lambda x: x[proto]['url'])
            if not media_url:
                continue
            if proto == 'hls':
                m3u8_formats = self._extract_m3u8_formats(
                    media_url, media_id, 'mp4', live=is_live,
                    m3u8_id='hls', fatal=False, headers=self._MANIFEST_HEADERS)
                for f in m3u8_formats:
                    # Segment requests need the same headers as the manifest
                    f.setdefault('http_headers', {}).update(self._MANIFEST_HEADERS)
                    formats.append(f)
            else:
                formats.extend(self._extract_mpd_formats(
                    media_url, media_id, mpd_id='dash', fatal=False,
                    headers=self._MANIFEST_HEADERS))

        subtitles = {}
        for subtitle in video_data.get('subtitles', []):
            subtitle_url = subtitle.get('url')
            if not subtitle_url:
                continue
            subtitles.setdefault(subtitle.get('lang', 'en_US'), []).append({
                'url': subtitle_url,
            })

        return {
            'id': media_id,
            'title': title,
            'formats': formats,
            'thumbnail': video_data.get('thumbnailUrl'),
            'description': video_data.get('description'),
            'duration': int_or_none(video_data.get('duration')),
            'tags': video_data.get('tags'),
            'is_live': is_live,
            'subtitles': subtitles,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/hgtv.py | yt_dlp/extractor/hgtv.py | from .common import InfoExtractor
class HGTVComShowIE(InfoExtractor):
    IE_NAME = 'hgtv.com:show'
    _VALID_URL = r'https?://(?:www\.)?hgtv\.com/shows/[^/]+/(?P<id>[^/?#&]+)'

    _TESTS = [{
        # data-module="video"
        'url': 'http://www.hgtv.com/shows/flip-or-flop/flip-or-flop-full-episodes-season-4-videos',
        'info_dict': {
            'id': 'flip-or-flop-full-episodes-season-4-videos',
            'title': 'Flip or Flop Full Episodes',
        },
        'playlist_mincount': 15,
    }, {
        # data-deferred-module="video"
        'url': 'http://www.hgtv.com/shows/good-bones/episodes/an-old-victorian-house-gets-a-new-facelift',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        # The show page embeds an x-config JSON blob inside the video module;
        # the first channel holds the playlist of videos
        config = self._parse_json(
            self._search_regex(
                r'(?s)data-(?:deferred-)?module=["\']video["\'][^>]*>.*?<script[^>]+type=["\']text/x-config["\'][^>]*>(.+?)</script',
                webpage, 'video config'),
            display_id)['channels'][0]

        entries = []
        for video in config['videos']:
            release_url = video.get('releaseUrl')
            if release_url:
                entries.append(self.url_result(release_url))

        return self.playlist_result(
            entries, display_id, config.get('title'), config.get('description'))
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/foxnews.py | yt_dlp/extractor/foxnews.py | import re
from .amp import AMPIE
from .common import InfoExtractor
class FoxNewsIE(AMPIE):
    IE_NAME = 'foxnews'
    IE_DESC = 'Fox News and Fox Business Video'
    _VALID_URL = r'https?://video\.(?:insider\.)?fox(?:news|business)\.com/v/(?:video-embed\.html\?video_id=)?(?P<id>\d+)'
    _TESTS = [
        {
            'url': 'https://video.foxnews.com/v/6320653836112',
            'info_dict': {
                'id': '6320653836112',
                'ext': 'mp4',
                'title': 'Tucker Carlson joins \'Gutfeld!\' to discuss his new documentary',
                'thumbnail': r're:^https?://.*\.jpg$',
                'duration': 404,
                'upload_date': '20230217',
                'description': 'md5:858a8a36f59e9ca897d758855bcdfa02',
                'timestamp': 1676611344.0,
            },
            'params': {'skip_download': 'm3u8'},
        },
        {
            # From http://insider.foxnews.com/2016/08/25/univ-wisconsin-student-group-pushing-silence-certain-words
            'url': 'http://video.insider.foxnews.com/v/video-embed.html?video_id=5099377331001&autoplay=true&share_url=http://insider.foxnews.com/2016/08/25/univ-wisconsin-student-group-pushing-silence-certain-words&share_title=Student%20Group:%20Saying%20%27Politically%20Correct,%27%20%27Trash%27%20and%20%27Lame%27%20Is%20Offensive&share=true',
            'info_dict': {
                'id': '5099377331001',
                'ext': 'mp4',
                'title': '82416_censoring',
                'description': '82416_censoring',
                'upload_date': '20160826',
                'timestamp': 1472169708.0,
                'thumbnail': r're:^https?://.*\.jpg$',
                'duration': 521,
            },
            'params': {'skip_download': 'm3u8'},
        },
        {
            'url': 'http://video.foxnews.com/v/3937480/frozen-in-time/#sp=show-clips',
            'md5': '32aaded6ba3ef0d1c04e238d01031e5e',
            'info_dict': {
                'id': '3937480',
                'ext': 'flv',
                'title': 'Frozen in Time',
                'description': '16-year-old girl is size of toddler',
                'duration': 265,
                'timestamp': 1304411491,
                'upload_date': '20110503',
                'thumbnail': r're:^https?://.*\.jpg$',
            },
            'skip': '404 page',
        },
        {
            'url': 'http://video.foxnews.com/v/3922535568001/rep-luis-gutierrez-on-if-obamas-immigration-plan-is-legal/#sp=show-clips',
            'md5': '5846c64a1ea05ec78175421b8323e2df',
            'info_dict': {
                'id': '3922535568001',
                'ext': 'mp4',
                'title': "Rep. Luis Gutierrez on if Obama's immigration plan is legal",
                'description': "Congressman discusses president's plan",
                'duration': 292,
                'timestamp': 1417662047,
                'upload_date': '20141204',
                'thumbnail': r're:^https?://.*\.jpg$',
            },
            'skip': 'm3u8 HTTP error 400 in web browser',
        },
        {
            'url': 'http://video.foxnews.com/v/video-embed.html?video_id=3937480&d=video.foxnews.com',
            'only_matching': True,
        },
        {
            'url': 'http://video.foxbusiness.com/v/4442309889001',
            'only_matching': True,
        },
    ]

    @classmethod
    def _extract_embed_urls(cls, url, webpage):
        # Find video.foxnews.com embeds placed via <script>, <iframe> or
        # <amp-iframe> and normalize them to canonical video-embed URLs
        for mobj in re.finditer(
                r'''(?x)
                    <(?:script|(?:amp-)?iframe)[^>]+\bsrc=["\']
                    (?:https?:)?//video\.foxnews\.com/v/(?:video-embed\.html|embed\.js)\?
                    (?:[^>"\']+&)?(?:video_)?id=(?P<video_id>\d+)
                ''', webpage):
            yield f'https://video.foxnews.com/v/video-embed.html?video_id={mobj.group("video_id")}'

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # AMPIE helper: fetches and parses the v3 video-player feed
        info = self._extract_feed_info(
            f'https://api.foxnews.com/v3/video-player/{video_id}?callback=uid_{video_id}')
        info['id'] = video_id
        return info
class FoxNewsVideoIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?foxnews\.com/video/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.foxnews.com/video/6328632286112',
        'info_dict': {
            'id': '6328632286112',
            'ext': 'mp4',
            'title': 'Review: 2023 Toyota Prius Prime',
            'duration': 155,
            'thumbnail': r're:^https://.+\.jpg$',
            'timestamp': 1685720177.0,
            'upload_date': '20230602',
            'description': 'md5:b69aafb125b41c1402e9744f53d6edc4',
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://www.foxnews.com/video/6313058664112',
        'info_dict': {
            'id': '6313058664112',
            'ext': 'mp4',
            'thumbnail': r're:https://.+/1280x720/match/image\.jpg',
            'upload_date': '20220930',
            'description': 'New York City, Kids Therapy, Biden',
            'duration': 2415,
            'title': 'Gutfeld! - Thursday, September 29',
            'timestamp': 1664527538,
        },
        'skip': '404 page',
    }]

    def _real_extract(self, url):
        # Thin wrapper: hand the canonical video.foxnews.com URL to FoxNewsIE
        video_id = self._match_id(url)
        embed_url = f'https://video.foxnews.com/v/{video_id}'
        return self.url_result(embed_url, FoxNewsIE, video_id)
class FoxNewsArticleIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?(?:insider\.)?foxnews\.com/(?!v)([^/]+/)+(?P<id>[a-z-]+)'
    IE_NAME = 'foxnews:article'

    _TESTS = [{
        # data-video-id
        'url': 'https://www.foxnews.com/politics/2016/09/08/buzz-about-bud-clinton-camp-denies-claims-wore-earpiece-at-forum.html',
        'md5': 'd2dd6ce809cedeefa96460e964821437',
        'info_dict': {
            'id': '5116295019001',
            'ext': 'mp4',
            'title': 'Trump and Clinton asked to defend positions on Iraq War',
            'description': 'Veterans and Fox News host Dana Perino react on \'The Kelly File\' to NBC\'s presidential forum',
            'timestamp': 1473301045,
            'upload_date': '20160908',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 426,
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        # iframe embed
        'url': 'https://www.foxnews.com/us/2018/03/09/parkland-survivor-kyle-kashuv-on-meeting-trump-his-app-to-prevent-another-school-shooting.amp.html?__twitter_impression=true',
        'info_dict': {
            'id': '5748266721001',
            'ext': 'flv',
            'title': 'Kyle Kashuv has a positive message for the Trump White House',
            'description': 'Marjory Stoneman Douglas student disagrees with classmates.',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 229,
            'timestamp': 1520594670,
            'upload_date': '20180309',
        },
        'skip': '404 page',
    }, {
        'url': 'http://insider.foxnews.com/2016/08/25/univ-wisconsin-student-group-pushing-silence-certain-words',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        # Prefer the explicit data-video-id attribute when the article has one
        video_id = self._html_search_regex(
            r'data-video-id=([\'"])(?P<id>[^\'"]+)\1',
            webpage, 'video ID', group='id', default=None)
        if not video_id:
            # Otherwise fall back to the first embedded player found on the page
            embed_url = next(FoxNewsIE._extract_embed_urls(url, webpage))
            return self.url_result(embed_url, FoxNewsIE.ie_key())

        return self.url_result(
            'http://video.foxnews.com/v/' + video_id, FoxNewsIE.ie_key())
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/dhm.py | yt_dlp/extractor/dhm.py | from .common import InfoExtractor
from ..utils import parse_duration
class DHMIE(InfoExtractor):
    _WORKING = False
    IE_DESC = 'Filmarchiv - Deutsches Historisches Museum'
    _VALID_URL = r'https?://(?:www\.)?dhm\.de/filmarchiv/(?:[^/]+/)+(?P<id>[^/]+)'

    _TESTS = [{
        'url': 'http://www.dhm.de/filmarchiv/die-filme/the-marshallplan-at-work-in-west-germany/',
        'md5': '11c475f670209bf6acca0b2b7ef51827',
        'info_dict': {
            'id': 'the-marshallplan-at-work-in-west-germany',
            'ext': 'flv',
            'title': 'MARSHALL PLAN AT WORK IN WESTERN GERMANY, THE',
            'description': 'md5:1fabd480c153f97b07add61c44407c82',
            'duration': 660,
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }, {
        'url': 'http://www.dhm.de/filmarchiv/02-mapping-the-wall/peter-g/rolle-1/',
        'md5': '09890226332476a3e3f6f2cb74734aa5',
        'info_dict': {
            'id': 'rolle-1',
            'ext': 'flv',
            'title': 'ROLLE 1',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)

        # The page references an XSPF playlist in an inline JS player config
        playlist_url = self._search_regex(
            r"file\s*:\s*'([^']+)'", webpage, 'playlist url')

        entries = self._extract_xspf_playlist(playlist_url, playlist_id)

        # Metadata lives in the HTML page, not in the playlist itself
        title = self._search_regex(
            [r'dc:title="([^"]+)"', r'<title> »([^<]+)</title>'],
            webpage, 'title').strip()
        description = self._html_search_regex(
            r'<p><strong>Description:</strong>(.+?)</p>',
            webpage, 'description', default=None)
        duration = parse_duration(self._search_regex(
            r'<em>Length\s*</em>\s*:\s*</strong>([^<]+)',
            webpage, 'duration', default=None))

        # Fix: the playlist may parse to zero tracks; previously this
        # raised IndexError instead of returning an empty playlist
        if entries:
            entries[0].update({
                'title': title,
                'description': description,
                'duration': duration,
            })

        return self.playlist_result(entries, playlist_id)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nrl.py | yt_dlp/extractor/nrl.py | from .common import InfoExtractor
class NRLTVIE(InfoExtractor):
    _WORKING = False
    _VALID_URL = r'https?://(?:www\.)?nrl\.com/tv(/[^/]+)*/(?P<id>[^/?&#]+)'
    _TEST = {
        'url': 'https://www.nrl.com/tv/news/match-highlights-titans-v-knights-862805/',
        'info_dict': {
            'id': 'YyNnFuaDE6kPJqlDhG4CGQ_w89mKTau4',
            'ext': 'mp4',
            'title': 'Match Highlights: Titans v Knights',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        # Player settings are serialized as JSON inside a q-data attribute
        player_json = self._html_search_regex(
            r'(?s)q-data="({.+?})"', webpage, 'player data')
        q_data = self._parse_json(player_json, display_id)
        ooyala_id = q_data['videoId']

        # Delegate to the Ooyala extractor with the page title as a hint
        return self.url_result(
            f'ooyala:{ooyala_id}', 'Ooyala', ooyala_id, q_data.get('title'))
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tencent.py | yt_dlp/extractor/tencent.py | import random
import re
import string
import time
from .common import InfoExtractor
from ..aes import aes_cbc_encrypt_bytes
from ..utils import (
ExtractorError,
determine_ext,
float_or_none,
int_or_none,
js_to_json,
traverse_obj,
urljoin,
)
class TencentBaseIE(InfoExtractor):
    """Subclasses must set _API_URL, _APP_VERSION, _PLATFORM, _HOST, _REFERER"""

    def _check_api_response(self, api_response):
        # Raise a friendly error for API failures; geo-block messages get a
        # dedicated geo-restriction error
        msg = api_response.get('msg')
        if api_response.get('code') != '0.0' and msg is not None:
            if msg in (
                '您所在区域暂无此内容版权(如设置VPN请关闭后重试)',
                'This content is not available in your area due to copyright restrictions. Please choose other videos.',
            ):
                self.raise_geo_restricted()
            raise ExtractorError(f'Tencent said: {msg}')

    def _get_ckey(self, video_id, url, guid):
        # Build the anti-scraping "ckey": a pipe-delimited payload (field
        # order matters) prefixed with its own char-sum, AES-CBC encrypted
        # with fixed key/IV, then hex-encoded
        ua = self.get_param('http_headers')['User-Agent']

        payload = (f'{video_id}|{int(time.time())}|mg3c3b04ba|{self._APP_VERSION}|{guid}|'
                   f'{self._PLATFORM}|{url[:48]}|{ua.lower()[:48]}||Mozilla|Netscape|Windows x86_64|00|')

        return aes_cbc_encrypt_bytes(
            bytes(f'|{sum(map(ord, payload))}|{payload}', 'utf-8'),
            b'Ok\xda\xa3\x9e/\x8c\xb0\x7f^r-\x9e\xde\xf3\x14',
            b'\x01PJ\xf3V\xe6\x19\xcf.B\xbb\xa6\x8c?p\xf9',
            padding_mode='whitespace').hex().upper()

    def _get_video_api_response(self, video_url, video_id, series_id, subtitle_format, video_format, video_quality):
        # Query the getvinfo endpoint for one quality/format combination;
        # the response is JSONP wrapped in a QZOutputJson= assignment
        guid = ''.join(random.choices(string.digits + string.ascii_lowercase, k=16))
        ckey = self._get_ckey(video_id, video_url, guid)
        query = {
            'vid': video_id,
            'cid': series_id,
            'cKey': ckey,
            'encryptVer': '8.1',
            'spcaptiontype': '1' if subtitle_format == 'vtt' else '0',
            'sphls': '2' if video_format == 'hls' else '0',
            'dtype': '3' if video_format == 'hls' else '0',
            'defn': video_quality,
            'spsrt': '2',  # Enable subtitles
            'sphttps': '1',  # Enable HTTPS
            'otype': 'json',
            'spwm': '1',
            'hevclv': '28',  # Enable HEVC
            'drm': '40',  # Enable DRM
            # For HDR
            'spvideo': '4',
            'spsfrhdr': '100',
            # For SHD
            'host': self._HOST,
            'referer': self._REFERER,
            'ehost': video_url,
            'appVer': self._APP_VERSION,
            'platform': self._PLATFORM,
            # For VQQ
            'guid': guid,
            'flowid': ''.join(random.choices(string.digits + string.ascii_lowercase, k=32)),
        }

        return self._search_json(r'QZOutputJson=', self._download_webpage(
            self._API_URL, video_id, query=query), 'api_response', video_id)

    def _extract_video_formats_and_subtitles(self, api_response, video_id):
        # Turn one API response into format dicts; HLS entries are expanded
        # via their manifest, direct files get the signed fvkey URL
        video_response = api_response['vl']['vi'][0]

        formats, subtitles = [], {}
        for video_format in video_response['ul']['ui']:
            if video_format.get('hls') or determine_ext(video_format['url']) == 'm3u8':
                fmts, subs = self._extract_m3u8_formats_and_subtitles(
                    video_format['url'] + traverse_obj(video_format, ('hls', 'pt'), default=''),
                    video_id, 'mp4', fatal=False)
                formats.extend(fmts)
                self._merge_subtitles(subs, target=subtitles)
            else:
                formats.append({
                    'url': f'{video_format["url"]}{video_response["fn"]}?vkey={video_response["fvkey"]}',
                    'ext': 'mp4',
                })

        # Match the stream's bitrate id ('br') back to its quality descriptor
        # in the 'fl'/'fi' list for metadata like fps and resolution
        identifier = video_response.get('br')
        format_response = traverse_obj(
            api_response, ('fl', 'fi', lambda _, v: v['br'] == identifier),
            expected_type=dict, get_all=False) or {}
        common_info = {
            'width': video_response.get('vw'),
            'height': video_response.get('vh'),
            'abr': float_or_none(format_response.get('audiobandwidth'), scale=1000),
            'vbr': float_or_none(format_response.get('bandwidth'), scale=1000),
            'fps': format_response.get('vfps'),
            'format': format_response.get('sname'),
            'format_id': format_response.get('name'),
            'format_note': format_response.get('resolution'),
            'dynamic_range': {'hdr10': 'hdr10'}.get(format_response.get('name'), 'sdr'),
            'has_drm': format_response.get('drm', 0) != 0,
        }
        for f in formats:
            f.update(common_info)

        return formats, subtitles

    def _extract_video_native_subtitles(self, api_response):
        # Platform-provided subtitle tracks ('sfl'/'fi'), keyed by language
        subtitles = {}
        for subtitle in traverse_obj(api_response, ('sfl', 'fi')) or ():
            subtitles.setdefault(subtitle['lang'].lower(), []).append({
                'url': subtitle['url'],
                'ext': 'srt' if subtitle.get('captionType') == 1 else 'vtt',
                'protocol': 'm3u8_native' if determine_ext(subtitle['url']) == 'm3u8' else 'http',
            })

        return subtitles

    def _extract_all_video_formats_and_subtitles(self, url, video_id, series_id):
        # First request discovers the available qualities; each non-basic
        # quality then gets its own request, and all results are merged
        api_responses = [self._get_video_api_response(url, video_id, series_id, 'srt', 'hls', 'hd')]
        self._check_api_response(api_responses[0])
        qualities = traverse_obj(api_responses, (0, 'fl', 'fi', ..., 'name')) or ('shd', 'fhd')
        for q in qualities:
            if q not in ('ld', 'sd', 'hd'):
                api_responses.append(self._get_video_api_response(
                    url, video_id, series_id, 'vtt', 'hls', q))
                self._check_api_response(api_responses[-1])

        formats, subtitles = [], {}
        for api_response in api_responses:
            fmts, subs = self._extract_video_formats_and_subtitles(api_response, video_id)
            native_subtitles = self._extract_video_native_subtitles(api_response)

            formats.extend(fmts)
            self._merge_subtitles(subs, native_subtitles, target=subtitles)

        return formats, subtitles

    def _get_clean_title(self, title):
        # Strip site-name suffixes that Tencent appends to page titles
        return re.sub(
            r'\s*[_\-]\s*(?:Watch online|Watch HD Video Online|WeTV|腾讯视频|(?:高清)?1080P在线观看平台).*?$',
            '', title or '').strip() or None
class VQQBaseIE(TencentBaseIE):
    # Shared constants for v.qq.com (Tencent Video mainland site) extractors
    _VALID_URL_BASE = r'https?://v\.qq\.com'

    _API_URL = 'https://h5vv6.video.qq.com/getvinfo'
    _APP_VERSION = '3.5.57'
    _PLATFORM = '10901'
    _HOST = 'v.qq.com'
    _REFERER = 'v.qq.com'

    def _get_webpage_metadata(self, webpage, video_id):
        # Page state is embedded as a JS assignment to window.__pinia /
        # window.__PINIA__; js_to_json tolerates the non-strict syntax
        return self._search_json(
            r'<script[^>]*>[^<]*window\.__(?:pinia|PINIA__)\s*=',
            webpage, 'pinia data', video_id, transform_source=js_to_json, fatal=False)
class VQQVideoIE(VQQBaseIE):
    """Extract a single video from a v.qq.com page or cover (series) page."""
    IE_NAME = 'vqq:video'
    _VALID_URL = VQQBaseIE._VALID_URL_BASE + r'/x/(?:page|cover/(?P<series_id>\w+))/(?P<id>\w+)'
    _TESTS = [{
        'url': 'https://v.qq.com/x/page/q326831cny0.html',
        'md5': 'b11c9cb781df710d686b950376676e2a',
        'info_dict': {
            'id': 'q326831cny0',
            'ext': 'mp4',
            'title': '我是选手:雷霆裂阵,终极时刻',
            'description': 'md5:e7ed70be89244017dac2a835a10aeb1e',
            'thumbnail': r're:^https?://[^?#]+q326831cny0',
            'format_id': r're:^shd',
        },
    }, {
        'url': 'https://v.qq.com/x/page/o3013za7cse.html',
        'md5': 'a1bcf42c6d28c189bd2fe2d468abb287',
        'info_dict': {
            'id': 'o3013za7cse',
            'ext': 'mp4',
            'title': '欧阳娜娜VLOG',
            'description': 'md5:29fe847497a98e04a8c3826e499edd2e',
            'thumbnail': r're:^https?://[^?#]+o3013za7cse',
            'format_id': r're:^shd',
        },
    }, {
        'url': 'https://v.qq.com/x/cover/7ce5noezvafma27/a00269ix3l8.html',
        'md5': '87968df6238a65d2478f19c25adf850b',
        'info_dict': {
            'id': 'a00269ix3l8',
            'ext': 'mp4',
            'title': '鸡毛飞上天 第01集',
            'description': 'md5:8cae3534327315b3872fbef5e51b5c5b',
            'thumbnail': r're:^https?://[^?#]+7ce5noezvafma27',
            'series': '鸡毛飞上天',
            'format_id': r're:^shd',
        },
        'skip': '404',
    }, {
        'url': 'https://v.qq.com/x/cover/mzc00200p29k31e/s0043cwsgj0.html',
        'md5': 'fadd10bf88aec3420f06f19ee1d24c5b',
        'info_dict': {
            'id': 's0043cwsgj0',
            'ext': 'mp4',
            'title': '第1集:如何快乐吃糖?',
            'description': 'md5:1d8c3a0b8729ae3827fa5b2d3ebd5213',
            'thumbnail': r're:^https?://[^?#]+s0043cwsgj0',
            'series': '青年理工工作者生活研究所',
            'format_id': r're:^shd',
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        # Geo-restricted to China
        'url': 'https://v.qq.com/x/cover/mcv8hkc8zk8lnov/x0036x5qqsr.html',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        video_id, series_id = self._match_valid_url(url).group('id', 'series_id')
        webpage = self._download_webpage(url, video_id)
        webpage_metadata = self._get_webpage_metadata(webpage, video_id)
        formats, subtitles = self._extract_all_video_formats_and_subtitles(url, video_id, series_id)
        return {
            'id': video_id,
            # Prefer OpenGraph metadata; fall back to the embedded pinia state.
            'title': self._get_clean_title(self._og_search_title(webpage)
                                           or traverse_obj(webpage_metadata, ('global', 'videoInfo', 'title'))),
            'description': (self._og_search_description(webpage)
                            or traverse_obj(webpage_metadata, ('global', 'videoInfo', 'desc'))),
            'formats': formats,
            'subtitles': subtitles,
            'thumbnail': (self._og_search_thumbnail(webpage)
                          or traverse_obj(webpage_metadata, ('global', 'videoInfo', 'pic160x90'))),
            'series': traverse_obj(webpage_metadata, ('global', 'coverInfo', 'title')),
        }
class VQQSeriesIE(VQQBaseIE):
    """Extract all episodes of a v.qq.com cover (series) page as a playlist."""
    IE_NAME = 'vqq:series'
    _VALID_URL = VQQBaseIE._VALID_URL_BASE + r'/x/cover/(?P<id>\w+)\.html/?(?:[?#]|$)'
    _TESTS = [{
        'url': 'https://v.qq.com/x/cover/7ce5noezvafma27.html',
        'info_dict': {
            'id': '7ce5noezvafma27',
            'title': '鸡毛飞上天',
            'description': 'md5:8cae3534327315b3872fbef5e51b5c5b',
        },
        'playlist_count': 55,
    }, {
        'url': 'https://v.qq.com/x/cover/oshd7r0vy9sfq8e.html',
        'info_dict': {
            'id': 'oshd7r0vy9sfq8e',
            'title': '恋爱细胞2',
            'description': 'md5:9d8a2245679f71ca828534b0f95d2a03',
        },
        'playlist_count': 12,
    }]
    def _real_extract(self, url):
        series_id = self._match_id(url)
        webpage = self._download_webpage(url, series_id)
        webpage_metadata = self._get_webpage_metadata(webpage, series_id)
        # Episode ids are scraped from the numbered episode buttons in the HTML.
        episode_paths = [f'/x/cover/{series_id}/{video_id}.html' for video_id in re.findall(
            r'<div[^>]+data-vid="(?P<video_id>[^"]+)"[^>]+class="[^"]+episode-item-rect--number',
            webpage)]
        return self.playlist_from_matches(
            episode_paths, series_id, ie=VQQVideoIE, getter=urljoin(url),
            title=self._get_clean_title(traverse_obj(webpage_metadata, ('coverInfo', 'title'))
                                        or self._og_search_title(webpage)),
            description=(traverse_obj(webpage_metadata, ('coverInfo', 'description'))
                         or self._og_search_description(webpage)))
class WeTvBaseIE(TencentBaseIE):
    """Shared constants and helpers for wetv.vip extractors."""
    _VALID_URL_BASE = r'https?://(?:www\.)?wetv\.vip/(?:[^?#]+/)?play'
    _API_URL = 'https://play.wetv.vip/getvinfo'
    _APP_VERSION = '3.5.57'
    _PLATFORM = '4830201'
    _HOST = 'wetv.vip'
    _REFERER = 'wetv.vip'
    def _get_webpage_metadata(self, webpage, video_id):
        # Page data lives in the Next.js __NEXT_DATA__ blob; may be None
        # when absent (fatal=False).
        return self._parse_json(
            traverse_obj(self._search_nextjs_data(webpage, video_id), ('props', 'pageProps', 'data')),
            video_id, fatal=False)
    def _extract_episode(self, url):
        """Extract a single episode info dict (shared by WeTV and iflix)."""
        video_id, series_id = self._match_valid_url(url).group('id', 'series_id')
        webpage = self._download_webpage(url, video_id)
        webpage_metadata = self._get_webpage_metadata(webpage, video_id)
        formats, subtitles = self._extract_all_video_formats_and_subtitles(url, video_id, series_id)
        return {
            'id': video_id,
            'title': self._get_clean_title(self._og_search_title(webpage)
                                           or traverse_obj(webpage_metadata, ('coverInfo', 'title'))),
            'description': (traverse_obj(webpage_metadata, ('coverInfo', 'description'))
                            or self._og_search_description(webpage)),
            'formats': formats,
            'subtitles': subtitles,
            'thumbnail': self._og_search_thumbnail(webpage),
            'duration': int_or_none(traverse_obj(webpage_metadata, ('videoInfo', 'duration'))),
            'series': traverse_obj(webpage_metadata, ('coverInfo', 'title')),
            'episode_number': int_or_none(traverse_obj(webpage_metadata, ('videoInfo', 'episode'))),
        }
    def _extract_series(self, url, ie):
        """Extract all episodes of a series page as a playlist of `ie` results."""
        series_id = self._match_id(url)
        webpage = self._download_webpage(url, series_id)
        webpage_metadata = self._get_webpage_metadata(webpage, series_id)
        # Build episode paths from the metadata blob, tolerating missing
        # metadata or a missing/empty 'videoList' (previously this raised
        # TypeError by iterating None); fall back to scraping episode links
        # from the HTML.
        episode_paths = ([f'/play/{series_id}/{vid}'
                          for vid in traverse_obj(webpage_metadata, ('videoList', ..., 'vid'))]
                         or re.findall(r'<a[^>]+class="play-video__link"[^>]+href="(?P<path>[^"]+)', webpage))
        return self.playlist_from_matches(
            episode_paths, series_id, ie=ie, getter=urljoin(url),
            title=self._get_clean_title(traverse_obj(webpage_metadata, ('coverInfo', 'title'))
                                        or self._og_search_title(webpage)),
            description=(traverse_obj(webpage_metadata, ('coverInfo', 'description'))
                         or self._og_search_description(webpage)))
class WeTvEpisodeIE(WeTvBaseIE):
    """Extract a single episode from wetv.vip."""
    IE_NAME = 'wetv:episode'
    _VALID_URL = WeTvBaseIE._VALID_URL_BASE + r'/(?P<series_id>\w+)(?:-[^?#]+)?/(?P<id>\w+)(?:-[^?#]+)?'
    _TESTS = [{
        'url': 'https://wetv.vip/en/play/air11ooo2rdsdi3-Cute-Programmer/v0040pr89t9-EP1-Cute-Programmer',
        'md5': '0c70fdfaa5011ab022eebc598e64bbbe',
        'info_dict': {
            'id': 'v0040pr89t9',
            'ext': 'mp4',
            'title': 'EP1: Cute Programmer',
            'description': 'md5:e87beab3bf9f392d6b9e541a63286343',
            'thumbnail': r're:^https?://[^?#]+air11ooo2rdsdi3',
            'series': 'Cute Programmer',
            'episode': 'Episode 1',
            'episode_number': 1,
            'duration': 2835,
            'format_id': r're:^shd',
        },
    }, {
        'url': 'https://wetv.vip/en/play/u37kgfnfzs73kiu/p0039b9nvik',
        'md5': '3b3c15ca4b9a158d8d28d5aa9d7c0a49',
        'info_dict': {
            'id': 'p0039b9nvik',
            'ext': 'mp4',
            'title': 'EP1: You Are My Glory',
            'description': 'md5:831363a4c3b4d7615e1f3854be3a123b',
            'thumbnail': r're:^https?://[^?#]+u37kgfnfzs73kiu',
            'series': 'You Are My Glory',
            'episode': 'Episode 1',
            'episode_number': 1,
            'duration': 2454,
            'format_id': r're:^shd',
        },
    }, {
        'url': 'https://wetv.vip/en/play/lcxgwod5hapghvw-WeTV-PICK-A-BOO/i0042y00lxp-Zhao-Lusi-Describes-The-First-Experiences-She-Had-In-Who-Rules-The-World-%7C-WeTV-PICK-A-BOO',
        'md5': '71133f5c2d5d6cad3427e1b010488280',
        'info_dict': {
            'id': 'i0042y00lxp',
            'ext': 'mp4',
            'title': 'md5:f7a0857dbe5fbbe2e7ad630b92b54e6a',
            'description': 'md5:76260cb9cdc0ef76826d7ca9d92fadfa',
            'thumbnail': r're:^https?://[^?#]+i0042y00lxp',
            'series': 'WeTV PICK-A-BOO',
            'episode': 'Episode 0',
            'episode_number': 0,
            'duration': 442,
            'format_id': r're:^shd',
        },
    }]
    def _real_extract(self, url):
        # All of the work happens in the shared base-class helper.
        return self._extract_episode(url)
class WeTvSeriesIE(WeTvBaseIE):
    """Extract a full wetv.vip series as a playlist of episodes."""
    _VALID_URL = WeTvBaseIE._VALID_URL_BASE + r'/(?P<id>\w+)(?:-[^/?#]+)?/?(?:[?#]|$)'
    _TESTS = [{
        'url': 'https://wetv.vip/play/air11ooo2rdsdi3-Cute-Programmer',
        'info_dict': {
            'id': 'air11ooo2rdsdi3',
            'title': 'Cute Programmer',
            'description': 'md5:e87beab3bf9f392d6b9e541a63286343',
        },
        'playlist_count': 30,
    }, {
        'url': 'https://wetv.vip/en/play/u37kgfnfzs73kiu-You-Are-My-Glory',
        'info_dict': {
            'id': 'u37kgfnfzs73kiu',
            'title': 'You Are My Glory',
            'description': 'md5:831363a4c3b4d7615e1f3854be3a123b',
        },
        'playlist_count': 32,
    }]
    def _real_extract(self, url):
        # All of the work happens in the shared base-class helper.
        return self._extract_series(url, WeTvEpisodeIE)
class IflixBaseIE(WeTvBaseIE):
    """iflix.com reuses the WeTV page structure and API; only the endpoint
    constants differ."""
    _VALID_URL_BASE = r'https?://(?:www\.)?iflix\.com/(?:[^?#]+/)?play'
    _API_URL = 'https://vplay.iflix.com/getvinfo'
    _APP_VERSION = '3.5.57'
    _PLATFORM = '330201'
    _HOST = 'www.iflix.com'
    _REFERER = 'www.iflix.com'
class IflixEpisodeIE(IflixBaseIE):
    """Extract a single episode from iflix.com."""
    IE_NAME = 'iflix:episode'
    _VALID_URL = IflixBaseIE._VALID_URL_BASE + r'/(?P<series_id>\w+)(?:-[^?#]+)?/(?P<id>\w+)(?:-[^?#]+)?'
    _TESTS = [{
        'url': 'https://www.iflix.com/en/play/daijrxu03yypu0s/a0040kvgaza',
        'md5': '9740f9338c3a2105290d16b68fb3262f',
        'info_dict': {
            'id': 'a0040kvgaza',
            'ext': 'mp4',
            'title': 'EP1: Put Your Head On My Shoulder 2021',
            'description': 'md5:c095a742d3b7da6dfedd0c8170727a42',
            'thumbnail': r're:^https?://[^?#]+daijrxu03yypu0s',
            'series': 'Put Your Head On My Shoulder 2021',
            'episode': 'Episode 1',
            'episode_number': 1,
            'duration': 2639,
            'format_id': r're:^shd',
        },
    }, {
        'url': 'https://www.iflix.com/en/play/fvvrcc3ra9lbtt1-Take-My-Brother-Away/i0029sd3gm1-EP1%EF%BC%9ATake-My-Brother-Away',
        'md5': '375c9b8478fdedca062274b2c2f53681',
        'info_dict': {
            'id': 'i0029sd3gm1',
            'ext': 'mp4',
            'title': 'EP1:Take My Brother Away',
            'description': 'md5:f0f7be1606af51cd94d5627de96b0c76',
            'thumbnail': r're:^https?://[^?#]+fvvrcc3ra9lbtt1',
            'series': 'Take My Brother Away',
            'episode': 'Episode 1',
            'episode_number': 1,
            'duration': 228,
            'format_id': r're:^shd',
        },
    }]
    def _real_extract(self, url):
        # All of the work happens in the shared base-class helper.
        return self._extract_episode(url)
class IflixSeriesIE(IflixBaseIE):
    """Extract a full iflix.com series as a playlist of episodes."""
    _VALID_URL = IflixBaseIE._VALID_URL_BASE + r'/(?P<id>\w+)(?:-[^/?#]+)?/?(?:[?#]|$)'
    _TESTS = [{
        'url': 'https://www.iflix.com/en/play/g21a6qk4u1s9x22-You-Are-My-Hero',
        'info_dict': {
            'id': 'g21a6qk4u1s9x22',
            'title': 'You Are My Hero',
            'description': 'md5:9c4d844bc0799cd3d2b5aed758a2050a',
        },
        'playlist_count': 40,
    }, {
        'url': 'https://www.iflix.com/play/0s682hc45t0ohll',
        'info_dict': {
            'id': '0s682hc45t0ohll',
            'title': 'Miss Gu Who Is Silent',
            'description': 'md5:a9651d0236f25af06435e845fa2f8c78',
        },
        'playlist_count': 20,
    }]
    def _real_extract(self, url):
        # All of the work happens in the shared base-class helper.
        return self._extract_series(url, IflixEpisodeIE)
# ---- yt_dlp/extractor/beatport.py ----
import re
from .common import InfoExtractor
from ..utils import int_or_none
class BeatportIE(InfoExtractor):
    """Extract audio-only track previews from beatport.com track pages."""
    _VALID_URL = r'https?://(?:www\.|pro\.)?beatport\.com/track/(?P<display_id>[^/]+)/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'https://beatport.com/track/synesthesia-original-mix/5379371',
        'md5': 'b3c34d8639a2f6a7f734382358478887',
        'info_dict': {
            'id': '5379371',
            'display_id': 'synesthesia-original-mix',
            'ext': 'mp4',
            'title': 'Froxic - Synesthesia (Original Mix)',
        },
    }, {
        'url': 'https://beatport.com/track/love-and-war-original-mix/3756896',
        'md5': 'e44c3025dfa38c6577fbaeb43da43514',
        'info_dict': {
            'id': '3756896',
            'display_id': 'love-and-war-original-mix',
            'ext': 'mp3',
            'title': 'Wolfgang Gartner - Love & War (Original Mix)',
        },
    }, {
        'url': 'https://beatport.com/track/birds-original-mix/4991738',
        'md5': 'a1fd8e8046de3950fd039304c186c05f',
        'info_dict': {
            'id': '4991738',
            'display_id': 'birds-original-mix',
            'ext': 'mp4',
            'title': "Tos, Middle Milk, Mumblin' Johnsson - Birds (Original Mix)",
        },
    }]
    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        track_id = mobj.group('id')
        display_id = mobj.group('display_id')
        webpage = self._download_webpage(url, display_id)
        # Track data is embedded in the page as a JS object literal.
        playables = self._parse_json(
            self._search_regex(
                r'window\.Playables\s*=\s*({.+?});', webpage,
                'playables info', flags=re.DOTALL),
            track_id)
        track = next(t for t in playables['tracks'] if t['id'] == int(track_id))
        title = ', '.join(a['name'] for a in track['artists']) + ' - ' + track['name']
        if track['mix']:
            title += ' (' + track['mix'] + ')'
        formats = []
        for ext, info in track['preview'].items():
            if not info['url']:
                continue
            fmt = {
                'url': info['url'],
                'ext': ext,
                'format_id': ext,
                'vcodec': 'none',
            }
            # Both preview containers are 96 kbps / 44.1 kHz audio-only
            # streams; only the audio codec differs.
            if ext in ('mp3', 'mp4'):
                fmt.update({
                    'acodec': 'mp3' if ext == 'mp3' else 'aac',
                    'abr': 96,
                    'asr': 44100,
                })
            formats.append(fmt)
        images = []
        for name, info in track['images'].items():
            image_url = info.get('url')
            if name == 'dynamic' or not image_url:
                continue
            image = {
                'id': name,
                'url': image_url,
                'height': int_or_none(info.get('height')),
                'width': int_or_none(info.get('width')),
            }
            images.append(image)
        return {
            # NB: the previous `str(track.get('id')) or track_id` could never
            # fall back to track_id, since str(None) is the truthy 'None';
            # apply the fallback before converting to str instead.
            'id': str(track.get('id') or track_id),
            'display_id': track.get('slug') or display_id,
            'title': title,
            'formats': formats,
            'thumbnails': images,
        }
# ---- yt_dlp/extractor/digiview.py ----
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..utils import clean_html, int_or_none, traverse_obj, url_or_none, urlencode_postdata
class DigiviewIE(InfoExtractor):
    """Extract Digiview (ladigitale.dev) clips, which wrap YouTube videos and
    may cut them to a sub-section or override the title."""
    _VALID_URL = r'https?://(?:www\.)?ladigitale\.dev/digiview/#/v/(?P<id>[0-9a-f]+)'
    _TESTS = [{
        # normal video
        'url': 'https://ladigitale.dev/digiview/#/v/67a8e50aee2ec',
        'info_dict': {
            'id': '67a8e50aee2ec',
            'ext': 'mp4',
            'title': 'Big Buck Bunny 60fps 4K - Official Blender Foundation Short Film',
            'thumbnail': 'https://i.ytimg.com/vi/aqz-KE-bpKQ/hqdefault.jpg',
            'upload_date': '20141110',
            'playable_in_embed': True,
            'duration': 635,
            'view_count': int,
            'comment_count': int,
            'channel': 'Blender',
            'license': 'Creative Commons Attribution license (reuse allowed)',
            'like_count': int,
            'tags': 'count:8',
            'live_status': 'not_live',
            'channel_id': 'UCSMOQeBJ2RAnuFungnQOxLg',
            'channel_follower_count': int,
            'channel_url': 'https://www.youtube.com/channel/UCSMOQeBJ2RAnuFungnQOxLg',
            'uploader_id': '@BlenderOfficial',
            'description': 'md5:8f3ed18a53a1bb36cbb3b70a15782fd0',
            'categories': ['Film & Animation'],
            'channel_is_verified': True,
            'heatmap': 'count:100',
            'section_end': 635,
            'uploader': 'Blender',
            'timestamp': 1415628355,
            'uploader_url': 'https://www.youtube.com/@BlenderOfficial',
            'age_limit': 0,
            'section_start': 0,
            'availability': 'public',
        },
    }, {
        # cut video
        'url': 'https://ladigitale.dev/digiview/#/v/67a8e51d0dd58',
        'info_dict': {
            'id': '67a8e51d0dd58',
            'ext': 'mp4',
            'title': 'Big Buck Bunny 60fps 4K - Official Blender Foundation Short Film',
            'thumbnail': 'https://i.ytimg.com/vi/aqz-KE-bpKQ/hqdefault.jpg',
            'upload_date': '20141110',
            'playable_in_embed': True,
            'duration': 5,
            'view_count': int,
            'comment_count': int,
            'channel': 'Blender',
            'license': 'Creative Commons Attribution license (reuse allowed)',
            'like_count': int,
            'tags': 'count:8',
            'live_status': 'not_live',
            'channel_id': 'UCSMOQeBJ2RAnuFungnQOxLg',
            'channel_follower_count': int,
            'channel_url': 'https://www.youtube.com/channel/UCSMOQeBJ2RAnuFungnQOxLg',
            'uploader_id': '@BlenderOfficial',
            'description': 'md5:8f3ed18a53a1bb36cbb3b70a15782fd0',
            'categories': ['Film & Animation'],
            'channel_is_verified': True,
            'heatmap': 'count:100',
            'section_end': 10,
            'uploader': 'Blender',
            'timestamp': 1415628355,
            'uploader_url': 'https://www.youtube.com/@BlenderOfficial',
            'age_limit': 0,
            'section_start': 5,
            'availability': 'public',
        },
    }, {
        # changed title
        'url': 'https://ladigitale.dev/digiview/#/v/67a8ea5644d7a',
        'info_dict': {
            'id': '67a8ea5644d7a',
            'ext': 'mp4',
            'title': 'Big Buck Bunny (with title changed)',
            'thumbnail': 'https://i.ytimg.com/vi/aqz-KE-bpKQ/hqdefault.jpg',
            'upload_date': '20141110',
            'playable_in_embed': True,
            'duration': 5,
            'view_count': int,
            'comment_count': int,
            'channel': 'Blender',
            'license': 'Creative Commons Attribution license (reuse allowed)',
            'like_count': int,
            'tags': 'count:8',
            'live_status': 'not_live',
            'channel_id': 'UCSMOQeBJ2RAnuFungnQOxLg',
            'channel_follower_count': int,
            'channel_url': 'https://www.youtube.com/channel/UCSMOQeBJ2RAnuFungnQOxLg',
            'uploader_id': '@BlenderOfficial',
            'description': 'md5:8f3ed18a53a1bb36cbb3b70a15782fd0',
            'categories': ['Film & Animation'],
            'channel_is_verified': True,
            'heatmap': 'count:100',
            'section_end': 15,
            'uploader': 'Blender',
            'timestamp': 1415628355,
            'uploader_url': 'https://www.youtube.com/@BlenderOfficial',
            'age_limit': 0,
            'section_start': 10,
            'availability': 'public',
        },
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        video_data = self._download_json(
            'https://ladigitale.dev/digiview/inc/recuperer_video.php', video_id,
            data=urlencode_postdata({'id': video_id}))
        clip_id = video_data['videoId']
        # Delegate to the YouTube extractor, overlaying Digiview's own
        # metadata (section boundaries, overridden title, etc.) on top.
        return self.url_result(
            f'https://www.youtube.com/watch?v={clip_id}',
            YoutubeIE, video_id, url_transparent=True,
            **traverse_obj(video_data, {
                'section_start': ('debut', {int_or_none}),
                'section_end': ('fin', {int_or_none}),
                'description': ('description', {clean_html}, filter),
                'title': ('titre', {str}),
                'thumbnail': ('vignette', {url_or_none}),
                'view_count': ('vues', {int_or_none}),
            }),
        )
# ---- yt_dlp/extractor/rutube.py ----
import itertools
from .common import InfoExtractor
from ..utils import (
UnsupportedError,
bool_or_none,
determine_ext,
int_or_none,
js_to_json,
parse_qs,
str_or_none,
try_get,
unified_timestamp,
url_or_none,
)
from ..utils.traversal import (
subs_list_to_dict,
traverse_obj,
)
class RutubeBaseIE(InfoExtractor):
    """Shared helpers for Rutube extractors: metadata/options API access and
    format/subtitle extraction."""
    def _download_api_info(self, video_id, query=None):
        # Video metadata endpoint; caller-supplied query (e.g. private-video
        # token) is forwarded.
        if not query:
            query = {}
        query['format'] = 'json'
        return self._download_json(
            f'https://rutube.ru/api/video/{video_id}/',
            video_id, 'Downloading video JSON',
            'Unable to download video JSON', query=query)
    def _extract_info(self, video, video_id=None, require_title=True):
        """Map a Rutube video JSON object to an info dict.

        With require_title=False (playlist entries), a missing title is
        tolerated instead of raising KeyError.
        """
        title = video['title'] if require_title else video.get('title')
        age_limit = video.get('is_adult')
        if age_limit is not None:
            age_limit = 18 if age_limit is True else 0
        uploader_id = try_get(video, lambda x: x['author']['id'])
        category = try_get(video, lambda x: x['category']['name'])
        description = video.get('description')
        duration = int_or_none(video.get('duration'))
        return {
            # Parsed as: (video.get('id') or video_id) if video_id else video['id']
            'id': video.get('id') or video_id if video_id else video['id'],
            'title': title,
            'description': description,
            'thumbnail': video.get('thumbnail_url'),
            'duration': duration,
            'uploader': try_get(video, lambda x: x['author']['name']),
            'uploader_id': str(uploader_id) if uploader_id else None,
            'timestamp': unified_timestamp(video.get('created_ts')),
            'categories': [category] if category else None,
            'age_limit': age_limit,
            'view_count': int_or_none(video.get('hits')),
            'comment_count': int_or_none(video.get('comments_count')),
            'is_live': bool_or_none(video.get('is_livestream')),
            'chapters': self._extract_chapters_from_description(description, duration),
        }
    def _download_and_extract_info(self, video_id, query=None):
        return self._extract_info(
            self._download_api_info(video_id, query=query), video_id)
    def _download_api_options(self, video_id, query=None):
        # Playback options endpoint (stream URLs, captions); geo headers apply.
        if not query:
            query = {}
        query['format'] = 'json'
        return self._download_json(
            f'https://rutube.ru/api/play/options/{video_id}/',
            video_id, 'Downloading options JSON',
            'Unable to download options JSON',
            headers=self.geo_verification_headers(), query=query)
    def _extract_formats_and_subtitles(self, options, video_id):
        """Build formats/subtitles from an options JSON: balancer streams,
        live HLS streams, and external caption files."""
        formats = []
        subtitles = {}
        for format_id, format_url in options['video_balancer'].items():
            ext = determine_ext(format_url)
            if ext == 'm3u8':
                fmts, subs = self._extract_m3u8_formats_and_subtitles(
                    format_url, video_id, 'mp4', m3u8_id=format_id, fatal=False)
                formats.extend(fmts)
                self._merge_subtitles(subs, target=subtitles)
            elif ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    format_url, video_id, f4m_id=format_id, fatal=False))
            else:
                formats.append({
                    'url': format_url,
                    'format_id': format_id,
                })
        for hls_url in traverse_obj(options, ('live_streams', 'hls', ..., 'url', {url_or_none})):
            fmts, subs = self._extract_m3u8_formats_and_subtitles(
                hls_url, video_id, 'mp4', fatal=False, m3u8_id='hls')
            formats.extend(fmts)
            self._merge_subtitles(subs, target=subtitles)
        self._merge_subtitles(traverse_obj(options, ('captions', ..., {
            'id': 'code',
            'url': 'file',
            'name': ('langTitle', {str}),
        }, all, {subs_list_to_dict(lang='ru')})), target=subtitles)
        return formats, subtitles
    def _download_and_extract_formats_and_subtitles(self, video_id, query=None):
        return self._extract_formats_and_subtitles(
            self._download_api_options(video_id, query=query), video_id)
class RutubeIE(RutubeBaseIE):
    IE_NAME = 'rutube'
    IE_DESC = 'Rutube videos'
    _VALID_URL = r'https?://rutube\.ru/(?:(?:live/)?video(?:/private)?|(?:play/)?embed)/(?P<id>[\da-z]{32})'
    _EMBED_REGEX = [r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//rutube\.ru/(?:play/)?embed/[\da-z]{32}.*?)\1']
    _TESTS = [{
        'url': 'https://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/',
        'info_dict': {
            'id': '3eac3b4561676c17df9132a9a1e62e3e',
            'ext': 'mp4',
            'title': 'Раненный кенгуру забежал в аптеку',
            'description': 'http://www.ntdtv.ru ',
            'duration': 81,
            'uploader': 'NTDRussian',
            'uploader_id': '29790',
            'timestamp': 1381943602,
            'upload_date': '20131016',
            'age_limit': 0,
            'view_count': int,
            'thumbnail': r're:https?://pic\.rutubelist\.ru/video/.+\.(?:jpg|png)',
            'categories': ['Новости и СМИ'],
            'chapters': [],
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://rutube.ru/play/embed/a10e53b86e8f349080f718582ce4c661',
        'only_matching': True,
    }, {
        'url': 'https://rutube.ru/embed/a10e53b86e8f349080f718582ce4c661',
        'only_matching': True,
    }, {
        'url': 'https://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/?pl_id=4252',
        'only_matching': True,
    }, {
        'url': 'https://rutube.ru/video/10b3a03fc01d5bbcc632a2f3514e8aab/?pl_type=source',
        'only_matching': True,
    }, {
        'url': 'https://rutube.ru/video/private/884fb55f07a97ab673c7d654553e0f48/?p=x2QojCumHTS3rsKHWXN8Lg',
        'info_dict': {
            'id': '884fb55f07a97ab673c7d654553e0f48',
            'ext': 'mp4',
            'title': 'Яцуноками, Nioh2',
            'description': 'Nioh2: финал сражения с боссом Яцуноками',
            'duration': 15,
            'uploader': 'mexus',
            'uploader_id': '24222106',
            'timestamp': 1670646232,
            'upload_date': '20221210',
            'age_limit': 0,
            'view_count': int,
            'thumbnail': 'https://pic.rutubelist.ru/video/f2/d4/f2d42b54be0a6e69c1c22539e3152156.jpg',
            'categories': ['Видеоигры'],
            'chapters': [],
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://rutube.ru/video/c65b465ad0c98c89f3b25cb03dcc87c6/',
        'info_dict': {
            'id': 'c65b465ad0c98c89f3b25cb03dcc87c6',
            'ext': 'mp4',
            'chapters': 'count:4',
            'categories': ['Бизнес и предпринимательство'],
            'description': 'md5:252feac1305257d8c1bab215cedde75d',
            'thumbnail': r're:https?://pic\.rutubelist\.ru/video/.+\.(?:jpg|png)',
            'duration': 782,
            'age_limit': 0,
            'uploader_id': '23491359',
            'timestamp': 1677153329,
            'view_count': int,
            'upload_date': '20230223',
            'title': 'Бизнес с нуля: найм сотрудников. Интервью с директором строительной компании #1',
            'uploader': 'Стас Быков',
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://rutube.ru/live/video/c58f502c7bb34a8fcdd976b221fca292/',
        'info_dict': {
            'id': 'c58f502c7bb34a8fcdd976b221fca292',
            'ext': 'mp4',
            'categories': ['Телепередачи'],
            'description': '',
            'thumbnail': r're:https?://pic\.rutubelist\.ru/video/.+\.(?:jpg|png)',
            'live_status': 'is_live',
            'age_limit': 0,
            'uploader_id': '23460655',
            'timestamp': 1652972968,
            'view_count': int,
            'upload_date': '20220519',
            'title': str,
            'uploader': 'Первый канал',
        },
        'skip': 'Invalid URL',
    }, {
        'url': 'https://rutube.ru/play/embed/03a9cb54bac3376af4c5cb0f18444e01/',
        'info_dict': {
            'id': '03a9cb54bac3376af4c5cb0f18444e01',
            'ext': 'mp4',
            'age_limit': 0,
            'description': '',
            'title': 'Церемония начала торгов акциями ПАО «ЕвроТранс»',
            'chapters': [],
            'upload_date': '20240829',
            'duration': 293,
            'uploader': 'MOEX - Московская биржа',
            'timestamp': 1724946628,
            'thumbnail': r're:https?://pic\.rutubelist\.ru/video/.+\.(?:jpg|png)',
            'view_count': int,
            'uploader_id': '38420507',
            'categories': ['Интервью'],
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://rutube.ru/video/5ab908fccfac5bb43ef2b1e4182256b0/',
        'only_matching': True,
    }, {
        'url': 'https://rutube.ru/live/video/private/c58f502c7bb34a8fcdd976b221fca292/',
        'only_matching': True,
    }]
    _WEBPAGE_TESTS = [{
        'url': 'https://novate.ru/blogs/170625/73644/',
        'info_dict': {
            'id': 'b0c96c75a4e5b274721bbced6ed8fb64',
            'ext': 'mp4',
            'title': 'Где в России находится единственная в своем роде скальная торпедная батарея',
            'age_limit': 0,
            'categories': ['Наука'],
            'chapters': [],
            'description': 'md5:2ed82e6b81958a43da6fb4d56f949e1f',
            'duration': 182,
            'thumbnail': r're:https?://pic\.rutubelist\.ru/video/.+\.(?:jpg|png)',
            'timestamp': 1749950158,
            'upload_date': '20250615',
            'uploader': 'Novate',
            'uploader_id': '24044809',
            'view_count': int,
        },
        'params': {'skip_download': 'm3u8'},
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        # The query may carry a private-video access token ('p'), which must
        # be forwarded to both API endpoints.
        query = parse_qs(url)
        info = self._download_and_extract_info(video_id, query)
        formats, subtitles = self._download_and_extract_formats_and_subtitles(video_id, query)
        return {
            **info,
            'formats': formats,
            'subtitles': subtitles,
        }
class RutubeEmbedIE(RutubeBaseIE):
    """Resolve numeric embed ids to the underlying 32-hex video id and
    extract it."""
    IE_NAME = 'rutube:embed'
    IE_DESC = 'Rutube embedded videos'
    _VALID_URL = r'https?://rutube\.ru/(?:video|play)/embed/(?P<id>[0-9]+)(?:[?#/]|$)'
    _TESTS = [{
        'url': 'https://rutube.ru/video/embed/6722881?vk_puid37=&vk_puid38=',
        'info_dict': {
            'id': 'a10e53b86e8f349080f718582ce4c661',
            'ext': 'mp4',
            'timestamp': 1387830582,
            'upload_date': '20131223',
            'uploader_id': '297833',
            'uploader': 'subziro89 ILya',
            'title': 'Мистический городок Эйри в Индиан 5 серия озвучка subziro89',
            'age_limit': 0,
            'duration': 1395,
            'chapters': [],
            'description': 'md5:a5acea57bbc3ccdc3cacd1f11a014b5b',
            'view_count': int,
            'thumbnail': r're:https?://pic\.rutubelist\.ru/video/.+\.(?:jpg|png)',
            'categories': ['Сериалы'],
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://rutube.ru/play/embed/8083783',
        'only_matching': True,
    }, {
        # private video
        'url': 'https://rutube.ru/play/embed/10631925?p=IbAigKqWd1do4mjaM5XLIQ',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        embed_id = self._match_id(url)
        # Query may contain private videos token and should be passed to API
        # requests (see #19163)
        query = parse_qs(url)
        options = self._download_api_options(embed_id, query)
        # The options response maps the numeric embed id to the real video id.
        video_id = options['effective_video']
        formats, subtitles = self._extract_formats_and_subtitles(options, video_id)
        info = self._download_and_extract_info(video_id, query)
        info.update({
            # Attribute the result to the main Rutube extractor.
            'extractor_key': 'Rutube',
            'formats': formats,
            'subtitles': subtitles,
        })
        return info
class RutubePlaylistBaseIE(RutubeBaseIE):
    """Base class for paginated Rutube playlist-style extractors.

    Subclasses set _PAGE_TEMPLATE and may override _next_page_url(); extra
    positional/keyword arguments given to _extract_playlist() are forwarded
    to _next_page_url() on every page.
    """
    def _next_page_url(self, page_num, playlist_id, *args, **kwargs):
        return self._PAGE_TEMPLATE % (playlist_id, page_num)
    def _entries(self, playlist_id, *args, **kwargs):
        next_page_url = None
        for pagenum in itertools.count(1):
            page = self._download_json(
                next_page_url or self._next_page_url(
                    pagenum, playlist_id, *args, **kwargs),
                playlist_id, f'Downloading page {pagenum}')
            results = page.get('results')
            if not results or not isinstance(results, list):
                break
            for result in results:
                video_url = url_or_none(result.get('video_url'))
                if not video_url:
                    continue
                # Delegate each entry to RutubeIE via its watch URL.
                entry = self._extract_info(result, require_title=False)
                entry.update({
                    '_type': 'url',
                    'url': video_url,
                    'ie_key': RutubeIE.ie_key(),
                })
                yield entry
            # Prefer the API-provided continuation URL over rebuilding one.
            next_page_url = page.get('next')
            if not next_page_url or not page.get('has_next'):
                break
    def _extract_playlist(self, playlist_id, *args, **kwargs):
        return self.playlist_result(
            self._entries(playlist_id, *args, **kwargs),
            playlist_id, kwargs.get('playlist_name'))
    def _real_extract(self, url):
        return self._extract_playlist(self._match_id(url))
class RutubeTagsIE(RutubePlaylistBaseIE):
    """Extract all videos carrying a given Rutube tag id as a playlist."""
    IE_NAME = 'rutube:tags'
    IE_DESC = 'Rutube tags'
    _VALID_URL = r'https?://rutube\.ru/tags/video/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://rutube.ru/tags/video/1800/',
        'info_dict': {
            'id': '1800',
        },
        'playlist_mincount': 68,
    }]
    _PAGE_TEMPLATE = 'https://rutube.ru/api/tags/video/%s/?page=%s&format=json'
class RutubeMovieIE(RutubePlaylistBaseIE):
    """Extract a Rutube TV/movie metainfo entry as a playlist of its videos."""
    IE_NAME = 'rutube:movie'
    IE_DESC = 'Rutube movies'
    _VALID_URL = r'https?://rutube\.ru/metainfo/tv/(?P<id>\d+)'
    _MOVIE_TEMPLATE = 'https://rutube.ru/api/metainfo/tv/%s/?format=json'
    _PAGE_TEMPLATE = 'https://rutube.ru/api/metainfo/tv/%s/video?page=%s&format=json'
    def _real_extract(self, url):
        movie_id = self._match_id(url)
        # The metadata request is only needed to title the playlist.
        movie = self._download_json(
            self._MOVIE_TEMPLATE % movie_id, movie_id,
            'Downloading movie JSON')
        return self._extract_playlist(
            movie_id, playlist_name=movie.get('name'))
class RutubePersonIE(RutubePlaylistBaseIE):
    """Extract all videos of a Rutube person (user) id as a playlist."""
    IE_NAME = 'rutube:person'
    IE_DESC = 'Rutube person videos'
    _VALID_URL = r'https?://rutube\.ru/video/person/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://rutube.ru/video/person/313878/',
        'info_dict': {
            'id': '313878',
        },
        'playlist_mincount': 36,
    }]
    _PAGE_TEMPLATE = 'https://rutube.ru/api/video/person/%s/?page=%s&format=json'
class RutubePlaylistIE(RutubePlaylistBaseIE):
    """Extract a user-curated Rutube playlist (plst) by numeric id."""
    IE_NAME = 'rutube:playlist'
    IE_DESC = 'Rutube playlists'
    _VALID_URL = r'https?://rutube\.ru/plst/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://rutube.ru/plst/308547/',
        'info_dict': {
            'id': '308547',
        },
        'playlist_mincount': 22,
    }]
    _PAGE_TEMPLATE = 'https://rutube.ru/api/playlist/custom/%s/videos?page=%s&format=json'
class RutubeChannelIE(RutubePlaylistBaseIE):
    """Extract a Rutube channel (by numeric id or /u/<slug>), optionally
    restricted to its 'videos' or 'shorts' section."""
    IE_NAME = 'rutube:channel'
    IE_DESC = 'Rutube channel'
    _VALID_URL = r'https?://rutube\.ru/(?:channel/(?P<id>\d+)|u/(?P<slug>\w+))(?:/(?P<section>videos|shorts|playlists))?'
    _TESTS = [{
        'url': 'https://rutube.ru/channel/639184/videos/',
        'info_dict': {
            'id': '639184_videos',
        },
        'playlist_mincount': 129,
    }, {
        'url': 'https://rutube.ru/channel/25902603/shorts/',
        'info_dict': {
            'id': '25902603_shorts',
        },
        'playlist_mincount': 277,
    }, {
        'url': 'https://rutube.ru/channel/25902603/',
        'info_dict': {
            'id': '25902603',
        },
        'playlist_mincount': 406,
    }, {
        'url': 'https://rutube.ru/u/rutube/videos/',
        'info_dict': {
            'id': '23704195_videos',
        },
        'playlist_mincount': 113,
    }]
    _PAGE_TEMPLATE = 'https://rutube.ru/api/video/person/%s/?page=%s&format=json&origin__type=%s'
    def _next_page_url(self, page_num, playlist_id, section):
        # Map the URL section to the API's origin__type filter; the empty
        # string (no section) means all videos.
        origin_type = {
            'videos': 'rtb,rst,ifrm,rspa',
            'shorts': 'rshorts',
            None: '',
        }.get(section)
        return self._PAGE_TEMPLATE % (playlist_id, page_num, origin_type)
    def _real_extract(self, url):
        playlist_id, slug, section = self._match_valid_url(url).group('id', 'slug', 'section')
        if section == 'playlists':
            raise UnsupportedError(url)
        if slug:
            # /u/<slug> URLs: resolve the slug to the numeric channel id via
            # the redux state embedded in the page.
            webpage = self._download_webpage(url, slug)
            redux_state = self._search_json(
                r'window\.reduxState\s*=', webpage, 'redux state', slug, transform_source=js_to_json)
            playlist_id = traverse_obj(redux_state, (
                'api', 'queries', lambda k, _: k.startswith('channelIdBySlug'),
                'data', 'channel_id', {int}, {str_or_none}, any))
        playlist = self._extract_playlist(playlist_id, section=section)
        if section:
            # Make the playlist id section-specific, e.g. '639184_videos'.
            playlist['id'] = f'{playlist_id}_{section}'
        return playlist
# ---- yt_dlp/extractor/screenrec.py ----
from .common import InfoExtractor
class ScreenRecIE(InfoExtractor):
    """Extract screen recordings shared via screenrec.com."""
    _VALID_URL = r'https?://(?:www\.)?screenrec\.com/share/(?P<id>\w{10})'
    _TESTS = [{
        'url': 'https://screenrec.com/share/DasLtbknYo',
        'info_dict': {
            'id': 'DasLtbknYo',
            'ext': 'mp4',
            'title': '02.05.2024_03.01.25_REC',
            'description': 'Recorded with ScreenRec',
            'thumbnail': r're:^https?://.*\.gif$',
        },
        'params': {
            'skip_download': True,
        },
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The player config embeds the HLS manifest URL as `customUrl`.
        m3u8_url = self._search_regex(
            r'customUrl\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 'm3u8 URL', group='url')
        return {
            'id': video_id,
            'title': self._og_search_title(webpage, default=None) or self._html_extract_title(webpage),
            'description': self._og_search_description(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
            'formats': self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4'),
        }
# ---- yt_dlp/extractor/cnn.py ----
import json
import re
from .common import InfoExtractor
from ..utils import (
clean_html,
extract_attributes,
int_or_none,
merge_dicts,
parse_duration,
parse_iso8601,
parse_resolution,
try_call,
update_url,
url_or_none,
)
from ..utils.traversal import find_elements, traverse_obj
class CNNIE(InfoExtractor):
    # Handles CNN article and video pages, which may embed one or more players.
    _VALID_URL = r'https?://(?:(?:edition|www|money|cnnespanol)\.)?cnn\.com/(?!audio/)(?P<display_id>[^?#]+?)(?:[?#]|$|/index\.html)'
    _TESTS = [{
        'url': 'https://www.cnn.com/2024/05/31/sport/video/jadon-sancho-borussia-dortmund-champions-league-exclusive-spt-intl',
        'info_dict': {
            'id': 'med0e97ad0d154f56e29aa96e57192a14226734b6b',
            'display_id': '2024/05/31/sport/video/jadon-sancho-borussia-dortmund-champions-league-exclusive-spt-intl',
            'ext': 'mp4',
            'upload_date': '20240531',
            'description': 'md5:844bcdb0629e1877a7a466c913f4c19c',
            'thumbnail': 'https://media.cnn.com/api/v1/images/stellar/prod/gettyimages-2151936122.jpg?c=original',
            'duration': 373.0,
            'timestamp': 1717148586,
            'title': 'Borussia Dortmund star Jadon Sancho seeks Wembley redemption after 2020 Euros hurt',
            'modified_date': '20240531',
            'modified_timestamp': 1717150140,
        },
    }, {
        'url': 'https://edition.cnn.com/2024/06/11/politics/video/inmates-vote-jail-nevada-murray-dnt-ac360-digvid',
        'info_dict': {
            'id': 'me522945c4709b299e5cb8657900a7a21ad3b559f9',
            'display_id': '2024/06/11/politics/video/inmates-vote-jail-nevada-murray-dnt-ac360-digvid',
            'ext': 'mp4',
            'description': 'md5:e0120fe5da9ad8259fd707c1cbb64a60',
            'title': 'Here’s how some inmates in closely divided state are now able to vote from jail',
            'timestamp': 1718158269,
            'upload_date': '20240612',
            'thumbnail': 'https://media.cnn.com/api/v1/images/stellar/prod/still-20701554-13565-571-still.jpg?c=original',
            'duration': 202.0,
            'modified_date': '20240612',
            'modified_timestamp': 1718158509,
        },
    }, {
        'url': 'https://edition.cnn.com/2024/06/11/style/king-charles-portrait-vandalized/index.html',
        'info_dict': {
            'id': 'mef5f52b9e1fe28b1ad192afcbc9206ae984894b68',
            'display_id': '2024/06/11/style/king-charles-portrait-vandalized',
            'ext': 'mp4',
            'thumbnail': 'https://media.cnn.com/api/v1/images/stellar/prod/still-20701257-8846-816-still.jpg?c=original',
            'description': 'md5:19f78338ccec533db0fa8a4511012dae',
            'title': 'Video shows King Charles\' portrait being vandalized by activists',
            'timestamp': 1718113852,
            'upload_date': '20240611',
            'duration': 51.0,
            'modified_timestamp': 1718116193,
            'modified_date': '20240611',
        },
    }, {
        'url': 'https://edition.cnn.com/videos/media/2022/12/05/robin-meade-final-sign-off-broadcast-hln-mxp-contd-vpx.hln',
        'info_dict': {
            'id': 'mefba13799201b084ea3b1d0f7ca820ae94d4bb5b2',
            'display_id': 'videos/media/2022/12/05/robin-meade-final-sign-off-broadcast-hln-mxp-contd-vpx.hln',
            'ext': 'mp4',
            'thumbnail': 'https://media.cnn.com/api/v1/images/stellar/prod/221205163510-robin-meade-sign-off.jpg?c=original',
            'duration': 158.0,
            'title': 'Robin Meade signs off after HLN\'s last broadcast',
            'description': 'md5:cff3c62d18d2fbc6c5c75cb029b7353b',
            'upload_date': '20221205',
            'timestamp': 1670284296,
            'modified_timestamp': 1670332404,
            'modified_date': '20221206',
        },
        'params': {'format': 'direct'},
    }, {
        'url': 'https://cnnespanol.cnn.com/video/ataque-misil-israel-beirut-libano-octubre-trax',
        'info_dict': {
            'id': 'me484a43722642aa00627b812fe928f2e99c6e2997',
            'ext': 'mp4',
            'display_id': 'video/ataque-misil-israel-beirut-libano-octubre-trax',
            'timestamp': 1729501452,
            'thumbnail': 'https://media.cnn.com/api/v1/images/stellar/prod/ataqeubeirut-1.jpg?c=original',
            'description': 'md5:256ee7137d161f776cda429654135e52',
            'upload_date': '20241021',
            'duration': 31.0,
            'title': 'VIDEO | Israel lanza un nuevo ataque sobre Beirut',
            'modified_date': '20241021',
            'modified_timestamp': 1729501530,
        },
    }, {
        'url': 'https://edition.cnn.com/2024/10/16/politics/kamala-harris-fox-news-interview/index.html',
        'info_dict': {
            'id': '2024/10/16/politics/kamala-harris-fox-news-interview',
        },
        'playlist_count': 2,
        'playlist': [{
            'md5': '073ffab87b8bef97c9913e71cc18ef9e',
            'info_dict': {
                'id': 'me19d548fdd54df0924087039283128ef473ab397d',
                'ext': 'mp4',
                'title': '\'I\'m not finished\': Harris interview with Fox News gets heated',
                'display_id': 'kamala-harris-fox-news-interview-ebof-digvid',
                'description': 'md5:e7dd3d1a04df916062230b60ca419a0a',
                'thumbnail': 'https://media.cnn.com/api/v1/images/stellar/prod/harris-20241016234916617.jpg?c=original',
                'duration': 173.0,
                'timestamp': 1729122182,
                'upload_date': '20241016',
                'modified_timestamp': 1729194706,
                'modified_date': '20241017',
            },
            'params': {'format': 'direct'},
        }, {
            'md5': '11604ab4af83b650826753f1ccb8ecff',
            'info_dict': {
                'id': 'med04507d8ca3da827001f63d22af321ec29c7d97b',
                'ext': 'mp4',
                'title': '\'Wise\': Buttigieg on Harris\' handling of interview question about gender transition surgery',
                'display_id': 'pete-buttigieg-harris-fox-newssrc-digvid',
                'description': 'md5:602a8a7e853ed5e574acd3159428c98e',
                'thumbnail': 'https://media.cnn.com/api/v1/images/stellar/prod/buttigieg-20241017040412074.jpg?c=original',
                'duration': 145.0,
                'timestamp': 1729137765,
                'upload_date': '20241017',
                'modified_timestamp': 1729138184,
                'modified_date': '20241017',
            },
            'params': {'format': 'direct'},
        }],
    }]

    def _real_extract(self, url):
        display_id = self._match_valid_url(url).group('display_id')
        webpage = self._download_webpage(url, display_id)
        # App ID for the medium.ngtv.io media API, exposed via the page's window.env blob
        app_id = traverse_obj(
            self._search_json(r'window\.env\s*=', webpage, 'window env', display_id, default={}),
            ('TOP_AUTH_SERVICE_APP_ID', {str}))

        entries = []
        # An article page may embed several <div data-component-name="video-player"> players;
        # only those that carry a data-media-id attribute are extractable
        for player_data in traverse_obj(webpage, (
                {find_elements(tag='div', attr='data-component-name', value='video-player', html=True)},
                ..., {extract_attributes}, all, lambda _, v: v['data-media-id'])):
            media_id = player_data['data-media-id']
            parent_uri = player_data.get('data-video-resource-parent-uri')
            formats, subtitles = [], {}

            video_data = {}
            if parent_uri:
                # fave.api.cnn.io returns progressive MP4 files plus caption tracks
                video_data = self._download_json(
                    'https://fave.api.cnn.io/v1/video', media_id, fatal=False,
                    query={
                        'id': media_id,
                        'stellarUri': parent_uri,
                    })
                for direct_url in traverse_obj(video_data, ('files', ..., 'fileUri', {url_or_none})):
                    resolution, bitrate = None, None
                    # Resolution/bitrate are encoded in the filename, e.g. ...-1280x720_3000k.mp4
                    if mobj := re.search(r'-(?P<res>\d+x\d+)_(?P<tbr>\d+)k\.mp4', direct_url):
                        resolution, bitrate = mobj.group('res', 'tbr')
                    formats.append({
                        'url': direct_url,
                        'format_id': 'direct',
                        'quality': 1,
                        'tbr': int_or_none(bitrate),
                        **parse_resolution(resolution),
                    })
                for sub_data in traverse_obj(video_data, (
                        'closedCaptions', 'types', lambda _, v: url_or_none(v['track']['url']), 'track')):
                    subtitles.setdefault(sub_data.get('lang') or 'en', []).append({
                        'url': sub_data['url'],
                        'name': sub_data.get('label'),
                    })

            if app_id:
                # medium.ngtv.io provides the adaptive (HLS) streams
                media_data = self._download_json(
                    f'https://medium.ngtv.io/v2/media/{media_id}/desktop', media_id, fatal=False,
                    query={'appId': app_id})
                m3u8_url = traverse_obj(media_data, (
                    'media', 'desktop', 'unprotected', 'unencrypted', 'url', {url_or_none}))
                if m3u8_url:
                    fmts, subs = self._extract_m3u8_formats_and_subtitles(
                        m3u8_url, media_id, 'mp4', m3u8_id='hls', fatal=False)
                    formats.extend(fmts)
                    self._merge_subtitles(subs, target=subtitles)

            entries.append({
                # Player element attributes first; fave API metadata below overrides them
                **traverse_obj(player_data, {
                    'title': ('data-headline', {clean_html}),
                    'description': ('data-description', {clean_html}),
                    'duration': ('data-duration', {parse_duration}),
                    'timestamp': ('data-publish-date', {parse_iso8601}),
                    'thumbnail': (
                        'data-poster-image-override', {json.loads}, 'big', 'uri', {url_or_none},
                        {update_url(query='c=original')}),
                    'display_id': 'data-video-slug',
                }),
                **traverse_obj(video_data, {
                    'timestamp': ('dateCreated', 'uts', {int_or_none(scale=1000)}),
                    'description': ('description', {clean_html}),
                    'title': ('headline', {str}),
                    'modified_timestamp': ('lastModified', 'uts', {int_or_none(scale=1000)}),
                    'duration': ('trt', {int_or_none}),
                }),
                'id': media_id,
                'formats': formats,
                'subtitles': subtitles,
            })

        if len(entries) == 1:
            # Single-video page: flatten to a plain video result keyed by the article slug
            return {
                **entries[0],
                'display_id': display_id,
            }
        return self.playlist_result(entries, display_id)
class CNNIndonesiaIE(InfoExtractor):
    _VALID_URL = r'https?://www\.cnnindonesia\.com/[\w-]+/(?P<upload_date>\d{8})\d+-\d+-(?P<id>\d+)/(?P<display_id>[\w-]+)'
    _TESTS = [{
        'url': 'https://www.cnnindonesia.com/ekonomi/20220909212635-89-845885/alasan-harga-bbm-di-indonesia-masih-disubsidi',
        'info_dict': {
            'id': '845885',
            'ext': 'mp4',
            'description': 'md5:e7954bfa6f1749bc9ef0c079a719c347',
            'upload_date': '20220909',
            'title': 'Alasan Harga BBM di Indonesia Masih Disubsidi',
            'timestamp': 1662859088,
            'duration': 120.0,
            'thumbnail': r're:https://akcdn\.detik\.net\.id/visual/2022/09/09/thumbnail-ekopedia-alasan-harga-bbm-disubsidi_169\.jpeg',
            'tags': ['ekopedia', 'subsidi bbm', 'subsidi', 'bbm', 'bbm subsidi', 'harga pertalite naik'],
            'age_limit': 0,
            'release_timestamp': 1662859088,
            'release_date': '20220911',
            'uploader': 'Asfahan Yahsyi',
        },
    }, {
        'url': 'https://www.cnnindonesia.com/internasional/20220911104341-139-846189/video-momen-charles-disambut-meriah-usai-dilantik-jadi-raja-inggris',
        'info_dict': {
            'id': '846189',
            'ext': 'mp4',
            'upload_date': '20220911',
            'duration': 76.0,
            'timestamp': 1662869995,
            'description': 'md5:ece7b003b3ee7d81c6a5cfede7d5397d',
            'thumbnail': r're:https://akcdn\.detik\.net\.id/visual/2022/09/11/thumbnail-video-1_169\.jpeg',
            'title': 'VIDEO: Momen Charles Disambut Meriah usai Dilantik jadi Raja Inggris',
            'tags': ['raja charles', 'raja charles iii', 'ratu elizabeth', 'ratu elizabeth meninggal dunia', 'raja inggris', 'inggris'],
            'age_limit': 0,
            'release_date': '20220911',
            'uploader': 'REUTERS',
            'release_timestamp': 1662869995,
        },
    }]

    def _real_extract(self, url):
        # The URL itself carries the upload date, numeric ID and slug
        mobj = self._match_valid_url(url)
        upload_date, video_id, display_id = mobj.group('upload_date', 'id', 'display_id')
        webpage = self._download_webpage(url, display_id)

        ld_objects = list(self._yield_json_ld(webpage, display_id))
        metadata = self._json_ld(ld_objects, display_id)
        # The VideoObject JSON-LD entry points at the embed player URL
        embed_url = next(
            obj.get('embedUrl') for obj in ld_objects if obj.get('@type') == 'VideoObject')

        return merge_dicts(metadata, {
            '_type': 'url_transparent',
            'url': embed_url,
            'id': video_id,
            'upload_date': upload_date,
            'tags': try_call(lambda: self._html_search_meta('keywords', webpage).split(', ')),
        })
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/philharmoniedeparis.py | yt_dlp/extractor/philharmoniedeparis.py | from .common import InfoExtractor
from ..utils import try_get
class PhilharmonieDeParisIE(InfoExtractor):
    IE_DESC = 'Philharmonie de Paris'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            live\.philharmoniedeparis\.fr/(?:[Cc]oncert/|embed(?:app)?/|misc/Playlist\.ashx\?id=)|
                            pad\.philharmoniedeparis\.fr/(?:doc/CIMU/|player\.aspx\?id=)|
                            philharmoniedeparis\.fr/fr/live/concert/|
                            otoplayer\.philharmoniedeparis\.fr/fr/embed/
                        )
                        (?P<id>\d+)
                    '''
    _TESTS = [{
        'url': 'https://philharmoniedeparis.fr/fr/live/concert/1129666-danses-symphoniques',
        'md5': '24bdb7e86c200c107680e1f7770330ae',
        'info_dict': {
            'id': '1129666',
            'ext': 'mp4',
            'title': 'Danses symphoniques. Orchestre symphonique Divertimento - Zahia Ziouani. Bizet, de Falla, Stravinski, Moussorgski, Saint-Saëns',
        },
    }, {
        'url': 'https://philharmoniedeparis.fr/fr/live/concert/1032066-akademie-fur-alte-musik-berlin-rias-kammerchor-rene-jacobs-passion-selon-saint-jean-de-johann',
        'info_dict': {
            'id': '1032066',
            'title': 'Akademie für alte Musik Berlin, Rias Kammerchor, René Jacobs : Passion selon saint Jean de Johann Sebastian Bach',
        },
        'playlist_mincount': 2,
    }, {
        'url': 'https://philharmoniedeparis.fr/fr/live/concert/1030324-orchestre-philharmonique-de-radio-france-myung-whun-chung-renaud-capucon-pascal-dusapin-johannes',
        'only_matching': True,
    }, {
        'url': 'http://live.philharmoniedeparis.fr/misc/Playlist.ashx?id=1030324&track=&lang=fr',
        'only_matching': True,
    }, {
        'url': 'https://live.philharmoniedeparis.fr/embedapp/1098406/berlioz-fantastique-lelio-les-siecles-national-youth-choir-of.html?lang=fr-FR',
        'only_matching': True,
    }, {
        'url': 'https://otoplayer.philharmoniedeparis.fr/fr/embed/1098406?lang=fr-FR',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        config = self._download_json(
            f'https://otoplayer.philharmoniedeparis.fr/fr/config/{video_id}.json', video_id, query={
                'id': video_id,
                'lang': 'fr-FR',
            })

        def extract_entry(source):
            # A playable entry needs a title and a dict of stream files;
            # anything else yields None and is skipped by the caller.
            if not isinstance(source, dict):
                return None
            title = source.get('title')
            if not title:
                return None
            files = source.get('files')
            if not isinstance(files, dict):
                return None

            seen_urls = set()
            formats = []
            for quality in ('mobile', 'desktop'):
                stream_url = try_get(files, lambda x: x[quality]['file'], str)
                # Both qualities may point at the same manifest; deduplicate
                if not stream_url or stream_url in seen_urls:
                    continue
                seen_urls.add(stream_url)
                formats.extend(self._extract_m3u8_formats(
                    stream_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id='hls', fatal=False))
            if not formats and not self.get_param('ignore_no_formats'):
                return None
            return {
                'title': title,
                'formats': formats,
                'thumbnail': files.get('thumbnail'),
            }

        # Top-level config may itself be a single playable entry...
        info = extract_entry(config)
        if info:
            info['id'] = video_id
            return info

        # ...otherwise fall back to its per-chapter entries as a playlist
        entries = []
        for num, chapter in enumerate(config['chapters'], start=1):
            entry = extract_entry(chapter)
            if entry is None:
                continue
            entry['id'] = f'{video_id}-{num}'
            entries.append(entry)

        return self.playlist_result(entries, video_id, config.get('title'))
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/europeantour.py | yt_dlp/extractor/europeantour.py | import re
from .common import InfoExtractor
class EuropeanTourIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?europeantour\.com/dpworld-tour/news/video/(?P<id>[^/&?#$]+)'
    _TESTS = [{
        'url': 'https://www.europeantour.com/dpworld-tour/news/video/the-best-shots-of-the-2021-seasons/',
        'info_dict': {
            'id': '6287788195001',
            'ext': 'mp4',
            'title': 'The best shots of the 2021 seasons',
            'duration': 2416.512,
            'timestamp': 1640010141,
            'uploader_id': '5136026580001',
            'tags': ['prod-imported'],
            'thumbnail': 'md5:fdac52bc826548860edf8145ee74e71a',
            'upload_date': '20211220',
        },
        'params': {'skip_download': True},
    }]

    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/default_default/index.html?videoId=%s'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # NOTE(review): if the page layout changes and this pattern no longer
        # matches, .groups() raises AttributeError instead of ExtractorError —
        # confirm whether a friendlier error is wanted
        mobj = re.search(
            r'(?s)brightcove-player\s?video-id="([^"]+)".*"ACCOUNT_ID":"([^"]+)"', webpage)
        brightcove_id, account_id = mobj.groups()
        if not account_id:
            # Fall back to the known DP World Tour Brightcove account
            account_id = '5136026580001'
        return self.url_result(
            self.BRIGHTCOVE_URL_TEMPLATE % (account_id, brightcove_id), 'BrightcoveNew')
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/curiositystream.py | yt_dlp/extractor/curiositystream.py | import re
import urllib.parse
from .common import InfoExtractor
from ..utils import ExtractorError, int_or_none, urlencode_postdata
class CuriosityStreamBaseIE(InfoExtractor):
    _NETRC_MACHINE = 'curiositystream'
    # Shared across all CuriosityStream extractors once a token is obtained
    _auth_token = None

    def _handle_errors(self, result):
        # API errors arrive either as a plain string or a field->message dict
        message = result.get('error', {}).get('message')
        if not message:
            return
        if isinstance(message, dict):
            message = ', '.join(message.values())
        raise ExtractorError(
            f'{self.IE_NAME} said: {message}', expected=True)

    def _call_api(self, path, video_id, query=None):
        if not self._auth_token:
            # Pick up an existing browser session from cookies if available
            auth_cookie = self._get_cookies('https://curiositystream.com').get('auth_token')
            if auth_cookie:
                self.write_debug('Obtained auth_token cookie')
                self._auth_token = urllib.parse.unquote(auth_cookie.value)
        headers = {'X-Auth-Token': self._auth_token} if self._auth_token else {}
        result = self._download_json(
            self._API_BASE_URL + path, video_id, headers=headers, query=query)
        self._handle_errors(result)
        return result['data']

    def _perform_login(self, username, password):
        result = self._download_json(
            'https://api.curiositystream.com/v1/login', None,
            note='Logging in', data=urlencode_postdata({
                'email': username,
                'password': password,
            }))
        self._handle_errors(result)
        # Store on the base class so sibling extractors reuse the token
        CuriosityStreamBaseIE._auth_token = result['message']['auth_token']
class CuriosityStreamIE(CuriosityStreamBaseIE):
    IE_NAME = 'curiositystream'
    _VALID_URL = r'https?://(?:app\.)?curiositystream\.com/video/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://app.curiositystream.com/video/2',
        'info_dict': {
            'id': '2',
            'ext': 'mp4',
            'title': 'How Did You Develop The Internet?',
            'description': 'Vint Cerf, Google\'s Chief Internet Evangelist, describes how he and Bob Kahn created the internet.',
            'channel': 'Curiosity Stream',
            'categories': ['Technology', 'Interview'],
            'average_rating': float,
            'series_id': '2',
            'thumbnail': r're:https://img.curiositystream.com/.+\.jpg',
            'tags': [],
            'duration': 158,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }]
    _API_BASE_URL = 'https://api.curiositystream.com/v1/media/'

    def _real_extract(self, url):
        video_id = self._match_id(url)

        formats = []
        # The media endpoint is queried once per manifest flavor; each response
        # repeats the progressive/rtmp encodings, which are appended both times
        for encoding_format in ('m3u8', 'mpd'):
            media = self._call_api(video_id, video_id, query={
                'encodingsNew': 'true',
                'encodingsFormat': encoding_format,
            })
            for encoding in media.get('encodings', []):
                playlist_url = encoding.get('master_playlist_url')
                if encoding_format == 'm3u8':
                    # use `m3u8` entry_protocol until EXT-X-MAP is properly supported by `m3u8_native` entry_protocol
                    formats.extend(self._extract_m3u8_formats(
                        playlist_url, video_id, 'mp4',
                        m3u8_id='hls', fatal=False))
                elif encoding_format == 'mpd':
                    formats.extend(self._extract_mpd_formats(
                        playlist_url, video_id, mpd_id='dash', fatal=False))
                encoding_url = encoding.get('url')
                file_url = encoding.get('file_url')
                if not encoding_url and not file_url:
                    continue
                # Common attributes shared by the http/rtmp variants below
                f = {
                    'width': int_or_none(encoding.get('width')),
                    'height': int_or_none(encoding.get('height')),
                    'vbr': int_or_none(encoding.get('video_bitrate')),
                    'abr': int_or_none(encoding.get('audio_bitrate')),
                    'filesize': int_or_none(encoding.get('size_in_bytes')),
                    'vcodec': encoding.get('video_codec'),
                    'acodec': encoding.get('audio_codec'),
                    'container': encoding.get('container_type'),
                }
                for f_url in (encoding_url, file_url):
                    if not f_url:
                        continue
                    fmt = f.copy()
                    # Legacy RTMP URLs are split into base URL / app / play path
                    rtmp = re.search(r'^(?P<url>rtmpe?://(?P<host>[^/]+)/(?P<app>.+))/(?P<playpath>mp[34]:.+)$', f_url)
                    if rtmp:
                        fmt.update({
                            'url': rtmp.group('url'),
                            'play_path': rtmp.group('playpath'),
                            'app': rtmp.group('app'),
                            'ext': 'flv',
                            'format_id': 'rtmp',
                        })
                    else:
                        fmt.update({
                            'url': f_url,
                            'format_id': 'http',
                        })
                    formats.append(fmt)

        title = media['title']

        subtitles = {}
        for closed_caption in media.get('closed_captions', []):
            sub_url = closed_caption.get('file')
            if not sub_url:
                continue
            lang = closed_caption.get('code') or closed_caption.get('language') or 'en'
            subtitles.setdefault(lang, []).append({
                'url': sub_url,
            })

        return {
            'id': video_id,
            'formats': formats,
            'title': title,
            'description': media.get('description'),
            'thumbnail': media.get('image_large') or media.get('image_medium') or media.get('image_small'),
            'duration': int_or_none(media.get('duration')),
            'tags': media.get('tags'),
            'subtitles': subtitles,
            'channel': media.get('producer'),
            'categories': [media.get('primary_category'), media.get('type')],
            'average_rating': media.get('rating_percentage'),
            # collection_id doubles as the series identifier; empty -> None
            'series_id': str(media.get('collection_id') or '') or None,
        }
class CuriosityStreamCollectionBaseIE(CuriosityStreamBaseIE):

    def _real_extract(self, url):
        # Expand a collection/series into a playlist of videos and sub-series
        collection_id = self._match_id(url)
        collection = self._call_api(collection_id, collection_id)

        entries = []
        for item in collection.get('media', []):
            item_id = str(item.get('id'))
            # Nested collections are delegated to the series extractor
            if item.get('is_collection'):
                kind, extractor = 'series', CuriosityStreamSeriesIE
            else:
                kind, extractor = 'video', CuriosityStreamIE
            entries.append(self.url_result(
                f'https://curiositystream.com/{kind}/{item_id}',
                ie=extractor.ie_key(), video_id=item_id))

        return self.playlist_result(
            entries, collection_id,
            collection.get('title'), collection.get('description'))
class CuriosityStreamCollectionsIE(CuriosityStreamCollectionBaseIE):
    # Curated "collections" (e.g. Staff Picks); extraction logic lives in the base class
    IE_NAME = 'curiositystream:collections'
    _VALID_URL = r'https?://(?:app\.)?curiositystream\.com/collections/(?P<id>\d+)'
    _API_BASE_URL = 'https://api.curiositystream.com/v2/collections/'
    _TESTS = [{
        'url': 'https://curiositystream.com/collections/86',
        'info_dict': {
            'id': '86',
            'title': 'Staff Picks',
            'description': 'Wondering where to start? Here are a few of our favorite series and films... from our couch to yours.',
        },
        'playlist_mincount': 7,
    }, {
        'url': 'https://curiositystream.com/collections/36',
        'only_matching': True,
    }]
class CuriosityStreamSeriesIE(CuriosityStreamCollectionBaseIE):
    # Series pages (both /series/ and legacy /collection/ paths); extraction
    # logic lives in the base class, only the API endpoint differs
    IE_NAME = 'curiositystream:series'
    _VALID_URL = r'https?://(?:app\.)?curiositystream\.com/(?:series|collection)/(?P<id>\d+)'
    _API_BASE_URL = 'https://api.curiositystream.com/v2/series/'
    _TESTS = [{
        'url': 'https://curiositystream.com/series/2',
        'info_dict': {
            'id': '2',
            'title': 'Curious Minds: The Internet',
            'description': 'How is the internet shaping our lives in the 21st Century?',
        },
        'playlist_mincount': 16,
    }, {
        'url': 'https://curiositystream.com/collection/2',
        'only_matching': True,
    }]
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/acfun.py | yt_dlp/extractor/acfun.py | from .common import InfoExtractor
from ..utils import (
float_or_none,
format_field,
int_or_none,
parse_codecs,
parse_qs,
str_or_none,
traverse_obj,
)
class AcFunVideoBaseIE(InfoExtractor):
    def _extract_metadata(self, video_id, video_info):
        # ksPlayJson is a JSON string describing the available renditions
        play_info = self._parse_json(video_info['ksPlayJson'], video_id)

        formats = []
        subtitles = {}
        for rendition in traverse_obj(play_info, ('adaptationSet', 0, 'representation')):
            fmts, subs = self._extract_m3u8_formats_and_subtitles(
                rendition['url'], video_id, 'mp4', fatal=False)
            self._merge_subtitles(subs, target=subtitles)
            # Annotate each HLS variant with metadata from its rendition entry
            rendition_attrs = {
                'fps': float_or_none(rendition.get('frameRate')),
                'width': int_or_none(rendition.get('width')),
                'height': int_or_none(rendition.get('height')),
                'tbr': float_or_none(rendition.get('avgBitrate')),
                **parse_codecs(rendition.get('codecs', '')),
            }
            for fmt in fmts:
                fmt.update(rendition_attrs)
            formats.extend(fmts)

        return {
            'id': video_id,
            'formats': formats,
            'subtitles': subtitles,
            'duration': float_or_none(video_info.get('durationMillis'), 1000),
            'timestamp': int_or_none(video_info.get('uploadTime'), 1000),
            'http_headers': {'Referer': 'https://www.acfun.cn/'},
        }
class AcFunVideoIE(AcFunVideoBaseIE):
    _VALID_URL = r'https?://www\.acfun\.cn/v/ac(?P<id>[_\d]+)'
    _TESTS = [{
        'url': 'https://www.acfun.cn/v/ac35457073',
        'info_dict': {
            'id': '35457073',
            'ext': 'mp4',
            'duration': 174.208,
            'timestamp': 1656403967,
            'title': '1 8 岁 现 状',
            'description': '“赶紧回去!班主任查班了!”',
            'uploader': '锤子game',
            'uploader_id': '51246077',
            'thumbnail': r're:^https?://.*\.(jpg|jpeg)',
            'upload_date': '20220628',
            'like_count': int,
            'view_count': int,
            'comment_count': int,
            'tags': list,
        },
    }, {
        # example for len(video_list) > 1
        'url': 'https://www.acfun.cn/v/ac35468952_2',
        'info_dict': {
            'id': '35468952_2',
            'ext': 'mp4',
            'title': '【动画剧集】Rocket & Groot Season 1(2022)/火箭浣熊与格鲁特第1季 P02 S01E02 十拿九穩',
            'duration': 90.459,
            'uploader': '比令',
            'uploader_id': '37259967',
            'upload_date': '20220629',
            'timestamp': 1656479962,
            'tags': list,
            'like_count': int,
            'view_count': int,
            'comment_count': int,
            'thumbnail': r're:^https?://.*\.(jpg|jpeg)',
            'description': 'md5:67583aaf3a0f933bd606bc8a2d3ebb17',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)
        json_all = self._search_json(r'window.videoInfo\s*=', webpage, 'videoInfo', video_id)

        title = json_all.get('title')
        video_list = json_all.get('videoList') or []
        video_internal_id = traverse_obj(json_all, ('currentVideoInfo', 'id'))
        # Multi-part videos: append "P<part> <part title>" to the base title,
        # matching the part that corresponds to the current internal video ID
        if video_internal_id and len(video_list) > 1:
            part_idx, part_video_info = next(
                (idx + 1, v) for (idx, v) in enumerate(video_list)
                if v['id'] == video_internal_id)
            title = f'{title} P{part_idx:02d} {part_video_info["title"]}'

        return {
            **self._extract_metadata(video_id, json_all['currentVideoInfo']),
            'title': title,
            'thumbnail': json_all.get('coverUrl'),
            'description': json_all.get('description'),
            'uploader': traverse_obj(json_all, ('user', 'name')),
            'uploader_id': traverse_obj(json_all, ('user', 'href')),
            'tags': traverse_obj(json_all, ('tagList', ..., 'name')),
            'view_count': int_or_none(json_all.get('viewCount')),
            'like_count': int_or_none(json_all.get('likeCountShow')),
            'comment_count': int_or_none(json_all.get('commentCountShow')),
        }
class AcFunBangumiIE(AcFunVideoBaseIE):
    _VALID_URL = r'https?://www\.acfun\.cn/bangumi/(?P<id>aa[_\d]+)'
    _TESTS = [{
        'url': 'https://www.acfun.cn/bangumi/aa6002917_36188_1745457?ac=2',
        'info_dict': {
            'id': 'aa6002917_36188_1745457__2',
            'ext': 'mp4',
            'title': '【7月】租借女友 水原千鹤角色曲『DATE』特别PV',
            'upload_date': '20200916',
            'timestamp': 1600243813,
            'duration': 92.091,
        },
    }, {
        'url': 'https://www.acfun.cn/bangumi/aa5023171_36188_1750645',
        'info_dict': {
            'id': 'aa5023171_36188_1750645',
            'ext': 'mp4',
            'title': '红孩儿之趴趴蛙寻石记 第5话 ',
            'duration': 760.0,
            'season': '红孩儿之趴趴蛙寻石记',
            'season_id': '5023171',
            'season_number': 1,  # series has only 1 season
            'episode': 'Episode 5',
            'episode_number': 5,
            'upload_date': '20181223',
            'timestamp': 1545552185,
            'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)',
            'comment_count': int,
        },
    }, {
        'url': 'https://www.acfun.cn/bangumi/aa6065485_36188_1885061',
        'info_dict': {
            'id': 'aa6065485_36188_1885061',
            'ext': 'mp4',
            'title': '叽歪老表(第二季) 第5话 坚不可摧',
            'season': '叽歪老表(第二季)',
            'season_number': 2,
            'season_id': '6065485',
            'episode': '坚不可摧',
            'episode_number': 5,
            'upload_date': '20220324',
            'timestamp': 1648082786,
            'duration': 105.002,
            'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)',
            'comment_count': int,
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # An `ac` query parameter selects a related ("highlight") video; it is
        # appended to the ID so such pages get a distinct archive entry
        ac_idx = parse_qs(url).get('ac', [None])[-1]
        video_id = f'{video_id}{format_field(ac_idx, None, "__%s")}'

        webpage = self._download_webpage(url, video_id)
        json_bangumi_data = self._search_json(r'window.bangumiData\s*=', webpage, 'bangumiData', video_id)

        if ac_idx:
            # Highlight videos carry their own player info and no season/episode data
            video_info = json_bangumi_data['hlVideoInfo']
            return {
                **self._extract_metadata(video_id, video_info),
                'title': video_info.get('title'),
            }

        video_info = json_bangumi_data['currentVideoInfo']

        season_id = json_bangumi_data.get('bangumiId')
        # Position of this season within the related-seasons list (1-based);
        # defaults to 1 when the series has a single season
        season_number = season_id and next((
            idx for idx, v in enumerate(json_bangumi_data.get('relatedBangumis') or [], 1)
            if v.get('id') == season_id), 1)

        json_bangumi_list = self._search_json(
            r'window\.bangumiList\s*=', webpage, 'bangumiList', video_id, fatal=False)
        video_internal_id = int_or_none(traverse_obj(json_bangumi_data, ('currentVideoInfo', 'id')))
        # json_bangumi_list may be None (fatal=False above); guard against it
        # so a missing bangumiList degrades to episode_number=None instead of
        # crashing with AttributeError
        episode_number = video_internal_id and next((
            idx for idx, v in enumerate((json_bangumi_list or {}).get('items') or [], 1)
            if v.get('videoId') == video_internal_id), None)

        return {
            **self._extract_metadata(video_id, video_info),
            'title': json_bangumi_data.get('showTitle'),
            'thumbnail': json_bangumi_data.get('image'),
            'season': json_bangumi_data.get('bangumiTitle'),
            'season_id': str_or_none(season_id),
            'season_number': season_number,
            'episode': json_bangumi_data.get('title'),
            'episode_number': episode_number,
            'comment_count': int_or_none(json_bangumi_data.get('commentCount')),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/zype.py | yt_dlp/extractor/zype.py | import re
from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
dict_get,
int_or_none,
js_to_json,
parse_iso8601,
)
class ZypeIE(InfoExtractor):
    _ID_RE = r'[\da-fA-F]+'
    _COMMON_RE = r'//player\.zype\.com/embed/%s\.(?:js|json|html)\?.*?(?:access_token|(?:ap[ip]|player)_key)='
    _VALID_URL = r'https?:%s[^&]+' % (_COMMON_RE % (f'(?P<id>{_ID_RE})'))
    _EMBED_REGEX = [fr'<script[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?{_COMMON_RE % _ID_RE}.+?)\1']
    _TEST = {
        'url': 'https://player.zype.com/embed/5b400b834b32992a310622b9.js?api_key=jZ9GUhRmxcPvX7M3SlfejB6Hle9jyHTdk2jVxG7wOHPLODgncEKVdPYBhuz9iWXQ&autoplay=false&controls=true&da=false',
        'md5': 'eaee31d474c76a955bdaba02a505c595',
        'info_dict': {
            'id': '5b400b834b32992a310622b9',
            'ext': 'mp4',
            'title': 'Smoky Barbecue Favorites',
            'thumbnail': r're:^https?://.*\.jpe?g',
            'description': 'md5:5ff01e76316bd8d46508af26dc86023b',
            'timestamp': 1504915200,
            'upload_date': '20170909',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        try:
            # Request the .json flavor of the embed endpoint instead of .js/.html
            response = self._download_json(re.sub(
                r'\.(?:js|html)\?', '.json?', url), video_id)['response']
        except ExtractorError as e:
            if isinstance(e.cause, HTTPError) and e.cause.status in (400, 401, 403):
                # Auth/permission errors carry a JSON body with a readable message
                raise ExtractorError(self._parse_json(
                    e.cause.response.read().decode(), video_id)['message'], expected=True)
            raise

        body = response['body']
        video = response['video']
        title = video['title']

        subtitles = {}

        if isinstance(body, dict):
            # Structured body: explicit list of outputs (HLS manifest and/or
            # progressive/audio files)
            formats = []
            for output in body.get('outputs', []):
                output_url = output.get('url')
                if not output_url:
                    continue
                name = output.get('name')
                if name == 'm3u8':
                    # Reassigns (not extends) formats/subtitles — any earlier
                    # progressive outputs are discarded in favor of the manifest
                    formats, subtitles = self._extract_m3u8_formats_and_subtitles(
                        output_url, video_id, 'mp4',
                        'm3u8_native', m3u8_id='hls', fatal=False)
                else:
                    f = {
                        'format_id': name,
                        'tbr': int_or_none(output.get('bitrate')),
                        'url': output_url,
                    }
                    # m4a/mp3 outputs are audio-only
                    if name in ('m4a', 'mp3'):
                        f['vcodec'] = 'none'
                    else:
                        f.update({
                            'height': int_or_none(output.get('height')),
                            'width': int_or_none(output.get('width')),
                        })
                    formats.append(f)
            text_tracks = body.get('subtitles') or []
        else:
            # String body: scrape the player JS for a manifest URL
            m3u8_url = self._search_regex(
                r'(["\'])(?P<url>(?:(?!\1).)+\.m3u8(?:(?!\1).)*)\1',
                body, 'm3u8 url', group='url', default=None)
            if not m3u8_url:
                source = self._search_regex(
                    r'(?s)sources\s*:\s*\[\s*({.+?})\s*\]', body, 'source')

                def get_attr(key):
                    return self._search_regex(
                        rf'\b{key}\s*:\s*([\'"])(?P<val>(?:(?!\1).)+)\1',
                        source, key, group='val')

                # NOTE(review): for any integration other than 'verizon-media',
                # m3u8_url stays None and the call below fails — presumably
                # unreachable for supported embeds; confirm
                if get_attr('integration') == 'verizon-media':
                    m3u8_url = 'https://content.uplynk.com/{}.m3u8'.format(get_attr('id'))
            formats, subtitles = self._extract_m3u8_formats_and_subtitles(
                m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls')
            text_tracks = self._search_regex(
                r'textTracks\s*:\s*(\[[^]]+\])',
                body, 'text tracks', default=None)
            if text_tracks:
                text_tracks = self._parse_json(
                    text_tracks, video_id, js_to_json, False)

        if text_tracks:
            for text_track in text_tracks:
                tt_url = dict_get(text_track, ('file', 'src'))
                if not tt_url:
                    continue
                subtitles.setdefault(text_track.get('label') or 'English', []).append({
                    'url': tt_url,
                })

        thumbnails = []
        for thumbnail in video.get('thumbnails', []):
            thumbnail_url = thumbnail.get('url')
            if not thumbnail_url:
                continue
            thumbnails.append({
                'url': thumbnail_url,
                'width': int_or_none(thumbnail.get('width')),
                'height': int_or_none(thumbnail.get('height')),
            })

        return {
            'id': video_id,
            'display_id': video.get('friendly_title'),
            'title': title,
            'thumbnails': thumbnails,
            'description': dict_get(video, ('description', 'ott_description', 'short_description')),
            'timestamp': parse_iso8601(video.get('published_at')),
            'duration': int_or_none(video.get('duration')),
            'view_count': int_or_none(video.get('request_count')),
            'average_rating': int_or_none(video.get('rating')),
            'season_number': int_or_none(video.get('season')),
            'episode_number': int_or_none(video.get('episode')),
            'formats': formats,
            'subtitles': subtitles,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/thisoldhouse.py | yt_dlp/extractor/thisoldhouse.py | import urllib.parse
from .brightcove import BrightcoveNewIE
from .common import InfoExtractor
from .zype import ZypeIE
from ..networking import HEADRequest
from ..utils import (
ExtractorError,
filter_dict,
parse_qs,
smuggle_url,
urlencode_postdata,
)
from ..utils.traversal import traverse_obj
class ThisOldHouseIE(InfoExtractor):
    """Extractor for thisoldhouse.com; videos are hosted on Brightcove (formerly Zype)."""

    _NETRC_MACHINE = 'thisoldhouse'
    _VALID_URL = r'https?://(?:www\.)?thisoldhouse\.com/(?:watch|how-to|tv-episode|(?:[^/?#]+/)?\d+)/(?P<id>[^/?#]+)'
    _TESTS = [{
        # Unresolved Brightcove URL embed (formerly Zype), free
        'url': 'https://www.thisoldhouse.com/furniture/21017078/how-to-build-a-storage-bench',
        'info_dict': {
            'id': '6325298523112',
            'ext': 'mp4',
            'title': 'How to Build a Storage Bench',
            'description': 'In the workshop, Tom Silva and Kevin O\'Connor build a storage bench for an entryway.',
            'timestamp': 1681793639,
            'upload_date': '20230418',
            'duration': 674.54,
            'tags': 'count:11',
            'uploader_id': '6314471934001',
            'thumbnail': r're:^https?://.*\.jpg',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # Brightcove embed, authwalled
        'url': 'https://www.thisoldhouse.com/glen-ridge-generational/99537/s45-e17-multi-generational',
        'info_dict': {
            'id': '6349675446112',
            'ext': 'mp4',
            'title': 'E17 | Glen Ridge Generational | Multi-Generational',
            'description': 'md5:53c6bc2e8031f3033d693d9a3563222c',
            'timestamp': 1711382202,
            'upload_date': '20240325',
            'duration': 1422.229,
            'tags': 'count:13',
            'uploader_id': '6314471934001',
            'thumbnail': r're:^https?://.*\.jpg',
        },
        'expected_warnings': ['Login with password is not supported for this website'],
        'params': {
            'skip_download': True,
        },
        'skip': 'Requires subscription',
    }, {
        # Page no longer has video
        'url': 'https://www.thisoldhouse.com/watch/arlington-arts-crafts-arts-and-crafts-class-begins',
        'only_matching': True,
    }, {
        # 404 Not Found
        'url': 'https://www.thisoldhouse.com/tv-episode/ask-toh-shelf-rough-electric',
        'only_matching': True,
    }, {
        # 404 Not Found
        'url': 'https://www.thisoldhouse.com/how-to/how-to-build-storage-bench',
        'only_matching': True,
    }, {
        'url': 'https://www.thisoldhouse.com/21113884/s41-e13-paradise-lost',
        'only_matching': True,
    }, {
        # iframe www.thisoldhouse.com
        'url': 'https://www.thisoldhouse.com/21083431/seaside-transformation-the-westerly-project',
        'only_matching': True,
    }]

    def _perform_login(self, username, password):
        """Log in via the site's WordPress admin-ajax endpoint using hidden form nonces."""
        login_page = self._download_webpage(
            'https://www.thisoldhouse.com/insider-login', None, 'Downloading login page')
        hidden_inputs = self._hidden_inputs(login_page)

        response = self._download_json(
            'https://www.thisoldhouse.com/wp-admin/admin-ajax.php', None, 'Logging in',
            headers={
                'Accept': 'application/json',
                'X-Requested-With': 'XMLHttpRequest',
            }, data=urlencode_postdata(filter_dict({
                'action': 'onebill_subscriber_login',
                'email': username,
                'password': password,
                # Nonce/plan fields are required by the endpoint; missing keys raise KeyError early
                'pricingPlanTerm': hidden_inputs['pricing_plan_term'],
                'utm_parameters': hidden_inputs.get('utm_parameters'),
                'nonce': hidden_inputs['mdcr_onebill_login_nonce'],
            })))

        message = traverse_obj(response, ('data', 'message', {str}))
        if not response['success']:
            # The site reports bad credentials with a generic "Something went wrong" message
            if message and 'Something went wrong' in message:
                raise ExtractorError('Invalid username or password', expected=True)
            raise ExtractorError(message or 'Login was unsuccessful')
        if message and 'Your subscription is not active' in message:
            # Can also be triggered by too many concurrent sign-ins, hence the cookies hint
            self.report_warning(
                f'{self.IE_NAME} said your subscription is not active. '
                f'If your subscription is active, this could be caused by too many sign-ins, '
                f'and you should instead try using {self._login_hint(method="cookies")[4:]}')
        else:
            self.write_debug(f'{self.IE_NAME} said: {message}')

    def _real_extract(self, url):
        """Resolve the page's Zype or Brightcove embed and delegate to the matching extractor."""
        display_id = self._match_id(url)
        webpage, urlh = self._download_webpage_handle(url, display_id)
        # If login response says inactive subscription, site redirects to frontpage for Insider content
        if 'To Unlock This content' in webpage or urllib.parse.urlparse(urlh.url).path in ('', '/'):
            self.raise_login_required('This video is only available for subscribers')

        # Legacy Zype embeds first; the iframe URL must be resolved via redirect (HEAD)
        video_url, video_id = self._search_regex(
            r'<iframe[^>]+src=[\'"]((?:https?:)?//(?:www\.)?thisoldhouse\.(?:chorus\.build|com)/videos/zype/([0-9a-f]{24})[^\'"]*)[\'"]',
            webpage, 'zype url', group=(1, 2), default=(None, None))
        if video_url:
            video_url = self._request_webpage(HEADRequest(video_url), video_id, 'Resolving Zype URL').url
            return self.url_result(video_url, ZypeIE, video_id)

        # Otherwise look for a Brightcove player iframe or a site-local video URL
        video_url, video_id = self._search_regex([
            r'<iframe[^>]+src=[\'"]((?:https?:)?//players\.brightcove\.net/\d+/\w+/index\.html\?videoId=(\d+))',
            r'<iframe[^>]+src=[\'"]((?:https?:)?//(?:www\.)thisoldhouse\.com/videos/brightcove/(\d+))'],
            webpage, 'iframe url', group=(1, 2))
        if not parse_qs(video_url).get('videoId'):
            # Site-local URL redirects to the actual Brightcove player URL
            video_url = self._request_webpage(HEADRequest(video_url), video_id, 'Resolving Brightcove URL').url
        return self.url_result(smuggle_url(video_url, {'referrer': url}), BrightcoveNewIE, video_id)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/n1.py | yt_dlp/extractor/n1.py | import re
import urllib.parse
from .common import InfoExtractor
from ..utils import (
extract_attributes,
unified_timestamp,
url_or_none,
)
from ..utils.traversal import traverse_obj
class N1InfoAssetIE(InfoExtractor):
    """Extractor for direct United Cloud VOD stream URLs (HLS)."""

    _VALID_URL = r'https?://best-vod\.umn\.cdn\.united\.cloud/stream\?asset=(?P<id>[^&]+)'
    _TESTS = [{
        'url': 'https://best-vod.umn.cdn.united.cloud/stream?asset=ljsottomazilirija3060921-n1info-si-worldwide&stream=hp1400&t=0&player=m3u8v&sp=n1info&u=n1info&p=n1Sh4redSecre7iNf0',
        'md5': '28b08b32aeaff2b8562736ccd5a66fe7',
        'info_dict': {
            'id': 'ljsottomazilirija3060921-n1info-si-worldwide',
            'ext': 'mp4',
            'title': 'ljsottomazilirija3060921-n1info-si-worldwide',
        },
    }]

    def _real_extract(self, url):
        """The URL itself is an HLS manifest; the asset ID doubles as the title."""
        asset_id = self._match_id(url)
        return {
            'id': asset_id,
            'title': asset_id,
            'formats': self._extract_m3u8_formats(
                url, asset_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False),
        }
class N1InfoIIE(InfoExtractor):
    """Extractor for N1 Info / nova.rs articles.

    Articles may embed: Brid/TargetVideo players (HLS built from site+video IDs),
    legacy <video> tags pointing at United Cloud assets, and third-party
    iframes (YouTube, Reddit, Facebook). All found media is returned as a playlist.
    """
    IE_NAME = 'N1Info:article'
    _VALID_URL = r'https?://(?:(?:\w+\.)?n1info\.\w+|nova\.rs)/(?:[^/?#]+/){1,2}(?P<id>[^/?#]+)'
    _TESTS = [{
        # YouTube embedded
        'url': 'https://sportklub.n1info.rs/tenis/us-open/glava-telo-igra-kako-je-novak-ispustio-istorijsku-sansu/',
        'md5': '987ce6fd72acfecc453281e066b87973',
        'info_dict': {
            'id': 'L5Hd4hQVUpk',
            'ext': 'mp4',
            'upload_date': '20210913',
            'title': 'Ozmo i USO21, ep. 13: Novak Đoković – Danil Medvedev | Ključevi Poraza, Budućnost | SPORT KLUB TENIS',
            'description': 'md5:467f330af1effedd2e290f10dc31bb8e',
            'uploader': 'Sport Klub',
            'uploader_id': '@sportklub',
            'uploader_url': 'https://www.youtube.com/@sportklub',
            'channel': 'Sport Klub',
            'channel_id': 'UChpzBje9Ro6CComXe3BgNaw',
            'channel_url': 'https://www.youtube.com/channel/UChpzBje9Ro6CComXe3BgNaw',
            'channel_is_verified': True,
            'channel_follower_count': int,
            'comment_count': int,
            'view_count': int,
            'like_count': int,
            'age_limit': 0,
            'duration': 1049,
            'thumbnail': 'https://i.ytimg.com/vi/L5Hd4hQVUpk/maxresdefault.jpg',
            'chapters': 'count:9',
            'categories': ['Sports'],
            'tags': 'count:10',
            'timestamp': 1631522787,
            'playable_in_embed': True,
            'availability': 'public',
            'live_status': 'not_live',
            'media_type': 'video',
        },
    }, {
        'url': 'https://n1info.si/novice/svet/v-srbiji-samo-ta-konec-tedna-vec-kot-200-pozarov/',
        'info_dict': {
            'id': '2182656',
            'ext': 'mp4',
            'title': 'V Srbiji samo ta konec tedna več kot 200 požarov',
            'timestamp': 1753611983,
            'upload_date': '20250727',
            'thumbnail': 'https://n1info.si/media/images/2025/7/1753611048_Pozar.width-1200.webp',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # Reddit embedded
        'url': 'https://nova.rs/vesti/drustvo/ako-vucic-izgubi-izbore-ja-cu-da-crknem-jugoslavija-je-gotova/',
        'info_dict': {
            'id': '2wmfee9eycp71',
            'ext': 'mp4',
            'title': '"Ako Vučić izgubi izbore, ja ću da crknem, Jugoslavija je gotova"',
            'upload_date': '20210924',
            'timestamp': 1632448649.0,
            'uploader': 'YouLotWhatDontStop',
            'display_id': 'pu9wbx',
            'channel_id': 'serbia',
            'comment_count': int,
            'like_count': int,
            'dislike_count': int,
            'age_limit': 0,
            'duration': 134,
            'thumbnail': 'https://external-preview.redd.it/5nmmawSeGx60miQM3Iq-ueC9oyCLTLjjqX-qqY8uRsc.png?format=pjpg&auto=webp&s=2f973400b04d23f871b608b178e47fc01f9b8f1d',
        },
    }, {
        'url': 'https://nova.rs/vesti/politika/zaklina-tatalovic-ani-brnabic-pricate-lazi-video/',
        'info_dict': {
            'id': 'tnjganabrnabicizaklinatatalovic100danavladegp-novas-worldwide',
            'ext': 'mp4',
            'title': 'Žaklina Tatalović Ani Brnabić: Pričate laži (VIDEO)',
            'upload_date': '20211102',
            'timestamp': 1635861677,
            'thumbnail': 'https://nova.rs/wp-content/uploads/2021/11/02/1635860298-TNJG_Ana_Brnabic_i_Zaklina_Tatalovic_100_dana_Vlade_GP.jpg',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://n1info.rs/vesti/cuta-biti-u-kosovskoj-mitrovici-znaci-da-te-docekaju-eksplozivnim-napravama/',
        'info_dict': {
            'id': '1332368',
            'ext': 'mp4',
            'title': 'Ćuta: Biti u Kosovskoj Mitrovici znači da te dočekaju eksplozivnim napravama',
            'upload_date': '20230620',
            'timestamp': 1687290536,
            'thumbnail': 'https://cdn.brid.tv/live/partners/26827/snapshot/1332368_th_6492013a8356f_1687290170.jpg',
        },
    }, {
        'url': 'https://n1info.rs/vesti/vuciceva-turneja-po-srbiji-najavljuje-kontrarevoluciju-preti-svom-narodu-vredja-novinare/',
        'info_dict': {
            'id': '2025974',
            'ext': 'mp4',
            'title': 'Vučićeva turneja po Srbiji: Najavljuje kontrarevoluciju, preti svom narodu, vređa novinare',
            'thumbnail': 'https://cdn-uc.brid.tv/live/partners/26827/snapshot/2025974_fhd_67c4a23280a81_1740939826.jpg',
            'timestamp': 1740939936,
            'upload_date': '20250302',
        },
    }, {
        'url': 'https://hr.n1info.com/vijesti/pravobraniteljica-o-ubojstvu-u-zagrebu-radi-se-o-doista-nezapamcenoj-situaciji/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        title = self._og_search_title(webpage) or self._html_extract_title(webpage)
        timestamp = unified_timestamp(
            self._og_search_property('published_time', webpage, default=None)
            or self._html_search_meta('article:published_time', webpage))
        # Brid/TargetVideo player init calls: $bp("Brid_<n>", {...}) with JSON config
        plugin_data = re.findall(r'\$bp\("(?:Brid|TargetVideo)_\d+",\s(.+)\);', webpage)
        entries = []
        if plugin_data:
            # Partner/site ID is needed to build the CDN manifest URL; two known locations
            site_id = self._html_search_regex(r'site:(\d+)', webpage, 'site id', default=None)
            if site_id is None:
                site_id = self._search_regex(
                    r'partners/(\d+)', self._html_search_meta('contentUrl', webpage, fatal=True), 'site ID')
            for video_data in plugin_data:
                video_id = self._parse_json(video_data, title)['video']
                entries.append({
                    'id': video_id,
                    'title': title,
                    'timestamp': timestamp,
                    'thumbnail': self._html_search_meta('thumbnailURL', webpage),
                    'formats': self._extract_m3u8_formats(
                        f'https://cdn-uc.brid.tv/live/partners/{site_id}/streaming/{video_id}/{video_id}.m3u8',
                        video_id, fatal=False),
                })
        else:
            # Old player still present in older articles
            videos = re.findall(r'(?m)(<video[^>]+>)', webpage)
            for video in videos:
                video_data = extract_attributes(video)
                entries.append({
                    '_type': 'url_transparent',
                    'url': video_data.get('data-url'),
                    'id': video_data.get('id'),
                    'title': title,
                    'thumbnail': traverse_obj(video_data, (('data-thumbnail', 'data-default_thumbnail'), {url_or_none}, any)),
                    'timestamp': timestamp,
                    'ie_key': 'N1InfoAsset',
                })

        # Third-party iframes: dispatch on the embed hostname
        embedded_videos = re.findall(r'(<iframe[^>]+>)', webpage)
        for embedded_video in embedded_videos:
            video_data = extract_attributes(embedded_video)
            url = video_data.get('src') or ''
            hostname = urllib.parse.urlparse(url).hostname
            if hostname == 'www.youtube.com':
                entries.append(self.url_result(url, ie='Youtube'))
            elif hostname == 'www.redditmedia.com':
                entries.append(self.url_result(url, ie='Reddit'))
            elif hostname == 'www.facebook.com' and 'plugins/video' in url:
                entries.append(self.url_result(url, ie='FacebookPluginsVideo'))

        return {
            '_type': 'playlist',
            'id': video_id,
            'title': title,
            'timestamp': timestamp,
            'entries': entries,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/caffeinetv.py | yt_dlp/extractor/caffeinetv.py | from .common import InfoExtractor
from ..utils import (
determine_ext,
int_or_none,
parse_iso8601,
traverse_obj,
urljoin,
)
class CaffeineTVIE(InfoExtractor):
    """Extractor for caffeine.tv videos via its public activity JSON API."""

    _VALID_URL = r'https?://(?:www\.)?caffeine\.tv/[^/?#]+/video/(?P<id>[\da-f-]+)'
    _TESTS = [{
        'url': 'https://www.caffeine.tv/TsuSurf/video/cffc0a00-e73f-11ec-8080-80017d29f26e',
        'info_dict': {
            'id': 'cffc0a00-e73f-11ec-8080-80017d29f26e',
            'ext': 'mp4',
            'title': 'GOOOOD MORNINNNNN #highlights',
            'timestamp': 1654702180,
            'upload_date': '20220608',
            'uploader': 'RahJON Wicc',
            'uploader_id': 'TsuSurf',
            'duration': 3145,
            'age_limit': 17,
            'thumbnail': 'https://www.caffeine.tv/broadcasts/776b6f84-9cd5-42e3-af1d-4a776eeed697/replay/lobby.jpg',
            'comment_count': int,
            'view_count': int,
            'like_count': int,
            'tags': ['highlights', 'battlerap'],
        },
        'params': {
            'skip_download': 'm3u8',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        json_data = self._download_json(
            f'https://api.caffeine.tv/social/public/activity/{video_id}', video_id)
        broadcast_info = traverse_obj(json_data, ('broadcast_info', {dict})) or {}
        # video_url is required; its extension decides HLS vs. direct download
        video_url = broadcast_info['video_url']
        ext = determine_ext(video_url)
        if ext == 'm3u8':
            formats = self._extract_m3u8_formats(video_url, video_id, 'mp4')
        else:
            formats = [{'url': video_url}]
        return {
            'id': video_id,
            'formats': formats,
            **traverse_obj(json_data, {
                'like_count': ('like_count', {int_or_none}),
                'view_count': ('view_count', {int_or_none}),
                'comment_count': ('comment_count', {int_or_none}),
                'tags': ('tags', ..., {str}, filter),
                'uploader': ('user', 'name', {str}),
                'uploader_id': (((None, 'user'), 'username'), {str}, any),
                'is_live': ('is_live', {bool}),
            }),
            **traverse_obj(broadcast_info, {
                'title': ('broadcast_title', {str}),
                'duration': ('content_duration', {int_or_none}),
                'timestamp': ('broadcast_start_time', {parse_iso8601}),
                'thumbnail': ('preview_image_path', {urljoin(url)}),
            }),
        'age_limit': {
            # assume Apple Store ratings: https://en.wikipedia.org/wiki/Mobile_software_content_rating_system
            'FOUR_PLUS': 0,
            'NINE_PLUS': 9,
            'TWELVE_PLUS': 12,
            'SEVENTEEN_PLUS': 17,
            # unknown ratings default to the most restrictive bucket
        }.get(broadcast_info.get('content_rating'), 17),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/iltalehti.py | yt_dlp/extractor/iltalehti.py | from .common import InfoExtractor
from ..utils import js_to_json, traverse_obj
class IltalehtiIE(InfoExtractor):
    """Extractor for iltalehti.fi articles; delegates found JW Platform embeds."""

    _VALID_URL = r'https?://(?:www\.)?iltalehti\.fi/[^/?#]+/a/(?P<id>[^/?#])'
    _TESTS = [
        # jwplatform embed main_media
        {
            'url': 'https://www.iltalehti.fi/ulkomaat/a/9fbd067f-94e4-46cd-8748-9d958eb4dae2',
            'md5': 'af12d42c539f1f49f0b62d231fe72dcd',
            'info_dict': {
                'id': 'gYjjaf1L',
                'ext': 'mp4',
                'title': 'Sensuroimaton Päivärinta, jakso 227: Vieraana Suomen Venäjän ex-suurlähettiläs René Nyberg ja Kenraalimajuri evp Pekka Toveri',
                'description': '',
                'upload_date': '20220928',
                'timestamp': 1664360878,
                'duration': 2089,
                'thumbnail': r're:^https?://.*\.jpg',
            },
        },
        # jwplatform embed body
        {
            'url': 'https://www.iltalehti.fi/politiikka/a/1ce49d85-1670-428b-8db8-d2479b9950a4',
            'md5': '9e50334b8f8330ce8828b567a82a3c65',
            'info_dict': {
                'id': '18R6zkLi',
                'ext': 'mp4',
                'title': 'Pekka Toverin arvio: Näin Nord Stream -kaasuputken räjäyttäminen on saatettu toteuttaa',
                'description': 'md5:3d1302c9e17e7ffd564143ff58f8de35',
                'upload_date': '20220929',
                'timestamp': 1664435867,
                'duration': 165.0,
                'thumbnail': r're:^https?://.*\.jpg',
            },
        },
    ]

    def _real_extract(self, url):
        """Parse window.App state JSON and collect JW Player media IDs from the article."""
        article_id = self._match_id(url)
        webpage = self._download_webpage(url, article_id)
        info = self._search_json(
            r'<script>\s*window.App\s*=', webpage, 'json', article_id,
            transform_source=js_to_json)
        # Media can appear as the article's main_media or inline in the body items
        props = traverse_obj(info, (
            'state', 'articles', ..., 'items', (('main_media', 'properties'), ('body', ..., 'properties'))))
        video_ids = traverse_obj(props, (lambda _, v: v['provider'] == 'jwplayer', 'id'))
        return self.playlist_from_matches(
            video_ids, article_id, ie='JWPlatform', getter=lambda video_id: f'jwplatform:{video_id}',
            title=traverse_obj(info, ('state', 'articles', ..., 'items', 'canonical_title'), get_all=False))
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tiktok.py | yt_dlp/extractor/tiktok.py | import functools
import itertools
import json
import random
import re
import string
import time
import urllib.parse
import uuid
from .common import InfoExtractor
from ..networking import HEADRequest
from ..utils import (
ExtractorError,
UnsupportedError,
UserNotLive,
determine_ext,
filter_dict,
format_field,
int_or_none,
join_nonempty,
merge_dicts,
mimetype2ext,
parse_qs,
qualities,
srt_subtitles_timecode,
str_or_none,
traverse_obj,
truncate_string,
try_call,
try_get,
url_or_none,
urlencode_postdata,
)
class TikTokBaseIE(InfoExtractor):
    # printf template for building uploader/channel URLs from a handle or sec_uid
    _UPLOADER_URL_FORMAT = 'https://www.tiktok.com/@%s'
    _WEBPAGE_HOST = 'https://www.tiktok.com/'
    # Resolution labels from worst to best; used for format quality ranking
    QUALITIES = ('360p', '540p', '720p', '1080p')

    _APP_INFO_DEFAULTS = {
        # unique "install id"
        'iid': None,
        # TikTok (KR/PH/TW/TH/VN) = trill, TikTok (rest of world) = musical_ly, Douyin = aweme
        'app_name': 'musical_ly',
        'app_version': '35.1.3',
        'manifest_app_version': '2023501030',
        # "app id": aweme = 1128, trill = 1180, musical_ly = 1233, universal = 0
        'aid': '0',
    }
    # Mutable per-run state: candidate app infos, current selection, derived UA
    _APP_INFO_POOL = None
    _APP_INFO = None
    _APP_USER_AGENT = None
@functools.cached_property
def _KNOWN_APP_INFO(self):
# If we have a genuine device ID, we may not need any IID
default = [''] if self._KNOWN_DEVICE_ID else []
return self._configuration_arg('app_info', default, ie_key=TikTokIE)
@functools.cached_property
def _KNOWN_DEVICE_ID(self):
return self._configuration_arg('device_id', [None], ie_key=TikTokIE)[0]
@functools.cached_property
def _DEVICE_ID(self):
return self._KNOWN_DEVICE_ID or str(random.randint(7250000000000000000, 7325099899999994577))
@functools.cached_property
def _API_HOSTNAME(self):
return self._configuration_arg(
'api_hostname', ['api16-normal-c-useast1a.tiktokv.com'], ie_key=TikTokIE)[0]
def _get_next_app_info(self):
if self._APP_INFO_POOL is None:
defaults = {
key: self._configuration_arg(key, [default], ie_key=TikTokIE)[0]
for key, default in self._APP_INFO_DEFAULTS.items()
if key != 'iid'
}
self._APP_INFO_POOL = [
{**defaults, **dict(
(k, v) for k, v in zip(self._APP_INFO_DEFAULTS, app_info.split('/'), strict=False) if v
)} for app_info in self._KNOWN_APP_INFO
]
if not self._APP_INFO_POOL:
return False
self._APP_INFO = self._APP_INFO_POOL.pop(0)
app_name = self._APP_INFO['app_name']
version = self._APP_INFO['manifest_app_version']
if app_name == 'musical_ly':
package = f'com.zhiliaoapp.musically/{version}'
else: # trill, aweme
package = f'com.ss.android.ugc.{app_name}/{version}'
self._APP_USER_AGENT = f'{package} (Linux; U; Android 13; en_US; Pixel 7; Build/TD1A.220804.031; Cronet/58.0.2991.0)'
return True
@staticmethod
def _create_url(user_id, video_id):
return f'https://www.tiktok.com/@{user_id or "_"}/video/{video_id}'
    def _get_sigi_state(self, webpage, display_id):
        """Extract the SIGI_STATE (or sigi-persisted-data) JSON blob from a webpage, or {}."""
        return self._search_json(
            r'<script[^>]+\bid="(?:SIGI_STATE|sigi-persisted-data)"[^>]*>', webpage,
            'sigi state', display_id, end_pattern=r'</script>', default={})
    def _get_universal_data(self, webpage, display_id):
        """Extract the __DEFAULT_SCOPE__ dict from the universal rehydration JSON, or {}."""
        return traverse_obj(self._search_json(
            r'<script[^>]+\bid="__UNIVERSAL_DATA_FOR_REHYDRATION__"[^>]*>', webpage,
            'universal data', display_id, end_pattern=r'</script>', default={}),
            ('__DEFAULT_SCOPE__', {dict})) or {}
    def _call_api_impl(self, ep, video_id, query=None, data=None, headers=None, fatal=True,
                       note='Downloading API JSON', errnote='Unable to download API page'):
        """Perform one request against the mobile API endpoint `ep` and return parsed JSON.

        A random odin_tt cookie is set on each call, and any web-session sid_tt
        cookie is mirrored onto the API host so authenticated requests work.
        """
        self._set_cookie(self._API_HOSTNAME, 'odin_tt', ''.join(random.choices('0123456789abcdef', k=160)))
        webpage_cookies = self._get_cookies(self._WEBPAGE_HOST)
        if webpage_cookies.get('sid_tt'):
            self._set_cookie(self._API_HOSTNAME, 'sid_tt', webpage_cookies['sid_tt'].value)
        return self._download_json(
            f'https://{self._API_HOSTNAME}/aweme/v1/{ep}/', video_id=video_id,
            fatal=fatal, note=note, errnote=errnote, headers={
                'User-Agent': self._APP_USER_AGENT,
                'Accept': 'application/json',
                **(headers or {}),
            }, query=query, data=data)
    def _build_api_query(self, query):
        """Merge `query` with the device/app parameters the mobile API expects.

        Values come from the currently selected self._APP_INFO plus constants
        that mimic a Pixel 7 running the Android app; falsy entries are dropped
        by filter_dict. Caller-supplied `query` keys are overridden by these.
        """
        return filter_dict({
            **query,
            'device_platform': 'android',
            'os': 'android',
            'ssmix': 'a',
            '_rticket': int(time.time() * 1000),
            'cdid': str(uuid.uuid4()),
            'channel': 'googleplay',
            'aid': self._APP_INFO['aid'],
            'app_name': self._APP_INFO['app_name'],
            'version_code': ''.join(f'{int(v):02d}' for v in self._APP_INFO['app_version'].split('.')),
            'version_name': self._APP_INFO['app_version'],
            'manifest_version_code': self._APP_INFO['manifest_app_version'],
            'update_version_code': self._APP_INFO['manifest_app_version'],
            'ab_version': self._APP_INFO['app_version'],
            'resolution': '1080*2400',
            'dpi': 420,
            'device_type': 'Pixel 7',
            'device_brand': 'Google',
            'language': 'en',
            'os_api': '29',
            'os_version': '13',
            'ac': 'wifi',
            'is_pad': '0',
            'current_region': 'US',
            'app_type': 'normal',
            'sys_region': 'US',
            # random install time within roughly the last 1-13 days
            'last_install_time': int(time.time()) - random.randint(86400, 1123200),
            'timezone_name': 'America/New_York',
            'residence': 'US',
            'app_language': 'en',
            'timezone_offset': '-14400',
            'host_abi': 'armeabi-v7a',
            'locale': 'en',
            'ac2': 'wifi5g',
            'uoo': '1',
            'carrier_region': 'US',
            'op_region': 'US',
            'build_number': self._APP_INFO['app_version'],
            'region': 'US',
            'ts': int(time.time()),
            'iid': self._APP_INFO.get('iid'),
            'device_id': self._DEVICE_ID,
            'openudid': ''.join(random.choices('0123456789abcdef', k=16)),
        })
    def _call_api(self, ep, video_id, query=None, data=None, headers=None, fatal=True,
                  note='Downloading API JSON', errnote='Unable to download API page'):
        """Call the mobile API, rotating through app-info candidates on failure.

        An empty (non-JSON) response body signals a rejected app info; the next
        candidate is tried until the pool is exhausted. Returns None (or raises,
        when fatal) if no candidate works.
        """
        if not self._APP_INFO and not self._get_next_app_info():
            message = 'No working app info is available'
            if fatal:
                raise ExtractorError(message, expected=True)
            else:
                self.report_warning(message)
                return

        max_tries = len(self._APP_INFO_POOL) + 1  # _APP_INFO_POOL + _APP_INFO
        for count in itertools.count(1):
            self.write_debug(str(self._APP_INFO))
            real_query = self._build_api_query(query or {})
            try:
                return self._call_api_impl(
                    ep, video_id, query=real_query, data=data, headers=headers,
                    fatal=fatal, note=note, errnote=errnote)
            except ExtractorError as e:
                # pos == 0 means the response body was not JSON at all (likely blocked app info)
                if isinstance(e.cause, json.JSONDecodeError) and e.cause.pos == 0:
                    message = str(e.cause or e.msg)
                    if not self._get_next_app_info():
                        if fatal:
                            raise
                        else:
                            self.report_warning(message)
                            return
                    self.report_warning(f'{message}. Retrying... (attempt {count} of {max_tries})')
                    continue
                raise
def _extract_aweme_app(self, aweme_id):
aweme_detail = traverse_obj(
self._call_api('multi/aweme/detail', aweme_id, data=urlencode_postdata({
'aweme_ids': f'[{aweme_id}]',
'request_source': '0',
}), headers={'X-Argus': ''}), ('aweme_details', 0, {dict}))
if not aweme_detail:
raise ExtractorError('Unable to extract aweme detail info', video_id=aweme_id)
return self._parse_aweme_video_app(aweme_detail)
    def _extract_web_data_and_status(self, url, video_id, fatal=True):
        """Download a web page and extract (video_data, status_code).

        Tries the known webpage data sources in order of freshness: universal
        rehydration data, SIGI state, then next.js data. Raises/warns on login
        walls and content-classification gates depending on `fatal`.
        Returns ({}, -1) when the page could not be downloaded at all.
        """
        video_data, status = {}, -1
        res = self._download_webpage_handle(url, video_id, fatal=fatal, impersonate=True)
        if res is False:
            return video_data, status
        webpage, urlh = res
        # A redirect to /login means the content is behind a login wall
        if urllib.parse.urlparse(urlh.url).path == '/login':
            message = 'TikTok is requiring login for access to this content'
            if fatal:
                self.raise_login_required(message)
            self.report_warning(f'{message}. {self._login_hint()}')
            return video_data, status

        if universal_data := self._get_universal_data(webpage, video_id):
            self.write_debug('Found universal data for rehydration')
            status = traverse_obj(universal_data, ('webapp.video-detail', 'statusCode', {int})) or 0
            video_data = traverse_obj(universal_data, ('webapp.video-detail', 'itemInfo', 'itemStruct', {dict}))

        elif sigi_data := self._get_sigi_state(webpage, video_id):
            self.write_debug('Found sigi state data')
            status = traverse_obj(sigi_data, ('VideoPage', 'statusCode', {int})) or 0
            video_data = traverse_obj(sigi_data, ('ItemModule', video_id, {dict}))

        elif next_data := self._search_nextjs_data(webpage, video_id, default={}):
            self.write_debug('Found next.js data')
            status = traverse_obj(next_data, ('props', 'pageProps', 'statusCode', {int})) or 0
            video_data = traverse_obj(next_data, ('props', 'pageProps', 'itemInfo', 'itemStruct', {dict}))

        elif fatal:
            raise ExtractorError('Unable to extract webpage video data')

        # Classified (sensitive) content hides the video object unless logged in
        if not traverse_obj(video_data, ('video', {dict})) and traverse_obj(video_data, ('isContentClassified', {bool})):
            message = 'This post may not be comfortable for some audiences. Log in for access'
            if fatal:
                self.raise_login_required(message)
            self.report_warning(f'{message}. {self._login_hint()}', video_id=video_id)

        return video_data, status
    def _get_subtitles(self, aweme_detail, aweme_id, user_name):
        """Collect subtitles for a post from three sources, in order of preference.

        Sources tried: auto-caption JSON from the detail endpoint (converted to
        SRT), caption files from the feed endpoint, and finally the webpage's
        subtitleInfos (refetched via user_name when needed). Per language, the
        entries are sorted so the JSON creator_caption ranks lowest.
        """
        # TODO: Extract text positioning info

        EXT_MAP = {  # From lowest to highest preference
            'creator_caption': 'json',
            'srt': 'srt',
            'webvtt': 'vtt',
        }
        preference = qualities(tuple(EXT_MAP.values()))

        subtitles = {}

        # aweme/detail endpoint subs
        captions_info = traverse_obj(
            aweme_detail, ('interaction_stickers', ..., 'auto_video_caption_info', 'auto_captions', ...), expected_type=dict)
        for caption in captions_info:
            caption_url = traverse_obj(caption, ('url', 'url_list', ...), expected_type=url_or_none, get_all=False)
            if not caption_url:
                continue
            caption_json = self._download_json(
                caption_url, aweme_id, note='Downloading captions', errnote='Unable to download captions', fatal=False)
            if not caption_json:
                continue
            subtitles.setdefault(caption.get('language', 'en'), []).append({
                'ext': 'srt',
                # Build an SRT document from the utterance list (times are in ms)
                'data': '\n\n'.join(
                    f'{i + 1}\n{srt_subtitles_timecode(line["start_time"] / 1000)} --> {srt_subtitles_timecode(line["end_time"] / 1000)}\n{line["text"]}'
                    for i, line in enumerate(caption_json['utterances']) if line.get('text')),
            })

        # feed endpoint subs
        if not subtitles:
            for caption in traverse_obj(aweme_detail, ('video', 'cla_info', 'caption_infos', ...), expected_type=dict):
                if not caption.get('url'):
                    continue
                subtitles.setdefault(caption.get('lang') or 'en', []).append({
                    'url': caption['url'],
                    'ext': EXT_MAP.get(caption.get('Format')),
                })

        # webpage subs
        if not subtitles:
            if user_name:  # only _parse_aweme_video_app needs to extract the webpage here
                aweme_detail, _ = self._extract_web_data_and_status(
                    self._create_url(user_name, aweme_id), aweme_id, fatal=False)
            for caption in traverse_obj(aweme_detail, ('video', 'subtitleInfos', lambda _, v: v['Url'])):
                subtitles.setdefault(caption.get('LanguageCodeName') or 'en', []).append({
                    'url': caption['Url'],
                    'ext': EXT_MAP.get(caption.get('Format')),
                })

        # Deprioritize creator_caption json since it can't be embedded or used by media players
        for lang, subs_list in subtitles.items():
            subtitles[lang] = sorted(subs_list, key=lambda x: preference(x['ext']))

        return subtitles
    def _parse_url_key(self, url_key):
        """Parse a format url_key like 'v0..._h264_720p_1200' into (format meta dict, res).

        Returns ({}, None) when the key does not match the expected pattern.
        `res` is the resolution label (e.g. '720p'); tbr is converted from bps.
        """
        format_id, codec, res, bitrate = self._search_regex(
            r'v[^_]+_(?P<id>(?P<codec>[^_]+)_(?P<res>\d+p)_(?P<bitrate>\d+))', url_key,
            'url key', default=(None, None, None, None), group=('id', 'codec', 'res', 'bitrate'))
        if not format_id:
            return {}, None

        return {
            'format_id': format_id,
            # 'bytevc1' is TikTok's label for H.265/HEVC
            'vcodec': 'h265' if codec == 'bytevc1' else codec,
            'tbr': int_or_none(bitrate, scale=1000) or None,
            'quality': qualities(self.QUALITIES)(res),
        }, res
    def _parse_aweme_video_app(self, aweme_detail):
        """Build a full info dict from a mobile-API aweme detail object.

        Collects formats from play_addr, download_addr, codec-specific play
        addresses and per-bitrate entries, de-duplicates them, then maps
        statistics, author, music and thumbnail metadata.
        """
        aweme_id = aweme_detail['aweme_id']
        video_info = aweme_detail['video']
        known_resolutions = {}

        def audio_meta(url):
            # Music-track URLs are audio-only; mark them so format selection works
            ext = determine_ext(url, default_ext='m4a')
            return {
                'format_note': 'Music track',
                'ext': ext,
                'acodec': 'aac' if ext == 'm4a' else ext,
                'vcodec': 'none',
                'width': None,
                'height': None,
            } if ext == 'mp3' or '-music-' in url else {}

        def extract_addr(addr, add_meta={}):
            # Expand one address object into a format dict per URL in its url_list
            parsed_meta, res = self._parse_url_key(addr.get('url_key', ''))
            is_bytevc2 = parsed_meta.get('vcodec') == 'bytevc2'
            if res:
                known_resolutions.setdefault(res, {}).setdefault('height', int_or_none(addr.get('height')))
                known_resolutions[res].setdefault('width', int_or_none(addr.get('width')))
                parsed_meta.update(known_resolutions.get(res, {}))
                add_meta.setdefault('height', int_or_none(res[:-1]))
            return [{
                'url': url,
                'filesize': int_or_none(addr.get('data_size')),
                'ext': 'mp4',
                'acodec': 'aac',
                'source_preference': -2 if 'aweme/v1' in url else -1,  # Downloads from API might get blocked
                **add_meta, **parsed_meta,
                # bytevc2 is bytedance's own custom h266/vvc codec, as-of-yet unplayable
                'preference': -100 if is_bytevc2 else -1,
                'format_note': join_nonempty(
                    add_meta.get('format_note'), '(API)' if 'aweme/v1' in url else None,
                    '(UNPLAYABLE)' if is_bytevc2 else None, delim=' '),
                **audio_meta(url),
            } for url in addr.get('url_list') or []]

        # Hack: Add direct video links first to prioritize them when removing duplicate formats
        formats = []
        width = int_or_none(video_info.get('width'))
        height = int_or_none(video_info.get('height'))
        ratio = try_call(lambda: width / height) or 0.5625
        if video_info.get('play_addr'):
            formats.extend(extract_addr(video_info['play_addr'], {
                'format_id': 'play_addr',
                'format_note': 'Direct video',
                'vcodec': 'h265' if traverse_obj(
                    video_info, 'is_bytevc1', 'is_h265') else 'h264',  # TODO: Check for "direct iOS" videos, like https://www.tiktok.com/@cookierun_dev/video/7039716639834656002
                'width': width,
                'height': height,
            }))
        if video_info.get('download_addr'):
            download_addr = video_info['download_addr']
            dl_width = int_or_none(download_addr.get('width'))
            formats.extend(extract_addr(download_addr, {
                'format_id': 'download_addr',
                'format_note': 'Download video%s' % (', watermarked' if video_info.get('has_watermark') else ''),
                'vcodec': 'h264',
                'width': dl_width,
                'height': try_call(lambda: int(dl_width / ratio)),  # download_addr['height'] is wrong
                'preference': -2 if video_info.get('has_watermark') else -1,
            }))
        if video_info.get('play_addr_h264'):
            formats.extend(extract_addr(video_info['play_addr_h264'], {
                'format_id': 'play_addr_h264',
                'format_note': 'Direct video',
                'vcodec': 'h264',
            }))
        if video_info.get('play_addr_bytevc1'):
            formats.extend(extract_addr(video_info['play_addr_bytevc1'], {
                'format_id': 'play_addr_bytevc1',
                'format_note': 'Direct video',
                'vcodec': 'h265',
            }))

        for bitrate in video_info.get('bit_rate', []):
            if bitrate.get('play_addr'):
                formats.extend(extract_addr(bitrate['play_addr'], {
                    'format_id': bitrate.get('gear_name'),
                    'format_note': 'Playback video',
                    'tbr': try_get(bitrate, lambda x: x['bit_rate'] / 1000),
                    'vcodec': 'h265' if traverse_obj(
                        bitrate, 'is_bytevc1', 'is_h265') else 'h264',
                    'fps': bitrate.get('FPS'),
                }))

        self._remove_duplicate_formats(formats)
        # Propagate the web session cookie so format URLs remain accessible
        auth_cookie = self._get_cookies(self._WEBPAGE_HOST).get('sid_tt')
        if auth_cookie:
            for f in formats:
                self._set_cookie(urllib.parse.urlparse(f['url']).hostname, 'sid_tt', auth_cookie.value)

        stats_info = aweme_detail.get('statistics') or {}
        music_info = aweme_detail.get('music') or {}
        labels = traverse_obj(aweme_detail, ('hybrid_label', ..., 'text'), expected_type=str)

        contained_music_track = traverse_obj(
            music_info, ('matched_song', 'title'), ('matched_pgc_sound', 'title'), expected_type=str)
        contained_music_author = traverse_obj(
            music_info, ('matched_song', 'author'), ('matched_pgc_sound', 'author'), 'author', expected_type=str)

        # Replace the generic "original sound - <handle>" with the matched track when possible
        is_generic_og_trackname = music_info.get('is_original_sound') and music_info.get('title') == 'original sound - {}'.format(music_info.get('owner_handle'))
        if is_generic_og_trackname:
            music_track, music_author = contained_music_track or 'original sound', contained_music_author
        else:
            music_track, music_author = music_info.get('title'), traverse_obj(music_info, ('author', {str}))

        author_info = traverse_obj(aweme_detail, ('author', {
            'uploader': ('unique_id', {str}),
            'uploader_id': ('uid', {str_or_none}),
            'channel': ('nickname', {str}),
            'channel_id': ('sec_uid', {str}),
        }))

        return {
            'id': aweme_id,
            **traverse_obj(aweme_detail, {
                'title': ('desc', {truncate_string(left=72)}),
                'description': ('desc', {str}),
                'timestamp': ('create_time', {int_or_none}),
            }),
            **traverse_obj(stats_info, {
                'view_count': 'play_count',
                'like_count': 'digg_count',
                'repost_count': 'share_count',
                'comment_count': 'comment_count',
                'save_count': 'collect_count',
            }, expected_type=int_or_none),
            **author_info,
            'channel_url': format_field(author_info, 'channel_id', self._UPLOADER_URL_FORMAT, default=None),
            'uploader_url': format_field(
                author_info, ['uploader', 'uploader_id'], self._UPLOADER_URL_FORMAT, default=None),
            'track': music_track,
            'album': str_or_none(music_info.get('album')) or None,
            'artists': re.split(r'(?:, | & )', music_author) if music_author else None,
            'formats': formats,
            'subtitles': self.extract_subtitles(
                aweme_detail, aweme_id, traverse_obj(author_info, 'uploader', 'uploader_id', 'channel_id')),
            'thumbnails': [
                {
                    'id': cover_id,
                    'url': cover_url,
                    'preference': -1 if cover_id in ('cover', 'origin_cover') else -2,
                }
                for cover_id in (
                    'cover', 'ai_dynamic_cover', 'animated_cover',
                    'ai_dynamic_cover_bak', 'origin_cover', 'dynamic_cover')
                for cover_url in traverse_obj(video_info, (cover_id, 'url_list', ...))
            ],
            'duration': (traverse_obj(video_info, (
                (None, 'download_addr'), 'duration', {int_or_none(scale=1000)}, any))
                or traverse_obj(music_info, ('duration', {int_or_none}))),
            'availability': self._availability(
                is_private='Private' in labels,
                needs_subscription='Friends only' in labels,
                is_unlisted='Followers only' in labels),
            '_format_sort_fields': ('quality', 'codec', 'size', 'br'),
        }
def _extract_web_formats(self, aweme_detail):
COMMON_FORMAT_INFO = {
'ext': 'mp4',
'vcodec': 'h264',
'acodec': 'aac',
}
video_info = traverse_obj(aweme_detail, ('video', {dict})) or {}
play_width = int_or_none(video_info.get('width'))
play_height = int_or_none(video_info.get('height'))
ratio = try_call(lambda: play_width / play_height) or 0.5625
formats = []
for bitrate_info in traverse_obj(video_info, ('bitrateInfo', lambda _, v: v['PlayAddr']['UrlList'])):
format_info, res = self._parse_url_key(
traverse_obj(bitrate_info, ('PlayAddr', 'UrlKey', {str})) or '')
# bytevc2 is bytedance's own custom h266/vvc codec, as-of-yet unplayable
is_bytevc2 = format_info.get('vcodec') == 'bytevc2'
format_info.update({
'format_note': 'UNPLAYABLE' if is_bytevc2 else None,
'preference': -100 if is_bytevc2 else -1,
'filesize': traverse_obj(bitrate_info, ('PlayAddr', 'DataSize', {int_or_none})),
})
if dimension := (res and int(res[:-1])):
if dimension == 540: # '540p' is actually 576p
dimension = 576
if ratio < 1: # portrait: res/dimension is width
y = int(dimension / ratio)
format_info.update({
'width': dimension,
'height': y - (y % 2),
})
else: # landscape: res/dimension is height
x = int(dimension * ratio)
format_info.update({
'width': x + (x % 2),
'height': dimension,
})
for video_url in traverse_obj(bitrate_info, ('PlayAddr', 'UrlList', ..., {url_or_none})):
formats.append({
**COMMON_FORMAT_INFO,
**format_info,
'url': self._proto_relative_url(video_url),
})
# We don't have res string for play formats, but need quality for sorting & de-duplication
play_quality = traverse_obj(formats, (lambda _, v: v['width'] == play_width, 'quality', any))
for play_url in traverse_obj(video_info, ('playAddr', ((..., 'src'), None), {url_or_none})):
formats.append({
**COMMON_FORMAT_INFO,
'format_id': 'play',
'url': self._proto_relative_url(play_url),
'width': play_width,
'height': play_height,
'quality': play_quality,
})
for download_url in traverse_obj(video_info, (('downloadAddr', ('download', 'url')), {url_or_none})):
formats.append({
**COMMON_FORMAT_INFO,
'format_id': 'download',
'url': self._proto_relative_url(download_url),
'format_note': 'watermarked',
'preference': -2,
})
self._remove_duplicate_formats(formats)
# Is it a slideshow with only audio for download?
if not formats and traverse_obj(aweme_detail, ('music', 'playUrl', {url_or_none})):
audio_url = aweme_detail['music']['playUrl']
ext = traverse_obj(parse_qs(audio_url), (
'mime_type', -1, {lambda x: x.replace('_', '/')}, {mimetype2ext})) or 'm4a'
formats.append({
'format_id': 'audio',
'url': self._proto_relative_url(audio_url),
'ext': ext,
'acodec': 'aac' if ext == 'm4a' else ext,
'vcodec': 'none',
})
# Filter out broken formats, see https://github.com/yt-dlp/yt-dlp/issues/11034
return [f for f in formats if urllib.parse.urlparse(f['url']).hostname != 'www.tiktok.com']
def _parse_aweme_video_web(self, aweme_detail, webpage_url, video_id, extract_flat=False):
author_info = traverse_obj(aweme_detail, (('authorInfo', 'author', None), {
'channel': ('nickname', {str}),
'channel_id': (('authorSecId', 'secUid'), {str}),
'uploader': (('uniqueId', 'author'), {str}),
'uploader_id': (('authorId', 'uid', 'id'), {str_or_none}),
}), get_all=False)
return {
'id': video_id,
'formats': None if extract_flat else self._extract_web_formats(aweme_detail),
'subtitles': None if extract_flat else self.extract_subtitles(aweme_detail, video_id, None),
'http_headers': {'Referer': webpage_url},
**author_info,
'channel_url': format_field(author_info, 'channel_id', self._UPLOADER_URL_FORMAT, default=None),
'uploader_url': format_field(
author_info, ['uploader', 'uploader_id'], self._UPLOADER_URL_FORMAT, default=None),
**traverse_obj(aweme_detail, ('music', {
'track': ('title', {str}),
'album': ('album', {str}, filter),
'artists': ('authorName', {str}, {lambda x: re.split(r'(?:, | & )', x) if x else None}),
'duration': ('duration', {int_or_none}),
})),
**traverse_obj(aweme_detail, {
'title': ('desc', {truncate_string(left=72)}),
'description': ('desc', {str}),
# audio-only slideshows have a video duration of 0 and an actual audio duration
'duration': ('video', 'duration', {int_or_none}, filter),
'timestamp': ('createTime', {int_or_none}),
}),
**traverse_obj(aweme_detail, ('stats', {
'view_count': 'playCount',
'like_count': 'diggCount',
'repost_count': 'shareCount',
'comment_count': 'commentCount',
'save_count': 'collectCount',
}), expected_type=int_or_none),
'thumbnails': [
{
'id': cover_id,
'url': self._proto_relative_url(cover_url),
'preference': -2 if cover_id == 'dynamicCover' else -1,
}
for cover_id in ('thumbnail', 'cover', 'dynamicCover', 'originCover')
for cover_url in traverse_obj(aweme_detail, ((None, 'video'), cover_id, {url_or_none}))
],
}
class TikTokIE(TikTokBaseIE):
_VALID_URL = r'https?://www\.tiktok\.com/(?:embed|@(?P<user_id>[\w\.-]+)?/video)/(?P<id>\d+)'
_EMBED_REGEX = [rf'<(?:script|iframe)[^>]+\bsrc=(["\'])(?P<url>{_VALID_URL})']
_TESTS = [{
'url': 'https://www.tiktok.com/@leenabhushan/video/6748451240264420610',
'md5': '736bb7a466c6f0a6afeb597da1e6f5b7',
'info_dict': {
'id': '6748451240264420610',
'ext': 'mp4',
'title': '#jassmanak #lehanga #leenabhushan',
'description': '#jassmanak #lehanga #leenabhushan',
'duration': 13,
'height': 1024,
'width': 576,
'uploader': 'leenabhushan',
'uploader_id': '6691488002098119685',
'uploader_url': 'https://www.tiktok.com/@MS4wLjABAAAA_Eb4t1vodM1IuTy_cvp9CY22RAb59xqrO0Xtz9CYQJvgXaDvZxYnZYRzDWhhgJmy',
'creator': 'facestoriesbyleenabh',
'thumbnail': r're:^https?://[\w\/\.\-]+(~[\w\-]+\.image)?',
'upload_date': '20191016',
'timestamp': 1571246252,
'view_count': int,
'like_count': int,
'repost_count': int,
'comment_count': int,
'save_count': int,
'artist': 'Ysrbeats',
'album': 'Lehanga',
'track': 'Lehanga',
},
'skip': '404 Not Found',
}, {
'url': 'https://www.tiktok.com/@patroxofficial/video/6742501081818877190?langCountry=en',
'md5': 'f21112672ee4ce05ca390fb6522e1b6f',
'info_dict': {
'id': '6742501081818877190',
'ext': 'mp4',
'title': 'Tag 1 Friend reverse this Video and look what happens 🤩😱 @skyandtami ...',
'description': 'md5:5e2a23877420bb85ce6521dbee39ba94',
'duration': 27,
'height': 1024,
'width': 576,
'uploader': 'patrox',
'uploader_id': '18702747',
'uploader_url': 'https://www.tiktok.com/@patrox',
'channel_url': 'https://www.tiktok.com/@MS4wLjABAAAAiFnldaILebi5heDoVU6bn4jBWWycX6-9U3xuNPqZ8Ws',
'channel_id': 'MS4wLjABAAAAiFnldaILebi5heDoVU6bn4jBWWycX6-9U3xuNPqZ8Ws',
'channel': 'patroX',
'thumbnail': r're:^https?://[\w\/\.\-]+(~[\w\-]+\.image)?',
'upload_date': '20190930',
'timestamp': 1569860870,
'view_count': int,
'like_count': int,
'repost_count': int,
'comment_count': int,
'save_count': int,
'artists': ['Evan Todd', 'Jessica Keenan Wynn', 'Alice Lee', 'Barrett Wilbert Weed', 'Jon Eidson'],
'track': 'Big Fun',
},
}, {
# Banned audio, was available on the app, now works with web too
'url': 'https://www.tiktok.com/@barudakhb_/video/6984138651336838402',
'info_dict': {
'id': '6984138651336838402',
'ext': 'mp4',
'title': 'Balas @yolaaftwsr hayu yu ? #SquadRandom_ 🔥',
'description': 'Balas @yolaaftwsr hayu yu ? #SquadRandom_ 🔥',
'uploader': 'barudakhb_',
'channel': 'md5:29f238c49bc0c176cb3cef1a9cea9fa6',
'uploader_id': '6974687867511718913',
'uploader_url': 'https://www.tiktok.com/@barudakhb_',
'channel_url': 'https://www.tiktok.com/@MS4wLjABAAAAbhBwQC-R1iKoix6jDFsF-vBdfx2ABoDjaZrM9fX6arU3w71q3cOWgWuTXn1soZ7d',
'channel_id': 'MS4wLjABAAAAbhBwQC-R1iKoix6jDFsF-vBdfx2ABoDjaZrM9fX6arU3w71q3cOWgWuTXn1soZ7d',
'track': 'Boka Dance',
'artists': ['md5:29f238c49bc0c176cb3cef1a9cea9fa6'],
'timestamp': 1626121503,
'duration': 18,
'thumbnail': r're:^https?://[\w\/\.\-]+(~[\w\-]+\.image)?',
'upload_date': '20210712',
'view_count': int,
'like_count': int,
'repost_count': int,
'comment_count': int,
'save_count': int,
},
}, {
# Sponsored video, only available with feed workaround
'url': 'https://www.tiktok.com/@MS4wLjABAAAATh8Vewkn0LYM7Fo03iec3qKdeCUOcBIouRk1mkiag6h3o_pQu_dUXvZ2EZlGST7_/video/7042692929109986561',
'info_dict': {
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | true |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/radiocanada.py | yt_dlp/extractor/radiocanada.py | from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
determine_ext,
int_or_none,
unified_strdate,
)
class RadioCanadaIE(InfoExtractor):
IE_NAME = 'radiocanada'
_VALID_URL = r'(?:radiocanada:|https?://ici\.radio-canada\.ca/widgets/mediaconsole/)(?P<app_code>[^:/]+)[:/](?P<id>[0-9]+)'
_TESTS = [
{
'url': 'http://ici.radio-canada.ca/widgets/mediaconsole/medianet/7184272',
'info_dict': {
'id': '7184272',
'ext': 'mp4',
'title': 'Le parcours du tireur capté sur vidéo',
'description': 'Images des caméras de surveillance fournies par la GRC montrant le parcours du tireur d\'Ottawa',
'upload_date': '20141023',
},
'params': {
# m3u8 download
'skip_download': True,
},
},
{
# empty Title
'url': 'http://ici.radio-canada.ca/widgets/mediaconsole/medianet/7754998/',
'info_dict': {
'id': '7754998',
'ext': 'mp4',
'title': 'letelejournal22h',
'description': 'INTEGRALE WEB 22H-TJ',
'upload_date': '20170720',
},
'params': {
# m3u8 download
'skip_download': True,
},
},
{
# with protectionType but not actually DRM protected
'url': 'radiocanada:toutv:140872',
'info_dict': {
'id': '140872',
'title': 'Épisode 1',
'series': 'District 31',
},
'only_matching': True,
},
]
_GEO_COUNTRIES = ['CA']
_access_token = None
_claims = None
def _call_api(self, path, video_id=None, app_code=None, query=None):
if not query:
query = {}
query.update({
'client_key': '773aea60-0e80-41bb-9c7f-e6d7c3ad17fb',
'output': 'json',
})
if video_id:
query.update({
'appCode': app_code,
'idMedia': video_id,
})
if self._access_token:
query['access_token'] = self._access_token
try:
return self._download_json(
'https://services.radio-canada.ca/media/' + path, video_id, query=query)
except ExtractorError as e:
if isinstance(e.cause, HTTPError) and e.cause.status in (401, 422):
data = self._parse_json(e.cause.response.read().decode(), None)
error = data.get('error_description') or data['errorMessage']['text']
raise ExtractorError(error, expected=True)
raise
def _extract_info(self, app_code, video_id):
metas = self._call_api('meta/v1/index.ashx', video_id, app_code)['Metas']
def get_meta(name):
for meta in metas:
if meta.get('name') == name:
text = meta.get('text')
if text:
return text
# protectionType does not necessarily mean the video is DRM protected (see
# https://github.com/ytdl-org/youtube-dl/pull/18609).
if get_meta('protectionType'):
self.report_warning('This video is probably DRM protected.')
query = {
'connectionType': 'hd',
'deviceType': 'ipad',
'multibitrate': 'true',
}
if self._claims:
query['claims'] = self._claims
v_data = self._call_api('validation/v2/', video_id, app_code, query)
v_url = v_data.get('url')
if not v_url:
error = v_data['message']
if error == "Le contenu sélectionné n'est pas disponible dans votre pays":
raise self.raise_geo_restricted(error, self._GEO_COUNTRIES)
if error == 'Le contenu sélectionné est disponible seulement en premium':
self.raise_login_required(error)
raise ExtractorError(
f'{self.IE_NAME} said: {error}', expected=True)
formats = self._extract_m3u8_formats(v_url, video_id, 'mp4')
subtitles = {}
closed_caption_url = get_meta('closedCaption') or get_meta('closedCaptionHTML5')
if closed_caption_url:
subtitles['fr'] = [{
'url': closed_caption_url,
'ext': determine_ext(closed_caption_url, 'vtt'),
}]
return {
'id': video_id,
'title': get_meta('Title') or get_meta('AV-nomEmission'),
'description': get_meta('Description') or get_meta('ShortDescription'),
'thumbnail': get_meta('imageHR') or get_meta('imageMR') or get_meta('imageBR'),
'duration': int_or_none(get_meta('length')),
'series': get_meta('Emission'),
'season_number': int_or_none('SrcSaison'),
'episode_number': int_or_none('SrcEpisode'),
'upload_date': unified_strdate(get_meta('Date')),
'subtitles': subtitles,
'formats': formats,
}
def _real_extract(self, url):
return self._extract_info(*self._match_valid_url(url).groups())
class RadioCanadaAudioVideoIE(InfoExtractor):
IE_NAME = 'radiocanada:audiovideo'
_VALID_URL = r'https?://ici\.radio-canada\.ca/([^/]+/)*media-(?P<id>[0-9]+)'
_TESTS = [{
'url': 'http://ici.radio-canada.ca/audio-video/media-7527184/barack-obama-au-vietnam',
'info_dict': {
'id': '7527184',
'ext': 'mp4',
'title': 'Barack Obama au Vietnam',
'description': 'Les États-Unis lèvent l\'embargo sur la vente d\'armes qui datait de la guerre du Vietnam',
'upload_date': '20160523',
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'https://ici.radio-canada.ca/info/videos/media-7527184/barack-obama-au-vietnam',
'only_matching': True,
}]
def _real_extract(self, url):
return self.url_result(f'radiocanada:medianet:{self._match_id(url)}')
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/smotrim.py | yt_dlp/extractor/smotrim.py | import functools
import json
import re
import urllib.parse
from .common import InfoExtractor
from ..utils import (
OnDemandPagedList,
clean_html,
determine_ext,
extract_attributes,
int_or_none,
parse_iso8601,
str_or_none,
unescapeHTML,
url_or_none,
urljoin,
)
from ..utils.traversal import (
find_element,
find_elements,
require,
traverse_obj,
)
class SmotrimBaseIE(InfoExtractor):
_BASE_URL = 'https://smotrim.ru'
_GEO_BYPASS = False
_GEO_COUNTRIES = ['RU']
def _extract_from_smotrim_api(self, typ, item_id):
path = f'data{typ.replace("-", "")}/{"uid" if typ == "live" else "id"}'
data = self._download_json(
f'https://player.smotrim.ru/iframe/{path}/{item_id}/sid/smotrim', item_id)
media = traverse_obj(data, ('data', 'playlist', 'medialist', -1, {dict}))
if traverse_obj(media, ('locked', {bool})):
self.raise_login_required()
if error_msg := traverse_obj(media, ('errors', {clean_html})):
self.raise_geo_restricted(error_msg, countries=self._GEO_COUNTRIES)
webpage_url = traverse_obj(data, ('data', 'template', 'share_url', {url_or_none}))
webpage = self._download_webpage(webpage_url, item_id)
common = {
'thumbnail': self._html_search_meta(['og:image', 'twitter:image'], webpage, default=None),
**traverse_obj(media, {
'id': ('id', {str_or_none}),
'title': (('episodeTitle', 'title'), {clean_html}, filter, any),
'channel_id': ('channelId', {str_or_none}),
'description': ('anons', {clean_html}, filter),
'season': ('season', {clean_html}, filter),
'series': (('brand_title', 'brandTitle'), {clean_html}, filter, any),
'series_id': ('brand_id', {str_or_none}),
}),
}
if typ == 'audio':
bookmark = self._search_json(
r'class="bookmark"[^>]+value\s*=\s*"', webpage,
'bookmark', item_id, default={}, transform_source=unescapeHTML)
metadata = {
'vcodec': 'none',
**common,
**traverse_obj(media, {
'ext': ('audio_url', {determine_ext(default_ext='mp3')}),
'duration': ('duration', {int_or_none}),
'url': ('audio_url', {url_or_none}),
}),
**traverse_obj(bookmark, {
'title': ('subtitle', {clean_html}),
'timestamp': ('published', {parse_iso8601}),
}),
}
elif typ == 'audio-live':
metadata = {
'ext': 'mp3',
'url': traverse_obj(media, ('source', 'auto', {url_or_none})),
'vcodec': 'none',
**common,
}
else:
formats, subtitles = [], {}
for m3u8_url in traverse_obj(media, (
'sources', 'm3u8', {dict.values}, ..., {url_or_none},
)):
fmts, subs = self._extract_m3u8_formats_and_subtitles(
m3u8_url, item_id, 'mp4', m3u8_id='hls', fatal=False)
formats.extend(fmts)
self._merge_subtitles(subs, target=subtitles)
metadata = {
'formats': formats,
'subtitles': subtitles,
**self._search_json_ld(webpage, item_id),
**common,
}
return {
'age_limit': traverse_obj(data, ('data', 'age_restrictions', {int_or_none})),
'is_live': typ in ('audio-live', 'live'),
'tags': traverse_obj(webpage, (
{find_elements(cls='tags-list__link')}, ..., {clean_html}, filter, all, filter)),
'webpage_url': webpage_url,
**metadata,
}
class SmotrimIE(SmotrimBaseIE):
IE_NAME = 'smotrim'
_VALID_URL = r'(?:https?:)?//(?:(?:player|www)\.)?smotrim\.ru(?:/iframe)?/video(?:/id)?/(?P<id>\d+)'
_EMBED_REGEX = [fr'<iframe\b[^>]+\bsrc=["\'](?P<url>{_VALID_URL})']
_TESTS = [{
'url': 'https://smotrim.ru/video/1539617',
'info_dict': {
'id': '1539617',
'ext': 'mp4',
'title': 'Урок №16',
'duration': 2631,
'series': 'Полиглот. Китайский с нуля за 16 часов!',
'series_id': '60562',
'tags': 'mincount:6',
'thumbnail': r're:https?://cdn-st\d+\.smotrim\.ru/.+\.(?:jpg|png)',
'timestamp': 1466771100,
'upload_date': '20160624',
'view_count': int,
},
}, {
'url': 'https://player.smotrim.ru/iframe/video/id/2988590',
'info_dict': {
'id': '2988590',
'ext': 'mp4',
'title': 'Трейлер',
'age_limit': 16,
'description': 'md5:6af7e68ecf4ed7b8ff6720d20c4da47b',
'duration': 30,
'series': 'Мы в разводе',
'series_id': '71624',
'tags': 'mincount:5',
'thumbnail': r're:https?://cdn-st\d+\.smotrim\.ru/.+\.(?:jpg|png)',
'timestamp': 1750670040,
'upload_date': '20250623',
'view_count': int,
'webpage_url': 'https://smotrim.ru/video/2988590',
},
}]
_WEBPAGE_TESTS = [{
'url': 'https://smotrim.ru/article/2813445',
'info_dict': {
'id': '2431846',
'ext': 'mp4',
'title': 'Съёмки первой программы "Большие и маленькие"',
'description': 'md5:446c9a5d334b995152a813946353f447',
'duration': 240,
'series': 'Новости культуры',
'series_id': '19725',
'tags': 'mincount:6',
'thumbnail': r're:https?://cdn-st\d+\.smotrim\.ru/.+\.(?:jpg|png)',
'timestamp': 1656054443,
'upload_date': '20220624',
'view_count': int,
'webpage_url': 'https://smotrim.ru/video/2431846',
},
}, {
'url': 'https://www.vesti.ru/article/4642878',
'info_dict': {
'id': '3007209',
'ext': 'mp4',
'title': 'Иностранные мессенджеры используют не только мошенники, но и вербовщики',
'description': 'md5:74ab625a0a89b87b2e0ed98d6391b182',
'duration': 265,
'series': 'Вести. Дежурная часть',
'series_id': '5204',
'tags': 'mincount:6',
'thumbnail': r're:https?://cdn-st\d+\.smotrim\.ru/.+\.(?:jpg|png)',
'timestamp': 1754756280,
'upload_date': '20250809',
'view_count': int,
'webpage_url': 'https://smotrim.ru/video/3007209',
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
return self._extract_from_smotrim_api('video', video_id)
class SmotrimAudioIE(SmotrimBaseIE):
IE_NAME = 'smotrim:audio'
_VALID_URL = r'https?://(?:(?:player|www)\.)?smotrim\.ru(?:/iframe)?/audio(?:/id)?/(?P<id>\d+)'
_TESTS = [{
'url': 'https://smotrim.ru/audio/2573986',
'md5': 'e28d94c20da524e242b2d00caef41a8e',
'info_dict': {
'id': '2573986',
'ext': 'mp3',
'title': 'Радиоспектакль',
'description': 'md5:4bcaaf7d532bc78f76e478fad944e388',
'duration': 3072,
'series': 'Морис Леблан. Арсен Люпен, джентльмен-грабитель',
'series_id': '66461',
'tags': 'mincount:7',
'thumbnail': r're:https?://cdn-st\d+\.smotrim\.ru/.+\.(?:jpg|png)',
'timestamp': 1624884358,
'upload_date': '20210628',
},
}, {
'url': 'https://player.smotrim.ru/iframe/audio/id/2860468',
'md5': '5a6bc1fa24c7142958be1ad9cfae58a8',
'info_dict': {
'id': '2860468',
'ext': 'mp3',
'title': 'Колобок и музыкальная игра "Терем-теремок"',
'duration': 1501,
'series': 'Веселый колобок',
'series_id': '68880',
'tags': 'mincount:4',
'thumbnail': r're:https?://cdn-st\d+\.smotrim\.ru/.+\.(?:jpg|png)',
'timestamp': 1755925800,
'upload_date': '20250823',
'webpage_url': 'https://smotrim.ru/audio/2860468',
},
}]
def _real_extract(self, url):
audio_id = self._match_id(url)
return self._extract_from_smotrim_api('audio', audio_id)
class SmotrimLiveIE(SmotrimBaseIE):
IE_NAME = 'smotrim:live'
_VALID_URL = r'''(?x:
(?:https?:)?//
(?:(?:(?:test)?player|www)\.)?
(?:
smotrim\.ru|
vgtrk\.com
)
(?:/iframe)?/
(?P<type>
channel|
(?:audio-)?live
)
(?:/u?id)?/(?P<id>[\da-f-]+)
)'''
_EMBED_REGEX = [fr'<iframe\b[^>]+\bsrc=["\'](?P<url>{_VALID_URL})']
_TESTS = [{
'url': 'https://smotrim.ru/channel/76',
'info_dict': {
'id': '1661',
'ext': 'mp4',
'title': str,
'channel_id': '76',
'description': 'Смотрим прямой эфир «Москва 24»',
'display_id': '76',
'live_status': 'is_live',
'thumbnail': r're:https?://cdn-st\d+\.smotrim\.ru/.+\.(?:jpg|png)',
'timestamp': int,
'upload_date': str,
},
'params': {'skip_download': 'Livestream'},
}, {
# Radio
'url': 'https://smotrim.ru/channel/81',
'info_dict': {
'id': '81',
'ext': 'mp3',
'title': str,
'channel_id': '81',
'live_status': 'is_live',
'thumbnail': r're:https?://cdn-st\d+\.smotrim\.ru/.+\.(?:jpg|png)',
},
'params': {'skip_download': 'Livestream'},
}, {
# Sometimes geo-restricted to Russia
'url': 'https://player.smotrim.ru/iframe/live/uid/381308c7-a066-4c4f-9656-83e2e792a7b4',
'info_dict': {
'id': '19201',
'ext': 'mp4',
'title': str,
'channel_id': '4',
'description': 'Смотрим прямой эфир «Россия К»',
'display_id': '381308c7-a066-4c4f-9656-83e2e792a7b4',
'live_status': 'is_live',
'thumbnail': r're:https?://cdn-st\d+\.smotrim\.ru/.+\.(?:jpg|png)',
'timestamp': int,
'upload_date': str,
'webpage_url': 'https://smotrim.ru/channel/4',
},
'params': {'skip_download': 'Livestream'},
}, {
'url': 'https://smotrim.ru/live/19201',
'only_matching': True,
}, {
'url': 'https://player.smotrim.ru/iframe/audio-live/id/81',
'only_matching': True,
}, {
'url': 'https://testplayer.vgtrk.com/iframe/live/id/19201',
'only_matching': True,
}]
def _real_extract(self, url):
typ, display_id = self._match_valid_url(url).group('type', 'id')
if typ == 'live' and re.fullmatch(r'[0-9]+', display_id):
url = self._request_webpage(url, display_id).url
typ = self._match_valid_url(url).group('type')
if typ == 'channel':
webpage = self._download_webpage(url, display_id)
src_url = traverse_obj(webpage, ((
({find_element(cls='main-player__frame', html=True)}, {extract_attributes}, 'src'),
({find_element(cls='audio-play-button', html=True)},
{extract_attributes}, 'value', {urllib.parse.unquote}, {json.loads}, 'source'),
), any, {self._proto_relative_url}, {url_or_none}, {require('src URL')}))
typ, video_id = self._match_valid_url(src_url).group('type', 'id')
else:
video_id = display_id
return {
'display_id': display_id,
**self._extract_from_smotrim_api(typ, video_id),
}
class SmotrimPlaylistIE(SmotrimBaseIE):
IE_NAME = 'smotrim:playlist'
_PAGE_SIZE = 15
_VALID_URL = r'https?://smotrim\.ru/(?P<type>brand|podcast)/(?P<id>\d+)/?(?P<season>[\w-]+)?'
_TESTS = [{
# Video
'url': 'https://smotrim.ru/brand/64356',
'info_dict': {
'id': '64356',
'title': 'Большие и маленькие',
},
'playlist_mincount': 55,
}, {
# Video, season
'url': 'https://smotrim.ru/brand/65293/3-sezon',
'info_dict': {
'id': '65293',
'title': 'Спасская',
'season': '3 сезон',
},
'playlist_count': 16,
}, {
# Audio
'url': 'https://smotrim.ru/brand/68880',
'info_dict': {
'id': '68880',
'title': 'Веселый колобок',
},
'playlist_mincount': 156,
}, {
# Podcast
'url': 'https://smotrim.ru/podcast/8021',
'info_dict': {
'id': '8021',
'title': 'Сила звука',
},
'playlist_mincount': 27,
}]
def _fetch_page(self, endpoint, key, playlist_id, page):
page += 1
items = self._download_json(
f'{self._BASE_URL}/api/{endpoint}', playlist_id,
f'Downloading page {page}', query={
key: playlist_id,
'limit': self._PAGE_SIZE,
'page': page,
},
)
for link in traverse_obj(items, ('contents', -1, 'list', ..., 'link', {str})):
yield self.url_result(urljoin(self._BASE_URL, link))
def _real_extract(self, url):
playlist_type, playlist_id, season = self._match_valid_url(url).group('type', 'id', 'season')
key = 'rubricId' if playlist_type == 'podcast' else 'brandId'
webpage = self._download_webpage(url, playlist_id)
playlist_title = self._html_search_meta(['og:title', 'twitter:title'], webpage, default=None)
if season:
return self.playlist_from_matches(traverse_obj(webpage, (
{find_elements(tag='a', attr='href', value=r'/video/\d+', html=True, regex=True)},
..., {extract_attributes}, 'href', {str},
)), playlist_id, playlist_title, season=traverse_obj(webpage, (
{find_element(cls='seasons__item seasons__item--selected')}, {clean_html},
)), ie=SmotrimIE, getter=urljoin(self._BASE_URL))
if traverse_obj(webpage, (
{find_element(cls='brand-main-item__videos')}, {clean_html}, filter,
)):
endpoint = 'videos'
else:
endpoint = 'audios'
return self.playlist_result(OnDemandPagedList(
functools.partial(self._fetch_page, endpoint, key, playlist_id), self._PAGE_SIZE), playlist_id, playlist_title)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ciscowebex.py | yt_dlp/extractor/ciscowebex.py | from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
try_get,
unified_timestamp,
)
class CiscoWebexIE(InfoExtractor):
IE_NAME = 'ciscowebex'
IE_DESC = 'Cisco Webex'
_VALID_URL = r'''(?x)
(?P<url>https?://(?P<subdomain>[^/#?]*)\.webex\.com/(?:
(?P<siteurl_1>[^/#?]*)/(?:ldr|lsr).php\?(?:[^#]*&)*RCID=(?P<rcid>[0-9a-f]{32})|
(?:recordingservice|webappng)/sites/(?P<siteurl_2>[^/#?]*)/recording/(?:playback/|play/)?(?P<id>[0-9a-f]{32})
))'''
_TESTS = [{
'url': 'https://demosubdomain.webex.com/demositeurl/ldr.php?RCID=e58e803bc0f766bb5f6376d2e86adb5b',
'only_matching': True,
}, {
'url': 'http://demosubdomain.webex.com/demositeurl/lsr.php?RCID=bc04b4a7b5ea2cc3a493d5ae6aaff5d7',
'only_matching': True,
}, {
'url': 'https://demosubdomain.webex.com/recordingservice/sites/demositeurl/recording/88e7a42f7b19f5b423c54754aecc2ce9/playback',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = self._match_valid_url(url)
rcid = mobj.group('rcid')
if rcid:
webpage = self._download_webpage(url, None, note='Getting video ID')
url = self._search_regex(self._VALID_URL, webpage, 'redirection url', group='url')
url = self._request_webpage(url, None, note='Resolving final URL').url
mobj = self._match_valid_url(url)
subdomain = mobj.group('subdomain')
siteurl = mobj.group('siteurl_1') or mobj.group('siteurl_2')
video_id = mobj.group('id')
password = self.get_param('videopassword')
headers = {'Accept': 'application/json'}
if password:
headers['accessPwd'] = password
stream, urlh = self._download_json_handle(
f'https://{subdomain}.webex.com/webappng/api/v1/recordings/{video_id}/stream',
video_id, headers=headers, query={'siteurl': siteurl}, expected_status=(403, 429))
if urlh.status == 403:
if stream['code'] == 53004:
self.raise_login_required()
if stream['code'] == 53005:
if password:
raise ExtractorError('Wrong password', expected=True)
raise ExtractorError(
'This video is protected by a password, use the --video-password option', expected=True)
raise ExtractorError(f'{self.IE_NAME} said: {stream["code"]} - {stream["message"]}', expected=True)
if urlh.status == 429:
self.raise_login_required(
f'{self.IE_NAME} asks you to solve a CAPTCHA. Solve CAPTCHA in browser and',
method='cookies')
video_id = stream.get('recordUUID') or video_id
formats = [{
'format_id': 'video',
'url': stream['fallbackPlaySrc'],
'ext': 'mp4',
'vcodec': 'avc1.640028',
'acodec': 'mp4a.40.2',
}]
if stream.get('preventDownload') is False:
mp4url = try_get(stream, lambda x: x['downloadRecordingInfo']['downloadInfo']['mp4URL'])
if mp4url:
formats.append({
'format_id': 'video',
'url': mp4url,
'ext': 'mp4',
'vcodec': 'avc1.640028',
'acodec': 'mp4a.40.2',
})
audiourl = try_get(stream, lambda x: x['downloadRecordingInfo']['downloadInfo']['audioURL'])
if audiourl:
formats.append({
'format_id': 'audio',
'url': audiourl,
'ext': 'mp3',
'vcodec': 'none',
'acodec': 'mp3',
})
return {
'id': video_id,
'title': stream['recordName'],
'description': stream.get('description'),
'uploader': stream.get('ownerDisplayName'),
'uploader_id': stream.get('ownerUserName') or stream.get('ownerId'),
'timestamp': unified_timestamp(stream.get('createTime')),
'duration': int_or_none(stream.get('duration'), 1000),
'webpage_url': f'https://{subdomain}.webex.com/recordingservice/sites/{siteurl}/recording/playback/{video_id}',
'formats': formats,
}
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/rds.py | yt_dlp/extractor/rds.py | from .common import InfoExtractor
from ..utils import (
js_to_json,
parse_duration,
parse_iso8601,
)
class RDSIE(InfoExtractor):
_WORKING = False
IE_DESC = 'RDS.ca'
_VALID_URL = r'https?://(?:www\.)?rds\.ca/vid(?:[eé]|%C3%A9)os/(?:[^/]+/)*(?P<id>[^/]+)-\d+\.\d+'
_TESTS = [{
# has two 9c9media ContentPackages, the web player selects the first ContentPackage
'url': 'https://www.rds.ca/videos/Hockey/NationalHockeyLeague/teams/9/forum-du-5-a-7-jesperi-kotkaniemi-de-retour-de-finlande-3.1377606',
'info_dict': {
'id': '2083309',
'display_id': 'forum-du-5-a-7-jesperi-kotkaniemi-de-retour-de-finlande',
'ext': 'flv',
'title': 'Forum du 5 à 7 : Kotkaniemi de retour de Finlande',
'description': 'md5:83fa38ecc4a79b19e433433254077f25',
'timestamp': 1606129030,
'upload_date': '20201123',
'duration': 773.039,
},
}, {
'url': 'http://www.rds.ca/vid%C3%A9os/un-voyage-positif-3.877934',
'only_matching': True,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
item = self._parse_json(self._search_regex(r'(?s)itemToPush\s*=\s*({.+?});', webpage, 'item'), display_id, js_to_json)
video_id = str(item['id'])
title = item.get('title') or self._og_search_title(webpage) or self._html_search_meta(
'title', webpage, 'title', fatal=True)
description = self._og_search_description(webpage) or self._html_search_meta(
'description', webpage, 'description')
thumbnail = item.get('urlImageBig') or self._og_search_thumbnail(webpage) or self._search_regex(
[r'<link[^>]+itemprop="thumbnailUrl"[^>]+href="([^"]+)"',
r'<span[^>]+itemprop="thumbnailUrl"[^>]+content="([^"]+)"'],
webpage, 'thumbnail', fatal=False)
timestamp = parse_iso8601(self._search_regex(
r'<span[^>]+itemprop="uploadDate"[^>]+content="([^"]+)"',
webpage, 'upload date', fatal=False))
duration = parse_duration(self._search_regex(
r'<span[^>]+itemprop="duration"[^>]+content="([^"]+)"',
webpage, 'duration', fatal=False))
age_limit = self._family_friendly_search(webpage)
return {
'_type': 'url_transparent',
'id': video_id,
'display_id': display_id,
'url': f'9c9media:rds_web:{video_id}',
'title': title,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'duration': duration,
'age_limit': age_limit,
'ie_key': 'NineCNineMedia',
}
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/senategov.py | yt_dlp/extractor/senategov.py | import re
import urllib.parse
from .common import InfoExtractor
from ..utils import (
ExtractorError,
UnsupportedError,
make_archive_id,
remove_end,
url_or_none,
)
from ..utils.traversal import traverse_obj
class SenateISVPIE(InfoExtractor):
    """Extractor for the US Senate's Integrated Senate Video Player (ISVP).

    ISVP URLs carry everything in the query string: ``comm`` selects the
    committee (mapped to CDN parameters via ``_COMMITTEES``) and ``filename``
    names the recording.
    """
    IE_NAME = 'senate.gov:isvp'
    _VALID_URL = r'https?://(?:www\.)?senate\.gov/isvp/?\?(?P<qs>.+)'
    _EMBED_REGEX = [r"<iframe[^>]+src=['\"](?P<url>https?://www\.senate\.gov/isvp/?\?[^'\"]+)['\"]"]
    _TESTS = [{
        'url': 'http://www.senate.gov/isvp/?comm=judiciary&type=live&stt=&filename=judiciary031715&auto_play=false&wmode=transparent&poster=http%3A%2F%2Fwww.judiciary.senate.gov%2Fthemes%2Fjudiciary%2Fimages%2Fvideo-poster-flash-fit.png',
        'info_dict': {
            'id': 'judiciary031715',
            'ext': 'mp4',
            'title': 'ISVP',
            'thumbnail': r're:https?://.+\.(?:jpe?g|png)',
            '_old_archive_ids': ['senategov judiciary031715'],
        },
        'params': {'skip_download': 'm3u8'},
        'expected_warnings': ['Failed to download m3u8 information'],
    }, {
        'url': 'http://www.senate.gov/isvp/?type=live&comm=commerce&filename=commerce011514.mp4&auto_play=false',
        'info_dict': {
            'id': 'commerce011514',
            'ext': 'mp4',
            'title': 'Integrated Senate Video Player',
            '_old_archive_ids': ['senategov commerce011514'],
        },
        'skip': 'This video is not available.',
    }, {
        'url': 'http://www.senate.gov/isvp/?type=arch&comm=intel&filename=intel090613&hc_location=ufi',
        # checksum differs each time
        'info_dict': {
            'id': 'intel090613',
            'ext': 'mp4',
            'title': 'ISVP',
            '_old_archive_ids': ['senategov intel090613'],
        },
        'expected_warnings': ['Failed to download m3u8 information'],
    }, {
        'url': 'https://www.senate.gov/isvp/?auto_play=false&comm=help&filename=help090920&poster=https://www.help.senate.gov/assets/images/video-poster.png&stt=950',
        'info_dict': {
            'id': 'help090920',
            'ext': 'mp4',
            'title': 'ISVP',
            'thumbnail': r're:https?://.+\.(?:jpe?g|png)',
            '_old_archive_ids': ['senategov help090920'],
        },
    }, {
        # From http://www.c-span.org/video/?96791-1
        'url': 'http://www.senate.gov/isvp?type=live&comm=banking&filename=banking012715',
        'only_matching': True,
    }]
    _WEBPAGE_TESTS = [{
        # FIXME: Embed detection
        'url': 'https://www.hsgac.senate.gov/subcommittees/bmfwra/hearings/match-ready-oversight-of-the-federal-governments-border-management-and-personnel-readiness-efforts-for-the-decade-of-sports/',
        'info_dict': {
            'id': 'govtaff061025',
            'ext': 'mp4',
            'title': 'ISVP',
            'thumbnail': r're:https?://.+\.(?:jpe?g|png)',
            '_old_archive_ids': ['senategov govtaff061025'],
        },
    }]
    # Maps the 'comm' query value to
    # (stream number, Akamai stream domain, SRS live stream id, MSL3 archive name).
    _COMMITTEES = {
        'ag': ('76440', 'https://ag-f.akamaihd.net', '2036803', 'agriculture'),
        'aging': ('76442', 'https://aging-f.akamaihd.net', '2036801', 'aging'),
        'approps': ('76441', 'https://approps-f.akamaihd.net', '2036802', 'appropriations'),
        'arch': ('', 'https://ussenate-f.akamaihd.net', '', 'arch'),
        'armed': ('76445', 'https://armed-f.akamaihd.net', '2036800', 'armedservices'),
        'banking': ('76446', 'https://banking-f.akamaihd.net', '2036799', 'banking'),
        'budget': ('76447', 'https://budget-f.akamaihd.net', '2036798', 'budget'),
        'cecc': ('76486', 'https://srs-f.akamaihd.net', '2036782', 'srs_cecc'),
        'commerce': ('80177', 'https://commerce1-f.akamaihd.net', '2036779', 'commerce'),
        'csce': ('75229', 'https://srs-f.akamaihd.net', '2036777', 'srs_srs'),
        'dpc': ('76590', 'https://dpc-f.akamaihd.net', '', 'dpc'),
        'energy': ('76448', 'https://energy-f.akamaihd.net', '2036797', 'energy'),
        'epw': ('76478', 'https://epw-f.akamaihd.net', '2036783', 'environment'),
        'ethics': ('76449', 'https://ethics-f.akamaihd.net', '2036796', 'ethics'),
        'finance': ('76450', 'https://finance-f.akamaihd.net', '2036795', 'finance_finance'),
        'foreign': ('76451', 'https://foreign-f.akamaihd.net', '2036794', 'foreignrelations'),
        'govtaff': ('76453', 'https://govtaff-f.akamaihd.net', '2036792', 'hsgac'),
        'help': ('76452', 'https://help-f.akamaihd.net', '2036793', 'help'),
        'indian': ('76455', 'https://indian-f.akamaihd.net', '2036791', 'indianaffairs'),
        'intel': ('76456', 'https://intel-f.akamaihd.net', '2036790', 'intelligence'),
        'intlnarc': ('76457', 'https://intlnarc-f.akamaihd.net', '', 'internationalnarcoticscaucus'),
        'jccic': ('85180', 'https://jccic-f.akamaihd.net', '2036778', 'jccic'),
        'jec': ('76458', 'https://jec-f.akamaihd.net', '2036789', 'jointeconomic'),
        'judiciary': ('76459', 'https://judiciary-f.akamaihd.net', '2036788', 'judiciary'),
        'rpc': ('76591', 'https://rpc-f.akamaihd.net', '', 'rpc'),
        'rules': ('76460', 'https://rules-f.akamaihd.net', '2036787', 'rules'),
        'saa': ('76489', 'https://srs-f.akamaihd.net', '2036780', 'srs_saa'),
        'smbiz': ('76461', 'https://smbiz-f.akamaihd.net', '2036786', 'smallbusiness'),
        'srs': ('75229', 'https://srs-f.akamaihd.net', '2031966', 'srs_srs'),
        'uscc': ('76487', 'https://srs-f.akamaihd.net', '2036781', 'srs_uscc'),
        'vetaff': ('76462', 'https://vetaff-f.akamaihd.net', '2036785', 'veteransaffairs'),
    }

    def _real_extract(self, url):
        qs = urllib.parse.parse_qs(self._match_valid_url(url).group('qs'))
        if not qs.get('filename') or not qs.get('comm'):
            raise ExtractorError('Invalid URL', expected=True)
        filename = qs['filename'][0]
        video_id = remove_end(filename, '.mp4')
        webpage = self._download_webpage(url, video_id)

        committee = qs['comm'][0]
        stream_num, stream_domain, stream_id, msl3 = self._COMMITTEES[committee]
        # BUGFIX: these templates previously contained the literal text
        # '(unknown)' instead of the {filename} placeholder, so none of the
        # candidate manifest URLs referenced the requested recording.
        # Try the known manifest layouts in turn: SRS live, MSL3 archive,
        # then the two legacy Akamai layouts.
        urls_alternatives = [f'https://www-senate-gov-media-srs.akamaized.net/hls/live/{stream_id}/{committee}/{filename}/master.m3u8',
                             f'https://www-senate-gov-msl3archive.akamaized.net/{msl3}/{filename}_1/master.m3u8',
                             f'{stream_domain}/i/{filename}_1@{stream_num}/master.m3u8',
                             f'{stream_domain}/i/{filename}.mp4/master.m3u8']

        formats = []
        subtitles = {}
        for video_url in urls_alternatives:
            # Keep the results of the first layout that actually yields formats
            formats, subtitles = self._extract_m3u8_formats_and_subtitles(video_url, video_id, ext='mp4', fatal=False)
            if formats:
                break

        return {
            'id': video_id,
            'title': self._html_extract_title(webpage),
            'formats': formats,
            'subtitles': subtitles,
            'thumbnail': traverse_obj(qs, ('poster', 0, {url_or_none})),
            '_old_archive_ids': [make_archive_id(SenateGovIE, video_id)],
        }
class SenateGovIE(InfoExtractor):
    """Extractor for hearing pages on the *.senate.gov committee sites.

    These pages embed a SenateISVP iframe; this extractor locates that
    embed and enriches it with metadata scraped from the page itself.
    """
    IE_NAME = 'senate.gov'
    _SUBDOMAIN_RE = '|'.join(map(re.escape, (
        'agriculture', 'aging', 'appropriations', 'armed-services', 'banking',
        'budget', 'commerce', 'energy', 'epw', 'finance', 'foreign', 'help',
        'intelligence', 'inaugural', 'judiciary', 'rules', 'sbc', 'veterans',
    )))
    _VALID_URL = rf'https?://(?:www\.)?(?:{_SUBDOMAIN_RE})\.senate\.gov'
    _TESTS = [{
        'url': 'https://www.help.senate.gov/hearings/vaccines-saving-lives-ensuring-confidence-and-protecting-public-health',
        'info_dict': {
            'id': 'help090920',
            'display_id': 'vaccines-saving-lives-ensuring-confidence-and-protecting-public-health',
            'title': 'Vaccines: Saving Lives, Ensuring Confidence, and Protecting Public Health',
            'description': 'Full Committee Hearing on September 9, 2020 at 6:00 AM',
            'ext': 'mp4',
            'age_limit': 0,
            'thumbnail': r're:https?://.+\.(?:jpe?g|png)',
            '_old_archive_ids': ['senategov help090920'],
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://www.appropriations.senate.gov/hearings/watch?hearingid=B8A25434-5056-A066-6020-1F68CB75F0CD',
        'info_dict': {
            'id': 'appropsA051518',
            'display_id': 'watch?hearingid=B8A25434-5056-A066-6020-1F68CB75F0CD',
            'title': 'Review of the FY2019 Budget Request for the U.S. Army',
            'ext': 'mp4',
            'age_limit': 0,
            'thumbnail': r're:https?://.+\.(?:jpe?g|png)',
            '_old_archive_ids': ['senategov appropsA051518'],
        },
        'params': {'skip_download': 'm3u8'},
        'expected_warnings': ['Failed to download m3u8 information'],
    }, {
        'url': 'https://www.banking.senate.gov/hearings/21st-century-communities-public-transportation-infrastructure-investment-and-fast-act-reauthorization',
        'info_dict': {
            'id': 'banking041521',
            'display_id': '21st-century-communities-public-transportation-infrastructure-investment-and-fast-act-reauthorization',
            'title': '21st Century Communities: Public Transportation Infrastructure Investment and FAST Act Reauthorization',
            'description': 'The Official website of The United States Committee on Banking, Housing, and Urban Affairs',
            'ext': 'mp4',
            'thumbnail': r're:https?://.+\.(?:jpe?g|png)',
            'age_limit': 0,
            '_old_archive_ids': ['senategov banking041521'],
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://www.agriculture.senate.gov/hearings/hemp-production-and-the-2018-farm-bill',
        'only_matching': True,
    }, {
        'url': 'https://www.aging.senate.gov/hearings/the-older-americans-act-the-local-impact-of-the-law-and-the-upcoming-reauthorization',
        'only_matching': True,
    }, {
        'url': 'https://www.budget.senate.gov/hearings/improving-care-lowering-costs-achieving-health-care-efficiency',
        'only_matching': True,
    }, {
        'url': 'https://www.commerce.senate.gov/2024/12/communications-networks-safety-and-security',
        'only_matching': True,
    }, {
        'url': 'https://www.energy.senate.gov/hearings/2024/2/full-committee-hearing-to-examine',
        'only_matching': True,
    }, {
        'url': 'https://www.epw.senate.gov/public/index.cfm/hearings?ID=F63083EA-2C13-498C-B548-341BED68C209',
        'only_matching': True,
    }, {
        'url': 'https://www.foreign.senate.gov/hearings/american-diplomacy-and-global-leadership-review-of-the-fy25-state-department-budget-request',
        'only_matching': True,
    }, {
        'url': 'https://www.intelligence.senate.gov/hearings/foreign-threats-elections-2024-%E2%80%93-roles-and-responsibilities-us-tech-providers',
        'only_matching': True,
    }, {
        'url': 'https://www.inaugural.senate.gov/52nd-inaugural-ceremonies/',
        'only_matching': True,
    }, {
        'url': 'https://www.rules.senate.gov/hearings/02/07/2023/business-meeting',
        'only_matching': True,
    }, {
        'url': 'https://www.sbc.senate.gov/public/index.cfm/hearings?ID=5B13AA6B-8279-45AF-B54B-94156DC7A2AB',
        'only_matching': True,
    }, {
        'url': 'https://www.veterans.senate.gov/2024/5/frontier-health-care-ensuring-veterans-access-no-matter-where-they-live',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        display_id = self._generic_id(url)
        webpage = self._download_webpage(url, display_id)
        # Find the embedded ISVP player; without it there is nothing to extract.
        url_info = next(SenateISVPIE.extract_from_webpage(self._downloader, url, webpage), None)
        if not url_info:
            raise UnsupportedError(url)
        title = self._html_search_regex(
            (*self._og_regexes('title'), r'(?s)<title>([^<]*?)</title>'), webpage, 'video title', fatal=False)
        return {
            **url_info,
            '_type': 'url_transparent',
            'display_id': display_id,
            # Page titles look like 'Hearing Name | Committee'; keep the first part.
            'title': re.sub(r'\s+', ' ', title.split('|')[0]).strip(),
            'description': self._og_search_description(webpage, default=None),
            'thumbnail': self._og_search_thumbnail(webpage, default=None),
            'age_limit': self._rta_search(webpage),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/alsace20tv.py | yt_dlp/extractor/alsace20tv.py | from .common import InfoExtractor
from ..utils import (
clean_html,
dict_get,
get_element_by_class,
int_or_none,
unified_strdate,
url_or_none,
)
class Alsace20TVBaseIE(InfoExtractor):
    def _extract_video(self, video_id, url=None):
        """Fetch player metadata for *video_id* and build the info dict.

        When *url* is given, the HTML page is also fetched to supply extra
        metadata (description, duration, thumbnail fallback).
        """
        info = self._download_json(
            f'https://www.alsace20.tv/visionneuse/visio_v9_js.php?key={video_id}&habillage=0&mode=html',
            video_id) or {}

        formats = []
        for quality, stream_url in (info.get('files') or {}).items():
            # SMIL manifests are recognizable by their '/smil:_' path component;
            # everything else is treated as MPEG-DASH.
            if '/smil:_' in stream_url:
                fmts = self._extract_smil_formats(stream_url, video_id, fatal=False)
            else:
                fmts = self._extract_mpd_formats(stream_url, video_id, mpd_id=quality, fatal=False)
            formats.extend(fmts)

        webpage = (url and self._download_webpage(url, video_id, fatal=False)) or ''

        thumbnail = url_or_none(dict_get(info, ('image', 'preview')) or self._og_search_thumbnail(webpage))
        # Thumbnail URLs embed a YYMMDD date segment, e.g. '/220203_...'
        date_digits = self._search_regex(r'/(\d{6})_', thumbnail, 'upload_date', default=None)
        upload_date = None
        if date_digits:
            upload_date = unified_strdate(f'20{date_digits[:2]}-{date_digits[2:4]}-{date_digits[4:]}')

        return {
            'id': video_id,
            'title': info.get('titre'),
            'formats': formats,
            'description': clean_html(get_element_by_class('wysiwyg', webpage)),
            'upload_date': upload_date,
            'thumbnail': thumbnail,
            'duration': int_or_none(self._og_search_property('video:duration', webpage) if webpage else None),
            'view_count': int_or_none(info.get('nb_vues')),
        }
class Alsace20TVIE(Alsace20TVBaseIE):
    """Extractor for regular video pages on alsace20.tv."""
    _VALID_URL = r'https?://(?:www\.)?alsace20\.tv/(?:[\w-]+/)+[\w-]+-(?P<id>[\w]+)'
    _TESTS = [{
        'url': 'https://www.alsace20.tv/VOD/Actu/JT/Votre-JT-jeudi-3-fevrier-lyNHCXpYJh.html',
        'info_dict': {
            'id': 'lyNHCXpYJh',
            'ext': 'mp4',
            'description': 'md5:fc0bc4a0692d3d2dba4524053de4c7b7',
            'title': 'Votre JT du jeudi 3 février',
            'upload_date': '20220203',
            'thumbnail': r're:https?://.+\.jpg',
            'duration': 1073,
            'view_count': int,
        },
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Pass the page URL along so the base class can scrape extra metadata.
        return self._extract_video(video_id, url)
class Alsace20TVEmbedIE(Alsace20TVBaseIE):
    """Extractor for alsace20.tv embed player URLs (no surrounding page)."""
    _VALID_URL = r'https?://(?:www\.)?alsace20\.tv/emb/(?P<id>[\w]+)'
    _TESTS = [{
        'url': 'https://www.alsace20.tv/emb/lyNHCXpYJh',
        # 'md5': 'd91851bf9af73c0ad9b2cdf76c127fbb',
        'info_dict': {
            'id': 'lyNHCXpYJh',
            'ext': 'mp4',
            'title': 'Votre JT du jeudi 3 février',
            'upload_date': '20220203',
            'thumbnail': r're:https?://.+\.jpg',
            'view_count': int,
        },
        'params': {
            'format': 'bestvideo',
        },
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        # No page URL is available for embeds, so only API metadata is used.
        return self._extract_video(video_id)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/kankanews.py | yt_dlp/extractor/kankanews.py | import hashlib
import random
import string
import time
import urllib.parse
from .common import InfoExtractor
class KankaNewsIE(InfoExtractor):
    """Extractor for article videos on kankanews.com (Shanghai Media Group)."""
    _WORKING = False
    _VALID_URL = r'https?://(?:www\.)?kankanews\.com/a/\d+\-\d+\-\d+/(?P<id>\d+)\.shtml'
    _TESTS = [{
        'url': 'https://www.kankanews.com/a/2022-11-08/00310276054.shtml?appid=1088227',
        'md5': '05e126513c74b1258d657452a6f4eef9',
        'info_dict': {
            'id': '4485057',
            'url': 'http://mediaplay.kksmg.com/2022/11/08/h264_450k_mp4_1a388ad771e0e4cc28b0da44d245054e_ncm.mp4',
            'ext': 'mp4',
            'title': '视频|第23个中国记者节,我们在进博切蛋糕',
            'thumbnail': r're:^https?://.*\.jpg*',
        },
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        video_id = self._search_regex(r'omsid\s*=\s*"(\d+)"', webpage, 'video id')

        nonce = ''.join(random.choices(string.ascii_lowercase + string.digits, k=8))
        query = {
            'nonce': nonce,
            'omsid': video_id,
            'platform': 'pc',
            'timestamp': int(time.time()),
            'version': '1.0',
        }
        # The API request is signed with a double MD5 over the urlencoded
        # query plus a static salt.
        salted = urllib.parse.urlencode(query) + '&28c8edde3d61a0411511d3b1866f0636'
        inner_digest = hashlib.md5(salted.encode()).hexdigest()
        query['sign'] = hashlib.md5(inner_digest.encode()).hexdigest()

        meta = self._download_json(
            'https://api-app.kankanews.com/kankan/pc/getvideo',
            video_id, query=query)['result']['video']

        return {
            'id': video_id,
            'url': meta['videourl'],
            'title': self._search_regex(r'g\.title\s*=\s*"([^"]+)"', webpage, 'title'),
            'thumbnail': meta.get('titlepic'),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/wimtv.py | yt_dlp/extractor/wimtv.py | from .common import InfoExtractor
from ..utils import (
ExtractorError,
determine_ext,
parse_duration,
urlencode_postdata,
)
class WimTVIE(InfoExtractor):
    """Extractor for VOD, live and cast streams on platform.wim.tv."""
    # Cached player configuration (app auth token, thumbnail server settings),
    # populated once per run by _get_player_data().
    _player = None
    _UUID_RE = r'[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}'
    _VALID_URL = rf'''(?x:
        https?://platform\.wim\.tv/
        (?:
            (?:embed/)?\?
            |\#/webtv/.+?/
        )
        (?P<type>vod|live|cast)[=/]
        (?P<id>{_UUID_RE}).*?)'''
    _EMBED_REGEX = [rf'<iframe[^>]+src=["\'](?P<url>{_VALID_URL})']
    _TESTS = [{
        # vod stream
        'url': 'https://platform.wim.tv/embed/?vod=db29fb32-bade-47b6-a3a6-cb69fe80267a',
        'md5': 'db29fb32-bade-47b6-a3a6-cb69fe80267a',
        'info_dict': {
            'id': 'db29fb32-bade-47b6-a3a6-cb69fe80267a',
            'ext': 'mp4',
            'title': 'AMA SUPERCROSS 2020 - R2 ST. LOUIS',
            'duration': 6481,
            'thumbnail': r're:https?://.+?/thumbnail/.+?/720$',
        },
        'skip': 'Invalid URL',
    }, {
        # live stream
        'url': 'https://platform.wim.tv/embed/?live=28e22c22-49db-40f3-8c37-8cbb0ff44556&autostart=true',
        'info_dict': {
            'id': '28e22c22-49db-40f3-8c37-8cbb0ff44556',
            'ext': 'mp4',
            'title': 'Streaming MSmotorTV',
            'is_live': True,
        },
        'skip': 'Invalid URL',
    }, {
        'url': 'https://platform.wim.tv/#/webtv/automotornews/vod/422492b6-539e-474d-9c6b-68c9d5893365',
        'only_matching': True,
    }, {
        'url': 'https://platform.wim.tv/#/webtv/renzoarborechannel/cast/f47e0d15-5b45-455e-bf0d-dba8ffa96365',
        'only_matching': True,
    }]
    _WEBPAGE_TESTS = [{
        'url': 'http://www.renzoarborechannel.tv/50_sorrisi_da_napoli.htm',
        'info_dict': {
            'id': '50_sorrisi_da_napoli',
            'title': 'Renzo Arbore Channel . TV - 50 Sorrisi da Napoli',
            'age_limit': 0,
            'timestamp': 1612226372,
            'upload_date': '20210202',
        },
        'playlist_count': 40,
    }]
    def _real_initialize(self):
        # Fetch the player configuration lazily, only once per run.
        if not self._player:
            self._get_player_data()
    def _get_player_data(self):
        """Scrape the app auth token and thumbnail-server settings from the player JS."""
        msg_id = 'Player data'
        self._player = {}
        # Each entry names a JS file to fetch and the variables to scrape from it.
        datas = [{
            'url': 'https://platform.wim.tv/common/libs/player/wimtv/wim-rest.js',
            'vars': [{
                'regex': r'appAuth = "(.+?)"',
                'variable': 'app_auth',
            }],
        }, {
            'url': 'https://platform.wim.tv/common/config/endpointconfig.js',
            'vars': [{
                'regex': r'PRODUCTION_HOSTNAME_THUMB = "(.+?)"',
                'variable': 'thumb_server',
            }, {
                'regex': r'PRODUCTION_HOSTNAME_THUMB\s*\+\s*"(.+?)"',
                'variable': 'thumb_server_path',
            }],
        }]
        for data in datas:
            temp = self._download_webpage(data['url'], msg_id)
            for var in data['vars']:
                val = self._search_regex(var['regex'], temp, msg_id)
                if not val:
                    raise ExtractorError('{} not found'.format(var['variable']))
                self._player[var['variable']] = val
    def _generate_token(self):
        """Obtain an OAuth access token using the scraped app credentials."""
        json = self._download_json(
            'https://platform.wim.tv/wimtv-server/oauth/token', 'Token generation',
            headers={'Authorization': 'Basic {}'.format(self._player['app_auth'])},
            data=urlencode_postdata({'grant_type': 'client_credentials'}))
        token = json.get('access_token')
        if not token:
            raise ExtractorError('access token not generated')
        return token
    def _generate_thumbnail(self, thumb_id, width='720'):
        """Build a thumbnail URL from the scraped server settings, or None."""
        if not thumb_id or not self._player.get('thumb_server'):
            return None
        if not self._player.get('thumb_server_path'):
            self._player['thumb_server_path'] = ''
        return '{}{}/asset/thumbnail/{}/{}'.format(
            self._player['thumb_server'],
            self._player['thumb_server_path'],
            thumb_id, width)
    def _real_extract(self, url):
        urlc = self._match_valid_url(url).groupdict()
        video_id = urlc['id']
        stream_type = is_live = None
        # 'live' and 'cast' URLs both map to the live channel API endpoint.
        if urlc['type'] in {'live', 'cast'}:
            stream_type = urlc['type'] + '/channel'
            is_live = True
        else:
            stream_type = 'vod'
            is_live = False
        token = self._generate_token()
        json = self._download_json(
            f'https://platform.wim.tv/wimtv-server/api/public/{stream_type}/{video_id}/play',
            video_id, headers={
                'Authorization': f'Bearer {token}',
                'Content-Type': 'application/json',
            }, data=b'{}')
        formats = []
        for src in json.get('srcs') or []:
            if src.get('mimeType') == 'application/x-mpegurl':
                formats.extend(
                    self._extract_m3u8_formats(
                        src.get('uniqueStreamer'), video_id, 'mp4'))
            if src.get('mimeType') == 'video/flash':
                formats.append({
                    'format_id': 'rtmp',
                    'url': src.get('uniqueStreamer'),
                    'ext': determine_ext(src.get('uniqueStreamer'), 'flv'),
                    'rtmp_live': is_live,
                })
        json = json.get('resource')
        thumb = self._generate_thumbnail(json.get('thumbnailId'))
        return {
            'id': video_id,
            'title': json.get('title') or json.get('name'),
            'duration': parse_duration(json.get('duration')),
            'formats': formats,
            'thumbnail': thumb,
            'is_live': is_live,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/piksel.py | yt_dlp/extractor/piksel.py | import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
dict_get,
int_or_none,
join_nonempty,
parse_iso8601,
traverse_obj,
try_get,
unescapeHTML,
urljoin,
)
class PikselIE(InfoExtractor):
    """Extractor for videos served through the Piksel OVP (and white-label hosts)."""
    _VALID_URL = r'''(?x)https?://
        (?:
            (?:
                player\.
                    (?:
                        olympusattelecom|
                        vibebyvista
                    )|
                (?:api|player)\.multicastmedia|
                (?:api-ovp|player)\.piksel
            )\.(?:com|tech)|
            (?:
                mz-edge\.stream\.co|
                movie-s\.nhk\.or
            )\.jp|
            vidego\.baltimorecity\.gov
        )/v/(?:refid/(?P<refid>[^/]+)/prefid/)?(?P<id>[\w-]+)'''
    _EMBED_REGEX = [r'<iframe[^>]+src=["\'](?P<url>(?:https?:)?//player\.piksel\.(?:com|tech)/v/[a-z0-9]+)']
    _TESTS = [
        {
            'url': 'http://player.piksel.tech/v/ums2867l',
            'md5': '34e34c8d89dc2559976a6079db531e85',
            'info_dict': {
                'id': 'ums2867l',
                'ext': 'mp4',
                'title': 'GX-005 with Caption',
                'timestamp': 1481335659,
                'upload_date': '20161210',
                'description': '',
                'thumbnail': 'https://thumbs.piksel.tech/thumbs/aid/t1488331553/3238987.jpg?w=640&h=480',
            },
        },
        {
            # Original source: http://www.uscourts.gov/cameras-courts/state-washington-vs-donald-j-trump-et-al
            'url': 'https://player.piksel.tech/v/v80kqp41',
            'md5': '753ddcd8cc8e4fa2dda4b7be0e77744d',
            'info_dict': {
                'id': 'v80kqp41',
                'ext': 'mp4',
                'title': 'WAW- State of Washington vs. Donald J. Trump, et al',
                'description': 'State of Washington vs. Donald J. Trump, et al, Case Number 17-CV-00141-JLR, TRO Hearing, Civil Rights Case, 02/3/2017, 1:00 PM (PST), Seattle Federal Courthouse, Seattle, WA, Judge James L. Robart presiding.',
                'timestamp': 1486171129,
                'upload_date': '20170204',
                'thumbnail': 'https://thumbs.piksel.tech/thumbs/aid/t1495569155/3279887.jpg?w=640&h=360',
            },
        },
        {
            # https://www3.nhk.or.jp/nhkworld/en/ondemand/video/2019240/
            'url': 'http://player.piksel.com/v/refid/nhkworld/prefid/nw_vod_v_en_2019_240_20190823233000_02_1566873477',
            'only_matching': True,
        },
    ]
    def _call_api(self, app_token, resource, display_id, query, host='https://player.piksel.tech', fatal=True):
        """Call a Piksel web-service endpoint; raises (or warns) on API failure."""
        url = urljoin(host, f'/ws/ws_{resource}/api/{app_token}/mode/json/apiv/5')
        response = traverse_obj(
            self._download_json(url, display_id, query=query, fatal=fatal), ('response', {dict})) or {}
        failure = traverse_obj(response, ('failure', 'reason')) if response else 'Empty response from API'
        if failure:
            if fatal:
                raise ExtractorError(failure, expected=True)
            self.report_warning(failure)
        return response
    def _real_extract(self, url):
        ref_id, display_id = self._match_valid_url(url).groups()
        webpage = self._download_webpage(url, display_id)
        # The API token is embedded in the player page markup.
        app_token = self._search_regex([
            r'clientAPI\s*:\s*"([^"]+)"',
            r'data-de-api-key\s*=\s*"([^"]+)"',
        ], webpage, 'app token')
        query = {'refid': ref_id, 'prefid': display_id} if ref_id else {'v': display_id}
        program = self._call_api(
            app_token, 'program', display_id, query, url)['WsProgramResponse']['program']
        video_id = program['uuid']
        video_data = program['asset']
        title = video_data['title']
        asset_type = dict_get(video_data, ['assetType', 'asset_type'])
        formats = []
        def process_asset_file(asset_file):
            # Append one HTTP format built from an assetFile dict, if usable.
            if not asset_file:
                return
            # TODO: extract rtmp formats
            http_url = asset_file.get('http_url')
            if not http_url:
                return
            tbr = None
            vbr = int_or_none(asset_file.get('videoBitrate'), 1024)
            abr = int_or_none(asset_file.get('audioBitrate'), 1024)
            # NOTE(review): if either bitrate field is missing for a 'video'
            # asset, vbr/abr is None and this addition raises TypeError —
            # presumably the API always supplies both; verify.
            if asset_type == 'video':
                tbr = vbr + abr
            elif asset_type == 'audio':
                tbr = abr
            formats.append({
                'format_id': join_nonempty('http', tbr),
                'url': unescapeHTML(http_url),
                'vbr': vbr,
                'abr': abr,
                'width': int_or_none(asset_file.get('videoWidth')),
                'height': int_or_none(asset_file.get('videoHeight')),
                'filesize': int_or_none(asset_file.get('filesize')),
                'tbr': tbr,
            })
        def process_asset_files(asset_files):
            for asset_file in (asset_files or []):
                process_asset_file(asset_file)
        process_asset_files(video_data.get('assetFiles'))
        process_asset_file(video_data.get('referenceFile'))
        if not formats:
            # Fall back to the asset_file API when the program carried no files.
            asset_id = video_data.get('assetid') or program.get('assetid')
            if asset_id:
                process_asset_files(try_get(self._call_api(
                    app_token, 'asset_file', display_id, {
                        'assetid': asset_id,
                    }, url, False), lambda x: x['WsAssetFileResponse']['AssetFiles']))
        m3u8_url = dict_get(video_data, [
            'm3u8iPadURL',
            'ipadM3u8Url',
            'm3u8AndroidURL',
            'm3u8iPhoneURL',
            'iphoneM3u8Url'])
        if m3u8_url:
            formats.extend(self._extract_m3u8_formats(
                m3u8_url, video_id, 'mp4', 'm3u8_native',
                m3u8_id='hls', fatal=False))
        smil_url = dict_get(video_data, ['httpSmil', 'hdSmil', 'rtmpSmil'])
        if smil_url:
            transform_source = lambda x: x.replace('src="/', 'src="')
            if ref_id == 'nhkworld':
                # TODO: figure out if this is something to be fixed in urljoin,
                # _parse_smil_formats or keep it here
                transform_source = lambda x: x.replace('src="/', 'src="').replace('/media"', '/media/"')
            formats.extend(self._extract_smil_formats(
                re.sub(r'/od/[^/]+/', '/od/http/', smil_url), video_id,
                transform_source=transform_source, fatal=False))
        subtitles = {}
        for caption in video_data.get('captions', []):
            caption_url = caption.get('url')
            if caption_url:
                subtitles.setdefault(caption.get('locale', 'en'), []).append({
                    'url': caption_url})
        return {
            'id': video_id,
            'title': title,
            'description': video_data.get('description'),
            'thumbnail': video_data.get('thumbnailUrl'),
            'timestamp': parse_iso8601(video_data.get('dateadd')),
            'formats': formats,
            'subtitles': subtitles,
            '_format_sort_fields': ('tbr', ),  # Incomplete resolution information
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ninaprotocol.py | yt_dlp/extractor/ninaprotocol.py | from .common import InfoExtractor
from ..utils import int_or_none, mimetype2ext, parse_iso8601, url_or_none
from ..utils.traversal import traverse_obj
class NinaProtocolIE(InfoExtractor):
    """Extractor for music releases on ninaprotocol.com; yields one playlist per release."""
    _VALID_URL = r'https?://(?:www\.)?ninaprotocol\.com/releases/(?P<id>[^/#?]+)'
    _TESTS = [{
        'url': 'https://www.ninaprotocol.com/releases/3SvsMM3y4oTPZ5DXFJnLkCAqkxz34hjzFxqms1vu9XBJ',
        'info_dict': {
            'id': '3SvsMM3y4oTPZ5DXFJnLkCAqkxz34hjzFxqms1vu9XBJ',
            'title': 'The Spatulas - March Chant',
            'tags': ['punk', 'postpresentmedium', 'cambridge'],
            'uploader_id': '2bGjgdKUddJoj2shYGqfNcUfoSoABP21RJoiwGMZDq3A',
            'channel': 'ppm',
            'description': 'md5:bb9f9d39d8f786449cd5d0ff7c5772db',
            'album': 'The Spatulas - March Chant',
            'thumbnail': 'https://www.arweave.net/VyZA6CBeUuqP174khvSrD44Eosi3MLVyWN42uaQKg50',
            'timestamp': 1701417610,
            'uploader': 'ppmrecs',
            'channel_id': '4ceG4zsb7VVxBTGPtZMqDZWGHo3VUg2xRvzC2b17ymWP',
            'display_id': 'the-spatulas-march-chant',
            'upload_date': '20231201',
            'album_artist': 'Post Present Medium ',
        },
        'playlist': [{
            'info_dict': {
                'id': '3SvsMM3y4oTPZ5DXFJnLkCAqkxz34hjzFxqms1vu9XBJ_1',
                'title': 'March Chant In April',
                'track': 'March Chant In April',
                'ext': 'mp3',
                'duration': 152,
                'track_number': 1,
                'uploader_id': '2bGjgdKUddJoj2shYGqfNcUfoSoABP21RJoiwGMZDq3A',
                'uploader': 'ppmrecs',
                'thumbnail': 'https://www.arweave.net/VyZA6CBeUuqP174khvSrD44Eosi3MLVyWN42uaQKg50',
                'timestamp': 1701417610,
                'channel': 'ppm',
                'album': 'The Spatulas - March Chant',
                'tags': ['punk', 'postpresentmedium', 'cambridge'],
                'channel_id': '4ceG4zsb7VVxBTGPtZMqDZWGHo3VUg2xRvzC2b17ymWP',
                'upload_date': '20231201',
                'album_artist': 'Post Present Medium ',
            },
        }, {
            'info_dict': {
                'id': '3SvsMM3y4oTPZ5DXFJnLkCAqkxz34hjzFxqms1vu9XBJ_2',
                'title': 'Rescue Mission',
                'track': 'Rescue Mission',
                'ext': 'mp3',
                'duration': 212,
                'track_number': 2,
                'album_artist': 'Post Present Medium ',
                'uploader': 'ppmrecs',
                'tags': ['punk', 'postpresentmedium', 'cambridge'],
                'thumbnail': 'https://www.arweave.net/VyZA6CBeUuqP174khvSrD44Eosi3MLVyWN42uaQKg50',
                'channel': 'ppm',
                'upload_date': '20231201',
                'channel_id': '4ceG4zsb7VVxBTGPtZMqDZWGHo3VUg2xRvzC2b17ymWP',
                'timestamp': 1701417610,
                'album': 'The Spatulas - March Chant',
                'uploader_id': '2bGjgdKUddJoj2shYGqfNcUfoSoABP21RJoiwGMZDq3A',
            },
        }, {
            'info_dict': {
                'id': '3SvsMM3y4oTPZ5DXFJnLkCAqkxz34hjzFxqms1vu9XBJ_3',
                'title': 'Slinger Style',
                'track': 'Slinger Style',
                'ext': 'mp3',
                'duration': 179,
                'track_number': 3,
                'timestamp': 1701417610,
                'upload_date': '20231201',
                'channel_id': '4ceG4zsb7VVxBTGPtZMqDZWGHo3VUg2xRvzC2b17ymWP',
                'uploader_id': '2bGjgdKUddJoj2shYGqfNcUfoSoABP21RJoiwGMZDq3A',
                'thumbnail': 'https://www.arweave.net/VyZA6CBeUuqP174khvSrD44Eosi3MLVyWN42uaQKg50',
                'album_artist': 'Post Present Medium ',
                'album': 'The Spatulas - March Chant',
                'tags': ['punk', 'postpresentmedium', 'cambridge'],
                'uploader': 'ppmrecs',
                'channel': 'ppm',
            },
        }, {
            'info_dict': {
                'id': '3SvsMM3y4oTPZ5DXFJnLkCAqkxz34hjzFxqms1vu9XBJ_4',
                'title': 'Psychic Signal',
                'track': 'Psychic Signal',
                'ext': 'mp3',
                'duration': 220,
                'track_number': 4,
                'tags': ['punk', 'postpresentmedium', 'cambridge'],
                'upload_date': '20231201',
                'album': 'The Spatulas - March Chant',
                'thumbnail': 'https://www.arweave.net/VyZA6CBeUuqP174khvSrD44Eosi3MLVyWN42uaQKg50',
                'timestamp': 1701417610,
                'album_artist': 'Post Present Medium ',
                'channel_id': '4ceG4zsb7VVxBTGPtZMqDZWGHo3VUg2xRvzC2b17ymWP',
                'channel': 'ppm',
                'uploader_id': '2bGjgdKUddJoj2shYGqfNcUfoSoABP21RJoiwGMZDq3A',
                'uploader': 'ppmrecs',
            },
        }, {
            'info_dict': {
                'id': '3SvsMM3y4oTPZ5DXFJnLkCAqkxz34hjzFxqms1vu9XBJ_5',
                'title': 'Curvy Color',
                'track': 'Curvy Color',
                'ext': 'mp3',
                'duration': 148,
                'track_number': 5,
                'timestamp': 1701417610,
                'uploader_id': '2bGjgdKUddJoj2shYGqfNcUfoSoABP21RJoiwGMZDq3A',
                'thumbnail': 'https://www.arweave.net/VyZA6CBeUuqP174khvSrD44Eosi3MLVyWN42uaQKg50',
                'album': 'The Spatulas - March Chant',
                'album_artist': 'Post Present Medium ',
                'channel': 'ppm',
                'tags': ['punk', 'postpresentmedium', 'cambridge'],
                'uploader': 'ppmrecs',
                'channel_id': '4ceG4zsb7VVxBTGPtZMqDZWGHo3VUg2xRvzC2b17ymWP',
                'upload_date': '20231201',
            },
        }, {
            'info_dict': {
                'id': '3SvsMM3y4oTPZ5DXFJnLkCAqkxz34hjzFxqms1vu9XBJ_6',
                'title': 'Caveman Star',
                'track': 'Caveman Star',
                'ext': 'mp3',
                'duration': 121,
                'track_number': 6,
                'channel_id': '4ceG4zsb7VVxBTGPtZMqDZWGHo3VUg2xRvzC2b17ymWP',
                'thumbnail': 'https://www.arweave.net/VyZA6CBeUuqP174khvSrD44Eosi3MLVyWN42uaQKg50',
                'tags': ['punk', 'postpresentmedium', 'cambridge'],
                'album_artist': 'Post Present Medium ',
                'uploader': 'ppmrecs',
                'timestamp': 1701417610,
                'uploader_id': '2bGjgdKUddJoj2shYGqfNcUfoSoABP21RJoiwGMZDq3A',
                'album': 'The Spatulas - March Chant',
                'channel': 'ppm',
                'upload_date': '20231201',
            },
        }],
    }, {
        'url': 'https://www.ninaprotocol.com/releases/f-g-s-american-shield',
        'info_dict': {
            'id': '76PZnJwaMgViQHYfA4NYJXds7CmW6vHQKAtQUxGene6J',
            'description': 'md5:63f08d5db558b4b36e1896f317062721',
            'title': 'F.G.S. - American Shield',
            'uploader_id': 'Ej3rozs11wYqFk1Gs6oggGCkGLz8GzBhmJfnUxf6gPci',
            'channel_id': '6JuksCZPXuP16wJ1BUfwuukJzh42C7guhLrFPPkVJfyE',
            'channel': 'tinkscough',
            'tags': [],
            'album_artist': 'F.G.S.',
            'album': 'F.G.S. - American Shield',
            'thumbnail': 'https://www.arweave.net/YJpgImkXLT9SbpFb576KuZ5pm6bdvs452LMs3Rx6lm8',
            'display_id': 'f-g-s-american-shield',
            'uploader': 'flannerysilva',
            'timestamp': 1702395858,
            'upload_date': '20231212',
        },
        'playlist_count': 1,
    }, {
        'url': 'https://www.ninaprotocol.com/releases/time-to-figure-things-out',
        'info_dict': {
            'id': '6Zi1nC5hj6b13NkpxVYwRhFy6mYA7oLBbe9DMrgGDcYh',
            'display_id': 'time-to-figure-things-out',
            'description': 'md5:960202ed01c3134bb8958f1008527e35',
            'timestamp': 1706283607,
            'title': 'DJ STEPDAD - time to figure things out',
            'album_artist': 'DJ STEPDAD',
            'uploader': 'tddvsss',
            'upload_date': '20240126',
            'album': 'time to figure things out',
            'uploader_id': 'AXQNRgTyYsySyAMFDwxzumuGjfmoXshorCesjpquwCBi',
            'thumbnail': 'https://www.arweave.net/O4i8bcKVqJVZvNeHHFp6r8knpFGh9ZwEgbeYacr4nss',
            'tags': [],
        },
        'playlist_count': 4,
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        release = self._download_json(
            f'https://api.ninaprotocol.com/v1/releases/{video_id}', video_id)['release']
        # Prefer the canonical publicKey id over the (possibly slug) URL id.
        video_id = release.get('publicKey') or video_id
        # Metadata shared by the playlist and every track entry.
        common_info = traverse_obj(release, {
            'album': ('metadata', 'properties', 'title', {str}),
            'album_artist': ((('hub', 'data'), 'publisherAccount'), 'displayName', {str}),
            'timestamp': ('datetime', {parse_iso8601}),
            'thumbnail': ('metadata', 'image', {url_or_none}),
            'uploader': ('publisherAccount', 'handle', {str}),
            'uploader_id': ('publisherAccount', 'publicKey', {str}),
            'channel': ('hub', 'handle', {str}),
            'channel_id': ('hub', 'publicKey', {str}),
        }, get_all=False)
        common_info['tags'] = traverse_obj(release, ('metadata', 'properties', 'tags', ..., {str}))
        entries = []
        # Only files with a valid URI become tracks; numbering starts at 1.
        for track_num, track in enumerate(traverse_obj(release, (
                'metadata', 'properties', 'files', lambda _, v: url_or_none(v['uri']))), 1):
            entries.append({
                'id': f'{video_id}_{track_num}',
                'url': track['uri'],
                **traverse_obj(track, {
                    'title': ('track_title', {str}),
                    'track': ('track_title', {str}),
                    'ext': ('type', {mimetype2ext}),
                    'track_number': ('track', {int_or_none}),
                    'duration': ('duration', {int_or_none}),
                }),
                'vcodec': 'none',
                **common_info,
            })
        return {
            '_type': 'playlist',
            'id': video_id,
            'entries': entries,
            **traverse_obj(release, {
                'display_id': ('slug', {str}),
                'title': ('metadata', 'name', {str}),
                'description': ('metadata', 'description', {str}),
            }),
            **common_info,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/scrolller.py | yt_dlp/extractor/scrolller.py | import json
from .common import InfoExtractor
from ..utils import determine_ext, int_or_none
class ScrolllerIE(InfoExtractor):
    """Extractor for single posts on scrolller.com, fetched via its GraphQL API."""
    _VALID_URL = r'https?://(?:www\.)?scrolller\.com/(?P<id>[\w-]+)'
    _TESTS = [{
        'url': 'https://scrolller.com/a-helping-hand-1k9pxikxkw',
        'info_dict': {
            'id': 'a-helping-hand-1k9pxikxkw',
            'ext': 'mp4',
            'thumbnail': 'https://zepto.scrolller.com/a-helping-hand-3ty9q8x094-540x960.jpg',
            'title': 'A helping hand',
            'age_limit': 0,
        },
    }, {
        'url': 'https://scrolller.com/tigers-chasing-a-drone-c5d1f2so6j',
        'info_dict': {
            'id': 'tigers-chasing-a-drone-c5d1f2so6j',
            'ext': 'mp4',
            'thumbnail': 'https://zepto.scrolller.com/tigers-chasing-a-drone-az9pkpguwe-540x303.jpg',
            'title': 'Tigers chasing a drone',
            'age_limit': 0,
        },
    }, {
        'url': 'https://scrolller.com/baby-rhino-smells-something-9chhugsv9p',
        'info_dict': {
            'id': 'baby-rhino-smells-something-9chhugsv9p',
            'ext': 'mp4',
            'thumbnail': 'https://atto.scrolller.com/hmm-whats-that-smell-bh54mf2c52-300x224.jpg',
            'title': 'Baby rhino smells something',
            'age_limit': 0,
        },
    }, {
        'url': 'https://scrolller.com/its-all-fun-and-games-cco8jjmoh7',
        'info_dict': {
            'id': 'its-all-fun-and-games-cco8jjmoh7',
            'ext': 'mp4',
            'thumbnail': 'https://atto.scrolller.com/its-all-fun-and-games-3amk9vg7m3-540x649.jpg',
            'title': 'It\'s all fun and games...',
            'age_limit': 0,
        },
    }, {
        'url': 'https://scrolller.com/may-the-force-be-with-you-octokuro-yeytg1fs7a',
        'info_dict': {
            'id': 'may-the-force-be-with-you-octokuro-yeytg1fs7a',
            'ext': 'mp4',
            'thumbnail': 'https://thumbs2.redgifs.com/DarkStarchyNautilus-poster.jpg',
            'title': 'May the force be with you (Octokuro)',
            'age_limit': 18,
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # GraphQL query for the post; the slug from the URL doubles as the API path
        query = {
            'query': '''{
                getSubredditPost(url:"/%s"){
                    id
                    title
                    isNsfw
                    mediaSources{
                        url
                        width
                        height
                    }
                }
            }''' % video_id,  # noqa: UP031
        }
        video_data = self._download_json(
            'https://api.scrolller.com/api/v2/graphql', video_id, data=json.dumps(query).encode(),
            headers={'Content-Type': 'application/json'})['data']['getSubredditPost']

        # Image renditions become thumbnails; any other source with a URL is a video format
        formats, thumbnails = [], []
        for source in video_data['mediaSources']:
            if determine_ext(source.get('url')) in ('jpg', 'png'):
                thumbnails.append({
                    'url': source['url'],
                    'width': int_or_none(source.get('width')),
                    'height': int_or_none(source.get('height')),
                })
            elif source.get('url'):
                formats.append({
                    'url': source['url'],
                    'width': int_or_none(source.get('width')),
                    'height': int_or_none(source.get('height')),
                })

        if not formats:
            # Image-only posts carry no downloadable video
            self.raise_no_formats('There is no video.', expected=True, video_id=video_id)

        return {
            'id': video_id,
            'title': video_data.get('title'),
            'thumbnails': thumbnails,
            'formats': formats,
            'age_limit': 18 if video_data.get('isNsfw') else 0,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/abcotvs.py | yt_dlp/extractor/abcotvs.py | from .common import InfoExtractor
from ..utils import (
dict_get,
int_or_none,
try_get,
)
class ABCOTVSIE(InfoExtractor):
    """Extractor for story pages on the websites of ABC Owned Television Stations."""
    IE_NAME = 'abcotvs'
    IE_DESC = 'ABC Owned Television Stations'
    _VALID_URL = r'https?://(?P<site>abc(?:7(?:news|ny|chicago)?|11|13|30)|6abc)\.com(?:(?:/[^/]+)*/(?P<display_id>[^/]+))?/(?P<id>\d+)'
    _TESTS = [
        {
            'url': 'http://abc7news.com/entertainment/east-bay-museum-celebrates-vintage-synthesizers/472581/',
            'info_dict': {
                'id': '472548',
                'display_id': 'east-bay-museum-celebrates-vintage-synthesizers',
                'ext': 'mp4',
                'title': 'East Bay museum celebrates synthesized music',
                'description': 'md5:24ed2bd527096ec2a5c67b9d5a9005f3',
                'thumbnail': r're:^https?://.*\.jpg$',
                'timestamp': 1421118520,
                'upload_date': '20150113',
            },
            'params': {
                # m3u8 download
                'skip_download': True,
            },
        },
        {
            'url': 'http://abc7news.com/472581',
            'only_matching': True,
        },
        {
            'url': 'https://6abc.com/man-75-killed-after-being-struck-by-vehicle-in-chester/5725182/',
            'only_matching': True,
        },
    ]
    # Maps the website subdomain to the station call sign expected by the API
    _SITE_MAP = {
        '6abc': 'wpvi',
        'abc11': 'wtvd',
        'abc13': 'ktrk',
        'abc30': 'kfsn',
        'abc7': 'kabc',
        'abc7chicago': 'wls',
        'abc7news': 'kgo',
        'abc7ny': 'wabc',
    }

    def _real_extract(self, url):
        site, display_id, video_id = self._match_valid_url(url).groups()
        display_id = display_id or video_id
        station = self._SITE_MAP[site]

        data = self._download_json(
            'https://api.abcotvs.com/v2/content', display_id, query={
                'id': video_id,
                'key': f'otv.web.{station}.story',
                'station': station,
            })['data']
        # Story payloads embed the clip under featuredMedia; fall back to the raw payload
        video = try_get(data, lambda x: x['featuredMedia']['video'], dict) or data
        video_id = str(dict_get(video, ('id', 'publishedKey'), video_id))
        title = video.get('title') or video['linkText']

        formats = []
        m3u8_url = video.get('m3u8')
        if m3u8_url:
            # Strip the query string before expanding the HLS master playlist
            formats = self._extract_m3u8_formats(
                video['m3u8'].split('?')[0], display_id, 'mp4', m3u8_id='hls', fatal=False)
        mp4_url = video.get('mp4')
        if mp4_url:
            # The progressive MP4 rendition has fixed properties (no metadata in the API)
            formats.append({
                'abr': 128,
                'format_id': 'https',
                'height': 360,
                'url': mp4_url,
                'width': 640,
            })

        image = video.get('image') or {}

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': dict_get(video, ('description', 'caption'), try_get(video, lambda x: x['meta']['description'])),
            'thumbnail': dict_get(image, ('source', 'dynamicSource')),
            'timestamp': int_or_none(video.get('date')),
            'duration': int_or_none(video.get('length')),
            'formats': formats,
        }
class ABCOTVSClipsIE(InfoExtractor):
    """Extractor for standalone clips hosted on clips.abcotvs.com."""
    IE_NAME = 'abcotvs:clips'
    _VALID_URL = r'https?://clips\.abcotvs\.com/(?:[^/]+/)*video/(?P<id>\d+)'
    _TEST = {
        'url': 'https://clips.abcotvs.com/kabc/video/214814',
        'info_dict': {
            'id': '214814',
            'ext': 'mp4',
            'title': 'SpaceX launch pad explosion destroys rocket, satellite',
            'description': 'md5:9f186e5ad8f490f65409965ee9c7be1b',
            'upload_date': '20160901',
            'timestamp': 1472756695,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # The "vogo" API returns a batch; we only ever ask for one id
        clip = self._download_json(
            f'https://clips.abcotvs.com/vogo/video/getByIds?ids={video_id}',
            video_id)['results'][0]

        # Title is mandatory; a missing key should abort extraction
        title = clip['title']
        formats = self._extract_m3u8_formats(
            clip['videoURL'].split('?')[0], video_id, 'mp4')

        return {
            'id': video_id,
            'title': title,
            'description': clip.get('description'),
            'thumbnail': clip.get('thumbnailURL'),
            'duration': int_or_none(clip.get('duration')),
            'timestamp': int_or_none(clip.get('pubDate')),
            'formats': formats,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/bfi.py | yt_dlp/extractor/bfi.py | import re
from .common import InfoExtractor
from ..utils import extract_attributes
class BFIPlayerIE(InfoExtractor):
    """Extractor for films on BFI Player, delegating playback to Ooyala."""
    _WORKING = False
    IE_NAME = 'bfi:player'
    _VALID_URL = r'https?://player\.bfi\.org\.uk/[^/]+/film/watch-(?P<id>[\w-]+)-online'
    _TEST = {
        'url': 'https://player.bfi.org.uk/free/film/watch-computer-doctor-1974-online',
        'md5': 'e8783ebd8e061ec4bc6e9501ed547de8',
        'info_dict': {
            'id': 'htNnhlZjE60C9VySkQEIBtU-cNV1Xx63',
            'ext': 'mp4',
            'title': 'Computer Doctor',
            'description': 'md5:fb6c240d40c4dbe40428bdd62f78203b',
        },
        'skip': 'BFI Player films cannot be played outside of the UK',
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        entries = []
        # Collect every player element on the page; each carries its Ooyala id as an attribute
        for player_tag in re.findall(r'(?s)<[^>]+class="player"[^>]*>', webpage):
            attrs = extract_attributes(player_tag)
            # Player elements without a video id have nothing playable
            if ooyala_id := attrs.get('data-video-id'):
                entries.append(self.url_result(
                    'ooyala:' + ooyala_id, 'Ooyala',
                    ooyala_id, attrs.get('data-label')))
        return self.playlist_result(entries)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/cliprs.py | yt_dlp/extractor/cliprs.py | from .onet import OnetBaseIE
class ClipRsIE(OnetBaseIE):
    """Extractor for clip.rs, which serves media through the shared Onet MVP platform."""
    _WORKING = False
    _VALID_URL = r'https?://(?:www\.)?clip\.rs/(?P<id>[^/]+)/\d+'
    _TEST = {
        'url': 'http://www.clip.rs/premijera-frajle-predstavljaju-novi-spot-za-pesmu-moli-me-moli/3732',
        'md5': 'c412d57815ba07b56f9edc7b5d6a14e5',
        'info_dict': {
            'id': '1488842.1399140381',
            'ext': 'mp4',
            'title': 'PREMIJERA Frajle predstavljaju novi spot za pesmu Moli me, moli',
            'description': 'md5:56ce2c3b4ab31c5a2e0b17cb9a453026',
            'duration': 229,
            'timestamp': 1459850243,
            'upload_date': '20160405',
        },
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # Locate the embedded MVP id and let the Onet base class do the heavy lifting
        mvp_id = self._search_mvp_id(webpage)
        return {
            **self._extract_from_id(mvp_id, webpage),
            'display_id': display_id,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/boxcast.py | yt_dlp/extractor/boxcast.py | from .common import InfoExtractor
from ..utils import js_to_json, traverse_obj, unified_timestamp
class BoxCastVideoIE(InfoExtractor):
    """Extractor for BoxCast broadcasts (embeds, channel pages and video portals)."""
    _VALID_URL = r'''(?x)
        https?://boxcast\.tv/(?:
            view-embed/|
            channel/\w+\?(?:[^#]+&)?b=|
            video-portal/(?:\w+/){2}
        )(?P<id>[\w-]+)'''
    _EMBED_REGEX = [r'<iframe[^>]+src=["\'](?P<url>https?://boxcast\.tv/view-embed/[\w-]+)']
    _TESTS = [{
        'url': 'https://boxcast.tv/view-embed/in-the-midst-of-darkness-light-prevails-an-interdisciplinary-symposium-ozmq5eclj50ujl4bmpwx',
        'info_dict': {
            'id': 'da1eqqgkacngd5djlqld',
            'ext': 'mp4',
            'thumbnail': r're:https?://uploads\.boxcast\.com/(?:[\w+-]+/){3}.+\.png$',
            'title': 'In the Midst of Darkness Light Prevails: An Interdisciplinary Symposium',
            'release_timestamp': 1670686812,
            'release_date': '20221210',
            'uploader_id': 're8w0v8hohhvpqtbskpe',
            'uploader': 'Children\'s Health Defense',
        },
    }, {
        'url': 'https://boxcast.tv/video-portal/vctwevwntun3o0ikq7af/rvyblnn0fxbfjx5nwxhl/otbpltj2kzkveo2qz3ad',
        'info_dict': {
            'id': 'otbpltj2kzkveo2qz3ad',
            'ext': 'mp4',
            'uploader_id': 'vctwevwntun3o0ikq7af',
            'uploader': 'Legacy Christian Church',
            'title': 'The Quest | 1: Beginner\'s Bay | Jamie Schools',
            'thumbnail': r're:https?://uploads.boxcast.com/(?:[\w-]+/){3}.+\.jpg',
        },
    }, {
        'url': 'https://boxcast.tv/channel/z03fqwaeaby5lnaawox2?b=ssihlw5gvfij2by8tkev',
        'info_dict': {
            'id': 'ssihlw5gvfij2by8tkev',
            'ext': 'mp4',
            'thumbnail': r're:https?://uploads.boxcast.com/(?:[\w-]+/){3}.+\.jpg$',
            'release_date': '20230101',
            'uploader_id': 'ds25vaazhlu4ygcvffid',
            'release_timestamp': 1672543201,
            'uploader': 'Lighthouse Ministries International -  Beltsville, Maryland',
            'description': 'md5:ac23e3d01b0b0be592e8f7fe0ec3a340',
            'title': 'New Year\'s Eve CROSSOVER Service at LHMI | December 31, 2022',
        },
    }]
    _WEBPAGE_TESTS = [{
        'url': 'https://childrenshealthdefense.eu/live-stream/',
        'info_dict': {
            'id': 'da1eqqgkacngd5djlqld',
            'ext': 'mp4',
            'thumbnail': r're:https?://uploads\.boxcast\.com/(?:[\w+-]+/){3}.+\.png$',
            'title': 'In the Midst of Darkness Light Prevails: An Interdisciplinary Symposium',
            'release_timestamp': 1670686812,
            'release_date': '20221210',
            'uploader_id': 're8w0v8hohhvpqtbskpe',
            'uploader': 'Children\'s Health Defense',
        },
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # Data preloaded into the page as a JS object; empty dict if absent
        webpage_json_data = self._search_json(
            r'var\s*BOXCAST_PRELOAD\s*=', webpage, 'broadcast data', display_id,
            transform_source=js_to_json, default={})

        # Prefer the preloaded data; otherwise fall back to the public API
        # Ref: https://support.boxcast.com/en/articles/4235158-build-a-custom-viewer-experience-with-boxcast-api
        broadcast_json_data = (
            traverse_obj(webpage_json_data, ('broadcast', 'data'))
            or self._download_json(f'https://api.boxcast.com/broadcasts/{display_id}', display_id))
        view_json_data = (
            traverse_obj(webpage_json_data, ('view', 'data'))
            or self._download_json(f'https://api.boxcast.com/broadcasts/{display_id}/view',
                                   display_id, fatal=False) or {})

        # Only finished ("recorded") broadcasts expose a playable HLS playlist here
        formats, subtitles = [], {}
        if view_json_data.get('status') == 'recorded':
            formats, subtitles = self._extract_m3u8_formats_and_subtitles(
                view_json_data['playlist'], display_id)

        return {
            'id': str(broadcast_json_data['id']),
            'title': (broadcast_json_data.get('name')
                      or self._html_search_meta(['og:title', 'twitter:title'], webpage)),
            'description': (broadcast_json_data.get('description')
                            or self._html_search_meta(['og:description', 'twitter:description'], webpage)
                            or None),
            'thumbnail': (broadcast_json_data.get('preview')
                          or self._html_search_meta(['og:image', 'twitter:image'], webpage)),
            'formats': formats,
            'subtitles': subtitles,
            'release_timestamp': unified_timestamp(broadcast_json_data.get('streamed_at')),
            'uploader': broadcast_json_data.get('account_name'),
            'uploader_id': broadcast_json_data.get('account_id'),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/rts.py | yt_dlp/extractor/rts.py | import re
from .srgssr import SRGSSRIE
from ..utils import (
determine_ext,
int_or_none,
parse_duration,
parse_iso8601,
unescapeHTML,
urljoin,
)
class RTSIE(SRGSSRIE):  # XXX: Do not subclass from concrete IE
    """Extractor for RTS.ch (Radio Télévision Suisse) videos, audio and article pages."""
    _WORKING = False
    IE_DESC = 'RTS.ch'
    _VALID_URL = r'rts:(?P<rts_id>\d+)|https?://(?:.+?\.)?rts\.ch/(?:[^/]+/){2,}(?P<id>[0-9]+)-(?P<display_id>.+?)\.html'

    _TESTS = [
        {
            'url': 'http://www.rts.ch/archives/tv/divers/3449373-les-enfants-terribles.html',
            'md5': '753b877968ad8afaeddccc374d4256a5',
            'info_dict': {
                'id': '3449373',
                'display_id': 'les-enfants-terribles',
                'ext': 'mp4',
                'duration': 1488,
                'title': 'Les Enfants Terribles',
                'description': 'France Pommier et sa soeur Luce Feral, les deux filles de ce groupe de 5.',
                'uploader': 'Divers',
                'upload_date': '19680921',
                'timestamp': -40280400,
                'thumbnail': r're:^https?://.*\.image',
                'view_count': int,
            },
            'expected_warnings': ['Unable to download f4m manifest', 'Failed to download m3u8 information'],
        },
        {
            'url': 'http://www.rts.ch/emissions/passe-moi-les-jumelles/5624067-entre-ciel-et-mer.html',
            'info_dict': {
                'id': '5624065',
                'title': 'Passe-moi les jumelles',
            },
            'playlist_mincount': 4,
        },
        {
            'url': 'http://www.rts.ch/video/sport/hockey/5745975-1-2-kloten-fribourg-5-2-second-but-pour-gotteron-par-kwiatowski.html',
            'info_dict': {
                'id': '5745975',
                'display_id': '1-2-kloten-fribourg-5-2-second-but-pour-gotteron-par-kwiatowski',
                'ext': 'mp4',
                'duration': 48,
                'title': '1/2, Kloten - Fribourg (5-2): second but pour Gottéron par Kwiatowski',
                'description': 'Hockey - Playoff',
                'uploader': 'Hockey',
                'upload_date': '20140403',
                'timestamp': 1396556882,
                'thumbnail': r're:^https?://.*\.image',
                'view_count': int,
            },
            'params': {
                # m3u8 download
                'skip_download': True,
            },
            'expected_warnings': ['Unable to download f4m manifest', 'Failed to download m3u8 information'],
            'skip': 'Blocked outside Switzerland',
        },
        {
            'url': 'http://www.rts.ch/video/info/journal-continu/5745356-londres-cachee-par-un-epais-smog.html',
            'md5': '9bb06503773c07ce83d3cbd793cebb91',
            'info_dict': {
                'id': '5745356',
                'display_id': 'londres-cachee-par-un-epais-smog',
                'ext': 'mp4',
                'duration': 33,
                'title': 'Londres cachée par un épais smog',
                'description': 'Un important voile de smog recouvre Londres depuis mercredi, provoqué par la pollution et du sable du Sahara.',
                'uploader': 'L\'actu en vidéo',
                'upload_date': '20140403',
                'timestamp': 1396537322,
                'thumbnail': r're:^https?://.*\.image',
                'view_count': int,
            },
            'expected_warnings': ['Unable to download f4m manifest', 'Failed to download m3u8 information'],
        },
        {
            'url': 'http://www.rts.ch/audio/couleur3/programmes/la-belle-video-de-stephane-laurenceau/5706148-urban-hippie-de-damien-krisl-03-04-2014.html',
            'md5': 'dd8ef6a22dff163d063e2a52bc8adcae',
            'info_dict': {
                'id': '5706148',
                'display_id': 'urban-hippie-de-damien-krisl-03-04-2014',
                'ext': 'mp3',
                'duration': 123,
                'title': '"Urban Hippie", de Damien Krisl',
                'description': 'Des Hippies super glam.',
                'upload_date': '20140403',
                'timestamp': 1396551600,
            },
        },
        {
            # article with videos on rhs
            'url': 'http://www.rts.ch/sport/hockey/6693917-hockey-davos-decroche-son-31e-titre-de-champion-de-suisse.html',
            'info_dict': {
                'id': '6693917',
                'title': 'Hockey: Davos décroche son 31e titre de champion de Suisse',
            },
            'playlist_mincount': 5,
        },
        {
            'url': 'http://pages.rts.ch/emissions/passe-moi-les-jumelles/5624065-entre-ciel-et-mer.html',
            'only_matching': True,
        },
    ]

    def _real_extract(self, url):
        m = self._match_valid_url(url)
        media_id = m.group('rts_id') or m.group('id')
        display_id = m.group('display_id') or media_id

        def download_json(internal_id):
            # Article/media metadata endpoint; works for both ids from URLs and internal ids
            return self._download_json(
                f'http://www.rts.ch/a/{internal_id}.html?f=json/article',
                display_id)

        all_info = download_json(media_id)

        # media_id extracted out of URL is not always a real id
        if 'video' not in all_info and 'audio' not in all_info:
            entries = []

            # Article payload may directly list sub-items to extract
            for item in all_info.get('items', []):
                item_url = item.get('url')
                if not item_url:
                    continue
                entries.append(self.url_result(item_url, 'RTS'))

            if not entries:
                page, urlh = self._download_webpage_handle(url, display_id)
                # The request may have been redirected to the real media page
                if re.match(self._VALID_URL, urlh.url).group('id') != media_id:
                    return self.url_result(urlh.url, 'RTS')

                # article with videos on rhs
                videos = re.findall(
                    r'<article[^>]+class="content-item"[^>]*>\s*<a[^>]+data-video-urn="urn:([^"]+)"',
                    page)
                if not videos:
                    videos = re.findall(
                        r'(?s)<iframe[^>]+class="srg-player"[^>]+src="[^"]+urn:([^"]+)"',
                        page)
                if videos:
                    entries = [self.url_result(f'srgssr:{video_urn}', 'SRGSSR') for video_urn in videos]

            if entries:
                return self.playlist_result(entries, media_id, all_info.get('title'))

            # Last resort: the page embeds the real internal media id
            internal_id = self._html_search_regex(
                r'<(?:video|audio) data-id="([0-9]+)"', page,
                'internal video id')
            all_info = download_json(internal_id)

        media_type = 'video' if 'video' in all_info else 'audio'

        # check for errors
        self._get_media_data('rts', media_type, media_id)

        info = all_info['video']['JSONinfo'] if 'video' in all_info else all_info['audio']

        title = info['title']

        def extract_bitrate(url):
            # Bitrate is encoded in the URL as e.g. "...-701k...."
            return int_or_none(self._search_regex(
                r'-([0-9]+)k\.', url, 'bitrate', default=None))

        formats = []
        streams = info.get('streams', {})
        for format_id, format_url in streams.items():
            # Skip SD variants when the corresponding adaptive stream is present
            if format_id == 'hds_sd' and 'hds' in streams:
                continue
            if format_id == 'hls_sd' and 'hls' in streams:
                continue
            ext = determine_ext(format_url)
            if ext in ('m3u8', 'f4m'):
                # Adaptive streams need a per-request auth token
                format_url = self._get_tokenized_src(format_url, media_id, format_id)
                if ext == 'f4m':
                    formats.extend(self._extract_f4m_formats(
                        format_url + ('?' if '?' not in format_url else '&') + 'hdcore=3.4.0',
                        media_id, f4m_id=format_id, fatal=False))
                else:
                    formats.extend(self._extract_m3u8_formats(
                        format_url, media_id, 'mp4', 'm3u8_native', m3u8_id=format_id, fatal=False))
            else:
                formats.append({
                    'format_id': format_id,
                    'url': format_url,
                    'tbr': extract_bitrate(format_url),
                })

        # Relative media URLs resolve against the RTS download host (audio uses a separate one)
        download_base = 'http://rtsww{}-d.rts.ch/'.format('-a' if media_type == 'audio' else '')
        for media in info.get('media', []):
            media_url = media.get('url')
            if not media_url or re.match(r'https?://', media_url):
                continue
            rate = media.get('rate')
            ext = media.get('ext') or determine_ext(media_url, 'mp4')
            format_id = ext
            if rate:
                format_id += '-%dk' % rate
            formats.append({
                'format_id': format_id,
                'url': urljoin(download_base, media_url),
                'tbr': rate or extract_bitrate(media_url),
            })

        self._check_formats(formats, media_id)

        # Duration may come in several fields and may be a string like "00:24:48"
        duration = info.get('duration') or info.get('cutout') or info.get('cutduration')
        if isinstance(duration, str):
            duration = parse_duration(duration)

        return {
            'id': media_id,
            'display_id': display_id,
            'formats': formats,
            'title': title,
            'description': info.get('intro'),
            'duration': duration,
            'view_count': int_or_none(info.get('plays')),
            'uploader': info.get('programName'),
            'timestamp': parse_iso8601(info.get('broadcast_date')),
            'thumbnail': unescapeHTML(info.get('preview_image_url')),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/prankcast.py | yt_dlp/extractor/prankcast.py | import json
from .common import InfoExtractor
from ..utils import float_or_none, parse_iso8601, str_or_none, try_call, url_or_none
from ..utils.traversal import traverse_obj, value
class PrankCastIE(InfoExtractor):
    """Extractor for recorded shows ("showreels") on prankcast.com."""
    _VALID_URL = r'https?://(?:www\.)?prankcast\.com/[^/?#]+/showreel/(?P<id>\d+)-(?P<display_id>[^/?#]+)'
    _TESTS = [{
        'url': 'https://prankcast.com/Devonanustart/showreel/1561-Beverly-is-back-like-a-heart-attack-',
        'info_dict': {
            'id': '1561',
            'ext': 'mp3',
            'title': 'Beverly is back like a heart attack!',
            'display_id': 'Beverly-is-back-like-a-heart-attack-',
            'timestamp': 1661391575,
            'uploader': 'Devonanustart',
            'channel_id': '4',
            'duration': 7918,
            'cast': ['Devonanustart', 'Phonelosers'],
            'description': '',
            'categories': ['prank'],
            'tags': ['prank call', 'prank', 'live show'],
            'upload_date': '20220825',
        },
    }, {
        'url': 'https://prankcast.com/phonelosers/showreel/2048-NOT-COOL',
        'info_dict': {
            'id': '2048',
            'ext': 'mp3',
            'title': 'NOT COOL',
            'display_id': 'NOT-COOL',
            'timestamp': 1665028364,
            'uploader': 'phonelosers',
            'channel_id': '6',
            'duration': 4044,
            'cast': ['phonelosers'],
            'description': '',
            'categories': ['prank'],
            'tags': ['prank call', 'prank', 'live show'],
            'upload_date': '20221006',
        },
    }]

    def _real_extract(self, url):
        video_id, display_id = self._match_valid_url(url).group('id', 'display_id')

        webpage = self._download_webpage(url, video_id)
        # All showreel metadata is embedded in the Next.js page props
        json_info = self._search_nextjs_data(webpage, video_id)['props']['pageProps']['ssr_data_showreel']

        uploader = json_info.get('user_name')
        # guests_json is itself a JSON-encoded string inside the payload
        guests_json = self._parse_json(json_info.get('guests_json') or '{}', video_id)
        start_date = parse_iso8601(json_info.get('start_date'))

        return {
            'id': video_id,
            'title': json_info.get('broadcast_title') or self._og_search_title(webpage),
            'display_id': display_id,
            'url': f'{json_info["broadcast_url"]}{json_info["recording_hash"]}.mp3',
            'timestamp': start_date,
            'uploader': uploader,
            'channel_id': str_or_none(json_info.get('user_id')),
            # Duration is derived from the broadcast's start/end timestamps
            'duration': try_call(lambda: parse_iso8601(json_info['end_date']) - start_date),
            'cast': list(filter(None, [uploader, *traverse_obj(guests_json, (..., 'name'))])),
            'description': json_info.get('broadcast_description'),
            'categories': [json_info.get('broadcast_category')],
            'tags': try_call(lambda: json_info['broadcast_tags'].split(',')),
        }
class PrankCastPostIE(InfoExtractor):
    """Extractor for audio posts on prankcast.com."""
    _VALID_URL = r'https?://(?:www\.)?prankcast\.com/[^/?#]+/posts/(?P<id>\d+)-(?P<display_id>[^/?#]+)'
    _TESTS = [{
        'url': 'https://prankcast.com/devonanustart/posts/6214-happy-national-rachel-day-',
        'info_dict': {
            'id': '6214',
            'ext': 'mp3',
            'title': 'Happy National Rachel Day!',
            'display_id': 'happy-national-rachel-day-',
            'timestamp': 1704333938,
            'uploader': 'Devonanustart',
            'channel_id': '4',
            'duration': 13175,
            'cast': ['Devonanustart'],
            'description': '',
            'categories': ['prank call'],
            'upload_date': '20240104',
        },
    }, {
        'url': 'https://prankcast.com/despicabledogs/posts/6217-jake-the-work-crow-',
        'info_dict': {
            'id': '6217',
            'ext': 'mp3',
            'title': 'Jake the Work Crow!',
            'display_id': 'jake-the-work-crow-',
            'timestamp': 1704346592,
            'uploader': 'despicabledogs',
            'channel_id': '957',
            'duration': 263.287,
            'cast': ['despicabledogs'],
            'description': 'https://imgur.com/a/vtxLvKU',
            'upload_date': '20240104',
        },
    }, {
        'url': 'https://prankcast.com/drtomservo/posts/11988-butteye-s-late-night-stank-episode-1-part-1-',
        'info_dict': {
            'id': '11988',
            'ext': 'mp3',
            'title': 'Butteye\'s Late Night Stank Episode 1 (Part 1)',
            'display_id': 'butteye-s-late-night-stank-episode-1-part-1-',
            'timestamp': 1754238686,
            'uploader': 'DrTomServo',
            'channel_id': '136',
            'duration': 2176.464,
            'cast': ['DrTomServo'],
            'description': '',
            'upload_date': '20250803',
        },
    }, {
        'url': 'https://prankcast.com/drtomservo/posts/12105-butteye-s-late-night-stank-episode-08-16-2025-part-2',
        'info_dict': {
            'id': '12105',
            'ext': 'mp3',
            'title': 'Butteye\'s Late Night Stank Episode 08-16-2025 Part 2',
            'display_id': 'butteye-s-late-night-stank-episode-08-16-2025-part-2',
            'timestamp': 1755453505,
            'uploader': 'DrTomServo',
            'channel_id': '136',
            'duration': 19018.392,
            'cast': ['DrTomServo'],
            'description': '',
            'upload_date': '20250817',
        },
    }]

    def _real_extract(self, url):
        video_id, display_id = self._match_valid_url(url).group('id', 'display_id')

        webpage = self._download_webpage(url, video_id)
        # Post metadata is embedded in the Next.js page props
        post = self._search_nextjs_data(webpage, video_id)['props']['pageProps']['ssr_data_posts']
        # post_contents_json is a JSON-encoded list; the first entry holds the audio
        content = self._parse_json(post['post_contents_json'], video_id)[0]

        return {
            'id': video_id,
            'display_id': display_id,
            # Fallback title; overridden by post_title below when present
            'title': self._og_search_title(webpage),
            **traverse_obj(post, {
                'title': ('post_title', {str}),
                'description': ('post_body', {str}),
                'tags': ('post_tags', {lambda x: x.split(',')}, ..., {str.strip}, filter),
                'channel_id': ('user_id', {int}, {str_or_none}),
                'uploader': ('user_name', {str}),
            }),
            **traverse_obj(content, {
                'url': (('secure_url', 'url'), {url_or_none}, any),
                # Older posts use start_date/crdate (space-delimited); newer ones created_at
                'timestamp': ((
                    (('start_date', 'crdate'), {parse_iso8601(delimiter=' ')}),
                    ('created_at', {parse_iso8601}),
                ), any),
                'duration': ('duration', {float_or_none}),
                'categories': ('category', {str}, filter, all, filter),
                # Cast is the post author plus any guests listed in the content
                'cast': ((
                    {value(post.get('user_name'))},
                    ('guests_json', {json.loads}, ..., 'name'),
                ), {str}, filter),
            }),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/s4c.py | yt_dlp/extractor/s4c.py | from .common import InfoExtractor
from ..utils import traverse_obj, url_or_none
class S4CIE(InfoExtractor):
    """Extractor for programmes on S4C Clic (s4c.cymru)."""
    _VALID_URL = r'https?://(?:www\.)?s4c\.cymru/clic/programme/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.s4c.cymru/clic/programme/861362209',
        'info_dict': {
            'id': '861362209',
            'ext': 'mp4',
            'title': 'Y Swn',
            'description': 'md5:f7681a30e4955b250b3224aa9fe70cf0',
            'duration': 5340,
            'thumbnail': 'https://www.s4c.cymru/amg/1920x1080/Y_Swn_2023S4C_099_ii.jpg',
        },
    }, {
        # Geo restricted to the UK
        'url': 'https://www.s4c.cymru/clic/programme/886303048',
        'info_dict': {
            'id': '886303048',
            'ext': 'mp4',
            'title': 'Pennod 1',
            'description': 'md5:7e3f364b70f61fcdaa8b4cb4a3eb3e7a',
            'duration': 2880,
            'thumbnail': 'https://www.s4c.cymru/amg/1920x1080/Stad_2025S4C_P1_210053.jpg',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Programme metadata (title/description/duration); non-fatal since playback works without it
        details = self._download_json(
            f'https://www.s4c.cymru/df/full_prog_details?lang=e&programme_id={video_id}',
            video_id, fatal=False)

        player_config = self._download_json(
            'https://player-api.s4c-cdn.co.uk/player-configuration/prod', video_id, query={
                'programme_id': video_id,
                'signed': '0',
                'lang': 'en',
                'mode': 'od',
                'appId': 'clic',
                'streamName': '',
            }, note='Downloading player config JSON')
        subtitles = {}
        # Subtitle entries use stringified numeric keys: '0' = URL, '1' = name, '3' = language
        for sub in traverse_obj(player_config, ('subtitles', lambda _, v: url_or_none(v['0']))):
            subtitles.setdefault(sub.get('3', 'en'), []).append({
                'url': sub['0'],
                'name': sub.get('1'),
            })
        m3u8_url = self._download_json(
            'https://player-api.s4c-cdn.co.uk/streaming-urls/prod', video_id, query={
                'mode': 'od',
                'application': 'clic',
                # 's4chttpl' marks UK-restricted content; everything else is world-wide
                'region': 'UK' if player_config.get('application') == 's4chttpl' else 'WW',
                'extra': 'false',
                'thirdParty': 'false',
                'filename': player_config['filename'],
            }, note='Downloading streaming urls JSON')['hls']

        return {
            'id': video_id,
            'formats': self._extract_m3u8_formats(m3u8_url, video_id, 'mp4', m3u8_id='hls'),
            'subtitles': subtitles,
            'thumbnail': url_or_none(player_config.get('poster')),
            **traverse_obj(details, ('full_prog_details', 0, {
                'title': (('programme_title', 'series_title'), {str}),
                'description': ('full_billing', {str.strip}),
                # API reports duration in minutes
                'duration': ('duration', {lambda x: int(x) * 60}),
            }), get_all=False),
        }
class S4CSeriesIE(InfoExtractor):
    """Extractor turning an S4C Clic series page into a playlist of its programmes."""
    _VALID_URL = r'https?://(?:www\.)?s4c\.cymru/clic/series/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.s4c.cymru/clic/series/864982911',
        'playlist_mincount': 6,
        'info_dict': {
            'id': '864982911',
            'title': 'Iaith ar Daith',
        },
    }, {
        'url': 'https://www.s4c.cymru/clic/series/866852587',
        'playlist_mincount': 8,
        'info_dict': {
            'id': '866852587',
            'title': 'FFIT Cymru',
        },
    }]

    def _real_extract(self, url):
        series_id = self._match_id(url)
        details = self._download_json(
            'https://www.s4c.cymru/df/series_details', series_id, query={
                'lang': 'e',
                'series_id': series_id,
                'show_prog_in_series': 'Y',
            }, note='Downloading series details JSON')

        # Every sibling programme id becomes a single-programme entry
        entries = [
            self.url_result(f'https://www.s4c.cymru/clic/programme/{episode_id}', S4CIE, episode_id)
            for episode_id in traverse_obj(details, ('other_progs_in_series', ..., 'id'))
        ]
        series_title = traverse_obj(details, ('full_prog_details', 0, 'series_title', {str}))
        return self.playlist_result(entries, series_id, series_title)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/wdr.py | yt_dlp/extractor/wdr.py | import re
import urllib.parse
from .common import InfoExtractor
from ..utils import (
ExtractorError,
determine_ext,
dict_get,
js_to_json,
strip_jsonp,
try_get,
unified_strdate,
update_url_query,
url_or_none,
urlhandle_detect_ext,
)
class WDRIE(InfoExtractor):
__API_URL_TPL = '//deviceids-medp.wdr.de/ondemand/%s/%s'
_VALID_URL = r'''(?x)https?://
(?:deviceids-medp\.wdr\.de/ondemand/\d+/|
kinder\.wdr\.de/(?!mediathek/)[^#?]+-)
(?P<id>\d+)\.(?:js|assetjsonp)
'''
_GEO_COUNTRIES = ['DE']
_TESTS = [{
'url': 'http://deviceids-medp.wdr.de/ondemand/155/1557833.js',
'info_dict': {
'id': 'mdb-1557833',
'ext': 'mp4',
'title': 'Biathlon-Staffel verpasst Podest bei Olympia-Generalprobe',
'upload_date': '20180112',
},
}]
    def _asset_url(self, wdr_id):
        # Assets live at //deviceids-medp.wdr.de/ondemand/<prefix>/<id>.js, where
        # <prefix> is the id with its last four digits dropped (min id length 5)
        id_len = max(len(wdr_id), 5)
        return ''.join(('https:', self.__API_URL_TPL % (wdr_id[:id_len - 4], wdr_id), '.js'))
def _real_extract(self, url):
video_id = self._match_id(url)
if url.startswith('wdr:'):
video_id = url[4:]
url = self._asset_url(video_id)
metadata = self._download_json(
url, video_id, transform_source=strip_jsonp)
is_live = metadata.get('mediaType') == 'live'
tracker_data = metadata['trackerData']
title = tracker_data['trackerClipTitle']
media_resource = metadata['mediaResource']
formats = []
subtitles = {}
# check if the metadata contains a direct URL to a file
for kind, media in media_resource.items():
if kind == 'captionsHash':
for ext, url in media.items():
subtitles.setdefault('de', []).append({
'url': url,
'ext': ext,
})
continue
if kind not in ('dflt', 'alt'):
continue
if not isinstance(media, dict):
continue
for tag_name, medium_url in media.items():
if tag_name not in ('videoURL', 'audioURL'):
continue
ext = determine_ext(medium_url)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
medium_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls'))
elif ext == 'f4m':
manifest_url = update_url_query(
medium_url, {'hdcore': '3.2.0', 'plugin': 'aasp-3.2.0.77.18'})
formats.extend(self._extract_f4m_formats(
manifest_url, video_id, f4m_id='hds', fatal=False))
elif ext == 'smil':
formats.extend(self._extract_smil_formats(
medium_url, 'stream', fatal=False))
else:
a_format = {
'url': medium_url,
}
if ext == 'unknown_video':
urlh = self._request_webpage(
medium_url, video_id, note='Determining extension')
ext = urlhandle_detect_ext(urlh)
a_format['ext'] = ext
formats.append(a_format)
caption_url = media_resource.get('captionURL')
if caption_url:
subtitles['de'] = [{
'url': caption_url,
'ext': 'ttml',
}]
captions_hash = media_resource.get('captionsHash')
if isinstance(captions_hash, dict):
for ext, format_url in captions_hash.items():
format_url = url_or_none(format_url)
if not format_url:
continue
subtitles.setdefault('de', []).append({
'url': format_url,
'ext': determine_ext(format_url, None) or ext,
})
return {
'id': tracker_data.get('trackerClipId', video_id),
'title': title,
'alt_title': tracker_data.get('trackerClipSubcategory'),
'formats': formats,
'subtitles': subtitles,
'upload_date': unified_strdate(tracker_data.get('trackerClipAirTime')),
'is_live': is_live,
}
class WDRPageIE(WDRIE):  # XXX: Do not subclass from concrete IE
    """Extracts media from WDR / kinder.wdr / wdrmaus / sportschau pages.

    Scans the page for embedded player metadata (data-extension /
    data-extension-ard attributes) and delegates each hit to WDRIE; when
    nothing is embedded, treats the page as a playlist of links to further
    WDR pages.
    """
    # NOTE(review): '(?:www\.)' lacks a '?' and 'wdrmaus.de' has an unescaped
    # dot, so 'www.' is effectively mandatory here — presumably intended to be
    # optional; confirm before changing.
    _MAUS_REGEX = r'https?://(?:www\.)wdrmaus.de/(?:[^/]+/)*?(?P<maus_id>[^/?#.]+)(?:/?|/index\.php5|\.php5)$'
    _PAGE_REGEX = r'/(?:mediathek/)?(?:[^/]+/)*(?P<display_id>[^/]+)\.html'
    _VALID_URL = r'https?://(?:www\d?\.)?(?:(?:kinder\.)?wdr\d?|sportschau)\.de' + _PAGE_REGEX + '|' + _MAUS_REGEX
    _TESTS = [
        {
            'url': 'http://www1.wdr.de/mediathek/video/sendungen/doku-am-freitag/video-geheimnis-aachener-dom-100.html',
            # HDS download, MD5 is unstable
            'info_dict': {
                'id': 'mdb-1058683',
                'ext': 'flv',
                'display_id': 'doku-am-freitag/video-geheimnis-aachener-dom-100',
                'title': 'Geheimnis Aachener Dom',
                'alt_title': 'Doku am Freitag',
                'upload_date': '20160304',
                'description': 'md5:87be8ff14d8dfd7a7ee46f0299b52318',
                'is_live': False,
                'subtitles': {'de': [{
                    'url': 'http://ondemand-ww.wdr.de/medp/fsk0/105/1058683/1058683_12220974.xml',
                    'ext': 'ttml',
                }]},
            },
            'skip': 'HTTP Error 404: Not Found',
        },
        {
            'url': 'http://www1.wdr.de/mediathek/audio/wdr3/wdr3-gespraech-am-samstag/audio-schriftstellerin-juli-zeh-100.html',
            'md5': 'f4c1f96d01cf285240f53ea4309663d8',
            'info_dict': {
                'id': 'mdb-1072000',
                'ext': 'mp3',
                'display_id': 'wdr3-gespraech-am-samstag/audio-schriftstellerin-juli-zeh-100',
                'title': 'Schriftstellerin Juli Zeh',
                'alt_title': 'WDR 3 Gespräch am Samstag',
                'upload_date': '20160312',
                'description': 'md5:e127d320bc2b1f149be697ce044a3dd7',
                'is_live': False,
                'subtitles': {},
            },
            'skip': 'HTTP Error 404: Not Found',
        },
        {
            # FIXME: Asset JSON is directly embedded in webpage
            'url': 'http://www1.wdr.de/mediathek/video/live/index.html',
            'info_dict': {
                'id': 'mdb-2296252',
                'ext': 'mp4',
                'title': r're:^WDR Fernsehen im Livestream (?:\(nur in Deutschland erreichbar\) )?[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
                'alt_title': 'WDR Fernsehen Live',
                'upload_date': '20201112',
                'is_live': True,
            },
            'params': {
                'skip_download': True,  # m3u8 download
            },
        },
        {
            'url': 'http://www1.wdr.de/mediathek/video/sendungen/aktuelle-stunde/aktuelle-stunde-120.html',
            'playlist_mincount': 6,
            'info_dict': {
                'id': 'aktuelle-stunde-120',
            },
        },
        {
            'url': 'http://www.wdrmaus.de/aktuelle-sendung/index.php5',
            'info_dict': {
                'id': 'mdb-2627637',
                'ext': 'mp4',
                'upload_date': 're:^[0-9]{8}$',
                'title': 're:^Die Sendung (?:mit der Maus )?vom [0-9.]{10}$',
            },
            'skip': 'The id changes from week to week because of the new episode',
        },
        {
            'url': 'http://www.wdrmaus.de/filme/sachgeschichten/achterbahn.php5',
            'md5': '803138901f6368ee497b4d195bb164f2',
            'info_dict': {
                'id': 'mdb-186083',
                'ext': 'mp4',
                'upload_date': '20130919',
                'title': 'Sachgeschichte - Achterbahn ',
            },
            'skip': 'HTTP Error 404: Not Found',
        },
        {
            'url': 'http://www1.wdr.de/radio/player/radioplayer116~_layout-popupVersion.html',
            # Live stream, MD5 unstable
            'info_dict': {
                'id': 'mdb-869971',
                'ext': 'mp4',
                'title': r're:^COSMO Livestream [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
                'alt_title': 'COSMO Livestream',
                'live_status': 'is_live',
                'upload_date': '20160101',
            },
            'params': {
                'skip_download': True,  # m3u8 download
            },
        },
        {
            'url': 'http://www.sportschau.de/handballem2018/handball-nationalmannschaft-em-stolperstein-vorrunde-100.html',
            'info_dict': {
                'id': 'mdb-1556012',
                'ext': 'mp4',
                'title': 'DHB-Vizepräsident Bob Hanning - "Die Weltspitze ist extrem breit"',
                'upload_date': '20180111',
            },
            'params': {
                'skip_download': True,
            },
            'skip': 'HTTP Error 404: Not Found',
        },
        {
            'url': 'http://www.sportschau.de/handballem2018/audio-vorschau---die-handball-em-startet-mit-grossem-favoritenfeld-100.html',
            'only_matching': True,
        },
        {
            'url': 'https://kinder.wdr.de/tv/die-sendung-mit-dem-elefanten/av/video-folge---astronaut-100.html',
            'only_matching': True,
        },
        {
            'url': 'https://www1.wdr.de/mediathek/video/sendungen/rockpalast/video-baroness---freak-valley-festival--100.html',
            'info_dict': {
                'id': 'mdb-2741028',
                'ext': 'mp4',
                'title': 'Baroness - Freak Valley Festival 2022',
                'alt_title': 'Rockpalast',
                'upload_date': '20220725',
            },
        },
    ]
    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        # display_id comes from whichever named group the URL matched
        display_id = dict_get(mobj.groupdict(), ('display_id', 'maus_id'), 'wdrmaus')
        webpage = self._download_webpage(url, display_id)
        entries = []
        # Article with several videos
        # for wdr.de the data-extension-ard is in a tag with the class "mediaLink"
        # for wdr.de radio players, in a tag with the class "wdrrPlayerPlayBtn"
        # for wdrmaus, in a tag with the class "videoButton" (previously a link
        # to the page in a multiline "videoLink"-tag)
        for mobj in re.finditer(
            r'''(?sx)class=
                    (?:
                        (["\'])(?:mediaLink|wdrrPlayerPlayBtn|videoButton)\b.*?\1[^>]+|
                        (["\'])videoLink\b.*?\2[\s]*>\n[^\n]*
                    )data-extension(?:-ard)?=(["\'])(?P<data>(?:(?!\3).)+)\3
                ''', webpage):
            # data-extension payload is JS-style JSON
            media_link_obj = self._parse_json(
                mobj.group('data'), display_id, transform_source=js_to_json,
                fatal=False)
            if not media_link_obj:
                continue
            jsonp_url = try_get(
                media_link_obj, lambda x: x['mediaObj']['url'], str)
            if jsonp_url:
                # metadata, or player JS with ['ref'] giving WDR id, or just media, perhaps
                clip_id = media_link_obj['mediaObj'].get('ref')
                if jsonp_url.endswith('.assetjsonp'):
                    asset = self._download_json(
                        jsonp_url, display_id, fatal=False, transform_source=strip_jsonp)
                    clip_id = try_get(asset, lambda x: x['trackerData']['trackerClipId'], str)
                if clip_id:
                    # clip_id is 'mdb-NNN...'; strip the 'mdb-' prefix
                    jsonp_url = self._asset_url(clip_id[4:])
                entries.append(self.url_result(jsonp_url, ie=WDRIE.ie_key()))
        # Playlist (e.g. https://www1.wdr.de/mediathek/video/sendungen/aktuelle-stunde/aktuelle-stunde-120.html)
        if not entries:
            entries = [
                self.url_result(
                    urllib.parse.urljoin(url, mobj.group('href')),
                    ie=WDRPageIE.ie_key())
                for mobj in re.finditer(
                    r'<a[^>]+\bhref=(["\'])(?P<href>(?:(?!\1).)+)\1[^>]+\bdata-extension(?:-ard)?=',
                    webpage) if re.match(self._PAGE_REGEX, mobj.group('href'))
            ]
        return self.playlist_result(entries, playlist_id=display_id)
class WDRElefantIE(InfoExtractor):
    """Extract videos from the WDR 'Elefantenseite' single-page app.

    The page identifies content only via the URL fragment (after '#'), so the
    site's table of contents is used to resolve it to XML metadata and from
    there to a WDR asset URL.
    """
    # Fixed: the original pattern used a pointless non-capturing group
    # '(?:www\.)' without '?', which made the 'www.' prefix mandatory and
    # rejected bare wdrmaus.de URLs; it is now optional.
    _VALID_URL = r'https?://(?:www\.)?wdrmaus\.de/elefantenseite/#(?P<id>.+)'
    _TEST = {
        'url': 'http://www.wdrmaus.de/elefantenseite/#elefantenkino_wippe',
        # adaptive stream: unstable file MD5
        'info_dict': {
            'title': 'Wippe',
            'id': 'mdb-1198320',
            'ext': 'mp4',
            'upload_date': '20071003',
        },
    }
    def _real_extract(self, url):
        display_id = self._match_id(url)
        # Table of Contents seems to always be at this address, so fetch it directly.
        # The website fetches configurationJS.php5, which links to tableOfContentsJS.php5.
        table_of_contents = self._download_json(
            'https://www.wdrmaus.de/elefantenseite/data/tableOfContentsJS.php5',
            display_id)
        if display_id not in table_of_contents:
            raise ExtractorError(
                'No entry in site\'s table of contents for this URL. '
                'Is the fragment part of the URL (after the #) correct?',
                expected=True)
        xml_metadata_path = table_of_contents[display_id]['xmlPath']
        xml_metadata = self._download_xml(
            'https://www.wdrmaus.de/elefantenseite/' + xml_metadata_path,
            display_id)
        # Non-video entries (e.g. games) have no zmdb_url element
        zmdb_url_element = xml_metadata.find('./movie/zmdb_url')
        if zmdb_url_element is None:
            raise ExtractorError(
                f'{display_id} is not a video', expected=True)
        return self.url_result(zmdb_url_element.text, ie=WDRIE.ie_key())
class WDRMobileIE(InfoExtractor):
    """Legacy extractor for direct mobile-ondemand.wdr.de media URLs.

    All metadata (id, title, age limit) is parsed straight out of the URL;
    the URL itself is returned as the media location.
    """
    _VALID_URL = r'''(?x)
        https?://mobile-ondemand\.wdr\.de/
        .*?/fsk(?P<age_limit>[0-9]+)
        /[0-9]+/[0-9]+/
        (?P<id>[0-9]+)_(?P<title>[0-9]+)'''
    IE_NAME = 'wdr:mobile'
    _WORKING = False  # no such domain
    _TEST = {
        'url': 'http://mobile-ondemand.wdr.de/CMS2010/mdb/ondemand/weltweit/fsk0/42/421735/421735_4283021.mp4',
        'info_dict': {
            'title': '4283021',
            'id': '421735',
            'ext': 'mp4',
            'age_limit': 0,
        },
        'skip': 'Problems with loading data.',
    }
    def _real_extract(self, url):
        match = self._match_valid_url(url)
        info = {
            'id': match.group('id'),
            'title': match.group('title'),
            'age_limit': int(match.group('age_limit')),
            'url': url,
            # The CDN only serves clients identifying as mobile
            'http_headers': {'User-Agent': 'mobile'},
        }
        return info
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/jwplatform.py | yt_dlp/extractor/jwplatform.py | import re
from .common import InfoExtractor
from ..utils import unsmuggle_url
class JWPlatformIE(InfoExtractor):
    """Extracts JW Platform / JW Player hosted media by its 8-character id.

    Accepts content.jwplatform.com / cdn.jwplayer.com URLs as well as
    'jwplatform:<id>' pseudo-URLs produced by _extract_embed_urls().
    """
    _VALID_URL = r'(?:https?://(?:content\.jwplatform|cdn\.jwplayer)\.com/(?:(?:feed|player|thumb|preview|manifest)s|jw6|v2/media)/|jwplatform:)(?P<id>[a-zA-Z0-9]{8})'
    _TESTS = [{
        'url': 'http://content.jwplatform.com/players/nPripu9l-ALJ3XQCI.js',
        'info_dict': {
            'id': 'nPripu9l',
            'ext': 'mp4',
            'title': 'Big Buck Bunny Trailer',
            'description': 'Big Buck Bunny is a short animated film by the Blender Institute. It is made using free and open source software.',
            'upload_date': '20081127',
            'timestamp': 1227796140,
            'duration': 32.0,
            'thumbnail': r're:https?://cdn\.jwplayer\.com/v2/media/.+',
        },
    }, {
        'url': 'https://cdn.jwplayer.com/players/nPripu9l-ALJ3XQCI.js',
        'only_matching': True,
    }]
    _WEBPAGE_TESTS = [{
        # JWPlatform iframe
        'url': 'https://www.covermagazine.co.uk/feature/2465255/business-protection-involved',
        'info_dict': {
            'id': 'AG26UQXM',
            'ext': 'mp4',
            'upload_date': '20160719',
            'timestamp': 1468923808,
            'title': '2016_05_18 Cover L&G Business Protection V1 FINAL.mp4',
            'thumbnail': r're:https?://cdn\.jwplayer\.com/v2/media/.+',
            'description': '',
            'duration': 294.0,
        },
        'skip': 'Site no longer embeds JWPlatform',
    }, {
        # Player url not surrounded by quotes
        'url': 'https://www.deutsche-kinemathek.de/en/online/streaming/school-trip',
        'info_dict': {
            'id': 'jUxh5uin',
            'title': 'Klassenfahrt',
            'ext': 'mp4',
            'upload_date': '20230109',
            'thumbnail': r're:https?://cdn\.jwplayer\.com/v2/media/.+',
            'timestamp': 1673270298,
            'description': '',
            'duration': 5193.0,
        },
        'skip': 'Site no longer embeds JWPlatform',
    }, {
        # iframe src attribute includes backslash before URL string
        'url': 'https://www.elespectador.com/colombia/video-asi-se-evito-la-fuga-de-john-poulos-presunto-feminicida-de-valentina-trespalacios-explicacion',
        'info_dict': {
            'id': 'QD3gsexj',
            'title': 'Así se evitó la fuga de John Poulos, presunto feminicida de Valentina Trespalacios',
            'ext': 'mp4',
            'upload_date': '20230127',
            'thumbnail': r're:https?://cdn\.jwplayer\.com/v2/media/.+',
            'timestamp': 1674862986,
            'description': 'md5:128fd74591c4e1fc2da598c5cb6f5ce4',
            'duration': 263.0,
        },
    }, {
        'url': 'https://www.skimag.com/video/ski-people-1980',
        'info_dict': {
            'id': 'YTmgRiNU',
            'ext': 'mp4',
            'title': 'Ski People (1980)',
            'channel': 'snow',
            'description': 'md5:cf9c3d101452c91e141f292b19fe4843',
            'duration': 5688.0,
            'thumbnail': r're:https?://cdn\.jwplayer\.com/v2/media/.+',
            'timestamp': 1610407738,
            'upload_date': '20210111',
        },
    }]
    @classmethod
    def _extract_embed_urls(cls, url, webpage):
        # Return player URLs embedded via <script>/<iframe> src (preferred)
        # or <input value=...>; the first tag kind that matches wins.
        for tag, key in ((r'(?:script|iframe)', 'src'), ('input', 'value')):
            # <input value=URL> is used by hyland.com
            # if we find <iframe>, dont look for <input>
            ret = re.findall(
                rf'<{tag}[^>]+?{key}=\\?["\']?((?:https?:)?//(?:content\.jwplatform|cdn\.jwplayer)\.com/players/[a-zA-Z0-9]{{8}})',
                webpage)
            if ret:
                return ret
        # Fallback: bare media id in a data attribute
        mobj = re.search(r'<div\b[^>]* data-video-jw-id="([a-zA-Z0-9]{8})"', webpage)
        if mobj:
            return [f'jwplatform:{mobj.group(1)}']
    def _real_extract(self, url):
        url, smuggled_data = unsmuggle_url(url, {})
        # Generic extractor may smuggle geo restriction info from the embedding page
        self._initialize_geo_bypass({
            'countries': smuggled_data.get('geo_countries'),
        })
        video_id = self._match_id(url)
        json_data = self._download_json('https://cdn.jwplayer.com/v2/media/' + video_id, video_id)
        return self._parse_jwplayer_data(json_data, video_id)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/zdf.py | yt_dlp/extractor/zdf.py | import itertools
import json
import re
import time
from .common import InfoExtractor
from ..utils import (
ExtractorError,
ISO639Utils,
determine_ext,
filter_dict,
float_or_none,
int_or_none,
join_nonempty,
make_archive_id,
parse_codecs,
parse_iso8601,
parse_qs,
smuggle_url,
unified_timestamp,
unsmuggle_url,
url_or_none,
urljoin,
variadic,
)
from ..utils.traversal import require, traverse_obj
class ZDFBaseIE(InfoExtractor):
    """Shared functionality for ZDF-family extractors: API token handling,
    PTMD (stream manifest) parsing, GraphQL access and thumbnail mapping."""
    _GEO_COUNTRIES = ['DE']
    _TOKEN_CACHE_PARAMS = ('zdf', 'api-token')
    # In-memory token cache, shared across instances; persisted via self.cache
    _token_cache = {}
    def _get_api_token(self):
        # As of 2025-03, this API is used by the Android app for getting tokens.
        # An equivalent token could be extracted from the webpage should the API become unavailable.
        # For now this allows the extractor to avoid dealing with Next.js hydration data.
        if not self._token_cache:
            self._token_cache.update(self.cache.load(*self._TOKEN_CACHE_PARAMS, default={}))
        # Refresh when missing or expired
        if traverse_obj(self._token_cache, ('expires', {int_or_none}), default=0) < int(time.time()):
            self._token_cache.update(self._download_json(
                'https://zdf-prod-futura.zdf.de/mediathekV2/token', None,
                'Downloading API token', 'Failed to download API token'))
            self.cache.store(*self._TOKEN_CACHE_PARAMS, self._token_cache)
        return f'{self._token_cache["type"]} {self._token_cache["token"]}'
    def _call_api(self, url, video_id, item, api_token=None):
        # Thin JSON helper; attaches the Api-Auth header when a token is given
        return self._download_json(
            url, video_id, f'Downloading {item}', f'Failed to download {item}',
            headers=filter_dict({'Api-Auth': api_token}))
    def _parse_aspect_ratio(self, aspect_ratio):
        # 'W:H' string -> float ratio (width / height), or None if unparsable
        if not aspect_ratio or not isinstance(aspect_ratio, str):
            return None
        mobj = re.match(r'(?P<width>\d+):(?P<height>\d+)', aspect_ratio)
        return int(mobj.group('width')) / int(mobj.group('height')) if mobj else None
    def _extract_chapters(self, data):
        # Stream anchor tags -> yt-dlp chapter dicts; None when no usable anchors
        return traverse_obj(data, (lambda _, v: v['anchorOffset'], {
            'start_time': ('anchorOffset', {float_or_none}),
            'title': ('anchorLabel', {str}),
        })) or None
    @staticmethod
    def _extract_subtitles(src):
        # De-duplicate caption URLs and group them by language (default 'deu')
        seen_urls = set()
        subtitles = {}
        for caption in src:
            subtitle_url = url_or_none(caption.get('uri'))
            if not subtitle_url or subtitle_url in seen_urls:
                continue
            seen_urls.add(subtitle_url)
            lang = caption.get('language') or 'deu'
            subtitles.setdefault(lang, []).append({
                'url': subtitle_url,
            })
        return subtitles
    def _expand_ptmd_template(self, api_base_url, template):
        # Fill in the player id placeholder and resolve against the API base
        return urljoin(api_base_url, template.replace('{playerId}', 'android_native_6'))
    def _extract_ptmd(self, ptmd_urls, video_id, api_token=None, aspect_ratio=None):
        """Download one or more PTMD documents and merge them into a partial
        info dict (id, duration, formats, subtitles)."""
        content_id = None
        duration = None
        formats, src_captions = [], []
        seen_urls = set()
        for ptmd_url in variadic(ptmd_urls):
            ptmd_url, smuggled_data = unsmuggle_url(ptmd_url, {})
            # Is it a DGS variant? (*D*eutsche *G*ebärden*s*prache' / German Sign Language)
            is_dgs = smuggled_data.get('vod_media_type') == 'DGS'
            ptmd = self._call_api(ptmd_url, video_id, 'PTMD data', api_token)
            basename = (
                ptmd.get('basename')
                # ptmd_url examples:
                # https://api.zdf.de/tmd/2/android_native_6/vod/ptmd/mediathek/250328_sendung_hsh/3
                # https://tmd.phoenix.de/tmd/2/android_native_6/vod/ptmd/phoenix/221215_phx_spitzbergen
                or self._search_regex(r'/vod/ptmd/[^/?#]+/(\w+)', ptmd_url, 'content ID', default=None))
            # If this is_dgs, then it's from ZDFIE and it only uses content_id for _old_archive_ids,
            # and the old version of the extractor didn't extract DGS variants, so ignore basename
            if not content_id and not is_dgs:
                content_id = basename
            if not duration:
                duration = traverse_obj(ptmd, ('attributes', 'duration', 'value', {float_or_none(scale=1000)}))
            src_captions += traverse_obj(ptmd, ('captions', ..., {dict}))
            for stream in traverse_obj(ptmd, ('priorityList', ..., 'formitaeten', ..., {dict})):
                for quality in traverse_obj(stream, ('qualities', ..., {dict})):
                    for variant in traverse_obj(quality, ('audio', 'tracks', lambda _, v: url_or_none(v['uri']))):
                        format_url = variant['uri']
                        if format_url in seen_urls:
                            continue
                        seen_urls.add(format_url)
                        ext = determine_ext(format_url)
                        if ext == 'm3u8':
                            fmts = self._extract_m3u8_formats(
                                format_url, video_id, 'mp4', m3u8_id='hls', fatal=False)
                        elif ext in ('mp4', 'webm'):
                            height = int_or_none(quality.get('highestVerticalResolution'))
                            # Derive width from the known display aspect ratio
                            width = round(aspect_ratio * height) if aspect_ratio and height else None
                            fmts = [{
                                'url': format_url,
                                **parse_codecs(quality.get('mimeCodec')),
                                'height': height,
                                'width': width,
                                'format_id': join_nonempty('http', stream.get('type')),
                                'tbr': int_or_none(self._search_regex(r'_(\d+)k_', format_url, 'tbr', default=None)),
                            }]
                        else:
                            self.report_warning(f'Skipping unsupported extension "{ext}"', video_id=video_id)
                            fmts = []
                        f_class = variant.get('class')
                        for f in fmts:
                            f_lang = ISO639Utils.short2long(
                                (f.get('language') or variant.get('language') or '').lower())
                            is_audio_only = f.get('vcodec') == 'none'
                            formats.append({
                                **f,
                                'format_id': join_nonempty(f['format_id'], is_dgs and 'dgs'),
                                'format_note': join_nonempty(
                                    not is_audio_only and f_class,
                                    is_dgs and 'German Sign Language',
                                    f.get('format_note'), delim=', '),
                                # Sign-language variants rank below the regular streams
                                'preference': -2 if is_dgs else -1,
                                'language': f_lang,
                                # Preference ladder: audio description lowest,
                                # then German main > German > main > other
                                'language_preference': (
                                    -10 if ((is_audio_only and f.get('format_note') == 'Audiodeskription')
                                            or (not is_audio_only and f_class == 'ad'))
                                    else 10 if f_lang == 'deu' and f_class == 'main'
                                    else 5 if f_lang == 'deu'
                                    else 1 if f_class == 'main'
                                    else -1),
                            })
        return {
            'id': content_id or video_id,
            'duration': duration,
            'formats': formats,
            'subtitles': self._extract_subtitles(src_captions),
        }
    def _download_graphql(self, item_id, data_desc, query=None, body=None):
        # POST (body) or GET (query) against the ZDF GraphQL endpoint
        assert query or body, 'One of query or body is required'
        return self._download_json(
            'https://api.zdf.de/graphql', item_id,
            f'Downloading {data_desc}', f'Failed to download {data_desc}',
            query=query, data=json.dumps(body).encode() if body else None,
            headers=filter_dict({
                'Api-Auth': self._get_api_token(),
                'Apollo-Require-Preflight': True,
                'Content-Type': 'application/json' if body else None,
            }))
    @staticmethod
    def _extract_thumbnails(source):
        # Map {'<W>x<H>' or 'original': url} dicts to thumbnail entries,
        # parsing dimensions out of the key where possible
        return [{
            'id': str(format_id),
            'url': url,
            'preference': 1 if format_id == 'original' else 0,
            **traverse_obj(re.search(r'(?P<width>\d+|auto)[Xx](?P<height>\d+|auto)', str(format_id)), {
                'width': ('width', {int_or_none}),
                'height': ('height', {int_or_none}),
            }),
        } for format_id, url in traverse_obj(source, ({dict.items}, lambda _, v: url_or_none(v[1])))]
class ZDFIE(ZDFBaseIE):
_VALID_URL = [
r'https?://(?:www\.)?zdf\.de/(?:video|play)/(?:[^/?#]+/)*(?P<id>[^/?#]+)',
# /nachrichten/ sub-site URLs and legacy redirects from before the redesign in 2025-03
r'https?://(?:www\.)?zdf\.de/(?:[^/?#]+/)*(?P<id>[^/?#]+)\.html',
]
IE_NAME = 'zdf'
_TESTS = [{
# Standalone video (i.e. not part of a playlist), video URL
'url': 'https://www.zdf.de/video/dokus/sylt---deutschlands-edles-nordlicht-movie-100/sylt-deutschlands-edles-nordlicht-100',
'info_dict': {
'id': 'sylt-deutschlands-edles-nordlicht-100',
'ext': 'mp4',
'title': 'Sylt - Deutschlands edles Nordlicht',
'description': 'md5:35407b810c2e1e33efbe15ef6e4c06c3',
'duration': 810.0,
'thumbnail': 'https://www.zdf.de/assets/sylt-118~original?cb=1613992485011',
'series': 'Sylt - Deutschlands edles Nordlicht',
'series_id': 'sylt---deutschlands-edles-nordlicht-movie-100',
'timestamp': 1612462500,
'upload_date': '20210204',
'_old_archive_ids': ['zdf 210402_1915_sendung_dok'],
},
}, {
# Standalone video (i.e. not part of a playlist), play URL
'url': 'https://www.zdf.de/play/dokus/sylt---deutschlands-edles-nordlicht-movie-100/sylt-deutschlands-edles-nordlicht-100',
'info_dict': {
'id': 'sylt-deutschlands-edles-nordlicht-100',
'ext': 'mp4',
'title': 'Sylt - Deutschlands edles Nordlicht',
'description': 'md5:35407b810c2e1e33efbe15ef6e4c06c3',
'duration': 810.0,
'thumbnail': 'https://www.zdf.de/assets/sylt-118~original?cb=1613992485011',
'series': 'Sylt - Deutschlands edles Nordlicht',
'series_id': 'sylt---deutschlands-edles-nordlicht-movie-100',
'timestamp': 1612462500,
'upload_date': '20210204',
'_old_archive_ids': ['zdf 210402_1915_sendung_dok'],
},
'params': {'skip_download': True},
}, {
# Standalone video (i.e. not part of a playlist), legacy URL before website redesign in 2025-03
'url': 'https://www.zdf.de/dokumentation/dokumentation-sonstige/sylt-deutschlands-edles-nordlicht-100.html',
'info_dict': {
'id': 'sylt-deutschlands-edles-nordlicht-100',
'ext': 'mp4',
'title': 'Sylt - Deutschlands edles Nordlicht',
'description': 'md5:35407b810c2e1e33efbe15ef6e4c06c3',
'duration': 810.0,
'thumbnail': 'https://www.zdf.de/assets/sylt-118~original?cb=1613992485011',
'series': 'Sylt - Deutschlands edles Nordlicht',
'series_id': 'sylt---deutschlands-edles-nordlicht-movie-100',
'timestamp': 1612462500,
'upload_date': '20210204',
'_old_archive_ids': ['zdf 210402_1915_sendung_dok'],
},
'params': {'skip_download': True},
}, {
# Video belongs to a playlist, video URL
'url': 'https://www.zdf.de/video/dokus/die-magie-der-farben-116/die-magie-der-farben-von-koenigspurpur-und-jeansblau-100',
'md5': '1eda17eb40a9ead3046326e10b9c5973',
'info_dict': {
'id': 'die-magie-der-farben-von-koenigspurpur-und-jeansblau-100',
'ext': 'mp4',
'title': 'Von Königspurpur bis Jeansblau',
'description': 'md5:a89da10c928c6235401066b60a6d5c1a',
'duration': 2615.0,
'thumbnail': 'https://www.zdf.de/assets/koenigspurpur-bis-jeansblau-100~original?cb=1741857765971',
'series': 'Die Magie der Farben',
'series_id': 'die-magie-der-farben-116',
'season': 'Season 1',
'season_number': 1,
'episode': 'Episode 2',
'episode_number': 2,
'timestamp': 1445797800,
'upload_date': '20151025',
'_old_archive_ids': ['zdf 151025_magie_farben2_tex'],
},
}, {
# Video belongs to a playlist, play URL
'url': 'https://www.zdf.de/play/dokus/die-magie-der-farben-116/die-magie-der-farben-von-koenigspurpur-und-jeansblau-100',
'md5': '1eda17eb40a9ead3046326e10b9c5973',
'info_dict': {
'id': 'die-magie-der-farben-von-koenigspurpur-und-jeansblau-100',
'ext': 'mp4',
'title': 'Von Königspurpur bis Jeansblau',
'description': 'md5:a89da10c928c6235401066b60a6d5c1a',
'duration': 2615.0,
'thumbnail': 'https://www.zdf.de/assets/koenigspurpur-bis-jeansblau-100~original?cb=1741857765971',
'series': 'Die Magie der Farben',
'series_id': 'die-magie-der-farben-116',
'season': 'Season 1',
'season_number': 1,
'episode': 'Episode 2',
'episode_number': 2,
'timestamp': 1445797800,
'upload_date': '20151025',
'_old_archive_ids': ['zdf 151025_magie_farben2_tex'],
},
'params': {'skip_download': True},
}, {
# Video belongs to a playlist, legacy URL before website redesign in 2025-03
'url': 'https://www.zdf.de/dokumentation/terra-x/die-magie-der-farben-von-koenigspurpur-und-jeansblau-100.html',
'md5': '1eda17eb40a9ead3046326e10b9c5973',
'info_dict': {
'id': 'die-magie-der-farben-von-koenigspurpur-und-jeansblau-100',
'ext': 'mp4',
'title': 'Von Königspurpur bis Jeansblau',
'description': 'md5:a89da10c928c6235401066b60a6d5c1a',
'duration': 2615.0,
'thumbnail': 'https://www.zdf.de/assets/koenigspurpur-bis-jeansblau-100~original?cb=1741857765971',
'series': 'Die Magie der Farben',
'series_id': 'die-magie-der-farben-116',
'season': 'Season 1',
'season_number': 1,
'episode': 'Episode 2',
'episode_number': 2,
'timestamp': 1445797800,
'upload_date': '20151025',
'_old_archive_ids': ['zdf 151025_magie_farben2_tex'],
},
'params': {'skip_download': True},
}, {
# Video with chapters
# Also: video with sign-language variant
'url': 'https://www.zdf.de/video/magazine/heute-journal-104/heute-journal-vom-19-12-2021-100',
'md5': '6ada39465497a84fb98d48ffff69e7b7',
'info_dict': {
'id': 'heute-journal-vom-19-12-2021-100',
'ext': 'mp4',
'title': 'heute journal vom 19.12.2021',
'description': 'md5:02504cf3b03777ff32fcc927d260c5dd',
'duration': 1770.0,
'thumbnail': 'https://epg-image.zdf.de/fotobase-webdelivery/images/273e5545-16e7-4ca3-898e-52fe9e06d964?layout=1920x1080',
'chapters': 'count:11',
'series': 'heute journal',
'series_id': 'heute-journal-104',
'season': 'Season 2021',
'season_number': 2021,
'episode': 'Episode 370',
'episode_number': 370,
'timestamp': 1639946700,
'upload_date': '20211219',
# Videos with sign language variants must not have a 'dgs' suffix on their old archive IDs.
'_old_archive_ids': ['zdf 211219_sendung_hjo'],
},
}, {
# Video that requires fallback extraction
'url': 'https://www.zdf.de/nachrichten/politik/deutschland/koalitionsverhandlungen-spd-cdu-csu-dobrindt-100.html',
'md5': 'c3a78514dd993a5781aa3afe50db51e2',
'info_dict': {
'id': 'koalitionsverhandlungen-spd-cdu-csu-dobrindt-100',
'ext': 'mp4',
'title': 'Dobrindt schließt Steuererhöhungen aus',
'description': 'md5:9a117646d7b8df6bc902eb543a9c9023',
'duration': 325,
'thumbnail': 'https://www.zdfheute.de/assets/dobrindt-csu-berlin-direkt-100~1920x1080?cb=1743357653736',
'timestamp': 1743374520,
'upload_date': '20250330',
'_old_archive_ids': ['zdf 250330_clip_2_bdi'],
},
}, {
# FUNK video (hosted on a different CDN, has atypical PTMD and HLS files)
'url': 'https://www.zdf.de/funk/druck-11790/funk-alles-ist-verzaubert-102.html',
'md5': '57af4423db0455a3975d2dc4578536bc',
'info_dict': {
'id': 'funk-alles-ist-verzaubert-102',
'ext': 'mp4',
'title': 'Alles ist verzaubert',
'description': 'Die Neue an der Schule verdreht Ismail den Kopf.',
'duration': 1278.0,
'thumbnail': 'https://www.zdf.de/assets/teaser-funk-alles-ist-verzaubert-102~original?cb=1663848412907',
'series': 'DRUCK',
'series_id': 'funk-collection-funk-11790-1590',
'season': 'Season 7',
'season_number': 7,
'episode': 'Episode 1',
'episode_number': 1,
'timestamp': 1635520560,
'upload_date': '20211029',
'_old_archive_ids': ['zdf video_funk_1770473'],
},
}, {
'url': 'https://www.zdf.de/serien/soko-stuttgart/das-geld-anderer-leute-100.html',
'info_dict': {
'id': 'das-geld-anderer-leute-100',
'ext': 'mp4',
'title': 'Das Geld anderer Leute',
'description': 'md5:cb6f660850dc5eb7d1ab776ea094959d',
'duration': 2581.0,
'thumbnail': 'https://epg-image.zdf.de/fotobase-webdelivery/images/e2d7e55a-09f0-424e-ac73-6cac4dd65f35?layout=1920x1080',
'series': 'SOKO Stuttgart',
'series_id': 'soko-stuttgart-104',
'season': 'Season 11',
'season_number': 11,
'episode': 'Episode 10',
'episode_number': 10,
'timestamp': 1728983700,
'upload_date': '20241015',
'_old_archive_ids': ['zdf 191205_1800_sendung_sok8'],
},
}, {
'url': 'https://www.zdf.de/serien/northern-lights/begegnung-auf-der-bruecke-100.html',
'info_dict': {
'id': 'begegnung-auf-der-bruecke-100',
'ext': 'webm',
'title': 'Begegnung auf der Brücke',
'description': 'md5:e53a555da87447f7f1207f10353f8e45',
'duration': 3083.0,
'thumbnail': 'https://epg-image.zdf.de/fotobase-webdelivery/images/c5ff1d1f-f5c8-4468-86ac-1b2f1dbecc76?layout=1920x1080',
'series': 'Northern Lights',
'series_id': 'northern-lights-100',
'season': 'Season 1',
'season_number': 1,
'episode': 'Episode 1',
'episode_number': 1,
'timestamp': 1738546500,
'upload_date': '20250203',
'_old_archive_ids': ['zdf 240319_2310_sendung_not'],
},
'params': {'skip_download': 'geo-restricted http format'},
}, {
# Same as https://www.phoenix.de/sendungen/ereignisse/corona-nachgehakt/wohin-fuehrt-der-protest-in-der-pandemie-a-2050630.html
'url': 'https://www.zdf.de/politik/phoenix-sendungen/wohin-fuehrt-der-protest-in-der-pandemie-100.html',
'only_matching': True,
}, {
# Same as https://www.3sat.de/film/ab-18/10-wochen-sommer-108.html
'url': 'https://www.zdf.de/dokumentation/ab-18/10-wochen-sommer-102.html',
'only_matching': True,
}, {
# Same as https://www.phoenix.de/sendungen/dokumentationen/gesten-der-maechtigen-i-a-89468.html?ref=suche
'url': 'https://www.zdf.de/politik/phoenix-sendungen/die-gesten-der-maechtigen-100.html',
'only_matching': True,
}, {
# Same as https://www.3sat.de/film/spielfilm/der-hauptmann-100.html
'url': 'https://www.zdf.de/filme/filme-sonstige/der-hauptmann-112.html',
'only_matching': True,
}, {
# Same as https://www.3sat.de/wissen/nano/nano-21-mai-2019-102.html, equal media ids
'url': 'https://www.zdf.de/wissen/nano/nano-21-mai-2019-102.html',
'only_matching': True,
}, {
'url': 'https://www.zdf.de/service-und-hilfe/die-neue-zdf-mediathek/zdfmediathek-trailer-100.html',
'only_matching': True,
}, {
'url': 'https://www.zdf.de/filme/taunuskrimi/die-lebenden-und-die-toten-1---ein-taunuskrimi-100.html',
'only_matching': True,
}, {
'url': 'https://www.zdf.de/dokumentation/planet-e/planet-e-uebersichtsseite-weitere-dokumentationen-von-planet-e-100.html',
'only_matching': True,
}, {
'url': 'https://www.zdf.de/arte/todliche-flucht/page-video-artede-toedliche-flucht-16-100.html',
'only_matching': True,
}, {
'url': 'https://www.zdf.de/dokumentation/terra-x/unser-gruener-planet-wuesten-doku-100.html',
'only_matching': True,
}]
_GRAPHQL_QUERY = '''
query VideoByCanonical($canonical: String!) {
videoByCanonical(canonical: $canonical) {
canonical
title
leadParagraph
editorialDate
teaser {
description
image {
list
}
}
episodeInfo {
episodeNumber
seasonNumber
}
smartCollection {
canonical
title
}
currentMedia {
nodes {
ptmdTemplate
... on VodMedia {
duration
aspectRatio
streamAnchorTags {
nodes {
anchorOffset
anchorLabel
}
}
vodMediaType
label
}
... on LiveMedia {
start
stop
encryption
liveMediaType
label
}
id
}
}
}
}
'''
def _extract_ptmd(self, *args, **kwargs):
ptmd_data = super()._extract_ptmd(*args, **kwargs)
# This was the video id before the graphql redesign, other extractors still use it as such
old_archive_id = ptmd_data.pop('id')
ptmd_data['_old_archive_ids'] = [make_archive_id(self, old_archive_id)]
return ptmd_data
# This fallback should generally only happen for pages under `zdf.de/nachrichten`.
# They are on a separate website for which GraphQL often doesn't return results.
# The API used here is no longer in use by official clients and likely deprecated.
# Long-term, news documents probably should use the API used by the mobile apps:
# https://zdf-prod-futura.zdf.de/news/documents/ (note 'news' vs 'mediathekV2')
def _extract_fallback(self, document_id):
    """Extract metadata via the legacy mediathekV2 document API.

    Called when GraphQL returns no data for the canonical id (see the
    comment block above). Returns an info dict merged with the
    PTMD-derived data for the document's stream.
    """
    video = self._download_json(
        f'https://zdf-prod-futura.zdf.de/mediathekV2/document/{document_id}',
        document_id, note='Downloading fallback metadata',
        errnote='Failed to download fallback metadata')
    document = video['document']
    # Prefer the document-level Android stream URL, else the first entry
    # of the 'streams' list; a valid URL is required.
    ptmd_url = traverse_obj(document, (
        ('streamApiUrlAndroid', ('streams', 0, 'streamApiUrlAndroid')),
        {url_or_none}, any, {require('PTMD URL')}))
    thumbnails = []
    # 'teaserBild' is a dict of thumbnail variants; each key becomes the
    # thumbnail id.
    for thumbnail_key, thumbnail in traverse_obj(document, ('teaserBild', {dict.items}, ...)):
        thumbnail_url = traverse_obj(thumbnail, ('url', {url_or_none}))
        if not thumbnail_url:
            continue
        thumbnails.append({
            'url': thumbnail_url,
            'id': thumbnail_key,
            'width': int_or_none(thumbnail.get('width')),
            'height': int_or_none(thumbnail.get('height')),
        })
    return {
        'thumbnails': thumbnails,
        **traverse_obj(video, {
            'title': ('document', 'titel', {str}),
            'description': ('document', 'beschreibung', {str}),
            'timestamp': (
                (('document', 'date'), ('meta', 'editorialDate')),
                {unified_timestamp}, any),
            'subtitles': ('document', 'captions', {self._extract_subtitles}),
        }),
        # Merged last so PTMD-derived keys take precedence over the
        # document metadata above.
        **self._extract_ptmd(ptmd_url, document_id, self._get_api_token()),
        'id': document_id,
    }
def _real_extract(self, url):
    """Extract a single video by its canonical id via the GraphQL API."""
    video_id = self._match_id(url)
    video_data = self._download_graphql(video_id, 'video metadata', body={
        'operationName': 'VideoByCanonical',
        'query': self._GRAPHQL_QUERY,
        'variables': {'canonical': video_id},
    })['data']['videoByCanonical']
    if not video_data:
        # GraphQL has no entry for this canonical id; use the legacy API.
        return self._extract_fallback(video_id)

    aspect_ratio = None
    ptmd_urls = []
    # Collect one PTMD URL per media node that has a ptmdTemplate.
    for node in traverse_obj(video_data, ('currentMedia', 'nodes', lambda _, v: v['ptmdTemplate'])):
        ptmd_url = self._expand_ptmd_template('https://api.zdf.de', node['ptmdTemplate'])
        # Smuggle vod_media_type so that _extract_ptmd is aware of 'DGS' variants
        if vod_media_type := node.get('vodMediaType'):
            ptmd_url = smuggle_url(ptmd_url, {'vod_media_type': vod_media_type})
        ptmd_urls.append(ptmd_url)
        # Keep the first parseable aspect ratio reported by any node.
        if not aspect_ratio:
            aspect_ratio = self._parse_aspect_ratio(node.get('aspectRatio'))

    return {
        **traverse_obj(video_data, {
            'title': ('title', {str}),
            'description': (('leadParagraph', ('teaser', 'description')), any, {str}),
            'timestamp': ('editorialDate', {parse_iso8601}),
            'thumbnails': ('teaser', 'image', 'list', {self._extract_thumbnails}),
            'episode_number': ('episodeInfo', 'episodeNumber', {int_or_none}),
            'season_number': ('episodeInfo', 'seasonNumber', {int_or_none}),
            'series': ('smartCollection', 'title', {str}),
            'series_id': ('smartCollection', 'canonical', {str}),
            'chapters': ('currentMedia', 'nodes', 0, 'streamAnchorTags', 'nodes', {self._extract_chapters}),
        }),
        **self._extract_ptmd(ptmd_urls, video_id, self._get_api_token(), aspect_ratio),
        'id': video_id,
    }
class ZDFChannelIE(ZDFBaseIE):
_VALID_URL = r'https?://www\.zdf\.de/(?:[^/?#]+/)*(?P<id>[^/?#]+)'
IE_NAME = 'zdf:channel'
_TESTS = [{
# Playlist, legacy URL before website redesign in 2025-03
'url': 'https://www.zdf.de/sport/das-aktuelle-sportstudio',
'info_dict': {
'id': 'das-aktuelle-sportstudio-220',
'title': 'das aktuelle sportstudio',
'description': 'md5:e46c785324238a03edcf8b301c5fd5dc',
},
'playlist_mincount': 25,
}, {
# Playlist, current URL
'url': 'https://www.zdf.de/sport/das-aktuelle-sportstudio-220',
'info_dict': {
'id': 'das-aktuelle-sportstudio-220',
'title': 'das aktuelle sportstudio',
'description': 'md5:e46c785324238a03edcf8b301c5fd5dc',
},
'playlist_mincount': 25,
}, {
# Standalone video (i.e. not part of a playlist), collection URL
'add_ie': [ZDFIE.ie_key()],
'url': 'https://www.zdf.de/dokus/sylt---deutschlands-edles-nordlicht-movie-100',
'info_dict': {
'id': 'sylt-deutschlands-edles-nordlicht-100',
'ext': 'mp4',
'title': 'Sylt - Deutschlands edles Nordlicht',
'description': 'md5:35407b810c2e1e33efbe15ef6e4c06c3',
'duration': 810.0,
'thumbnail': 'https://www.zdf.de/assets/sylt-118~original?cb=1613992485011',
'series': 'Sylt - Deutschlands edles Nordlicht',
'series_id': 'sylt---deutschlands-edles-nordlicht-movie-100',
'timestamp': 1612462500,
'upload_date': '20210204',
'_old_archive_ids': ['zdf 210402_1915_sendung_dok'],
},
'params': {'skip_download': True},
}, {
'url': 'https://www.zdf.de/gesellschaft/aktenzeichen-xy-ungeloest',
'info_dict': {
'id': 'aktenzeichen-xy-ungeloest-110',
'title': 'Aktenzeichen XY... Ungelöst',
'description': 'md5:b79ac0d64b979e53cbe510c0ca9cb7be',
},
'playlist_mincount': 2,
}, {
'url': 'https://www.zdf.de/serien/taunuskrimi/',
'info_dict': {
'id': 'taunuskrimi-100',
'title': 'Taunuskrimi',
'description': 'md5:ee7204e9c625c3b611d1274f9d0e3070',
},
'playlist_mincount': 8,
}, {
'url': 'https://www.zdf.de/serien/taunuskrimi/?staffel=1',
'info_dict': {
'id': 'taunuskrimi-100-s1',
'title': 'Taunuskrimi - Season 1',
'description': 'md5:ee7204e9c625c3b611d1274f9d0e3070',
},
'playlist_count': 7,
}, {
'url': 'https://www.zdf.de/magazine/heute-journal-104',
'info_dict': {
'id': 'heute-journal-104',
'title': 'heute journal',
'description': 'md5:6edad39189abf8431795d3d6d7f986b3',
},
'playlist_mincount': 500,
}, {
'url': 'https://www.zdf.de/magazine/heute-journal-104?staffel=2024',
'info_dict': {
'id': 'heute-journal-104-s2024',
'title': 'heute journal - Season 2024',
'description': 'md5:6edad39189abf8431795d3d6d7f986b3',
},
'playlist_count': 242,
'skip': 'Video count changes daily, needs support for playlist_maxcount',
}]
_PAGE_SIZE = 24
@classmethod
def suitable(cls, url):
    # Single-video URLs belong to ZDFIE; only claim what it rejects.
    if ZDFIE.suitable(url):
        return False
    return super().suitable(url)
def _fetch_page(self, playlist_id, canonical_id, season_idx, season_number, page_number, cursor=None):
    """Fetch one page of a season's episode list via a persisted GraphQL query.

    cursor is the 'endCursor' from the previous page, or None for the
    first page of a season.
    """
    return self._download_graphql(
        playlist_id, f'season {season_number} page {page_number} JSON', query={
            'operationName': 'seasonByCanonical',
            # filter_dict drops the 'episodesAfter' key when cursor is None
            'variables': json.dumps(filter_dict({
                'seasonIndex': season_idx,
                'canonical': canonical_id,
                'episodesPageSize': self._PAGE_SIZE,
                'episodesAfter': cursor,
            })),
            # Persisted-query hash registered with the backend for
            # 'seasonByCanonical'; no full query text is sent.
            'extensions': json.dumps({
                'persistedQuery': {
                    'version': 1,
                    'sha256Hash': '9412a0f4ac55dc37d46975d461ec64bfd14380d815df843a1492348f77b5c99a',
                },
            }),
        })['data']['smartCollectionByCanonical']
def _entries(self, playlist_id, canonical_id, season_numbers, requested_season_number):
    """Yield url_result dicts for every episode, optionally only one season."""
    for season_idx, season_number in enumerate(season_numbers):
        if requested_season_number is not None and requested_season_number != season_number:
            continue
        cursor = None
        for page_number in itertools.count(1):
            page = self._fetch_page(
                playlist_id, canonical_id, season_idx, season_number, page_number, cursor)
            nodes = traverse_obj(page, ('seasons', 'nodes', ...))
            # Only episodes with a valid sharingUrl can be delegated to ZDFIE.
            for episode in traverse_obj(nodes, (
                ..., 'episodes', 'nodes', lambda _, v: url_or_none(v['sharingUrl']),
            )):
                yield self.url_result(
                    episode['sharingUrl'], ZDFIE,
                    **traverse_obj(episode, {
                        'id': ('canonical', {str}),
                        'title': ('teaser', 'title', {str}),
                        'description': (('leadParagraph', ('teaser', 'description')), any, {str}),
                        'timestamp': ('editorialDate', {parse_iso8601}),
                        'episode_number': ('episodeInfo', 'episodeNumber', {int_or_none}),
                        'season_number': ('episodeInfo', 'seasonNumber', {int_or_none}),
                    }))
            # Cursor pagination: stop when there is no next page or no cursor.
            page_info = traverse_obj(nodes, (-1, 'episodes', 'pageInfo', {dict})) or {}
            if not page_info.get('hasNextPage') or not page_info.get('endCursor'):
                break
            cursor = page_info['endCursor']
def _real_extract(self, url):
canonical_id = self._match_id(url)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | true |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/trunews.py | yt_dlp/extractor/trunews.py | from .common import InfoExtractor
class TruNewsIE(InfoExtractor):
    """Resolve trunews.com stream slugs to Zype-hosted videos."""
    _VALID_URL = r'https?://(?:www\.)?trunews\.com/stream/(?P<id>[^/?#&]+)'
    _TEST = {
        'url': 'https://www.trunews.com/stream/will-democrats-stage-a-circus-during-president-trump-s-state-of-the-union-speech',
        'info_dict': {
            'id': '5c5a21e65d3c196e1c0020cc',
            'display_id': 'will-democrats-stage-a-circus-during-president-trump-s-state-of-the-union-speech',
            'ext': 'mp4',
            'title': "Will Democrats Stage a Circus During President Trump's State of the Union Speech?",
            'description': 'md5:c583b72147cc92cf21f56a31aff7a670',
            'duration': 3685,
            'timestamp': 1549411440,
            'upload_date': '20190206',
        },
        'add_ie': ['Zype'],
    }
    _ZYPE_TEMPL = 'https://player.zype.com/embed/%s.js?api_key=X5XnahkjCwJrT_l5zUqypnaLEObotyvtUKJWWlONxDoHVjP8vqxlArLV8llxMbyt'

    def _real_extract(self, url):
        slug = self._match_id(url)
        # Look the slug up in the Zype catalogue; the first hit's _id is
        # the Zype video id.
        search = self._download_json(
            'https://api.zype.com/videos', slug, query={
                'app_key': 'PUVKp9WgGUb3-JUw6EqafLx8tFVP6VKZTWbUOR-HOm__g4fNDt1bCsm_LgYf_k9H',
                'per_page': 1,
                'active': 'true',
                'friendly_title': slug,
            })
        zype_id = search['response'][0]['_id']
        return self.url_result(self._ZYPE_TEMPL % zype_id, 'Zype', zype_id)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/adobeconnect.py | yt_dlp/extractor/adobeconnect.py | import urllib.parse
from .common import InfoExtractor
class AdobeConnectIE(InfoExtractor):
    """Extract RTMP stream formats from Adobe Connect recording pages."""
    _VALID_URL = r'https?://\w+\.adobeconnect\.com/(?P<id>[\w-]+)'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        title = self._html_extract_title(webpage)
        # The player parameters live in the query string of the swfUrl
        # assignment embedded in the page.
        swf_url = self._search_regex(r"swfUrl\s*=\s*'([^']+)'", webpage, 'swf url')
        params = urllib.parse.parse_qs(swf_url.split('?')[1])
        is_live = params.get('isLive', ['false'])[0] == 'true'

        def build_format(conn):
            # One RTMP format per comma-separated connection string.
            return {
                'format_id': conn.split('://')[0],
                'app': urllib.parse.quote('?' + conn.split('?')[1] + 'flvplayerapp/' + params['appInstance'][0]),
                'ext': 'flv',
                'play_path': 'mp4:' + params['streamName'][0],
                'rtmp_conn': 'S:' + params['ticket'][0],
                'rtmp_live': is_live,
                'url': conn,
            }

        formats = [build_format(conn) for conn in params['conStrings'][0].split(',')]

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'is_live': is_live,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/allocine.py | yt_dlp/extractor/allocine.py | from .common import InfoExtractor
from ..utils import (
int_or_none,
qualities,
remove_end,
strip_or_none,
try_get,
unified_timestamp,
url_basename,
)
class AllocineIE(InfoExtractor):
    """Extractor for AlloCiné trailers and videos (article, video and film pages)."""
    _VALID_URL = r'https?://(?:www\.)?allocine\.fr/(?:article|video|film)/(?:fichearticle_gen_carticle=|player_gen_cmedia=|fichefilm_gen_cfilm=|video-)(?P<id>[0-9]+)(?:\.html)?'
    _TESTS = [{
        'url': 'http://www.allocine.fr/article/fichearticle_gen_carticle=18635087.html',
        'md5': '0c9fcf59a841f65635fa300ac43d8269',
        'info_dict': {
            'id': '19546517',
            'display_id': '18635087',
            'ext': 'mp4',
            'title': 'Astérix - Le Domaine des Dieux Teaser VF',
            'description': 'md5:4a754271d9c6f16c72629a8a993ee884',
            'thumbnail': r're:http://.*\.jpg',
            'duration': 39,
            'timestamp': 1404273600,
            'upload_date': '20140702',
            'view_count': int,
        },
    }, {
        'url': 'http://www.allocine.fr/video/player_gen_cmedia=19540403&cfilm=222257.html',
        'md5': 'd0cdce5d2b9522ce279fdfec07ff16e0',
        'info_dict': {
            'id': '19540403',
            'display_id': '19540403',
            'ext': 'mp4',
            'title': 'Planes 2 Bande-annonce VF',
            'description': 'Regardez la bande annonce du film Planes 2 (Planes 2 Bande-annonce VF). Planes 2, un film de Roberts Gannaway',
            'thumbnail': r're:http://.*\.jpg',
            'duration': 69,
            'timestamp': 1385659800,
            'upload_date': '20131128',
            'view_count': int,
        },
    }, {
        'url': 'http://www.allocine.fr/video/player_gen_cmedia=19544709&cfilm=181290.html',
        'md5': '101250fb127ef9ca3d73186ff22a47ce',
        'info_dict': {
            'id': '19544709',
            'display_id': '19544709',
            'ext': 'mp4',
            'title': 'Dragons 2 - Bande annonce finale VF',
            'description': 'md5:6cdd2d7c2687d4c6aafe80a35e17267a',
            'thumbnail': r're:http://.*\.jpg',
            'duration': 144,
            'timestamp': 1397589900,
            'upload_date': '20140415',
            'view_count': int,
        },
    }, {
        'url': 'http://www.allocine.fr/video/video-19550147/',
        'md5': '3566c0668c0235e2d224fd8edb389f67',
        'info_dict': {
            'id': '19550147',
            'ext': 'mp4',
            'title': 'Faux Raccord N°123 - Les gaffes de Cliffhanger',
            'description': 'md5:bc734b83ffa2d8a12188d9eb48bb6354',
            'thumbnail': r're:http://.*\.jpg',
        },
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        formats = []
        quality = qualities(['ld', 'md', 'hd'])

        # Newer pages embed a JSON player model in a data-model attribute.
        model = self._html_search_regex(
            r'data-model="([^"]+)"', webpage, 'data model', default=None)
        if model:
            model_data = self._parse_json(model, display_id)
            video = model_data['videos'][0]
            title = video['title']
            for video_url in video['sources'].values():
                # Source basenames are '<media id>_<quality>_...'.
                video_id, format_id = url_basename(video_url).split('_')[:2]
                formats.append({
                    'format_id': format_id,
                    'quality': quality(format_id),
                    'url': video_url,
                })
            duration = int_or_none(video.get('duration'))
            view_count = int_or_none(video.get('view_count'))
            timestamp = unified_timestamp(try_get(
                video, lambda x: x['added_at']['date'], str))
        else:
            # Legacy pages: query the AcVisiondata API with the page id.
            video_id = display_id
            media_data = self._download_json(
                f'http://www.allocine.fr/ws/AcVisiondataV5.ashx?media={video_id}', display_id)
            # FIX: the suffix belongs to remove_end(), not strip_or_none();
            # the old call invoked remove_end() with a single argument,
            # raising TypeError whenever this branch was taken.
            title = remove_end(strip_or_none(self._html_extract_title(webpage)), ' - AlloCiné')
            for key, value in media_data['video'].items():
                # Format URLs are stored under 'ldPath'/'mdPath'/'hdPath' keys.
                if not key.endswith('Path'):
                    continue
                format_id = key[:-len('Path')]
                formats.append({
                    'format_id': format_id,
                    'quality': quality(format_id),
                    'url': value,
                })
            duration, view_count, timestamp = [None] * 3

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': self._og_search_description(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
            'duration': duration,
            'timestamp': timestamp,
            'view_count': view_count,
            'formats': formats,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/rbgtum.py | yt_dlp/extractor/rbgtum.py | import re
from .common import InfoExtractor
from ..utils import ExtractorError, parse_qs, remove_start, traverse_obj
class RbgTumIE(InfoExtractor):
    """Extractor for single lecture recordings on TUM-Live (live.rbg.tum.de / tum.live)."""
    _VALID_URL = r'https?://(?:live\.rbg\.tum\.de|tum\.live)/w/(?P<id>[^?#]+)'
    _TESTS = [{
        # Combined view
        'url': 'https://live.rbg.tum.de/w/cpp/22128',
        'md5': '53a5e7b3e07128e33bbf36687fe1c08f',
        'info_dict': {
            'id': 'cpp/22128',
            'ext': 'mp4',
            'title': 'Lecture: October 18. 2022',
            'series': 'Concepts of C++ programming (IN2377)',
        },
    }, {
        # Presentation only
        'url': 'https://live.rbg.tum.de/w/I2DL/12349/PRES',
        'md5': '36c584272179f3e56b0db5d880639cba',
        'info_dict': {
            'id': 'I2DL/12349/PRES',
            'ext': 'mp4',
            'title': 'Lecture 3: Introduction to Neural Networks',
            'series': 'Introduction to Deep Learning (IN2346)',
        },
    }, {
        # Camera only
        'url': 'https://live.rbg.tum.de/w/fvv-info/16130/CAM',
        'md5': 'e04189d92ff2f56aedf5cede65d37aad',
        'info_dict': {
            'id': 'fvv-info/16130/CAM',
            'ext': 'mp4',
            'title': 'Fachschaftsvollversammlung',
            'series': 'Fachschaftsvollversammlung Informatik',
        },
    }, {
        'url': 'https://tum.live/w/linalginfo/27102',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The page embeds the HLS master playlist URL as a quoted string.
        m3u8 = self._html_search_regex(r'"(https://[^"]+\.m3u8[^"]*)', webpage, 'm3u8')
        lecture_title = self._html_search_regex(r'<h1[^>]*>([^<]+)</h1>', webpage, 'title', fatal=False)
        # The <title> tag is prefixed with 'TUM-Live | '; strip it to get
        # the series name.
        lecture_series_title = remove_start(self._html_extract_title(webpage), 'TUM-Live | ')

        formats = self._extract_m3u8_formats(m3u8, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls')

        return {
            'id': video_id,
            'title': lecture_title,
            'series': lecture_series_title,
            'formats': formats,
        }
class RbgTumCourseIE(InfoExtractor):
    """Playlist extractor for TUM-Live course pages (legacy /old/course/ URLs)."""
    _VALID_URL = r'https?://(?P<hostname>(?:live\.rbg\.tum\.de|tum\.live))/old/course/(?P<id>(?P<year>\d+)/(?P<term>\w+)/(?P<slug>[^/?#]+))'
    _TESTS = [{
        'url': 'https://live.rbg.tum.de/old/course/2022/S/fpv',
        'info_dict': {
            'title': 'Funktionale Programmierung und Verifikation (IN0003)',
            'id': '2022/S/fpv',
        },
        'params': {
            'noplaylist': False,
        },
        'playlist_count': 13,
    }, {
        'url': 'https://live.rbg.tum.de/old/course/2022/W/set',
        'info_dict': {
            'title': 'SET FSMPIC',
            'id': '2022/W/set',
        },
        'params': {
            'noplaylist': False,
        },
        'playlist_count': 6,
    }, {
        'url': 'https://tum.live/old/course/2023/S/linalginfo',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        course_id, hostname, year, term, slug = self._match_valid_url(url).group('id', 'hostname', 'year', 'term', 'slug')
        # Preferred path: the JSON API lists the course streams directly.
        meta = self._download_json(
            f'https://{hostname}/api/courses/{slug}/', course_id, fatal=False,
            query={'year': year, 'term': term}) or {}
        lecture_series_title = meta.get('Name')
        lectures = [self.url_result(f'https://{hostname}/w/{slug}/{stream_id}', RbgTumIE)
                    for stream_id in traverse_obj(meta, ('Streams', ..., 'ID'))]

        if not lectures:
            # Fallback: scrape lecture links (/w/<slug>/<id>) from the HTML.
            webpage = self._download_webpage(url, course_id)
            lecture_series_title = remove_start(self._html_extract_title(webpage), 'TUM-Live | ')
            lectures = [self.url_result(f'https://{hostname}{lecture_path}', RbgTumIE)
                        for lecture_path in re.findall(r'href="(/w/[^/"]+/[^/"]+)"', webpage)]

        return self.playlist_result(lectures, course_id, lecture_series_title)
class RbgTumNewCourseIE(InfoExtractor):
    """Handle redesigned TUM-Live course URLs (query-parameter form) by
    redirecting to the legacy /old/course/ URL handled by RbgTumCourseIE."""
    _VALID_URL = r'https?://(?P<hostname>(?:live\.rbg\.tum\.de|tum\.live))/\?'
    _TESTS = [{
        'url': 'https://live.rbg.tum.de/?year=2022&term=S&slug=fpv&view=3',
        'info_dict': {
            'title': 'Funktionale Programmierung und Verifikation (IN0003)',
            'id': '2022/S/fpv',
        },
        'params': {
            'noplaylist': False,
        },
        'playlist_count': 13,
    }, {
        'url': 'https://live.rbg.tum.de/?year=2022&term=W&slug=set&view=3',
        'info_dict': {
            'title': 'SET FSMPIC',
            'id': '2022/W/set',
        },
        'params': {
            'noplaylist': False,
        },
        'playlist_count': 6,
    }, {
        'url': 'https://tum.live/?year=2023&term=S&slug=linalginfo&view=3',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        params = parse_qs(url)
        # All three parameters are mandatory to rebuild the legacy URL.
        missing = [name for name in ('year', 'term', 'slug') if not params.get(name)]
        if missing:
            raise ExtractorError(f'Input URL is missing query parameters: {", ".join(missing)}')
        hostname = self._match_valid_url(url).group('hostname')
        year = params['year'][0]
        term = params['term'][0]
        slug = params['slug'][0]
        return self.url_result(f'https://{hostname}/old/course/{year}/{term}/{slug}', RbgTumCourseIE)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/viddler.py | yt_dlp/extractor/viddler.py | from .common import InfoExtractor
from ..utils import (
float_or_none,
int_or_none,
)
class ViddlerIE(InfoExtractor):
    """Extractor for Viddler videos (marked non-working; kept for reference)."""
    _WORKING = False
    _VALID_URL = r'https?://(?:www\.)?viddler\.com/(?:v|embed|player)/(?P<id>[a-z0-9]+)(?:.+?\bsecret=(\d+))?'
    _EMBED_REGEX = [r'<(?:iframe[^>]+?src|param[^>]+?value)=(["\'])(?P<url>(?:https?:)?//(?:www\.)?viddler\.com/(?:embed|player)/.+?)\1']
    _TESTS = [{
        'url': 'http://www.viddler.com/v/43903784',
        'md5': '9eee21161d2c7f5b39690c3e325fab2f',
        'info_dict': {
            'id': '43903784',
            'ext': 'mov',
            'title': 'Video Made Easy',
            'description': 'md5:6a697ebd844ff3093bd2e82c37b409cd',
            'uploader': 'viddler',
            'timestamp': 1335371429,
            'upload_date': '20120425',
            'duration': 100.89,
            'thumbnail': r're:https?://.+\.jpg',
            'view_count': int,
            'comment_count': int,
            'categories': ['video content', 'high quality video', 'video made easy', 'how to produce video with limited resources', 'viddler'],
        },
        'skip': 'Invalid URL',
    }, {
        'url': 'http://www.viddler.com/v/4d03aad9/',
        'md5': 'f12c5a7fa839c47a79363bfdf69404fb',
        'info_dict': {
            'id': '4d03aad9',
            'ext': 'ts',
            'title': 'WALL-TO-GORTAT',
            'upload_date': '20150126',
            'uploader': 'deadspin',
            'timestamp': 1422285291,
            'view_count': int,
            'comment_count': int,
        },
    }, {
        'url': 'http://www.viddler.com/player/221ebbbd/0/',
        'md5': '740511f61d3d1bb71dc14a0fe01a1c10',
        'info_dict': {
            'id': '221ebbbd',
            'ext': 'mov',
            'title': 'LETeens-Grammar-snack-third-conditional',
            'description': ' ',
            'upload_date': '20140929',
            'uploader': 'BCLETeens',
            'timestamp': 1411997190,
            'view_count': int,
            'comment_count': int,
        },
        'skip': 'Invalid URL',
    }, {
        # secret protected
        'url': 'http://www.viddler.com/v/890c0985?secret=34051570',
        'info_dict': {
            'id': '890c0985',
            'ext': 'mp4',
            'title': 'Complete Property Training - Traineeships',
            'description': ' ',
            'upload_date': '20130606',
            'uploader': 'TiffanyBowtell',
            'timestamp': 1370496993,
            'view_count': int,
            'comment_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }]
    _WEBPAGE_TESTS = [{
        'url': 'https://deadspin.com/i-cant-stop-watching-john-wall-chop-the-nuggets-with-th-1681801597/',
        'info_dict': {
            'id': '4d03aad9',
            'ext': 'mp4',
            'title': 'WALL-TO-GORTAT',
        },
        'skip': 'Site no longer embeds Viddler',
    }]

    def _real_extract(self, url):
        video_id, secret = self._match_valid_url(url).groups()

        query = {
            'video_id': video_id,
            'key': 'v0vhrt7bg2xq1vyxhkct',
        }
        if secret:
            query['secret'] = secret

        data = self._download_json(
            'http://api.viddler.com/api/v2/viddler.videos.getPlaybackDetails.json',
            video_id, headers={'Referer': url}, query=query)['video']

        formats = []
        for filed in data['files']:
            # Skip files that are still processing.
            if filed.get('status', 'ready') != 'ready':
                continue
            format_id = filed.get('profile_id') or filed['profile_name']
            # Base format; 'cdn' and 'html5' variants below are derived
            # from whichever dict 'f' last refers to.
            f = {
                'format_id': format_id,
                'format_note': filed['profile_name'],
                'url': self._proto_relative_url(filed['url']),
                'width': int_or_none(filed.get('width')),
                'height': int_or_none(filed.get('height')),
                'filesize': int_or_none(filed.get('size')),
                'ext': filed.get('ext'),
                'source_preference': -1,
            }
            formats.append(f)
            if filed.get('cdn_url'):
                f = f.copy()
                f['url'] = self._proto_relative_url(filed['cdn_url'], 'http:')
                f['format_id'] = format_id + '-cdn'
                f['source_preference'] = 1
                formats.append(f)
            if filed.get('html5_video_source'):
                # Note: if a cdn variant was added above, the html5 variant
                # is copied from it (f was rebound), not from the base dict.
                f = f.copy()
                f['url'] = self._proto_relative_url(filed['html5_video_source'])
                f['format_id'] = format_id + '-html5'
                f['source_preference'] = 0
                formats.append(f)

        categories = [
            t.get('text') for t in data.get('tags', []) if 'text' in t]

        return {
            'id': video_id,
            'title': data['title'],
            'formats': formats,
            'description': data.get('description'),
            'timestamp': int_or_none(data.get('upload_time')),
            'thumbnail': self._proto_relative_url(data.get('thumbnail_url')),
            'uploader': data.get('author'),
            'duration': float_or_none(data.get('length')),
            'view_count': int_or_none(data.get('view_count')),
            'comment_count': int_or_none(data.get('comment_count')),
            'categories': categories,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/lastfm.py | yt_dlp/extractor/lastfm.py | import itertools
import re
from .common import InfoExtractor
from ..utils import int_or_none, parse_qs, traverse_obj
class LastFMPlaylistBaseIE(InfoExtractor):
    """Shared pagination logic for Last.fm playlist-style pages."""

    def _entries(self, url, playlist_id):
        # An explicit ?page=N in the input URL pins extraction to that page.
        requested_page = traverse_obj(parse_qs(url), ('page', -1, {int_or_none}))
        page_num = requested_page or 1
        while True:
            webpage = self._download_webpage(
                url, playlist_id, f'Downloading page {page_num}', query={'page': page_num})
            found = re.findall(r'data-youtube-url="([^"]+)"', webpage)
            yield from found
            # Stop after a pinned page, or when a page yields nothing.
            if requested_page or not found:
                return
            page_num += 1

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        return self.playlist_from_matches(self._entries(url, playlist_id), playlist_id, ie='Youtube')
class LastFMPlaylistIE(LastFMPlaylistBaseIE):
    """Artist/tag pages on last.fm; entries are the embedded YouTube links."""
    _VALID_URL = r'https?://(?:www\.)?last\.fm/(music|tag)/(?P<id>[^/]+)(?:/[^/]+)?/?(?:[?#]|$)'
    _TESTS = [{
        'url': 'https://www.last.fm/music/Oasis/(What%27s+the+Story)+Morning+Glory%3F',
        'info_dict': {
            'id': 'Oasis',
        },
        'playlist_mincount': 11,
    }, {
        'url': 'https://www.last.fm/music/Oasis',
        'only_matching': True,
    }, {
        'url': 'https://www.last.fm/music/Oasis/',
        'only_matching': True,
    }, {
        'url': 'https://www.last.fm/music/Oasis?top_tracks_date_preset=ALL#top-tracks',
        'only_matching': True,
    }, {
        'url': 'https://www.last.fm/music/Oasis/+tracks',
        'only_matching': True,
    }, {
        'url': 'https://www.last.fm/music/Oasis/+tracks?page=2',
        'only_matching': True,
    }, {
        'url': 'https://www.last.fm/music/Oasis/+tracks?date_preset=LAST_90_DAYS#top-tracks',
        'only_matching': True,
    }, {
        'url': 'https://www.last.fm/tag/rock',
        'only_matching': True,
    }, {
        'url': 'https://www.last.fm/tag/rock/tracks',
        'only_matching': True,
    }]
class LastFMUserIE(LastFMPlaylistBaseIE):
    """User playlist pages on last.fm; pagination handled by the base class."""
    _VALID_URL = r'https?://(?:www\.)?last\.fm/user/[^/]+/playlists/(?P<id>[^/#?]+)'
    _TESTS = [{
        'url': 'https://www.last.fm/user/mehq/playlists/12319471',
        'info_dict': {
            'id': '12319471',
        },
        'playlist_count': 30,
    }, {
        'url': 'https://www.last.fm/user/naamloos1/playlists/12543760',
        'info_dict': {
            'id': '12543760',
        },
        'playlist_mincount': 80,
    }, {
        'url': 'https://www.last.fm/user/naamloos1/playlists/12543760?page=3',
        'info_dict': {
            'id': '12543760',
        },
        'playlist_count': 32,
    }]
class LastFMIE(InfoExtractor):
    """Single track pages on last.fm; resolves to the embedded YouTube video."""
    _VALID_URL = r'https?://(?:www\.)?last\.fm/music(?:/[^/]+){2}/(?P<id>[^/#?]+)'
    _TESTS = [{
        'url': 'https://www.last.fm/music/Oasis/_/Wonderwall',
        'md5': '9c4a70c2e84c03d54fe24229b9e13b7b',
        'info_dict': {
            'id': '6hzrDeceEKc',
            'ext': 'mp4',
            'title': 'Oasis - Wonderwall (Official Video)',
            'thumbnail': r're:^https?://i.ytimg.com/.*\.jpg$',
            'description': 'md5:0848669853c10687cc28e88b5756738f',
            'uploader': 'Oasis',
            'uploader_id': 'oasisinetofficial',
            'upload_date': '20080207',
            'album': '(What\'s The Story) Morning Glory? (Remastered)',
            'track': 'Wonderwall (Remastered)',
            'channel_id': 'UCUDVBtnOQi4c7E8jebpjc9Q',
            'view_count': int,
            'live_status': 'not_live',
            'channel_url': 'https://www.youtube.com/channel/UCUDVBtnOQi4c7E8jebpjc9Q',
            'tags': 'count:39',
            'creator': 'Oasis',
            'uploader_url': 're:^https?://www.youtube.com/user/oasisinetofficial',
            'duration': 279,
            'alt_title': 'Wonderwall (Remastered)',
            'age_limit': 0,
            'channel': 'Oasis',
            'channel_follower_count': int,
            'categories': ['Music'],
            'availability': 'public',
            'like_count': int,
            'playable_in_embed': True,
            'artist': 'Oasis',
        },
        'add_ie': ['Youtube'],
    }, {
        'url': 'https://www.last.fm/music/Oasis/_/Don%27t+Look+Back+In+Anger+-+Remastered/',
        'only_matching': True,
    }, {
        'url': 'https://www.last.fm/music/Guns+N%27+Roses/_/Sweet+Child+o%27+Mine',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The href of the "header-new-playlink" element is the YouTube URL.
        player_url = self._search_regex(r'(?s)class="header-new-playlink"\s+href="([^"]+)"', webpage, 'player_url')
        return self.url_result(player_url, 'Youtube')
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/kommunetv.py | yt_dlp/extractor/kommunetv.py | from .common import InfoExtractor
from ..utils import update_url
class KommunetvIE(InfoExtractor):
    """Extractor for archived council streams on *.kommunetv.no sites."""
    _VALID_URL = r'https?://(?P<host>\w+)\.kommunetv\.no/archive/(?P<id>\w+)'
    _TEST = {
        'url': 'https://oslo.kommunetv.no/archive/921',
        'md5': '5f102be308ee759be1e12b63d5da4bbc',
        'info_dict': {
            'id': '921',
            'title': 'Bystyremøte',
            'ext': 'mp4',
        },
    }

    def _real_extract(self, url):
        host, video_id = self._match_valid_url(url).group('host', 'id')
        headers = {
            'Accept': 'application/json',
        }
        # FIX: query the same municipality's API as the input URL; the old
        # code hard-coded oslo.kommunetv.no even though _VALID_URL matches
        # any *.kommunetv.no subdomain.
        data = self._download_json(
            f'https://{host}.kommunetv.no/api/streams?streamType=1&id={video_id}',
            video_id, headers=headers)
        title = data['stream']['title']
        file = data['playlist'][0]['playlist'][0]['file']
        # Drop query/fragment before handing the URL to the HLS extractor.
        stream_url = update_url(file, query=None, fragment=None)
        formats = self._extract_m3u8_formats(
            stream_url, video_id, ext='mp4', entry_protocol='m3u8_native',
            m3u8_id='hls', fatal=False)
        return {
            'id': video_id,
            'formats': formats,
            'title': title,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/bostonglobe.py | yt_dlp/extractor/bostonglobe.py | import re
from .common import InfoExtractor
from ..utils import (
extract_attributes,
)
class BostonGlobeIE(InfoExtractor):
    """Extract Brightcove embeds from bostonglobe.com articles, falling back
    to the Generic extractor for other embedded players."""
    _VALID_URL = r'(?i)https?://(?:www\.)?bostonglobe\.com/.*/(?P<id>[^/]+)/\w+(?:\.html)?'
    _TESTS = [
        {
            'url': 'http://www.bostonglobe.com/metro/2017/02/11/tree-finally-succumbs-disease-leaving-hole-neighborhood/h1b4lviqzMTIn9sVy8F3gP/story.html',
            'md5': '0a62181079c85c2d2b618c9a738aedaf',
            'info_dict': {
                'title': 'A tree finally succumbs to disease, leaving a hole in a neighborhood',
                'id': '5320421710001',
                'ext': 'mp4',
                'description': 'It arrived as a sapling when the Back Bay was in its infancy, a spindly American elm tamped down into a square of dirt cut into the brick sidewalk of 1880s Marlborough Street, no higher than the first bay window of the new brownstone behind it.',
                'timestamp': 1486877593,
                'upload_date': '20170212',
                'uploader_id': '245991542',
            },
        },
        {
            # Embedded youtube video; we hand it off to the Generic extractor.
            'url': 'https://www.bostonglobe.com/lifestyle/names/2017/02/17/does-ben-affleck-play-matt-damon-favorite-version-batman/ruqkc9VxKBYmh5txn1XhSI/story.html',
            'md5': '582b40327089d5c0c949b3c54b13c24b',
            'info_dict': {
                'title': "Who Is Matt Damon's Favorite Batman?",
                'id': 'ZW1QCnlA6Qc',
                'ext': 'mp4',
                'upload_date': '20170217',
                'description': 'md5:3b3dccb9375867e0b4d527ed87d307cb',
                'uploader': 'The Late Late Show with James Corden',
                'uploader_id': 'TheLateLateShow',
            },
            'expected_warnings': ['404'],
        },
    ]

    def _real_extract(self, url):
        page_id = self._match_id(url)
        webpage = self._download_webpage(url, page_id)

        page_title = self._og_search_title(webpage, default=None)

        # <video data-brightcove-video-id="5320421710001" data-account="245991542" data-player="SJWAiyYWg" data-embed="default" class="video-js" controls itemscope itemtype="http://schema.org/VideoObject">
        entries = []
        for video in re.findall(r'(?i)(<video[^>]+>)', webpage):
            attrs = extract_attributes(video)

            video_id = attrs.get('data-brightcove-video-id')
            account_id = attrs.get('data-account')
            player_id = attrs.get('data-player')
            embed = attrs.get('data-embed')

            # All four attributes are needed to build a Brightcove player URL.
            if video_id and account_id and player_id and embed:
                entries.append(
                    f'http://players.brightcove.net/{account_id}/{player_id}_{embed}/index.html?videoId={video_id}')

        # Guard-clause returns instead of the old len()-based if/elif chain.
        if not entries:
            # No Brightcove embeds; let Generic pick up e.g. YouTube embeds.
            return self.url_result(url, 'Generic')
        if len(entries) == 1:
            return self.url_result(entries[0], 'BrightcoveNew')
        return self.playlist_from_matches(entries, page_id, page_title, ie='BrightcoveNew')
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/neteasemusic.py | yt_dlp/extractor/neteasemusic.py | import hashlib
import itertools
import json
import random
import re
import time
from .common import InfoExtractor
from ..aes import aes_ecb_encrypt, pkcs7_padding
from ..utils import (
ExtractorError,
int_or_none,
join_nonempty,
str_or_none,
strftime_or_none,
traverse_obj,
unified_strdate,
url_or_none,
urljoin,
variadic,
)
class NetEaseMusicBaseIE(InfoExtractor):
# XXX: _extract_formats logic depends on the order of the levels in each tier
_LEVELS = (
'standard', # free tier; 标准; 128kbps mp3 or aac
'higher', # free tier; 192kbps mp3 or aac
'exhigh', # free tier; 极高 (HQ); 320kbps mp3 or aac
'lossless', # VIP tier; 无损 (SQ); 48kHz/16bit flac
'hires', # VIP tier; 高解析度无损 (Hi-Res); 192kHz/24bit flac
'jyeffect', # VIP tier; 高清臻音 (Spatial Audio); 96kHz/24bit flac
'jymaster', # SVIP tier; 超清母带 (Master); 192kHz/24bit flac
'sky', # SVIP tier; 沉浸环绕声 (Surround Audio); flac
)
_API_BASE = 'http://music.163.com/api/'
def _create_eapi_cipher(self, api_path, query_body, cookies):
    """Build the AES-ECB encrypted 'params=' POST payload for NetEase's eapi."""
    payload = json.dumps({**query_body, 'header': cookies}, separators=(',', ':'))
    # The MD5 of this fixed-format message is appended to the plaintext.
    digest = hashlib.md5(
        f'nobody{api_path}use{payload}md5forencrypt'.encode('latin1')).hexdigest()
    plaintext = f'{api_path}-36cd479b6b5-{payload}-36cd479b6b5-{digest}'
    padded = pkcs7_padding(list(plaintext.encode()))
    ciphertext = bytes(aes_ecb_encrypt(padded, list(b'e82ckenh8dichen8')))
    return f'params={ciphertext.hex().upper()}'.encode()
def _download_eapi_json(self, path, video_id, query_body, headers={}, **kwargs):
cookies = {
'osver': 'undefined',
'deviceId': 'undefined',
'appver': '8.0.0',
'versioncode': '140',
'mobilename': 'undefined',
'buildver': '1623435496',
'resolution': '1920x1080',
'__csrf': '',
'os': 'pc',
'channel': 'undefined',
'requestId': f'{int(time.time() * 1000)}_{random.randint(0, 1000):04}',
**traverse_obj(self._get_cookies(self._API_BASE), {
'MUSIC_U': ('MUSIC_U', {lambda i: i.value}),
}),
}
if self._x_forwarded_for_ip:
headers.setdefault('X-Real-IP', self._x_forwarded_for_ip)
return self._download_json(
urljoin('https://interface3.music.163.com/', f'/eapi{path}'), video_id,
data=self._create_eapi_cipher(f'/api{path}', query_body, cookies), headers={
'Referer': 'https://music.163.com',
'Cookie': '; '.join([f'{k}={v}' for k, v in cookies.items()]),
**headers,
}, **kwargs)
def _call_player_api(self, song_id, level):
return self._download_eapi_json(
'/song/enhance/player/url/v1', song_id,
{'ids': f'[{song_id}]', 'level': level, 'encodeType': 'flac'},
note=f'Downloading song URL info: level {level}')
def _extract_formats(self, info):
formats = []
song_id = info['id']
for level in self._LEVELS:
song = traverse_obj(
self._call_player_api(song_id, level), ('data', lambda _, v: url_or_none(v['url']), any))
if not song:
break # Media is not available due to removal or geo-restriction
actual_level = song.get('level')
if actual_level and actual_level != level:
if level in ('lossless', 'jymaster'):
break # We've already extracted the highest level of the user's account tier
continue
formats.append({
'url': song['url'],
'format_id': level,
'vcodec': 'none',
**traverse_obj(song, {
'ext': ('type', {str}),
'abr': ('br', {int_or_none(scale=1000)}),
'filesize': ('size', {int_or_none}),
}),
})
if not actual_level:
break # Only 1 level is available if API does not return a value (netease:program)
if not formats:
self.raise_geo_restricted(
'No media links found; possibly due to geo restriction', countries=['CN'])
return formats
def _query_api(self, endpoint, video_id, note):
result = self._download_json(
f'{self._API_BASE}{endpoint}', video_id, note, headers={'Referer': self._API_BASE})
code = traverse_obj(result, ('code', {int}))
message = traverse_obj(result, ('message', {str})) or ''
if code == -462:
self.raise_login_required(f'Login required to download: {message}')
elif code != 200:
raise ExtractorError(f'Failed to get meta info: {code} {message}')
return result
def _get_entries(self, songs_data, entry_keys=None, id_key='id', name_key='name'):
for song in traverse_obj(songs_data, (
*variadic(entry_keys, (str, bytes, dict, set)),
lambda _, v: int_or_none(v[id_key]) is not None)):
song_id = str(song[id_key])
yield self.url_result(
f'http://music.163.com/#/song?id={song_id}', NetEaseMusicIE,
song_id, traverse_obj(song, (name_key, {str})))
class NetEaseMusicIE(NetEaseMusicBaseIE):
    """Extractor for a single NetEase Cloud Music song (网易云音乐)."""
    IE_NAME = 'netease:song'
    IE_DESC = '网易云音乐'
    _VALID_URL = r'https?://(?:y\.)?music\.163\.com/(?:[#m]/)?song\?.*?\bid=(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'https://music.163.com/#/song?id=550136151',
        'info_dict': {
            'id': '550136151',
            'ext': 'mp3',
            'title': 'It\'s Ok (Live)',
            'creators': 'count:10',
            'timestamp': 1522944000,
            'upload_date': '20180405',
            'description': 'md5:9fd07059c2ccee3950dc8363429a3135',
            'duration': 197,
            'thumbnail': r're:^http.*\.jpg',
            'album': '偶像练习生 表演曲目合集',
            'average_rating': int,
            'album_artists': ['偶像练习生'],
        },
    }, {
        'url': 'http://music.163.com/song?id=17241424',
        'info_dict': {
            'id': '17241424',
            'ext': 'mp3',
            'title': 'Opus 28',
            'upload_date': '20080211',
            'timestamp': 1202745600,
            'duration': 263,
            'thumbnail': r're:^http.*\.jpg',
            'album': 'Piano Solos Vol. 2',
            'album_artist': 'Dustin O\'Halloran',
            'average_rating': int,
            'description': '[00:05.00]纯音乐,请欣赏\n',
            'album_artists': ['Dustin O\'Halloran'],
            'creators': ['Dustin O\'Halloran'],
            'subtitles': {'lyrics': [{'ext': 'lrc'}]},
        },
    }, {
        'url': 'https://y.music.163.com/m/song?app_version=8.8.45&id=95670&uct2=sKnvS4+0YStsWkqsPhFijw%3D%3D&dlt=0846',
        'md5': 'b896be78d8d34bd7bb665b26710913ff',
        'info_dict': {
            'id': '95670',
            'ext': 'mp3',
            'title': '国际歌',
            'upload_date': '19911130',
            'timestamp': 691516800,
            'description': 'md5:1ba2f911a2b0aa398479f595224f2141',
            'subtitles': {'lyrics': [{'ext': 'lrc'}]},
            'duration': 268,
            'alt_title': '伴唱:现代人乐队 合唱:总政歌舞团',
            'thumbnail': r're:^http.*\.jpg',
            'average_rating': int,
            'album': '红色摇滚',
            'album_artist': '侯牧人',
            'creators': ['马备'],
            'album_artists': ['侯牧人'],
        },
    }, {
        'url': 'http://music.163.com/#/song?id=32102397',
        'md5': '3e909614ce09b1ccef4a3eb205441190',
        'info_dict': {
            'id': '32102397',
            'ext': 'mp3',
            'title': 'Bad Blood',
            'creators': ['Taylor Swift', 'Kendrick Lamar'],
            'upload_date': '20150516',
            'timestamp': 1431792000,
            'description': 'md5:21535156efb73d6d1c355f95616e285a',
            'subtitles': {'lyrics': [{'ext': 'lrc'}]},
            'duration': 199,
            'thumbnail': r're:^http.*\.jpg',
            'album': 'Bad Blood',
            'average_rating': int,
            'album_artist': 'Taylor Swift',
        },
        'skip': 'Blocked outside Mainland China',
    }, {
        'note': 'Has translated name.',
        'url': 'http://music.163.com/#/song?id=22735043',
        'info_dict': {
            'id': '22735043',
            'ext': 'mp3',
            'title': '소원을 말해봐 (Genie)',
            'creators': ['少女时代'],
            'upload_date': '20100127',
            'timestamp': 1264608000,
            'description': 'md5:03d1ffebec3139aa4bafe302369269c5',
            'subtitles': {'lyrics': [{'ext': 'lrc'}]},
            'duration': 229,
            'alt_title': '说出愿望吧(Genie)',
            'thumbnail': r're:^http.*\.jpg',
            'average_rating': int,
            'album': 'Oh!',
            'album_artist': '少女时代',
        },
        'skip': 'Blocked outside Mainland China',
    }]

    def _process_lyrics(self, lyrics_info):
        """Build a subtitles dict from the lyric API payload.

        Returns None for instrumental tracks (no lyrics, or the fixed
        placeholder below); otherwise returns LRC tracks for the original
        lyrics and, when a translation exists, the translation plus a
        line-by-line merge of both keyed by LRC timestamp.
        """
        original = traverse_obj(lyrics_info, ('lrc', 'lyric', {str}))
        translated = traverse_obj(lyrics_info, ('tlyric', 'lyric', {str}))
        # The API returns this fixed placeholder lyric for pure-music tracks
        if not original or original == '[99:00.00]纯音乐,请欣赏\n':
            return None
        if not translated:
            return {
                'lyrics': [{'data': original, 'ext': 'lrc'}],
            }
        # Pair original and translated lines by their shared LRC timestamps
        lyrics_expr = r'(\[[0-9]{2}:[0-9]{2}\.[0-9]{2,}\])([^\n]+)'
        original_ts_texts = re.findall(lyrics_expr, original)
        translation_ts_dict = dict(re.findall(lyrics_expr, translated))
        merged = '\n'.join(
            join_nonempty(f'{timestamp}{text}', translation_ts_dict.get(timestamp, ''), delim=' / ')
            for timestamp, text in original_ts_texts)
        return {
            'lyrics_merged': [{'data': merged, 'ext': 'lrc'}],
            'lyrics': [{'data': original, 'ext': 'lrc'}],
            'lyrics_translated': [{'data': translated, 'ext': 'lrc'}],
        }

    def _real_extract(self, url):
        song_id = self._match_id(url)
        # Song metadata comes from the legacy web API (first entry of 'songs')
        info = self._query_api(
            f'song/detail?id={song_id}&ids=%5B{song_id}%5D', song_id, 'Downloading song info')['songs'][0]
        formats = self._extract_formats(info)
        lyrics = self._process_lyrics(self._query_api(
            f'song/lyric?id={song_id}&lv=-1&tv=-1', song_id, 'Downloading lyrics data'))
        # Prefer the merged original+translation text as the description
        lyric_data = {
            'description': traverse_obj(lyrics, (('lyrics_merged', 'lyrics'), 0, 'data'), get_all=False),
            'subtitles': lyrics,
        } if lyrics else {}
        return {
            'id': song_id,
            'formats': formats,
            'alt_title': '/'.join(traverse_obj(info, (('transNames', 'alias'), ...))) or None,
            'creators': traverse_obj(info, ('artists', ..., 'name')) or None,
            'album_artists': traverse_obj(info, ('album', 'artists', ..., 'name')) or None,
            **lyric_data,
            **traverse_obj(info, {
                'title': ('name', {str}),
                'timestamp': ('album', 'publishTime', {int_or_none(scale=1000)}),
                'thumbnail': ('album', 'picUrl', {url_or_none}),
                'duration': ('duration', {int_or_none(scale=1000)}),
                'album': ('album', 'name', {str}),
                'average_rating': ('score', {int_or_none}),
            }),
        }
class NetEaseMusicAlbumIE(NetEaseMusicBaseIE):
    """Extractor for a NetEase Cloud Music album page (网易云音乐 - 专辑)."""
    IE_NAME = 'netease:album'
    IE_DESC = '网易云音乐 - 专辑'
    _VALID_URL = r'https?://music\.163\.com/(?:#/)?album\?id=(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'https://music.163.com/#/album?id=133153666',
        'info_dict': {
            'id': '133153666',
            'title': '桃几的翻唱',
            'upload_date': '20210913',
            'description': '桃几2021年翻唱合集',
            'thumbnail': r're:^http.*\.jpg',
        },
        'playlist_mincount': 12,
    }, {
        'url': 'http://music.163.com/#/album?id=220780',
        'info_dict': {
            'id': '220780',
            'title': 'B\'Day',
            'upload_date': '20060904',
            'description': 'md5:71a74e1d8f392d88cf1bbe48879ad0b0',
            'thumbnail': r're:^http.*\.jpg',
        },
        'playlist_count': 23,
    }]

    def _real_extract(self, url):
        album_id = self._match_id(url)
        # The album page embeds its full track list as JSON in a hidden <textarea>
        page = self._download_webpage(f'https://music.163.com/album?id={album_id}', album_id)
        song_list = self._search_json(
            r'<textarea[^>]+\bid="song-list-pre-data"[^>]*>', page, 'metainfo', album_id,
            end_pattern=r'</textarea>', contains_pattern=r'\[(?s:.+)\]')
        # Album-level metadata is scraped from OpenGraph/meta tags and markup
        title = self._og_search_property('title', page, 'title', fatal=False)
        description = self._html_search_regex(
            (rf'<div[^>]+\bid="album-desc-{suffix}"[^>]*>(.*?)</div>' for suffix in ('more', 'dot')),
            page, 'description', flags=re.S, fatal=False)
        thumbnail = self._og_search_property('image', page, 'thumbnail', fatal=False)
        upload_date = unified_strdate(self._html_search_meta('music:release_date', page, 'date', fatal=False))
        return self.playlist_result(
            self._get_entries(song_list), album_id, title=title, description=description,
            thumbnail=thumbnail, upload_date=upload_date)
class NetEaseMusicSingerIE(NetEaseMusicBaseIE):
    """Extractor for a NetEase Cloud Music artist page (hot-songs playlist)."""
    IE_NAME = 'netease:singer'
    IE_DESC = '网易云音乐 - 歌手'
    _VALID_URL = r'https?://music\.163\.com/(?:#/)?artist\?id=(?P<id>[0-9]+)'
    _TESTS = [{
        'note': 'Singer has aliases.',
        'url': 'http://music.163.com/#/artist?id=10559',
        'info_dict': {
            'id': '10559',
            'title': '张惠妹 - aMEI;阿妹;阿密特',
        },
        'playlist_count': 50,
    }, {
        'note': 'Singer has translated name.',
        'url': 'http://music.163.com/#/artist?id=124098',
        'info_dict': {
            'id': '124098',
            'title': '李昇基 - 이승기',
        },
        'playlist_count': 50,
    }, {
        'note': 'Singer with both translated and alias',
        'url': 'https://music.163.com/#/artist?id=159692',
        'info_dict': {
            'id': '159692',
            'title': '初音ミク - 初音未来;Hatsune Miku',
        },
        'playlist_count': 50,
    }]

    def _real_extract(self, url):
        singer_id = self._match_id(url)
        info = self._query_api(
            f'artist/{singer_id}?id={singer_id}', singer_id, note='Downloading singer data')
        # Playlist title: '<name> - <translated name>;<alias>;...' (suffix only
        # when translations/aliases exist, matching the test titles above)
        name = join_nonempty(
            traverse_obj(info, ('artist', 'name', {str})),
            join_nonempty(*traverse_obj(info, ('artist', ('trans', ('alias', ...)), {str})), delim=';'),
            delim=' - ')
        return self.playlist_result(self._get_entries(info, 'hotSongs'), singer_id, name)
class NetEaseMusicListIE(NetEaseMusicBaseIE):
    """Extractor for NetEase Cloud Music playlists and official charts."""
    IE_NAME = 'netease:playlist'
    IE_DESC = '网易云音乐 - 歌单'
    _VALID_URL = r'https?://music\.163\.com/(?:#/)?(?:playlist|discover/toplist)\?id=(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://music.163.com/#/playlist?id=79177352',
        'info_dict': {
            'id': '79177352',
            'title': 'Billboard 2007 Top 100',
            'description': 'md5:12fd0819cab2965b9583ace0f8b7b022',
            'tags': ['欧美'],
            'uploader': '浑然破灭',
            'uploader_id': '67549805',
            'timestamp': int,
            'upload_date': r're:\d{8}',
        },
        'playlist_mincount': 95,
    }, {
        'note': 'Toplist/Charts sample',
        'url': 'https://music.163.com/#/discover/toplist?id=60198',
        'info_dict': {
            'id': '60198',
            'title': 're:美国Billboard榜 [0-9]{4}-[0-9]{2}-[0-9]{2}',
            'description': '美国Billboard排行榜',
            'tags': ['流行', '欧美', '榜单'],
            'uploader': 'Billboard公告牌',
            'uploader_id': '48171',
            'timestamp': int,
            'upload_date': r're:\d{8}',
        },
        'playlist_count': 100,
    }, {
        'note': 'Toplist/Charts sample',
        'url': 'http://music.163.com/#/discover/toplist?id=3733003',
        'info_dict': {
            'id': '3733003',
            'title': 're:韩国Melon排行榜周榜(?: [0-9]{4}-[0-9]{2}-[0-9]{2})?',
            'description': 'md5:73ec782a612711cadc7872d9c1e134fc',
            'upload_date': '20200109',
            'uploader_id': '2937386',
            'tags': ['韩语', '榜单'],
            'uploader': 'Melon榜单',
            'timestamp': 1578569373,
        },
        'playlist_count': 50,
    }]

    def _real_extract(self, url):
        list_id = self._match_id(url)
        # Playlist details come from the encrypted EAPI (up to 500 tracks)
        info = self._download_eapi_json(
            '/v3/playlist/detail', list_id,
            {'id': list_id, 't': '-1', 'n': '500', 's': '0'},
            note='Downloading playlist info')
        metainfo = traverse_obj(info, ('playlist', {
            'title': ('name', {str}),
            'description': ('description', {str}),
            'tags': ('tags', ..., {str}),
            'uploader': ('creator', 'nickname', {str}),
            'uploader_id': ('creator', 'userId', {str_or_none}),
            'timestamp': ('updateTime', {int_or_none(scale=1000)}),
        }))
        # specialType == 10 marks an official chart; append the snapshot date
        # so successive chart editions get distinct titles
        if traverse_obj(info, ('playlist', 'specialType')) == 10:
            metainfo['title'] = f'{metainfo.get("title")} {strftime_or_none(metainfo.get("timestamp"), "%Y-%m-%d")}'
        return self.playlist_result(self._get_entries(info, ('playlist', 'tracks')), list_id, **metainfo)
class NetEaseMusicMvIE(NetEaseMusicBaseIE):
    """Extractor for a NetEase Cloud Music MV (music video)."""
    IE_NAME = 'netease:mv'
    IE_DESC = '网易云音乐 - MV'
    _VALID_URL = r'https?://music\.163\.com/(?:#/)?mv\?id=(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'https://music.163.com/#/mv?id=10958064',
        'info_dict': {
            'id': '10958064',
            'ext': 'mp4',
            'title': '交换余生',
            'description': 'md5:e845872cff28820642a2b02eda428fea',
            'creators': ['林俊杰'],
            'upload_date': '20200916',
            'thumbnail': r're:http.*\.jpg',
            'duration': 364,
            'view_count': int,
            'like_count': int,
            'comment_count': int,
        },
    }, {
        'url': 'http://music.163.com/#/mv?id=415350',
        'info_dict': {
            'id': '415350',
            'ext': 'mp4',
            'title': '이럴거면 그러지말지',
            'description': '白雅言自作曲唱甜蜜爱情',
            'creators': ['白娥娟'],
            'upload_date': '20150520',
            'thumbnail': r're:http.*\.jpg',
            'duration': 216,
            'view_count': int,
            'like_count': int,
            'comment_count': int,
        },
        'skip': 'Blocked outside Mainland China',
    }, {
        'note': 'This MV has multiple creators.',
        'url': 'https://music.163.com/#/mv?id=22593543',
        'info_dict': {
            'id': '22593543',
            'ext': 'mp4',
            'title': '老北京杀器',
            'creators': ['秃子2z', '辉子', 'Saber梁维嘉'],
            'duration': 206,
            'upload_date': '20240618',
            'like_count': int,
            'comment_count': int,
            'thumbnail': r're:http.*\.jpg',
            'view_count': int,
        },
    }]

    def _real_extract(self, url):
        mv_id = self._match_id(url)
        info = self._query_api(
            f'mv/detail?id={mv_id}&type=mp4', mv_id, 'Downloading mv info')['data']
        # 'brs' maps a quality label to a direct mp4 URL; the label is treated
        # as the video height (e.g. '720' -> 720p)
        formats = [
            {'url': mv_url, 'ext': 'mp4', 'format_id': f'{brs}p', 'height': int_or_none(brs)}
            for brs, mv_url in info['brs'].items()
        ]
        return {
            'id': mv_id,
            'formats': formats,
            # Fall back to the single 'artistName' when no artist list is given
            'creators': traverse_obj(info, ('artists', ..., 'name')) or [info.get('artistName')],
            **traverse_obj(info, {
                'title': ('name', {str}),
                'description': (('desc', 'briefDesc'), {str}, filter),
                'upload_date': ('publishTime', {unified_strdate}),
                'thumbnail': ('cover', {url_or_none}),
                'duration': ('duration', {int_or_none(scale=1000)}),
                'view_count': ('playCount', {int_or_none}),
                'like_count': ('likeCount', {int_or_none}),
                'comment_count': ('commentCount', {int_or_none}),
            }, get_all=False),
        }
class NetEaseMusicProgramIE(NetEaseMusicBaseIE):
    """Extractor for a NetEase Cloud Music radio program (电台节目).

    A program has a main audio track and may have accompanying songs; with
    accompanying songs it yields a playlist unless --no-playlist is given.
    """
    IE_NAME = 'netease:program'
    IE_DESC = '网易云音乐 - 电台节目'
    _VALID_URL = r'https?://music\.163\.com/(?:#/)?(?:dj|program)\?id=(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://music.163.com/#/program?id=10109055',
        'info_dict': {
            'id': '32593346',
            'ext': 'mp3',
            'title': '不丹足球背后的故事',
            'description': '喜马拉雅人的足球梦 ...',
            'creators': ['大话西藏'],
            'timestamp': 1434179287,
            'upload_date': '20150613',
            'thumbnail': r're:http.*\.jpg',
            'duration': 900,
        },
    }, {
        'note': 'This program has accompanying songs.',
        'url': 'http://music.163.com/#/program?id=10141022',
        'info_dict': {
            'id': '10141022',
            'title': '滚滚电台的有声节目',
            'description': 'md5:8d594db46cc3e6509107ede70a4aaa3b',
            'creators': ['滚滚电台ORZ'],
            'timestamp': 1434450733,
            'upload_date': '20150616',
            'thumbnail': r're:http.*\.jpg',
        },
        'playlist_count': 4,
    }, {
        'note': 'This program has accompanying songs.',
        'url': 'http://music.163.com/#/program?id=10141022',
        'info_dict': {
            'id': '32647209',
            'ext': 'mp3',
            'title': '滚滚电台的有声节目',
            'description': 'md5:8d594db46cc3e6509107ede70a4aaa3b',
            'creators': ['滚滚电台ORZ'],
            'timestamp': 1434450733,
            'upload_date': '20150616',
            'thumbnail': r're:http.*\.jpg',
            'duration': 1104,
        },
        'params': {
            'noplaylist': True,
        },
    }, {
        'url': 'https://music.163.com/#/dj?id=3706179315',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        program_id = self._match_id(url)
        info = self._query_api(
            f'dj/program/detail?id={program_id}', program_id, note='Downloading program info')['program']
        metainfo = traverse_obj(info, {
            'title': ('name', {str}),
            'description': ('description', {str}),
            'creator': ('dj', 'brand', {str}),
            'thumbnail': ('coverUrl', {url_or_none}),
            'timestamp': ('createTime', {int_or_none(scale=1000)}),
        })
        # No accompanying songs, or the user opted out of playlists:
        # extract only the program's main audio track
        if not self._yes_playlist(
                info['songs'] and program_id, info['mainSong']['id'], playlist_label='program', video_label='song'):
            formats = self._extract_formats(info['mainSong'])
            return {
                'id': str(info['mainSong']['id']),
                'formats': formats,
                'duration': traverse_obj(info, ('mainSong', 'duration', {int_or_none(scale=1000)})),
                **metainfo,
            }
        # Playlist mode: main track first, then the accompanying songs
        songs = traverse_obj(info, (('mainSong', ('songs', ...)),))
        return self.playlist_result(self._get_entries(songs), program_id, **metainfo)
class NetEaseMusicDjRadioIE(NetEaseMusicBaseIE):
    """Extractor for a NetEase Cloud Music radio station (all its programs)."""
    IE_NAME = 'netease:djradio'
    IE_DESC = '网易云音乐 - 电台'
    _VALID_URL = r'https?://music\.163\.com/(?:#/)?djradio\?id=(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://music.163.com/#/djradio?id=42',
        'info_dict': {
            'id': '42',
            'title': '声音蔓延',
            'description': 'md5:c7381ebd7989f9f367668a5aee7d5f08',
        },
        'playlist_mincount': 40,
    }
    _PAGE_SIZE = 1000

    def _real_extract(self, url):
        radio_id = self._match_id(url)
        playlist_meta = {}
        results = []
        offset = 0
        # Page through the station's programs until the API reports no more
        while True:
            page = self._query_api(
                f'dj/program/byradio?asc=false&limit={self._PAGE_SIZE}&radioId={radio_id}&offset={offset}',
                radio_id, note=f'Downloading dj programs - {offset}')
            for program in page['programs']:
                results.append(self.url_result(
                    f'http://music.163.com/#/program?id={program["id"]}', NetEaseMusicProgramIE,
                    program['id'], program.get('name')))
            # Station title/description are taken from the first program seen
            if not playlist_meta:
                playlist_meta = traverse_obj(page, ('programs', 0, 'radio', {
                    'title': ('name', {str}),
                    'description': ('desc', {str}),
                }))
            if not page['more']:
                break
            offset += self._PAGE_SIZE
        return self.playlist_result(results, radio_id, **playlist_meta)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/rai.py | yt_dlp/extractor/rai.py | import re
from .common import InfoExtractor
from ..networking import HEADRequest
from ..utils import (
ExtractorError,
GeoRestrictedError,
clean_html,
determine_ext,
filter_dict,
int_or_none,
join_nonempty,
parse_duration,
remove_start,
strip_or_none,
traverse_obj,
try_get,
unified_strdate,
unified_timestamp,
update_url_query,
urljoin,
xpath_text,
)
class RaiBaseIE(InfoExtractor):
    """Shared helpers for the Rai extractors: relinker resolution, m3u8
    manifest fix-ups, direct-MP4 URL synthesis, thumbnails and subtitles."""
    _UUID_RE = r'[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}'
    _GEO_COUNTRIES = ['IT']
    _GEO_BYPASS = False

    def _fix_m3u8_formats(self, media_url, video_id):
        """Extract HLS formats, repairing codec info on malformed manifests."""
        fmts = self._extract_m3u8_formats(
            media_url, video_id, 'mp4', m3u8_id='hls', fatal=False)

        # Fix malformed m3u8 manifests by setting audio-only/video-only formats
        for f in fmts:
            if not f.get('acodec'):
                f['acodec'] = 'mp4a'
            if not f.get('vcodec'):
                f['vcodec'] = 'avc1'
            man_url = f['url']
            if re.search(r'chunklist(?:_b\d+)*_ao[_.]', man_url):  # audio only
                f['vcodec'] = 'none'
            elif re.search(r'chunklist(?:_b\d+)*_vo[_.]', man_url):  # video only
                f['acodec'] = 'none'
            else:  # video+audio
                if f['acodec'] == 'none':
                    f['acodec'] = 'mp4a'
                if f['vcodec'] == 'none':
                    f['vcodec'] = 'avc1'
        return fmts

    def _extract_relinker_info(self, relinker_url, video_id, audio_only=False):
        """Resolve a relinker URL to formats, duration and live status.

        Raises on DRM; reports geo restriction when no formats are found and
        the geo flag is set (the flag alone is unreliable).
        """
        def fix_cdata(s):
            # remove \r\n\t before and after <![CDATA[ ]]> to avoid
            # polluted text with xpath_text
            s = re.sub(r'(\]\]>)[\r\n\t]+(</)', '\\1\\2', s)
            return re.sub(r'(>)[\r\n\t]+(<!\[CDATA\[)', '\\1\\2', s)

        if not re.match(r'https?://', relinker_url):
            return {'formats': [{'url': relinker_url}]}

        # set User-Agent to generic 'Rai' to avoid quality filtering from
        # the media server and get the maximum qualities available
        relinker = self._download_xml(
            relinker_url, video_id, note='Downloading XML metadata',
            transform_source=fix_cdata, query={'output': 64},
            headers={**self.geo_verification_headers(), 'User-Agent': 'Rai'})

        if xpath_text(relinker, './license_url', default='{}') != '{}':
            self.report_drm(video_id)

        is_live = xpath_text(relinker, './is_live', default='N') == 'Y'
        duration = parse_duration(xpath_text(relinker, './duration', default=None))
        media_url = xpath_text(relinker, './url[@type="content"]', default=None)

        if not media_url:
            self.raise_no_formats('The relinker returned no media url')

        # geo flag is a bit unreliable and not properly set all the time
        geoprotection = xpath_text(relinker, './geoprotection', default='N') == 'Y'

        ext = determine_ext(media_url).lower()
        formats = []

        if ext == 'mp3':
            formats.append({
                'url': media_url,
                'vcodec': 'none',
                'acodec': 'mp3',
                'format_id': 'https-mp3',
            })
        elif ext == 'm3u8' or 'format=m3u8' in media_url:
            formats.extend(self._fix_m3u8_formats(media_url, video_id))
        elif ext == 'f4m':
            # very likely no longer needed. Cannot find any url that uses it.
            manifest_url = update_url_query(
                media_url.replace('manifest#live_hds.f4m', 'manifest.f4m'),
                {'hdcore': '3.7.0', 'plugin': 'aasp-3.7.0.39.44'})
            formats.extend(self._extract_f4m_formats(
                manifest_url, video_id, f4m_id='hds', fatal=False))
        elif ext == 'mp4':
            bitrate = int_or_none(xpath_text(relinker, './bitrate'))
            formats.append({
                'url': media_url,
                # int_or_none may return None; the old `bitrate > 0` test
                # raised TypeError when the <bitrate> element was missing
                'tbr': bitrate if bitrate and bitrate > 0 else None,
                'format_id': join_nonempty('https', bitrate, delim='-'),
            })
        else:
            raise ExtractorError(f'Unrecognized media extension "{ext}"')

        if (not formats and geoprotection is True) or '/video_no_available.mp4' in media_url:
            self.raise_geo_restricted(countries=self._GEO_COUNTRIES, metadata_available=True)

        if not audio_only and not is_live:
            formats.extend(self._create_http_urls(media_url, relinker_url, formats, video_id))

        return filter_dict({
            'is_live': is_live,
            'duration': duration,
            'formats': formats,
        })

    def _create_http_urls(self, manifest_url, relinker_url, fmts, video_id):
        """Synthesize direct-HTTPS MP4 formats from the qualities encoded in
        the HLS manifest URL, when the MP4 fallback is available."""
        import math  # hoisted out of the per-quality helper below
        _MANIFEST_REG = r'/(?P<id>\w+)(?:_(?P<quality>[\d\,]+))?(?:\.mp4)?(?:\.csmil)?/playlist\.m3u8'
        _MP4_TMPL = '%s&overrideUserAgentRule=mp4-%s'
        _QUALITY = {
            # tbr: w, h
            250: [352, 198],
            400: [512, 288],
            600: [512, 288],
            700: [512, 288],
            800: [700, 394],
            1200: [736, 414],
            1500: [920, 518],
            1800: [1024, 576],
            2400: [1280, 720],
            3200: [1440, 810],
            3600: [1440, 810],
            5000: [1920, 1080],
            10000: [1920, 1080],
        }

        def percentage(number, target, pc=20, roof=125):
            """check if the target is in the range of number +/- percent"""
            if not number or number < 0:
                return False
            return abs(target - number) < min(float(number) * float(pc) / 100.0, roof)

        def get_format_info(tbr):
            # Map a quality label to width/height/codec info, preferring data
            # copied from a matching m3u8 format over the static table
            br = int_or_none(tbr)
            if len(fmts) == 1 and not br:
                br = fmts[0].get('tbr')
            if br and br > 300:
                tbr = math.floor(br / 100) * 100
            else:
                tbr = 250

            # try extracting info from available m3u8 formats
            format_copy = [None, None]
            for f in fmts:
                if f.get('tbr'):
                    if percentage(tbr, f['tbr']):
                        format_copy[0] = f.copy()
                if [f.get('width'), f.get('height')] == _QUALITY.get(tbr):
                    format_copy[1] = f.copy()
                    format_copy[1]['tbr'] = tbr

            # prefer format with similar bitrate because there might be
            # multiple video with the same resolution but different bitrate
            format_copy = format_copy[0] or format_copy[1] or {}
            return {
                'format_id': f'https-{tbr}',
                'width': format_copy.get('width'),
                'height': format_copy.get('height'),
                'tbr': format_copy.get('tbr') or tbr,
                'vcodec': format_copy.get('vcodec') or 'avc1',
                'acodec': format_copy.get('acodec') or 'mp4a',
                'fps': format_copy.get('fps') or 25,
            } if format_copy else {
                'format_id': f'https-{tbr}',
                'width': _QUALITY[tbr][0],
                'height': _QUALITY[tbr][1],
                'tbr': tbr,
                'vcodec': 'avc1',
                'acodec': 'mp4a',
                'fps': 25,
            }

        # Check if MP4 download is available
        try:
            self._request_webpage(
                HEADRequest(_MP4_TMPL % (relinker_url, '*')), video_id, 'Checking MP4 availability')
        except ExtractorError as e:
            self.to_screen(f'{video_id}: MP4 direct download is not available: {e.cause}')
            return []

        # filter out single-stream formats
        fmts = [f for f in fmts
                if f.get('vcodec') != 'none' and f.get('acodec') != 'none']

        mobj = re.search(_MANIFEST_REG, manifest_url)
        if not mobj:
            return []
        available_qualities = mobj.group('quality').split(',') if mobj.group('quality') else ['*']

        formats = []
        for q in filter(None, available_qualities):
            self.write_debug(f'Creating https format for quality {q}')
            formats.append({
                'url': _MP4_TMPL % (relinker_url, q),
                'protocol': 'https',
                'ext': 'mp4',
                **get_format_info(q),
            })
        return formats

    @staticmethod
    def _get_thumbnails_list(thumbs, url):
        """Turn a name->relative-URL mapping into a thumbnails list."""
        return [{
            'url': urljoin(url, thumb_url),
        } for thumb_url in (thumbs or {}).values() if thumb_url]

    @staticmethod
    def _extract_subtitles(url, video_data):
        """Collect subtitle tracks; for STL files also add the derived SRT URL."""
        STL_EXT = 'stl'
        SRT_EXT = 'srt'
        subtitles = {}
        subtitles_array = video_data.get('subtitlesArray') or video_data.get('subtitleList') or []
        for k in ('subtitles', 'subtitlesUrl'):
            subtitles_array.append({'url': video_data.get(k)})
        for subtitle in subtitles_array:
            sub_url = subtitle.get('url')
            if sub_url and isinstance(sub_url, str):
                sub_lang = subtitle.get('language') or 'it'
                sub_url = urljoin(url, sub_url)
                sub_ext = determine_ext(sub_url, SRT_EXT)
                subtitles.setdefault(sub_lang, []).append({
                    'ext': sub_ext,
                    'url': sub_url,
                })
                if STL_EXT == sub_ext:
                    subtitles[sub_lang].append({
                        'ext': SRT_EXT,
                        'url': sub_url[:-len(STL_EXT)] + SRT_EXT,
                    })
        return subtitles
class RaiPlayIE(RaiBaseIE):
    """Extractor for on-demand RaiPlay videos (per-content JSON endpoint)."""
    _VALID_URL = rf'(?P<base>https?://(?:www\.)?raiplay\.it/.+?-(?P<id>{RaiBaseIE._UUID_RE}))\.(?:html|json)'
    _TESTS = [{
        'url': 'https://www.raiplay.it/video/2014/04/Report-del-07042014-cb27157f-9dd0-4aee-b788-b1f67643a391.html',
        'md5': '8970abf8caf8aef4696e7b1f2adfc696',
        'info_dict': {
            'id': 'cb27157f-9dd0-4aee-b788-b1f67643a391',
            'ext': 'mp4',
            'title': 'Report del 07/04/2014',
            'alt_title': 'St 2013/14 - Report - Espresso nel caffè - 07/04/2014',
            'description': 'md5:d730c168a58f4bb35600fc2f881ec04e',
            'thumbnail': r're:^https?://www\.raiplay\.it/.+\.jpg',
            'uploader': 'Rai 3',
            'creator': 'Rai 3',
            'duration': 6160,
            'series': 'Report',
            'season': '2013/14',
            'subtitles': {'it': 'count:4'},
            'release_year': 2024,
            'episode': 'Espresso nel caffè - 07/04/2014',
            'timestamp': 1396919880,
            'upload_date': '20140408',
            'formats': 'count:4',
        },
        'params': {'skip_download': True},
    }, {
        # 1080p
        'url': 'https://www.raiplay.it/video/2021/11/Blanca-S1E1-Senza-occhi-b1255a4a-8e72-4a2f-b9f3-fc1308e00736.html',
        'md5': 'aeda7243115380b2dd5e881fd42d949a',
        'info_dict': {
            'id': 'b1255a4a-8e72-4a2f-b9f3-fc1308e00736',
            'ext': 'mp4',
            'title': 'Blanca - S1E1 - Senza occhi',
            'alt_title': 'St 1 Ep 1 - Blanca - Senza occhi',
            'description': 'md5:75f95d5c030ec8bac263b1212322e28c',
            'thumbnail': r're:^https://www\.raiplay\.it/dl/img/.+\.jpg',
            'uploader': 'Rai Premium',
            'creator': 'Rai Fiction',
            'duration': 6493,
            'series': 'Blanca',
            'season': 'Season 1',
            'episode_number': 1,
            'release_year': 2021,
            'season_number': 1,
            'episode': 'Senza occhi',
            'timestamp': 1637318940,
            'upload_date': '20211119',
            'formats': 'count:7',
        },
        'params': {'skip_download': True},
        'expected_warnings': ['Video not available. Likely due to geo-restriction.'],
    }, {
        # 1500 quality
        'url': 'https://www.raiplay.it/video/2012/09/S1E11---Tutto-cio-che-luccica-0cab3323-732e-45d6-8e86-7704acab6598.html',
        'md5': 'a634d20e8ab2d43724c273563f6bf87a',
        'info_dict': {
            'id': '0cab3323-732e-45d6-8e86-7704acab6598',
            'ext': 'mp4',
            'title': 'Mia and Me - S1E11 - Tutto ciò che luccica',
            'alt_title': 'St 1 Ep 11 - Mia and Me - Tutto ciò che luccica',
            'description': 'md5:4969e594184b1920c4c1f2b704da9dea',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'Rai Gulp',
            'series': 'Mia and Me',
            'season': 'Season 1',
            'episode_number': 11,
            'release_year': 2015,
            'season_number': 1,
            'episode': 'Tutto ciò che luccica',
            'timestamp': 1348495020,
            'upload_date': '20120924',
        },
    }, {
        # checking program_info gives false positive for DRM
        'url': 'https://www.raiplay.it/video/2022/10/Ad-ogni-costo---Un-giorno-in-Pretura---Puntata-del-15102022-1dfd1295-ea38-4bac-b51e-f87e2881693b.html',
        'md5': '572c6f711b7c5f2d670ba419b4ae3b08',
        'info_dict': {
            'id': '1dfd1295-ea38-4bac-b51e-f87e2881693b',
            'ext': 'mp4',
            'title': 'Ad ogni costo - Un giorno in Pretura - Puntata del 15/10/2022',
            'alt_title': 'St 2022/23 - Un giorno in pretura - Ad ogni costo',
            'description': 'md5:4046d97b2687f74f06a8b8270ba5599f',
            'uploader': 'Rai 3',
            'duration': 3773.0,
            'thumbnail': 'https://www.raiplay.it/dl/img/2022/10/12/1665586539957_2048x2048.png',
            'creators': ['Rai 3'],
            'series': 'Un giorno in pretura',
            'season': '2022/23',
            'episode': 'Ad ogni costo',
            'timestamp': 1665507240,
            'upload_date': '20221011',
            'release_year': 2025,
        },
    }, {
        'url': 'http://www.raiplay.it/video/2016/11/gazebotraindesi-efebe701-969c-4593-92f3-285f0d1ce750.html?',
        'only_matching': True,
    }, {
        # subtitles at 'subtitlesArray' key (see #27698)
        'url': 'https://www.raiplay.it/video/2020/12/Report---04-01-2021-2e90f1de-8eee-4de4-ac0e-78d21db5b600.html',
        'only_matching': True,
    }, {
        # DRM protected
        'url': 'https://www.raiplay.it/video/2021/06/Lo-straordinario-mondo-di-Zoey-S2E1-Lo-straordinario-ritorno-di-Zoey-3ba992de-2332-41ad-9214-73e32ab209f4.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        base, video_id = self._match_valid_url(url).groups()

        # The page URL with a .json suffix returns the full content metadata
        media = self._download_json(
            f'{base}.json', video_id, 'Downloading video JSON')

        # DRM flag lives under rights_management (checking program_info
        # instead gives false positives - see the test note above)
        if traverse_obj(media, ('rights_management', 'rights', 'drm')):
            self.report_drm(video_id)

        video = media['video']
        relinker_info = self._extract_relinker_info(video['content_url'], video_id)
        date_published = join_nonempty(
            media.get('date_published'), media.get('time_published'), delim=' ')
        season = media.get('season')
        alt_title = join_nonempty(media.get('subtitle'), media.get('toptitle'), delim=' - ')

        return {
            'id': remove_start(media.get('id'), 'ContentItem-') or video_id,
            'display_id': video_id,
            'title': media.get('name'),
            'alt_title': strip_or_none(alt_title or None),
            'description': media.get('description'),
            'uploader': strip_or_none(
                traverse_obj(media, ('program_info', 'channel'))
                or media.get('channel') or None),
            'creator': strip_or_none(
                traverse_obj(media, ('program_info', 'editor'))
                or media.get('editor') or None),
            'duration': parse_duration(video.get('duration')),
            'timestamp': unified_timestamp(date_published),
            'thumbnails': self._get_thumbnails_list(media.get('images'), url),
            'series': traverse_obj(media, ('program_info', 'name')),
            'season_number': int_or_none(season),
            # Non-numeric seasons (e.g. '2013/14') are kept as season names
            'season': season if (season and not season.isdigit()) else None,
            'episode': media.get('episode_title'),
            'episode_number': int_or_none(media.get('episode')),
            'subtitles': self._extract_subtitles(url, video),
            'release_year': int_or_none(traverse_obj(media, ('track_info', 'edit_year'))),
            **relinker_info,
        }
class RaiPlayLiveIE(RaiPlayIE):  # XXX: Do not subclass from concrete IE
    """Live channels on RaiPlay (/dirette/...).

    ``_real_extract`` is inherited unchanged from RaiPlayIE; presumably the
    /dirette pages expose the same ``<base>.json`` metadata endpoint - the
    only difference visible here is the URL pattern and ID group.
    """
    _VALID_URL = r'(?P<base>https?://(?:www\.)?raiplay\.it/dirette/(?P<id>[^/?#&]+))'
    _TESTS = [{
        'url': 'http://www.raiplay.it/dirette/rainews24',
        'info_dict': {
            'id': 'd784ad40-e0ae-4a69-aa76-37519d238a9c',
            'display_id': 'rainews24',
            'ext': 'mp4',
            'title': 're:^Diretta di Rai News 24 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'description': 'md5:4d00bcf6dc98b27c6ec480de329d1497',
            'uploader': 'Rai News 24',
            'creator': 'Rai News 24',
            'is_live': True,
            'live_status': 'is_live',
            'upload_date': '20090502',
            'timestamp': 1241276220,
            'formats': 'count:3',
        },
        'params': {'skip_download': True},
    }]
class RaiPlayPlaylistIE(InfoExtractor):
    """Extractor for RaiPlay program pages: all episodes/extras, or a single
    season when the URL carries an extra path segment."""
    _VALID_URL = r'(?P<base>https?://(?:www\.)?raiplay\.it/programmi/(?P<id>[^/?#&]+))(?:/(?P<extra_id>[^?#&]+))?'
    _TESTS = [{
        # entire series episodes + extras...
        'url': 'https://www.raiplay.it/programmi/nondirloalmiocapo/',
        'info_dict': {
            'id': 'nondirloalmiocapo',
            'title': 'Non dirlo al mio capo',
            'description': 'md5:98ab6b98f7f44c2843fd7d6f045f153b',
        },
        'playlist_mincount': 30,
    }, {
        # single season
        'url': 'https://www.raiplay.it/programmi/nondirloalmiocapo/episodi/stagione-2/',
        'info_dict': {
            'id': 'nondirloalmiocapo',
            'title': 'Non dirlo al mio capo - Stagione 2',
            'description': 'md5:98ab6b98f7f44c2843fd7d6f045f153b',
        },
        'playlist_count': 12,
    }]

    def _real_extract(self, url):
        base, playlist_id, extra_id = self._match_valid_url(url).groups()
        program = self._download_json(
            f'{base}.json', playlist_id, 'Downloading program JSON')

        if extra_id:
            # Normalize the extra path (e.g. 'episodi/stagione-2/') so it can
            # be compared against the uppercased 'block/set' name below
            extra_id = extra_id.upper().rstrip('/')

        playlist_title = program.get('name')
        entries = []
        # Programs are organized as blocks (e.g. seasons) containing sets of items
        for b in (program.get('blocks') or []):
            for s in (b.get('sets') or []):
                if extra_id:
                    if extra_id != join_nonempty(
                            b.get('name'), s.get('name'), delim='/').replace(' ', '-').upper():
                        continue
                    playlist_title = join_nonempty(playlist_title, s.get('name'), delim=' - ')

                s_id = s.get('id')
                if not s_id:
                    continue
                medias = self._download_json(
                    f'{base}/{s_id}.json', s_id,
                    'Downloading content set JSON', fatal=False)
                if not medias:
                    continue
                for m in (medias.get('items') or []):
                    path_id = m.get('path_id')
                    if not path_id:
                        continue
                    video_url = urljoin(url, path_id)
                    entries.append(self.url_result(
                        video_url, ie=RaiPlayIE.ie_key(),
                        video_id=RaiPlayIE._match_id(video_url)))

        return self.playlist_result(
            entries, playlist_id, playlist_title,
            try_get(program, lambda x: x['program_info']['description']))
class RaiPlaySoundIE(RaiBaseIE):
    """Single audio items on raiplaysound.it (episodes, audiobook chapters, etc.)."""
    _VALID_URL = rf'(?P<base>https?://(?:www\.)?raiplaysound\.it/.+?-(?P<id>{RaiBaseIE._UUID_RE}))\.(?:html|json)'
    _TESTS = [{
        'url': 'https://www.raiplaysound.it/audio/2021/12/IL-RUGGITO-DEL-CONIGLIO-1ebae2a7-7cdb-42bb-842e-fe0d193e9707.html',
        'md5': '8970abf8caf8aef4696e7b1f2adfc696',
        'info_dict': {
            'id': '1ebae2a7-7cdb-42bb-842e-fe0d193e9707',
            'ext': 'mp3',
            'title': 'Il Ruggito del Coniglio del 10/12/2021',
            'alt_title': 'md5:0e6476cd57858bb0f3fcc835d305b455',
            'description': 'md5:2a17d2107e59a4a8faa0e18334139ee2',
            'thumbnail': r're:^https?://.+\.jpg$',
            'uploader': 'rai radio 2',
            'duration': 5685,
            'series': 'Il Ruggito del Coniglio',
            'episode': 'Il Ruggito del Coniglio del 10/12/2021',
            'creator': 'rai radio 2',
            'timestamp': 1638346620,
            'upload_date': '20211201',
        },
        'params': {'skip_download': True},
    }, {
        # case-sensitivity test for uppercase extension
        'url': 'https://www.raiplaysound.it/audio/2020/05/Storia--Lunita-dItalia-e-lunificazione-della-Germania-b4c16390-7f3f-4282-b353-d94897dacb7c.html',
        'md5': 'c69ebd69282f0effd7ef67b7e2f6c7d8',
        'info_dict': {
            'id': 'b4c16390-7f3f-4282-b353-d94897dacb7c',
            'ext': 'mp3',
            'title': "Storia | 01 L'unità d'Italia e l'unificazione della Germania",
            'alt_title': 'md5:ed4ed82585c52057b71b43994a59b705',
            'description': 'md5:92818b6f31b2c150567d56b75db2ea7f',
            'uploader': 'rai radio 3',
            'duration': 2439.0,
            'thumbnail': 'https://www.raiplaysound.it/dl/img/2023/09/07/1694084898279_Maturadio-LOGO-2048x1152.jpg',
            'creators': ['rai radio 3'],
            'series': 'Maturadio',
            'season': 'Season 9',
            'season_number': 9,
            'episode': "01. L'unità d'Italia e l'unificazione della Germania",
            'episode_number': 1,
            'timestamp': 1590400740,
            'upload_date': '20200525',
        },
    }]

    def _real_extract(self, url):
        base, audio_id = self._match_valid_url(url).group('base', 'id')
        media = self._download_json(f'{base}.json', audio_id, 'Downloading audio JSON')
        # 'uniquename' carries a 'ContentItem-' or 'Page-' prefix; strip both.
        uid = try_get(media, lambda x: remove_start(remove_start(x['uniquename'], 'ContentItem-'), 'Page-'))

        info = {}
        formats = []
        # Relinker URLs may appear under several keys (VOD, live, card layouts);
        # collect the distinct ones and merge the formats they yield.
        relinkers = set(traverse_obj(media, (('downloadable_audio', 'audio', ('live', 'cards', 0, 'audio')), 'url')))
        for r in relinkers:
            info = self._extract_relinker_info(r, audio_id, True)
            formats.extend(info.get('formats'))

        date_published = try_get(media, (lambda x: f'{x["create_date"]} {x.get("create_time") or ""}',
                                         lambda x: x['live']['create_date']))

        podcast_info = traverse_obj(media, 'podcast_info', ('live', 'cards', 0)) or {}

        return {
            **info,
            'id': uid or audio_id,
            'display_id': audio_id,
            'title': traverse_obj(media, 'title', 'episode_title'),
            'alt_title': traverse_obj(media, ('track_info', 'media_name'), expected_type=strip_or_none),
            'description': media.get('description'),
            'uploader': traverse_obj(media, ('track_info', 'channel'), expected_type=strip_or_none),
            'creator': traverse_obj(media, ('track_info', 'editor'), expected_type=strip_or_none),
            'timestamp': unified_timestamp(date_published),
            'thumbnails': self._get_thumbnails_list(podcast_info.get('images'), url),
            'series': podcast_info.get('title'),
            'season_number': int_or_none(media.get('season')),
            'episode': media.get('episode_title'),
            'episode_number': int_or_none(media.get('episode')),
            'formats': formats,
        }
class RaiPlaySoundLiveIE(RaiPlaySoundIE):  # XXX: Do not subclass from concrete IE
    """Live radio channels on raiplaysound.it (single path segment URLs)."""
    _VALID_URL = r'(?P<base>https?://(?:www\.)?raiplaysound\.it/(?P<id>[^/?#&]+)$)'
    _TESTS = [{
        'url': 'https://www.raiplaysound.it/radio2',
        'info_dict': {
            'id': 'b00a50e6-f404-4af6-8f8c-ff3b9af73a44',
            'display_id': 'radio2',
            'ext': 'mp4',
            'title': r're:Rai Radio 2 \d+-\d+-\d+ \d+:\d+',
            'thumbnail': r're:^https://www\.raiplaysound\.it/dl/img/.+\.png',
            'uploader': 'rai radio 2',
            'series': 'Rai Radio 2',
            'creator': 'raiplaysound',
            'is_live': True,
            'live_status': 'is_live',
        },
        'params': {'skip_download': True},
    }]
class RaiPlaySoundPlaylistIE(InfoExtractor):
    """Playlists on raiplaysound.it: shows, curated playlists and audiobooks.

    An optional trailing path (``extra_id``) selects a single season/section
    via the programme's 'filters' list.
    """
    _VALID_URL = r'(?P<base>https?://(?:www\.)?raiplaysound\.it/(?:programmi|playlist|audiolibri)/(?P<id>[^/?#&]+))(?:/(?P<extra_id>[^?#&]+))?'
    _TESTS = [{
        # entire show
        'url': 'https://www.raiplaysound.it/programmi/ilruggitodelconiglio',
        'info_dict': {
            'id': 'ilruggitodelconiglio',
            'title': 'Il Ruggito del Coniglio',
            'description': 'md5:62a627b3a2d0635d08fa8b6e0a04f27e',
        },
        'playlist_mincount': 65,
    }, {
        # single season
        'url': 'https://www.raiplaysound.it/programmi/ilruggitodelconiglio/puntate/prima-stagione-1995',
        'info_dict': {
            'id': 'ilruggitodelconiglio_puntate_prima-stagione-1995',
            'title': 'Prima Stagione 1995',
        },
        'playlist_count': 1,
    }]

    def _real_extract(self, url):
        base, playlist_id, extra_id = self._match_valid_url(url).group('base', 'id', 'extra_id')
        url = f'{base}.json'
        program = self._download_json(url, playlist_id, 'Downloading program JSON')

        if extra_id:
            extra_id = extra_id.rstrip('/')
            playlist_id += '_' + extra_id.replace('/', '_')
            # Find the filter entry whose weblink contains the requested section.
            # Guard against entries missing 'weblink' (previously raised TypeError
            # from `extra_id in None`).
            path = next(
                c['path_id'] for c in program.get('filters') or []
                if extra_id in (c.get('weblink') or ''))
            program = self._download_json(
                urljoin('https://www.raiplaysound.it', path), playlist_id, 'Downloading program secondary JSON')

        entries = [
            self.url_result(urljoin(base, c['path_id']), ie=RaiPlaySoundIE.ie_key())
            for c in traverse_obj(program, 'cards', ('block', 'cards')) or []
            if c.get('path_id')]

        return self.playlist_result(entries, playlist_id, program.get('title'),
                                    traverse_obj(program, ('podcast_info', 'description')))
class RaiIE(RaiBaseIE):
    """Legacy Rai ContentItem pages on *.rai.it / *.rai.tv.

    Fetches the ContentItem JSON and dispatches on the media 'type'
    (audio vs. video); also used as a fallback by RaiNewsIE.
    """
    _VALID_URL = rf'https?://[^/]+\.(?:rai\.(?:it|tv))/.+?-(?P<id>{RaiBaseIE._UUID_RE})(?:-.+?)?\.html'
    _TESTS = [{
        'url': 'https://www.raisport.rai.it/dl/raiSport/media/rassegna-stampa-04a9f4bd-b563-40cf-82a6-aad3529cb4a9.html',
        'info_dict': {
            'id': '04a9f4bd-b563-40cf-82a6-aad3529cb4a9',
            'ext': 'mp4',
            'title': 'TG PRIMO TEMPO',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 1758,
            'upload_date': '20140612',
        },
        'params': {'skip_download': True},
        'expected_warnings': ['Video not available. Likely due to geo-restriction.'],
    }, {
        'url': 'https://www.rai.it/dl/RaiTV/programmi/media/ContentItem-efb17665-691c-45d5-a60c-5301333cbb0c.html',
        'info_dict': {
            'id': 'efb17665-691c-45d5-a60c-5301333cbb0c',
            'ext': 'mp4',
            'title': 'TG1 ore 20:00 del 03/11/2016',
            'description': 'TG1 edizione integrale ore 20:00 del giorno 03/11/2016',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 2214,
            'upload_date': '20161103',
        },
        'params': {'skip_download': True},
    }, {
        # Direct MMS: Media URL no longer works.
        'url': 'http://www.rai.it/dl/RaiTV/programmi/media/ContentItem-b63a4089-ac28-48cf-bca5-9f5b5bc46df5.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        content_id = self._match_id(url)
        media = self._download_json(
            f'https://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-{content_id}.html?json',
            content_id, 'Downloading video JSON', fatal=False, expected_status=404)
        if media is None:
            return None

        if 'Audio' in media['type']:
            # Direct audio URL; synthesize a single format instead of relinking.
            relinker_info = {
                'formats': [{
                    'format_id': join_nonempty('https', media.get('formatoAudio'), delim='-'),
                    'url': media['audioUrl'],
                    'ext': media.get('formatoAudio'),
                    'vcodec': 'none',
                    'acodec': media.get('formatoAudio'),
                }],
            }
        elif 'Video' in media['type']:
            relinker_info = self._extract_relinker_info(media['mediaUri'], content_id)
        else:
            raise ExtractorError('not a media file')

        thumbnails = self._get_thumbnails_list(
            {image_type: media.get(image_type) for image_type in (
                'image', 'image_medium', 'image_300')}, url)

        return {
            'id': content_id,
            'title': strip_or_none(media.get('name') or media.get('title')),
            'description': strip_or_none(media.get('desc')) or None,
            'thumbnails': thumbnails,
            'uploader': strip_or_none(media.get('author')) or None,
            'upload_date': unified_strdate(media.get('date')),
            'duration': parse_duration(media.get('length')),
            'subtitles': self._extract_subtitles(url, media),
            **relinker_info,
        }
class RaiNewsIE(RaiBaseIE):
    """Videos on rainews.it — new <rainews-player> pages, with a fallback to
    the legacy RaiIE extraction for old content."""
    _VALID_URL = rf'https?://(www\.)?rainews\.it/(?!articoli)[^?#]+-(?P<id>{RaiBaseIE._UUID_RE})(?:-[^/?#]+)?\.html'
    _EMBED_REGEX = [rf'<iframe[^>]+data-src="(?P<url>/iframe/[^?#]+?{RaiBaseIE._UUID_RE}\.html)']
    _TESTS = [{
        # new rainews player (#3911)
        'url': 'https://www.rainews.it/video/2024/02/membri-della-croce-rossa-evacuano-gli-abitanti-di-un-villaggio-nella-regione-ucraina-di-kharkiv-il-filmato-dallucraina--31e8017c-845c-43f5-9c48-245b43c3a079.html',
        'info_dict': {
            'id': '31e8017c-845c-43f5-9c48-245b43c3a079',
            'ext': 'mp4',
            'title': 'md5:1e81364b09de4a149042bac3c7d36f0b',
            'duration': 196,
            'upload_date': '20240225',
            'uploader': 'rainews',
            'formats': 'count:2',
        },
        'params': {'skip_download': True},
    }, {
        # old content with fallback method to extract media urls
        'url': 'https://www.rainews.it/dl/rainews/media/Weekend-al-cinema-da-Hollywood-arriva-il-thriller-di-Tate-Taylor-La-ragazza-del-treno-1632c009-c843-4836-bb65-80c33084a64b.html',
        'info_dict': {
            'id': '1632c009-c843-4836-bb65-80c33084a64b',
            'ext': 'mp4',
            'title': 'Weekend al cinema, da Hollywood arriva il thriller di Tate Taylor "La ragazza del treno"',
            'description': 'I film in uscita questa settimana.',
            'thumbnail': r're:^https?://.*\.png$',
            'duration': 833,
            'upload_date': '20161103',
            'formats': 'count:8',
        },
        'params': {'skip_download': True},
        'expected_warnings': ['unable to extract player_data'],
    }, {
        # iframe + drm
        'url': 'https://www.rainews.it/iframe/video/2022/07/euro2022-europei-calcio-femminile-italia-belgio-gol-0-1-video-4de06a69-de75-4e32-a657-02f0885f8118.html',
        'only_matching': True,
    }]
    _PLAYER_TAG = 'news'

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        player_data = self._search_json(
            rf'<rai{self._PLAYER_TAG}-player\s*data=\'', webpage, 'player_data', video_id,
            transform_source=clean_html, default={})
        # 'track_info' may be absent from player_data; default to {} so the
        # metadata lookups below cannot raise AttributeError on None
        track_info = player_data.get('track_info') or {}
        relinker_url = traverse_obj(player_data, 'mediapolis', 'content_url')

        if not relinker_url:
            # fallback on old implementation for some old content
            try:
                return RaiIE._real_extract(self, url)
            except GeoRestrictedError:
                raise
            except ExtractorError as e:
                raise ExtractorError('Relinker URL not found', cause=e)

        relinker_info = self._extract_relinker_info(urljoin(url, relinker_url), video_id)

        return {
            'id': video_id,
            'title': player_data.get('title') or track_info.get('title') or self._og_search_title(webpage),
            'upload_date': unified_strdate(track_info.get('date')),
            'uploader': strip_or_none(track_info.get('editor')) or None,
            **relinker_info,
        }
class RaiCulturaIE(RaiNewsIE): # XXX: Do not subclass from concrete IE
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | true |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/planetmarathi.py | yt_dlp/extractor/planetmarathi.py | from .common import InfoExtractor
from ..utils import (
try_get,
unified_strdate,
)
class PlanetMarathiIE(InfoExtractor):
    """Titles on planetmarathi.com; each title's assets (movie, trailers,
    episodes, character profiles) become one playlist entry apiece."""
    _VALID_URL = r'https?://(?:www\.)?planetmarathi\.com/titles/(?P<id>[^/#&?$]+)'
    _TESTS = [{
        'url': 'https://www.planetmarathi.com/titles/ek-unad-divas',
        'playlist_mincount': 2,
        'info_dict': {
            'id': 'ek-unad-divas',
        },
        'playlist': [{
            'info_dict': {
                'id': 'ASSETS-MOVIE-ASSET-01_ek-unad-divas',
                'ext': 'mp4',
                'title': 'ek unad divas',
                'alt_title': 'चित्रपट',
                'description': 'md5:41c7ed6b041c2fea9820a3f3125bd881',
                'episode_number': 1,
                'duration': 5539,
                'upload_date': '20210829',
            },
        }],  # Trailer skipped
    }, {
        'url': 'https://www.planetmarathi.com/titles/baap-beep-baap-season-1',
        'playlist_mincount': 10,
        'info_dict': {
            'id': 'baap-beep-baap-season-1',
        },
        'playlist': [{
            'info_dict': {
                'id': 'ASSETS-CHARACTER-PROFILE-SEASON-01-ASSET-01_baap-beep-baap-season-1',
                'ext': 'mp4',
                'title': 'Manohar Kanhere',
                'alt_title': 'मनोहर कान्हेरे',
                'description': 'md5:285ed45d5c0ab5522cac9a043354ebc6',
                'season_number': 1,
                'episode_number': 1,
                'duration': 29,
                'upload_date': '20210829',
            },
        }],  # Trailers, Episodes, other Character profiles skipped
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        entries = []
        json_data = self._download_json(
            f'https://www.planetmarathi.com/api/v1/titles/{playlist_id}/assets', playlist_id)['assets']
        for asset in json_data:
            asset_title = asset['mediaAssetName']['en']
            # A generic 'Movie' title is replaced by a readable form of the slug.
            if asset_title == 'Movie':
                asset_title = playlist_id.replace('-', ' ')
            # 'sk' may contain '#', which is not URL/ID safe.
            asset_id = f'{asset["sk"]}_{playlist_id}'.replace('#', '-')
            formats, subtitles = self._extract_m3u8_formats_and_subtitles(asset['mediaAssetURL'], asset_id)
            entries.append({
                'id': asset_id,
                'title': asset_title,
                'alt_title': try_get(asset, lambda x: x['mediaAssetName']['mr']),
                'description': try_get(asset, lambda x: x['mediaAssetDescription']['en']),
                'season_number': asset.get('mediaAssetSeason'),
                'episode_number': asset.get('mediaAssetIndexForAssetType'),
                'duration': asset.get('mediaAssetDurationInSeconds'),
                'upload_date': unified_strdate(asset.get('created')),
                'formats': formats,
                'subtitles': subtitles,
            })
        return self.playlist_result(entries, playlist_id=playlist_id)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/parler.py | yt_dlp/extractor/parler.py | from .common import InfoExtractor
from .youtube import YoutubeIE
from ..utils import (
clean_html,
int_or_none,
strip_or_none,
traverse_obj,
unified_timestamp,
urljoin,
)
class ParlerIE(InfoExtractor):
    """Posts on parler.com; embedded YouTube links are delegated to YoutubeIE."""
    IE_DESC = 'Posts on parler.com'
    _VALID_URL = r'https?://parler\.com/feed/(?P<id>[0-9a-f]{8}-(?:[0-9a-f]{4}-){3}[0-9a-f]{12})'
    _TESTS = [
        {
            'url': 'https://parler.com/feed/df79fdba-07cc-48fe-b085-3293897520d7',
            'md5': '16e0f447bf186bb3cf64de5bbbf4d22d',
            'info_dict': {
                'id': 'df79fdba-07cc-48fe-b085-3293897520d7',
                'ext': 'mp4',
                'thumbnail': 'https://bl-images.parler.com/videos/6ce7cdf3-a27a-4d72-bf9c-d3e17ce39a66/thumbnail.jpeg',
                'title': 'Parler video #df79fdba-07cc-48fe-b085-3293897520d7',
                'description': 'md5:6f220bde2df4a97cbb89ac11f1fd8197',
                'timestamp': 1659785481,
                'upload_date': '20220806',
                'uploader': 'Tulsi Gabbard',
                'uploader_id': 'TulsiGabbard',
                'uploader_url': 'https://parler.com/TulsiGabbard',
                'view_count': int,
                'comment_count': int,
                'repost_count': int,
            },
        },
        {
            'url': 'https://parler.com/feed/f23b85c1-6558-470f-b9ff-02c145f28da5',
            'md5': 'eaba1ff4a10fe281f5ce74e930ab2cb4',
            'info_dict': {
                'id': 'r5vkSaz8PxQ',
                'ext': 'mp4',
                'live_status': 'not_live',
                'comment_count': int,
                'duration': 1267,
                'like_count': int,
                'channel_follower_count': int,
                'channel_id': 'UCox6YeMSY1PQInbCtTaZj_w',
                'upload_date': '20220716',
                'thumbnail': 'https://i.ytimg.com/vi/r5vkSaz8PxQ/maxresdefault.jpg',
                'tags': 'count:17',
                'availability': 'public',
                'categories': ['Entertainment'],
                'playable_in_embed': True,
                'channel': 'Who Knows What! With Mahesh & Friends',
                'title': 'Tom MacDonald Names Reaction',
                'uploader': 'Who Knows What! With Mahesh & Friends',
                'uploader_id': '@maheshchookolingo',
                'age_limit': 0,
                'description': 'md5:33c21f0d35ae6dc2edf3007d6696baea',
                'channel_url': 'https://www.youtube.com/channel/UCox6YeMSY1PQInbCtTaZj_w',
                'view_count': int,
                'uploader_url': 'http://www.youtube.com/@maheshchookolingo',
            },
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        data = self._download_json(f'https://api.parler.com/v0/public/parleys/{video_id}',
                                   video_id)['data']
        # Posts that merely embed an external (YouTube) video carry a 'link'.
        if data.get('link'):
            return self.url_result(data['link'], YoutubeIE)

        return {
            'id': video_id,
            'title': strip_or_none(data.get('title')) or '',
            **traverse_obj(data, {
                'url': ('video', 'videoSrc'),
                'thumbnail': ('video', 'thumbnailUrl'),
                'description': ('body', {clean_html}),
                'timestamp': ('date_created', {unified_timestamp}),
                'uploader': ('user', 'name', {strip_or_none}),
                'uploader_id': ('user', 'username', {str}),
                # partial application: urljoin('https://parler.com/') returns a
                # one-argument callable applied to the username
                'uploader_url': ('user', 'username', {urljoin('https://parler.com/')}),
                'view_count': ('views', {int_or_none}),
                'comment_count': ('total_comments', {int_or_none}),
                'repost_count': ('echos', {int_or_none}),
            }),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/streamable.py | yt_dlp/extractor/streamable.py | from .common import InfoExtractor
from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
parse_codecs,
try_get,
)
class StreamableIE(InfoExtractor):
    """Videos on streamable.com (direct, /e/ embed and /s/ share URLs)."""
    _VALID_URL = r'https?://streamable\.com/(?:[es]/)?(?P<id>\w+)'
    _EMBED_REGEX = [r'<iframe[^>]+\bsrc=(?P<q1>[\'"])(?P<url>(?:https?:)?//streamable\.com/.+?)(?P=q1)']
    _TESTS = [
        {
            'url': 'https://streamable.com/dnd1',
            'md5': '3e3bc5ca088b48c2d436529b64397fef',
            'info_dict': {
                'id': 'dnd1',
                'ext': 'mp4',
                'title': 'Mikel Oiarzabal scores to make it 0-3 for La Real against Espanyol',
                'thumbnail': r're:https?://.*\.jpg$',
                'uploader': 'teabaker',
                'timestamp': 1454964157.35115,
                'upload_date': '20160208',
                'duration': 61.516,
                'view_count': int,
            },
        },
        # older video without bitrate, width/height, codecs, etc. info
        {
            'url': 'https://streamable.com/moo',
            'md5': '2cf6923639b87fba3279ad0df3a64e73',
            'info_dict': {
                'id': 'moo',
                'ext': 'mp4',
                'title': '"Please don\'t eat me!"',
                'thumbnail': r're:https?://.*\.jpg$',
                'timestamp': 1426115495,
                'upload_date': '20150311',
                'duration': 12,
                'view_count': int,
            },
        },
        {
            'url': 'https://streamable.com/e/dnd1',
            'only_matching': True,
        },
        {
            'url': 'https://streamable.com/s/okkqk/drxjds',
            'only_matching': True,
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Note: Using the ajax API, as the public Streamable API doesn't seem
        # to return video info like the title properly sometimes, and doesn't
        # include info like the video duration
        video = self._download_json(
            f'https://ajax.streamable.com/videos/{video_id}', video_id)

        # Status codes:
        # 0 The video is being uploaded
        # 1 The video is being processed
        # 2 The video has at least one file ready
        # 3 The video is unavailable due to an error
        status = video.get('status')
        if status != 2:
            raise ExtractorError(
                'This video is currently unavailable. It may still be uploading or processing.',
                expected=True)

        title = video.get('reddit_title') or video['title']

        formats = []
        for key, info in video['files'].items():
            if not info.get('url'):
                continue
            formats.append({
                'format_id': key,
                'url': self._proto_relative_url(info['url']),
                'width': int_or_none(info.get('width')),
                'height': int_or_none(info.get('height')),
                'filesize': int_or_none(info.get('size')),
                'fps': int_or_none(info.get('framerate')),
                'vbr': float_or_none(info.get('bitrate'), 1000),
                'vcodec': parse_codecs(try_get(info, lambda x: x['input_metadata']['video_codec_name'])).get('vcodec'),
                'acodec': parse_codecs(try_get(info, lambda x: x['input_metadata']['audio_codec_name'])).get('acodec'),
            })

        return {
            'id': video_id,
            'title': title,
            'description': video.get('description'),
            'thumbnail': self._proto_relative_url(video.get('thumbnail_url')),
            'uploader': video.get('owner', {}).get('user_name'),
            'timestamp': float_or_none(video.get('date_added')),
            'duration': float_or_none(video.get('duration')),
            'view_count': int_or_none(video.get('plays')),
            'formats': formats,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nzherald.py | yt_dlp/extractor/nzherald.py | import json
from .brightcove import BrightcoveNewIE
from .common import InfoExtractor
from ..utils import ExtractorError, traverse_obj
class NZHeraldIE(InfoExtractor):
    """Articles on nzherald.co.nz; videos are hosted on Brightcove.

    Tries, in order: a Brightcove embed in the page, the Fusion metadata's
    'video' key, then a 'customVideo' that needs an extra API call.
    """
    IE_NAME = 'nzherald'
    _VALID_URL = r'https?://(?:www\.)?nzherald\.co\.nz/[\w\/-]+\/(?P<id>[A-Z0-9]+)'
    _TESTS = [
        {
            # Video accessible under 'video' key
            'url': 'https://www.nzherald.co.nz/nz/queen-elizabeth-death-nz-public-holiday-announced-for-september-26/CEOPBSXO2JDCLNK3H7E3BIE2FA/',
            'info_dict': {
                'id': '6312191736112',
                'ext': 'mp4',
                'title': 'Focus: PM holds post-Cabinet press conference',
                'duration': 238.08,
                'upload_date': '20220912',
                'uploader_id': '1308227299001',
                'timestamp': 1662957159,
                'tags': [],
                'thumbnail': r're:https?://.*\.jpg$',
                'description': 'md5:2f17713fcbfcfbe38bb9e7dfccbb0f2e',
            },
        }, {
            # Webpage has brightcove embed player url
            'url': 'https://www.nzherald.co.nz/travel/pencarrow-coastal-trail/HDVTPJEPP46HJ2UEMK4EGD2DFI/',
            'info_dict': {
                'id': '6261791733001',
                'ext': 'mp4',
                'title': 'Pencarrow Coastal Trail',
                'timestamp': 1625102897,
                'upload_date': '20210701',
                'uploader_id': '1308227299001',
                'description': 'md5:d361aaa0c6498f7ac1bc4fc0a0aec1e4',
                'thumbnail': r're:https?://.*\.jpg$',
                'tags': ['travel', 'video'],
                'duration': 43.627,
            },
        }, {
            # two video embeds of the same video
            'url': 'https://www.nzherald.co.nz/nz/truck-driver-captured-cutting-off-motorist-on-state-highway-1-in-canterbury/FIHNJB7PLLPHWQPK4S7ZBDUC4I/',
            'info_dict': {
                'id': '6251114530001',
                'ext': 'mp4',
                'title': 'Truck travelling north from Rakaia runs car off road',
                'timestamp': 1619730509,
                'upload_date': '20210429',
                'uploader_id': '1308227299001',
                'description': 'md5:4cae7dfb7613ac4c73b9e73a75c6b5d7',
            },
            'skip': 'video removed',
        }, {
            # customVideo embed requiring additional API call
            'url': 'https://www.nzherald.co.nz/nz/politics/reserve-bank-rejects-political-criticisms-stands-by-review/2JO5Q4WLZRCBBNWTLACZMOP4RA/',
            'info_dict': {
                'id': '6315123873112',
                'ext': 'mp4',
                'timestamp': 1667862725,
                'title': 'Focus: Luxon on re-appointment of Reserve Bank governor Adrian Orr',
                'upload_date': '20221107',
                'description': 'md5:df2f1f7033a8160c66e28e4743f5d934',
                'uploader_id': '1308227299001',
                'tags': ['video', 'nz herald focus', 'politics', 'politics videos'],
                'thumbnail': r're:https?://.*\.jpg$',
                'duration': 99.584,
            },
        }, {
            'url': 'https://www.nzherald.co.nz/kahu/kaupapa-companies-my-taiao-supporting-maori-in-study-and-business/PQBO2J25WCG77VGRX7W7BVYEAI/',
            'only_matching': True,
        }, {
            'url': 'https://nzherald.co.nz/the-country/video/focus-nzs-first-mass-covid-19-vaccination-event/N5I7IL3BRFLZSD33TLDLYJDGK4/',
            'only_matching': True,
        }, {
            'url': 'https://www.nzherald.co.nz/the-vision-is-clear/news/tvic-damian-roper-planting-trees-an-addiction/AN2AAEPNRK5VLISDWQAJZB6ATQ',
            'only_matching': True,
        },
    ]

    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1308227299001/S1BXZn8t_default/index.html?videoId=%s'

    def _extract_bc_embed_url(self, webpage):
        """The initial webpage may include the brightcove player embed url"""
        bc_url = BrightcoveNewIE._extract_url(self, webpage)
        return bc_url or self._search_regex(
            rf'(?:embedUrl)\"\s*:\s*\"(?P<embed_url>{BrightcoveNewIE._VALID_URL})',
            webpage, 'embed url', default=None, group='embed_url')

    def _real_extract(self, url):
        article_id = self._match_id(url)
        webpage = self._download_webpage(url, article_id)
        bc_url = self._extract_bc_embed_url(webpage)

        if not bc_url:
            fusion_metadata = self._parse_json(
                self._search_regex(r'Fusion\.globalContent\s*=\s*({.+?})\s*;', webpage, 'fusion metadata'), article_id)

            video_metadata = fusion_metadata.get('video')
            if not video_metadata:
                custom_video_id = traverse_obj(fusion_metadata, ('customVideo', 'embed', 'id'), expected_type=str)
                if custom_video_id:
                    video_metadata = self._download_json(
                        'https://www.nzherald.co.nz/pf/api/v3/content/fetch/full-content-by-id', article_id,
                        query={'query': json.dumps({'id': custom_video_id, 'site': 'nzh'}), '_website': 'nzh'})
            bc_video_id = traverse_obj(
                video_metadata or fusion_metadata,  # fusion metadata is the video metadata for video-only pages
                'brightcoveId', ('content_elements', ..., 'referent', 'id'),
                get_all=False, expected_type=str)

            if not bc_video_id:
                # An explicitly empty 'video' dict means the article has no video
                if isinstance(video_metadata, dict) and len(video_metadata) == 0:
                    raise ExtractorError('This article does not have a video.', expected=True)
                else:
                    raise ExtractorError('Failed to extract brightcove video id')
            bc_url = self.BRIGHTCOVE_URL_TEMPLATE % bc_video_id
        return self.url_result(bc_url, 'BrightcoveNew')
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/netverse.py | yt_dlp/extractor/netverse.py | import itertools
from .common import InfoExtractor, SearchInfoExtractor
from .dailymotion import DailymotionIE
from ..utils import smuggle_url, traverse_obj
class NetverseBaseIE(InfoExtractor):
    """Shared helpers for the netverse.id extractors."""
    # Maps URL path type -> API endpoint name
    _ENDPOINTS = {
        'watch': 'watchvideo',
        'video': 'watchvideo',
        'webseries': 'webseries',
        'season': 'webseason_videos',
    }

    def _call_api(self, slug, endpoint, query=None, season_id='', display_id=None):
        """Fetch JSON from the Netverse media API.

        ``query=None`` (instead of the previous mutable ``{}`` default) avoids
        the shared-mutable-default-argument pitfall; behavior is unchanged.
        """
        return self._download_json(
            f'https://api.netverse.id/medias/api/v2/{self._ENDPOINTS[endpoint]}/{slug}/{season_id}',
            display_id or slug, query=query or {})

    def _get_comments(self, video_id):
        """Yield comment dicts for a video, following API pagination."""
        last_page_number = None
        for i in itertools.count(1):
            comment_data = self._download_json(
                f'https://api.netverse.id/mediadetails/api/v3/videos/comments/{video_id}',
                video_id, data=b'', fatal=False, query={'page': i},
                note=f'Downloading JSON comment metadata page {i}') or {}
            yield from traverse_obj(comment_data, ('response', 'comments', 'data', ..., {
                'id': '_id',
                'text': 'comment',
                'author_id': 'customer_id',
                'author': ('customer', 'name'),
                'author_thumbnail': ('customer', 'profile_picture'),
            }))
            # The page count is only read once; later pages may omit it.
            if not last_page_number:
                last_page_number = traverse_obj(comment_data, ('response', 'comments', 'last_page'))
            if i >= (last_page_number or 0):
                break
class NetverseIE(NetverseBaseIE):
    """Single videos on netverse.id; playback is delegated to Dailymotion."""
    _VALID_URL = r'https?://(?:\w+\.)?netverse\.id/(?P<type>watch|video)/(?P<display_id>[^/?#&]+)'
    _TESTS = [{
        # Watch video
        'url': 'https://www.netverse.id/watch/waktu-indonesia-bercanda-edisi-spesial-lebaran-2016',
        'info_dict': {
            'id': 'k4yhqUwINAGtmHx3NkL',
            'title': 'Waktu Indonesia Bercanda - Edisi Spesial Lebaran 2016',
            'ext': 'mp4',
            'season': 'Season 2016',
            'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
            'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/[^/]+/x1080',
            'episode_number': 22,
            'episode': 'Episode 22',
            'uploader_id': 'x2ir3vq',
            'age_limit': 0,
            'tags': [],
            'view_count': int,
            'display_id': 'waktu-indonesia-bercanda-edisi-spesial-lebaran-2016',
            'duration': 2990,
            'upload_date': '20210722',
            'timestamp': 1626919804,
            'like_count': int,
            'uploader': 'Net Prime',
        },
    }, {
        # series
        'url': 'https://www.netverse.id/watch/jadoo-seorang-model',
        'info_dict': {
            'id': 'x88izwc',
            'title': 'Jadoo Seorang Model',
            'ext': 'mp4',
            'season': 'Season 2',
            'description': 'md5:8a74f70812cca267e19ee0635f0af835',
            'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/[^/]+/x1080',
            'episode_number': 2,
            'episode': 'Episode 2',
            'view_count': int,
            'like_count': int,
            'display_id': 'jadoo-seorang-model',
            'uploader_id': 'x2ir3vq',
            'duration': 635,
            'timestamp': 1646372927,
            'tags': ['PG069497-hellojadooseason2eps2'],
            'upload_date': '20220304',
            'uploader': 'Net Prime',
            'age_limit': 0,
        },
        'skip': 'video get Geo-blocked for some country',
    }, {
        # non www host
        'url': 'https://netverse.id/watch/tetangga-baru',
        'info_dict': {
            'id': 'k4CNGz7V0HJ7vfwZbXy',
            'ext': 'mp4',
            'title': 'Tetangga Baru',
            'season': 'Season 1',
            'description': 'md5:23fcf70e97d461d3029d25d59b2ccfb9',
            'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/[^/]+/x1080',
            'episode_number': 1,
            'episode': 'Episode 1',
            'timestamp': 1624538169,
            'view_count': int,
            'upload_date': '20210624',
            'age_limit': 0,
            'uploader_id': 'x2ir3vq',
            'like_count': int,
            'uploader': 'Net Prime',
            'tags': ['PG008534', 'tetangga', 'Baru'],
            'display_id': 'tetangga-baru',
            'duration': 1406,
        },
    }, {
        # /video url
        'url': 'https://www.netverse.id/video/pg067482-hellojadoo-season1',
        'title': 'Namaku Choi Jadoo',
        'info_dict': {
            'id': 'x887jzz',
            'ext': 'mp4',
            'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/[^/]+/x1080',
            'season': 'Season 1',
            'episode_number': 1,
            'description': 'md5:d4f627b3e7a3f9acdc55f6cdd5ea41d5',
            'title': 'Namaku Choi Jadoo',
            'episode': 'Episode 1',
            'age_limit': 0,
            'like_count': int,
            'view_count': int,
            'tags': ['PG067482', 'PG067482-HelloJadoo-season1'],
            'duration': 780,
            'display_id': 'pg067482-hellojadoo-season1',
            'uploader_id': 'x2ir3vq',
            'uploader': 'Net Prime',
            'timestamp': 1645764984,
            'upload_date': '20220225',
        },
        'skip': 'This video get Geo-blocked for some country',
    }, {
        # video with comments
        'url': 'https://netverse.id/video/episode-1-season-2016-ok-food',
        'info_dict': {
            'id': 'k6hetBPiQMljSxxvAy7',
            'ext': 'mp4',
            'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/[^/]+/x1080',
            'display_id': 'episode-1-season-2016-ok-food',
            'like_count': int,
            'description': '',
            'duration': 1471,
            'age_limit': 0,
            'timestamp': 1642405848,
            'episode_number': 1,
            'season': 'Season 2016',
            'uploader_id': 'x2ir3vq',
            'title': 'Episode 1 - Season 2016 - Ok Food',
            'upload_date': '20220117',
            'tags': [],
            'view_count': int,
            'episode': 'Episode 1',
            'uploader': 'Net Prime',
            'comment_count': int,
        },
        'params': {
            'getcomments': True,
        },
    }, {
        # video with multiple page comment
        'url': 'https://netverse.id/video/match-island-eps-1-fix',
        'info_dict': {
            'id': 'x8aznjc',
            'ext': 'mp4',
            'like_count': int,
            'tags': ['Match-Island', 'Pd00111'],
            'display_id': 'match-island-eps-1-fix',
            'view_count': int,
            'episode': 'Episode 1',
            'uploader': 'Net Prime',
            'duration': 4070,
            'timestamp': 1653068165,
            'description': 'md5:e9cf3b480ad18e9c33b999e3494f223f',
            'age_limit': 0,
            'title': 'Welcome To Match Island',
            'upload_date': '20220520',
            'episode_number': 1,
            'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/[^/]+/x1080',
            'uploader_id': 'x2ir3vq',
            'season': 'Season 1',
            'comment_count': int,
        },
        'params': {
            'getcomments': True,
        },
    }]

    def _real_extract(self, url):
        display_id, sites_type = self._match_valid_url(url).group('display_id', 'type')
        program_json = self._call_api(display_id, sites_type)
        videos = program_json['response']['videos']

        # Delegate playback to Dailymotion; pass the site as 'embedder' so the
        # Dailymotion extractor uses it when requesting the stream.
        return {
            '_type': 'url_transparent',
            'ie_key': DailymotionIE.ie_key(),
            'url': smuggle_url(videos['dailymotion_url'], {'query': {'embedder': 'https://www.netverse.id'}}),
            'display_id': display_id,
            'title': videos.get('title'),
            'season': videos.get('season_name'),
            'thumbnail': traverse_obj(videos, ('program_detail', 'thumbnail_image')),
            'description': traverse_obj(videos, ('program_detail', 'description')),
            'episode_number': videos.get('episode_order'),
            '__post_extractor': self.extract_comments(display_id),
        }
class NetversePlaylistIE(NetverseBaseIE):
    """Webseries playlists on netverse.id; walks every season and its pages."""
    _VALID_URL = r'https?://(?:\w+\.)?netverse\.id/(?P<type>webseries)/(?P<display_id>[^/?#&]+)'
    _TESTS = [{
        # multiple season
        'url': 'https://netverse.id/webseries/tetangga-masa-gitu',
        'info_dict': {
            'id': 'tetangga-masa-gitu',
            'title': 'Tetangga Masa Gitu',
        },
        'playlist_count': 519,
    }, {
        # single season
        'url': 'https://netverse.id/webseries/kelas-internasional',
        'info_dict': {
            'id': 'kelas-internasional',
            'title': 'Kelas Internasional',
        },
        'playlist_count': 203,
    }]

    def parse_playlist(self, json_data, playlist_id):
        """Yield url_results for every episode of every season.

        The season API is keyed by an episode slug, so one related episode's
        slug is used as a sample for all season requests.
        """
        slug_sample = traverse_obj(json_data, ('related', 'data', ..., 'slug'))[0]
        for season in traverse_obj(json_data, ('seasons', ..., 'id')):
            playlist_json = self._call_api(
                slug_sample, 'season', display_id=playlist_id, season_id=season)
            # First request learns the page count; then fetch each page (1-based).
            for current_page in range(playlist_json['response']['season_list']['last_page']):
                playlist_json = self._call_api(slug_sample, 'season', query={'page': current_page + 1},
                                               season_id=season, display_id=playlist_id)
                for slug in traverse_obj(playlist_json, ('response', ..., 'data', ..., 'slug')):
                    yield self.url_result(f'https://www.netverse.id/video/{slug}', NetverseIE)

    def _real_extract(self, url):
        playlist_id, sites_type = self._match_valid_url(url).group('display_id', 'type')
        playlist_data = self._call_api(playlist_id, sites_type)

        return self.playlist_result(
            self.parse_playlist(playlist_data['response'], playlist_id),
            traverse_obj(playlist_data, ('response', 'webseries_info', 'slug')),
            traverse_obj(playlist_data, ('response', 'webseries_info', 'title')))
class NetverseSearchIE(SearchInfoExtractor):
    _SEARCH_KEY = 'netsearch'

    _TESTS = [{
        'url': 'netsearch10:tetangga',
        'info_dict': {
            'id': 'tetangga',
            'title': 'tetangga',
        },
        'playlist_count': 10,
    }]

    def _search_results(self, query):
        """Yield search results for *query*, paging through the search API."""
        last_page = None
        for i in itertools.count(1):
            search_data = self._download_json(
                'https://api.netverse.id/search/elastic/search', query,
                query={'q': query, 'page': i}, note=f'Downloading page {i}')

            videos = traverse_obj(search_data, ('response', 'data', ...))
            for video in videos:
                yield self.url_result(f'https://netverse.id/video/{video["slug"]}', NetverseIE)

            # Total page count is read once from the first response and reused.
            last_page = last_page or traverse_obj(search_data, ('response', 'lastpage'))
            # Stop on an empty page or once all known pages were consumed.
            if not videos or i >= (last_page or 0):
                break
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/arcpublishing.py | yt_dlp/extractor/arcpublishing.py | import re
from .common import InfoExtractor
from ..utils import (
extract_attributes,
int_or_none,
join_nonempty,
parse_iso8601,
try_get,
)
class ArcPublishingIE(InfoExtractor):
    """Extractor for Arc Publishing (POWA) players embedded on many news sites.

    Handles internal URLs of the form ``arcpublishing:<org>:<uuid>``; such URLs
    are produced from webpages by :meth:`_extract_embed_urls`.
    """
    _UUID_REGEX = r'[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12}'
    _VALID_URL = rf'arcpublishing:(?P<org>[a-z]+):(?P<id>{_UUID_REGEX})'
    _TESTS = [{
        # https://www.adn.com/politics/2020/11/02/video-senate-candidates-campaign-in-anchorage-on-eve-of-election-day/
        'url': 'arcpublishing:adn:8c99cb6e-b29c-4bc9-9173-7bf9979225ab',
        'only_matching': True,
    }, {
        # https://www.bostonglobe.com/video/2020/12/30/metro/footage-released-showing-officer-talking-about-striking-protesters-with-car/
        'url': 'arcpublishing:bostonglobe:232b7ae6-7d73-432d-bc0a-85dbf0119ab1',
        'only_matching': True,
    }, {
        # https://www.actionnewsjax.com/video/live-stream/
        'url': 'arcpublishing:cmg:cfb1cf1b-3ab5-4d1b-86c5-a5515d311f2a',
        'only_matching': True,
    }, {
        # https://elcomercio.pe/videos/deportes/deporte-total-futbol-peruano-seleccion-peruana-la-valorizacion-de-los-peruanos-en-el-exterior-tras-un-2020-atipico-nnav-vr-video-noticia/
        'url': 'arcpublishing:elcomercio:27a7e1f8-2ec7-4177-874f-a4feed2885b3',
        'only_matching': True,
    }, {
        # https://www.clickondetroit.com/video/community/2020/05/15/events-surrounding-woodward-dream-cruise-being-canceled/
        'url': 'arcpublishing:gmg:c8793fb2-8d44-4242-881e-2db31da2d9fe',
        'only_matching': True,
    }, {
        # https://www.wabi.tv/video/2020/12/30/trenton-company-making-equipment-pfizer-covid-vaccine/
        'url': 'arcpublishing:gray:0b0ba30e-032a-4598-8810-901d70e6033e',
        'only_matching': True,
    }, {
        # https://www.lateja.cr/el-mundo/video-china-aprueba-con-condiciones-su-primera/dfcbfa57-527f-45ff-a69b-35fe71054143/video/
        'url': 'arcpublishing:gruponacion:dfcbfa57-527f-45ff-a69b-35fe71054143',
        'only_matching': True,
    }, {
        # https://www.fifthdomain.com/video/2018/03/09/is-america-vulnerable-to-a-cyber-attack/
        'url': 'arcpublishing:mco:aa0ca6fe-1127-46d4-b32c-be0d6fdb8055',
        'only_matching': True,
    }, {
        # https://www.vl.no/kultur/2020/12/09/en-melding-fra-en-lytter-endret-julelista-til-lewi-bergrud/
        'url': 'arcpublishing:mentormedier:47a12084-650b-4011-bfd0-3699b6947b2d',
        'only_matching': True,
    }, {
        # https://www.14news.com/2020/12/30/whiskey-theft-caught-camera-henderson-liquor-store/
        'url': 'arcpublishing:raycom:b89f61f8-79fa-4c09-8255-e64237119bf7',
        'only_matching': True,
    }, {
        # https://www.theglobeandmail.com/world/video-ethiopian-woman-who-became-symbol-of-integration-in-italy-killed-on/
        'url': 'arcpublishing:tgam:411b34c1-8701-4036-9831-26964711664b',
        'only_matching': True,
    }, {
        # https://www.pilotonline.com/460f2931-8130-4719-8ea1-ffcb2d7cb685-132.html
        'url': 'arcpublishing:tronc:460f2931-8130-4719-8ea1-ffcb2d7cb685',
        'only_matching': True,
    }]
    _WEBPAGE_TESTS = [{
        'url': 'https://www.uppermichiganssource.com/2025/07/18/scattered-showers-storms-bring-heavy-rain-potential/',
        'info_dict': {
            'id': '508116f7-e999-48db-b7c2-60a04842679b',
            'ext': 'mp4',
            'title': 'Scattered showers & storms bring heavy rain potential',
            'description': 'Scattered showers & storms bring heavy rain potential',
            'duration': 2016,
            'thumbnail': r're:https?://.+\.jpg',
            'timestamp': 1752881287,
            'upload_date': '20250718',
        },
        'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest'],
    }]
    # (org list, API host template) pairs; organizations not listed here fall
    # back to the default template chosen in _real_extract
    _POWA_DEFAULTS = [
        (['cmg', 'prisa'], '%s-config-prod.api.cdn.arcpublishing.com/video'),
        ([
            'adn', 'advancelocal', 'answers', 'bonnier', 'bostonglobe', 'demo',
            'gmg', 'gruponacion', 'infobae', 'mco', 'nzme', 'pmn', 'raycom',
            'spectator', 'tbt', 'tgam', 'tronc', 'wapo', 'wweek',
        ], 'video-api-cdn.%s.arcpublishing.com/api'),
    ]

    @classmethod
    def _extract_embed_urls(cls, url, webpage):
        """Return arcpublishing: URLs for every POWA player div on the page."""
        entries = []
        # https://arcpublishing.atlassian.net/wiki/spaces/POWA/overview
        for powa_el in re.findall(rf'(<div[^>]+class="[^"]*\bpowa\b[^"]*"[^>]+data-uuid="{ArcPublishingIE._UUID_REGEX}"[^>]*>)', webpage):
            powa = extract_attributes(powa_el) or {}
            org = powa.get('data-org')
            uuid = powa.get('data-uuid')
            # Both attributes are required to build the internal URL.
            if org and uuid:
                entries.append(f'arcpublishing:{org}:{uuid}')
        return entries

    def _real_extract(self, url):
        org, uuid = self._match_valid_url(url).groups()
        # Pick the API host template registered for this organization, or fall
        # back to the generic one (for-else: no break means no match).
        for orgs, tmpl in self._POWA_DEFAULTS:
            if org in orgs:
                base_api_tmpl = tmpl
                break
        else:
            base_api_tmpl = '%s-prod-cdn.video-api.arcpublishing.com/api'
        # 'wapo' uses a different organization name on the API host.
        if org == 'wapo':
            org = 'washpost'
        video = self._download_json(
            'https://%s/v1/ansvideos/findByUuid' % (base_api_tmpl % org),
            uuid, query={'uuid': uuid})[0]
        title = video['headlines']['basic']
        is_live = video.get('status') == 'live'
        urls = []  # deduplicates stream URLs across entries
        formats = []
        for s in video.get('streams', []):
            s_url = s.get('url')
            if not s_url or s_url in urls:
                continue
            urls.append(s_url)
            stream_type = s.get('stream_type')
            if stream_type == 'smil':
                smil_formats = self._extract_smil_formats(
                    s_url, uuid, fatal=False)
                for f in smil_formats:
                    # Normalize RTMP formats extracted from SMIL manifests.
                    if f['url'].endswith('/cfx/st'):
                        f['app'] = 'cfx/st'
                        if not f['play_path'].startswith('mp4:'):
                            f['play_path'] = 'mp4:' + f['play_path']
                    if isinstance(f['tbr'], float):
                        # SMIL reports total bitrate; expose it as video bitrate.
                        f['vbr'] = f['tbr'] * 1000
                        del f['tbr']
                        f['format_id'] = 'rtmp-%d' % f['vbr']
                formats.extend(smil_formats)
            elif stream_type in ('ts', 'hls'):
                m3u8_formats = self._extract_m3u8_formats(
                    s_url, uuid, 'mp4', live=is_live, m3u8_id='hls', fatal=False)
                # Skip video-only manifests entirely.
                if all(f.get('acodec') == 'none' for f in m3u8_formats):
                    continue
                for f in m3u8_formats:
                    height = f.get('height')
                    if not height:
                        continue
                    # The bitrate is often encoded in the URL next to the height.
                    vbr = self._search_regex(
                        r'[_x]%d[_-](\d+)' % height, f['url'], 'vbr', default=None)
                    if vbr:
                        f['vbr'] = int(vbr)
                formats.extend(m3u8_formats)
            else:
                # Progressive download stream with explicit properties.
                vbr = int_or_none(s.get('bitrate'))
                formats.append({
                    'format_id': join_nonempty(stream_type, vbr),
                    'vbr': vbr,
                    'width': int_or_none(s.get('width')),
                    'height': int_or_none(s.get('height')),
                    'filesize': int_or_none(s.get('filesize')),
                    'url': s_url,
                    'quality': -10,
                })

        subtitles = {}
        for subtitle in (try_get(video, lambda x: x['subtitles']['urls'], list) or []):
            subtitle_url = subtitle.get('url')
            if subtitle_url:
                subtitles.setdefault('en', []).append({'url': subtitle_url})

        return {
            'id': uuid,
            'title': title,
            'thumbnail': try_get(video, lambda x: x['promo_image']['url']),
            'description': try_get(video, lambda x: x['subheadlines']['basic']),
            'formats': formats,
            # API reports duration in centiseconds.
            'duration': int_or_none(video.get('duration'), 100),
            'timestamp': parse_iso8601(video.get('created_date')),
            'subtitles': subtitles,
            'is_live': is_live,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/xinpianchang.py | yt_dlp/extractor/xinpianchang.py | from .common import InfoExtractor
from ..utils import (
int_or_none,
str_or_none,
try_get,
url_or_none,
)
class XinpianchangIE(InfoExtractor):
    """Extractor for xinpianchang.com (新片场) video pages."""
    _VALID_URL = r'https?://(www\.)?xinpianchang\.com/(?P<id>a\d+)'
    IE_DESC = '新片场'
    _TESTS = [{
        'url': 'https://www.xinpianchang.com/a11766551',
        'info_dict': {
            'id': 'a11766551',
            'ext': 'mp4',
            'title': '北京2022冬奥会闭幕式再见短片-冰墩墩下班了',
            'description': 'md5:4a730c10639a82190fabe921c0fa4b87',
            'duration': 151,
            'thumbnail': r're:^https?://oss-xpc0\.xpccdn\.com.+/assets/',
            'uploader': '正时文创',
            'uploader_id': '10357277',
            'categories': ['宣传片', '国家城市', '广告', '其他'],
            'tags': ['北京冬奥会', '冰墩墩', '再见', '告别', '冰墩墩哭了', '感动', '闭幕式', '熄火'],
        },
    }, {
        'url': 'https://www.xinpianchang.com/a11762904',
        'info_dict': {
            'id': 'a11762904',
            'ext': 'mp4',
            'title': '冬奥会决胜时刻《法国派出三只鸡?》',
            'description': 'md5:55cb139ef8f48f0c877932d1f196df8b',
            'duration': 136,
            'thumbnail': r're:^https?://oss-xpc0\.xpccdn\.com.+/assets/',
            'uploader': '精品动画',
            'uploader_id': '10858927',
            'categories': ['动画', '三维CG'],
            'tags': ['France Télévisions', '法国3台', '蠢萌', '冬奥会'],
        },
    }, {
        'url': 'https://www.xinpianchang.com/a11779743?from=IndexPick&part=%E7%BC%96%E8%BE%91%E7%B2%BE%E9%80%89&index=2',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id=video_id, headers={'Referer': url})
        # The page embeds vid/appKey in the Next.js payload; both are needed
        # for the media API request below.
        video_data = self._search_nextjs_data(webpage, video_id)['props']['pageProps']['detail']['video']

        data = self._download_json(
            f'https://mod-api.xinpianchang.com/mod/api/v2/media/{video_data["vid"]}', video_id,
            query={'appKey': video_data['appKey']})['data']
        formats, subtitles = [], {}
        # `resource` may be missing or null; treat that as "no streams"
        # instead of raising AttributeError.
        for k, v in (data.get('resource') or {}).items():
            if k in ('dash', 'hls'):
                v_url = v.get('url')
                if not v_url:
                    continue
                if k == 'dash':
                    fmts, subs = self._extract_mpd_formats_and_subtitles(v_url, video_id=video_id)
                else:  # k == 'hls'
                    fmts, subs = self._extract_m3u8_formats_and_subtitles(v_url, video_id=video_id)
                formats.extend(fmts)
                subtitles = self._merge_subtitles(subtitles, subs)
            elif k == 'progressive':
                # NOTE: previously written as `for prog in v if prog.get('url') or []`,
                # where the `or []` bound to the filter (dead code) and a null
                # list raised TypeError; the fallback belongs on the iterable.
                formats.extend([{
                    'url': url_or_none(prog.get('url')),
                    'width': int_or_none(prog.get('width')),
                    'height': int_or_none(prog.get('height')),
                    'ext': 'mp4',
                    'http_headers': {
                        # NB: Server returns 403 without the Range header
                        'Range': 'bytes=0-',
                    },
                } for prog in v or [] if prog.get('url')])

        return {
            'id': video_id,
            'title': data.get('title'),
            'description': data.get('description'),
            'duration': int_or_none(data.get('duration')),
            'categories': data.get('categories'),
            'tags': data.get('keywords'),
            'thumbnail': data.get('cover'),
            'uploader': try_get(data, lambda x: x['owner']['username']),
            'uploader_id': str_or_none(try_get(data, lambda x: x['owner']['id'])),
            'formats': formats,
            'subtitles': subtitles,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/telebruxelles.py | yt_dlp/extractor/telebruxelles.py | import re
from .common import InfoExtractor
class TeleBruxellesIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?(?:telebruxelles|bx1)\.be/(?:[^/]+/)*(?P<id>[^/#?]+)'
    _TESTS = [{
        'url': 'http://bx1.be/news/que-risque-lauteur-dune-fausse-alerte-a-la-bombe/',
        'md5': 'a2a67a5b1c3e8c9d33109b902f474fd9',
        'info_dict': {
            'id': '158856',
            'display_id': 'que-risque-lauteur-dune-fausse-alerte-a-la-bombe',
            'ext': 'mp4',
            'title': 'Que risque l’auteur d’une fausse alerte à la bombe ?',
            'description': 'md5:3cf8df235d44ebc5426373050840e466',
        },
    }, {
        'url': 'http://bx1.be/sport/futsal-schaerbeek-sincline-5-3-a-thulin/',
        'md5': 'dfe07ecc9c153ceba8582ac912687675',
        'info_dict': {
            'id': '158433',
            'display_id': 'futsal-schaerbeek-sincline-5-3-a-thulin',
            'ext': 'mp4',
            'title': 'Futsal : Schaerbeek s’incline 5-3 à Thulin',
            'description': 'md5:fd013f1488d5e2dceb9cebe39e2d569b',
        },
    }, {
        'url': 'http://bx1.be/emission/bxenf1-gastronomie/',
        'only_matching': True,
    }, {
        'url': 'https://bx1.be/berchem-sainte-agathe/personnel-carrefour-de-berchem-sainte-agathe-inquiet/',
        'only_matching': True,
    }, {
        'url': 'https://bx1.be/dernier-jt/',
        'only_matching': True,
    }, {
        # live stream
        'url': 'https://bx1.be/lives/direct-tv/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        # Numeric post id (preferred over the URL slug when available).
        article_id = self._html_search_regex(
            r'<article[^>]+\bid=["\']post-(\d+)', webpage, 'article ID', default=None)
        title = self._html_search_regex(
            r'<h1[^>]*>(.+?)</h1>', webpage, 'title',
            default=None) or self._og_search_title(webpage)
        description = self._og_search_description(webpage, default=None)

        # The player config builds the URL by string concatenation in JS;
        # capture the whole expression and normalize it below.
        rtmp_url = self._html_search_regex(
            r'file["\']?\s*:\s*"(r(?:tm|mt)ps?://[^/]+/(?:vod/mp4:"\s*\+\s*"[^"]+"\s*\+\s*"\.mp4|stream/live))"',
            webpage, 'RTMP url')
        # Yes, they have a typo in scheme name for live stream URLs (e.g.
        # https://bx1.be/lives/direct-tv/)
        rtmp_url = re.sub(r'^rmtp', 'rtmp', rtmp_url)
        # Collapse the JS string concatenation ('", + "') into a single URL.
        rtmp_url = re.sub(r'"\s*\+\s*"', '', rtmp_url)
        formats = self._extract_wowza_formats(rtmp_url, article_id or display_id)

        is_live = 'stream/live' in rtmp_url

        return {
            'id': article_id or display_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'formats': formats,
            'is_live': is_live,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/opencast.py | yt_dlp/extractor/opencast.py | import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
determine_ext,
int_or_none,
parse_iso8601,
traverse_obj,
variadic,
)
class OpencastBaseIE(InfoExtractor):
    """Shared logic for Opencast instances: known hosts, API access and
    mediapackage parsing."""
    # Known public Opencast deployments.
    _INSTANCES_RE = r'''(?:
                            opencast\.informatik\.kit\.edu|
                            electures\.uni-muenster\.de|
                            oc-presentation\.ltcc\.tuwien\.ac\.at|
                            medien\.ph-noe\.ac\.at|
                            oc-video\.ruhr-uni-bochum\.de|
                            oc-video1\.ruhr-uni-bochum\.de|
                            opencast\.informatik\.uni-goettingen\.de|
                            heicast\.uni-heidelberg\.de|
                            opencast\.hawk\.de:8080|
                            opencast\.hs-osnabrueck\.de|
                            video[0-9]+\.virtuos\.uni-osnabrueck\.de|
                            opencast\.uni-koeln\.de|
                            media\.opencast\.hochschule-rhein-waal\.de|
                            matterhorn\.dce\.harvard\.edu|
                            hs-harz\.opencast\.uni-halle\.de|
                            videocampus\.urz\.uni-leipzig\.de|
                            media\.uct\.ac\.za|
                            vid\.igb\.illinois\.edu|
                            cursosabertos\.c3sl\.ufpr\.br|
                            mcmedia\.missioncollege\.org|
                            clases\.odon\.edu\.uy
                        )'''
    _UUID_RE = r'[\da-fA-F]{8}-[\da-fA-F]{4}-[\da-fA-F]{4}-[\da-fA-F]{4}-[\da-fA-F]{12}'

    def _call_api(self, host, video_id, **kwargs):
        # _API_BASE is defined by each subclass (episode vs. series endpoint).
        return self._download_json(self._API_BASE % (host, video_id), video_id, **kwargs)

    def _parse_mediapackage(self, video):
        """Build an info dict from a single Opencast mediapackage object."""
        video_id = video.get('id')
        if video_id is None:
            raise ExtractorError('Video id was not found')

        formats = []
        # 'track' may be a single object or a list; variadic() normalizes that.
        for track in variadic(traverse_obj(video, ('media', 'track')) or []):
            href = track.get('url')
            if href is None:
                continue
            ext = determine_ext(href, None)

            # Dispatch on the declared transport first, then on URL extension.
            transport = track.get('transport')
            if transport == 'DASH' or ext == 'mpd':
                formats.extend(self._extract_mpd_formats(href, video_id, mpd_id='dash', fatal=False))
            elif transport == 'HLS' or ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    href, video_id, m3u8_id='hls', entry_protocol='m3u8_native', fatal=False))
            elif transport == 'HDS' or ext == 'f4m':
                formats.extend(self._extract_f4m_formats(href, video_id, f4m_id='hds', fatal=False))
            elif transport == 'SMOOTH':
                formats.extend(self._extract_ism_formats(href, video_id, ism_id='smooth', fatal=False))
            elif ext == 'smil':
                formats.extend(self._extract_smil_formats(href, video_id, fatal=False))
            else:
                # Plain progressive track; a missing audio/video object means
                # the corresponding codec is absent.
                track_obj = {
                    'url': href,
                    'ext': ext,
                    'format_note': track.get('transport'),
                    'resolution': traverse_obj(track, ('video', 'resolution')),
                    'fps': int_or_none(traverse_obj(track, ('video', 'framerate'))),
                    'vbr': int_or_none(traverse_obj(track, ('video', 'bitrate')), scale=1000),
                    'vcodec': traverse_obj(track, ('video', 'encoder', 'type')) if track.get('video') else 'none',
                    'abr': int_or_none(traverse_obj(track, ('audio', 'bitrate')), scale=1000),
                    'asr': int_or_none(traverse_obj(track, ('audio', 'samplingrate'))),
                    'acodec': traverse_obj(track, ('audio', 'encoder', 'type')) if track.get('audio') else 'none',
                }

                if transport == 'RTMP':
                    # Split the RTMP URL into app / playpath components.
                    m_obj = re.search(r'(?:rtmp://[^/]+/(?P<app>[^/]+))/(?P<ext>.+):(?P<playpath>.+)', href)
                    if not m_obj:
                        continue
                    track_obj.update({
                        'app': m_obj.group('app'),
                        'ext': m_obj.group('ext'),
                        'play_path': m_obj.group('ext') + ':' + m_obj.group('playpath'),
                        'rtmp_live': True,
                        'preference': -2,
                    })
                formats.append(track_obj)

        return {
            'id': video_id,
            'formats': formats,
            'title': video.get('title'),
            'series': video.get('seriestitle'),
            'season_id': video.get('series'),
            'creator': traverse_obj(video, ('creators', 'creator')),
            'timestamp': parse_iso8601(video.get('start')),
            # First attachment URL serves as the thumbnail.
            'thumbnail': traverse_obj(video, ('attachments', 'attachment', ..., 'url'), get_all=False),
        }
class OpencastIE(OpencastBaseIE):
    # Single-episode Paella player pages.
    _VALID_URL = rf'''(?x)
                    https?://(?P<host>{OpencastBaseIE._INSTANCES_RE})/paella/ui/watch\.html\?
                    (?:[^#]+&)?id=(?P<id>{OpencastBaseIE._UUID_RE})'''

    # Episode lookup endpoint used by _call_api.
    _API_BASE = 'https://%s/search/episode.json?id=%s'

    _TESTS = [
        {
            'url': 'https://oc-video1.ruhr-uni-bochum.de/paella/ui/watch.html?id=ed063cd5-72c8-46b5-a60a-569243edcea8',
            'md5': '554c8e99a90f7be7e874619fcf2a3bc9',
            'info_dict': {
                'id': 'ed063cd5-72c8-46b5-a60a-569243edcea8',
                'ext': 'mp4',
                'title': '11 - Kryptographie - 24.11.2015',
                'thumbnail': r're:^https?://.*\.jpg$',
                'timestamp': 1606208400,
                'upload_date': '20201124',
                'season_id': 'cf68a4a1-36b1-4a53-a6ba-61af5705a0d0',
                'series': 'Kryptographie - WiSe 15/16',
                'creator': 'Alexander May',
            },
        },
    ]

    def _real_extract(self, url):
        host, video_id = self._match_valid_url(url).group('host', 'id')
        # The episode endpoint returns a single result with the mediapackage.
        return self._parse_mediapackage(
            self._call_api(host, video_id)['search-results']['result']['mediapackage'])
class OpencastPlaylistIE(OpencastBaseIE):
    # Series pages (engage UI or LTI tools view).
    _VALID_URL = rf'''(?x)
                    https?://(?P<host>{OpencastBaseIE._INSTANCES_RE})(?:
                        /engage/ui/index\.html\?(?:[^#]+&)?epFrom=|
                        /ltitools/index\.html\?(?:[^#]+&)?series=
                    )(?P<id>{OpencastBaseIE._UUID_RE})'''

    # Series lookup endpoint used by _call_api.
    _API_BASE = 'https://%s/search/episode.json?sid=%s'

    _TESTS = [
        {
            'url': 'https://oc-video1.ruhr-uni-bochum.de/engage/ui/index.html?epFrom=cf68a4a1-36b1-4a53-a6ba-61af5705a0d0',
            'info_dict': {
                'id': 'cf68a4a1-36b1-4a53-a6ba-61af5705a0d0',
                'title': 'Kryptographie - WiSe 15/16',
            },
            'playlist_mincount': 29,
        },
        {
            'url': 'https://oc-video1.ruhr-uni-bochum.de/ltitools/index.html?subtool=series&series=cf68a4a1-36b1-4a53-a6ba-61af5705a0d0&lng=de',
            'info_dict': {
                'id': 'cf68a4a1-36b1-4a53-a6ba-61af5705a0d0',
                'title': 'Kryptographie - WiSe 15/16',
            },
            'playlist_mincount': 29,
        },
        {
            'url': 'https://electures.uni-muenster.de/engage/ui/index.html?e=1&p=1&epFrom=39391d10-a711-4d23-b21d-afd2ed7d758c',
            'info_dict': {
                'id': '39391d10-a711-4d23-b21d-afd2ed7d758c',
                'title': '021670 Theologische Themen bei Hans Blumenberg WiSe 2017/18',
            },
            'playlist_mincount': 13,
        },
    ]

    def _real_extract(self, url):
        host, video_id = self._match_valid_url(url).group('host', 'id')
        # 'result' may be a single object or a list; variadic() normalizes it.
        entries = [
            self._parse_mediapackage(episode['mediapackage'])
            for episode in variadic(self._call_api(host, video_id)['search-results']['result'])
            if episode.get('mediapackage')
        ]

        # Playlist title: the series name taken from the first episode.
        return self.playlist_result(entries, video_id, traverse_obj(entries, (0, 'series')))
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/museai.py | yt_dlp/extractor/museai.py | import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
determine_ext,
float_or_none,
int_or_none,
js_to_json,
traverse_obj,
url_or_none,
)
class MuseAIIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?muse\.ai/(?:v|embed)/(?P<id>\w+)'
    _TESTS = [{
        'url': 'https://muse.ai/embed/YdTWvUW',
        'md5': 'f994f9a38be1c3aaf9e37cbd7d76fe7c',
        'info_dict': {
            'id': 'YdTWvUW',
            'ext': 'mp4',
            'title': '2023-05-28-Grabien-1941111 (1)',
            'description': '',
            'uploader': 'Today News Africa',
            'uploader_id': 'TodayNewsAfrica',
            'upload_date': '20230528',
            'timestamp': 1685285044,
            'duration': 1291.3,
            'view_count': int,
            'availability': 'public',
        },
    }, {
        'url': 'https://muse.ai/v/gQ4gGAA-0756',
        'md5': '52dbfc78e865e56dc19a1715badc35e8',
        'info_dict': {
            'id': 'gQ4gGAA',
            'ext': 'mp4',
            'title': '0756',
            'description': 'md5:0ca1483f9aac423e9a96ad00bb3a0785',
            'uploader': 'Aerial.ie',
            'uploader_id': 'aerial',
            'upload_date': '20210306',
            'timestamp': 1615072842,
            'duration': 21.4,
            'view_count': int,
            'availability': 'public',
        },
    }]
    _WEBPAGE_TESTS = [{
        'url': 'https://muse.ai/docs',
        'playlist_mincount': 4,
        'info_dict': {
            'id': 'docs',
            'title': 'muse.ai | docs',
            'description': 'md5:6c0293431481582739c82ee8902687fa',
            'age_limit': 0,
            'thumbnail': 'https://muse.ai/static/imgs/poster-img-docs.png',
        },
        'params': {'allowed_extractors': ['all', '-html5']},
    }]
    _EMBED_REGEX = [r'<iframe[^>]*\bsrc=["\'](?P<url>https://muse\.ai/embed/\w+)']

    @classmethod
    def _extract_embed_urls(cls, url, webpage):
        # Iframe embeds via _EMBED_REGEX, plus script-based MusePlayer embeds.
        yield from super()._extract_embed_urls(url, webpage)
        for embed_id in re.findall(r'<script>[^<]*\bMusePlayer\(\{[^}<]*\bvideo:\s*["\'](\w+)["\']', webpage):
            yield f'https://muse.ai/embed/{embed_id}'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Always use the embed page; it carries the player setData() payload.
        webpage = self._download_webpage(f'https://muse.ai/embed/{video_id}', video_id)
        data = self._search_json(
            r'player\.setData\(', webpage, 'player data', video_id, transform_source=js_to_json)

        source_url = data['url']
        if not url_or_none(source_url):
            raise ExtractorError('Unable to extract video URL')

        formats = [{
            'url': source_url,
            'format_id': 'source',
            'quality': 1,
            **traverse_obj(data, {
                'ext': ('filename', {determine_ext}),
                'width': ('width', {int_or_none}),
                'height': ('height', {int_or_none}),
                'filesize': ('size', {int_or_none}),
            }),
        }]
        # '/data' source URLs also expose adaptive manifests under '/videos'.
        if source_url.endswith('/data'):
            base_url = f'{source_url[:-5]}/videos'
            formats.extend(self._extract_m3u8_formats(
                f'{base_url}/hls.m3u8', video_id, m3u8_id='hls', fatal=False))
            formats.extend(self._extract_mpd_formats(
                f'{base_url}/dash.mpd', video_id, mpd_id='dash', fatal=False))

        return {
            'id': video_id,
            'formats': formats,
            **traverse_obj(data, {
                'title': ('title', {str}),
                'description': ('description', {str}),
                'duration': ('duration', {float_or_none}),
                'timestamp': ('tcreated', {int_or_none}),
                'uploader': ('owner_name', {str}),
                'uploader_id': ('owner_username', {str}),
                'view_count': ('views', {int_or_none}),
                # 'mature' flag maps to an 18+ age limit.
                'age_limit': ('mature', {lambda x: 18 if x else None}),
                # Anything other than private/unlisted is treated as public.
                'availability': ('visibility', {lambda x: x if x in ('private', 'unlisted') else 'public'}),
            }),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/hse.py | yt_dlp/extractor/hse.py | from .common import InfoExtractor
from ..utils import (
ExtractorError,
traverse_obj,
unified_timestamp,
)
class HSEShowBaseIE(InfoExtractor):
    """Shared helpers for the HSE show and product extractors."""
    _GEO_COUNTRIES = ['DE']

    def _extract_redux_data(self, url, video_id):
        # The page state is embedded as JSON assigned to window.__REDUX_DATA__.
        page = self._download_webpage(url, video_id)
        raw_state = self._html_search_regex(
            r'window\.__REDUX_DATA__\s*=\s*({.*});?', page, 'redux data')
        # Literal newlines inside the blob would break JSON parsing.
        return self._parse_json(raw_state.replace('\n', ''), video_id)

    def _extract_formats_and_subtitles(self, sources, video_id):
        if not sources:
            raise ExtractorError('No video found', expected=True, video_id=video_id)
        formats = []
        subtitles = {}
        # Only HLS renditions are usable; every other mimetype is skipped.
        hls_sources = (source for source in sources
                       if source['mimetype'] == 'application/x-mpegURL')
        for source in hls_sources:
            fmts, subs = self._extract_m3u8_formats_and_subtitles(source['url'], video_id, ext='mp4')
            formats.extend(fmts)
            subtitles = self._merge_subtitles(subtitles, subs)
        return formats, subtitles
class HSEShowIE(HSEShowBaseIE):
    _VALID_URL = r'https?://(?:www\.)?hse\.de/dpl/c/tv-shows/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'https://www.hse.de/dpl/c/tv-shows/505350',
        'info_dict': {
            'id': '505350',
            'ext': 'mp4',
            'title': 'Pfeffinger Mode & Accessoires',
            'timestamp': 1638810000,
            'upload_date': '20211206',
            'channel': 'HSE24',
            'uploader': 'Arina Pirayesh',
        },
        'params': {'skip_download': 'm3u8'},
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        json_data = self._extract_redux_data(url, video_id)
        formats, subtitles = self._extract_formats_and_subtitles(
            traverse_obj(json_data, ('tvShowPage', 'tvShowVideo', 'sources')), video_id)

        show = traverse_obj(json_data, ('tvShowPage', 'tvShow')) or {}
        return {
            'id': video_id,
            'title': show.get('title') or video_id,
            'formats': formats,
            # date and hour are separate fields in the redux state.
            'timestamp': unified_timestamp(f'{show.get("date")} {show.get("hour")}:00'),
            'thumbnail': traverse_obj(json_data, ('tvShowVideo', 'poster')),
            # Channel name is embedded in actionFieldText, e.g. 'tvShow | HSE24_...'.
            'channel': self._search_regex(
                r'tvShow \| ([A-Z0-9]+)_', show.get('actionFieldText') or '', video_id, fatal=False),
            'uploader': show.get('presenter'),
            'subtitles': subtitles,
        }
class HSEProductIE(HSEShowBaseIE):
    _VALID_URL = r'https?://(?:www\.)?hse\.de/dpl/p/product/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'https://www.hse.de/dpl/p/product/408630',
        'info_dict': {
            'id': '408630',
            'ext': 'mp4',
            'title': 'Hose im Ponte-Mix',
            'uploader': 'Judith Williams',
        },
        'params': {'skip_download': 'm3u8'},
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        json_data = self._extract_redux_data(url, video_id)
        # Only the first product video is extracted.
        video = traverse_obj(json_data, ('productContent', 'productContent', 'videos', 0)) or {}
        formats, subtitles = self._extract_formats_and_subtitles(video.get('sources'), video_id)

        return {
            'id': video_id,
            'title': traverse_obj(json_data, ('productDetail', 'product', 'name', 'short')) or video_id,
            'formats': formats,
            'subtitles': subtitles,
            'thumbnail': video.get('poster'),
            # The brand name is surfaced as the uploader.
            'uploader': traverse_obj(json_data, ('productDetail', 'product', 'brand', 'brandName')),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/lcp.py | yt_dlp/extractor/lcp.py | from .common import InfoExtractor
class LcpPlayIE(InfoExtractor):
    # Embedded player for lcp.fr; pages on lcp.fr delegate here via LcpIE.
    _WORKING = False
    _VALID_URL = r'https?://play\.lcp\.fr/embed/(?P<id>[^/]+)/(?P<account_id>[^/]+)/[^/]+/[^/]+'
    _TESTS = [{
        'url': 'http://play.lcp.fr/embed/327336/131064/darkmatter/0',
        'md5': 'b8bd9298542929c06c1c15788b1f277a',
        'info_dict': {
            'id': '327336',
            'ext': 'mp4',
            'title': '327336',
            'timestamp': 1456391602,
            'upload_date': '20160225',
        },
        'params': {
            'skip_download': True,
        },
    }]
class LcpIE(InfoExtractor):
    _WORKING = False
    _VALID_URL = r'https?://(?:www\.)?lcp\.fr/(?:[^/]+/)*(?P<id>[^/]+)'

    _TESTS = [{
        # dailymotion live stream
        'url': 'http://www.lcp.fr/le-direct',
        'info_dict': {
            'id': 'xji3qy',
            'ext': 'mp4',
            'title': 'La Chaine Parlementaire (LCP), Live TNT',
            'description': 'md5:5c69593f2de0f38bd9a949f2c95e870b',
            'uploader': 'LCP',
            'uploader_id': 'xbz33d',
            'timestamp': 1308923058,
            'upload_date': '20110624',
        },
        'params': {
            # m3u8 live stream
            'skip_download': True,
        },
    }, {
        'url': 'http://www.lcp.fr/emissions/277792-les-volontaires',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        # Look for an embedded play.lcp.fr player iframe.
        play_url = self._search_regex(
            rf'<iframe[^>]+src=(["\'])(?P<url>{LcpPlayIE._VALID_URL}?(?:(?!\1).)*)\1',
            webpage, 'play iframe', default=None, group='url')

        # No player embed: let the generic extractor handle the page.
        if not play_url:
            return self.url_result(url, 'Generic')

        title = self._og_search_title(webpage, default=None) or self._html_search_meta(
            'twitter:title', webpage, fatal=True)
        description = self._html_search_meta(
            ('description', 'twitter:description'), webpage)

        # Delegate to LcpPlayIE but keep this page's title/description.
        return {
            '_type': 'url_transparent',
            'ie_key': LcpPlayIE.ie_key(),
            'url': play_url,
            'display_id': display_id,
            'title': title,
            'description': description,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/generic.py | yt_dlp/extractor/generic.py | import os
import re
import types
import urllib.parse
import xml.etree.ElementTree
from .common import InfoExtractor
from .commonprotocols import RtmpIE
from .youtube import YoutubeIE
from ..compat import compat_etree_fromstring
from ..cookies import LenientSimpleCookie
from ..networking.exceptions import HTTPError
from ..networking.impersonate import ImpersonateTarget
from ..utils import (
KNOWN_EXTENSIONS,
MEDIA_EXTENSIONS,
ExtractorError,
UnsupportedError,
determine_ext,
determine_protocol,
dict_get,
extract_basic_auth,
filter_dict,
format_field,
int_or_none,
is_html,
js_to_json,
merge_dicts,
mimetype2ext,
orderedSet,
parse_duration,
parse_resolution,
smuggle_url,
str_or_none,
traverse_obj,
try_call,
unescapeHTML,
unified_timestamp,
unsmuggle_url,
update_url,
update_url_query,
url_or_none,
urlhandle_detect_ext,
urljoin,
variadic,
xpath_attr,
xpath_text,
xpath_with_ns,
)
from ..utils._utils import _UnsafeExtensionError
class GenericIE(InfoExtractor):
IE_DESC = 'Generic downloader that works on some sites'
_VALID_URL = r'.*'
IE_NAME = 'generic'
_NETRC_MACHINE = False # Suppress username warning
_TESTS = [{
# Direct link
# https://github.com/ytdl-org/youtube-dl/commit/c5fa81fe81ce05cd81c20ff4ea6dac3dccdcbf9d
'url': 'https://media.w3.org/2010/05/sintel/trailer.mp4',
'md5': '67d406c2bcb6af27fa886f31aa934bbe',
'info_dict': {
'id': 'trailer',
'ext': 'mp4',
'title': 'trailer',
'direct': True,
'timestamp': 1273772943,
'upload_date': '20100513',
},
}, {
# Direct link: No HEAD support
# https://github.com/ytdl-org/youtube-dl/issues/4032
'url': 'http://ai-radio.org:8000/radio.opus',
'info_dict': {
'id': 'radio',
'ext': 'opus',
'title': 'radio',
},
'skip': 'Invalid URL',
}, {
# Direct link: Incorrect MIME type
# https://github.com/ytdl-org/youtube-dl/commit/c5fa81fe81ce05cd81c20ff4ea6dac3dccdcbf9d
'url': 'https://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
'md5': '4ccbebe5f36706d85221f204d7eb5913',
'info_dict': {
'id': '5_Lennart_Poettering_-_Systemd',
'ext': 'webm',
'title': '5_Lennart_Poettering_-_Systemd',
'direct': True,
'timestamp': 1416498816,
'upload_date': '20141120',
},
}, {
# Direct link: Live HLS; https://castr.com/hlsplayer/
# https://github.com/yt-dlp/yt-dlp/pull/6775
'url': 'https://stream-akamai.castr.com/5b9352dbda7b8c769937e459/live_2361c920455111ea85db6911fe397b9e/index.fmp4.m3u8',
'info_dict': {
'id': 'index.fmp4',
'ext': 'mp4',
'title': str,
'live_status': 'is_live',
},
'params': {'skip_download': 'm3u8'},
}, {
# Compressed when `Accept-Encoding: *`
# https://github.com/ytdl-org/youtube-dl/commit/a074e922967fa571d4f1abb1773c711747060f00
'url': 'http://calimero.tk/muzik/FictionJunction-Parallel_Hearts.flac',
'info_dict': {
'id': 'FictionJunction-Parallel_Hearts',
'ext': 'flac',
'title': 'FictionJunction-Parallel_Hearts',
},
'skip': 'Invalid URL',
}, {
# `Content-Encoding: br` when `Accept-Encoding: *`
# https://github.com/yt-dlp/yt-dlp/commit/3e01ce744a981d8f19ae77ec695005e7000f4703
'url': 'https://www.extra.cz/cauky-lidi-70-dil-babis-predstavil-pohadky-prymulanek-nebo-andrejovy-nove-saty-ac867',
'md5': 'a9a2cad3e54f78e4680c6deef82417e9',
'info_dict': {
'id': 'cauky-lidi-70-dil-babis-predstavil-pohadky-prymulanek-nebo-andrejovy-nove-saty-ac867',
'ext': 'mp4',
'title': 'čauky lidi 70 finall',
'description': 'md5:47b2673a5b76780d9d329783e1fbf5aa',
'direct': True,
'duration': 318.0,
'thumbnail': r're:https?://media\.extra\.cz/static/img/.+\.jpg',
'timestamp': 1654513791,
'upload_date': '20220606',
},
'params': {'extractor_args': {'generic': {'impersonate': ['chrome']}}},
}, {
# HLS: `Content-Type: audio/mpegurl`; https://bitmovin.com/demos/stream-test
# https://github.com/ytdl-org/youtube-dl/commit/20938f768b16c945c6041ba3c0a7ae1a4e790881
'url': 'https://cdn.bitmovin.com/content/assets/art-of-motion-dash-hls-progressive/m3u8s/f08e80da-bf1d-4e3d-8899-f0f6155f6efa.m3u8',
'info_dict': {
'id': 'f08e80da-bf1d-4e3d-8899-f0f6155f6efa',
'ext': 'mp4',
'title': 'f08e80da-bf1d-4e3d-8899-f0f6155f6efa',
'duration': 211,
'timestamp': 1737363648,
'upload_date': '20250120',
},
'params': {'skip_download': 'm3u8'},
}, {
# HLS: `Content-Type: text/plain`; https://github.com/grafov/m3u8
# https://github.com/ytdl-org/youtube-dl/commit/edd9b71c2cca7e5a0df8799710d9ad410ec77d29
'url': 'https://raw.githubusercontent.com/grafov/m3u8/refs/heads/master/sample-playlists/master.m3u8',
'info_dict': {
'id': 'master',
'ext': 'mp4',
'title': 'master',
},
'params': {'skip_download': 'm3u8'},
}, {
# MPEG-DASH; https://bitmovin.com/demos/stream-test
# https://github.com/ytdl-org/youtube-dl/commit/9d939cec48f06a401fb79eb078c1fc50b2aefbe1
'url': 'https://cdn.bitmovin.com/content/assets/art-of-motion-dash-hls-progressive/mpds/f08e80da-bf1d-4e3d-8899-f0f6155f6efa.mpd',
'info_dict': {
'id': 'f08e80da-bf1d-4e3d-8899-f0f6155f6efa',
'ext': 'mp4',
'title': 'f08e80da-bf1d-4e3d-8899-f0f6155f6efa',
'timestamp': 1737363728,
'upload_date': '20250120',
},
'params': {'skip_download': True},
}, {
# Live MPEG-DASH; https://livesim2.dashif.org/urlgen/create
# https://github.com/yt-dlp/yt-dlp/pull/12256
'url': 'https://livesim2.dashif.org/livesim2/ato_10/testpic_2s/Manifest.mpd',
'info_dict': {
'id': 'Manifest',
'ext': 'mp4',
'title': str,
'live_status': 'is_live',
},
'params': {'skip_download': 'livestream'},
}, {
# SMIL
# https://github.com/ytdl-org/youtube-dl/pull/6428
'url': 'https://api.new.livestream.com/accounts/21/events/7954027/videos/166558123.secure.smil',
'info_dict': {
'id': '166558123.secure',
'ext': 'mp4',
'title': '73fb2379-a624-4b6c-bce4-e46086007f2c',
},
'params': {'skip_download': 'smil'},
}, {
# XSPF playlist; https://shellac-archive.ch/de/index.html
# https://github.com/ytdl-org/youtube-dl/commit/1de5cd3ba51ce67d9a1cd3b40157058e78e46692
'url': 'https://shellac-archive.ch/repository/xspf/22-AL0019Z.xspf',
'info_dict': {
'id': '22-AL0019Z',
},
'playlist_count': 12,
'params': {'skip_download': True},
}, {
# RSS feed
# https://github.com/ytdl-org/youtube-dl/commit/c5fa81fe81ce05cd81c20ff4ea6dac3dccdcbf9d
'url': 'http://phihag.de/2014/youtube-dl/rss2.xml',
'info_dict': {
'id': 'https://phihag.de/2014/youtube-dl/rss2.xml',
'title': 'Zero Punctuation',
'description': 'md5:512ae5f840e52eb3c0d08d4bed08eb3e',
},
'playlist_mincount': 11,
}, {
# RSS feed: Includes enclosure, description, and thumbnails
# https://github.com/ytdl-org/youtube-dl/pull/27405
'url': 'https://anchor.fm/s/dd00e14/podcast/rss',
'info_dict': {
'id': 'https://anchor.fm/s/dd00e14/podcast/rss',
'title': '100% Hydrogen ',
'description': 'md5:7ec96327f8b91a2549a2e74f064022a1',
},
'playlist_count': 1,
'params': {'skip_download': True},
}, {
# RSS feed: Includes guid
'url': 'https://www.omnycontent.com/d/playlist/a7b4f8fe-59d9-4afc-a79a-a90101378abf/bf2c1d80-3656-4449-9d00-a903004e8f84/efbff746-e7c1-463a-9d80-a903004e8f8f/podcast.rss',
'info_dict': {
'id': 'https://www.omnycontent.com/d/playlist/a7b4f8fe-59d9-4afc-a79a-a90101378abf/bf2c1d80-3656-4449-9d00-a903004e8f84/efbff746-e7c1-463a-9d80-a903004e8f8f/podcast.rss',
'title': 'The Little Red Podcast',
'description': 'md5:be809a44b63b0c56fb485caf68685520',
},
'playlist_mincount': 76,
}, {
# RSS feed: Includes enclosure and unsupported URLs
# https://github.com/ytdl-org/youtube-dl/pull/16189
'url': 'https://www.interfax.ru/rss.asp',
'info_dict': {
'id': 'https://www.interfax.ru/rss.asp',
'title': 'Интерфакс',
'description': 'md5:49b6b8905772efba21923942bbc0444c',
},
'playlist_mincount': 25,
}, {
# Webpage starts with a duplicate UTF-8 BOM
# https://github.com/yt-dlp/yt-dlp/commit/80e8493ee7c3083f4e215794e4a67ba5265f24f7
'url': 'https://www.filmarkivet.se/movies/paris-d-moll/',
'md5': 'df02cadc719dcc63d43288366f037754',
'info_dict': {
'id': 'paris-d-moll',
'ext': 'mp4',
'title': 'Paris d-moll',
'description': 'md5:319e37ea5542293db37e1e13072fe330',
'thumbnail': r're:https?://www\.filmarkivet\.se/wp-content/uploads/.+\.jpg',
},
}, {
# Multiple HTML5 videos
# https://github.com/ytdl-org/youtube-dl/pull/14107
'url': 'https://www.dagbladet.no/nyheter/etter-ett-ars-planlegging-klaffet-endelig-alt---jeg-matte-ta-en-liten-dans/60413035',
'info_dict': {
'id': '60413035',
'title': 'Etter ett års planlegging, klaffet endelig alt: - Jeg måtte ta en liten dans',
'description': 'md5:bbb4e12e42e78609a74fd421b93b1239',
'thumbnail': r're:https?://www\.dagbladet\.no/images/.+',
},
'playlist_count': 2,
}, {
# Cinerama Player
# https://github.com/ytdl-org/youtube-dl/commit/501f13fbf3d1f7225f91e3e0ad008df2cd3219f1
'url': 'https://www.abc.net.au/res/libraries/cinerama2/examples/single_clip.htm',
'info_dict': {
'id': 'single_clip',
'title': 'Single Clip player examples',
},
'playlist_count': 3,
}, {
# FIXME: Improve extraction
# Flowplayer
# https://github.com/ytdl-org/youtube-dl/commit/4d805e063c6c4ffd557d7c7cb905a3ed9c926b08
'url': 'https://flowplayer.com/resources/demos/standard-setup',
'info_dict': {
'id': 'playlist',
'ext': 'mp4',
'title': 'playlist',
'duration': 13,
'timestamp': 1539082175,
'upload_date': '20181009',
},
'params': {'skip_download': 'm3u8'},
}, {
# JW Player: YouTube
# https://github.com/ytdl-org/youtube-dl/commit/a0f719854463c6f4226e4042dfa80c1b17154e1d
'url': 'https://media.nationalarchives.gov.uk/index.php/webinar-using-discovery-national-archives-online-catalogue/',
'info_dict': {
'id': 'Mrj4DVp2zeA',
'ext': 'mp4',
'title': 'Using Discovery, The National Archives’ online catalogue',
'age_limit': 0,
'availability': 'unlisted',
'categories': ['Education'],
'channel': 'The National Archives UK',
'channel_follower_count': int,
'channel_id': 'UCUuzebc1yADDJEnOLA5P9xw',
'channel_url': 'https://www.youtube.com/channel/UCUuzebc1yADDJEnOLA5P9xw',
'chapters': 'count:13',
'description': 'md5:a236581cd2449dd2df4f93412f3f01c6',
'duration': 3066,
'like_count': int,
'live_status': 'not_live',
'media_type': 'video',
'playable_in_embed': True,
'tags': 'count:5',
'thumbnail': r're:https?://i\.ytimg\.com/vi/.+',
'timestamp': 1423757117,
'upload_date': '20150212',
'uploader': 'The National Archives UK',
'uploader_id': '@TheNationalArchivesUK',
'uploader_url': 'https://www.youtube.com/@TheNationalArchivesUK',
'view_count': int,
},
'add_ie': ['Youtube'],
}, {
# JW Player: Complex
# https://github.com/ytdl-org/youtube-dl/commit/a4a554a79354981fcab55de8eaab7b95a40bbb48
'url': 'https://www.indiedb.com/games/king-machine/videos',
'info_dict': {
'id': 'videos-1',
'ext': 'mp4',
'title': 'Videos & Audio - King Machine (1)',
'description': 'Browse King Machine videos & audio for sweet media. Your eyes will thank you.',
'thumbnail': r're:https?://media\.indiedb\.com/cache/images/.+\.jpg',
'_old_archive_ids': ['generic videos'],
},
}, {
# JW Player: JSON Feed URL
# https://github.com/yt-dlp/yt-dlp/issues/1476
'url': 'https://foodschmooze.org/',
'info_dict': {
'id': 'z00Frhnw',
'ext': 'mp4',
'title': 'Grilling Beef Tenderloin',
'description': '',
'duration': 392.0,
'thumbnail': r're:https?://cdn\.jwplayer\.com/v2/media/.+',
'timestamp': 1465313685,
'upload_date': '20160607',
},
'params': {'skip_download': 'm3u8'},
}, {
# JW Player: RTMP
# https://github.com/ytdl-org/youtube-dl/issues/11993
'url': 'http://www.suffolk.edu/sjc/live.php',
'info_dict': {
'id': 'live',
'ext': 'flv',
'title': 'Massachusetts Supreme Judicial Court Oral Arguments',
},
'skip': 'Invalid URL',
}, {
# KVS Player v7.3.3
# kt_player.js?v=5.1.1
'url': 'https://bogmedia.org/videos/21217/40-nochey-2016/',
'md5': '94166bdb26b4cb1fb9214319a629fc51',
'info_dict': {
'id': '21217',
'ext': 'mp4',
'title': '40 ночей (2016) - BogMedia.org',
'description': 'md5:4e6d7d622636eb7948275432eb256dc3',
'display_id': '40-nochey-2016',
'thumbnail': r're:https?://bogmedia\.org/contents/videos_screenshots/.+\.jpg',
},
}, {
# KVS Player v7.7.11
# kt_player.js?v=5.5.1
# https://github.com/yt-dlp/yt-dlp/commit/a318f59d14792d25b2206c3f50181e03e8716db7
'url': 'https://youix.com/video/leningrad-zoj/',
'md5': '94f96ba95706dc3880812b27b7d8a2b8',
'info_dict': {
'id': '18485',
'ext': 'mp4',
'title': 'Клип: Ленинград - ЗОЖ скачать, смотреть онлайн | Youix.com',
'display_id': 'leningrad-zoj',
'thumbnail': r're:https?://youix\.com/contents/videos_screenshots/.+\.jpg',
},
}, {
# KVS Player v7.10.3
# kt_player.js?v=12
# https://github.com/ytdl-org/youtube-dl/commit/fc2beab0e701c497a003f11fef5c0df54fba1da3
'url': 'https://shooshtime.com/videos/346037/fresh-out-of-the-shower/',
'md5': 'c9a97ad528607a4516d4df83a3aeb12c',
'info_dict': {
'id': '346037',
'ext': 'mp4',
'title': 'Fresh out of the shower - Shooshtime',
'age_limit': 18,
'description': 'md5:efd70fd3973f8750d285c743b910580a',
'display_id': 'fresh-out-of-the-shower',
'thumbnail': r're:https?://i\.shoosh\.co/contents/videos_screenshots/.+\.jpg',
},
'expected_warnings': ['Untested major version'],
}, {
# FIXME: Unable to extract flashvars
# KVS Player v7.11.4
# kt_player.js?v=2.11.5.1
# https://github.com/yt-dlp/yt-dlp/commit/a318f59d14792d25b2206c3f50181e03e8716db7
'url': 'https://www.kvs-demo.com/video/105/kelis-4th-of-july/',
'info_dict': {
'id': '105',
'ext': 'mp4',
'title': 'Kelis - 4th Of July',
},
}, {
# KVS Player v7.11.4
# kt_player.js?v=6.3.2
# https://github.com/yt-dlp/yt-dlp/commit/a318f59d14792d25b2206c3f50181e03e8716db7
'url': 'https://www.kvs-demo.com/embed/105/',
'md5': '1ff84c70acaddbb03288c6cc5ee1879f',
'info_dict': {
'id': '105',
'ext': 'mp4',
'title': 'Kelis - 4th Of July / Embed Player',
'display_id': 'kelis-4th-of-july',
'thumbnail': r're:https?://www\.kvs-demo\.com/contents/videos_screenshots/.+\.jpg',
},
}, {
# twitter:player:stream
# https://github.com/ytdl-org/youtube-dl/commit/371ddb14fe651d4a1e5a8310d6d7c0e395cd92b0
'url': 'https://beltzlaw.com/',
'info_dict': {
'id': 'beltzlaw-1',
'ext': 'mp4',
'title': str,
'description': str,
'thumbnail': r're:https?://beltzlaw\.com/wp-content/uploads/.+\.jpg',
'timestamp': int, # varies
'upload_date': str,
'_old_archive_ids': ['generic beltzlaw'],
},
}, {
# twitter:player
# https://github.com/ytdl-org/youtube-dl/commit/329179073b93e37ab76e759d1fe96d8f984367f3
'url': 'https://cine.ar/',
'md5': 'd3e33335e339f04008690118698dfd08',
'info_dict': {
'id': 'cine-1',
'ext': 'webm',
'title': 'CINE.AR (1)',
'description': 'md5:a4e58f9e2291c940e485f34251898c4a',
'thumbnail': r're:https?://cine\.ar/img/.+\.png',
'_old_archive_ids': ['generic cine'],
},
'params': {'format': 'webm'},
}, {
# JSON-LD: multiple @type
# https://github.com/yt-dlp/yt-dlp/commit/f3c0c77304bc0e5614a65c45629de22f067685ac
'url': 'https://www.nu.nl/280161/video/hoe-een-bladvlo-dit-verwoestende-japanse-onkruid-moet-vernietigen.html',
'info_dict': {
'id': 'ipy2AcGL',
'ext': 'mp4',
'title': 'Hoe een bladvlo dit verwoestende Japanse onkruid moet vernietigen',
'description': 'md5:6a9d644bab0dc2dc06849c2505d8383d',
'duration': 111.0,
'thumbnail': r're:https?://images\.nu\.nl/.+\.jpg',
'timestamp': 1586584674,
'upload_date': '20200411',
},
'params': {'extractor_args': {'generic': {'impersonate': ['chrome']}}},
}, {
# JSON-LD: unexpected @type
# https://github.com/yt-dlp/yt-dlp/pull/5145
'url': 'https://www.autoweek.nl/autotests/artikel/porsche-911-gt3-rs-rij-impressie-2/',
'info_dict': {
'id': 'porsche-911-gt3-rs-rij-impressie-2',
'ext': 'mp4',
'title': 'Test: Porsche 911 GT3 RS - AutoWeek',
'description': 'md5:a17b5bd84288448d8f11b838505718fc',
'direct': True,
'thumbnail': r're:https?://images\.autoweek\.nl/.+',
'timestamp': 1664920902,
'upload_date': '20221004',
},
'params': {'extractor_args': {'generic': {'impersonate': ['chrome']}}},
}, {
# JSON-LD: VideoObject
# https://github.com/ytdl-org/youtube-dl/commit/6e6b70d65f0681317c425bfe1e157f3474afbbe8
'url': 'https://breezy.hr/',
'info_dict': {
'id': 'k6gl2kt2eq',
'ext': 'mp4',
'title': 'Breezy HR\'s ATS helps you find & hire employees sooner',
'average_rating': 4.5,
'description': 'md5:eee75fdd3044c538003f3be327ba01e1',
'duration': 60.1,
'thumbnail': r're:https?://cdn\.prod\.website-files\.com/.+\.webp',
'timestamp': 1485734400,
'upload_date': '20170130',
},
}, {
# Video.js: VOD HLS
# https://github.com/yt-dlp/yt-dlp/pull/6775
'url': 'https://gist.githubusercontent.com/bashonly/2aae0862c50f4a4b84f220c315767208/raw/e3380d413749dabbe804c9c2d8fd9a45142475c7/videojs_hls_test.html',
'info_dict': {
'id': 'videojs_hls_test',
'ext': 'mp4',
'title': 'video',
'duration': 1800,
},
'params': {'skip_download': 'm3u8'},
}, {
# Video.js: YouTube
# https://github.com/ytdl-org/youtube-dl/commit/63d990d2859d0e981da2e416097655798334431b
'url': 'https://ortcam.com/solidworks-%d1%83%d1%80%d0%be%d0%ba-6-%d0%bd%d0%b0%d1%81%d1%82%d1%80%d0%be%d0%b9%d0%ba%d0%b0-%d1%87%d0%b5%d1%80%d1%82%d0%b5%d0%b6%d0%b0_33f9b7351.html?vid=33f9b7351',
'info_dict': {
'id': 'yygqldloqIk',
'ext': 'mp4',
'title': 'SolidWorks. Урок 6 Настройка чертежа',
'age_limit': 0,
'availability': 'public',
'categories': ['Education'],
'channel': 'PROстое3D',
'channel_follower_count': int,
'channel_id': 'UCy91Bug3dERhbwGh2m2Ijng',
'channel_url': 'https://www.youtube.com/channel/UCy91Bug3dERhbwGh2m2Ijng',
'comment_count': int,
'description': 'md5:baf95267792646afdbf030e4d06b2ab3',
'duration': 1160,
'heatmap': 'count:100',
'like_count': int,
'live_status': 'not_live',
'media_type': 'video',
'playable_in_embed': True,
'tags': 'count:17',
'thumbnail': r're:https?://i\.ytimg\.com/vi/.+',
'timestamp': 1363263144,
'upload_date': '20130314',
'uploader': 'PROстое3D',
'uploader_id': '@PROstoe3D',
'uploader_url': 'https://www.youtube.com/@PROstoe3D',
'view_count': int,
},
'add_ie': ['Youtube'],
}, {
# Redirect
# https://github.com/ytdl-org/youtube-dl/issues/413
'url': 'https://www.google.com/url?rct=j&url=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DcmQHVoWB5FY',
'info_dict': {
'id': 'cmQHVoWB5FY',
'ext': 'mp4',
'title': 'First Firefox OS phones side-by-side',
'age_limit': 0,
'availability': 'public',
'categories': ['Entertainment'],
'channel': 'The Verge',
'channel_follower_count': int,
'channel_id': 'UCddiUEpeqJcYeBxX1IVBKvQ',
'channel_is_verified': True,
'channel_url': 'https://www.youtube.com/channel/UCddiUEpeqJcYeBxX1IVBKvQ',
'comment_count': int,
'description': 'md5:7a676046ad24d9ea55cdde4a6657c5b3',
'duration': 207,
'like_count': int,
'live_status': 'not_live',
'media_type': 'video',
'playable_in_embed': True,
'tags': 'count:15',
'thumbnail': r're:https?://i\.ytimg\.com/vi/.+',
'timestamp': 1361738430,
'upload_date': '20130224',
'uploader': 'The Verge',
'uploader_id': '@TheVerge',
'uploader_url': 'https://www.youtube.com/@TheVerge',
'view_count': int,
},
'add_ie': ['Youtube'],
}]
def report_following_redirect(self, new_url):
"""Report information extraction."""
self._downloader.to_screen(f'[redirect] Following redirect to {new_url}')
def report_detected(self, name, num=1, note=None):
if num > 1:
name += 's'
elif not num:
return
else:
num = 'a'
self._downloader.write_debug(f'Identified {num} {name}{format_field(note, None, "; %s")}')
    def _extra_manifest_info(self, info, manifest_url):
        """Apply user-supplied extractor-args to *info* (in place) and probe HLS live status.

        Handles the `fragment_query`, `key_query`, `hls_key`, `variant_query`
        and `is_live` configuration args, then — if an m3u8_native format is
        present — fetches the playlist to decide between live and VOD.
        """
        fragment_query = self._configuration_arg('fragment_query', [None], casesense=True)[0]
        if fragment_query is not None:
            # Accept either a bare query string or a full URL whose query is reused;
            # fall back to the manifest URL's own query
            info['extra_param_to_segment_url'] = (
                urllib.parse.urlparse(fragment_query).query or fragment_query
                or urllib.parse.urlparse(manifest_url).query or None)
        key_query = self._configuration_arg('key_query', [None], casesense=True)[0]
        if key_query is not None:
            info['extra_param_to_key_url'] = (
                urllib.parse.urlparse(key_query).query or key_query
                or urllib.parse.urlparse(manifest_url).query or None)

        def hex_or_none(value):
            # Only accept (optionally 0x-prefixed) hexadecimal strings for key/IV
            return value if re.fullmatch(r'(0x)?[\da-f]+', value, re.IGNORECASE) else None

        # hls_key arg is [uri_or_key, iv]; first element may be a URI or a hex key
        info['hls_aes'] = traverse_obj(self._configuration_arg('hls_key', casesense=True), {
            'uri': (0, {url_or_none}), 'key': (0, {hex_or_none}), 'iv': (1, {hex_or_none}),
        }) or None

        variant_query = self._configuration_arg('variant_query', [None], casesense=True)[0]
        if variant_query is not None:
            query = urllib.parse.parse_qs(
                urllib.parse.urlparse(variant_query).query or variant_query
                or urllib.parse.urlparse(manifest_url).query)
            for fmt in self._downloader._get_formats(info):
                fmt['url'] = update_url_query(fmt['url'], query)

        # Attempt to detect live HLS or set VOD duration
        m3u8_format = next((f for f in self._downloader._get_formats(info)
                            if determine_protocol(f) == 'm3u8_native'), None)
        if m3u8_format:
            is_live = self._configuration_arg('is_live', [None])[0]
            if is_live is not None:
                # An explicit user override skips the network probe entirely
                info['live_status'] = 'not_live' if is_live == 'false' else 'is_live'
                return
            headers = m3u8_format.get('http_headers') or info.get('http_headers') or {}
            display_id = info.get('id')
            urlh = self._request_webpage(
                m3u8_format['url'], display_id, 'Checking m3u8 live status', errnote=False,
                headers={**headers, 'Accept-Encoding': 'identity'}, fatal=False)
            if urlh is False:
                return
            first_bytes = urlh.read(512)
            if not first_bytes.startswith(b'#EXTM3U'):
                # Not actually an m3u8 playlist; leave live status undetermined
                return
            m3u8_doc = self._webpage_read_content(
                urlh, urlh.url, display_id, prefix=first_bytes, fatal=False, errnote=False)
            if not m3u8_doc:
                return
            duration = self._parse_m3u8_vod_duration(m3u8_doc, display_id)
            if not duration:
                # No VOD duration could be derived from the playlist: treat as live
                info['live_status'] = 'is_live'
            info['duration'] = info.get('duration') or duration
    def _extract_rss(self, url, video_id, doc):
        """Build a playlist info dict from a parsed RSS feed document.

        Prefers each item's <enclosure> URL (the media file) over its <link>,
        and maps iTunes podcast namespace tags onto episode metadata.
        """
        NS_MAP = {
            'itunes': 'http://www.itunes.com/dtds/podcast-1.0.dtd',
        }

        entries = []
        for it in doc.findall('./channel/item'):
            # First enclosure URL wins; fall back to the item's <link> text
            next_url = next(
                (e.attrib.get('url') for e in it.findall('./enclosure')),
                xpath_text(it, 'link', fatal=False))
            if not next_url:
                continue

            guid = try_call(lambda: it.find('guid').text)
            if guid:
                # Smuggle the guid so the downstream extractor uses a stable video id
                next_url = smuggle_url(next_url, {'force_videoid': guid})

            def itunes(key):
                # Helper: text of the item's <itunes:key> element, or None
                return xpath_text(it, xpath_with_ns(f'./itunes:{key}', NS_MAP), default=None)

            entries.append({
                '_type': 'url_transparent',
                'url': next_url,
                'title': try_call(lambda: it.find('title').text),
                'description': xpath_text(it, 'description', default=None),
                'timestamp': unified_timestamp(xpath_text(it, 'pubDate', default=None)),
                'duration': parse_duration(itunes('duration')),
                'thumbnail': url_or_none(xpath_attr(it, xpath_with_ns('./itunes:image', NS_MAP), 'href')),
                'episode': itunes('title'),
                'episode_number': int_or_none(itunes('episode')),
                'season_number': int_or_none(itunes('season')),
                # itunes:explicit is a yes/no flag; map it to an 18/0 age limit
                'age_limit': {'true': 18, 'yes': 18, 'false': 0, 'no': 0}.get((itunes('explicit') or '').lower()),
            })

        return {
            '_type': 'playlist',
            'id': url,
            'title': try_call(lambda: doc.find('./channel/title').text),
            'description': try_call(lambda: doc.find('./channel/description').text),
            'entries': entries,
        }
@classmethod
def _kvs_get_real_url(cls, video_url, license_code):
if not video_url.startswith('function/0/'):
return video_url # not obfuscated
parsed = urllib.parse.urlparse(video_url[len('function/0/'):])
license_token = cls._kvs_get_license_token(license_code)
urlparts = parsed.path.split('/')
HASH_LENGTH = 32
hash_ = urlparts[3][:HASH_LENGTH]
indices = list(range(HASH_LENGTH))
# Swap indices of hash according to the destination calculated from the license token
accum = 0
for src in reversed(range(HASH_LENGTH)):
accum += license_token[src]
dest = (src + accum) % HASH_LENGTH
indices[src], indices[dest] = indices[dest], indices[src]
urlparts[3] = ''.join(hash_[index] for index in indices) + urlparts[3][HASH_LENGTH:]
return urllib.parse.urlunparse(parsed._replace(path='/'.join(urlparts)))
@staticmethod
def _kvs_get_license_token(license_code):
license_code = license_code.replace('$', '')
license_values = [int(char) for char in license_code]
modlicense = license_code.replace('0', '1')
center = len(modlicense) // 2
fronthalf = int(modlicense[:center + 1])
backhalf = int(modlicense[center:])
modlicense = str(4 * abs(fronthalf - backhalf))[:center + 1]
return [
(license_values[index + offset] + current) % 10
for index, current in enumerate(map(int, modlicense))
for offset in range(4)
]
def _extract_kvs(self, url, webpage, video_id):
flashvars = self._search_json(
r'(?s:<script\b[^>]*>.*?var\s+flashvars\s*=)',
webpage, 'flashvars', video_id, transform_source=js_to_json)
# extract the part after the last / as the display_id from the
# canonical URL.
display_id = self._search_regex(
r'(?:<link href="https?://[^"]+/(.+?)/?" rel="canonical"\s*/?>'
r'|<link rel="canonical" href="https?://[^"]+/(.+?)/?"\s*/?>)',
webpage, 'display_id', fatal=False)
title = self._html_search_regex(r'<(?:h1|title)>(?:Video: )?(.+?)</(?:h1|title)>', webpage, 'title')
thumbnail = flashvars['preview_url']
if thumbnail.startswith('//'):
protocol, _, _ = url.partition('/')
thumbnail = protocol + thumbnail
url_keys = list(filter(re.compile(r'^video_(?:url|alt_url\d*)$').match, flashvars.keys()))
formats = []
for key in url_keys:
if '/get_file/' not in flashvars[key]:
continue
format_id = flashvars.get(f'{key}_text', key)
formats.append({
'url': urljoin(url, self._kvs_get_real_url(flashvars[key], flashvars['license_code'])),
'format_id': format_id,
'ext': 'mp4',
**(parse_resolution(format_id) or parse_resolution(flashvars[key])),
'http_headers': {'Referer': url},
})
if not formats[-1].get('height'):
formats[-1]['quality'] = 1
return {
'id': flashvars['video_id'],
'display_id': display_id,
'title': title,
'thumbnail': urljoin(url, thumbnail),
'formats': formats,
}
def _real_extract(self, url):
if url.startswith('//'):
return self.url_result(self.http_scheme() + url)
parsed_url = urllib.parse.urlparse(url)
if not parsed_url.scheme:
default_search = self.get_param('default_search')
if default_search is None:
default_search = 'fixup_error'
if default_search in ('auto', 'auto_warning', 'fixup_error'):
if re.match(r'[^\s/]+\.[^\s/]+/', url):
self.report_warning('The url doesn\'t specify the protocol, trying with https')
return self.url_result('https://' + url)
elif default_search != 'fixup_error':
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | true |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nintendo.py | yt_dlp/extractor/nintendo.py | import json
import urllib.parse
from .common import InfoExtractor
from ..utils import (
ExtractorError,
make_archive_id,
unified_timestamp,
urljoin,
)
from ..utils.traversal import traverse_obj
class NintendoIE(InfoExtractor):
    """Extractor for Nintendo Direct presentation pages on nintendo.com.

    Resolves the page slug through Nintendo's GraphQL API and extracts either
    a CDN-hosted HLS stream or, for live streams, delegates to YouTube.
    """
    _VALID_URL = r'https?://(?:www\.)?nintendo\.com/(?:(?P<locale>\w{2}(?:-\w{2})?)/)?nintendo-direct/(?P<slug>[^/?#]+)'
    _TESTS = [{
        'url': 'https://www.nintendo.com/nintendo-direct/09-04-2019/',
        'info_dict': {
            'ext': 'mp4',
            'id': '2oPmiviVePUA1IqAZzjuVh',
            'display_id': '09-04-2019',
            'title': 'Nintendo Direct 9.4.2019',
            'timestamp': 1567580400,
            'description': 'md5:8aac2780361d8cb772b6d1de66d7d6f4',
            'upload_date': '20190904',
            'age_limit': 17,
            '_old_archive_ids': ['nintendo J2bXdmaTE6fe3dWJTPcc7m23FNbc_A1V'],
        },
    }, {
        'url': 'https://www.nintendo.com/en-ca/nintendo-direct/08-31-2023/',
        'info_dict': {
            'ext': 'mp4',
            'id': '2TB2w2rJhNYF84qQ9E57hU',
            'display_id': '08-31-2023',
            'title': 'Super Mario Bros. Wonder Direct 8.31.2023',
            'timestamp': 1693465200,
            'description': 'md5:3067c5b824bcfdae9090a7f38ab2d200',
            'tags': ['Mild Fantasy Violence', 'In-Game Purchases'],
            'upload_date': '20230831',
            'age_limit': 6,
        },
    }, {
        'url': 'https://www.nintendo.com/us/nintendo-direct/50-fact-extravaganza/',
        'info_dict': {
            'ext': 'mp4',
            'id': 'j0BBGzfw0pQ',
            'channel_follower_count': int,
            'view_count': int,
            'description': 'Learn new details about Super Smash Bros. for Wii U, which launches on November 21.',
            'duration': 2123,
            'availability': 'public',
            'thumbnail': 'https://i.ytimg.com/vi_webp/j0BBGzfw0pQ/maxresdefault.webp',
            'timestamp': 1414047600,
            'channel_id': 'UCGIY_O-8vW4rfX98KlMkvRg',
            'chapters': 'count:53',
            'heatmap': 'count:100',
            'upload_date': '20141023',
            'uploader_id': '@NintendoAmerica',
            'playable_in_embed': True,
            'categories': ['Gaming'],
            'display_id': '50-fact-extravaganza',
            'channel': 'Nintendo of America',
            'tags': ['Comic Mischief', 'Cartoon Violence', 'Mild Suggestive Themes'],
            'like_count': int,
            'channel_url': 'https://www.youtube.com/channel/UCGIY_O-8vW4rfX98KlMkvRg',
            'age_limit': 10,
            'uploader_url': 'https://www.youtube.com/@NintendoAmerica',
            'comment_count': int,
            'live_status': 'not_live',
            'uploader': 'Nintendo of America',
            'title': '50-FACT Extravaganza',
        },
    }]

    def _create_asset_url(self, path):
        """Build an absolute assets.nintendo.com URL, percent-encoding *path*."""
        return urljoin('https://assets.nintendo.com/', urllib.parse.quote(path))

    def _real_extract(self, url):
        """Fetch Direct metadata from Nintendo's GraphQL API and extract formats."""
        locale, slug = self._match_valid_url(url).group('locale', 'slug')

        # 'en-ca' -> 'en_CA'; a bare country code ('us') or no locale at all
        # falls back to language 'en' (no locale -> 'en_US')
        language, _, country = (locale or 'US').rpartition('-')
        parsed_locale = f'{language.lower() or "en"}_{country.upper()}'
        self.write_debug(f'Using locale {parsed_locale} (from {locale})', only_once=True)

        response = self._download_json('https://graph.nintendo.com/', slug, query={
            'operationName': 'NintendoDirect',
            'variables': json.dumps({
                'locale': parsed_locale,
                'slug': slug,
            }, separators=(',', ':')),
            'extensions': json.dumps({
                # Hash identifying the persisted 'NintendoDirect' GraphQL query
                'persistedQuery': {
                    'version': 1,
                    'sha256Hash': '969b16fe9f08b686fa37bc44d1fd913b6188e65794bb5e341c54fa683a8004cb',
                },
            }, separators=(',', ':')),
        })

        # API returns `{"data": {"direct": null}}` if no matching id
        direct_info = traverse_obj(response, ('data', 'direct', {dict}))
        if not direct_info:
            raise ExtractorError(f'No Nintendo Direct with id {slug} exists', expected=True)

        # NOTE(review): the `or "Unknown error"` fallback below is dead code,
        # since the raise only executes when `errors` is truthy
        errors = ', '.join(traverse_obj(response, ('errors', ..., 'message')))
        if errors:
            raise ExtractorError(f'GraphQL API error: {errors or "Unknown error"}')

        result = traverse_obj(direct_info, {
            'id': ('id', {str}),
            'title': ('name', {str}),
            'timestamp': ('startDate', {unified_timestamp}),
            'description': ('description', 'text', {str}),
            'age_limit': ('contentRating', 'order', {int}),
            'tags': ('contentDescriptors', ..., 'label', {str}),
            'thumbnail': ('thumbnail', {self._create_asset_url}),
        })
        result['display_id'] = slug

        asset_id = traverse_obj(direct_info, ('video', 'publicId', {str}))
        if not asset_id:
            # No CDN asset: the Direct may only be available as a YouTube stream
            youtube_id = traverse_obj(direct_info, ('liveStream', {str}))
            if not youtube_id:
                self.raise_no_formats('Could not find any video formats', video_id=slug)
            return self.url_result(youtube_id, **result, url_transparent=True)

        if asset_id.startswith('Legacy Videos/'):
            # Strip the 14-char 'Legacy Videos/' prefix to recover the old archive id
            result['_old_archive_ids'] = [make_archive_id(self, asset_id[14:])]
        result['formats'] = self._extract_m3u8_formats(
            self._create_asset_url(f'/video/upload/sp_full_hd/v1/{asset_id}.m3u8'), slug)

        return result
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/gettr.py | yt_dlp/extractor/gettr.py | from .common import InfoExtractor
from ..utils import (
ExtractorError,
bool_or_none,
dict_get,
float_or_none,
int_or_none,
str_or_none,
traverse_obj,
try_get,
url_or_none,
urljoin,
)
class GettrBaseIE(InfoExtractor):
    """Shared functionality for GETTR extractors."""

    # Common URL prefix and media host used by the concrete extractors
    _BASE_REGEX = r'https?://(www\.)?gettr\.com/'
    _MEDIA_BASE_URL = 'https://media.gettr.com/'

    def _call_api(self, path, video_id, *args, **kwargs):
        """Fetch *path* from the GETTR JSON API and return its 'result' payload."""
        endpoint = urljoin('https://api.gettr.com/u/', path)
        response = self._download_json(endpoint, video_id, *args, **kwargs)
        return response['result']
class GettrIE(GettrBaseIE):
    # Single GETTR post. A post may carry native video (HLS manifest 'vid'
    # and/or progressive 'ovid'), embed an external page, quote/share another
    # post, or point at a livestream handled by GettrStreamingIE.
    _VALID_URL = GettrBaseIE._BASE_REGEX + r'post/(?P<id>[a-z0-9]+)'
    _TESTS = [{
        'url': 'https://www.gettr.com/post/pcf6uv838f',
        'info_dict': {
            'id': 'pcf6uv838f',
            'title': 'md5:9086a646bbd06c41c4fe8e52b3c93454',
            'description': 'md5:be0577f1e4caadc06de4a002da2bf287',
            'ext': 'mp4',
            'uploader': 'EpochTV',
            'uploader_id': 'epochtv',
            'upload_date': '20210927',
            'thumbnail': r're:^https?://.+/out\.jpg',
            'timestamp': 1632782451.058,
            'duration': 58.5585,
            'tags': ['hornofafrica', 'explorations'],
        },
    }, {
        'url': 'https://gettr.com/post/p4iahp',
        'info_dict': {
            'id': 'p4iahp',
            'title': 'md5:b03c07883db6fbc1aab88877a6c3b149',
            'description': 'md5:741b7419d991c403196ed2ea7749a39d',
            'ext': 'mp4',
            'uploader': 'Neues Forum Freiheit',
            'uploader_id': 'nf_freiheit',
            'upload_date': '20210718',
            'thumbnail': r're:^https?://.+/out\.jpg',
            'timestamp': 1626594455.017,
            'duration': 23,
            'tags': 'count:12',
        },
    }, {
        # quote post
        'url': 'https://gettr.com/post/pxn5b743a9',
        'only_matching': True,
    }, {
        # quote with video
        'url': 'https://gettr.com/post/pxtiiz5ca2',
        'only_matching': True,
    }, {
        # streaming embed
        'url': 'https://gettr.com/post/pxlu8p3b13',
        'only_matching': True,
    }, {
        # youtube embed
        'url': 'https://gettr.com/post/pv6wp9e24c',
        'only_matching': True,
        'add_ie': ['Youtube'],
    }]

    def _real_extract(self, url):
        post_id = self._match_id(url)
        webpage = self._download_webpage(url, post_id)
        # 'incl' asks the API to inline post stats and author info ('aux.uinf').
        api_data = self._call_api(f'post/{post_id}?incl="poststats|userinfo"', post_id)

        post_data = api_data.get('data')
        # Author metadata is keyed by the poster's uid in the auxiliary section.
        user_data = try_get(api_data, lambda x: x['aux']['uinf'][post_data['uid']], dict) or {}

        vid = post_data.get('vid')  # HLS manifest path, relative to the media host
        ovid = post_data.get('ovid')  # progressive MP4 path, relative to the media host

        if post_data.get('p_type') == 'stream':
            # Livestream posts use a different API; hand off to GettrStreamingIE.
            return self.url_result(f'https://gettr.com/streaming/{post_id}', ie='GettrStreaming', video_id=post_id)

        if not (ovid or vid):
            # No native video: try an embedded external URL first, then a
            # quoted/shared post; give up otherwise.
            embed_url = url_or_none(post_data.get('prevsrc'))
            shared_post_id = traverse_obj(api_data, ('aux', 'shrdpst', '_id'), ('data', 'rpstIds', 0), expected_type=str)

            if embed_url:
                return self.url_result(embed_url)
            elif shared_post_id:
                return self.url_result(f'https://gettr.com/post/{shared_post_id}', ie='Gettr', video_id=shared_post_id)
            else:
                raise ExtractorError('There\'s no video in this post.')

        # Posts have no dedicated title; reuse the post text (or og:description).
        title = description = str_or_none(
            post_data.get('txt') or self._og_search_description(webpage))

        uploader = str_or_none(
            user_data.get('nickname')
            or self._search_regex(r'^(.+?) on GETTR', self._og_search_title(webpage, default=''), 'uploader', fatal=False))

        if uploader:
            title = f'{uploader} - {title}'

        formats, subtitles = self._extract_m3u8_formats_and_subtitles(
            urljoin(self._MEDIA_BASE_URL, vid), post_id, 'mp4',
            entry_protocol='m3u8_native', m3u8_id='hls', fatal=False) if vid else ([], {})

        if ovid:
            formats.append({
                'url': urljoin(self._MEDIA_BASE_URL, ovid),
                'format_id': 'ovid',
                'ext': 'mp4',
                'width': int_or_none(post_data.get('vid_wid')),
                'height': int_or_none(post_data.get('vid_hgt')),
            })

        return {
            'id': post_id,
            'title': title,
            'description': description,
            'formats': formats,
            'subtitles': subtitles,
            'uploader': uploader,
            'uploader_id': str_or_none(
                dict_get(user_data, ['_id', 'username'])
                or post_data.get('uid')),
            'thumbnail': url_or_none(
                urljoin(self._MEDIA_BASE_URL, post_data.get('main'))
                or self._html_search_meta(['og:image', 'image'], webpage, 'thumbnail', fatal=False)),
            # Timestamps are epoch milliseconds (creation date, falling back to update date).
            'timestamp': float_or_none(dict_get(post_data, ['cdate', 'udate']), scale=1000),
            'duration': float_or_none(post_data.get('vid_dur')),
            'tags': post_data.get('htgs'),
        }
class GettrStreamingIE(GettrBaseIE):
    # GETTR livestream / stream-replay pages.
    _VALID_URL = GettrBaseIE._BASE_REGEX + r'streaming/(?P<id>[a-z0-9]+)'

    _TESTS = [{
        'url': 'https://gettr.com/streaming/psoiulc122',
        'info_dict': {
            'id': 'psoiulc122',
            'ext': 'mp4',
            'description': 'md5:56bca4b8f48f1743d9fd03d49c723017',
            'view_count': int,
            'uploader': 'Corona Investigative Committee',
            'uploader_id': 'coronacommittee',
            'duration': 5180.184,
            'thumbnail': r're:^https?://.+',
            'title': 'Day 1: Opening Session of the Grand Jury Proceeding',
            'timestamp': 1644080997.164,
            'upload_date': '20220205',
        },
    }, {
        'url': 'https://gettr.com/streaming/psfmeefcc1',
        'info_dict': {
            'id': 'psfmeefcc1',
            'ext': 'mp4',
            'title': 'Session 90: "The Virus Of Power"',
            'view_count': int,
            'uploader_id': 'coronacommittee',
            'description': 'md5:98986acdf656aa836bf36f9c9704c65b',
            'uploader': 'Corona Investigative Committee',
            'thumbnail': r're:^https?://.+',
            'duration': 21872.507,
            'timestamp': 1643976662.858,
            'upload_date': '20220204',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # POSTing (empty body) "joins" the stream and returns playback info.
        video_info = self._call_api(f'live/join/{video_id}', video_id, data={})

        live_info = video_info['broadcast']
        live_url = url_or_none(live_info.get('url'))
        formats, subtitles = self._extract_m3u8_formats_and_subtitles(
            live_url, video_id, ext='mp4',
            entry_protocol='m3u8_native', m3u8_id='hls', fatal=False) if live_url else ([], {})

        # Thumbnails are media-host-relative paths in the associated post data.
        thumbnails = [{
            'url': urljoin(self._MEDIA_BASE_URL, thumbnail),
        } for thumbnail in try_get(video_info, lambda x: x['postData']['imgs'], list) or []]

        return {
            'id': video_id,
            'title': try_get(video_info, lambda x: x['postData']['ttl'], str),
            'description': try_get(video_info, lambda x: x['postData']['dsc'], str),
            'formats': formats,
            'subtitles': subtitles,
            'thumbnails': thumbnails,
            'uploader': try_get(video_info, lambda x: x['liveHostInfo']['nickname'], str),
            'uploader_id': try_get(video_info, lambda x: x['liveHostInfo']['_id'], str),
            'view_count': int_or_none(live_info.get('viewsCount')),
            # Broadcast times/durations are reported in milliseconds.
            'timestamp': float_or_none(live_info.get('startAt'), scale=1000),
            'duration': float_or_none(live_info.get('duration'), scale=1000),
            'is_live': bool_or_none(live_info.get('isLive')),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tvplay.py | yt_dlp/extractor/tvplay.py | import re
import urllib.parse
from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
determine_ext,
int_or_none,
parse_iso8601,
qualities,
traverse_obj,
try_get,
update_url_query,
url_or_none,
urljoin,
)
class TVPlayIE(InfoExtractor):
    # Legacy MTG (Modern Times Group) playapi.mtgx.tv services for the
    # Baltic TV3/TVPlay sites (.lv/.lt/.ee).
    IE_NAME = 'mtg'
    IE_DESC = 'MTG services'
    _VALID_URL = r'''(?x)
                    (?:
                        mtg:|
                        https?://
                            (?:www\.)?
                            (?:
                                tvplay(?:\.skaties)?\.lv(?:/parraides)?|
                                (?:tv3play|play\.tv3)\.lt(?:/programos)?|
                                tv3play(?:\.tv3)?\.ee/sisu
                            )
                            /(?:[^/]+/)+
                    )
                    (?P<id>\d+)
                    '''
    _TESTS = [
        {
            'url': 'http://www.tvplay.lv/parraides/vinas-melo-labak/418113?autostart=true',
            'md5': 'a1612fe0849455423ad8718fe049be21',
            'info_dict': {
                'id': '418113',
                'ext': 'mp4',
                'title': 'Kādi ir īri? - Viņas melo labāk',
                'description': 'Baiba apsmej īrus, kādi tie ir un ko viņi dara.',
                'series': 'Viņas melo labāk',
                'season': '2.sezona',
                'season_number': 2,
                'duration': 25,
                'timestamp': 1406097056,
                'upload_date': '20140723',
            },
        },
        {
            'url': 'http://play.tv3.lt/programos/moterys-meluoja-geriau/409229?autostart=true',
            'info_dict': {
                'id': '409229',
                'ext': 'flv',
                'title': 'Moterys meluoja geriau',
                'description': 'md5:9aec0fc68e2cbc992d2a140bd41fa89e',
                'series': 'Moterys meluoja geriau',
                'episode_number': 47,
                'season': '1 sezonas',
                'season_number': 1,
                'duration': 1330,
                'timestamp': 1403769181,
                'upload_date': '20140626',
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.tv3play.ee/sisu/kodu-keset-linna/238551?autostart=true',
            'info_dict': {
                'id': '238551',
                'ext': 'flv',
                'title': 'Kodu keset linna 398537',
                'description': 'md5:7df175e3c94db9e47c0d81ffa5d68701',
                'duration': 1257,
                'timestamp': 1292449761,
                'upload_date': '20101215',
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
        },
        {
            'url': 'http://tvplay.skaties.lv/parraides/vinas-melo-labak/418113?autostart=true',
            'only_matching': True,
        },
        {
            'url': 'https://tvplay.skaties.lv/vinas-melo-labak/418113/?autostart=true',
            'only_matching': True,
        },
        {
            # views is null
            'url': 'http://tvplay.skaties.lv/parraides/tv3-zinas/760183',
            'only_matching': True,
        },
        {
            'url': 'http://tv3play.tv3.ee/sisu/kodu-keset-linna/238551?autostart=true',
            'only_matching': True,
        },
        {
            'url': 'mtg:418113',
            'only_matching': True,
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Derive the geo-bypass country from the site TLD (.lv/.lt/.ee);
        # absent for 'mtg:' scheme URLs.
        geo_country = self._search_regex(
            r'https?://[^/]+\.([a-z]{2})', url,
            'geo country', default=None)
        if geo_country:
            self._initialize_geo_bypass({'countries': [geo_country.upper()]})
        video = self._download_json(
            f'http://playapi.mtgx.tv/v3/videos/{video_id}', video_id, 'Downloading video JSON')

        title = video['title']

        try:
            streams = self._download_json(
                f'http://playapi.mtgx.tv/v3/videos/stream/{video_id}',
                video_id, 'Downloading streams JSON')
        except ExtractorError as e:
            # Rights/geo errors come back as HTTP 403 with a JSON body whose
            # 'msg' explains the restriction.
            if isinstance(e.cause, HTTPError) and e.cause.status == 403:
                msg = self._parse_json(e.cause.response.read().decode('utf-8'), video_id)
                raise ExtractorError(msg['msg'], expected=True)
            raise

        quality = qualities(['hls', 'medium', 'high'])
        formats = []
        for format_id, video_url in streams.get('streams', {}).items():
            video_url = url_or_none(video_url)
            if not video_url:
                continue
            ext = determine_ext(video_url)
            if ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    update_url_query(video_url, {
                        'hdcore': '3.5.0',
                        'plugin': 'aasp-3.5.0.151.81',
                    }), video_id, f4m_id='hds', fatal=False))
            elif ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    video_url, video_id, 'mp4', 'm3u8_native',
                    m3u8_id='hls', fatal=False))
            else:
                fmt = {
                    'format_id': format_id,
                    'quality': quality(format_id),
                    'ext': ext,
                }
                if video_url.startswith('rtmp'):
                    # Split an rtmp URL into connection URL, app name and play path.
                    m = re.search(
                        r'^(?P<url>rtmp://[^/]+/(?P<app>[^/]+))/(?P<playpath>.+)$', video_url)
                    if not m:
                        continue
                    fmt.update({
                        'ext': 'flv',
                        'url': m.group('url'),
                        'app': m.group('app'),
                        'play_path': m.group('playpath'),
                        'preference': -1,
                    })
                else:
                    fmt.update({
                        'url': video_url,
                    })
                formats.append(fmt)

        if not formats and video.get('is_geo_blocked'):
            self.raise_geo_restricted(
                'This content might not be available in your country due to copyright reasons',
                metadata_available=True)

        # TODO: webvtt in m3u8
        subtitles = {}
        sami_path = video.get('sami_path')
        if sami_path:
            # Subtitle language comes from the filename suffix ('_xx.xml'),
            # falling back to the site's TLD.
            lang = self._search_regex(
                r'_([a-z]{2})\.xml', sami_path, 'lang',
                default=urllib.parse.urlparse(url).netloc.rsplit('.', 1)[-1])
            subtitles[lang] = [{
                'url': sami_path,
            }]

        series = video.get('format_title')
        episode_number = int_or_none(video.get('format_position', {}).get('episode'))
        season = video.get('_embedded', {}).get('season', {}).get('title')
        season_number = int_or_none(video.get('format_position', {}).get('season'))

        return {
            'id': video_id,
            'title': title,
            'description': video.get('description'),
            'series': series,
            'episode_number': episode_number,
            'season': season,
            'season_number': season_number,
            'duration': int_or_none(video.get('duration')),
            'timestamp': parse_iso8601(video.get('created_at')),
            'view_count': try_get(video, lambda x: x['views']['total'], int),
            'age_limit': int_or_none(video.get('age_limit', 0)),
            'formats': formats,
            'subtitles': subtitles,
        }
class TVPlayHomeIE(InfoExtractor):
    # Newer TV3/Skaties "Home" player sites for the Baltics; metadata and
    # stream playlists come from a per-site '/api/products/...' backend.
    _VALID_URL = r'''(?x)
            https?://
            (?:tv3?)?
            play\.(?:tv3|skaties)\.(?P<country>lv|lt|ee)/
            (?P<live>lives/)?
            [^?#&]+(?:episode|programme|clip)-(?P<id>\d+)
    '''
    _TESTS = [{
        'url': 'https://play.tv3.lt/series/gauju-karai-karveliai,serial-2343791/serija-8,episode-2343828',
        'info_dict': {
            'id': '2343828',
            'ext': 'mp4',
            'title': 'Gaujų karai. Karveliai (2021) | S01E08: Serija 8',
            'description': 'md5:f6fcfbb236429f05531131640dfa7c81',
            'duration': 2710,
            'season': 'Gaujų karai. Karveliai',
            'season_number': 1,
            'release_year': 2021,
            'episode': 'Serija 8',
            'episode_number': 8,
        },
        'params': {
            'skip_download': 'm3u8',
        },
    }, {
        'url': 'https://play.tv3.lt/series/moterys-meluoja-geriau-n-7,serial-2574652/serija-25,episode-3284937',
        'info_dict': {
            'id': '3284937',
            'ext': 'mp4',
            'season': 'Moterys meluoja geriau [N-7]',
            'season_number': 14,
            'release_year': 2021,
            'episode': 'Serija 25',
            'episode_number': 25,
            'title': 'Moterys meluoja geriau [N-7] (2021) | S14|E25: Serija 25',
            'description': 'md5:c6926e9710f1a126f028fbe121eddb79',
            'duration': 2440,
        },
        'skip': '404',
    }, {
        'url': 'https://play.tv3.lt/lives/tv6-lt,live-2838694/optibet-a-lygos-rungtynes-marijampoles-suduva--vilniaus-riteriai,programme-3422014',
        'only_matching': True,
    }, {
        'url': 'https://tv3play.skaties.lv/series/women-lie-better-lv,serial-1024464/women-lie-better-lv,episode-1038762',
        'only_matching': True,
    }, {
        'url': 'https://play.tv3.ee/series/_,serial-2654462/_,episode-2654474',
        'only_matching': True,
    }, {
        'url': 'https://tv3play.skaties.lv/clips/tv3-zinas-valsti-lidz-15novembrim-bus-majsede,clip-3464509',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        country, is_live, video_id = self._match_valid_url(url).groups()

        # Live programmes and VODs live under different API collections.
        api_path = 'lives/programmes' if is_live else 'vods'
        data = self._download_json(
            urljoin(url, f'/api/products/{api_path}/{video_id}?platform=BROWSER&lang={country.upper()}'),
            video_id)

        video_type = 'CATCHUP' if is_live else 'MOVIE'
        # Live catch-ups are streamed via a separate recording id.
        stream_id = data['programRecordingId'] if is_live else video_id
        stream = self._download_json(
            urljoin(url, f'/api/products/{stream_id}/videos/playlist?videoType={video_type}&platform=BROWSER'), video_id)
        formats, subtitles = self._extract_m3u8_formats_and_subtitles(
            stream['sources']['HLS'][0]['src'], video_id, 'mp4', 'm3u8_native', m3u8_id='hls')

        # Collect thumbnail URLs from several gallery fields, de-duplicated.
        # (NB: 'galary' matches the API's own spelling.)
        thumbnails = set(traverse_obj(
            data, (('galary', 'images', 'artworks'), ..., ..., ('miniUrl', 'mainUrl')), expected_type=url_or_none))

        return {
            'id': video_id,
            'title': self._resolve_title(data),
            'description': traverse_obj(data, 'description', 'lead'),
            'duration': int_or_none(data.get('duration')),
            'season': traverse_obj(data, ('season', 'serial', 'title')),
            'season_number': int_or_none(traverse_obj(data, ('season', 'number'))),
            'episode': data.get('title'),
            'episode_number': int_or_none(data.get('episode')),
            'release_year': int_or_none(traverse_obj(data, ('season', 'serial', 'year'))),
            'thumbnails': [{'url': url, 'ext': 'jpg'} for url in thumbnails],
            'formats': formats,
            'subtitles': subtitles,
        }

    @staticmethod
    def _resolve_title(data):
        # Build 'Series (Year) | SxxEyy: Episode'; fall back to the bare title
        # when any of the parts is missing (try_get swallows the KeyError).
        return try_get(data, lambda x: (
            f'{data["season"]["serial"]["title"]} ({data["season"]["serial"]["year"]}) | '
            f'S{data["season"]["number"]:02d}E{data["episode"]:02d}: {data["title"]}'
        )) or data.get('title')
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/redgifs.py | yt_dlp/extractor/redgifs.py | import functools
import urllib.parse
from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
OnDemandPagedList,
int_or_none,
qualities,
try_get,
)
class RedGifsBaseIE(InfoExtractor):
    # Shared logic for RedGifs extractors: temporary-token auth, API access,
    # pagination helpers and mapping of the API's 'gif' objects to info dicts.

    # Known format ids and their nominal heights ('hd' keeps the source height).
    _FORMATS = {
        'gif': 250,
        'sd': 480,
        'hd': None,
    }

    _API_HEADERS = {
        'referer': 'https://www.redgifs.com/',
        'origin': 'https://www.redgifs.com',
        'content-type': 'application/json',
    }

    def _parse_gif_data(self, gif_data):
        # Convert a single API 'gif' object into an info dict.
        video_id = gif_data.get('id')
        quality = qualities(tuple(self._FORMATS.keys()))

        orig_height = int_or_none(gif_data.get('height'))
        # Height-to-width ratio of the source; None if width is missing/zero.
        aspect_ratio = try_get(gif_data, lambda x: orig_height / x['width'])

        formats = []
        for format_id, height in self._FORMATS.items():
            video_url = gif_data['urls'].get(format_id)
            if not video_url:
                continue
            height = min(orig_height, height or orig_height)
            formats.append({
                'url': video_url,
                'format_id': format_id,
                # Scale the width to match the reduced height:
                # width = height / (orig_height / orig_width).
                # The previous `height * aspect_ratio` inverted the proportions
                # for any non-square video (e.g. a 1920x1080 source yielded a
                # 607.5px-wide 'hd' format instead of 1920px).
                'width': height / aspect_ratio if aspect_ratio else None,
                'height': height,
                'quality': quality(format_id),
            })

        return {
            'id': video_id,
            'webpage_url': f'https://redgifs.com/watch/{video_id}',
            'extractor_key': RedGifsIE.ie_key(),
            'extractor': 'RedGifs',
            'title': ' '.join(gif_data.get('tags') or []) or 'RedGifs',
            'timestamp': int_or_none(gif_data.get('createDate')),
            'uploader': gif_data.get('userName'),
            'duration': int_or_none(gif_data.get('duration')),
            'view_count': int_or_none(gif_data.get('views')),
            'like_count': int_or_none(gif_data.get('likes')),
            'categories': gif_data.get('tags') or [],
            'tags': gif_data.get('tags'),
            'age_limit': 18,
            'formats': formats,
        }

    def _fetch_oauth_token(self, video_id):
        # Obtain a temporary bearer token and cache it in the shared headers.
        # https://github.com/Redgifs/api/wiki/Temporary-tokens
        auth = self._download_json('https://api.redgifs.com/v2/auth/temporary',
                                   video_id, note='Fetching temporary token')
        if not auth.get('token'):
            raise ExtractorError('Unable to get temporary token')
        self._API_HEADERS['authorization'] = f'Bearer {auth["token"]}'

    def _call_api(self, ep, video_id, **kwargs):
        # Call an API endpoint, (re-)fetching the auth token once if it is
        # missing or rejected with HTTP 401.
        for first_attempt in True, False:
            if 'authorization' not in self._API_HEADERS:
                self._fetch_oauth_token(video_id)
            try:
                headers = dict(self._API_HEADERS)
                headers['x-customheader'] = f'https://www.redgifs.com/watch/{video_id}'
                data = self._download_json(
                    f'https://api.redgifs.com/v2/{ep}', video_id, headers=headers, **kwargs)
                break
            except ExtractorError as e:
                if first_attempt and isinstance(e.cause, HTTPError) and e.cause.status == 401:
                    del self._API_HEADERS['authorization']  # refresh the token
                    continue
                raise

        if 'error' in data:
            raise ExtractorError(f'RedGifs said: {data["error"]}', expected=True, video_id=video_id)

        return data

    def _fetch_page(self, ep, video_id, query, page):
        # Yield parsed entries for one result page (`page` is 0-based here;
        # the API expects 1-based page numbers).
        query['page'] = page + 1
        data = self._call_api(
            ep, video_id, query=query, note=f'Downloading JSON metadata page {page + 1}')

        for entry in data['gifs']:
            yield self._parse_gif_data(entry)

    def _prepare_api_query(self, query, fields):
        # Project a parse_qs-style query onto `fields`, applying per-field
        # defaults and dropping entries whose value resolves to None.
        api_query = [
            (field_name, query.get(field_name, (default,))[0])
            for field_name, default in fields.items()]

        return {key: val for key, val in api_query if val is not None}

    def _paged_entries(self, ep, item_id, query, fields):
        # Return the entries of a single explicitly requested page, or a lazy
        # pager over all pages when no 'page' is present in the query.
        page = int_or_none(query.get('page', (None,))[0])
        page_fetcher = functools.partial(
            self._fetch_page, ep, item_id, self._prepare_api_query(query, fields))
        return page_fetcher(page) if page else OnDemandPagedList(page_fetcher, self._PAGE_SIZE)
class RedGifsIE(RedGifsBaseIE):
    # Single RedGifs video, addressed by watch page, iframe or thumbnail URL.
    _VALID_URL = r'https?://(?:(?:www\.)?redgifs\.com/(?:watch|ifr)/|thumbs2\.redgifs\.com/)(?P<id>[^-/?#\.]+)'
    _TESTS = [{
        'url': 'https://www.redgifs.com/watch/squeakyhelplesswisent',
        'info_dict': {
            'id': 'squeakyhelplesswisent',
            'ext': 'mp4',
            'title': 'Hotwife Legs Thick',
            'timestamp': 1636287915,
            'upload_date': '20211107',
            'uploader': 'ignored52',
            'duration': 16,
            'view_count': int,
            'like_count': int,
            'categories': list,
            'age_limit': 18,
            'tags': list,
        },
    }, {
        'url': 'https://thumbs2.redgifs.com/SqueakyHelplessWisent-mobile.mp4#t=0',
        'info_dict': {
            'id': 'squeakyhelplesswisent',
            'ext': 'mp4',
            'title': 'Hotwife Legs Thick',
            'timestamp': 1636287915,
            'upload_date': '20211107',
            'uploader': 'ignored52',
            'duration': 16,
            'view_count': int,
            'like_count': int,
            'categories': list,
            'age_limit': 18,
            'tags': list,
        },
    }, {
        'url': 'https://www.redgifs.com/ifr/squeakyhelplesswisent',
        'info_dict': {
            'id': 'squeakyhelplesswisent',
            'ext': 'mp4',
            'title': 'Hotwife Legs Thick',
            'timestamp': 1636287915,
            'upload_date': '20211107',
            'uploader': 'ignored52',
            'duration': 16,
            'view_count': int,
            'like_count': int,
            'categories': list,
            'age_limit': 18,
            'tags': list,
        },
    }]

    def _real_extract(self, url):
        # IDs are case-insensitive in URLs; the API expects them lowercased.
        gif_id = self._match_id(url).lower()
        api_data = self._call_api(
            f'gifs/{gif_id}?views=yes', gif_id, note='Downloading video info')
        return self._parse_gif_data(api_data['gif'])
class RedGifsSearchIE(RedGifsBaseIE):
    # Tag-based search/browse playlists.
    IE_DESC = 'Redgifs search'
    _VALID_URL = r'https?://(?:www\.)?redgifs\.com/browse\?(?P<query>[^#]+)'
    _PAGE_SIZE = 80
    _TESTS = [
        {
            'url': 'https://www.redgifs.com/browse?tags=Lesbian',
            'info_dict': {
                'id': 'tags=Lesbian',
                'title': 'Lesbian',
                'description': 'RedGifs search for Lesbian, ordered by trending',
            },
            'playlist_mincount': 100,
        },
        {
            'url': 'https://www.redgifs.com/browse?type=g&order=latest&tags=Lesbian',
            'info_dict': {
                'id': 'type=g&order=latest&tags=Lesbian',
                'title': 'Lesbian',
                'description': 'RedGifs search for Lesbian, ordered by latest',
            },
            'playlist_mincount': 100,
        },
        {
            'url': 'https://www.redgifs.com/browse?type=g&order=latest&tags=Lesbian&page=2',
            'info_dict': {
                'id': 'type=g&order=latest&tags=Lesbian&page=2',
                'title': 'Lesbian',
                'description': 'RedGifs search for Lesbian, ordered by latest',
            },
            'playlist_count': 80,
        },
    ]

    def _real_extract(self, url):
        raw_query = self._match_valid_url(url).group('query')
        parsed = urllib.parse.parse_qs(raw_query)

        # A 'tags' parameter is mandatory; it doubles as the search text.
        tag_values = parsed.get('tags')
        if not tag_values:
            raise ExtractorError('Invalid query tags', expected=True)
        search_tags = tag_values[0]
        sort_order = parsed.get('order', ('trending',))[0]
        parsed['search_text'] = [search_tags]

        entries = self._paged_entries(
            'gifs/search', raw_query, parsed,
            {'search_text': None, 'order': 'trending', 'type': None})

        return self.playlist_result(
            entries, raw_query, search_tags,
            f'RedGifs search for {search_tags}, ordered by {sort_order}')
class RedGifsUserIE(RedGifsBaseIE):
    # All uploads of a RedGifs user, optionally filtered/ordered via the query string.
    IE_DESC = 'Redgifs user'
    _VALID_URL = r'https?://(?:www\.)?redgifs\.com/users/(?P<username>[^/?#]+)(?:\?(?P<query>[^#]+))?'
    _PAGE_SIZE = 80
    _TESTS = [
        {
            'url': 'https://www.redgifs.com/users/lamsinka89',
            'info_dict': {
                'id': 'lamsinka89',
                'title': 'lamsinka89',
                'description': 'RedGifs user lamsinka89, ordered by recent',
            },
            'playlist_mincount': 391,
        },
        {
            'url': 'https://www.redgifs.com/users/lamsinka89?page=3',
            'info_dict': {
                'id': 'lamsinka89?page=3',
                'title': 'lamsinka89',
                'description': 'RedGifs user lamsinka89, ordered by recent',
            },
            'playlist_count': 80,
        },
        {
            'url': 'https://www.redgifs.com/users/lamsinka89?order=best&type=g',
            'info_dict': {
                'id': 'lamsinka89?order=best&type=g',
                'title': 'lamsinka89',
                'description': 'RedGifs user lamsinka89, ordered by best',
            },
            'playlist_mincount': 391,
        },
        {
            'url': 'https://www.redgifs.com/users/ignored52',
            'note': 'https://github.com/yt-dlp/yt-dlp/issues/7382',
            'info_dict': {
                'id': 'ignored52',
                'title': 'ignored52',
                'description': 'RedGifs user ignored52, ordered by recent',
            },
            'playlist_mincount': 121,
        },
    ]

    def _real_extract(self, url):
        username, raw_query = self._match_valid_url(url).group('username', 'query')
        # Keep the query string in the playlist id so distinct views stay distinct.
        playlist_id = username if not raw_query else f'{username}?{raw_query}'

        parsed = urllib.parse.parse_qs(raw_query)
        sort_order = parsed.get('order', ('recent',))[0]

        entries = self._paged_entries(
            f'users/{username}/search', playlist_id, parsed,
            {'order': 'recent', 'type': None})

        return self.playlist_result(
            entries, playlist_id, username,
            f'RedGifs user {username}, ordered by {sort_order}')
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/giantbomb.py | yt_dlp/extractor/giantbomb.py | import json
from .common import InfoExtractor
from ..utils import (
determine_ext,
int_or_none,
qualities,
unescapeHTML,
)
class GiantBombIE(InfoExtractor):
    # Giant Bomb video/show pages; stream metadata is embedded in the page.
    _VALID_URL = r'https?://(?:www\.)?giantbomb\.com/(?:videos|shows)/(?P<display_id>[^/]+)/(?P<id>\d+-\d+)'
    _TESTS = [{
        'url': 'http://www.giantbomb.com/videos/quick-look-destiny-the-dark-below/2300-9782/',
        'md5': '132f5a803e7e0ab0e274d84bda1e77ae',
        'info_dict': {
            'id': '2300-9782',
            'display_id': 'quick-look-destiny-the-dark-below',
            'ext': 'mp4',
            'title': 'Quick Look: Destiny: The Dark Below',
            'description': 'md5:0aa3aaf2772a41b91d44c63f30dfad24',
            'duration': 2399,
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }, {
        'url': 'https://www.giantbomb.com/shows/ben-stranding/2970-20212',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id, display_id = self._match_valid_url(url).group('id', 'display_id')

        webpage = self._download_webpage(url, display_id)
        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage)
        thumbnail = self._og_search_thumbnail(webpage)

        # Playback info lives HTML-escaped in a data-video attribute.
        video = json.loads(unescapeHTML(self._search_regex(
            r'data-video="([^"]+)"', webpage, 'data-video')))

        duration = int_or_none(video.get('lengthSeconds'))

        quality = qualities([
            'f4m_low', 'progressive_low', 'f4m_high',
            'progressive_high', 'f4m_hd', 'progressive_hd'])

        formats = []
        for format_id, video_url in video['videoStreams'].items():
            # 'f4m_stream' is a duplicate of the per-quality f4m entries.
            if format_id == 'f4m_stream':
                continue
            ext = determine_ext(video_url)
            if ext == 'f4m':
                f4m_formats = self._extract_f4m_formats(video_url + '?hdcore=3.3.1', display_id)
                if f4m_formats:
                    f4m_formats[0]['quality'] = quality(format_id)
                formats.extend(f4m_formats)
            elif ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    video_url, display_id, ext='mp4', entry_protocol='m3u8_native',
                    m3u8_id='hls', fatal=False))
            else:
                formats.append({
                    'url': video_url,
                    'format_id': format_id,
                    'quality': quality(format_id),
                })

        if not formats:
            # Some entries only exist as YouTube uploads.
            youtube_id = video.get('youtubeID')
            if youtube_id:
                return self.url_result(youtube_id, 'Youtube')

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/_extractors.py | yt_dlp/extractor/_extractors.py | # flake8: noqa: F401
# isort: off
from .youtube import ( # Youtube is moved to the top to improve performance
YoutubeIE,
YoutubeClipIE,
YoutubeFavouritesIE,
YoutubeNotificationsIE,
YoutubeHistoryIE,
YoutubeTabIE,
YoutubeLivestreamEmbedIE,
YoutubePlaylistIE,
YoutubeRecommendedIE,
YoutubeSearchDateIE,
YoutubeSearchIE,
YoutubeSearchURLIE,
YoutubeMusicSearchURLIE,
YoutubeSubscriptionsIE,
YoutubeTruncatedIDIE,
YoutubeTruncatedURLIE,
YoutubeYtBeIE,
YoutubeYtUserIE,
YoutubeWatchLaterIE,
YoutubeShortsAudioPivotIE,
YoutubeConsentRedirectIE,
)
# isort: on
from .abc import (
ABCIE,
ABCIViewIE,
ABCIViewShowSeriesIE,
)
from .abcnews import (
AbcNewsIE,
AbcNewsVideoIE,
)
from .abcotvs import (
ABCOTVSIE,
ABCOTVSClipsIE,
)
from .abematv import (
AbemaTVIE,
AbemaTVTitleIE,
)
from .academicearth import AcademicEarthCourseIE
from .acast import (
ACastChannelIE,
ACastIE,
)
from .acfun import (
AcFunBangumiIE,
AcFunVideoIE,
)
from .adn import (
ADNIE,
ADNSeasonIE,
)
from .adobeconnect import AdobeConnectIE
from .adobetv import AdobeTVVideoIE
from .adultswim import AdultSwimIE
from .aenetworks import (
AENetworksCollectionIE,
AENetworksIE,
AENetworksShowIE,
BiographyIE,
HistoryPlayerIE,
HistoryTopicIE,
)
from .aeonco import AeonCoIE
from .afreecatv import (
AfreecaTVCatchStoryIE,
AfreecaTVIE,
AfreecaTVLiveIE,
AfreecaTVUserIE,
)
from .agalega import AGalegaIE
from .agora import (
TokFMAuditionIE,
TokFMPodcastIE,
WyborczaPodcastIE,
WyborczaVideoIE,
)
from .airtv import AirTVIE
from .aitube import AitubeKZVideoIE
from .alibaba import AlibabaIE
from .aliexpress import AliExpressLiveIE
from .aljazeera import AlJazeeraIE
from .allocine import AllocineIE
from .allstar import (
AllstarIE,
AllstarProfileIE,
)
from .alphaporno import AlphaPornoIE
from .alsace20tv import (
Alsace20TVEmbedIE,
Alsace20TVIE,
)
from .altcensored import (
AltCensoredChannelIE,
AltCensoredIE,
)
from .alura import (
AluraCourseIE,
AluraIE,
)
from .amadeustv import AmadeusTVIE
from .amara import AmaraIE
from .amazon import (
AmazonReviewsIE,
AmazonStoreIE,
)
from .amazonminitv import (
AmazonMiniTVIE,
AmazonMiniTVSeasonIE,
AmazonMiniTVSeriesIE,
)
from .amcnetworks import AMCNetworksIE
from .americastestkitchen import (
AmericasTestKitchenIE,
AmericasTestKitchenSeasonIE,
)
from .anchorfm import AnchorFMEpisodeIE
from .angel import AngelIE
from .antenna import (
Ant1NewsGrArticleIE,
Ant1NewsGrEmbedIE,
AntennaGrWatchIE,
)
from .anvato import AnvatoIE
from .aol import AolIE
from .apa import APAIE
from .aparat import AparatIE
from .appleconnect import AppleConnectIE
from .applepodcasts import ApplePodcastsIE
from .appletrailers import (
AppleTrailersIE,
AppleTrailersSectionIE,
)
from .archiveorg import (
ArchiveOrgIE,
YoutubeWebArchiveIE,
)
from .arcpublishing import ArcPublishingIE
from .ard import (
ARDIE,
ARDAudiothekIE,
ARDAudiothekPlaylistIE,
ARDBetaMediathekIE,
ARDMediathekCollectionIE,
)
from .arnes import ArnesIE
from .art19 import (
Art19IE,
Art19ShowIE,
)
from .arte import (
ArteTVCategoryIE,
ArteTVEmbedIE,
ArteTVIE,
ArteTVPlaylistIE,
)
from .asobichannel import (
AsobiChannelIE,
AsobiChannelTagURLIE,
)
from .asobistage import AsobiStageIE
from .atresplayer import AtresPlayerIE
from .atscaleconf import AtScaleConfEventIE
from .atvat import ATVAtIE
from .audimedia import AudiMediaIE
from .audioboom import AudioBoomIE
from .audiodraft import (
AudiodraftCustomIE,
AudiodraftGenericIE,
)
from .audiomack import (
AudiomackAlbumIE,
AudiomackIE,
)
from .audius import (
AudiusIE,
AudiusPlaylistIE,
AudiusProfileIE,
AudiusTrackIE,
)
from .awaan import (
AWAANIE,
AWAANLiveIE,
AWAANSeasonIE,
AWAANVideoIE,
)
from .axs import AxsIE
from .azmedien import AZMedienIE
from .baidu import BaiduVideoIE
from .banbye import (
BanByeChannelIE,
BanByeIE,
)
from .bandcamp import (
BandcampAlbumIE,
BandcampIE,
BandcampUserIE,
BandcampWeeklyIE,
)
from .bandlab import (
BandlabIE,
BandlabPlaylistIE,
)
from .bannedvideo import BannedVideoIE
from .bbc import (
BBCIE,
BBCCoUkArticleIE,
BBCCoUkIE,
BBCCoUkIPlayerEpisodesIE,
BBCCoUkIPlayerGroupIE,
BBCCoUkPlaylistIE,
)
from .beacon import BeaconTvIE
from .beatbump import (
BeatBumpPlaylistIE,
BeatBumpVideoIE,
)
from .beatport import BeatportIE
from .beeg import BeegIE
from .behindkink import BehindKinkIE
from .berufetv import BerufeTVIE
from .bet import BetIE
from .bfi import BFIPlayerIE
from .bfmtv import (
BFMTVIE,
BFMTVArticleIE,
BFMTVLiveIE,
)
from .bibeltv import (
BibelTVLiveIE,
BibelTVSeriesIE,
BibelTVVideoIE,
)
from .bigflix import BigflixIE
from .bigo import BigoIE
from .bild import BildIE
from .bilibili import (
BilibiliAudioAlbumIE,
BilibiliAudioIE,
BiliBiliBangumiIE,
BiliBiliBangumiMediaIE,
BiliBiliBangumiSeasonIE,
BilibiliCategoryIE,
BilibiliCheeseIE,
BilibiliCheeseSeasonIE,
BilibiliCollectionListIE,
BiliBiliDynamicIE,
BilibiliFavoritesListIE,
BiliBiliIE,
BiliBiliPlayerIE,
BilibiliPlaylistIE,
BiliBiliSearchIE,
BilibiliSeriesListIE,
BilibiliSpaceAudioIE,
BilibiliSpaceVideoIE,
BilibiliWatchlaterIE,
BiliIntlIE,
BiliIntlSeriesIE,
BiliLiveIE,
)
from .biobiochiletv import BioBioChileTVIE
from .bitchute import (
BitChuteChannelIE,
BitChuteIE,
)
from .bitmovin import BitmovinIE
from .blackboardcollaborate import (
BlackboardCollaborateIE,
BlackboardCollaborateLaunchIE,
)
from .bleacherreport import (
BleacherReportCMSIE,
BleacherReportIE,
)
from .blerp import BlerpIE
from .blogger import BloggerIE
from .bloomberg import BloombergIE
from .bluesky import BlueskyIE
from .bokecc import BokeCCIE
from .bongacams import BongaCamsIE
from .boosty import BoostyIE
from .bostonglobe import BostonGlobeIE
from .box import BoxIE
from .boxcast import BoxCastVideoIE
from .bpb import BpbIE
from .br import BRIE
from .brainpop import (
BrainPOPELLIE,
BrainPOPEspIE,
BrainPOPFrIE,
BrainPOPIE,
BrainPOPIlIE,
BrainPOPJrIE,
)
from .breitbart import BreitBartIE
from .brightcove import (
BrightcoveLegacyIE,
BrightcoveNewIE,
)
from .brilliantpala import (
BrilliantpalaClassesIE,
BrilliantpalaElearnIE,
)
from .btvplus import BTVPlusIE
from .bundesliga import BundesligaIE
from .bundestag import BundestagIE
from .bunnycdn import BunnyCdnIE
from .businessinsider import BusinessInsiderIE
from .buzzfeed import BuzzFeedIE
from .byutv import BYUtvIE
from .c56 import C56IE
from .caffeinetv import CaffeineTVIE
from .callin import CallinIE
from .caltrans import CaltransIE
from .cam4 import CAM4IE
from .camdemy import (
CamdemyFolderIE,
CamdemyIE,
)
from .camfm import (
CamFMEpisodeIE,
CamFMShowIE,
)
from .cammodels import CamModelsIE
from .camsoda import CamsodaIE
from .camtasia import CamtasiaEmbedIE
from .canal1 import Canal1IE
from .canalalpha import CanalAlphaIE
from .canalc2 import Canalc2IE
from .canalplus import CanalplusIE
from .canalsurmas import CanalsurmasIE
from .caracoltv import CaracolTvPlayIE
from .cbc import (
CBCIE,
CBCGemIE,
CBCGemLiveIE,
CBCGemPlaylistIE,
CBCListenIE,
CBCPlayerIE,
CBCPlayerPlaylistIE,
)
from .cbs import (
CBSIE,
ParamountPressExpressIE,
)
from .cbsnews import (
CBSLocalArticleIE,
CBSLocalIE,
CBSLocalLiveIE,
CBSNewsEmbedIE,
CBSNewsIE,
CBSNewsLiveIE,
CBSNewsLiveVideoIE,
)
from .cbssports import (
CBSSportsEmbedIE,
CBSSportsIE,
TwentyFourSevenSportsIE,
)
from .ccc import (
CCCIE,
CCCPlaylistIE,
)
from .ccma import CCMAIE
from .cctv import CCTVIE
from .cda import (
CDAIE,
CDAFolderIE,
)
from .cellebrite import CellebriteIE
from .ceskatelevize import CeskaTelevizeIE
from .cgtn import CGTNIE
from .charlierose import CharlieRoseIE
from .chaturbate import ChaturbateIE
from .chilloutzone import ChilloutzoneIE
from .chzzk import (
CHZZKLiveIE,
CHZZKVideoIE,
)
from .cinemax import CinemaxIE
from .cinetecamilano import CinetecaMilanoIE
from .cineverse import (
CineverseDetailsIE,
CineverseIE,
)
from .ciscolive import (
CiscoLiveSearchIE,
CiscoLiveSessionIE,
)
from .ciscowebex import CiscoWebexIE
from .cjsw import CJSWIE
from .clipchamp import ClipchampIE
from .clippit import ClippitIE
from .cliprs import ClipRsIE
from .closertotruth import CloserToTruthIE
from .cloudflarestream import CloudflareStreamIE
from .cloudycdn import CloudyCDNIE
from .clubic import ClubicIE
from .clyp import ClypIE
from .cnbc import CNBCVideoIE
from .cnn import (
CNNIE,
CNNIndonesiaIE,
)
from .comedycentral import ComedyCentralIE
from .commonmistakes import (
BlobIE,
CommonMistakesIE,
UnicodeBOMIE,
)
from .commonprotocols import (
MmsIE,
RtmpIE,
ViewSourceIE,
)
from .condenast import CondeNastIE
from .contv import CONtvIE
from .corus import CorusIE
from .coub import CoubIE
from .cozytv import CozyTVIE
from .cpac import (
CPACIE,
CPACPlaylistIE,
)
from .cracked import CrackedIE
from .craftsy import CraftsyIE
from .crooksandliars import CrooksAndLiarsIE
from .crowdbunker import (
CrowdBunkerChannelIE,
CrowdBunkerIE,
)
from .crtvg import CrtvgIE
from .cspan import (
CSpanCongressIE,
CSpanIE,
)
from .ctsnews import CtsNewsIE
from .ctvnews import CTVNewsIE
from .cultureunplugged import CultureUnpluggedIE
from .curiositystream import (
CuriosityStreamCollectionsIE,
CuriosityStreamIE,
CuriosityStreamSeriesIE,
)
from .cybrary import (
CybraryCourseIE,
CybraryIE,
)
from .dacast import (
DacastPlaylistIE,
DacastVODIE,
)
from .dailymail import DailyMailIE
from .dailymotion import (
DailymotionIE,
DailymotionPlaylistIE,
DailymotionSearchIE,
DailymotionUserIE,
)
from .dailywire import (
DailyWireIE,
DailyWirePodcastIE,
)
from .damtomo import (
DamtomoRecordIE,
DamtomoVideoIE,
)
from .dangalplay import (
DangalPlayIE,
DangalPlaySeasonIE,
)
from .daum import (
DaumClipIE,
DaumIE,
DaumPlaylistIE,
DaumUserIE,
)
from .daystar import DaystarClipIE
from .dbtv import DBTVIE
from .dctp import DctpTvIE
from .democracynow import DemocracynowIE
from .detik import DetikEmbedIE
from .deuxm import (
DeuxMIE,
DeuxMNewsIE,
)
from .dfb import DFBIE
from .dhm import DHMIE
from .digitalconcerthall import DigitalConcertHallIE
from .digiteka import DigitekaIE
from .digiview import DigiviewIE
from .discogs import DiscogsReleasePlaylistIE
from .disney import DisneyIE
from .dispeak import DigitallySpeakingIE
from .dlf import (
DLFIE,
DLFCorpusIE,
)
from .dlive import (
DLiveStreamIE,
DLiveVODIE,
)
from .douyutv import (
DouyuShowIE,
DouyuTVIE,
)
from .dplay import (
TLCIE,
AmHistoryChannelIE,
AnimalPlanetIE,
CookingChannelIE,
DestinationAmericaIE,
DiscoveryLifeIE,
DiscoveryNetworksDeIE,
DiscoveryPlusIE,
DiscoveryPlusIndiaIE,
DiscoveryPlusIndiaShowIE,
DiscoveryPlusItalyIE,
DiscoveryPlusItalyShowIE,
DPlayIE,
FoodNetworkIE,
GoDiscoveryIE,
HGTVDeIE,
HGTVUsaIE,
InvestigationDiscoveryIE,
ScienceChannelIE,
TravelChannelIE,
)
from .drbonanza import DRBonanzaIE
from .dreisat import DreiSatIE
from .drooble import DroobleIE
from .dropbox import DropboxIE
from .dropout import (
DropoutIE,
DropoutSeasonIE,
)
from .drtalks import DrTalksIE
from .drtuber import DrTuberIE
from .drtv import (
DRTVIE,
DRTVLiveIE,
DRTVSeasonIE,
DRTVSeriesIE,
)
from .dtube import DTubeIE
from .duboku import (
DubokuIE,
DubokuPlaylistIE,
)
from .dumpert import DumpertIE
from .duoplay import DuoplayIE
from .dvtv import DVTVIE
from .dw import (
DWIE,
DWArticleIE,
)
from .ebaumsworld import EbaumsWorldIE
from .ebay import EbayIE
from .egghead import (
EggheadCourseIE,
EggheadLessonIE,
)
from .eggs import (
EggsArtistIE,
EggsIE,
)
from .eighttracks import EightTracksIE
from .eitb import EitbIE
from .elementorembed import ElementorEmbedIE
from .elonet import ElonetIE
from .elpais import ElPaisIE
from .eltrecetv import ElTreceTVIE
from .embedly import EmbedlyIE
from .epicon import (
EpiconIE,
EpiconSeriesIE,
)
from .epidemicsound import EpidemicSoundIE
from .eplus import EplusIbIE
from .epoch import EpochIE
from .eporner import EpornerIE
from .erocast import ErocastIE
from .eroprofile import (
EroProfileAlbumIE,
EroProfileIE,
)
from .err import ERRJupiterIE
from .ertgr import (
ERTFlixCodenameIE,
ERTFlixIE,
ERTWebtvEmbedIE,
)
from .espn import (
ESPNIE,
ESPNArticleIE,
ESPNCricInfoIE,
FiveThirtyEightIE,
WatchESPNIE,
)
from .ettutv import EttuTvIE
from .europa import (
EuropaIE,
EuroParlWebstreamIE,
)
from .europeantour import EuropeanTourIE
from .eurosport import EurosportIE
from .euscreen import EUScreenIE
from .expressen import ExpressenIE
from .eyedotv import EyedoTVIE
from .facebook import (
FacebookAdsIE,
FacebookIE,
FacebookPluginsVideoIE,
FacebookRedirectURLIE,
FacebookReelIE,
)
from .fancode import (
FancodeLiveIE,
FancodeVodIE,
)
from .fathom import FathomIE
from .faulio import (
FaulioIE,
FaulioLiveIE,
)
from .faz import FazIE
from .fc2 import (
FC2IE,
FC2EmbedIE,
FC2LiveIE,
)
from .fczenit import FczenitIE
from .fifa import FifaIE
from .filmarchiv import FilmArchivIE
from .filmon import (
FilmOnChannelIE,
FilmOnIE,
)
from .filmweb import FilmwebIE
from .firsttv import (
FirstTVIE,
FirstTVLiveIE,
)
from .fivetv import FiveTVIE
from .flextv import FlexTVIE
from .flickr import FlickrIE
from .floatplane import (
FloatplaneChannelIE,
FloatplaneIE,
)
from .folketinget import FolketingetIE
from .footyroom import FootyRoomIE
from .formula1 import Formula1IE
from .fourtube import (
FourTubeIE,
FuxIE,
PornerBrosIE,
PornTubeIE,
)
from .fox import FOXIE
from .fox9 import (
FOX9IE,
FOX9NewsIE,
)
from .foxnews import (
FoxNewsArticleIE,
FoxNewsIE,
FoxNewsVideoIE,
)
from .foxsports import FoxSportsIE
from .fptplay import FptplayIE
from .francaisfacile import FrancaisFacileIE
from .franceinter import FranceInterIE
from .francetv import (
FranceTVIE,
FranceTVInfoIE,
FranceTVSiteIE,
)
from .freesound import FreesoundIE
from .freespeech import FreespeechIE
from .freetv import (
FreeTvIE,
FreeTvMoviesIE,
)
from .frontendmasters import (
FrontendMastersCourseIE,
FrontendMastersIE,
FrontendMastersLessonIE,
)
from .frontro import (
TheChosenGroupIE,
TheChosenIE,
)
from .fujitv import FujiTVFODPlus7IE
from .funk import FunkIE
from .funker530 import Funker530IE
from .fuyintv import FuyinTVIE
from .gab import (
GabIE,
GabTVIE,
)
from .gaia import GaiaIE
from .gamedevtv import GameDevTVDashboardIE
from .gamejolt import (
GameJoltCommunityIE,
GameJoltGameIE,
GameJoltGameSoundtrackIE,
GameJoltIE,
GameJoltSearchIE,
GameJoltUserIE,
)
from .gamespot import GameSpotIE
from .gamestar import GameStarIE
from .gaskrank import GaskrankIE
from .gazeta import GazetaIE
from .gbnews import GBNewsIE
from .gdcvault import GDCVaultIE
from .gedidigital import GediDigitalIE
from .generic import GenericIE
from .genericembeds import (
HTML5MediaEmbedIE,
QuotedHTMLIE,
)
from .genius import (
GeniusIE,
GeniusLyricsIE,
)
from .germanupa import GermanupaIE
from .getcourseru import (
GetCourseRuIE,
GetCourseRuPlayerIE,
)
from .gettr import (
GettrIE,
GettrStreamingIE,
)
from .giantbomb import GiantBombIE
from .glide import GlideIE
from .globalplayer import (
GlobalPlayerAudioEpisodeIE,
GlobalPlayerAudioIE,
GlobalPlayerLiveIE,
GlobalPlayerLivePlaylistIE,
GlobalPlayerVideoIE,
)
from .globo import (
GloboArticleIE,
GloboIE,
)
from .glomex import (
GlomexEmbedIE,
GlomexIE,
)
from .gmanetwork import GMANetworkVideoIE
from .go import GoIE
from .godresource import GodResourceIE
from .godtube import GodTubeIE
from .gofile import GofileIE
from .golem import GolemIE
from .goodgame import GoodGameIE
from .googledrive import (
GoogleDriveFolderIE,
GoogleDriveIE,
)
from .googlepodcasts import (
GooglePodcastsFeedIE,
GooglePodcastsIE,
)
from .googlesearch import GoogleSearchIE
from .goplay import GoPlayIE
from .gopro import GoProIE
from .goshgay import GoshgayIE
from .gotostage import GoToStageIE
from .gputechconf import GPUTechConfIE
from .graspop import GraspopIE
from .gronkh import (
GronkhFeedIE,
GronkhIE,
GronkhVodsIE,
)
from .groupon import GrouponIE
from .harpodeon import HarpodeonIE
from .hbo import HBOIE
from .hearthisat import HearThisAtIE
from .heise import HeiseIE
from .hellporno import HellPornoIE
from .hgtv import HGTVComShowIE
from .hidive import HiDiveIE
from .historicfilms import HistoricFilmsIE
from .hitrecord import HitRecordIE
from .hketv import HKETVIE
from .hollywoodreporter import (
HollywoodReporterIE,
HollywoodReporterPlaylistIE,
)
from .holodex import HolodexIE
from .hotnewhiphop import HotNewHipHopIE
from .hotstar import (
HotStarIE,
HotStarPrefixIE,
HotStarSeriesIE,
)
from .hrefli import HrefLiRedirectIE
from .hrfensehen import HRFernsehenIE
from .hrti import (
HRTiIE,
HRTiPlaylistIE,
)
from .hse import (
HSEProductIE,
HSEShowIE,
)
from .huajiao import HuajiaoIE
from .huffpost import HuffPostIE
from .hungama import (
HungamaAlbumPlaylistIE,
HungamaIE,
HungamaSongIE,
)
from .huya import (
HuyaLiveIE,
HuyaVideoIE,
)
from .hypem import HypemIE
from .hypergryph import MonsterSirenHypergryphMusicIE
from .hytale import HytaleIE
from .icareus import IcareusIE
from .ichinanalive import (
IchinanaLiveClipIE,
IchinanaLiveIE,
IchinanaLiveVODIE,
)
from .idagio import (
IdagioAlbumIE,
IdagioPersonalPlaylistIE,
IdagioPlaylistIE,
IdagioRecordingIE,
IdagioTrackIE,
)
from .idolplus import IdolPlusIE
from .ign import (
IGNIE,
IGNArticleIE,
IGNVideoIE,
)
from .iheart import (
IHeartRadioIE,
IHeartRadioPodcastIE,
)
from .ilpost import IlPostIE
from .iltalehti import IltalehtiIE
from .imdb import (
ImdbIE,
ImdbListIE,
)
from .imgur import (
ImgurAlbumIE,
ImgurGalleryIE,
ImgurIE,
)
from .ina import InaIE
from .inc import IncIE
from .indavideo import IndavideoEmbedIE
from .infoq import InfoQIE
from .instagram import (
InstagramIE,
InstagramIOSIE,
InstagramStoryIE,
InstagramTagIE,
InstagramUserIE,
)
from .internazionale import InternazionaleIE
from .internetvideoarchive import InternetVideoArchiveIE
from .iprima import (
IPrimaCNNIE,
IPrimaIE,
)
from .iqiyi import (
IqAlbumIE,
IqIE,
IqiyiIE,
)
from .islamchannel import (
IslamChannelIE,
IslamChannelSeriesIE,
)
from .israelnationalnews import IsraelNationalNewsIE
from .itprotv import (
ITProTVCourseIE,
ITProTVIE,
)
from .itv import (
ITVBTCCIE,
ITVIE,
)
from .ivi import (
IviCompilationIE,
IviIE,
)
from .ivideon import IvideonIE
from .ivoox import IvooxIE
from .iwara import (
IwaraIE,
IwaraPlaylistIE,
IwaraUserIE,
)
from .ixigua import IxiguaIE
from .izlesene import IzleseneIE
from .jamendo import (
JamendoAlbumIE,
JamendoIE,
)
from .japandiet import (
SangiinIE,
SangiinInstructionIE,
ShugiinItvLiveIE,
ShugiinItvLiveRoomIE,
ShugiinItvVodIE,
)
from .jeuxvideo import JeuxVideoIE
from .jiosaavn import (
JioSaavnAlbumIE,
JioSaavnArtistIE,
JioSaavnPlaylistIE,
JioSaavnShowIE,
JioSaavnShowPlaylistIE,
JioSaavnSongIE,
)
from .joj import JojIE
from .jove import JoveIE
from .jstream import JStreamIE
from .jtbc import (
JTBCIE,
JTBCProgramIE,
)
from .jwplatform import JWPlatformIE
from .kakao import KakaoIE
from .kaltura import KalturaIE
from .kankanews import KankaNewsIE
from .karaoketv import KaraoketvIE
from .kelbyone import KelbyOneIE
from .kenh14 import (
Kenh14PlaylistIE,
Kenh14VideoIE,
)
from .khanacademy import (
KhanAcademyIE,
KhanAcademyUnitIE,
)
from .kick import (
KickClipIE,
KickIE,
KickVODIE,
)
from .kicker import KickerIE
from .kickstarter import KickStarterIE
from .kika import (
KikaIE,
KikaPlaylistIE,
)
from .kinja import KinjaEmbedIE
from .kinopoisk import KinoPoiskIE
from .kommunetv import KommunetvIE
from .kompas import KompasVideoIE
from .koo import KooIE
from .krasview import KrasViewIE
from .kth import KTHIE
from .ku6 import Ku6IE
from .kukululive import KukuluLiveIE
from .kuwo import (
KuwoAlbumIE,
KuwoCategoryIE,
KuwoChartIE,
KuwoIE,
KuwoMvIE,
KuwoSingerIE,
)
from .la7 import (
LA7IE,
LA7PodcastEpisodeIE,
LA7PodcastIE,
)
from .laracasts import (
LaracastsIE,
LaracastsPlaylistIE,
)
from .lastfm import (
LastFMIE,
LastFMPlaylistIE,
LastFMUserIE,
)
from .laxarxames import LaXarxaMesIE
from .lbry import (
LBRYIE,
LBRYChannelIE,
LBRYPlaylistIE,
)
from .lci import LCIIE
from .lcp import (
LcpIE,
LcpPlayIE,
)
from .learningonscreen import LearningOnScreenIE
from .lecture2go import Lecture2GoIE
from .lecturio import (
LecturioCourseIE,
LecturioDeCourseIE,
LecturioIE,
)
from .leeco import (
LeIE,
LePlaylistIE,
LetvCloudIE,
)
from .lefigaro import (
LeFigaroVideoEmbedIE,
LeFigaroVideoSectionIE,
)
from .lego import LEGOIE
from .lemonde import LemondeIE
from .lenta import LentaIE
from .libraryofcongress import LibraryOfCongressIE
from .libsyn import LibsynIE
from .lifenews import (
LifeEmbedIE,
LifeNewsIE,
)
from .likee import (
LikeeIE,
LikeeUserIE,
)
from .linkedin import (
LinkedInEventsIE,
LinkedInIE,
LinkedInLearningCourseIE,
LinkedInLearningIE,
)
from .liputan6 import Liputan6IE
from .listennotes import ListenNotesIE
from .litv import LiTVIE
from .livejournal import LiveJournalIE
from .livestream import (
LivestreamIE,
LivestreamOriginalIE,
LivestreamShortenerIE,
)
from .livestreamfails import LivestreamfailsIE
from .lnk import LnkIE
from .loco import LocoIE
from .loom import (
LoomFolderIE,
LoomIE,
)
from .lovehomeporn import LoveHomePornIE
from .lrt import (
LRTVODIE,
LRTRadioIE,
LRTStreamIE,
)
from .lsm import (
LSMLREmbedIE,
LSMLTVEmbedIE,
LSMReplayIE,
)
from .lumni import LumniIE
from .lynda import (
LyndaCourseIE,
LyndaIE,
)
from .maariv import MaarivIE
from .magellantv import MagellanTVIE
from .magentamusik import MagentaMusikIE
from .mailru import (
MailRuIE,
MailRuMusicIE,
MailRuMusicSearchIE,
)
from .mainstreaming import MainStreamingIE
from .mangomolo import (
MangomoloLiveIE,
MangomoloVideoIE,
)
from .manyvids import ManyVidsIE
from .maoritv import MaoriTVIE
from .markiza import (
MarkizaIE,
MarkizaPageIE,
)
from .massengeschmacktv import MassengeschmackTVIE
from .masters import MastersIE
from .matchtv import MatchTVIE
from .mave import (
MaveChannelIE,
MaveIE,
)
from .mbn import MBNIE
from .mdr import MDRIE
from .medaltv import MedalTVIE
from .mediaite import MediaiteIE
from .mediaklikk import MediaKlikkIE
from .medialaan import MedialaanIE
from .mediaset import (
MediasetIE,
MediasetShowIE,
)
from .mediasite import (
MediasiteCatalogIE,
MediasiteIE,
MediasiteNamedCatalogIE,
)
from .mediastream import (
MediaStreamIE,
WinSportsVideoIE,
)
from .mediaworksnz import MediaWorksNZVODIE
from .medici import MediciIE
from .megaphone import MegaphoneIE
from .megatvcom import (
MegaTVComEmbedIE,
MegaTVComIE,
)
from .meipai import MeipaiIE
from .melonvod import MelonVODIE
from .metacritic import MetacriticIE
from .mgtv import MGTVIE
from .microsoftembed import (
MicrosoftBuildIE,
MicrosoftEmbedIE,
MicrosoftLearnEpisodeIE,
MicrosoftLearnPlaylistIE,
MicrosoftLearnSessionIE,
MicrosoftMediusIE,
)
from .microsoftstream import MicrosoftStreamIE
from .minds import (
MindsChannelIE,
MindsGroupIE,
MindsIE,
)
from .minoto import MinotoIE
from .mir24tv import Mir24TvIE
from .mirrativ import (
MirrativIE,
MirrativUserIE,
)
from .mirrorcouk import MirrorCoUKIE
from .mit import (
OCWMITIE,
TechTVMITIE,
)
from .mixch import (
MixchArchiveIE,
MixchIE,
MixchMovieIE,
)
from .mixcloud import (
MixcloudIE,
MixcloudPlaylistIE,
MixcloudUserIE,
)
from .mixlr import (
MixlrIE,
MixlrRecoringIE,
)
from .mlb import (
MLBIE,
MLBTVIE,
MLBArticleIE,
MLBVideoIE,
)
from .mlssoccer import MLSSoccerIE
from .mocha import MochaVideoIE
from .mojevideo import MojevideoIE
from .mojvideo import MojvideoIE
from .monstercat import MonstercatIE
from .motherless import (
MotherlessGalleryIE,
MotherlessGroupIE,
MotherlessIE,
MotherlessUploaderIE,
)
from .motorsport import MotorsportIE
from .moviepilot import MoviepilotIE
from .moview import MoviewPlayIE
from .moviezine import MoviezineIE
from .movingimage import MovingImageIE
from .msn import MSNIE
from .mtv import MTVIE
from .muenchentv import MuenchenTVIE
from .murrtube import (
MurrtubeIE,
MurrtubeUserIE,
)
from .museai import MuseAIIE
from .musescore import MuseScoreIE
from .musicdex import (
MusicdexAlbumIE,
MusicdexArtistIE,
MusicdexPlaylistIE,
MusicdexSongIE,
)
from .mux import MuxIE
from .mx3 import (
Mx3IE,
Mx3NeoIE,
Mx3VolksmusikIE,
)
from .mxplayer import (
MxplayerIE,
MxplayerShowIE,
)
from .myspace import (
MySpaceAlbumIE,
MySpaceIE,
)
from .myspass import MySpassIE
from .myvideoge import MyVideoGeIE
from .myvidster import MyVidsterIE
from .mzaalo import MzaaloIE
from .n1 import (
N1InfoAssetIE,
N1InfoIIE,
)
from .nascar import NascarClassicsIE
from .nate import (
NateIE,
NateProgramIE,
)
from .nationalgeographic import (
NationalGeographicTVIE,
NationalGeographicVideoIE,
)
from .naver import (
NaverIE,
NaverLiveIE,
NaverNowIE,
)
from .nba import (
NBAIE,
NBAChannelIE,
NBAEmbedIE,
NBAWatchCollectionIE,
NBAWatchEmbedIE,
NBAWatchIE,
)
from .nbc import (
NBCIE,
BravoTVIE,
NBCNewsIE,
NBCOlympicsIE,
NBCOlympicsStreamIE,
NBCSportsIE,
NBCSportsStreamIE,
NBCSportsVPlayerIE,
NBCStationsIE,
SyfyIE,
)
from .ndr import (
NDRIE,
NDREmbedBaseIE,
NDREmbedIE,
NJoyEmbedIE,
NJoyIE,
)
from .ndtv import NDTVIE
from .nebula import (
NebulaChannelIE,
NebulaClassIE,
NebulaIE,
NebulaSeasonIE,
NebulaSubscriptionsIE,
)
from .nekohacker import NekoHackerIE
from .nerdcubed import NerdCubedFeedIE
from .nest import (
NestClipIE,
NestIE,
)
from .netapp import (
NetAppCollectionIE,
NetAppVideoIE,
)
from .neteasemusic import (
NetEaseMusicAlbumIE,
NetEaseMusicDjRadioIE,
NetEaseMusicIE,
NetEaseMusicListIE,
NetEaseMusicMvIE,
NetEaseMusicProgramIE,
NetEaseMusicSingerIE,
)
from .netverse import (
NetverseIE,
NetversePlaylistIE,
NetverseSearchIE,
)
from .netzkino import NetzkinoIE
from .newgrounds import (
NewgroundsIE,
NewgroundsPlaylistIE,
NewgroundsUserIE,
)
from .newspicks import NewsPicksIE
from .newsy import NewsyIE
from .nexx import (
NexxEmbedIE,
NexxIE,
)
from .nfb import (
NFBIE,
NFBSeriesIE,
)
from .nfhsnetwork import NFHSNetworkIE
from .nfl import (
NFLIE,
NFLArticleIE,
NFLPlusEpisodeIE,
NFLPlusReplayIE,
)
from .nhk import (
NhkForSchoolBangumiIE,
NhkForSchoolProgramListIE,
NhkForSchoolSubjectIE,
NhkRadioNewsPageIE,
NhkRadiruIE,
NhkRadiruLiveIE,
NhkVodIE,
NhkVodProgramIE,
)
from .nhl import NHLIE
from .nick import NickIE
from .niconico import (
NiconicoHistoryIE,
NiconicoIE,
NiconicoLiveIE,
NiconicoPlaylistIE,
NiconicoSeriesIE,
NiconicoUserIE,
NicovideoSearchDateIE,
NicovideoSearchIE,
NicovideoSearchURLIE,
NicovideoTagURLIE,
)
from .niconicochannelplus import (
NiconicoChannelPlusChannelLivesIE,
NiconicoChannelPlusChannelVideosIE,
NiconicoChannelPlusIE,
)
from .ninaprotocol import NinaProtocolIE
from .ninecninemedia import (
CPTwentyFourIE,
NineCNineMediaIE,
)
from .ninegag import NineGagIE
from .ninenews import NineNewsIE
from .ninenow import NineNowIE
from .nintendo import NintendoIE
from .nitter import NitterIE
from .nobelprize import NobelPrizeIE
from .noice import NoicePodcastIE
from .nonktube import NonkTubeIE
from .noodlemagazine import NoodleMagazineIE
from .nosnl import NOSNLArticleIE
from .nova import (
NovaEmbedIE,
NovaIE,
)
from .novaplay import NovaPlayIE
from .nowcanal import NowCanalIE
from .nowness import (
NownessIE,
NownessPlaylistIE,
NownessSeriesIE,
)
from .noz import NozIE
from .npo import (
NPOIE,
VPROIE,
WNLIE,
AndereTijdenIE,
HetKlokhuisIE,
NPOLiveIE,
NPORadioFragmentIE,
NPORadioIE,
SchoolTVIE,
)
from .npr import NprIE
from .nrk import (
NRKIE,
NRKTVIE,
NRKPlaylistIE,
NRKRadioPodkastIE,
NRKSkoleIE,
NRKTVDirekteIE,
NRKTVEpisodeIE,
NRKTVEpisodesIE,
NRKTVSeasonIE,
NRKTVSeriesIE,
)
from .nrl import NRLTVIE
from .nts import NTSLiveIE
from .ntvcojp import NTVCoJpCUIE
from .ntvde import NTVDeIE
from .ntvru import NTVRuIE
from .nubilesporn import NubilesPornIE
from .nuum import (
NuumLiveIE,
NuumMediaIE,
NuumTabIE,
)
from .nuvid import NuvidIE
from .nytimes import (
NYTimesArticleIE,
NYTimesCookingIE,
NYTimesCookingRecipeIE,
NYTimesIE,
)
from .nzherald import NZHeraldIE
from .nzonscreen import NZOnScreenIE
from .nzz import NZZIE
from .odkmedia import OnDemandChinaEpisodeIE
from .odnoklassniki import OdnoklassnikiIE
from .oftv import (
OfTVIE,
OfTVPlaylistIE,
)
from .oktoberfesttv import OktoberfestTVIE
from .olympics import OlympicsReplayIE
from .on24 import On24IE
from .ondemandkorea import (
OnDemandKoreaIE,
OnDemandKoreaProgramIE,
)
from .onefootball import OneFootballIE
from .onenewsnz import OneNewsNZIE
from .oneplace import OnePlacePodcastIE
from .onet import (
OnetChannelIE,
OnetIE,
OnetMVPIE,
OnetPlIE,
)
from .onionstudios import OnionStudiosIE
from .onsen import OnsenIE
from .opencast import (
OpencastIE,
OpencastPlaylistIE,
)
from .openrec import (
OpenRecCaptureIE,
OpenRecIE,
OpenRecMovieIE,
)
from .ora import OraTVIE
from .orf import (
ORFIPTVIE,
ORFONIE,
ORFFM4StoryIE,
ORFPodcastIE,
ORFRadioIE,
)
from .outsidetv import OutsideTVIE
from .owncloud import OwnCloudIE
from .packtpub import (
PacktPubCourseIE,
PacktPubIE,
)
from .palcomp3 import (
PalcoMP3ArtistIE,
PalcoMP3IE,
PalcoMP3VideoIE,
)
from .pandatv import PandaTvIE
from .panopto import (
PanoptoIE,
PanoptoListIE,
PanoptoPlaylistIE,
)
from .parler import ParlerIE
from .parlview import ParlviewIE
from .parti import (
PartiLivestreamIE,
PartiVideoIE,
)
from .patreon import (
PatreonCampaignIE,
PatreonIE,
)
from .pbs import (
PBSIE,
PBSKidsIE,
)
from .pearvideo import PearVideoIE
from .peekvids import (
PeekVidsIE,
PlayVidsIE,
)
from .peertube import (
PeerTubeIE,
PeerTubePlaylistIE,
)
from .peertv import PeerTVIE
from .peloton import (
PelotonIE,
PelotonLiveIE,
)
from .performgroup import PerformGroupIE
from .periscope import (
PeriscopeIE,
PeriscopeUserIE,
)
from .pgatour import PGATourIE
from .philharmoniedeparis import PhilharmonieDeParisIE
from .phoenix import PhoenixIE
from .photobucket import PhotobucketIE
from .pialive import PiaLiveIE
from .piapro import PiaproIE
from .picarto import (
PicartoIE,
PicartoVodIE,
)
from .piksel import PikselIE
from .pinkbike import PinkbikeIE
from .pinterest import (
PinterestCollectionIE,
PinterestIE,
)
from .piramidetv import (
PiramideTVChannelIE,
PiramideTVIE,
)
from .planetmarathi import PlanetMarathiIE
from .platzi import (
PlatziCourseIE,
PlatziIE,
)
from .playerfm import PlayerFmIE
from .playplustv import PlayPlusTVIE
from .playsuisse import PlaySuisseIE
from .playtvak import PlaytvakIE
from .playwire import PlaywireIE
from .pluralsight import (
PluralsightCourseIE,
PluralsightIE,
)
from .plutotv import PlutoTVIE
from .plvideo import PlVideoIE
from .plyr import PlyrEmbedIE
from .podbayfm import (
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | true |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/iprima.py | yt_dlp/extractor/iprima.py | import json
import re
import time
from .common import InfoExtractor
from ..utils import (
ExtractorError,
determine_ext,
js_to_json,
traverse_obj,
)
class IPrimaIE(InfoExtractor):
    """Extractor for videos on *.iprima.cz (except cnn.iprima.cz).

    Login is mandatory: _real_initialize refuses to continue without the
    access token obtained in _perform_login, and every play-backend API
    request carries that token in the X-OTT-Access-Token header.
    """
    # cnn.iprima.cz is deliberately excluded here; it is handled by
    # IPrimaCNNIE, which uses a different (older) player backend.
    _VALID_URL = r'https?://(?!cnn)(?:[^/]+)\.iprima\.cz/(?:[^/]+/)*(?P<id>[^/?#&]+)'
    _GEO_BYPASS = False
    _NETRC_MACHINE = 'iprima'
    # Class-level cache of the session token, populated by _perform_login
    # and shared between extractor instances.
    access_token = None
    _TESTS = [{
        'url': 'https://prima.iprima.cz/particka/92-epizoda',
        'info_dict': {
            'id': 'p51388',
            'ext': 'mp4',
            'title': 'Partička (92)',
            'description': 'md5:57943f6a50d6188288c3a579d2fd5f01',
            'episode': 'Partička (92)',
            'season': 'Partička',
            'series': 'Prima Partička',
            'episode_number': 92,
            'thumbnail': 'https://d31b9s05ygj54s.cloudfront.net/prima-plus/image/video-ef6cf9de-c980-4443-92e4-17fe8bccd45c-16x9.jpeg',
        },
        'params': {
            'skip_download': True,  # m3u8 download
        },
    }, {
        'url': 'https://zoom.iprima.cz/porady/krasy-kanarskych-ostrovu/tenerife-v-risi-ohne',
        'info_dict': {
            'id': 'p1412199',
            'ext': 'mp4',
            'episode_number': 3,
            'episode': 'Tenerife: V říši ohně',
            'description': 'md5:4b4a05c574b5eaef130e68d4811c3f2c',
            'duration': 3111.0,
            'thumbnail': 'https://d31b9s05ygj54s.cloudfront.net/prima-plus/image/video-f66dd7fb-c1a0-47d1-b3bc-7db328d566c5-16x9-1711636518.jpg/t_16x9_medium_1366_768',
            'title': 'Tenerife: V říši ohně',
            'timestamp': 1711825800,
            'upload_date': '20240330',
        },
        'params': {
            'skip_download': True,  # m3u8 download
        },
    }, {
        'url': 'http://play.iprima.cz/particka/particka-92',
        'only_matching': True,
    }, {
        # geo restricted
        'url': 'http://play.iprima.cz/closer-nove-pripady/closer-nove-pripady-iv-1',
        'only_matching': True,
    }, {
        'url': 'https://prima.iprima.cz/my-little-pony/mapa-znameni-2-2',
        'only_matching': True,
    }, {
        'url': 'https://prima.iprima.cz/porady/jak-se-stavi-sen/rodina-rathousova-praha',
        'only_matching': True,
    }, {
        'url': 'http://www.iprima.cz/filmy/desne-rande',
        'only_matching': True,
    }, {
        'url': 'https://zoom.iprima.cz/10-nejvetsich-tajemstvi-zahad/posvatna-mista-a-stavby',
        'only_matching': True,
    }, {
        'url': 'https://krimi.iprima.cz/mraz-0/sebevrazdy',
        'only_matching': True,
    }, {
        'url': 'https://cool.iprima.cz/derava-silnice-nevadi',
        'only_matching': True,
    }, {
        'url': 'https://love.iprima.cz/laska-az-za-hrob/slib-dany-bratrovi',
        'only_matching': True,
    }]
    def _perform_login(self, username, password):
        """Create an iprima session and cache its access token on the class.

        No-op when a token was already obtained by an earlier login.
        Raises ExtractorError if the API responds without a usable token.
        """
        if self.access_token:
            return
        token_data = self._download_json(
            'https://ucet.iprima.cz/api/session/create', None,
            note='Logging in', errnote='Failed to log in',
            data=json.dumps({
                'email': username,
                'password': password,
                # Arbitrary device label shown in the user's session list
                'deviceName': 'Windows Chrome',
            }).encode(), headers={'content-type': 'application/json'})
        self.access_token = token_data['accessToken']['value']
        # _download_json already raises on HTTP errors; this guards against
        # a 2xx response carrying an empty token value
        if not self.access_token:
            raise ExtractorError('Failed to fetch access token')
    def _real_initialize(self):
        # All iPrima content is behind login; bail out early with the
        # standard login-required message if _perform_login never ran
        if not self.access_token:
            self.raise_login_required('Login is required to access any iPrima content', method='password')
    def _raise_access_error(self, error_code):
        """Translate a play-backend `errorCode` into a user-facing error.

        Geo denial is reported as CZ-only; any other non-None code is
        surfaced as a generic access error. Both paths keep metadata
        available so partial info can still be returned.
        """
        if error_code == 'PLAY_GEOIP_DENIED':
            self.raise_geo_restricted(countries=['CZ'], metadata_available=True)
        elif error_code is not None:
            self.raise_no_formats('Access to stream infos forbidden', expected=True)
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        title = self._html_extract_title(webpage) or self._html_search_meta(
            ['og:title', 'twitter:title'],
            webpage, 'title', default=None)
        # The URL slug is only a display ID; the real product ID ("p<digits>")
        # is resolved from the page via several fallbacks, tried in order:
        # 1) inline JS assignments
        video_id = self._search_regex((
            r'productId\s*=\s*([\'"])(?P<id>p\d+)\1',
            r'pproduct_id\s*=\s*([\'"])(?P<id>p\d+)\1',
            r'let\s+videos\s*=\s*([\'"])(?P<id>p\d+)\1',
        ), webpage, 'real id', group='id', default=None)
        if not video_id:
            # 2) classic window.__NUXT__ payload
            nuxt_data = self._search_nuxt_data(webpage, video_id, traverse='data', fatal=False)
            video_id = traverse_obj(
                nuxt_data, (..., 'content', 'additionals', 'videoPlayId', {str}), get_all=False)
        if not video_id:
            # 3) serialized <script id="__NUXT_DATA__"> array; the ID is the
            # first string element matching the p<digits> shape
            nuxt_data = self._search_json(
                r'<script[^>]+\bid=["\']__NUXT_DATA__["\'][^>]*>',
                webpage, 'nuxt data', None, end_pattern=r'</script>', contains_pattern=r'\[(?s:.+)\]')
            video_id = traverse_obj(nuxt_data, lambda _, v: re.fullmatch(r'p\d+', v), get_all=False)
        if not video_id:
            self.raise_no_formats('Unable to extract video ID from webpage')
        # NOTE(review): the double slash in "/api/v1//products/" is kept
        # as-is — presumably the endpoint accepts it; confirm before changing.
        # 403 is expected for geo/subscription denials and is handled below
        # via the errorCode field instead of raising on the HTTP status.
        metadata = self._download_json(
            f'https://api.play-backend.iprima.cz/api/v1//products/id-{video_id}/play',
            video_id, note='Getting manifest URLs', errnote='Failed to get manifest URLs',
            headers={'X-OTT-Access-Token': self.access_token},
            expected_status=403)
        self._raise_access_error(metadata.get('errorCode'))
        stream_infos = metadata.get('streamInfos')
        formats = []
        if stream_infos is None:
            self.raise_no_formats('Reading stream infos failed', expected=True)
        else:
            # Each stream info advertises its manifest type; fall back to the
            # URL extension when the type field is missing or unknown
            for manifest in stream_infos:
                manifest_type = manifest.get('type')
                manifest_url = manifest.get('url')
                ext = determine_ext(manifest_url)
                if manifest_type == 'HLS' or ext == 'm3u8':
                    formats += self._extract_m3u8_formats(
                        manifest_url, video_id, 'mp4', entry_protocol='m3u8_native',
                        m3u8_id='hls', fatal=False)
                elif manifest_type == 'DASH' or ext == 'mpd':
                    formats += self._extract_mpd_formats(
                        manifest_url, video_id, mpd_id='dash', fatal=False)
        # JSON-LD metadata takes precedence; fill the gaps from <meta> tags
        final_result = self._search_json_ld(webpage, video_id, default={})
        final_result.update({
            'id': video_id,
            'title': final_result.get('title') or title,
            'thumbnail': self._html_search_meta(
                ['thumbnail', 'og:image', 'twitter:image'],
                webpage, 'thumbnail', default=None),
            'formats': formats,
            'description': self._html_search_meta(
                ['description', 'og:description', 'twitter:description'],
                webpage, 'description', default=None)})
        return final_result
class IPrimaCNNIE(InfoExtractor):
    """Extractor for cnn.iprima.cz, served by the older play.iprima.cz player.

    Unlike IPrimaIE, no login is needed: formats are scraped from the
    player initialization page (player options JSON, with a raw regex
    scan of src entries as fallback).
    """
    _VALID_URL = r'https?://cnn\.iprima\.cz/(?:[^/]+/)*(?P<id>[^/?#&]+)'
    _GEO_BYPASS = False
    _TESTS = [{
        'url': 'https://cnn.iprima.cz/porady/strunc/24072020-koronaviru-mam-plne-zuby-strasit-druhou-vlnou-je-absurdni-rika-senatorka-dernerova',
        'info_dict': {
            'id': 'p716177',
            'ext': 'mp4',
            'title': 'md5:277c6b1ed0577e51b40ddd35602ff43e',
        },
        'params': {
            'skip_download': 'm3u8',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Pre-confirm the adult-content prompt so the player page renders
        self._set_cookie('play.iprima.cz', 'ott_adult_confirmed', '1')

        webpage = self._download_webpage(url, video_id)

        title = self._og_search_title(
            webpage, default=None) or self._search_regex(
            r'<h1>([^<]+)', webpage, 'title')

        # Replace the URL slug with the internal product ID ("p<digits>"),
        # tried against several known page markups in order
        video_id = self._search_regex(
            (r'<iframe[^>]+\bsrc=["\'](?:https?:)?//(?:api\.play-backend\.iprima\.cz/prehravac/embedded|prima\.iprima\.cz/[^/]+/[^/]+)\?.*?\bid=(p\d+)',
             r'data-product="([^"]+)">',
             r'id=["\']player-(p\d+)"',
             r'playerId\s*:\s*["\']player-(p\d+)',
             r'\bvideos\s*=\s*["\'](p\d+)'),
            webpage, 'real id')

        playerpage = self._download_webpage(
            'http://play.iprima.cz/prehravac/init',
            video_id, note='Downloading player', query={
                '_infuse': 1,
                '_ts': round(time.time()),
                'productId': video_id,
            }, headers={'Referer': url})

        formats = []

        def extract_formats(format_url, format_key=None, lang=None):
            # Append all formats for one manifest URL to the outer `formats`
            ext = determine_ext(format_url)
            new_formats = []
            if format_key == 'hls' or ext == 'm3u8':
                new_formats = self._extract_m3u8_formats(
                    format_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id='hls', fatal=False)
            elif format_key == 'dash' or ext == 'mpd':
                # DASH manifests are skipped entirely. The original code had
                # an unconditional `return` followed by an unreachable
                # _extract_mpd_formats call; the dead code is removed here,
                # keeping the (presumably deliberate) skip behavior intact.
                return
            if lang:
                for f in new_formats:
                    if not f.get('language'):
                        f['language'] = lang
            formats.extend(new_formats)

        # Primary source: the player options object embedded in the init page
        # (terminated by a CDATA-style "]]" marker, hence the regex end anchor)
        options = self._parse_json(
            self._search_regex(
                r'(?s)(?:TDIPlayerOptions|playerOptions)\s*=\s*({.+?});\s*\]\]',
                playerpage, 'player options', default='{}'),
            video_id, transform_source=js_to_json, fatal=False)
        if options:
            for key, tracks in options.get('tracks', {}).items():
                if not isinstance(tracks, list):
                    continue
                for track in tracks:
                    src = track.get('src')
                    if src:
                        extract_formats(src, key.lower(), track.get('lang'))

        # Fallback: scan the raw page for quoted src values
        if not formats:
            for _, src in re.findall(r'src["\']\s*:\s*(["\'])(.+?)\1', playerpage):
                extract_formats(src)

        if not formats and '>GEO_IP_NOT_ALLOWED<' in playerpage:
            self.raise_geo_restricted(countries=['CZ'], metadata_available=True)

        return {
            'id': video_id,
            'title': title,
            'thumbnail': self._og_search_thumbnail(webpage, default=None),
            'formats': formats,
            'description': self._og_search_description(webpage, default=None),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/srmediathek.py | yt_dlp/extractor/srmediathek.py | from .ard import ARDMediathekBaseIE
from ..utils import (
ExtractorError,
clean_html,
extract_attributes,
parse_duration,
parse_qs,
unified_strdate,
)
from ..utils.traversal import (
find_element,
require,
traverse_obj,
)
class SRMediathekIE(ARDMediathekBaseIE):
    """Extractor for sr-mediathek.de (Saarländischer Rundfunk).

    Resolves the ARD player media-collection URL from the page, delegates
    stream extraction to ARDMediathekBaseIE._extract_media_info, and
    scrapes the remaining metadata from page markup.
    """
    IE_NAME = 'sr:mediathek'
    IE_DESC = 'Saarländischer Rundfunk'
    # Shared prefix of the teaser caption CSS classes; suffixed below with
    # 'headline' (series) and 'subheadline' (channel)
    _CLS_COMMON = 'teaser__image__caption__text teaser__image__caption__text--'
    _VALID_URL = r'https?://(?:www\.)?sr-mediathek\.de/index\.php\?.*?&id=(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.sr-mediathek.de/index.php?seite=7&id=141317',
        'info_dict': {
            'id': '141317',
            'ext': 'mp4',
            'title': 'Kärnten, da will ich hin!',
            'channel': 'SR Fernsehen',
            'description': 'md5:7732e71e803379a499732864a572a456',
            'duration': 1788.0,
            'release_date': '20250525',
            'series': 'da will ich hin!',
            'series_id': 'DWIH',
            'thumbnail': r're:https?://.+\.jpg',
        },
    }, {
        'url': 'https://www.sr-mediathek.de/index.php?seite=7&id=153853',
        'info_dict': {
            'id': '153853',
            'ext': 'mp3',
            'title': 'Kappes, Klöße, Kokosmilch: Bruschetta mit Nduja',
            'channel': 'SR 3',
            'description': 'md5:3935798de3562b10c4070b408a15e225',
            'duration': 139.0,
            'release_date': '20250523',
            'series': 'Kappes, Klöße, Kokosmilch',
            'series_id': 'SR3_KKK_A',
            'thumbnail': r're:https?://.+\.jpg',
        },
    }, {
        'url': 'https://www.sr-mediathek.de/index.php?seite=7&id=31406&pnr=&tbl=pf',
        'info_dict': {
            'id': '31406',
            'ext': 'mp3',
            'title': 'Das Leben schwer nehmen, ist einfach zu anstrengend',
            'channel': 'SR 1',
            'description': 'md5:3e03fd556af831ad984d0add7175fb0c',
            'duration': 1769.0,
            'release_date': '20230717',
            'series': 'Abendrot',
            'series_id': 'SR1_AB_P',
            'thumbnail': r're:https?://.+\.jpg',
        },
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        description = self._og_search_description(webpage)
        # Expired items still return a normal page; detect them by the
        # site's fixed German "no longer available" description text
        if description == 'Der gewünschte Beitrag ist leider nicht mehr vorhanden.':
            raise ExtractorError(f'Video {video_id} is no longer available', expected=True)
        # Mandatory: the ARD player media-collection URL from the per-video
        # player <div>; require() makes its absence a hard error
        player_url = traverse_obj(webpage, (
            {find_element(tag='div', id=f'player{video_id}', html=True)},
            {extract_attributes}, 'data-mediacollection-ardplayer',
            {self._proto_relative_url}, {require('player URL')}))
        # First paragraph of the article body; parsed below for duration
        # and release date
        article = traverse_obj(webpage, (
            {find_element(cls='article__content')},
            {find_element(tag='p')}, {clean_html}))
        return {
            # Formats/subtitles etc. come from the shared ARD base extractor
            **self._extract_media_info(player_url, webpage, video_id),
            'id': video_id,
            'title': traverse_obj(webpage, (
                {find_element(cls='ardplayer-title')}, {clean_html})),
            # Subheadline looks like "<channel> | ..."; keep the first part
            'channel': traverse_obj(webpage, (
                {find_element(cls=f'{self._CLS_COMMON}subheadline')},
                {lambda x: x.split('|')[0]}, {clean_html})),
            'description': description,
            'duration': parse_duration(self._search_regex(
                r'(\d{2}:\d{2}:\d{2})', article, 'duration')),
            'release_date': unified_strdate(self._search_regex(
                r'(\d{2}\.\d{2}\.\d{4})', article, 'release_date')),
            'series': traverse_obj(webpage, (
                {find_element(cls=f'{self._CLS_COMMON}headline')}, {clean_html})),
            # Series ID is the 'sen' query parameter of the teaser link
            'series_id': traverse_obj(webpage, (
                {find_element(cls='teaser__link', html=True)},
                {extract_attributes}, 'href', {parse_qs}, 'sen', ..., {str}, any)),
            'thumbnail': self._og_search_thumbnail(webpage),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/gputechconf.py | yt_dlp/extractor/gputechconf.py | from .common import InfoExtractor
class GPUTechConfIE(InfoExtractor):
    """Extractor for GTC 2015 on-demand session pages.

    The page only embeds a DigitallySpeaking player, so this extractor
    locates the player's XML metadata URL and hands off to the
    DigitallySpeaking extractor via a ``url_transparent`` result.
    """
    _VALID_URL = r'https?://on-demand\.gputechconf\.com/gtc/2015/video/S(?P<id>\d+)\.html'
    _TEST = {
        'url': 'http://on-demand.gputechconf.com/gtc/2015/video/S5156.html',
        'md5': 'a8862a00a0fd65b8b43acc5b8e33f798',
        'info_dict': {
            'id': '5156',
            'ext': 'mp4',
            'title': 'Coordinating More Than 3 Million CUDA Threads for Social Network Analysis',
            'duration': 1219,
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # Both values live in plain JS variable assignments on the page.
        base_path = self._search_regex(
            r'var\s+rootPath\s*=\s*"([^"]+)', webpage, 'root path',
            default='http://evt.dispeak.com/nvidia/events/gtc15/')
        metadata_id = self._search_regex(
            r'var\s+xmlFileId\s*=\s*"([^"]+)', webpage, 'xml file id')

        return {
            '_type': 'url_transparent',
            'id': video_id,
            'url': f'{base_path}xml/{metadata_id}.xml',
            'ie_key': 'DigitallySpeaking',
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/lecture2go.py | yt_dlp/extractor/lecture2go.py | import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
determine_protocol,
int_or_none,
parse_duration,
)
class Lecture2GoIE(InfoExtractor):
    """Extractor for lecture2go.uni-hamburg.de lecture recordings."""
    _WORKING = False
    _VALID_URL = r'https?://lecture2go\.uni-hamburg\.de/veranstaltungen/-/v/(?P<id>\d+)'
    _TEST = {
        'url': 'https://lecture2go.uni-hamburg.de/veranstaltungen/-/v/17473',
        'md5': 'ac02b570883020d208d405d5a3fd2f7f',
        'info_dict': {
            'id': '17473',
            'ext': 'mp4',
            'title': '2 - Endliche Automaten und reguläre Sprachen',
            'creator': 'Frank Heitmann',
            'duration': 5220,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        title = self._html_search_regex(
            r'<em[^>]+class="title">(.+)</em>', webpage, 'title')

        # Each quality variant is exposed as a JS "playerUriN" variable;
        # dedupe since the same URI may be declared more than once.
        formats = []
        for stream_url in set(re.findall(r'var\s+playerUri\d+\s*=\s*"([^"]+)"', webpage)):
            ext = determine_ext(stream_url)
            if ext == 'f4m':
                formats.extend(self._extract_f4m_formats(stream_url, video_id, f4m_id='hds'))
            elif ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(stream_url, video_id, ext='mp4', m3u8_id='hls'))
            else:
                protocol = determine_protocol({'url': stream_url})
                if protocol == 'rtmp':
                    continue  # XXX: currently broken
                formats.append({
                    'format_id': protocol,
                    'url': stream_url,
                })

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'creator': self._html_search_regex(
                r'<div[^>]+id="description">([^<]+)</div>', webpage, 'creator', fatal=False),
            'duration': parse_duration(self._html_search_regex(
                r'Duration:\s*</em>\s*<em[^>]*>([^<]+)</em>', webpage, 'duration', fatal=False)),
            'view_count': int_or_none(self._html_search_regex(
                r'Views:\s*</em>\s*<em[^>]+>(\d+)</em>', webpage, 'view count', fatal=False)),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nexx.py | yt_dlp/extractor/nexx.py | import hashlib
import random
import re
import time
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
parse_duration,
srt_subtitles_timecode,
traverse_obj,
try_get,
urlencode_postdata,
)
class NexxIE(InfoExtractor):
    """Extractor for videos hosted on the nexx.tv (3Q nexx) platform.

    Accepts ``api.nexx.cloud``/``api.nexxcdn.com`` "byid" API URLs,
    ``arc.nexx.cloud`` video URLs and the internal ``nexx:<domain>:<id>``
    scheme produced by embedding extractors. Format extraction is
    dispatched on the CDN type reported by the API (``azure``, ``free``
    or ``3q``).
    """
    _VALID_URL = r'''(?x)
                        (?:
                            https?://api\.nexx(?:\.cloud|cdn\.com)/v3(?:\.\d)?/(?P<domain_id>\d+)/videos/byid/|
                            nexx:(?:(?P<domain_id_s>\d+):)?|
                            https?://arc\.nexx\.cloud/api/video/
                        )
                        (?P<id>\d+)
                    '''
    _TESTS = [{
        # movie
        'url': 'https://api.nexx.cloud/v3/748/videos/byid/128907',
        'md5': '31899fd683de49ad46f4ee67e53e83fe',
        'info_dict': {
            'id': '128907',
            'ext': 'mp4',
            'title': 'Stiftung Warentest',
            'alt_title': 'Wie ein Test abläuft',
            'description': 'md5:d1ddb1ef63de721132abd38639cc2fd2',
            'creator': 'SPIEGEL TV',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 2509,
            'timestamp': 1384264416,
            'upload_date': '20131112',
        },
        'skip': 'Spiegel nexx CDNs are now disabled',
    }, {
        # episode with captions
        'url': 'https://api.nexx.cloud/v3.1/741/videos/byid/1701834',
        'info_dict': {
            'id': '1701834',
            'ext': 'mp4',
            'title': 'Mein Leben mit \'nem TikTok E-Boy 😤',
            'alt_title': 'Mein Leben mit \'nem TikTok E-Boy 😤',
            'description': 'md5:f84f395a881fd143f952c892deab528d',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 770,
            'timestamp': 1595600027,
            'upload_date': '20200724',
            'episode_number': 2,
            'season_number': 2,
            'episode': 'Episode 2',
            'season': 'Season 2',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'nexx:741:1269984',
        'md5': 'd5f14e14b592501e51addd5abef95a7f',
        'info_dict': {
            'id': '1269984',
            'ext': 'mp4',
            'title': '1 TAG ohne KLO... wortwörtlich! ?',
            'alt_title': '1 TAG ohne KLO... wortwörtlich! ?',
            'description': 'md5:2016393a31991a900946432ccdd09a6f',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 607,
            'timestamp': 1518614955,
            'upload_date': '20180214',
        },
    }, {
        # free cdn from http://www.spiegel.de/video/eifel-zoo-aufregung-um-ausgebrochene-raubtiere-video-99018031.html
        'url': 'nexx:747:1533779',
        'md5': '6bf6883912b82b7069fb86c2297e9893',
        'info_dict': {
            'id': '1533779',
            'ext': 'mp4',
            'title': 'Aufregung um ausgebrochene Raubtiere',
            'alt_title': 'Eifel-Zoo',
            'description': 'md5:f21375c91c74ad741dcb164c427999d2',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 111,
            'timestamp': 1527874460,
            'upload_date': '20180601',
        },
        'skip': 'Spiegel nexx CDNs are now disabled',
    }, {
        'url': 'https://api.nexxcdn.com/v3/748/videos/byid/128907',
        'only_matching': True,
    }, {
        'url': 'nexx:748:128907',
        'only_matching': True,
    }, {
        'url': 'nexx:128907',
        'only_matching': True,
    }, {
        'url': 'https://arc.nexx.cloud/api/video/128907.json',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_domain_id(webpage):
        """Return the nexx domain id from a page's SDK <script> tag, or None."""
        mobj = re.search(
            r'<script\b[^>]+\bsrc=["\'](?:https?:)?//(?:require|arc)\.nexx(?:\.cloud|cdn\.com)/(?:sdk/)?(?P<id>\d+)',
            webpage)
        return mobj.group('id') if mobj else None

    @classmethod
    def _extract_embed_urls(cls, url, webpage):
        """Find Nexx JavaScript-integration embeds on an arbitrary webpage."""
        # Reference:
        # 1. https://nx-s.akamaized.net/files/201510/44.pdf

        entries = []

        # JavaScript Integration
        domain_id = NexxIE._extract_domain_id(webpage)
        if domain_id:
            for video_id in re.findall(
                    r'(?is)onPLAYReady.+?_play\.(?:init|(?:control\.)?addPlayer)\s*\(.+?\s*,\s*["\']?(\d+)',
                    webpage):
                entries.append(
                    f'https://api.nexx.cloud/v3/{domain_id}/videos/byid/{video_id}')

        # TODO: support more embed formats

        return entries

    def _handle_error(self, response):
        """Raise ExtractorError for non-2xx API statuses; warn on notices."""
        if traverse_obj(response, ('metadata', 'notice'), expected_type=str):
            self.report_warning('{} said: {}'.format(self.IE_NAME, response['metadata']['notice']))
        status = int_or_none(try_get(
            response, lambda x: x['metadata']['status']) or 200)
        if 200 <= status < 300:
            return
        raise ExtractorError(
            '{} said: {}'.format(self.IE_NAME, response['metadata']['errorhint']),
            expected=True)

    def _call_api(self, domain_id, path, video_id, data=None, headers=None):
        """POST to the v3 API and return the unwrapped ``result`` payload.

        Raises ExtractorError via _handle_error() on API-level failures.
        """
        # NB: build a fresh dict instead of mutating the ``headers`` argument;
        # the previous ``headers={}`` default was a shared mutable default that
        # got written to on every call.
        headers = {
            **(headers or {}),
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        }
        result = self._download_json(
            f'https://api.nexx.cloud/v3/{domain_id}/{path}', video_id,
            f'Downloading {path} JSON', data=urlencode_postdata(data),
            headers=headers)
        self._handle_error(result)
        return result['result']

    def _extract_free_formats(self, video, video_id):
        """Build formats for the 'free' CDN (Akamai or CE provider)."""
        stream_data = video['streamdata']
        cdn = stream_data['cdnType']
        assert cdn == 'free'

        video_hash = video['general']['hash']

        ps = str(stream_data['originalDomain'])
        if stream_data['applyFolderHierarchy'] == 1:
            # Sharded path: reversed, zero-padded video id split into two
            # 2-character directory levels.
            s = ('%04d' % int(video_id))[::-1]
            ps += f'/{s[0:2]}/{s[2:4]}'
        ps += f'/{video_id}/{video_hash}_'

        t = 'http://%s' + ps

        fd = stream_data['azureFileDistribution'].split(',')
        cdn_provider = stream_data['cdnProvider']

        def p0(p):
            return f'_{p}' if stream_data['applyAzureStructure'] == 1 else ''

        formats = []
        if cdn_provider == 'ak':
            t += ','
            for i in fd:
                p = i.split(':')
                t += p[1] + p0(int(p[0])) + ','
            t += '.mp4.csmil/master.%s'
        elif cdn_provider == 'ce':
            k = t.split('/')
            h = k.pop()
            http_base = t = '/'.join(k)
            http_base = http_base % stream_data['cdnPathHTTP']
            t += '/asset.ism/manifest.%s?dcp_ver=aos4&videostream='
            for i in fd:
                p = i.split(':')
                tbr = int(p[0])
                filename = f'{h}{p[1]}{p0(tbr)}.mp4'
                f = {
                    'url': http_base + '/' + filename,
                    'format_id': f'{cdn}-http-{tbr}',
                    'tbr': tbr,
                }
                width_height = p[1].split('x')
                if len(width_height) == 2:
                    f.update({
                        'width': int_or_none(width_height[0]),
                        'height': int_or_none(width_height[1]),
                    })
                formats.append(f)
                a = filename + f':{tbr * 1000}'
                t += a + ','
            t = t[:-1] + '&audiostream=' + a.split(':')[0]
        else:
            # Unknown provider value would silently produce broken URLs.
            assert False

        if cdn_provider == 'ce':
            formats.extend(self._extract_mpd_formats(
                t % (stream_data['cdnPathDASH'], 'mpd'), video_id,
                mpd_id=f'{cdn}-dash', fatal=False))
        formats.extend(self._extract_m3u8_formats(
            t % (stream_data['cdnPathHLS'], 'm3u8'), video_id, 'mp4',
            entry_protocol='m3u8_native', m3u8_id=f'{cdn}-hls', fatal=False))

        return formats

    def _extract_3q_formats(self, video, video_id):
        """Build formats for the 3Q SDN CDN (HLS, DASH and progressive)."""
        stream_data = video['streamdata']
        cdn = stream_data['cdnType']
        assert cdn == '3q'

        q_acc, q_prefix, q_locator, q_hash = stream_data['qAccount'], stream_data['qPrefix'], stream_data['qLocator'], stream_data['qHash']
        protection_key = traverse_obj(
            video, ('protectiondata', 'key'), expected_type=str)

        def get_cdn_shield_base(shield_type=''):
            # Prefer an explicitly configured CDN shield host; otherwise fall
            # back to the default 3Q SDN cache host (with the protection key
            # path segment when the stream is protected).
            for secure in ('', 's'):
                cdn_shield = stream_data.get(f'cdnShield{shield_type}HTTP{secure.upper()}')
                if cdn_shield:
                    return f'http{secure}://{cdn_shield}'
            return f'http://sdn-global-{"prog" if shield_type.lower() == "prog" else "streaming"}-cache.3qsdn.com/' + (f's/{protection_key}/' if protection_key else '')

        stream_base = get_cdn_shield_base()
        formats = []
        formats.extend(self._extract_m3u8_formats(
            f'{stream_base}{q_acc}/files/{q_prefix}/{q_locator}/{q_acc}-{stream_data.get("qHEVCHash") or q_hash}.ism/manifest.m3u8',
            video_id, 'mp4', m3u8_id=f'{cdn}-hls', fatal=False))
        formats.extend(self._extract_mpd_formats(
            f'{stream_base}{q_acc}/files/{q_prefix}/{q_locator}/{q_acc}-{q_hash}.ism/manifest.mpd',
            video_id, mpd_id=f'{cdn}-dash', fatal=False))

        progressive_base = get_cdn_shield_base('Prog')
        q_references = stream_data.get('qReferences') or ''
        fds = q_references.split(',')
        for fd in fds:
            ss = fd.split(':')
            if len(ss) != 3:
                continue
            tbr = int_or_none(ss[1], scale=1000)
            formats.append({
                'url': f'{progressive_base}{q_acc}/uploads/{q_acc}-{ss[2]}.webm',
                'format_id': f'{cdn}-{ss[0]}{f"-{tbr}" if tbr else ""}',
                'tbr': tbr,
            })

        azure_file_distribution = stream_data.get('azureFileDistribution') or ''
        fds = azure_file_distribution.split(',')
        for fd in fds:
            ss = fd.split(':')
            if len(ss) != 3:
                continue
            tbr = int_or_none(ss[0])
            width, height = ss[1].split('x') if len(ss[1].split('x')) == 2 else (None, None)
            f = {
                'url': f'{progressive_base}{q_acc}/files/{q_prefix}/{q_locator}/{ss[2]}.mp4',
                # Fixed: was f'{cdn}-http-{f"-{tbr}" if tbr else ""}', which
                # produced ids like '3q-http--2500' / '3q-http-'; now matches
                # the '{cdn}-{...}{-tbr}' scheme used for the webm formats.
                'format_id': f'{cdn}-http{f"-{tbr}" if tbr else ""}',
                'tbr': tbr,
                'width': int_or_none(width),
                'height': int_or_none(height),
            }
            formats.append(f)

        return formats

    def _extract_azure_formats(self, video, video_id):
        """Build formats for the Azure CDN (HLS, DASH, MSS and progressive)."""
        stream_data = video['streamdata']
        cdn = stream_data['cdnType']
        assert cdn == 'azure'

        azure_locator = stream_data['azureLocator']

        def get_cdn_shield_base(shield_type='', static=False):
            for secure in ('', 's'):
                cdn_shield = stream_data.get(f'cdnShield{shield_type}HTTP{secure.upper()}')
                if cdn_shield:
                    return f'http{secure}://{cdn_shield}'
            if 'fb' in stream_data['azureAccount']:
                prefix = 'df' if static else 'f'
            else:
                prefix = 'd' if static else 'p'
            account = int(stream_data['azureAccount'].replace('nexxplayplus', '').replace('nexxplayfb', ''))
            return 'http://nx-%s%02d.akamaized.net/' % (prefix, account)

        language = video['general'].get('language_raw') or ''

        azure_stream_base = get_cdn_shield_base()
        is_ml = ',' in language  # multi-language streams use a different manifest name
        azure_manifest_url = '{}{}/{}_src{}.ism/Manifest'.format(
            azure_stream_base, azure_locator, video_id, ('_manifest' if is_ml else '')) + '%s'

        protection_token = try_get(
            video, lambda x: x['protectiondata']['token'], str)
        if protection_token:
            azure_manifest_url += f'?hdnts={protection_token}'

        formats = self._extract_m3u8_formats(
            azure_manifest_url % '(format=m3u8-aapl)',
            video_id, 'mp4', 'm3u8_native',
            m3u8_id=f'{cdn}-hls', fatal=False)
        formats.extend(self._extract_mpd_formats(
            azure_manifest_url % '(format=mpd-time-csf)',
            video_id, mpd_id=f'{cdn}-dash', fatal=False))
        formats.extend(self._extract_ism_formats(
            azure_manifest_url % '', video_id, ism_id=f'{cdn}-mss', fatal=False))

        azure_progressive_base = get_cdn_shield_base('Prog', True)
        azure_file_distribution = stream_data.get('azureFileDistribution')
        if azure_file_distribution:
            fds = azure_file_distribution.split(',')
            if fds:
                for fd in fds:
                    ss = fd.split(':')
                    if len(ss) == 2:
                        tbr = int_or_none(ss[0])
                        if tbr:
                            f = {
                                'url': f'{azure_progressive_base}{azure_locator}/{video_id}_src_{ss[1]}_{tbr}.mp4',
                                'format_id': f'{cdn}-http-{tbr}',
                                'tbr': tbr,
                            }
                            width_height = ss[1].split('x')
                            if len(width_height) == 2:
                                f.update({
                                    'width': int_or_none(width_height[0]),
                                    'height': int_or_none(width_height[1]),
                                })
                            formats.append(f)

        return formats

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        domain_id = mobj.group('domain_id') or mobj.group('domain_id_s')
        video_id = mobj.group('id')

        video = None

        def find_video(result):
            # The API may return either a single video dict or a list of
            # candidates; pick the one matching our id in the latter case.
            if isinstance(result, dict):
                return result
            elif isinstance(result, list):
                vid = int(video_id)
                for v in result:
                    if try_get(v, lambda x: x['general']['ID'], int) == vid:
                        return v
            return None

        # Try the unauthenticated arc endpoint first.
        response = self._download_json(
            f'https://arc.nexx.cloud/api/video/{video_id}.json',
            video_id, fatal=False)
        if response and isinstance(response, dict):
            result = response.get('result')
            if result:
                video = find_video(result)

        # not all videos work via arc, e.g. nexx:741:1269984
        if not video:
            # Reverse engineered from JS code (see getDeviceID function)
            device_id = f'{random.randint(1, 4)}:{int(time.time())}:{random.randint(10000, 99999)}{random.randint(1, 9)}'

            result = self._call_api(domain_id, 'session/init', video_id, data={
                'nxp_devh': device_id,
                'nxp_userh': '',
                'precid': '0',
                'playlicense': '0',
                'screenx': '1920',
                'screeny': '1080',
                'playerversion': '6.0.00',
                'gateway': 'html5',
                'adGateway': '',
                'explicitlanguage': 'en-US',
                'addTextTemplates': '1',
                'addDomainData': '1',
                'addAdModel': '1',
            }, headers={
                'X-Request-Enable-Auth-Fallback': '1',
            })

            cid = result['general']['cid']

            # As described in [1] X-Request-Token generation algorithm is
            # as follows:
            #   md5( operation + domain_id + domain_secret )
            # where domain_secret is a static value that will be given by nexx.tv
            # as per [1]. Here is how this "secret" is generated (reversed
            # from _play._factory.data.getDomainData function, search for
            # domaintoken or enableAPIAccess). So it's actually not static
            # and not that much of a secret.
            # 1. https://nexxtvstorage.blob.core.windows.net/files/201610/27.pdf
            secret = result['device']['domaintoken'][int(device_id[0]):]
            secret = secret[0:len(secret) - int(device_id[-1])]

            op = 'byid'

            # Reversed from JS code for _play.api.call function (search for
            # X-Request-Token)
            request_token = hashlib.md5(
                ''.join((op, domain_id, secret)).encode()).hexdigest()

            result = self._call_api(
                domain_id, f'videos/{op}/{video_id}', video_id, data={
                    'additionalfields': 'language,channel,format,licenseby,slug,fileversion,episode,season',
                    'addInteractionOptions': '1',
                    'addStatusDetails': '1',
                    'addStreamDetails': '1',
                    'addFeatures': '1',
                    # Caption format selection doesn't seem to be enforced?
                    'addCaptions': 'vtt',
                    'addScenes': '1',
                    'addChapters': '1',
                    'addHotSpots': '1',
                    'addConnectedMedia': 'persons',
                    'addBumpers': '1',
                }, headers={
                    'X-Request-CID': cid,
                    'X-Request-Token': request_token,
                })
            video = find_video(result)

        general = video['general']
        title = general['title']

        cdn = video['streamdata']['cdnType']

        if cdn == 'azure':
            formats = self._extract_azure_formats(video, video_id)
        elif cdn == 'free':
            formats = self._extract_free_formats(video, video_id)
        elif cdn == '3q':
            formats = self._extract_3q_formats(video, video_id)
        else:
            self.raise_no_formats(f'{cdn} formats are currently not supported', video_id)

        subtitles = {}
        for sub in video.get('captiondata') or []:
            if sub.get('data'):
                # Inline caption cues; synthesize an SRT document from them.
                subtitles.setdefault(sub.get('language', 'en'), []).append({
                    'ext': 'srt',
                    'data': '\n\n'.join(
                        f'{i + 1}\n{srt_subtitles_timecode(line["fromms"] / 1000)} --> {srt_subtitles_timecode(line["toms"] / 1000)}\n{line["caption"]}'
                        for i, line in enumerate(sub['data'])),
                    'name': sub.get('language_long') or sub.get('title'),
                })
            elif sub.get('url'):
                subtitles.setdefault(sub.get('language', 'en'), []).append({
                    'url': sub['url'],
                    'ext': sub.get('format'),
                    'name': sub.get('language_long') or sub.get('title'),
                })

        return {
            'id': video_id,
            'title': title,
            'alt_title': general.get('subtitle'),
            'description': general.get('description'),
            'release_year': int_or_none(general.get('year')),
            'creator': general.get('studio') or general.get('studio_adref') or None,
            'thumbnail': try_get(
                video, lambda x: x['imagedata']['thumb'], str),
            'duration': parse_duration(general.get('runtime')),
            'timestamp': int_or_none(general.get('uploaded')),
            'episode_number': traverse_obj(
                video, (('episodedata', 'general'), 'episode'), expected_type=int, get_all=False),
            'season_number': traverse_obj(
                video, (('episodedata', 'general'), 'season'), expected_type=int, get_all=False),
            'cast': traverse_obj(video, ('connectedmedia', ..., 'title'), expected_type=str),
            'formats': formats,
            'subtitles': subtitles,
        }
class NexxEmbedIE(InfoExtractor):
    """Resolves embed.nexx.cloud / embed.nexxcdn.com iframe pages to NexxIE."""
    _VALID_URL = r'https?://embed\.nexx(?:\.cloud|cdn\.com)/\d+/(?:video/)?(?P<id>[^/?#&]+)'
    # Reference. https://nx-s.akamaized.net/files/201510/44.pdf
    _EMBED_REGEX = [r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//embed\.nexx(?:\.cloud|cdn\.com)/\d+/(?:(?!\1).)+)\1']
    _TESTS = [{
        'url': 'http://embed.nexx.cloud/748/KC1614647Z27Y7T?autoplay=1',
        'md5': '16746bfc28c42049492385c989b26c4a',
        'info_dict': {
            'id': '161464',
            'ext': 'mp4',
            'title': 'Nervenkitzel Achterbahn',
            'alt_title': 'Karussellbauer in Deutschland',
            'description': 'md5:ffe7b1cc59a01f585e0569949aef73cc',
            'creator': 'SPIEGEL TV',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 2761,
            'timestamp': 1394021479,
            'upload_date': '20140305',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://embed.nexx.cloud/11888/video/DSRTO7UVOX06S7',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        embed_id = self._match_id(url)
        embed_page = self._download_webpage(url, embed_id)
        # The embed page carries a regular Nexx JS integration; delegate to
        # the main extractor for the actual API work.
        return self.url_result(NexxIE._extract_url(embed_page), ie=NexxIE.ie_key())
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/xxxymovies.py | yt_dlp/extractor/xxxymovies.py | from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_duration,
)
class XXXYMoviesIE(InfoExtractor):
    """Extractor for xxxymovies.com single-video pages (adult content)."""
    _VALID_URL = r'https?://(?:www\.)?xxxymovies\.com/videos/(?P<id>\d+)/(?P<display_id>[^/]+)'
    _TEST = {
        'url': 'http://xxxymovies.com/videos/138669/ecstatic-orgasm-sofcore/',
        'md5': '810b1bdbbffff89dd13bdb369fe7be4b',
        'info_dict': {
            'id': '138669',
            'display_id': 'ecstatic-orgasm-sofcore',
            'ext': 'mp4',
            'title': 'Ecstatic Orgasm Sofcore',
            'duration': 931,
            'categories': list,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'age_limit': 18,
        },
    }

    def _real_extract(self, url):
        video_id, display_id = self._match_valid_url(url).group('id', 'display_id')
        webpage = self._download_webpage(url, display_id)

        # Everything is scraped from inline JS config and page markup; only
        # the direct video URL is mandatory.
        return {
            'id': video_id,
            'display_id': display_id,
            'url': self._search_regex(
                r"video_url\s*:\s*'([^']+)'", webpage, 'video URL'),
            'title': self._html_search_regex(
                [r'<div[^>]+\bclass="block_header"[^>]*>\s*<h1>([^<]+)<',
                 r'<title>(.*?)\s*-\s*(?:XXXYMovies\.com|XXX\s+Movies)</title>'],
                webpage, 'title'),
            'thumbnail': self._search_regex(
                r"preview_url\s*:\s*'([^']+)'",
                webpage, 'thumbnail', fatal=False),
            'categories': self._html_search_meta(
                'keywords', webpage, 'categories', default='').split(','),
            'duration': parse_duration(self._search_regex(
                r'<span>Duration:</span>\s*(\d+:\d+)',
                webpage, 'duration', fatal=False)),
            'view_count': int_or_none(self._html_search_regex(
                r'<div class="video_views">\s*(\d+)',
                webpage, 'view count', fatal=False)),
            'like_count': int_or_none(self._search_regex(
                r'>\s*Likes? <b>\((\d+)\)',
                webpage, 'like count', fatal=False)),
            'dislike_count': int_or_none(self._search_regex(
                r'>\s*Dislike <b>\((\d+)\)</b>',
                webpage, 'dislike count', fatal=False)),
            'age_limit': self._rta_search(webpage),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/livestream.py | yt_dlp/extractor/livestream.py | import itertools
import re
import urllib.parse
from .common import InfoExtractor
from ..utils import (
determine_ext,
find_xpath_attr,
float_or_none,
int_or_none,
orderedSet,
parse_iso8601,
traverse_obj,
update_url_query,
xpath_attr,
xpath_text,
xpath_with_ns,
)
class LivestreamIE(InfoExtractor):
    IE_NAME = 'livestream'
    _VALID_URL = r'''(?x)
        https?://(?:new\.)?livestream\.com/
        (?:accounts/(?P<account_id>\d+)|(?P<account_name>[^/]+))
        (?:/events/(?P<event_id>\d+)|/(?P<event_name>[^/]+))?
        (?:/videos/(?P<id>\d+))?
        '''
    _EMBED_REGEX = [r'<iframe[^>]+src="(?P<url>https?://(?:new\.)?livestream\.com/[^"]+/player[^"]+)"']
    _TESTS = [{
        'url': 'http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370',
        'md5': '7876c5f5dc3e711b6b73acce4aac1527',
        'info_dict': {
            'id': '4719370',
            'ext': 'mp4',
            'title': 'Live from Webster Hall NYC',
            'timestamp': 1350008072,
            'upload_date': '20121012',
            'duration': 5968.0,
            'like_count': int,
            'view_count': int,
            'comment_count': int,
            'thumbnail': r're:^http://.*\.jpg$',
        },
    }, {
        'url': 'https://livestream.com/coheedandcambria/websterhall',
        'info_dict': {
            'id': '1585861',
            'title': 'Live From Webster Hall',
        },
        'playlist_mincount': 1,
    }, {
        'url': 'https://livestream.com/dayananda/events/7954027',
        'info_dict': {
            'title': 'Live from Mevo',
            'id': '7954027',
        },
        'playlist_mincount': 4,
    }, {
        'url': 'https://livestream.com/accounts/82',
        'info_dict': {
            'id': '253978',
            'view_count': int,
            'title': 'trsr',
            'comment_count': int,
            'like_count': int,
            'upload_date': '20120306',
            'timestamp': 1331042383,
            'thumbnail': 'http://img.new.livestream.com/videos/0000000000000372/cacbeed6-fb68-4b5e-ad9c-e148124e68a9_640x427.jpg',
            'duration': 15.332,
            'ext': 'mp4',
        },
    }, {
        'url': 'https://new.livestream.com/accounts/362/events/3557232/videos/67864563/player?autoPlay=false&height=360&mute=false&width=640',
        'only_matching': True,
    }, {
        'url': 'http://livestream.com/bsww/concacafbeachsoccercampeonato2015',
        'only_matching': True,
    }]
    # Filled in with (account_id, event_id)
    _API_URL_TEMPLATE = 'http://livestream.com/api/accounts/%s/events/%s'

    def _parse_smil_formats_and_subtitles(
            self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
        """Build format entries from Livestream's SMIL manifests.

        Overrides the generic SMIL parser: Livestream manifests need the
        httpBase <meta> (Akamai HD host) plus fixed player query parameters.
        Returns (formats, subtitles); subtitles are always empty here.
        """
        base_ele = find_xpath_attr(
            smil, self._xpath_ns('.//meta', namespace), 'name', 'httpBase')
        base = base_ele.get('content') if base_ele is not None else 'http://livestreamvod-f.akamaihd.net/'
        formats = []
        video_nodes = smil.findall(self._xpath_ns('.//video', namespace))
        for vn in video_nodes:
            # system-bitrate is given in bit/s; convert to kbit/s
            tbr = int_or_none(vn.attrib.get('system-bitrate'), 1000)
            furl = (
                update_url_query(urllib.parse.urljoin(base, vn.attrib['src']), {
                    'v': '3.0.3',
                    'fp': 'WIN% 14,0,0,145',
                }))
            if 'clipBegin' in vn.attrib:
                furl += '&ssek=' + vn.attrib['clipBegin']
            formats.append({
                'url': furl,
                'format_id': 'smil_%d' % tbr,
                'ext': 'flv',
                'tbr': tbr,
                'preference': -1000,  # Strictly inferior than all other formats?
            })
        return formats, {}

    def _extract_video_info(self, video_data):
        """Turn a single API video object into an info dict (VOD case)."""
        video_id = str(video_data['id'])

        FORMAT_KEYS = (
            ('sd', 'progressive_url'),
            ('hd', 'progressive_url_hd'),
        )

        formats = []
        for format_id, key in FORMAT_KEYS:
            video_url = video_data.get(key)
            if video_url:
                ext = determine_ext(video_url)
                if ext == 'm3u8':
                    # HLS is handled separately via m3u8_url below
                    continue
                # Bitrate is usually encoded in the file name, e.g. ..._1500.mp4
                bitrate = int_or_none(self._search_regex(
                    rf'(\d+)\.{ext}', video_url, 'bitrate', default=None))
                formats.append({
                    'url': video_url,
                    'format_id': format_id,
                    'tbr': bitrate,
                    'ext': ext,
                })

        smil_url = video_data.get('smil_url')
        if smil_url:
            formats.extend(self._extract_smil_formats(smil_url, video_id, fatal=False))

        m3u8_url = video_data.get('m3u8_url')
        if m3u8_url:
            formats.extend(self._extract_m3u8_formats(
                m3u8_url, video_id, 'mp4', 'm3u8_native',
                m3u8_id='hls', fatal=False))

        f4m_url = video_data.get('f4m_url')
        if f4m_url:
            formats.extend(self._extract_f4m_formats(
                f4m_url, video_id, f4m_id='hds', fatal=False))

        comments = [{
            'author_id': comment.get('author_id'),
            'author': comment.get('author', {}).get('full_name'),
            'id': comment.get('id'),
            'text': comment['text'],
            'timestamp': parse_iso8601(comment.get('created_at')),
        } for comment in video_data.get('comments', {}).get('data', [])]

        return {
            'id': video_id,
            'formats': formats,
            'title': video_data['caption'],
            'description': video_data.get('description'),
            'thumbnail': video_data.get('thumbnail_url'),
            'duration': float_or_none(video_data.get('duration'), 1000),
            'timestamp': parse_iso8601(video_data.get('publish_at')),
            'like_count': video_data.get('likes', {}).get('total'),
            'comment_count': video_data.get('comments', {}).get('total'),
            'view_count': video_data.get('views'),
            'comments': comments,
        }

    def _extract_stream_info(self, stream_info):
        """Turn an event's stream_info object into an info dict (live case)."""
        broadcast_id = str(stream_info['broadcast_id'])
        is_live = stream_info.get('is_live')

        formats = []
        smil_url = stream_info.get('play_url')
        if smil_url:
            formats.extend(self._extract_smil_formats(smil_url, broadcast_id))

        m3u8_url = stream_info.get('m3u8_url')
        if m3u8_url:
            formats.extend(self._extract_m3u8_formats(
                m3u8_url, broadcast_id, 'mp4', 'm3u8_native',
                m3u8_id='hls', fatal=False))

        rtsp_url = stream_info.get('rtsp_url')
        if rtsp_url:
            formats.append({
                'url': rtsp_url,
                'format_id': 'rtsp',
            })

        return {
            'id': broadcast_id,
            'formats': formats,
            'title': stream_info['stream_title'],
            'thumbnail': stream_info.get('thumbnail_url'),
            'is_live': is_live,
        }

    def _generate_event_playlist(self, event_data):
        """Yield url_result entries for all videos of an event.

        If the event has an active stream, returns its info dict directly
        instead of a generator of playlist entries. Pages through the feed
        API using the last seen video id as the cursor.
        """
        event_id = str(event_data['id'])
        account_id = str(event_data['owner_account_id'])
        feed_root_url = self._API_URL_TEMPLATE % (account_id, event_id) + '/feed.json'

        stream_info = event_data.get('stream_info')
        if stream_info:
            return self._extract_stream_info(stream_info)

        last_video = None
        for i in itertools.count(1):
            if last_video is None:
                info_url = feed_root_url
            else:
                # Cursor-based pagination: ask for entries older than the
                # last video id we have seen.
                info_url = f'{feed_root_url}?&id={last_video}&newer=-1&type=video'
            videos_info = self._download_json(
                info_url, event_id, f'Downloading page {i}')['data']
            videos_info = [v['data'] for v in videos_info if v['type'] == 'video']
            if not videos_info:
                break
            for v in videos_info:
                v_id = str(v['id'])
                yield self.url_result(
                    f'http://livestream.com/accounts/{account_id}/events/{event_id}/videos/{v_id}',
                    LivestreamIE, v_id, v.get('caption'))
            last_video = videos_info[-1]['id']

    def _real_extract(self, url):
        """Dispatch: single video, single event playlist, or whole account."""
        mobj = self._match_valid_url(url)
        video_id = mobj.group('id')
        event = mobj.group('event_id') or mobj.group('event_name')
        account = mobj.group('account_id') or mobj.group('account_name')
        api_url = f'http://livestream.com/api/accounts/{account}'

        if video_id:
            video_data = self._download_json(
                f'{api_url}/events/{event}/videos/{video_id}', video_id)
            return self._extract_video_info(video_data)
        elif event:
            event_data = self._download_json(f'{api_url}/events/{event}', None)
            return self.playlist_result(
                self._generate_event_playlist(event_data), str(event_data['id']), event_data['full_name'])

        # Account URL: flatten upcoming and past events into one playlist.
        account_data = self._download_json(api_url, None)
        items = traverse_obj(account_data, (('upcoming_events', 'past_events'), 'data', ...))
        return self.playlist_result(
            itertools.chain.from_iterable(map(self._generate_event_playlist, items)),
            account_data.get('id'), account_data.get('full_name'))
# The original version of Livestream uses a different system
class LivestreamOriginalIE(InfoExtractor):
    IE_NAME = 'livestream:original'
    _VALID_URL = r'''(?x)https?://original\.livestream\.com/
        (?P<user>[^/\?#]+)(?:/(?P<type>video|folder)
        (?:(?:\?.*?Id=|/)(?P<id>.*?)(&|$))?)?
        '''
    _TESTS = [{
        'url': 'http://original.livestream.com/dealbook/video?clipId=pla_8aa4a3f1-ba15-46a4-893b-902210e138fb',
        'info_dict': {
            'id': 'pla_8aa4a3f1-ba15-46a4-893b-902210e138fb',
            'ext': 'mp4',
            'title': 'Spark 1 (BitCoin) with Cameron Winklevoss & Tyler Winklevoss of Winklevoss Capital',
            'duration': 771.301,
            'view_count': int,
        },
    }, {
        'url': 'https://original.livestream.com/newplay/folder?dirId=a07bf706-d0e4-4e75-a747-b021d84f2fd3',
        'info_dict': {
            'id': 'a07bf706-d0e4-4e75-a747-b021d84f2fd3',
        },
        'playlist_mincount': 4,
    }, {
        # live stream
        'url': 'http://original.livestream.com/znsbahamas',
        'only_matching': True,
    }]

    def _extract_video_info(self, user, video_id):
        """Fetch clip metadata (title, thumb, duration, views) from the
        legacy per-user XML API. Formats are added separately by the caller.
        """
        api_url = f'http://x{user}x.api.channel.livestream.com/2.0/clipdetails?extendedInfo=true&id={video_id}'
        info = self._download_xml(api_url, video_id)

        item = info.find('channel').find('item')
        title = xpath_text(item, 'title')
        # NOTE(review): mrss namespace URI has no trailing slash here;
        # presumably matches what the API actually emits - confirm before
        # "fixing" it.
        media_ns = {'media': 'http://search.yahoo.com/mrss'}
        thumbnail_url = xpath_attr(
            item, xpath_with_ns('media:thumbnail', media_ns), 'url')
        duration = float_or_none(xpath_attr(
            item, xpath_with_ns('media:content', media_ns), 'duration'))
        ls_ns = {'ls': 'http://api.channel.livestream.com/2.0'}
        view_count = int_or_none(xpath_text(
            item, xpath_with_ns('ls:viewsCount', ls_ns)))

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail_url,
            'duration': duration,
            'view_count': view_count,
        }

    def _extract_video_formats(self, video_data, video_id):
        """Collect progressive, HLS and RTSP formats from getstream JSON."""
        formats = []

        progressive_url = video_data.get('progressiveUrl')
        if progressive_url:
            formats.append({
                'url': progressive_url,
                'format_id': 'http',
            })

        m3u8_url = video_data.get('httpUrl')
        if m3u8_url:
            formats.extend(self._extract_m3u8_formats(
                m3u8_url, video_id, 'mp4', 'm3u8_native',
                m3u8_id='hls', fatal=False))

        rtsp_url = video_data.get('rtspUrl')
        if rtsp_url:
            formats.append({
                'url': rtsp_url,
                'format_id': 'rtsp',
            })

        return formats

    def _extract_folder(self, url, folder_id):
        """Scrape a folder page into a playlist of contained item URLs."""
        webpage = self._download_webpage(url, folder_id)
        paths = orderedSet(re.findall(
            r'''(?x)(?:
                <li\s+class="folder">\s*<a\s+href="|
                <a\s+href="(?=https?://livestre\.am/)
            )([^"]+)"''', webpage))

        entries = [{
            '_type': 'url',
            'url': urllib.parse.urljoin(url, p),
        } for p in paths]

        return self.playlist_result(entries, folder_id)

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        user = mobj.group('user')
        url_type = mobj.group('type')
        content_id = mobj.group('id')
        if url_type == 'folder':
            return self._extract_folder(url, content_id)
        else:
            # this url is used on mobile devices
            stream_url = f'http://x{user}x.api.channel.livestream.com/3.0/getstream.json'
            info = {}
            if content_id:
                # Single clip: metadata from the XML API, stream by clip id.
                stream_url += f'?id={content_id}'
                info = self._extract_video_info(user, content_id)
            else:
                # Livestream of the channel: metadata scraped from the page.
                content_id = user
                webpage = self._download_webpage(url, content_id)
                info = {
                    'title': self._og_search_title(webpage),
                    'description': self._og_search_description(webpage),
                    'thumbnail': self._search_regex(r'channelLogo\.src\s*=\s*"([^"]+)"', webpage, 'thumbnail', None),
                }
            video_data = self._download_json(stream_url, content_id)
            is_live = video_data.get('isLive')
            info.update({
                'id': content_id,
                'title': info['title'],
                'formats': self._extract_video_formats(video_data, content_id),
                'is_live': is_live,
            })
            return info
# The server doesn't support HEAD request, the generic extractor can't detect
# the redirection
class LivestreamShortenerIE(InfoExtractor):
IE_NAME = 'livestream:shortener'
IE_DESC = False # Do not list
_VALID_URL = r'https?://livestre\.am/(?P<id>.+)'
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
return self.url_result(self._og_search_url(webpage))
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/vk.py | yt_dlp/extractor/vk.py | import collections
import hashlib
import re
from .common import InfoExtractor
from .dailymotion import DailymotionIE
from .odnoklassniki import OdnoklassnikiIE
from .sibnet import SibnetEmbedIE
from .vimeo import VimeoIE
from .youtube import YoutubeIE
from ..utils import (
ExtractorError,
UserNotLive,
clean_html,
get_element_by_class,
get_element_html_by_id,
int_or_none,
join_nonempty,
parse_qs,
parse_resolution,
str_or_none,
str_to_int,
try_call,
unescapeHTML,
unified_timestamp,
update_url_query,
url_or_none,
urlencode_postdata,
urljoin,
)
from ..utils.traversal import require, traverse_obj
class VKBaseIE(InfoExtractor):
_NETRC_MACHINE = 'vk'
def _download_webpage_handle(self, url_or_request, video_id, *args, fatal=True, **kwargs):
response = super()._download_webpage_handle(url_or_request, video_id, *args, fatal=fatal, **kwargs)
challenge_url, cookie = response[1].url if response else '', None
if challenge_url.startswith('https://vk.com/429.html?'):
cookie = self._get_cookies(challenge_url).get('hash429')
if not cookie:
return response
hash429 = hashlib.md5(cookie.value.encode('ascii')).hexdigest()
self._request_webpage(
update_url_query(challenge_url, {'key': hash429}), video_id, fatal=fatal,
note='Resolving WAF challenge', errnote='Failed to bypass WAF challenge')
return super()._download_webpage_handle(url_or_request, video_id, *args, fatal=True, **kwargs)
def _perform_login(self, username, password):
login_page, url_handle = self._download_webpage_handle(
'https://vk.com', None, 'Downloading login page')
login_form = self._hidden_inputs(login_page)
login_form.update({
'email': username.encode('cp1251'),
'pass': password.encode('cp1251'),
})
# vk serves two same remixlhk cookies in Set-Cookie header and expects
# first one to be actually set
self._apply_first_set_cookie_header(url_handle, 'remixlhk')
login_page = self._download_webpage(
'https://vk.com/login', None,
note='Logging in',
data=urlencode_postdata(login_form))
if re.search(r'onLoginFailed', login_page):
raise ExtractorError(
'Unable to login, incorrect username and/or password', expected=True)
def _download_payload(self, path, video_id, data, fatal=True):
endpoint = f'https://vk.com/{path}.php'
data['al'] = 1
code, payload = self._download_json(
endpoint, video_id, data=urlencode_postdata(data), fatal=fatal,
headers={
'Referer': endpoint,
'X-Requested-With': 'XMLHttpRequest',
})['payload']
if code == '3':
self.raise_login_required()
elif code == '8':
raise ExtractorError(clean_html(payload[0][1:-1]), expected=True)
return payload
class VKIE(VKBaseIE):
IE_NAME = 'vk'
IE_DESC = 'VK'
_EMBED_REGEX = [r'<iframe[^>]+?src=(["\'])(?P<url>https?://vk(?:(?:video)?\.ru|\.com)/video_ext\.php.+?)\1']
_VALID_URL = r'''(?x)
https?://
(?:
(?:
(?:(?:m|new|vksport)\.)?vk(?:(?:video)?\.ru|\.com)/video_|
(?:www\.)?daxab\.com/
)
ext\.php\?(?P<embed_query>.*?\boid=(?P<oid>-?\d+).*?\bid=(?P<id>\d+).*)|
(?:
(?:(?:m|new|vksport)\.)?vk(?:(?:video)?\.ru|\.com)/(?:.+?\?.*?z=)?(?:video|clip)|
(?:www\.)?daxab\.com/embed/
)
(?P<videoid>-?\d+_\d+)(?:.*\blist=(?P<list_id>([\da-f]+)|(ln-[\da-zA-Z]+)))?
)
'''
_TESTS = [
{
'url': 'https://vk.com/videos-77521?z=video-77521_162222515%2Fclub77521',
'info_dict': {
'id': '-77521_162222515',
'ext': 'mp4',
'title': 'ProtivoGunz - Хуёвая песня',
'description': 'Видео из официальной группы Noize MC\nhttp://vk.com/noizemc',
'uploader': 're:(?:Noize MC|Alexander Ilyashenko).*',
'uploader_id': '39545378',
'duration': 195,
'timestamp': 1329049880,
'upload_date': '20120212',
'comment_count': int,
'like_count': int,
'thumbnail': r're:https?://.+(?:\.jpg|getVideoPreview.*)$',
},
'params': {'skip_download': 'm3u8'},
},
{
'url': 'https://vk.com/video205387401_165548505',
'info_dict': {
'id': '205387401_165548505',
'ext': 'mp4',
'title': 'No name',
'uploader': 'Tom Cruise',
'uploader_id': '205387401',
'duration': 9,
'timestamp': 1374364108,
'upload_date': '20130720',
'comment_count': int,
'like_count': int,
'thumbnail': r're:https?://.+(?:\.jpg|getVideoPreview.*)$',
},
},
{
'note': 'Embedded video',
'url': 'https://vk.com/video_ext.php?oid=-77521&id=162222515&hash=87b046504ccd8bfa',
'info_dict': {
'id': '-77521_162222515',
'ext': 'mp4',
'uploader': 're:(?:Noize MC|Alexander Ilyashenko).*',
'title': 'ProtivoGunz - Хуёвая песня',
'duration': 195,
'upload_date': '20120212',
'timestamp': 1329049880,
'uploader_id': '39545378',
'thumbnail': r're:https?://.+(?:\.jpg|getVideoPreview.*)$',
},
'params': {'skip_download': 'm3u8'},
},
{
'url': 'https://vk.com/video-93049196_456239755?list=ln-cBjJ7S4jYYx3ADnmDT',
'info_dict': {
'id': '-93049196_456239755',
'ext': 'mp4',
'title': '8 серия (озвучка)',
'description': 'Видео из официальной группы Noize MC\nhttp://vk.com/noizemc',
'duration': 8383,
'comment_count': int,
'uploader': 'Dizi2021',
'like_count': int,
'timestamp': 1640162189,
'upload_date': '20211222',
'uploader_id': '-93049196',
'thumbnail': r're:https?://.+(?:\.jpg|getVideoPreview.*)$',
},
},
{
'note': 'youtube embed',
'url': 'https://vk.com/video276849682_170681728',
'info_dict': {
'id': 'V3K4mi0SYkc',
'ext': 'mp4',
'title': "DSWD Awards 'Children's Joy Foundation, Inc.' Certificate of Registration and License to Operate",
'description': 'md5:bf9c26cfa4acdfb146362682edd3827a',
'duration': 179,
'upload_date': '20130117',
'uploader': "Children's Joy Foundation Inc.",
'uploader_id': '@CJFIofficial',
'view_count': int,
'channel_id': 'UCgzCNQ11TmR9V97ECnhi3gw',
'availability': 'public',
'like_count': int,
'live_status': 'not_live',
'playable_in_embed': True,
'channel': 'Children\'s Joy Foundation Inc.',
'uploader_url': 'https://www.youtube.com/@CJFIofficial',
'thumbnail': r're:https?://.+\.jpg$',
'tags': 'count:27',
'start_time': 0.0,
'categories': ['Nonprofits & Activism'],
'channel_url': 'https://www.youtube.com/channel/UCgzCNQ11TmR9V97ECnhi3gw',
'channel_follower_count': int,
'age_limit': 0,
'timestamp': 1358394935,
},
},
{
'note': 'dailymotion embed',
'url': 'https://vk.com/video-95168827_456239103?list=cca524a0f0d5557e16',
'info_dict': {
'id': 'x8gfli0',
'ext': 'mp4',
'title': 'md5:45410f60ccd4b2760da98cb5fc777d70',
'description': 'md5:2e71c5c9413735cfa06cf1a166f16c84',
'uploader': 'Movies and cinema.',
'upload_date': '20221218',
'uploader_id': 'x1jdavv',
'timestamp': 1671387617,
'age_limit': 0,
'duration': 2918,
'like_count': int,
'view_count': int,
'thumbnail': r're:https?://.+x1080$',
'tags': list,
},
'skip': 'This video has been deleted and is no longer available.',
},
{
'url': 'https://vk.com/clips-74006511?z=clip-74006511_456247211',
'info_dict': {
'id': '-74006511_456247211',
'ext': 'mp4',
'comment_count': int,
'duration': 9,
'like_count': int,
'thumbnail': r're:https?://.+(?:\.jpg|getVideoPreview.*)$',
'timestamp': 1664995597,
'title': 'Clip by @madempress',
'upload_date': '20221005',
'uploader': 'Шальная Императрица',
'uploader_id': '-74006511',
'description': 'md5:f9315f7786fa0e84e75e4f824a48b056',
},
},
{
# video key is extra_data not url\d+
'url': 'https://vk.com/video-110305615_171782105',
'md5': 'e13fcda136f99764872e739d13fac1d1',
'info_dict': {
'id': '-110305615_171782105',
'ext': 'mp4',
'title': 'S-Dance, репетиции к The way show',
'uploader': 'THE WAY SHOW | 17 апреля',
'uploader_id': '-110305615',
'timestamp': 1454859345,
'upload_date': '20160207',
},
'skip': 'Removed',
},
{
'note': 'finished live stream, postlive_mp4',
'url': 'https://vk.com/videos-387766?z=video-387766_456242764%2Fpl_-387766_-2',
'info_dict': {
'id': '-387766_456242764',
'ext': 'mp4',
'title': 'ИгроМир 2016 День 1 — Игромания Утром',
'uploader': 'Игромания',
'duration': 5239,
'upload_date': '20160929',
'uploader_id': '-387766',
'timestamp': 1475137527,
'thumbnail': r're:https?://.+\.jpg$',
'comment_count': int,
'like_count': int,
},
'params': {
'skip_download': True,
},
'skip': 'No formats found',
},
{
'note': 'video has chapters',
'url': 'https://vkvideo.ru/video-18403220_456239696',
'info_dict': {
'id': '-18403220_456239696',
'ext': 'mp4',
'title': 'Трамп отменяет гранты // DeepSeek - Революция в ИИ // Илон Маск читер',
'description': 'md5:b112ea9de53683b6d03d29076f62eec2',
'uploader': 'Руслан Усачев',
'uploader_id': '-18403220',
'comment_count': int,
'like_count': int,
'duration': 1983,
'thumbnail': r're:https?://.+\.jpg',
'chapters': 'count:21',
'timestamp': 1738252883,
'upload_date': '20250130',
},
},
{
'url': 'https://vkvideo.ru/video-50883936_456244102',
'info_dict': {
'id': '-50883936_456244102',
'ext': 'mp4',
'title': 'Добивание Украины // Техник в коме // МОЯ ЗЛОСТЬ №140',
'description': 'md5:a9bc46181e9ebd0fdd82cef6c0191140',
'uploader': 'Стас Ай, Как Просто!',
'uploader_id': '-50883936',
'comment_count': int,
'like_count': int,
'duration': 4651,
'thumbnail': r're:https?://.+\.jpg',
'chapters': 'count:59',
'timestamp': 1743333869,
'upload_date': '20250330',
},
},
{
# live stream, hls and rtmp links, most likely already finished live
# stream by the time you are reading this comment
'url': 'https://vk.com/video-140332_456239111',
'only_matching': True,
},
{
# removed video, just testing that we match the pattern
'url': 'http://vk.com/feed?z=video-43215063_166094326%2Fbb50cacd3177146d7a',
'only_matching': True,
},
{
# age restricted video, requires vk account credentials
'url': 'https://vk.com/video205387401_164765225',
'only_matching': True,
},
{
'url': 'http://new.vk.com/video205387401_165548505',
'only_matching': True,
},
{
# This video is no longer available, because its author has been blocked.
'url': 'https://vk.com/video-10639516_456240611',
'only_matching': True,
},
{
# The video is not available in your region.
'url': 'https://vk.com/video-51812607_171445436',
'only_matching': True,
},
{
'url': 'https://vk.com/clip30014565_456240946',
'only_matching': True,
},
{
'url': 'https://vkvideo.ru/video-127553155_456242961',
'only_matching': True,
},
{
'url': 'https://vk.ru/video-220754053_456242564',
'only_matching': True,
},
{
'url': 'https://vksport.vkvideo.ru/video-124096712_456240773',
'only_matching': True,
},
]
def _real_extract(self, url):
mobj = self._match_valid_url(url)
video_id = mobj.group('videoid')
mv_data = {}
if video_id:
data = {
'act': 'show',
'video': video_id,
}
# Some videos (removed?) can only be downloaded with list id specified
list_id = mobj.group('list_id')
if list_id:
data['list'] = list_id
payload = self._download_payload('al_video', video_id, data)
info_page = payload[1]
opts = payload[-1]
mv_data = opts.get('mvData') or {}
player = opts.get('player') or {}
else:
video_id = '{}_{}'.format(mobj.group('oid'), mobj.group('id'))
info_page = self._download_webpage(
'https://vk.com/video_ext.php?' + mobj.group('embed_query'), video_id)
error_message = self._html_search_regex(
[r'(?s)<!><div[^>]+class="video_layer_message"[^>]*>(.+?)</div>',
r'(?s)<div[^>]+id="video_ext_msg"[^>]*>(.+?)</div>'],
info_page, 'error message', default=None)
if error_message:
raise ExtractorError(error_message, expected=True)
if re.search(r'<!>/login\.php\?.*\bact=security_check', info_page):
raise ExtractorError(
'You are trying to log in from an unusual location. You should confirm ownership at vk.com to log in with this IP.',
expected=True)
ERROR_COPYRIGHT = 'Video %s has been removed from public access due to rightholder complaint.'
ERRORS = {
r'>Видеозапись .*? была изъята из публичного доступа в связи с обращением правообладателя.<':
ERROR_COPYRIGHT,
r'>The video .*? was removed from public access by request of the copyright holder.<':
ERROR_COPYRIGHT,
r'<!>Please log in or <':
'Video %s is only available for registered users, '
'use --username and --password options to provide account credentials.',
r'<!>Unknown error':
'Video %s does not exist.',
r'<!>Видео временно недоступно':
'Video %s is temporarily unavailable.',
r'<!>Access denied':
'Access denied to video %s.',
r'<!>Видеозапись недоступна, так как её автор был заблокирован.':
'Video %s is no longer available, because its author has been blocked.',
r'<!>This video is no longer available, because its author has been blocked.':
'Video %s is no longer available, because its author has been blocked.',
r'<!>This video is no longer available, because it has been deleted.':
'Video %s is no longer available, because it has been deleted.',
r'<!>The video .+? is not available in your region.':
'Video %s is not available in your region.',
}
for error_re, error_msg in ERRORS.items():
if re.search(error_re, info_page):
raise ExtractorError(error_msg % video_id, expected=True)
player = self._parse_json(self._search_regex(
r'var\s+playerParams\s*=\s*({.+?})\s*;\s*\n',
info_page, 'player params'), video_id)
youtube_url = YoutubeIE._extract_url(info_page)
if youtube_url:
return self.url_result(youtube_url, YoutubeIE.ie_key())
vimeo_url = VimeoIE._extract_url(url, info_page)
if vimeo_url is not None:
return self.url_result(vimeo_url, VimeoIE.ie_key())
m_rutube = re.search(
r'\ssrc="((?:https?:)?//rutube\.ru\\?/(?:video|play)\\?/embed(?:.*?))\\?"', info_page)
if m_rutube is not None:
rutube_url = self._proto_relative_url(
m_rutube.group(1).replace('\\', ''))
return self.url_result(rutube_url)
dailymotion_url = next(DailymotionIE._extract_embed_urls(url, info_page), None)
if dailymotion_url:
return self.url_result(dailymotion_url, DailymotionIE.ie_key())
odnoklassniki_url = OdnoklassnikiIE._extract_url(info_page)
if odnoklassniki_url:
return self.url_result(odnoklassniki_url, OdnoklassnikiIE.ie_key())
sibnet_url = next(SibnetEmbedIE._extract_embed_urls(url, info_page), None)
if sibnet_url:
return self.url_result(sibnet_url)
m_opts = re.search(r'(?s)var\s+opts\s*=\s*({.+?});', info_page)
if m_opts:
m_opts_url = re.search(r"url\s*:\s*'((?!/\b)[^']+)", m_opts.group(1))
if m_opts_url:
opts_url = m_opts_url.group(1)
if opts_url.startswith('//'):
opts_url = 'https:' + opts_url
return self.url_result(opts_url)
data = player['params'][0]
# 2 = live
# 3 = post live (finished live)
is_live = data.get('live') == 2
timestamp = unified_timestamp(self._html_search_regex(
r'class=["\']mv_info_date[^>]+>([^<]+)(?:<|from)', info_page,
'upload date', default=None)) or int_or_none(data.get('date'))
view_count = str_to_int(self._search_regex(
r'class=["\']mv_views_count[^>]+>\s*([\d,.]+)',
info_page, 'view count', default=None))
formats = []
subtitles = {}
for format_id, format_url in data.items():
format_url = url_or_none(format_url)
if not format_url or not format_url.startswith(('http', '//', 'rtmp')):
continue
if (format_id.startswith(('url', 'cache'))
or format_id in ('extra_data', 'live_mp4', 'postlive_mp4')):
height = int_or_none(self._search_regex(
r'^(?:url|cache)(\d+)', format_id, 'height', default=None))
formats.append({
'format_id': format_id,
'url': format_url,
'ext': 'mp4',
'source_preference': 1,
'height': height,
})
elif format_id.startswith('hls') and format_id != 'hls_live_playback':
fmts, subs = self._extract_m3u8_formats_and_subtitles(
format_url, video_id, 'mp4', 'm3u8_native',
m3u8_id=format_id, fatal=False, live=is_live)
formats.extend(fmts)
self._merge_subtitles(subs, target=subtitles)
elif format_id.startswith('dash') and format_id not in ('dash_live_playback', 'dash_uni'):
fmts, subs = self._extract_mpd_formats_and_subtitles(
format_url, video_id, mpd_id=format_id, fatal=False)
formats.extend(fmts)
self._merge_subtitles(subs, target=subtitles)
elif format_id == 'rtmp':
formats.append({
'format_id': format_id,
'url': format_url,
'ext': 'flv',
})
for sub in data.get('subs') or {}:
subtitles.setdefault(sub.get('lang', 'en'), []).append({
'ext': sub.get('title', '.srt').split('.')[-1],
'url': url_or_none(sub.get('url')),
})
return {
'id': video_id,
'formats': formats,
'subtitles': subtitles,
**traverse_obj(mv_data, {
'title': ('title', {str}, {unescapeHTML}),
'description': ('desc', {clean_html}, filter),
'duration': ('duration', {int_or_none}),
'like_count': ('likes', {int_or_none}),
'comment_count': ('commcount', {int_or_none}),
}),
**traverse_obj(data, {
'title': ('md_title', {str}, {unescapeHTML}),
'description': ('description', {clean_html}, filter),
'thumbnail': ('jpg', {url_or_none}),
'uploader': ('md_author', {str}, {unescapeHTML}),
'uploader_id': (('author_id', 'authorId'), {str_or_none}, any),
'duration': ('duration', {int_or_none}),
'chapters': ('time_codes', lambda _, v: isinstance(v['time'], int), {
'title': ('text', {str}, {unescapeHTML}),
'start_time': 'time',
}),
}),
'timestamp': timestamp,
'view_count': view_count,
'is_live': is_live,
'_format_sort_fields': ('res', 'source'),
}
class VKUserVideosIE(VKBaseIE):
IE_NAME = 'vk:uservideos'
IE_DESC = "VK - User's Videos"
_BASE_URL_RE = r'https?://(?:(?:m|new)\.)?vk(?:video\.ru|\.com/video)'
_VALID_URL = [
rf'{_BASE_URL_RE}/playlist/(?P<id>-?\d+_-?\d+)',
rf'{_BASE_URL_RE}/(?P<id>@[^/?#]+)(?:/all)?/?(?!\?.*\bz=video)(?:[?#]|$)',
]
_TESTS = [{
'url': 'https://vk.com/video/@mobidevices',
'info_dict': {
'id': '-17892518_all',
},
'playlist_mincount': 1355,
}, {
'url': 'https://vk.com/video/@mobidevices?section=uploaded',
'info_dict': {
'id': '-17892518_uploaded',
},
'playlist_mincount': 182,
}, {
'url': 'https://vkvideo.ru/playlist/-204353299_426',
'info_dict': {
'id': '-204353299_playlist_426',
},
'playlist_mincount': 33,
}, {
'url': 'https://vk.com/video/@gorkyfilmstudio/all',
'only_matching': True,
}, {
'url': 'https://vkvideo.ru/@mobidevices',
'only_matching': True,
}, {
'url': 'https://vk.com/video/playlist/-174476437_2',
'only_matching': True,
}, {
'url': 'https://vkvideo.ru/playlist/-51890028_-2',
'only_matching': True,
}]
_VIDEO = collections.namedtuple('Video', ['owner_id', 'id'])
def _entries(self, page_id, section):
video_list_json = self._download_payload('al_video', page_id, {
'act': 'load_videos_silent',
'offset': 0,
'oid': page_id,
'section': section,
})[0][section]
count = video_list_json['count']
total = video_list_json['total']
video_list = video_list_json['list']
while True:
for video in video_list:
v = self._VIDEO._make(video[:2])
video_id = '%d_%d' % (v.owner_id, v.id)
yield self.url_result(
'https://vk.com/video' + video_id, VKIE.ie_key(), video_id)
if count >= total:
break
video_list_json = self._download_payload('al_video', page_id, {
'act': 'load_videos_silent',
'offset': count,
'oid': page_id,
'section': section,
})[0][section]
new_count = video_list_json['count']
if not new_count:
self.to_screen(f'{page_id}: Skipping {total - count} unavailable videos')
break
count += new_count
video_list = video_list_json['list']
def _real_extract(self, url):
u_id = self._match_id(url)
webpage = self._download_webpage(url, u_id)
if u_id.startswith('@'):
page_id = traverse_obj(
self._search_json(r'\bvar newCur\s*=', webpage, 'cursor data', u_id),
('oid', {int}, {str_or_none}, {require('page id')}))
section = traverse_obj(parse_qs(url), ('section', 0)) or 'all'
else:
page_id, _, section = u_id.partition('_')
section = f'playlist_{section}'
playlist_title = clean_html(get_element_by_class('VideoInfoPanel__title', webpage))
return self.playlist_result(self._entries(page_id, section), f'{page_id}_{section}', playlist_title)
class VKWallPostIE(VKBaseIE):
IE_NAME = 'vk:wallpost'
_VALID_URL = r'https?://(?:(?:(?:(?:m|new)\.)?vk\.com/(?:[^?]+\?.*\bw=)?wall(?P<id>-?\d+_\d+)))'
_TESTS = [{
# public page URL, audio playlist
'url': 'https://vk.com/bs.official?w=wall-23538238_35',
'info_dict': {
'id': '-23538238_35',
'title': 'Black Shadow - Wall post -23538238_35',
'description': 'md5:190c78f905a53e0de793d83933c6e67f',
},
'playlist': [{
'md5': '5ba93864ec5b85f7ce19a9af4af080f6',
'info_dict': {
'id': '135220665_111806521',
'ext': 'm4a',
'title': 'Black Shadow - Слепое Верование',
'duration': 370,
'uploader': 'Black Shadow',
'artist': 'Black Shadow',
'track': 'Слепое Верование',
},
}, {
'md5': '4cc7e804579122b17ea95af7834c9233',
'info_dict': {
'id': '135220665_111802303',
'ext': 'm4a',
'title': 'Black Shadow - Война - Негасимое Бездны Пламя!',
'duration': 423,
'uploader': 'Black Shadow',
'artist': 'Black Shadow',
'track': 'Война - Негасимое Бездны Пламя!',
},
}],
'params': {
'skip_download': True,
},
}, {
# single YouTube embed with irrelevant reaction videos
'url': 'https://vk.com/wall-32370614_7173954',
'info_dict': {
'id': '-32370614_7173954',
'title': 'md5:9f93c405bbc00061d34007d78c75e3bc',
'description': 'md5:953b811f26fa9f21ee5856e2ea8e68fc',
},
'playlist_count': 1,
}, {
# wall page URL
'url': 'https://vk.com/wall-23538238_35',
'only_matching': True,
}, {
# mobile wall page URL
'url': 'https://m.vk.com/wall-23538238_35',
'only_matching': True,
}]
_BASE64_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN0PQRSTUVWXYZO123456789+/='
_AUDIO = collections.namedtuple('Audio', ['id', 'owner_id', 'url', 'title', 'performer', 'duration', 'album_id', 'unk', 'author_link', 'lyrics', 'flags', 'context', 'extra', 'hashes', 'cover_url', 'ads'])
def _decode(self, enc):
dec = ''
e = n = 0
for c in enc:
r = self._BASE64_CHARS.index(c)
cond = n % 4
e = 64 * e + r if cond else r
n += 1
if cond:
dec += chr(255 & e >> (-2 * n & 6))
return dec
def _unmask_url(self, mask_url, vk_id):
if 'audio_api_unavailable' in mask_url:
extra = mask_url.split('?extra=')[1].split('#')
_, base = self._decode(extra[1]).split(chr(11))
mask_url = list(self._decode(extra[0]))
url_len = len(mask_url)
indexes = [None] * url_len
index = int(base) ^ vk_id
for n in range(url_len - 1, -1, -1):
index = (url_len * (n + 1) ^ index + n) % url_len
indexes[n] = index
for n in range(1, url_len):
c = mask_url[n]
index = indexes[url_len - 1 - n]
mask_url[n] = mask_url[index]
mask_url[index] = c
mask_url = ''.join(mask_url)
return mask_url
def _real_extract(self, url):
post_id = self._match_id(url)
webpage = self._download_payload('wkview', post_id, {
'act': 'show',
'w': 'wall' + post_id,
})[1]
uploader = clean_html(get_element_by_class('PostHeaderTitle__authorName', webpage))
entries = []
for audio in re.findall(r'data-audio="([^"]+)', webpage):
audio = self._parse_json(unescapeHTML(audio), post_id)
if not audio['url']:
continue
title = unescapeHTML(audio.get('title'))
artist = unescapeHTML(audio.get('artist'))
entries.append({
'id': f'{audio["owner_id"]}_{audio["id"]}',
'title': join_nonempty(artist, title, delim=' - '),
'thumbnails': try_call(lambda: [{'url': u} for u in audio['coverUrl'].split(',')]),
'duration': int_or_none(audio.get('duration')),
'uploader': uploader,
'artist': artist,
'track': title,
'formats': [{
'url': audio['url'],
'ext': 'm4a',
'vcodec': 'none',
'acodec': 'mp3',
'container': 'm4a_dash',
}],
})
entries.extend(self.url_result(urljoin(url, entry), VKIE) for entry in set(re.findall(
r'<a[^>]+href=(?:["\'])(/video(?:-?[\d_]+)[^"\']*)',
get_element_html_by_id('wl_post_body', webpage))))
return self.playlist_result(
entries, post_id, join_nonempty(uploader, f'Wall post {post_id}', delim=' - '),
clean_html(get_element_by_class('wall_post_text', webpage)))
class VKPlayBaseIE(InfoExtractor):
_BASE_URL_RE = r'https?://(?:vkplay\.live|live\.vk(?:play|video)\.ru)/'
_RESOLUTIONS = {
'tiny': '256x144',
'lowest': '426x240',
'low': '640x360',
'medium': '852x480',
'high': '1280x720',
'full_hd': '1920x1080',
'quad_hd': '2560x1440',
}
def _extract_from_initial_state(self, url, video_id, path):
webpage = self._download_webpage(url, video_id)
video_info = traverse_obj(self._search_json(
r'<script[^>]+\bid="initial-state"[^>]*>', webpage, 'initial state', video_id),
path, expected_type=dict)
if not video_info:
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | true |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/weiqitv.py | yt_dlp/extractor/weiqitv.py | from .common import InfoExtractor
class WeiqiTVIE(InfoExtractor):
_WORKING = False
IE_DESC = 'WQTV'
_VALID_URL = r'https?://(?:www\.)?weiqitv\.com/index/video_play\?videoId=(?P<id>[A-Za-z0-9]+)'
_TESTS = [{
'url': 'http://www.weiqitv.com/index/video_play?videoId=53c744f09874f0e76a8b46f3',
'md5': '26450599afd64c513bc77030ad15db44',
'info_dict': {
'id': '53c744f09874f0e76a8b46f3',
'ext': 'mp4',
'title': '2013年度盘点',
},
}, {
'url': 'http://www.weiqitv.com/index/video_play?videoId=567379a2d4c36cca518b4569',
'info_dict': {
'id': '567379a2d4c36cca518b4569',
'ext': 'mp4',
'title': '民国围棋史',
},
}, {
'url': 'http://www.weiqitv.com/index/video_play?videoId=5430220a9874f088658b4567',
'info_dict': {
'id': '5430220a9874f088658b4567',
'ext': 'mp4',
'title': '二路托过的手段和运用',
},
}]
def _real_extract(self, url):
media_id = self._match_id(url)
page = self._download_webpage(url, media_id)
info_json_str = self._search_regex(
r'var\s+video\s*=\s*(.+});', page, 'info json str')
info_json = self._parse_json(info_json_str, media_id)
letvcloud_url = self._search_regex(
r'var\s+letvurl\s*=\s*"([^"]+)', page, 'letvcloud url')
return {
'_type': 'url_transparent',
'ie_key': 'LetvCloud',
'url': letvcloud_url,
'title': info_json['name'],
'id': media_id,
}
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/stitcher.py | yt_dlp/extractor/stitcher.py | from .common import InfoExtractor
from ..utils import (
ExtractorError,
clean_html,
clean_podcast_url,
int_or_none,
str_or_none,
try_get,
url_or_none,
)
class StitcherBaseIE(InfoExtractor):
_VALID_URL_BASE = r'https?://(?:www\.)?stitcher\.com/(?:podcast|show)/'
def _call_api(self, path, video_id, query):
resp = self._download_json(
'https://api.prod.stitcher.com/' + path,
video_id, query=query)
error_massage = try_get(resp, lambda x: x['errors'][0]['message'])
if error_massage:
raise ExtractorError(error_massage, expected=True)
return resp['data']
def _extract_description(self, data):
return clean_html(data.get('html_description') or data.get('description'))
def _extract_audio_url(self, episode):
return url_or_none(episode.get('audio_url') or episode.get('guid'))
def _extract_show_info(self, show):
return {
'thumbnail': show.get('image_base_url'),
'series': show.get('title'),
}
def _extract_episode(self, episode, audio_url, show_info):
info = {
'id': str(episode['id']),
'display_id': episode.get('slug'),
'title': episode['title'].strip(),
'description': self._extract_description(episode),
'duration': int_or_none(episode.get('duration')),
'url': clean_podcast_url(audio_url),
'vcodec': 'none',
'timestamp': int_or_none(episode.get('date_published')),
'season_number': int_or_none(episode.get('season')),
'season_id': str_or_none(episode.get('season_id')),
}
info.update(show_info)
return info
class StitcherIE(StitcherBaseIE):
_VALID_URL = StitcherBaseIE._VALID_URL_BASE + r'(?:[^/]+/)+e(?:pisode)?/(?:[^/#?&]+-)?(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.stitcher.com/podcast/the-talking-machines/e/40789481?autoplay=true',
'md5': 'e9635098e0da10b21a0e2b85585530f6',
'info_dict': {
'id': '40789481',
'ext': 'mp3',
'title': 'Machine Learning Mastery and Cancer Clusters',
'description': 'md5:547adb4081864be114ae3831b4c2b42f',
'duration': 1604,
'thumbnail': r're:^https?://.*\.jpg',
'upload_date': '20151008',
'timestamp': 1444285800,
'series': 'Talking Machines',
},
}, {
'url': 'http://www.stitcher.com/podcast/panoply/vulture-tv/e/the-rare-hourlong-comedy-plus-40846275?autoplay=true',
'info_dict': {
'id': '40846275',
'display_id': 'the-rare-hourlong-comedy-plus',
'ext': 'mp3',
'title': "The CW's 'Crazy Ex-Girlfriend'",
'description': 'md5:04f1e2f98eb3f5cbb094cea0f9e19b17',
'duration': 2235,
'thumbnail': r're:^https?://.*\.jpg',
},
'params': {
'skip_download': True,
},
'skip': 'Page Not Found',
}, {
# escaped title
'url': 'http://www.stitcher.com/podcast/marketplace-on-stitcher/e/40910226?autoplay=true',
'only_matching': True,
}, {
'url': 'http://www.stitcher.com/podcast/panoply/getting-in/e/episode-2a-how-many-extracurriculars-should-i-have-40876278?autoplay=true',
'only_matching': True,
}, {
'url': 'https://www.stitcher.com/show/threedom/episode/circles-on-a-stick-200212584',
'only_matching': True,
}]
def _real_extract(self, url):
audio_id = self._match_id(url)
data = self._call_api(
'shows/episodes', audio_id, {'episode_ids': audio_id})
episode = data['episodes'][0]
audio_url = self._extract_audio_url(episode)
if not audio_url:
self.raise_login_required()
show = try_get(data, lambda x: x['shows'][0], dict) or {}
return self._extract_episode(
episode, audio_url, self._extract_show_info(show))
class StitcherShowIE(StitcherBaseIE):
_VALID_URL = StitcherBaseIE._VALID_URL_BASE + r'(?P<id>[^/#?&]+)/?(?:[?#&]|$)'
_TESTS = [{
'url': 'http://www.stitcher.com/podcast/the-talking-machines',
'info_dict': {
'id': 'the-talking-machines',
'title': 'Talking Machines',
'description': 'md5:831f0995e40f26c10231af39cf1ebf0b',
},
'playlist_mincount': 106,
}, {
'url': 'https://www.stitcher.com/show/the-talking-machines',
'only_matching': True,
}]
def _real_extract(self, url):
show_slug = self._match_id(url)
data = self._call_api(
f'search/show/{show_slug}/allEpisodes', show_slug, {'count': 10000})
show = try_get(data, lambda x: x['shows'][0], dict) or {}
show_info = self._extract_show_info(show)
entries = []
for episode in (data.get('episodes') or []):
audio_url = self._extract_audio_url(episode)
if not audio_url:
continue
entries.append(self._extract_episode(episode, audio_url, show_info))
return self.playlist_result(
entries, show_slug, show.get('title'),
self._extract_description(show))
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tunein.py | yt_dlp/extractor/tunein.py | import functools
import urllib.parse
from .common import InfoExtractor
from ..utils import (
OnDemandPagedList,
UnsupportedError,
clean_html,
int_or_none,
join_nonempty,
parse_iso8601,
update_url_query,
url_or_none,
)
from ..utils.traversal import traverse_obj
class TuneInBaseIE(InfoExtractor):
    """Shared helpers for the TuneIn extractors: profile-API access and
    resolution of a content id into downloadable formats/subtitles."""

    def _call_api(self, item_id, endpoint=None, note='Downloading JSON metadata', fatal=False, query=None):
        # `endpoint` is optional and, when given, appended after the item id
        api_url = join_nonempty('https://api.tunein.com/profiles', item_id, endpoint, delim='/')
        return self._download_json(api_url, item_id, note=note, fatal=fatal, query=query) or {}

    def _extract_formats_and_subtitles(self, content_id):
        stream_data = self._download_json(
            'https://opml.radiotime.com/Tune.ashx', content_id, query={
                'formats': 'mp3,aac,ogg,flash,hls',
                'id': content_id,
                'render': 'json',
            })

        formats = []
        subtitles = {}
        # Only entries carrying a valid URL are considered
        for entry in traverse_obj(stream_data, ('body', lambda _, v: url_or_none(v['url']))):
            if entry.get('media_type') != 'hls':
                # Direct (progressive) stream: map the API fields straight through
                formats.append(traverse_obj(entry, {
                    'abr': ('bitrate', {int_or_none}),
                    'ext': ('media_type', {str}),
                    'url': ('url', {self._proto_relative_url}),
                }))
                continue
            # HLS manifest: expand into its variant formats and merge subtitles
            fmts, subs = self._extract_m3u8_formats_and_subtitles(entry['url'], content_id, fatal=False)
            formats.extend(fmts)
            self._merge_subtitles(subs, target=subtitles)
        return formats, subtitles
class TuneInStationIE(TuneInBaseIE):
    """Extract a live TuneIn radio station ('s'-prefixed ids)."""
    IE_NAME = 'tunein:station'
    _VALID_URL = r'https?://tunein\.com/radio/[^/?#]+(?P<id>s\d+)'
    _TESTS = [{
        'url': 'https://tunein.com/radio/Jazz24-885-s34682/',
        'info_dict': {
            'id': 's34682',
            'ext': 'mp3',
            'title': str,
            'alt_title': 'World Class Jazz',
            'channel_follower_count': int,
            'description': 'md5:d6d0b89063fd68d529fa7058ee98619b',
            'location': r're:Seattle-Tacoma, (?:US|WA)',
            'live_status': 'is_live',
            'thumbnail': r're:https?://.+',
        },
        'params': {'skip_download': 'Livestream'},
    }, {
        'url': 'https://tunein.com/radio/BBC-Radio-1-988-s24939/',
        'info_dict': {
            'id': 's24939',
            'ext': 'm4a',
            'title': str,
            'alt_title': 'The biggest new pop and all-day vibes',
            'channel_follower_count': int,
            'description': 'md5:ee2c56794844610d045f8caf5ff34d0c',
            'location': 'London, UK',
            'live_status': 'is_live',
            'thumbnail': r're:https?://.+',
        },
        'params': {'skip_download': 'Livestream'},
    }]
    def _real_extract(self, url):
        station_id = self._match_id(url)
        formats, subtitles = self._extract_formats_and_subtitles(station_id)
        return {
            'id': station_id,
            'formats': formats,
            'subtitles': subtitles,
            # Metadata comes from the profile API's 'Item' object
            **traverse_obj(self._call_api(station_id), ('Item', {
                'title': ('Title', {clean_html}),
                'alt_title': ('Subtitle', {clean_html}, filter),
                'channel_follower_count': ('Actions', 'Follow', 'FollowerCount', {int_or_none}),
                'description': ('Description', {clean_html}, filter),
                'is_live': ('Actions', 'Play', 'IsLive', {bool}),
                'location': ('Properties', 'Location', 'DisplayName', {str}),
                'thumbnail': ('Image', {url_or_none}),
            })),
        }
class TuneInPodcastIE(TuneInBaseIE):
    """Playlist extractor for a TuneIn podcast program ('p'-prefixed ids);
    yields one entry per free episode via paged API requests."""
    IE_NAME = 'tunein:podcast:program'
    _PAGE_SIZE = 20
    _VALID_URL = r'https?://tunein\.com/podcasts(?:/[^/?#]+){1,2}(?P<id>p\d+)'
    _TESTS = [{
        'url': 'https://tunein.com/podcasts/Technology-Podcasts/Artificial-Intelligence-p1153019/',
        'info_dict': {
            'id': 'p1153019',
            'title': 'Lex Fridman Podcast',
        },
        'playlist_mincount': 200,
    }, {
        'url': 'https://tunein.com/podcasts/World-News/BBC-News-p14/',
        'info_dict': {
            'id': 'p14',
            'title': 'BBC News',
        },
        'playlist_mincount': 35,
    }]
    @classmethod
    def suitable(cls, url):
        # Defer to the episode extractor when the URL carries a topicId
        return False if TuneInPodcastEpisodeIE.suitable(url) else super().suitable(url)
    def _fetch_page(self, url, podcast_id, page=0):
        items = self._call_api(
            podcast_id, 'contents', f'Downloading page {page + 1}', query={
                'filter': 't:free',
                'limit': self._PAGE_SIZE,
                'offset': page * self._PAGE_SIZE,
            },
        )['Items']
        # Each GuideId is 't<topicId>'; strip the leading letter for the query param
        for item in traverse_obj(items, (..., 'GuideId', {str}, filter)):
            yield self.url_result(update_url_query(url, {'topicId': item[1:]}))
    def _real_extract(self, url):
        podcast_id = self._match_id(url)
        return self.playlist_result(OnDemandPagedList(
            functools.partial(self._fetch_page, url, podcast_id), self._PAGE_SIZE),
            podcast_id, traverse_obj(self._call_api(podcast_id), ('Item', 'Title', {str})))
class TuneInPodcastEpisodeIE(TuneInBaseIE):
    """Extract a single podcast episode, addressed via the program URL's
    `topicId` query parameter; the internal episode id is 't<topicId>'."""
    IE_NAME = 'tunein:podcast'
    _VALID_URL = r'https?://tunein\.com/podcasts(?:/[^/?#]+){1,2}(?P<series_id>p\d+)/?\?(?:[^#]+&)?(?i:topicid)=(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://tunein.com/podcasts/Technology-Podcasts/Artificial-Intelligence-p1153019/?topicId=236404354',
        'info_dict': {
            'id': 't236404354',
            'ext': 'mp3',
            'title': '#351 – MrBeast: Future of YouTube, Twitter, TikTok, and Instagram',
            'alt_title': 'Technology Podcasts >',
            'cast': 'count:1',
            'description': 'md5:1029895354ef073ff00f20b82eb6eb71',
            'display_id': '236404354',
            'duration': 8330,
            'thumbnail': r're:https?://.+',
            'timestamp': 1673458571,
            'upload_date': '20230111',
            'series': 'Lex Fridman Podcast',
            'series_id': 'p1153019',
        },
    }, {
        'url': 'https://tunein.com/podcasts/The-BOB--TOM-Show-Free-Podcast-p20069/?topicId=174556405',
        'info_dict': {
            'id': 't174556405',
            'ext': 'mp3',
            'title': 'B&T Extra: Ohhh Yeah, It\'s Sexy Time',
            'alt_title': 'Westwood One >',
            'cast': 'count:2',
            'description': 'md5:6828234f410ab88c85655495c5fcfa88',
            'display_id': '174556405',
            'duration': 1203,
            'series': 'The BOB & TOM Show Free Podcast',
            'series_id': 'p20069',
            'thumbnail': r're:https?://.+',
            'timestamp': 1661799600,
            'upload_date': '20220829',
        },
    }]
    def _real_extract(self, url):
        series_id, display_id = self._match_valid_url(url).group('series_id', 'id')
        episode_id = f't{display_id}'
        formats, subtitles = self._extract_formats_and_subtitles(episode_id)
        return {
            'id': episode_id,
            'display_id': display_id,
            'formats': formats,
            # Series title requires a second API call against the program profile
            'series': traverse_obj(self._call_api(series_id), ('Item', 'Title', {clean_html})),
            'series_id': series_id,
            'subtitles': subtitles,
            **traverse_obj(self._call_api(episode_id), ('Item', {
                'title': ('Title', {clean_html}),
                'alt_title': ('Subtitle', {clean_html}, filter),
                # Hosts arrive as a single ';'-separated string; split, strip and
                # drop empties, keeping the list only if anything remains
                'cast': (
                    'Properties', 'ParentProgram', 'Hosts', {clean_html},
                    {lambda x: x.split(';')}, ..., {str.strip}, filter, all, filter),
                'description': ('Description', {clean_html}, filter),
                'duration': ('Actions', 'Play', 'Duration', {int_or_none}),
                'thumbnail': ('Image', {url_or_none}),
                'timestamp': ('Actions', 'Play', 'PublishTime', {parse_iso8601}),
            })),
        }
class TuneInEmbedIE(TuneInBaseIE):
    """Handle tunein.com/embed/player/<id> iframes by redirecting to the
    canonical URL for the id's kind (station/program/topic)."""
    IE_NAME = 'tunein:embed'
    _VALID_URL = r'https?://tunein\.com/embed/player/(?P<id>[^/?#]+)'
    _EMBED_REGEX = [r'<iframe\b[^>]+\bsrc=["\'](?P<url>(?:https?:)?//tunein\.com/embed/player/[^/?#"\']+)']
    _TESTS = [{
        'url': 'https://tunein.com/embed/player/s6404/',
        'info_dict': {
            'id': 's6404',
            'ext': 'mp3',
            'title': str,
            'alt_title': 'South Africa\'s News and Information Leader',
            'channel_follower_count': int,
            'live_status': 'is_live',
            'location': 'Johannesburg, South Africa',
            'thumbnail': r're:https?://.+',
        },
        'params': {'skip_download': 'Livestream'},
    }, {
        'url': 'https://tunein.com/embed/player/t236404354/',
        'info_dict': {
            'id': 't236404354',
            'ext': 'mp3',
            'title': '#351 – MrBeast: Future of YouTube, Twitter, TikTok, and Instagram',
            'alt_title': 'Technology Podcasts >',
            'cast': 'count:1',
            'description': 'md5:1029895354ef073ff00f20b82eb6eb71',
            'display_id': '236404354',
            'duration': 8330,
            'series': 'Lex Fridman Podcast',
            'series_id': 'p1153019',
            'thumbnail': r're:https?://.+',
            'timestamp': 1673458571,
            'upload_date': '20230111',
        },
    }, {
        'url': 'https://tunein.com/embed/player/p191660/',
        'info_dict': {
            'id': 'p191660',
            'title': 'SBS Tamil',
        },
        'playlist_mincount': 195,
    }]
    _WEBPAGE_TESTS = [{
        'url': 'https://www.martiniinthemorning.com/',
        'info_dict': {
            'id': 's55412',
            'ext': 'mp3',
            'title': str,
            'alt_title': 'Now that\'s music!',
            'channel_follower_count': int,
            'description': 'md5:41588a3e2cf34b3eafc6c33522fa611a',
            'live_status': 'is_live',
            'location': 'US',
            'thumbnail': r're:https?://.+',
        },
        'params': {'skip_download': 'Livestream'},
    }]
    def _real_extract(self, url):
        embed_id = self._match_id(url)
        # First character of the id encodes its kind.
        # NOTE(review): an unrecognized prefix makes `kind` None and produces a
        # nonsensical redirect URL — presumably only p/s/t ids occur in practice.
        kind = {
            'p': 'program',
            's': 'station',
            't': 'topic',
        }.get(embed_id[:1])
        return self.url_result(
            f'https://tunein.com/{kind}/?{kind}id={embed_id[1:]}')
class TuneInShortenerIE(InfoExtractor):
    """Resolve tun.in short links by following the server-side redirect."""
    IE_NAME = 'tunein:shortener'
    IE_DESC = False  # Do not list
    _VALID_URL = r'https?://tun\.in/(?P<id>[^/?#]+)'
    _TESTS = [{
        'url': 'http://tun.in/ser7s',
        'info_dict': {
            'id': 's34682',
            'title': str,
            'ext': 'mp3',
            'alt_title': 'World Class Jazz',
            'channel_follower_count': int,
            'description': 'md5:d6d0b89063fd68d529fa7058ee98619b',
            'location': r're:Seattle-Tacoma, (?:US|WA)',
            'live_status': 'is_live',
            'thumbnail': r're:https?://.+',
        },
        'params': {'skip_download': 'Livestream'},
    }, {
        'url': 'http://tun.in/tqeeFw',
        'info_dict': {
            'id': 't236404354',
            'title': str,
            'ext': 'mp3',
            'alt_title': 'Technology Podcasts >',
            'cast': 'count:1',
            'description': 'md5:1029895354ef073ff00f20b82eb6eb71',
            'display_id': '236404354',
            'duration': 8330,
            'series': 'Lex Fridman Podcast',
            'series_id': 'p1153019',
            'thumbnail': r're:https?://.+',
            'timestamp': 1673458571,
            'upload_date': '20230111',
        },
        'params': {'skip_download': 'Livestream'},
    }, {
        'url': 'http://tun.in/pei6i',
        'info_dict': {
            'id': 'p14',
            'title': 'BBC News',
        },
        'playlist_mincount': 35,
    }]
    def _real_extract(self, url):
        redirect_id = self._match_id(url)
        # The server doesn't support HEAD requests, so fetch the full page
        # just to learn the final (post-redirect) URL
        urlh = self._request_webpage(url, redirect_id, 'Downloading redirect page')
        # Need to strip port from URL
        parsed = urllib.parse.urlparse(urlh.url)
        new_url = parsed._replace(netloc=parsed.hostname).geturl()
        # Prevent infinite loop in case redirect fails
        if self.suitable(new_url):
            raise UnsupportedError(new_url)
        return self.url_result(new_url)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/murrtube.py | yt_dlp/extractor/murrtube.py | import functools
import json
from .common import InfoExtractor
from ..utils import (
ExtractorError,
OnDemandPagedList,
clean_html,
extract_attributes,
get_element_by_class,
get_element_html_by_id,
parse_count,
remove_end,
update_url,
urlencode_postdata,
)
class MurrtubeIE(InfoExtractor):
    """Extract a single Murrtube video from its watch page.

    Supports slugged `/videos/<slug>-<uuid>` and short `/v/<code>` URLs.
    The real media id is re-read from the HLS playlist URL embedded in the page.
    """
    _VALID_URL = r'''(?x)
        (?:
            murrtube:|
            https?://murrtube\.net/(?:v/|videos/(?P<slug>[a-z0-9-]+?)-)
        )
        (?P<id>[A-Z0-9]{4}|[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})
    '''
    _TESTS = [{
        'url': 'https://murrtube.net/videos/inferno-x-skyler-148b6f2a-fdcc-4902-affe-9c0f41aaaca0',
        'md5': '70380878a77e8565d4aea7f68b8bbb35',
        'info_dict': {
            'id': 'ca885d8456b95de529b6723b158032e11115d',
            'ext': 'mp4',
            'title': 'Inferno X Skyler',
            'description': 'Humping a very good slutty sheppy (roomate)',
            'uploader': 'Inferno Wolf',
            'age_limit': 18,
            'thumbnail': 'https://storage.murrtube.net/murrtube-production/ekbs3zcfvuynnqfx72nn2tkokvsd',
            'comment_count': int,
            'view_count': int,
            'like_count': int,
        },
    }, {
        'url': 'https://murrtube.net/v/0J2Q',
        'md5': '31262f6ac56f0ca75e5a54a0f3fefcb6',
        'info_dict': {
            'id': '8442998c52134968d9caa36e473e1a6bac6ca',
            'ext': 'mp4',
            'uploader': 'Hayel',
            'title': 'Who\'s in charge now?',
            'description': 'md5:795791e97e5b0f1805ea84573f02a997',
            'age_limit': 18,
            'thumbnail': 'https://storage.murrtube.net/murrtube-production/fb1ojjwiucufp34ya6hxu5vfqi5s',
            'comment_count': int,
            'view_count': int,
            'like_count': int,
        },
    }]
    def _extract_count(self, name, html):
        """Parse a labelled counter (Views/Likes/Comments) out of the page HTML."""
        return parse_count(self._search_regex(
            rf'([\d,]+)\s+<span[^>]*>{name}</span>', html, name, default=None))
    def _real_initialize(self):
        # The site gates content behind an age check; submit the hidden form
        # once so the age cookie is set for subsequent requests
        homepage = self._download_webpage(
            'https://murrtube.net', None, note='Getting session token')
        self._request_webpage(
            'https://murrtube.net/accept_age_check', None, 'Setting age cookie',
            data=urlencode_postdata(self._hidden_inputs(homepage)))
    def _real_extract(self, url):
        video_id = self._match_id(url)
        # FIX: check the full URL — _match_id returns only the bare id, which
        # never carries the 'murrtube:' prefix, so the old check on video_id
        # could never fire and such URLs failed with an obscure download error
        if url.startswith('murrtube:'):
            raise ExtractorError('Support for murrtube: prefix URLs is broken')
        video_page = self._download_webpage(url, video_id)
        video_attrs = extract_attributes(get_element_html_by_id('video', video_page))
        playlist = update_url(video_attrs['data-url'], query=None)
        # FIX: escape the literal dot in 'index.m3u8' (previously matched any char)
        video_id = self._search_regex(r'/([\da-f]+)/index\.m3u8', playlist, 'video id')
        return {
            'id': video_id,
            'title': remove_end(self._og_search_title(video_page), ' - Murrtube'),
            'age_limit': 18,
            'formats': self._extract_m3u8_formats(playlist, video_id, 'mp4'),
            'description': self._og_search_description(video_page),
            'thumbnail': update_url(self._og_search_thumbnail(video_page, default=''), query=None) or None,
            'uploader': clean_html(get_element_by_class('pl-1 is-size-6 has-text-lighter', video_page)),
            'view_count': self._extract_count('Views', video_page),
            'like_count': self._extract_count('Likes', video_page),
            'comment_count': self._extract_count('Comments', video_page),
        }
class MurrtubeUserIE(InfoExtractor):
    """Paged playlist of all videos on a Murrtube user profile (GraphQL API)."""
    _WORKING = False
    IE_DESC = 'Murrtube user profile'
    _VALID_URL = r'https?://murrtube\.net/(?P<id>[^/]+)$'
    _TESTS = [{
        'url': 'https://murrtube.net/stormy',
        'info_dict': {
            'id': 'stormy',
        },
        'playlist_mincount': 27,
    }]
    _PAGE_SIZE = 10
    def _download_gql(self, video_id, op, note=None, fatal=True):
        # POST a GraphQL operation and return its 'data' payload
        result = self._download_json(
            'https://murrtube.net/graphql',
            video_id, note, data=json.dumps(op).encode(), fatal=fatal,
            headers={'Content-Type': 'application/json'})
        return result['data']
    def _fetch_page(self, username, user_id, page):
        data = self._download_gql(username, {
            'operationName': 'Media',
            'variables': {
                'limit': self._PAGE_SIZE,
                'offset': page * self._PAGE_SIZE,
                'sort': 'latest',
                'userId': user_id,
            },
            'query': '''\
query Media($q: String, $sort: String, $userId: ID, $offset: Int!, $limit: Int!) {
  media(q: $q, sort: $sort, userId: $userId, offset: $offset, limit: $limit) {
    id
    __typename
  }
}'''},
            f'Downloading page {page + 1}')
        if data is None:
            raise ExtractorError(f'Failed to retrieve video list for page {page + 1}')
        media = data['media']
        for entry in media:
            # Delegate per-video extraction to MurrtubeIE via murrtube: URLs
            yield self.url_result('murrtube:{}'.format(entry['id']), MurrtubeIE.ie_key())
    def _real_extract(self, url):
        username = self._match_id(url)
        # Resolve the username to the internal user id first
        data = self._download_gql(username, {
            'operationName': 'User',
            'variables': {
                'id': username,
            },
            'query': '''\
query User($id: ID!) {
  user(id: $id) {
    id
    __typename
  }
}'''},
            'Downloading user info')
        if data is None:
            raise ExtractorError('Failed to fetch user info')
        user = data['user']
        entries = OnDemandPagedList(functools.partial(
            self._fetch_page, username, user.get('id')), self._PAGE_SIZE)
        return self.playlist_result(entries, username)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/mangomolo.py | yt_dlp/extractor/mangomolo.py | import base64
import urllib.parse
from .common import InfoExtractor
from ..utils import classproperty, int_or_none
class MangomoloBaseIE(InfoExtractor):
    """Common logic for Mangomolo embedded players; subclasses set _TYPE,
    _SLUG and _IS_LIVE for the VOD and live variants."""
    _BASE_REGEX = r'(?:https?:)?//(?:admin\.mangomolo\.com/analytics/index\.php/customers/embed/|player\.mangomolo\.com/v1/)'
    _SLUG = None
    @classproperty
    def _VALID_URL(cls):
        # Built per-subclass from the shared host regex plus the subclass slug
        return f'{cls._BASE_REGEX}{cls._SLUG}'
    @classproperty
    def _EMBED_REGEX(cls):
        return [rf'<iframe[^>]+src=(["\'])(?P<url>{cls._VALID_URL}.+?)\1']
    def _extract_from_webpage(self, url, webpage):
        # Enrich embed results with the id and the embedding site as uploader
        for res in super()._extract_from_webpage(url, webpage):
            yield {
                **res,
                '_type': 'url_transparent',
                'id': self._search_regex(self._SLUG, res['url'], 'id', group='id'),
                'uploader': self._search_regex(r'^(?:https?://)?([^/]*)/.*', url, 'video uploader'),
            }
    def _get_real_id(self, page_id):
        # Hook for subclasses to decode the URL id (see MangomoloLiveIE)
        return page_id
    def _real_extract(self, url):
        page_id = self._get_real_id(self._match_id(url))
        # NOTE(review): assumes the embed URL always carries a query string;
        # url.split('?')[1] would raise IndexError otherwise — confirm upstream
        webpage = self._download_webpage(
            'https://player.mangomolo.com/v1/{}?{}'.format(self._TYPE, url.split('?')[1]), page_id)
        hidden_inputs = self._hidden_inputs(webpage)
        m3u8_entry_protocol = 'm3u8' if self._IS_LIVE else 'm3u8_native'
        format_url = self._html_search_regex(
            [
                r'(?:file|src)\s*:\s*"(https?://[^"]+?/playlist\.m3u8)',
                r'<a[^>]+href="(rtsp://[^"]+)"',
            ], webpage, 'format url')
        formats = self._extract_wowza_formats(
            format_url, page_id, m3u8_entry_protocol, ['smil'])
        return {
            'id': page_id,
            'title': page_id,
            'uploader_id': hidden_inputs.get('userid'),
            'duration': int_or_none(hidden_inputs.get('duration')),
            'is_live': self._IS_LIVE,
            'formats': formats,
        }
class MangomoloVideoIE(MangomoloBaseIE):
    """VOD variant: numeric id, non-live m3u8 extraction."""
    _TYPE = 'video'
    IE_NAME = 'mangomolo:' + _TYPE
    _SLUG = r'video\?.*?\bid=(?P<id>\d+)'
    _IS_LIVE = False
class MangomoloLiveIE(MangomoloBaseIE):
    """Live variant: the channel id in the URL is URL-escaped base64."""
    _TYPE = 'live'
    IE_NAME = f'mangomolo:{_TYPE}'
    _SLUG = r'(?:live|index)\?.*?\bchannelid=(?P<id>(?:[A-Za-z0-9+/=]|%2B|%2F|%3D)+)'
    _IS_LIVE = True

    def _get_real_id(self, page_id):
        # Undo both layers: percent-escaping first, then base64
        unquoted = urllib.parse.unquote(page_id)
        return base64.b64decode(unquoted).decode()
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/sverigesradio.py | yt_dlp/extractor/sverigesradio.py | from .common import InfoExtractor
from ..utils import (
determine_ext,
extract_attributes,
get_element_by_id,
get_element_html_by_class,
int_or_none,
str_or_none,
traverse_obj,
url_or_none,
)
class SverigesRadioBaseIE(InfoExtractor):
    """Shared extraction for Sveriges Radio audio; subclasses only set
    _AUDIO_TYPE and _VALID_URL."""
    _BASE_URL = 'https://sverigesradio.se/sida/playerajax/'
    _QUALITIES = ['low', 'medium', 'high']
    # Map container extension to audio codec for format metadata
    _EXT_TO_CODEC_MAP = {
        'mp3': 'mp3',
        'm4a': 'aac',
    }
    # Coding-format ids observed from the API, mapped to average bitrates
    _CODING_FORMAT_TO_ABR_MAP = {
        5: 128,
        11: 192,
        12: 32,
        13: 96,
    }
    def _real_extract(self, url):
        audio_id, display_id = self._match_valid_url(url).group('id', 'slug')
        if not audio_id:
            # Slug-only URLs: recover the numeric id from the page itself,
            # either from the audio button attributes or the GTM metadata blob
            webpage = self._download_webpage(url, display_id)
            audio_id = (
                traverse_obj(
                    get_element_html_by_class('audio-button', webpage),
                    ({extract_attributes}, ('data-audio-id', 'data-publication-id')), get_all=False)
                or self._parse_json(get_element_by_id('gtm-metadata', webpage), display_id)['pageId'])
        query = {
            'id': audio_id,
            'type': self._AUDIO_TYPE,
        }
        item = self._download_json(
            self._BASE_URL + 'audiometadata', audio_id,
            'Downloading audio JSON metadata', query=query)['items'][0]
        query['format'] = 'iis'
        urls = []  # tracks URLs already seen so duplicate qualities are skipped
        formats = []
        for quality in self._QUALITIES:
            query['quality'] = quality
            audio_url_data = self._download_json(
                self._BASE_URL + 'getaudiourl', audio_id,
                f'Downloading {quality} format JSON metadata',
                fatal=False, query=query) or {}
            audio_url = audio_url_data.get('audioUrl')
            if not audio_url or audio_url in urls:
                continue
            urls.append(audio_url)
            ext = determine_ext(audio_url)
            coding_format = audio_url_data.get('codingFormat')
            # Prefer the bitrate encoded in the filename; fall back to the
            # coding-format lookup table
            abr = int_or_none(self._search_regex(
                r'_a(\d+)\.m4a', audio_url, 'audio bitrate',
                default=None)) or self._CODING_FORMAT_TO_ABR_MAP.get(coding_format)
            formats.append({
                'abr': abr,
                'acodec': self._EXT_TO_CODEC_MAP.get(ext),
                'ext': ext,
                'format_id': str_or_none(coding_format),
                'vcodec': 'none',
                'url': audio_url,
            })
        return {
            'id': audio_id,
            'formats': formats,
            **traverse_obj(item, {
                'title': 'subtitle',
                'series': 'title',
                'duration': ('duration', {int_or_none}),
                'thumbnail': ('displayimageurl', {url_or_none}),
                'description': 'description',
            }),
        }
class SverigesRadioPublicationIE(SverigesRadioBaseIE):
    """News articles / group pages with attached audio ('publication' type)."""
    IE_NAME = 'sverigesradio:publication'
    _VALID_URL = r'https?://(?:www\.)?sverigesradio\.se/(?:sida/)?(?:artikel|gruppsida)(?:\.aspx\?.*?\bartikel=(?P<id>[0-9]+)|/(?P<slug>[\w-]+))'
    _TESTS = [{
        'url': 'https://sverigesradio.se/sida/artikel.aspx?programid=83&artikel=7038546',
        'md5': '6a4917e1923fccb080e5a206a5afa542',
        'info_dict': {
            'id': '7038546',
            'ext': 'm4a',
            'duration': 132,
            'series': 'Nyheter (Ekot)',
            'title': 'Esa Teittinen: Sanningen har inte kommit fram',
            'description': 'md5:daf7ce66a8f0a53d5465a5984d3839df',
            'thumbnail': r're:^https?://.*\.jpg',
        },
    }, {
        'url': 'https://sverigesradio.se/artikel/tysk-fotbollsfeber-bayern-munchens-10-ariga-segersvit-kan-brytas',
        'md5': 'f8a914ad50f491bb74eed403ab4bfef6',
        'info_dict': {
            'id': '8360345',
            'ext': 'm4a',
            'title': 'Tysk fotbollsfeber när Bayern Münchens 10-åriga segersvit kan brytas',
            'series': 'Radiosporten',
            'description': 'md5:5254610e20ce527ecb3a6102a06dcc5f',
            'duration': 72,
            'thumbnail': r're:^https?://.*\.jpg',
        },
    }, {
        'url': 'https://sverigesradio.se/sida/gruppsida.aspx?programid=3304&grupp=6247&artikel=7146887',
        'only_matching': True,
    }]
    _AUDIO_TYPE = 'publication'
class SverigesRadioEpisodeIE(SverigesRadioBaseIE):
    """Program episodes under /avsnitt/ ('episode' type)."""
    IE_NAME = 'sverigesradio:episode'
    _VALID_URL = r'https?://(?:www\.)?sverigesradio\.se/(?:sida/)?avsnitt/(?:(?P<id>\d+)|(?P<slug>[\w-]+))(?:$|[#?])'
    _TESTS = [{
        'url': 'https://sverigesradio.se/avsnitt/1140922?programid=1300',
        'md5': '20dc4d8db24228f846be390b0c59a07c',
        'info_dict': {
            'id': '1140922',
            'ext': 'mp3',
            'duration': 3307,
            'series': 'Konflikt',
            'title': 'Metoo och valen',
            'description': 'md5:fcb5c1f667f00badcc702b196f10a27e',
            'thumbnail': r're:^https?://.*\.jpg',
        },
    }, {
        'url': 'https://sverigesradio.se/avsnitt/p4-live-med-first-aid-kit-scandinavium-mars-2023',
        'md5': 'ce17fb82520a8033dbb846993d5589fe',
        'info_dict': {
            'id': '2160416',
            'ext': 'm4a',
            'title': 'P4 Live med First Aid Kit',
            'description': 'md5:6d5b78eed3d2b65f6de04daa45e9285d',
            'thumbnail': r're:^https?://.*\.jpg',
            'series': 'P4 Live',
            'duration': 5640,
        },
    }]
    _AUDIO_TYPE = 'episode'
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/mediaklikk.py | yt_dlp/extractor/mediaklikk.py | import urllib.parse
from .common import InfoExtractor
from ..utils import (
ExtractorError,
traverse_obj,
unified_strdate,
url_or_none,
)
class MediaKlikkIE(InfoExtractor):
    """Extractor for the Hungarian public-media sites mediaklikk.hu,
    m4sport.hu and hirado.hu, which share the same embedded player."""
    _VALID_URL = r'''(?x)https?://(?:www\.)?
                        (?:mediaklikk|m4sport|hirado)\.hu/.*?(?:videok?|cikk)/
                        (?:(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/(?P<day>[0-9]{1,2})/)?
                        (?P<id>[^/#?_]+)'''
    _TESTS = [{
        # mediaklikk
        'url': 'https://mediaklikk.hu/ajanlo/video/2025/08/04/heviz-dzsungel-a-viz-alatt-ajanlo-08-10/',
        'info_dict': {
            'id': '8573769',
            'title': 'Hévíz - dzsungel a víz alatt – Ajánló (08.10.)',
            'display_id': 'heviz-dzsungel-a-viz-alatt-ajanlo-08-10',
            'ext': 'mp4',
            'upload_date': '20250804',
            'thumbnail': 'https://cdn.cms.mtv.hu/wp-content/uploads/sites/4/2025/08/vlcsnap-2025-08-04-13h48m24s336.jpg',
        },
    }, {
        # mediaklikk - date in html
        'url': 'https://mediaklikk.hu/video/hazajaro-bilo-hegyseg-verocei-barangolas-a-drava-menten/',
        'info_dict': {
            'id': '8482167',
            'title': 'Hazajáró, Bilo-hegység - Verőcei barangolás a Dráva mentén',
            'display_id': 'hazajaro-bilo-hegyseg-verocei-barangolas-a-drava-menten',
            'ext': 'mp4',
            'upload_date': '20250703',
            'thumbnail': 'https://cdn.cms.mtv.hu/wp-content/uploads/sites/4/2025/07/2024-000307-M0010-01_3700_cover_01.jpg',
        },
    }, {
        # m4sport
        'url': 'https://m4sport.hu/video/2025/08/07/holnap-kezdodik-a-12-vilagjatekok/',
        'info_dict': {
            'id': '8581887',
            'title': 'Holnap kezdődik a 12. Világjátékok',
            'display_id': 'holnap-kezdodik-a-12-vilagjatekok',
            'ext': 'mp4',
            'upload_date': '20250807',
            'thumbnail': 'https://cdn.cms.mtv.hu/wp-content/uploads/sites/4/2025/08/vlcsnap-2025-08-06-20h30m48s817.jpg',
        },
    }, {
        # hirado
        'url': 'https://hirado.hu/video/2025/08/09/idojaras-jelentes-2025-augusztus-9-2230',
        'info_dict': {
            'id': '8592033',
            'title': 'Időjárás-jelentés, 2025. augusztus 9. 22:30',
            'display_id': 'idojaras-jelentes-2025-augusztus-9-2230',
            'ext': 'mp4',
            'upload_date': '20250809',
            'thumbnail': 'https://cdn.cms.mtv.hu/wp-content/uploads/sites/4/2025/08/Idojaras-jelentes-35-1.jpg',
        },
    }, {
        # hirado - subcategory
        'url': 'https://hirado.hu/belfold/video/2025/08/09/nyitott-porta-napok-2025/',
        'info_dict': {
            'id': '8590581',
            'title': 'Nyitott Porta Napok 2025',
            'display_id': 'nyitott-porta-napok-2025',
            'ext': 'mp4',
            'upload_date': '20250809',
            'thumbnail': 'https://cdn.cms.mtv.hu/wp-content/uploads/sites/4/2025/08/vlcsnap-2025-08-09-10h35m01s887.jpg',
        },
    }]
    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        display_id = mobj.group('id')
        webpage = self._download_webpage(url, display_id)
        # The article page embeds the player config as a loadPlayer(...) call
        player_data = self._search_json(
            r'loadPlayer\((?:\s*["\'][^"\']+["\']\s*,)?', webpage, 'player data', mobj)
        video_id = str(player_data['contentId'])
        title = player_data.get('title') or self._og_search_title(webpage, fatal=False) or \
            self._html_search_regex(r'<h\d+\b[^>]+\bclass="article_title">([^<]+)<', webpage, 'title')
        # Prefer the date embedded in the URL path; fall back to the article
        # date element in the HTML when the URL has no date component
        upload_date = unified_strdate(
            '{}-{}-{}'.format(mobj.group('year'), mobj.group('month'), mobj.group('day')))
        if not upload_date:
            upload_date = unified_strdate(self._html_search_regex(
                r'<p+\b[^>]+\bclass="article_date">([^<]+)<', webpage, 'upload date', default=None))
        # The player expects the (unquoted) token under the 'video' query key
        player_data['video'] = urllib.parse.unquote(player_data.pop('token'))
        player_page = self._download_webpage(
            'https://player.mediaklikk.hu/playernew/player.php', video_id,
            query=player_data, headers={'Referer': url})
        player_json = self._search_json(
            r'\bpl\.setup\s*\(', player_page, 'player json', video_id, end_pattern=r'\);')
        playlist_url = traverse_obj(
            player_json, ('playlist', lambda _, v: v['type'] == 'hls', 'file', {url_or_none}), get_all=False)
        if not playlist_url:
            raise ExtractorError('Unable to extract playlist url')
        formats, subtitles = self._extract_m3u8_formats_and_subtitles(playlist_url, video_id)
        return {
            'id': video_id,
            'title': title,
            'display_id': display_id,
            'formats': formats,
            'subtitles': subtitles,
            'upload_date': upload_date,
            'thumbnail': player_data.get('bgImage') or self._og_search_thumbnail(webpage),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/asobistage.py | yt_dlp/extractor/asobistage.py | import functools
from .common import InfoExtractor
from ..utils import str_or_none, url_or_none
from ..utils.traversal import traverse_obj
class AsobiStageIE(InfoExtractor):
    """Extractor for ASOBISTAGE event streams/archives; only channels the
    (optionally logged-in) user holds a ticket for are extracted."""
    IE_DESC = 'ASOBISTAGE (アソビステージ)'
    _VALID_URL = r'https?://asobistage\.asobistore\.jp/event/(?P<id>(?P<event>\w+)/(?P<type>archive|player)/(?P<slug>\w+))(?:[?#]|$)'
    _TESTS = [{
        'url': 'https://asobistage.asobistore.jp/event/315passionhour_2022summer/archive/frame',
        'info_dict': {
            'id': '315passionhour_2022summer/archive/frame',
            'title': '315プロダクションプレゼンツ 315パッションアワー!!!',
            'thumbnail': r're:^https?://[\w.-]+/\w+/\w+',
        },
        'playlist_count': 1,
        'playlist': [{
            'info_dict': {
                'id': 'edff52f2',
                'ext': 'mp4',
                'title': '315passion_FRAME_only',
                'thumbnail': r're:^https?://[\w.-]+/\w+/\w+',
            },
        }],
    }, {
        'url': 'https://asobistage.asobistore.jp/event/idolmaster_idolworld2023_goods/archive/live',
        'info_dict': {
            'id': 'idolmaster_idolworld2023_goods/archive/live',
            'title': 'md5:378510b6e830129d505885908bd6c576',
            'thumbnail': r're:^https?://[\w.-]+/\w+/\w+',
        },
        'playlist_count': 1,
        'playlist': [{
            'info_dict': {
                'id': '3aef7110',
                'ext': 'mp4',
                'title': 'asobistore_station_1020_serverREC',
                'thumbnail': r're:^https?://[\w.-]+/\w+/\w+',
            },
        }],
    }, {
        'url': 'https://asobistage.asobistore.jp/event/sidem_fclive_bpct/archive/premium_hc',
        'playlist_count': 4,
        'info_dict': {
            'id': 'sidem_fclive_bpct/archive/premium_hc',
            'title': '315 Production presents F@NTASTIC COMBINATION LIVE ~BRAINPOWER!!~/~CONNECTIME!!!!~',
            'thumbnail': r're:^https?://[\w.-]+/\w+/\w+',
        },
    }, {
        'url': 'https://asobistage.asobistore.jp/event/ijigenfes_utagassen/player/day1',
        'only_matching': True,
    }]
    _API_HOST = 'https://asobistage-api.asobistore.jp'
    _HEADERS = {}           # populated with the bearer token in _real_initialize
    _is_logged_in = False   # flipped on when API cookies are present
    @functools.cached_property
    def _owned_tickets(self):
        """Collect digital product ids from purchase and serial-code history
        (cached; empty when not logged in)."""
        owned_tickets = set()
        if not self._is_logged_in:
            return owned_tickets
        for path, name in [
            ('api/v1/purchase_history/list', 'ticket purchase history'),
            ('api/v1/serialcode/list', 'redemption history'),
        ]:
            response = self._download_json(
                f'{self._API_HOST}/{path}', None, f'Downloading {name}',
                f'Unable to download {name}', expected_status=400)
            # The API signals an expired/absent session with 'notlogin'
            if traverse_obj(response, ('payload', 'error_message'), 'error') == 'notlogin':
                self._is_logged_in = False
                break
            owned_tickets.update(
                traverse_obj(response, ('payload', 'value', ..., 'digital_product_id', {str_or_none})))
        return owned_tickets
    def _get_available_channel_id(self, channel):
        """Return the channel's vspf id when it is viewable (free or ticketed),
        else None. NOTE: 'chennel_vspf_id' (sic) is the API's own key spelling."""
        channel_id = traverse_obj(channel, ('chennel_vspf_id', {str}))
        if not channel_id:
            return None
        # if rights_type_id == 6, then 'No conditions (no login required - non-members are OK)'
        if traverse_obj(channel, ('viewrights', lambda _, v: v['rights_type_id'] == 6)):
            return channel_id
        available_tickets = traverse_obj(channel, (
            'viewrights', ..., ('tickets', 'serialcodes'), ..., 'digital_product_id', {str_or_none}))
        if not self._owned_tickets.intersection(available_tickets):
            self.report_warning(
                f'You are not a ticketholder for "{channel.get("channel_name") or channel_id}"')
            return None
        return channel_id
    def _real_initialize(self):
        if self._get_cookies(self._API_HOST):
            self._is_logged_in = True
        token = self._download_json(
            f'{self._API_HOST}/api/v1/vspf/token', None, 'Getting token', 'Unable to get token')
        self._HEADERS['Authorization'] = f'Bearer {token}'
    def _real_extract(self, url):
        webpage, urlh = self._download_webpage_handle(url, self._match_id(url))
        video_id, event, type_, slug = self._match_valid_url(urlh.url).group('id', 'event', 'type', 'slug')
        video_type = {'archive': 'archives', 'player': 'broadcasts'}[type_]
        event_data = traverse_obj(
            self._search_nextjs_data(webpage, video_id, default={}),
            ('props', 'pageProps', 'eventCMSData', {
                'title': ('event_name', {str}),
                'thumbnail': ('event_thumbnail_image', {url_or_none}),
            }))
        available_channels = traverse_obj(self._download_json(
            f'https://asobistage.asobistore.jp/cdn/v101/events/{event}/{video_type}.json',
            video_id, 'Getting channel list', 'Unable to get channel list'), (
            video_type, lambda _, v: v['broadcast_slug'] == slug,
            'channels', lambda _, v: v['chennel_vspf_id'] != '00000'))
        entries = []
        for channel_id in traverse_obj(available_channels, (..., {self._get_available_channel_id})):
            # Archives and live broadcasts use different survapi endpoints and
            # response shapes; normalize both into the same channel_data dict
            if video_type == 'archives':
                channel_json = self._download_json(
                    f'https://survapi.channel.or.jp/proxy/v1/contents/{channel_id}/get_by_cuid', channel_id,
                    'Getting archive channel info', 'Unable to get archive channel info', fatal=False,
                    headers=self._HEADERS)
                channel_data = traverse_obj(channel_json, ('ex_content', {
                    'm3u8_url': 'streaming_url',
                    'title': 'title',
                    'thumbnail': ('thumbnail', 'url'),
                }))
            else:  # video_type == 'broadcasts'
                channel_json = self._download_json(
                    f'https://survapi.channel.or.jp/ex/events/{channel_id}', channel_id,
                    'Getting live channel info', 'Unable to get live channel info', fatal=False,
                    headers=self._HEADERS, query={'embed': 'channel'})
                channel_data = traverse_obj(channel_json, ('data', {
                    'm3u8_url': ('Channel', 'Custom_live_url'),
                    'title': 'Name',
                    'thumbnail': 'Poster_url',
                }))
            entries.append({
                'id': channel_id,
                'title': channel_data.get('title'),
                'formats': self._extract_m3u8_formats(channel_data.get('m3u8_url'), channel_id, fatal=False),
                'is_live': video_type == 'broadcasts',
                'thumbnail': url_or_none(channel_data.get('thumbnail')),
            })
        if not self._is_logged_in and not entries:
            self.raise_login_required()
        return self.playlist_result(entries, video_id, **event_data)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/chilloutzone.py | yt_dlp/extractor/chilloutzone.py | import base64
from .common import InfoExtractor
from ..utils import (
clean_html,
int_or_none,
traverse_obj,
)
class ChilloutzoneIE(InfoExtractor):
    """Extractor for chilloutzone.net video pages.

    The page embeds its player configuration as a base64-encoded JSON blob
    (the ``cozVidData`` JS variable); videos are either self-hosted MP4s or
    delegated to a native platform (YouTube/Vimeo).
    """
    _VALID_URL = r'https?://(?:www\.)?chilloutzone\.net/video/(?P<id>[\w-]+)\.html'
    _TESTS = [{
        'url': 'https://www.chilloutzone.net/video/enemene-meck-alle-katzen-weg.html',
        'md5': 'a76f3457e813ea0037e5244f509e66d1',
        'info_dict': {
            'id': 'enemene-meck-alle-katzen-weg',
            'ext': 'mp4',
            'title': 'Enemene Meck - Alle Katzen weg',
            'description': 'Ist das der Umkehrschluss des Niesenden Panda-Babys?',
            'duration': 24,
        },
    }, {
        'note': 'Video hosted at YouTube',
        'url': 'https://www.chilloutzone.net/video/eine-sekunde-bevor.html',
        'info_dict': {
            'id': '1YVQaAgHyRU',
            'ext': 'mp4',
            'title': '16 Photos Taken 1 Second Before Disaster',
            'description': 'md5:58a8fcf6a459fe0a08f54140f0ad1814',
            'uploader': 'BuzzFeedVideo',
            'uploader_id': '@BuzzFeedVideo',
            'upload_date': '20131105',
            'availability': 'public',
            'thumbnail': 'https://i.ytimg.com/vi/1YVQaAgHyRU/maxresdefault.jpg',
            'tags': 'count:41',
            'like_count': int,
            'playable_in_embed': True,
            'channel_url': 'https://www.youtube.com/channel/UCpko_-a4wgz2u_DgDgd9fqA',
            'chapters': 'count:6',
            'live_status': 'not_live',
            'view_count': int,
            'categories': ['Entertainment'],
            'age_limit': 0,
            'channel_id': 'UCpko_-a4wgz2u_DgDgd9fqA',
            'duration': 100,
            'uploader_url': 'http://www.youtube.com/@BuzzFeedVideo',
            'channel_follower_count': int,
            'channel': 'BuzzFeedVideo',
        },
    }, {
        'url': 'https://www.chilloutzone.net/video/icon-blending.html',
        'md5': '2f9d6850ec567b24f0f4fa143b9aa2f9',
        'info_dict': {
            'id': 'LLNkHpSjBfc',
            'ext': 'mp4',
            'title': 'The Sunday Times Making of Icons',
            'description': 'md5:b9259fcf63a1669e42001e5db677f02a',
            'uploader': 'MadFoxUA',
            'uploader_id': '@MadFoxUA',
            'upload_date': '20140204',
            'channel_id': 'UCSZa9Y6-Vl7c11kWMcbAfCw',
            'channel_url': 'https://www.youtube.com/channel/UCSZa9Y6-Vl7c11kWMcbAfCw',
            'comment_count': int,
            'uploader_url': 'http://www.youtube.com/@MadFoxUA',
            'duration': 66,
            'live_status': 'not_live',
            'channel_follower_count': int,
            'playable_in_embed': True,
            'view_count': int,
            'like_count': int,
            'thumbnail': 'https://i.ytimg.com/vi/LLNkHpSjBfc/maxresdefault.jpg',
            'categories': ['Comedy'],
            'availability': 'public',
            'tags': [],
            'channel': 'MadFoxUA',
            'age_limit': 0,
        },
    }, {
        'url': 'https://www.chilloutzone.net/video/ordentlich-abgeschuettelt.html',
        'info_dict': {
            'id': 'ordentlich-abgeschuettelt',
            'ext': 'mp4',
            'title': 'Ordentlich abgeschüttelt',
            'description': 'md5:d41541966b75d3d1e8ea77a94ea0d329',
            'duration': 18,
        },
    }]
    def _real_extract(self, url):
        """Decode the embedded player config and return video info.

        Videos whose ``sourcePriority`` is ``native`` are delegated to the
        matching platform extractor; otherwise the direct ``mediaUrl`` is used.
        """
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The player config is stored base64-encoded in a JS variable
        b64_data = self._html_search_regex(
            r'var cozVidData\s*=\s*"([^"]+)"', webpage, 'video data')
        info = self._parse_json(base64.b64decode(b64_data).decode(), video_id)
        video_url = info.get('mediaUrl')
        native_platform = info.get('nativePlatform')
        # Prefer the native platform (YouTube/Vimeo) when the site marks it so
        if native_platform and info.get('sourcePriority') == 'native':
            native_video_id = info['nativeVideoId']
            if native_platform == 'youtube':
                return self.url_result(native_video_id, 'Youtube')
            elif native_platform == 'vimeo':
                return self.url_result(f'https://vimeo.com/{native_video_id}', 'Vimeo')
        elif not video_url:
            # Possibly a standard youtube embed?
            # TODO: Investigate if site still does this (there are no tests for it)
            return self.url_result(url, 'Generic')
        return {
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            **traverse_obj(info, {
                'title': 'title',
                'description': ('description', {clean_html}),
                'duration': ('videoLength', {int_or_none}),
                'width': ('videoWidth', {int_or_none}),
                'height': ('videoHeight', {int_or_none}),
            }),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ciscolive.py | yt_dlp/extractor/ciscolive.py | import itertools
from .common import InfoExtractor
from ..utils import (
clean_html,
float_or_none,
int_or_none,
parse_qs,
try_get,
urlencode_postdata,
)
class CiscoLiveBaseIE(InfoExtractor):
    """Shared Rainfocus API plumbing for the Cisco Live extractors."""
    # These appear to be constant across all Cisco Live presentations
    # and are not tied to any user session or event
    RAINFOCUS_API_URL = 'https://events.rainfocus.com/api/%s'
    RAINFOCUS_API_PROFILE_ID = 'Na3vqYdAlJFSxhYTYQGuMbpafMqftalz'
    RAINFOCUS_WIDGET_ID = 'n6l4Lo05R8fiy3RpUBm447dZN8uNWoye'
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/5647924234001/SyK2FdqjM_default/index.html?videoId=%s'
    HEADERS = {
        'Origin': 'https://ciscolive.cisco.com',
        'rfApiProfileId': RAINFOCUS_API_PROFILE_ID,
        'rfWidgetId': RAINFOCUS_WIDGET_ID,
    }
    def _call_api(self, ep, rf_id, query, referrer, note=None):
        """POST *query* to the Rainfocus endpoint *ep* and return parsed JSON."""
        return self._download_json(
            self.RAINFOCUS_API_URL % ep, rf_id, note=note,
            data=urlencode_postdata(query),
            # The API requires the page that issued the request as Referer
            headers={**self.HEADERS, 'Referer': referrer})
    def _parse_rf_item(self, rf_item):
        """Map a Rainfocus session record onto a Brightcove url_transparent result."""
        def first_time(key):
            # Duration and room both live in the first entry of "times"
            return try_get(rf_item, lambda x: x['times'][0][key])

        title = rf_item['title']
        brightcove_url = self.BRIGHTCOVE_URL_TEMPLATE % rf_item['videos'][0]['url']
        minutes = float_or_none(first_time('length'))
        return {
            '_type': 'url_transparent',
            'url': brightcove_url,
            'ie_key': 'BrightcoveNew',
            'title': title,
            'description': clean_html(rf_item.get('abstract')),
            # The API reports length in minutes; yt-dlp expects seconds
            'duration': minutes * 60 if minutes else minutes,
            'creator': try_get(rf_item, lambda x: x['participants'][0]['fullName']),
            'location': first_time('room'),
            'series': rf_item.get('eventName'),
        }
class CiscoLiveSessionIE(CiscoLiveBaseIE):
    """Extracts a single Cisco Live on-demand session by its Rainfocus ID."""
    _VALID_URL = r'https?://(?:www\.)?ciscolive(?:\.cisco)?\.com/[^#]*#/session/(?P<id>[^/?&]+)'
    _TESTS = [{
        'url': 'https://ciscolive.cisco.com/on-demand-library/?#/session/1423353499155001FoSs',
        'md5': 'c98acf395ed9c9f766941c70f5352e22',
        'info_dict': {
            'id': '5803694304001',
            'ext': 'mp4',
            'title': '13 Smart Automations to Monitor Your Cisco IOS Network',
            'description': 'md5:ec4a436019e09a918dec17714803f7cc',
            'timestamp': 1530305395,
            'upload_date': '20180629',
            'uploader_id': '5647924234001',
            'location': '16B Mezz.',
        },
    }, {
        'url': 'https://www.ciscolive.com/global/on-demand-library.html?search.event=ciscoliveemea2019#/session/15361595531500013WOU',
        'only_matching': True,
    }, {
        'url': 'https://www.ciscolive.com/global/on-demand-library.html?#/session/1490051371645001kNaS',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        rf_id = self._match_id(url)
        # The session endpoint returns a list; the requested session is first
        rf_result = self._call_api('session', rf_id, {'id': rf_id}, url)
        return self._parse_rf_item(rf_result['items'][0])
class CiscoLiveSearchIE(CiscoLiveBaseIE):
    """Extracts all playable sessions matching an on-demand-library search query."""
    _VALID_URL = r'https?://(?:www\.)?ciscolive(?:\.cisco)?\.com/(?:global/)?on-demand-library(?:\.html|/)'
    _TESTS = [{
        'url': 'https://ciscolive.cisco.com/on-demand-library/?search.event=ciscoliveus2018&search.technicallevel=scpsSkillLevel_aintroductory&search.focus=scpsSessionFocus_designAndDeployment#/',
        'info_dict': {
            'title': 'Search query',
        },
        'playlist_count': 5,
    }, {
        'url': 'https://ciscolive.cisco.com/on-demand-library/?search.technology=scpsTechnology_applicationDevelopment&search.technology=scpsTechnology_ipv6&search.focus=scpsSessionFocus_troubleshootingTroubleshooting#/',
        'only_matching': True,
    }, {
        'url': 'https://www.ciscolive.com/global/on-demand-library.html?search.technicallevel=scpsSkillLevel_aintroductory&search.event=ciscoliveemea2019&search.technology=scpsTechnology_dataCenter&search.focus=scpsSessionFocus_bestPractices#/',
        'only_matching': True,
    }]
    @classmethod
    def suitable(cls, url):
        # Direct session URLs are handled by CiscoLiveSessionIE instead
        return False if CiscoLiveSessionIE.suitable(url) else super().suitable(url)
    @staticmethod
    def _check_bc_id_exists(rf_item):
        # Only items carrying a numeric Brightcove video ID are playable
        return int_or_none(try_get(rf_item, lambda x: x['videos'][0]['url'])) is not None
    def _entries(self, query, url):
        """Yield parsed session entries, paging through the search API.

        *query* is mutated in place to carry the paging cursor
        (``from``/``size``) across requests.
        """
        query['size'] = 50
        query['from'] = 0
        for page_num in itertools.count(1):
            results = self._call_api(
                'search', None, query, url,
                f'Downloading search JSON page {page_num}')
            # Some responses nest the payload under sectionList[0]
            sl = try_get(results, lambda x: x['sectionList'][0], dict)
            if sl:
                results = sl
            items = results.get('items')
            if not items or not isinstance(items, list):
                break
            for item in items:
                if not isinstance(item, dict):
                    continue
                if not self._check_bc_id_exists(item):
                    continue
                yield self._parse_rf_item(item)
            # The server may cap the page size; adopt its reported value
            size = int_or_none(results.get('size'))
            if size is not None:
                query['size'] = size
            # Stop once the next page would start past the reported total
            total = int_or_none(results.get('total'))
            if total is not None and query['from'] + query['size'] > total:
                break
            query['from'] += query['size']
    def _real_extract(self, url):
        # The search filters are carried in the URL's query string
        query = parse_qs(url)
        query['type'] = 'session'
        return self.playlist_result(
            self._entries(query, url), playlist_title='Search query')
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/graspop.py | yt_dlp/extractor/graspop.py | from .common import InfoExtractor
from ..utils import update_url, url_or_none
from ..utils.traversal import traverse_obj
class GraspopIE(InfoExtractor):
    """Extractor for Graspop Metal Meeting VOD pages (vod.graspop.be)."""
    _VALID_URL = r'https?://vod\.graspop\.be/[a-z]{2}/(?P<id>\d+)/'
    _TESTS = [{
        'url': 'https://vod.graspop.be/fr/101556/thy-art-is-murder-concert/',
        'info_dict': {
            'id': '101556',
            'ext': 'mp4',
            'title': 'Thy Art Is Murder',
            'thumbnail': r're:https://cdn-mds\.pickx\.be/festivals/v3/global/original/.+\.jpg',
        },
    }]
    def _real_extract(self, url):
        """Fetch stream metadata from the Proximus API and build the result."""
        video_id = self._match_id(url)
        stream_info = self._download_json(
            f'https://tv.proximus.be/MWC/videocenter/festivals/{video_id}/stream', video_id)
        # Downgrade manifest request to avoid incomplete certificate chain error
        manifest_url = update_url(stream_info['source']['assetUri'], scheme='http')
        result = {
            'id': video_id,
            'formats': self._extract_m3u8_formats(manifest_url, video_id, 'mp4'),
        }
        result.update(traverse_obj(stream_info, {
            'title': ('name', {str}),
            'thumbnail': ('source', 'poster', {url_or_none}),
        }))
        return result
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/medici.py | yt_dlp/extractor/medici.py | import urllib.parse
from .common import InfoExtractor
from ..utils import (
filter_dict,
parse_iso8601,
traverse_obj,
try_call,
url_or_none,
)
class MediciIE(InfoExtractor):
    """Extractor for medici.tv and edu.medici.tv classical-music streams.

    Authentication is cookie-based; without a subscriber token the API only
    serves a preview stream (a warning is emitted in that case).
    """
    _VALID_URL = r'https?://(?:(?P<sub>www|edu)\.)?medici\.tv/[a-z]{2}/[\w.-]+/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://www.medici.tv/en/operas/thomas-ades-the-exterminating-angel-calixto-bieito-opera-bastille-paris',
        'md5': 'd483f74e7a7a9eac0dbe152ab189050d',
        'info_dict': {
            'id': '8032',
            'ext': 'mp4',
            'title': 'Thomas Adès\'s The Exterminating Angel',
            'description': 'md5:708ae6350dadc604225b4a6e32482bab',
            'thumbnail': r're:https://.+/.+\.jpg',
            'upload_date': '20240304',
            'timestamp': 1709561766,
            'display_id': 'thomas-ades-the-exterminating-angel-calixto-bieito-opera-bastille-paris',
        },
        'expected_warnings': [r'preview'],
    }, {
        'url': 'https://edu.medici.tv/en/operas/wagner-lohengrin-paris-opera-kirill-serebrennikov-piotr-beczala-kwangchul-youn-johanni-van-oostrum',
        'md5': '4ef3f4079a6e1c617584463a9eb84f99',
        'info_dict': {
            'id': '7900',
            'ext': 'mp4',
            'title': 'Wagner\'s Lohengrin',
            'description': 'md5:a384a62937866101f86902f21752cd89',
            'thumbnail': r're:https://.+/.+\.jpg',
            'upload_date': '20231017',
            'timestamp': 1697554771,
            'display_id': 'wagner-lohengrin-paris-opera-kirill-serebrennikov-piotr-beczala-kwangchul-youn-johanni-van-oostrum',
        },
        'expected_warnings': [r'preview'],
    }, {
        'url': 'https://www.medici.tv/en/concerts/sergey-smbatyan-conducts-mansurian-chouchane-siranossian-mario-brunello',
        'md5': '9dd757e53b22b2511e85ea9ea60e4815',
        'info_dict': {
            'id': '5712',
            'ext': 'mp4',
            'title': 'Sergey Smbatyan conducts Tigran Mansurian — With Chouchane Siranossian and Mario Brunello',
            'thumbnail': r're:https://.+/.+\.jpg',
            'description': 'md5:9411fe44c874bb10e9af288c65816e41',
            'upload_date': '20200323',
            'timestamp': 1584975600,
            'display_id': 'sergey-smbatyan-conducts-mansurian-chouchane-siranossian-mario-brunello',
        },
        'expected_warnings': [r'preview'],
    }, {
        'url': 'https://www.medici.tv/en/ballets/carmen-ballet-choregraphie-de-jiri-bubenicek-teatro-dellopera-di-roma',
        'md5': '40f5e76cb701a97a6d7ba23b62c49990',
        'info_dict': {
            'id': '7857',
            'ext': 'mp4',
            'title': 'Carmen by Jiří Bubeníček after Roland Petit, music by Bizet, de Falla, Castelnuovo-Tedesco, and Bonolis',
            'thumbnail': r're:https://.+/.+\.jpg',
            'description': 'md5:0f15a15611ed748020c769873e10a8bb',
            'upload_date': '20240223',
            'timestamp': 1708707600,
            'display_id': 'carmen-ballet-choregraphie-de-jiri-bubenicek-teatro-dellopera-di-roma',
        },
        'expected_warnings': [r'preview'],
    }, {
        'url': 'https://www.medici.tv/en/documentaries/la-sonnambula-liege-2023-documentaire',
        'md5': '87ff198018ce79a34757ab0dd6f21080',
        'info_dict': {
            'id': '7513',
            'ext': 'mp4',
            'title': 'La Sonnambula',
            'thumbnail': r're:https://.+/.+\.jpg',
            'description': 'md5:0caf9109a860fd50cd018df062a67f34',
            'upload_date': '20231103',
            'timestamp': 1699010830,
            'display_id': 'la-sonnambula-liege-2023-documentaire',
        },
        'expected_warnings': [r'preview'],
    }, {
        'url': 'https://edu.medici.tv/en/masterclasses/yvonne-loriod-olivier-messiaen',
        'md5': 'fb5dcec46d76ad20fbdbaabb01da191d',
        'info_dict': {
            'id': '3024',
            'ext': 'mp4',
            'title': 'Olivier Messiaen and Yvonne Loriod, pianists and teachers',
            'thumbnail': r're:https://.+/.+\.jpg',
            'description': 'md5:aab948e2f7690214b5c28896c83f1fc1',
            'upload_date': '20150223',
            'timestamp': 1424706608,
            'display_id': 'yvonne-loriod-olivier-messiaen',
        },
        'skip': 'Requires authentication; preview starts in the middle',
    }, {
        'url': 'https://www.medici.tv/en/jazz/makaya-mccraven-la-rochelle',
        'md5': '4cc279a8b06609782747c8f50beea2b3',
        'info_dict': {
            'id': '7922',
            'ext': 'mp4',
            'title': 'NEW: Makaya McCraven in La Rochelle',
            'thumbnail': r're:https://.+/.+\.jpg',
            'description': 'md5:b5a8aaeb6993d8ccb18bde8abb8aa8d2',
            'upload_date': '20231228',
            'timestamp': 1703754863,
            'display_id': 'makaya-mccraven-la-rochelle',
        },
        'expected_warnings': [r'preview'],
    }]
    def _real_extract(self, url):
        display_id, subdomain = self._match_valid_url(url).group('id', 'sub')
        # Hitting the page first sets the CSRF cookie the API expects
        self._request_webpage(url, display_id, 'Requesting CSRF token cookie')
        # edu.medici.tv is served from a separate "edu-satie" API prefix
        subdomain = 'edu-' if subdomain == 'edu' else ''
        origin = f'https://{urllib.parse.urlparse(url).hostname}'
        data = self._download_json(
            f'https://api.medici.tv/{subdomain}satie/edito/movie-file/{display_id}/', display_id,
            headers=filter_dict({
                # Bearer token lives URL-encoded in the auth cookie; absent when anonymous
                'Authorization': try_call(
                    lambda: urllib.parse.unquote(self._get_cookies(url)['auth._token.mAuth'].value)),
                'Device-Type': 'web',
                'Origin': origin,
                'Referer': f'{origin}/',
                'Accept': 'application/json, text/plain, */*',
            }))
        # Without valid subscriber credentials the API serves a preview only
        if not traverse_obj(data, ('video', 'is_full_video')) and traverse_obj(
                data, ('video', 'is_limited_by_user_access')):
            self.report_warning(
                'The full video is for subscribers only. Only previews will be downloaded. If you '
                'have used the --cookies-from-browser option, try using the --cookies option instead')
        formats, subtitles = self._extract_m3u8_formats_and_subtitles(
            data['video']['video_url'], display_id, 'mp4')
        return {
            'id': str(data['id']),
            'display_id': display_id,
            'formats': formats,
            'subtitles': subtitles,
            **traverse_obj(data, {
                'title': ('title', {str}),
                'description': ('subtitle', {str}),
                'thumbnail': ('picture', {url_or_none}),
                'timestamp': ('date_publish', {parse_iso8601}),
            }),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.