repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/banbye.py | yt_dlp/extractor/banbye.py | import math
import urllib.parse
from .common import InfoExtractor
from ..utils import (
InAdvancePagedList,
determine_ext,
format_field,
int_or_none,
join_nonempty,
traverse_obj,
unified_timestamp,
url_or_none,
)
class BanByeBaseIE(InfoExtractor):
    """Shared constants and playlist helpers for the BanBye extractors."""

    _API_BASE = 'https://api.banbye.com'
    _CDN_BASE = 'https://cdn.banbye.com'
    _VIDEO_BASE = 'https://banbye.com/watch'

    @staticmethod
    def _extract_playlist_id(url, param='playlist'):
        # Pull the playlist identifier out of the URL query string; None if absent.
        query = urllib.parse.urlparse(url).query
        return urllib.parse.parse_qs(query).get(param, [None])[0]

    def _extract_playlist(self, playlist_id):
        # Resolve the playlist via the API and emit one watch-page URL result
        # per contained video id.
        data = self._download_json(f'{self._API_BASE}/playlists/{playlist_id}', playlist_id)
        entries = [
            self.url_result(f'{self._VIDEO_BASE}/{video_id}', BanByeIE)
            for video_id in data['videoIds']
        ]
        return self.playlist_result(entries, playlist_id, data.get('name'))
class BanByeIE(BanByeBaseIE):
    """Extract a single BanBye video.

    When the watch URL also carries a ``playlistId`` query parameter,
    extraction may be delegated to the playlist handler inherited from
    BanByeBaseIE, subject to --yes-playlist/--no-playlist.
    """

    _VALID_URL = r'https?://(?:www\.)?banbye\.com/(?:en/)?watch/(?P<id>[\w-]+)'
    _TESTS = [{
        # ['src']['mp4']['levels'] direct mp4 urls only
        'url': 'https://banbye.com/watch/v_ytfmvkVYLE8T',
        'md5': '2f4ea15c5ca259a73d909b2cfd558eb5',
        'info_dict': {
            'id': 'v_ytfmvkVYLE8T',
            'ext': 'mp4',
            'title': 'md5:5ec098f88a0d796f987648de6322ba0f',
            'description': 'md5:4d94836e73396bc18ef1fa0f43e5a63a',
            'uploader': 'wRealu24',
            'channel_id': 'ch_wrealu24',
            'channel_url': 'https://banbye.com/channel/ch_wrealu24',
            'timestamp': 1647604800,
            'upload_date': '20220318',
            'duration': 1931,
            'thumbnail': r're:https?://.*\.webp',
            'tags': 'count:5',
            'like_count': int,
            'dislike_count': int,
            'view_count': int,
            'comment_count': int,
        },
    }, {
        'url': 'https://banbye.com/watch/v_2JjQtqjKUE_F?playlistId=p_Ld82N6gBw_OJ',
        'info_dict': {
            'title': 'Krzysztof Karoń',
            'id': 'p_Ld82N6gBw_OJ',
        },
        'playlist_mincount': 9,
    }, {
        # ['src']['mp4']['levels'] direct mp4 urls only
        'url': 'https://banbye.com/watch/v_kb6_o1Kyq-CD',
        'info_dict': {
            'id': 'v_kb6_o1Kyq-CD',
            'ext': 'mp4',
            'title': 'Co tak naprawdę dzieje się we Francji?! Czy Warszawa a potem cała Polska będzie drugim Paryżem?!🤔🇵🇱',
            'description': 'md5:82be4c0e13eae8ea1ca8b9f2e07226a8',
            'uploader': 'Marcin Rola - MOIM ZDANIEM!🇵🇱',
            'channel_id': 'ch_QgWnHvDG2fo5',
            'channel_url': 'https://banbye.com/channel/ch_QgWnHvDG2fo5',
            'duration': 597,
            'timestamp': 1688642656,
            'upload_date': '20230706',
            'thumbnail': 'https://cdn.banbye.com/video/v_kb6_o1Kyq-CD/96.webp',
            'tags': ['Paryż', 'Francja', 'Polska', 'Imigranci', 'Morawiecki', 'Tusk'],
            'like_count': int,
            'dislike_count': int,
            'view_count': int,
            'comment_count': int,
        },
    }, {
        # ['src']['hls']['levels'] variant m3u8 urls only; master m3u8 is 404
        'url': 'https://banbye.com/watch/v_a_gPFuC9LoW5',
        'info_dict': {
            'id': 'v_a_gPFuC9LoW5',
            'ext': 'mp4',
            'title': 'md5:183524056bebdfa245fd6d214f63c0fe',
            'description': 'md5:943ac87287ca98d28d8b8797719827c6',
            'uploader': 'wRealu24',
            'channel_id': 'ch_wrealu24',
            'channel_url': 'https://banbye.com/channel/ch_wrealu24',
            'upload_date': '20231113',
            'timestamp': 1699874062,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'thumbnail': 'https://cdn.banbye.com/video/v_a_gPFuC9LoW5/96.webp',
            'tags': ['jaszczur', 'sejm', 'lewica', 'polska', 'ukrainizacja', 'pierwszeposiedzeniesejmu'],
        },
        'expected_warnings': ['Failed to download m3u8'],
    }, {
        # ['src']['hls']['masterPlaylist'] m3u8 only
        'url': 'https://banbye.com/watch/v_B0rsKWsr-aaa',
        'info_dict': {
            'id': 'v_B0rsKWsr-aaa',
            'ext': 'mp4',
            'title': 'md5:00b254164b82101b3f9e5326037447ed',
            'description': 'md5:3fd8b48aa81954ba024bc60f5de6e167',
            'uploader': 'PSTV Piotr Szlachtowicz ',
            'channel_id': 'ch_KV9EVObkB9wB',
            'channel_url': 'https://banbye.com/channel/ch_KV9EVObkB9wB',
            'upload_date': '20240629',
            'timestamp': 1719646816,
            'duration': 2377,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'thumbnail': 'https://cdn.banbye.com/video/v_B0rsKWsr-aaa/96.webp',
            'tags': ['Biden', 'Trump', 'Wybory', 'USA'],
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        playlist_id = self._extract_playlist_id(url, 'playlistId')

        # Honor --yes-playlist/--no-playlist when the URL identifies both a
        # video and a playlist.
        if self._yes_playlist(playlist_id, video_id):
            return self._extract_playlist(playlist_id)

        data = self._download_json(f'{self._API_BASE}/videos/{video_id}', video_id)
        # The CDN serves thumbnails for a fixed set of sizes as .webp files.
        thumbnails = [{
            'id': f'{quality}p',
            'url': f'{self._CDN_BASE}/video/{video_id}/{quality}.webp',
        } for quality in [48, 96, 144, 240, 512, 1080]]

        formats = []
        # data=b'' forces an empty-body POST to the /url endpoint.
        url_data = self._download_json(f'{self._API_BASE}/videos/{video_id}/url', video_id, data=b'')
        if master_url := traverse_obj(url_data, ('src', 'hls', 'masterPlaylist', {url_or_none})):
            formats = self._extract_m3u8_formats(master_url, video_id, 'mp4', m3u8_id='hls', fatal=False)

        # Per-quality "levels" maps (mp4 and/or hls) keyed by video height.
        for format_id, format_url in traverse_obj(url_data, (
                'src', ('mp4', 'hls'), 'levels', {dict.items}, lambda _, v: url_or_none(v[1]))):
            ext = determine_ext(format_url)
            is_hls = ext == 'm3u8'
            formats.append({
                'url': format_url,
                'ext': 'mp4' if is_hls else ext,
                'format_id': join_nonempty(is_hls and 'hls', format_id),
                'protocol': 'm3u8_native' if is_hls else 'https',
                'height': int_or_none(format_id),
            })
        # Master-playlist variants may overlap with the per-level HLS entries.
        self._remove_duplicate_formats(formats)

        return {
            'id': video_id,
            'title': data.get('title'),
            'description': data.get('desc'),
            'uploader': traverse_obj(data, ('channel', 'name')),
            'channel_id': data.get('channelId'),
            'channel_url': format_field(data, 'channelId', 'https://banbye.com/channel/%s'),
            'timestamp': unified_timestamp(data.get('publishedAt')),
            'duration': data.get('duration'),
            'tags': data.get('tags'),
            'formats': formats,
            'thumbnails': thumbnails,
            'like_count': data.get('likes'),
            'dislike_count': data.get('dislikes'),
            'view_count': data.get('views'),
            'comment_count': data.get('commentCount'),
        }
class BanByeChannelIE(BanByeBaseIE):
    """Extract all videos of a BanBye channel as a paged playlist."""

    _VALID_URL = r'https?://(?:www\.)?banbye\.com/(?:en/)?channel/(?P<id>\w+)'
    _TESTS = [{
        'url': 'https://banbye.com/channel/ch_wrealu24',
        'info_dict': {
            'title': 'wRealu24',
            'id': 'ch_wrealu24',
            'description': 'md5:da54e48416b74dfdde20a04867c0c2f6',
        },
        'playlist_mincount': 791,
    }, {
        'url': 'https://banbye.com/channel/ch_wrealu24?playlist=p_Ld82N6gBw_OJ',
        'info_dict': {
            'title': 'Krzysztof Karoń',
            'id': 'p_Ld82N6gBw_OJ',
        },
        'playlist_count': 9,
    }]
    _PAGE_SIZE = 100

    def _real_extract(self, url):
        channel_id = self._match_id(url)

        # A ?playlist=... query selects a specific playlist instead of the
        # full channel feed.
        playlist_id = self._extract_playlist_id(url)
        if playlist_id:
            return self._extract_playlist(playlist_id)

        def fetch_page(page_num):
            # One API page of channel videos, newest first.
            page = self._download_json(f'{self._API_BASE}/videos', channel_id, query={
                'channelId': channel_id,
                'sort': 'new',
                'limit': self._PAGE_SIZE,
                'offset': page_num * self._PAGE_SIZE,
            }, note=f'Downloading page {page_num + 1}')
            return [self.url_result(f"{self._VIDEO_BASE}/{video['_id']}", BanByeIE)
                    for video in page['items']]

        channel_data = self._download_json(f'{self._API_BASE}/channels/{channel_id}', channel_id)
        # Total page count is known up front from the channel's video count.
        page_count = math.ceil(channel_data['videoCount'] / self._PAGE_SIZE)
        entries = InAdvancePagedList(fetch_page, page_count, self._PAGE_SIZE)

        return self.playlist_result(
            entries, channel_id, channel_data.get('name'), channel_data.get('description'))
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/rtp.py | yt_dlp/extractor/rtp.py | import base64
import json
import re
import urllib.parse
from .common import InfoExtractor, Request
from ..utils import (
determine_ext,
int_or_none,
js_to_json,
parse_duration,
parse_iso8601,
url_or_none,
)
from ..utils.traversal import traverse_obj
class RTPIE(InfoExtractor):
    """Extractor for RTP Play (rtp.pt) episodes.

    Tries the mobile JSON API first (requires a guest auth token); if the
    episode is unavailable there, falls back to scraping the web page,
    whose embedded player config may be obfuscated.
    """

    _VALID_URL = r'https?://(?:www\.)?rtp\.pt/play/(?:[^/#?]+/)?p(?P<program_id>\d+)/(?P<id>e\d+)'
    _TESTS = [{
        'url': 'http://www.rtp.pt/play/p405/e174042/paixoes-cruzadas',
        'md5': 'e736ce0c665e459ddb818546220b4ef8',
        'info_dict': {
            'id': 'e174042',
            'ext': 'mp3',
            'title': 'Paixões Cruzadas',
            'description': 'md5:af979e58ba0ab73f78435fc943fdb070',
            'thumbnail': r're:^https?://.*\.jpg',
            'series': 'Paixões Cruzadas',
            'duration': 2950.0,
            'modified_timestamp': 1553693464,
            'modified_date': '20190327',
            'timestamp': 1417219200,
            'upload_date': '20141129',
        },
    }, {
        'url': 'https://www.rtp.pt/play/zigzag/p13166/e757904/25-curiosidades-25-de-abril',
        'md5': '5b4859940e3adef61247a77dfb76046a',
        'info_dict': {
            'id': 'e757904',
            'ext': 'mp4',
            'title': 'Estudar ou não estudar',
            'description': 'md5:3bfd7eb8bebfd5711a08df69c9c14c35',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1711958401,
            'duration': 146.0,
            'upload_date': '20240401',
            'modified_timestamp': 1712242991,
            'series': '25 Curiosidades, 25 de Abril',
            'episode_number': 2,
            'episode': 'Estudar ou não estudar',
            'modified_date': '20240404',
        },
    }, {
        # Episode not accessible through API
        'url': 'https://www.rtp.pt/play/estudoemcasa/p7776/e500050/portugues-1-ano',
        'md5': '57660c0b46db9f22118c52cbd65975e4',
        'info_dict': {
            'id': 'e500050',
            'ext': 'mp4',
            'title': 'Português - 1.º ano',
            'duration': 1669.0,
            'description': 'md5:be68925c81269f8c6886589f25fe83ea',
            'upload_date': '20201020',
            'timestamp': 1603180799,
            'thumbnail': 'https://cdn-images.rtp.pt/EPG/imagens/39482_59449_64850.png?v=3&w=860',
        },
    }]

    # User-Agent string of the official iOS app, sent with API requests.
    _USER_AGENT = 'rtpplay/2.0.66 (pt.rtp.rtpplay; build:2066; iOS 15.8.3) Alamofire/5.9.1'
    # Memoized guest auth token (class-level default; set on first fetch).
    _AUTH_TOKEN = None

    def _fetch_auth_token(self):
        """Fetch a guest auth token once and cache it; returns None on failure."""
        if self._AUTH_TOKEN:
            return self._AUTH_TOKEN
        # NOTE: assignment via ``self`` shadows the class-level default with
        # an instance attribute, caching the token for subsequent calls.
        self._AUTH_TOKEN = traverse_obj(self._download_json(Request(
            'https://rtpplayapi.rtp.pt/play/api/2/token-manager',
            headers={
                'Accept': '*/*',
                'rtp-play-auth': 'RTPPLAY_MOBILE_IOS',
                'rtp-play-auth-hash': 'fac9c328b2f27e26e03d7f8942d66c05b3e59371e16c2a079f5c83cc801bd3ee',
                'rtp-play-auth-timestamp': '2145973229682',
                'User-Agent': self._USER_AGENT,
            # keep_header_casing preserves the exact casing of the custom
            # rtp-play-* headers in the outgoing request.
            }, extensions={'keep_header_casing': True}), None,
            note='Fetching guest auth token', errnote='Could not fetch guest auth token',
            fatal=False), ('token', 'token', {str}))
        return self._AUTH_TOKEN

    @staticmethod
    def _cleanup_media_url(url):
        """Drop unusable hosts and map DRM paths to their non-DRM equivalents."""
        # NOTE(review): streaming-ondemand.rtp.pt URLs are deliberately
        # discarded here — presumably DRM-only delivery; confirm upstream.
        if urllib.parse.urlparse(url).netloc == 'streaming-ondemand.rtp.pt':
            return None
        return url.replace('/drm-fps/', '/hls/').replace('/drm-dash/', '/dash/')

    def _extract_formats(self, media_urls, episode_id):
        """Build (formats, subtitles) from a list of candidate media URLs."""
        formats = []
        subtitles = {}
        # Deduplicate, validate, and clean the URLs before probing them.
        for media_url in set(traverse_obj(media_urls, (..., {url_or_none}, {self._cleanup_media_url}))):
            ext = determine_ext(media_url)
            if ext == 'm3u8':
                fmts, subs = self._extract_m3u8_formats_and_subtitles(
                    media_url, episode_id, m3u8_id='hls', fatal=False)
                formats.extend(fmts)
                self._merge_subtitles(subs, target=subtitles)
            elif ext == 'mpd':
                fmts, subs = self._extract_mpd_formats_and_subtitles(
                    media_url, episode_id, mpd_id='dash', fatal=False)
                formats.extend(fmts)
                self._merge_subtitles(subs, target=subtitles)
            else:
                # Anything else is treated as a direct progressive download.
                formats.append({
                    'url': media_url,
                    'format_id': 'http',
                })
        return formats, subtitles

    def _extract_from_api(self, program_id, episode_id):
        """Extract via the mobile API; returns None if the episode is unavailable."""
        auth_token = self._fetch_auth_token()
        if not auth_token:
            return
        # episode_id[1:] strips the leading 'e' from the URL episode id.
        episode_data = traverse_obj(self._download_json(
            f'https://www.rtp.pt/play/api/1/get-episode/{program_id}/{episode_id[1:]}', episode_id,
            query={'include_assets': 'true', 'include_webparams': 'true'},
            headers={
                'Accept': '*/*',
                'Authorization': f'Bearer {auth_token}',
                'User-Agent': self._USER_AGENT,
            }, fatal=False), 'result', {dict})
        if not episode_data:
            return

        asset_urls = traverse_obj(episode_data, ('assets', 0, 'asset_url', {dict}))
        # Collect stream URLs from both the hls/dash entries and the
        # multibitrate url_hls/url_dash variants.
        media_urls = traverse_obj(asset_urls, (
            ((('hls', 'dash'), 'stream_url'), ('multibitrate', ('url_hls', 'url_dash'))),))
        formats, subtitles = self._extract_formats(media_urls, episode_id)

        # External WebVTT subtitle tracks listed alongside the assets.
        for sub_data in traverse_obj(asset_urls, ('subtitles', 'vtt_list', lambda _, v: url_or_none(v['file']))):
            subtitles.setdefault(sub_data.get('code') or 'pt', []).append({
                'url': sub_data['file'],
                'name': sub_data.get('language'),
            })

        return {
            'id': episode_id,
            'formats': formats,
            'subtitles': subtitles,
            'thumbnail': traverse_obj(episode_data, ('assets', 0, 'asset_thumbnail', {url_or_none})),
            **traverse_obj(episode_data, ('episode', {
                'title': (('episode_title', 'program_title'), {str}, filter, any),
                'alt_title': ('episode_subtitle', {str}, filter),
                'description': (('episode_description', 'episode_summary'), {str}, filter, any),
                'timestamp': ('episode_air_date', {parse_iso8601(delimiter=' ')}),
                'modified_timestamp': ('episode_lastchanged', {parse_iso8601(delimiter=' ')}),
                'duration': ('episode_duration_complete', {parse_duration}),
                'episode': ('episode_title', {str}, filter),
                'episode_number': ('episode_number', {int_or_none}),
                'season': ('program_season', {str}, filter),
                'series': ('program_title', {str}, filter),
            })),
        }

    # Matches the page's JS obfuscation pattern:
    # atob(decodeURIComponent([<chunks>].join(''))), capturing the chunk array.
    _RX_OBFUSCATION = re.compile(r'''(?xs)
        atob\s*\(\s*decodeURIComponent\s*\(\s*
            (\[[0-9A-Za-z%,'"]*\])
        \s*\.\s*join\(\s*(?:""|'')\s*\)\s*\)\s*\)
    ''')

    def __unobfuscate(self, data):
        """Replace obfuscated atob(...) expressions with decoded JSON string literals."""
        return self._RX_OBFUSCATION.sub(
            lambda m: json.dumps(
                base64.b64decode(urllib.parse.unquote(
                    ''.join(json.loads(m.group(1))),
                )).decode('iso-8859-1')),
            data)

    def _extract_from_html(self, url, episode_id):
        """Fallback: scrape the player config out of the episode web page."""
        webpage = self._download_webpage(url, episode_id)

        formats = []
        subtitles = {}
        # The player file config is either a JS object (possibly obfuscated)
        # or a plain quoted URL; take the last occurrence on the page.
        media_urls = traverse_obj(re.findall(r'(?:var\s+f\s*=|RTPPlayer\({[^}]+file:)\s*({[^}]+}|"[^"]+")', webpage), (
            -1, (({self.__unobfuscate}, {js_to_json}, {json.loads}, {dict.values}, ...), {json.loads})))
        formats, subtitles = self._extract_formats(media_urls, episode_id)

        return {
            'id': episode_id,
            'formats': formats,
            'subtitles': subtitles,
            'description': self._html_search_meta(['og:description', 'twitter:description'], webpage, default=None),
            'thumbnail': self._html_search_meta(['og:image', 'twitter:image'], webpage, default=None),
            **self._search_json_ld(webpage, episode_id, default={}),
            'title': self._html_search_meta(['og:title', 'twitter:title'], webpage, default=None),
        }

    def _real_extract(self, url):
        program_id, episode_id = self._match_valid_url(url).group('program_id', 'id')
        # API first; HTML scraping only if the API yields nothing.
        return self._extract_from_api(program_id, episode_id) or self._extract_from_html(url, episode_id)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/wsj.py | yt_dlp/extractor/wsj.py | from .common import InfoExtractor
from ..utils import (
float_or_none,
int_or_none,
join_nonempty,
unified_strdate,
)
class WSJIE(InfoExtractor):
    """Extract Wall Street Journal / Barron's videos via the WSJ video API."""

    _VALID_URL = r'''(?x)
        (?:
            https?://video-api\.wsj\.com/api-video/player/iframe\.html\?.*?\bguid=|
            https?://(?:www\.)?(?:wsj|barrons)\.com/video/(?:[^/]+/)+|
            wsj:
        )
        (?P<id>[a-fA-F0-9-]{36})
    '''
    IE_DESC = 'Wall Street Journal'
    _TESTS = [{
        'url': 'http://video-api.wsj.com/api-video/player/iframe.html?guid=1BD01A4C-BFE8-40A5-A42F-8A8AF9898B1A',
        'md5': 'e230a5bb249075e40793b655a54a02e4',
        'info_dict': {
            'id': '1BD01A4C-BFE8-40A5-A42F-8A8AF9898B1A',
            'ext': 'mp4',
            'upload_date': '20150202',
            'uploader_id': 'jdesai',
            'creator': 'jdesai',
            'categories': list,  # a long list
            'duration': 90,
            'title': 'Bills Coach Rex Ryan Updates His Old Jets Tattoo',
        },
    }, {
        'url': 'http://www.wsj.com/video/can-alphabet-build-a-smarter-city/359DDAA8-9AC1-489C-82E6-0429C1E430E0.html',
        'only_matching': True,
    }, {
        'url': 'http://www.barrons.com/video/capitalism-deserves-more-respect-from-millennials/F301217E-6F46-43AE-B8D2-B7180D642EE9.html',
        'only_matching': True,
    }, {
        'url': 'https://www.wsj.com/video/series/a-brief-history-of/the-modern-cell-carrier-how-we-got-here/980E2187-401D-48A1-B82B-1486CEE06CB9',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Look the GUID up in the video API; only the fields we use are requested.
        info = self._download_json(
            'http://video-api.wsj.com/api-video/find_all_videos.asp', video_id,
            query={
                'type': 'guid',
                'count': 1,
                'query': video_id,
                'fields': ','.join((
                    'type', 'hls', 'videoMP4List', 'thumbnailList', 'author',
                    'description', 'name', 'duration', 'videoURL', 'titletag',
                    'formattedCreationDate', 'keywords', 'editor')),
            })['items'][0]
        title = info.get('name', info.get('titletag'))

        formats = []

        f4m_url = info.get('videoURL')
        if f4m_url:
            formats.extend(self._extract_f4m_formats(
                f4m_url, video_id, f4m_id='hds', fatal=False))

        m3u8_url = info.get('hls')
        if m3u8_url:
            # Use the already-extracted m3u8_url instead of re-indexing
            # info['hls'] (previously an inconsistent second lookup).
            formats.extend(self._extract_m3u8_formats(
                m3u8_url, video_id, ext='mp4',
                entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))

        # Progressive MP4 renditions, one format per bitrate entry.
        for v in info.get('videoMP4List', []):
            mp4_url = v.get('url')
            if not mp4_url:
                continue
            tbr = int_or_none(v.get('bitrate'))
            formats.append({
                'url': mp4_url,
                'format_id': join_nonempty('http', tbr),
                'tbr': tbr,
                'width': int_or_none(v.get('width')),
                'height': int_or_none(v.get('height')),
                'fps': float_or_none(v.get('fps')),
            })

        return {
            'id': video_id,
            'formats': formats,
            # Thumbnails are conveniently in the correct format already
            'thumbnails': info.get('thumbnailList'),
            'creator': info.get('author'),
            'uploader_id': info.get('editor'),
            'duration': int_or_none(info.get('duration')),
            'upload_date': unified_strdate(info.get(
                'formattedCreationDate'), day_first=False),
            'title': title,
            'categories': info.get('keywords'),
        }
class WSJArticleIE(InfoExtractor):
    """Find the video embedded in a WSJ article/opinion page and delegate to WSJIE."""

    _VALID_URL = r'(?i)https?://(?:www\.)?wsj\.com/(?:articles|opinion)/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://www.wsj.com/articles/dont-like-china-no-pandas-for-you-1490366939?',
        'info_dict': {
            'id': '4B13FA62-1D8C-45DB-8EA1-4105CB20B362',
            'ext': 'mp4',
            'upload_date': '20170221',
            'uploader_id': 'ralcaraz',
            'title': 'Bao Bao the Panda Leaves for China',
        },
    }, {
        'url': 'https://www.wsj.com/opinion/hamas-hostages-caskets-bibas-family-israel-gaza-29da083b',
        'info_dict': {
            'id': 'CE68D629-8DB8-4CD3-B30A-92112C102054',
            'ext': 'mp4',
            'upload_date': '20241007',
            'uploader_id': 'Tinnes, David',
            'title': 'WSJ Opinion: "Get the Jew": The Crown Heights Riot Revisited',
        },
    }]

    def _real_extract(self, url):
        article_id = self._match_id(url)
        # impersonate=True: the article pages are served behind bot protection.
        page = self._download_webpage(url, article_id, impersonate=True)
        # Locate the 36-char video GUID in any of the known embed attributes.
        guid = self._search_regex(
            r'(?:id=["\']video|video-|iframe\.html\?guid=|data-src=["\'])([a-fA-F0-9-]{36})',
            page, 'video id')
        # Hand off to WSJIE through its internal wsj: URL scheme.
        return self.url_result(f'wsj:{guid}', WSJIE.ie_key(), guid)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/archiveorg.py | yt_dlp/extractor/archiveorg.py | from __future__ import annotations
import json
import re
import urllib.parse
from .common import InfoExtractor
from .youtube import YoutubeBaseInfoExtractor
from ..utils import (
KNOWN_EXTENSIONS,
bug_reports_message,
clean_html,
dict_get,
extract_attributes,
get_element_by_id,
get_element_text_and_html_by_tag,
int_or_none,
join_nonempty,
js_to_json,
merge_dicts,
orderedSet,
parse_duration,
parse_qs,
str_or_none,
traverse_obj,
unified_strdate,
unified_timestamp,
url_or_none,
)
class ArchiveOrgIE(InfoExtractor):
    """Extractor for archive.org audio/video items.

    An identifier may resolve to a single media file or to a playlist of
    files. Playlist entries and subtitle tracks are taken from the
    embeddable player page; everything else comes from the metadata API.
    """

    IE_NAME = 'archive.org'
    IE_DESC = 'archive.org video and audio'
    _VALID_URL = r'https?://(?:www\.)?archive\.org/(?:details|embed)/(?P<id>[^?#]+)(?:[?].*)?$'
    _TESTS = [{
        'url': 'http://archive.org/details/XD300-23_68HighlightsAResearchCntAugHumanIntellect',
        'md5': '8af1d4cf447933ed3c7f4871162602db',
        'info_dict': {
            'id': 'XD300-23_68HighlightsAResearchCntAugHumanIntellect',
            'ext': 'ogv',
            'title': '1968 Demo - FJCC Conference Presentation Reel #1',
            'description': 'md5:da45c349df039f1cc8075268eb1b5c25',
            'release_date': '19681210',
            'timestamp': 1268695290,
            'upload_date': '20100315',
            'creators': ['SRI International'],
            'uploader': 'laura@archive.org',
            'thumbnail': r're:https://archive\.org/download/.*\.jpg',
            'display_id': 'XD300-23_68HighlightsAResearchCntAugHumanIntellect.cdr',
            'track': 'XD300-23 68HighlightsAResearchCntAugHumanIntellect',
        },
    }, {
        'url': 'https://archive.org/details/Cops1922',
        'md5': '0869000b4ce265e8ca62738b336b268a',
        'info_dict': {
            'id': 'Cops1922',
            'ext': 'mp4',
            'title': 'Buster Keaton\'s "Cops" (1922)',
            'description': 'md5:cd6f9910c35aedd5fc237dbc3957e2ca',
            'uploader': 'yorkmba99@hotmail.com',
            'timestamp': 1387699629,
            'upload_date': '20131222',
            'display_id': 'Cops-v2.mp4',
            'thumbnail': r're:https://archive\.org/download/.*\.jpg',
            'duration': 1091.96,
            'track': 'Cops-v2',
        },
    }, {
        'url': 'http://archive.org/embed/XD300-23_68HighlightsAResearchCntAugHumanIntellect',
        'only_matching': True,
    }, {
        'url': 'https://archive.org/details/Election_Ads',
        'md5': 'eec5cddebd4793c6a653b69c3b11f2e6',
        'info_dict': {
            'id': 'Election_Ads/Commercial-JFK1960ElectionAdCampaignJingle.mpg',
            'title': 'Commercial-JFK1960ElectionAdCampaignJingle.mpg',
            'ext': 'mpg',
            'thumbnail': r're:https://archive\.org/download/.*\.jpg',
            'duration': 59.77,
            'display_id': 'Commercial-JFK1960ElectionAdCampaignJingle.mpg',
            'track': 'Commercial-JFK1960ElectionAdCampaignJingle',
        },
    }, {
        'url': 'https://archive.org/details/Election_Ads/Commercial-Nixon1960ElectionAdToughonDefense.mpg',
        'md5': 'ea1eed8234e7d4165f38c8c769edef38',
        'info_dict': {
            'id': 'Election_Ads/Commercial-Nixon1960ElectionAdToughonDefense.mpg',
            'title': 'Commercial-Nixon1960ElectionAdToughonDefense.mpg',
            'ext': 'mpg',
            'timestamp': 1205588045,
            'uploader': 'mikedavisstripmaster@yahoo.com',
            'description': '1960 Presidential Campaign Election Commercials John F Kennedy, Richard M Nixon',
            'upload_date': '20080315',
            'display_id': 'Commercial-Nixon1960ElectionAdToughonDefense.mpg',
            'duration': 59.51,
            'license': 'http://creativecommons.org/licenses/publicdomain/',
            'thumbnail': r're:https://archive\.org/download/.*\.jpg',
            'track': 'Commercial-Nixon1960ElectionAdToughonDefense',
        },
    }, {
        'url': 'https://archive.org/details/gd1977-05-08.shure57.stevenson.29303.flac16',
        'md5': '7d07ffb42aba6537c28e053efa4b54c9',
        'info_dict': {
            'id': 'gd1977-05-08.shure57.stevenson.29303.flac16/gd1977-05-08d01t01.flac',
            'title': 'Turning',
            'ext': 'flac',
            'track': 'Turning',
            'creators': ['Grateful Dead'],
            'display_id': 'gd1977-05-08d01t01.flac',
            'track_number': 1,
            'album': '1977-05-08 - Barton Hall - Cornell University',
            'duration': 39.8,
        },
    }, {
        'url': 'https://archive.org/details/gd1977-05-08.shure57.stevenson.29303.flac16/gd1977-05-08d01t07.flac',
        'md5': 'a07cd8c6ab4ee1560f8a0021717130f3',
        'info_dict': {
            'id': 'gd1977-05-08.shure57.stevenson.29303.flac16/gd1977-05-08d01t07.flac',
            'title': 'Deal',
            'ext': 'flac',
            'timestamp': 1205895624,
            'uploader': 'mvernon54@yahoo.com',
            'description': 'md5:6c921464414814720c6593810a5c7e3d',
            'upload_date': '20080319',
            'location': 'Barton Hall - Cornell University',
            'duration': 438.68,
            'track': 'Deal',
            'creators': ['Grateful Dead'],
            'album': '1977-05-08 - Barton Hall - Cornell University',
            'release_date': '19770508',
            'display_id': 'gd1977-05-08d01t07.flac',
            'track_number': 7,
        },
    }, {
        # FIXME: give a better error message than just IndexError when all available formats are restricted
        'url': 'https://archive.org/details/lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik',
        'md5': '7cb019baa9b332e82ea7c10403acd180',
        'info_dict': {
            'id': 'lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik/disc1/01.01. Bells Of Rostov.mp3',
            'title': 'Bells Of Rostov',
            'ext': 'mp3',
        },
        'skip': 'restricted',
    }, {
        'url': 'https://archive.org/details/lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik/disc1/02.02.+Song+And+Chorus+In+The+Polovetsian+Camp+From+%22Prince+Igor%22+(Act+2%2C+Scene+1).mp3',
        'md5': '1d0aabe03edca83ca58d9ed3b493a3c3',
        'info_dict': {
            'id': 'lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik/disc1/02.02. Song And Chorus In The Polovetsian Camp From "Prince Igor" (Act 2, Scene 1).mp3',
            'title': 'Song And Chorus In The Polovetsian Camp From "Prince Igor" (Act 2, Scene 1)',
            'ext': 'mp3',
            'timestamp': 1569662587,
            'uploader': 'associate-joygen-odiongan@archive.org',
            'description': 'md5:012b2d668ae753be36896f343d12a236',
            'upload_date': '20190928',
        },
        'skip': 'restricted',
    }, {
        # Original formats are private
        'url': 'https://archive.org/details/irelandthemakingofarepublic',
        'info_dict': {
            'id': 'irelandthemakingofarepublic',
            'title': 'Ireland: The Making of a Republic',
            'upload_date': '20160610',
            'description': 'md5:f70956a156645a658a0dc9513d9e78b7',
            'uploader': 'dimitrios@archive.org',
            'creators': ['British Broadcasting Corporation', 'Time-Life Films'],
            'timestamp': 1465594947,
        },
        'playlist': [
            {
                'md5': '0b211261b26590d49df968f71b90690d',
                'info_dict': {
                    'id': 'irelandthemakingofarepublic/irelandthemakingofarepublicreel1_01.mov',
                    'ext': 'mp4',
                    'title': 'irelandthemakingofarepublicreel1_01.mov',
                    'duration': 130.46,
                    'thumbnail': 'https://archive.org/download/irelandthemakingofarepublic/irelandthemakingofarepublic.thumbs/irelandthemakingofarepublicreel1_01_000117.jpg',
                    'display_id': 'irelandthemakingofarepublicreel1_01.mov',
                    'track': 'irelandthemakingofarepublicreel1 01',
                },
            }, {
                'md5': '67335ee3b23a0da930841981c1e79b02',
                'info_dict': {
                    'id': 'irelandthemakingofarepublic/irelandthemakingofarepublicreel1_02.mov',
                    'ext': 'mp4',
                    'duration': 1395.13,
                    'title': 'irelandthemakingofarepublicreel1_02.mov',
                    'display_id': 'irelandthemakingofarepublicreel1_02.mov',
                    'thumbnail': 'https://archive.org/download/irelandthemakingofarepublic/irelandthemakingofarepublic.thumbs/irelandthemakingofarepublicreel1_02_001374.jpg',
                    'track': 'irelandthemakingofarepublicreel1 02',
                },
            }, {
                'md5': 'e470e86787893603f4a341a16c281eb5',
                'info_dict': {
                    'id': 'irelandthemakingofarepublic/irelandthemakingofarepublicreel2.mov',
                    'ext': 'mp4',
                    'duration': 1602.67,
                    'title': 'irelandthemakingofarepublicreel2.mov',
                    'thumbnail': 'https://archive.org/download/irelandthemakingofarepublic/irelandthemakingofarepublic.thumbs/irelandthemakingofarepublicreel2_001554.jpg',
                    'display_id': 'irelandthemakingofarepublicreel2.mov',
                    'track': 'irelandthemakingofarepublicreel2',
                },
            },
        ],
    }, {
        # The reviewbody is None for one of the reviews; just need to extract data without crashing
        'url': 'https://archive.org/details/gd95-04-02.sbd.11622.sbeok.shnf/gd95-04-02d1t04.shn',
        'info_dict': {
            'id': 'gd95-04-02.sbd.11622.sbeok.shnf/gd95-04-02d1t04.shn',
            'ext': 'mp3',
            'title': 'Stuck Inside of Mobile with the Memphis Blues Again',
            'creators': ['Grateful Dead'],
            'duration': 338.31,
            'track': 'Stuck Inside of Mobile with the Memphis Blues Again',
            'description': 'md5:764348a470b986f1217ffd38d6ac7b72',
            'display_id': 'gd95-04-02d1t04.shn',
            'location': 'Pyramid Arena',
            'uploader': 'jon@archive.org',
            'album': '1995-04-02 - Pyramid Arena',
            'upload_date': '20040519',
            'track_number': 4,
            'release_date': '19950402',
            'timestamp': 1084927901,
        },
    }, {
        # metadata['metadata']['description'] is a list of strings instead of str
        'url': 'https://archive.org/details/pra-KZ1908.02',
        'info_dict': {
            'id': 'pra-KZ1908.02',
            'ext': 'mp3',
            'display_id': 'KZ1908.02_01.wav',
            'title': 'Crips and Bloods speak about gang life',
            'description': 'md5:2b56b35ff021311e3554b47a285e70b3',
            'uploader': 'jake@archive.org',
            'duration': 1733.74,
            'track': 'KZ1908.02 01',
            'track_number': 1,
            'timestamp': 1336026026,
            'upload_date': '20120503',
            'release_year': 1992,
        },
    }]

    @staticmethod
    def _playlist_data(webpage):
        """Return the playlist JSON embedded in the <play-av> player element."""
        element = get_element_text_and_html_by_tag('play-av', webpage)[1]
        return json.loads(extract_attributes(element)['playlist'])

    def _real_extract(self, url):
        video_id = urllib.parse.unquote_plus(self._match_id(url))
        identifier, _, entry_id = video_id.partition('/')

        # Archive.org metadata API doesn't clearly demarcate playlist entries
        # or subtitle tracks, so we get them from the embeddable player.
        embed_page = self._download_webpage(f'https://archive.org/embed/{identifier}', identifier)
        playlist = self._playlist_data(embed_page)

        entries = {}
        for p in playlist:
            # If the user specified a playlist entry in the URL, ignore the
            # rest of the playlist.
            if entry_id and p['orig'] != entry_id:
                continue

            entries[p['orig']] = {
                'formats': [],
                'thumbnails': [],
                'artist': p.get('artist'),
                'track': p.get('title'),
                'subtitles': {},
            }

            for track in p.get('tracks', []):
                if track['kind'] != 'subtitles':
                    continue
                # Fixed: store the track under the 'subtitles' mapping
                # ({label: [subtitle dicts]}); previously it was written to a
                # stray top-level key and never surfaced as a subtitle.
                entries[p['orig']]['subtitles'].setdefault(track['label'], []).append({
                    'url': 'https://archive.org/' + track['file'].lstrip('/'),
                })

        metadata = self._download_json(f'https://archive.org/metadata/{identifier}', identifier)
        m = metadata['metadata']
        identifier = m['identifier']

        info = traverse_obj(m, {
            'title': ('title', {str}),
            'description': ('description', ({str}, (..., all, {' '.join})), {clean_html}, filter, any),
            'uploader': (('uploader', 'adder'), {str}, any),
            'creators': ('creator', (None, ...), {str}, filter, all, filter),
            'license': ('licenseurl', {url_or_none}),
            'release_date': ('date', {unified_strdate}),
            'timestamp': (('publicdate', 'addeddate'), {unified_timestamp}, any),
            'location': ('venue', {str}),
            'release_year': ('year', {int_or_none}),
        })
        info.update({
            'id': identifier,
            'webpage_url': f'https://archive.org/details/{identifier}',
        })

        # Attach per-file metadata, thumbnails, and downloadable formats to
        # the corresponding playlist entry.
        for f in metadata['files']:
            if f['name'] in entries:
                entries[f['name']] = merge_dicts(entries[f['name']], {
                    'id': identifier + '/' + f['name'],
                    **traverse_obj(f, {
                        'title': (('title', 'name'), {str}, any),
                        'display_id': ('name', {str}),
                        'description': ('description', ({str}, (..., all, {' '.join})), {clean_html}, filter, any),
                        'creators': ('creator', (None, ...), {str}, filter, all, filter),
                        'duration': ('length', {parse_duration}),
                        'track_number': ('track', {int_or_none}),
                        'album': ('album', {str}),
                        'discnumber': ('disc', {int_or_none}),
                        'release_year': ('year', {int_or_none}),
                    }),
                })
                entry = entries[f['name']]
            elif traverse_obj(f, ('original', {str})) in entries:
                # Derivative file: attribute it to its original's entry.
                entry = entries[f['original']]
            else:
                continue

            if f.get('format') == 'Thumbnail':
                entry['thumbnails'].append({
                    'id': f['name'],
                    'url': 'https://archive.org/download/' + identifier + '/' + f['name'],
                    'width': int_or_none(f.get('width')),
                    # Fixed: height was previously read from f.get('width').
                    'height': int_or_none(f.get('height')),
                    'filesize': int_or_none(f.get('size'))})

            _, has_ext, extension = f['name'].rpartition('.')
            if not has_ext:
                extension = None

            # We don't want to skip private formats if the user has access to them,
            # however without access to an account with such privileges we can't implement/test this.
            # For now to be safe, we will only skip them if there is no user logged in.
            is_logged_in = bool(self._get_cookies('https://archive.org').get('logged-in-sig'))
            if extension in KNOWN_EXTENSIONS and (not f.get('private') or is_logged_in):
                entry['formats'].append({
                    'url': 'https://archive.org/download/' + identifier + '/' + urllib.parse.quote(f['name']),
                    'format': f.get('format'),
                    'width': int_or_none(f.get('width')),
                    'height': int_or_none(f.get('height')),
                    'filesize': int_or_none(f.get('size')),
                    'protocol': 'https',
                    'source_preference': 0 if f.get('source') == 'original' else -1,
                    'format_note': f.get('source'),
                })

        for entry in entries.values():
            entry['_format_sort_fields'] = ('source', )

        if len(entries) == 1:
            # If there's only one item, use it as the main info dict
            only_video = next(iter(entries.values()))
            if entry_id:
                info = merge_dicts(only_video, info)
            else:
                info = merge_dicts(info, only_video)
        else:
            # Otherwise, we have a playlist.
            info['_type'] = 'playlist'
            info['entries'] = list(entries.values())

        if metadata.get('reviews'):
            info['comments'] = []
            for review in metadata['reviews']:
                # join_nonempty tolerates a None reviewbody (see test above).
                info['comments'].append({
                    'id': review.get('review_id'),
                    'author': review.get('reviewer'),
                    'text': join_nonempty('reviewtitle', 'reviewbody', from_dict=review, delim='\n\n'),
                    'timestamp': unified_timestamp(review.get('createdate')),
                    'parent': 'root'})

        return info
class YoutubeWebArchiveIE(InfoExtractor):
IE_NAME = 'web.archive:youtube'
IE_DESC = 'web.archive.org saved youtube videos, "ytarchive:" prefix'
_VALID_URL = r'''(?x)(?:(?P<prefix>ytarchive:)|
(?:https?://)?web\.archive\.org/
(?:web/)?(?:(?P<date>[0-9]{14})?[0-9A-Za-z_*]*/)? # /web and the version index is optional
(?:https?(?::|%3[Aa])//)?(?:
(?:\w+\.)?youtube\.com(?::(?:80|443))?/watch(?:\.php)?(?:\?|%3[fF])(?:[^\#]+(?:&|%26))?v(?:=|%3[dD]) # Youtube URL
|(?:wayback-fakeurl\.archive\.org/yt/) # Or the internal fake url
)
)(?P<id>[0-9A-Za-z_-]{11})
(?(prefix)
(?::(?P<date2>[0-9]{14}))?$|
(?:%26|[#&]|$)
)'''
_TESTS = [
{
'url': 'https://web.archive.org/web/20150415002341/https://www.youtube.com/watch?v=aYAGB11YrSs',
'info_dict': {
'id': 'aYAGB11YrSs',
'ext': 'webm',
'title': 'Team Fortress 2 - Sandviches!',
'description': 'md5:4984c0f9a07f349fc5d8e82ab7af4eaf',
'upload_date': '20110926',
'uploader': 'Zeurel',
'channel_id': 'UCukCyHaD-bK3in_pKpfH9Eg',
'duration': 32,
'uploader_id': 'Zeurel',
'uploader_url': 'https://www.youtube.com/user/Zeurel',
'thumbnail': r're:https?://.*\.(jpg|webp)',
'channel_url': 'https://www.youtube.com/channel/UCukCyHaD-bK3in_pKpfH9Eg',
},
}, {
# Internal link
'url': 'https://web.archive.org/web/2oe/http://wayback-fakeurl.archive.org/yt/97t7Xj_iBv0',
'info_dict': {
'id': '97t7Xj_iBv0',
'ext': 'mp4',
'title': 'Why Machines That Bend Are Better',
'description': 'md5:00404df2c632d16a674ff8df1ecfbb6c',
'upload_date': '20190312',
'uploader': 'Veritasium',
'channel_id': 'UCHnyfMqiRRG1u-2MsSQLbXA',
'duration': 771,
'uploader_id': '1veritasium',
'uploader_url': 'https://www.youtube.com/user/1veritasium',
'thumbnail': r're:https?://.*\.(jpg|webp)',
'channel_url': 'https://www.youtube.com/channel/UCHnyfMqiRRG1u-2MsSQLbXA',
},
}, {
# Video from 2012, webm format itag 45. Newest capture is deleted video, with an invalid description.
# Should use the date in the link. Title ends with '- Youtube'. Capture has description in eow-description
'url': 'https://web.archive.org/web/20120712231619/http://www.youtube.com/watch?v=AkhihxRKcrs&gl=US&hl=en',
'info_dict': {
'id': 'AkhihxRKcrs',
'ext': 'webm',
'title': 'Limited Run: Mondo\'s Modern Classic 1 of 3 (SDCC 2012)',
'upload_date': '20120712',
'duration': 398,
'description': 'md5:ff4de6a7980cb65d951c2f6966a4f2f3',
'uploader_id': 'machinima',
'uploader_url': 'https://www.youtube.com/user/machinima',
'thumbnail': r're:https?://.*\.(jpg|webp)',
'uploader': 'machinima',
},
}, {
# FLV video. Video file URL does not provide itag information
'url': 'https://web.archive.org/web/20081211103536/http://www.youtube.com/watch?v=jNQXAC9IVRw',
'info_dict': {
'id': 'jNQXAC9IVRw',
'ext': 'flv',
'title': 'Me at the zoo',
'upload_date': '20050423',
'channel_id': 'UC4QobU6STFB0P71PMvOGN5A',
'duration': 19,
'description': 'md5:10436b12e07ac43ff8df65287a56efb4',
'uploader_id': 'jawed',
'uploader_url': 'https://www.youtube.com/user/jawed',
'channel_url': 'https://www.youtube.com/channel/UC4QobU6STFB0P71PMvOGN5A',
'thumbnail': r're:https?://.*\.(jpg|webp)',
'uploader': 'jawed',
},
}, {
'url': 'https://web.archive.org/web/20110712231407/http://www.youtube.com/watch?v=lTx3G6h2xyA',
'info_dict': {
'id': 'lTx3G6h2xyA',
'ext': 'mp4',
'title': 'Madeon - Pop Culture (live mashup)',
'upload_date': '20110711',
'uploader': 'Madeon',
'channel_id': 'UCqMDNf3Pn5L7pcNkuSEeO3w',
'duration': 204,
'description': 'md5:f7535343b6eda34a314eff8b85444680',
'uploader_id': 'itsmadeon',
'uploader_url': 'https://www.youtube.com/user/itsmadeon',
'channel_url': 'https://www.youtube.com/channel/UCqMDNf3Pn5L7pcNkuSEeO3w',
'thumbnail': r're:https?://.*\.(jpg|webp)',
},
}, {
# First capture is of dead video, second is the oldest from CDX response.
'url': 'https://web.archive.org/https://www.youtube.com/watch?v=1JYutPM8O6E',
'info_dict': {
'id': '1JYutPM8O6E',
'ext': 'mp4',
'title': 'Fake Teen Doctor Strikes AGAIN! - Weekly Weird News',
'upload_date': '20160218',
'channel_id': 'UCdIaNUarhzLSXGoItz7BHVA',
'duration': 1235,
'description': 'md5:21032bae736421e89c2edf36d1936947',
'uploader_id': 'MachinimaETC',
'uploader_url': 'https://www.youtube.com/user/MachinimaETC',
'channel_url': 'https://www.youtube.com/channel/UCdIaNUarhzLSXGoItz7BHVA',
'thumbnail': r're:https?://.*\.(jpg|webp)',
'uploader': 'ETC News',
},
}, {
# First capture of dead video, capture date in link links to dead capture.
'url': 'https://web.archive.org/web/20180803221945/https://www.youtube.com/watch?v=6FPhZJGvf4E',
'info_dict': {
'id': '6FPhZJGvf4E',
'ext': 'mp4',
'title': 'WTF: Video Games Still Launch BROKEN?! - T.U.G.S.',
'upload_date': '20160219',
'channel_id': 'UCdIaNUarhzLSXGoItz7BHVA',
'duration': 797,
'description': 'md5:a1dbf12d9a3bd7cb4c5e33b27d77ffe7',
'uploader_id': 'MachinimaETC',
'uploader_url': 'https://www.youtube.com/user/MachinimaETC',
'channel_url': 'https://www.youtube.com/channel/UCdIaNUarhzLSXGoItz7BHVA',
'thumbnail': r're:https?://.*\.(jpg|webp)',
'uploader': 'ETC News',
},
'expected_warnings': [
r'unable to download capture webpage \(it may not be archived\)',
],
}, { # Very old YouTube page, has - YouTube in title.
'url': 'http://web.archive.org/web/20070302011044/http://youtube.com/watch?v=-06-KB9XTzg',
'info_dict': {
'id': '-06-KB9XTzg',
'ext': 'flv',
'title': 'New Coin Hack!! 100% Safe!!',
},
}, {
'url': 'web.archive.org/https://www.youtube.com/watch?v=dWW7qP423y8',
'info_dict': {
'id': 'dWW7qP423y8',
'ext': 'mp4',
'title': 'It\'s Bootleg AirPods Time.',
'upload_date': '20211021',
'channel_id': 'UC7Jwj9fkrf1adN4fMmTkpug',
'channel_url': 'https://www.youtube.com/channel/UC7Jwj9fkrf1adN4fMmTkpug',
'duration': 810,
'description': 'md5:7b567f898d8237b256f36c1a07d6d7bc',
'thumbnail': r're:https?://.*\.(jpg|webp)',
'uploader': 'DankPods',
},
}, {
# player response contains '};' See: https://github.com/ytdl-org/youtube-dl/issues/27093
'url': 'https://web.archive.org/web/20200827003909if_/http://www.youtube.com/watch?v=6Dh-RL__uN4',
'info_dict': {
'id': '6Dh-RL__uN4',
'ext': 'mp4',
'title': 'bitch lasagna',
'upload_date': '20181005',
'channel_id': 'UC-lHJZR3Gqxm24_Vd_AJ5Yw',
'channel_url': 'https://www.youtube.com/channel/UC-lHJZR3Gqxm24_Vd_AJ5Yw',
'duration': 135,
'description': 'md5:2dbe4051feeff2dab5f41f82bb6d11d0',
'uploader': 'PewDiePie',
'uploader_id': 'PewDiePie',
'uploader_url': 'https://www.youtube.com/user/PewDiePie',
'thumbnail': r're:https?://.*\.(jpg|webp)',
},
}, {
# ~June 2010 Capture. swfconfig
'url': 'https://web.archive.org/web/0/https://www.youtube.com/watch?v=8XeW5ilk-9Y',
'info_dict': {
'id': '8XeW5ilk-9Y',
'ext': 'flv',
'title': 'Story of Stuff, The Critique Part 4 of 4',
'duration': 541,
'description': 'md5:28157da06f2c5e94c97f7f3072509972',
'uploader': 'HowTheWorldWorks',
'uploader_id': 'HowTheWorldWorks',
'thumbnail': r're:https?://.*\.(jpg|webp)',
'uploader_url': 'https://www.youtube.com/user/HowTheWorldWorks',
'upload_date': '20090520',
},
}, {
# Jan 2011: watch-video-date/eow-date surrounded by whitespace
'url': 'https://web.archive.org/web/20110126141719/http://www.youtube.com/watch?v=Q_yjX80U7Yc',
'info_dict': {
'id': 'Q_yjX80U7Yc',
'ext': 'webm',
'title': 'Spray Paint Art by Clay Butler: Purple Fantasy Forest',
'uploader_id': 'claybutlermusic',
'description': 'md5:4595264559e3d0a0ceb3f011f6334543',
'upload_date': '20090803',
'uploader': 'claybutlermusic',
'thumbnail': r're:https?://.*\.(jpg|webp)',
'duration': 132,
'uploader_url': 'https://www.youtube.com/user/claybutlermusic',
},
}, {
# ~May 2009 swfArgs. ytcfg is spread out over various vars
'url': 'https://web.archive.org/web/0/https://www.youtube.com/watch?v=c5uJgG05xUY',
'info_dict': {
'id': 'c5uJgG05xUY',
'ext': 'webm',
'title': 'Story of Stuff, The Critique Part 1 of 4',
'uploader_id': 'HowTheWorldWorks',
'uploader': 'HowTheWorldWorks',
'uploader_url': 'https://www.youtube.com/user/HowTheWorldWorks',
'upload_date': '20090513',
'description': 'md5:4ca77d79538064e41e4cc464e93f44f0',
'thumbnail': r're:https?://.*\.(jpg|webp)',
'duration': 754,
},
}, {
# ~June 2012. Upload date is in another lang so cannot extract.
'url': 'https://web.archive.org/web/20120607174520/http://www.youtube.com/watch?v=xWTLLl-dQaA',
'info_dict': {
'id': 'xWTLLl-dQaA',
'ext': 'mp4',
'title': 'Black Nerd eHarmony Video Bio Parody (SPOOF)',
'uploader_url': 'https://www.youtube.com/user/BlackNerdComedy',
'description': 'md5:e25f0133aaf9e6793fb81c18021d193e',
'uploader_id': 'BlackNerdComedy',
'uploader': 'BlackNerdComedy',
'duration': 182,
'thumbnail': r're:https?://.*\.(jpg|webp)',
},
}, {
# ~July 2013
'url': 'https://web.archive.org/web/*/https://www.youtube.com/watch?v=9eO1aasHyTM',
'info_dict': {
'id': '9eO1aasHyTM',
'ext': 'mp4',
'title': 'Polar-oid',
'description': 'Cameras and bears are dangerous!',
'uploader_url': 'https://www.youtube.com/user/punkybird',
'uploader_id': 'punkybird',
'duration': 202,
'channel_id': 'UC62R2cBezNBOqxSerfb1nMQ',
'channel_url': 'https://www.youtube.com/channel/UC62R2cBezNBOqxSerfb1nMQ',
'upload_date': '20060428',
'uploader': 'punkybird',
},
}, {
# April 2020: Player response in player config
'url': 'https://web.archive.org/web/20200416034815/https://www.youtube.com/watch?v=Cf7vS8jc7dY&gl=US&hl=en',
'info_dict': {
'id': 'Cf7vS8jc7dY',
'ext': 'mp4',
'title': 'A Dramatic Pool Story (by Jamie Spicer-Lewis) - Game Grumps Animated',
'duration': 64,
'upload_date': '20200408',
'uploader_id': 'GameGrumps',
'uploader': 'GameGrumps',
'channel_url': 'https://www.youtube.com/channel/UC9CuvdOVfMPvKCiwdGKL3cQ',
'channel_id': 'UC9CuvdOVfMPvKCiwdGKL3cQ',
'thumbnail': r're:https?://.*\.(jpg|webp)',
'description': 'md5:c625bb3c02c4f5fb4205971e468fa341',
'uploader_url': 'https://www.youtube.com/user/GameGrumps',
},
}, {
# watch7-user-header with yt-user-info
'url': 'ytarchive:kbh4T_b4Ixw:20160307085057',
'info_dict': {
'id': 'kbh4T_b4Ixw',
'ext': 'mp4',
'title': 'Shovel Knight OST - Strike the Earth! Plains of Passage 16 bit SNES style remake / remix',
'channel_url': 'https://www.youtube.com/channel/UCnTaGvsHmMy792DWeT6HbGA',
'uploader': 'Nelward music',
'duration': 213,
'description': 'md5:804b4a9ce37b050a5fefdbb23aeba54d',
'thumbnail': r're:https?://.*\.(jpg|webp)',
'upload_date': '20150503',
'channel_id': 'UCnTaGvsHmMy792DWeT6HbGA',
},
}, {
# April 2012
'url': 'https://web.archive.org/web/0/https://www.youtube.com/watch?v=SOm7mPoPskU',
'info_dict': {
'id': 'SOm7mPoPskU',
'ext': 'mp4',
'title': 'Boyfriend - Justin Bieber Parody',
'uploader_url': 'https://www.youtube.com/user/thecomputernerd01',
'uploader': 'thecomputernerd01',
'thumbnail': r're:https?://.*\.(jpg|webp)',
'description': 'md5:dd7fa635519c2a5b4d566beaecad7491',
'duration': 200,
'upload_date': '20120407',
'uploader_id': 'thecomputernerd01',
},
}, {
# Contains split audio/video formats
'url': 'ytarchive:o_T_S_TU12M',
'info_dict': {
'id': 'o_T_S_TU12M',
'ext': 'mp4',
'title': 'Prairie Pulse 1218; Lin Enger, Paul Olson',
'description': 'md5:36e7a34cdc8508e35a920ec042e799c7',
'uploader': 'Prairie Public',
'channel_id': 'UC4BOzQel6tvJm7OEDd3vZlw',
'channel_url': 'https://www.youtube.com/channel/UC4BOzQel6tvJm7OEDd3vZlw',
'duration': 1606,
'upload_date': '20150213',
},
}, {
# Video unavailable through wayback-fakeurl
'url': 'ytarchive:SQCom7wjGDs',
'info_dict': {
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | true |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/audimedia.py | yt_dlp/extractor/audimedia.py | from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_iso8601,
)
class AudiMediaIE(InfoExtractor):
    # Extractor for the Audi MediaCenter (audi-mediacenter.com), backed by the
    # audimedia.tv JSON API.
    _VALID_URL = r'https?://(?:www\.)?audi-mediacenter\.com/(?:en|de)/audimediatv/(?:video/)?(?P<id>[^/?#]+)'
    _TESTS = [{
        'url': 'https://www.audi-mediacenter.com/en/audimediatv/60-seconds-of-audi-sport-104-2015-wec-bahrain-rookie-test-1467',
        'md5': '79a8b71c46d49042609795ab59779b66',
        'info_dict': {
            'id': '1565',
            'ext': 'mp4',
            'title': '60 Seconds of Audi Sport 104/2015 - WEC Bahrain, Rookie Test',
            'description': 'md5:60e5d30a78ced725f7b8d34370762941',
            'upload_date': '20151124',
            'timestamp': 1448354940,
            'duration': 74022,
            'view_count': int,
        },
    }, {
        'url': 'https://www.audi-mediacenter.com/en/audimediatv/video/60-seconds-of-audi-sport-104-2015-wec-bahrain-rookie-test-2991',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Resolve the page's embed payload and build the format list.

        The embed element id has the shape ``amtve-<stage_mode>-<video_id>-<lang>``
        (see the last pattern below); splitting on ``-`` yields the stage mode
        (``s``/``e`` mark live and ended live streams) and the numeric API id.
        """
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # The payload id can appear with either attribute order, and with
        # backslash-escaped quotes when embedded inside a JS string literal.
        raw_payload = self._search_regex([
            r'class="amtv-embed"[^>]+id="([0-9a-z-]+)"',
            r'id="([0-9a-z-]+)"[^>]+class="amtv-embed"',
            r'class=\\"amtv-embed\\"[^>]+id=\\"([0-9a-z-]+)\\"',
            r'id=\\"([0-9a-z-]+)\\"[^>]+class=\\"amtv-embed\\"',
            r'id=(?:\\)?"(amtve-[a-z]-\d+-[a-z]{2})',
        ], webpage, 'raw payload')
        _, stage_mode, video_id, _ = raw_payload.split('-')
        # TODO: handle s and e stage_mode (live streams and ended live streams)
        # NOTE(review): for stage_mode 's'/'e' this method falls through and
        # implicitly returns None (live content is currently unsupported).
        if stage_mode not in ('s', 'e'):
            video_data = self._download_json(
                'https://www.audimedia.tv/api/video/v1/videos/' + video_id,
                video_id, query={
                    'embed[]': ['video_versions', 'thumbnail_image'],
                })['results']
            formats = []
            # HLS master playlist, when offered.
            stream_url_hls = video_data.get('stream_url_hls')
            if stream_url_hls:
                formats.extend(self._extract_m3u8_formats(
                    stream_url_hls, video_id, 'mp4',
                    entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))
            # Adobe HDS manifest, when offered.
            stream_url_hds = video_data.get('stream_url_hds')
            if stream_url_hds:
                formats.extend(self._extract_f4m_formats(
                    stream_url_hds + '?hdcore=3.4.0',
                    video_id, f4m_id='hds', fatal=False))
            # Progressive HTTP downloads, one per encoded version.
            for video_version in video_data.get('video_versions', []):
                video_version_url = video_version.get('download_url') or video_version.get('stream_url')
                if not video_version_url:
                    continue
                f = {
                    'url': video_version_url,
                    'width': int_or_none(video_version.get('width')),
                    'height': int_or_none(video_version.get('height')),
                    'abr': int_or_none(video_version.get('audio_bitrate')),
                    'vbr': int_or_none(video_version.get('video_bitrate')),
                }
                # The bitrate is only present in the file name (e.g. '..._1080k.mp4').
                bitrate = self._search_regex(r'(\d+)k', video_version_url, 'bitrate', default=None)
                if bitrate:
                    f.update({
                        'format_id': f'http-{bitrate}',
                    })
                formats.append(f)
            return {
                'id': video_id,
                'title': video_data['title'],
                'description': video_data.get('subtitle'),
                'thumbnail': video_data.get('thumbnail_image', {}).get('file'),
                'timestamp': parse_iso8601(video_data.get('publication_date')),
                'duration': int_or_none(video_data.get('duration')),
                'view_count': int_or_none(video_data.get('view_count')),
                'formats': formats,
            }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/videofyme.py | yt_dlp/extractor/videofyme.py | from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_iso8601,
)
class VideofyMeIE(InfoExtractor):
    _WORKING = False
    _VALID_URL = r'https?://(?:www\.videofy\.me/.+?|p\.videofy\.me/v)/(?P<id>\d+)(&|#|$)'
    IE_NAME = 'videofy.me'
    _TEST = {
        'url': 'http://www.videofy.me/thisisvideofyme/1100701',
        'md5': 'c77d700bdc16ae2e9f3c26019bd96143',
        'info_dict': {
            'id': '1100701',
            'ext': 'mp4',
            'title': 'This is VideofyMe',
            'description': '',
            'upload_date': '20130326',
            'timestamp': 1364288959,
            'uploader': 'VideofyMe',
            'uploader_id': 'thisisvideofyme',
            'view_count': int,
            'like_count': int,
            'comment_count': int,
        },
    }

    def _real_extract(self, url):
        """Fetch video metadata from the player-info JSON service."""
        video_id = self._match_id(url)
        player_info = self._download_json(
            f'http://vf-player-info-loader.herokuapp.com/{video_id}.json',
            video_id)['videoinfo']
        video = player_info.get('video')
        channel = player_info.get('blog', {})
        return {
            'id': video_id,
            'title': video['title'],
            'url': video['sources']['source']['url'],
            'thumbnail': video.get('thumb'),
            'description': video.get('description'),
            'timestamp': parse_iso8601(video.get('date')),
            'uploader': channel.get('name'),
            'uploader_id': channel.get('identifier'),
            'view_count': int_or_none(self._search_regex(
                r'([0-9]+)', video.get('views'), 'view count', fatal=False)),
            'like_count': int_or_none(video.get('likes')),
            'comment_count': int_or_none(video.get('nrOfComments')),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/jtbc.py | yt_dlp/extractor/jtbc.py | import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_duration,
url_or_none,
)
from ..utils.traversal import traverse_obj
class JTBCIE(InfoExtractor):
    IE_DESC = 'jtbc.co.kr'
    _VALID_URL = r'''(?x)
        https?://(?:
            vod\.jtbc\.co\.kr/player/(?:program|clip)
            |tv\.jtbc\.co\.kr/(?:replay|trailer|clip)/pr\d+/pm\d+
        )/(?P<id>(?:ep|vo)\d+)'''
    _GEO_COUNTRIES = ['KR']
    _TESTS = [{
        'url': 'https://tv.jtbc.co.kr/replay/pr10011629/pm10067930/ep20216321/view',
        'md5': 'e6ade71d8c8685bbfd6e6ce4167c6a6c',
        'info_dict': {
            'id': 'VO10721192',
            'display_id': 'ep20216321',
            'ext': 'mp4',
            'title': '힘쎈여자 강남순 2회 다시보기',
            'description': 'md5:043c1d9019100ce271dba09995dbd1e2',
            'duration': 3770.0,
            'release_date': '20231008',
            'age_limit': 15,
            'thumbnail': 'https://fs.jtbc.co.kr//joydata/CP00000001/prog/drama/stronggirlnamsoon/img/20231008_163541_522_1.jpg',
            'series': '힘쎈여자 강남순',
        },
    }, {
        'url': 'https://vod.jtbc.co.kr/player/program/ep20216733',
        'md5': '217a6d190f115a75e4bda0ceaa4cd7f4',
        'info_dict': {
            'id': 'VO10721429',
            'display_id': 'ep20216733',
            'ext': 'mp4',
            'title': '헬로 마이 닥터 친절한 진료실 149회 다시보기',
            'description': 'md5:1d70788a982dd5de26874a92fcffddb8',
            'duration': 2720.0,
            'release_date': '20231009',
            'age_limit': 15,
            'thumbnail': 'https://fs.jtbc.co.kr//joydata/CP00000001/prog/culture/hellomydoctor/img/20231009_095002_528_1.jpg',
            'series': '헬로 마이 닥터 친절한 진료실',
        },
    }, {
        'url': 'https://vod.jtbc.co.kr/player/clip/vo10721270',
        'md5': '05782e2dc22a9c548aebefe62ae4328a',
        'info_dict': {
            'id': 'VO10721270',
            'display_id': 'vo10721270',
            'ext': 'mp4',
            'title': '뭉쳐야 찬다3 2회 예고편 - A매치로 향하는 마지막 관문💥',
            'description': 'md5:d48b51a8655c84843b4ed8d0c39aae68',
            'duration': 46.0,
            'release_date': '20231015',
            'age_limit': 15,
            'thumbnail': 'https://fs.jtbc.co.kr//joydata/CP00000001/prog/enter/soccer3/img/20231008_210957_775_1.jpg',
            'series': '뭉쳐야 찬다3',
        },
    }, {
        'url': 'https://tv.jtbc.co.kr/trailer/pr10010392/pm10032526/vo10720912/view',
        'md5': '367d480eb3ef54a9cd7a4b4d69c4b32d',
        'info_dict': {
            'id': 'VO10720912',
            'display_id': 'vo10720912',
            'ext': 'mp4',
            'title': '아는 형님 404회 예고편 | 10월 14일(토) 저녁 8시 50분 방송!',
            'description': 'md5:2743bb1079ceb85bb00060f2ad8f0280',
            'duration': 148.0,
            'release_date': '20231014',
            'age_limit': 15,
            'thumbnail': 'https://fs.jtbc.co.kr//joydata/CP00000001/prog/enter/jtbcbros/img/20231006_230023_802_1.jpg',
            'series': '아는 형님',
        },
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        # 'vo...' display ids already are the VOD file id (just uppercased);
        # 'ep...' ids are episode pages from which the VOD id must be scraped.
        if display_id.startswith('vo'):
            video_id = display_id.upper()
        else:
            webpage = self._download_webpage(url, display_id)
            video_id = self._search_regex(r'data-vod="(VO\d+)"', webpage, 'vod id')
        playback_data = self._download_json(
            f'https://api.jtbc.co.kr/vod/{video_id}', video_id, note='Downloading VOD playback data')
        subtitles = {}
        # Group subtitle tracks by their language label ('und' when unlabeled).
        for sub in traverse_obj(playback_data, ('tracks', lambda _, v: v['file'])):
            subtitles.setdefault(sub.get('label', 'und'), []).append({'url': sub['file']})
        formats = []
        for stream_url in traverse_obj(playback_data, ('sources', 'HLS', ..., 'file', {url_or_none})):
            # Rewrite single-bitrate variant playlists to the master playlist.
            stream_url = re.sub(r'/playlist_pd\d+\.m3u8', '/playlist.m3u8', stream_url)
            formats.extend(self._extract_m3u8_formats(stream_url, video_id, fatal=False))
        metadata = self._download_json(
            'https://now-api.jtbc.co.kr/v1/vod/detail', video_id,
            note='Downloading mobile details', fatal=False, query={'vodFileId': video_id})
        return {
            'id': video_id,
            'display_id': display_id,
            **traverse_obj(metadata, ('vodDetail', {
                'title': 'vodTitleView',
                'series': 'programTitle',
                'age_limit': ('watchAge', {int_or_none}),
                # broadcastDate is dot-separated (e.g. '2023.10.08...'); strip the
                # dots, match the leading 8 digits, and take group 0 of the Match.
                'release_date': ('broadcastDate', {lambda x: re.match(r'\d{8}', x.replace('.', ''))}, 0),
                'description': 'episodeContents',
                'thumbnail': ('imgFileUrl', {url_or_none}),
            })),
            'duration': parse_duration(playback_data.get('playTime')),
            'formats': formats,
            'subtitles': subtitles,
        }
class JTBCProgramIE(InfoExtractor):
    IE_NAME = 'JTBC:program'
    _VALID_URL = r'https?://(?:vod\.jtbc\.co\.kr/program|tv\.jtbc\.co\.kr/replay)/(?P<id>pr\d+)/(?:replay|pm\d+)/?(?:$|[?#])'
    _TESTS = [{
        'url': 'https://tv.jtbc.co.kr/replay/pr10010392/pm10032710',
        'info_dict': {
            '_type': 'playlist',
            'id': 'pr10010392',
        },
        'playlist_count': 398,
    }, {
        'url': 'https://vod.jtbc.co.kr/program/pr10011491/replay',
        'info_dict': {
            '_type': 'playlist',
            'id': 'pr10011491',
        },
        'playlist_count': 59,
    }]

    def _real_extract(self, url):
        """Resolve a JTBC program page into a playlist of its episode VODs."""
        program_id = self._match_id(url)
        replay_data = self._download_json(
            'https://now-api.jtbc.co.kr/v1/vodClip/programHome/programReplayVodList', program_id,
            note='Downloading program replay list', query={
                'programId': program_id,
                'rowCount': '10000',
            })
        entries = []
        for episode_id in traverse_obj(replay_data, ('programReplayVodList', ..., 'episodeId')):
            entries.append(self.url_result(
                f'https://vod.jtbc.co.kr/player/program/{episode_id}', JTBCIE, episode_id))
        return self.playlist_result(entries, program_id)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/vbox7.py | yt_dlp/extractor/vbox7.py | from .common import InfoExtractor
from ..utils import ExtractorError, base_url, int_or_none, url_basename
from ..utils.traversal import traverse_obj
class Vbox7IE(InfoExtractor):
    _VALID_URL = r'''(?x)
                    https?://
                        (?:[^/]+\.)?vbox7\.com/
                        (?:
                            play:|
                            (?:
                                emb/external\.php|
                                player/ext\.swf
                            )\?.*?\bvid=
                        )
                        (?P<id>[\da-fA-F]+)
                    '''
    _EMBED_REGEX = [r'<iframe[^>]+src=(?P<q>["\'])(?P<url>(?:https?:)?//vbox7\.com/emb/external\.php.+?)(?P=q)']
    _GEO_COUNTRIES = ['BG']
    _TESTS = [{
        'url': 'http://vbox7.com/play:0946fff23c',
        'md5': '50ca1f78345a9c15391af47d8062d074',
        'info_dict': {
            'id': '0946fff23c',
            'ext': 'mp4',
            'title': 'Борисов: Притеснен съм за бъдещето на България',
            'description': 'По думите му е опасно страната ни да бъде обявена за "сигурна"',
            'thumbnail': r're:https?://.+\.jpg',
            'timestamp': 1470982814,
            'upload_date': '20160812',
            'uploader': 'zdraveibulgaria',
            'view_count': int,
            'duration': 2640,
        },
    }, {
        'url': 'http://vbox7.com/play:249bb972c2',
        'md5': 'da1dd2eb245200cb86e6d09d43232116',
        'info_dict': {
            'id': '249bb972c2',
            'ext': 'mp4',
            'title': 'Смях! Чудо - чист за секунди - Скрита камера',
            'uploader': 'svideteliat_ot_varshava',
            'view_count': int,
            'timestamp': 1360215023,
            'thumbnail': 'https://i49.vbox7.com/o/249/249bb972c20.jpg',
            'description': 'Смях! Чудо - чист за секунди - Скрита камера',
            'upload_date': '20130207',
            'duration': 83,
        },
        'skip': 'Invalid URL',
    }, {
        'url': 'http://vbox7.com/emb/external.php?vid=a240d20f9c&autoplay=1',
        'only_matching': True,
    }, {
        'url': 'http://i49.vbox7.com/player/ext.swf?vid=0946fff23c&autoplay=1',
        'only_matching': True,
    }]
    _WEBPAGE_TESTS = [{
        'url': 'https://nova.bg/news/view/2016/08/16/156543/%D0%BD%D0%B0-%D0%BA%D0%BE%D1%81%D1%8A%D0%BC-%D0%BE%D1%82-%D0%B2%D0%B7%D1%80%D0%B8%D0%B2-%D0%BE%D1%82%D1%86%D0%B5%D0%BF%D0%B8%D1%85%D0%B0-%D1%86%D1%8F%D0%BB-%D0%BA%D0%B2%D0%B0%D1%80%D1%82%D0%B0%D0%BB-%D0%B7%D0%B0%D1%80%D0%B0%D0%B4%D0%B8-%D0%B8%D0%B7%D1%82%D0%B8%D1%87%D0%B0%D0%BD%D0%B5-%D0%BD%D0%B0-%D0%B3%D0%B0%D0%B7-%D0%B2-%D0%BF%D0%BB%D0%BE%D0%B2%D0%B4%D0%B8%D0%B2/',
        'info_dict': {
            'id': '5a4d12166d',
            'ext': 'mp4',
            'title': 'НА КОСЪМ ОТ ВЗРИВ: Отцепиха цял квартал заради изтичане на газ в Пловдив',
            'description': 'Инцидентът е станал на бензиностанция',
            'duration': 200,
            'thumbnail': r're:https?://.+\.jpg',
            'timestamp': 1471353501,
            'upload_date': '20160816',
            'uploader': 'novinitenanova',
            'view_count': int,
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        data = self._download_json(
            'https://www.vbox7.com/aj/player/item/options', video_id,
            query={'vid': video_id})['options']
        src_url = data.get('src')
        # The API signals a removed/unavailable video with a missing, empty,
        # or literal 'blank' src.
        if src_url in (None, '', 'blank'):
            raise ExtractorError('Video is unavailable', expected=True)
        # Strip the extension and any trailing '_<resolution>' suffix from the
        # src file name to get the base name shared by all formats.
        fmt_base = url_basename(src_url).rsplit('.', 1)[0].rsplit('_', 1)[0]
        # A base name of 'vn' indicates the video is geo-restricted.
        if fmt_base == 'vn':
            self.raise_geo_restricted()
        fmt_base = base_url(src_url) + fmt_base
        formats = self._extract_m3u8_formats(
            f'{fmt_base}.m3u8', video_id, m3u8_id='hls', fatal=False)
        # TODO: Add MPD formats, when dash range support is added
        # One progressive MP4 per advertised resolution (zero entries skipped).
        for res in traverse_obj(data, ('resolutions', lambda _, v: v != 0, {int})):
            formats.append({
                'url': f'{fmt_base}_{res}.mp4',
                'format_id': f'http-{res}',
                'height': res,
            })
        return {
            'id': video_id,
            'formats': formats,
            # JSON-LD from the watch page fills in timestamp/description etc.;
            # the player-options fields merged below take precedence.
            **self._search_json_ld(self._download_webpage(
                f'https://www.vbox7.com/play:{video_id}', video_id, fatal=False) or '', video_id, fatal=False),
            **traverse_obj(data, {
                'title': ('title', {str}),
                'uploader': ('uploader', {str}),
                'duration': ('duration', {int_or_none}),
            }),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/movingimage.py | yt_dlp/extractor/movingimage.py | from .common import InfoExtractor
from ..utils import (
parse_duration,
unescapeHTML,
)
class MovingImageIE(InfoExtractor):
    # Extractor for films hosted on the National Library of Scotland's
    # Moving Image Archive (movingimage.nls.uk).
    _VALID_URL = r'https?://movingimage\.nls\.uk/film/(?P<id>\d+)'
    _TEST = {
        'url': 'http://movingimage.nls.uk/film/3561',
        'md5': '4caa05c2b38453e6f862197571a7be2f',
        'info_dict': {
            'id': '3561',
            'ext': 'mp4',
            'title': 'SHETLAND WOOL',
            'description': 'md5:c5afca6871ad59b4271e7704fe50ab04',
            'duration': 900,
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        formats = self._extract_m3u8_formats(
            self._html_search_regex(r'file\s*:\s*"([^"]+)"', webpage, 'm3u8 manifest URL'),
            video_id, ext='mp4', entry_protocol='m3u8_native')

        def search_field(field_name, fatal=False):
            # Extract the text of a labelled metadata field from the page.
            # Bug fix: pass the actual field name to _search_regex (previously
            # hard-coded to 'title'), so warnings/errors name the right field.
            return self._search_regex(
                rf'<span\s+class="field_title">{field_name}:</span>\s*<span\s+class="field_content">([^<]+)</span>',
                webpage, field_name, fatal=fatal)

        # The title is wrapped in brackets/parens on the page; strip them.
        title = unescapeHTML(search_field('Title', fatal=True)).strip('()[]')
        description = unescapeHTML(search_field('Description'))
        duration = parse_duration(search_field('Running time'))
        thumbnail = self._search_regex(
            r"image\s*:\s*'([^']+)'", webpage, 'thumbnail', fatal=False)

        return {
            'id': video_id,
            'formats': formats,
            'title': title,
            'description': description,
            'duration': duration,
            'thumbnail': thumbnail,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/moviepilot.py | yt_dlp/extractor/moviepilot.py | from .common import InfoExtractor
from .dailymotion import DailymotionIE
class MoviepilotIE(InfoExtractor):
    IE_NAME = 'moviepilot'
    IE_DESC = 'Moviepilot trailer'
    _VALID_URL = r'https?://(?:www\.)?moviepilot\.de/movies/(?P<id>[^/]+)'

    _TESTS = [{
        'url': 'https://www.moviepilot.de/movies/interstellar-2/',
        'info_dict': {
            'id': 'x7xdpkk',
            'display_id': 'interstellar-2',
            'ext': 'mp4',
            'title': 'Interstellar',
            'thumbnail': r're:https://\w+\.dmcdn\.net/v/SaV-q1.*/x1080',
            'timestamp': 1605010596,
            'description': 'md5:0ae9cb452af52610c9ffc60f2fd0474c',
            'uploader': 'Moviepilot',
            'like_count': int,
            'view_count': int,
            'uploader_id': 'x6nd9k',
            'upload_date': '20201110',
            'duration': 97,
            'age_limit': 0,
            'tags': ['Alle Trailer', 'Movie', 'Verleih'],
        },
    }, {
        'url': 'https://www.moviepilot.de/movies/interstellar-2/trailer',
        'only_matching': True,
    }, {
        'url': 'https://www.moviepilot.de/movies/interstellar-2/kinoprogramm/berlin',
        'only_matching': True,
    }, {
        'url': 'https://www.moviepilot.de/movies/queen-slim/trailer',
        'info_dict': {
            'id': 'x7xj6o7',
            'display_id': 'queen-slim',
            'title': 'Queen & Slim',
            'ext': 'mp4',
            'thumbnail': r're:https://\w+\.dmcdn\.net/v/SbUM71ZeG2N975lf2/x1080',
            'timestamp': 1605555825,
            'description': 'md5:83228bb86f5367dd181447fdc4873989',
            'uploader': 'Moviepilot',
            'like_count': int,
            'view_count': int,
            'uploader_id': 'x6nd9k',
            'upload_date': '20201116',
            'duration': 138,
            'age_limit': 0,
            'tags': ['Movie', 'Verleih', 'Neue Trailer'],
        },
    }, {
        'url': 'https://www.moviepilot.de/movies/der-geiger-von-florenz/trailer',
        'info_dict': {
            'id': 'der-geiger-von-florenz',
            'title': 'Der Geiger von Florenz',
            'ext': 'mp4',
        },
        'skip': 'No trailer for this movie.',
    }, {
        'url': 'https://www.moviepilot.de/movies/muellers-buero/',
        'info_dict': {
            'id': 'x7xcw1i',
            'display_id': 'muellers-buero',
            'title': 'Müllers Büro',
            'ext': 'mp4',
            'description': 'md5:4d23a8f4ca035196cd4523863c4fe5a4',
            'timestamp': 1604958457,
            'age_limit': 0,
            'duration': 82,
            'upload_date': '20201109',
            'thumbnail': r're:https://\w+\.dmcdn\.net/v/SaMes1Z.*/x1080',
            'uploader': 'Moviepilot',
            'like_count': int,
            'view_count': int,
            'tags': ['Alle Trailer', 'Movie', 'Verleih'],
            'uploader_id': 'x6nd9k',
        },
    }]

    def _real_extract(self, url):
        """Locate the trailer's Dailymotion id and hand off to DailymotionIE."""
        display_id = self._match_id(url)
        webpage = self._download_webpage(
            f'https://www.moviepilot.de/movies/{display_id}/trailer', display_id)
        # Trailer metadata lives in the page's Next.js data payload.
        page_props = self._search_nextjs_data(
            webpage, display_id)['props']['initialProps']['pageProps']
        return {
            '_type': 'url_transparent',
            'ie_key': DailymotionIE.ie_key(),
            'display_id': display_id,
            'title': page_props.get('title'),
            'url': f'https://www.dailymotion.com/video/{page_props["video"]["remoteId"]}',
            'description': page_props.get('summary'),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/videoken.py | yt_dlp/extractor/videoken.py | import base64
import functools
import math
import re
import time
import urllib.parse
from .common import InfoExtractor
from .slideslive import SlidesLiveIE
from ..utils import (
ExtractorError,
InAdvancePagedList,
int_or_none,
remove_start,
traverse_obj,
update_url_query,
url_or_none,
)
class VideoKenBaseIE(InfoExtractor):
    # Maps each supported VideoKen-powered hostname to the organization slug
    # used by the analytics API.
    _ORGANIZATIONS = {
        'videos.icts.res.in': 'icts',
        'videos.cncf.io': 'cncf',
        'videos.neurips.cc': 'neurips',
    }
    _BASE_URL_RE = rf'https?://(?P<host>{"|".join(map(re.escape, _ORGANIZATIONS))})/'
    _PAGE_SIZE = 12

    def _get_org_id_and_api_key(self, org, video_id):
        """Return the (organization id, API key) pair for *org*."""
        details = self._download_json(
            f'https://analytics.videoken.com/api/videolake/{org}/details', video_id,
            note='Downloading organization ID and API key', headers={
                'Accept': 'application/json',
            })
        return details['id'], details['apikey']

    def _create_slideslive_url(self, video_url, video_id, referer):
        """Build an embeddable SlidesLive URL, or None if nothing usable.

        Falls back to constructing the URL from *video_id* when *video_url*
        is missing or points at SlidesLive's sign-in wall; a valid *referer*
        is forwarded via embed query parameters.
        """
        if not video_url and not video_id:
            return
        elif not video_url or 'embed/sign-in' in video_url:
            video_url = f'https://slideslive.com/embed/{remove_start(video_id, "slideslive-")}'
        if url_or_none(referer):
            return update_url_query(video_url, {
                'embed_parent_url': referer,
                'embed_container_origin': f'https://{urllib.parse.urlparse(referer).hostname}',
            })
        return video_url

    def _extract_videos(self, videos, url):
        """Yield url_result entries for each playable item in *videos*.

        Accepts API payloads keyed either 'videos' or 'results'. YouTube-based
        entries are delegated to the YouTube extractor; SlidesLive embeds go
        through SlidesLiveIE; entries without a usable id/URL are skipped.
        """
        for video in traverse_obj(videos, (('videos', 'results'), ...)):
            video_id = traverse_obj(video, 'youtube_id', 'videoid')
            if not video_id:
                continue
            ie_key = None
            if traverse_obj(video, 'type', 'source') == 'youtube':
                # For YouTube items the id itself is a valid url_result target.
                video_url = video_id
                ie_key = 'Youtube'
            else:
                video_url = traverse_obj(video, 'embed_url', 'embeddableurl', expected_type=url_or_none)
                if not video_url:
                    continue
                elif urllib.parse.urlparse(video_url).hostname == 'slideslive.com':
                    ie_key = SlidesLiveIE
                    video_url = self._create_slideslive_url(video_url, video_id, url)
            yield self.url_result(video_url, ie_key, video_id)
class VideoKenIE(VideoKenBaseIE):
_VALID_URL = VideoKenBaseIE._BASE_URL_RE + r'(?:(?:topic|category)/[^/#?]+/)?video/(?P<id>[\w-]+)'
_TESTS = [{
# neurips -> videoken -> slideslive
'url': 'https://videos.neurips.cc/video/slideslive-38922815',
'info_dict': {
'id': '38922815',
'ext': 'mp4',
'title': 'Efficient Processing of Deep Neural Network: from Algorithms to Hardware Architectures',
'timestamp': 1630939331,
'upload_date': '20210906',
'thumbnail': r're:^https?://.*\.(?:jpg|png)',
'thumbnails': 'count:330',
'chapters': 'count:329',
},
'params': {
'skip_download': 'm3u8',
},
'expected_warnings': ['Failed to download VideoKen API JSON'],
}, {
# neurips -> videoken -> slideslive -> youtube
'url': 'https://videos.neurips.cc/topic/machine%20learning/video/slideslive-38923348',
'info_dict': {
'id': '2Xa_dt78rJE',
'ext': 'mp4',
'display_id': '38923348',
'title': 'Machine Education',
'description': 'Watch full version of this video at https://slideslive.com/38923348.',
'channel': 'SlidesLive Videos - G2',
'channel_id': 'UCOExahQQ588Da8Nft_Ltb9w',
'channel_url': 'https://www.youtube.com/channel/UCOExahQQ588Da8Nft_Ltb9w',
'uploader': 'SlidesLive Videos - G2',
'uploader_id': 'UCOExahQQ588Da8Nft_Ltb9w',
'uploader_url': 'http://www.youtube.com/channel/UCOExahQQ588Da8Nft_Ltb9w',
'duration': 2504,
'timestamp': 1618922125,
'upload_date': '20200131',
'age_limit': 0,
'channel_follower_count': int,
'view_count': int,
'availability': 'unlisted',
'live_status': 'not_live',
'playable_in_embed': True,
'categories': ['People & Blogs'],
'tags': [],
'thumbnail': r're:^https?://.*\.(?:jpg|webp)',
'thumbnails': 'count:78',
'chapters': 'count:77',
},
'params': {
'skip_download': 'm3u8',
},
'expected_warnings': ['Failed to download VideoKen API JSON'],
}, {
# icts -> videoken -> youtube
'url': 'https://videos.icts.res.in/topic/random%20variable/video/zysIsojYdvc',
'info_dict': {
'id': 'zysIsojYdvc',
'ext': 'mp4',
'title': 'Small-worlds, complex networks and random graphs (Lecture 3) by Remco van der Hofstad',
'description': 'md5:87433069d79719eeadc1962cc2ace00b',
'channel': 'International Centre for Theoretical Sciences',
'channel_id': 'UCO3xnVTHzB7l-nc8mABUJIQ',
'channel_url': 'https://www.youtube.com/channel/UCO3xnVTHzB7l-nc8mABUJIQ',
'uploader': 'International Centre for Theoretical Sciences',
'uploader_id': 'ICTStalks',
'uploader_url': 'http://www.youtube.com/user/ICTStalks',
'duration': 3372,
'upload_date': '20191004',
'age_limit': 0,
'live_status': 'not_live',
'availability': 'public',
'playable_in_embed': True,
'channel_follower_count': int,
'like_count': int,
'view_count': int,
'categories': ['Science & Technology'],
'tags': [],
'thumbnail': r're:^https?://.*\.(?:jpg|webp)',
'thumbnails': 'count:42',
'chapters': 'count:20',
},
'params': {
'skip_download': 'm3u8',
},
}, {
'url': 'https://videos.cncf.io/category/478/video/IL4nxbmUIX8',
'only_matching': True,
}, {
'url': 'https://videos.cncf.io/topic/kubernetes/video/YAM2d7yTrrI',
'only_matching': True,
}, {
'url': 'https://videos.icts.res.in/video/d7HuP_abpKU',
'only_matching': True,
}]
    def _real_extract(self, url):
        """Resolve a portal video page to its underlying YouTube/SlidesLive video."""
        hostname, video_id = self._match_valid_url(url).group('host', 'id')
        org_id, _ = self._get_org_id_and_api_key(self._ORGANIZATIONS[hostname], video_id)
        details = self._download_json(
            'https://analytics.videoken.com/api/videoinfo_private', video_id, query={
                'videoid': video_id,
                'org_id': org_id,
            }, headers={'Accept': 'application/json'}, note='Downloading VideoKen API JSON',
            errnote='Failed to download VideoKen API JSON', fatal=False)
        if details:
            return next(self._extract_videos({'videos': [details]}, url))
        # fallback for API error 400 response
        elif video_id.startswith('slideslive-'):
            # SlidesLive-style IDs can be turned into an embed URL directly
            return self.url_result(
                self._create_slideslive_url(None, video_id, url), SlidesLiveIE, video_id)
        elif re.match(r'^[\w-]{11}$', video_id):
            # 11-character IDs match the YouTube video-ID format
            return self.url_result(video_id, 'Youtube', video_id)
        else:
            raise ExtractorError('Unable to extract without VideoKen API response')
class VideoKenPlayerIE(VideoKenBaseIE):
    """Extractor for standalone player.videoken.com SlidesLive embeds."""
    _VALID_URL = r'https?://player\.videoken\.com/embed/slideslive-(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://player.videoken.com/embed/slideslive-38968434',
        'info_dict': {
            'id': '38968434',
            'ext': 'mp4',
            'title': 'Deep Learning with Label Differential Privacy',
            'timestamp': 1643377020,
            'upload_date': '20220128',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:30',
            'chapters': 'count:29',
        },
        'params': {
            'skip_download': 'm3u8',
        },
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Hand off to SlidesLive with the proper embed-context query parameters
        return self.url_result(
            self._create_slideslive_url(None, video_id, url), SlidesLiveIE, video_id)
class VideoKenPlaylistIE(VideoKenBaseIE):
    """Extractor for playlists on VideoKen-powered portals."""
    _VALID_URL = VideoKenBaseIE._BASE_URL_RE + r'(?:category/\d+/)?playlist/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://videos.icts.res.in/category/1822/playlist/381',
        'playlist_mincount': 117,
        'info_dict': {
            'id': '381',
            'title': 'Cosmology - The Next Decade',
        },
    }]
    def _real_extract(self, url):
        hostname, playlist_id = self._match_valid_url(url).group('host', 'id')
        # Only the org ID is needed here; the API key is unused
        org_id, _ = self._get_org_id_and_api_key(self._ORGANIZATIONS[hostname], playlist_id)
        videos = self._download_json(
            f'https://analytics.videoken.com/api/{org_id}/playlistitems/{playlist_id}/',
            playlist_id, headers={'Accept': 'application/json'}, note='Downloading API JSON')
        return self.playlist_result(self._extract_videos(videos, url), playlist_id, videos.get('title'))
class VideoKenCategoryIE(VideoKenBaseIE):
    """Extracts all videos of a VideoKen portal category as a paged playlist."""
    _VALID_URL = VideoKenBaseIE._BASE_URL_RE + r'category/(?P<id>\d+)/?(?:$|[?#])'
    _TESTS = [{
        'url': 'https://videos.icts.res.in/category/1822/',
        'playlist_mincount': 500,
        'info_dict': {
            'id': '1822',
            'title': 'Programs',
        },
    }, {
        'url': 'https://videos.neurips.cc/category/350/',
        'playlist_mincount': 34,
        'info_dict': {
            'id': '350',
            'title': 'NeurIPS 2018',
        },
    }, {
        'url': 'https://videos.cncf.io/category/479/',
        'playlist_mincount': 328,
        'info_dict': {
            'id': '479',
            'title': 'KubeCon + CloudNativeCon Europe\'19',
        },
    }]
    def _get_category_page(self, category_id, org_id, page=1, note=None):
        """Download one page of category videos; returns {} on failure (fatal=False)."""
        return self._download_json(
            f'https://analytics.videoken.com/api/videolake/{org_id}/category_videos', category_id,
            # `note or ...` is the idiomatic default; behavior matches `note if note else ...`
            fatal=False, note=note or f'Downloading category page {page}',
            query={
                'category_id': category_id,
                'page_number': page,
                'length': self._PAGE_SIZE,
            }, headers={'Accept': 'application/json'}) or {}
    def _entries(self, category_id, org_id, url, page):
        # InAdvancePagedList page indices are 0-based, but the API is 1-based
        videos = self._get_category_page(category_id, org_id, page + 1)
        yield from self._extract_videos(videos, url)
    def _real_extract(self, url):
        hostname, category_id = self._match_valid_url(url).group('host', 'id')
        org_id, _ = self._get_org_id_and_api_key(self._ORGANIZATIONS[hostname], category_id)
        # First page doubles as category metadata (name + total record count)
        category_info = self._get_category_page(category_id, org_id, note='Downloading category info')
        category = category_info['category_name']
        total_pages = math.ceil(int(category_info['recordsTotal']) / self._PAGE_SIZE)
        return self.playlist_result(InAdvancePagedList(
            functools.partial(self._entries, category_id, org_id, url),
            total_pages, self._PAGE_SIZE), category_id, category)
class VideoKenTopicIE(VideoKenBaseIE):
    """Extracts all videos tagged with a topic on a VideoKen-powered portal."""
    _VALID_URL = VideoKenBaseIE._BASE_URL_RE + r'topic/(?P<id>[^/#?]+)/?(?:$|[?#])'
    _TESTS = [{
        'url': 'https://videos.neurips.cc/topic/machine%20learning/',
        'playlist_mincount': 500,
        'info_dict': {
            'id': 'machine_learning',
            'title': 'machine learning',
        },
    }, {
        'url': 'https://videos.icts.res.in/topic/gravitational%20waves/',
        'playlist_mincount': 77,
        'info_dict': {
            'id': 'gravitational_waves',
            'title': 'gravitational waves',
        },
    }, {
        'url': 'https://videos.cncf.io/topic/prometheus/',
        'playlist_mincount': 134,
        'info_dict': {
            'id': 'prometheus',
            'title': 'prometheus',
        },
    }]
    def _get_topic_page(self, topic, org_id, search_id, api_key, page=1, note=None):
        """Download one page of topic search results; returns {} on failure (fatal=False)."""
        return self._download_json(
            'https://es.videoken.com/api/v1.0/get_results', topic, fatal=False, query={
                'orgid': org_id,
                'size': self._PAGE_SIZE,
                'query': topic,
                'page': page,
                'sort': 'upload_desc',
                'filter': 'all',
                'token': api_key,
                'is_topic': 'true',
                'category': '',
                'searchid': search_id,
            }, headers={'Accept': 'application/json'},
            # `note or ...` is the idiomatic default; behavior matches `note if note else ...`
            note=note or f'Downloading topic page {page}') or {}
    def _entries(self, topic, org_id, search_id, api_key, url, page):
        # InAdvancePagedList page indices are 0-based, but the API is 1-based
        videos = self._get_topic_page(topic, org_id, search_id, api_key, page + 1)
        yield from self._extract_videos(videos, url)
    def _real_extract(self, url):
        hostname, topic_id = self._match_valid_url(url).group('host', 'id')
        topic = urllib.parse.unquote(topic_id)
        topic_id = topic.replace(' ', '_')
        org_id, api_key = self._get_org_id_and_api_key(self._ORGANIZATIONS[hostname], topic)
        # Transient, time-stamped search-session token expected by the search API
        search_id = base64.b64encode(f':{topic}:{int(time.time())}:transient'.encode()).decode()
        total_pages = int_or_none(self._get_topic_page(
            topic, org_id, search_id, api_key, note='Downloading topic info')['total_no_of_pages'])
        return self.playlist_result(InAdvancePagedList(
            functools.partial(self._entries, topic, org_id, search_id, api_key, url),
            total_pages, self._PAGE_SIZE), topic_id, topic)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/mixch.py | yt_dlp/extractor/mixch.py | from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
UserNotLive,
int_or_none,
str_or_none,
url_or_none,
)
from ..utils.traversal import traverse_obj
class MixchIE(InfoExtractor):
    """Extractor for live broadcasts on mixch.tv user pages."""
    IE_NAME = 'mixch'
    _VALID_URL = r'https?://mixch\.tv/u/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://mixch.tv/u/16943797/live',
        'skip': 'don\'t know if this live persists',
        'info_dict': {
            'id': '16943797',
            'ext': 'mp4',
            'title': '#EntView #カリナ #セブチ 2024-05-05 06:58',
            'comment_count': int,
            'view_count': int,
            'timestamp': 1714726805,
            'uploader': 'Ent.View K-news🎶💕',
            'uploader_id': '16943797',
            'live_status': 'is_live',
            'upload_date': '20240503',
        },
    }, {
        'url': 'https://mixch.tv/u/16137876/live',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        """Extract the live HLS stream and metadata for a currently-live user."""
        video_id = self._match_id(url)
        data = self._download_json(f'https://mixch.tv/api-web/users/{video_id}/live', video_id)
        # An absent/non-dict liveInfo means the user is not currently streaming
        if not traverse_obj(data, ('liveInfo', {dict})):
            raise UserNotLive(video_id=video_id)
        return {
            'id': video_id,
            'uploader_id': video_id,
            **traverse_obj(data, {
                'title': ('liveInfo', 'title', {str}),
                'comment_count': ('liveInfo', 'comments', {int_or_none}),
                'view_count': ('liveInfo', 'visitor', {int_or_none}),
                'timestamp': ('liveInfo', 'created', {int_or_none}),
                'uploader': ('broadcasterInfo', 'name', {str}),
            }),
            'formats': [{
                'format_id': 'hls',
                'url': data['liveInfo']['hls'],
                'ext': 'mp4',
                'protocol': 'm3u8',
            }],
            'is_live': True,
            # Comments are fetched lazily after extraction via _get_comments
            '__post_extractor': self.extract_comments(video_id),
        }
    def _get_comments(self, video_id):
        """Yield live-chat messages as comment dicts."""
        yield from traverse_obj(self._download_json(
            f'https://mixch.tv/api-web/lives/{video_id}/messages', video_id,
            note='Downloading comments', errnote='Failed to download comments'), (..., {
                'author': ('name', {str}),
                'author_id': ('user_id', {str_or_none}),
                'id': ('message_id', {str}, filter),
                'text': ('body', {str}),
                'timestamp': ('created', {int}),
        }))
class MixchArchiveIE(InfoExtractor):
    """Extractor for archived (VOD) broadcasts on mixch.tv."""
    IE_NAME = 'mixch:archive'
    _VALID_URL = r'https?://mixch\.tv/archive/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://mixch.tv/archive/421',
        'skip': 'paid video, no DRM. expires at Jan 23',
        'info_dict': {
            'id': '421',
            'ext': 'mp4',
            'title': '96NEKO SHOW TIME',
        },
    }, {
        'url': 'https://mixch.tv/archive/1213',
        'skip': 'paid video, no DRM. expires at Dec 31, 2023',
        'info_dict': {
            'id': '1213',
            'ext': 'mp4',
            'title': '【特別トーク番組アーカイブス】Merm4id×燐舞曲 2nd LIVE「VERSUS」',
            'release_date': '20231201',
            'thumbnail': str,
        },
    }, {
        'url': 'https://mixch.tv/archive/1214',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        """Extract an archive's HLS formats; login is required for paid archives."""
        video_id = self._match_id(url)
        try:
            info_json = self._download_json(
                f'https://mixch.tv/api-web/archive/{video_id}', video_id)['archive']
        except ExtractorError as e:
            # The API answers 401 for archives that require purchase/login
            if isinstance(e.cause, HTTPError) and e.cause.status == 401:
                self.raise_login_required()
            raise
        return {
            'id': video_id,
            'title': traverse_obj(info_json, ('title', {str})),
            'formats': self._extract_m3u8_formats(info_json['archiveURL'], video_id),
            'thumbnail': traverse_obj(info_json, ('thumbnailURL', {url_or_none})),
        }
class MixchMovieIE(InfoExtractor):
    """Extractor for short-form movie clips on mixch.tv (/m/ URLs)."""
    IE_NAME = 'mixch:movie'
    _VALID_URL = r'https?://mixch\.tv/m/(?P<id>\w+)'
    _TESTS = [{
        'url': 'https://mixch.tv/m/Ve8KNkJ5',
        'info_dict': {
            'id': 'Ve8KNkJ5',
            'title': '夏☀️\nムービーへのポイントは本イベントに加算されないので配信にてお願い致します🙇🏻\u200d♀️\n#TGCCAMPUS #ミス東大 #ミス東大2024 ',
            'ext': 'mp4',
            'uploader': 'ミス東大No.5 松藤百香🍑💫',
            'uploader_id': '12299174',
            'channel_follower_count': int,
            'view_count': int,
            'like_count': int,
            'comment_count': int,
            'timestamp': 1724070828,
            'uploader_url': 'https://mixch.tv/u/12299174',
            'live_status': 'not_live',
            'upload_date': '20240819',
        },
    }, {
        'url': 'https://mixch.tv/m/61DzpIKE',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        """Extract the single direct-MP4 format and movie/owner metadata."""
        video_id = self._match_id(url)
        data = self._download_json(
            f'https://mixch.tv/api-web/movies/{video_id}', video_id)
        return {
            'id': video_id,
            'formats': [{
                'format_id': 'mp4',
                'url': data['movie']['file'],
                'ext': 'mp4',
            }],
            **traverse_obj(data, {
                'title': ('movie', 'title', {str}),
                'thumbnail': ('movie', 'thumbnailURL', {url_or_none}),
                'uploader': ('ownerInfo', 'name', {str}),
                'uploader_id': ('ownerInfo', 'id', {int}, {str_or_none}),
                'channel_follower_count': ('ownerInfo', 'fan', {int_or_none}),
                'view_count': ('ownerInfo', 'view', {int_or_none}),
                'like_count': ('movie', 'favCount', {int_or_none}),
                'comment_count': ('movie', 'commentCount', {int_or_none}),
                'timestamp': ('movie', 'published', {int_or_none}),
                'uploader_url': ('ownerInfo', 'id', {lambda x: x and f'https://mixch.tv/u/{x}'}, filter),
            }),
            'live_status': 'not_live',
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nfb.py | yt_dlp/extractor/nfb.py | from .common import InfoExtractor
from ..utils import (
int_or_none,
join_nonempty,
merge_dicts,
parse_count,
url_or_none,
)
from ..utils.traversal import traverse_obj
class NFBBaseIE(InfoExtractor):
    """Shared helpers for National Film Board of Canada (nfb.ca / onf.ca) extractors."""
    _VALID_URL_BASE = r'https?://(?:www\.)?(?P<site>nfb|onf)\.ca'
    _GEO_COUNTRIES = ['CA']
    def _extract_ep_data(self, webpage, video_id, fatal=False):
        """Return the page's episodesData JSON (or {} when absent and not fatal)."""
        return self._search_json(
            r'episodesData\s*:', webpage, 'episode data', video_id, fatal=fatal) or {}
    def _extract_ep_info(self, data, video_id, slug=None):
        """Build episode metadata from the entry whose embed_url contains ``video_id``."""
        info = traverse_obj(data, (lambda _, v: video_id in v['embed_url'], {
            'description': ('description', {str}),
            'thumbnail': ('thumbnail_url', {url_or_none}),
            'uploader': ('data_layer', 'episodeMaker', {str}),
            'release_year': ('data_layer', 'episodeYear', {int_or_none}),
            'episode': ('data_layer', 'episodeTitle', {str}),
            'season': ('data_layer', 'seasonTitle', {str}),
            'season_number': ('data_layer', 'seasonTitle', {parse_count}),
            'series': ('data_layer', 'seriesTitle', {str}),
        }), get_all=False)
        return {
            **info,
            'id': video_id,
            'title': join_nonempty('series', 'episode', from_dict=info, delim=' - '),
            # Episode number only appears in the slug/ID, e.g. ".../episode9/" or "...-episode-9"
            'episode_number': int_or_none(self._search_regex(
                r'[/-]e(?:pisode)?-?(\d+)(?:[/-]|$)', slug or video_id, 'episode number', default=None)),
        }
class NFBIE(NFBBaseIE):
IE_NAME = 'nfb'
IE_DESC = 'nfb.ca and onf.ca films and episodes'
_VALID_URL = [
rf'{NFBBaseIE._VALID_URL_BASE}/(?P<type>film)/(?P<id>[^/?#&]+)',
rf'{NFBBaseIE._VALID_URL_BASE}/(?P<type>series?)/(?P<id>[^/?#&]+/s(?:ea|ai)son\d+/episode\d+)',
]
_TESTS = [{
'note': 'NFB film',
'url': 'https://www.nfb.ca/film/trafficopter/',
'info_dict': {
'id': 'trafficopter',
'ext': 'mp4',
'title': 'Trafficopter',
'description': 'md5:060228455eb85cf88785c41656776bc0',
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': 'Barrie Howells',
'release_year': 1972,
'duration': 600.0,
},
'params': {'skip_download': 'm3u8'},
}, {
'note': 'ONF film',
'url': 'https://www.onf.ca/film/mal-du-siecle/',
'info_dict': {
'id': 'mal-du-siecle',
'ext': 'mp4',
'title': 'Le mal du siècle',
'description': 'md5:1abf774d77569ebe603419f2d344102b',
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': 'Catherine Lepage',
'release_year': 2019,
'duration': 300.0,
},
'params': {'skip_download': 'm3u8'},
}, {
'note': 'NFB episode with English title',
'url': 'https://www.nfb.ca/series/true-north-inside-the-rise-of-toronto-basketball/season1/episode9/',
'info_dict': {
'id': 'true-north-episode9-true-north-finale-making-it',
'ext': 'mp4',
'title': 'True North: Inside the Rise of Toronto Basketball - Finale: Making It',
'description': 'We catch up with each player in the midst of their journey as they reflect on their road ahead.',
'series': 'True North: Inside the Rise of Toronto Basketball',
'release_year': 2018,
'season': 'Season 1',
'season_number': 1,
'episode': 'Finale: Making It',
'episode_number': 9,
'uploader': 'Ryan Sidhoo',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {'skip_download': 'm3u8'},
}, {
'note': 'ONF episode with French title',
'url': 'https://www.onf.ca/serie/direction-nord-la-montee-du-basketball-a-toronto/saison1/episode9/',
'info_dict': {
'id': 'direction-nord-episode-9',
'ext': 'mp4',
'title': 'Direction nord – La montée du basketball à Toronto - Finale : Réussir',
'description': 'md5:349a57419b71432b97bf6083d92b029d',
'series': 'Direction nord – La montée du basketball à Toronto',
'release_year': 2018,
'season': 'Saison 1',
'season_number': 1,
'episode': 'Finale : Réussir',
'episode_number': 9,
'uploader': 'Ryan Sidhoo',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {'skip_download': 'm3u8'},
}, {
'note': 'NFB episode with French title (needs geo-bypass)',
'url': 'https://www.nfb.ca/series/etoile-du-nord/saison1/episode1/',
'info_dict': {
'id': 'etoile-du-nord-episode-1-lobservation',
'ext': 'mp4',
'title': 'Étoile du Nord - L\'observation',
'description': 'md5:161a4617260dee3de70f509b2c9dd21b',
'series': 'Étoile du Nord',
'release_year': 2023,
'season': 'Saison 1',
'season_number': 1,
'episode': 'L\'observation',
'episode_number': 1,
'uploader': 'Patrick Bossé',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {'skip_download': 'm3u8'},
}, {
'note': 'ONF episode with English title (needs geo-bypass)',
'url': 'https://www.onf.ca/serie/north-star/season1/episode1/',
'info_dict': {
'id': 'north-star-episode-1-observation',
'ext': 'mp4',
'title': 'North Star - Observation',
'description': 'md5:c727f370839d8a817392b9e3f23655c7',
'series': 'North Star',
'release_year': 2023,
'season': 'Season 1',
'season_number': 1,
'episode': 'Observation',
'episode_number': 1,
'uploader': 'Patrick Bossé',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {'skip_download': 'm3u8'},
}, {
'note': 'NFB episode with /film/ URL and English title (needs geo-bypass)',
'url': 'https://www.nfb.ca/film/north-star-episode-1-observation/',
'info_dict': {
'id': 'north-star-episode-1-observation',
'ext': 'mp4',
'title': 'North Star - Observation',
'description': 'md5:c727f370839d8a817392b9e3f23655c7',
'series': 'North Star',
'release_year': 2023,
'season': 'Season 1',
'season_number': 1,
'episode': 'Observation',
'episode_number': 1,
'uploader': 'Patrick Bossé',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {'skip_download': 'm3u8'},
}, {
'note': 'ONF episode with /film/ URL and French title (needs geo-bypass)',
'url': 'https://www.onf.ca/film/etoile-du-nord-episode-1-lobservation/',
'info_dict': {
'id': 'etoile-du-nord-episode-1-lobservation',
'ext': 'mp4',
'title': 'Étoile du Nord - L\'observation',
'description': 'md5:161a4617260dee3de70f509b2c9dd21b',
'series': 'Étoile du Nord',
'release_year': 2023,
'season': 'Saison 1',
'season_number': 1,
'episode': 'L\'observation',
'episode_number': 1,
'uploader': 'Patrick Bossé',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {'skip_download': 'm3u8'},
}, {
'note': 'Season 2 episode w/o episode num in id, extract from json ld',
'url': 'https://www.onf.ca/film/liste-des-choses-qui-existent-saison-2-ours',
'info_dict': {
'id': 'liste-des-choses-qui-existent-saison-2-ours',
'ext': 'mp4',
'title': 'La liste des choses qui existent - L\'ours en peluche',
'description': 'md5:d5e8d8fc5f3a7385a9cf0f509b37e28a',
'series': 'La liste des choses qui existent',
'release_year': 2022,
'season': 'Saison 2',
'season_number': 2,
'episode': 'L\'ours en peluche',
'episode_number': 12,
'uploader': 'Francis Papillon',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {'skip_download': 'm3u8'},
}, {
'note': 'NFB film /embed/player/ page',
'url': 'https://www.nfb.ca/film/afterlife/embed/player/',
'info_dict': {
'id': 'afterlife',
'ext': 'mp4',
'title': 'Afterlife',
'description': 'md5:84951394f594f1fb1e62d9c43242fdf5',
'release_year': 1978,
'duration': 420.0,
'uploader': 'Ishu Patel',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {'skip_download': 'm3u8'},
}]
    def _real_extract(self, url):
        """Extract a film or series episode from nfb.ca / onf.ca."""
        site, type_, slug = self._match_valid_url(url).group('site', 'type', 'id')
        # Need to construct the URL since we match /embed/player/ URLs as well
        webpage, urlh = self._download_webpage_handle(f'https://www.{site}.ca/{type_}/{slug}/', slug)
        # type_ can change from film to serie(s) after redirect; new slug may have episode number
        type_, slug = self._match_valid_url(urlh.url).group('type', 'id')
        player_data = self._search_json(
            r'window\.PLAYER_OPTIONS\[[^\]]+\]\s*=', webpage, 'player data', slug)
        video_id = self._match_id(player_data['overlay']['url'])  # overlay url always has unique slug
        formats, subtitles = self._extract_m3u8_formats_and_subtitles(
            player_data['source'], video_id, 'mp4', m3u8_id='hls')
        # Optional "described video" (audio-description) rendition, deprioritized
        if dv_source := url_or_none(player_data.get('dvSource')):
            fmts, subs = self._extract_m3u8_formats_and_subtitles(
                dv_source, video_id, 'mp4', m3u8_id='dv', preference=-2, fatal=False)
            for fmt in fmts:
                fmt['format_note'] = 'described video'
            formats.extend(fmts)
            self._merge_subtitles(subs, target=subtitles)
        # Films: scrape metadata from the page; episodes: use episodesData JSON.
        # NOTE(review): `slug` lands in _extract_ep_data's `fatal` parameter (truthy),
        # so missing episode data raises for series pages — confirm this is intended.
        info = {
            'id': video_id,
            'title': self._html_search_regex(
                r'["\']nfb_version_title["\']\s*:\s*["\']([^"\']+)',
                webpage, 'title', default=None),
            'description': self._html_search_regex(
                r'<[^>]+\bid=["\']tabSynopsis["\'][^>]*>\s*<p[^>]*>\s*([^<]+)',
                webpage, 'description', default=None),
            'thumbnail': url_or_none(player_data.get('poster')),
            'uploader': self._html_search_regex(
                r'<[^>]+\bitemprop=["\']director["\'][^>]*>([^<]+)', webpage, 'uploader', default=None),
            'release_year': int_or_none(self._html_search_regex(
                r'["\']nfb_version_year["\']\s*:\s*["\']([^"\']+)',
                webpage, 'release_year', default=None)),
        } if type_ == 'film' else self._extract_ep_info(self._extract_ep_data(webpage, video_id, slug), video_id)
        return merge_dicts({
            'formats': formats,
            'subtitles': subtitles,
        }, info, self._search_json_ld(webpage, video_id, default={}))
class NFBSeriesIE(NFBBaseIE):
    """Playlist extractor for whole series on nfb.ca / onf.ca."""
    IE_NAME = 'nfb:series'
    IE_DESC = 'nfb.ca and onf.ca series'
    _VALID_URL = rf'{NFBBaseIE._VALID_URL_BASE}/(?P<type>series?)/(?P<id>[^/?#&]+)/?(?:[?#]|$)'
    _TESTS = [{
        'url': 'https://www.nfb.ca/series/true-north-inside-the-rise-of-toronto-basketball/',
        'playlist_mincount': 9,
        'info_dict': {
            'id': 'true-north-inside-the-rise-of-toronto-basketball',
        },
    }, {
        'url': 'https://www.onf.ca/serie/la-liste-des-choses-qui-existent-serie/',
        'playlist_mincount': 26,
        'info_dict': {
            'id': 'la-liste-des-choses-qui-existent-serie',
        },
    }]
    def _entries(self, episodes):
        """Yield url_result entries for every episode whose embed URL NFBIE supports."""
        for episode in traverse_obj(episodes, lambda _, v: NFBIE.suitable(v['embed_url'])):
            mobj = NFBIE._match_valid_url(episode['embed_url'])
            yield self.url_result(
                mobj[0], NFBIE, **self._extract_ep_info([episode], mobj.group('id')))
    def _real_extract(self, url):
        """Build the playlist from the episodesData JSON on the first episode's page."""
        site, type_, series_id = self._match_valid_url(url).group('site', 'type', 'id')
        # French site uses 'serie'/'saison'; English site uses 'series'/'season'
        season_path = 'saison' if type_ == 'serie' else 'season'
        webpage = self._download_webpage(
            f'https://www.{site}.ca/{type_}/{series_id}/{season_path}1/episode1', series_id)
        episodes = self._extract_ep_data(webpage, series_id, fatal=True)
        return self.playlist_result(self._entries(episodes), series_id)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/usanetwork.py | yt_dlp/extractor/usanetwork.py | from .nbc import NBCIE
class USANetworkIE(NBCIE):  # XXX: Do not subclass from concrete IE
    """Extractor for usanetwork.com; reuses NBCIE's extraction logic and only
    overrides the URL pattern (the permalink group is consumed by NBCIE)."""
    _VALID_URL = r'https?(?P<permalink>://(?:www\.)?usanetwork\.com/(?:[^/]+/videos?|movies?)/(?:[^/]+/)?(?P<id>\d+))'
    _TESTS = [{
        'url': 'https://www.usanetwork.com/peacock-trailers/video/intelligence-trailer/4185302',
        'info_dict': {
            'id': '4185302',
            'ext': 'mp4',
            'title': 'Intelligence (Trailer)',
            'description': 'A maverick NSA agent enlists the help of a junior systems analyst in a workplace power grab.',
            'upload_date': '20200715',
            'timestamp': 1594785600,
            'uploader': 'NBCU-MPAT',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }]
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/buzzfeed.py | yt_dlp/extractor/buzzfeed.py | import json
import re
from .common import InfoExtractor
from .facebook import FacebookIE
class BuzzFeedIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?buzzfeed\.com/[^?#]*?/(?P<id>[^?#]+)'
_TESTS = [{
'url': 'http://www.buzzfeed.com/abagg/this-angry-ram-destroys-a-punching-bag-like-a-boss?utm_term=4ldqpia',
'info_dict': {
'id': 'this-angry-ram-destroys-a-punching-bag-like-a-boss',
'title': 'This Angry Ram Destroys A Punching Bag Like A Boss',
'description': 'Rambro!',
},
'playlist': [{
'info_dict': {
'id': 'aVCR29aE_OQ',
'ext': 'mp4',
'title': 'Angry Ram destroys a punching bag..',
'description': 'md5:c59533190ef23fd4458a5e8c8c872345',
'upload_date': '20141024',
'uploader_id': 'Buddhanz1',
'uploader': 'Angry Ram',
},
}],
}, {
'url': 'http://www.buzzfeed.com/sheridanwatson/look-at-this-cute-dog-omg?utm_term=4ldqpia',
'params': {
'skip_download': True, # Got enough YouTube download tests
},
'info_dict': {
'id': 'look-at-this-cute-dog-omg',
'description': 're:Munchkin the Teddy Bear is back ?!',
'title': 'You Need To Stop What You\'re Doing And Watching This Dog Walk On A Treadmill',
},
'playlist': [{
'info_dict': {
'id': 'mVmBL8B-In0',
'ext': 'mp4',
'title': 're:Munchkin the Teddy Bear gets her exercise',
'description': 'md5:28faab95cda6e361bcff06ec12fc21d8',
'upload_date': '20141124',
'uploader_id': 'CindysMunchkin',
'uploader': 're:^Munchkin the',
},
}],
}, {
'url': 'http://www.buzzfeed.com/craigsilverman/the-most-adorable-crash-landing-ever#.eq7pX0BAmK',
'info_dict': {
'id': 'the-most-adorable-crash-landing-ever',
'title': 'Watch This Baby Goose Make The Most Adorable Crash Landing',
'description': 'This gosling knows how to stick a landing.',
},
'playlist': [{
'md5': '763ca415512f91ca62e4621086900a23',
'info_dict': {
'id': '971793786185728',
'ext': 'mp4',
'title': 'We set up crash pads so that the goslings on our roof would have a safe landi...',
'uploader': 'Calgary Outdoor Centre-University of Calgary',
},
}],
'add_ie': ['Facebook'],
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
all_buckets = re.findall(
r'(?s)<div class="video-embed[^"]*"..*?rel:bf_bucket_data=\'([^\']+)\'',
webpage)
entries = []
for bd_json in all_buckets:
bd = json.loads(bd_json)
video = bd.get('video') or bd.get('progload_video')
if not video:
continue
entries.append(self.url_result(video['url']))
facebook_urls = FacebookIE._extract_embed_urls(url, webpage)
entries.extend([
self.url_result(facebook_url)
for facebook_url in facebook_urls])
return {
'_type': 'playlist',
'id': playlist_id,
'title': self._og_search_title(webpage),
'description': self._og_search_description(webpage),
'entries': entries,
}
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tvplayer.py | yt_dlp/extractor/tvplayer.py | from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
extract_attributes,
try_get,
urlencode_postdata,
)
class TVPlayerIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?tvplayer\.com/watch/(?P<id>[^/?#]+)'
_TEST = {
'url': 'http://tvplayer.com/watch/bbcone',
'info_dict': {
'id': '89',
'ext': 'mp4',
'title': r're:^BBC One [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
},
'params': {
# m3u8 download
'skip_download': True,
},
}
    def _real_extract(self, url):
        """Extract the live HLS stream for a TVPlayer channel page."""
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # The currently playing channel's name and ID are in data-* attributes
        current_channel = extract_attributes(self._search_regex(
            r'(<div[^>]+class="[^"]*current-channel[^"]*"[^>]*>)',
            webpage, 'channel element'))
        title = current_channel['data-name']
        resource_id = current_channel['data-id']
        token = self._search_regex(
            r'data-token=(["\'])(?P<token>(?!\1).+)\1', webpage,
            'token', group='token')
        # Exchange the page token for a stream-validation token
        context = self._download_json(
            'https://tvplayer.com/watch/context', display_id,
            'Downloading JSON context', query={
                'resource': resource_id,
                'gen': token,
            })
        validate = context['validate']
        platform = try_get(
            context, lambda x: x['platform']['key'], str) or 'firefox'
        try:
            response = self._download_json(
                'http://api.tvplayer.com/api/v2/stream/live',
                display_id, 'Downloading JSON stream', headers={
                    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
                }, data=urlencode_postdata({
                    'id': resource_id,
                    'service': 1,
                    'platform': platform,
                    'validate': validate,
                }))['tvplayer']['response']
        except ExtractorError as e:
            # The API reports failures with an HTTP error status but a JSON body
            # containing a human-readable 'error' message
            if isinstance(e.cause, HTTPError):
                response = self._parse_json(
                    e.cause.response.read().decode(), resource_id)['tvplayer']['response']
                raise ExtractorError(
                    '{} said: {}'.format(self.IE_NAME, response['error']), expected=True)
            raise
        formats = self._extract_m3u8_formats(response['stream'], display_id, 'mp4')
        return {
            'id': resource_id,
            'display_id': display_id,
            'title': title,
            'formats': formats,
            'is_live': True,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tvigle.py | yt_dlp/extractor/tvigle.py | from .common import InfoExtractor
from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
parse_age_limit,
try_get,
url_or_none,
)
class TvigleIE(InfoExtractor):
    """Extractor for videos hosted on tvigle.ru and cloud.tvigle.ru embeds."""
    IE_NAME = 'tvigle'
    IE_DESC = 'Интернет-телевидение Tvigle.ru'
    _VALID_URL = r'https?://(?:www\.)?(?:tvigle\.ru/(?:[^/]+/)+(?P<display_id>[^/]+)/$|cloud\.tvigle\.ru/video/(?P<id>\d+))'
    _EMBED_REGEX = [r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//cloud\.tvigle\.ru/video/.+?)\1']
    _GEO_BYPASS = False
    _GEO_COUNTRIES = ['RU']

    _TESTS = [
        {
            'url': 'http://www.tvigle.ru/video/sokrat/',
            'info_dict': {
                'id': '1848932',
                'display_id': 'sokrat',
                'ext': 'mp4',
                'title': 'Сократ',
                'description': 'md5:d6b92ffb7217b4b8ebad2e7665253c17',
                'duration': 6586,
                'age_limit': 12,
            },
            'skip': 'georestricted',
        },
        {
            'url': 'http://www.tvigle.ru/video/vladimir-vysotskii/vedushchii-teleprogrammy-60-minut-ssha-o-vladimire-vysotskom/',
            'info_dict': {
                'id': '5142516',
                'ext': 'flv',
                'title': 'Ведущий телепрограммы «60 минут» (США) о Владимире Высоцком',
                'description': 'md5:027f7dc872948f14c96d19b4178428a4',
                'duration': 186.080,
                'age_limit': 0,
            },
            'skip': 'georestricted',
        }, {
            'url': 'https://cloud.tvigle.ru/video/5267604/',
            'only_matching': True,
        },
    ]

    def _real_extract(self, url):
        """Resolve the numeric video id, query the playback API, and build
        HLS/DASH/progressive formats for the first playlist item.

        Raises a geo-restriction error or an explicit ExtractorError when the
        API reports a problem (or returns no videos) instead of crashing.
        """
        mobj = self._match_valid_url(url)
        video_id = mobj.group('id')
        display_id = mobj.group('display_id')

        if not video_id:
            # Site URLs only carry a slug; scrape the numeric id from the page.
            webpage = self._download_webpage(url, display_id)
            video_id = self._html_search_regex(
                (r'<div[^>]+class=["\']player["\'][^>]+id=["\'](\d+)',
                 r'cloudId\s*=\s*["\'](\d+)',
                 r'class="video-preview current_playing" id="(\d+)"'),
                webpage, 'video id')

        video_data = self._download_json(
            f'http://cloud.tvigle.ru/api/play/video/{video_id}/', display_id)

        item = video_data['playlist']['items'][0]

        videos = item.get('videos')

        error_message = item.get('errorMessage')
        if not videos and error_message:
            if item.get('isGeoBlocked') is True:
                self.raise_geo_restricted(
                    msg=error_message, countries=self._GEO_COUNTRIES)
            else:
                raise ExtractorError(
                    f'{self.IE_NAME} returned error: {error_message}',
                    expected=True)
        if not videos:
            # Previously the code fell through to item['videos'] and crashed
            # with a KeyError/TypeError when the API returned no videos and no
            # error message; fail with a clear, expected error instead.
            raise ExtractorError(
                f'{self.IE_NAME} returned no playable videos', expected=True)

        title = item['title']
        description = item.get('description')
        thumbnail = item.get('thumbnail')
        duration = float_or_none(item.get('durationMilliseconds'), 1000)
        age_limit = parse_age_limit(item.get('ageRestrictions'))

        formats = []
        for vcodec, url_or_fmts in videos.items():
            if vcodec == 'hls':
                m3u8_url = url_or_none(url_or_fmts)
                if not m3u8_url:
                    continue
                formats.extend(self._extract_m3u8_formats(
                    m3u8_url, video_id, ext='mp4', entry_protocol='m3u8_native',
                    m3u8_id='hls', fatal=False))
            elif vcodec == 'dash':
                mpd_url = url_or_none(url_or_fmts)
                if not mpd_url:
                    continue
                formats.extend(self._extract_mpd_formats(
                    mpd_url, video_id, mpd_id='dash', fatal=False))
            else:
                # Progressive downloads: a mapping of quality label -> URL.
                if not isinstance(url_or_fmts, dict):
                    continue
                for format_id, video_url in url_or_fmts.items():
                    if format_id == 'm3u8':
                        continue
                    video_url = url_or_none(video_url)
                    if not video_url:
                        continue
                    height = self._search_regex(
                        r'^(\d+)[pP]$', format_id, 'height', default=None)
                    filesize = int_or_none(try_get(
                        item, lambda x: x['video_files_size'][vcodec][format_id]))
                    formats.append({
                        'url': video_url,
                        'format_id': f'{vcodec}-{format_id}',
                        'vcodec': vcodec,
                        'height': int_or_none(height),
                        'filesize': filesize,
                    })

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'age_limit': age_limit,
            'formats': formats,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/amp.py | yt_dlp/extractor/amp.py | from .common import InfoExtractor
from ..utils import (
ExtractorError,
determine_ext,
int_or_none,
mimetype2ext,
parse_iso8601,
strip_jsonp,
unified_timestamp,
url_or_none,
)
class AMPIE(InfoExtractor):  # XXX: Conventionally, base classes should end with BaseIE/InfoExtractor
    # parse Akamai Adaptive Media Player feed
    def _extract_feed_info(self, url):
        """Download an Akamai AMP JSON(P) feed at *url* and map its single
        channel item to an info dict (thumbnails, subtitles, formats,
        timestamp). Intended to be called by concrete site extractors."""
        feed = self._download_json(
            url, None, 'Downloading Akamai AMP feed',
            'Unable to download Akamai AMP feed', transform_source=strip_jsonp)
        item = feed.get('channel', {}).get('item')
        if not item:
            raise ExtractorError('{} said: {}'.format(self.IE_NAME, feed['error']))

        video_id = item['guid']

        def get_media_node(name, default=None):
            # A feed field may live under the 'media-group' node, directly on
            # the item with a 'media-' prefix, or under its bare name — try
            # each location in that order.
            media_name = f'media-{name}'
            media_group = item.get('media-group') or item
            return media_group.get(media_name) or item.get(media_name) or item.get(name, default)

        thumbnails = []
        media_thumbnail = get_media_node('thumbnail')
        if media_thumbnail:
            # A single thumbnail is serialized as a dict, multiple as a list.
            if isinstance(media_thumbnail, dict):
                media_thumbnail = [media_thumbnail]
            for thumbnail_data in media_thumbnail:
                thumbnail = thumbnail_data.get('@attributes', {})
                thumbnail_url = url_or_none(thumbnail.get('url'))
                if not thumbnail_url:
                    continue
                thumbnails.append({
                    'url': self._proto_relative_url(thumbnail_url, 'http:'),
                    'width': int_or_none(thumbnail.get('width')),
                    'height': int_or_none(thumbnail.get('height')),
                })

        subtitles = {}
        media_subtitle = get_media_node('subTitle')
        if media_subtitle:
            # Same dict-or-list normalization as for thumbnails.
            if isinstance(media_subtitle, dict):
                media_subtitle = [media_subtitle]
            for subtitle_data in media_subtitle:
                subtitle = subtitle_data.get('@attributes', {})
                subtitle_href = url_or_none(subtitle.get('href'))
                if not subtitle_href:
                    continue
                subtitles.setdefault(subtitle.get('lang') or 'en', []).append({
                    'url': subtitle_href,
                    'ext': mimetype2ext(subtitle.get('type')) or determine_ext(subtitle_href),
                })

        formats = []
        media_content = get_media_node('content')
        # NOTE(review): if the feed carries no 'content' node, media_content is
        # None and this loop (and media_content[0] below) will raise —
        # presumably every AMP feed includes it; confirm before relying on it.
        if isinstance(media_content, dict):
            media_content = [media_content]
        for media_data in media_content:
            media = media_data.get('@attributes', {})
            media_url = url_or_none(media.get('url'))
            if not media_url:
                continue
            ext = mimetype2ext(media.get('type')) or determine_ext(media_url)
            if ext == 'f4m':
                # Akamai HDS manifests require the hdcore/plugin query params.
                formats.extend(self._extract_f4m_formats(
                    media_url + '?hdcore=3.4.0&plugin=aasp-3.4.0.132.124',
                    video_id, f4m_id='hds', fatal=False))
            elif ext == 'm3u8':
                fmts, subs = self._extract_m3u8_formats_and_subtitles(
                    media_url, video_id, 'mp4', m3u8_id='hls', fatal=False)
                formats.extend(fmts)
                # HLS playlists may embed their own subtitle tracks; merge
                # them with the feed-level ones collected above.
                self._merge_subtitles(subs, target=subtitles)
            else:
                formats.append({
                    'format_id': media_data.get('media-category', {}).get('@attributes', {}).get('label'),
                    'url': media_url,
                    'tbr': int_or_none(media.get('bitrate')),
                    'filesize': int_or_none(media.get('fileSize')),
                    'ext': ext,
                })

        # 'pubDate' uses a space-delimited date format; 'dc-date' is ISO 8601.
        timestamp = unified_timestamp(item.get('pubDate'), ' ') or parse_iso8601(item.get('dc-date'))

        return {
            'id': video_id,
            'title': get_media_node('title'),
            'description': get_media_node('description'),
            'thumbnails': thumbnails,
            'timestamp': timestamp,
            'duration': int_or_none(media_content[0].get('@attributes', {}).get('duration')),
            'subtitles': subtitles,
            'formats': formats,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/zhihu.py | yt_dlp/extractor/zhihu.py | from .common import InfoExtractor
from ..utils import float_or_none, format_field, int_or_none
class ZhihuIE(InfoExtractor):
    """Extractor for Zhihu 'zvideo' pages, backed by the public v4 JSON API."""
    _VALID_URL = r'https?://(?:www\.)?zhihu\.com/zvideo/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'https://www.zhihu.com/zvideo/1342930761977176064',
        'md5': 'c8d4c9cd72dd58e6f9bc9c2c84266464',
        'info_dict': {
            'id': '1342930761977176064',
            'ext': 'mp4',
            'title': '写春联也太难了吧!',
            'thumbnail': r're:^https?://.*\.jpg',
            'uploader': '桥半舫',
            'timestamp': 1612959715,
            'upload_date': '20210210',
            'uploader_id': '244ecb13b0fd7daf92235288c8ca3365',
            'duration': 146.333,
            'view_count': int,
            'like_count': int,
            'comment_count': int,
        },
    }

    def _real_extract(self, url):
        """Fetch the zvideo JSON and map its playlist entries to formats."""
        video_id = self._match_id(url)
        data = self._download_json(
            'https://www.zhihu.com/api/v4/zvideos/' + video_id, video_id)
        title = data['title']
        video_info = data.get('video') or {}

        formats = []
        # The playlist maps a quality label to its stream descriptor.
        for fmt_name, fmt in (video_info.get('playlist') or {}).items():
            src = fmt.get('url') or fmt.get('play_url')
            if not src:
                continue
            formats.append({
                'asr': int_or_none(fmt.get('sample_rate')),
                'filesize': int_or_none(fmt.get('size')),
                'format_id': fmt_name,
                'fps': int_or_none(fmt.get('fps')),
                'height': int_or_none(fmt.get('height')),
                'tbr': float_or_none(fmt.get('bitrate')),
                'url': src,
                'width': int_or_none(fmt.get('width')),
            })

        uploader = data.get('author') or {}
        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': video_info.get('thumbnail') or data.get('image_url'),
            'uploader': uploader.get('name'),
            'timestamp': int_or_none(data.get('published_at')),
            'uploader_id': uploader.get('id'),
            'uploader_url': format_field(
                uploader.get('url_token'), None, 'https://www.zhihu.com/people/%s'),
            'duration': float_or_none(video_info.get('duration')),
            'view_count': int_or_none(data.get('play_count')),
            'like_count': int_or_none(data.get('liked_count')),
            'comment_count': int_or_none(data.get('comment_count')),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ruv.py | yt_dlp/extractor/ruv.py | from .common import InfoExtractor
from ..utils import (
determine_ext,
parse_duration,
traverse_obj,
unified_timestamp,
)
class RuvIE(InfoExtractor):
    """Extractor for audio/video items on ruv.is (RÚV, Icelandic public TV)."""
    _VALID_URL = r'https?://(?:www\.)?ruv\.is/(?:sarpurinn/[^/]+|node)/(?P<id>[^/]+(?:/\d+)?)'
    _TESTS = [{
        # m3u8
        'url': 'http://ruv.is/sarpurinn/ruv-aukaras/fh-valur/20170516',
        'md5': '66347652f4e13e71936817102acc1724',
        'info_dict': {
            'id': '1144499',
            'display_id': 'fh-valur/20170516',
            'ext': 'mp4',
            'title': 'FH - Valur',
            'description': 'Bein útsending frá 3. leik FH og Vals í úrslitum Olísdeildar karla í handbolta.',
            'timestamp': 1494963600,
            'upload_date': '20170516',
        },
    }, {
        # mp3
        'url': 'http://ruv.is/sarpurinn/ras-2/morgunutvarpid/20170619',
        'md5': '395ea250c8a13e5fdb39d4670ef85378',
        'info_dict': {
            'id': '1153630',
            'display_id': 'morgunutvarpid/20170619',
            'ext': 'mp3',
            'title': 'Morgunútvarpið',
            'description': 'md5:a4cf1202c0a1645ca096b06525915418',
            'timestamp': 1497855000,
            'upload_date': '20170619',
        },
    }, {
        'url': 'http://ruv.is/sarpurinn/ruv/frettir/20170614',
        'only_matching': True,
    }, {
        'url': 'http://www.ruv.is/node/1151854',
        'only_matching': True,
    }, {
        'url': 'http://ruv.is/sarpurinn/klippa/secret-soltice-hefst-a-morgun',
        'only_matching': True,
    }, {
        'url': 'http://ruv.is/sarpurinn/ras-1/morgunvaktin/20170619',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Scrape the media URL from the page's player JS and build formats."""
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        # Template matching `video.<field> = "..."` assignments in the page.
        field_tmpl = r'video\.%s\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1'

        media_url = self._html_search_regex(
            field_tmpl % 'src', webpage, 'video URL', group='url')
        video_id = self._search_regex(
            r'<link\b[^>]+\bhref=["\']https?://www\.ruv\.is/node/(\d+)',
            webpage, 'video id', default=display_id)

        ext = determine_ext(media_url)
        if ext == 'm3u8':
            formats = self._extract_m3u8_formats(
                media_url, video_id, 'mp4', entry_protocol='m3u8_native',
                m3u8_id='hls')
        elif ext == 'mp3':
            formats = [{
                'format_id': 'mp3',
                'url': media_url,
                'vcodec': 'none',
            }]
        else:
            formats = [{
                'url': media_url,
            }]

        return {
            'id': video_id,
            'display_id': display_id,
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage, default=None),
            'thumbnail': self._og_search_thumbnail(
                webpage, default=None) or self._search_regex(
                field_tmpl % 'poster', webpage, 'thumbnail', fatal=False),
            'timestamp': unified_timestamp(self._html_search_meta(
                'article:published_time', webpage, 'timestamp', fatal=False)),
            'formats': formats,
        }
class RuvSpilaIE(InfoExtractor):
    """Extractor for the RÚV 'spila' (player) pages, backed by a GraphQL API."""
    IE_NAME = 'ruv.is:spila'
    _VALID_URL = r'https?://(?:www\.)?ruv\.is/(?:(?:sjon|ut)varp|(?:krakka|ung)ruv)/spila/.+/(?P<series_id>[0-9]+)/(?P<id>[a-z0-9]+)'
    _TESTS = [{
        'url': 'https://www.ruv.is/sjonvarp/spila/ithrottir/30657/9jcnd4',
        'info_dict': {
            'id': '9jcnd4',
            'ext': 'mp4',
            'title': '01.02.2022',
            'chapters': 'count:4',
            'timestamp': 1643743500,
            'upload_date': '20220201',
            'thumbnail': 'https://d38kdhuogyllre.cloudfront.net/fit-in/1960x/filters:quality(65)/hd_posters/94boog-iti3jg.jpg',
            'description': 'Íþróttafréttir.',
            'age_limit': 0,
        },
    }, {
        'url': 'https://www.ruv.is/utvarp/spila/i-ljosi-sogunnar/23795/7hqkre',
        'info_dict': {
            'id': '7hqkre',
            'ext': 'mp3',
            'thumbnail': 'https://d38kdhuogyllre.cloudfront.net/fit-in/1960x/filters:quality(65)/hd_posters/7hqkre-7uepao.jpg',
            'description': 'md5:8d7046549daff35e9a3190dc9901a120',
            'chapters': [],
            'upload_date': '20220204',
            'timestamp': 1643965500,
            'title': 'Nellie Bly II',
            'age_limit': 0,
        },
    }, {
        'url': 'https://www.ruv.is/ungruv/spila/ungruv/28046/8beuph',
        'only_matching': True,
    }, {
        'url': 'https://www.ruv.is/krakkaruv/spila/krakkafrettir/30712/9jbgb0',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Query the GraphQL endpoint for the requested episode and map it
        to an info dict with subtitles and chapter (clip) markers."""
        display_id, series_id = self._match_valid_url(url).group('id', 'series_id')
        program = self._download_json(
            'https://www.ruv.is/gql/', display_id, query={'query': '''{
                Program(id: %s){
                    title image description short_description
                    episodes(id: {value: "%s"}) {
                        rating title duration file image firstrun description
                        clips {
                            time text
                        }
                        subtitles {
                            name value
                        }
                    }
                }
            }''' % (series_id, display_id)})['data']['Program']  # noqa: UP031
        episode = program['episodes'][0]

        subs = {}
        # The API may return null instead of an empty subtitle list; guard
        # against iterating None.
        for trk in episode.get('subtitles') or []:
            if trk.get('name') and trk.get('value'):
                subs.setdefault(trk['name'], []).append({'url': trk['value'], 'ext': 'vtt'})

        media_url = episode['file']
        if determine_ext(media_url) == 'm3u8':
            formats = self._extract_m3u8_formats(media_url, display_id)
        else:
            formats = [{'url': media_url}]

        clips = [
            {'start_time': parse_duration(c.get('time')), 'title': c.get('text')}
            for c in episode.get('clips') or []]

        return {
            'id': display_id,
            'title': traverse_obj(program, ('episodes', 0, 'title'), 'title'),
            'description': traverse_obj(
                program, ('episodes', 0, 'description'), 'description', 'short_description',
                expected_type=lambda x: x or None),
            'subtitles': subs,
            # 'image' may be null (not just absent), so don't rely on the
            # .get() default before calling .replace().
            'thumbnail': (episode.get('image') or '').replace('$$IMAGESIZE$$', '1960') or None,
            'timestamp': unified_timestamp(episode.get('firstrun')),
            'formats': formats,
            'age_limit': episode.get('rating'),
            'chapters': clips,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tnaflix.py | yt_dlp/extractor/tnaflix.py | import re
from .common import InfoExtractor
from ..utils import (
fix_xml_ampersands,
float_or_none,
int_or_none,
parse_duration,
str_to_int,
unescapeHTML,
url_basename,
xpath_text,
)
class TNAFlixNetworkBaseIE(InfoExtractor):
    """Shared extraction logic for the TNAFlix network of sites
    (TNAFlix, EMPFlix, MovieFap) and their embed player.

    Subclasses override the regex class attributes below to match each
    site's markup; attributes left as None are simply not extracted.
    """

    # May be overridden in descendants if necessary
    _CONFIG_REGEX = [
        r'flashvars\.config\s*=\s*escape\("(?P<url>[^"]+)"',
        r'<input[^>]+name="config\d?" value="(?P<url>[^"]+)"',
        r'config\s*=\s*(["\'])(?P<url>(?:https?:)?//(?:(?!\1).)+)\1',
    ]
    _TITLE_REGEX = r'<input[^>]+name="title" value="([^"]+)"'
    _DESCRIPTION_REGEX = r'<input[^>]+name="description" value="([^"]+)"'
    _UPLOADER_REGEX = r'<input[^>]+name="username" value="([^"]+)"'
    _VIEW_COUNT_REGEX = None
    _COMMENT_COUNT_REGEX = None
    _AVERAGE_RATING_REGEX = None
    _CATEGORIES_REGEX = r'<li[^>]*>\s*<span[^>]+class="infoTitle"[^>]*>Categories:</span>\s*<span[^>]+class="listView"[^>]*>(.+?)</span>\s*</li>'

    def _extract_thumbnails(self, flix_xml):
        """Build the list of timeline thumbnails from the config XML, or
        return None when the timeline data is absent or inconsistent."""

        def get_child(elem, names):
            # Return the first existing child among alternative tag names
            # (sites use different tag spellings for the same data).
            for name in names:
                child = elem.find(name)
                if child is not None:
                    return child

        timeline = get_child(flix_xml, ['timeline', 'rolloverBarImage'])
        if timeline is None:
            return
        pattern_el = get_child(timeline, ['imagePattern', 'pattern'])
        if pattern_el is None or not pattern_el.text:
            return

        first_el = get_child(timeline, ['imageFirst', 'first'])
        last_el = get_child(timeline, ['imageLast', 'last'])
        if first_el is None or last_el is None:
            return

        first_text = first_el.text
        last_text = last_el.text
        if not first_text.isdigit() or not last_text.isdigit():
            return

        first = int(first_text)
        last = int(last_text)
        if first > last:
            return

        width = int_or_none(xpath_text(timeline, './imageWidth', 'thumbnail width'))
        height = int_or_none(xpath_text(timeline, './imageHeight', 'thumbnail height'))

        return [{
            # '#' in the pattern is the placeholder for the frame number.
            'url': self._proto_relative_url(pattern_el.text.replace('#', str(i)), 'http:'),
            'width': width,
            'height': height,
        } for i in range(first, last + 1)]

    def _real_extract(self, url):
        """Extract a video by locating the player config: an XML config URL
        (TNAFlix/MovieFap style) or, failing that, the JSON player endpoint
        (EMPFlix style)."""
        mobj = self._match_valid_url(url)
        video_id, host = mobj.group('id', 'host')
        # Pick the first non-empty display id group; fall back to the id.
        for display_id_key in ('display_id', 'display_id_2'):
            if display_id_key in mobj.groupdict():
                display_id = mobj.group(display_id_key)
                if display_id:
                    break
        else:
            display_id = video_id

        webpage = self._download_webpage(url, display_id)
        inputs = self._hidden_inputs(webpage)
        query = {}

        # check for MovieFap-style config
        cfg_url = self._proto_relative_url(self._html_search_regex(
            self._CONFIG_REGEX, webpage, 'flashvars.config', default=None,
            group='url'), 'http:')

        if not cfg_url:
            cfg_url = inputs.get('config')

        # check for TNAFlix-style config
        if not cfg_url and inputs.get('vkey') and inputs.get('nkey'):
            cfg_url = f'http://cdn-fck.{host}.com/{host}/{inputs["vkey"]}.fid'
            query.update({
                'key': inputs['nkey'],
                'VID': video_id,
                'premium': '1',
                'vip': '1',
                'alpha': '',
            })

        formats, json_ld = [], {}

        # TNAFlix and MovieFap extraction
        if cfg_url:
            cfg_xml = self._download_xml(
                cfg_url, display_id, 'Downloading metadata',
                transform_source=fix_xml_ampersands, headers={'Referer': url}, query=query)

            def extract_video_url(vl):
                # Any URL modification now results in HTTP Error 403: Forbidden
                return unescapeHTML(vl.text)

            video_link = cfg_xml.find('./videoLink')
            if video_link is not None:
                formats.append({
                    'url': extract_video_url(video_link),
                    'ext': xpath_text(cfg_xml, './videoConfig/type', 'type', default='flv'),
                })

            for item in cfg_xml.findall('./quality/item'):
                video_link = item.find('./videoLink')
                if video_link is None:
                    continue
                res = item.find('res')
                format_id = None if res is None else res.text
                height = int_or_none(self._search_regex(
                    r'^(\d+)[pP]', format_id, 'height', default=None))
                formats.append({
                    'url': self._proto_relative_url(extract_video_url(video_link), 'http:'),
                    'format_id': format_id,
                    'height': height,
                })

            thumbnails = self._extract_thumbnails(cfg_xml) or []
            thumbnails.append({
                'url': self._proto_relative_url(xpath_text(cfg_xml, './startThumb', 'thumbnail'), 'http:'),
            })

        # check for EMPFlix-style JSON and extract
        else:
            player = self._download_json(
                f'http://www.{host}.com/ajax/video-player/{video_id}', video_id,
                headers={'Referer': url}).get('html', '')
            for mobj in re.finditer(r'<source src="(?P<src>[^"]+)"', player):
                video_url = mobj.group('src')
                height = self._search_regex(r'-(\d+)p\.', url_basename(video_url), 'height', default=None)
                formats.append({
                    'url': self._proto_relative_url(video_url, 'http:'),
                    'ext': url_basename(video_url).split('.')[-1],
                    'height': int_or_none(height),
                    'format_id': f'{height}p' if height else url_basename(video_url).split('.')[0],
                })
            thumbnail = self._proto_relative_url(self._search_regex(
                r'data-poster="([^"]+)"', player, 'thumbnail', default=None), 'http:')
            thumbnails = [{'url': thumbnail}] if thumbnail else None

        json_ld = self._search_json_ld(webpage, display_id, default={})

        def extract_field(pattern, name):
            # Regexes set to None in a subclass mean "field not available".
            return self._html_search_regex(pattern, webpage, name, default=None) if pattern else None

        return {
            'id': video_id,
            'display_id': display_id,
            'title': (extract_field(self._TITLE_REGEX, 'title')
                      or self._og_search_title(webpage, default=None)
                      or json_ld.get('title')),
            'description': extract_field(self._DESCRIPTION_REGEX, 'description') or json_ld.get('description'),
            'thumbnails': thumbnails,
            'duration': parse_duration(
                self._html_search_meta('duration', webpage, 'duration', default=None)) or json_ld.get('duration'),
            'age_limit': self._rta_search(webpage) or 18,
            'uploader': extract_field(self._UPLOADER_REGEX, 'uploader') or json_ld.get('uploader'),
            'view_count': str_to_int(extract_field(self._VIEW_COUNT_REGEX, 'view count')),
            'comment_count': str_to_int(extract_field(self._COMMENT_COUNT_REGEX, 'comment count')),
            'average_rating': float_or_none(extract_field(self._AVERAGE_RATING_REGEX, 'average rating')),
            'categories': list(map(str.strip, (extract_field(self._CATEGORIES_REGEX, 'categories') or '').split(','))),
            'formats': formats,
        }
class TNAFlixNetworkEmbedIE(TNAFlixNetworkBaseIE):
    """Resolve player.tnaflix.com / player.empflix.com embeds to the
    corresponding full site video page."""
    _VALID_URL = r'https?://player\.(?P<host>tnaflix|empflix)\.com/video/(?P<id>\d+)'
    _EMBED_REGEX = [r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//player\.(?:tna|emp)flix\.com/video/\d+)\1']

    _TESTS = [{
        'url': 'https://player.tnaflix.com/video/6538',
        'info_dict': {
            'id': '6538',
            'display_id': '6538',
            'ext': 'mp4',
            'title': 'Educational xxx video (G Spot)',
            'description': 'md5:b4fab8f88a8621c8fabd361a173fe5b8',
            'thumbnail': r're:https?://.*\.jpg$',
            'age_limit': 18,
            'duration': 164,
            'uploader': 'bobwhite39',
            'categories': list,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://player.empflix.com/video/33051',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # Hand off to the full site extractor for the matching host.
        host, video_id = self._match_valid_url(url).group('host', 'id')
        return self.url_result(f'http://www.{host}.com/category/{video_id}/video{video_id}')
class TNAEMPFlixBaseIE(TNAFlixNetworkBaseIE):
    """Page-scraping regexes shared by the TNAFlix and EMPFlix site layouts."""
    _DESCRIPTION_REGEX = r'(?s)>Description:</[^>]+>(.+?)<'
    _UPLOADER_REGEX = r'<span>by\s*<a[^>]+\bhref=["\']/profile/[^>]+>([^<]+)<'
    _CATEGORIES_REGEX = r'(?s)<span[^>]*>Categories:</span>(.+?)</div>'
class TNAFlixIE(TNAEMPFlixBaseIE):
    """Extractor for individual TNAFlix video pages; all logic lives in the
    base classes, this class only supplies URL pattern and title regex."""
    _VALID_URL = r'https?://(?:www\.)?(?P<host>tnaflix)\.com/[^/]+/(?P<display_id>[^/]+)/video(?P<id>\d+)'

    _TITLE_REGEX = r'<title>(.+?) - (?:TNAFlix Porn Videos|TNAFlix\.com)</title>'

    _TESTS = [{
        # anonymous uploader, no categories
        'url': 'http://www.tnaflix.com/porn-stars/Carmella-Decesare-striptease/video553878',
        'md5': '7e569419fe6d69543d01e6be22f5f7c4',
        'info_dict': {
            'id': '553878',
            'display_id': 'Carmella-Decesare-striptease',
            'ext': 'mp4',
            'title': 'Carmella Decesare - striptease',
            'thumbnail': r're:https?://.*\.jpg$',
            'duration': 91,
            'age_limit': 18,
            'categories': list,
        },
    }, {
        # non-anonymous uploader, categories
        'url': 'https://www.tnaflix.com/teen-porn/Educational-xxx-video/video6538',
        'md5': 'add5a9fa7f4da53d3e9d0845ac58f20c',
        'info_dict': {
            'id': '6538',
            'display_id': 'Educational-xxx-video',
            'ext': 'mp4',
            'title': 'Educational xxx video (G Spot)',
            'description': 'md5:b4fab8f88a8621c8fabd361a173fe5b8',
            'thumbnail': r're:https?://.*\.jpg$',
            'duration': 164,
            'age_limit': 18,
            'uploader': 'bobwhite39',
            'categories': list,
        },
    }, {
        'url': 'https://www.tnaflix.com/amateur-porn/bunzHD-Ms.Donk/video358632',
        'only_matching': True,
    }]
class EMPFlixIE(TNAEMPFlixBaseIE):
    """Extractor for EMPFlix video pages; supports both the legacy
    '/videos/<slug>-<id>' and the newer '/<category>/<slug>/video<id>' URLs."""
    _VALID_URL = r'https?://(?:www\.)?(?P<host>empflix)\.com/(?:videos/(?P<display_id>.+?)-|[^/]+/(?P<display_id_2>[^/]+)/video)(?P<id>[0-9]+)'

    _TESTS = [{
        'url': 'http://www.empflix.com/amateur-porn/Amateur-Finger-Fuck/video33051',
        'md5': 'd761c7b26601bd14476cd9512f2654fc',
        'info_dict': {
            'id': '33051',
            'display_id': 'Amateur-Finger-Fuck',
            'ext': 'mp4',
            'title': 'Amateur Finger Fuck',
            'description': 'Amateur solo finger fucking.',
            'thumbnail': r're:https?://.*\.jpg$',
            'duration': 83,
            'age_limit': 18,
            'categories': list,
        },
    }, {
        'url': 'http://www.empflix.com/videos/[AROMA][ARMD-718]-Aoi-Yoshino-Sawa-25826.html',
        'only_matching': True,
    }, {
        'url': 'http://www.empflix.com/videos/Amateur-Finger-Fuck-33051.html',
        'only_matching': True,
    }]
class MovieFapIE(TNAFlixNetworkBaseIE):
    """Extractor for MovieFap video pages; adds view/comment/rating regexes
    on top of the shared network base class."""
    _VALID_URL = r'https?://(?:www\.)?(?P<host>moviefap)\.com/videos/(?P<id>[0-9a-f]+)/(?P<display_id>[^/]+)\.html'

    _VIEW_COUNT_REGEX = r'<br>Views\s*<strong>([\d,.]+)</strong>'
    _COMMENT_COUNT_REGEX = r'<span[^>]+id="comCount"[^>]*>([\d,.]+)</span>'
    _AVERAGE_RATING_REGEX = r'Current Rating\s*<br>\s*<strong>([\d.]+)</strong>'
    _CATEGORIES_REGEX = r'(?s)<div[^>]+id="vid_info"[^>]*>\s*<div[^>]*>.+?</div>(.*?)<br>'

    _TESTS = [{
        # normal, multi-format video
        'url': 'http://www.moviefap.com/videos/be9867c9416c19f54a4a/experienced-milf-amazing-handjob.html',
        'md5': '26624b4e2523051b550067d547615906',
        'info_dict': {
            'id': 'be9867c9416c19f54a4a',
            'display_id': 'experienced-milf-amazing-handjob',
            'ext': 'mp4',
            'title': 'Experienced MILF Amazing Handjob',
            'description': 'Experienced MILF giving an Amazing Handjob',
            'thumbnail': r're:https?://.*\.jpg$',
            'age_limit': 18,
            'uploader': 'darvinfred06',
            'view_count': int,
            'comment_count': int,
            'average_rating': float,
            'categories': ['Amateur', 'Masturbation', 'Mature', 'Flashing'],
        },
    }, {
        # quirky single-format case where the extension is given as fid, but the video is really an flv
        'url': 'http://www.moviefap.com/videos/e5da0d3edce5404418f5/jeune-couple-russe.html',
        'md5': 'fa56683e291fc80635907168a743c9ad',
        'info_dict': {
            'id': 'e5da0d3edce5404418f5',
            'display_id': 'jeune-couple-russe',
            'ext': 'flv',
            'title': 'Jeune Couple Russe',
            'description': 'Amateur',
            'thumbnail': r're:https?://.*\.jpg$',
            'age_limit': 18,
            'uploader': 'whiskeyjar',
            'view_count': int,
            'comment_count': int,
            'average_rating': float,
            'categories': ['Amateur', 'Teen'],
        },
        'skip': 'This video does not exist',
    }]
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/urort.py | yt_dlp/extractor/urort.py | import urllib.parse
from .common import InfoExtractor
from ..utils import unified_strdate
class UrortIE(InfoExtractor):
    """Extract every track published by a band on NRK P3 Urørt as a playlist."""
    _WORKING = False
    IE_DESC = 'NRK P3 Urørt'
    _VALID_URL = r'https?://(?:www\.)?urort\.p3\.no/#!/Band/(?P<id>[^/]+)$'

    _TEST = {
        'url': 'https://urort.p3.no/#!/Band/Gerilja',
        'md5': '5ed31a924be8a05e47812678a86e127b',
        'info_dict': {
            'id': '33124-24',
            'ext': 'mp3',
            'title': 'The Bomb',
            'thumbnail': r're:^https?://.+\.jpg',
            'uploader': 'Gerilja',
            'uploader_id': 'Gerilja',
            'upload_date': '20100323',
        },
        'params': {
            'matchtitle': '^The Bomb$',  # To test, we want just one video
        },
    }

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        # The OData feed is filtered by the band's internal URL slug.
        band_filter = urllib.parse.quote(f"InternalBandUrl eq '{playlist_id}'")
        feed_url = f'http://urort.p3.no/breeze/urort/TrackDTOViews?$filter={band_filter}&$orderby=Released%20desc&$expand=Tags%2CFiles'
        songs = self._download_json(feed_url, playlist_id)

        entries = []
        for song in songs:
            formats = []
            for track_file in song['Files']:
                formats.append({
                    'tbr': track_file.get('Quality'),
                    'ext': track_file['FileType'],
                    'format_id': '{}-{}'.format(track_file['FileType'], track_file.get('Quality', '')),
                    'url': 'http://p3urort.blob.core.windows.net/tracks/{}'.format(track_file['FileRef']),
                    # Prefer mp3 over other file types.
                    'quality': 3 if track_file['FileType'] == 'mp3' else 2,
                })
            entries.append({
                'id': '%d-%s' % (song['BandId'], song['$id']),
                'title': song['Title'],
                'uploader_id': playlist_id,
                'uploader': song.get('BandName', playlist_id),
                'thumbnail': 'http://urort.p3.no/cloud/images/{}'.format(song['Image']),
                'upload_date': unified_strdate(song.get('Released')),
                'formats': formats,
            })

        return {
            '_type': 'playlist',
            'id': playlist_id,
            'title': playlist_id,
            'entries': entries,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/playsuisse.py | yt_dlp/extractor/playsuisse.py | import base64
import hashlib
import json
import uuid
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
join_nonempty,
parse_qs,
update_url_query,
urlencode_postdata,
)
from ..utils.traversal import traverse_obj, unpack
class PlaySuisseIE(InfoExtractor):
_NETRC_MACHINE = 'playsuisse'
_VALID_URL = r'https?://(?:www\.)?playsuisse\.ch/(?:watch|detail)/(?:[^#]*[?&]episodeId=)?(?P<id>[0-9]+)'
_TESTS = [
{
# Old URL
'url': 'https://www.playsuisse.ch/watch/763211/0',
'only_matching': True,
},
{
# episode in a series
'url': 'https://www.playsuisse.ch/watch/763182?episodeId=763211',
'md5': 'e20d1ede6872a03b41905ca1060a1ef2',
'info_dict': {
'id': '763211',
'ext': 'mp4',
'title': 'Knochen',
'description': 'md5:3bdd80e2ce20227c47aab1df2a79a519',
'duration': 3344,
'series': 'Wilder',
'season': 'Season 1',
'season_number': 1,
'episode': 'Knochen',
'episode_number': 1,
'thumbnail': 're:https://playsuisse-img.akamaized.net/',
},
}, {
# film
'url': 'https://www.playsuisse.ch/detail/2573198',
'md5': '1f115bb0a5191477b1a5771643a4283d',
'info_dict': {
'id': '2573198',
'ext': 'mp4',
'title': 'Azor',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'genres': ['Fiction'],
'creators': ['Andreas Fontana'],
'cast': ['Fabrizio Rongione', 'Stéphanie Cléau', 'Gilles Privat', 'Alexandre Trocki'],
'location': 'France; Argentine',
'release_year': 2021,
'duration': 5981,
'thumbnail': 're:https://playsuisse-img.akamaized.net/',
},
}, {
# series (treated as a playlist)
'url': 'https://www.playsuisse.ch/detail/1115687',
'info_dict': {
'id': '1115687',
'series': 'They all came out to Montreux',
'title': 'They all came out to Montreux',
'description': 'md5:0fefd8c5b4468a0bb35e916887681520',
'genres': ['Documentary'],
'creators': ['Oliver Murray'],
'location': 'Switzerland',
'release_year': 2021,
},
'playlist': [{
'info_dict': {
'description': 'md5:f2462744834b959a31adc6292380cda2',
'duration': 3180,
'episode': 'Folge 1',
'episode_number': 1,
'id': '1112663',
'season': 'Season 1',
'season_number': 1,
'series': 'They all came out to Montreux',
'thumbnail': 're:https://playsuisse-img.akamaized.net/',
'title': 'Folge 1',
'ext': 'mp4',
},
}, {
'info_dict': {
'description': 'md5:9dfd308699fe850d3bce12dc1bad9b27',
'duration': 2935,
'episode': 'Folge 2',
'episode_number': 2,
'id': '1112661',
'season': 'Season 1',
'season_number': 1,
'series': 'They all came out to Montreux',
'thumbnail': 're:https://playsuisse-img.akamaized.net/',
'title': 'Folge 2',
'ext': 'mp4',
},
}, {
'info_dict': {
'description': 'md5:14a93a3356b2492a8f786ab2227ef602',
'duration': 2994,
'episode': 'Folge 3',
'episode_number': 3,
'id': '1112664',
'season': 'Season 1',
'season_number': 1,
'series': 'They all came out to Montreux',
'thumbnail': 're:https://playsuisse-img.akamaized.net/',
'title': 'Folge 3',
'ext': 'mp4',
},
}],
},
]
_GRAPHQL_QUERY = '''
query AssetWatch($assetId: ID!) {
assetV2(id: $assetId) {
...Asset
episodes {
...Asset
}
}
}
fragment Asset on AssetV2 {
id
name
description
descriptionLong
year
contentTypes
directors
mainCast
productionCountries
duration
episodeNumber
seasonNumber
seriesName
medias {
type
url
}
thumbnail16x9 {
...ImageDetails
}
thumbnail2x3 {
...ImageDetails
}
thumbnail16x9WithTitle {
...ImageDetails
}
thumbnail2x3WithTitle {
...ImageDetails
}
}
fragment ImageDetails on AssetImage {
id
url
}'''
_CLIENT_ID = '1e33f1bf-8bf3-45e4-bbd9-c9ad934b5fca'
_LOGIN_BASE = 'https://account.srgssr.ch'
_ID_TOKEN = None
def _perform_login(self, username, password):
code_verifier = uuid.uuid4().hex + uuid.uuid4().hex + uuid.uuid4().hex
code_challenge = base64.urlsafe_b64encode(
hashlib.sha256(code_verifier.encode()).digest()).decode().rstrip('=')
request_id = parse_qs(self._request_webpage(
f'{self._LOGIN_BASE}/authz-srv/authz', None, 'Requesting session ID', query={
'client_id': self._CLIENT_ID,
'redirect_uri': 'https://www.playsuisse.ch/auth',
'scope': 'email profile openid offline_access',
'response_type': 'code',
'code_challenge': code_challenge,
'code_challenge_method': 'S256',
'view_type': 'login',
}).url)['requestId'][0]
try:
exchange_id = self._download_json(
f'{self._LOGIN_BASE}/verification-srv/v2/authenticate/initiate/password', None,
'Submitting username', headers={'content-type': 'application/json'}, data=json.dumps({
'usage_type': 'INITIAL_AUTHENTICATION',
'request_id': request_id,
'medium_id': 'PASSWORD',
'type': 'password',
'identifier': username,
}).encode())['data']['exchange_id']['exchange_id']
except ExtractorError:
raise ExtractorError('Invalid username', expected=True)
try:
login_data = self._download_json(
f'{self._LOGIN_BASE}/verification-srv/v2/authenticate/authenticate/password', None,
'Submitting password', headers={'content-type': 'application/json'}, data=json.dumps({
'requestId': request_id,
'exchange_id': exchange_id,
'type': 'password',
'password': password,
}).encode())['data']
except ExtractorError:
raise ExtractorError('Invalid password', expected=True)
authorization_code = parse_qs(self._request_webpage(
f'{self._LOGIN_BASE}/login-srv/verification/login', None, 'Logging in',
data=urlencode_postdata({
'requestId': request_id,
'exchange_id': login_data['exchange_id']['exchange_id'],
'verificationType': 'password',
'sub': login_data['sub'],
'status_id': login_data['status_id'],
'rememberMe': True,
'lat': '',
'lon': '',
})).url)['code'][0]
self._ID_TOKEN = self._download_json(
f'{self._LOGIN_BASE}/proxy/token', None, 'Downloading token', data=b'', query={
'client_id': self._CLIENT_ID,
'redirect_uri': 'https://www.playsuisse.ch/auth',
'code': authorization_code,
'code_verifier': code_verifier,
'grant_type': 'authorization_code',
})['id_token']
if not self._ID_TOKEN:
raise ExtractorError('Login failed')
def _get_media_data(self, media_id, locale=None):
response = self._download_json(
'https://www.playsuisse.ch/api/graphql',
media_id, data=json.dumps({
'operationName': 'AssetWatch',
'query': self._GRAPHQL_QUERY,
'variables': {'assetId': media_id},
}).encode(),
headers={'Content-Type': 'application/json', 'locale': locale or 'de'})
return response['data']['assetV2']
def _real_extract(self, url):
    """Extract a single asset; wrap it as a playlist when it has episodes."""
    if not self._ID_TOKEN:
        self.raise_login_required(method='password')
    media_id = self._match_id(url)
    locale = traverse_obj(parse_qs(url), ('locale', 0))
    media_data = self._get_media_data(media_id, locale)
    result = self._extract_single(media_data)
    episodes = media_data.get('episodes')
    if episodes:
        # Series pages carry their episodes inline; expose them lazily
        result['_type'] = 'playlist'
        result['entries'] = map(self._extract_single, episodes)
    return result
def _extract_single(self, media_data):
    """Build an info dict for one PlaySuisse asset (movie or episode).

    Collects every metadata key starting with 'thumbnail' as thumbnails,
    extracts HLS formats/subtitles from the media entries, and maps the
    remaining metadata fields via traverse_obj.
    """
    # Any key named thumbnail* holds image data usable as a thumbnail
    thumbnails = traverse_obj(media_data, lambda k, _: k.startswith('thumbnail'))

    formats, subtitles = [], {}
    for media in traverse_obj(media_data, 'medias', default=[]):
        # Only HLS entries with a URL are playable here
        if not media.get('url') or media.get('type') != 'HLS':
            continue
        # The stream URL requires the login id_token as a query parameter
        f, subs = self._extract_m3u8_formats_and_subtitles(
            update_url_query(media['url'], {'id_token': self._ID_TOKEN}),
            media_data['id'], 'mp4', m3u8_id='HLS', fatal=False)
        formats.extend(f)
        self._merge_subtitles(subs, target=subtitles)

    return {
        'thumbnails': thumbnails,
        'formats': formats,
        'subtitles': subtitles,
        **traverse_obj(media_data, {
            'id': ('id', {str}),
            'title': ('name', {str}),
            'description': (('descriptionLong', 'description'), {str}, any),
            'genres': ('contentTypes', ..., {str}),
            'creators': ('directors', ..., {str}),
            'cast': ('mainCast', ..., {str}),
            'location': ('productionCountries', ..., {str}, all, {unpack(join_nonempty, delim='; ')}, filter),
            # 'year' is a string; keep only the leading 4-digit year
            'release_year': ('year', {str}, {lambda x: x[:4]}, {int_or_none}),
            'duration': ('duration', {int_or_none}),
            'series': ('seriesName', {str}),
            'season_number': ('seasonNumber', {int_or_none}),
            # Only treat 'name' as an episode title for actual episodes
            'episode': ('name', {str}, {lambda x: x if media_data['episodeNumber'] is not None else None}),
            'episode_number': ('episodeNumber', {int_or_none}),
        }),
    }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/teachertube.py | yt_dlp/extractor/teachertube.py | import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
determine_ext,
qualities,
)
class TeacherTubeIE(InfoExtractor):
    """Extractor for single teachertube.com videos and audio files."""

    _WORKING = False  # extractor is currently marked as not working
    IE_NAME = 'teachertube'
    IE_DESC = 'teachertube.com videos'

    _VALID_URL = r'https?://(?:www\.)?teachertube\.com/(viewVideo\.php\?video_id=|music\.php\?music_id=|video/(?:[\da-z-]+-)?|audio/)(?P<id>\d+)'

    _TESTS = [{
        # flowplayer
        'url': 'http://www.teachertube.com/viewVideo.php?video_id=339997',
        'md5': 'f9434ef992fd65936d72999951ee254c',
        'info_dict': {
            'id': '339997',
            'ext': 'mp4',
            'title': 'Measures of dispersion from a frequency table',
            'description': 'Measures of dispersion from a frequency table',
            'thumbnail': r're:https?://.*\.(?:jpg|png)',
        },
    }, {
        # jwplayer
        'url': 'http://www.teachertube.com/music.php?music_id=8805',
        'md5': '01e8352006c65757caf7b961f6050e21',
        'info_dict': {
            'id': '8805',
            'ext': 'mp3',
            'title': 'PER ASPERA AD ASTRA',
            'description': 'RADIJSKA EMISIJA ZRAKOPLOVNE TEHNI?KE ?KOLE P',
        },
    }, {
        # unavailable video
        'url': 'http://www.teachertube.com/video/intro-video-schleicher-297790',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Extract title, description, thumbnail and media formats from the page."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # Surface the site's own error message (e.g. removed video) early
        error = self._search_regex(
            r'<div\b[^>]+\bclass=["\']msgBox error[^>]+>([^<]+)', webpage,
            'error', default=None)
        if error:
            raise ExtractorError(f'{self.IE_NAME} said: {error}', expected=True)

        title = self._html_search_meta('title', webpage, 'title', fatal=True)
        # Strip the site-name suffix that the meta title carries
        TITLE_SUFFIX = ' - TeacherTube'
        if title.endswith(TITLE_SUFFIX):
            title = title[:-len(TITLE_SUFFIX)].strip()

        description = self._html_search_meta('description', webpage, 'description')
        if description:
            description = description.strip()

        # Preference order: mp4 > flv > mp3
        quality = qualities(['mp3', 'flv', 'mp4'])

        # Media URLs appear in several player setups; collect all variants
        media_urls = re.findall(r'data-contenturl="([^"]+)"', webpage)
        media_urls.extend(re.findall(r'var\s+filePath\s*=\s*"([^"]+)"', webpage))
        media_urls.extend(re.findall(r'\'file\'\s*:\s*["\']([^"\']+)["\'],', webpage))

        # set() de-duplicates URLs found by more than one pattern
        formats = [
            {
                'url': media_url,
                'quality': quality(determine_ext(media_url)),
            } for media_url in set(media_urls)
        ]

        thumbnail = self._og_search_thumbnail(
            webpage, default=None) or self._html_search_meta(
            'thumbnail', webpage)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'formats': formats,
        }
class TeacherTubeUserIE(InfoExtractor):
    """Playlist extractor for teachertube.com user profiles and collections."""

    _WORKING = False  # extractor is currently marked as not working
    IE_NAME = 'teachertube:user:collection'
    IE_DESC = 'teachertube.com user and collection videos'

    _VALID_URL = r'https?://(?:www\.)?teachertube\.com/(user/profile|collection)/(?P<user>[0-9a-zA-Z]+)/?'

    # Matches the media link that follows each sidebar duration stamp
    _MEDIA_RE = r'''(?sx)
        class="?sidebar_thumb_time"?>[0-9:]+</div>
        \s*
        <a\s+href="(https?://(?:www\.)?teachertube\.com/(?:video|audio)/[^"]+)"
    '''
    _TEST = {
        'url': 'http://www.teachertube.com/user/profile/rbhagwati2',
        'info_dict': {
            'id': 'rbhagwati2',
        },
        'playlist_mincount': 179,
    }

    def _real_extract(self, url):
        """Collect all media links from the profile page and its AJAX pages."""
        user_id = self._match_valid_url(url).group('user')

        webpage = self._download_webpage(url, user_id)
        media_urls = re.findall(self._MEDIA_RE, webpage)

        # The last pagination link duplicates an earlier page, so drop it
        page_numbers = re.findall(
            rf'/ajax-user/user-videos/{user_id}\?page=([0-9]+)', webpage)[:-1]
        for p in page_numbers:
            ajax_url = f'http://www.teachertube.com/ajax-user/user-videos/{user_id}?page={p}'
            webpage = self._download_webpage(
                ajax_url, user_id, f'Downloading page {p}/{len(page_numbers)}')
            media_urls.extend(re.findall(self._MEDIA_RE, webpage))

        return self.playlist_result(
            [self.url_result(media_url, 'TeacherTube') for media_url in media_urls],
            user_id)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tubitv.py | yt_dlp/extractor/tubitv.py | import re
from .common import InfoExtractor
from ..networking import Request
from ..utils import (
ExtractorError,
int_or_none,
js_to_json,
strip_or_none,
traverse_obj,
url_or_none,
urlencode_postdata,
)
class TubiTvIE(InfoExtractor):
    """Extractor for single Tubi TV movies and episodes."""

    IE_NAME = 'tubitv'
    _VALID_URL = r'https?://(?:www\.)?tubitv\.com/(?:[a-z]{2}-[a-z]{2}/)?(?P<type>video|movies|tv-shows)/(?P<id>\d+)'
    _LOGIN_URL = 'http://tubitv.com/login'
    _NETRC_MACHINE = 'tubitv'
    _TESTS = [{
        'url': 'https://tubitv.com/movies/100004539/the-39-steps',
        'info_dict': {
            'id': '100004539',
            'ext': 'mp4',
            'title': 'The 39 Steps',
            'description': 'md5:bb2f2dd337f0dc58c06cb509943f54c8',
            'uploader_id': 'abc2558d54505d4f0f32be94f2e7108c',
            'release_year': 1935,
            'thumbnail': r're:^https?://.+\.(jpe?g|png)$',
            'duration': 5187,
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://tubitv.com/tv-shows/554628/s01-e01-rise-of-the-snakes',
        'info_dict': {
            'id': '554628',
            'ext': 'mp4',
            'title': 'S01:E01 - Rise of the Snakes',
            'description': 'md5:ba136f586de53af0372811e783a3f57d',
            'episode': 'Rise of the Snakes',
            'episode_number': 1,
            'season': 'Season 1',
            'season_number': 1,
            'uploader_id': '2a9273e728c510d22aa5c57d0646810b',
            'release_year': 2011,
            'thumbnail': r're:^https?://.+\.(jpe?g|png)$',
            'duration': 1376,
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'http://tubitv.com/video/283829/the_comedian_at_the_friday',
        'md5': '43ac06be9326f41912dc64ccf7a80320',
        'info_dict': {
            'id': '283829',
            'ext': 'mp4',
            'title': 'The Comedian at The Friday',
            'description': 'A stand up comedian is forced to look at the decisions in his life while on a one week trip to the west coast.',
            'uploader_id': 'bc168bee0d18dd1cb3b86c68706ab434',
        },
        'skip': 'Content Unavailable',
    }, {
        'url': 'http://tubitv.com/tv-shows/321886/s01_e01_on_nom_stories',
        'only_matching': True,
    }, {
        'url': 'https://tubitv.com/movies/560057/penitentiary?start=true',
        'info_dict': {
            'id': '560057',
            'ext': 'mp4',
            'title': 'Penitentiary',
            'description': 'md5:8d2fc793a93cc1575ff426fdcb8dd3f9',
            'uploader_id': 'd8fed30d4f24fcb22ec294421b9defc2',
            'release_year': 1979,
        },
        'skip': 'Content Unavailable',
    }, {
        'url': 'https://tubitv.com/es-mx/tv-shows/477363/s01-e03-jacob-dos-dos-y-la-tarjets-de-hockey-robada',
        'only_matching': True,
    }]

    # DRM formats are included only to raise appropriate error
    _UNPLAYABLE_FORMATS = ('hlsv6_widevine', 'hlsv6_widevine_nonclearlead', 'hlsv6_playready_psshv0',
                           'hlsv6_fairplay', 'dash_widevine', 'dash_widevine_nonclearlead')

    def _perform_login(self, username, password):
        """Log in via the website form; success is detected by the logout element."""
        self.report_login()
        form_data = {
            'username': username,
            'password': password,
        }
        payload = urlencode_postdata(form_data)
        request = Request(self._LOGIN_URL, payload)
        request.headers['Content-Type'] = 'application/x-www-form-urlencoded'
        login_page = self._download_webpage(
            request, None, False, 'Wrong login info')
        # The logout button only appears for authenticated sessions
        if not re.search(r'id="tubi-logout"', login_page):
            raise ExtractorError(
                'Login failed (invalid username/password)', expected=True)

    def _real_extract(self, url):
        """Extract formats/subtitles from the page's embedded JSON state."""
        video_id, video_type = self._match_valid_url(url).group('id', 'type')
        webpage = self._download_webpage(f'https://tubitv.com/{video_type}/{video_id}/', video_id)
        video_data = self._search_json(
            r'window\.__data\s*=', webpage, 'data', video_id,
            transform_source=js_to_json)['video']['byId'][video_id]

        formats = []
        drm_formats = False

        for resource in traverse_obj(video_data, ('video_resources', lambda _, v: url_or_none(v['manifest']['url']))):
            resource_type = resource.get('type')
            manifest_url = resource['manifest']['url']
            if resource_type == 'dash':
                formats.extend(self._extract_mpd_formats(manifest_url, video_id, mpd_id=resource_type, fatal=False))
            elif resource_type in ('hlsv3', 'hlsv6'):
                formats.extend(self._extract_m3u8_formats(manifest_url, video_id, 'mp4', m3u8_id=resource_type, fatal=False))
            elif resource_type in self._UNPLAYABLE_FORMATS:
                # Remember DRM-only resources so we can report DRM below
                drm_formats = True
            else:
                self.report_warning(f'Skipping unknown resource type "{resource_type}"')

        if not formats and drm_formats:
            self.report_drm(video_id)
        elif not formats and not video_data.get('policy_match'):  # policy_match is False if content was removed
            raise ExtractorError('This content is currently unavailable', expected=True)

        subtitles = {}
        for sub in traverse_obj(video_data, ('subtitles', lambda _, v: url_or_none(v['url']))):
            subtitles.setdefault(sub.get('lang', 'English'), []).append({
                'url': self._proto_relative_url(sub['url']),
            })

        title = traverse_obj(video_data, ('title', {str}))
        # Episode titles are formatted as 'S01:E01 - Name'; split that apart
        season_number, episode_number, episode_title = self._search_regex(
            r'^S(\d+):E(\d+) - (.+)', title, 'episode info', fatal=False, group=(1, 2, 3), default=(None, None, None))

        return {
            'id': video_id,
            'title': strip_or_none(title),
            'formats': formats,
            'subtitles': subtitles,
            'season_number': int_or_none(season_number),
            'episode_number': int_or_none(episode_number),
            'episode': strip_or_none(episode_title),
            **traverse_obj(video_data, {
                'description': ('description', {str}),
                'duration': ('duration', {int_or_none}),
                'uploader_id': ('publisher_id', {str}),
                'release_year': ('year', {int_or_none}),
                'thumbnails': ('thumbnails', ..., {url_or_none}, {'url': {self._proto_relative_url}}),
            }),
        }
class TubiTvShowIE(InfoExtractor):
    """Playlist extractor for Tubi TV series pages (whole show or one season)."""

    IE_NAME = 'tubitv:series'
    _VALID_URL = r'https?://(?:www\.)?tubitv\.com/series/\d+/(?P<show_name>[^/?#]+)(?:/season-(?P<season>\d+))?'
    _TESTS = [{
        'url': 'https://tubitv.com/series/3936/the-joy-of-painting-with-bob-ross?start=true',
        'playlist_mincount': 389,
        'info_dict': {
            'id': 'the-joy-of-painting-with-bob-ross',
        },
    }, {
        'url': 'https://tubitv.com/series/2311/the-saddle-club/season-1',
        'playlist_count': 26,
        'info_dict': {
            'id': 'the-saddle-club-season-1',
        },
    }, {
        'url': 'https://tubitv.com/series/2311/the-saddle-club/season-3',
        'playlist_count': 19,
        'info_dict': {
            'id': 'the-saddle-club-season-3',
        },
    }, {
        'url': 'https://tubitv.com/series/2311/the-saddle-club/',
        'playlist_mincount': 71,
        'info_dict': {
            'id': 'the-saddle-club',
        },
    }]

    def _entries(self, show_url, playlist_id, selected_season):
        """Yield episode url_results, optionally limited to one season."""
        webpage = self._download_webpage(show_url, playlist_id)

        data = self._search_json(
            r'window\.__REACT_QUERY_STATE__\s*=', webpage, 'data', playlist_id,
            transform_source=js_to_json)['queries'][0]['state']['data']

        # v['number'] is already a decimal string, but stringify to protect against API changes
        path = [lambda _, v: str(v['number']) == selected_season] if selected_season else [..., {dict}]

        for season in traverse_obj(data, ('seasons', *path)):
            season_number = int_or_none(season.get('number'))
            for episode in traverse_obj(season, ('episodes', lambda _, v: v['id'])):
                episode_id = episode['id']
                yield self.url_result(
                    f'https://tubitv.com/tv-shows/{episode_id}/', TubiTvIE, episode_id,
                    season_number=season_number, episode_number=int_or_none(episode.get('num')))

    def _real_extract(self, url):
        playlist_id, selected_season = self._match_valid_url(url).group('show_name', 'season')
        # Season-specific URLs get a distinct playlist id
        if selected_season:
            playlist_id = f'{playlist_id}-season-{selected_season}'
        return self.playlist_result(self._entries(url, playlist_id, selected_season), playlist_id)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ondemandkorea.py | yt_dlp/extractor/ondemandkorea.py | import functools
import re
import uuid
from .common import InfoExtractor
from ..utils import (
ExtractorError,
OnDemandPagedList,
float_or_none,
int_or_none,
join_nonempty,
parse_age_limit,
parse_qs,
str_or_none,
unified_strdate,
url_or_none,
)
from ..utils.traversal import traverse_obj
class OnDemandKoreaIE(InfoExtractor):
    """Extractor for single OnDemandKorea VOD items."""

    _VALID_URL = r'https?://(?:www\.)?ondemandkorea\.com/(?:en/)?player/vod/[a-z0-9-]+\?(?:[^#]+&)?contentId=(?P<id>\d+)'
    _GEO_COUNTRIES = ['US', 'CA']
    _TESTS = [{
        'url': 'https://www.ondemandkorea.com/player/vod/ask-us-anything?contentId=686471',
        'md5': 'e2ff77255d989e3135bde0c5889fbce8',
        'info_dict': {
            'id': '686471',
            'ext': 'mp4',
            'title': 'Ask Us Anything: Jung Sung-ho, Park Seul-gi, Kim Bo-min, Yang Seung-won',
            'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)',
            'duration': 5486.955,
            'release_date': '20220924',
            'series': 'Ask Us Anything',
            'series_id': '11790',
            'episode_number': 351,
            'episode': 'Jung Sung-ho, Park Seul-gi, Kim Bo-min, Yang Seung-won',
        },
    }, {
        'url': 'https://www.ondemandkorea.com/player/vod/breakup-probation-a-week?contentId=1595796',
        'md5': '57266c720006962be7ff415b24775caa',
        'info_dict': {
            'id': '1595796',
            'ext': 'mp4',
            'title': 'Breakup Probation, A Week: E08',
            'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)',
            'duration': 1586.0,
            'release_date': '20231001',
            'series': 'Breakup Probation, A Week',
            'series_id': '22912',
            'episode_number': 8,
            'episode': 'E08',
        },
    }, {
        'url': 'https://www.ondemandkorea.com/player/vod/the-outlaws?contentId=369531',
        'md5': 'fa5523b87aa1f6d74fc622a97f2b47cd',
        'info_dict': {
            'id': '369531',
            'ext': 'mp4',
            'release_date': '20220519',
            'duration': 7267.0,
            'title': 'The Outlaws: Main Movie',
            'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)',
            'age_limit': 18,
        },
    }, {
        'url': 'https://www.ondemandkorea.com/en/player/vod/capture-the-moment-how-is-that-possible?contentId=1605006',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Query the ODK playback API and build formats/subtitles/metadata."""
        video_id = self._match_id(url)

        # 'did' is a per-request device id; 403/404 still carry an error body
        data = self._download_json(
            f'https://odkmedia.io/odx/api/v3/playback/{video_id}/', video_id, fatal=False,
            headers={'service-name': 'odk'}, query={'did': str(uuid.uuid4())}, expected_status=(403, 404))
        if not traverse_obj(data, ('result', {dict})):
            msg = traverse_obj(data, ('messages', '__default'), 'title', expected_type=str)
            raise ExtractorError(msg or 'Got empty response from playback API', expected=True)

        data = data['result']

        def try_geo_bypass(url):
            # Geo-blocked responses wrap the real manifest in a 'stream_url' query param
            return traverse_obj(url, ({parse_qs}, 'stream_url', 0, {url_or_none})) or url

        formats = []
        for m3u8_url in traverse_obj(data, (('sources', 'manifest'), ..., 'url', {url_or_none}, {try_geo_bypass})):
            # Probe for an undocumented 1080p variant of 720p manifests
            mod_url = re.sub(r'_720(p?)\.m3u8', r'_1080\1.m3u8', m3u8_url)
            if mod_url != m3u8_url:
                mod_format = self._extract_m3u8_formats(
                    mod_url, video_id, note='Checking for higher quality format',
                    errnote='No higher quality format found', fatal=False)
                if mod_format:
                    formats.extend(mod_format)
                    continue
            formats.extend(self._extract_m3u8_formats(m3u8_url, video_id, fatal=False))

        subtitles = {}
        for track in traverse_obj(data, ('text_tracks', lambda _, v: url_or_none(v['url']))):
            subtitles.setdefault(track.get('language', 'und'), []).append({
                'url': track['url'],
                'ext': track.get('codec'),
                'name': track.get('label'),
            })

        def if_series(key=None):
            # Access episode[key] only for series content; None otherwise
            return lambda obj: obj[key] if key and obj['kind'] == 'series' else None

        return {
            'id': video_id,
            'title': join_nonempty(
                ('episode', 'program', 'title'),
                ('episode', 'title'), from_dict=data, delim=': '),
            **traverse_obj(data, {
                'thumbnail': ('episode', 'images', 'thumbnail', {url_or_none}),
                'release_date': ('episode', 'release_date', {lambda x: x.replace('-', '')}, {unified_strdate}),
                'duration': ('duration', {float_or_none(scale=1000)}),
                'age_limit': ('age_rating', 'name', {lambda x: x.replace('R', '')}, {parse_age_limit}),
                'series': ('episode', {if_series(key='program')}, 'title'),
                'series_id': ('episode', {if_series(key='program')}, 'id', {str_or_none}),
                'episode': ('episode', {if_series(key='title')}),
                'episode_number': ('episode', {if_series(key='number')}, {int_or_none}),
            }, get_all=False),
            'formats': formats,
            'subtitles': subtitles,
        }
class OnDemandKoreaProgramIE(InfoExtractor):
    """Paged playlist extractor for OnDemandKorea program pages."""

    _VALID_URL = r'https?://(?:www\.)?ondemandkorea\.com/(?:en/)?player/vod/(?P<id>[a-z0-9-]+)(?:$|#)'
    _GEO_COUNTRIES = ['US', 'CA']
    _TESTS = [{
        'url': 'https://www.ondemandkorea.com/player/vod/uskn-news',
        'info_dict': {
            'id': 'uskn-news',
        },
        'playlist_mincount': 755,
    }, {
        'url': 'https://www.ondemandkorea.com/en/player/vod/the-land',
        'info_dict': {
            'id': 'the-land',
        },
        'playlist_count': 52,
    }]

    _PAGE_SIZE = 100

    def _fetch_page(self, display_id, page):
        """Yield url_results for one API page (0-based index from the pager)."""
        page_number = page + 1  # the API counts pages from 1
        api_response = self._download_json(
            f'https://odkmedia.io/odx/api/v3/program/{display_id}/episodes/', display_id,
            headers={'service-name': 'odk'}, query={
                'page': page_number,
                'page_size': self._PAGE_SIZE,
            }, note=f'Downloading page {page_number}', expected_status=404)
        for episode in traverse_obj(api_response, ('result', 'results', ...)):
            yield self.url_result(
                f'https://www.ondemandkorea.com/player/vod/{display_id}?contentId={episode["id"]}',
                ie=OnDemandKoreaIE, video_title=episode.get('title'))

    def _real_extract(self, url):
        program_slug = self._match_id(url)
        page_fetcher = functools.partial(self._fetch_page, program_slug)
        return self.playlist_result(
            OnDemandPagedList(page_fetcher, self._PAGE_SIZE), program_slug)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/bigflix.py | yt_dlp/extractor/bigflix.py | import base64
import re
import urllib.parse
from .common import InfoExtractor
class BigflixIE(InfoExtractor):
    """Extractor for bigflix.com movie pages."""

    _VALID_URL = r'https?://(?:www\.)?bigflix\.com/.+/(?P<id>[0-9]+)'
    _TESTS = [{
        # 2 formats
        'url': 'http://www.bigflix.com/Tamil-movies/Drama-movies/Madarasapatinam/16070',
        'info_dict': {
            'id': '16070',
            'ext': 'mp4',
            'title': 'Madarasapatinam',
            'description': 'md5:9f0470b26a4ba8e824c823b5d95c2f6b',
            'formats': 'mincount:2',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # multiple formats
        'url': 'http://www.bigflix.com/Malayalam-movies/Drama-movies/Indian-Rupee/15967',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        title = self._html_search_regex(
            r'<div[^>]+class=["\']pagetitle["\'][^>]*>(.+?)</div>',
            webpage, 'title')

        def decode_url(quoted_b64_url):
            # Media URLs are embedded as percent-quoted base64 strings
            return base64.b64decode(urllib.parse.unquote(
                quoted_b64_url)).decode('utf-8')

        formats = []
        # Quality-labelled URLs look like 'ContentURL_720p...=<encoded url>'
        for height, encoded_url in re.findall(
                r'ContentURL_(\d{3,4})[pP][^=]+=([^&]+)', webpage):
            video_url = decode_url(encoded_url)
            f = {
                'url': video_url,
                'format_id': f'{height}p',
                'height': int(height),
            }
            if video_url.startswith('rtmp'):
                f['ext'] = 'flv'
            formats.append(f)

        # A fallback 'file=' URL may duplicate one of the labelled formats
        file_url = self._search_regex(
            r'file=([^&]+)', webpage, 'video url', default=None)
        if file_url:
            video_url = decode_url(file_url)
            if all(f['url'] != video_url for f in formats):
                formats.append({
                    # reuse the already-decoded URL instead of decoding file_url twice
                    'url': video_url,
                })

        description = self._html_search_meta('description', webpage)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'formats': formats,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nobelprize.py | yt_dlp/extractor/nobelprize.py | from .common import InfoExtractor
from ..utils import (
UnsupportedError,
clean_html,
int_or_none,
parse_duration,
parse_qs,
str_or_none,
update_url,
)
from ..utils.traversal import find_element, traverse_obj
class NobelPrizeIE(InfoExtractor):
    """Extractor for nobelprize.org mediaplayer pages."""

    _VALID_URL = r'https?://(?:(?:mediaplayer|www)\.)?nobelprize\.org/mediaplayer/'
    _TESTS = [{
        'url': 'https://www.nobelprize.org/mediaplayer/?id=2636',
        'info_dict': {
            'id': '2636',
            'ext': 'mp4',
            'title': 'Announcement of the 2016 Nobel Prize in Physics',
            'description': 'md5:1a2d8a6ca80c88fb3b9a326e0b0e8e43',
            'duration': 1560.0,
            'thumbnail': r're:https?://www\.nobelprize\.org/images/.+\.jpg',
            'timestamp': 1504883793,
            'upload_date': '20170908',
        },
    }, {
        'url': 'https://mediaplayer.nobelprize.org/mediaplayer/?qid=12693',
        'info_dict': {
            'id': '12693',
            'ext': 'mp4',
            'title': 'Nobel Lecture by Peter Higgs',
            'description': 'md5:9b12e275dbe3a8138484e70e00673a05',
            'duration': 1800.0,
            'thumbnail': r're:https?://www\.nobelprize\.org/images/.+\.jpg',
            'timestamp': 1504883793,
            'upload_date': '20170908',
        },
    }]

    def _real_extract(self, url):
        # The video id lives in the 'id' or 'qid' query parameter;
        # round-tripping through int_or_none rejects non-numeric values
        video_id = traverse_obj(parse_qs(url), (
            ('id', 'qid'), -1, {int_or_none}, {str_or_none}, any))
        if not video_id:
            raise UnsupportedError(url)
        # Always fetch from the mediaplayer subdomain regardless of input host
        webpage = self._download_webpage(
            update_url(url, netloc='mediaplayer.nobelprize.org'), video_id)

        return {
            **self._search_json_ld(webpage, video_id),
            'id': video_id,
            'title': self._html_search_meta('caption', webpage),
            'description': traverse_obj(webpage, (
                {find_element(tag='span', attr='itemprop', value='description')}, {clean_html})),
            'duration': parse_duration(self._html_search_meta('duration', webpage)),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/puhutv.py | yt_dlp/extractor/puhutv.py | from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
parse_resolution,
str_or_none,
traverse_obj,
try_get,
unified_timestamp,
url_or_none,
urljoin,
)
class PuhuTVIE(InfoExtractor):
    """Extractor for single puhutv.com videos ('-izle' watch pages)."""

    _VALID_URL = r'https?://(?:www\.)?puhutv\.com/(?P<id>[^/?#&]+)-izle'
    IE_NAME = 'puhutv'
    _TESTS = [{
        # film
        'url': 'https://puhutv.com/bi-kucuk-eylul-meselesi-izle',
        'md5': '4de98170ccb84c05779b1f046b3c86f8',
        'info_dict': {
            'id': '11909',
            'display_id': 'bi-kucuk-eylul-meselesi',
            'ext': 'mp4',
            'title': 'Bi Küçük Eylül Meselesi',
            'description': 'md5:c2ab964c6542b7b26acacca0773adce2',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 6176.96,
            'creator': 'Ay Yapım',
            'creators': ['Ay Yapım'],
            'timestamp': 1561062749,
            'upload_date': '20190620',
            'release_year': 2014,
            'view_count': int,
            'tags': list,
        },
    }, {
        # episode, geo restricted, bypassable with --geo-verification-proxy
        'url': 'https://puhutv.com/jet-sosyete-1-bolum-izle',
        'only_matching': True,
    }, {
        # 4k, with subtitles
        'url': 'https://puhutv.com/dip-1-bolum-izle',
        'only_matching': True,
    }]
    # Maps the site's human-readable subtitle language names to ISO codes
    _SUBTITLE_LANGS = {
        'English': 'en',
        'Deutsch': 'de',
        'عربى': 'ar',
    }

    def _real_extract(self, url):
        """Fetch the slug API metadata, then the per-asset video list."""
        display_id = self._match_id(url)

        info = self._download_json(
            urljoin(url, f'/api/slug/{display_id}-izle'),
            display_id)['data']

        video_id = str(info['id'])
        show = info.get('title') or {}
        title = info.get('name') or show['name']
        # Episodes append a display name (e.g. episode label) to the title
        if info.get('display_name'):
            title = '{} {}'.format(title, info['display_name'])

        try:
            videos = self._download_json(
                f'https://puhutv.com/api/assets/{video_id}/videos',
                display_id, 'Downloading video JSON',
                headers=self.geo_verification_headers())
        except ExtractorError as e:
            # A 403 from this endpoint indicates geo restriction
            if isinstance(e.cause, HTTPError) and e.cause.status == 403:
                self.raise_geo_restricted()
            raise

        urls = []
        formats = []

        for video in videos['data']['videos']:
            media_url = url_or_none(video.get('url'))
            # Skip missing URLs and de-duplicate repeated entries
            if not media_url or media_url in urls:
                continue
            urls.append(media_url)

            playlist = video.get('is_playlist')
            # Master playlists expand into multiple variant formats
            if (video.get('stream_type') == 'hls' and playlist is True) or 'playlist.m3u8' in media_url:
                formats.extend(self._extract_m3u8_formats(
                    media_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id='hls', fatal=False))
                continue

            quality = int_or_none(video.get('quality'))
            f = {
                'url': media_url,
                'ext': 'mp4',
                'height': quality,
            }
            video_format = video.get('video_format')
            # Single-rendition HLS chunklists (not master playlists)
            is_hls = (video_format == 'hls' or '/hls/' in media_url or '/chunklist.m3u8' in media_url) and playlist is False
            if is_hls:
                format_id = 'hls'
                f['protocol'] = 'm3u8_native'
            elif video_format == 'mp4':
                format_id = 'http'
            else:
                continue
            if quality:
                format_id += f'-{quality}p'
            f['format_id'] = format_id
            formats.append(f)

        creator = try_get(
            show, lambda x: x['producer']['name'], str)

        content = info.get('content') or {}

        images = try_get(
            content, lambda x: x['images']['wide'], dict) or {}
        thumbnails = []
        for image_id, image_url in images.items():
            if not isinstance(image_url, str):
                continue
            if not image_url.startswith(('http', '//')):
                image_url = f'https://{image_url}'
            # The image id encodes the resolution (e.g. '1920x1080')
            t = parse_resolution(image_id)
            t.update({
                'id': image_id,
                'url': image_url,
            })
            thumbnails.append(t)

        tags = []
        for genre in show.get('genres') or []:
            if not isinstance(genre, dict):
                continue
            genre_name = genre.get('name')
            if genre_name and isinstance(genre_name, str):
                tags.append(genre_name)

        subtitles = {}
        for subtitle in content.get('subtitles') or []:
            if not isinstance(subtitle, dict):
                continue
            lang = subtitle.get('language')
            sub_url = url_or_none(subtitle.get('url') or subtitle.get('file'))
            if not lang or not isinstance(lang, str) or not sub_url:
                continue
            subtitles[self._SUBTITLE_LANGS.get(lang, lang)] = [{
                'url': sub_url,
            }]

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': info.get('description') or show.get('description'),
            'season_id': str_or_none(info.get('season_id')),
            'season_number': int_or_none(info.get('season_number')),
            'episode_number': int_or_none(info.get('episode_number')),
            'release_year': int_or_none(show.get('released_at')),
            'timestamp': unified_timestamp(info.get('created_at')),
            'creator': creator,
            'view_count': int_or_none(content.get('watch_count')),
            'duration': float_or_none(content.get('duration_in_ms'), 1000),
            'tags': tags,
            'subtitles': subtitles,
            'thumbnails': thumbnails,
            'formats': formats,
        }
class PuhuTVSerieIE(InfoExtractor):
    """Playlist extractor for puhutv.com series ('-detay') pages."""

    _VALID_URL = r'https?://(?:www\.)?puhutv\.com/(?P<id>[^/?#&]+)-detay'
    IE_NAME = 'puhutv:serie'
    _TESTS = [{
        'url': 'https://puhutv.com/deniz-yildizi-detay',
        'info_dict': {
            'title': 'Deniz Yıldızı',
            'id': 'deniz-yildizi',
        },
        'playlist_mincount': 205,
    }, {
        # a film detail page which is using same url with serie page
        'url': 'https://puhutv.com/bizim-icin-sampiyon-detay',
        'only_matching': True,
    }]

    def _extract_entries(self, seasons):
        """Yield per-episode url_results, paging through each season's API."""
        for season in seasons:
            season_id = season.get('id')
            if not season_id:
                continue
            page = 1
            has_more = True
            while has_more is True:
                season = self._download_json(
                    f'https://appservice.puhutv.com/api/seasons/{season_id}/episodes?v=2',
                    season_id, f'Downloading page {page}', query={
                        'page': page,
                        'per': 100,
                    })['data']
                for episode in traverse_obj(season, ('episodes', lambda _, v: v.get('slug') or v['assets'][0]['slug'])):
                    # The slug may live on the episode or its first asset
                    slug = episode.get('slug') or episode['assets'][0]['slug']
                    yield self.url_result(
                        f'https://puhutv.com/{slug}', PuhuTVIE, episode.get('id'), episode.get('name'))
                page += 1
                has_more = traverse_obj(season, 'has_more')

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        info = self._download_json(
            urljoin(url, f'/api/slug/{playlist_id}-detay'),
            playlist_id)['data']

        seasons = info.get('seasons')
        if seasons:
            return self.playlist_result(
                self._extract_entries(seasons), playlist_id, info.get('name'))

        # For films, these are using same url with series
        video_id = info.get('slug')
        if video_id:
            video_id = video_id.removesuffix('-detay')
        else:
            video_id = info['assets'][0]['slug'].removesuffix('-izle')
        return self.url_result(
            f'https://puhutv.com/{video_id}-izle', PuhuTVIE, video_id)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/minoto.py | yt_dlp/extractor/minoto.py | from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_codecs,
)
class MinotoIE(InfoExtractor):
    """Extractor for videos hosted on the Minoto video platform."""

    _VALID_URL = r'(?:minoto:|https?://(?:play|iframe|embed)\.minoto-video\.com/(?P<player_id>[0-9]+)/)(?P<id>[a-zA-Z0-9]+)'

    def _real_extract(self, url):
        match = self._match_valid_url(url)
        video_id = match.group('id')
        # 'minoto:' pseudo-URLs have no player id; the API accepts '1'
        player_id = match.group('player_id') or '1'

        video_data = self._download_json(
            f'http://play.minoto-video.com/{player_id}/{video_id}.js', video_id)
        metadata = video_data['video-metadata']

        formats = []
        for file_info in video_data['video-files']:
            file_url = file_info.get('url')
            if not file_url:
                continue
            container = file_info.get('container')
            if container == 'hls':
                formats.extend(self._extract_m3u8_formats(
                    file_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
                continue
            profile = file_info.get('profile') or {}
            formats.append({
                'format_id': profile.get('name-short'),
                'format_note': profile.get('name'),
                'url': file_url,
                'container': container,
                'tbr': int_or_none(file_info.get('bitrate')),
                'filesize': int_or_none(file_info.get('filesize')),
                'width': int_or_none(file_info.get('width')),
                'height': int_or_none(file_info.get('height')),
                **parse_codecs(file_info.get('codecs')),
            })

        return {
            'id': video_id,
            'title': metadata['title'],
            'description': metadata.get('description'),
            'thumbnail': metadata.get('video-poster', {}).get('url'),
            'formats': formats,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/uliza.py | yt_dlp/extractor/uliza.py | from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
make_archive_id,
parse_qs,
time_seconds,
)
from ..utils.traversal import traverse_obj
class UlizaPlayerIE(InfoExtractor):
    """Extractor for ULIZA player API URLs (player-api.p.uliza.jp)."""

    _VALID_URL = r'https://player-api\.p\.uliza\.jp/v1/players/[^?#]+\?(?:[^#]*&)?name=(?P<id>[^#&]+)'
    _TESTS = [{
        'url': 'https://player-api.p.uliza.jp/v1/players/timeshift-disabled/pia/admin?type=normal&playerobjectname=ulizaPlayer&name=livestream01_dvr&repeatable=true',
        'info_dict': {
            'id': '88f3109a-f503-4d0f-a9f7-9f39ac745d84',
            'ext': 'mp4',
            'title': '88f3109a-f503-4d0f-a9f7-9f39ac745d84',
            'live_status': 'was_live',
            '_old_archive_ids': ['piaulizaportal 88f3109a-f503-4d0f-a9f7-9f39ac745d84'],
        },
    }, {
        'url': 'https://player-api.p.uliza.jp/v1/players/uliza_jp_gallery_normal/promotion/admin?type=presentation&name=cookings&targetid=player1',
        'info_dict': {
            'id': 'ae350126-5e22-4a7f-a8ac-8d0fd448b800',
            'ext': 'mp4',
            'title': 'ae350126-5e22-4a7f-a8ac-8d0fd448b800',
            'live_status': 'not_live',
            '_old_archive_ids': ['piaulizaportal ae350126-5e22-4a7f-a8ac-8d0fd448b800'],
        },
    }, {
        'url': 'https://player-api.p.uliza.jp/v1/players/default-player/pia/admin?type=normal&name=pia_movie_uliza_fix&targetid=ulizahtml5&repeatable=true',
        'info_dict': {
            'id': '0644ecc8-e354-41b4-b957-3b08a2d63df1',
            'ext': 'mp4',
            'title': '0644ecc8-e354-41b4-b957-3b08a2d63df1',
            'live_status': 'not_live',
            '_old_archive_ids': ['piaulizaportal 0644ecc8-e354-41b4-b957-3b08a2d63df1'],
        },
    }]

    def _real_extract(self, url):
        """Find the HLS manifest in the player script and derive live status."""
        display_id = self._match_id(url)
        player_data = self._download_webpage(
            url, display_id, headers={'Referer': 'https://player-api.p.uliza.jp/'},
            note='Fetching player data', errnote='Unable to fetch player data')

        m3u8_url = self._search_regex(
            r'["\'](https://vms-api\.p\.uliza\.jp/v1/prog-index\.m3u8[^"\']+)', player_data, 'm3u8 url')
        # The 'ss' query parameter of the manifest URL carries the real video id
        video_id = parse_qs(m3u8_url).get('ss', [display_id])[0]

        formats = self._extract_m3u8_formats(m3u8_url, video_id)
        # The manifest path segment distinguishes live, DVR, and VOD streams
        m3u8_type = self._search_regex(
            r'/hls/(dvr|video)/', traverse_obj(formats, (0, 'url')), 'm3u8 type', default=None)

        return {
            'id': video_id,
            'title': video_id,
            'formats': formats,
            'live_status': {
                'video': 'is_live',
                'dvr': 'was_live',  # short-term archives
            }.get(m3u8_type, 'not_live'),  # VOD or long-term archives
            '_old_archive_ids': [make_archive_id('PIAULIZAPortal', video_id)],
        }
class UlizaPortalIE(InfoExtractor):
    """Resolve an ulizaportal.jp page to its embedded ULIZA player URL."""
    IE_DESC = 'ulizaportal.jp'
    _VALID_URL = r'https?://(?:www\.)?ulizaportal\.jp/pages/(?P<id>[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12})'
    _TESTS = [{
        'url': 'https://ulizaportal.jp/pages/005f18b7-e810-5618-cb82-0987c5755d44',
        'info_dict': {
            'id': 'ae350126-5e22-4a7f-a8ac-8d0fd448b800',
            'display_id': '005f18b7-e810-5618-cb82-0987c5755d44',
            'title': 'プレゼンテーションプレイヤーのサンプル',
            'live_status': 'not_live',
            '_old_archive_ids': ['piaulizaportal ae350126-5e22-4a7f-a8ac-8d0fd448b800'],
        },
        'params': {
            'skip_download': True,
            'ignore_no_formats_error': True,
        },
    }, {
        'url': 'https://ulizaportal.jp/pages/005e1b23-fe93-5780-19a0-98e917cc4b7d?expires=4102412400&signature=f422a993b683e1068f946caf406d211c17d1ef17da8bef3df4a519502155aa91&version=1',
        'info_dict': {
            'id': '0644ecc8-e354-41b4-b957-3b08a2d63df1',
            'display_id': '005e1b23-fe93-5780-19a0-98e917cc4b7d',
            'title': '【確認用】視聴サンプルページ(ULIZA)',
            'live_status': 'not_live',
            '_old_archive_ids': ['piaulizaportal 0644ecc8-e354-41b4-b957-3b08a2d63df1'],
        },
        'params': {
            'skip_download': True,
            'ignore_no_formats_error': True,
        },
    }]

    def _real_extract(self, url):
        page_id = self._match_id(url)
        # Signed links carry an `expires` epoch timestamp; fail fast when stale.
        expiry = int_or_none(traverse_obj(parse_qs(url), ('expires', 0)))
        if expiry and expiry <= time_seconds():
            raise ExtractorError('The link is expired', video_id=page_id, expected=True)

        webpage = self._download_webpage(url, page_id)
        # Locate the <script> tag pointing at the player API.
        player_url = self._search_regex(
            r'<script [^>]*\bsrc="(https://player-api\.p\.uliza\.jp/v1/players/[^"]+)"',
            webpage, 'player data url')
        page_title = self._html_extract_title(webpage)

        return self.url_result(
            player_url, UlizaPlayerIE, url_transparent=True,
            display_id=page_id, video_title=page_title)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/thestar.py | yt_dlp/extractor/thestar.py | from .common import InfoExtractor
class TheStarIE(InfoExtractor):
    """Extract thestar.com articles by delegating to the Brightcove player."""
    _VALID_URL = r'https?://(?:www\.)?thestar\.com/(?:[^/]+/)*(?P<id>.+)\.html'
    _TEST = {
        'url': 'http://www.thestar.com/life/2016/02/01/mankind-why-this-woman-started-a-men-s-skincare-line.html',
        'md5': '2c62dd4db2027e35579fefb97a8b6554',
        'info_dict': {
            'id': '4732393888001',
            'ext': 'mp4',
            'title': 'Mankind: Why this woman started a men\'s skin care line',
            'description': 'Robert Cribb talks to Young Lee, the founder of Uncle Peter\'s MAN.',
            'uploader_id': '794267642001',
            'timestamp': 1454353482,
            'upload_date': '20160201',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/794267642001/default_default/index.html?videoId=%s'

    def _real_extract(self, url):
        slug = self._match_id(url)
        page = self._download_webpage(url, slug)
        # The article page embeds the Brightcove video ID inline.
        bc_id = self._search_regex(
            r'mainartBrightcoveVideoId["\']?\s*:\s*["\']?(\d+)',
            page, 'brightcove id')
        bc_url = self.BRIGHTCOVE_URL_TEMPLATE % bc_id
        return self.url_result(bc_url, 'BrightcoveNew', bc_id)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/metacritic.py | yt_dlp/extractor/metacritic.py | import re
from .common import InfoExtractor
from ..utils import (
fix_xml_ampersands,
)
class MetacriticIE(InfoExtractor):
    """Extract game trailers from metacritic.com via its video_data XML feed."""
    _VALID_URL = r'https?://(?:www\.)?metacritic\.com/.+?/trailers/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.metacritic.com/game/playstation-4/infamous-second-son/trailers/3698222',
        'info_dict': {
            'id': '3698222',
            'ext': 'mp4',
            'title': 'inFamous: Second Son - inSide Sucker Punch: Smoke & Mirrors',
            'description': 'Take a peak behind-the-scenes to see how Sucker Punch brings smoke into the universe of inFAMOUS Second Son on the PS4.',
            'duration': 221,
        },
        'skip': 'Not providing trailers anymore',
    }, {
        'url': 'http://www.metacritic.com/game/playstation-4/tales-from-the-borderlands-a-telltale-game-series/trailers/5740315',
        'info_dict': {
            'id': '5740315',
            'ext': 'mp4',
            'title': 'Tales from the Borderlands - Finale: The Vault of the Traveler',
            'description': 'In the final episode of the season, all hell breaks loose. Jack is now in control of Helios\' systems, and he\'s ready to reclaim his rightful place as king of Hyperion (with or without you).',
            'duration': 114,
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_valid_url(url).group('id')
        webpage = self._download_webpage(url, video_id)
        # The xml is not well formatted, there are raw '&'
        info = self._download_xml(
            'http://www.metacritic.com/video_data?video=' + video_id,
            video_id, 'Downloading info xml', transform_source=fix_xml_ampersands)

        # Pick the <clip> whose <id> matches the requested video.
        clip = next(c for c in info.findall('playList/clip') if c.find('id').text == video_id)
        formats = [{
            'url': video_file.find('filePath').text,
            'ext': 'mp4',
            'format_id': video_file.find('rate').text,
            'tbr': int(video_file.find('rate').text),
        } for video_file in clip.findall('httpURI/videoFile')]

        description = self._html_search_regex(
            r'<b>Description:</b>(.*?)</p>', webpage, 'description', flags=re.DOTALL)

        return {
            'id': video_id,
            'title': clip.find('title').text,
            'formats': formats,
            'description': description,
            'duration': int(clip.find('duration').text),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/stanfordoc.py | yt_dlp/extractor/stanfordoc.py | import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
orderedSet,
unescapeHTML,
)
class StanfordOpenClassroomIE(InfoExtractor):
    """Extractor for Stanford Open ClassRoom.

    A single URL pattern covers three page types, distinguished by which
    query parameters are present: a specific video (course + video), a
    course page (course only), or the root catalog page (neither).
    """
    IE_NAME = 'stanfordoc'
    IE_DESC = 'Stanford Open ClassRoom'
    _VALID_URL = r'https?://openclassroom\.stanford\.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$'
    _TEST = {
        'url': 'http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100',
        'md5': '544a9468546059d4e80d76265b0443b8',
        'info_dict': {
            'id': 'PracticalUnix_intro-environment',
            'ext': 'mp4',
            'title': 'Intro Environment',
        },
    }

    def _real_extract(self, url):
        """Dispatch on URL type: single video, course playlist, or root playlist."""
        mobj = self._match_valid_url(url)

        if mobj.group('course') and mobj.group('video'):  # A specific video
            course = mobj.group('course')
            video = mobj.group('video')
            info = {
                'id': course + '_' + video,
                'uploader': None,
                'upload_date': None,
            }
            # Per-video metadata lives in an XML file next to the media.
            base_url = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/'
            xml_url = base_url + video + '.xml'
            mdoc = self._download_xml(xml_url, info['id'])
            try:
                info['title'] = mdoc.findall('./title')[0].text
                info['url'] = base_url + mdoc.findall('./videoFile')[0].text
            except IndexError:
                raise ExtractorError('Invalid metadata XML file')
            return info
        elif mobj.group('course'):  # A course page
            course = mobj.group('course')
            info = {
                'id': course,
                '_type': 'playlist',
                'uploader': None,
                'upload_date': None,
            }

            coursepage = self._download_webpage(
                url, info['id'],
                note='Downloading course info page',
                errnote='Unable to download course info page')

            info['title'] = self._html_search_regex(
                r'<h1>([^<]+)</h1>', coursepage, 'title', default=info['id'])

            info['description'] = self._html_search_regex(
                r'(?s)<description>([^<]+)</description>',
                coursepage, 'description', fatal=False)

            # Each VideoPage link on the course page becomes a playlist entry
            # (orderedSet dedupes while preserving page order).
            links = orderedSet(re.findall(r'<a href="(VideoPage\.php\?[^"]+)">', coursepage))
            info['entries'] = [self.url_result(
                f'http://openclassroom.stanford.edu/MainFolder/{unescapeHTML(l)}',
            ) for l in links]
            return info
        else:  # Root page
            info = {
                'id': 'Stanford OpenClassroom',
                '_type': 'playlist',
                'uploader': None,
                'upload_date': None,
            }
            info['title'] = info['id']

            # Root catalog: one playlist entry per CoursePage link.
            root_url = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php'
            rootpage = self._download_webpage(root_url, info['id'],
                                              errnote='Unable to download course info page')

            links = orderedSet(re.findall(r'<a href="(CoursePage\.php\?[^"]+)">', rootpage))
            info['entries'] = [self.url_result(
                f'http://openclassroom.stanford.edu/MainFolder/{unescapeHTML(l)}',
            ) for l in links]
            return info
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/amara.py | yt_dlp/extractor/amara.py | from .common import InfoExtractor
from .vimeo import VimeoIE
from .youtube import YoutubeIE
from ..utils import (
int_or_none,
parse_iso8601,
update_url_query,
)
class AmaraIE(InfoExtractor):
    """Extractor for amara.org (community subtitling platform).

    Amara wraps videos hosted elsewhere (YouTube, Vimeo, or direct links)
    and contributes community subtitles. Subtitles come from the Amara API;
    when the wrapped URL belongs to YouTube/Vimeo the result is delegated
    to that extractor via `url_transparent`.
    """
    _VALID_URL = r'https?://(?:www\.)?amara\.org/(?:\w+/)?videos/(?P<id>\w+)'
    _TESTS = [{
        # Youtube
        'url': 'https://amara.org/en/videos/jVx79ZKGK1ky/info/why-jury-trials-are-becoming-less-common/?tab=video',
        'md5': 'ea10daf2b6154b8c1ecf9922aca5e8ae',
        'info_dict': {
            'id': 'h6ZuVdvYnfE',
            'ext': 'mp4',
            'title': 'Why jury trials are becoming less common',
            'description': 'md5:a61811c319943960b6ab1c23e0cbc2c1',
            'thumbnail': r're:^https?://.*\.jpg$',
            'subtitles': dict,
            'upload_date': '20160813',
            'uploader': 'PBS NewsHour',
            'uploader_id': 'PBSNewsHour',
            'timestamp': 1549639570,
        },
    }, {
        # Vimeo
        'url': 'https://amara.org/en/videos/kYkK1VUTWW5I/info/vimeo-at-ces-2011',
        'md5': '99392c75fa05d432a8f11df03612195e',
        'info_dict': {
            'id': '18622084',
            'ext': 'mov',
            'title': 'Vimeo at CES 2011!',
            'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
            'thumbnail': r're:^https?://.*\.jpg$',
            'subtitles': dict,
            'timestamp': 1294763658,
            'upload_date': '20110111',
            'uploader': 'Sam Morrill',
            'uploader_id': 'sammorrill',
        },
    }, {
        # Direct Link
        'url': 'https://amara.org/en/videos/s8KL7I3jLmh6/info/the-danger-of-a-single-story/',
        'md5': 'd3970f08512738ee60c5807311ff5d3f',
        'info_dict': {
            'id': 's8KL7I3jLmh6',
            'ext': 'mp4',
            'title': 'The danger of a single story',
            'description': 'md5:d769b31139c3b8bb5be9177f62ea3f23',
            'thumbnail': r're:^https?://.*\.jpg$',
            'subtitles': dict,
            'upload_date': '20091007',
            'timestamp': 1254942511,
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        meta = self._download_json(
            f'https://amara.org/api/videos/{video_id}/',
            video_id, query={'format': 'json'})

        title = meta['title']
        # The first wrapped URL is treated as the primary source.
        video_url = meta['all_urls'][0]

        subtitles = {}
        for language in (meta.get('languages') or []):
            subtitles_uri = language.get('subtitles_uri')
            # Only include languages that have published subtitles.
            if not (subtitles_uri and language.get('published')):
                continue
            subtitle = subtitles.setdefault(language.get('code') or 'en', [])
            # The same subtitle endpoint can serve multiple formats.
            for f in ('json', 'srt', 'vtt'):
                subtitle.append({
                    'ext': f,
                    'url': update_url_query(subtitles_uri, {'format': f}),
                })

        info = {
            'url': video_url,
            'id': video_id,
            'subtitles': subtitles,
            'title': title,
            'description': meta.get('description'),
            'thumbnail': meta.get('thumbnail'),
            'duration': int_or_none(meta.get('duration')),
            'timestamp': parse_iso8601(meta.get('created')),
        }

        # Delegate to YouTube/Vimeo when the wrapped URL matches, keeping
        # Amara's metadata and subtitles via url_transparent.
        for ie in (YoutubeIE, VimeoIE):
            if ie.suitable(video_url):
                info.update({
                    '_type': 'url_transparent',
                    'ie_key': ie.ie_key(),
                })
                break

        return info
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/radiko.py | yt_dlp/extractor/radiko.py | import base64
import random
import re
import urllib.parse
from .common import InfoExtractor
from ..utils import (
ExtractorError,
clean_html,
join_nonempty,
time_seconds,
try_call,
unified_timestamp,
update_url_query,
)
from ..utils.traversal import traverse_obj
class RadikoBaseIE(InfoExtractor):
    """Shared authentication and stream-extraction logic for radiko.jp.

    Implements the auth1/auth2 token handshake (with on-disk caching of the
    resulting token/area pair), weekly program-guide lookup, and m3u8 format
    extraction for both live and time-free streams.
    """
    _GEO_BYPASS = False
    _FULL_KEY = None
    # Time-free playlist hosts whose streams ffmpeg cannot handle properly;
    # formats from these hosts are deprioritized in _extract_formats().
    _HOSTS_FOR_TIME_FREE_FFMPEG_UNSUPPORTED = (
        'https://c-rpaa.smartstream.ne.jp',
        'https://si-c-radiko.smartstream.ne.jp',
        'https://tf-f-rpaa-radiko.smartstream.ne.jp',
        'https://tf-c-rpaa-radiko.smartstream.ne.jp',
        'https://si-f-radiko.smartstream.ne.jp',
        'https://rpaa.smartstream.ne.jp',
    )
    _HOSTS_FOR_TIME_FREE_FFMPEG_SUPPORTED = (
        'https://rd-wowza-radiko.radiko-cf.com',
        'https://radiko.jp',
        'https://f-radiko.smartstream.ne.jp',
    )
    # Following URL forcibly connects not Time Free but Live
    _HOSTS_FOR_LIVE = (
        'https://c-radiko.smartstream.ne.jp',
    )

    def _negotiate_token(self):
        """Run the auth1/auth2 handshake; return and cache (auth_token, area_id).

        Raises a geo-restriction error when radiko reports the client is
        outside Japan (area 'OUT').
        """
        _, auth1_handle = self._download_webpage_handle(
            'https://radiko.jp/v2/api/auth1', None, 'Downloading authentication page',
            headers={
                'x-radiko-app': 'pc_html5',
                'x-radiko-app-version': '0.0.1',
                'x-radiko-device': 'pc',
                'x-radiko-user': 'dummy_user',
            })
        auth1_header = auth1_handle.headers

        auth_token = auth1_header['X-Radiko-AuthToken']
        kl = int(auth1_header['X-Radiko-KeyLength'])
        ko = int(auth1_header['X-Radiko-KeyOffset'])
        # auth2 expects a base64-encoded slice of the player's full key,
        # selected by the offset/length that auth1 returned.
        raw_partial_key = self._extract_full_key()[ko:ko + kl]
        partial_key = base64.b64encode(raw_partial_key).decode()

        area_id = self._download_webpage(
            'https://radiko.jp/v2/api/auth2', None, 'Authenticating',
            headers={
                'x-radiko-device': 'pc',
                'x-radiko-user': 'dummy_user',
                'x-radiko-authtoken': auth_token,
                'x-radiko-partialkey': partial_key,
            }).split(',')[0]

        if area_id == 'OUT':
            self.raise_geo_restricted(countries=['JP'])

        auth_data = (auth_token, area_id)
        self.cache.store('radiko', 'auth_data', auth_data)
        return auth_data

    def _auth_client(self):
        """Return cached (auth_token, area_id) if still valid, else re-negotiate."""
        cachedata = self.cache.load('radiko', 'auth_data')
        if cachedata is not None:
            response = self._download_webpage(
                'https://radiko.jp/v2/api/auth_check', None, 'Checking cached token', expected_status=401,
                headers={'X-Radiko-AuthToken': cachedata[0], 'X-Radiko-AreaId': cachedata[1]})
            if response == 'OK':
                return cachedata
        return self._negotiate_token()

    def _extract_full_key(self):
        """Extract the full auth key from the player JS (cached per instance)."""
        if self._FULL_KEY:
            return self._FULL_KEY

        jscode = self._download_webpage(
            'https://radiko.jp/apps/js/playerCommon.js', None,
            note='Downloading player js code')
        full_key = self._search_regex(
            (r"RadikoJSPlayer\([^,]*,\s*(['\"])pc_html5\1,\s*(['\"])(?P<fullkey>[0-9a-f]+)\2,\s*{"),
            jscode, 'full key', fatal=False, group='fullkey')

        if full_key:
            full_key = full_key.encode()
        else:  # use only full key ever known
            full_key = b'bcd151073c03b352e1ef2fd66c32209da9ca0afa'

        self._FULL_KEY = full_key
        return full_key

    def _find_program(self, video_id, station, cursor):
        """Locate the program airing at `cursor` (epoch seconds) in `station`'s
        weekly guide.

        Returns (prog_element, station_program_xml, start_timestamp,
        start_string, end_string). Raises ExtractorError when no program
        covers the given time.
        """
        station_program = self._download_xml(
            f'https://radiko.jp/v3/program/station/weekly/{station}.xml', video_id,
            note=f'Downloading radio program for {station} station')

        for p in station_program.findall('.//prog'):
            ft_str, to_str = p.attrib['ft'], p.attrib['to']
            ft = unified_timestamp(ft_str, False)
            to = unified_timestamp(to_str, False)
            # Skip malformed guide entries instead of crashing on the range
            # check below. (This replaces the old `assert ft, to`, which was
            # stripped under `python -O` and, even when active, only validated
            # `ft` -- `to` was merely the assertion message.)
            if ft is None or to is None:
                continue
            if ft <= cursor < to:
                return p, station_program, ft, ft_str, to_str
        raise ExtractorError('Cannot identify radio program to download!')

    def _extract_formats(self, video_id, station, is_onair, ft, cursor, auth_token, area_id, query):
        """Build format list from the station's pc_html5 stream manifest.

        `is_onair` selects live vs. time-free playlist URLs; for time-free
        streams an ffmpeg `-ss` offset is attached so playback starts at
        `cursor` rather than at the program start `ft`.
        """
        m3u8_playlist_data = self._download_xml(
            f'https://radiko.jp/v3/station/stream/pc_html5/{station}.xml', video_id,
            note='Downloading stream information')

        formats = []
        found = set()  # dedupe playlist_create_url entries

        timefree_int = 0 if is_onair else 1

        for element in m3u8_playlist_data.findall(f'.//url[@timefree="{timefree_int}"]/playlist_create_url'):
            pcu = element.text
            if pcu in found:
                continue
            found.add(pcu)
            playlist_url = update_url_query(pcu, {
                'station_id': station,
                **query,
                'l': '15',
                'lsid': ''.join(random.choices('0123456789abcdef', k=32)),
                'type': 'b',
            })

            time_to_skip = None if is_onair else cursor - ft

            domain = urllib.parse.urlparse(playlist_url).netloc
            subformats = self._extract_m3u8_formats(
                playlist_url, video_id, ext='m4a',
                live=True, fatal=False, m3u8_id=domain,
                note=f'Downloading m3u8 information from {domain}',
                headers={
                    'X-Radiko-AreaId': area_id,
                    'X-Radiko-AuthToken': auth_token,
                })
            for sf in subformats:
                # Deprioritize hosts that serve the wrong stream kind (live
                # hosts for time-free requests and vice versa) or that ffmpeg
                # cannot download properly.
                if (is_onair ^ pcu.startswith(self._HOSTS_FOR_LIVE)) or (
                        not is_onair and pcu.startswith(self._HOSTS_FOR_TIME_FREE_FFMPEG_UNSUPPORTED)):
                    sf['preference'] = -100
                    sf['format_note'] = 'not preferred'
                if not is_onair and timefree_int == 1 and time_to_skip:
                    sf['downloader_options'] = {'ffmpeg_args': ['-ss', str(time_to_skip)]}
            formats.extend(subformats)

        return formats

    def _extract_performers(self, prog):
        """Split the <pfm> cast string on common separators; None when empty."""
        return traverse_obj(prog, (
            'pfm/text()', ..., {lambda x: re.split(r'[//、 ,,]', x)}, ..., {str.strip})) or None
class RadikoIE(RadikoBaseIE):
    """Time-free (catch-up) radiko.jp extractor for /#!/ts/<station>/<time> URLs."""
    _VALID_URL = r'https?://(?:www\.)?radiko\.jp/#!/ts/(?P<station>[A-Z0-9-]+)/(?P<timestring>\d+)'

    _TESTS = [{
        # QRR (文化放送) station provides <desc>
        'url': 'https://radiko.jp/#!/ts/QRR/20210425101300',
        'only_matching': True,
    }, {
        # FMT (TOKYO FM) station does not provide <desc>
        'url': 'https://radiko.jp/#!/ts/FMT/20210810150000',
        'only_matching': True,
    }, {
        'url': 'https://radiko.jp/#!/ts/JOAK-FM/20210509090000',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        station, timestring = self._match_valid_url(url).group('station', 'timestring')
        video_id = join_nonempty(station, timestring)
        # The URL's timestring is a JST wall-clock time; it is both the
        # program lookup cursor and the seek position within the program.
        vid_int = unified_timestamp(timestring, False)
        prog, station_program, ft, radio_begin, radio_end = self._find_program(video_id, station, vid_int)

        auth_token, area_id = self._auth_client()

        return {
            'id': video_id,
            'title': try_call(lambda: prog.find('title').text),
            'cast': self._extract_performers(prog),
            'description': clean_html(try_call(lambda: prog.find('info').text)),
            'uploader': try_call(lambda: station_program.find('.//name').text),
            'uploader_id': station,
            'timestamp': vid_int,
            'duration': try_call(lambda: unified_timestamp(radio_end, False) - unified_timestamp(radio_begin, False)),
            'is_live': True,
            'formats': self._extract_formats(
                video_id=video_id, station=station, is_onair=False,
                ft=ft, cursor=vid_int, auth_token=auth_token, area_id=area_id,
                # Time-free playback window: program start/end plus the
                # requested seek position.
                query={
                    'start_at': radio_begin,
                    'ft': radio_begin,
                    'end_at': radio_end,
                    'to': radio_end,
                    'seek': timestring,
                },
            ),
        }
class RadikoRadioIE(RadikoBaseIE):
    """Live radiko.jp extractor for /#!/live/<station> URLs."""
    _VALID_URL = r'https?://(?:www\.)?radiko\.jp/#!/live/(?P<id>[A-Z0-9-]+)'

    _TESTS = [{
        # QRR (文化放送) station provides <desc>
        'url': 'https://radiko.jp/#!/live/QRR',
        'only_matching': True,
    }, {
        # FMT (TOKYO FM) station does not provide <desc>
        'url': 'https://radiko.jp/#!/live/FMT',
        'only_matching': True,
    }, {
        'url': 'https://radiko.jp/#!/live/JOAK-FM',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        station = self._match_id(url)
        self.report_warning('Downloader will not stop at the end of the program! Press Ctrl+C to stop')

        token, area = self._auth_client()
        # Current time in JST (UTC+9, no DST) selects the on-air program.
        now = time_seconds(hours=9)
        prog, station_program, ft, _, _ = self._find_program(station, station, now)

        return {
            'id': station,
            'title': prog.find('title').text,
            'cast': self._extract_performers(prog),
            'description': clean_html(prog.find('info').text),
            'uploader': station_program.find('.//name').text,
            'uploader_id': station,
            'timestamp': ft,
            'formats': self._extract_formats(
                video_id=station, station=station, is_onair=True,
                ft=ft, cursor=now, auth_token=token, area_id=area,
                query={}),
            'is_live': True,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/foxsports.py | yt_dlp/extractor/foxsports.py | from .common import InfoExtractor
from .uplynk import UplynkPreplayIE
from ..networking import HEADRequest
from ..utils import float_or_none, make_archive_id, smuggle_url
class FoxSportsIE(InfoExtractor):
    """Extract FOX Sports clips by delegating playback to the Uplynk extractor."""
    _VALID_URL = r'https?://(?:www\.)?foxsports\.com/watch/(?P<id>[\w-]+)'
    _TESTS = [{
        'url': 'https://www.foxsports.com/watch/play-612168c6700004b',
        'info_dict': {
            'id': 'b72f5bd8658140baa5791bb676433733',
            'ext': 'mp4',
            'display_id': 'play-612168c6700004b',
            'title': 'md5:e0c4ecac3a1f25295b4fae22fb5c126a',
            'description': 'md5:371bc43609708ae2b9e1a939229762af',
            'uploader_id': '06b4a36349624051a9ba52ac3a91d268',
            'upload_date': '20221205',
            'timestamp': 1670262586,
            'duration': 31.7317,
            'thumbnail': r're:^https?://.*\.jpg$',
            'extra_param_to_segment_url': str,
        },
        'params': {
            'skip_download': 'm3u8',
        },
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        json_ld = self._search_json_ld(webpage, display_id, expected_type='VideoObject', default={})
        data = self._download_json(
            f'https://api3.fox.com/v2.0/vodplayer/sportsclip/{display_id}',
            display_id, note='Downloading API JSON', headers={
                'x-api-key': 'cf289e299efdfa39fb6316f259d1de93',
            })
        # Resolve redirects up front so Uplynk gets the final preplay URL.
        preplay_url = self._request_webpage(
            HEADRequest(data['url']), display_id, 'Fetching preplay URL').url

        return {
            '_type': 'url_transparent',
            'ie_key': UplynkPreplayIE.ie_key(),
            'url': smuggle_url(preplay_url, {'Origin': 'https://www.foxsports.com'}),
            'display_id': display_id,
            'title': data.get('name') or json_ld.get('title'),
            'description': data.get('description') or json_ld.get('description'),
            'duration': float_or_none(data.get('durationInSeconds')),
            'timestamp': json_ld.get('timestamp'),
            'thumbnails': json_ld.get('thumbnails'),
            '_old_archive_ids': [make_archive_id(self, display_id)],
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/prosiebensat1.py | yt_dlp/extractor/prosiebensat1.py | import hashlib
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
determine_ext,
float_or_none,
int_or_none,
join_nonempty,
merge_dicts,
unified_strdate,
)
class ProSiebenSat1BaseIE(InfoExtractor):
    """Shared format-extraction logic for ProSiebenSat.1 sites.

    Two backends are tried in order: the newer v4 API (when the subclass
    defines `_ACCESS_ID`, `_ENCRYPTION_KEY` and `_IV`), then the legacy
    sim-technik VAS API. Subclasses must also define `_TOKEN`, `_SALT`
    and `_CLIENT_NAME` for the legacy flow.
    """
    _GEO_BYPASS = False
    _ACCESS_ID = None
    _SUPPORTED_PROTOCOLS = 'dash:clear,hls:clear,progressive:clear'
    _V4_BASE_URL = 'https://vas-v4.p7s1video.net/4.0/get'

    def _extract_video_info(self, url, clip_id):
        """Return a partial info dict (duration + formats) for `clip_id`.

        Raises a DRM error for protected clips and a geo error when the
        v4 API reports a failed geo check (AT/CH/DE only).
        """
        client_location = url

        video = self._download_json(
            'http://vas.sim-technik.de/vas/live/v2/videos',
            clip_id, 'Downloading videos JSON', query={
                'access_token': self._TOKEN,
                'client_location': client_location,
                'client_name': self._CLIENT_NAME,
                'ids': clip_id,
            })[0]

        if not self.get_param('allow_unplayable_formats') and video.get('is_protected') is True:
            self.report_drm(clip_id)

        formats = []
        if self._ACCESS_ID:
            # --- v4 API flow: client tokens are SHA1 digests over a fixed
            # concatenation of key material, clip ID and request fields. ---
            raw_ct = self._ENCRYPTION_KEY + clip_id + self._IV + self._ACCESS_ID
            protocols = self._download_json(
                self._V4_BASE_URL + 'protocols', clip_id,
                'Downloading protocols JSON',
                headers=self.geo_verification_headers(), query={
                    'access_id': self._ACCESS_ID,
                    'client_token': hashlib.sha1((raw_ct).encode()).hexdigest(),
                    'video_id': clip_id,
                }, fatal=False, expected_status=(403,)) or {}
            error = protocols.get('error') or {}
            if error.get('title') == 'Geo check failed':
                self.raise_geo_restricted(countries=['AT', 'CH', 'DE'])
            server_token = protocols.get('server_token')
            if server_token:
                urls = (self._download_json(
                    self._V4_BASE_URL + 'urls', clip_id, 'Downloading urls JSON', query={
                        'access_id': self._ACCESS_ID,
                        'client_token': hashlib.sha1((raw_ct + server_token + self._SUPPORTED_PROTOCOLS).encode()).hexdigest(),
                        'protocols': self._SUPPORTED_PROTOCOLS,
                        'server_token': server_token,
                        'video_id': clip_id,
                    }, fatal=False) or {}).get('urls') or {}
                for protocol, variant in urls.items():
                    source_url = variant.get('clear', {}).get('url')
                    if not source_url:
                        continue
                    if protocol == 'dash':
                        formats.extend(self._extract_mpd_formats(
                            source_url, clip_id, mpd_id=protocol, fatal=False))
                    elif protocol == 'hls':
                        formats.extend(self._extract_m3u8_formats(
                            source_url, clip_id, 'mp4', 'm3u8_native',
                            m3u8_id=protocol, fatal=False))
                    else:
                        formats.append({
                            'url': source_url,
                            'format_id': protocol,
                        })
        if not formats:
            # --- Legacy VAS API flow: per-request client IDs are salted
            # SHA1 digests prefixed with the first two salt characters. ---
            source_ids = [str(source['id']) for source in video['sources']]

            client_id = self._SALT[:2] + hashlib.sha1(''.join([clip_id, self._SALT, self._TOKEN, client_location, self._SALT, self._CLIENT_NAME]).encode()).hexdigest()

            sources = self._download_json(
                f'http://vas.sim-technik.de/vas/live/v2/videos/{clip_id}/sources',
                clip_id, 'Downloading sources JSON', query={
                    'access_token': self._TOKEN,
                    'client_id': client_id,
                    'client_location': client_location,
                    'client_name': self._CLIENT_NAME,
                })
            server_id = sources['server_id']

            def fix_bitrate(bitrate):
                # Some bitrates are reported in bps; convert exact
                # multiples of 1000 to kbps.
                bitrate = int_or_none(bitrate)
                if not bitrate:
                    return None
                return (bitrate // 1000) if bitrate % 1000 == 0 else bitrate

            for source_id in source_ids:
                client_id = self._SALT[:2] + hashlib.sha1(''.join([self._SALT, clip_id, self._TOKEN, server_id, client_location, source_id, self._SALT, self._CLIENT_NAME]).encode()).hexdigest()
                urls = self._download_json(
                    f'http://vas.sim-technik.de/vas/live/v2/videos/{clip_id}/sources/url',
                    clip_id, 'Downloading urls JSON', fatal=False, query={
                        'access_token': self._TOKEN,
                        'client_id': client_id,
                        'client_location': client_location,
                        'client_name': self._CLIENT_NAME,
                        'server_id': server_id,
                        'source_ids': source_id,
                    })
                if not urls:
                    continue
                if urls.get('status_code') != 0:
                    raise ExtractorError('This video is unavailable', expected=True)
                urls_sources = urls['sources']
                if isinstance(urls_sources, dict):
                    urls_sources = urls_sources.values()
                for source in urls_sources:
                    source_url = source.get('url')
                    if not source_url:
                        continue
                    protocol = source.get('protocol')
                    mimetype = source.get('mimetype')
                    if mimetype == 'application/f4m+xml' or 'f4mgenerator' in source_url or determine_ext(source_url) == 'f4m':
                        formats.extend(self._extract_f4m_formats(
                            source_url, clip_id, f4m_id='hds', fatal=False))
                    elif mimetype == 'application/x-mpegURL':
                        formats.extend(self._extract_m3u8_formats(
                            source_url, clip_id, 'mp4', 'm3u8_native',
                            m3u8_id='hls', fatal=False))
                    elif mimetype == 'application/dash+xml':
                        formats.extend(self._extract_mpd_formats(
                            source_url, clip_id, mpd_id='dash', fatal=False))
                    else:
                        tbr = fix_bitrate(source['bitrate'])

                        if protocol in ('rtmp', 'rtmpe'):
                            # Split an RTMP URL into app and play path at
                            # the last 'mp4:' marker, as rtmpdump expects.
                            mobj = re.search(r'^(?P<url>rtmpe?://[^/]+)/(?P<path>.+)$', source_url)
                            if not mobj:
                                continue
                            path = mobj.group('path')
                            mp4colon_index = path.rfind('mp4:')
                            app = path[:mp4colon_index]
                            play_path = path[mp4colon_index:]
                            formats.append({
                                'url': '{}/{}'.format(mobj.group('url'), app),
                                'app': app,
                                'play_path': play_path,
                                'player_url': 'http://livepassdl.conviva.com/hf/ver/2.79.0.17083/LivePassModuleMain.swf',
                                'page_url': 'http://www.prosieben.de',
                                'tbr': tbr,
                                'ext': 'flv',
                                'format_id': join_nonempty('rtmp', tbr),
                            })
                        else:
                            formats.append({
                                'url': source_url,
                                'tbr': tbr,
                                'format_id': join_nonempty('http', tbr),
                            })

        return {
            'duration': float_or_none(video.get('duration')),
            'formats': formats,
        }
class ProSiebenSat1IE(ProSiebenSat1BaseIE):
IE_NAME = 'prosiebensat1'
IE_DESC = 'ProSiebenSat.1 Digital'
_VALID_URL = r'''(?x)
https?://
(?:www\.)?
(?:
(?:beta\.)?
(?:
prosieben(?:maxx)?|sixx|sat1(?:gold)?|kabeleins(?:doku)?|the-voice-of-germany|advopedia
)\.(?:de|at|ch)|
ran\.de|fem\.com|advopedia\.de|galileo\.tv/video
)
/(?P<id>.+)
'''
_TESTS = [
{
# Tests changes introduced in https://github.com/ytdl-org/youtube-dl/pull/6242
# in response to fixing https://github.com/ytdl-org/youtube-dl/issues/6215:
# - malformed f4m manifest support
# - proper handling of URLs starting with `https?://` in 2.0 manifests
# - recursive child f4m manifests extraction
'url': 'http://www.prosieben.de/tv/circus-halligalli/videos/218-staffel-2-episode-18-jahresrueckblick-ganze-folge',
'info_dict': {
'id': '2104602',
'ext': 'mp4',
'title': 'CIRCUS HALLIGALLI - Episode 18 - Staffel 2',
'description': 'md5:8733c81b702ea472e069bc48bb658fc1',
'upload_date': '20131231',
'duration': 5845.04,
'series': 'CIRCUS HALLIGALLI',
'season_number': 2,
'episode': 'Episode 18 - Staffel 2',
'episode_number': 18,
},
},
{
'url': 'http://www.prosieben.de/videokatalog/Gesellschaft/Leben/Trends/video-Lady-Umstyling-f%C3%BCr-Audrina-Rebekka-Audrina-Fergen-billig-aussehen-Battal-Modica-700544.html',
'info_dict': {
'id': '2570327',
'ext': 'mp4',
'title': 'Lady-Umstyling für Audrina',
'description': 'md5:4c16d0c17a3461a0d43ea4084e96319d',
'upload_date': '20131014',
'duration': 606.76,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Seems to be broken',
},
{
'url': 'http://www.prosiebenmaxx.de/tv/experience/video/144-countdown-fuer-die-autowerkstatt-ganze-folge',
'info_dict': {
'id': '2429369',
'ext': 'mp4',
'title': 'Countdown für die Autowerkstatt',
'description': 'md5:809fc051a457b5d8666013bc40698817',
'upload_date': '20140223',
'duration': 2595.04,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'This video is unavailable',
},
{
'url': 'http://www.sixx.de/stars-style/video/sexy-laufen-in-ugg-boots-clip',
'info_dict': {
'id': '2904997',
'ext': 'mp4',
'title': 'Sexy laufen in Ugg Boots',
'description': 'md5:edf42b8bd5bc4e5da4db4222c5acb7d6',
'upload_date': '20140122',
'duration': 245.32,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'This video is unavailable',
},
{
'url': 'http://www.sat1.de/film/der-ruecktritt/video/im-interview-kai-wiesinger-clip',
'info_dict': {
'id': '2906572',
'ext': 'mp4',
'title': 'Im Interview: Kai Wiesinger',
'description': 'md5:e4e5370652ec63b95023e914190b4eb9',
'upload_date': '20140203',
'duration': 522.56,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'This video is unavailable',
},
{
'url': 'http://www.kabeleins.de/tv/rosins-restaurants/videos/jagd-auf-fertigkost-im-elsthal-teil-2-ganze-folge',
'info_dict': {
'id': '2992323',
'ext': 'mp4',
'title': 'Jagd auf Fertigkost im Elsthal - Teil 2',
'description': 'md5:2669cde3febe9bce13904f701e774eb6',
'upload_date': '20141014',
'duration': 2410.44,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'This video is unavailable',
},
{
'url': 'http://www.ran.de/fussball/bundesliga/video/schalke-toennies-moechte-raul-zurueck-ganze-folge',
'info_dict': {
'id': '3004256',
'ext': 'mp4',
'title': 'Schalke: Tönnies möchte Raul zurück',
'description': 'md5:4b5b271d9bcde223b54390754c8ece3f',
'upload_date': '20140226',
'duration': 228.96,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'This video is unavailable',
},
{
'url': 'http://www.the-voice-of-germany.de/video/31-andreas-kuemmert-rocket-man-clip',
'info_dict': {
'id': '2572814',
'ext': 'mp4',
'title': 'The Voice of Germany - Andreas Kümmert: Rocket Man',
'description': 'md5:6ddb02b0781c6adf778afea606652e38',
'timestamp': 1382041620,
'upload_date': '20131017',
'duration': 469.88,
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://www.fem.com/videos/beauty-lifestyle/kurztrips-zum-valentinstag',
'info_dict': {
'id': '2156342',
'ext': 'mp4',
'title': 'Kurztrips zum Valentinstag',
'description': 'Romantischer Kurztrip zum Valentinstag? Nina Heinemann verrät, was sich hier wirklich lohnt.',
'duration': 307.24,
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://www.prosieben.de/tv/joko-gegen-klaas/videos/playlists/episode-8-ganze-folge-playlist',
'info_dict': {
'id': '439664',
'title': 'Episode 8 - Ganze Folge - Playlist',
'description': 'md5:63b8963e71f481782aeea877658dec84',
},
'playlist_count': 2,
'skip': 'This video is unavailable',
},
{
# title in <h2 class="subtitle">
'url': 'http://www.prosieben.de/stars/oscar-award/videos/jetzt-erst-enthuellt-das-geheimnis-von-emma-stones-oscar-robe-clip',
'info_dict': {
'id': '4895826',
'ext': 'mp4',
'title': 'Jetzt erst enthüllt: Das Geheimnis von Emma Stones Oscar-Robe',
'description': 'md5:e5ace2bc43fadf7b63adc6187e9450b9',
'upload_date': '20170302',
},
'params': {
'skip_download': True,
},
'skip': 'geo restricted to Germany',
},
{
# geo restricted to Germany
'url': 'http://www.kabeleinsdoku.de/tv/mayday-alarm-im-cockpit/video/102-notlandung-im-hudson-river-ganze-folge',
'only_matching': True,
},
{
# geo restricted to Germany
'url': 'http://www.sat1gold.de/tv/edel-starck/video/11-staffel-1-episode-1-partner-wider-willen-ganze-folge',
'only_matching': True,
},
{
# geo restricted to Germany
'url': 'https://www.galileo.tv/video/diese-emojis-werden-oft-missverstanden',
'only_matching': True,
},
{
'url': 'http://www.sat1gold.de/tv/edel-starck/playlist/die-gesamte-1-staffel',
'only_matching': True,
},
{
'url': 'http://www.advopedia.de/videos/lenssen-klaert-auf/lenssen-klaert-auf-folge-8-staffel-3-feiertage-und-freie-tage',
'only_matching': True,
},
]
_TOKEN = 'prosieben'
_SALT = '01!8d8F_)r9]4s[qeuXfP%'
_CLIENT_NAME = 'kolibri-2.0.19-splec4'
_ACCESS_ID = 'x_prosiebenmaxx-de'
_ENCRYPTION_KEY = 'Eeyeey9oquahthainoofashoyoikosag'
_IV = 'Aeluchoc6aevechuipiexeeboowedaok'
_CLIPID_REGEXES = [
r'"clip_id"\s*:\s+"(\d+)"',
r'clipid: "(\d+)"',
r'clip[iI]d=(\d+)',
r'clip[iI][dD]\s*=\s*["\'](\d+)',
r"'itemImageUrl'\s*:\s*'/dynamic/thumbnails/full/\d+/(\d+)",
r'proMamsId"\s*:\s*"(\d+)',
r'proMamsId"\s*:\s*"(\d+)',
]
_TITLE_REGEXES = [
r'<h2 class="subtitle" itemprop="name">\s*(.+?)</h2>',
r'<header class="clearfix">\s*<h3>(.+?)</h3>',
r'<!-- start video -->\s*<h1>(.+?)</h1>',
r'<h1 class="att-name">\s*(.+?)</h1>',
r'<header class="module_header">\s*<h2>([^<]+)</h2>\s*</header>',
r'<h2 class="video-title" itemprop="name">\s*(.+?)</h2>',
r'<div[^>]+id="veeseoTitle"[^>]*>(.+?)</div>',
r'<h2[^>]+class="subtitle"[^>]*>([^<]+)</h2>',
]
_DESCRIPTION_REGEXES = [
r'<p itemprop="description">\s*(.+?)</p>',
r'<div class="videoDecription">\s*<p><strong>Beschreibung</strong>: (.+?)</p>',
r'<div class="g-plusone" data-size="medium"></div>\s*</div>\s*</header>\s*(.+?)\s*<footer>',
r'<p class="att-description">\s*(.+?)\s*</p>',
r'<p class="video-description" itemprop="description">\s*(.+?)</p>',
r'<div[^>]+id="veeseoDescription"[^>]*>(.+?)</div>',
]
_UPLOAD_DATE_REGEXES = [
r'<span>\s*(\d{2}\.\d{2}\.\d{4} \d{2}:\d{2}) \|\s*<span itemprop="duration"',
r'<footer>\s*(\d{2}\.\d{2}\.\d{4}) \d{2}:\d{2} Uhr',
r'<span style="padding-left: 4px;line-height:20px; color:#404040">(\d{2}\.\d{2}\.\d{4})</span>',
r'(\d{2}\.\d{2}\.\d{4}) \| \d{2}:\d{2} Min<br/>',
]
_PAGE_TYPE_REGEXES = [
r'<meta name="page_type" content="([^"]+)">',
r"'itemType'\s*:\s*'([^']*)'",
]
_PLAYLIST_ID_REGEXES = [
r'content[iI]d=(\d+)',
r"'itemId'\s*:\s*'([^']*)'",
]
_PLAYLIST_CLIP_REGEXES = [
r'(?s)data-qvt=.+?<a href="([^"]+)"',
]
def _extract_clip(self, url, webpage):
clip_id = self._html_search_regex(
self._CLIPID_REGEXES, webpage, 'clip id')
title = self._html_search_regex(
self._TITLE_REGEXES, webpage, 'title',
default=None) or self._og_search_title(webpage)
info = self._extract_video_info(url, clip_id)
description = self._html_search_regex(
self._DESCRIPTION_REGEXES, webpage, 'description', default=None)
if description is None:
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
upload_date = unified_strdate(
self._html_search_meta('og:published_time', webpage,
'upload date', default=None)
or self._html_search_regex(self._UPLOAD_DATE_REGEXES,
webpage, 'upload date', default=None))
json_ld = self._search_json_ld(webpage, clip_id, default={})
return merge_dicts(info, {
'id': clip_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
}, json_ld)
def _extract_playlist(self, url, webpage):
playlist_id = self._html_search_regex(
self._PLAYLIST_ID_REGEXES, webpage, 'playlist id')
playlist = self._parse_json(
self._search_regex(
r'var\s+contentResources\s*=\s*(\[.+?\]);\s*</script',
webpage, 'playlist'),
playlist_id)
entries = []
for item in playlist:
clip_id = item.get('id') or item.get('upc')
if not clip_id:
continue
info = self._extract_video_info(url, clip_id)
info.update({
'id': clip_id,
'title': item.get('title') or item.get('teaser', {}).get('headline'),
'description': item.get('teaser', {}).get('description'),
'thumbnail': item.get('poster'),
'duration': float_or_none(item.get('duration')),
'series': item.get('tvShowTitle'),
'uploader': item.get('broadcastPublisher'),
})
entries.append(info)
return self.playlist_result(entries, playlist_id)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
page_type = self._search_regex(
self._PAGE_TYPE_REGEXES, webpage,
'page type', default='clip').lower()
if page_type == 'clip':
return self._extract_clip(url, webpage)
elif page_type == 'playlist':
return self._extract_playlist(url, webpage)
else:
raise ExtractorError(
f'Unsupported page type {page_type}', expected=True)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/rtvs.py | yt_dlp/extractor/rtvs.py | import re
from .common import InfoExtractor
from ..utils import (
parse_duration,
traverse_obj,
unified_timestamp,
)
class RTVSIE(InfoExtractor):
    """Archive pages of Slovak Television and Radio (rtvs.sk / stvr.sk)."""
    IE_NAME = 'stvr'
    IE_DESC = 'Slovak Television and Radio (formerly RTVS)'
    _VALID_URL = r'https?://(?:www\.)?(?:rtvs|stvr)\.sk/(?:radio|televizia)/archiv(?:/\d+)?/(?P<id>\d+)/?(?:[#?]|$)'
    _TESTS = [{
        # radio archive
        'url': 'http://www.rtvs.sk/radio/archiv/11224/414872',
        'md5': '134d5d6debdeddf8a5d761cbc9edacb8',
        'info_dict': {
            'id': '414872',
            'ext': 'mp3',
            'title': 'Ostrov pokladov 1 časť.mp3',
            'duration': 2854,
            'thumbnail': 'https://www.stvr.sk/media/a501/image/file/2/0000/rtvs-00009383.png',
            'display_id': '135331',
        },
    }, {
        # tv archive
        'url': 'http://www.rtvs.sk/televizia/archiv/8249/63118',
        'info_dict': {
            'id': '63118',
            'ext': 'mp4',
            'title': 'Amaro Džives - Náš deň',
            'description': 'Galavečer pri príležitosti Medzinárodného dňa Rómov.',
            'thumbnail': 'https://www.stvr.sk/media/a501/image/file/2/0031/L7Qm.amaro_dzives_png.jpg',
            'timestamp': 1428555900,
            'upload_date': '20150409',
            'duration': 4986,
        },
    }, {
        # tv archive
        'url': 'https://www.rtvs.sk/televizia/archiv/18083?utm_source=web&utm_medium=rozcestnik&utm_campaign=Robin',
        'info_dict': {
            'id': '18083',
            'ext': 'mp4',
            'title': 'Robin',
            'description': 'md5:2f70505a7b8364491003d65ff7a0940a',
            'timestamp': 1636652760,
            'display_id': '307655',
            'duration': 831,
            'upload_date': '20211111',
            'thumbnail': 'https://www.stvr.sk/media/a501/image/file/2/0916/robin.jpg',
        },
    }, {
        'url': 'https://www.stvr.sk/radio/archiv/11224/414872',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The archive page hosts a player iframe whose numeric id suffix
        # doubles as a secondary (display) id for the item.
        iframe_id = self._search_regex(
            r'<iframe[^>]+id\s*=\s*"player_[^_]+_([0-9]+)"', webpage, 'Iframe ID')
        iframe_url = self._search_regex(
            fr'<iframe[^>]+id\s*=\s*"player_[^_]+_{re.escape(iframe_id)}"[^>]+src\s*=\s*"([^"]+)"', webpage, 'Iframe URL')
        webpage = self._download_webpage(iframe_url, video_id, 'Downloading iframe')
        json_url = self._search_regex(r'var\s+url\s*=\s*"([^"]+)"\s*\+\s*ruurl', webpage, 'json URL')
        # The scraped URL is scheme-less and ends mid-query string (the page
        # concatenates more onto it), so the browser-identifying parameters
        # are spliced straight on with no '?'/'&' separator.
        data = self._download_json(f'https:{json_url}b=mozilla&p=win&v=97&f=0&d=1', video_id)
        # Some responses use 'clip' instead of 'playlist'; normalize shape.
        if data.get('clip'):
            data['playlist'] = [data['clip']]
        # Radio items expose a direct MP3 source; TV items use an HLS manifest.
        if traverse_obj(data, ('playlist', 0, 'sources', 0, 'type')) == 'audio/mp3':
            formats = [{'url': traverse_obj(data, ('playlist', 0, 'sources', 0, 'src'))}]
        else:
            formats = self._extract_m3u8_formats(traverse_obj(data, ('playlist', 0, 'sources', 0, 'src')), video_id)
        return {
            'id': video_id,
            'display_id': iframe_id,
            'title': traverse_obj(data, ('playlist', 0, 'title')),
            'description': traverse_obj(data, ('playlist', 0, 'description')),
            'duration': parse_duration(traverse_obj(data, ('playlist', 0, 'length'))),
            'thumbnail': traverse_obj(data, ('playlist', 0, 'image')),
            'timestamp': unified_timestamp(traverse_obj(data, ('playlist', 0, 'datetime_create'))),
            'formats': formats,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/npo.py | yt_dlp/extractor/npo.py | import random
import re
import urllib.parse
from .common import InfoExtractor
from ..utils import (
determine_ext,
int_or_none,
merge_dicts,
orderedSet,
str_or_none,
try_call,
unified_timestamp,
url_or_none,
urlencode_postdata,
)
class NPOIE(InfoExtractor):
IE_NAME = 'npo'
IE_DESC = 'npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl'
_VALID_URL = r'''(?x)
(?:
npo:|
https?://
(?:www\.)?
(?:
npo\.nl/(?:[^/]+/)*|
(?:ntr|npostart)\.nl/(?:[^/]+/){2,}|
omroepwnl\.nl/video/fragment/[^/]+__|
(?:zapp|npo3)\.nl/(?:[^/]+/){2,}
)
)
(?P<id>[^/?#]+)
'''
_TESTS = [{
'url': 'http://www.npo.nl/nieuwsuur/22-06-2014/VPWON_1220719',
'md5': '4b3f9c429157ec4775f2c9cb7b911016',
'info_dict': {
'id': 'VPWON_1220719',
'ext': 'm4v',
'title': 'Nieuwsuur',
'description': 'Dagelijks tussen tien en elf: nieuws, sport en achtergronden.',
'upload_date': '20140622',
},
'skip': 'Video was removed',
}, {
'url': 'http://www.npo.nl/de-mega-mike-mega-thomas-show/27-02-2009/VARA_101191800',
'md5': 'da50a5787dbfc1603c4ad80f31c5120b',
'info_dict': {
'id': 'VARA_101191800',
'ext': 'm4v',
'title': 'De Mega Mike & Mega Thomas show: The best of.',
'description': 'md5:3b74c97fc9d6901d5a665aac0e5400f4',
'upload_date': '20090227',
'duration': 2400,
},
'skip': 'Video was removed',
}, {
'url': 'http://www.npo.nl/tegenlicht/25-02-2013/VPWON_1169289',
'md5': '1b279c0547f6b270e014c576415268c5',
'info_dict': {
'id': 'VPWON_1169289',
'ext': 'mp4',
'title': 'Zwart geld: de toekomst komt uit Afrika',
'description': 'md5:dffaf3d628a9c36f78ca48d834246261',
'upload_date': '20130225',
'duration': 3000,
'creator': 'NED2',
'series': 'Tegenlicht',
'timestamp': 1361822340,
'thumbnail': 'https://images.npo.nl/tile/1280x720/142854.jpg',
'episode': 'Zwart geld: de toekomst komt uit Afrika',
'episode_number': 18,
},
}, {
'url': 'http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706',
'info_dict': {
'id': 'WO_VPRO_043706',
'ext': 'mp4',
'title': 'De nieuwe mens - Deel 1',
'description': 'md5:518ae51ba1293ffb80d8d8ce90b74e4b',
'duration': 4680,
'episode': 'De nieuwe mens - Deel 1',
'thumbnail': 'https://images.npo.nl/tile/1280x720/6289.jpg',
'timestamp': 1279716057,
'series': 'De nieuwe mens - Deel 1',
'upload_date': '20100721',
},
'params': {
'skip_download': True,
},
}, {
# non asf in streams
'url': 'http://www.npo.nl/hoe-gaat-europa-verder-na-parijs/10-01-2015/WO_NOS_762771',
'info_dict': {
'id': 'WO_NOS_762771',
'ext': 'mp4',
'title': 'Hoe gaat Europa verder na Parijs?',
},
'params': {
'skip_download': True,
},
'skip': 'Video was removed',
}, {
'url': 'http://www.ntr.nl/Aap-Poot-Pies/27/detail/Aap-poot-pies/VPWON_1233944#content',
'info_dict': {
'id': 'VPWON_1233944',
'ext': 'mp4',
'title': 'Aap, poot, pies',
'description': 'md5:4b46b1b9553b4c036a04d2a532a137e6',
'upload_date': '20150508',
'duration': 599,
'episode': 'Aap, poot, pies',
'thumbnail': 'https://images.poms.omroep.nl/image/s1280/c1280x720/608118.jpg',
'timestamp': 1431064200,
'series': 'Aap, poot, pies',
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.omroepwnl.nl/video/fragment/vandaag-de-dag-verkiezingen__POMS_WNL_853698',
'info_dict': {
'id': 'POW_00996502',
'ext': 'm4v',
'title': '''"Dit is wel een 'landslide'..."''',
'description': 'md5:f8d66d537dfb641380226e31ca57b8e8',
'upload_date': '20150508',
'duration': 462,
},
'params': {
'skip_download': True,
},
'skip': 'Video was removed',
}, {
# audio
'url': 'http://www.npo.nl/jouw-stad-rotterdam/29-01-2017/RBX_FUNX_6683215/RBX_FUNX_7601437',
'info_dict': {
'id': 'RBX_FUNX_6683215',
'ext': 'mp3',
'title': 'Jouw Stad Rotterdam',
'description': 'md5:db251505244f097717ec59fabc372d9f',
},
'params': {
'skip_download': True,
},
'skip': 'Video was removed',
}, {
'url': 'http://www.zapp.nl/de-bzt-show/gemist/KN_1687547',
'only_matching': True,
}, {
'url': 'http://www.zapp.nl/de-bzt-show/filmpjes/POMS_KN_7315118',
'only_matching': True,
}, {
'url': 'http://www.zapp.nl/beste-vrienden-quiz/extra-video-s/WO_NTR_1067990',
'only_matching': True,
}, {
'url': 'https://www.npo3.nl/3onderzoekt/16-09-2015/VPWON_1239870',
'only_matching': True,
}, {
# live stream
'url': 'npo:LI_NL1_4188102',
'only_matching': True,
}, {
'url': 'http://www.npo.nl/radio-gaga/13-06-2017/BNN_101383373',
'only_matching': True,
}, {
'url': 'https://www.zapp.nl/1803-skelterlab/instructie-video-s/740-instructievideo-s/POMS_AT_11736927',
'only_matching': True,
}, {
'url': 'https://www.npostart.nl/broodje-gezond-ei/28-05-2018/KN_1698996',
'only_matching': True,
}, {
'url': 'https://npo.nl/KN_1698996',
'only_matching': True,
}, {
'url': 'https://www.npo3.nl/the-genius/21-11-2022/VPWON_1341105',
'info_dict': {
'id': 'VPWON_1341105',
'ext': 'mp4',
'duration': 2658,
'series': 'The Genius',
'description': 'md5:db02f1456939ca63f7c408f858044e94',
'title': 'The Genius',
'timestamp': 1669062000,
'creator': 'NED3',
'episode': 'The Genius',
'thumbnail': 'https://images.npo.nl/tile/1280x720/1827650.jpg',
'episode_number': 8,
'upload_date': '20221121',
},
'params': {
'skip_download': True,
},
}]
@classmethod
def suitable(cls, url):
return (False if any(ie.suitable(url)
for ie in (NPOLiveIE, NPORadioIE, NPORadioFragmentIE))
else super().suitable(url))
    def _real_extract(self, url):
        video_id = self._match_id(url)
        # ntr.nl has its own player-bootstrap endpoint; every other host goes
        # through npostart.nl, which requires an XSRF cookie fetched first.
        if urllib.parse.urlparse(url).netloc in ['www.ntr.nl', 'ntr.nl']:
            player = self._download_json(
                f'https://www.ntr.nl/ajax/player/embed/{video_id}', video_id,
                'Downloading player JSON', query={
                    'parameters[elementId]': f'npo{random.randint(0, 999)}',
                    'parameters[sterReferralUrl]': url,
                    'parameters[autoplay]': 0,
                })
        else:
            # Sets the XSRF-TOKEN cookie that is echoed back as a header in
            # the player POST below — must happen before it.
            self._request_webpage(
                'https://www.npostart.nl/api/token', video_id,
                'Downloading token', headers={
                    'Referer': url,
                    'X-Requested-With': 'XMLHttpRequest',
                })
            player = self._download_json(
                f'https://www.npostart.nl/player/{video_id}', video_id,
                'Downloading player JSON', data=urlencode_postdata({
                    'autoplay': 0,
                    'share': 1,
                    'pageUrl': url,
                    'hasAdConsent': 0,
                }), headers={
                    'x-xsrf-token': try_call(lambda: urllib.parse.unquote(
                        self._get_cookies('https://www.npostart.nl')['XSRF-TOKEN'].value)),
                })
        player_token = player['token']
        drm = False
        format_urls = set()  # de-duplicates streams shared between profiles
        formats = []
        for profile in ('hls', 'dash-widevine', 'dash-playready', 'smooth'):
            streams = self._download_json(
                f'https://start-player.npo.nl/video/{video_id}/streams',
                video_id, f'Downloading {profile} profile JSON', fatal=False,
                query={
                    'profile': profile,
                    'quality': 'npoplus',
                    'tokenId': player_token,
                    'streamType': 'broadcast',
                }, data=b'') # endpoint requires POST
            if not streams:
                continue
            stream = streams.get('stream')
            if not isinstance(stream, dict):
                continue
            stream_url = url_or_none(stream.get('src'))
            if not stream_url or stream_url in format_urls:
                continue
            format_urls.add(stream_url)
            # Remember that a protected stream exists so we can report DRM
            # below instead of a bare "no formats" error.
            if stream.get('protection') is not None or stream.get('keySystemOptions') is not None:
                drm = True
                continue
            stream_type = stream.get('type')
            stream_ext = determine_ext(stream_url)
            if stream_type == 'application/dash+xml' or stream_ext == 'mpd':
                formats.extend(self._extract_mpd_formats(
                    stream_url, video_id, mpd_id='dash', fatal=False))
            elif stream_type == 'application/vnd.apple.mpegurl' or stream_ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    stream_url, video_id, ext='mp4',
                    entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))
            elif re.search(r'\.isml?/Manifest', stream_url):
                formats.extend(self._extract_ism_formats(
                    stream_url, video_id, ism_id='mss', fatal=False))
            else:
                # Unknown container — expose the raw URL as a single format.
                formats.append({
                    'url': stream_url,
                })
        if not formats:
            if not self.get_param('allow_unplayable_formats') and drm:
                self.report_drm(video_id)
        info = {
            'id': video_id,
            'title': video_id,
            'formats': formats,
        }
        # Best-effort metadata enrichment: scrape the `video` JS object from
        # the embed page, when one is advertised by the player JSON.
        embed_url = url_or_none(player.get('embedUrl'))
        if embed_url:
            webpage = self._download_webpage(
                embed_url, video_id, 'Downloading embed page', fatal=False)
            if webpage:
                video = self._parse_json(
                    self._search_regex(
                        r'\bvideo\s*=\s*({.+?})\s*;', webpage, 'video',
                        default='{}'), video_id)
                if video:
                    title = video.get('episodeTitle')
                    subtitles = {}
                    subtitles_list = video.get('subtitles')
                    if isinstance(subtitles_list, list):
                        for cc in subtitles_list:
                            cc_url = url_or_none(cc.get('src'))
                            if not cc_url:
                                continue
                            lang = str_or_none(cc.get('language')) or 'nl'
                            subtitles.setdefault(lang, []).append({
                                'url': cc_url,
                            })
                    return merge_dicts({
                        'title': title,
                        'description': video.get('description'),
                        'thumbnail': url_or_none(
                            video.get('still_image_url') or video.get('orig_image_url')),
                        'duration': int_or_none(video.get('duration')),
                        'timestamp': unified_timestamp(video.get('broadcastDate')),
                        'creator': video.get('channel'),
                        'series': video.get('title'),
                        'episode': title,
                        'episode_number': int_or_none(video.get('episodeNumber')),
                        'subtitles': subtitles,
                    }, info)
        return info
class NPOLiveIE(InfoExtractor):
    IE_NAME = 'npo.nl:live'
    _VALID_URL = r'https?://(?:www\.)?npo(?:start)?\.nl/live(?:/(?P<id>[^/?#&]+))?'
    _TESTS = [{
        'url': 'http://www.npo.nl/live/npo-1',
        'info_dict': {
            'id': 'LI_NL1_4188102',
            'display_id': 'npo-1',
            'ext': 'mp4',
            'title': 're:^NPO 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'is_live': True,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.npo.nl/live',
        'only_matching': True,
    }, {
        'url': 'https://www.npostart.nl/live/npo-1',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        """Map a live-channel slug to its underlying npo:<media id> entry."""
        display_id = self._match_id(url) or 'npo-1'
        page = self._download_webpage(url, display_id)
        live_id = self._search_regex(
            [r'media-id="([^"]+)"', r'data-prid="([^"]+)"'], page, 'live id')
        return {
            '_type': 'url_transparent',
            'ie_key': NPOIE.ie_key(),
            'url': f'npo:{live_id}',
            'id': live_id,
            'display_id': display_id,
        }
class NPORadioIE(InfoExtractor):
    IE_NAME = 'npo.nl:radio'
    _VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/(?P<id>[^/]+)'
    _TEST = {
        'url': 'http://www.npo.nl/radio/radio-1',
        'info_dict': {
            'id': 'radio-1',
            'ext': 'mp3',
            'title': 're:^NPO Radio 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'is_live': True,
        },
        'params': {
            'skip_download': True,
        },
    }
    @classmethod
    def suitable(cls, url):
        """Yield to the more specific fragment extractor when it matches."""
        if NPORadioFragmentIE.suitable(url):
            return False
        return super().suitable(url)
    @staticmethod
    def _html_get_attribute_regex(attribute):
        # Matches  attribute='value'  (single-quoted) and captures the value.
        return attribute + r"\s*=\s*'([^']+)'"
    def _real_extract(self, url):
        """Extract the live stream of one NPO radio station."""
        station_id = self._match_id(url)
        page = self._download_webpage(url, station_id)
        channel_name = self._html_search_regex(
            self._html_get_attribute_regex('data-channel'), page, 'title')
        stream_info = self._parse_json(
            self._html_search_regex(
                self._html_get_attribute_regex('data-streams'), page, 'data-streams'),
            station_id)
        codec = stream_info.get('codec')
        return {
            'id': station_id,
            'url': stream_info['url'],
            'title': channel_name,
            'acodec': codec,
            'ext': codec,
            'is_live': True,
        }
class NPORadioFragmentIE(InfoExtractor):
    IE_NAME = 'npo.nl:radio:fragment'
    _VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/[^/]+/fragment/(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.npo.nl/radio/radio-5/fragment/174356',
        'md5': 'dd8cc470dad764d0fdc70a9a1e2d18c2',
        'info_dict': {
            'id': '174356',
            'ext': 'mp3',
            'title': 'Jubileumconcert Willeke Alberti',
        },
    }
    def _real_extract(self, url):
        """Extract a single archived radio fragment (direct audio URL)."""
        fragment_id = self._match_id(url)
        page = self._download_webpage(url, fragment_id)
        title = self._html_search_regex(
            rf'href="/radio/[^/]+/fragment/{fragment_id}" title="([^"]+)"',
            page, 'title')
        audio_url = self._search_regex(
            r"data-streams='([^']+)'", page, 'audio url')
        return {'id': fragment_id, 'url': audio_url, 'title': title}
class NPODataMidEmbedIE(InfoExtractor):  # XXX: Conventionally, base classes should end with BaseIE/InfoExtractor
    def _real_extract(self, url):
        """Resolve a page embedding an NPO player via its data-mid attribute."""
        display_id = self._match_id(url)
        page = self._download_webpage(url, display_id)
        media_id = self._search_regex(
            r'data-mid=(["\'])(?P<id>(?:(?!\1).)+)\1', page, 'video_id', group='id')
        return {
            '_type': 'url_transparent',
            'url': f'npo:{media_id}',
            'ie_key': 'NPO',
            'display_id': display_id,
        }
class SchoolTVIE(NPODataMidEmbedIE):
    # Pure configuration subclass: all extraction logic lives in
    # NPODataMidEmbedIE (page's data-mid attribute -> npo:<id>).
    IE_NAME = 'schooltv'
    _VALID_URL = r'https?://(?:www\.)?schooltv\.nl/video/(?P<id>[^/?#&]+)'
    _TEST = {
        'url': 'http://www.schooltv.nl/video/ademhaling-de-hele-dag-haal-je-adem-maar-wat-gebeurt-er-dan-eigenlijk-in-je-lichaam/',
        'info_dict': {
            'id': 'WO_NTR_429477',
            'display_id': 'ademhaling-de-hele-dag-haal-je-adem-maar-wat-gebeurt-er-dan-eigenlijk-in-je-lichaam',
            'title': 'Ademhaling: De hele dag haal je adem. Maar wat gebeurt er dan eigenlijk in je lichaam?',
            'ext': 'mp4',
            'description': 'md5:abfa0ff690adb73fd0297fd033aaa631',
        },
        'params': {
            # Skip because of m3u8 download
            'skip_download': True,
        },
    }
class HetKlokhuisIE(NPODataMidEmbedIE):
    # Pure configuration subclass: all extraction logic lives in
    # NPODataMidEmbedIE (page's data-mid attribute -> npo:<id>).
    IE_NAME = 'hetklokhuis'
    _VALID_URL = r'https?://(?:www\.)?hetklokhuis\.nl/[^/]+/\d+/(?P<id>[^/?#&]+)'
    _TEST = {
        'url': 'http://hetklokhuis.nl/tv-uitzending/3471/Zwaartekrachtsgolven',
        'info_dict': {
            'id': 'VPWON_1260528',
            'display_id': 'Zwaartekrachtsgolven',
            'ext': 'm4v',
            'title': 'Het Klokhuis: Zwaartekrachtsgolven',
            'description': 'md5:c94f31fb930d76c2efa4a4a71651dd48',
            'upload_date': '20170223',
        },
        'params': {
            'skip_download': True,
        },
    }
class NPOPlaylistBaseIE(NPOIE):  # XXX: Do not subclass from concrete IE
    def _real_extract(self, url):
        """Collect unique entry ids/urls from the page into a playlist."""
        playlist_id = self._match_id(url)
        page = self._download_webpage(url, playlist_id)
        entries = []
        for video_id in orderedSet(re.findall(self._PLAYLIST_ENTRY_RE, page)):
            # Absolute URLs are passed through; bare ids become npo: lookups.
            target = video_id if video_id.startswith('http') else f'npo:{video_id}'
            entries.append(self.url_result(target))
        title = self._html_search_regex(
            self._PLAYLIST_TITLE_RE, page, 'playlist title', default=None)
        if not title:
            title = self._og_search_title(page)
        return self.playlist_result(entries, playlist_id, title)
class VPROIE(NPOPlaylistBaseIE):
    # Data-only subclass: supplies the URL pattern and page regexes consumed
    # by NPOPlaylistBaseIE._real_extract; no logic of its own.
    IE_NAME = 'vpro'
    _VALID_URL = r'https?://(?:www\.)?(?:(?:tegenlicht\.)?vpro|2doc)\.nl/(?:[^/]+/)*(?P<id>[^/]+)\.html'
    _PLAYLIST_TITLE_RE = (r'<h1[^>]+class=["\'].*?\bmedia-platform-title\b.*?["\'][^>]*>([^<]+)',
                          r'<h5[^>]+class=["\'].*?\bmedia-platform-subtitle\b.*?["\'][^>]*>([^<]+)')
    _PLAYLIST_ENTRY_RE = r'data-media-id="([^"]+)"'
    _TESTS = [
        {
            'url': 'http://tegenlicht.vpro.nl/afleveringen/2012-2013/de-toekomst-komt-uit-afrika.html',
            'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
            'info_dict': {
                'id': 'VPWON_1169289',
                'ext': 'm4v',
                'title': 'De toekomst komt uit Afrika',
                'description': 'md5:52cf4eefbc96fffcbdc06d024147abea',
                'upload_date': '20130225',
            },
            'skip': 'Video gone',
        },
        {
            'url': 'http://www.vpro.nl/programmas/2doc/2015/sergio-herman.html',
            'info_dict': {
                'id': 'sergio-herman',
                'title': 'sergio herman: fucking perfect',
            },
            'playlist_count': 2,
        },
        {
            # playlist with youtube embed
            'url': 'http://www.vpro.nl/programmas/2doc/2015/education-education.html',
            'info_dict': {
                'id': 'education-education',
                'title': 'education education',
            },
            'playlist_count': 2,
        },
        {
            'url': 'http://www.2doc.nl/documentaires/series/2doc/2015/oktober/de-tegenprestatie.html',
            'info_dict': {
                'id': 'de-tegenprestatie',
                'title': 'De Tegenprestatie',
            },
            'playlist_count': 2,
        }, {
            'url': 'http://www.2doc.nl/speel~VARA_101375237~mh17-het-verdriet-van-nederland~.html',
            'info_dict': {
                'id': 'VARA_101375237',
                'ext': 'm4v',
                'title': 'MH17: Het verdriet van Nederland',
                'description': 'md5:09e1a37c1fdb144621e22479691a9f18',
                'upload_date': '20150716',
            },
            'params': {
                # Skip because of m3u8 download
                'skip_download': True,
            },
        },
    ]
class WNLIE(NPOPlaylistBaseIE):
    # Data-only subclass: supplies the URL pattern and page regexes consumed
    # by NPOPlaylistBaseIE._real_extract; no logic of its own.
    IE_NAME = 'wnl'
    _VALID_URL = r'https?://(?:www\.)?omroepwnl\.nl/video/detail/(?P<id>[^/]+)__\d+'
    _PLAYLIST_TITLE_RE = r'(?s)<h1[^>]+class="subject"[^>]*>(.+?)</h1>'
    _PLAYLIST_ENTRY_RE = r'<a[^>]+href="([^"]+)"[^>]+class="js-mid"[^>]*>Deel \d+'
    _TESTS = [{
        'url': 'http://www.omroepwnl.nl/video/detail/vandaag-de-dag-6-mei__060515',
        'info_dict': {
            'id': 'vandaag-de-dag-6-mei',
            'title': 'Vandaag de Dag 6 mei',
        },
        'playlist_count': 4,
    }]
class AndereTijdenIE(NPOPlaylistBaseIE):
    # Data-only subclass: supplies the URL pattern and page regexes consumed
    # by NPOPlaylistBaseIE._real_extract; no logic of its own.
    IE_NAME = 'anderetijden'
    _VALID_URL = r'https?://(?:www\.)?anderetijden\.nl/programma/(?:[^/]+/)+(?P<id>[^/?#&]+)'
    _PLAYLIST_TITLE_RE = r'(?s)<h1[^>]+class=["\'].*?\bpage-title\b.*?["\'][^>]*>(.+?)</h1>'
    _PLAYLIST_ENTRY_RE = r'<figure[^>]+class=["\']episode-container episode-page["\'][^>]+data-prid=["\'](.+?)["\']'
    _TESTS = [{
        'url': 'http://anderetijden.nl/programma/1/Andere-Tijden/aflevering/676/Duitse-soldaten-over-de-Slag-bij-Arnhem',
        'info_dict': {
            'id': 'Duitse-soldaten-over-de-Slag-bij-Arnhem',
            'title': 'Duitse soldaten over de Slag bij Arnhem',
        },
        'playlist_count': 3,
    }]
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/genericembeds.py | yt_dlp/extractor/genericembeds.py | import re
import urllib.parse
from .common import InfoExtractor
from ..utils import make_archive_id, unescapeHTML
class HTML5MediaEmbedIE(InfoExtractor):
    _VALID_URL = False
    IE_NAME = 'html5'
    _WEBPAGE_TESTS = [
        {
            'url': 'https://html.com/media/',
            'info_dict': {
                'title': 'HTML5 Media',
                'description': 'md5:933b2d02ceffe7a7a0f3c8326d91cc2a',
            },
            'playlist_count': 2,
        },
    ]
    def _extract_from_webpage(self, url, webpage):
        """Yield one entry per bare <video>/<audio> element on the page."""
        video_id = self._generic_id(url)
        title = self._generic_title(url, webpage)
        entries = self._parse_html5_media_entries(url, webpage, video_id, m3u8_id='hls') or []
        # Old 'generic' archive ids were unsuffixed when the page carried a
        # single media element — keep emitting that form in that case.
        single = len(entries) == 1
        for num, entry in enumerate(entries, start=1):
            suffixed_id = f'{video_id}-{num}'
            entry.update({
                'id': suffixed_id,
                'title': f'{title} ({num})',
                '_old_archive_ids': [
                    make_archive_id('generic', video_id if single else suffixed_id),
                ],
            })
            yield entry
class QuotedHTMLIE(InfoExtractor):
    """For common cases of quoted/escaped html parts in the webpage"""
    _VALID_URL = False
    IE_NAME = 'generic:quoted-html'
    IE_DESC = False  # Do not list
    _WEBPAGE_TESTS = [{
        # 2 YouTube embeds in data-html
        'url': 'https://24tv.ua/bronetransporteri-ozbroyenni-zsu-shho-vidomo-pro-bronovik-wolfhound_n2167966',
        'info_dict': {
            'id': 'bronetransporteri-ozbroyenni-zsu-shho-vidomo-pro-bronovik-wolfhound_n2167966',
            'title': 'Броньовик Wolfhound: гігант, який допомагає ЗСУ знищувати окупантів на фронті',
            'thumbnail': r're:^https?://.*\.jpe?g',
            'timestamp': float,
            'upload_date': str,
            'description': 'md5:6816e1e5a65304bd7898e4c7eb1b26f7',
            'age_limit': 0,
        },
        'playlist_count': 2,
    }, {
        # Generic iframe embed of TV24UAPlayerIE within data-html
        'url': 'https://24tv.ua/harkivyani-zgaduyut-misto-do-viyni-shhemlive-video_n1887584',
        'info_dict': {
            'id': '1887584',
            'ext': 'mp4',
            'title': 'Харків\'яни згадують місто до війни: щемливе відео',
            'thumbnail': r're:^https?://.*\.jpe?g',
        },
        'params': {'skip_download': True},
    }, {
        # YouTube embeds on Squarespace (data-html): https://github.com/ytdl-org/youtube-dl/issues/21294
        'url': 'https://www.harvardballetcompany.org/past-productions',
        'info_dict': {
            'id': 'past-productions',
            'title': 'Productions — Harvard Ballet Company',
            'age_limit': 0,
            'description': 'Past Productions',
        },
        'playlist_mincount': 26,
    }, {
        # Squarespace video embed, 2019-08-28, data-html
        'url': 'http://ootboxford.com',
        'info_dict': {
            'id': 'Tc7b_JGdZfw',
            'title': 'Out of the Blue, at Childish Things 10',
            'ext': 'mp4',
            'description': 'md5:a83d0026666cf5ee970f8bd1cfd69c7f',
            'uploader_id': 'helendouglashouse',
            'uploader': 'Helen & Douglas House',
            'upload_date': '20140328',
            'availability': 'public',
            'view_count': int,
            'channel': 'Helen & Douglas House',
            'comment_count': int,
            'uploader_url': 'http://www.youtube.com/user/helendouglashouse',
            'duration': 253,
            'channel_url': 'https://www.youtube.com/channel/UCTChGezrZVmlYlpMlkmulPA',
            'playable_in_embed': True,
            'age_limit': 0,
            'channel_follower_count': int,
            'channel_id': 'UCTChGezrZVmlYlpMlkmulPA',
            'tags': 'count:6',
            'categories': ['Nonprofits & Activism'],
            'like_count': int,
            'thumbnail': 'https://i.ytimg.com/vi/Tc7b_JGdZfw/hqdefault.jpg',
        },
        'params': {
            'skip_download': True,
        },
    }]
    def _extract_from_webpage(self, url, webpage):
        """Decode data-html attributes and run generic embed extraction on them."""
        decoded_parts = []
        for _, raw in re.findall(r'(?s)\bdata-html=(["\'])((?:(?!\1).)+)\1', webpage):
            # unescapeHTML can handle &quot; etc., unquote handles %-encoding
            decoded = unescapeHTML(urllib.parse.unquote(raw))
            # Keep only fragments that were actually encoded; plain HTML is
            # already visible to the regular embed extractors.
            if decoded != raw:
                decoded_parts.append(decoded)
        if decoded_parts:
            yield from self._extract_generic_embeds(url, ''.join(decoded_parts))
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/odkmedia.py | yt_dlp/extractor/odkmedia.py | import json
from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
GeoRestrictedError,
float_or_none,
traverse_obj,
try_call,
)
class OnDemandChinaEpisodeIE(InfoExtractor):
    _VALID_URL = r'https?://www\.ondemandchina\.com/\w+/watch/(?P<series>[\w-]+)/(?P<id>ep-(?P<ep>\d+))'
    _TESTS = [{
        'url': 'https://www.ondemandchina.com/en/watch/together-against-covid-19/ep-1',
        'info_dict': {
            'id': '264394',
            'ext': 'mp4',
            'duration': 3256.88,
            'title': 'EP 1 The Calling',
            'alt_title': '第1集 令出如山',
            'thumbnail': 'https://d2y2efdi5wgkcl.cloudfront.net/fit-in/256x256/media-io/2020/9/11/image.d9816e81.jpg',
            'description': '疫情严峻,党政军民学、东西南北中协同应考',
            'tags': ['Social Humanities', 'Documentary', 'Medical', 'Social'],
        },
    }]
    # GraphQL query used to fetch per-episode metadata (multi-language titles
    # and synopses, duration in ms, and the numeric episode id).
    _QUERY = '''
    query Episode($programSlug: String!, $episodeNumber: Int!) {
        episode(
            programSlug: $programSlug
            episodeNumber: $episodeNumber
            kind: "series"
            part: null
        ) {
            id
            title
            titleEn
            titleKo
            titleZhHans
            titleZhHant
            synopsis
            synopsisEn
            synopsisKo
            synopsisZhHans
            synopsisZhHant
            videoDuration
            images {
                thumbnail
            }
        }
    }'''
    def _real_extract(self, url):
        """Fetch episode metadata via GraphQL, then HLS formats via the playback API."""
        program_slug, display_id, ep_number = self._match_valid_url(url).group('series', 'id', 'ep')
        webpage = self._download_webpage(url, display_id)
        video_info = self._download_json(
            'https://odc-graphql.odkmedia.io/graphql', display_id,
            headers={'Content-type': 'application/json'},
            data=json.dumps({
                'operationName': 'Episode',
                'query': self._QUERY,
                'variables': {
                    'programSlug': program_slug,
                    'episodeNumber': int(ep_number),
                },
            }).encode())['data']['episode']
        try:
            source_json = self._download_json(
                f'https://odkmedia.io/odc/api/v2/playback/{video_info["id"]}/', display_id,
                headers={'Authorization': '', 'service-name': 'odc'})
        except ExtractorError as e:
            # Geo-blocks surface as HTTP errors carrying a JSON 'detail' field.
            if isinstance(e.cause, HTTPError):
                error_data = self._parse_json(e.cause.response.read(), display_id)['detail']
                raise GeoRestrictedError(error_data)
            # Bugfix: previously non-HTTP errors were swallowed here, which
            # left `source_json` unbound and crashed below with a NameError.
            raise
        formats, subtitles = [], {}
        for source in traverse_obj(source_json, ('sources', ...)):
            if source.get('type') == 'hls':
                fmts, subs = self._extract_m3u8_formats_and_subtitles(source.get('url'), display_id)
                formats.extend(fmts)
                self._merge_subtitles(subs, target=subtitles)
            else:
                self.report_warning(f'Unsupported format {source.get("type")}', display_id)
        return {
            'id': str(video_info['id']),
            # videoDuration is reported in milliseconds
            'duration': float_or_none(video_info.get('videoDuration'), 1000),
            'thumbnail': (traverse_obj(video_info, ('images', 'thumbnail'))
                          or self._html_search_meta(['og:image', 'twitter:image'], webpage)),
            'title': (traverse_obj(video_info, 'title', 'titleEn')
                      or self._html_search_meta(['og:title', 'twitter:title'], webpage)
                      or self._html_extract_title(webpage)),
            'alt_title': traverse_obj(video_info, 'titleKo', 'titleZhHans', 'titleZhHant'),
            # Bugfix: final fallback key was misspelled 'synopisis'; the
            # GraphQL query above returns the field as 'synopsis'.
            'description': (traverse_obj(
                video_info, 'synopsisEn', 'synopsisKo', 'synopsisZhHans', 'synopsisZhHant', 'synopsis')
                or self._html_search_meta(['og:description', 'twitter:description', 'description'], webpage)),
            'formats': formats,
            'subtitles': subtitles,
            'tags': try_call(lambda: self._html_search_meta('keywords', webpage).split(', ')),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/wwe.py | yt_dlp/extractor/wwe.py | import re
from .common import InfoExtractor
from ..utils import (
try_get,
unescapeHTML,
url_or_none,
urljoin,
)
class WWEBaseIE(InfoExtractor):
    # Maps the site's human-readable caption labels to language codes.
    _SUBTITLE_LANGS = {
        'English': 'en',
        'Deutsch': 'de',
    }
    def _extract_entry(self, data, url, video_id=None):
        """Turn one JW-player playlist item into an info dict."""
        video_id = str(video_id or data['nid'])
        title = data['title']
        formats = self._extract_m3u8_formats(
            data['file'], video_id, 'mp4', entry_protocol='m3u8_native',
            m3u8_id='hls')
        subtitles = {}
        tracks = data.get('tracks')
        for track in tracks if isinstance(tracks, list) else []:
            if not isinstance(track, dict) or track.get('kind') != 'captions':
                continue
            caption_url = url_or_none(track.get('file'))
            if not caption_url:
                continue
            label = track.get('label')
            lang = self._SUBTITLE_LANGS.get(label, label) or 'en'
            subtitles.setdefault(lang, []).append({
                'url': caption_url,
            })
        return {
            'id': video_id,
            'title': title,
            'description': data.get('description'),
            'thumbnail': urljoin(url, data.get('image')),
            'series': data.get('show_name'),
            'episode': data.get('episode_name'),
            'formats': formats,
            'subtitles': subtitles,
        }
class WWEIE(WWEBaseIE):
    _VALID_URL = r'https?://(?:[^/]+\.)?wwe\.com/(?:[^/]+/)*videos/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://www.wwe.com/videos/daniel-bryan-vs-andrade-cien-almas-smackdown-live-sept-4-2018',
        'md5': '92811c6a14bfc206f7a6a9c5d9140184',
        'info_dict': {
            'id': '40048199',
            'ext': 'mp4',
            'title': 'Daniel Bryan vs. Andrade "Cien" Almas: SmackDown LIVE, Sept. 4, 2018',
            'description': 'md5:2d7424dbc6755c61a0e649d2a8677f67',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }, {
        'url': 'https://de.wwe.com/videos/gran-metalik-vs-tony-nese-wwe-205-live-sept-4-2018',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        """Extract a single video page via its Drupal.settings JSON blob."""
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        drupal_settings = self._html_search_regex(
            r'(?s)Drupal\.settings\s*,\s*({.+?})\s*\)\s*;',
            webpage, 'drupal settings')
        landing = self._parse_json(drupal_settings, display_id)['WWEVideoLanding']
        entry = self._extract_entry(
            landing['initialVideo']['playlist'][0], url, landing.get('initialVideoId'))
        return {**entry, 'display_id': display_id}
class WWEPlaylistIE(WWEBaseIE):
    _VALID_URL = r'https?://(?:[^/]+\.)?wwe\.com/(?:[^/]+/)*(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://www.wwe.com/shows/raw/2018-11-12',
        'info_dict': {
            'id': '2018-11-12',
        },
        'playlist_mincount': 11,
    }, {
        'url': 'http://www.wwe.com/article/walk-the-prank-wwe-edition',
        'only_matching': True,
    }, {
        'url': 'https://www.wwe.com/shows/wwenxt/article/matt-riddle-interview',
        'only_matching': True,
    }]
    @classmethod
    def suitable(cls, url):
        """Defer to the more specific single-video extractor when it matches."""
        if WWEIE.suitable(url):
            return False
        return super().suitable(url)
    def _real_extract(self, url):
        """Collect every data-video player found on the page into a playlist."""
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        entries = []
        for mobj in re.finditer(
                r'data-video\s*=\s*(["\'])(?P<data>{.+?})\1', webpage):
            video = self._parse_json(
                mobj.group('data'), display_id, transform_source=unescapeHTML,
                fatal=False)
            if not video:
                continue
            item = try_get(video, lambda x: x['playlist'][0], dict)
            if not item:
                continue
            try:
                entry = self._extract_entry(item, url)
            except Exception:
                # Best-effort: skip items whose metadata is incomplete.
                continue
            entry['extractor_key'] = WWEIE.ie_key()
            entries.append(entry)
        return self.playlist_result(entries, display_id)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/duoplay.py | yt_dlp/extractor/duoplay.py | from .common import InfoExtractor
from ..utils import (
ExtractorError,
extract_attributes,
get_element_text_and_html_by_tag,
int_or_none,
join_nonempty,
parse_qs,
str_or_none,
try_call,
unified_timestamp,
)
from ..utils.traversal import traverse_obj, value
class DuoplayIE(InfoExtractor):
    """Extractor for duoplay.ee VOD pages (movies and series episodes)."""

    _VALID_URL = r'https?://duoplay\.ee/(?P<id>\d+)(?:[/?#]|$)'
    _TESTS = [{
        'note': 'Siberi võmm S02E12',
        'url': 'https://duoplay.ee/4312/siberi-vomm?ep=24',
        'md5': '1ff59d535310ac9c5cf5f287d8f91b2d',
        'info_dict': {
            'id': '4312_24',
            'ext': 'mp4',
            'title': 'Operatsioon "Öö"',
            'thumbnail': r're:https://.+\.jpg(?:\?c=\d+)?$',
            'description': 'md5:8ef98f38569d6b8b78f3d350ccc6ade8',
            'upload_date': '20170523',
            'timestamp': 1495567800,
            'series': 'Siberi võmm',
            'series_id': '4312',
            'season': 'Season 2',
            'season_number': 2,
            'episode': 'Operatsioon "Öö"',
            'episode_number': 12,
            'episode_id': '24',
        },
        'skip': 'No video found',
    }, {
        'note': 'Empty title',
        'url': 'https://duoplay.ee/17/uhikarotid?ep=14',
        'md5': 'cba9f5dabf2582b224d80ac44fb80e47',
        'info_dict': {
            'id': '17_14',
            'ext': 'mp4',
            'title': 'Episode 14',
            'thumbnail': r're:https?://.+\.jpg',
            'description': 'md5:4719b418e058c209def41d48b601276e',
            'upload_date': '20100916',
            'timestamp': 1284661800,
            'series': 'Ühikarotid',
            'series_id': '17',
            'season': 'Season 2',
            'season_number': 2,
            'episode_id': '14',
            'release_year': 2010,
            'episode': 'Episode 14',
            'episode_number': 14,
        },
    }, {
        'note': 'Movie without expiry',
        'url': 'https://duoplay.ee/5501/pilvede-all.-neljas-ode',
        'md5': '7abf63d773a49ef7c39f2c127842b8fd',
        'info_dict': {
            'id': '5501',
            'ext': 'mp4',
            'title': 'Pilvede all. Neljas õde',
            'thumbnail': r're:https://.+\.jpg(?:\?c=\d+)?$',
            'description': 'md5:d86a70f8f31e82c369d4d4f4c79b1279',
            'cast': 'count:9',
            'upload_date': '20221214',
            'timestamp': 1671054000,
            'release_year': 2018,
        },
        'skip': 'No video found',
    }, {
        'note': 'Episode url without show name',
        'url': 'https://duoplay.ee/9644?ep=185',
        'md5': '63f324b4fe2dbd8194dca16a6d52184a',
        'info_dict': {
            'id': '9644_185',
            'ext': 'mp4',
            'title': 'Episode 185',
            'thumbnail': r're:https?://.+\.jpg',
            'description': 'md5:ed25ba4e9e5d54bc291a4a0cdd241467',
            'upload_date': '20241120',
            'timestamp': 1732077000,
            'episode': 'Episode 63',
            'episode_id': '185',
            'episode_number': 63,
            'season': 'Season 2',
            'season_number': 2,
            'series': 'Telehommik',
            'series_id': '9644',
        },
    }]

    def _real_extract(self, url):
        telecast_id = self._match_id(url)
        # 'ep' query parameter selects an episode; the int->str round trip
        # keeps only well-formed numeric values.
        episode = traverse_obj(parse_qs(url), ('ep', 0, {int_or_none}, {str_or_none}))
        # Episodes get a compound id ("<telecast>_<ep>"); movies just the telecast id.
        video_id = join_nonempty(telecast_id, episode, delim='_')
        webpage = self._download_webpage(url, video_id)
        # The page embeds a <video-player> custom element whose attributes
        # carry the manifest URL and a JSON ':episode' metadata blob.
        video_player = try_call(lambda: extract_attributes(
            get_element_text_and_html_by_tag('video-player', webpage)[1]))
        if not video_player or not video_player.get('manifest-url'):
            raise ExtractorError('No video found', expected=True)
        manifest_url = video_player['manifest-url']

        # A session token must be registered against the manifest URL and is
        # passed to the m3u8 request via the 's' query parameter below.
        session_token = self._download_json(
            'https://sts.postimees.ee/session/register', video_id, 'Registering session',
            'Unable to register session', headers={
                'Accept': 'application/json',
                'X-Original-URI': manifest_url,
            })['session']

        episode_attr = self._parse_json(video_player.get(':episode') or '', video_id, fatal=False) or {}

        return {
            'id': video_id,
            'formats': self._extract_m3u8_formats(manifest_url, video_id, 'mp4', query={'s': session_token}),
            **traverse_obj(episode_attr, {
                'title': ('title', {str}),
                'description': ('synopsis', {str}),
                'thumbnail': ('images', 'original'),
                # Airtime is local time; site operates on Estonian time (+0200).
                'timestamp': ('airtime', {lambda x: unified_timestamp(x + ' +0200')}),
                'cast': ('cast', filter, {lambda x: x.split(', ')}),
                'release_year': ('year', {int_or_none}),
            }),
            # Series-specific fields only apply to non-movie content.
            **(traverse_obj(episode_attr, {
                'title': (None, (('subtitle', {str}, filter), {value(f'Episode {episode}' if episode else None)})),
                'series': ('title', {str}),
                'series_id': ('telecast_id', {str_or_none}),
                'season_number': ('season_id', {int_or_none}),
                'episode': ('subtitle', {str}, filter),
                'episode_number': ('episode_nr', {int_or_none}),
                'episode_id': ('episode_id', {str_or_none}),
            }, get_all=False) if episode_attr.get('category') != 'movies' else {}),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/zetland.py | yt_dlp/extractor/zetland.py | from .common import InfoExtractor
from ..utils import merge_dicts, unified_timestamp, url_or_none
from ..utils.traversal import traverse_obj
class ZetlandDKArticleIE(InfoExtractor):
    """Extractor for audio versions of zetland.dk articles."""

    _VALID_URL = r'https?://www\.zetland\.dk/\w+/(?P<id>(?P<story_id>\w{8})-(?P<uploader_id>\w{8})-(?:\w{5}))'
    _TESTS = [{
        'url': 'https://www.zetland.dk/historie/sO9aq2MY-a81VP3BY-66e69?utm_source=instagram&utm_medium=linkibio&utm_campaign=artikel',
        'info_dict': {
            'id': 'sO9aq2MY-a81VP3BY-66e69',
            'ext': 'mp3',
            'modified_date': '20240118',
            'title': 'Afsnit 1: “Det føltes som en kidnapning.” ',
            'upload_date': '20240116',
            'uploader_id': 'a81VP3BY',
            'modified_timestamp': 1705568739,
            'release_timestamp': 1705377592,
            'uploader_url': 'https://www.zetland.dk/skribent/a81VP3BY',
            'uploader': 'Helle Fuusager',
            'release_date': '20240116',
            'thumbnail': r're:https://zetland\.imgix\.net/2aafe500-b14e-11ee-bf83-65d5e1283a57/Zetland_Image_1\.jpg',
            'description': 'md5:9619d426772c133f5abb26db27f26a01',
            'timestamp': 1705377592,
            'series_id': '62d54630-e87b-4ab1-a255-8de58dbe1b14',
        },
    }]

    def _real_extract(self, url):
        # uploader_id is part of the URL slug itself, not fetched data.
        display_id, uploader_id = self._match_valid_url(url).group('id', 'uploader_id')
        webpage = self._download_webpage(url, display_id)

        next_js_data = self._search_nextjs_data(webpage, display_id)['props']['pageProps']
        story_data = traverse_obj(next_js_data, ('initialState', 'consume', 'story', 'story'))

        # Articles are audio-only; every audioFiles entry becomes a format.
        formats = []
        for audio_url in traverse_obj(story_data, ('story_content', 'meta', 'audioFiles', ..., {url_or_none})):
            formats.append({
                'url': audio_url,
                'vcodec': 'none',
            })

        # merge_dicts keeps the FIRST non-empty value, so sources are ordered
        # from most to least authoritative: story JSON, then page metaInfo
        # (meta/ld/og), then raw HTML meta tags, then generic JSON-LD.
        return merge_dicts({
            'id': display_id,
            'formats': formats,
            'uploader_id': uploader_id,
        }, traverse_obj(story_data, {
            'title': ((('story_content', 'content', 'title'), 'title'), {str}),
            'uploader': ('sharer', 'name'),
            'uploader_id': ('sharer', 'sharer_id'),
            'description': ('story_content', 'content', 'socialDescription'),
            'series_id': ('story_content', 'meta', 'seriesId'),
            'release_timestamp': ('published_at', {unified_timestamp}),
            'modified_timestamp': ('revised_at', {unified_timestamp}),
        }, get_all=False), traverse_obj(next_js_data, ('metaInfo', {
            'title': ((('meta', 'title'), ('ld', 'headline'), ('og', 'og:title'), ('og', 'twitter:title')), {str}),
            'description': ((('meta', 'description'), ('ld', 'description'), ('og', 'og:description'), ('og', 'twitter:description')), {str}),
            'uploader': ((('meta', 'author'), ('ld', 'author', 'name')), {str}),
            'uploader_url': ('ld', 'author', 'url', {url_or_none}),
            'thumbnail': ((('ld', 'image'), ('og', 'og:image'), ('og', 'twitter:image')), {url_or_none}),
            'modified_timestamp': ('ld', 'dateModified', {unified_timestamp}),
            'release_timestamp': ('ld', 'datePublished', {unified_timestamp}),
            'timestamp': ('ld', 'dateCreated', {unified_timestamp}),
        }), get_all=False), {
            'title': self._html_search_meta(['title', 'og:title', 'twitter:title'], webpage),
            'description': self._html_search_meta(['description', 'og:description', 'twitter:description'], webpage),
            'thumbnail': self._html_search_meta(['og:image', 'twitter:image'], webpage),
            'uploader': self._html_search_meta(['author'], webpage),
            'release_timestamp': unified_timestamp(self._html_search_meta(['article:published_time'], webpage)),
        }, self._search_json_ld(webpage, display_id, fatal=False))
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/taptap.py | yt_dlp/extractor/taptap.py | import re
import uuid
from .common import InfoExtractor
from ..utils import (
clean_html,
int_or_none,
join_nonempty,
str_or_none,
url_or_none,
)
from ..utils.traversal import traverse_obj
class TapTapBaseIE(InfoExtractor):
    """Shared TapTap logic; subclasses configure the API endpoint and
    traversal paths for their specific page type."""

    _X_UA = 'V=1&PN=WebApp&LANG=zh_CN&VN_CODE=102&LOC=CN&PLT=PC&DS=Android&UID={uuid}&OS=Windows&OSV=10&DT=PC'
    _VIDEO_API = 'https://www.taptap.cn/webapiv2/video-resource/v1/multi-get'
    _INFO_API = None
    _INFO_QUERY_KEY = 'id'
    _DATA_PATH = None
    _ID_PATH = None
    _META_PATH = None

    def _get_api(self, url, video_id, query, **kwargs):
        # Every API call must carry a freshly generated X-UA identity string.
        full_query = dict(query)
        full_query['X-UA'] = self._X_UA.format(uuid=uuid.uuid4())
        return self._download_json(url, video_id, query=full_query, **kwargs)['data']

    def _extract_video(self, video_id):
        data = self._get_api(self._VIDEO_API, video_id, query={'video_ids': video_id})['list'][0]
        # h265 playlist contains both h265 and h264 formats
        playlist_url = traverse_obj(data, ('play_url', ('url_h265', 'url'), {url_or_none}, any))
        formats = self._extract_m3u8_formats(playlist_url, video_id, fatal=False)
        for fmt in formats:
            # Tag HEVC variants so the two codecs are distinguishable.
            if re.search(r'^(hev|hvc|hvt)\d', fmt.get('vcodec', '')):
                fmt['format_id'] = join_nonempty(fmt.get('format_id'), 'h265', delim='_')

        return {
            'id': str(video_id),
            'formats': formats,
            **traverse_obj(data, ({
                'duration': ('info', 'duration', {int_or_none}),
                'thumbnail': ('thumbnail', ('original_url', 'url'), {url_or_none}),
            }), get_all=False),
        }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        data = traverse_obj(
            self._get_api(self._INFO_API, video_id, query={self._INFO_QUERY_KEY: video_id}),
            self._DATA_PATH)
        metainfo = traverse_obj(data, self._META_PATH)

        # De-duplicate video ids; shared metadata is merged into each entry.
        entries = []
        for media_id in set(traverse_obj(data, self._ID_PATH)):
            entries.append({**metainfo, **self._extract_video(media_id)})

        return self.playlist_result(entries, **metainfo, id=video_id)
class TapTapMomentIE(TapTapBaseIE):
    """Extractor for taptap.cn "moment" (community post) pages."""

    _VALID_URL = r'https?://www\.taptap\.cn/moment/(?P<id>\d+)'
    _INFO_API = 'https://www.taptap.cn/webapiv2/moment/v3/detail'
    # Videos may appear in the topic's 'videos' list or as a single pin_video.
    _ID_PATH = ('moment', 'topic', (('videos', ...), 'pin_video'), 'video_id')
    _META_PATH = ('moment', {
        'timestamp': ('created_time', {int_or_none}),
        'modified_timestamp': ('edited_time', {int_or_none}),
        'uploader': ('author', 'user', 'name', {str}),
        'uploader_id': ('author', 'user', 'id', {int}, {str_or_none}),
        'title': ('topic', 'title', {str}),
        'description': ('topic', 'summary', {str}),
    })
    _TESTS = [{
        'url': 'https://www.taptap.cn/moment/194618230982052443',
        'info_dict': {
            'id': '194618230982052443',
            'title': '《崩坏3》开放世界「后崩坏书」新篇章 于淹没之地仰视辰星',
            'description': 'md5:cf66f7819d413641b8b28c8543f4ecda',
            'timestamp': 1633453402,
            'upload_date': '20211005',
            'modified_timestamp': 1633453402,
            'modified_date': '20211005',
            'uploader': '乌酱',
            'uploader_id': '532896',
        },
        'playlist_count': 1,
        'playlist': [{
            'info_dict': {
                'id': '2202584',
                'ext': 'mp4',
                'title': '《崩坏3》开放世界「后崩坏书」新篇章 于淹没之地仰视辰星',
                'description': 'md5:cf66f7819d413641b8b28c8543f4ecda',
                'duration': 66,
                'timestamp': 1633453402,
                'upload_date': '20211005',
                'modified_timestamp': 1633453402,
                'modified_date': '20211005',
                'uploader': '乌酱',
                'uploader_id': '532896',
                'thumbnail': r're:^https?://.*\.(png|jpg)',
            },
        }],
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://www.taptap.cn/moment/521630629209573493',
        'info_dict': {
            'id': '521630629209573493',
            'title': '《崩坏:星穹铁道》黄泉角色PV——「你的颜色」',
            'description': 'md5:2c81245da864428c904d53ae4ad2182b',
            'timestamp': 1711425600,
            'upload_date': '20240326',
            'modified_timestamp': 1711425600,
            'modified_date': '20240326',
            'uploader': '崩坏:星穹铁道',
            'uploader_id': '414732580',
        },
        'playlist_count': 1,
        'playlist': [{
            'info_dict': {
                'id': '4006511',
                'ext': 'mp4',
                'title': '《崩坏:星穹铁道》黄泉角色PV——「你的颜色」',
                'description': 'md5:2c81245da864428c904d53ae4ad2182b',
                'duration': 173,
                'timestamp': 1711425600,
                'upload_date': '20240326',
                'modified_timestamp': 1711425600,
                'modified_date': '20240326',
                'uploader': '崩坏:星穹铁道',
                'uploader_id': '414732580',
                'thumbnail': r're:^https?://.*\.(png|jpg)',
            },
        }],
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://www.taptap.cn/moment/540493587511511299',
        'playlist_count': 2,
        'info_dict': {
            'id': '540493587511511299',
            'title': '中式民俗解谜《纸嫁衣7》、新系列《纸不语》公布!',
            'description': 'md5:d60842350e686ddb242291ddfb8e39c9',
            'timestamp': 1715920200,
            'upload_date': '20240517',
            'modified_timestamp': 1715942225,
            'modified_date': '20240517',
            'uploader': 'TapTap 编辑',
            'uploader_id': '7159244',
        },
        'params': {'skip_download': 'm3u8'},
    }]
class TapTapAppIE(TapTapBaseIE):
    """Extractor for taptap.cn game (app) detail pages."""

    _VALID_URL = r'https?://www\.taptap\.cn/app/(?P<id>\d+)'
    _INFO_API = 'https://www.taptap.cn/webapiv2/app/v4/detail'
    # Promo videos live under either 'app_videos' or 'videos'.
    _ID_PATH = (('app_videos', 'videos'), ..., 'video_id')
    _META_PATH = {
        'title': ('title', {str}),
        'description': ('description', 'text', {str}, {clean_html}),
    }
    _TESTS = [{
        'url': 'https://www.taptap.cn/app/168332',
        'info_dict': {
            'id': '168332',
            'title': '原神',
            'description': 'md5:e345f39a5fea5de2a46923f70d5f76ab',
        },
        'playlist_count': 2,
        'playlist': [{
            'info_dict': {
                'id': '4058443',
                'ext': 'mp4',
                'title': '原神',
                'description': 'md5:e345f39a5fea5de2a46923f70d5f76ab',
                'duration': 26,
                'thumbnail': r're:^https?://.*\.(png|jpg)',
            },
        }, {
            'info_dict': {
                'id': '4058462',
                'ext': 'mp4',
                'title': '原神',
                'description': 'md5:e345f39a5fea5de2a46923f70d5f76ab',
                'duration': 295,
                'thumbnail': r're:^https?://.*\.(png|jpg)',
            },
        }],
        'params': {'skip_download': 'm3u8'},
    }]
class TapTapIntlBaseIE(TapTapBaseIE):
    """Base for the international site (taptap.io): same API shape as
    taptap.cn but with its own X-UA identity string and video endpoint."""

    _X_UA = 'V=1&PN=WebAppIntl2&LANG=zh_TW&VN_CODE=115&VN=0.1.0&LOC=CN&PLT=PC&DS=Android&UID={uuid}&CURR=&DT=PC&OS=Windows&OSV=NT%208.0.0'
    _VIDEO_API = 'https://www.taptap.io/webapiv2/video-resource/v1/multi-get'
class TapTapAppIntlIE(TapTapIntlBaseIE):
    """Extractor for taptap.io game (app) detail pages."""

    _VALID_URL = r'https?://www\.taptap\.io/app/(?P<id>\d+)'
    _INFO_API = 'https://www.taptap.io/webapiv2/i/app/v5/detail'
    # Unlike the .cn endpoint, the payload is nested under an 'app' key.
    _DATA_PATH = 'app'
    _ID_PATH = (('app_videos', 'videos'), ..., 'video_id')
    _META_PATH = {
        'title': ('title', {str}),
        'description': ('description', 'text', {str}, {clean_html}),
    }
    _TESTS = [{
        'url': 'https://www.taptap.io/app/233287',
        'info_dict': {
            'id': '233287',
            'title': '《虹彩六號 M》',
            'description': 'md5:418285f9c15347fc3cf3e3a3c649f182',
        },
        'playlist_count': 1,
        'playlist': [{
            'info_dict': {
                'id': '2149708997',
                'ext': 'mp4',
                'title': '《虹彩六號 M》',
                'description': 'md5:418285f9c15347fc3cf3e3a3c649f182',
                'duration': 78,
                'thumbnail': r're:^https?://.*\.(png|jpg)',
            },
        }],
        'params': {'skip_download': 'm3u8'},
    }]
class TapTapPostIntlIE(TapTapIntlBaseIE):
    """Extractor for taptap.io community post pages."""

    _VALID_URL = r'https?://www\.taptap\.io/post/(?P<id>\d+)'
    _INFO_API = 'https://www.taptap.io/webapiv2/creation/post/v1/detail'
    # This endpoint expects the id as 'id_str' rather than 'id'.
    _INFO_QUERY_KEY = 'id_str'
    _DATA_PATH = 'post'
    _ID_PATH = ((('videos', ...), 'pin_video'), 'video_id')
    _META_PATH = {
        'timestamp': ('published_time', {int_or_none}),
        'modified_timestamp': ('edited_time', {int_or_none}),
        'uploader': ('user', 'name', {str}),
        'uploader_id': ('user', 'id', {int}, {str_or_none}),
        'title': ('title', {str}),
        'description': ('list_fields', 'summary', {str}),
    }
    _TESTS = [{
        'url': 'https://www.taptap.io/post/571785',
        'info_dict': {
            'id': '571785',
            'title': 'Arknights x Rainbow Six Siege | Event PV',
            'description': 'md5:f7717c13f6d3108e22db7303e6690bf7',
            'timestamp': 1614664951,
            'upload_date': '20210302',
            'modified_timestamp': 1614664951,
            'modified_date': '20210302',
            'uploader': 'TapTap Editor',
            'uploader_id': '80224473',
        },
        'playlist_count': 1,
        'playlist': [{
            'info_dict': {
                'id': '2149491903',
                'ext': 'mp4',
                'title': 'Arknights x Rainbow Six Siege | Event PV',
                'description': 'md5:f7717c13f6d3108e22db7303e6690bf7',
                'duration': 122,
                'timestamp': 1614664951,
                'upload_date': '20210302',
                'modified_timestamp': 1614664951,
                'modified_date': '20210302',
                'uploader': 'TapTap Editor',
                'uploader_id': '80224473',
                'thumbnail': r're:^https?://.*\.(png|jpg)',
            },
        }],
        'params': {'skip_download': 'm3u8'},
    }]
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/egghead.py | yt_dlp/extractor/egghead.py | from .common import InfoExtractor
from ..utils import (
determine_ext,
int_or_none,
try_get,
unified_timestamp,
url_or_none,
)
class EggheadBaseIE(InfoExtractor):
    def _call_api(self, path, video_id, resource, fatal=True):
        """Fetch *path* from the egghead v1 API and return the parsed JSON."""
        api_url = f'https://app.egghead.io/api/v1/{path}'
        return self._download_json(
            api_url, video_id, f'Downloading {resource} JSON', fatal=fatal)
class EggheadCourseIE(EggheadBaseIE):
    IE_DESC = 'egghead.io course'
    IE_NAME = 'egghead:course'
    _VALID_URL = r'https?://(?:app\.)?egghead\.io/(?:course|playlist)s/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://egghead.io/courses/professor-frisby-introduces-composable-functional-javascript',
        'playlist_count': 29,
        'info_dict': {
            'id': '432655',
            'title': 'Professor Frisby Introduces Composable Functional JavaScript',
            'description': 're:(?s)^This course teaches the ubiquitous.*You\'ll start composing functionality before you know it.$',
        },
    }, {
        'url': 'https://app.egghead.io/playlists/professor-frisby-introduces-composable-functional-javascript',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Build a playlist of lesson entries for an egghead course."""
        playlist_id = self._match_id(url)
        series_path = 'series/' + playlist_id

        entries = []
        for lesson in self._call_api(
                series_path + '/lessons', playlist_id, 'course lessons'):
            lesson_url = url_or_none(lesson.get('http_url'))
            if not lesson_url:
                continue
            lesson_id = lesson.get('id')
            if lesson_id:
                lesson_id = str(lesson_id)
            entries.append(self.url_result(
                lesson_url, ie=EggheadLessonIE.ie_key(), video_id=lesson_id))

        # Course metadata is best-effort; prefer the API's numeric id when present.
        course = self._call_api(series_path, playlist_id, 'course', False) or {}
        course_id = course.get('id')
        if course_id:
            course_id = str(course_id)

        return self.playlist_result(
            entries, course_id, course.get('title'), course.get('description'))
class EggheadLessonIE(EggheadBaseIE):
    IE_DESC = 'egghead.io lesson'
    IE_NAME = 'egghead:lesson'
    _VALID_URL = r'https?://(?:app\.)?egghead\.io/(?:api/v1/)?lessons/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://egghead.io/lessons/javascript-linear-data-flow-with-container-style-types-box',
        'info_dict': {
            'id': '1196',
            'display_id': 'javascript-linear-data-flow-with-container-style-types-box',
            'ext': 'mp4',
            'title': 'Create linear data flow with container style types (Box)',
            'description': 'md5:9aa2cdb6f9878ed4c39ec09e85a8150e',
            'thumbnail': r're:^https?:.*\.jpg$',
            'timestamp': 1481296768,
            'upload_date': '20161209',
            'duration': 304,
            'view_count': 0,
            'tags': 'count:2',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://egghead.io/api/v1/lessons/react-add-redux-to-a-react-application',
        'only_matching': True,
    }, {
        'url': 'https://app.egghead.io/lessons/javascript-linear-data-flow-with-container-style-types-box',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Extract a single egghead lesson via the v1 API."""
        display_id = self._match_id(url)

        lesson = self._call_api(
            'lessons/' + display_id, display_id, 'lesson')

        lesson_id = str(lesson['id'])
        title = lesson['title']

        formats = []
        # media_urls maps protocol names to URLs; only the URLs matter here.
        for format_url in lesson['media_urls'].values():
            format_url = url_or_none(format_url)
            if not format_url:
                continue
            ext = determine_ext(format_url)
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    format_url, lesson_id, 'mp4', m3u8_id='hls', fatal=False))
            elif ext == 'mpd':
                formats.extend(self._extract_mpd_formats(
                    format_url, lesson_id, mpd_id='dash', fatal=False))
            else:
                # Plain progressive download URL.
                formats.append({
                    'url': format_url,
                })

        return {
            'id': lesson_id,
            'display_id': display_id,
            'title': title,
            'description': lesson.get('summary'),
            'thumbnail': lesson.get('thumb_nail'),
            'timestamp': unified_timestamp(lesson.get('published_at')),
            'duration': int_or_none(lesson.get('duration')),
            'view_count': int_or_none(lesson.get('plays_count')),
            'tags': try_get(lesson, lambda x: x['tag_list'], list),
            'series': try_get(
                lesson, lambda x: x['series']['title'], str),
            'formats': formats,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/contv.py | yt_dlp/extractor/contv.py | from .common import InfoExtractor
from ..utils import (
float_or_none,
int_or_none,
)
class CONtvIE(InfoExtractor):
    """Extractor for contv.com movie/episode detail pages.

    An 'episodic' detail response is expanded into a playlist of the show's
    episodes; otherwise a single video entry is extracted.
    """

    _VALID_URL = r'https?://(?:www\.)?contv\.com/details-movie/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'https://www.contv.com/details-movie/CEG10022949/days-of-thrills-&-laughter',
        'info_dict': {
            'id': 'CEG10022949',
            'ext': 'mp4',
            'title': 'Days Of Thrills & Laughter',
            'description': 'md5:5d6b3d0b1829bb93eb72898c734802eb',
            'upload_date': '20180703',
            'timestamp': 1530634789.61,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'https://www.contv.com/details-movie/CLIP-show_fotld_bts/fight-of-the-living-dead:-behind-the-scenes-bites',
        'info_dict': {
            'id': 'CLIP-show_fotld_bts',
            'title': 'Fight of the Living Dead: Behind the Scenes Bites',
        },
        'playlist_mincount': 7,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        details = self._download_json(
            'http://metax.contv.live.junctiontv.net/metax/2.5/details/' + video_id,
            video_id, query={'device': 'web'})

        # Episodic content: return a playlist of per-episode url_results.
        if details.get('type') == 'episodic':
            seasons = self._download_json(
                'http://metax.contv.live.junctiontv.net/metax/2.5/seriesfeed/json/' + video_id,
                video_id)
            entries = []
            for season in seasons:
                for episode in season.get('episodes', []):
                    episode_id = episode.get('id')
                    if not episode_id:
                        continue
                    entries.append(self.url_result(
                        'https://www.contv.com/details-movie/' + episode_id,
                        CONtvIE.ie_key(), episode_id))
            return self.playlist_result(entries, video_id, details.get('title'))

        m_details = details['details']
        title = details['title']

        formats = []

        media_hls_url = m_details.get('media_hls_url')
        if media_hls_url:
            formats.extend(self._extract_m3u8_formats(
                media_hls_url, video_id, 'mp4',
                m3u8_id='hls', fatal=False))

        media_mp4_url = m_details.get('media_mp4_url')
        if media_mp4_url:
            formats.append({
                'format_id': 'http',
                'url': media_mp4_url,
            })

        subtitles = {}
        captions = m_details.get('captions') or {}
        for caption_url in captions.values():
            # No language info is provided; default everything to English.
            subtitles.setdefault('en', []).append({
                'url': caption_url,
            })

        thumbnails = []
        for image in m_details.get('images', []):
            image_url = image.get('url')
            if not image_url:
                continue
            thumbnails.append({
                'url': image_url,
                'width': int_or_none(image.get('width')),
                'height': int_or_none(image.get('height')),
            })

        # Pick the longest available description variant.
        description = None
        for p in ('large_', 'medium_', 'small_', ''):
            d = m_details.get(p + 'description')
            if d:
                description = d
                break

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnails': thumbnails,
            'description': description,
            # Millisecond timestamps/durations: scale=1000.
            'timestamp': float_or_none(details.get('metax_added_on'), 1000),
            'subtitles': subtitles,
            'duration': float_or_none(m_details.get('duration'), 1000),
            'view_count': int_or_none(details.get('num_watched')),
            'like_count': int_or_none(details.get('num_fav')),
            'categories': details.get('category'),
            'tags': details.get('tags'),
            'season_number': int_or_none(details.get('season')),
            'episode_number': int_or_none(details.get('episode')),
            'release_year': int_or_none(details.get('pub_year')),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/daystar.py | yt_dlp/extractor/daystar.py | from .common import InfoExtractor
from ..utils import js_to_json, urljoin
class DaystarClipIE(InfoExtractor):
    """Extractor for player.daystar.tv clip pages (Lightcast-embedded HLS)."""

    IE_NAME = 'daystar:clip'
    _VALID_URL = r'https?://player\.daystar\.tv/(?P<id>\w+)'
    _TESTS = [{
        'url': 'https://player.daystar.tv/0MTO2ITM',
        'info_dict': {
            'id': '0MTO2ITM',
            'ext': 'mp4',
            'title': 'The Dark World of COVID Pt. 1 | Aaron Siri',
            'description': 'a420d320dda734e5f29458df3606c5f4',
            'thumbnail': r're:^https?://.+\.jpg',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        src_iframe = self._search_regex(r'\<iframe[^>]+src="([^"]+)"', webpage, 'src iframe')
        # The player iframe's config2.php endpoint exposes the JS sources
        # array; the Referer header is required by the server.
        webpage_iframe = self._download_webpage(
            src_iframe.replace('player.php', 'config2.php'), video_id, headers={'Referer': src_iframe})

        # 'sources' is a JS literal, hence js_to_json before parsing.
        sources = self._parse_json(self._search_regex(
            r'sources\:\s*(\[.*?\])', webpage_iframe, 'm3u8 source'), video_id, transform_source=js_to_json)

        formats, subtitles = [], {}
        for source in sources:
            file = source.get('file')
            if file and source.get('type') == 'm3u8':
                # Source paths are relative to the Lightcast embed base.
                fmts, subs = self._extract_m3u8_formats_and_subtitles(
                    urljoin('https://www.lightcast.com/embed/', file),
                    video_id, 'mp4', fatal=False, headers={'Referer': src_iframe})
                formats.extend(fmts)
                subtitles = self._merge_subtitles(subtitles, subs)

        return {
            'id': video_id,
            'title': self._html_search_meta(['og:title', 'twitter:title'], webpage),
            'description': self._html_search_meta(['og:description', 'twitter:description'], webpage),
            'thumbnail': self._search_regex(r'image:\s*"([^"]+)', webpage_iframe, 'thumbnail'),
            'formats': formats,
            'subtitles': subtitles,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/sonyliv.py | yt_dlp/extractor/sonyliv.py | import datetime as dt
import itertools
import json
import math
import random
import time
import uuid
from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
int_or_none,
jwt_decode_hs256,
try_call,
)
from ..utils.traversal import traverse_obj
class SonyLIVIE(InfoExtractor):
    """Extractor for SonyLIV (sonyliv.com) VOD content.

    Login is via mobile-number OTP or a pre-obtained auth token; all API
    calls additionally require a security token and a generated device id.
    """

    _VALID_URL = r'''(?x)
                    (?:
                        sonyliv:|
                        https?://(?:www\.)?sonyliv\.com/(?:s(?:how|port)s/[^/]+|movies|clip|trailer|music-videos)/[^/?#&]+-
                    )
                    (?P<id>\d+)
                  '''
    _TESTS = [{
        'url': 'https://www.sonyliv.com/shows/bachelors-delight-1700000113/achaari-cheese-toast-1000022678?watch=true',
        'info_dict': {
            'title': 'Achaari Cheese Toast',
            'id': '1000022678',
            'ext': 'mp4',
            'upload_date': '20200411',
            'description': 'md5:3957fa31d9309bf336ceb3f37ad5b7cb',
            'timestamp': 1586632091,
            'duration': 185,
            'season_number': 1,
            'series': 'Bachelors Delight',
            'episode_number': 1,
            'release_year': 2016,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://www.sonyliv.com/movies/tahalka-1000050121?watch=true',
        'only_matching': True,
    }, {
        'url': 'https://www.sonyliv.com/clip/jigarbaaz-1000098925',
        'only_matching': True,
    }, {
        'url': 'https://www.sonyliv.com/trailer/sandwiched-forever-1000100286?watch=true',
        'only_matching': True,
    }, {
        'url': 'https://www.sonyliv.com/sports/india-tour-of-australia-2020-21-1700000286/cricket-hls-day-3-1st-test-aus-vs-ind-19-dec-2020-1000100959?watch=true',
        'only_matching': True,
    }, {
        'url': 'https://www.sonyliv.com/music-videos/yeh-un-dinon-ki-baat-hai-1000018779',
        'only_matching': True,
    }]
    _GEO_COUNTRIES = ['IN']
    _HEADERS = {}
    _LOGIN_HINT = 'Use "--username <mobile_number>" to login using OTP or "--username token --password <auth_token>" to login using auth token.'
    _NETRC_MACHINE = 'sonyliv'

    def _get_device_id(self):
        """Generate a pseudo-random device id in the site's UUID-like format."""
        e = int(time.time() * 1000)
        t = list('xxxxxxxxxxxx4xxxyxxxxxxxxxxxxxxx')
        for i, c in enumerate(t):
            n = int((e + 16 * random.random()) % 16) | 0
            e = math.floor(e / 16)
            if c == 'x':
                t[i] = str(n)
            elif c == 'y':
                # RFC-4122-style variant nibble (8, 9, a or b).
                t[i] = f'{3 & n | 8:x}'
        return ''.join(t) + '-' + str(int(time.time() * 1000))

    def _perform_login(self, username, password):
        self._HEADERS['device_id'] = self._get_device_id()
        self._HEADERS['content-type'] = 'application/json'

        # Token login: "--username token --password <jwt>".
        if username.lower() == 'token' and try_call(lambda: jwt_decode_hs256(password)):
            self._HEADERS['authorization'] = password
            self.report_login()
            return
        elif len(username) != 10 or not username.isdigit():
            raise ExtractorError(f'Invalid username/password; {self._LOGIN_HINT}')

        self.report_login()
        # NOTE(review): '%M' in the timestamp format is strftime *minutes*,
        # though milliseconds look intended ('%f'); left as-is since the API
        # accepts this format — confirm before changing.
        otp_request_json = self._download_json(
            'https://apiv2.sonyliv.com/AGL/1.6/A/ENG/WEB/IN/HR/CREATEOTP-V2',
            None, note='Sending OTP', headers=self._HEADERS, data=json.dumps({
                'mobileNumber': username,
                'channelPartnerID': 'MSMIND',
                'country': 'IN',
                'timestamp': dt.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%MZ'),
                'otpSize': 6,
                'loginType': 'REGISTERORSIGNIN',
                'isMobileMandatory': True,
            }).encode())
        if otp_request_json['resultCode'] == 'KO':
            raise ExtractorError(otp_request_json['message'], expected=True)

        otp_verify_json = self._download_json(
            'https://apiv2.sonyliv.com/AGL/2.0/A/ENG/WEB/IN/HR/CONFIRMOTP-V2',
            None, note='Verifying OTP', headers=self._HEADERS, data=json.dumps({
                'channelPartnerID': 'MSMIND',
                'mobileNumber': username,
                'country': 'IN',
                'otp': self._get_tfa_info('OTP'),
                'dmaId': 'IN',
                'ageConfirmation': True,
                'timestamp': dt.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%MZ'),
                'isMobileMandatory': True,
            }).encode())
        if otp_verify_json['resultCode'] == 'KO':
            # Bug fix: previously raised otp_request_json['message'] (the
            # earlier, successful request's message) instead of the actual
            # verification failure message.
            raise ExtractorError(otp_verify_json['message'], expected=True)
        self._HEADERS['authorization'] = otp_verify_json['resultObj']['accessToken']

    def _call_api(self, version, path, video_id):
        """Call the AGL API, mapping subscription/geo errors to yt-dlp errors."""
        try:
            return self._download_json(
                f'https://apiv2.sonyliv.com/AGL/{version}/A/ENG/WEB/{path}',
                video_id, headers=self._HEADERS)['resultObj']
        except ExtractorError as e:
            if isinstance(e.cause, HTTPError) and e.cause.status == 406 and self._parse_json(
                    e.cause.response.read().decode(), video_id)['message'] == 'Please subscribe to watch this content':
                self.raise_login_required(self._LOGIN_HINT, method=None)
            if isinstance(e.cause, HTTPError) and e.cause.status == 403:
                message = self._parse_json(
                    e.cause.response.read().decode(), video_id)['message']
                if message == 'Geoblocked Country':
                    self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
                raise ExtractorError(message)
            raise

    def _initialize_pre_login(self):
        # A security token is required even for anonymous API calls.
        self._HEADERS['security_token'] = self._call_api('1.4', 'ALL/GETTOKEN', None)

    def _real_extract(self, url):
        video_id = self._match_id(url)
        content = self._call_api(
            '1.5', 'IN/CONTENT/VIDEOURL/VOD/' + video_id, video_id)
        if not self.get_param('allow_unplayable_formats') and content.get('isEncrypted'):
            self.report_drm(video_id)
        dash_url = content['videoURL']
        # Playback requests must carry a unique session id header.
        headers = {
            'x-playback-session-id': '%s-%d' % (uuid.uuid4().hex, time.time() * 1000),
        }
        formats = self._extract_mpd_formats(
            dash_url, video_id, mpd_id='dash', headers=headers, fatal=False)
        # The HLS manifest lives at the parallel /HLS/ path of the DASH URL.
        formats.extend(self._extract_m3u8_formats(
            dash_url.replace('.mpd', '.m3u8').replace('/DASH/', '/HLS/'),
            video_id, 'mp4', m3u8_id='hls', headers=headers, fatal=False))
        for f in formats:
            f.setdefault('http_headers', {}).update(headers)

        metadata = self._call_api(
            '1.6', 'IN/DETAIL/' + video_id, video_id)['containers'][0]['metadata']
        title = metadata['episodeTitle']
        subtitles = {}
        for sub in content.get('subtitle', []):
            sub_url = sub.get('subtitleUrl')
            if not sub_url:
                continue
            subtitles.setdefault(sub.get('subtitleLanguageName', 'ENG'), []).append({
                'url': sub_url,
            })
        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': content.get('posterURL'),
            'description': metadata.get('longDescription') or metadata.get('shortDescription'),
            'timestamp': int_or_none(metadata.get('creationDate'), 1000),
            'duration': int_or_none(metadata.get('duration')),
            'season_number': int_or_none(metadata.get('season')),
            'series': metadata.get('title'),
            'episode_number': int_or_none(metadata.get('episodeNumber')),
            'release_year': int_or_none(metadata.get('year')),
            'subtitles': subtitles,
        }
class SonyLIVSeriesIE(InfoExtractor):
    """Playlist extractor for SonyLIV show pages: yields one entry per episode."""
    _VALID_URL = r'https?://(?:www\.)?sonyliv\.com/shows/[^/?#&]+-(?P<id>\d{10})/?(?:$|[?#])'
    _TESTS = [{
        'url': 'https://www.sonyliv.com/shows/adaalat-1700000091',
        'playlist_mincount': 452,
        'info_dict': {
            'id': '1700000091',
        },
    }, {
        'url': 'https://www.sonyliv.com/shows/beyhadh-1700000007/',
        'playlist_mincount': 358,
        'info_dict': {
            'id': '1700000007',
        },
    }]
    _API_BASE = 'https://apiv2.sonyliv.com/AGL'
    # Accepted values for the sort_order extractor-arg; first entry is the default
    _SORT_ORDERS = ('asc', 'desc')

    def _entries(self, show_id, sort_order):
        """Yield url_result entries for every episode of every season of the show."""
        headers = {
            'Accept': 'application/json, text/plain, */*',
            'Referer': 'https://www.sonyliv.com',
        }
        # A security token must accompany all subsequent API requests
        headers['security_token'] = self._download_json(
            f'{self._API_BASE}/1.4/A/ENG/WEB/ALL/GETTOKEN', show_id,
            'Downloading security token', headers=headers)['resultObj']
        seasons = traverse_obj(self._download_json(
            f'{self._API_BASE}/1.9/R/ENG/WEB/IN/DL/DETAIL/{show_id}', show_id,
            'Downloading series JSON', headers=headers, query={
                'kids_safe': 'false',
                'from': '0',
                'to': '49',
            }), ('resultObj', 'containers', 0, 'containers', lambda _, v: int_or_none(v['id'])))
        # The API only sorts episodes within a season, so for descending
        # output the season order itself must be reversed here
        if sort_order == 'desc':
            seasons = reversed(seasons)
        for season in seasons:
            season_id = str(season['id'])
            note = traverse_obj(season, ('metadata', 'title', {str})) or 'season'
            cursor = 0
            # Episodes are paginated in chunks of 100
            for page_num in itertools.count(1):
                episodes = traverse_obj(self._download_json(
                    f'{self._API_BASE}/1.4/R/ENG/WEB/IN/CONTENT/DETAIL/BUNDLE/{season_id}',
                    season_id, f'Downloading {note} page {page_num} JSON', headers=headers, query={
                        'from': str(cursor),
                        'to': str(cursor + 99),
                        'orderBy': 'episodeNumber',
                        'sortOrder': sort_order,
                    }), ('resultObj', 'containers', 0, 'containers', lambda _, v: int_or_none(v['id'])))
                if not episodes:
                    break
                for episode in episodes:
                    video_id = str(episode['id'])
                    yield self.url_result(f'sonyliv:{video_id}', SonyLIVIE, video_id)
                cursor += 100

    def _real_extract(self, url):
        show_id = self._match_id(url)
        sort_order = self._configuration_arg('sort_order', [self._SORT_ORDERS[0]])[0]
        if sort_order not in self._SORT_ORDERS:
            # ExtractorError(expected=True) instead of bare ValueError so a
            # bad extractor-arg is reported as a clean error, not a traceback
            raise ExtractorError(
                f'Invalid sort order "{sort_order}". Allowed values are: {", ".join(self._SORT_ORDERS)}',
                expected=True)
        return self.playlist_result(self._entries(show_id, sort_order), playlist_id=show_id)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/raywenderlich.py | yt_dlp/extractor/raywenderlich.py | import re
from .common import InfoExtractor
from .vimeo import VimeoIE
from ..utils import (
ExtractorError,
int_or_none,
merge_dicts,
try_get,
unescapeHTML,
unified_timestamp,
urljoin,
)
class RayWenderlichIE(InfoExtractor):
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            videos\.raywenderlich\.com/courses|
                            (?:www\.)?raywenderlich\.com
                        )/
                        (?P<course_id>[^/]+)/lessons/(?P<id>\d+)
                    '''
    _TESTS = [{
        'url': 'https://www.raywenderlich.com/3530-testing-in-ios/lessons/1',
        'info_dict': {
            'id': '248377018',
            'ext': 'mp4',
            'title': 'Introduction',
            'description': 'md5:804d031b3efa9fcb49777d512d74f722',
            'timestamp': 1513906277,
            'upload_date': '20171222',
            'duration': 133,
            'uploader': 'Ray Wenderlich',
            'uploader_id': 'user3304672',
        },
        'params': {
            'noplaylist': True,
            'skip_download': True,
        },
        'add_ie': [VimeoIE.ie_key()],
        'expected_warnings': ['HTTP Error 403: Forbidden'],
    }, {
        'url': 'https://videos.raywenderlich.com/courses/105-testing-in-ios/lessons/1',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_video_id(data, lesson_id):
        """Return the video identifier of the lesson whose 'ordinal' matches
        *lesson_id* in the embedded data-collection JSON, or None."""
        if not data:
            return
        # lesson_id arrives as a string from the URL regex while 'ordinal' is
        # numeric; normalize it or the comparison below can never succeed
        lesson_id = int_or_none(lesson_id)
        groups = try_get(data, lambda x: x['groups'], list) or []
        if not groups:
            return
        for group in groups:
            if not isinstance(group, dict):
                continue
            # Lessons live in each group's own 'contents' list (previously
            # this erroneously re-read data['contents'] for every group)
            contents = try_get(group, lambda x: x['contents'], list) or []
            for content in contents:
                if not isinstance(content, dict):
                    continue
                ordinal = int_or_none(content.get('ordinal'))
                if ordinal != lesson_id:
                    continue
                video_id = content.get('identifier')
                if video_id:
                    return str(video_id)

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        course_id, lesson_id = mobj.group('course_id', 'id')
        display_id = f'{course_id}/{lesson_id}'

        webpage = self._download_webpage(url, display_id)

        thumbnail = self._og_search_thumbnail(
            webpage, default=None) or self._html_search_meta(
            'twitter:image', webpage, 'thumbnail')

        if '>Subscribe to unlock' in webpage:
            raise ExtractorError(
                'This content is only available for subscribers',
                expected=True)

        info = {
            'thumbnail': thumbnail,
        }

        # Free lessons embed the Vimeo ID directly; otherwise it must be
        # resolved via the collection JSON and the videos API
        vimeo_id = self._search_regex(
            r'data-vimeo-id=["\'](\d+)', webpage, 'vimeo id', default=None)

        if not vimeo_id:
            data = self._parse_json(
                self._search_regex(
                    r'data-collection=(["\'])(?P<data>{.+?})\1', webpage,
                    'data collection', default='{}', group='data'),
                display_id, transform_source=unescapeHTML, fatal=False)
            video_id = self._extract_video_id(
                data, lesson_id) or self._search_regex(
                r'/videos/(\d+)/', thumbnail, 'video id')
            headers = {
                'Referer': url,
                'X-Requested-With': 'XMLHttpRequest',
            }
            csrf_token = self._html_search_meta(
                'csrf-token', webpage, 'csrf token', default=None)
            if csrf_token:
                headers['X-CSRF-Token'] = csrf_token
            video = self._download_json(
                f'https://videos.raywenderlich.com/api/v1/videos/{video_id}.json',
                display_id, headers=headers)['video']
            vimeo_id = video['clips'][0]['provider_id']
            info.update({
                '_type': 'url_transparent',
                'title': video.get('name'),
                'description': video.get('description') or video.get(
                    'meta_description'),
                'duration': int_or_none(video.get('duration')),
                'timestamp': unified_timestamp(video.get('created_at')),
            })

        # Delegate the actual media extraction to the Vimeo extractor,
        # smuggling the referrer needed for embed authorization
        return merge_dicts(info, self.url_result(
            VimeoIE._smuggle_referrer(
                f'https://player.vimeo.com/video/{vimeo_id}', url),
            ie=VimeoIE.ie_key(), video_id=vimeo_id))
class RayWenderlichCourseIE(InfoExtractor):
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            videos\.raywenderlich\.com/courses|
                            (?:www\.)?raywenderlich\.com
                        )/
                        (?P<id>[^/]+)
                    '''

    _TEST = {
        'url': 'https://www.raywenderlich.com/3530-testing-in-ios',
        'info_dict': {
            'title': 'Testing in iOS',
            'id': '3530-testing-in-ios',
        },
        'params': {
            'noplaylist': False,
        },
        'playlist_count': 29,
    }

    @classmethod
    def suitable(cls, url):
        # Single-lesson URLs are handled by RayWenderlichIE instead
        if RayWenderlichIE.suitable(url):
            return False
        return super().suitable(url)

    def _real_extract(self, url):
        """Build a playlist of all lessons linked from a course page."""
        course_id = self._match_id(url)
        webpage = self._download_webpage(url, course_id)

        seen_paths = set()
        entries = []
        for lesson_path in re.findall(
                rf'<a[^>]+\bhref=["\'](/{course_id}/lessons/\d+)', webpage):
            # Lessons are linked multiple times on the page; keep the first
            # occurrence of each so playlist order follows page order
            if lesson_path not in seen_paths:
                seen_paths.add(lesson_path)
                entries.append(self.url_result(
                    urljoin(url, lesson_path), ie=RayWenderlichIE.ie_key()))

        title = self._og_search_title(webpage, default=None)
        if title is None:
            title = self._html_search_meta(
                'twitter:title', webpage, 'title', default=None)

        return self.playlist_result(entries, course_id, title)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tver.py | yt_dlp/extractor/tver.py | import datetime as dt
from .streaks import StreaksBaseIE
from ..utils import (
ExtractorError,
GeoRestrictedError,
int_or_none,
join_nonempty,
make_archive_id,
smuggle_url,
str_or_none,
strip_or_none,
time_seconds,
update_url_query,
)
from ..utils.traversal import require, traverse_obj
class TVerIE(StreaksBaseIE):
    """Extractor for TVer (Japanese catch-up TV) episodes and series.

    Episodes are served via the STREAKS backend by default; the legacy
    Brightcove backend can be forced with the `tver:backend` extractor-arg
    and is also used as a fallback when no STREAKS ID is available.
    """
    _VALID_URL = r'https?://(?:www\.)?tver\.jp/(?:(?P<type>lp|corner|series|episodes?|feature)/)+(?P<id>[a-zA-Z0-9]+)'
    _GEO_COUNTRIES = ['JP']
    _GEO_BYPASS = False
    _TESTS = [{
        # via Streaks backend
        'url': 'https://tver.jp/episodes/epc1hdugbk',
        'info_dict': {
            'id': 'epc1hdugbk',
            'ext': 'mp4',
            'display_id': 'ref:baeebeac-a2a6-4dbf-9eb3-c40d59b40068',
            'title': '神回だけ見せます! #2 壮烈!車大騎馬戦(木曜スペシャル)',
            'alt_title': '神回だけ見せます! #2 壮烈!車大騎馬戦(木曜スペシャル) 日テレ',
            'description': 'md5:2726f742d5e3886edeaf72fb6d740fef',
            'uploader_id': 'tver-ntv',
            'channel': '日テレ',
            'duration': 1158.024,
            'thumbnail': 'https://statics.tver.jp/images/content/thumbnail/episode/xlarge/epc1hdugbk.jpg?v=16',
            'series': '神回だけ見せます!',
            'episode': '#2 壮烈!車大騎馬戦(木曜スペシャル)',
            'episode_number': 2,
            'timestamp': 1736486036,
            'upload_date': '20250110',
            'modified_timestamp': 1736870264,
            'modified_date': '20250114',
            'live_status': 'not_live',
            'release_timestamp': 1651453200,
            'release_date': '20220502',
            '_old_archive_ids': ['brightcovenew ref:baeebeac-a2a6-4dbf-9eb3-c40d59b40068'],
            'series_id': 'sru35hwdd2',
            'season_id': 'ss2lcn4af6',
        },
    }, {
        # via Brightcove backend (deprecated)
        'url': 'https://tver.jp/episodes/epc1hdugbk',
        'info_dict': {
            'id': 'ref:baeebeac-a2a6-4dbf-9eb3-c40d59b40068',
            'ext': 'mp4',
            'title': '神回だけ見せます! #2 壮烈!車大騎馬戦(木曜スペシャル)',
            'alt_title': '神回だけ見せます! #2 壮烈!車大騎馬戦(木曜スペシャル) 日テレ',
            'description': 'md5:2726f742d5e3886edeaf72fb6d740fef',
            'uploader_id': '4394098882001',
            'channel': '日テレ',
            'duration': 1158.101,
            'thumbnail': 'https://statics.tver.jp/images/content/thumbnail/episode/xlarge/epc1hdugbk.jpg?v=16',
            'tags': [],
            'series': '神回だけ見せます!',
            'episode': '#2 壮烈!車大騎馬戦(木曜スペシャル)',
            'episode_number': 2,
            'timestamp': 1651388531,
            'upload_date': '20220501',
            'release_timestamp': 1651453200,
            'release_date': '20220502',
            'series_id': 'sru35hwdd2',
            'season_id': 'ss2lcn4af6',
        },
        'params': {'extractor_args': {'tver': {'backend': ['brightcove']}}},
    }, {
        'url': 'https://tver.jp/corner/f0103888',
        'only_matching': True,
    }, {
        'url': 'https://tver.jp/lp/f0033031',
        'only_matching': True,
    }, {
        'url': 'https://tver.jp/series/srtxft431v',
        'info_dict': {
            'id': 'srtxft431v',
            'title': '名探偵コナン',
        },
        'playlist_mincount': 21,
    }, {
        'url': 'https://tver.jp/series/sru35hwdd2',
        'info_dict': {
            'id': 'sru35hwdd2',
            'title': '神回だけ見せます!',
        },
        'playlist_count': 11,
    }, {
        'url': 'https://tver.jp/series/srkq2shp9d',
        'only_matching': True,
    }]
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/default_default/index.html?videoId=%s'
    _HEADERS = {
        'x-tver-platform-type': 'web',
        'Origin': 'https://tver.jp',
        'Referer': 'https://tver.jp/',
    }
    # Both populated by _real_initialize() before any extraction happens
    _PLATFORM_QUERY = {}
    _STREAKS_API_INFO = {}

    def _real_initialize(self):
        """Create an anonymous platform session and fetch the STREAKS API keys."""
        session_info = self._download_json(
            'https://platform-api.tver.jp/v2/api/platform_users/browser/create',
            None, 'Creating session', data=b'device_type=pc')
        # The uid/token pair is sent as query parameters on every platform API call
        self._PLATFORM_QUERY = traverse_obj(session_info, ('result', {
            'platform_uid': 'platform_uid',
            'platform_token': 'platform_token',
        }))
        self._STREAKS_API_INFO = self._download_json(
            'https://player.tver.jp/player/streaks_info_v2.json', None,
            'Downloading STREAKS API info', 'Unable to download STREAKS API info')

    def _call_platform_api(self, path, video_id, note=None, fatal=True, query=None):
        """Call the TVer platform API with the session credentials attached."""
        return self._download_json(
            f'https://platform-api.tver.jp/service/api/{path}', video_id, note,
            fatal=fatal, headers=self._HEADERS, query={
                **self._PLATFORM_QUERY,
                **(query or {}),
            })

    def _yield_episode_ids_for_series(self, series_id):
        """Yield the episode IDs of every season of the given series."""
        seasons_info = self._download_json(
            f'https://service-api.tver.jp/api/v1/callSeriesSeasons/{series_id}',
            series_id, 'Downloading seasons info', headers=self._HEADERS)
        for season_id in traverse_obj(
                seasons_info, ('result', 'contents', lambda _, v: v['type'] == 'season', 'content', 'id', {str})):
            episodes_info = self._call_platform_api(
                f'v1/callSeasonEpisodes/{season_id}', series_id, f'Downloading season {season_id} episodes info')
            yield from traverse_obj(episodes_info, (
                'result', 'contents', lambda _, v: v['type'] == 'episode', 'content', 'id', {str}))

    def _real_extract(self, url):
        video_id, video_type = self._match_valid_url(url).group('id', 'type')

        backend = self._configuration_arg('backend', ['streaks'])[0]
        if backend not in ('brightcove', 'streaks'):
            raise ExtractorError(f'Invalid backend value: {backend}', expected=True)

        # Series pages become playlists of their episodes
        if video_type == 'series':
            series_info = self._call_platform_api(
                f'v2/callSeries/{video_id}', video_id, 'Downloading series info')
            return self.playlist_from_matches(
                self._yield_episode_ids_for_series(video_id), video_id,
                traverse_obj(series_info, ('result', 'content', 'content', 'title', {str})),
                ie=TVerIE, getter=lambda x: f'https://tver.jp/episodes/{x}')

        # Legacy URL types (lp/corner/feature) redirect to a canonical episode URL
        if video_type != 'episodes':
            webpage = self._download_webpage(url, video_id, note='Resolving to new URL')
            video_id = self._match_id(self._search_regex(
                (r'canonical"\s*href="(https?://tver\.jp/[^"]+)"', r'&link=(https?://tver\.jp/[^?&]+)[?&]'),
                webpage, 'url regex'))

        episode_info = self._call_platform_api(
            f'v1/callEpisode/{video_id}', video_id, 'Downloading episode info', fatal=False, query={
                'require_data': 'mylist,later[epefy106ur],good[epefy106ur],resume[epefy106ur]',
            })
        episode_content = traverse_obj(
            episode_info, ('result', 'episode', 'content')) or {}

        # 'version' acts as a cache buster for the static JSON and thumbnails
        version = traverse_obj(episode_content, ('version', {str_or_none}), default='5')
        video_info = self._download_json(
            f'https://statics.tver.jp/content/episode/{video_id}.json', video_id, 'Downloading video info',
            query={'v': version}, headers={'Referer': 'https://tver.jp/'})

        episode = strip_or_none(episode_content.get('title'))
        series = str_or_none(episode_content.get('seriesTitle'))
        title = (
            join_nonempty(series, episode, delim=' ')
            or str_or_none(video_info.get('title')))
        provider = str_or_none(episode_content.get('productionProviderName'))
        onair_label = str_or_none(episode_content.get('broadcastDateLabel'))

        # Thumbnails exist in four fixed sizes under predictable URLs
        thumbnails = [
            {
                'id': quality,
                'url': update_url_query(
                    f'https://statics.tver.jp/images/content/thumbnail/episode/{quality}/{video_id}.jpg',
                    {'v': version}),
                'width': width,
                'height': height,
            }
            for quality, width, height in [
                ('small', 480, 270),
                ('medium', 640, 360),
                ('large', 960, 540),
                ('xlarge', 1280, 720),
            ]
        ]

        metadata = {
            'title': title,
            'series': series,
            'episode': episode,
            # an another title which is considered "full title" for some viewers
            'alt_title': join_nonempty(title, provider, onair_label, delim=' '),
            'channel': provider,
            'thumbnails': thumbnails,
            **traverse_obj(video_info, {
                'description': ('description', {str}),
                'release_timestamp': ('viewStatus', 'startAt', {int_or_none}),
                'episode_number': ('no', {int_or_none}),
                'series_id': ('seriesID', {str}),
                'season_id': ('seasonID', {str}),
            }),
        }

        brightcove_id = traverse_obj(video_info, ('video', ('videoRefID', 'videoID'), {str}, any))
        # Non-numeric IDs are Brightcove reference IDs and need the 'ref:' prefix
        if brightcove_id and not brightcove_id.isdecimal():
            brightcove_id = f'ref:{brightcove_id}'

        streaks_id = traverse_obj(video_info, ('streaks', 'videoRefID', {str}))
        if streaks_id and not streaks_id.startswith('ref:'):
            streaks_id = f'ref:{streaks_id}'

        # Deprecated Brightcove extraction reachable w/extractor-arg or fallback; errors are expected
        if backend == 'brightcove' or not streaks_id:
            if backend != 'brightcove':
                self.report_warning(
                    'No STREAKS ID found; falling back to Brightcove extraction', video_id=video_id)
            if not brightcove_id:
                raise ExtractorError('Unable to extract brightcove reference ID', expected=True)
            account_id = traverse_obj(video_info, (
                'video', 'accountID', {str}, {require('brightcove account ID', expected=True)}))
            return {
                **metadata,
                '_type': 'url_transparent',
                'url': smuggle_url(
                    self.BRIGHTCOVE_URL_TEMPLATE % (account_id, brightcove_id),
                    {'geo_countries': self._GEO_COUNTRIES}),
                'ie_key': 'BrightcoveNew',
            }

        project_id = video_info['streaks']['projectID']
        # The STREAKS API key rotates monthly among slots key01..key06;
        # pick the slot from the current month in JST (UTC+9)
        key_idx = dt.datetime.fromtimestamp(time_seconds(hours=9), dt.timezone.utc).month % 6 or 6

        try:
            streaks_info = self._extract_from_streaks_api(project_id, streaks_id, {
                'Origin': 'https://tver.jp',
                'Referer': 'https://tver.jp/',
                'X-Streaks-Api-Key': self._STREAKS_API_INFO[project_id]['api_key'][f'key0{key_idx}'],
            })
        except GeoRestrictedError as e:
            # Catch and re-raise with metadata_available to support --ignore-no-formats-error
            self.raise_geo_restricted(e.orig_msg, countries=self._GEO_COUNTRIES, metadata_available=True)
            streaks_info = {}

        return {
            **streaks_info,
            **metadata,
            'id': video_id,
            '_old_archive_ids': [make_archive_id('BrightcoveNew', brightcove_id)] if brightcove_id else None,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/videopress.py | yt_dlp/extractor/videopress.py | from .common import InfoExtractor
from ..utils import (
determine_ext,
float_or_none,
int_or_none,
parse_age_limit,
qualities,
random_birthday,
unified_timestamp,
urljoin,
)
class VideoPressIE(InfoExtractor):
    _ID_REGEX = r'[\da-zA-Z]{8}'
    _PATH_REGEX = r'video(?:\.word)?press\.com/embed/'
    _VALID_URL = rf'https?://{_PATH_REGEX}(?P<id>{_ID_REGEX})'
    _EMBED_REGEX = [rf'<iframe[^>]+src=["\'](?P<url>(?:https?://)?{_PATH_REGEX}{_ID_REGEX})']
    _TESTS = [{
        'url': 'https://videopress.com/embed/kUJmAcSf',
        'md5': '706956a6c875873d51010921310e4bc6',
        'info_dict': {
            'id': 'kUJmAcSf',
            'ext': 'mp4',
            'title': 'VideoPress Demo',
            'description': '',
            'duration': 635.0,
            'thumbnail': r're:https?://videos\.files\.wordpress\.com/.+\.jpg',
            'timestamp': 1434983935,
            'upload_date': '20150622',
            'age_limit': 0,
        },
    }, {
        # 17+, requires birth_* params
        'url': 'https://videopress.com/embed/iH3gstfZ',
        'only_matching': True,
    }, {
        'url': 'https://video.wordpress.com/embed/kUJmAcSf',
        'only_matching': True,
    }]
    _WEBPAGE_TESTS = [{
        'url': 'https://wordpress.com/support/videopress/',
        'info_dict': {
            'id': 'BZHMfMfN',
            'ext': 'mp4',
            'title': 'videopress example',
            'age_limit': 0,
            'description': '',
            'duration': 19.796,
            'thumbnail': r're:https?://videos\.files\.wordpress\.com/.+\.jpg',
            'timestamp': 1748969554,
            'upload_date': '20250603',
        },
    }]

    def _real_extract(self, url):
        """Extract a VideoPress embed via the public WordPress REST API."""
        video_id = self._match_id(url)

        # Age-restricted videos require birth date parameters, so a random
        # (adult) birthday is sent with every request
        query = random_birthday('birth_year', 'birth_month', 'birth_day')
        query['fields'] = 'description,duration,file_url_base,files,height,original,poster,rating,title,upload_date,width'
        video = self._download_json(
            f'https://public-api.wordpress.com/rest/v1.1/videos/{video_id}',
            video_id, query=query)

        title = video['title']

        # Transcoded file paths are relative to a per-video base URL
        bases = video.get('file_url_base') or {}
        base_url = bases.get('https') or bases.get('http')

        QUALITIES = ('std', 'dvd', 'hd')
        quality = qualities(QUALITIES)

        formats = [{
            'url': urljoin(base_url, path),
            'format_id': f'{format_id}-{ext}',
            'ext': determine_ext(path, ext),
            'quality': quality(format_id),
        } for format_id, variant in (video.get('files') or {}).items()
            if isinstance(variant, dict)
            for ext, path in variant.items()
            if ext in ('mp4', 'ogg')]

        original_url = video.get('original')
        if original_url:
            # The source upload outranks every transcoded rendition
            formats.append({
                'url': original_url,
                'format_id': 'original',
                'quality': len(QUALITIES),
                'width': int_or_none(video.get('width')),
                'height': int_or_none(video.get('height')),
            })

        return {
            'id': video_id,
            'title': title,
            'description': video.get('description'),
            'thumbnail': video.get('poster'),
            # duration is reported in milliseconds
            'duration': float_or_none(video.get('duration'), 1000),
            'timestamp': unified_timestamp(video.get('upload_date')),
            'age_limit': parse_age_limit(video.get('rating')),
            'formats': formats,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/lego.py | yt_dlp/extractor/lego.py | import uuid
from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
int_or_none,
join_nonempty,
qualities,
)
class LEGOIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?lego\.com/(?P<locale>[a-z]{2}-[a-z]{2})/(?:[^/]+/)*videos/(?:[^/]+/)*[^/?#]+-(?P<id>[0-9a-f]{32})'
    _TESTS = [{
        'url': 'http://www.lego.com/en-us/videos/themes/club/blocumentary-kawaguchi-55492d823b1b4d5e985787fa8c2973b1',
        'md5': 'f34468f176cfd76488767fc162c405fa',
        'info_dict': {
            'id': '55492d82-3b1b-4d5e-9857-87fa8c2973b1_en-US',
            'ext': 'mp4',
            'title': 'Blocumentary Great Creations: Akiyuki Kawaguchi',
            'description': 'Blocumentary Great Creations: Akiyuki Kawaguchi',
        },
    }, {
        # geo-restricted but the contentUrl contain a valid url
        'url': 'http://www.lego.com/nl-nl/videos/themes/nexoknights/episode-20-kingdom-of-heroes-13bdc2299ab24d9685701a915b3d71e7##sp=399',
        'md5': 'c7420221f7ffd03ff056f9db7f8d807c',
        'info_dict': {
            'id': '13bdc229-9ab2-4d96-8570-1a915b3d71e7_nl-NL',
            'ext': 'mp4',
            'title': 'Aflevering 20: Helden van het koninkrijk',
            'description': 'md5:8ee499aac26d7fa8bcb0cedb7f9c3941',
            'age_limit': 5,
        },
    }, {
        # with subtitle
        'url': 'https://www.lego.com/nl-nl/kids/videos/classic/creative-storytelling-the-little-puppy-aa24f27c7d5242bc86102ebdc0f24cba',
        'info_dict': {
            'id': 'aa24f27c-7d52-42bc-8610-2ebdc0f24cba_nl-NL',
            'ext': 'mp4',
            'title': 'De kleine puppy',
            'description': 'md5:5b725471f849348ac73f2e12cfb4be06',
            'age_limit': 1,
            'subtitles': {
                'nl': [{
                    'ext': 'srt',
                    'url': r're:^https://.+\.srt$',
                }],
            },
        },
        'params': {
            'skip_download': True,
        },
    }]
    # Quality name -> (abr, height, width); keys are ordered worst to best
    _QUALITIES = {
        'Lowest': (64, 180, 320),
        'Low': (64, 270, 480),
        'Medium': (96, 360, 640),
        'High': (128, 540, 960),
        'Highest': (128, 720, 1280),
    }

    def _real_extract(self, url):
        """Extract a LEGO video; availability is geo-restricted to the
        country encoded in the URL locale."""
        locale, video_id = self._match_valid_url(url).groups()
        countries = [locale.split('-')[1].upper()]
        self._initialize_geo_bypass({
            'countries': countries,
        })

        try:
            item = self._download_json(
                # https://contentfeed.services.lego.com/api/v2/item/[VIDEO_ID]?culture=[LOCALE]&contentType=Video
                'https://services.slingshot.lego.com/mediaplayer/v2',
                video_id, query={
                    'videoId': f'{uuid.UUID(video_id)}_{locale}',
                }, headers=self.geo_verification_headers())
        except ExtractorError as e:
            # 451 Unavailable For Legal Reasons => geo-blocked
            if isinstance(e.cause, HTTPError) and e.cause.status == 451:
                self.raise_geo_restricted(countries=countries)
            raise

        video = item['Video']
        video_id = video['Id']
        title = video['Title']

        # Derive the quality ranking from _QUALITIES instead of repeating
        # the ladder as a literal list (single source of truth)
        q = qualities(tuple(self._QUALITIES))
        formats = []
        for video_source in item.get('VideoFormats', []):
            video_source_url = video_source.get('Url')
            if not video_source_url:
                continue
            video_source_format = video_source.get('Format')
            if video_source_format == 'F4M':
                formats.extend(self._extract_f4m_formats(
                    video_source_url, video_id,
                    f4m_id=video_source_format, fatal=False))
            elif video_source_format == 'M3U8':
                formats.extend(self._extract_m3u8_formats(
                    video_source_url, video_id, 'mp4', 'm3u8_native',
                    m3u8_id=video_source_format, fatal=False))
            else:
                video_source_quality = video_source.get('Quality')
                f = {
                    'format_id': join_nonempty(video_source_format, video_source_quality),
                    'quality': q(video_source_quality),
                    'url': video_source_url,
                }
                quality = self._QUALITIES.get(video_source_quality)
                if quality:
                    f.update({
                        'abr': quality[0],
                        'height': quality[1],
                        'width': quality[2],
                    })
                formats.append(f)

        subtitles = {}
        sub_file_id = video.get('SubFileId')
        # An all-zero GUID means "no subtitle file"
        if sub_file_id and sub_file_id != '00000000-0000-0000-0000-000000000000':
            net_storage_path = video.get('NetstoragePath')
            invariant_id = video.get('InvariantId')
            video_file_id = video.get('VideoFileId')
            video_version = video.get('VideoVersion')
            if net_storage_path and invariant_id and video_file_id and video_version:
                subtitles.setdefault(locale[:2], []).append({
                    'url': f'https://lc-mediaplayerns-live-s.legocdn.com/public/{net_storage_path}/{invariant_id}_{video_file_id}_{locale}_{video_version}_sub.srt',
                })

        return {
            'id': video_id,
            'title': title,
            'description': video.get('Description'),
            'thumbnail': video.get('GeneratedCoverImage') or video.get('GeneratedThumbnail'),
            'duration': int_or_none(video.get('Length')),
            'formats': formats,
            'subtitles': subtitles,
            'age_limit': int_or_none(video.get('AgeFrom')),
            'season': video.get('SeasonTitle'),
            'season_number': int_or_none(video.get('Season')) or None,
            'episode_number': int_or_none(video.get('Episode')) or None,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/canalalpha.py | yt_dlp/extractor/canalalpha.py | from .common import InfoExtractor
from ..utils import (
clean_html,
dict_get,
try_get,
unified_strdate,
)
class CanalAlphaIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?canalalpha\.ch/play/[^/]+/[^/]+/(?P<id>\d+)/?.*'
    _TESTS = [{
        'url': 'https://www.canalalpha.ch/play/le-journal/episode/24520/jeudi-28-octobre-2021',
        'info_dict': {
            'id': '24520',
            'ext': 'mp4',
            'title': 'Jeudi 28 octobre 2021',
            'description': 'md5:d30c6c3e53f8ad40d405379601973b30',
            'thumbnail': 'https://static.canalalpha.ch/poster/journal/journal_20211028.jpg',
            'upload_date': '20211028',
            'duration': 1125,
        },
        'params': {'skip_download': True},
    }, {
        'url': 'https://www.canalalpha.ch/play/le-journal/topic/24512/la-poste-fait-de-neuchatel-un-pole-cryptographique',
        'info_dict': {
            'id': '24512',
            'ext': 'mp4',
            'title': 'La Poste fait de Neuchâtel un pôle cryptographique',
            'description': 'md5:4ba63ae78a0974d1a53d6703b6e1dedf',
            'thumbnail': 'https://static.canalalpha.ch/poster/news/news_39712.jpg',
            'upload_date': '20211028',
            'duration': 138,
        },
        'params': {'skip_download': True},
    }, {
        'url': 'https://www.canalalpha.ch/play/eureka/episode/24484/ces-innovations-qui-veulent-rendre-lagriculture-plus-durable',
        'info_dict': {
            'id': '24484',
            'ext': 'mp4',
            'title': 'Ces innovations qui veulent rendre l’agriculture plus durable',
            'description': 'md5:85d594a3b5dc6ccfc4a85aba6e73b129',
            'thumbnail': 'https://static.canalalpha.ch/poster/magazine/magazine_10236.jpg',
            'upload_date': '20211026',
            'duration': 360,
        },
        'params': {'skip_download': True},
    }, {
        'url': 'https://www.canalalpha.ch/play/avec-le-temps/episode/23516/redonner-de-leclat-grace-au-polissage',
        'info_dict': {
            'id': '23516',
            'ext': 'mp4',
            'title': 'Redonner de l\'éclat grâce au polissage',
            'description': 'md5:0d8fbcda1a5a4d6f6daa3165402177e1',
            'thumbnail': 'https://static.canalalpha.ch/poster/magazine/magazine_9990.png',
            'upload_date': '20210726',
            'duration': 360,
        },
        'params': {'skip_download': True},
    }, {
        'url': 'https://www.canalalpha.ch/play/le-journal/topic/33500/encore-des-mesures-deconomie-dans-le-jura',
        'info_dict': {
            'id': '33500',
            'ext': 'mp4',
            'title': 'Encore des mesures d\'économie dans le Jura',
            'description': 'md5:938b5b556592f2d1b9ab150268082a80',
            'thumbnail': 'https://static.canalalpha.ch/poster/news/news_46665.jpg',
            'upload_date': '20240411',
            'duration': 105,
        },
    }]

    def _real_extract(self, url):
        """Extract a Canal Alpha video from the page's serialized server state.

        Direct MP4 renditions are read from the state; HLS/DASH manifests are
        expanded when present.
        """
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The state JSON can contain '};' inside strings, hence the guarded regex
        data_json = self._parse_json(self._search_regex(
            r'window\.__SERVER_STATE__\s?=\s?({(?:(?!};)[^"]|"([^"]|\\")*")+})\s?;',
            webpage, 'data_json'), video_id)['1']['data']['data']
        manifests = try_get(data_json, lambda x: x['video']['manifests'], expected_type=dict) or {}
        subtitles = {}
        formats = [{
            'url': video['$url'],
            'ext': 'mp4',
            'width': try_get(video, lambda x: x['res']['width'], expected_type=int),
            'height': try_get(video, lambda x: x['res']['height'], expected_type=int),
        } for video in try_get(data_json, lambda x: x['video']['mp4'], expected_type=list) or [] if video.get('$url')]
        if manifests.get('hls'):
            fmts, subs = self._extract_m3u8_formats_and_subtitles(
                manifests['hls'], video_id, m3u8_id='hls', fatal=False)
            formats.extend(fmts)
            self._merge_subtitles(subs, target=subtitles)
        if manifests.get('dash'):
            fmts, subs = self._extract_mpd_formats_and_subtitles(
                manifests['dash'], video_id, mpd_id='dash', fatal=False)
            formats.extend(fmts)
            self._merge_subtitles(subs, target=subtitles)
        return {
            'id': video_id,
            # Guarded: a bare data_json.get('title').strip() raises
            # AttributeError when the state omits the title field
            'title': (data_json.get('title') or '').strip() or None,
            'description': clean_html(dict_get(data_json, ('longDesc', 'shortDesc'))),
            'thumbnail': data_json.get('poster'),
            'upload_date': unified_strdate(dict_get(data_json, ('webPublishAt', 'featuredAt', 'diffusionDate'))),
            'duration': try_get(data_json, lambda x: x['video']['duration'], expected_type=int),
            'formats': formats,
            'subtitles': subtitles,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ichinanalive.py | yt_dlp/extractor/ichinanalive.py |
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
str_or_none,
traverse_obj,
unified_strdate,
url_or_none,
)
class IchinanaLiveIE(InfoExtractor):
    IE_NAME = '17live'
    _VALID_URL = r'https?://(?:www\.)?17\.live/(?:[^/]+/)*(?:live|profile/r)/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://17.live/live/3773096',
        'info_dict': {
            'id': '3773096',
            'title': '萠珈☕🤡🍫moka',
            'is_live': True,
            'uploader': '萠珈☕🤡🍫moka',
            'uploader_id': '3773096',
            'like_count': 366,
            'view_count': 18121,
            'timestamp': 1630569012,
        },
        'skip': 'running as of writing, but may be ended as of testing',
    }, {
        'note': 'nothing except language differs',
        'url': 'https://17.live/ja/live/3773096',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Clip URLs also match this pattern; defer those to the clip extractor
        return not IchinanaLiveClipIE.suitable(url) and super().suitable(url)

    def _real_extract(self, url):
        """Extract a currently running 17live live stream."""
        video_id = self._match_id(url)
        url = f'https://17.live/live/{video_id}'

        # The enter endpoint answers HTTP 420 with message 'ended' once a
        # live has finished, so that status must not be treated as fatal
        enter = self._download_json(
            f'https://api-dsa.17app.co/api/v1/lives/{video_id}/enter', video_id,
            headers={'Referer': url}, fatal=False, expected_status=420,
            data=b'\0')
        if enter and enter.get('message') == 'ended':
            raise ExtractorError('This live has ended.', expected=True)

        view_data = self._download_json(
            f'https://api-dsa.17app.co/api/v1/lives/{video_id}', video_id,
            headers={'Referer': url})

        uploader = traverse_obj(
            view_data, ('userInfo', 'displayName'), ('userInfo', 'openID'))

        video_urls = view_data.get('rtmpUrls')
        if not video_urls:
            raise ExtractorError('unable to extract live URL information')
        formats = []
        for (name, value) in video_urls[0].items():
            if not isinstance(value, str):
                continue
            if not value.startswith('http'):
                continue
            # Rank the stream variants heuristically from their key names:
            # 'High' outranks the default, 'Low' and 'web' rank below it
            quality = -1
            if 'web' in name:
                quality -= 1
            if 'High' in name:
                quality += 4
            if 'Low' in name:
                quality -= 2
            formats.append({
                'format_id': name,
                'url': value,
                'quality': quality,
                'http_headers': {'Referer': url},
                'ext': 'flv',
                'vcodec': 'h264',
                'acodec': 'aac',
            })

        return {
            'id': video_id,
            'title': uploader or video_id,
            'formats': formats,
            'is_live': True,
            'uploader': uploader,
            'uploader_id': video_id,
            'like_count': view_data.get('receivedLikeCount'),
            'view_count': view_data.get('viewerCount'),
            'thumbnail': view_data.get('coverPhoto'),
            'description': view_data.get('caption'),
            'timestamp': view_data.get('beginTime'),
        }
class IchinanaLiveClipIE(InfoExtractor):
    IE_NAME = '17live:clip'
    _VALID_URL = r'https?://(?:www\.)?17\.live/(?:[^/]+/)*profile/r/(?P<uploader_id>\d+)/clip/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'https://17.live/profile/r/1789280/clip/1bHQSK8KUieruFXaCH4A4upCzlN',
        'info_dict': {
            'id': '1bHQSK8KUieruFXaCH4A4upCzlN',
            'title': 'マチコ先生🦋Class💋',
            'description': 'マチ戦隊 第一次 バスターコール\n総額200万coin!\n動画制作@うぉーかー🌱Walker🎫',
            'uploader_id': '1789280',
        },
    }, {
        'url': 'https://17.live/ja/profile/r/1789280/clip/1bHQSK8KUieruFXaCH4A4upCzlN',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Extract a recorded 17live clip."""
        uploader_id, video_id = self._match_valid_url(url).groups()
        url = f'https://17.live/profile/r/{uploader_id}/clip/{video_id}'

        view_data = self._download_json(
            f'https://api-dsa.17app.co/api/v1/clips/{video_id}', video_id,
            headers={'Referer': url})

        uploader = traverse_obj(
            view_data, ('userInfo', 'displayName'), ('userInfo', 'name'))

        # (format id, JSON key, quality); srcVideo is the untouched source
        # upload and therefore outranks the transcoded variants
        variants = (
            ('video', 'videoURL', -1),
            ('transcode', 'transcodeURL', -1),
            ('srcVideo', 'srcVideoURL', 1),
        )
        formats = []
        for fmt_id, key, preference in variants:
            fmt_url = view_data.get(key)
            if not fmt_url:
                continue
            formats.append({
                'id': fmt_id,
                'url': fmt_url,
                'quality': preference,
                'ext': 'mp4',
                'protocol': 'https',
                'vcodec': 'h264',
                'acodec': 'aac',
                'http_headers': {'Referer': url},
            })

        return {
            'id': video_id,
            'title': uploader or video_id,
            'formats': formats,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'like_count': view_data.get('likeCount'),
            'view_count': view_data.get('viewCount'),
            'thumbnail': view_data.get('imageURL'),
            'duration': view_data.get('duration'),
            'description': view_data.get('caption'),
            'upload_date': unified_strdate(str_or_none(view_data.get('createdAt'))),
        }
}
class IchinanaLiveVODIE(InfoExtractor):
    """Extractor for 17live (17.live) VOD recordings."""
    IE_NAME = '17live:vod'
    _VALID_URL = r'https?://(?:www\.)?17\.live/ja/vod/[^/?#]+/(?P<id>[^/?#]+)'
    _TESTS = [{
        'url': 'https://17.live/ja/vod/27323042/2cf84520-e65e-4b22-891e-1d3a00b0f068',
        'md5': '3299b930d7457b069639486998a89580',
        'info_dict': {
            'id': '2cf84520-e65e-4b22-891e-1d3a00b0f068',
            'ext': 'mp4',
            'title': 'md5:b5f8cbf497d54cc6a60eb3b480182f01',
            'uploader': 'md5:29fb12122ab94b5a8495586e7c3085a5',
            'uploader_id': '27323042',
            'channel': '🌟オールナイトニッポン アーカイブ🌟',
            'channel_id': '2b4f85f1-d61e-429d-a901-68d32bdd8645',
            'like_count': int,
            'view_count': int,
            'thumbnail': r're:https?://.+/.+\.(?:jpe?g|png)',
            'duration': 549,
            'description': 'md5:116f326579700f00eaaf5581aae1192e',
            'timestamp': 1741058645,
            'upload_date': '20250304',
        },
    }, {
        'url': 'https://17.live/ja/vod/27323042/0de11bac-9bea-40b8-9eab-0239a7d88079',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        vod_data = self._download_json(f'https://wap-api.17app.co/api/v1/vods/{video_id}', video_id)

        def hls_formats(m3u8_url):
            # VODs are delivered as a single HLS master playlist
            return self._extract_m3u8_formats(m3u8_url, video_id)

        # Map the API response straight onto the info dict
        return traverse_obj(vod_data, {
            'id': ('vodID', {str}),
            'title': ('title', {str}),
            'formats': ('vodURL', {hls_formats}),
            'uploader': ('userInfo', 'displayName', {str}),
            'uploader_id': ('userInfo', 'roomID', {int}, {str_or_none}),
            'channel': ('userInfo', 'name', {str}),
            'channel_id': ('userInfo', 'userID', {str}),
            'like_count': ('likeCount', {int_or_none}),
            'view_count': ('viewCount', {int_or_none}),
            'thumbnail': ('imageURL', {url_or_none}),
            'duration': ('duration', {int_or_none}),
            'description': ('description', {str}),
            'timestamp': ('createdAt', {int_or_none}),
        })
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/sibnet.py | yt_dlp/extractor/sibnet.py | from .common import InfoExtractor
class SibnetEmbedIE(InfoExtractor):
    """Matches video.sibnet.ru players embedded in third-party pages.

    There is no directly supported URL (``_VALID_URL = False``); pages are
    only picked up through the iframe pattern in ``_EMBED_REGEX``.
    """
    # Ref: https://help.sibnet.ru/?sibnet_video_embed
    _VALID_URL = False
    _EMBED_REGEX = [r'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//video\.sibnet\.ru/shell\.php\?.*?\bvideoid=\d+.*?)\1']
    _WEBPAGE_TESTS = [{
        'url': 'https://phpbb3.x-tk.ru/bbcode-video-sibnet-t24.html',
        'info_dict': {
            'id': 'shell',  # FIXME: Non unique ID?
            'ext': 'mp4',
            'age_limit': 0,
            'thumbnail': 'https://video.sibnet.ru/upload/cover/video_1887072_0.jpg',
            'title': 'КВН Москва не сразу строилась - Девушка впервые играет в Mortal Kombat',
        },
    }]
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/filmon.py | yt_dlp/extractor/filmon.py | from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
int_or_none,
qualities,
strip_or_none,
)
class FilmOnIE(InfoExtractor):
    """Extractor for FilmOn VOD items; series pages become playlists."""
    IE_NAME = 'filmon'
    _VALID_URL = r'(?:https?://(?:www\.)?filmon\.com/vod/view/|filmon:)(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.filmon.com/vod/view/24869-0-plan-9-from-outer-space',
        'info_dict': {
            'id': '24869',
            'ext': 'mp4',
            'title': 'Plan 9 From Outer Space',
            'description': 'Dead human, zombies and vampires',
        },
    }, {
        'url': 'https://www.filmon.com/vod/view/2825-1-popeye-series-1',
        'info_dict': {
            'id': '2825',
            'title': 'Popeye Series 1',
            'description': 'The original series of Popeye.',
        },
        'playlist_mincount': 8,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        try:
            response = self._download_json(
                f'https://www.filmon.com/api/vod/movie?id={video_id}',
                video_id)['response']
        except ExtractorError as err:
            # surface the API's own error message when available
            if isinstance(err.cause, HTTPError):
                reason = self._parse_json(err.cause.response.read().decode(), video_id)['reason']
                raise ExtractorError(f'{self.IE_NAME} said: {reason}', expected=True)
            raise

        title = response['title']
        description = strip_or_none(response.get('description'))

        # type_id == 1 marks a series: delegate each episode back to this extractor
        if response.get('type_id') == 1:
            episodes = [self.url_result('filmon:' + episode_id) for episode_id in response.get('episodes', [])]
            return self.playlist_result(episodes, video_id, title, description)

        QUALITY = qualities(('low', 'high'))
        formats = [{
            'format_id': format_id,
            'url': stream['url'],
            'ext': 'mp4',
            'quality': QUALITY(stream.get('quality')),
            'protocol': 'm3u8_native',
        } for format_id, stream in response.get('streams', {}).items() if stream.get('url')]

        # expose the poster itself alongside its pre-scaled thumbs
        poster = response.get('poster', {})
        thumbs = {**poster.get('thumbs', {}), 'poster': poster}
        thumbnails = [{
            'id': thumb_id,
            'url': thumb['url'],
            'width': int_or_none(thumb.get('width')),
            'height': int_or_none(thumb.get('height')),
        } for thumb_id, thumb in thumbs.items() if thumb.get('url')]

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'description': description,
            'thumbnails': thumbnails,
        }
class FilmOnChannelIE(InfoExtractor):
    """Extractor for FilmOn channels, both live streams and VOD channels."""
    IE_NAME = 'filmon:channel'
    _VALID_URL = r'https?://(?:www\.)?filmon\.com/(?:tv|channel)/(?P<id>[a-z0-9-]+)'
    _TESTS = [{
        # VOD
        'url': 'http://www.filmon.com/tv/sports-haters',
        'info_dict': {
            'id': '4190',
            'ext': 'mp4',
            'title': 'Sports Haters',
            'description': 'md5:dabcb4c1d9cfc77085612f1a85f8275d',
        },
    }, {
        # LIVE
        'url': 'https://www.filmon.com/channel/filmon-sports',
        'only_matching': True,
    }, {
        'url': 'https://www.filmon.com/tv/2894',
        'only_matching': True,
    }]
    # (name, width, height) triples for the statically hosted channel logos
    _THUMBNAIL_RES = [
        ('logo', 56, 28),
        ('big_logo', 106, 106),
        ('extra_big_logo', 300, 300),
    ]

    def _real_extract(self, url):
        channel_id = self._match_id(url)

        try:
            channel_data = self._download_json(
                'http://www.filmon.com/api-v2/channel/' + channel_id, channel_id)['data']
        except ExtractorError as err:
            # surface the API's own error message when available
            if isinstance(err.cause, HTTPError):
                reason = self._parse_json(err.cause.response.read().decode(), channel_id)['message']
                raise ExtractorError(f'{self.IE_NAME} said: {reason}', expected=True)
            raise

        # the URL may hold a slug; switch to the numeric id from the API
        channel_id = str(channel_data['id'])
        is_live = not channel_data.get('is_vod') and not channel_data.get('is_vox')
        title = channel_data['title']

        QUALITY = qualities(('low', 'high'))
        formats = []
        for stream in channel_data.get('streams', []):
            stream_url = stream.get('url')
            if not stream_url:
                continue
            if is_live:
                quality = stream.get('quality')
                formats.append({
                    'format_id': quality,
                    # this is an m3u8 stream, but we are deliberately not using _extract_m3u8_formats
                    # because it doesn't have bitrate variants anyway
                    'url': stream_url,
                    'ext': 'mp4',
                    'quality': QUALITY(quality),
                })
            else:
                formats.extend(self._extract_wowza_formats(
                    stream_url, channel_id, skip_protocols=['dash', 'rtmp', 'rtsp']))

        thumbnails = [{
            'id': name,
            'url': f'http://static.filmon.com/assets/channels/{channel_id}/{name}.png',
            'width': width,
            'height': height,
        } for name, width, height in self._THUMBNAIL_RES]

        return {
            'id': channel_id,
            'display_id': channel_data.get('alias'),
            'title': title,
            'description': channel_data.get('description'),
            'thumbnails': thumbnails,
            'formats': formats,
            'is_live': is_live,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/anchorfm.py | yt_dlp/extractor/anchorfm.py | from .common import InfoExtractor
from ..utils import (
clean_html,
float_or_none,
int_or_none,
str_or_none,
traverse_obj,
unified_timestamp,
)
class AnchorFMEpisodeIE(InfoExtractor):
    """Extractor for single podcast episodes hosted on anchor.fm."""
    _VALID_URL = r'https?://anchor\.fm/(?P<channel_name>\w+)/(?:embed/)?episodes/[\w-]+-(?P<episode_id>\w+)'
    _EMBED_REGEX = [rf'<iframe[^>]+\bsrc=[\'"](?P<url>{_VALID_URL})']
    _TESTS = [{
        'url': 'https://anchor.fm/lovelyti/episodes/Chrisean-Rock-takes-to-twitter-to-announce-shes-pregnant--Blueface-denies-he-is-the-father-e1tpt3d',
        'info_dict': {
            'id': 'e1tpt3d',
            'ext': 'mp3',
            'title': ' Chrisean Rock takes to twitter to announce she\'s pregnant, Blueface denies he is the father!',
            'description': 'md5:207d167de3e28ceb4ddc1ebf5a30044c',
            'thumbnail': 'https://s3-us-west-2.amazonaws.com/anchor-generated-image-bank/production/podcast_uploaded_nologo/1034827/1034827-1658438968460-5f3bfdf3601e8.jpg',
            'duration': 624.718,
            'uploader': 'Lovelyti ',
            'uploader_id': '991541',
            'channel': 'lovelyti',
            'modified_date': '20230121',
            'modified_timestamp': 1674285178,
            'release_date': '20230121',
            'release_timestamp': 1674285179,
            'episode_id': 'e1tpt3d',
        },
    }, {
        # embed url
        'url': 'https://anchor.fm/apakatatempo/embed/episodes/S2E75-Perang-Bintang-di-Balik-Kasus-Ferdy-Sambo-dan-Ismail-Bolong-e1shjqd',
        'info_dict': {
            'id': 'e1shjqd',
            'ext': 'mp3',
            'title': 'S2E75 Perang Bintang di Balik Kasus Ferdy Sambo dan Ismail Bolong',
            'description': 'md5:9e95ad9293bf00178bf8d33e9cb92c41',
            'duration': 1042.008,
            'thumbnail': 'https://s3-us-west-2.amazonaws.com/anchor-generated-image-bank/production/podcast_uploaded_episode400/2627805/2627805-1671590688729-4db3882ac9e4b.jpg',
            'release_date': '20221221',
            'release_timestamp': 1671595916,
            'modified_date': '20221221',
            'modified_timestamp': 1671590834,
            'channel': 'apakatatempo',
            'uploader': 'Podcast Tempo',
            'uploader_id': '2585461',
            'season': 'Season 2',
            'season_number': 2,
            'episode_id': 'e1shjqd',
        },
    }]
    _WEBPAGE_TESTS = [{
        'url': 'https://podcast.tempo.co/podcast/192/perang-bintang-di-balik-kasus-ferdy-sambo-dan-ismail-bolong',
        'info_dict': {
            'id': 'e1shjqd',
            'ext': 'mp3',
            'release_date': '20221221',
            'duration': 1042.008,
            'season': 'Season 2',
            'modified_timestamp': 1671590834,
            'uploader_id': '2585461',
            'modified_date': '20221221',
            'description': 'md5:9e95ad9293bf00178bf8d33e9cb92c41',
            'season_number': 2,
            'title': 'S2E75 Perang Bintang di Balik Kasus Ferdy Sambo dan Ismail Bolong',
            'release_timestamp': 1671595916,
            'episode_id': 'e1shjqd',
            'thumbnail': 'https://s3-us-west-2.amazonaws.com/anchor-generated-image-bank/production/podcast_uploaded_episode400/2627805/2627805-1671590688729-4db3882ac9e4b.jpg',
            'uploader': 'Podcast Tempo',
            'channel': 'apakatatempo',
        },
    }]

    def _real_extract(self, url):
        channel_name, episode_id = self._match_valid_url(url).group('channel_name', 'episode_id')
        api_data = self._download_json(f'https://anchor.fm/api/v3/episodes/{episode_id}', episode_id)

        def episode_field(*path, **kwargs):
            # shorthand for fields nested under the 'episode' object
            return traverse_obj(api_data, ('episode', *path), **kwargs)

        return {
            'id': episode_id,
            'title': episode_field('title'),
            'url': traverse_obj(api_data, ('episode', 'episodeEnclosureUrl'), ('episodeAudios', 0, 'url')),
            'ext': 'mp3',
            'vcodec': 'none',
            'thumbnail': episode_field('episodeImage'),
            'description': clean_html(episode_field(('description', 'descriptionPreview'), get_all=False)),
            'duration': float_or_none(episode_field('duration'), 1000),
            'modified_timestamp': unified_timestamp(episode_field('modified')),
            'release_timestamp': int_or_none(episode_field('publishOnUnixTimestamp')),
            'episode_id': episode_id,
            'uploader': traverse_obj(api_data, ('creator', 'name')),
            'uploader_id': str_or_none(traverse_obj(api_data, ('creator', 'userId'))),
            'season_number': int_or_none(episode_field('podcastSeasonNumber')),
            'channel': channel_name or traverse_obj(api_data, ('creator', 'vanitySlug')),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/oneplace.py | yt_dlp/extractor/oneplace.py | from .common import InfoExtractor
class OnePlacePodcastIE(InfoExtractor):
    """Extractor for sermon/podcast audio pages on oneplace.com."""
    _VALID_URL = r'https?://www\.oneplace\.com/[\w]+/[^/]+/listen/[\w-]+-(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.oneplace.com/ministries/a-daily-walk/listen/living-in-the-last-days-part-2-958461.html',
        'info_dict': {
            'id': '958461',
            'ext': 'mp3',
            'title': 'Living in the Last Days Part 2 | A Daily Walk with John Randall',
            'description': 'md5:fbb8f1cf21447ac54ecaa2887fc20c6e',
        },
    }, {
        'url': 'https://www.oneplace.com/ministries/ankerberg-show/listen/ep-3-relying-on-the-constant-companionship-of-the-holy-spirit-part-2-922513.html',
        'info_dict': {
            'id': '922513',
            'ext': 'mp3',
            'description': 'md5:8b810b4349aa40a5d033b4536fe428e1',
            'title': 'md5:ce10f7d8d5ddcf485ed8905ef109659d',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # MP3 URL is exposed either as an mp3-url attribute or on the player div
        media_url = self._search_regex((
            r'mp3-url\s*=\s*"([^"]+)',
            r'<div[^>]+id\s*=\s*"player"[^>]+data-media-url\s*=\s*"(?P<media_url>[^"]+)',
        ), webpage, 'media url')
        title = self._html_search_regex((
            r'<div[^>]class\s*=\s*"details"[^>]+>[^<]<h2[^>]+>(?P<content>[^>]+)>',
            self._meta_regex('og:title'), self._meta_regex('title'),
        ), webpage, 'title', group='content', default=None)
        description = self._html_search_regex(
            r'<div[^>]+class="[^"]+epDesc"[^>]*>\s*(?P<desc>.+?)\s*</div>',
            webpage, 'description', default=None)

        return {
            'id': video_id,
            'url': media_url,
            'ext': 'mp3',
            'vcodec': 'none',
            'title': title,
            'description': description,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nest.py | yt_dlp/extractor/nest.py | from .common import InfoExtractor
from ..utils import ExtractorError, float_or_none, update_url_query, url_or_none
from ..utils.traversal import traverse_obj
class NestIE(InfoExtractor):
    """Extractor for publicly shared Nest camera live streams."""
    _VALID_URL = r'https?://video\.nest\.com/(?:embedded/)?live/(?P<id>\w+)'
    _EMBED_REGEX = [rf'<iframe [^>]*\bsrc=[\'"](?P<url>{_VALID_URL})']
    _TESTS = [{
        'url': 'https://video.nest.com/embedded/live/4fvYdSo8AX?autoplay=0',
        'info_dict': {
            'id': '4fvYdSo8AX',
            'ext': 'mp4',
            'title': 'startswith:Outside ',
            'alt_title': 'Outside',
            'description': '<null>',
            'location': 'Los Angeles',
            'availability': 'public',
            'thumbnail': r're:https?://',
            'live_status': 'is_live',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'https://video.nest.com/live/4fvYdSo8AX',
        'only_matching': True,
    }]
    _WEBPAGE_TESTS = [{
        'url': 'https://www.pacificblue.biz/noyo-harbor-webcam/',
        'info_dict': {
            'id': '4fvYdSo8AX',
            'ext': 'mp4',
            'title': 'startswith:Outside ',
            'alt_title': 'Outside',
            'description': '<null>',
            'location': 'Los Angeles',
            'availability': 'public',
            'thumbnail': r're:https?://',
            'live_status': 'is_live',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        camera = self._download_json(
            'https://video.nest.com/api/dropcam/cameras.get_by_public_token',
            video_id, query={'token': video_id})['items'][0]

        uuid = camera.get('uuid')
        stream_domain = camera.get('live_stream_host')
        # both pieces are needed to build the HLS playlist URL
        if not stream_domain or not uuid:
            raise ExtractorError('Unable to construct playlist URL')

        thumbnail = None
        thumb_domain = camera.get('nexus_api_nest_domain_host')
        if thumb_domain:
            thumbnail = update_url_query(
                f'https://{thumb_domain}/get_image',
                {'uuid': uuid, 'public': video_id})

        return {
            'id': video_id,
            **traverse_obj(camera, {
                'description': ('description', {str}),
                'title': (('title', 'name', 'where'), {str}, filter, any),
                'alt_title': ('name', {str}),
                'location': ((('timezone', {lambda x: x.split('/')[1].replace('_', ' ')}), 'where'), {str}, filter, any),
            }),
            'thumbnail': thumbnail,
            'availability': self._availability(is_private=camera.get('is_public') is False),
            'formats': self._extract_m3u8_formats(
                f'https://{stream_domain}/nexus_aac/{uuid}/playlist.m3u8',
                video_id, 'mp4', live=True, query={'public': video_id}),
            'is_live': True,
        }
class NestClipIE(InfoExtractor):
    """Extractor for saved Nest camera clips."""
    _VALID_URL = r'https?://video\.nest\.com/(?:embedded/)?clip/(?P<id>\w+)'
    _EMBED_REGEX = [rf'<iframe [^>]*\bsrc=[\'"](?P<url>{_VALID_URL})']
    _TESTS = [{
        'url': 'https://video.nest.com/clip/f34c9dd237a44eca9a0001af685e3dff',
        'info_dict': {
            'id': 'f34c9dd237a44eca9a0001af685e3dff',
            'ext': 'mp4',
            'title': 'NestClip video #f34c9dd237a44eca9a0001af685e3dff',
            'thumbnail': 'https://clips.dropcam.com/f34c9dd237a44eca9a0001af685e3dff.jpg',
            'timestamp': 1735413474.468,
            'upload_date': '20241228',
        },
    }, {
        'url': 'https://video.nest.com/embedded/clip/34e0432adc3c46a98529443d8ad5aa76',
        'info_dict': {
            'id': '34e0432adc3c46a98529443d8ad5aa76',
            'ext': 'mp4',
            'title': 'Shootout at Veterans Boulevard at Fleur De Lis Drive',
            'thumbnail': 'https://clips.dropcam.com/34e0432adc3c46a98529443d8ad5aa76.jpg',
            'upload_date': '20230817',
            'timestamp': 1692262897.191,
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        clip_data = self._download_json(
            'https://video.nest.com/api/dropcam/videos.get_by_filename', video_id,
            query={'filename': f'{video_id}.mp4'})
        # the API returns a list; only the first item matters here
        return {
            'id': video_id,
            **traverse_obj(clip_data, ('items', 0, {
                'title': ('title', {str}),
                'url': ('download_url', {url_or_none}),
                'thumbnail': ('thumbnail_url', {url_or_none}),
                'timestamp': ('start_time', {float_or_none}),
            })),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tagesschau.py | yt_dlp/extractor/tagesschau.py | import re
from .common import InfoExtractor
from ..utils import (
UnsupportedError,
extract_attributes,
int_or_none,
js_to_json,
parse_iso8601,
try_get,
)
class TagesschauIE(InfoExtractor):
    """Extractor for media pages on tagesschau.de (ARD's news portal).

    A page may embed several players; each player's JSON configuration is
    carried in a ``data-config`` attribute on a ``<div>`` tag. Pages with
    more than one playable entry are returned as playlists.
    """
    # marked broken upstream; kept for reference
    _WORKING = False
    _VALID_URL = r'https?://(?:www\.)?tagesschau\.de/(?P<path>[^/]+/(?:[^/]+/)*?(?P<id>[^/#?]+?(?:-?[0-9]+)?))(?:~_?[^/#?]+?)?\.html'
    _TESTS = [{
        'url': 'http://www.tagesschau.de/multimedia/video/video-102143.html',
        'md5': 'ccb9359bf8c4795836e43759f3408a93',
        'info_dict': {
            'id': 'video-102143-1',
            'ext': 'mp4',
            'title': 'Regierungsumbildung in Athen: Neue Minister in Griechenland vereidigt',
            'duration': 138,
        },
    }, {
        'url': 'http://www.tagesschau.de/multimedia/sendung/ts-5727.html',
        'md5': '5c15e8f3da049e48829ec9786d835536',
        'info_dict': {
            'id': 'ts-5727-1',
            'ext': 'mp4',
            'title': 'Ganze Sendung',
            'duration': 932,
        },
    }, {
        # exclusive audio
        'url': 'http://www.tagesschau.de/multimedia/audio/audio-29417.html',
        'md5': '4bff8f23504df56a0d86ed312d654182',
        'info_dict': {
            'id': 'audio-29417-1',
            'ext': 'mp3',
            'title': 'EU-Gipfel: Im Verbrennerstreit hat Deutschland maximalen Schaden angerichtet',
        },
    }, {
        'url': 'http://www.tagesschau.de/inland/bnd-303.html',
        'md5': 'f049fa1698d7564e9ca4c3325108f034',
        'info_dict': {
            'id': 'bnd-303-1',
            'ext': 'mp3',
            'title': 'Das Siegel des Bundesnachrichtendienstes | dpa',
        },
    }, {
        'url': 'http://www.tagesschau.de/inland/afd-parteitag-135.html',
        'info_dict': {
            'id': 'afd-parteitag-135',
            'title': 'AfD',
        },
        'playlist_mincount': 15,
    }, {
        'url': 'https://www.tagesschau.de/multimedia/audio/audio-29417~player.html',
        'info_dict': {
            'id': 'audio-29417-1',
            'ext': 'mp3',
            'title': 'EU-Gipfel: Im Verbrennerstreit hat Deutschland maximalen Schaden angerichtet',
        },
    }, {
        'url': 'https://www.tagesschau.de/multimedia/audio/podcast-11km-327.html',
        'info_dict': {
            'id': 'podcast-11km-327',
            'ext': 'mp3',
            'title': 'Gewalt in der Kita – Wenn Erzieher:innen schweigen',
            'upload_date': '20230322',
            'timestamp': 1679482808,
            'thumbnail': 'https://www.tagesschau.de/multimedia/audio/podcast-11km-329~_v-original.jpg',
            'description': 'md5:dad059931fe4b3693e3656e93a249848',
        },
    }, {
        'url': 'http://www.tagesschau.de/multimedia/sendung/tsg-3771.html',
        'only_matching': True,
    }, {
        'url': 'http://www.tagesschau.de/multimedia/sendung/tt-3827.html',
        'only_matching': True,
    }, {
        'url': 'http://www.tagesschau.de/multimedia/sendung/nm-3475.html',
        'only_matching': True,
    }, {
        'url': 'http://www.tagesschau.de/multimedia/sendung/weltspiegel-3167.html',
        'only_matching': True,
    }, {
        'url': 'http://www.tagesschau.de/multimedia/tsvorzwanzig-959.html',
        'only_matching': True,
    }, {
        'url': 'http://www.tagesschau.de/multimedia/sendung/bab/bab-3299~_bab-sendung-209.html',
        'only_matching': True,
    }, {
        'url': 'http://www.tagesschau.de/multimedia/video/video-102303~_bab-sendung-211.html',
        'only_matching': True,
    }, {
        'url': 'http://www.tagesschau.de/100sekunden/index.html',
        'only_matching': True,
    }, {
        # playlist article with collapsing sections
        'url': 'http://www.tagesschau.de/wirtschaft/faq-freihandelszone-eu-usa-101.html',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        # fall back to the whole path when the URL carries no trailing media id
        video_id = mobj.group('id') or mobj.group('path')
        display_id = video_id.lstrip('-')
        webpage = self._download_webpage(url, display_id)
        title = self._html_search_regex(
            r'<span[^>]*class="headline"[^>]*>(.+?)</span>',
            webpage, 'title', default=None) or self._og_search_title(webpage, fatal=False)
        entries = []
        # scan every <div> tag: player configs live in their data-config attributes
        videos = re.findall(r'<div[^>]+>', webpage)
        # counts only divs that actually yield a usable media config;
        # used to build per-entry ids like '<display_id>-<num>'
        num = 0
        for video in videos:
            video = extract_attributes(video).get('data-config')
            if not video:
                continue
            video = self._parse_json(video, video_id, transform_source=js_to_json, fatal=False)
            video_formats = try_get(video, lambda x: x['mc']['_mediaArray'][0]['_mediaStreamArray'])
            if not video_formats:
                continue
            num += 1
            for video_format in video_formats:
                media_url = video_format.get('_stream') or ''
                formats = []
                if media_url.endswith('master.m3u8'):
                    formats = self._extract_m3u8_formats(media_url, video_id, 'mp4', m3u8_id='hls')
                elif media_url.endswith('.mp3'):
                    # plain audio stream; mark as audio-only
                    formats = [{
                        'url': media_url,
                        'vcodec': 'none',
                    }]
                if not formats:
                    continue
                entries.append({
                    'id': f'{display_id}-{num}',
                    'title': try_get(video, lambda x: x['mc']['_title']),
                    'duration': int_or_none(try_get(video, lambda x: x['mc']['_duration'])),
                    'formats': formats,
                })
        if not entries:
            raise UnsupportedError(url)
        # multiple players on the page -> return as a playlist
        if len(entries) > 1:
            return self.playlist_result(entries, display_id, title)
        return {
            'id': display_id,
            'title': title,
            'thumbnail': self._og_search_thumbnail(webpage),
            'formats': entries[0]['formats'],
            'timestamp': parse_iso8601(self._html_search_meta('date', webpage)),
            'description': self._og_search_description(webpage),
            'duration': entries[0]['duration'],
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nubilesporn.py | yt_dlp/extractor/nubilesporn.py | import re
from .common import InfoExtractor
from ..utils import (
clean_html,
float_or_none,
format_field,
get_element_by_class,
get_element_by_id,
get_element_html_by_class,
get_elements_by_class,
int_or_none,
unified_timestamp,
urlencode_postdata,
)
from ..utils.traversal import find_element, find_elements, traverse_obj
class NubilesPornIE(InfoExtractor):
    """Extractor for members-area videos on nubiles-porn.com (login required)."""
    _NETRC_MACHINE = 'nubiles-porn'
    _VALID_URL = r'''(?x)
        https://members\.nubiles-porn\.com/video/watch/(?P<id>\d+)
        (?:/(?P<display_id>[\w\-]+-s(?P<season>\d+)e(?P<episode>\d+)))?
    '''
    _TESTS = [{
        'url': 'https://members.nubiles-porn.com/video/watch/165320/trying-to-focus-my-one-track-mind-s3e1',
        'md5': 'fa7f09da8027c35e4bdf0f94f55eac82',
        'info_dict': {
            'id': '165320',
            'title': 'Trying To Focus My One Track Mind - S3:E1',
            'ext': 'mp4',
            'display_id': 'trying-to-focus-my-one-track-mind-s3e1',
            'thumbnail': 'https://images.nubiles-porn.com/videos/trying_to_focus_my_one_track_mind/samples/cover1280.jpg',
            'description': 'md5:81f3d4372e0e39bff5c801da277a5141',
            'timestamp': 1676160000,
            'upload_date': '20230212',
            'channel': 'Younger Mommy',
            'channel_id': '64',
            'channel_url': 'https://members.nubiles-porn.com/video/website/64',
            'like_count': int,
            'average_rating': float,
            'age_limit': 18,
            'categories': ['Big Boobs', 'Big Naturals', 'Blowjob', 'Brunette', 'Cowgirl', 'Girl Orgasm', 'Girl-Boy',
                           'Glasses', 'Hardcore', 'Milf', 'Shaved Pussy', 'Tattoos', 'YoungerMommy.com'],
            'tags': list,
            'cast': ['Kenzie Love'],
            'availability': 'needs_auth',
            'series': 'Younger Mommy',
            'series_id': '64',
            'season': 'Season 3',
            'season_number': 3,
            'episode': 'Episode 1',
            'episode_number': 1,
        },
    }]

    def _perform_login(self, username, password):
        # grab the hidden CSRF fields from the login form, then post credentials
        login_page = self._download_webpage('https://nubiles-porn.com/login', video_id=None)
        form_data = self._hidden_inputs(login_page)
        form_data.update({'username': username, 'password': password})
        self._request_webpage('https://nubiles-porn.com/authentication/login', None, data=urlencode_postdata(form_data))

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        video_id = mobj.group('id')
        page = self._download_webpage(url, video_id)

        media_info = self._parse_html5_media_entries(
            url, get_element_by_class('watch-page-video-wrapper', page), video_id)[0]
        channel_id, channel_name = self._search_regex(
            r'/video/website/(?P<id>\d+).+>(?P<name>\w+).com', get_element_html_by_class('site-link', page) or '',
            'channel', fatal=False, group=('id', 'name')) or (None, None)
        # split the CamelCase site name, e.g. 'YoungerMommy' -> 'Younger Mommy'
        pretty_channel = re.sub(r'([^A-Z]+)([A-Z]+)', r'\1 \2', channel_name) if channel_name else None

        return {
            'id': video_id,
            'title': self._search_regex('<h2>([^<]+)</h2>', page, 'title', fatal=False),
            'formats': media_info.get('formats'),
            'display_id': mobj.group('display_id'),
            'thumbnail': media_info.get('thumbnail'),
            'description': clean_html(get_element_html_by_class('content-pane-description', page)),
            'timestamp': unified_timestamp(get_element_by_class('date', page)),
            'channel': pretty_channel,
            'channel_id': channel_id,
            'channel_url': format_field(channel_id, None, 'https://members.nubiles-porn.com/video/website/%s'),
            'like_count': int_or_none(get_element_by_id('likecount', page)),
            'average_rating': float_or_none(get_element_by_class('score', page)),
            'age_limit': 18,
            'categories': traverse_obj(page, ({find_element(cls='categories')}, {find_elements(cls='btn')}, ..., {clean_html})),
            'tags': traverse_obj(page, ({find_elements(cls='tags')}, 1, {find_elements(cls='btn')}, ..., {clean_html})),
            'cast': get_elements_by_class('content-pane-performer', page),
            'availability': 'needs_auth',
            'series': channel_name,
            'series_id': channel_id,
            'season_number': int_or_none(mobj.group('season')),
            'episode_number': int_or_none(mobj.group('episode')),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/jixie.py | yt_dlp/extractor/jixie.py | from .common import InfoExtractor
from ..utils import clean_html, float_or_none, traverse_obj, try_call
class JixieBaseIE(InfoExtractor):
    """
    API Reference:
        https://jixie.atlassian.net/servicedesk/customer/portal/2/article/1339654214?src=-1456335525,
        https://scripts.jixie.media/jxvideo.3.1.min.js
    """

    def _extract_data_from_jixie_id(self, display_id, video_id, webpage):
        """Build an info dict from the public Jixie stream API for *video_id*."""
        stream_data = self._download_json(
            'https://apidam.jixie.io/api/public/stream', display_id,
            query={'metadata': 'full', 'video_id': video_id})['data']

        formats, subtitles = [], {}
        for stream in stream_data['streams']:
            if stream.get('type') != 'HLS':
                # progressive MP4 rendition
                formats.append({
                    'url': stream.get('url'),
                    'width': stream.get('width'),
                    'height': stream.get('height'),
                    'ext': 'mp4',
                })
                continue
            fmts, subs = self._extract_m3u8_formats_and_subtitles(stream.get('url'), display_id, ext='mp4')
            if stream_data.get('drm'):
                # flag DRM so downloads fail early with a clear message
                for fmt in fmts:
                    fmt['has_drm'] = True
            formats.extend(fmts)
            self._merge_subtitles(subs, target=subtitles)

        return {
            'id': video_id,
            'display_id': display_id,
            'formats': formats,
            'subtitles': subtitles,
            'title': stream_data.get('title') or self._html_search_meta(['og:title', 'twitter:title'], webpage),
            'description': (clean_html(traverse_obj(stream_data, ('metadata', 'description')))
                            or self._html_search_meta(['description', 'og:description', 'twitter:description'], webpage)),
            'thumbnails': traverse_obj(stream_data, ('metadata', 'thumbnails')),
            'duration': float_or_none(traverse_obj(stream_data, ('metadata', 'duration'))),
            'tags': try_call(lambda: (stream_data['metadata']['keywords'] or None).split(',')),
            'categories': try_call(lambda: (stream_data['metadata']['categories'] or None).split(',')),
            'uploader_id': stream_data.get('owner_id'),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/camtasia.py | yt_dlp/extractor/camtasia.py | import os
import urllib.parse
from .common import InfoExtractor
from ..utils import float_or_none
class CamtasiaEmbedIE(InfoExtractor):
    """Extracts Camtasia screencast players embedded in web pages.

    The page references a Camtasia XML configuration file; every <fileset>
    child element with a <uri> becomes one playlist entry.
    """
    _VALID_URL = False
    _WEBPAGE_TESTS = [
        {
            'url': 'http://www.ll.mit.edu/workshops/education/videocourses/antennas/lecture1/video/',
            'playlist': [{
                'md5': '0c5e352edabf715d762b0ad4e6d9ee67',
                'info_dict': {
                    'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
                    'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - video1',
                    'ext': 'flv',
                    'duration': 2235.90,
                },
            }, {
                'md5': '10e4bb3aaca9fd630e273ff92d9f3c63',
                'info_dict': {
                    'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final_PIP',
                    'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - pip',
                    'ext': 'flv',
                    'duration': 2235.93,
                },
            }],
            'info_dict': {
                'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
            },
            'skip': 'webpage dead',
        },
    ]

    def _extract_from_webpage(self, url, webpage):
        camtasia_cfg = self._search_regex(
            r'fo\.addVariable\(\s*"csConfigFile",\s*"([^"]+)"\s*\);',
            webpage, 'camtasia configuration file', default=None)
        if camtasia_cfg is None:
            return None

        title = self._html_search_meta('DC.title', webpage, fatal=True)

        camtasia_url = urllib.parse.urljoin(url, camtasia_cfg)
        camtasia_cfg = self._download_xml(
            camtasia_url, self._generic_id(url),
            note='Downloading camtasia configuration',
            errnote='Failed to download camtasia configuration')
        fileset_node = camtasia_cfg.find('./playlist/array/fileset')

        entries = []
        # Element.getchildren() was removed in Python 3.9; iterating the
        # element directly yields the same child elements
        for n in fileset_node:
            url_n = n.find('./uri')
            if url_n is None:
                continue

            entries.append({
                'id': os.path.splitext(url_n.text.rpartition('/')[2])[0],
                'title': f'{title} - {n.tag}',
                'url': urllib.parse.urljoin(url, url_n.text),
                'duration': float_or_none(n.find('./duration').text),
            })

        return {
            '_type': 'playlist',
            'entries': entries,
            'title': title,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/eltrecetv.py | yt_dlp/extractor/eltrecetv.py | from .common import InfoExtractor
class ElTreceTVIE(InfoExtractor):
    IE_DESC = 'El Trece TV (Argentina)'
    _VALID_URL = r'https?://(?:www\.)?eltrecetv\.com\.ar/[\w-]+/capitulos/temporada-\d+/(?P<id>[\w-]+)'
    _TESTS = [
        {
            'url': 'https://www.eltrecetv.com.ar/ahora-caigo/capitulos/temporada-2023/programa-del-061023/',
            'md5': '71a66673dc63f9a5939d97bfe4b311ba',
            'info_dict': {
                'id': 'AHCA05102023145553329621094',
                'ext': 'mp4',
                'title': 'AHORA CAIGO - Programa 06/10/23',
                'thumbnail': 'https://thumbs.vodgc.net/AHCA05102023145553329621094.JPG?649339',
            },
        },
        {
            'url': 'https://www.eltrecetv.com.ar/poco-correctos/capitulos/temporada-2023/programa-del-250923-invitada-dalia-gutmann/',
            'only_matching': True,
        },
        {
            'url': 'https://www.eltrecetv.com.ar/argentina-tierra-de-amor-y-venganza/capitulos/temporada-2023/atav-2-capitulo-121-del-250923/',
            'only_matching': True,
        },
        {
            'url': 'https://www.eltrecetv.com.ar/ahora-caigo/capitulos/temporada-2023/programa-del-250923/',
            'only_matching': True,
        },
        {
            'url': 'https://www.eltrecetv.com.ar/pasaplatos/capitulos/temporada-2023/pasaplatos-el-restaurante-del-250923/',
            'only_matching': True,
        },
        {
            'url': 'https://www.eltrecetv.com.ar/el-galpon/capitulos/temporada-2023/programa-del-160923-invitado-raul-lavie/',
            'only_matching': True,
        },
    ]

    def _real_extract(self, url):
        slug = self._match_id(url)
        webpage = self._download_webpage(url, slug)
        config = self._search_json(
            r'Fusion.globalContent\s*=', webpage, 'content', slug)['promo_items']['basic']['embed']['config']
        video_url = config['m3u8']
        video_id = self._search_regex(r'/(\w+)\.m3u8', video_url, 'video id', default=slug)

        formats, subtitles = self._extract_m3u8_formats_and_subtitles(video_url, video_id, 'mp4', m3u8_id='hls')
        # Synthesize progressive HTTP formats by stripping the per-track HLS
        # playlist suffix; derive the slice from the suffix itself instead of
        # the previous magic '[:-23]', which silently depended on its length
        track_suffix = '/tracks-v1a1/index.m3u8'
        formats.extend([{
            'url': f['url'][:-len(track_suffix)],
            'format_id': f['format_id'].replace('hls', 'http'),
            'width': f.get('width'),
            'height': f.get('height'),
        } for f in formats if f['url'].endswith(track_suffix) and f.get('height') != 1080])

        return {
            'id': video_id,
            'title': config.get('title'),
            'thumbnail': config.get('thumbnail'),
            'formats': formats,
            'subtitles': subtitles,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tf1.py | yt_dlp/extractor/tf1.py | import json
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_iso8601,
try_get,
)
class TF1IE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?tf1\.fr/[^/]+/(?P<program_slug>[^/]+)/videos/(?P<id>[^/?&#]+)\.html'
_TESTS = [{
'url': 'https://www.tf1.fr/tmc/quotidien-avec-yann-barthes/videos/quotidien-premiere-partie-11-juin-2019.html',
'info_dict': {
'id': '13641379',
'ext': 'mp4',
'title': 'md5:f392bc52245dc5ad43771650c96fb620',
'description': 'md5:a02cdb217141fb2d469d6216339b052f',
'upload_date': '20190611',
'timestamp': 1560273989,
'duration': 1738,
'series': 'Quotidien avec Yann Barthès',
'tags': ['intégrale', 'quotidien', 'Replay'],
},
'params': {
# Sometimes wat serves the whole file with the --test option
'skip_download': True,
},
}, {
'url': 'https://www.tf1.fr/tmc/burger-quiz/videos/burger-quiz-du-19-aout-2023-s03-episode-21-85585666.html',
'info_dict': {
'id': '14010600',
'ext': 'mp4',
'title': 'Burger Quiz - S03 EP21 avec Eye Haidara, Anne Depétrini, Jonathan Zaccaï et Pio Marmaï',
'thumbnail': 'https://photos.tf1.fr/1280/720/burger-quiz-11-9adb79-0@1x.jpg',
'description': 'Manu Payet recevra Eye Haidara, Anne Depétrini, Jonathan Zaccaï et Pio Marmaï.',
'upload_date': '20230819',
'timestamp': 1692469471,
'season_number': 3,
'series': 'Burger Quiz',
'episode_number': 21,
'season': 'Season 3',
'tags': 'count:13',
'episode': 'Episode 21',
'duration': 2312,
},
'params': {'skip_download': 'm3u8'},
}, {
'url': 'http://www.tf1.fr/tf1/koh-lanta/videos/replay-koh-lanta-22-mai-2015.html',
'only_matching': True,
}, {
'url': 'http://www.tf1.fr/hd1/documentaire/videos/mylene-farmer-d-une-icone.html',
'only_matching': True,
}]
def _real_extract(self, url):
program_slug, slug = self._match_valid_url(url).groups()
video = self._download_json(
'https://www.tf1.fr/graphql/web', slug, query={
'id': '9b80783950b85247541dd1d851f9cc7fa36574af015621f853ab111a679ce26f',
'variables': json.dumps({
'programSlug': program_slug,
'slug': slug,
}),
})['data']['videoBySlug']
wat_id = video['streamId']
tags = []
for tag in (video.get('tags') or []):
label = tag.get('label')
if not label:
continue
tags.append(label)
decoration = video.get('decoration') or {}
thumbnails = []
for source in (try_get(decoration, lambda x: x['image']['sources'], list) or []):
source_url = source.get('url')
if not source_url:
continue
thumbnails.append({
'url': source_url,
'width': int_or_none(source.get('width')),
})
return {
'_type': 'url_transparent',
'id': wat_id,
'url': 'wat:' + wat_id,
'title': video.get('title'),
'thumbnails': thumbnails,
'description': decoration.get('description'),
'timestamp': parse_iso8601(video.get('date')),
'duration': int_or_none(try_get(video, lambda x: x['publicPlayingInfos']['duration'])),
'tags': tags,
'series': decoration.get('programLabel'),
'season_number': int_or_none(video.get('season')),
'episode_number': int_or_none(video.get('episode')),
}
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/cbsnews.py | yt_dlp/extractor/cbsnews.py | import base64
import re
import urllib.parse
import zlib
from .anvato import AnvatoIE
from .common import InfoExtractor
from ..utils import (
ExtractorError,
UserNotLive,
determine_ext,
float_or_none,
format_field,
int_or_none,
make_archive_id,
mimetype2ext,
parse_duration,
smuggle_url,
traverse_obj,
url_or_none,
)
class CBSNewsBaseIE(InfoExtractor):
_LOCALES = {
'atlanta': None,
'baltimore': 'BAL',
'boston': 'BOS',
'chicago': 'CHI',
'colorado': 'DEN',
'detroit': 'DET',
'losangeles': 'LA',
'miami': 'MIA',
'minnesota': 'MIN',
'newyork': 'NY',
'philadelphia': 'PHI',
'pittsburgh': 'PIT',
'sacramento': 'SAC',
'sanfrancisco': 'SF',
'texas': 'DAL',
}
_LOCALE_RE = '|'.join(map(re.escape, _LOCALES))
_ANVACK = '5VD6Eyd6djewbCmNwBFnsJj17YAvGRwl'
def _get_item(self, webpage, display_id):
return traverse_obj(self._search_json(
r'CBSNEWS\.defaultPayload\s*=', webpage, 'payload', display_id,
default={}), ('items', 0, {dict})) or {}
    def _get_video_url(self, item):
        # The media URL lives under either 'video' or 'video2' depending on the
        # page; return the first one that is a valid URL, else None
        return traverse_obj(item, 'video', 'video2', expected_type=url_or_none)
def _extract_playlist(self, webpage, playlist_id):
entries = [self.url_result(embed_url, CBSNewsEmbedIE) for embed_url in re.findall(
r'<iframe[^>]+data-src="(https?://(?:www\.)?cbsnews\.com/embed/video/[^#]*#[^"]+)"', webpage)]
if entries:
return self.playlist_result(
entries, playlist_id, self._html_search_meta(['og:title', 'twitter:title'], webpage),
self._html_search_meta(['og:description', 'twitter:description', 'description'], webpage))
    def _extract_video(self, item, video_url, video_id):
        """Build an info dict (or Anvato url_result) for a single video item.

        Direct MP4 URLs are used as-is; m3u8 manifests are probed for an
        embedded Anvato ID first, since the Anvato source is higher quality.
        """
        # MIME type from the payload wins; otherwise guess from the URL extension
        if mimetype2ext(item.get('format'), default=determine_ext(video_url)) == 'mp4':
            formats = [{'url': video_url, 'ext': 'mp4'}]
        else:
            manifest = self._download_webpage(video_url, video_id, note='Downloading m3u8 information')
            anvato_id = self._search_regex(r'anvato-(\d+)', manifest, 'Anvato ID', default=None)
            # Prefer Anvato if available; cbsnews.com m3u8 formats are re-encoded from Anvato source
            if anvato_id:
                return self.url_result(
                    smuggle_url(f'anvato:{self._ANVACK}:{anvato_id}', {'token': 'default'}),
                    AnvatoIE, url_transparent=True, _old_archive_ids=[make_archive_id(self, anvato_id)])
            formats, _ = self._parse_m3u8_formats_and_subtitles(
                manifest, video_url, 'mp4', m3u8_id='hls', video_id=video_id)

        def get_subtitles(subs_url):
            # Wrap a caption URL in the standard subtitles dict; None if invalid
            return {
                'en': [{
                    'url': subs_url,
                    'ext': 'dfxp',  # TTAF1
                }],
            } if url_or_none(subs_url) else None

        # Season/episode numbers are only meaningful for full episodes
        episode_meta = traverse_obj(item, {
            'season_number': ('season', {int_or_none}),
            'episode_number': ('episode', {int_or_none}),
        }) if item.get('isFullEpisode') else {}

        return {
            'id': video_id,
            'formats': formats,
            **traverse_obj(item, {
                'title': (None, ('fulltitle', 'title')),
                'description': 'dek',
                'timestamp': ('timestamp', {float_or_none(scale=1000)}),
                'duration': ('duration', {float_or_none}),
                'subtitles': ('captions', {get_subtitles}),
                'thumbnail': ('images', ('hd', 'sd'), {url_or_none}),
                'is_live': ('type', {lambda x: x == 'live'}),
            }, get_all=False),
            **episode_meta,
        }
class CBSNewsEmbedIE(CBSNewsBaseIE):
IE_NAME = 'cbsnews:embed'
_VALID_URL = r'https?://(?:www\.)?cbsnews\.com/embed/video[^#]*#(?P<id>.+)'
_TESTS = [{
'url': 'https://www.cbsnews.com/embed/video/?v=1.c9b5b61492913d6660db0b2f03579ef25e86307a#1Vb7b9s2EP5XBAHbT6Gt98PAMKTJ0se6LVjWYWtdGBR1stlIpEBSTtwi%2F%2FvuJNkNhmHdGxgM2NL57vjd6zt%2B8PngdN%2Fyg79qeGvhzN%2FLGrS%2F%2BuBLB531V28%2B%2BO7Qg7%2Fy97r2z3xZ42NW8yLhDbA0S0KWlHnIijwKWJBHZZnHBa8Cgbpdf%2F89NM9Hi9fXifhpr8sr%2FlP848tn%2BTdXycX25zh4cdX%2FvHl6PmmPqnWQv9w8Ed%2B9GjYRim07bFEqdG%2BZVHuwTm65A7bVRrYtR5lAyMox7pigF6W4k%2By91mjspGsJ%2BwVae4%2BsvdnaO1p73HkXs%2FVisUDTGm7R8IcdnOROeq%2B19qT1amhA1VJtPenoTUgrtfKc9m7Rq8dP7nnjwOB7wg7ADdNt7VX64DWAWlKhPtmDEq22g4GF99x6Dk9E8OSsankHXqPNKDxC%2FdK7MLKTircTDgsI3mmj4OBdSq64dy7fd1x577RU1rt4cvMtOaulFYOd%2FLewRWvDO9lIgXFpZSnkZmjbv5SxKTPoQXClFbpsf%2Fhbbpzs0IB3vb8KkyzJQ%2BywOAgCrMpgRrz%2BKk4fvb7kFbR4XJCu0gAdtNO7woCwZTu%2BBUs9bam%2Fds71drVerpeisgrubLjAB4nnOSkWQnfr5W6o1ku5Xpr1MgrCbL0M0vUyDtfLLK15WiYp47xKWSLyjFVpwVmVJSLIoCjSOFkv3W7oKsVliwZJcB9nwXpZ5GEQQwY8jNKqKCBrgjTLeFxgdCIpazojDgnRtn43J6kG7nZ6cAbxh0EeFFk4%2B1u867cY5u4344n%2FxXjCqAjucdTHgLKojNKmSfO8KRsOFY%2FzKEYCKEJBzv90QA9nfm9gL%2BHulaFqUkz9ULUYxl62B3U%2FRVNLA8IhggaPycOoBuwOCESciDQVSSUgiOMsROB%2FhKfwCKOzEk%2B4k6rWd4uuT%2FwTDz7K7t3d3WLO8ISD95jSPQbayBacthbz86XVgxHwhex5zawzgDOmtp%2F3GPcXn0VXHdSS029%2Fj99UC%2FwJUvyKQ%2FzKyixIEVlYJOn4RxxuaH43Ty9fbJ5OObykHH435XAzJTHeOF4hhEUXD8URe%2FQ%2FBT%2BMpf8d5GN02Ox%2FfiGsl7TA7POu1xZ5%2BbTzcAVKMe48mqcC21hkacVEVScM26liVVBnrKkC4CLKyzAvHu0lhEaTKMFwI3a4SN9MsrfYzdBLq2vkwRD1gVviLT8kY9h2CHH6Y%2Bix6609weFtey4ESp60WtyeWMy%2BsmBuhsoKIyuoT%2Bq2R%2FrW5qi3g%2FvzS2j40DoixDP8%2BKP0yUdpXJ4l6Vla%2Bg9vce%2BC4yM5YlUcbA%2F0jLKdpmTwvsdN5z88nAIe08%2F0HgxeG1iv%2B6Hlhjh7uiW0SDzYNI92L401uha3JKYk268UVRzdOzNQvAaJqoXzAc80dAV440NZ1WVVAAMRYQ2KrGJFmDUsq8saWSnjvIj8t78y%2FRa3JRnbHVfyFpfwoDiGpPgjzekyUiKNlU3OMlwuLMmzgvEojllYVE2Z1HhImvsnk%2BuhusTEoB21PAtSFodeFK3iYhXEH9WOG2%2FkOE833sfeG%2Ff5cfHtEFNXgYes0%2FXj7aGivUgJ9XpusCtoNcNYVVnJVrrDo0OmJAutHCpuZul4W9lLcfy7BnuLPT02%2ByXsCTk%2B9zhzswIN04YueNSK%2BPtM0jS88QdLqSLJDTLsuGZJNolm2yO0PXh3UPnz9Ix5bfIAqxPjvETQsDCEiPG4QbqNyhBZISxybLnZYCrW5H3Axp690%2F0BJdXtDZ5ITuM4
xj3f4oUHGzc5JeJmZKpp%2FjwKh4wMV%2FV1yx3emLoR0MwbG4K%2F%2BZgVep3PnzXGDHZ6a3i%2Fk%2BJrONDN13%2Bnq6tBTYk4o7cLGhBtqCC4KwacGHpEVuoH5JNro%2FE6JfE6d5RydbiR76k%2BW5wioDHBIjw1euhHjUGRB0y5A97KoaPx6MlL%2BwgboUVtUFRI%2FLemgTpdtF59ii7pab08kuPcfWzs0l%2FRI5takWnFpka0zOgWRtYcuf9aIxZMxlwr6IiGpsb6j2DQUXPl%2FimXI599Ev7fWjoPD78A',
'info_dict': {
'id': '6ZP4cXvo9FaX3VLH7MF4CgY30JFpY_GA',
'ext': 'mp4',
'title': 'Cops investigate gorilla incident at Cincinnati Zoo',
'description': 'md5:fee7441ab8aaeb3c693482394738102b',
'duration': 350,
'timestamp': 1464719713,
'upload_date': '20160531',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {'skip_download': 'm3u8'},
}]
    def _real_extract(self, url):
        # The URL fragment is percent-encoded, base64-encoded, raw-deflate
        # compressed JSON (negative wbits => no zlib header); the video item
        # sits at video.items[0]
        item = traverse_obj(self._parse_json(zlib.decompress(base64.b64decode(
            urllib.parse.unquote(self._match_id(url))),
            -zlib.MAX_WBITS).decode(), None), ('video', 'items', 0, {dict})) or {}
        video_id = item['mpxRefId']
        video_url = self._get_video_url(item)
        if not video_url:
            raise ExtractorError('This video is no longer available', expected=True)
        return self._extract_video(item, video_url, video_id)
class CBSNewsIE(CBSNewsBaseIE):
IE_NAME = 'cbsnews'
IE_DESC = 'CBS News'
_VALID_URL = r'https?://(?:www\.)?cbsnews\.com/(?:news|video)/(?P<id>[\w-]+)'
_TESTS = [
{
# 60 minutes
'url': 'http://www.cbsnews.com/news/artificial-intelligence-positioned-to-be-a-game-changer/',
'info_dict': {
'id': 'Y_nf_aEg6WwO9OLAq0MpKaPgfnBUxfW4',
'ext': 'flv',
'title': 'Artificial Intelligence, real-life applications',
'description': 'md5:a7aaf27f1b4777244de8b0b442289304',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 317,
'uploader': 'CBSI-NEW',
'timestamp': 1476046464,
'upload_date': '20161009',
},
'skip': 'This video is no longer available',
},
{
'url': 'https://www.cbsnews.com/video/fort-hood-shooting-army-downplays-mental-illness-as-cause-of-attack/',
'info_dict': {
'id': 'SNJBOYzXiWBOvaLsdzwH8fmtP1SCd91Y',
'ext': 'mp4',
'title': 'Fort Hood shooting: Army downplays mental illness as cause of attack',
'description': 'md5:4a6983e480542d8b333a947bfc64ddc7',
'upload_date': '20140404',
'timestamp': 1396650660,
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 205,
'subtitles': {
'en': [{
'ext': 'dfxp',
}],
},
},
'params': {
'skip_download': 'm3u8',
},
},
{
# 48 hours
'url': 'http://www.cbsnews.com/news/maria-ridulph-murder-will-the-nations-oldest-cold-case-to-go-to-trial-ever-get-solved/',
'info_dict': {
'id': 'maria-ridulph-murder-will-the-nations-oldest-cold-case-to-go-to-trial-ever-get-solved',
'title': 'Cold as Ice',
'description': 'Can a childhood memory solve the 1957 murder of 7-year-old Maria Ridulph?',
},
'playlist_mincount': 7,
},
{
'url': 'https://www.cbsnews.com/video/032823-cbs-evening-news/',
'info_dict': {
'id': '_2wuO7hD9LwtyM_TwSnVwnKp6kxlcXgE',
'ext': 'mp4',
'title': 'CBS Evening News, March 28, 2023',
'description': 'md5:db20615aae54adc1d55a1fd69dc75d13',
'duration': 1189,
'timestamp': 1680042600,
'upload_date': '20230328',
'season': 'Season 2023',
'season_number': 2023,
'episode': 'Episode 83',
'episode_number': 83,
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {
'skip_download': 'm3u8',
},
},
]
    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # Article pages may embed several videos; surface them as a playlist
        playlist = self._extract_playlist(webpage, display_id)
        if playlist:
            return playlist
        item = self._get_item(webpage, display_id)
        video_id = item.get('mpxRefId') or display_id
        video_url = self._get_video_url(item)
        if not video_url:
            # raise_no_formats may only warn (with --ignore-no-formats-error),
            # so execution can continue into _extract_video afterwards
            self.raise_no_formats('No video content was found', expected=True, video_id=video_id)
        return self._extract_video(item, video_url, video_id)
class CBSLocalBaseIE(CBSNewsBaseIE):
    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        item = self._get_item(webpage, display_id)
        video_id = item.get('mpxRefId') or display_id
        anvato_id = None
        video_url = self._get_video_url(item)
        if not video_url:
            # No direct media in defaultPayload: look for an Anvato player
            # iframe, whose base64url 'key' parameter encodes the player config
            anv_params = self._search_regex(
                r'<iframe[^>]+\bdata-src="https?://w3\.mp\.lura\.live/player/prod/v3/anvload\.html\?key=([^"]+)"',
                webpage, 'Anvato URL', default=None)
            if not anv_params:
                # Last resort: the page may be a multi-embed article
                playlist = self._extract_playlist(webpage, display_id)
                if playlist:
                    return playlist
                self.raise_no_formats('No video content was found', expected=True, video_id=video_id)
            # Appending '===' guarantees valid base64 padding for any input length
            anv_data = self._parse_json(base64.urlsafe_b64decode(f'{anv_params}===').decode(), video_id)
            anvato_id = anv_data['v']
            return self.url_result(
                smuggle_url(f'anvato:{anv_data.get("anvack") or self._ANVACK}:{anvato_id}', {
                    'token': anv_data.get('token') or 'default',
                }), AnvatoIE, url_transparent=True, _old_archive_ids=[make_archive_id(self, anvato_id)])
        return self._extract_video(item, video_url, video_id)
class CBSLocalIE(CBSLocalBaseIE):
_VALID_URL = rf'https?://(?:www\.)?cbsnews\.com/(?:{CBSNewsBaseIE._LOCALE_RE})/(?:live/)?video/(?P<id>[\w-]+)'
_TESTS = [{
# Anvato video via defaultPayload JSON
'url': 'https://www.cbsnews.com/newyork/video/1st-cannabis-dispensary-opens-in-queens/',
'info_dict': {
'id': '6376747',
'ext': 'mp4',
'title': '1st cannabis dispensary opens in Queens',
'description': 'The dispensary is women-owned and located in Jamaica.',
'uploader': 'CBS',
'duration': 20,
'timestamp': 1680193657,
'upload_date': '20230330',
'categories': ['Stations\\Spoken Word\\WCBSTV', 'Content\\Google', 'Content\\News', 'Content\\News\\Local News'],
'tags': 'count:11',
'thumbnail': 're:^https?://.*',
'_old_archive_ids': ['cbslocal 6376747'],
},
'params': {'skip_download': 'm3u8'},
}, {
# cbsnews.com video via defaultPayload JSON
'url': 'https://www.cbsnews.com/newyork/live/video/20230330171655-the-city-is-sounding-the-alarm-on-dangerous-social-media-challenges/',
'info_dict': {
'id': 'sJqfw7YvgSC6ant2zVmzt3y1jYKoL5J3',
'ext': 'mp4',
'title': 'the city is sounding the alarm on dangerous social media challenges',
'description': 'md5:8eccc9b1b73be5138a52e9c4350d2cd6',
'thumbnail': 'https://images-cbsn.cbsnews.com/prod/2023/03/30/story_22509622_1680196925.jpg',
'duration': 41.0,
'timestamp': 1680196615,
'upload_date': '20230330',
},
'params': {'skip_download': 'm3u8'},
}]
class CBSLocalArticleIE(CBSLocalBaseIE):
_VALID_URL = rf'https?://(?:www\.)?cbsnews\.com/(?:{CBSNewsBaseIE._LOCALE_RE})/news/(?P<id>[\w-]+)'
_TESTS = [{
# Anvato video via iframe embed
'url': 'https://www.cbsnews.com/newyork/news/mta-station-agents-leaving-their-booths-to-provide-more-direct-customer-service/',
'playlist_count': 2,
'info_dict': {
'id': 'mta-station-agents-leaving-their-booths-to-provide-more-direct-customer-service',
'title': 'MTA station agents begin leaving their booths to provide more direct customer service',
'description': 'The more than 2,200 agents will provide face-to-face customer service to passengers.',
},
}, {
'url': 'https://www.cbsnews.com/losangeles/news/safety-advocates-say-fatal-car-seat-failures-are-public-health-crisis/',
'md5': 'f0ee3081e3843f575fccef901199b212',
'info_dict': {
'id': '3401037',
'ext': 'mp4',
'title': 'Safety Advocates Say Fatal Car Seat Failures Are \'Public Health Crisis\'',
'thumbnail': 're:^https?://.*',
'timestamp': 1463440500,
'upload_date': '20160516',
},
'skip': 'Video has been removed',
}]
class CBSNewsLiveBaseIE(CBSNewsBaseIE):
    def _get_id(self, url):
        """Return the CBSN edition/rundown ID for *url* (subclass hook)."""
        raise NotImplementedError('This method must be implemented by subclasses')

    def _real_extract(self, url):
        video_id = self._get_id(url)
        if not video_id:
            raise ExtractorError('Livestream is not available', expected=True)
        # First rundown entry of the CBSN live feed for this edition
        data = traverse_obj(self._download_json(
            'https://feeds-cbsn.cbsnews.com/2.0/rundown/', video_id, query={
                'partner': 'cbsnsite',
                'edition': video_id,
                'type': 'live',
            }), ('navigation', 'data', 0, {dict}))
        # Prefer the DAI URL, then the plain videoUrl/base; first valid one wins
        video_url = traverse_obj(data, (('videoUrlDAI', ('videoUrl', 'base')), {url_or_none}), get_all=False)
        if not video_url:
            raise UserNotLive(video_id=video_id)
        formats, subtitles = self._extract_m3u8_formats_and_subtitles(video_url, video_id, 'mp4', m3u8_id='hls')
        return {
            'id': video_id,
            'formats': formats,
            'subtitles': subtitles,
            'is_live': True,
            **traverse_obj(data, {
                'title': 'headline',
                'description': 'rundown_slug',
                'thumbnail': ('images', 'thumbnail_url_hd', {url_or_none}),
            }),
        }
class CBSLocalLiveIE(CBSNewsLiveBaseIE):
_VALID_URL = rf'https?://(?:www\.)?cbsnews\.com/(?P<id>{CBSNewsBaseIE._LOCALE_RE})/live/?(?:[?#]|$)'
_TESTS = [{
'url': 'https://www.cbsnews.com/losangeles/live/',
'info_dict': {
'id': 'CBSN-LA',
'ext': 'mp4',
'title': str,
'description': r're:KCBS/CBSN_LA.CRISPIN.\w+.RUNDOWN \w+ \w+',
'thumbnail': r're:^https?://.*\.jpg$',
'live_status': 'is_live',
},
'params': {'skip_download': 'm3u8'},
}]
    def _get_id(self, url):
        # Map the locale slug (e.g. 'losangeles') to its CBSN edition ID
        # (e.g. 'CBSN-LA'); locales without an edition code yield a falsy value,
        # which the base class turns into a "not available" error
        return format_field(self._LOCALES, self._match_id(url), 'CBSN-%s')
class CBSNewsLiveIE(CBSNewsLiveBaseIE):
IE_NAME = 'cbsnews:live'
IE_DESC = 'CBS News Livestream'
_VALID_URL = r'https?://(?:www\.)?cbsnews\.com/live/?(?:[?#]|$)'
_TESTS = [{
'url': 'https://www.cbsnews.com/live/',
'info_dict': {
'id': 'CBSN-US',
'ext': 'mp4',
'title': str,
'description': r're:\w+ \w+ CRISPIN RUNDOWN',
'thumbnail': r're:^https?://.*\.jpg$',
'live_status': 'is_live',
},
'params': {'skip_download': 'm3u8'},
}]
    def _get_id(self, url):
        # The national livestream always uses the same edition ID
        return 'CBSN-US'
class CBSNewsLiveVideoIE(InfoExtractor):
IE_NAME = 'cbsnews:livevideo'
IE_DESC = 'CBS News Live Videos'
_VALID_URL = r'https?://(?:www\.)?cbsnews\.com/live/video/(?P<id>[^/?#]+)'
# Live videos get deleted soon. See http://www.cbsnews.com/live/ for the latest examples
_TESTS = [{
'url': 'http://www.cbsnews.com/live/video/clinton-sanders-prepare-to-face-off-in-nh/',
'info_dict': {
'id': 'clinton-sanders-prepare-to-face-off-in-nh',
'ext': 'mp4',
'title': 'Clinton, Sanders Prepare To Face Off In NH',
'duration': 334,
},
'skip': 'Video gone',
}]
    def _real_extract(self, url):
        display_id = self._match_id(url)
        # DVR story feed describing this live-video clip
        video_info = self._download_json(
            'http://feeds.cbsn.cbsnews.com/rundown/story', display_id, query={
                'device': 'desktop',
                'dvr_slug': display_id,
            })
        return {
            'id': display_id,
            'display_id': display_id,
            'formats': self._extract_akamai_formats(video_info['url'], display_id),
            **traverse_obj(video_info, {
                'title': 'headline',
                'thumbnail': ('thumbnail_url_hd', {url_or_none}),
                'duration': ('segmentDur', {parse_duration}),
            }),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/lenta.py | yt_dlp/extractor/lenta.py | from .common import InfoExtractor
class LentaIE(InfoExtractor):
_WORKING = False
_VALID_URL = r'https?://(?:www\.)?lenta\.ru/[^/]+/\d+/\d+/\d+/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://lenta.ru/news/2018/03/22/savshenko_go/',
'info_dict': {
'id': '964400',
'ext': 'mp4',
'title': 'Надежду Савченко задержали',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 61,
'view_count': int,
},
'params': {
'skip_download': True,
},
}, {
# EaglePlatform iframe embed
'url': 'http://lenta.ru/news/2015/03/06/navalny/',
'info_dict': {
'id': '227304',
'ext': 'mp4',
'title': 'Навальный вышел на свободу',
'description': 'md5:d97861ac9ae77377f3f20eaf9d04b4f5',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 87,
'view_count': int,
'age_limit': 0,
},
'params': {
'skip_download': True,
},
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(
r'vid\s*:\s*["\']?(\d+)', webpage, 'eagleplatform id',
default=None)
if video_id:
return self.url_result(
f'eagleplatform:lentaru.media.eagleplatform.com:{video_id}',
ie='EaglePlatform', video_id=video_id)
return self.url_result(url, ie='Generic')
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/beatbump.py | yt_dlp/extractor/beatbump.py | from .common import InfoExtractor
from .youtube import YoutubeIE, YoutubeTabIE
class BeatBumpVideoIE(InfoExtractor):
_VALID_URL = r'https?://beatbump\.(?:ml|io)/listen\?id=(?P<id>[\w-]+)'
_TESTS = [{
'url': 'https://beatbump.ml/listen?id=MgNrAu2pzNs',
'md5': '5ff3fff41d3935b9810a9731e485fe66',
'info_dict': {
'id': 'MgNrAu2pzNs',
'ext': 'mp4',
'artist': 'Stephen',
'thumbnail': 'https://i.ytimg.com/vi_webp/MgNrAu2pzNs/maxresdefault.webp',
'channel_url': 'https://www.youtube.com/channel/UC-pWHpBjdGG69N9mM2auIAA',
'upload_date': '20190312',
'categories': ['Music'],
'playable_in_embed': True,
'duration': 169,
'like_count': int,
'alt_title': 'Voyeur Girl',
'view_count': int,
'track': 'Voyeur Girl',
'uploader': 'Stephen',
'title': 'Voyeur Girl',
'channel_follower_count': int,
'age_limit': 0,
'availability': 'public',
'live_status': 'not_live',
'album': 'it\'s too much love to know my dear',
'channel': 'Stephen',
'comment_count': int,
'description': 'md5:7ae382a65843d6df2685993e90a8628f',
'tags': 'count:11',
'creator': 'Stephen',
'channel_id': 'UC-pWHpBjdGG69N9mM2auIAA',
'channel_is_verified': True,
'heatmap': 'count:100',
},
}, {
'url': 'https://beatbump.io/listen?id=LDGZAprNGWo',
'only_matching': True,
}]
def _real_extract(self, url):
id_ = self._match_id(url)
return self.url_result(f'https://music.youtube.com/watch?v={id_}', YoutubeIE, id_)
class BeatBumpPlaylistIE(InfoExtractor):
_VALID_URL = r'https?://beatbump\.(?:ml|io)/(?:release\?id=|artist/|playlist/)(?P<id>[\w-]+)'
_TESTS = [{
'url': 'https://beatbump.ml/release?id=MPREb_gTAcphH99wE',
'playlist_count': 50,
'info_dict': {
'id': 'OLAK5uy_l1m0thk3g31NmIIz_vMIbWtyv7eZixlH0',
'availability': 'unlisted',
'view_count': int,
'title': 'Album - Royalty Free Music Library V2 (50 Songs)',
'description': '',
'tags': [],
'modified_date': '20231110',
},
'expected_warnings': ['YouTube Music is not directly supported'],
}, {
'url': 'https://beatbump.ml/artist/UC_aEa8K-EOJ3D6gOs7HcyNg',
'playlist_mincount': 1,
'params': {'flatplaylist': True},
'info_dict': {
'id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
'uploader_url': 'https://www.youtube.com/@NoCopyrightSounds',
'channel_url': 'https://www.youtube.com/channel/UC_aEa8K-EOJ3D6gOs7HcyNg',
'uploader_id': '@NoCopyrightSounds',
'channel_follower_count': int,
'title': 'NoCopyrightSounds',
'uploader': 'NoCopyrightSounds',
'description': 'md5:cd4fd53d81d363d05eee6c1b478b491a',
'channel': 'NoCopyrightSounds',
'tags': 'count:65',
'channel_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
'channel_is_verified': True,
},
'expected_warnings': ['YouTube Music is not directly supported'],
}, {
'url': 'https://beatbump.ml/playlist/VLPLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
'playlist_mincount': 1,
'params': {'flatplaylist': True},
'info_dict': {
'id': 'PLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
'uploader_url': 'https://www.youtube.com/@NoCopyrightSounds',
'description': 'Providing you with copyright free / safe music for gaming, live streaming, studying and more!',
'view_count': int,
'channel_url': 'https://www.youtube.com/channel/UC_aEa8K-EOJ3D6gOs7HcyNg',
'uploader_id': '@NoCopyrightSounds',
'title': 'NCS : All Releases 💿',
'uploader': 'NoCopyrightSounds',
'availability': 'public',
'channel': 'NoCopyrightSounds',
'tags': [],
'modified_date': '20231112',
'channel_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
},
'expected_warnings': ['YouTube Music is not directly supported'],
}, {
'url': 'https://beatbump.io/playlist/VLPLFCHGavqRG-q_2ZhmgU2XB2--ZY6irT1c',
'only_matching': True,
}]
def _real_extract(self, url):
id_ = self._match_id(url)
return self.url_result(f'https://music.youtube.com/browse/{id_}', YoutubeTabIE, id_)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tmz.py | yt_dlp/extractor/tmz.py | import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
get_element_by_attribute,
)
class TMZIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?tmz\.com/.*'
_TESTS = [
{
'url': 'http://www.tmz.com/videos/0-cegprt2p/',
'info_dict': {
'id': 'http://www.tmz.com/videos/0-cegprt2p/',
'ext': 'mp4',
'title': 'No Charges Against Hillary Clinton? Harvey Says It Ain\'t Over Yet',
'description': 'Harvey talks about Director Comey’s decision not to prosecute Hillary Clinton.',
'timestamp': 1467831837,
'uploader': 'TMZ Staff',
'upload_date': '20160706',
'thumbnail': 'https://imagez.tmz.com/image/5e/4by3/2016/07/06/5eea7dc01baa5c2e83eb06930c170e46_xl.jpg',
'duration': 772.0,
},
},
{
'url': 'https://www.tmz.com/videos/071119-chris-morgan-women-4590005-0-zcsejvcr/',
'info_dict': {
'id': 'https://www.tmz.com/videos/071119-chris-morgan-women-4590005-0-zcsejvcr/',
'ext': 'mp4',
'title': 'Angry Bagel Shop Guy Says He Doesn\'t Trust Women',
'description': 'The enraged man who went viral for ranting about women on dating sites before getting ragdolled in a bagel shop is defending his misogyny ... he says it\'s women\'s fault in the first place.',
'timestamp': 1562889485,
'uploader': 'TMZ Staff',
'upload_date': '20190711',
'thumbnail': 'https://imagez.tmz.com/image/a8/4by3/2019/07/12/a85480d27b2f50a7bfea2322151d67a5_xl.jpg',
'duration': 123.0,
},
},
{
'url': 'http://www.tmz.com/2015/04/19/bobby-brown-bobbi-kristina-awake-video-concert',
'md5': '5429c85db8bde39a473a56ca8c4c5602',
'info_dict': {
'id': 'http://www.tmz.com/2015/04/19/bobby-brown-bobbi-kristina-awake-video-concert',
'ext': 'mp4',
'title': 'Bobby Brown Tells Crowd ... Bobbi Kristina is Awake',
'description': 'Bobby Brown stunned his audience during a concert Saturday night, when he told the crowd, "Bobbi is awake. She\'s watching me."',
'timestamp': 1429467813,
'uploader': 'TMZ Staff',
'upload_date': '20150419',
'duration': 29.0,
'thumbnail': 'https://imagez.tmz.com/image/15/4by3/2015/04/20/1539c7ae136359fc979236fa6a9449dd_xl.jpg',
},
},
{
'url': 'http://www.tmz.com/2015/09/19/patti-labelle-concert-fan-stripping-kicked-out-nicki-minaj/',
'info_dict': {
'id': 'http://www.tmz.com/2015/09/19/patti-labelle-concert-fan-stripping-kicked-out-nicki-minaj/',
'ext': 'mp4',
'title': 'Patti LaBelle -- Goes Nuclear On Stripping Fan',
'description': 'Patti LaBelle made it known loud and clear last night ... NO '
'ONE gets on her stage and strips down.',
'timestamp': 1442683746,
'uploader': 'TMZ Staff',
'upload_date': '20150919',
'duration': 104.0,
'thumbnail': 'https://imagez.tmz.com/image/5e/4by3/2015/09/20/5e57d7575062528082994e18ac3f0f48_xl.jpg',
},
},
{
'url': 'http://www.tmz.com/2016/01/28/adam-silver-sting-drake-blake-griffin/',
'info_dict': {
'id': 'http://www.tmz.com/2016/01/28/adam-silver-sting-drake-blake-griffin/',
'ext': 'mp4',
'title': 'NBA\'s Adam Silver -- Blake Griffin\'s a Great Guy ... He\'ll Learn from This',
'description': 'Two pretty parts of this video with NBA Commish Adam Silver.',
'timestamp': 1454010989,
'uploader': 'TMZ Staff',
'upload_date': '20160128',
'duration': 59.0,
'thumbnail': 'https://imagez.tmz.com/image/38/4by3/2016/01/29/3856e83e0beb57059ec412122b842fb1_xl.jpg',
},
},
{
'url': 'http://www.tmz.com/2016/10/27/donald-trump-star-vandal-arrested-james-otis/',
'info_dict': {
'id': 'http://www.tmz.com/2016/10/27/donald-trump-star-vandal-arrested-james-otis/',
'ext': 'mp4',
'title': 'Trump Star Vandal -- I\'m Not Afraid of Donald or the Cops!',
'description': 'James Otis is the the guy who took a pickaxe to Donald Trump\'s star on the Walk of Fame, and he tells TMZ .. he\'s ready and willing to go to jail for the crime.',
'timestamp': 1477500095,
'uploader': 'TMZ Staff',
'upload_date': '20161026',
'thumbnail': 'https://imagez.tmz.com/image/0d/4by3/2016/10/27/0d904814d4a75dcf9cc3b8cfd1edc1a3_xl.jpg',
'duration': 128.0,
},
},
{
'url': 'https://www.tmz.com/videos/2020-10-31-103120-beverly-hills-protest-4878209/',
'info_dict': {
'id': 'https://www.tmz.com/videos/2020-10-31-103120-beverly-hills-protest-4878209/',
'ext': 'mp4',
'title': 'Cops Use Billy Clubs Against Pro-Trump and Anti-Fascist '
'Demonstrators',
'description': 'Beverly Hills may be an omen of what\'s coming next week, '
'because things got crazy on the streets and cops started '
'swinging their billy clubs at both Anti-Fascist and Pro-Trump '
'demonstrators.',
'timestamp': 1604182772,
'uploader': 'TMZ Staff',
'upload_date': '20201031',
'duration': 96.0,
'thumbnail': 'https://imagez.tmz.com/image/f3/4by3/2020/10/31/f37bd5a8aef84497866f425130c58be3_xl.jpg',
},
},
{
'url': 'https://www.tmz.com/2020/11/05/gervonta-davis-car-crash-hit-and-run-police/',
'info_dict': {
'id': 'Dddb6IGe-ws',
'ext': 'mp4',
'title': 'SICK LAMBO GERVONTA DAVIS IN HIS NEW RIDE RIGHT AFTER KO AFTER LEO EsNews Boxing',
'uploader': 'ESNEWS',
'description': 'md5:49675bc58883ccf80474b8aa701e1064',
'upload_date': '20201102',
'uploader_id': '@ESNEWS',
'uploader_url': 'https://www.youtube.com/@ESNEWS',
'like_count': int,
'channel_id': 'UCI-Oq7oFGakzSzHFlTtsUsQ',
'channel': 'ESNEWS',
'view_count': int,
'duration': 225,
'live_status': 'not_live',
'thumbnail': 'https://i.ytimg.com/vi_webp/Dddb6IGe-ws/maxresdefault.webp',
'channel_url': 'https://www.youtube.com/channel/UCI-Oq7oFGakzSzHFlTtsUsQ',
'channel_follower_count': int,
'playable_in_embed': True,
'categories': ['Sports'],
'age_limit': 0,
'tags': 'count:10',
'availability': 'public',
'comment_count': int,
},
},
{
'url': 'https://www.tmz.com/2020/11/19/conor-mcgregor-dustin-poirier-contract-fight-ufc-257-fight-island/',
'info_dict': {
'id': '1329448013937471491',
'ext': 'mp4',
'title': 'The Mac Life - BREAKING: Conor McGregor (@thenotoriousmma) has signed his bout agreement for his rematch with Dustin Poirier for January 23.',
'uploader': 'The Mac Life',
'description': 'md5:56e6009bbc3d12498e10d08a8e1f1c69',
'upload_date': '20201119',
'display_id': '1329450007125225473',
'uploader_id': 'TheMacLife',
'timestamp': 1605800556,
'thumbnail': 'https://pbs.twimg.com/media/EnMmfT8XYAExgxJ.jpg?name=small',
'like_count': int,
'duration': 11.812,
'uploader_url': 'https://twitter.com/TheMacLife',
'age_limit': 0,
'repost_count': int,
'tags': [],
'comment_count': int,
},
},
]
def _real_extract(self, url):
webpage = self._download_webpage(url, url)
jsonld = self._search_json_ld(webpage, url)
if not jsonld or 'url' not in jsonld:
# try to extract from YouTube Player API
# see https://developers.google.com/youtube/iframe_api_reference#Video_Queueing_Functions
match_obj = re.search(r'\.cueVideoById\(\s*(?P<quote>[\'"])(?P<id>.*?)(?P=quote)', webpage)
if match_obj:
return self.url_result(match_obj.group('id'))
# try to extract from twitter
blockquote_el = get_element_by_attribute('class', 'twitter-tweet', webpage)
if blockquote_el:
matches = re.findall(
r'<a[^>]+href=\s*(?P<quote>[\'"])(?P<link>.*?)(?P=quote)',
blockquote_el)
if matches:
for _, match in matches:
if '/status/' in match:
return self.url_result(match)
raise ExtractorError('No video found!')
if id not in jsonld:
jsonld['id'] = url
return jsonld
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/japandiet.py | yt_dlp/extractor/japandiet.py | import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
clean_html,
int_or_none,
join_nonempty,
parse_qs,
smuggle_url,
traverse_obj,
try_call,
unsmuggle_url,
)
def _parse_japanese_date(text):
if not text:
return None
ERA_TABLE = {
'明治': 1868,
'大正': 1912,
'昭和': 1926,
'平成': 1989,
'令和': 2019,
}
ERA_RE = '|'.join(map(re.escape, ERA_TABLE.keys()))
mobj = re.search(rf'({ERA_RE})?(\d+)年(\d+)月(\d+)日', re.sub(r'[\s\u3000]+', '', text))
if not mobj:
return None
era, year, month, day = mobj.groups()
year, month, day = map(int, (year, month, day))
if era:
# example input: 令和5年3月34日
# even though each era have their end, don't check here
year += ERA_TABLE[era]
return '%04d%02d%02d' % (year, month, day)
def _parse_japanese_duration(text):
    """Convert a Japanese duration string (e.g. 1時間30分) into seconds.

    Day/hour/minute/second components are all optional; absent components
    count as zero. Returns None only if the regex engine fails to match
    (it matches the empty string, so this does not happen in practice).
    """
    # Strip ASCII and full-width whitespace, then pick out the components.
    cleaned = re.sub(r'[\s\u3000]+', '', text or '')
    mobj = re.search(r'(?:(\d+)日間?)?(?:(\d+)時間?)?(?:(\d+)分)?(?:(\d+)秒)?', cleaned)
    if not mobj:
        return
    days, hours, mins, secs = (int(g) if g else 0 for g in mobj.groups())
    return ((days * 24 + hours) * 60 + mins) * 60 + secs
class ShugiinItvBaseIE(InfoExtractor):
    # Lazily-filled cache of today's proceedings, shared by all subclasses.
    _INDEX_ROOMS = None

    @classmethod
    def _find_rooms(cls, webpage):
        """Scrape the index page for rooms that are currently broadcasting."""
        rooms = []
        for mobj in re.finditer(
                r'(?s)<a\s+href="[^"]+\?room_id=(room\d+)"\s*class="play_live".+?class="s12_14">(.+?)</td>',
                webpage):
            room_id = mobj.group(1)
            rooms.append({
                '_type': 'url',
                'id': room_id,
                'title': clean_html(mobj.group(2)).strip(),
                # Smuggle (room_id, title) so the room extractor can skip a refetch
                'url': smuggle_url(
                    f'https://www.shugiintv.go.jp/jp/index.php?room_id={room_id}',
                    {'g': mobj.groups()}),
                'ie_key': ShugiinItvLiveIE.ie_key(),
            })
        return rooms

    def _fetch_rooms(self):
        """Download (once) and cache the list of running proceedings."""
        if not self._INDEX_ROOMS:
            webpage = self._download_webpage(
                'https://www.shugiintv.go.jp/jp/index.php', None,
                encoding='euc-jp', note='Downloading proceedings info')
            ShugiinItvBaseIE._INDEX_ROOMS = self._find_rooms(webpage)
        return self._INDEX_ROOMS
class ShugiinItvLiveIE(ShugiinItvBaseIE):
    _VALID_URL = r'https?://(?:www\.)?shugiintv\.go\.jp/(?:jp|en)(?:/index\.php)?$'
    IE_DESC = '衆議院インターネット審議中継'

    _TESTS = [{
        'url': 'https://www.shugiintv.go.jp/jp/index.php',
        'info_dict': {
            '_type': 'playlist',
            'title': 'All proceedings for today',
        },
        # expect at least one proceedings is running
        'playlist_mincount': 1,
    }]

    @classmethod
    def suitable(cls, url):
        # Defer to the more specific room/VOD extractors when they match.
        if not super().suitable(url):
            return False
        return not (ShugiinItvLiveRoomIE.suitable(url) or ShugiinItvVodIE.suitable(url))

    def _real_extract(self, url):
        """Return a playlist containing every proceeding currently running."""
        self.to_screen(
            'Downloading all running proceedings. To specify one proceeding, use direct link from the website')
        return self.playlist_result(self._fetch_rooms(), playlist_title='All proceedings for today')
class ShugiinItvLiveRoomIE(ShugiinItvBaseIE):
    _VALID_URL = r'https?://(?:www\.)?shugiintv\.go\.jp/(?:jp|en)/index\.php\?room_id=(?P<id>room\d+)'
    IE_DESC = '衆議院インターネット審議中継 (中継)'

    _TESTS = [{
        'url': 'https://www.shugiintv.go.jp/jp/index.php?room_id=room01',
        'info_dict': {
            'id': 'room01',
            'title': '内閣委員会',
        },
        'skip': 'this runs for a time and not every day',
    }, {
        'url': 'https://www.shugiintv.go.jp/jp/index.php?room_id=room11',
        'info_dict': {
            'id': 'room11',
            'title': '外務委員会',
        },
        'skip': 'this runs for a time and not every day',
    }]

    def _real_extract(self, url):
        """Extract a single live room; reuses smuggled metadata when present."""
        url, smuggled = unsmuggle_url(url, default={})
        groups = smuggled.get('g')
        if groups:
            # Index page already supplied (room_id, title) — no extra request.
            room_id, title = groups
        else:
            room_id = self._match_id(url)
            title = traverse_obj(
                self._fetch_rooms(),
                (lambda k, v: v['id'] == room_id, 'title'), get_all=False)

        formats, subtitles = self._extract_m3u8_formats_and_subtitles(
            f'https://hlslive.shugiintv.go.jp/{room_id}/amlst:{room_id}/playlist.m3u8',
            room_id, ext='mp4')

        return {
            'id': room_id,
            'title': title,
            'formats': formats,
            'subtitles': subtitles,
            'is_live': True,
        }
class ShugiinItvVodIE(ShugiinItvBaseIE):
    _VALID_URL = r'https?://(?:www\.)?shugiintv\.go\.jp/(?:jp|en)/index\.php\?ex=VL(?:\&[^=]+=[^&]*)*\&deli_id=(?P<id>\d+)'
    IE_DESC = '衆議院インターネット審議中継 (ビデオライブラリ)'
    _TESTS = [{
        'url': 'https://www.shugiintv.go.jp/jp/index.php?ex=VL&media_type=&deli_id=53846',
        'info_dict': {
            'id': '53846',
            'title': 'ウクライナ大統領国会演説(オンライン)',
            'release_date': '20220323',
            'chapters': 'count:4',
        },
    }, {
        'url': 'https://www.shugiintv.go.jp/en/index.php?ex=VL&media_type=&deli_id=53846',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Extract a video-library (VOD) entry plus its agenda-item chapters."""
        video_id = self._match_id(url)
        # Always fetch the Japanese page (EUC-JP encoded) regardless of the
        # /jp/ or /en/ URL variant, since the metadata is scraped from it.
        webpage = self._download_webpage(
            f'https://www.shugiintv.go.jp/jp/index.php?ex=VL&media_type=&deli_id={video_id}', video_id,
            encoding='euc-jp')

        # The page embeds the stream URL in a hidden input; upgrade to https.
        m3u8_url = self._search_regex(
            r'id="vtag_src_base_vod"\s*value="(http.+?\.m3u8)"', webpage, 'm3u8 url')
        m3u8_url = re.sub(r'^http://', 'https://', m3u8_url)
        formats, subtitles = self._extract_m3u8_formats_and_subtitles(
            m3u8_url, video_id, ext='mp4')

        title = self._html_search_regex(
            (r'<td\s+align="left">(.+)\s*\(\d+分\)',
             r'<TD.+?<IMG\s*src=".+?/spacer\.gif".+?height="15">(.+?)<IMG'), webpage, 'title', fatal=False)

        # Session date in Japanese-era form; note the regex field name is
        # (mis)labeled 'title' — only affects the error/log message.
        release_date = _parse_japanese_date(self._html_search_regex(
            r'開会日</td>\s*<td.+?/td>\s*<TD>(.+?)</TD>',
            webpage, 'title', fatal=False))

        # Each agenda item links into the player with a ?time= offset.
        chapters = []
        for chp in re.finditer(r'(?i)<A\s+HREF="([^"]+?)"\s*class="play_vod">(?!<img)(.+)</[Aa]>', webpage):
            chapters.append({
                'title': clean_html(chp.group(2)).strip(),
                'start_time': try_call(lambda: float(parse_qs(chp.group(1))['time'][0].strip())),
            })
        # NOTE: there are blanks at the first and the end of the videos,
        # so getting/providing the video duration is not possible
        # also, the exact end_time for the last chapter is unknown (we can get at most minutes of granularity)
        last_tr = re.findall(r'(?s)<TR\s*class="s14_24">(.+?)</TR>', webpage)[-1]
        if last_tr and chapters:
            last_td = re.findall(r'<TD.+?</TD>', last_tr)[-1]
            if last_td:
                # Last cell holds the item's duration in Japanese notation.
                chapters[-1]['end_time'] = chapters[-1]['start_time'] + _parse_japanese_duration(clean_html(last_td))

        return {
            'id': video_id,
            'title': title,
            'release_date': release_date,
            'chapters': chapters,
            'formats': formats,
            'subtitles': subtitles,
        }
class SangiinInstructionIE(InfoExtractor):
    # Matches only the top-level index page, which embeds the actual video in
    # a separate frame that SangiinIE can handle.
    _VALID_URL = r'https?://www\.webtv\.sangiin\.go\.jp/webtv/index\.php'
    IE_DESC = False  # this shouldn't be listed as a supported site

    def _real_extract(self, url):
        # Deliberately unsupported: tell the user how to obtain the per-video
        # detail link instead of the index page.
        raise ExtractorError(
            'Copy the link from the button below the video description/player '
            'and use that link to download. If there is no button in the frame, '
            'get the URL of the frame showing the video.', expected=True)
class SangiinIE(InfoExtractor):
    _VALID_URL = r'https?://www\.webtv\.sangiin\.go\.jp/webtv/detail\.php\?sid=(?P<id>\d+)'
    IE_DESC = '参議院インターネット審議中継 (archive)'

    _TESTS = [{
        'url': 'https://www.webtv.sangiin.go.jp/webtv/detail.php?sid=7052',
        'info_dict': {
            'id': '7052',
            'title': '2022年10月7日 本会議',
            'description': 'md5:0a5fed523f95c88105a0b0bf1dd71489',
            'upload_date': '20221007',
            'ext': 'mp4',
        },
    }, {
        'url': 'https://www.webtv.sangiin.go.jp/webtv/detail.php?sid=7037',
        'info_dict': {
            'id': '7037',
            'title': '2022年10月3日 開会式',
            'upload_date': '20221003',
            'ext': 'mp4',
        },
    }, {
        'url': 'https://www.webtv.sangiin.go.jp/webtv/detail.php?sid=7076',
        'info_dict': {
            'id': '7076',
            'title': '2022年10月27日 法務委員会',
            'upload_date': '20221027',
            'ext': 'mp4',
            'is_live': True,
        },
        'skip': 'this live is turned into archive after it ends',
    }]

    def _real_extract(self, url):
        """Extract a House of Councillors session (archive or livestream)."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        def find_detail(label, name, **kwargs):
            # Metadata rows on the page are laid out as <dt>label</dt><dd>value</dd>.
            return self._html_search_regex(
                rf'<dt[^>]*>\s*{label}\s*</dt>\s*<dd[^>]*>\s*(.+?)\s*</dd>',
                webpage, name, **kwargs)

        date = find_detail('開会日', 'date', fatal=False)
        title = find_detail('会議名', 'date', fatal=False)

        # some videos don't have the elements, so assume it's missing
        description = self._html_search_regex(
            r'会議の経過\s*</h3>\s*<span[^>]*>(.+?)</span>', webpage,
            'description', default=None)

        # this row appears only when it's livestream
        is_live = bool(find_detail('公報掲載時刻', 'is_live', default=None))

        m3u8_url = self._search_regex(
            r'var\s+videopath\s*=\s*(["\'])([^"\']+)\1', webpage,
            'm3u8 url', group=2)

        formats, subs = self._extract_m3u8_formats_and_subtitles(m3u8_url, video_id, 'mp4')

        return {
            'id': video_id,
            'title': join_nonempty(date, title, delim=' '),
            'description': description,
            'upload_date': _parse_japanese_date(date),
            'formats': formats,
            'subtitles': subs,
            'is_live': is_live,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/uplynk.py | yt_dlp/extractor/uplynk.py | import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
float_or_none,
smuggle_url,
traverse_obj,
unsmuggle_url,
update_url_query,
)
class UplynkBaseIE(InfoExtractor):
    # Matches both external-id style (ext/<owner-guid>/<external-id>) and
    # plain asset-guid content URLs; an optional `pbs` query parameter
    # carries a playback session id.
    _UPLYNK_URL_RE = r'''(?x)
        https?://[\w-]+\.uplynk\.com/(?P<path>
            ext/[0-9a-f]{32}/(?P<external_id>[^/?&]+)|
            (?P<id>[0-9a-f]{32})
        )\.(?:m3u8|json)
        (?:.*?\bpbs=(?P<session_id>[^&]+))?'''

    def _extract_uplynk_info(self, url):
        """Resolve an Uplynk content URL into formats plus asset metadata.

        The URL may carry smuggled Referer/Origin headers which are forwarded
        to the m3u8 request. Raises ExtractorError when the URL does not
        match or the asset-info endpoint reports an error.
        """
        uplynk_content_url, smuggled_data = unsmuggle_url(url, {})
        mobj = re.match(self._UPLYNK_URL_RE, uplynk_content_url)
        if not mobj:
            raise ExtractorError('Necessary parameters not found in Uplynk URL')
        path, external_id, video_id, session_id = mobj.group('path', 'external_id', 'id', 'session_id')
        display_id = video_id or external_id
        # Pass along smuggled headers (if any) with case-insensitive lookup
        headers = traverse_obj(
            smuggled_data, {'Referer': 'Referer', 'Origin': 'Origin'}, casesense=False)
        formats, subtitles = self._extract_m3u8_formats_and_subtitles(
            f'http://content.uplynk.com/{path}.m3u8', display_id, 'mp4', headers=headers)
        if session_id:
            # Each segment request must carry the playback session id
            for f in formats:
                f['extra_param_to_segment_url'] = f'pbs={session_id}'
        asset = self._download_json(
            f'http://content.uplynk.com/player/assetinfo/{path}.json', display_id)
        if asset.get('error') == 1:
            msg = asset.get('msg') or 'unknown error'
            raise ExtractorError(f'{self.IE_NAME} said: {msg}', expected=True)

        return {
            'id': asset['asset'],
            'title': asset['desc'],
            'thumbnail': asset.get('default_poster_url'),
            'duration': float_or_none(asset.get('duration')),
            'uploader_id': asset.get('owner'),
            'formats': formats,
            'subtitles': subtitles,
        }
class UplynkIE(UplynkBaseIE):
    """Direct Uplynk content URLs; all extraction happens in the base class."""
    IE_NAME = 'uplynk'
    _VALID_URL = UplynkBaseIE._UPLYNK_URL_RE
    _TEST = {
        'url': 'http://content.uplynk.com/e89eaf2ce9054aa89d92ddb2d817a52e.m3u8',
        'info_dict': {
            'id': 'e89eaf2ce9054aa89d92ddb2d817a52e',
            'ext': 'mp4',
            'title': '030816-kgo-530pm-solar-eclipse-vid_web.mp4',
            'uploader_id': '4413701bf5a1488db55b767f8ae9d4fa',
            'duration': 530.2739166666679,
            'thumbnail': r're:^https?://.*\.jpg$',
        },
        'params': {
            'skip_download': 'm3u8',
        },
    }

    def _real_extract(self, url):
        # The URL itself carries everything needed (path, ids, session id)
        return self._extract_uplynk_info(url)
class UplynkPreplayIE(UplynkBaseIE):
    IE_NAME = 'uplynk:preplay'
    _VALID_URL = r'https?://[\w-]+\.uplynk\.com/preplay2?/(?P<path>ext/[0-9a-f]{32}/(?P<external_id>[^/?&]+)|(?P<id>[0-9a-f]{32}))\.json'

    def _real_extract(self, url):
        """Resolve a preplay JSON URL into the corresponding content m3u8."""
        url, smuggled = unsmuggle_url(url, {})
        mobj = self._match_valid_url(url)
        path, external_id, video_id = mobj.group('path', 'external_id', 'id')
        display_id = video_id or external_id
        preplay = self._download_json(url, display_id)
        content_url = f'http://content.uplynk.com/{path}.m3u8'
        # The preplay response may hand back a playback session id that has
        # to be forwarded on the content URL.
        session_id = preplay.get('sid')
        if session_id:
            content_url = update_url_query(content_url, {'pbs': session_id})
        return self._extract_uplynk_info(smuggle_url(content_url, smuggled))
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tv5unis.py | yt_dlp/extractor/tv5unis.py | from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
join_nonempty,
make_archive_id,
parse_age_limit,
remove_end,
)
from ..utils.traversal import traverse_obj
class TV5UnisBaseIE(InfoExtractor):
    """Shared GraphQL-based extraction for tv5unis.ca (geo-restricted to Canada)."""
    _GEO_COUNTRIES = ['CA']
    _GEO_BYPASS = False

    def _real_extract(self, url):
        groups = self._match_valid_url(url).groups()
        # Query name and arguments are supplied by the subclasses via
        # _GQL_QUERY_NAME / _gql_args().
        product = self._download_json(
            'https://api.tv5unis.ca/graphql', groups[0], query={
                'query': '''{
  %s(%s) {
    title
    summary
    tags
    duration
    seasonNumber
    episodeNumber
    collection {
      title
    }
    rating {
      name
    }
    videoElement {
      __typename
      ... on Video {
        mediaId
        encodings {
          hls {
            url
          }
        }
      }
      ... on RestrictedVideo {
        code
        reason
      }
    }
  }
}''' % (self._GQL_QUERY_NAME, self._gql_args(groups)),  # noqa: UP031
            })['data'][self._GQL_QUERY_NAME]
        video = product['videoElement']
        if video is None:
            raise ExtractorError('This content is no longer available', expected=True)
        if video.get('__typename') == 'RestrictedVideo':
            # code 1001 appears to be the geo-restriction error code —
            # other codes fall through to the generic message below
            code = video.get('code')
            if code == 1001:
                self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
            reason = video.get('reason')
            raise ExtractorError(join_nonempty(
                'This video is restricted',
                code is not None and f', error code {code}',
                reason and f': {remove_end(reason, ".")}',
                delim=''))
        media_id = video['mediaId']
        formats, subtitles = self._extract_m3u8_formats_and_subtitles(
            video['encodings']['hls']['url'], media_id, 'mp4')

        return {
            'id': media_id,
            # Keep download archives created by the old Limelight extractor working
            '_old_archive_ids': [make_archive_id('LimelightMedia', media_id)],
            'formats': formats,
            'subtitles': subtitles,
            **traverse_obj(product, {
                'title': ('title', {str}),
                'description': ('summary', {str}),
                'tags': ('tags', ..., {str}),
                'duration': ('duration', {int_or_none}),
                'season_number': ('seasonNumber', {int_or_none}),
                'episode_number': ('episodeNumber', {int_or_none}),
                'series': ('collection', 'title', {str}),
                'age_limit': ('rating', 'name', {parse_age_limit}),
            }),
        }
class TV5UnisVideoIE(TV5UnisBaseIE):
    IE_NAME = 'tv5unis:video'
    _VALID_URL = r'https?://(?:www\.)?tv5unis\.ca/videos/[^/?#]+/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.tv5unis.ca/videos/bande-annonces/144041',
        'md5': '24a247c96119d77fe1bae8b440457dfa',
        'info_dict': {
            'id': '56862325352147149dce0ae139afced6',
            '_old_archive_ids': ['limelightmedia 56862325352147149dce0ae139afced6'],
            'ext': 'mp4',
            'title': 'Antigone',
            'description': r"re:En aidant son frère .+ dicté par l'amour et la solidarité.",
            'duration': 61,
        },
    }]
    _GQL_QUERY_NAME = 'productById'

    @staticmethod
    def _gql_args(groups):
        """Build the GraphQL argument list for a direct lookup by video id."""
        video_id = groups[0]
        return f'id: {video_id}'
class TV5UnisIE(TV5UnisBaseIE):
    IE_NAME = 'tv5unis'
    _VALID_URL = r'https?://(?:www\.)?tv5unis\.ca/videos/(?P<id>[^/?#]+)(?:/saisons/(?P<season_number>\d+)/episodes/(?P<episode_number>\d+))?/?(?:[?#&]|$)'
    _TESTS = [{
        # geo-restricted to Canada; xff is ineffective
        'url': 'https://www.tv5unis.ca/videos/watatatow/saisons/11/episodes/1',
        'md5': '43beebd47eefb1c5caf9a47a3fc35589',
        'info_dict': {
            'id': '2c06e4af20f0417b86c2536825287690',
            '_old_archive_ids': ['limelightmedia 2c06e4af20f0417b86c2536825287690'],
            'ext': 'mp4',
            'title': "L'homme éléphant",
            'description': r're:Paul-André et Jean-Yves, .+ quand elle parle du feu au Spot.',
            'subtitles': {
                'fr': 'count:1',
            },
            'duration': 1440,
            'age_limit': 8,
            'tags': 'count:4',
            'series': 'Watatatow',
            'season': 'Season 11',
            'season_number': 11,
            'episode': 'Episode 1',
            'episode_number': 1,
        },
    }, {
        # geo-restricted to Canada; xff is ineffective
        'url': 'https://www.tv5unis.ca/videos/boite-a-savon',
        'md5': '7898e868e8c540f03844660e0aab6bbe',
        'info_dict': {
            'id': '4de6d0c6467b4511a0c04b92037a9f15',
            '_old_archive_ids': ['limelightmedia 4de6d0c6467b4511a0c04b92037a9f15'],
            'ext': 'mp4',
            'title': 'Boîte à savon',
            'description': r're:Dans le petit village de Broche-à-foin, .+ celle qui fait battre son coeur.',
            'subtitles': {
                'fr': 'count:1',
            },
            'duration': 1200,
            'tags': 'count:5',
        },
    }]
    _GQL_QUERY_NAME = 'productByRootProductSlug'

    @staticmethod
    def _gql_args(groups):
        """Build GraphQL args from (slug, season_number, episode_number)."""
        slug, season, episode = groups
        args = f'rootProductSlug: "{slug}"'
        # Season/episode are optional — present only for per-episode URLs.
        if season:
            args += f', seasonNumber: {season}, episodeNumber: {episode}'
        return args
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/telemb.py | yt_dlp/extractor/telemb.py | import re
from .common import InfoExtractor
from ..utils import remove_start
class TeleMBIE(InfoExtractor):
    _WORKING = False
    _VALID_URL = r'https?://(?:www\.)?telemb\.be/(?P<display_id>.+?)_d_(?P<id>\d+)\.html'
    _TESTS = [
        {
            'url': 'http://www.telemb.be/mons-cook-with-danielle-des-cours-de-cuisine-en-anglais-_d_13466.html',
            'md5': 'f45ea69878516ba039835794e0f8f783',
            'info_dict': {
                'id': '13466',
                'display_id': 'mons-cook-with-danielle-des-cours-de-cuisine-en-anglais-',
                'ext': 'mp4',
                'title': 'Mons - Cook with Danielle : des cours de cuisine en anglais ! - Les reportages',
                'description': 'md5:bc5225f47b17c309761c856ad4776265',
                'thumbnail': r're:^http://.*\.(?:jpg|png)$',
            },
        },
        {
            # non-ASCII characters in download URL
            'url': 'http://telemb.be/les-reportages-havre-incendie-mortel_d_13514.html',
            'md5': '6e9682736e5ccd4eab7f21e855350733',
            'info_dict': {
                'id': '13514',
                'display_id': 'les-reportages-havre-incendie-mortel',
                'ext': 'mp4',
                'title': 'Havré - Incendie mortel - Les reportages',
                'description': 'md5:5e54cb449acb029c2b7734e2d946bd4a',
                'thumbnail': r're:^http://.*\.(?:jpg|png)$',
            },
        },
    ]

    def _real_extract(self, url):
        """Extract a TéléMB news video; pages list plain HTTP and RTMP sources."""
        mobj = self._match_valid_url(url)
        video_id, display_id = mobj.group('id', 'display_id')

        webpage = self._download_webpage(url, display_id)

        formats = []
        for video_url in re.findall(r'file\s*:\s*"([^"]+)"', webpage):
            fmt = {
                'url': video_url,
                'format_id': video_url.split(':')[0],
            }
            # RTMP sources need app/play-path split out plus the SWF player,
            # and are deprioritized relative to plain HTTP downloads.
            rtmp = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>.+))/(?P<playpath>mp4:.+)$', video_url)
            if rtmp:
                fmt.update({
                    'play_path': rtmp.group('playpath'),
                    'app': rtmp.group('app'),
                    'player_url': 'http://p.jwpcdn.com/6/10/jwplayer.flash.swf',
                    'page_url': 'http://www.telemb.be',
                    'preference': -10,
                })
            formats.append(fmt)

        return {
            'id': video_id,
            'display_id': display_id,
            'title': remove_start(self._og_search_title(webpage), 'TéléMB : '),
            'description': self._html_search_regex(
                r'<meta property="og:description" content="(.+?)" />',
                webpage, 'description', fatal=False),
            'thumbnail': self._og_search_thumbnail(webpage),
            'formats': formats,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/beeg.py | yt_dlp/extractor/beeg.py | from .common import InfoExtractor
from ..utils import (
int_or_none,
str_or_none,
traverse_obj,
try_get,
unified_timestamp,
)
class BeegIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?beeg\.(?:com(?:/video)?)/-?(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://beeg.com/-0983946056129650',
        'md5': '51d235147c4627cfce884f844293ff88',
        'info_dict': {
            'id': '0983946056129650',
            'ext': 'mp4',
            'title': 'sucked cock and fucked in a private plane',
            'duration': 927,
            'tags': list,
            'age_limit': 18,
            'upload_date': '20220131',
            'timestamp': 1643656455,
            'display_id': '2540839',
        },
    }, {
        'url': 'https://beeg.com/-0599050563103750?t=4-861',
        'md5': 'bd8b5ea75134f7f07fad63008db2060e',
        'info_dict': {
            'id': '0599050563103750',
            'ext': 'mp4',
            'title': 'Bad Relatives',
            'duration': 2060,
            'tags': list,
            'age_limit': 18,
            'description': 'md5:b4fc879a58ae6c604f8f259155b7e3b9',
            'timestamp': 1643623200,
            'display_id': '2569965',
            'upload_date': '20220131',
        },
    }, {
        # api/v6 v2
        'url': 'https://beeg.com/1941093077?t=911-1391',
        'only_matching': True,
    }, {
        # api/v6 v2 w/o t
        'url': 'https://beeg.com/1277207756',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        video = self._download_json(
            f'https://store.externulls.com/facts/file/{video_id}',
            video_id, f'Downloading JSON for {video_id}')

        # Pick the fact entry with the smallest id; its id doubles as the
        # display_id and its fc_created as the upload timestamp.
        # NOTE(review): assumes fc_facts is a non-empty list — confirm the
        # API never returns it missing.
        fc_facts = video.get('fc_facts')
        first_fact = {}
        for fact in fc_facts:
            if not first_fact or try_get(fact, lambda x: x['id'] < first_fact['id']):
                first_fact = fact

        # HLS variants either live on the file object or on the chosen fact
        resources = traverse_obj(video, ('file', 'hls_resources')) or first_fact.get('hls_resources')

        formats = []
        for format_id, video_uri in resources.items():
            if not video_uri:
                continue
            # resource keys look like 'fl_cdn_<height>'
            height = int_or_none(self._search_regex(r'fl_cdn_(\d+)', format_id, 'height', default=None))
            current_formats = self._extract_m3u8_formats(f'https://video.beeg.com/{video_uri}', video_id, ext='mp4', m3u8_id=str(height))
            for f in current_formats:
                f['height'] = height
            formats.extend(current_formats)

        return {
            'id': video_id,
            'display_id': str_or_none(first_fact.get('id')),
            'title': traverse_obj(video, ('file', 'stuff', 'sf_name')),
            'description': traverse_obj(video, ('file', 'stuff', 'sf_story')),
            'timestamp': unified_timestamp(first_fact.get('fc_created')),
            'duration': int_or_none(traverse_obj(video, ('file', 'fl_duration'))),
            'tags': traverse_obj(video, ('tags', ..., 'tg_name')),
            'formats': formats,
            'age_limit': self._rta_search(webpage),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/vidio.py | yt_dlp/extractor/vidio.py | from .common import InfoExtractor
from ..utils import (
ExtractorError,
clean_html,
format_field,
get_element_by_class,
int_or_none,
parse_iso8601,
smuggle_url,
str_or_none,
strip_or_none,
try_get,
unsmuggle_url,
urlencode_postdata,
)
class VidioBaseIE(InfoExtractor):
    """Shared login and API plumbing for the vidio.com extractors."""
    _LOGIN_URL = 'https://www.vidio.com/users/login'
    _NETRC_MACHINE = 'vidio'

    def _perform_login(self, username, password):
        """Log in via the website form.

        Raises ExtractorError with a specific message for unregistered
        emails and social-media-linked accounts, or with whatever reason
        can be scraped from the error page.
        """
        def is_logged_in():
            # Best-effort check; a failed request counts as "not logged in"
            res = self._download_json(
                'https://www.vidio.com/interactions.json', None, 'Checking if logged in', fatal=False) or {}
            return bool(res.get('current_user'))

        if is_logged_in():
            return

        login_page = self._download_webpage(
            self._LOGIN_URL, None, 'Downloading log in page')

        login_form = self._form_hidden_inputs('login-form', login_page)
        login_form.update({
            'user[login]': username,
            'user[password]': password,
        })
        # 401 is expected on bad credentials; the body then carries the reason
        login_post, login_post_urlh = self._download_webpage_handle(
            self._LOGIN_URL, None, 'Logging in', data=urlencode_postdata(login_form), expected_status=[302, 401])

        if login_post_urlh.status == 401:
            if get_element_by_class('onboarding-content-register-popup__title', login_post):
                raise ExtractorError(
                    'Unable to log in: The provided email has not registered yet.', expected=True)

            reason = get_element_by_class('onboarding-form__general-error', login_post) or get_element_by_class('onboarding-modal__title', login_post)
            # BUGFIX: `reason` is None when neither error element is present;
            # the unguarded `in` test then raised TypeError instead of
            # reaching the generic 'Unable to log in' error below.
            if reason and 'Akun terhubung ke' in reason:
                raise ExtractorError(
                    'Unable to log in: Your account is linked to a social media account. '
                    'Use --cookies to provide account credentials instead', expected=True)
            elif reason:
                subreason = get_element_by_class('onboarding-modal__description-text', login_post) or ''
                raise ExtractorError(
                    f'Unable to log in: {reason}. {clean_html(subreason)}', expected=True)
            raise ExtractorError('Unable to log in')

    def _initialize_pre_login(self):
        # The API key is required by every subsequent _call_api request
        self._api_key = self._download_json(
            'https://www.vidio.com/auth', None, data=b'')['api_key']

    def _call_api(self, url, video_id, note=None):
        """Fetch a JSON:API endpoint using the pre-fetched API key."""
        return self._download_json(url, video_id, note=note, headers={
            'Content-Type': 'application/vnd.api+json',
            'X-API-KEY': self._api_key,
        })
class VidioIE(VidioBaseIE):
    _VALID_URL = r'https?://(?:www\.)?vidio\.com/(watch|embed)/(?P<id>\d+)-(?P<display_id>[^/?#&]+)'
    _TESTS = [{
        'url': 'http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015',
        'md5': 'abac81b1a205a8d94c609a473b5ea62a',
        'info_dict': {
            'id': '165683',
            'display_id': 'dj_ambred-booyah-live-2015',
            'ext': 'mp4',
            'title': 'DJ_AMBRED - Booyah (Live 2015)',
            'description': 'md5:27dc15f819b6a78a626490881adbadf8',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 149,
            'like_count': int,
            'uploader': 'TWELVE Pic',
            'timestamp': 1444902800,
            'upload_date': '20151015',
            'uploader_id': 'twelvepictures',
            'channel': 'Cover Music Video',
            'channel_id': '280236',
            'view_count': int,
            'dislike_count': int,
            'comment_count': int,
            'tags': 'count:3',
            'uploader_url': 'https://www.vidio.com/@twelvepictures',
        },
    }, {
        'url': 'https://www.vidio.com/watch/77949-south-korea-test-fires-missile-that-can-strike-all-of-the-north',
        'only_matching': True,
    }, {
        # Premier-exclusive video
        'url': 'https://www.vidio.com/watch/1550718-stand-by-me-doraemon',
        'only_matching': True,
    }, {
        # embed url from https://enamplus.liputan6.com/read/5033648/video-fakta-temuan-suspek-cacar-monyet-di-jawa-tengah
        'url': 'https://www.vidio.com/embed/7115874-fakta-temuan-suspek-cacar-monyet-di-jawa-tengah',
        'info_dict': {
            'id': '7115874',
            'ext': 'mp4',
            'channel_id': '40172876',
            'comment_count': int,
            'uploader_id': 'liputan6',
            'view_count': int,
            'dislike_count': int,
            'upload_date': '20220804',
            'uploader': 'Liputan6.com',
            'display_id': 'fakta-temuan-suspek-cacar-monyet-di-jawa-tengah',
            'channel': 'ENAM PLUS 165',
            'timestamp': 1659605520,
            'title': 'Fakta Temuan Suspek Cacar Monyet di Jawa Tengah',
            'duration': 59,
            'like_count': int,
            'tags': ['monkeypox indonesia', 'cacar monyet menyebar', 'suspek cacar monyet di indonesia', 'fakta', 'hoax atau bukan?', 'jawa tengah'],
            'thumbnail': 'https://thumbor.prod.vidiocdn.com/83PN-_BKm5sS7emLtRxl506MLqQ=/640x360/filters:quality(70)/vidio-web-prod-video/uploads/video/image/7115874/fakta-suspek-cacar-monyet-di-jawa-tengah-24555a.jpg',
            'uploader_url': 'https://www.vidio.com/@liputan6',
            'description': 'md5:6d595a18d3b19ee378e335a6f288d5ac',
        },
    }]

    def _real_extract(self, url):
        """Extract a single VOD; premier (paid) videos use a separate stream API."""
        match = self._match_valid_url(url).groupdict()
        video_id, display_id = match.get('id'), match.get('display_id')
        data = self._call_api('https://api.vidio.com/videos/' + video_id, display_id)
        video = data['videos'][0]
        title = video['title'].strip()
        is_premium = video.get('is_premium')

        if is_premium:
            # Premier videos expose their stream URLs via a separate endpoint
            # that only returns sources for subscribed, logged-in users.
            sources = self._download_json(
                f'https://www.vidio.com/interactions_stream.json?video_id={video_id}&type=videos',
                display_id, note='Downloading premier API JSON')
            if not (sources.get('source') or sources.get('source_dash')):
                self.raise_login_required('This video is only available for registered users with the appropriate subscription')

            formats, subs = [], {}
            if sources.get('source'):
                hls_formats, hls_subs = self._extract_m3u8_formats_and_subtitles(
                    sources['source'], display_id, 'mp4', 'm3u8_native')
                formats.extend(hls_formats)
                subs.update(hls_subs)
            if sources.get('source_dash'):  # TODO: Find video example with source_dash
                dash_formats, dash_subs = self._extract_mpd_formats_and_subtitles(
                    sources['source_dash'], display_id, 'dash')
                formats.extend(dash_formats)
                subs.update(dash_subs)
        else:
            # Free videos carry the HLS URL directly in the video API response
            hls_url = data['clips'][0]['hls_url']
            formats, subs = self._extract_m3u8_formats_and_subtitles(
                hls_url, display_id, 'mp4', 'm3u8_native')

        # The API response embeds related objects as sibling lists
        # ('channels', 'users'); take the first entry of each.
        get_first = lambda x: try_get(data, lambda y: y[x + 's'][0], dict) or {}
        channel = get_first('channel')
        user = get_first('user')
        username = user.get('username')
        # Counters live on the video object as 'total_<name>'
        get_count = lambda x: int_or_none(video.get('total_' + x))

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': strip_or_none(video.get('description')),
            'thumbnail': video.get('image_url_medium'),
            'duration': int_or_none(video.get('duration')),
            'like_count': get_count('likes'),
            'formats': formats,
            'subtitles': subs,
            'uploader': user.get('name'),
            'timestamp': parse_iso8601(video.get('created_at')),
            'uploader_id': username,
            'uploader_url': format_field(username, None, 'https://www.vidio.com/@%s'),
            'channel': channel.get('name'),
            'channel_id': str_or_none(channel.get('id')),
            'view_count': get_count('view_count'),
            'dislike_count': get_count('dislikes'),
            'comment_count': get_count('comments'),
            'tags': video.get('tag_list'),
        }
class VidioPremierIE(VidioBaseIE):
    _VALID_URL = r'https?://(?:www\.)?vidio\.com/premier/(?P<id>\d+)/(?P<display_id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://www.vidio.com/premier/2885/badai-pasti-berlalu',
        'playlist_mincount': 14,
    }, {
        # Series with both free and premier-exclusive videos
        'url': 'https://www.vidio.com/premier/2567/sosmed',
        'only_matching': True,
    }]

    def _playlist_entries(self, playlist_url, display_id):
        """Yield watch-page URL results, following the API's pagination links."""
        page_num = 1
        while playlist_url:
            page = self._call_api(playlist_url, display_id, f'Downloading API JSON page {page_num}')
            for entry in page.get('data', []):
                yield self.url_result(entry['links']['watchpage'], 'Vidio', entry['id'])
            # 'next' is absent/None on the last page, ending the loop
            playlist_url = try_get(page, lambda x: x['links']['next'])
            page_num += 1

    def _real_extract(self, url):
        url, idata = unsmuggle_url(url, {})
        playlist_id, display_id = self._match_valid_url(url).groups()

        # A smuggled API URL means this invocation targets one season playlist.
        playlist_url = idata.get('url')
        if playlist_url:
            return self.playlist_result(
                self._playlist_entries(playlist_url, idata['id']),
                playlist_id=idata['id'], playlist_title=idata.get('title'))

        # Otherwise enumerate every playlist of the series and smuggle each
        # one's API URL back into this extractor.
        playlist_data = self._call_api(f'https://api.vidio.com/content_profiles/{playlist_id}/playlists', display_id)

        return self.playlist_from_matches(
            playlist_data.get('data', []), playlist_id=playlist_id, ie=self.ie_key(),
            getter=lambda data: smuggle_url(url, {
                'url': data['relationships']['videos']['links']['related'],
                'id': data['id'],
                'title': try_get(data, lambda x: x['attributes']['name']),
            }))
class VidioLiveIE(VidioBaseIE):
    _VALID_URL = r'https?://(?:www\.)?vidio\.com/live/(?P<id>\d+)-(?P<display_id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://www.vidio.com/live/204-sctv',
        'info_dict': {
            'id': '204',
            'title': 'SCTV',
            'uploader': 'SCTV',
            'uploader_id': 'sctv',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }, {
        # Premier-exclusive livestream
        'url': 'https://www.vidio.com/live/6362-tvn',
        'only_matching': True,
    }, {
        # DRM premier-exclusive livestream
        'url': 'https://www.vidio.com/live/6299-bein-1',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Extract a livestream; premium streams go through a separate sources API."""
        video_id, display_id = self._match_valid_url(url).groups()
        stream_data = self._call_api(
            f'https://www.vidio.com/api/livestreamings/{video_id}/detail', display_id)
        stream_meta = stream_data['livestreamings'][0]
        user = stream_data.get('users', [{}])[0]

        title = stream_meta.get('title')
        username = user.get('username')

        formats = []
        if stream_meta.get('is_drm'):
            if not self.get_param('allow_unplayable_formats'):
                self.report_drm(video_id)
        if stream_meta.get('is_premium'):
            # Premium streams expose sources only to subscribed users
            sources = self._download_json(
                f'https://www.vidio.com/interactions_stream.json?video_id={video_id}&type=livestreamings',
                display_id, note='Downloading premier API JSON')
            if not (sources.get('source') or sources.get('source_dash')):
                self.raise_login_required('This video is only available for registered users with the appropriate subscription')

            if str_or_none(sources.get('source')):
                # The HLS URL additionally needs a short-lived access token
                token_json = self._download_json(
                    f'https://www.vidio.com/live/{video_id}/tokens',
                    display_id, note='Downloading HLS token JSON', data=b'')
                formats.extend(self._extract_m3u8_formats(
                    sources['source'] + '?' + token_json.get('token', ''), display_id, 'mp4', 'm3u8_native'))
            if str_or_none(sources.get('source_dash')):
                # TODO: DASH source handling not implemented
                pass
        else:
            if stream_meta.get('stream_token_url'):
                token_json = self._download_json(
                    f'https://www.vidio.com/live/{video_id}/tokens',
                    display_id, note='Downloading HLS token JSON', data=b'')
                formats.extend(self._extract_m3u8_formats(
                    stream_meta['stream_token_url'] + '?' + token_json.get('token', ''),
                    display_id, 'mp4', 'm3u8_native'))
            if stream_meta.get('stream_dash_url'):
                # TODO: DASH source handling not implemented
                pass
            if stream_meta.get('stream_url'):
                # Token-less fallback HLS URL
                formats.extend(self._extract_m3u8_formats(
                    stream_meta['stream_url'], display_id, 'mp4', 'm3u8_native'))

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'is_live': True,
            'description': strip_or_none(stream_meta.get('description')),
            'thumbnail': stream_meta.get('image'),
            'like_count': int_or_none(stream_meta.get('like')),
            'dislike_count': int_or_none(stream_meta.get('dislike')),
            'formats': formats,
            'uploader': user.get('name'),
            'timestamp': parse_iso8601(stream_meta.get('start_time')),
            'uploader_id': username,
            'uploader_url': format_field(username, None, 'https://www.vidio.com/@%s'),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/livestreamfails.py | yt_dlp/extractor/livestreamfails.py | from .common import InfoExtractor
from ..utils import format_field, traverse_obj, unified_timestamp
class LivestreamfailsIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?livestreamfails\.com/(?:clip|post)/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'https://livestreamfails.com/clip/139200',
        'md5': '8a03aea1a46e94a05af6410337463102',
        'info_dict': {
            'id': '139200',
            'ext': 'mp4',
            'display_id': 'ConcernedLitigiousSalmonPeteZaroll-O8yo9W2L8OZEKhV2',
            'title': 'Streamer jumps off a trampoline at full speed',
            'creator': 'paradeev1ch',
            'thumbnail': r're:^https?://.+',
            'timestamp': 1656271785,
            'upload_date': '20220626',
        },
    }, {
        'url': 'https://livestreamfails.com/post/139200',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Fetch clip metadata from the API and map it onto an info dict."""
        clip_id = self._match_id(url)
        clip = self._download_json(f'https://api.livestreamfails.com/clip/{clip_id}', clip_id)

        # The media file itself lives on the CDN under the API-provided videoId
        return {
            'id': clip_id,
            'display_id': clip.get('sourceId'),
            'timestamp': unified_timestamp(clip.get('createdAt')),
            'url': f'https://livestreamfails-video-prod.b-cdn.net/video/{clip["videoId"]}',
            'title': clip.get('label'),
            'creator': traverse_obj(clip, ('streamer', 'label')),
            'thumbnail': format_field(clip, 'imageId', 'https://livestreamfails-image-prod.b-cdn.net/image/%s'),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/reddit.py | yt_dlp/extractor/reddit.py | import json
import urllib.parse
from .common import InfoExtractor
from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
parse_qs,
traverse_obj,
truncate_string,
try_get,
unescapeHTML,
update_url_query,
url_or_none,
urlencode_postdata,
)
class RedditIE(InfoExtractor):
_NETRC_MACHINE = 'reddit'
_VALID_URL = r'https?://(?:\w+\.)?reddit(?:media)?\.com/(?P<slug>(?:(?:r|user)/[^/]+/)?comments/(?P<id>[^/?#&]+))'
_TESTS = [{
'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
'info_dict': {
'id': 'zv89llsvexdz',
'ext': 'mp4',
'display_id': '6rrwyj',
'title': 'That small heart attack.',
'alt_title': 'That small heart attack.',
'thumbnail': r're:^https?://.*\.(?:jpg|png)',
'thumbnails': 'count:4',
'timestamp': 1501941939,
'upload_date': '20170805',
'uploader': 'Antw87',
'duration': 12,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'age_limit': 0,
'channel_id': 'videos',
},
'params': {
'skip_download': True,
},
}, {
# 1080p fallback format
'url': 'https://www.reddit.com/r/aww/comments/90bu6w/heat_index_was_110_degrees_so_we_offered_him_a/',
'md5': '8b5902cfda3006bf90faea7adf765a49',
'info_dict': {
'id': 'gyh95hiqc0b11',
'ext': 'mp4',
'display_id': '90bu6w',
'title': 'Heat index was 110 degrees so we offered him a cold drink. He went fo...',
'alt_title': 'Heat index was 110 degrees so we offered him a cold drink. He went for a full body soak instead',
'thumbnail': r're:^https?://.*\.(?:jpg|png)',
'thumbnails': 'count:7',
'timestamp': 1532051078,
'upload_date': '20180720',
'uploader': 'FootLoosePickleJuice',
'duration': 14,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'age_limit': 0,
'channel_id': 'aww',
},
}, {
# User post
'url': 'https://www.reddit.com/user/creepyt0es/comments/nip71r/i_plan_to_make_more_stickers_and_prints_check/',
'info_dict': {
'id': 'zasobba6wp071',
'ext': 'mp4',
'display_id': 'nip71r',
'title': 'I plan to make more stickers and prints! Check them out on my Etsy! O...',
'alt_title': 'I plan to make more stickers and prints! Check them out on my Etsy! Or get them through my Patreon. Links below.',
'thumbnail': r're:^https?://.*\.(?:jpg|png)',
'thumbnails': 'count:5',
'timestamp': 1621709093,
'upload_date': '20210522',
'uploader': 'creepyt0es',
'duration': 6,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'age_limit': 18,
'channel_id': 'u_creepyt0es',
},
'params': {
'skip_download': True,
},
}, {
# videos embedded in reddit text post
'url': 'https://www.reddit.com/r/KamenRider/comments/wzqkxp/finale_kamen_rider_revice_episode_50_family_to/',
'playlist_count': 2,
'info_dict': {
'id': 'wzqkxp',
'title': '[Finale] Kamen Rider Revice Episode 50 "Family to the End, Until the ...',
'alt_title': '[Finale] Kamen Rider Revice Episode 50 "Family to the End, Until the Day We Meet Again" Discussion',
'description': 'md5:5b7deb328062b164b15704c5fd67c335',
'uploader': 'TheTwelveYearOld',
'channel_id': 'KamenRider',
'comment_count': int,
'like_count': int,
'dislike_count': int,
'age_limit': 0,
'timestamp': 1661676059.0,
'upload_date': '20220828',
},
}, {
# crossposted reddit-hosted media
'url': 'https://www.reddit.com/r/dumbfuckers_club/comments/zjjw82/cringe/',
'md5': '746180895c7b75a9d6b05341f507699a',
'info_dict': {
'id': 'a1oneun6pa5a1',
'ext': 'mp4',
'display_id': 'zjjw82',
'title': 'Cringe',
'alt_title': 'Cringe',
'uploader': 'Otaku-senpai69420',
'thumbnail': r're:^https?://.*\.(?:jpg|png)',
'upload_date': '20221212',
'timestamp': 1670812309,
'duration': 16,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'age_limit': 0,
'channel_id': 'dumbfuckers_club',
},
}, {
# post link without subreddit
'url': 'https://www.reddit.com/comments/124pp33',
'md5': '15eec9d828adcef4468b741a7e45a395',
'info_dict': {
'id': 'antsenjc2jqa1',
'ext': 'mp4',
'display_id': '124pp33',
'title': 'Harmless prank of some old friends',
'alt_title': 'Harmless prank of some old friends',
'uploader': 'Dudezila',
'channel_id': 'ContagiousLaughter',
'duration': 17,
'upload_date': '20230328',
'timestamp': 1680012043,
'thumbnail': r're:^https?://.*\.(?:jpg|png)',
'age_limit': 0,
'comment_count': int,
'dislike_count': int,
'like_count': int,
},
}, {
# quarantined subreddit post
'url': 'https://old.reddit.com/r/GenZedong/comments/12fujy3/based_hasan/',
'md5': '3156ea69e3c1f1b6259683c5abd36e71',
'info_dict': {
'id': '8bwtclfggpsa1',
'ext': 'mp4',
'display_id': '12fujy3',
'title': 'Based Hasan?',
'alt_title': 'Based Hasan?',
'uploader': 'KingNigelXLII',
'channel_id': 'GenZedong',
'duration': 16,
'upload_date': '20230408',
'timestamp': 1680979138,
'age_limit': 0,
'comment_count': int,
'dislike_count': int,
'like_count': int,
},
'skip': 'Requires account that has opted-in to the GenZedong subreddit',
}, {
# subtitles in HLS manifest
'url': 'https://www.reddit.com/r/Unexpected/comments/1cl9h0u/the_insurance_claim_will_be_interesting/',
'info_dict': {
'id': 'a2mdj5d57qyc1',
'ext': 'mp4',
'display_id': '1cl9h0u',
'title': 'The insurance claim will be interesting',
'alt_title': 'The insurance claim will be interesting',
'uploader': 'darrenpauli',
'channel_id': 'Unexpected',
'duration': 53,
'upload_date': '20240506',
'timestamp': 1714966382,
'age_limit': 0,
'comment_count': int,
'dislike_count': int,
'like_count': int,
'subtitles': {'en': 'mincount:1'},
},
'params': {
'skip_download': True,
},
}, {
# subtitles from caption-url
'url': 'https://www.reddit.com/r/soccer/comments/1cxwzso/tottenham_1_0_newcastle_united_james_maddison_31/',
'info_dict': {
'id': 'xbmj4t3igy1d1',
'ext': 'mp4',
'display_id': '1cxwzso',
'title': 'Tottenham [1] - 0 Newcastle United - James Maddison 31\'',
'alt_title': 'Tottenham [1] - 0 Newcastle United - James Maddison 31\'',
'uploader': 'Woodstovia',
'channel_id': 'soccer',
'duration': 30,
'upload_date': '20240522',
'timestamp': 1716373798,
'age_limit': 0,
'comment_count': int,
'dislike_count': int,
'like_count': int,
'subtitles': {'en': 'mincount:1'},
},
'params': {
'skip_download': True,
'writesubtitles': True,
},
}, {
# "gated" subreddit post
'url': 'https://old.reddit.com/r/ketamine/comments/degtjo/when_the_k_hits/',
'info_dict': {
'id': 'gqsbxts133r31',
'ext': 'mp4',
'display_id': 'degtjo',
'title': 'When the K hits',
'alt_title': 'When the K hits',
'uploader': '[deleted]',
'channel_id': 'ketamine',
'comment_count': int,
'like_count': int,
'dislike_count': int,
'age_limit': 18,
'duration': 34,
'thumbnail': r're:https?://.+/.+\.(?:jpg|png)',
'timestamp': 1570438713.0,
'upload_date': '20191007',
},
}, {
'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',
'only_matching': True,
}, {
# imgur
'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
'only_matching': True,
}, {
# imgur @ old reddit
'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
'only_matching': True,
}, {
# streamable
'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/',
'only_matching': True,
}, {
# youtube
'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',
'only_matching': True,
}, {
# reddit video @ nm reddit
'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/',
'only_matching': True,
}, {
'url': 'https://www.redditmedia.com/r/serbia/comments/pu9wbx/ako_vu%C4%8Di%C4%87_izgubi_izbore_ja_%C4%87u_da_crknem/',
'only_matching': True,
}]
def _perform_login(self, username, password):
captcha = self._download_json(
'https://www.reddit.com/api/requires_captcha/login.json', None,
'Checking login requirement')['required']
if captcha:
raise ExtractorError('Reddit is requiring captcha before login', expected=True)
login = self._download_json(
f'https://www.reddit.com/api/login/{username}', None, data=urlencode_postdata({
'op': 'login-main',
'user': username,
'passwd': password,
'api_type': 'json',
}), note='Logging in', errnote='Login request failed')
errors = '; '.join(traverse_obj(login, ('json', 'errors', ..., 1)))
if errors:
raise ExtractorError(f'Unable to login, Reddit API says {errors}', expected=True)
elif not traverse_obj(login, ('json', 'data', 'cookie', {str})):
raise ExtractorError('Unable to login, no cookie was returned')
def _real_initialize(self):
# Set cookie to opt-in to age-restricted subreddits
self._set_cookie('reddit.com', 'over18', '1')
# Set cookie to opt-in to "gated" subreddits
options = traverse_obj(self._get_cookies('https://www.reddit.com/'), (
'_options', 'value', {urllib.parse.unquote}, {json.loads}, {dict})) or {}
options['pref_gated_sr_optin'] = True
self._set_cookie('reddit.com', '_options', urllib.parse.quote(json.dumps(options)))
def _get_subtitles(self, video_id):
# Fallback if there were no subtitles provided by DASH or HLS manifests
caption_url = f'https://v.redd.it/{video_id}/wh_ben_en.vtt'
if self._is_valid_url(caption_url, video_id, item='subtitles'):
return {'en': [{'url': caption_url}]}
    def _real_extract(self, url):
        """Extract a Reddit post.

        Handles, in order: API error responses, text posts with embedded
        reddit-hosted media, reddit-hosted (v.redd.it) video posts (including
        crossposts), still-processing v.redd.it links, and finally a
        transparent redirect for media hosted elsewhere.
        """
        slug, video_id = self._match_valid_url(url).group('slug', 'id')
        try:
            data = self._download_json(
                f'https://www.reddit.com/{slug}/.json', video_id, expected_status=403)
        except ExtractorError as e:
            # A non-JSON body means the API refused to answer at all
            if isinstance(e.cause, json.JSONDecodeError):
                if self._get_cookies('https://www.reddit.com/').get('reddit_session'):
                    raise ExtractorError('Your IP address is unable to access the Reddit API', expected=True)
                self.raise_login_required('Account authentication is required')
            raise
        if traverse_obj(data, 'error') == 403:
            reason = data.get('reason')
            if reason == 'quarantined':
                self.raise_login_required('Quarantined subreddit; an account that has opted in is required')
            elif reason == 'private':
                self.raise_login_required('Private subreddit; an account that has been approved is required')
            else:
                raise ExtractorError(f'HTTP Error 403 Forbidden; reason given: {reason}')
        data = data[0]['data']['children'][0]['data']
        video_url = data['url']
        thumbnails = []
        def add_thumbnail(src):
            # Append one thumbnail entry from a preview source/resolution dict
            if not isinstance(src, dict):
                return
            thumbnail_url = url_or_none(src.get('url'))
            if not thumbnail_url:
                return
            thumbnails.append({
                'url': unescapeHTML(thumbnail_url),
                'width': int_or_none(src.get('width')),
                'height': int_or_none(src.get('height')),
                'http_headers': {'Accept': '*/*'},
            })
        for image in try_get(data, lambda x: x['preview']['images']) or []:
            if not isinstance(image, dict):
                continue
            add_thumbnail(image.get('source'))
            resolutions = image.get('resolutions')
            if isinstance(resolutions, list):
                for resolution in resolutions:
                    add_thumbnail(resolution)
        # Metadata shared by every return path below
        info = {
            'thumbnails': thumbnails,
            'age_limit': {True: 18, False: 0}.get(data.get('over_18')),
            **traverse_obj(data, {
                'title': ('title', {truncate_string(left=72)}),
                'alt_title': ('title', {str}),
                'description': ('selftext', {str}, filter),
                'timestamp': ('created_utc', {float_or_none}),
                'uploader': ('author', {str}),
                'channel_id': ('subreddit', {str}),
                'like_count': ('ups', {int_or_none}),
                'dislike_count': ('downs', {int_or_none}),
                'comment_count': ('num_comments', {int_or_none}),
            }),
        }
        parsed_url = urllib.parse.urlparse(video_url)
        # Check for embeds in text posts, or else raise to avoid recursing into the same reddit URL
        if 'reddit.com' in parsed_url.netloc and f'/{video_id}/' in parsed_url.path:
            entries = []
            for media in traverse_obj(data, ('media_metadata', ...), expected_type=dict):
                if not media.get('id') or media.get('e') != 'RedditVideo':
                    continue
                formats = []
                if media.get('hlsUrl'):
                    formats.extend(self._extract_m3u8_formats(
                        unescapeHTML(media['hlsUrl']), video_id, 'mp4', m3u8_id='hls', fatal=False))
                if media.get('dashUrl'):
                    formats.extend(self._extract_mpd_formats(
                        unescapeHTML(media['dashUrl']), video_id, mpd_id='dash', fatal=False))
                if formats:
                    entries.append({
                        'id': media['id'],
                        'display_id': video_id,
                        'formats': formats,
                        **info,
                    })
            if entries:
                return self.playlist_result(entries, video_id, **info)
            self.raise_no_formats('No media found', expected=True, video_id=video_id)
            return {**info, 'id': video_id}
        # Check if media is hosted on reddit:
        reddit_video = traverse_obj(data, (
            (None, ('crosspost_parent_list', ...)), ('secure_media', 'media'), 'reddit_video'), get_all=False)
        if reddit_video:
            playlist_urls = [
                try_get(reddit_video, lambda x: unescapeHTML(x[y]))
                for y in ('dash_url', 'hls_url')
            ]

            # Update video_id
            display_id = video_id
            video_id = self._search_regex(
                r'https?://v\.redd\.it/(?P<id>[^/?#&]+)', reddit_video['fallback_url'],
                'video_id', default=display_id)

            dash_playlist_url = playlist_urls[0] or f'https://v.redd.it/{video_id}/DASHPlaylist.mpd'
            hls_playlist_url = playlist_urls[1] or f'https://v.redd.it/{video_id}/HLSPlaylist.m3u8'
            # Request the subtitle-enabled HLS variant via the 'f' query parameter
            qs = traverse_obj(parse_qs(hls_playlist_url), {
                'f': ('f', 0, {lambda x: ','.join([x, 'subsAll']) if x else 'hd,subsAll'}),
            })
            hls_playlist_url = update_url_query(hls_playlist_url, qs)

            # fallback_url is a plain progressive mp4 (video-only per 'acodec': 'none')
            formats = [{
                'url': unescapeHTML(reddit_video['fallback_url']),
                'height': int_or_none(reddit_video.get('height')),
                'width': int_or_none(reddit_video.get('width')),
                'tbr': int_or_none(reddit_video.get('bitrate_kbps')),
                'acodec': 'none',
                'vcodec': 'h264',
                'ext': 'mp4',
                'format_id': 'fallback',
                'format_note': 'DASH video, mp4_dash',
            }]
            hls_fmts, subtitles = self._extract_m3u8_formats_and_subtitles(
                hls_playlist_url, display_id, 'mp4', m3u8_id='hls', fatal=False)
            formats.extend(hls_fmts)
            dash_fmts, dash_subs = self._extract_mpd_formats_and_subtitles(
                dash_playlist_url, display_id, mpd_id='dash', fatal=False)
            formats.extend(dash_fmts)
            self._merge_subtitles(dash_subs, target=subtitles)

            return {
                **info,
                'id': video_id,
                'display_id': display_id,
                'formats': formats,
                # Fall back to probing the well-known VTT URL (see _get_subtitles)
                'subtitles': subtitles or self.extract_subtitles(video_id),
                'duration': int_or_none(reddit_video.get('duration')),
            }

        if parsed_url.netloc == 'v.redd.it':
            # Reddit-hosted but no reddit_video metadata yet: still transcoding
            self.raise_no_formats('This video is processing', expected=True, video_id=video_id)
            return {
                **info,
                'id': parsed_url.path.split('/')[1],
                'display_id': video_id,
            }

        # Not hosted on reddit, must continue extraction
        return {
            **info,
            'display_id': video_id,
            '_type': 'url_transparent',
            'url': video_url,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/europa.py | yt_dlp/extractor/europa.py | from .common import InfoExtractor
from ..utils import (
int_or_none,
orderedSet,
parse_duration,
parse_iso8601,
parse_qs,
qualities,
traverse_obj,
unified_strdate,
xpath_text,
)
class EuropaIE(InfoExtractor):
_WORKING = False
_VALID_URL = r'https?://ec\.europa\.eu/avservices/(?:video/player|audio/audioDetails)\.cfm\?.*?\bref=(?P<id>[A-Za-z0-9-]+)'
_TESTS = [{
'url': 'http://ec.europa.eu/avservices/video/player.cfm?ref=I107758',
'md5': '574f080699ddd1e19a675b0ddf010371',
'info_dict': {
'id': 'I107758',
'ext': 'mp4',
'title': 'TRADE - Wikileaks on TTIP',
'description': 'NEW LIVE EC Midday press briefing of 11/08/2015',
'thumbnail': r're:^https?://.*\.jpg$',
'upload_date': '20150811',
'duration': 34,
'view_count': int,
'formats': 'mincount:3',
},
}, {
'url': 'http://ec.europa.eu/avservices/video/player.cfm?sitelang=en&ref=I107786',
'only_matching': True,
}, {
'url': 'http://ec.europa.eu/avservices/audio/audioDetails.cfm?ref=I-109295&sitelang=en',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
playlist = self._download_xml(
f'http://ec.europa.eu/avservices/video/player/playlist.cfm?ID={video_id}', video_id)
def get_item(type_, preference):
items = {}
for item in playlist.findall(f'./info/{type_}/item'):
lang, label = xpath_text(item, 'lg', default=None), xpath_text(item, 'label', default=None)
if lang and label:
items[lang] = label.strip()
for p in preference:
if items.get(p):
return items[p]
query = parse_qs(url)
preferred_lang = query.get('sitelang', ('en', ))[0]
preferred_langs = orderedSet((preferred_lang, 'en', 'int'))
title = get_item('title', preferred_langs) or video_id
description = get_item('description', preferred_langs)
thumbnail = xpath_text(playlist, './info/thumburl', 'thumbnail')
upload_date = unified_strdate(xpath_text(playlist, './info/date', 'upload date'))
duration = parse_duration(xpath_text(playlist, './info/duration', 'duration'))
view_count = int_or_none(xpath_text(playlist, './info/views', 'views'))
language_preference = qualities(preferred_langs[::-1])
formats = []
for file_ in playlist.findall('./files/file'):
video_url = xpath_text(file_, './url')
if not video_url:
continue
lang = xpath_text(file_, './lg')
formats.append({
'url': video_url,
'format_id': lang,
'format_note': xpath_text(file_, './lglabel'),
'language_preference': language_preference(lang),
})
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
'duration': duration,
'view_count': view_count,
'formats': formats,
}
class EuroParlWebstreamIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://multimedia\.europarl\.europa\.eu/
(?:\w+/)?webstreaming/(?:[\w-]+_)?(?P<id>[\w-]+)
'''
_TESTS = [{
'url': 'https://multimedia.europarl.europa.eu/pl/webstreaming/plenary-session_20220914-0900-PLENARY',
'info_dict': {
'id': '62388b15-d85b-4add-99aa-ba12ccf64f0d',
'display_id': '20220914-0900-PLENARY',
'ext': 'mp4',
'title': 'Plenary session',
'release_timestamp': 1663139069,
'release_date': '20220914',
},
'params': {
'skip_download': True,
},
}, {
# live webstream
'url': 'https://multimedia.europarl.europa.eu/en/webstreaming/euroscola_20221115-1000-SPECIAL-EUROSCOLA',
'info_dict': {
'ext': 'mp4',
'id': '510eda7f-ba72-161b-7ee7-0e836cd2e715',
'release_timestamp': 1668502800,
'title': 'Euroscola 2022-11-15 19:21',
'release_date': '20221115',
'live_status': 'is_live',
},
'skip': 'not live anymore',
}, {
'url': 'https://multimedia.europarl.europa.eu/en/webstreaming/committee-on-culture-and-education_20230301-1130-COMMITTEE-CULT',
'info_dict': {
'id': '7355662c-8eac-445e-4bb9-08db14b0ddd7',
'display_id': '20230301-1130-COMMITTEE-CULT',
'ext': 'mp4',
'release_date': '20230301',
'title': 'Committee on Culture and Education',
'release_timestamp': 1677666641,
},
}, {
# live stream
'url': 'https://multimedia.europarl.europa.eu/en/webstreaming/committee-on-environment-public-health-and-food-safety_20230524-0900-COMMITTEE-ENVI',
'info_dict': {
'id': 'e4255f56-10aa-4b3c-6530-08db56d5b0d9',
'ext': 'mp4',
'release_date': '20230524',
'title': r're:Committee on Environment, Public Health and Food Safety \d{4}-\d{2}-\d{2}\s\d{2}:\d{2}',
'release_timestamp': 1684911541,
'live_status': 'is_live',
},
'skip': 'Not live anymore',
}, {
'url': 'https://multimedia.europarl.europa.eu/en/webstreaming/20240320-1345-SPECIAL-PRESSER',
'info_dict': {
'id': 'c1f11567-5b52-470a-f3e1-08dc3c216ace',
'display_id': '20240320-1345-SPECIAL-PRESSER',
'ext': 'mp4',
'release_date': '20240320',
'title': 'md5:7c6c814cac55dea5e2d87bf8d3db2234',
'release_timestamp': 1710939767,
},
}, {
'url': 'https://multimedia.europarl.europa.eu/webstreaming/briefing-for-media-on-2024-european-elections_20240429-1000-SPECIAL-OTHER',
'only_matching': True,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
webpage_nextjs = self._search_nextjs_data(webpage, display_id)['props']['pageProps']
json_info = self._download_json(
'https://acs-api.europarl.connectedviews.eu/api/FullMeeting', display_id,
query={
'api-version': 1.0,
'tenantId': 'bae646ca-1fc8-4363-80ba-2c04f06b4968',
'externalReference': display_id,
})
formats, subtitles = [], {}
for hls_url in traverse_obj(json_info, ((('meetingVideo'), ('meetingVideos', ...)), 'hlsUrl')):
fmt, subs = self._extract_m3u8_formats_and_subtitles(hls_url, display_id)
formats.extend(fmt)
self._merge_subtitles(subs, target=subtitles)
return {
'id': json_info['id'],
'display_id': display_id,
'title': traverse_obj(webpage_nextjs, (('mediaItem', 'title'), ('title', )), get_all=False),
'formats': formats,
'subtitles': subtitles,
'release_timestamp': parse_iso8601(json_info.get('startDateTime')),
'is_live': traverse_obj(webpage_nextjs, ('mediaItem', 'mediaSubType')) == 'Live',
}
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/appleconnect.py | yt_dlp/extractor/appleconnect.py | import time
from .common import InfoExtractor
from ..utils import (
ExtractorError,
extract_attributes,
float_or_none,
jwt_decode_hs256,
jwt_encode,
parse_resolution,
qualities,
unified_strdate,
update_url,
url_or_none,
urljoin,
)
from ..utils.traversal import (
find_element,
require,
traverse_obj,
)
class AppleConnectIE(InfoExtractor):
IE_NAME = 'apple:music:connect'
IE_DESC = 'Apple Music Connect'
_BASE_URL = 'https://music.apple.com'
_QUALITIES = {
'provisionalUploadVideo': None,
'sdVideo': 480,
'sdVideoWithPlusAudio': 480,
'sd480pVideo': 480,
'720pHdVideo': 720,
'1080pHdVideo': 1080,
}
_VALID_URL = r'https?://music\.apple\.com/[\w-]+/post/(?P<id>\d+)'
_TESTS = [{
'url': 'https://music.apple.com/us/post/1018290019',
'info_dict': {
'id': '1018290019',
'ext': 'm4v',
'title': 'Energy',
'duration': 177.911,
'thumbnail': r're:https?://.+\.png',
'upload_date': '20150710',
'uploader': 'Drake',
},
}, {
'url': 'https://music.apple.com/us/post/1016746627',
'info_dict': {
'id': '1016746627',
'ext': 'm4v',
'title': 'Body Shop (Madonna) - Chellous Lima (Acoustic Cover)',
'duration': 210.278,
'thumbnail': r're:https?://.+\.png',
'upload_date': '20150706',
'uploader': 'Chellous Lima',
},
}]
_jwt = None
@staticmethod
def _jwt_is_expired(token):
return jwt_decode_hs256(token)['exp'] - time.time() < 120
    def _get_token(self, webpage, video_id):
        """Return a web-playback JWT, reusing the cached one unless it is
        missing or near expiry, otherwise scraping a fresh token from the
        page's player JS bundle."""
        if self._jwt and not self._jwt_is_expired(self._jwt):
            return self._jwt

        # The token is embedded in the crossorigin <script> referenced by the page
        js_url = traverse_obj(webpage, (
            {find_element(tag='script', attr='crossorigin', value='', html=True)},
            {extract_attributes}, 'src', {urljoin(self._BASE_URL)}, {require('JS URL')}))
        js = self._download_webpage(
            js_url, video_id, 'Downloading token JS', 'Unable to download token JS')
        # Encode the expected JWT header ({"alg": "ES256", "kid": "WebPlayKid"})
        # and use its base64url form as a literal anchor for the quoted token
        header = jwt_encode({}, '', headers={'alg': 'ES256', 'kid': 'WebPlayKid'}).split('.')[0]
        self._jwt = self._search_regex(
            fr'(["\'])(?P<jwt>{header}(?:\.[\w-]+){{2}})\1', js, 'JSON Web Token', group='jwt')
        if self._jwt_is_expired(self._jwt):
            raise ExtractorError('The fetched token is already expired')

        return self._jwt
    def _real_extract(self, url):
        """Fetch uploaded-video metadata from the Apple Music AMP API and
        build one direct-URL format per available asset quality."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        videos = self._download_json(
            'https://amp-api.music.apple.com/v1/catalog/us/uploaded-videos',
            video_id, headers={
                'Authorization': f'Bearer {self._get_token(webpage, video_id)}',
                'Origin': self._BASE_URL,
            }, query={'ids': video_id, 'l': 'en-US'})
        attributes = traverse_obj(videos, (
            'data', ..., 'attributes', any, {require('video information')}))

        formats = []
        # Rank known quality labels by their position in _QUALITIES
        quality = qualities(list(self._QUALITIES.keys()))
        for format_id, src_url in traverse_obj(attributes, (
            'assetTokens', {dict.items}, lambda _, v: url_or_none(v[1]),
        )):
            formats.append({
                'ext': 'm4v',
                'format_id': format_id,
                'height': self._QUALITIES.get(format_id),
                'quality': quality(format_id),
                'url': src_url,
                # The URL path (sans query) may itself encode the resolution
                **parse_resolution(update_url(src_url, query=None), lenient=True),
            })

        return {
            'id': video_id,
            'formats': formats,
            'thumbnail': self._html_search_meta(
                ['og:image', 'og:image:secure_url', 'twitter:image'], webpage),
            **traverse_obj(attributes, {
                'title': ('name', {str}),
                'duration': ('durationInMilliseconds', {float_or_none(scale=1000)}),
                'upload_date': ('uploadDate', {unified_strdate}),
                'uploader': (('artistName', 'uploadingArtistName'), {str}, any),
                'webpage_url': ('postUrl', {url_or_none}),
            }),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/hitrecord.py | yt_dlp/extractor/hitrecord.py | from .common import InfoExtractor
from ..utils import (
clean_html,
float_or_none,
int_or_none,
try_get,
)
class HitRecordIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?hitrecord\.org/records/(?P<id>\d+)'
_TEST = {
'url': 'https://hitrecord.org/records/2954362',
'md5': 'fe1cdc2023bce0bbb95c39c57426aa71',
'info_dict': {
'id': '2954362',
'ext': 'mp4',
'title': 'A Very Different World (HITRECORD x ACLU)',
'description': 'md5:e62defaffab5075a5277736bead95a3d',
'duration': 139.327,
'timestamp': 1471557582,
'upload_date': '20160818',
'uploader': 'Zuzi.C12',
'uploader_id': '362811',
'view_count': int,
'like_count': int,
'comment_count': int,
'tags': list,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._download_json(
f'https://hitrecord.org/api/web/records/{video_id}', video_id)
title = video['title']
video_url = video['source_url']['mp4_url']
tags = None
tags_list = try_get(video, lambda x: x['tags'], list)
if tags_list:
tags = [
t['text']
for t in tags_list
if isinstance(t, dict) and t.get('text')
and isinstance(t['text'], str)]
return {
'id': video_id,
'url': video_url,
'title': title,
'description': clean_html(video.get('body')),
'duration': float_or_none(video.get('duration'), 1000),
'timestamp': int_or_none(video.get('created_at_i')),
'uploader': try_get(
video, lambda x: x['user']['username'], str),
'uploader_id': try_get(
video, lambda x: str(x['user']['id'])),
'view_count': int_or_none(video.get('total_views_count')),
'like_count': int_or_none(video.get('hearts_count')),
'comment_count': int_or_none(video.get('comments_count')),
'tags': tags,
}
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/lrt.py | yt_dlp/extractor/lrt.py | from .common import InfoExtractor
from ..utils import (
clean_html,
unified_timestamp,
url_or_none,
urljoin,
)
from ..utils.traversal import traverse_obj
class LRTStreamIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?lrt\.lt/mediateka/tiesiogiai/(?P<id>[\w-]+)'
_TESTS = [{
'url': 'https://www.lrt.lt/mediateka/tiesiogiai/lrt-opus',
'info_dict': {
'id': 'lrt-opus',
'live_status': 'is_live',
'title': 're:^LRT Opus.+$',
'ext': 'mp4',
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
# TODO: Use _search_nextjs_v13_data once fixed
get_stream_url = self._search_regex(
r'\\"get_streams_url\\":\\"([^"]+)\\"', webpage, 'stream URL')
streams_data = self._download_json(get_stream_url, video_id)
formats, subtitles = [], {}
for stream_url in traverse_obj(streams_data, (
'response', 'data', lambda k, _: k.startswith('content'), {url_or_none})):
fmts, subs = self._extract_m3u8_formats_and_subtitles(
stream_url, video_id, 'mp4', m3u8_id='hls', live=True)
formats.extend(fmts)
subtitles = self._merge_subtitles(subtitles, subs)
return {
'id': video_id,
'formats': formats,
'subtitles': subtitles,
'is_live': True,
'title': self._og_search_title(webpage),
}
class LRTVODIE(InfoExtractor):
_VALID_URL = [
r'https?://(?:(?:www|archyvai)\.)?lrt\.lt/mediateka/irasas/(?P<id>[0-9]+)',
r'https?://(?:(?:www|archyvai)\.)?lrt\.lt/mediateka/video/[^?#]+\?(?:[^#]*&)?episode=(?P<id>[0-9]+)',
]
_TESTS = [{
# m3u8 download
'url': 'https://www.lrt.lt/mediateka/irasas/2000127261/greita-ir-gardu-sicilijos-ikvepta-klasikiniu-makaronu-su-baklazanais-vakariene',
'info_dict': {
'id': '2000127261',
'ext': 'mp4',
'title': 'Nustebinkite svečius klasikiniu makaronų su baklažanais receptu',
'description': 'md5:ad7d985f51b0dc1489ba2d76d7ed47fa',
'timestamp': 1604086200,
'upload_date': '20201030',
'tags': ['LRT TELEVIZIJA', 'Beatos virtuvė', 'Beata Nicholson', 'Makaronai', 'Baklažanai', 'Vakarienė', 'Receptas'],
'thumbnail': 'https://www.lrt.lt/img/2020/10/30/764041-126478-1287x836.jpg',
'channel': 'Beatos virtuvė',
},
}, {
# audio download
'url': 'https://www.lrt.lt/mediateka/irasas/1013074524/kita-tema',
'md5': 'fc982f10274929c66fdff65f75615cb0',
'info_dict': {
'id': '1013074524',
'ext': 'mp4',
'title': 'Kita tema',
'description': 'md5:1b295a8fc7219ed0d543fc228c931fb5',
'channel': 'Kita tema',
'timestamp': 1473087900,
'upload_date': '20160905',
},
}, {
'url': 'https://www.lrt.lt/mediateka/video/auksinis-protas-vasara?episode=2000420320&season=%2Fmediateka%2Fvideo%2Fauksinis-protas-vasara%2F2025',
'info_dict': {
'id': '2000420320',
'ext': 'mp4',
'title': 'Kuris senovės romėnų poetas aprašė Narcizo mitą?',
'description': 'Intelektinė viktorina. Ved. Arūnas Valinskas ir Andrius Tapinas.',
'channel': 'Auksinis protas. Vasara',
'thumbnail': 'https://www.lrt.lt/img/2025/06/09/2094343-987905-1287x836.jpg',
'tags': ['LRT TELEVIZIJA', 'Auksinis protas'],
'timestamp': 1749851040,
'upload_date': '20250613',
},
}, {
'url': 'https://archyvai.lrt.lt/mediateka/video/ziniu-riteriai-ir-damos?episode=49685&season=%2Fmediateka%2Fvideo%2Fziniu-riteriai-ir-damos%2F2013',
'only_matching': True,
}, {
'url': 'https://archyvai.lrt.lt/mediateka/irasas/2000077058/panorama-1989-baltijos-kelias',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
# TODO: Use _search_nextjs_v13_data once fixed
canonical_url = (
self._search_regex(r'\\"(?:article|data)\\":{[^}]*\\"url\\":\\"(/[^"]+)\\"', webpage, 'content URL', fatal=False)
or self._search_regex(r'<link\s+rel="canonical"\s*href="(/[^"]+)"', webpage, 'canonical URL'))
media = self._download_json(
'https://www.lrt.lt/servisai/stream_url/vod/media_info/',
video_id, query={'url': canonical_url})
jw_data = self._parse_jwplayer_data(
media['playlist_item'], video_id, base_url=url)
return {
**jw_data,
**traverse_obj(media, {
'id': ('id', {str}),
'title': ('title', {str}),
'description': ('content', {clean_html}),
'timestamp': ('date', {lambda x: x.replace('.', '/')}, {unified_timestamp}),
'tags': ('tags', ..., 'name', {str}),
}),
}
class LRTRadioIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?lrt\.lt/radioteka/irasas/(?P<id>\d+)/(?P<path>[^?#/]+)'
_TESTS = [{
# m3u8 download
'url': 'https://www.lrt.lt/radioteka/irasas/2000359728/nemarios-eiles-apie-pragarus-ir-skaistyklas-su-aiste-kiltinaviciute',
'info_dict': {
'id': '2000359728',
'ext': 'm4a',
'title': 'Nemarios eilės: apie pragarus ir skaistyklas su Aiste Kiltinavičiūte',
'description': 'md5:5eee9a0e86a55bf547bd67596204625d',
'timestamp': 1726143120,
'upload_date': '20240912',
'tags': 'count:5',
'thumbnail': r're:https?://.+/.+\.jpe?g',
'categories': ['Daiktiniai įrodymai'],
},
}, {
'url': 'https://www.lrt.lt/radioteka/irasas/2000304654/vakaras-su-knyga-svetlana-aleksijevic-cernobylio-malda-v-dalis?season=%2Fmediateka%2Faudio%2Fvakaras-su-knyga%2F2023',
'only_matching': True,
}]
def _real_extract(self, url):
video_id, path = self._match_valid_url(url).group('id', 'path')
media = self._download_json(
'https://www.lrt.lt/rest-api/media', video_id,
query={'url': f'/mediateka/irasas/{video_id}/{path}'})
return {
'id': video_id,
'formats': self._extract_m3u8_formats(media['playlist_item']['file'], video_id),
**traverse_obj(media, {
'title': ('title', {str}),
'tags': ('tags', ..., 'name', {str}),
'categories': ('playlist_item', 'category', {str}, filter, all, filter),
'description': ('content', {clean_html}, {str}),
'timestamp': ('date', {lambda x: x.replace('.', '/')}, {unified_timestamp}),
'thumbnail': ('playlist_item', 'image', {urljoin('https://www.lrt.lt')}),
}),
}
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/adobetv.py | yt_dlp/extractor/adobetv.py | from .common import InfoExtractor
from ..utils import (
ISO639Utils,
clean_html,
determine_ext,
float_or_none,
int_or_none,
join_nonempty,
url_or_none,
)
from ..utils.traversal import traverse_obj
class AdobeTVVideoIE(InfoExtractor):
    """Extractor for individual videos hosted on video.tv.adobe.com (also
    matched when embedded via iframe on other Adobe pages)."""
    IE_NAME = 'adobetv'
    _VALID_URL = r'https?://video\.tv\.adobe\.com/v/(?P<id>\d+)'
    _EMBED_REGEX = [r'<iframe[^>]+src=["\'](?P<url>(?:https?:)?//video\.tv\.adobe\.com/v/\d+)']
    _TESTS = [{
        'url': 'https://video.tv.adobe.com/v/2456',
        'md5': '43662b577c018ad707a63766462b1e87',
        'info_dict': {
            'id': '2456',
            'ext': 'mp4',
            'title': 'New experience with Acrobat DC',
            'description': 'New experience with Acrobat DC',
            'duration': 248.522,
            'thumbnail': r're:https?://images-tv\.adobe\.com/.+\.jpg',
        },
    }, {
        'url': 'https://video.tv.adobe.com/v/3463980/adobe-acrobat',
        'info_dict': {
            'id': '3463980',
            'ext': 'mp4',
            'title': 'Adobe Acrobat: How to Customize the Toolbar for Faster PDF Editing',
            'description': 'md5:94368ab95ae24f9c1bee0cb346e03dc3',
            'duration': 97.514,
            'thumbnail': r're:https?://images-tv\.adobe\.com/.+\.jpg',
        },
    }]
    _WEBPAGE_TESTS = [{
        # https://video.tv.adobe.com/v/3442499
        'url': 'https://business.adobe.com/dx-fragments/summit/2025/marquees/S335/ondemand.live.html',
        'info_dict': {
            'id': '3442499',
            'ext': 'mp4',
            'title': 'S335 - Beyond Personalization: Creating Intent-Based Experiences at Scale',
            'description': 'Beyond Personalization: Creating Intent-Based Experiences at Scale',
            'duration': 2906.8,
            'thumbnail': r're:https?://images-tv\.adobe\.com/.+\.jpg',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Player configuration is embedded in the page as `var bridge = {...}`
        video_data = self._search_json(
            r'var\s+bridge\s*=', webpage, 'bridged data', video_id)

        formats = []
        # Skip the 'playlist' pseudo-format and any entries without a valid src
        for source in traverse_obj(video_data, (
            'sources', lambda _, v: v['format'] != 'playlist' and url_or_none(v['src']),
        )):
            source_url = self._proto_relative_url(source['src'])
            if determine_ext(source_url) == 'm3u8':
                fmts = self._extract_m3u8_formats(
                    source_url, video_id, 'mp4', m3u8_id='hls', fatal=False)
            else:
                fmts = [{'url': source_url}]
            # Enrich every derived format with metadata from its source entry
            for fmt in fmts:
                fmt.update(traverse_obj(source, {
                    'duration': ('duration', {float_or_none(scale=1000)}),  # ms -> s
                    'filesize': ('kilobytes', {float_or_none(invscale=1000)}),  # kB -> B
                    'format_id': (('format', 'label'), {str}, all, {lambda x: join_nonempty(*x)}),
                    'height': ('height', {int_or_none}),
                    'tbr': ('bitrate', {int_or_none}),
                    'width': ('width', {int_or_none}),
                }))
            formats.extend(fmts)

        subtitles = {}
        for translation in traverse_obj(video_data, (
            'translations', lambda _, v: url_or_none(v['vttPath']),
        )):
            # Prefer the W3C language code; fall back to a short code derived
            # from the long language name, then 'und' (undetermined)
            lang = translation.get('language_w3c') or ISO639Utils.long2short(translation.get('language_medium')) or 'und'
            subtitles.setdefault(lang, []).append({
                'ext': 'vtt',
                'url': self._proto_relative_url(translation['vttPath']),
            })

        return {
            'id': video_id,
            'formats': formats,
            'subtitles': subtitles,
            **traverse_obj(video_data, {
                'title': ('title', {clean_html}),
                'description': ('description', {clean_html}, filter),
                'thumbnail': ('video', 'poster', {self._proto_relative_url}, {url_or_none}),
            }),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tube8.py | yt_dlp/extractor/tube8.py | import re
import urllib.parse
from .common import InfoExtractor
from ..aes import aes_decrypt_text
from ..utils import (
determine_ext,
format_field,
int_or_none,
str_to_int,
strip_or_none,
url_or_none,
)
class Tube8IE(InfoExtractor):
    """Extractor for tube8.com watch pages and embeds (currently marked
    non-working upstream via ``_WORKING = False``)."""
    _WORKING = False
    _VALID_URL = r'https?://(?:www\.)?tube8\.com/(?:[^/]+/)+(?P<display_id>[^/]+)/(?P<id>\d+)'
    _EMBED_REGEX = [r'<iframe[^>]+\bsrc=["\'](?P<url>(?:https?:)?//(?:www\.)?tube8\.com/embed/(?:[^/]+/)+\d+)']
    _TESTS = [{
        'url': 'http://www.tube8.com/teen/kasia-music-video/229795/',
        'md5': '65e20c48e6abff62ed0c3965fff13a39',
        'info_dict': {
            'id': '229795',
            'display_id': 'kasia-music-video',
            'ext': 'mp4',
            'description': 'hot teen Kasia grinding',
            'uploader': 'unknown',
            'title': 'Kasia music video',
            'age_limit': 18,
            'duration': 230,
            'categories': ['Teen'],
            'tags': ['dancing'],
        },
    }, {
        'url': 'http://www.tube8.com/shemale/teen/blonde-cd-gets-kidnapped-by-two-blacks-and-punished-for-being-a-slutty-girl/19569151/',
        'only_matching': True,
    }]

    def _extract_info(self, url, fatal=True):
        """Download the watch page and build the base info dict.

        Formats are collected from the player ``flashvars`` JSON and, failing
        that, from an inline ``flashvars.video_url`` assignment in the page.
        Returns a ``(webpage, info_dict)`` tuple so callers can scrape
        additional page-level metadata.
        """
        mobj = self._match_valid_url(url)
        video_id = mobj.group('id')
        display_id = (mobj.group('display_id')
                      if 'display_id' in mobj.groupdict()
                      else None) or mobj.group('id')

        webpage = self._download_webpage(
            url, display_id, headers={'Cookie': 'age_verified=1'})

        formats = []
        format_urls = set()  # dedupe URLs seen via flashvars and inline JS

        title = None
        thumbnail = None
        duration = None
        encrypted = False

        def extract_format(format_url, height=None):
            format_url = url_or_none(format_url)
            if not format_url or not format_url.startswith(('http', '//')):
                return
            if format_url in format_urls:
                return
            format_urls.add(format_url)
            # Bitrate/height are encoded in the URL path, e.g. ".../480P_600K/..."
            tbr = int_or_none(self._search_regex(
                r'[/_](\d+)[kK][/_]', format_url, 'tbr', default=None))
            if not height:
                height = int_or_none(self._search_regex(
                    r'[/_](\d+)[pP][/_]', format_url, 'height', default=None))
            if encrypted:
                # BUG FIX: decrypt the URL currently being processed. The
                # previous code referenced the enclosing scope's `video_url`,
                # which is not yet assigned when this closure first runs from
                # the flashvars quality loop (NameError) and would have been
                # the wrong URL in any case.
                format_url = aes_decrypt_text(
                    format_url, title, 32).decode('utf-8')
            formats.append({
                'url': format_url,
                'format_id': format_field(height, None, '%dp'),
                'height': height,
                'tbr': tbr,
            })

        flashvars = self._parse_json(
            self._search_regex(
                r'flashvars\s*=\s*({.+?});', webpage,
                'flashvars', default='{}'),
            display_id, fatal=False)

        if flashvars:
            title = flashvars.get('video_title')
            thumbnail = flashvars.get('image_url')
            duration = int_or_none(flashvars.get('video_duration'))
            encrypted = flashvars.get('encrypted') is True
            # Per-quality URLs are exposed under keys like "quality_720p"
            for key, value in flashvars.items():
                mobj = re.search(r'quality_(\d+)[pP]', key)
                if mobj:
                    extract_format(value, int(mobj.group(1)))
            video_url = flashvars.get('video_url')
            if video_url and determine_ext(video_url, None):
                extract_format(video_url)

        # Fallback: direct URL assigned in page JavaScript (percent-encoded)
        video_url = self._html_search_regex(
            r'flashvars\.video_url\s*=\s*(["\'])(?P<url>http.+?)\1',
            webpage, 'video url', default=None, group='url')
        if video_url:
            extract_format(urllib.parse.unquote(video_url))

        if not formats:
            if 'title="This video is no longer available"' in webpage:
                self.raise_no_formats(
                    f'Video {video_id} is no longer available', expected=True)

        if not title:
            title = self._html_search_regex(
                r'<h1[^>]*>([^<]+)', webpage, 'title')

        return webpage, {
            'id': video_id,
            'display_id': display_id,
            'title': strip_or_none(title),
            'thumbnail': thumbnail,
            'duration': duration,
            'age_limit': 18,
            'formats': formats,
        }

    def _real_extract(self, url):
        webpage, info = self._extract_info(url)

        # Last-resort title from the legacy player variable
        if not info['title']:
            info['title'] = self._html_search_regex(
                r'videoTitle\s*=\s*"([^"]+)', webpage, 'title')

        # Page-level metadata; all lookups are non-fatal since the markup
        # changes frequently
        description = self._html_search_regex(
            r'(?s)Description:</dt>\s*<dd>(.+?)</dd>', webpage, 'description', fatal=False)
        uploader = self._html_search_regex(
            r'<span class="username">\s*(.+?)\s*<',
            webpage, 'uploader', fatal=False)

        like_count = int_or_none(self._search_regex(
            r'rupVar\s*=\s*"(\d+)"', webpage, 'like count', fatal=False))
        dislike_count = int_or_none(self._search_regex(
            r'rdownVar\s*=\s*"(\d+)"', webpage, 'dislike count', fatal=False))
        view_count = str_to_int(self._search_regex(
            r'Views:\s*</dt>\s*<dd>([\d,\.]+)',
            webpage, 'view count', fatal=False))
        comment_count = str_to_int(self._search_regex(
            r'<span id="allCommentsCount">(\d+)</span>',
            webpage, 'comment count', fatal=False))

        category = self._search_regex(
            r'Category:\s*</dt>\s*<dd>\s*<a[^>]+href=[^>]+>([^<]+)',
            webpage, 'category', fatal=False)
        categories = [category] if category else None

        tags_str = self._search_regex(
            r'(?s)Tags:\s*</dt>\s*<dd>(.+?)</(?!a)',
            webpage, 'tags', fatal=False)
        tags = list(re.findall(
            r'<a[^>]+href=[^>]+>([^<]+)', tags_str)) if tags_str else None

        info.update({
            'description': description,
            'uploader': uploader,
            'view_count': view_count,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'comment_count': comment_count,
            'categories': categories,
            'tags': tags,
        })

        return info
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/epicon.py | yt_dlp/extractor/epicon.py | import re
from .common import InfoExtractor
from ..utils import ExtractorError
class EpiconIE(InfoExtractor):
    """Extractor for single titles on epicon.in (documentaries, movies,
    TV-show episodes)."""
    _VALID_URL = r'https?://(?:www\.)?epicon\.in/(?:documentaries|movies|tv-shows/[^/?#]+/[^/?#]+)/(?P<id>[^/?#]+)'
    _TESTS = [{
        'url': 'https://www.epicon.in/documentaries/air-battle-of-srinagar',
        'info_dict': {
            'id': 'air-battle-of-srinagar',
            'ext': 'mp4',
            'title': 'Air Battle of Srinagar',
            'description': 'md5:c4de2013af9bc05ae4392e4115d518d7',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }, {
        'url': 'https://www.epicon.in/movies/krit',
        'info_dict': {
            'id': 'krit',
            'ext': 'mp4',
            'title': 'Krit',
            'description': 'md5:c12b35dad915d48ccff7f013c79bab4a',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }, {
        'url': 'https://www.epicon.in/tv-shows/paapnaashini-ganga/season-1/vardaan',
        'info_dict': {
            'id': 'vardaan',
            'ext': 'mp4',
            'title': 'Paapnaashini Ganga - Season 1 - Ep 1 - VARDAAN',
            'description': 'md5:f517058c3d0402398eefa6242f4dd6ae',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }, {
        'url': 'https://www.epicon.in/movies/jayadev',
        'info_dict': {
            'id': 'jayadev',
            'ext': 'mp4',
            'title': 'Jayadev',
            'description': 'md5:09e349eecd8e585a3b6466904f19df6c',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The numeric content id needed by the player AJAX endpoint is only
        # exposed on the "add to my list" button
        cid = self._search_regex(r'class=\"mylist-icon\ iconclick\"\ id=\"(\d+)', webpage, 'cid')
        headers = {'content-type': 'application/x-www-form-urlencoded; charset=UTF-8'}
        data = f'cid={cid}&action=st&type=video'.encode()
        # BUG FIX: _download_json already returns the decoded JSON object;
        # the previous code wrapped it in _parse_json again, which would pass
        # a dict to json.loads and raise TypeError.
        data_json = self._download_json(
            'https://www.epicon.in/ajaxplayer/', video_id, headers=headers, data=data)

        if not data_json['success']:
            # The endpoint reports failures (e.g. geo/subscription) in-band
            raise ExtractorError(data_json['message'], expected=True)

        title = self._search_regex(r'setplaytitle=\"([^\"]+)', webpage, 'title')
        description = self._og_search_description(webpage) or None
        thumbnail = self._og_search_thumbnail(webpage) or None
        formats = self._extract_m3u8_formats(data_json['url']['video_url'], video_id)

        subtitles = {}
        for subtitle in data_json.get('subtitles', []):
            sub_url = subtitle.get('file')
            if not sub_url:
                continue
            subtitles.setdefault(subtitle.get('lang', 'English'), []).append({
                'url': self._proto_relative_url(sub_url),
            })

        return {
            'id': video_id,
            'formats': formats,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'subtitles': subtitles,
        }
class EpiconSeriesIE(InfoExtractor):
    """Playlist extractor for whole TV shows on epicon.in.

    The negative lookahead in the URL pattern keeps individual episode URLs
    (which contain a season segment) out of this extractor.
    """
    _VALID_URL = r'(?!.*season)https?://(?:www\.)?epicon\.in/tv-shows/(?P<id>[^/?#]+)'
    _TESTS = [{
        'url': 'https://www.epicon.in/tv-shows/1-of-something',
        'playlist_mincount': 5,
        'info_dict': {
            'id': '1-of-something',
        },
    }, {
        'url': 'https://www.epicon.in/tv-shows/eco-india-english',
        'playlist_mincount': 76,
        'info_dict': {
            'id': 'eco-india-english',
        },
    }, {
        'url': 'https://www.epicon.in/tv-shows/s/',
        'playlist_mincount': 25,
        'info_dict': {
            'id': 's',
        },
    }, {
        'url': 'https://www.epicon.in/tv-shows/ekaant',
        'playlist_mincount': 38,
        'info_dict': {
            'id': 'ekaant',
        },
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        # Each episode tray on the show page advertises its relative path in
        # a ct-tray-url attribute; turn every match into an EpiconIE result.
        entries = [
            self.url_result(f'https://www.epicon.in/{mobj.group(1)}', EpiconIE)
            for mobj in re.finditer(rf'ct-tray-url=\"(tv-shows/{playlist_id}/[^\"]+)', webpage)
        ]
        return self.playlist_result(entries, playlist_id=playlist_id)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/commonprotocols.py | yt_dlp/extractor/commonprotocols.py | import urllib.parse
from .common import InfoExtractor
class RtmpIE(InfoExtractor):
    """Pass-through handler for direct rtmp/rtmps/rtmpe/rtmpt links."""
    IE_DESC = False  # Do not list
    _VALID_URL = r'(?i)rtmp[est]?://.+'

    _TESTS = [{
        'url': 'rtmp://cp44293.edgefcs.net/ondemand?auth=daEcTdydfdqcsb8cZcDbAaCbhamacbbawaS-bw7dBb-bWG-GqpGFqCpNCnGoyL&aifp=v001&slist=public/unsecure/audio/2c97899446428e4301471a8cb72b4b97--audio--pmg-20110908-0900a_flv_aac_med_int.mp4',
        'only_matching': True,
    }, {
        'url': 'rtmp://edge.live.hitbox.tv/live/dimak',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # The URL scheme (rtmp, rtmpe, ...) doubles as the format id; the
        # stream itself is handed to the RTMP downloader untouched.
        single_format = {
            'url': url,
            'ext': 'flv',
            'format_id': urllib.parse.urlparse(url).scheme,
        }
        return {
            'id': self._generic_id(url),
            'title': self._generic_title(url),
            'formats': [single_format],
        }
class MmsIE(InfoExtractor):
    """Pass-through handler for direct mms:// links."""
    IE_DESC = False  # Do not list
    _VALID_URL = r'(?i)mms://.+'
    _TEST = {
        # Direct MMS link
        'url': 'mms://kentro.kaist.ac.kr/200907/MilesReid(0709).wmv',
        'info_dict': {
            'id': 'MilesReid(0709)',
            'ext': 'wmv',
            'title': 'MilesReid(0709)',
        },
        'params': {
            'skip_download': True,  # rtsp downloads, requiring mplayer or mpv
        },
    }

    def _real_extract(self, url):
        # Derive id/title from the URL itself and let the downloader deal
        # with the MMS/RTSP stream directly.
        media_id = self._generic_id(url)
        return {
            'id': media_id,
            'title': self._generic_title(url),
            'url': url,
        }
class ViewSourceIE(InfoExtractor):
    """Strips a browser 'view-source:' prefix and re-dispatches the URL."""
    IE_DESC = False
    _VALID_URL = r'view-source:(?P<url>.+)'

    _TEST = {
        'url': 'view-source:https://www.youtube.com/watch?v=BaW_jenozKc',
        'only_matching': True,
    }

    def _real_extract(self, url):
        # Hand the unwrapped URL back to the extraction machinery.
        inner_url = self._match_valid_url(url).group('url')
        return self.url_result(inner_url)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/vice.py | yt_dlp/extractor/vice.py | import functools
import hashlib
import json
import random
import time
from .adobepass import AdobePassIE
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
OnDemandPagedList,
clean_html,
int_or_none,
parse_age_limit,
str_or_none,
try_get,
)
class ViceBaseIE(InfoExtractor):
    def _call_api(self, resource, resource_key, resource_id, locale, fields, args=''):
        """Query Vice's GraphQL endpoint and return ``data[resource]``.

        ``fields`` is the raw GraphQL selection set; ``args`` (e.g. the
        pagination arguments used by ViceShowIE) is spliced verbatim into the
        argument list, so it must start with ', ' when non-empty.
        """
        return self._download_json(
            'https://video.vice.com/api/v1/graphql', resource_id, query={
                'query': '''{
  %s(locale: "%s", %s: "%s"%s) {
    %s
  }
}''' % (resource, locale, resource_key, resource_id, args, fields),  # noqa: UP031
            })['data'][resource]
class ViceIE(ViceBaseIE, AdobePassIE):
    """Extractor for video.vice.com / viceland.com / vicetv.com videos.

    Locked videos are TV-Everywhere protected and require Adobe Pass
    authentication (hence the AdobePassIE base). Currently marked
    non-working upstream via ``_WORKING = False``.
    """
    _WORKING = False
    IE_NAME = 'vice'
    _VALID_URL = r'https?://(?:(?:video|vms)\.vice|(?:www\.)?vice(?:land|tv))\.com/(?P<locale>[^/]+)/(?:video/[^/]+|embed)/(?P<id>[\da-f]{24})'
    _EMBED_REGEX = [r'<iframe\b[^>]+\bsrc=["\'](?P<url>(?:https?:)?//video\.vice\.com/[^/]+/embed/[\da-f]{24})']
    _TESTS = [{
        'url': 'https://video.vice.com/en_us/video/pet-cremator/58c69e38a55424f1227dc3f7',
        'info_dict': {
            'id': '58c69e38a55424f1227dc3f7',
            'ext': 'mp4',
            'title': '10 Questions You Always Wanted To Ask: Pet Cremator',
            'description': 'md5:fe856caacf61fe0e74fab15ce2b07ca5',
            'uploader': 'vice',
            'uploader_id': '57a204088cb727dec794c67b',
            'timestamp': 1489664942,
            'upload_date': '20170316',
            'age_limit': 14,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        # geo restricted to US
        'url': 'https://video.vice.com/en_us/video/the-signal-from-tolva/5816510690b70e6c5fd39a56',
        'info_dict': {
            'id': '5816510690b70e6c5fd39a56',
            'ext': 'mp4',
            'uploader': 'vice',
            'title': 'The Signal From Tölva',
            'description': 'md5:3927e3c79f9e8094606a2b3c5b5e55d5',
            'uploader_id': '57a204088cb727dec794c67b',
            'timestamp': 1477941983,
            'upload_date': '20161031',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'https://video.vice.com/alps/video/ulfs-wien-beruchtigste-grafitti-crew-part-1/581b12b60a0e1f4c0fb6ea2f',
        'info_dict': {
            'id': '581b12b60a0e1f4c0fb6ea2f',
            'ext': 'mp4',
            'title': 'ULFs - Wien berüchtigste Grafitti Crew - Part 1',
            'description': 'Zwischen Hinterzimmer-Tattoos und U-Bahnschächten erzählen uns die Ulfs, wie es ist, "süchtig nach Sachbeschädigung" zu sein.',
            'uploader': 'vice',
            'uploader_id': '57a204088cb727dec794c67b',
            'timestamp': 1485368119,
            'upload_date': '20170125',
            'age_limit': 14,
        },
        'params': {
            # AES-encrypted m3u8
            'skip_download': True,
        },
    }, {
        'url': 'https://video.vice.com/en_us/video/pizza-show-trailer/56d8c9a54d286ed92f7f30e4',
        'only_matching': True,
    }, {
        'url': 'https://video.vice.com/en_us/embed/57f41d3556a0a80f54726060',
        'only_matching': True,
    }, {
        'url': 'https://vms.vice.com/en_us/video/preplay/58c69e38a55424f1227dc3f7',
        'only_matching': True,
    }, {
        'url': 'https://www.viceland.com/en_us/video/thursday-march-1-2018/5a8f2d7ff1cdb332dd446ec1',
        'only_matching': True,
    }]
    # Adobe Pass software statement (a long-lived signed JWT identifying this
    # client to the TVE backend)
    _SOFTWARE_STATEMENT = 'eyJhbGciOiJSUzI1NiJ9.eyJzdWIiOiIwMTVjODBlZC04ZDcxLTQ4ZGEtOTZkZi00NzU5NjIwNzJlYTQiLCJuYmYiOjE2NjgwMTM0ODQsImlzcyI6ImF1dGguYWRvYmUuY29tIiwiaWF0IjoxNjY4MDEzNDg0fQ.CjhUnTrlh-bmYnEFHyC2Y4it5Y_Zfza1x66O4-ki5gBR7JT6aUunYI_YflXomQPACriMpObkITFz4grVaDwdd8Xp9hrQ2R0SwRBdaklkdy1_j68RqSP5PnexJIa0q_ThtOwfRBd5uGcb33nMJ9Qs92W4kVXuca0Ta-i7SJyWgXUaPDlRDdgyCL3hKj5wuM7qUIwrd9A5CMm-j3dMIBCDgw7X6TwRK65eUQe6gTWqcvL2yONHHTpmIfeOTUxGwwKFr29COOTBowm0VJ6HE08xjXCShP08Neusu-JsgkjzhkEbiDE2531EKgfAki_7WCd2JUZVsAsCusv4a1maokk6NA'

    def _real_extract(self, url):
        locale, video_id = self._match_valid_url(url).groups()

        # Basic metadata and the locked/rating flags come from GraphQL
        video = self._call_api('videos', 'id', video_id, locale, '''body
            locked
            rating
            thumbnail_url
            title''')[0]
        title = video['title'].strip()
        rating = video.get('rating')

        query = {}
        if video.get('locked'):
            # TVE-protected: obtain an Adobe Pass media token first
            resource = self._get_mvpd_resource(
                'VICELAND', title, video_id, rating)
            query['tvetoken'] = self._extract_mvpd_auth(
                url, video_id, 'VICELAND', resource, self._SOFTWARE_STATEMENT)

        # signature generation algorithm is reverse engineered from signatureGenerator in
        # webpack:///../shared/~/vice-player/dist/js/vice-player.js in
        # https://www.viceland.com/assets/common/js/web.vendor.bundle.js
        # new JS is located here https://vice-web-statics-cdn.vice.com/vice-player/player-embed.js
        exp = int(time.time()) + 1440
        query.update({
            'exp': exp,
            'sign': hashlib.sha512(f'{video_id}:GET:{exp}'.encode()).hexdigest(),
            'skipadstitching': 1,
            'platform': 'desktop',
            'rn': random.randint(10000, 100000),
        })

        try:
            preplay = self._download_json(
                f'https://vms.vice.com/{locale}/video/preplay/{video_id}',
                video_id, query=query)
        except ExtractorError as e:
            # The preplay endpoint returns structured error JSON on 400/401
            # (e.g. geo restriction, auth failure); surface its message
            if isinstance(e.cause, HTTPError) and e.cause.status in (400, 401):
                error = json.loads(e.cause.response.read().decode())
                error_message = error.get('error_description') or error['details']
                raise ExtractorError(f'{self.IE_NAME} said: {error_message}', expected=True)
            raise

        video_data = preplay['video']
        formats = self._extract_m3u8_formats(
            preplay['playURL'], video_id, 'mp4', 'm3u8_native')
        episode = video_data.get('episode') or {}
        channel = video_data.get('channel') or {}
        season = video_data.get('season') or {}

        subtitles = {}
        for subtitle in preplay.get('subtitleURLs', []):
            cc_url = subtitle.get('url')
            if not cc_url:
                continue
            language_code = try_get(subtitle, lambda x: x['languages'][0]['language_code'], str) or 'en'
            subtitles.setdefault(language_code, []).append({
                'url': cc_url,
            })

        return {
            'formats': formats,
            'id': video_id,
            'title': title,
            'description': clean_html(video.get('body')),
            'thumbnail': video.get('thumbnail_url'),
            'duration': int_or_none(video_data.get('video_duration')),
            'timestamp': int_or_none(video_data.get('created_at'), 1000),  # ms -> s
            'age_limit': parse_age_limit(video_data.get('video_rating') or rating),
            'series': try_get(video_data, lambda x: x['show']['base']['display_title'], str),
            'episode_number': int_or_none(episode.get('episode_number')),
            'episode_id': str_or_none(episode.get('id') or video_data.get('episode_id')),
            'season_number': int_or_none(season.get('season_number')),
            'season_id': str_or_none(season.get('id') or video_data.get('season_id')),
            'uploader': channel.get('name'),
            'uploader_id': str_or_none(channel.get('id')),
            'subtitles': subtitles,
        }
class ViceShowIE(ViceBaseIE):
    """Paged playlist extractor for whole Vice shows."""
    _WORKING = False
    IE_NAME = 'vice:show'
    _VALID_URL = r'https?://(?:video\.vice|(?:www\.)?vice(?:land|tv))\.com/(?P<locale>[^/]+)/show/(?P<id>[^/?#&]+)'
    _PAGE_SIZE = 25
    _TESTS = [{
        'url': 'https://video.vice.com/en_us/show/fck-thats-delicious',
        'info_dict': {
            'id': '57a2040c8cb727dec794c901',
            'title': 'F*ck, That’s Delicious',
            'description': 'The life and eating habits of rap’s greatest bon vivant, Action Bronson.',
        },
        'playlist_mincount': 64,
    }, {
        'url': 'https://www.vicetv.com/en_us/show/fck-thats-delicious',
        'only_matching': True,
    }]

    def _fetch_page(self, locale, show_id, page):
        # One GraphQL page of videos; the API is 1-indexed, hence page + 1.
        page_videos = self._call_api('videos', 'show_id', show_id, locale, '''body
            id
            url''', f', page: {page + 1}, per_page: {self._PAGE_SIZE}')
        yield from (
            self.url_result(video['url'], ViceIE.ie_key(), video.get('id'))
            for video in page_videos)

    def _real_extract(self, url):
        locale, display_id = self._match_valid_url(url).groups()

        # Resolve the URL slug into the show's canonical id/metadata first.
        show = self._call_api('shows', 'slug', display_id, locale, '''dek
            id
            title''')[0]
        show_id = show['id']

        paged_entries = OnDemandPagedList(
            functools.partial(self._fetch_page, locale, show_id),
            self._PAGE_SIZE)

        return self.playlist_result(
            paged_entries, show_id, show.get('title'), show.get('dek'))
class ViceArticleIE(ViceBaseIE):
    """Extractor for vice.com articles; delegates to the Vice or YouTube
    extractor for whichever video is embedded in the article body."""
    _WORKING = False
    IE_NAME = 'vice:article'
    _VALID_URL = r'https?://(?:www\.)?vice\.com/(?P<locale>[^/]+)/article/(?:[0-9a-z]{6}/)?(?P<id>[^?#]+)'

    _TESTS = [{
        'url': 'https://www.vice.com/en_us/article/on-set-with-the-woman-making-mormon-porn-in-utah',
        'info_dict': {
            'id': '58dc0a3dee202d2a0ccfcbd8',
            'ext': 'mp4',
            'title': 'Mormon War on Porn',
            'description': 'md5:1c5d91fe25fa8aa304f9def118b92dbf',
            'uploader': 'vice',
            'uploader_id': '57a204088cb727dec794c67b',
            'timestamp': 1491883129,
            'upload_date': '20170411',
            'age_limit': 17,
        },
        'params': {
            # AES-encrypted m3u8
            'skip_download': True,
        },
        'add_ie': [ViceIE.ie_key()],
    }, {
        'url': 'https://www.vice.com/en_us/article/how-to-hack-a-car',
        'md5': '13010ee0bc694ea87ec40724397c2349',
        'info_dict': {
            'id': '3jstaBeXgAs',
            'ext': 'mp4',
            'title': 'How to Hack a Car: Phreaked Out (Episode 2)',
            'description': 'md5:ee95453f7ff495db8efe14ae8bf56f30',
            'uploader': 'Motherboard',
            'uploader_id': 'MotherboardTV',
            'upload_date': '20140529',
        },
        'add_ie': [YoutubeIE.ie_key()],
    }, {
        'url': 'https://www.vice.com/en_us/article/znm9dx/karley-sciortino-slutever-reloaded',
        'md5': 'a7ecf64ee4fa19b916c16f4b56184ae2',
        'info_dict': {
            'id': '57f41d3556a0a80f54726060',
            'ext': 'mp4',
            'title': "Making The World's First Male Sex Doll",
            'description': 'md5:19b00b215b99961cf869c40fbe9df755',
            'uploader': 'vice',
            'uploader_id': '57a204088cb727dec794c67b',
            'timestamp': 1476919911,
            'upload_date': '20161019',
            'age_limit': 17,
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': [ViceIE.ie_key()],
    }, {
        'url': 'https://www.vice.com/en_us/article/cowboy-capitalists-part-1',
        'only_matching': True,
    }, {
        'url': 'https://www.vice.com/ru/article/big-night-out-ibiza-clive-martin-229',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        locale, display_id = self._match_valid_url(url).groups()

        # Fetch the article body (HTML) plus a fallback embed snippet
        article = self._call_api('articles', 'slug', display_id, locale, '''body
            embed_code''')[0]
        body = article['body']

        def _url_res(video_url, ie_key):
            # url_transparent so the article slug is kept as display_id
            return {
                '_type': 'url_transparent',
                'url': video_url,
                'display_id': display_id,
                'ie_key': ie_key,
            }

        # Prefer a native Vice embed, then a YouTube embed, then the raw
        # data-video-url attribute from the embed code
        vice_url = ViceIE._extract_url(body)
        if vice_url:
            return _url_res(vice_url, ViceIE.ie_key())

        youtube_url = YoutubeIE._extract_url(body)
        if youtube_url:
            return _url_res(youtube_url, YoutubeIE.ie_key())

        video_url = self._html_search_regex(
            r'data-video-url="([^"]+)"',
            article['embed_code'], 'video URL')

        return _url_res(video_url, ViceIE.ie_key())
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/trtcocuk.py | yt_dlp/extractor/trtcocuk.py | from .common import InfoExtractor
from ..utils import ExtractorError, int_or_none, parse_iso8601, traverse_obj
class TrtCocukVideoIE(InfoExtractor):
    """Extractor for episodes on the TRT Çocuk website (trtcocuk.net.tr)."""
    _VALID_URL = r'https?://www\.trtcocuk\.net\.tr/video/(?P<id>[\w-]+)'
    _TESTS = [{
        'url': 'https://www.trtcocuk.net.tr/video/kaptan-pengu-ve-arkadaslari-1',
        'info_dict': {
            'id': '3789738',
            'ext': 'mp4',
            'season_number': 1,
            'series': '"Kaptan Pengu ve Arkadaşları"',
            'season': 'Season 1',
            'title': 'Kaptan Pengu ve Arkadaşları 1 Bölüm İzle TRT Çocuk',
            'release_date': '20201209',
            'release_timestamp': 1607513774,
        },
    }, {
        'url': 'https://www.trtcocuk.net.tr/video/sef-rokanin-lezzet-dunyasi-17',
        'info_dict': {
            'id': '10260842',
            'ext': 'mp4',
            'series': '"Şef Roka\'nın Lezzet Dünyası"',
            'title': 'Şef Roka\'nın Lezzet Dünyası 17 Bölüm İzle TRT Çocuk',
        },
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        nuxt_data = self._search_nuxt_data(webpage, display_id)['data']

        # 'video' is sometimes a JSON-encoded string, sometimes already a
        # plain URL; fall back to the raw value when JSON decoding fails.
        try:
            manifest_url = self._parse_json(nuxt_data['video'], display_id)
        except ExtractorError:
            manifest_url = nuxt_data['video']

        formats, subtitles = self._extract_m3u8_formats_and_subtitles(manifest_url, display_id)

        return {
            'id': str(nuxt_data['id']),
            'title': self._html_extract_title(webpage),  # TODO: get better title
            'formats': formats,
            'subtitles': subtitles,
            'season_number': int_or_none(nuxt_data.get('season')),
            'release_timestamp': parse_iso8601(nuxt_data.get('publishedDate')),
            'series': traverse_obj(nuxt_data, ('show', 0, 'title')),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/youjizz.py | yt_dlp/extractor/youjizz.py | from .common import InfoExtractor
from ..utils import (
determine_ext,
int_or_none,
parse_duration,
url_or_none,
)
class YouJizzIE(InfoExtractor):
    """Extractor for youjizz.com watch pages and player embeds."""
    _VALID_URL = r'https?://(?:\w+\.)?youjizz\.com/videos/(?:[^/#?]*-(?P<id>\d+)\.html|embed/(?P<embed_id>\d+))'
    _TESTS = [{
        'url': 'http://www.youjizz.com/videos/zeichentrick-1-2189178.html',
        'md5': 'b1e1dfaa8bb9537d8b84eeda9cf4acf4',
        'info_dict': {
            'id': '2189178',
            'ext': 'mp4',
            'title': 'Zeichentrick 1',
            'age_limit': 18,
            'duration': 2874,
        },
    }, {
        'url': 'http://www.youjizz.com/videos/-2189178.html',
        'only_matching': True,
    }, {
        'url': 'https://www.youjizz.com/videos/embed/31991001',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        # Watch pages match the 'id' group, embed pages the 'embed_id' group
        video_id = mobj.group('id') or mobj.group('embed_id')

        webpage = self._download_webpage(url, video_id)

        title = self._html_extract_title(webpage)

        formats = []

        # Primary source: the JS `encodings` array with per-quality entries
        encodings = self._parse_json(
            self._search_regex(
                r'[Ee]ncodings\s*=\s*(\[.+?\]);\n', webpage, 'encodings',
                default='[]'),
            video_id, fatal=False)
        for encoding in encodings:
            if not isinstance(encoding, dict):
                continue
            format_url = url_or_none(encoding.get('filename'))
            if not format_url:
                continue
            if determine_ext(format_url) == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    format_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id='hls', fatal=False))
            else:
                format_id = encoding.get('name') or encoding.get('quality')
                # Quality labels look like "720p"
                height = int_or_none(self._search_regex(
                    r'^(\d+)[pP]', format_id, 'height', default=None))
                formats.append({
                    'url': format_url,
                    'format_id': format_id,
                    'height': height,
                })

        if formats:
            info_dict = {
                'formats': formats,
            }
        else:
            # Fallback: parse the <video> markup directly.
            # YouJizz's HTML5 player has invalid HTML
            webpage = webpage.replace('"controls', '" controls')
            info_dict = self._parse_html5_media_entries(
                url, webpage, video_id)[0]

        duration = parse_duration(self._search_regex(
            r'<strong>Runtime:</strong>([^<]+)', webpage, 'duration',
            default=None))
        uploader = self._search_regex(
            r'<strong>Uploaded By:.*?<a[^>]*>([^<]+)', webpage, 'uploader',
            default=None)

        info_dict.update({
            'id': video_id,
            'title': title,
            'age_limit': self._rta_search(webpage),
            'duration': duration,
            'uploader': uploader,
        })

        return info_dict
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/weibo.py | yt_dlp/extractor/weibo.py | import itertools
import json
import random
import urllib.parse
from .common import InfoExtractor
from ..utils import (
int_or_none,
make_archive_id,
mimetype2ext,
parse_qs,
parse_resolution,
str_or_none,
strip_jsonp,
traverse_obj,
truncate_string,
url_or_none,
urlencode_postdata,
urljoin,
)
class WeiboBaseIE(InfoExtractor):
    def _update_visitor_cookies(self, visitor_url, video_id):
        """Perform Weibo's two-step guest ("visitor") flow to obtain the
        cookies needed for anonymous API access."""
        headers = {'Referer': visitor_url}
        # Keep the reported browser fingerprint consistent with our UA
        chrome_ver = self._search_regex(
            r'Chrome/(\d+)', self.get_param('http_headers')['User-Agent'], 'user agent version', default='90')

        # Step 1: register a synthetic browser fingerprint to get a visitor tid
        visitor_data = self._download_json(
            'https://passport.weibo.com/visitor/genvisitor', video_id,
            note='Generating first-visit guest request',
            headers=headers, transform_source=strip_jsonp,
            data=urlencode_postdata({
                'cb': 'gen_callback',
                'fp': json.dumps({
                    'os': '1',
                    'browser': f'Chrome{chrome_ver},0,0,0',
                    'fonts': 'undefined',
                    'screenInfo': '1920*1080*24',
                    'plugins': '',
                }, separators=(',', ':'))}))['data']

        # Step 2: redeem the tid; this sets the guest cookies on our jar
        self._download_webpage(
            'https://passport.weibo.com/visitor/visitor', video_id,
            note='Running first-visit callback to get guest cookies',
            headers=headers, query={
                'a': 'incarnate',
                't': visitor_data['tid'],
                'w': 3 if visitor_data.get('new_tid') else 2,
                'c': f'{visitor_data.get("confidence", 100):03d}',
                'gc': '',
                'cb': 'cross_domain',
                'from': 'weibo',
                '_rand': random.random(),
            })

    def _weibo_download_json(self, url, video_id, note='Downloading JSON metadata', data=None, headers=None, query=None):
        """Like _download_json, but transparently completes the visitor-cookie
        flow (and retries once) when the request is redirected to
        passport.weibo.com."""
        headers = {
            'Referer': 'https://weibo.com/',
            **(headers or {}),
        }
        webpage, urlh = self._download_webpage_handle(url, video_id, note=note, data=data, headers=headers, query=query)
        # A redirect to the passport domain means we lack guest cookies
        if urllib.parse.urlparse(urlh.url).netloc == 'passport.weibo.com':
            self._update_visitor_cookies(urlh.url, video_id)
            webpage = self._download_webpage(url, video_id, note=note, data=data, headers=headers, query=query)
        return self._parse_json(webpage, video_id)

    def _extract_formats(self, video_info):
        """Build the formats list from page_info.media_info, preferring the
        rich ``playback_list`` and falling back to scanning raw URL fields."""
        media_info = traverse_obj(video_info, ('page_info', 'media_info'))
        formats = traverse_obj(media_info, (
            'playback_list', lambda _, v: url_or_none(v['play_info']['url']), 'play_info', {
                'url': 'url',
                'format': ('quality_desc', {str}),
                'format_id': ('label', {str}),
                'ext': ('mime', {mimetype2ext}),
                'tbr': ('bitrate', {int_or_none}, filter),
                'vcodec': ('video_codecs', {str}),
                'fps': ('fps', {int_or_none}),
                'width': ('width', {int_or_none}),
                'height': ('height', {int_or_none}),
                'filesize': ('size', {int_or_none}),
                'acodec': ('audio_codecs', {str}),
                'asr': ('audio_sample_rate', {int_or_none}),
                'audio_channels': ('audio_channels', {int_or_none}),
            }))
        if not formats:  # fallback, should be barely used
            for url in set(traverse_obj(media_info, (..., {url_or_none}))):
                if 'label=' in url:  # filter out non-video urls
                    # e.g. ...label=mp4_720p&template=1280x720...
                    format_id, resolution = self._search_regex(
                        r'label=(\w+)&template=(\d+x\d+)', url, 'format info',
                        group=(1, 2), default=(None, None))
                    formats.append({
                        'url': url,
                        'format_id': format_id,
                        **parse_resolution(resolution),
                        # Pull size/bitrate from the matching video_details entry
                        **traverse_obj(media_info, (
                            'video_details', lambda _, v: v['label'].startswith(format_id), {
                                'size': ('size', {int_or_none}),
                                'tbr': ('bitrate', {int_or_none}),
                            },
                        ), get_all=False),
                    })
        return formats

    def _parse_video_info(self, video_info):
        """Convert one Weibo status/media JSON object into an info dict."""
        video_id = traverse_obj(video_info, (('id', 'id_str', 'mid'), {str_or_none}, any))
        return {
            'id': video_id,
            # Always attribute results to WeiboIE, even when called from a
            # sibling extractor in this module
            'extractor_key': WeiboIE.ie_key(),
            'extractor': WeiboIE.IE_NAME,
            'formats': self._extract_formats(video_info),
            'http_headers': {'Referer': 'https://weibo.com/'},
            '_old_archive_ids': [make_archive_id('WeiboMobile', video_id)],
            **traverse_obj(video_info, {
                'display_id': ('mblogid', {str_or_none}),
                # Title is a single line, truncated; full text kept in alt_title
                'title': ('page_info', 'media_info', ('video_title', 'kol_title', 'name'),
                          {lambda x: x.replace('\n', ' ')}, {truncate_string(left=72)}, filter),
                'alt_title': ('page_info', 'media_info', ('video_title', 'kol_title', 'name'), {str}, filter),
                'description': ('text_raw', {str}),
                'duration': ('page_info', 'media_info', 'duration', {int_or_none}),
                'timestamp': ('page_info', 'media_info', 'video_publish_time', {int_or_none}),
                'thumbnail': ('page_info', 'page_pic', {url_or_none}),
                'uploader': ('user', 'screen_name', {str}),
                'uploader_id': ('user', ('id', 'id_str'), {str_or_none}),
                'uploader_url': ('user', 'profile_url', {urljoin('https://weibo.com/')}),
                'view_count': ('page_info', 'media_info', 'online_users_number', {int_or_none}),
                'like_count': ('attitudes_count', {int_or_none}),
                'repost_count': ('reposts_count', {int_or_none}),
            }, get_all=False),
            'tags': traverse_obj(video_info, ('topic_struct', ..., 'topic_title', {str})) or None,
        }
class WeiboIE(WeiboBaseIE):
    _VALID_URL = r'https?://(?:m\.weibo\.cn/(?:status|detail)|(?:www\.)?weibo\.com/\d+)/(?P<id>[a-zA-Z0-9]+)'
    _TESTS = [{
        'url': 'https://weibo.com/7827771738/N4xlMvjhI',
        'info_dict': {
            'id': '4910815147462302',
            '_old_archive_ids': ['weibomobile 4910815147462302'],
            'ext': 'mp4',
            'display_id': 'N4xlMvjhI',
            'title': '【睡前消息暑假版第一期:拉泰国一把 对中国有好处】',
            'alt_title': '【睡前消息暑假版第一期:拉泰国一把 对中国有好处】',
            'description': 'md5:e2637a7673980d68694ea7c43cf12a5f',
            'duration': 918,
            'timestamp': 1686312819,
            'upload_date': '20230609',
            'thumbnail': r're:https://.*\.jpg',
            'uploader': '睡前视频基地',
            'uploader_id': '7827771738',
            'uploader_url': 'https://weibo.com/u/7827771738',
            'view_count': int,
            'like_count': int,
            'repost_count': int,
            'tags': ['泰国大选远进党获胜', '睡前消息', '暑期版'],
        },
    }, {
        'url': 'https://m.weibo.cn/status/4189191225395228',
        'info_dict': {
            'id': '4189191225395228',
            '_old_archive_ids': ['weibomobile 4189191225395228'],
            'ext': 'mp4',
            'display_id': 'FBqgOmDxO',
            'title': '柴犬柴犬的秒拍视频',
            'alt_title': '柴犬柴犬的秒拍视频',
            'description': 'md5:80f461ab5cdae6bbdb70efbf5a1db24f',
            'duration': 53,
            'timestamp': 1514264429,
            'upload_date': '20171226',
            'thumbnail': r're:https://.*\.jpg',
            'uploader': '柴犬柴犬',
            'uploader_id': '5926682210',
            'uploader_url': 'https://weibo.com/u/5926682210',
            'view_count': int,
            'like_count': int,
            'repost_count': int,
        },
    }, {
        'url': 'https://m.weibo.cn/detail/4189191225395228',
        'only_matching': True,
    }, {
        'url': 'https://weibo.com/0/4224132150961381',
        'note': 'no playback_list example',
        'only_matching': True,
    }, {
        'url': 'https://m.weibo.cn/detail/5120561132606436',
        'info_dict': {
            'id': '5120561132606436',
        },
        'playlist_count': 9,
    }]
    def _real_extract(self, url):
        """Extract a single status video, or a playlist for multi-media statuses."""
        video_id = self._match_id(url)
        status = self._weibo_download_json(
            'https://weibo.com/ajax/statuses/show', video_id, query={'id': video_id})
        # Statuses with several attached media carry a mix_media_info list
        media_items = traverse_obj(status, ('mix_media_info', 'items', ...))
        if media_items:
            return self.playlist_result(self._entries(media_items), video_id)
        return self._parse_video_info(status)
    def _entries(self, media_items):
        # Image attachments (type == 'pic') carry no video data and are skipped
        for item in traverse_obj(media_items, lambda _, v: v['type'] != 'pic'):
            yield self._parse_video_info(traverse_obj(item, {
                'id': ('data', 'object_id'),
                'page_info': {'media_info': ('data', 'media_info', {dict})},
            }))
class WeiboVideoIE(WeiboBaseIE):
    _VIDEO_ID_RE = r'\d+:(?:[\da-f]{32}|\d{16,})'
    _VALID_URL = [
        fr'https?://(?:www\.)?weibo\.com/tv/show/(?P<id>{_VIDEO_ID_RE})',
        fr'https?://video\.weibo\.com/show/?\?(?:[^#]+&)?fid=(?P<id>{_VIDEO_ID_RE})',
    ]
    _TESTS = [{
        'url': 'https://weibo.com/tv/show/1034:4797699866951785?from=old_pc_videoshow',
        'info_dict': {
            'id': '4797700463137878',
            'ext': 'mp4',
            'display_id': 'LEZDodaiW',
            'title': '呃,稍微了解了一下靡烟miya,感觉这东西也太二了',
            'alt_title': '呃,稍微了解了一下靡烟miya,感觉这东西也太二了',
            'description': '呃,稍微了解了一下靡烟miya,感觉这东西也太二了 http://t.cn/A6aerGsM \u200b\u200b\u200b',
            'duration': 76,
            'timestamp': 1659344278,
            'upload_date': '20220801',
            'thumbnail': r're:https://.*\.jpg',
            'uploader': '君子爱财陈平安',
            'uploader_id': '3905382233',
            'uploader_url': 'https://weibo.com/u/3905382233',
            'view_count': int,
            'like_count': int,
            'repost_count': int,
            '_old_archive_ids': ['weibomobile 4797700463137878'],
        },
    }, {
        'url': 'https://weibo.com/tv/show/1034:633c288cc043d0ca7808030f1157da64',
        'info_dict': {
            'id': '4189191225395228',
            'ext': 'mp4',
            'display_id': 'FBqgOmDxO',
            'title': '柴犬柴犬的秒拍视频',
            'alt_title': '柴犬柴犬的秒拍视频',
            'description': '午睡当然是要甜甜蜜蜜的啦![坏笑] Instagram:shibainu.gaku http://t.cn/RHbmjzW \u200B\u200B\u200B',
            'uploader': '柴犬柴犬',
            'uploader_id': '5926682210',
            'uploader_url': 'https://weibo.com/u/5926682210',
            'view_count': int,
            'like_count': int,
            'repost_count': int,
            'duration': 53,
            'thumbnail': 'https://wx1.sinaimg.cn/large/006t5KMygy1fmu31fsqbej30hs0hstav.jpg',
            'timestamp': 1514264429,
            'upload_date': '20171226',
            '_old_archive_ids': ['weibomobile 4189191225395228'],
        },
    }, {
        'url': 'https://video.weibo.com/show?fid=1034:4967272104787984',
        'info_dict': {
            'id': '4967273022359838',
            'ext': 'mp4',
            'display_id': 'Nse4S9TTU',
            'title': '#张婧仪[超话]#📸#婧仪的相册集# 早收工的一天,小张@张婧仪 变身可可爱爱小导游,来次说走就走的泉州City Walk[举手]',
            'alt_title': '#张婧仪[超话]#📸#婧仪的相册集# \n早收工的一天,小张@张婧仪 变身可可爱爱小导游,来次说走就走的泉州City Walk[举手]',
            'description': '#张婧仪[超话]#📸#婧仪的相册集# \n早收工的一天,小张@张婧仪 变身可可爱爱小导游,来次说走就走的泉州City Walk[举手] http://t.cn/A6WTpbEu \u200B\u200B\u200B',
            'uploader': '张婧仪工作室',
            'uploader_id': '7610808848',
            'uploader_url': 'https://weibo.com/u/7610808848',
            'view_count': int,
            'like_count': int,
            'repost_count': int,
            'duration': 85,
            'thumbnail': 'https://wx2.sinaimg.cn/orj480/008j4b3qly1hjsce01gnqj30u00gvwf8.jpg',
            'tags': ['婧仪的相册集'],
            'timestamp': 1699773545,
            'upload_date': '20231112',
            '_old_archive_ids': ['weibomobile 4967273022359838'],
        },
    }]
    def _real_extract(self, url):
        # Resolve the oid/fid into the canonical status mid, then delegate to WeiboIE
        video_id = self._match_id(url)
        component = self._weibo_download_json(
            'https://weibo.com/tv/api/component', video_id,
            data=f'data={{"Component_Play_Playinfo":{{"oid":"{video_id}"}}}}'.encode(),
            headers={'Referer': url}, query={'page': f'/tv/show/{video_id}'})
        mid = component['data']['Component_Play_Playinfo']['mid']
        return self.url_result(f'https://weibo.com/0/{mid}', WeiboIE)
class WeiboUserIE(WeiboBaseIE):
    _VALID_URL = r'https?://(?:www\.)?weibo\.com/u/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://weibo.com/u/2066652961?tabtype=video',
        'info_dict': {
            'id': '2066652961',
            'title': '萧影殿下的视频',
            'description': '萧影殿下的全部视频',
            'uploader': '萧影殿下',
        },
        'playlist_mincount': 195,
    }, {
        'url': 'https://weibo.com/u/7610808848?tabtype=newVideo&layerid=4967273022359838',
        'info_dict': {
            'id': '7610808848',
            'title': '张婧仪工作室的视频',
            'description': '张婧仪工作室的全部视频',
            'uploader': '张婧仪工作室',
        },
        'playlist_mincount': 61,
    }, {
        'url': 'https://weibo.com/u/7610808848?tabtype=newVideo&layerid=4967273022359838',
        'info_dict': {
            'id': '4967273022359838',
            'ext': 'mp4',
            'display_id': 'Nse4S9TTU',
            'title': '#张婧仪[超话]#📸#婧仪的相册集# 早收工的一天,小张@张婧仪 变身可可爱爱小导游,来次说走就走的泉州City Walk[举手]',
            'alt_title': '#张婧仪[超话]#📸#婧仪的相册集# \n早收工的一天,小张@张婧仪 变身可可爱爱小导游,来次说走就走的泉州City Walk[举手]',
            'description': '#张婧仪[超话]#📸#婧仪的相册集# \n早收工的一天,小张@张婧仪 变身可可爱爱小导游,来次说走就走的泉州City Walk[举手] http://t.cn/A6WTpbEu \u200B\u200B\u200B',
            'uploader': '张婧仪工作室',
            'uploader_id': '7610808848',
            'uploader_url': 'https://weibo.com/u/7610808848',
            'view_count': int,
            'like_count': int,
            'repost_count': int,
            'duration': 85,
            'thumbnail': 'https://wx2.sinaimg.cn/orj480/008j4b3qly1hjsce01gnqj30u00gvwf8.jpg',
            'tags': ['婧仪的相册集'],
            'timestamp': 1699773545,
            'upload_date': '20231112',
            '_old_archive_ids': ['weibomobile 4967273022359838'],
        },
        'params': {'noplaylist': True},
    }]
    def _fetch_page(self, uid, cursor=0, page=1):
        """Fetch one page of the user's video waterfall feed."""
        return self._weibo_download_json(
            'https://weibo.com/ajax/profile/getWaterFallContent',
            uid, note=f'Downloading videos page {page}',
            query={'uid': uid, 'cursor': cursor})['data']
    def _entries(self, uid, first_page):
        # The first page was already fetched by _real_extract to probe the uploader name
        page_data, cursor = first_page, 0
        for page_num in itertools.count(1):
            if page_num > 1:
                page_data = self._fetch_page(uid, cursor, page_num)
            for video_info in traverse_obj(page_data, ('list', ..., {dict})):
                yield self._parse_video_info(video_info)
            cursor = page_data.get('next_cursor')
            # A missing or negative cursor marks the last page
            if (int_or_none(cursor) or -1) < 0:
                return
    def _real_extract(self, url):
        uid = self._match_id(url)
        query = {key: values[-1] for key, values in parse_qs(url).items()}
        # A 'layerid' on the newVideo tab points at a single highlighted video
        video_id = query.get('layerid') if query.get('tabtype') == 'newVideo' else None
        if not self._yes_playlist(uid, video_id):
            return self.url_result(f'https://weibo.com/{uid}/{video_id}', WeiboIE, video_id)
        first_page = self._fetch_page(uid)
        uploader = traverse_obj(first_page, ('list', ..., 'user', 'screen_name', {str}), get_all=False)
        metainfo = {}
        if uploader:
            metainfo = {
                'title': f'{uploader}的视频',
                'description': f'{uploader}的全部视频',
                'uploader': uploader,
            }
        return self.playlist_result(self._entries(uid, first_page), uid, **metainfo)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/craftsy.py | yt_dlp/extractor/craftsy.py | import json
from .brightcove import BrightcoveNewIE
from .common import InfoExtractor
from ..utils import (
extract_attributes,
get_element_html_by_class,
get_element_text_and_html_by_tag,
)
from ..utils.traversal import traverse_obj
class CraftsyIE(InfoExtractor):
    _VALID_URL = r'https?://www\.craftsy\.com/class/(?P<id>[\w-]+)'
    _TESTS = [{
        'url': 'https://www.craftsy.com/class/the-midnight-quilt-show-season-5/',
        'info_dict': {
            'id': 'the-midnight-quilt-show-season-5',
            'title': 'The Midnight Quilt Show Season 5',
            'description': 'md5:113eda818e985d1a566625fb2f833b7a',
        },
        'playlist_count': 10,
    }, {
        'url': 'https://www.craftsy.com/class/sew-your-own-designer-handbag/',
        'info_dict': {
            'id': 'sew-your-own-designer-handbag',
            'title': 'Sew Your Own Designer Handbag',
            'description': 'md5:8270d0ef5427d3c895a27351aeaac276',
        },
        'playlist_mincount': 1,
    }, {
        'url': 'https://www.craftsy.com/class/all-access-estes-park-wool-market/',
        'info_dict': {
            'id': 'all-access-estes-park-wool-market',
            'title': 'All Access: Estes Park Wool Market',
            'description': 'md5:aded1bd8d38ae2fae4dae936c0ae01e7',
        },
        'playlist_count': 6,
    }]
    def _real_extract(self, url):
        """Extract the lessons of a class as a playlist of Brightcove videos."""
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        player_html = get_element_html_by_class('class-video-player', webpage)
        # Livewire snapshot JSON embedded in the player element attributes
        class_data = traverse_obj(player_html, (
            {extract_attributes}, 'wire:snapshot', {json.loads}, 'data', {dict})) or {}
        video_js_attrs = traverse_obj(player_html, (
            {lambda x: get_element_text_and_html_by_tag('video-js', x)}, 1, {extract_attributes})) or {}
        has_access = class_data.get('userHasAccess')
        lessons = traverse_obj(class_data, ('lessons', ..., ..., lambda _, v: v['video_id']))
        # The <video-js> tag holds a free preview not always listed among the lessons
        preview_id = video_js_attrs.get('data-video-id')
        if preview_id and preview_id not in traverse_obj(lessons, (..., 'video_id')):
            if not lessons and not has_access:
                self.report_warning(
                    'Only extracting preview. For the full class, pass cookies '
                    f'from an account that has access. {self._login_hint()}')
            lessons.append({'video_id': preview_id})
        if not lessons and not has_access:
            self.raise_login_required('You do not have access to this class')
        account_id = class_data.get('accountId') or video_js_attrs['data-account']
        entries = (
            self.url_result(
                f'http://players.brightcove.net/{account_id}/default_default/index.html?videoId={lesson["video_id"]}',
                BrightcoveNewIE, lesson['video_id'], lesson.get('title'))
            for lesson in lessons)
        return self.playlist_result(
            entries, display_id, self._html_search_meta(('og:title', 'twitter:title'), webpage),
            self._html_search_meta(('og:description', 'description'), webpage, default=None))
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/eplus.py | yt_dlp/extractor/eplus.py | import json
from .common import InfoExtractor
from ..utils import (
ExtractorError,
try_call,
unified_timestamp,
urlencode_postdata,
)
class EplusIbIE(InfoExtractor):
    """Extractor for e+ (eplus) Streaming+ live/archive player pages."""
    _NETRC_MACHINE = 'eplus'
    IE_NAME = 'eplus'
    IE_DESC = 'e+ (イープラス)'
    # Either an opaque 88-char "ib" player token, or a numeric/sample page id
    _VALID_URL = [r'https?://live\.eplus\.jp/ex/player\?ib=(?P<id>(?:\w|%2B|%2F){86}%3D%3D)',
                  r'https?://live\.eplus\.jp/(?P<id>sample|\d+)']
    _TESTS = [{
        'url': 'https://live.eplus.jp/ex/player?ib=41K6Wzbr3PlcMD%2FOKHFlC%2FcZCe2Eaw7FK%2BpJS1ooUHki8d0vGSy2mYqxillQBe1dSnOxU%2B8%2FzXKls4XPBSb3vw%3D%3D',
        'info_dict': {
            'id': '335699-0001-006',
            'title': '少女☆歌劇 レヴュースタァライト -The LIVE 青嵐- BLUE GLITTER <定点映像配信>【Streaming+(配信)】',
            'live_status': 'was_live',
            'release_date': '20201221',
            'release_timestamp': 1608544800,
        },
        'params': {
            'skip_download': True,
            'ignore_no_formats_error': True,
        },
        'expected_warnings': [
            'This event may not be accessible',
            'No video formats found',
            'Requested format is not available',
        ],
    }, {
        'url': 'https://live.eplus.jp/ex/player?ib=6QSsQdyRAwOFZrEHWlhRm7vocgV%2FO0YzBZ%2BaBEBg1XR%2FmbLn0R%2F048dUoAY038%2F%2F92MJ73BsoAtvUpbV6RLtDQ%3D%3D&show_id=2371511',
        'info_dict': {
            'id': '348021-0054-001',
            'title': 'ラブライブ!スーパースター!! Liella! First LoveLive! Tour ~Starlines~【東京/DAY.1】',
            'live_status': 'was_live',
            'release_date': '20220115',
            'release_timestamp': 1642233600,
            'description': str,
        },
        'params': {
            'skip_download': True,
            'ignore_no_formats_error': True,
        },
        'expected_warnings': [
            'Could not find the playlist URL. This event may not be accessible',
            'No video formats found!',
            'Requested format is not available',
        ],
    }, {
        'url': 'https://live.eplus.jp/sample',
        'info_dict': {
            'id': 'stream1ng20210719-test-005',
            'title': 'Online streaming test for DRM',
            'live_status': 'was_live',
            'release_date': '20210719',
            'release_timestamp': 1626703200,
        },
        'params': {
            'skip_download': True,
            'ignore_no_formats_error': True,
        },
        'expected_warnings': [
            'Could not find the playlist URL. This event may not be accessible',
            'No video formats found!',
            'Requested format is not available',
            'This video is DRM protected',
        ],
    }, {
        'url': 'https://live.eplus.jp/2053935',
        'info_dict': {
            'id': '331320-0001-001',
            'title': '丘みどり2020配信LIVE Vol.2 ~秋麗~ 【Streaming+(配信チケット)】',
            'live_status': 'was_live',
            'release_date': '20200920',
            'release_timestamp': 1600596000,
        },
        'params': {
            'skip_download': True,
            'ignore_no_formats_error': True,
        },
        'expected_warnings': [
            'Could not find the playlist URL. This event may not be accessible',
            'No video formats found!',
            'Requested format is not available',
        ],
    }]
    # Desktop browser UA sent with all page requests
    _USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0'
    def _login(self, username, password, urlh):
        """Log in via the FTAuth JSON API, then re-submit the login form.

        urlh is the response handle of the auth redirect page; its
        X-CLTFT-Token header is required by both follow-up requests.
        Raises ExtractorError when prerequisites are missing or the
        credentials are rejected.
        """
        if not self._get_cookies('https://live.eplus.jp/').get('ci_session'):
            raise ExtractorError('Unable to get ci_session cookie')
        cltft_token = urlh.headers.get('X-CLTFT-Token')
        if not cltft_token:
            raise ExtractorError('Unable to get X-CLTFT-Token')
        self._set_cookie('live.eplus.jp', 'X-CLTFT-Token', cltft_token)
        login_json = self._download_json(
            'https://live.eplus.jp/member/api/v1/FTAuth/idpw', None,
            note='Sending pre-login info', errnote='Unable to send pre-login info', headers={
                'Content-Type': 'application/json; charset=UTF-8',
                'Referer': urlh.url,
                'X-Cltft-Token': cltft_token,
                'Accept': '*/*',
            }, data=json.dumps({
                'loginId': username,
                'loginPassword': password,
            }).encode())
        if not login_json.get('isSuccess'):
            raise ExtractorError('Login failed: Invalid id or password', expected=True)
        # Complete the login by POSTing the original form with the token
        self._request_webpage(
            urlh.url, None, note='Logging in', errnote='Unable to log in',
            data=urlencode_postdata({
                'loginId': username,
                'loginPassword': password,
                'Token.Default': cltft_token,
                'op': 'nextPage',
            }), headers={'Referer': urlh.url})
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage, urlh = self._download_webpage_handle(
            url, video_id, headers={'User-Agent': self._USER_AGENT})
        # A redirect to /member/auth means login is required before playback
        if urlh.url.startswith('https://live.eplus.jp/member/auth'):
            username, password = self._get_login_info()
            if not username:
                self.raise_login_required()
            self._login(username, password, urlh)
            webpage = self._download_webpage(
                url, video_id, headers={'User-Agent': self._USER_AGENT})
        data_json = self._search_json(r'<script>\s*var app\s*=', webpage, 'data json', video_id)
        if data_json.get('drm_mode') == 'ON':
            self.report_drm(video_id)
        if data_json.get('is_pass_ticket') == 'YES':
            raise ExtractorError(
                'This URL is for a pass ticket instead of a player page', expected=True)
        delivery_status = data_json.get('delivery_status')
        archive_mode = data_json.get('archive_mode')
        # event_datetime is in JST (UTC+9); subtract 9 hours for a UTC epoch
        release_timestamp = try_call(lambda: unified_timestamp(data_json['event_datetime']) - 32400)
        release_timestamp_str = data_json.get('event_datetime_text')  # JST
        self.write_debug(f'delivery_status = {delivery_status}, archive_mode = {archive_mode}')
        # Map the site's delivery status to yt-dlp live_status values
        if delivery_status == 'PREPARING':
            live_status = 'is_upcoming'
        elif delivery_status == 'STARTED':
            live_status = 'is_live'
        elif delivery_status == 'STOPPED':
            if archive_mode != 'ON':
                raise ExtractorError(
                    'This event has ended and there is no archive for this event', expected=True)
            live_status = 'post_live'
        elif delivery_status == 'WAIT_CONFIRM_ARCHIVED':
            live_status = 'post_live'
        elif delivery_status == 'CONFIRMED_ARCHIVE':
            live_status = 'was_live'
        else:
            self.report_warning(f'Unknown delivery_status {delivery_status}, treat it as a live')
            live_status = 'is_live'
        formats = []
        m3u8_playlist_urls = self._search_json(
            r'var\s+listChannels\s*=', webpage, 'hls URLs', video_id, contains_pattern=r'\[.+\]', default=[])
        if not m3u8_playlist_urls:
            if live_status == 'is_upcoming':
                self.raise_no_formats(
                    f'Could not find the playlist URL. This live event will begin at {release_timestamp_str} JST', expected=True)
            else:
                self.raise_no_formats(
                    'Could not find the playlist URL. This event may not be accessible', expected=True)
        elif live_status == 'is_upcoming':
            self.raise_no_formats(f'This live event will begin at {release_timestamp_str} JST', expected=True)
        elif live_status == 'post_live':
            self.raise_no_formats('This event has ended, and the archive will be available shortly', expected=True)
        else:
            for m3u8_playlist_url in m3u8_playlist_urls:
                formats.extend(self._extract_m3u8_formats(m3u8_playlist_url, video_id))
            # FIXME: HTTP request headers need to be updated to continue download
            warning = 'Due to technical limitations, the download will be interrupted after one hour'
            if live_status == 'is_live':
                self.report_warning(warning)
            elif live_status == 'was_live':
                self.report_warning(f'{warning}. You can restart to continue the download')
        return {
            'id': data_json['app_id'],
            'title': data_json.get('app_name'),
            'formats': formats,
            'live_status': live_status,
            'description': data_json.get('content'),
            'release_timestamp': release_timestamp,
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/atresplayer.py | yt_dlp/extractor/atresplayer.py | import urllib.parse
from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
int_or_none,
parse_age_limit,
url_or_none,
urlencode_postdata,
)
from ..utils.traversal import traverse_obj
class AtresPlayerIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?atresplayer\.com/(?:[^/?#]+/){4}(?P<display_id>.+?)_(?P<id>[0-9a-f]{24})'
    _NETRC_MACHINE = 'atresplayer'
    _TESTS = [{
        'url': 'https://www.atresplayer.com/lasexta/programas/el-objetivo/clips/mbappe-describe-como-entrenador-a-carlo-ancelotti-sabe-cuando-tiene-que-ser-padre-jefe-amigo-entrenador_67f2dfb2fb6ab0e4c7203849/',
        'info_dict': {
            'ext': 'mp4',
            'id': '67f2dfb2fb6ab0e4c7203849',
            'display_id': 'md5:c203f8d4e425ed115ba56a1c6e4b3e6c',
            'title': 'Mbappé describe como entrenador a Carlo Ancelotti: "Sabe cuándo tiene que ser padre, jefe, amigo, entrenador..."',
            'channel': 'laSexta',
            'duration': 31,
            'thumbnail': 'https://imagenes.atresplayer.com/atp/clipping/cmsimages02/2025/04/06/B02DBE1E-D59B-4683-8404-1A9595D15269/1920x1080.jpg',
            'tags': ['Entrevista informativa', 'Actualidad', 'Debate informativo', 'Política', 'Economía', 'Sociedad', 'Cara a cara', 'Análisis', 'Más periodismo'],
            'series': 'El Objetivo',
            'season': 'Temporada 12',
            'timestamp': 1743970079,
            'upload_date': '20250406',
        },
    }, {
        'url': 'https://www.atresplayer.com/antena3/programas/el-hormiguero/clips/revive-la-entrevista-completa-a-miguel-bose-en-el-hormiguero_67f836baa4a5b0e4147ca59a/',
        'info_dict': {
            'ext': 'mp4',
            'id': '67f836baa4a5b0e4147ca59a',
            'display_id': 'revive-la-entrevista-completa-a-miguel-bose-en-el-hormiguero',
            'title': 'Revive la entrevista completa a Miguel Bosé en El Hormiguero',
            'description': 'md5:c6d2b591408d45a7bc2986dfb938eb72',
            'channel': 'Antena 3',
            'duration': 2556,
            'thumbnail': 'https://imagenes.atresplayer.com/atp/clipping/cmsimages02/2025/04/10/9076395F-F1FD-48BE-9F18-540DBA10EBAD/1920x1080.jpg',
            'tags': ['Entrevista', 'Variedades', 'Humor', 'Entretenimiento', 'Te sigo', 'Buen rollo', 'Cara a cara'],
            'series': 'El Hormiguero ',
            'season': 'Temporada 14',
            'timestamp': 1744320111,
            'upload_date': '20250410',
        },
    }, {
        'url': 'https://www.atresplayer.com/flooxer/series/biara-proyecto-lazarus/temporada-1/capitulo-3-supervivientes_67a6038b64ceca00070f4f69/',
        'info_dict': {
            'ext': 'mp4',
            'id': '67a6038b64ceca00070f4f69',
            'display_id': 'capitulo-3-supervivientes',
            'title': 'Capítulo 3: Supervivientes',
            'description': 'md5:65b231f20302f776c2b0dd24594599a1',
            'channel': 'Flooxer',
            'duration': 1196,
            'thumbnail': 'https://imagenes.atresplayer.com/atp/clipping/cmsimages01/2025/02/14/17CF90D3-FE67-40C5-A941-7825B3E13992/1920x1080.jpg',
            'tags': ['Juvenil', 'Terror', 'Piel de gallina', 'Te sigo', 'Un break', 'Del tirón'],
            'series': 'BIARA: Proyecto Lázarus',
            'season': 'Temporada 1',
            'season_number': 1,
            'episode': 'Episode 3',
            'episode_number': 3,
            'timestamp': 1743095191,
            'upload_date': '20250327',
        },
    }, {
        'url': 'https://www.atresplayer.com/lasexta/programas/el-club-de-la-comedia/temporada-4/capitulo-10-especial-solidario-nochebuena_5ad08edf986b2855ed47adc4/',
        'only_matching': True,
    }, {
        'url': 'https://www.atresplayer.com/antena3/series/el-secreto-de-puente-viejo/el-chico-de-los-tres-lunares/capitulo-977-29-12-14_5ad51046986b2886722ccdea/',
        'only_matching': True,
    }]
    _API_BASE = 'https://api.atresplayer.com/'
    # MIME types mapped to the manifest kind they should be parsed as
    _HLS_TYPES = ('application/vnd.apple.mpegurl', 'application/hls+legacy', 'application/hls+hevc')
    _DASH_TYPES = ('application/dash+xml', 'application/dash+hevc')
    def _perform_login(self, username, password):
        # A 400 response indicates rejected credentials
        login_form = urlencode_postdata({'username': username, 'password': password})
        try:
            self._download_webpage(
                'https://account.atresplayer.com/auth/v1/login', None,
                'Logging in', 'Failed to log in', data=login_form)
        except ExtractorError as e:
            if isinstance(e.cause, HTTPError) and e.cause.status == 400:
                raise ExtractorError('Invalid username and/or password', expected=True)
            raise
    def _real_extract(self, url):
        display_id, video_id = self._match_valid_url(url).groups()
        # Resolve the page path into its API metadata endpoint
        metadata_url = self._download_json(
            self._API_BASE + 'client/v1/url', video_id, 'Downloading API endpoint data',
            query={'href': urllib.parse.urlparse(url).path})['href']
        api_metadata = self._download_json(metadata_url, video_id)
        try:
            stream_data = self._download_json(api_metadata['urlVideo'], video_id, 'Downloading video data')
        except ExtractorError as e:
            # 403 responses carry a JSON body explaining the restriction
            if isinstance(e.cause, HTTPError) and e.cause.status == 403:
                error = self._parse_json(e.cause.response.read(), None)
                if error.get('error') == 'required_registered':
                    self.raise_login_required()
                raise ExtractorError(error['error_description'], expected=True)
            raise
        formats = []
        subtitles = {}
        for source in traverse_obj(stream_data, ('sources', lambda _, v: url_or_none(v['src']))):
            mimetype = source.get('type')
            if mimetype in self._HLS_TYPES:
                fmts, subs = self._extract_m3u8_formats_and_subtitles(
                    source['src'], video_id, 'mp4', m3u8_id='hls', fatal=False)
            elif mimetype in self._DASH_TYPES:
                fmts, subs = self._extract_mpd_formats_and_subtitles(
                    source['src'], video_id, mpd_id='dash', fatal=False)
            else:
                continue
            formats.extend(fmts)
            self._merge_subtitles(subs, target=subtitles)
        return {
            'display_id': display_id,
            'id': video_id,
            'formats': formats,
            'subtitles': subtitles,
            **traverse_obj(stream_data, {
                'title': ('titulo', {str}),
                'description': ('descripcion', {str}),
                'duration': ('duration', {int_or_none}),
                'thumbnail': ('imgPoster', {url_or_none}, {lambda v: f'{v}1920x1080.jpg'}),
                'age_limit': ('ageRating', {parse_age_limit}),
            }),
            **traverse_obj(api_metadata, {
                'title': ('title', {str}),
                'description': ('description', {str}),
                'duration': ('duration', {int_or_none}),
                'tags': ('tags', ..., 'title', {str}),
                'age_limit': ('ageRating', {parse_age_limit}),
                'series': ('format', 'title', {str}),
                'season': ('currentSeason', 'title', {str}),
                'season_number': ('currentSeason', 'seasonNumber', {int_or_none}),
                'episode_number': ('numberOfEpisode', {int_or_none}),
                'timestamp': ('publicationDate', {int_or_none(scale=1000)}),
                'channel': ('channel', 'title', {str}),
            }),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/twitcasting.py | yt_dlp/extractor/twitcasting.py | import base64
import hashlib
import itertools
import re
from .common import InfoExtractor
from ..dependencies import websockets
from ..utils import (
ExtractorError,
UserNotLive,
clean_html,
float_or_none,
get_element_by_class,
get_element_by_id,
parse_duration,
qualities,
str_to_int,
try_get,
unified_timestamp,
update_url_query,
url_or_none,
urlencode_postdata,
urljoin,
)
from ..utils.traversal import traverse_obj
class TwitCastingIE(InfoExtractor):
    _VALID_URL = r'https?://(?:[^/?#]+\.)?twitcasting\.tv/(?P<uploader_id>[^/?#]+)/(?:movie|twplayer)/(?P<id>\d+)'
    # Sent with manifest/segment requests; presumably required by the CDN — TODO confirm
    _M3U8_HEADERS = {
        'Origin': 'https://twitcasting.tv',
        'Referer': 'https://twitcasting.tv/',
    }
    _TESTS = [{
        'url': 'https://twitcasting.tv/ivetesangalo/movie/2357609',
        'md5': '745243cad58c4681dc752490f7540d7f',
        'info_dict': {
            'id': '2357609',
            'ext': 'mp4',
            'title': 'Live #2357609',
            'uploader_id': 'ivetesangalo',
            'description': 'Twitter Oficial da cantora brasileira Ivete Sangalo.',
            'thumbnail': r're:^https?://.*\.jpg$',
            'upload_date': '20110822',
            'timestamp': 1313978424,
            'duration': 32,
            'view_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://twitcasting.tv/mttbernardini/movie/3689740',
        'info_dict': {
            'id': '3689740',
            'ext': 'mp4',
            'title': 'Live playing something #3689740',
            'uploader_id': 'mttbernardini',
            'description': 'md5:1dc7efa2f1ab932fcd119265cebeec69',
            'thumbnail': r're:^https?://.*\.jpg$',
            'upload_date': '20120211',
            'timestamp': 1328995624,
            'duration': 681,
            'view_count': int,
        },
        'params': {
            'skip_download': True,
            'videopassword': 'abc',
        },
    }, {
        'url': 'https://twitcasting.tv/loft_heaven/movie/685979292',
        'info_dict': {
            'id': '685979292',
            'ext': 'mp4',
            'title': '【無料配信】南波一海のhear/here “ナタリー望月哲さんに聞く編集と「渋谷系狂騒曲」”',
            'uploader_id': 'loft_heaven',
            'description': 'md5:3a0c7b53019df987ce545c935538bacf',
            'upload_date': '20210604',
            'timestamp': 1622802114,
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 6964,
            'view_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }]
    def _parse_data_movie_playlist(self, dmp, video_id):
        """Parse the data-movie-playlist attribute, which is served either as
        plain JSON or as base64 of the reversed JSON string."""
        # attempt 1: parse as JSON directly
        try:
            return self._parse_json(dmp, video_id)
        except ExtractorError:
            pass
        # attempt 2: decode reversed base64
        decoded = base64.b64decode(dmp[::-1])
        return self._parse_json(decoded, video_id)
    def _real_extract(self, url):
        """Extract a TwitCasting movie: a live stream (HLS and, when the
        websockets dependency is available, low-latency fMP4 over WebSocket)
        or an archived VOD (one or more m3u8 playlists)."""
        uploader_id, video_id = self._match_valid_url(url).groups()
        webpage, urlh = self._download_webpage_handle(url, video_id)
        video_password = self.get_param('videopassword')
        request_data = None
        if video_password:
            # Re-POST the page with the password plus any hidden form fields
            request_data = urlencode_postdata({
                'password': video_password,
                **self._hidden_inputs(webpage),
            }, encoding='utf-8')
            webpage, urlh = self._download_webpage_handle(
                url, video_id, data=request_data,
                headers={'Origin': 'https://twitcasting.tv'},
                note='Trying video password')
        if urlh.url != url and request_data:
            # Password POST redirected; re-submit against the final URL
            webpage = self._download_webpage(
                urlh.url, video_id, data=request_data,
                headers={'Origin': 'https://twitcasting.tv'},
                note='Retrying authentication')
        # has to check here as the first request can contain password input form even if the password is correct
        if re.search(r'<form\s+method="POST">\s*<input\s+[^>]+?name="password"', webpage):
            raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
        title = (clean_html(get_element_by_id('movietitle', webpage))
                 or self._html_search_meta(['og:title', 'twitter:title'], webpage, fatal=True))
        # Playlist entry '2' carries the movie metadata/source list
        video_js_data = try_get(
            webpage,
            lambda x: self._parse_data_movie_playlist(self._search_regex(
                r'data-movie-playlist=\'([^\']+?)\'',
                x, 'movie playlist', default=None), video_id)['2'], list)
        thumbnail = traverse_obj(video_js_data, (0, 'thumbnailUrl')) or self._og_search_thumbnail(webpage)
        description = clean_html(get_element_by_id(
            'authorcomment', webpage)) or self._html_search_meta(
            ['description', 'og:description', 'twitter:description'], webpage)
        # Durations in the playlist are in milliseconds; fall back to the on-page timer
        duration = (try_get(video_js_data, lambda x: sum(float_or_none(y.get('duration')) for y in x) / 1000)
                    or parse_duration(clean_html(get_element_by_class('tw-player-duration-time', webpage))))
        view_count = str_to_int(self._search_regex(
            (r'Total\s*:\s*Views\s*([\d,]+)', r'総視聴者\s*:\s*([\d,]+)\s*</'), webpage, 'views', None))
        timestamp = unified_timestamp(self._search_regex(
            r'data-toggle="true"[^>]+datetime="([^"]+)"',
            webpage, 'datetime', None))
        is_live = any(f'data-{x}' in webpage for x in ['is-onlive="true"', 'live-type="live"', 'status="online"'])
        base_dict = {
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'uploader_id': uploader_id,
            'duration': duration,
            'view_count': view_count,
            'is_live': is_live,
        }
        def find_dmu(x):
            # Single-manifest VODs expose their m3u8 URL as data-movie-url
            data_movie_url = self._search_regex(
                r'data-movie-url=(["\'])(?P<url>(?:(?!\1).)+)\1',
                x, 'm3u8 url', group='url', default=None)
            if data_movie_url:
                return [data_movie_url]
        m3u8_urls = (try_get(webpage, find_dmu, list)
                     or traverse_obj(video_js_data, (..., 'source', 'url')))
        if is_live:
            stream_data = self._download_json(
                'https://twitcasting.tv/streamserver.php',
                video_id, 'Downloading live info', query={
                    'target': uploader_id,
                    'mode': 'client',
                    'player': 'pc_web',
                })
            # Password-protected lives expect the md5 of the password as 'word'
            password_params = {
                'word': hashlib.md5(video_password.encode()).hexdigest(),
            } if video_password else None
            formats = []
            # low: 640x360, medium: 1280x720, high: 1920x1080
            qq = qualities(['low', 'medium', 'high'])
            for quality, m3u8_url in traverse_obj(stream_data, (
                'tc-hls', 'streams', {dict.items}, lambda _, v: url_or_none(v[1]),
            )):
                formats.append({
                    'url': update_url_query(m3u8_url, password_params),
                    'format_id': f'hls-{quality}',
                    'ext': 'mp4',
                    'quality': qq(quality),
                    'protocol': 'm3u8',
                    'http_headers': self._M3U8_HEADERS,
                })
            if websockets:
                # Low-latency fMP4-over-WebSocket streams, only when websockets is installed
                qq = qualities(['base', 'mobilesource', 'main'])
                for mode, ws_url in traverse_obj(stream_data, (
                    'llfmp4', 'streams', {dict.items}, lambda _, v: url_or_none(v[1]),
                )):
                    formats.append({
                        'url': update_url_query(ws_url, password_params),
                        'format_id': f'ws-{mode}',
                        'ext': 'mp4',
                        'quality': qq(mode),
                        'source_preference': -10,
                        # TwitCasting simply sends moof atom directly over WS
                        'protocol': 'websocket_frag',
                    })
            if not formats:
                self.raise_login_required()
            infodict = {
                'formats': formats,
                '_format_sort_fields': ('source', ),
            }
        elif not m3u8_urls:
            raise ExtractorError('Failed to get m3u8 playlist')
        elif len(m3u8_urls) == 1:
            formats = self._extract_m3u8_formats(
                m3u8_urls[0], video_id, 'mp4', headers=self._M3U8_HEADERS)
            infodict = {
                # No problem here since there's only one manifest
                'formats': formats,
                'http_headers': self._M3U8_HEADERS,
            }
        else:
            # Multiple manifests: expose each part as an entry of a multi_video
            infodict = {
                '_type': 'multi_video',
                'entries': [{
                    'id': f'{video_id}-{num}',
                    'url': m3u8_url,
                    'ext': 'mp4',
                    # Requesting the manifests here will cause download to fail.
                    # So use ffmpeg instead. See: https://github.com/yt-dlp/yt-dlp/issues/382
                    'protocol': 'm3u8',
                    'http_headers': self._M3U8_HEADERS,
                    **base_dict,
                } for (num, m3u8_url) in enumerate(m3u8_urls)],
            }
        return {
            'id': video_id,
            **base_dict,
            **infodict,
        }
class TwitCastingLiveIE(InfoExtractor):
    _VALID_URL = r'https?://(?:[^/?#]+\.)?twitcasting\.tv/(?P<id>[^/?#]+)/?(?:[#?]|$)'
    _TESTS = [{
        'url': 'https://twitcasting.tv/ivetesangalo',
        'only_matching': True,
    }, {
        'url': 'https://twitcasting.tv/c:unusedlive',
        'expected_exception': 'UserNotLive',
    }]
    def _real_extract(self, url):
        """Resolve a channel page to the currently running live movie."""
        uploader_id = self._match_id(url)
        self.to_screen(
            f'Downloading live video of user {uploader_id}. '
            f'Pass "https://twitcasting.tv/{uploader_id}/show" to download the history')
        live_api_response = self._download_json(
            f'https://frontendapi.twitcasting.tv/watch/user/{uploader_id}',
            uploader_id, 'Checking live status', data=b'', fatal=False)
        is_live = traverse_obj(live_api_response, ('is_live', {bool}))
        if is_live is False:  # only raise here if API response was as expected
            raise UserNotLive(video_id=uploader_id)
        # Use /show/ page so that password-protected and members-only livestreams can be found
        webpage = self._download_webpage(
            f'https://twitcasting.tv/{uploader_id}/show/', uploader_id, 'Downloading live history')
        if not is_live:
            # Fall back to the LIVE badge on the history page
            is_live = self._search_regex(
                r'(?s)(<span\s*class="tw-movie-thumbnail2-badge"\s*data-status="live">\s*LIVE)',
                webpage, 'is live?', default=False)
        # Current live is always the first match
        live_id = self._search_regex(
            r'(?s)<a\s+class="tw-movie-thumbnail2"\s+href="/[^/"]+/movie/(?P<video_id>\d+)"',
            webpage, 'current live ID', default=None, group='video_id')
        if not (is_live and live_id):
            raise UserNotLive(video_id=uploader_id)
        return self.url_result(f'https://twitcasting.tv/{uploader_id}/movie/{live_id}', TwitCastingIE)
class TwitCastingUserIE(InfoExtractor):
    """Extractor for a TwitCasting user's live history (/show or /archive pages).

    Yields one url_result per archived movie, following the relative "next"
    pagination links until none remain.
    """
    _VALID_URL = r'https?://(?:[^/?#]+\.)?twitcasting\.tv/(?P<id>[^/?#]+)/(?:show|archive)/?(?:[#?]|$)'
    _TESTS = [{
        'url': 'https://twitcasting.tv/natsuiromatsuri/archive/',
        'info_dict': {
            'id': 'natsuiromatsuri',
            'title': 'natsuiromatsuri - Live History',
        },
        'playlist_mincount': 235,
    }, {
        'url': 'https://twitcasting.tv/noriyukicas/show',
        'only_matching': True,
    }]
    def _entries(self, uploader_id):
        base_url = next_url = f'https://twitcasting.tv/{uploader_id}/show'
        for page_num in itertools.count(1):
            webpage = self._download_webpage(
                next_url, uploader_id, query={'filter': 'watchable'}, note=f'Downloading page {page_num}')
            for mobj in re.finditer(
                    r'(?s)<a\s+class="tw-movie-thumbnail2"\s+href="(?P<url>/[^/"]+/movie/\d+)"', webpage):
                yield self.url_result(urljoin(base_url, mobj.group('url')))
            # The "next" link is site-relative and embeds the current page number
            # (f-string instead of the old %-formatting, matching the rest of the file)
            next_url = self._search_regex(
                rf'<a href="(/{re.escape(uploader_id)}/show/{page_num}-\d+)[?"]',
                webpage, 'next url', default=None)
            next_url = urljoin(base_url, next_url)
            if not next_url:
                return
    def _real_extract(self, url):
        uploader_id = self._match_id(url)
        return self.playlist_result(
            self._entries(uploader_id), uploader_id, f'{uploader_id} - Live History')
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/airtv.py | yt_dlp/extractor/airtv.py | from .common import InfoExtractor
from .youtube import YoutubeIE
from ..utils import (
determine_ext,
int_or_none,
mimetype2ext,
parse_iso8601,
traverse_obj,
)
class AirTVIE(InfoExtractor):
    """Extractor for air.tv watch pages.

    Videos re-hosted from YouTube are delegated to YoutubeIE; native videos
    are extracted from the Next.js page state.
    """
    _VALID_URL = r'https?://www\.air\.tv/watch\?v=(?P<id>\w+)'
    _TESTS = [{
        # without youtube_id
        'url': 'https://www.air.tv/watch?v=W87jcWleSn2hXZN47zJZsQ',
        'info_dict': {
            'id': 'W87jcWleSn2hXZN47zJZsQ',
            'ext': 'mp4',
            'release_date': '20221003',
            'release_timestamp': 1664792603,
            'channel_id': 'vgfManQlRQKgoFQ8i8peFQ',
            'title': 'md5:c12d49ed367c3dadaa67659aff43494c',
            'upload_date': '20221003',
            'duration': 151,
            'view_count': int,
            'thumbnail': 'https://cdn-sp-gcs.air.tv/videos/W/8/W87jcWleSn2hXZN47zJZsQ/b13fc56464f47d9d62a36d110b9b5a72-4096x2160_9.jpg',
            'timestamp': 1664792603,
        },
    }, {
        # with youtube_id
        'url': 'https://www.air.tv/watch?v=sv57EC8tRXG6h8dNXFUU1Q',
        'info_dict': {
            'id': '2ZTqmpee-bQ',
            'ext': 'mp4',
            'comment_count': int,
            'tags': 'count:11',
            'channel_follower_count': int,
            'like_count': int,
            'uploader': 'Newsflare',
            'thumbnail': 'https://i.ytimg.com/vi_webp/2ZTqmpee-bQ/maxresdefault.webp',
            'availability': 'public',
            'title': 'Geese Chase Alligator Across Golf Course',
            'uploader_id': 'NewsflareBreaking',
            'channel_url': 'https://www.youtube.com/channel/UCzSSoloGEz10HALUAbYhngQ',
            'description': 'md5:99b21d9cea59330149efbd9706e208f5',
            'age_limit': 0,
            'channel_id': 'UCzSSoloGEz10HALUAbYhngQ',
            'uploader_url': 'http://www.youtube.com/user/NewsflareBreaking',
            'view_count': int,
            'categories': ['News & Politics'],
            'live_status': 'not_live',
            'playable_in_embed': True,
            'channel': 'Newsflare',
            'duration': 37,
            'upload_date': '20180511',
        },
    }]
    def _get_formats_and_subtitle(self, json_data, video_id):
        """Collect formats and subtitles from the 'sources' and 'sources_desktop' lists."""
        formats, subtitles = [], {}
        for source in traverse_obj(json_data, 'sources', 'sources_desktop', ...):
            ext = determine_ext(source.get('src'), mimetype2ext(source.get('type')))
            if ext == 'm3u8':
                fmts, subs = self._extract_m3u8_formats_and_subtitles(source.get('src'), video_id)
                formats.extend(fmts)
                self._merge_subtitles(subs, target=subtitles)
            else:
                # Non-HLS sources are used as direct links
                formats.append({'url': source.get('src'), 'ext': ext})
        return formats, subtitles
    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        nextjs_json = self._search_nextjs_data(webpage, display_id)['props']['pageProps']['initialState']['videos'][display_id]
        # Hand off to the YouTube extractor when the video is a YouTube re-host
        if nextjs_json.get('youtube_id'):
            return self.url_result(
                f'https://www.youtube.com/watch?v={nextjs_json.get("youtube_id")}', YoutubeIE)
        formats, subtitles = self._get_formats_and_subtitle(nextjs_json, display_id)
        return {
            'id': display_id,
            'title': nextjs_json.get('title') or self._html_search_meta('og:title', webpage),
            'formats': formats,
            'subtitles': subtitles,
            'description': nextjs_json.get('description') or None,
            'duration': int_or_none(nextjs_json.get('duration')),
            'thumbnails': [
                {'url': thumbnail}
                for thumbnail in traverse_obj(nextjs_json, ('default_thumbnails', ...))],
            'channel_id': traverse_obj(nextjs_json, 'channel', 'channel_slug'),
            'timestamp': parse_iso8601(nextjs_json.get('created')),
            'release_timestamp': parse_iso8601(nextjs_json.get('published')),
            'view_count': int_or_none(nextjs_json.get('views')),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/crtvg.py | yt_dlp/extractor/crtvg.py | import re
from .common import InfoExtractor
from ..utils import make_archive_id, remove_end
class CrtvgIE(InfoExtractor):
    """Extractor for CRTVG (Galician public broadcaster) "a carta" VOD pages."""
    _VALID_URL = r'https?://(?:www\.)?crtvg\.es/tvg/a-carta/(?P<id>[^/#?]+)'
    _TESTS = [{
        'url': 'https://www.crtvg.es/tvg/a-carta/os-caimans-do-tea-5839623',
        'md5': 'c0958d9ff90e4503a75544358758921d',
        'info_dict': {
            'id': 'os-caimans-do-tea-5839623',
            'title': 'Os caimáns do Tea',
            'ext': 'mp4',
            'description': 'md5:f71cfba21ae564f0a6f415b31de1f842',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            '_old_archive_ids': ['crtvg 5839623'],
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://www.crtvg.es/tvg/a-carta/a-parabolica-love-story',
        'md5': '9a47b95a1749db7b7eb3214904624584',
        'info_dict': {
            'id': 'a-parabolica-love-story',
            'title': 'A parabólica / Trabuco, o can mordedor / Love Story',
            'ext': 'mp4',
            'description': 'md5:f71cfba21ae564f0a6f415b31de1f842',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
        },
        'params': {'skip_download': 'm3u8'},
    }]
    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # The page embeds the stream base URL as a JS variable
        stream_base = self._search_regex(r'var\s+url\s*=\s*["\']([^"\']+)', webpage, 'video url')
        formats = [
            *self._extract_m3u8_formats(stream_base + '/playlist.m3u8', display_id, fatal=False),
            *self._extract_mpd_formats(stream_base + '/manifest.mpd', display_id, fatal=False),
        ]
        # Slugs that end in a 7-digit number carry the legacy numeric video ID
        legacy_match = re.fullmatch(r'[^/#?]+-(?P<old_id>\d{7})', display_id)
        return {
            'id': display_id,
            '_old_archive_ids': [make_archive_id(self, legacy_match.group('old_id'))] if legacy_match else None,
            'formats': formats,
            'title': remove_end(self._html_search_meta(
                ['og:title', 'twitter:title'], webpage, 'title', default=None), ' | CRTVG'),
            'description': self._html_search_meta('description', webpage, 'description', default=None),
            'thumbnail': self._html_search_meta(['og:image', 'twitter:image'], webpage, 'thumbnail', default=None),
        }
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/wrestleuniverse.py | yt_dlp/extractor/wrestleuniverse.py | import base64
import binascii
import json
import time
import uuid
from .common import InfoExtractor
from ..dependencies import Cryptodome
from ..utils import (
ExtractorError,
int_or_none,
jwt_decode_hs256,
traverse_obj,
try_call,
url_basename,
url_or_none,
urlencode_postdata,
variadic,
)
class WrestleUniverseBaseIE(InfoExtractor):
    """Base class with the wrestle-universe.com auth and API plumbing.

    Login goes through Google Identity Toolkit (Firebase); the bearer token
    is a JWT refreshed via its refresh token once the 'exp' claim passes.
    Some endpoints return fields encrypted against a throwaway RSA key that
    is generated per request (see _call_encrypted_api).
    """
    _NETRC_MACHINE = 'wrestleuniverse'
    _VALID_URL_TMPL = r'https?://(?:www\.)?wrestle-universe\.com/(?:(?P<lang>\w{2})/)?%s/(?P<id>\w+)'
    _API_HOST = 'api.wrestle-universe.com'
    _API_PATH = None  # set by subclasses (e.g. 'videoEpisodes', 'events')
    _REAL_TOKEN = None  # current bearer JWT
    _TOKEN_EXPIRY = None  # 'exp' claim of _REAL_TOKEN, epoch seconds
    _REFRESH_TOKEN = None
    _DEVICE_ID = None
    _LOGIN_QUERY = {'key': 'AIzaSyCaRPBsDQYVDUWWBXjsTrHESi2r_F3RAdA'}
    _LOGIN_HEADERS = {
        'Accept': '*/*',
        'Content-Type': 'application/json',
        'X-Client-Version': 'Chrome/JsCore/9.9.4/FirebaseCore-web',
        'X-Firebase-gmpid': '1:307308870738:web:820f38fe5150c8976e338b',
        'Referer': 'https://www.wrestle-universe.com/',
        'Origin': 'https://www.wrestle-universe.com',
    }
    @property
    def _TOKEN(self):
        """Return a currently-valid bearer token.

        Lazily picks the token up from browser cookies, then refreshes it
        (requiring a refresh token) once expired.
        """
        if not self._REAL_TOKEN or not self._TOKEN_EXPIRY:
            token = try_call(lambda: self._get_cookies('https://www.wrestle-universe.com/')['token'].value)
            if not token and not self._REFRESH_TOKEN:
                self.raise_login_required()
            self._TOKEN = token
        if not self._REAL_TOKEN or self._TOKEN_EXPIRY <= int(time.time()):
            if not self._REFRESH_TOKEN:
                raise ExtractorError(
                    'Expired token. Refresh your cookies in browser and try again', expected=True)
            self._refresh_token()
        return self._REAL_TOKEN
    @_TOKEN.setter
    def _TOKEN(self, value):
        self._REAL_TOKEN = value
        # Track expiry from the JWT's 'exp' claim; an undecodable token is fatal
        expiry = traverse_obj(value, ({jwt_decode_hs256}, 'exp', {int_or_none}))
        if not expiry:
            raise ExtractorError('There was a problem with the auth token')
        self._TOKEN_EXPIRY = expiry
    def _perform_login(self, username, password):
        # Firebase email/password sign-in; login errors come back as JSON with HTTP 400
        login = self._download_json(
            'https://identitytoolkit.googleapis.com/v1/accounts:signInWithPassword', None,
            'Logging in', query=self._LOGIN_QUERY, headers=self._LOGIN_HEADERS, data=json.dumps({
                'returnSecureToken': True,
                'email': username,
                'password': password,
            }, separators=(',', ':')).encode(), expected_status=400)
        token = traverse_obj(login, ('idToken', {str}))
        if not token:
            raise ExtractorError(
                f'Unable to log in: {traverse_obj(login, ("error", "message"))}', expected=True)
        self._REFRESH_TOKEN = traverse_obj(login, ('refreshToken', {str}))
        if not self._REFRESH_TOKEN:
            self.report_warning('No refresh token was granted')
        self._TOKEN = token
    def _real_initialize(self):
        # Resolve a stable device ID: extractor-arg override > cached value > fresh UUID
        if self._DEVICE_ID:
            return
        self._DEVICE_ID = self._configuration_arg('device_id', [None], ie_key=self._NETRC_MACHINE)[0]
        if not self._DEVICE_ID:
            self._DEVICE_ID = self.cache.load(self._NETRC_MACHINE, 'device_id')
        if self._DEVICE_ID:
            return
        self._DEVICE_ID = str(uuid.uuid4())
        self.cache.store(self._NETRC_MACHINE, 'device_id', self._DEVICE_ID)
    def _refresh_token(self):
        refresh = self._download_json(
            'https://securetoken.googleapis.com/v1/token', None, 'Refreshing token',
            query=self._LOGIN_QUERY, data=urlencode_postdata({
                'grant_type': 'refresh_token',
                'refresh_token': self._REFRESH_TOKEN,
            }), headers={
                **self._LOGIN_HEADERS,
                'Content-Type': 'application/x-www-form-urlencoded',
            })
        # The endpoint may rotate the refresh token; keep the newest one
        if traverse_obj(refresh, ('refresh_token', {str})):
            self._REFRESH_TOKEN = refresh['refresh_token']
        token = traverse_obj(refresh, 'access_token', 'id_token', expected_type=str)
        if not token:
            raise ExtractorError('No auth token returned from refresh request')
        self._TOKEN = token
    def _call_api(self, video_id, param='', msg='API', auth=True, data=None, query={}, fatal=True):
        """Call the JSON API; `data` (a dict) is sent as a JSON POST body."""
        headers = {'CA-CID': ''}
        if data:
            headers['Content-Type'] = 'application/json;charset=utf-8'
            data = json.dumps(data, separators=(',', ':')).encode()
        if auth and self._TOKEN:
            headers['Authorization'] = f'Bearer {self._TOKEN}'
        return self._download_json(
            f'https://{self._API_HOST}/v1/{self._API_PATH}/{video_id}{param}', video_id,
            note=f'Downloading {msg} JSON', errnote=f'Failed to download {msg} JSON',
            data=data, headers=headers, query=query, fatal=fatal)
    def _call_encrypted_api(self, video_id, param='', msg='API', data={}, query={}, fatal=True):
        """Call the API with a one-shot RSA public key; returns (json, decrypt).

        The server encrypts sensitive response fields (e.g. HLS AES keys)
        against the supplied key; `decrypt` decodes such base64 fields.
        """
        if not Cryptodome.RSA:
            raise ExtractorError('pycryptodomex not found. Please install', expected=True)
        private_key = Cryptodome.RSA.generate(2048)
        cipher = Cryptodome.PKCS1_OAEP.new(private_key, hashAlgo=Cryptodome.SHA1)
        def decrypt(data):
            if not data:
                return None
            try:
                return cipher.decrypt(base64.b64decode(data)).decode()
            except (ValueError, binascii.Error) as e:
                raise ExtractorError(f'Could not decrypt data: {e}')
        token = base64.b64encode(private_key.public_key().export_key('DER')).decode()
        api_json = self._call_api(video_id, param, msg, data={
            'deviceId': self._DEVICE_ID,
            'token': token,
            **data,
        }, query=query, fatal=fatal)
        return api_json, decrypt
    def _download_metadata(self, url, video_id, lang, props_keys):
        # Prefer the metadata API; fall back to the Next.js data embedded in the page
        metadata = self._call_api(video_id, msg='metadata', query={'al': lang or 'ja'}, auth=False, fatal=False)
        if not metadata:
            webpage = self._download_webpage(url, video_id)
            nextjs_data = self._search_nextjs_data(webpage, video_id, fatal=False)
            metadata = traverse_obj(nextjs_data, (
                'props', 'pageProps', *variadic(props_keys, (str, bytes, dict, set)), {dict})) or {}
        return metadata
    def _get_formats(self, data, path, video_id=None):
        """Extract live HLS formats from the URL found at `path` in `data`."""
        hls_url = traverse_obj(data, path, get_all=False)
        if not hls_url and not data.get('canWatch'):
            self.raise_no_formats(
                'This account does not have access to the requested content', expected=True)
        elif not hls_url:
            self.raise_no_formats('No supported formats found')
        return self._extract_m3u8_formats(hls_url, video_id, 'mp4', m3u8_id='hls', live=True)
class WrestleUniverseVODIE(WrestleUniverseBaseIE):
    """Extractor for wrestle-universe.com on-demand video episodes."""
    _VALID_URL = WrestleUniverseBaseIE._VALID_URL_TMPL % 'videos'
    _TESTS = [{
        'url': 'https://www.wrestle-universe.com/en/videos/dp8mpjmcKfxzUhEHM2uFws',
        'info_dict': {
            'id': 'dp8mpjmcKfxzUhEHM2uFws',
            'ext': 'mp4',
            'title': 'The 3rd “Futari wa Princess” Max Heart Tournament',
            'description': 'md5:318d5061e944797fbbb81d5c7dd00bf5',
            'location': '埼玉・春日部ふれあいキューブ',
            'channel': 'tjpw',
            'duration': 7119,
            'timestamp': 1674979200,
            'upload_date': '20230129',
            'thumbnail': 'https://image.asset.wrestle-universe.com/8FjD67P8rZc446RBQs5RBN/8FjD67P8rZc446RBQs5RBN',
            'chapters': 'count:7',
            'cast': 'count:21',
        },
        'params': {
            'skip_download': 'm3u8',
        },
    }]
    _API_PATH = 'videoEpisodes'
    def _real_extract(self, url):
        lang, video_id = self._match_valid_url(url).group('lang', 'id')
        metadata = self._download_metadata(url, video_id, lang, 'videoEpisodeFallbackData')
        # The :watch endpoint returns the (authenticated) HLS manifest URL
        video_data = self._call_api(video_id, ':watch', 'watch', data={'deviceId': self._DEVICE_ID})
        return {
            'id': video_id,
            'formats': self._get_formats(video_data, ('protocolHls', 'url', {url_or_none}), video_id),
            **traverse_obj(metadata, {
                'title': ('displayName', {str}),
                'description': ('description', {str}),
                'channel': ('labels', 'group', {str}),
                'location': ('labels', 'venue', {str}),
                'timestamp': ('watchStartTime', {int_or_none}),
                'thumbnail': ('keyVisualUrl', {url_or_none}),
                'cast': ('casts', ..., 'displayName', {str}),
                'duration': ('duration', {int}),
                'chapters': ('videoChapters', lambda _, v: isinstance(v.get('start'), int), {
                    'title': ('displayName', {str}),
                    'start_time': ('start', {int}),
                    'end_time': ('end', {int}),
                }),
            }),
        }
class WrestleUniversePPVIE(WrestleUniverseBaseIE):
    """Extractor for wrestle-universe.com PPV/live event archives.

    Stream keys may arrive RSA-encrypted (see _call_encrypted_api); when
    present they are exposed via the 'hls_aes' field.
    """
    _VALID_URL = WrestleUniverseBaseIE._VALID_URL_TMPL % 'lives'
    _TESTS = [{
        'note': 'HLS AES-128 key obtained via API',
        'url': 'https://www.wrestle-universe.com/en/lives/buH9ibbfhdJAY4GKZcEuJX',
        'info_dict': {
            'id': 'buH9ibbfhdJAY4GKZcEuJX',
            'ext': 'mp4',
            'title': '【PPV】Beyond the origins, into the future',
            'description': 'md5:9a872db68cd09be4a1e35a3ee8b0bdfc',
            'channel': 'tjpw',
            'location': '東京・Twin Box AKIHABARA',
            'duration': 10098,
            'timestamp': 1675076400,
            'upload_date': '20230130',
            'thumbnail': 'https://image.asset.wrestle-universe.com/rJs2m7cBaLXrwCcxMdQGRM/rJs2m7cBaLXrwCcxMdQGRM',
            'thumbnails': 'count:3',
            'hls_aes': {
                'key': '5633184acd6e43f1f1ac71c6447a4186',
                'iv': '5bac71beb33197d5600337ce86de7862',
            },
        },
        'params': {
            'skip_download': 'm3u8',
        },
        'skip': 'No longer available',
    }, {
        'note': 'unencrypted HLS',
        'url': 'https://www.wrestle-universe.com/en/lives/wUG8hP5iApC63jbtQzhVVx',
        'info_dict': {
            'id': 'wUG8hP5iApC63jbtQzhVVx',
            'ext': 'mp4',
            'title': 'GRAND PRINCESS \'22',
            'description': 'md5:e4f43d0d4262de3952ff34831bc99858',
            'channel': 'tjpw',
            'location': '東京・両国国技館',
            'duration': 18044,
            'timestamp': 1647665400,
            'upload_date': '20220319',
            'thumbnail': 'https://image.asset.wrestle-universe.com/i8jxSTCHPfdAKD4zN41Psx/i8jxSTCHPfdAKD4zN41Psx',
            'thumbnails': 'count:3',
        },
        'params': {
            'skip_download': 'm3u8',
        },
    }, {
        'note': 'manifest provides live-a (partial) and live-b (full) streams',
        'url': 'https://www.wrestle-universe.com/en/lives/umc99R9XsexXrxr9VjTo9g',
        'only_matching': True,
    }]
    _API_PATH = 'events'
    def _real_extract(self, url):
        lang, video_id = self._match_valid_url(url).group('lang', 'id')
        metadata = self._download_metadata(url, video_id, lang, 'eventFallbackData')
        info = {
            'id': video_id,
            **traverse_obj(metadata, {
                'title': ('displayName', {str}),
                'description': ('description', {str}),
                'channel': ('labels', 'group', {str}),
                'location': ('labels', 'venue', {str}),
                'timestamp': ('startTime', {int_or_none}),
                'thumbnails': (('keyVisualUrl', 'alterKeyVisualUrl', 'heroKeyVisualUrl'), {'url': {url_or_none}}),
            }),
        }
        # Duration is derived from start/end timestamps when both are available
        ended_time = traverse_obj(metadata, ('endedTime', {int_or_none}))
        if info.get('timestamp') and ended_time:
            info['duration'] = ended_time - info['timestamp']
        video_data, decrypt = self._call_encrypted_api(
            video_id, ':watchArchive', 'watch archive', data={'method': 1})
        # 'chromecastUrls' can be only partial videos, avoid
        info['formats'] = self._get_formats(video_data, ('hls', (('urls', ...), 'url'), {url_or_none}), video_id)
        for f in info['formats']:
            # bitrates are exaggerated in PPV playlists, so avoid wrong/huge filesize_approx values
            if f.get('tbr'):
                f['tbr'] = int(f['tbr'] / 2.5)
            # prefer variants with the same basename as the master playlist to avoid partial streams
            f['format_id'] = url_basename(f['url']).partition('.')[0]
            if not f['format_id'].startswith(url_basename(f['manifest_url']).partition('.')[0]):
                f['preference'] = -10
        # The HLS AES key/IV come back encrypted against our one-shot RSA key
        hls_aes_key = traverse_obj(video_data, ('hls', 'key', {decrypt}))
        if hls_aes_key:
            info['hls_aes'] = {
                'key': hls_aes_key,
                'iv': traverse_obj(video_data, ('hls', 'iv', {decrypt})),
            }
        elif traverse_obj(video_data, ('hls', 'encryptType', {int})):
            self.report_warning('HLS AES-128 key was not found in API response')
        return info
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/radiofrance.py | yt_dlp/extractor/radiofrance.py | import itertools
import re
import urllib.parse
from .common import InfoExtractor
from ..utils import (
int_or_none,
join_nonempty,
js_to_json,
parse_duration,
strftime_or_none,
traverse_obj,
unified_strdate,
urljoin,
)
class RadioFranceIE(InfoExtractor):
    """Extractor for the legacy maison.radiofrance.fr "radiovisions" pages."""
    _VALID_URL = r'https?://maison\.radiofrance\.fr/radiovisions/(?P<id>[^?#]+)'
    IE_NAME = 'radiofrance'
    _TEST = {
        'url': 'http://maison.radiofrance.fr/radiovisions/one-one',
        'md5': 'bdbb28ace95ed0e04faab32ba3160daf',
        'info_dict': {
            'id': 'one-one',
            'ext': 'ogg',
            'title': 'One to one',
            'description': "Plutôt que d'imaginer la radio de demain comme technologie ou comme création de contenu, je veux montrer que quelles que soient ses évolutions, j'ai l'intime conviction que la radio continuera d'être un grand média de proximité pour les auditeurs.",
            'uploader': 'Thomas Hercouët',
        },
    }
    def _real_extract(self, url):
        audio_id = self._match_valid_url(url).group('id')
        webpage = self._download_webpage(url, audio_id)
        title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')
        description = self._html_search_regex(
            r'<div class="bloc_page_wrapper"><div class="text">(.*?)</div>',
            webpage, 'description', fatal=False)
        uploader = self._html_search_regex(
            r'<div class="credit"> © (.*?)</div>',
            webpage, 'uploader', fatal=False)
        formats_str = self._html_search_regex(
            r'class="jp-jplayer[^"]*" data-source="([^"]+)">',
            webpage, 'audio URLs')
        # data-source holds "name: 'url'" pairs; list position sets quality
        formats = []
        for quality, (format_id, audio_url) in enumerate(
                re.findall(r"([a-z0-9]+)\s*:\s*'([^']+)'", formats_str)):
            formats.append({
                'format_id': format_id,
                'url': audio_url,
                'vcodec': 'none',
                'quality': quality,
            })
        return {
            'id': audio_id,
            'title': title,
            'formats': formats,
            'description': description,
            'uploader': uploader,
        }
class RadioFranceBaseIE(InfoExtractor):
    """Shared URL base and page-data helpers for the radiofrance.fr extractors."""
    _VALID_URL_BASE = r'https?://(?:www\.)?radiofrance\.fr'
    # Alternation of the known station slugs, each regex-escaped
    _STATIONS_RE = '|'.join(re.escape(station) for station in (
        'franceculture',
        'franceinfo',
        'franceinter',
        'francemusique',
        'fip',
        'mouv',
    ))
    def _extract_data_from_webpage(self, webpage, display_id, key):
        """Pull `key` out of the page's `const data = [...]` JS blob."""
        page_data = self._search_json(
            r'\bconst\s+data\s*=', webpage, key, display_id,
            contains_pattern=r'\[\{(?s:.+)\}\]', transform_source=js_to_json)
        return traverse_obj(page_data, (..., 'data', key, {dict}), get_all=False) or {}
class FranceCultureIE(RadioFranceBaseIE):
    """Extractor for individual radiofrance.fr podcast episode pages."""
    _VALID_URL = rf'''(?x)
        {RadioFranceBaseIE._VALID_URL_BASE}
        /(?:{RadioFranceBaseIE._STATIONS_RE})
        /podcasts/(?:[^?#]+/)?(?P<display_id>[^?#]+)-(?P<id>\d{{6,}})(?:$|[?#])
    '''
    _TESTS = [
        {
            'url': 'https://www.radiofrance.fr/franceculture/podcasts/science-en-questions/la-physique-d-einstein-aiderait-elle-a-comprendre-le-cerveau-8440487',
            'info_dict': {
                'id': '8440487',
                'display_id': 'la-physique-d-einstein-aiderait-elle-a-comprendre-le-cerveau',
                'ext': 'mp3',
                'title': 'La physique d’Einstein aiderait-elle à comprendre le cerveau ?',
                'description': 'Existerait-il un pont conceptuel entre la physique de l’espace-temps et les neurosciences ?',
                'thumbnail': r're:^https?://.*\.(?:jpg|png)',
                'upload_date': '20220514',
                'duration': 2750,
            },
        },
        {
            'url': 'https://www.radiofrance.fr/franceinter/podcasts/le-7-9-30/le-7-9-30-du-vendredi-10-mars-2023-2107675',
            'info_dict': {
                'id': '2107675',
                'display_id': 'le-7-9-30-du-vendredi-10-mars-2023',
                'title': 'Inflation alimentaire : comment en sortir ? - Régis Debray et Claude Grange - Cybèle Idelot',
                'description': 'md5:36ee74351ede77a314fdebb94026b916',
                'thumbnail': r're:^https?://.*\.(?:jpg|png)',
                'upload_date': '20230310',
                'duration': 8977,
                'ext': 'mp3',
            },
        },
        {
            'url': 'https://www.radiofrance.fr/franceinter/podcasts/la-rafle-du-vel-d-hiv-une-affaire-d-etat/les-racines-du-crime-episode-1-3715507',
            'only_matching': True,
        }, {
            'url': 'https://www.radiofrance.fr/franceinfo/podcasts/le-billet-sciences/sante-bientot-un-vaccin-contre-l-asthme-allergique-3057200',
            'only_matching': True,
        },
    ]
    def _real_extract(self, url):
        video_id, display_id = self._match_valid_url(url).group('id', 'display_id')
        webpage = self._download_webpage(url, display_id)
        # _search_json_ld doesn't correctly handle this. See https://github.com/yt-dlp/yt-dlp/pull/3874#discussion_r891903846
        video_data = self._search_json('', webpage, 'audio data', display_id, contains_pattern=r'{\s*"@type"\s*:\s*"AudioObject".+}')
        return {
            'id': video_id,
            'display_id': display_id,
            'url': video_data['contentUrl'],
            'vcodec': 'none' if video_data.get('encodingFormat') == 'mp3' else None,
            'duration': parse_duration(video_data.get('duration')),
            'title': self._html_search_regex(r'(?s)<h1[^>]*itemprop="[^"]*name[^"]*"[^>]*>(.+?)</h1>',
                                             webpage, 'title', default=self._og_search_title(webpage)),
            'description': self._html_search_regex(
                r'(?s)<meta name="description"\s*content="([^"]+)', webpage, 'description', default=None),
            'thumbnail': self._og_search_thumbnail(webpage),
            'uploader': self._html_search_regex(
                r'(?s)<span class="author">(.*?)</span>', webpage, 'uploader', default=None),
            'upload_date': unified_strdate(self._search_regex(
                r'"datePublished"\s*:\s*"([^"]+)', webpage, 'timestamp', fatal=False)),
        }
class RadioFranceLiveIE(RadioFranceBaseIE):
    """Extractor for Radio France live streams (main stations and web radios)."""
    _VALID_URL = rf'''(?x)
        https?://(?:www\.)?radiofrance\.fr
        /(?P<id>{RadioFranceBaseIE._STATIONS_RE})
        /?(?P<substation_id>radio-[\w-]+)?(?:[#?]|$)
    '''
    _TESTS = [{
        'url': 'https://www.radiofrance.fr/franceinter/',
        'info_dict': {
            'id': 'franceinter',
            'title': str,
            'live_status': 'is_live',
            'ext': 'aac',
        },
        'params': {
            'skip_download': 'Livestream',
        },
    }, {
        'url': 'https://www.radiofrance.fr/franceculture',
        'info_dict': {
            'id': 'franceculture',
            'title': str,
            'live_status': 'is_live',
            'ext': 'aac',
        },
        'params': {
            'skip_download': 'Livestream',
        },
    }, {
        'url': 'https://www.radiofrance.fr/mouv/radio-musique-kids-family',
        'info_dict': {
            'id': 'mouv-radio-musique-kids-family',
            'title': str,
            'live_status': 'is_live',
            'ext': 'aac',
        },
        'params': {
            'skip_download': 'Livestream',
        },
    }, {
        'url': 'https://www.radiofrance.fr/mouv/radio-rnb-soul',
        'info_dict': {
            'id': 'mouv-radio-rnb-soul',
            'title': str,
            'live_status': 'is_live',
            'ext': 'aac',
        },
        'params': {
            'skip_download': 'Livestream',
        },
    }, {
        'url': 'https://www.radiofrance.fr/mouv/radio-musique-mix',
        'info_dict': {
            'id': 'mouv-radio-musique-mix',
            'title': str,
            'live_status': 'is_live',
            'ext': 'aac',
        },
        'params': {
            'skip_download': 'Livestream',
        },
    }, {
        'url': 'https://www.radiofrance.fr/fip/radio-rock',
        'info_dict': {
            'id': 'fip-radio-rock',
            'title': str,
            'live_status': 'is_live',
            'ext': 'aac',
        },
        'params': {
            'skip_download': 'Livestream',
        },
    }, {
        'url': 'https://www.radiofrance.fr/mouv',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        station_id, substation_id = self._match_valid_url(url).group('id', 'substation_id')
        # Web radios (sub-stations) embed their data in the page; main
        # stations expose a dedicated live API endpoint
        if substation_id:
            webpage = self._download_webpage(url, station_id)
            api_response = self._extract_data_from_webpage(webpage, station_id, 'webRadioData')
        else:
            api_response = self._download_json(
                f'https://www.radiofrance.fr/{station_id}/api/live', station_id)
        formats, subtitles = [], {}
        for media_source in traverse_obj(api_response, (('now', None), 'media', 'sources', lambda _, v: v['url'])):
            if media_source.get('format') == 'hls':
                fmts, subs = self._extract_m3u8_formats_and_subtitles(media_source['url'], station_id, fatal=False)
                formats.extend(fmts)
                self._merge_subtitles(subs, target=subtitles)
            else:
                # Non-HLS sources are direct audio streams
                formats.append({
                    'url': media_source['url'],
                    'abr': media_source.get('bitrate'),
                })
        return {
            'id': join_nonempty(station_id, substation_id),
            'title': traverse_obj(api_response, ('visual', 'legend')) or join_nonempty(
                ('now', 'firstLine', 'title'), ('now', 'secondLine', 'title'), from_dict=api_response, delim=' - '),
            'formats': formats,
            'subtitles': subtitles,
            'is_live': True,
        }
class RadioFrancePlaylistBaseIE(RadioFranceBaseIE):
    """Shared pagination logic for podcast/profile playlists; subclasses must set _METADATA_KEY"""
    def _call_api(self, content_id, cursor, page_num):
        raise NotImplementedError('This method must be implemented by subclasses')
    def _generate_playlist_entries(self, content_id, content_response):
        page_num = 1
        while True:
            for entry in content_response['items']:
                entry_fields = traverse_obj(entry, {
                    'title': 'title',
                    'description': 'standFirst',
                    'timestamp': ('publishedDate', {int_or_none}),
                    'thumbnail': ('visual', 'src'),
                })
                yield self.url_result(
                    f'https://www.radiofrance.fr/{entry["path"]}', url_transparent=True, **entry_fields)
            next_cursor = traverse_obj(content_response, (('pagination', None), 'next'), get_all=False)
            if not next_cursor:
                return
            page_num += 1
            content_response = self._call_api(content_id, next_cursor, page_num)
    def _real_extract(self, url):
        display_id = self._match_id(url)
        metadata = self._download_json(
            'https://www.radiofrance.fr/api/v2.1/path', display_id,
            query={'value': urllib.parse.urlparse(url).path})['content']
        content_id = metadata['id']
        # Profile pages expose name/role instead of title/standFirst; the
        # second traversal overrides the first where both yield a value
        playlist_fields = {
            **traverse_obj(metadata, {
                'title': 'title',
                'description': 'standFirst',
                'thumbnail': ('visual', 'src'),
            }),
            **traverse_obj(metadata, {
                'title': 'name',
                'description': 'role',
            }),
        }
        return self.playlist_result(
            self._generate_playlist_entries(content_id, metadata[self._METADATA_KEY]),
            content_id, display_id=display_id, **playlist_fields)
class RadioFrancePodcastIE(RadioFrancePlaylistBaseIE):
    """Extractor for radiofrance.fr podcast series (one entry per episode)."""
    _VALID_URL = rf'''(?x)
        {RadioFranceBaseIE._VALID_URL_BASE}
        /(?:{RadioFranceBaseIE._STATIONS_RE})
        /podcasts/(?P<id>[\w-]+)/?(?:[?#]|$)
    '''
    _TESTS = [{
        'url': 'https://www.radiofrance.fr/franceinfo/podcasts/le-billet-vert',
        'info_dict': {
            'id': 'eaf6ef81-a980-4f1c-a7d1-8a75ecd54b17',
            'display_id': 'le-billet-vert',
            'title': 'Le billet sciences',
            'description': 'md5:eb1007b34b0c0a680daaa71525bbd4c1',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
        },
        'playlist_mincount': 11,
    }, {
        'url': 'https://www.radiofrance.fr/franceinter/podcasts/jean-marie-le-pen-l-obsession-nationale',
        'info_dict': {
            'id': '566fd524-3074-4fbc-ac69-8696f2152a54',
            'display_id': 'jean-marie-le-pen-l-obsession-nationale',
            'title': 'Jean-Marie Le Pen, l\'obsession nationale',
            'description': 'md5:a07c0cfb894f6d07a62d0ad12c4b7d73',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
        },
        'playlist_count': 7,
    }, {
        'url': 'https://www.radiofrance.fr/franceculture/podcasts/serie-thomas-grjebine',
        'info_dict': {
            'id': '63c1ddc9-9f15-457a-98b2-411bac63f48d',
            'display_id': 'serie-thomas-grjebine',
            'title': 'Thomas Grjebine',
        },
        'playlist_count': 1,
    }, {
        'url': 'https://www.radiofrance.fr/fip/podcasts/certains-l-aiment-fip',
        'info_dict': {
            'id': '143dff38-e956-4a5d-8576-1c0b7242b99e',
            'display_id': 'certains-l-aiment-fip',
            'title': 'Certains l’aiment Fip',
            'description': 'md5:ff974672ba00d4fd5be80fb001c5b27e',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
        },
        'playlist_mincount': 321,
    }, {
        'url': 'https://www.radiofrance.fr/franceinter/podcasts/le-7-9',
        'only_matching': True,
    }, {
        'url': 'https://www.radiofrance.fr/mouv/podcasts/dirty-mix',
        'only_matching': True,
    }]
    # Episodes live under the 'expressions' key of the path-API metadata
    _METADATA_KEY = 'expressions'
    def _call_api(self, podcast_id, cursor, page_num):
        return self._download_json(
            f'https://www.radiofrance.fr/api/v2.1/concepts/{podcast_id}/expressions', podcast_id,
            note=f'Downloading page {page_num}', query={'pageCursor': cursor})
class RadioFranceProfileIE(RadioFrancePlaylistBaseIE):
    """Extractor for radiofrance.fr personality pages (all their documents)."""
    _VALID_URL = rf'{RadioFranceBaseIE._VALID_URL_BASE}/personnes/(?P<id>[\w-]+)'
    _TESTS = [{
        'url': 'https://www.radiofrance.fr/personnes/thomas-pesquet?p=3',
        'info_dict': {
            'id': '86c62790-e481-11e2-9f7b-782bcb6744eb',
            'display_id': 'thomas-pesquet',
            'title': 'Thomas Pesquet',
            'description': 'Astronaute à l\'agence spatiale européenne',
        },
        'playlist_mincount': 212,
    }, {
        'url': 'https://www.radiofrance.fr/personnes/eugenie-bastie',
        'info_dict': {
            'id': '9593050b-0183-4972-a0b5-d8f699079e02',
            'display_id': 'eugenie-bastie',
            'title': 'Eugénie Bastié',
            'description': 'Journaliste et essayiste',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
        },
        'playlist_mincount': 39,
    }, {
        'url': 'https://www.radiofrance.fr/personnes/lea-salame',
        'only_matching': True,
    }]
    # Documents live under the 'documents' key of the path-API metadata
    _METADATA_KEY = 'documents'
    def _call_api(self, profile_id, cursor, page_num):
        resp = self._download_json(
            f'https://www.radiofrance.fr/api/v2.1/taxonomy/{profile_id}/documents', profile_id,
            note=f'Downloading page {page_num}', query={
                'relation': 'personality',
                'cursor': cursor,
            })
        # Copy the nested cursor to the top level so the shared pagination
        # lookup in _generate_playlist_entries finds it
        resp['next'] = traverse_obj(resp, ('pagination', 'next'))
        return resp
class RadioFranceProgramScheduleIE(RadioFranceBaseIE):
    """Extractor for a station's daily program grid (one entry per broadcast)."""
    _VALID_URL = rf'''(?x)
        {RadioFranceBaseIE._VALID_URL_BASE}
        /(?P<station>{RadioFranceBaseIE._STATIONS_RE})
        /grille-programmes
    '''
    _TESTS = [{
        'url': 'https://www.radiofrance.fr/franceinter/grille-programmes?date=17-02-2023',
        'info_dict': {
            'id': 'franceinter-program-20230217',
            'upload_date': '20230217',
        },
        'playlist_count': 25,
    }, {
        'url': 'https://www.radiofrance.fr/franceculture/grille-programmes?date=01-02-2023',
        'info_dict': {
            'id': 'franceculture-program-20230201',
            'upload_date': '20230201',
        },
        'playlist_count': 25,
    }, {
        'url': 'https://www.radiofrance.fr/mouv/grille-programmes?date=19-03-2023',
        'info_dict': {
            'id': 'mouv-program-20230319',
            'upload_date': '20230319',
        },
        'playlist_count': 3,
    }, {
        'url': 'https://www.radiofrance.fr/francemusique/grille-programmes?date=18-03-2023',
        'info_dict': {
            'id': 'francemusique-program-20230318',
            'upload_date': '20230318',
        },
        'playlist_count': 15,
    }, {
        'url': 'https://www.radiofrance.fr/franceculture/grille-programmes',
        'only_matching': True,
    }]
    def _generate_playlist_entries(self, webpage_url, api_response):
        # Only grid steps that link to an episode page are yielded
        for entry in traverse_obj(api_response, ('steps', lambda _, v: v['expression']['path'])):
            yield self.url_result(
                urljoin(webpage_url, f'/{entry["expression"]["path"]}'), ie=FranceCultureIE,
                url_transparent=True, **traverse_obj(entry, {
                    'title': ('expression', 'title'),
                    'thumbnail': ('expression', 'visual', 'src'),
                    'timestamp': ('startTime', {int_or_none}),
                    'series_id': ('concept', 'id'),
                    'series': ('concept', 'title'),
                }))
    def _real_extract(self, url):
        station = self._match_valid_url(url).group('station')
        webpage = self._download_webpage(url, station)
        grid_data = self._extract_data_from_webpage(webpage, station, 'grid')
        upload_date = strftime_or_none(grid_data.get('date'), '%Y%m%d')
        return self.playlist_result(
            self._generate_playlist_entries(url, grid_data),
            join_nonempty(station, 'program', upload_date), upload_date=upload_date)
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
yt-dlp/yt-dlp | https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/byutv.py | yt_dlp/extractor/byutv.py | from .common import InfoExtractor
from ..utils import (
determine_ext,
merge_dicts,
parse_duration,
url_or_none,
)
class BYUtvIE(InfoExtractor):
    """Extract VOD and DVR videos from byutv.org watch/player pages.

    The site's catalog API returns one entry per delivery type (HLS, DASH,
    plain MP4); all of them are collected as formats of a single video.
    """
    _WORKING = False
    _VALID_URL = r'https?://(?:www\.)?byutv\.org/(?:watch|player)/(?!event/)(?P<id>[0-9a-f-]+)(?:/(?P<display_id>[^/?#&]+))?'
    _TESTS = [{
        'url': 'http://www.byutv.org/watch/6587b9a3-89d2-42a6-a7f7-fd2f81840a7d/studio-c-season-5-episode-5',
        'info_dict': {
            'id': 'ZvanRocTpW-G5_yZFeltTAMv6jxOU9KH',
            'display_id': 'studio-c-season-5-episode-5',
            'ext': 'mp4',
            'title': 'Season 5 Episode 5',
            'description': 'md5:1d31dc18ef4f075b28f6a65937d22c65',
            'thumbnail': r're:^https?://.*',
            'duration': 1486.486,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # dvr
        'url': 'https://www.byutv.org/player/8f1dab9b-b243-47c8-b525-3e2d021a3451/byu-softball-pacific-vs-byu-41219---game-2',
        'info_dict': {
            'id': '8f1dab9b-b243-47c8-b525-3e2d021a3451',
            'display_id': 'byu-softball-pacific-vs-byu-41219---game-2',
            'ext': 'mp4',
            'title': 'Pacific vs. BYU (4/12/19)',
            'description': 'md5:1ac7b57cb9a78015910a4834790ce1f3',
            'duration': 11645,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.byutv.org/watch/6587b9a3-89d2-42a6-a7f7-fd2f81840a7d',
        'only_matching': True,
    }, {
        'url': 'https://www.byutv.org/player/27741493-dc83-40b0-8420-e7ae38a2ae98/byu-football-toledo-vs-byu-93016?listid=4fe0fee5-0d3c-4a29-b725-e4948627f472&listindex=0&q=toledo',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        video_id = mobj.group('id')
        display_id = mobj.group('display_id') or video_id

        # One JSON object per delivery type, keyed by an API-side format name.
        video = self._download_json(
            'https://api.byutv.org/api3/catalog/getvideosforcontent',
            display_id, query={
                'contentid': video_id,
                'channel': 'byutv',
                'x-byutv-context': 'web$US',
            }, headers={
                'x-byutv-context': 'web$US',
                'x-byutv-platformkey': 'xsaaw9c7y5',
            })

        info = {}
        formats = []
        subtitles = {}
        for format_id, ep in video.items():
            if not isinstance(ep, dict):
                continue
            video_url = url_or_none(ep.get('videoUrl'))
            if not video_url:
                continue
            ext = determine_ext(video_url)
            if ext == 'm3u8':
                m3u8_fmts, m3u8_subs = self._extract_m3u8_formats_and_subtitles(
                    video_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id='hls', fatal=False)
                formats.extend(m3u8_fmts)
                subtitles = self._merge_subtitles(subtitles, m3u8_subs)
            elif ext == 'mpd':
                mpd_fmts, mpd_subs = self._extract_mpd_formats_and_subtitles(
                    video_url, video_id, mpd_id='dash', fatal=False)
                formats.extend(mpd_fmts)
                subtitles = self._merge_subtitles(subtitles, mpd_subs)
            else:
                formats.append({
                    'url': video_url,
                    'format_id': format_id,
                })
            # BUG FIX: merge_dicts() returns a NEW dict and does not mutate its
            # first argument, so the result must be re-assigned; previously the
            # episode metadata gathered here was silently discarded and the
            # title always fell back to display_id below. merge_dicts prefers
            # earlier dicts' non-empty values, so the first episode entry wins.
            info = merge_dicts(info, {
                'title': ep.get('title'),
                'description': ep.get('description'),
                'thumbnail': ep.get('imageThumbnail'),
                'duration': parse_duration(ep.get('length')),
            })

        # API metadata takes precedence; display_id is only a title fallback.
        return merge_dicts(info, {
            'id': video_id,
            'display_id': display_id,
            'title': display_id,
            'formats': formats,
            'subtitles': subtitles,
        })
| python | Unlicense | 5a481d65fa99862110bb84d10a2f15f0cb47cab3 | 2026-01-04T14:38:15.430780Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.