Dataset columns (one record per source file):
repo: string, 7 to 90 chars
file_url: string, 81 to 315 chars
file_path: string, 4 to 228 chars
content: string, 0 to 32.8k chars
language: string, 1 distinct value
license: string, 7 distinct values
commit_sha: string, 40 chars
retrieved_at: timestamp, 2026-01-04 14:38:15 to 2026-01-05 02:33:18
truncated: bool, 2 distinct values
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/fczenit.py
youtube_dl/extractor/fczenit.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    float_or_none,
)


class FczenitIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?fc-zenit\.ru/video/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://fc-zenit.ru/video/41044/',
        'md5': '0e3fab421b455e970fa1aa3891e57df0',
        'info_dict': {
            'id': '41044',
            'ext': 'mp4',
            'title': 'Так пишется история: казанский разгром ЦСКА на «Зенит-ТВ»',
            'timestamp': 1462283735,
            'upload_date': '20160503',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        msi_id = self._search_regex(
            r"(?s)config\s*=\s*{.+?video_id\s*:\s*'([^']+)'", webpage, 'msi id')

        msi_data = self._download_json(
            'http://player.fc-zenit.ru/msi/video', msi_id, query={
                'video': msi_id,
            })['data']
        title = msi_data['name']

        formats = [{
            'format_id': q.get('label'),
            'url': q['url'],
            'height': int_or_none(q.get('label')),
        } for q in msi_data['qualities'] if q.get('url')]

        self._sort_formats(formats)

        tags = [tag['label'] for tag in msi_data.get('tags', []) if tag.get('label')]

        return {
            'id': video_id,
            'title': title,
            'thumbnail': msi_data.get('preview'),
            'formats': formats,
            'duration': float_or_none(msi_data.get('duration')),
            'timestamp': int_or_none(msi_data.get('date')),
            'tags': tags,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/morningstar.py
youtube_dl/extractor/morningstar.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor


class MorningstarIE(InfoExtractor):
    IE_DESC = 'morningstar.com'
    _VALID_URL = r'https?://(?:(?:www|news)\.)morningstar\.com/[cC]over/video[cC]enter\.aspx\?id=(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://www.morningstar.com/cover/videocenter.aspx?id=615869',
        'md5': '6c0acface7a787aadc8391e4bbf7b0f5',
        'info_dict': {
            'id': '615869',
            'ext': 'mp4',
            'title': 'Get Ahead of the Curve on 2013 Taxes',
            'description': "Vanguard's Joel Dickson on managing higher tax rates for high-income earners and fund capital-gain distributions in 2013.",
            'thumbnail': r're:^https?://.*m(?:orning)?star\.com/.+thumb\.jpg$'
        }
    }, {
        'url': 'http://news.morningstar.com/cover/videocenter.aspx?id=825556',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        webpage = self._download_webpage(url, video_id)

        title = self._html_search_regex(
            r'<h1 id="titleLink">(.*?)</h1>', webpage, 'title')
        video_url = self._html_search_regex(
            r'<input type="hidden" id="hidVideoUrl" value="([^"]+)"',
            webpage, 'video URL')
        thumbnail = self._html_search_regex(
            r'<input type="hidden" id="hidSnapshot" value="([^"]+)"',
            webpage, 'thumbnail', fatal=False)
        description = self._html_search_regex(
            r'<div id="mstarDeck".*?>(.*?)</div>',
            webpage, 'description', fatal=False)

        return {
            'id': video_id,
            'title': title,
            'url': video_url,
            'thumbnail': thumbnail,
            'description': description,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/skyit.py
youtube_dl/extractor/skyit.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_parse_qs,
    compat_urllib_parse_urlparse,
)
from ..utils import (
    dict_get,
    int_or_none,
    parse_duration,
    unified_timestamp,
)


class SkyItPlayerIE(InfoExtractor):
    IE_NAME = 'player.sky.it'
    _VALID_URL = r'https?://player\.sky\.it/player/(?:external|social)\.html\?.*?\bid=(?P<id>\d+)'
    _GEO_BYPASS = False
    _DOMAIN = 'sky'
    _PLAYER_TMPL = 'https://player.sky.it/player/external.html?id=%s&domain=%s'
    # http://static.sky.it/static/skyplayer/conf.json
    _TOKEN_MAP = {
        'cielo': 'Hh9O7M8ks5yi6nSROL7bKYz933rdf3GhwZlTLMgvy4Q',
        'hotclub': 'kW020K2jq2lk2eKRJD2vWEg832ncx2EivZlTLQput2C',
        'mtv8': 'A5Nn9GGb326CI7vP5e27d7E4PIaQjota',
        'salesforce': 'C6D585FD1615272C98DE38235F38BD86',
        'sitocommerciale': 'VJwfFuSGnLKnd9Phe9y96WkXgYDCguPMJ2dLhGMb2RE',
        'sky': 'F96WlOd8yoFmLQgiqv6fNQRvHZcsWk5jDaYnDvhbiJk',
        'skyacademy': 'A6LAn7EkO2Q26FRy0IAMBekX6jzDXYL3',
        'skyarte': 'LWk29hfiU39NNdq87ePeRach3nzTSV20o0lTv2001Cd',
        'theupfront': 'PRSGmDMsg6QMGc04Obpoy7Vsbn7i2Whp',
    }

    def _player_url_result(self, video_id):
        return self.url_result(
            self._PLAYER_TMPL % (video_id, self._DOMAIN),
            SkyItPlayerIE.ie_key(), video_id)

    def _parse_video(self, video, video_id):
        title = video['title']
        is_live = video.get('type') == 'live'
        hls_url = video.get(('streaming' if is_live else 'hls') + '_url')
        if not hls_url and video.get('geoblock' if is_live else 'geob'):
            self.raise_geo_restricted(countries=['IT'])

        if is_live:
            formats = self._extract_m3u8_formats(hls_url, video_id, 'mp4')
        else:
            formats = self._extract_akamai_formats(
                hls_url, video_id, {'http': 'videoplatform.sky.it'})
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': self._live_title(title) if is_live else title,
            'formats': formats,
            'thumbnail': dict_get(video, ('video_still', 'video_still_medium', 'thumb')),
            'description': video.get('short_desc') or None,
            'timestamp': unified_timestamp(video.get('create_date')),
            'duration': int_or_none(video.get('duration_sec')) or parse_duration(video.get('duration')),
            'is_live': is_live,
        }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        domain = compat_parse_qs(compat_urllib_parse_urlparse(
            url).query).get('domain', [None])[0]
        token = dict_get(self._TOKEN_MAP, (domain, 'sky'))
        video = self._download_json(
            'https://apid.sky.it/vdp/v1/getVideoData',
            video_id, query={
                'caller': 'sky',
                'id': video_id,
                'token': token
            }, headers=self.geo_verification_headers())
        return self._parse_video(video, video_id)


class SkyItVideoIE(SkyItPlayerIE):
    IE_NAME = 'video.sky.it'
    _VALID_URL = r'https?://(?:masterchef|video|xfactor)\.sky\.it(?:/[^/]+)*/video/[0-9a-z-]+-(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://video.sky.it/news/mondo/video/uomo-ucciso-da-uno-squalo-in-australia-631227',
        'md5': 'fe5c91e59a84a3437eaa0bca6e134ccd',
        'info_dict': {
            'id': '631227',
            'ext': 'mp4',
            'title': 'Uomo ucciso da uno squalo in Australia',
            'timestamp': 1606036192,
            'upload_date': '20201122',
        }
    }, {
        'url': 'https://xfactor.sky.it/video/x-factor-2020-replay-audizioni-1-615820',
        'only_matching': True,
    }, {
        'url': 'https://masterchef.sky.it/video/masterchef-9-cosa-e-successo-nella-prima-puntata-562831',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        return self._player_url_result(video_id)


class SkyItVideoLiveIE(SkyItPlayerIE):
    IE_NAME = 'video.sky.it:live'
    _VALID_URL = r'https?://video\.sky\.it/diretta/(?P<id>[^/?&#]+)'
    _TEST = {
        'url': 'https://video.sky.it/diretta/tg24',
        'info_dict': {
            'id': '1',
            'ext': 'mp4',
            'title': r're:Diretta TG24 \d{4}-\d{2}-\d{2} \d{2}:\d{2}',
            'description': 'Guarda la diretta streaming di SkyTg24, segui con Sky tutti gli appuntamenti e gli speciali di Tg24.',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        asset_id = compat_str(self._parse_json(self._search_regex(
            r'<script[^>]+id="__NEXT_DATA__"[^>]*>({.+?})</script>',
            webpage, 'next data'), display_id)['props']['initialState']['livePage']['content']['asset_id'])
        livestream = self._download_json(
            'https://apid.sky.it/vdp/v1/getLivestream',
            asset_id, query={'id': asset_id})
        return self._parse_video(livestream, asset_id)


class SkyItIE(SkyItPlayerIE):
    IE_NAME = 'sky.it'
    _VALID_URL = r'https?://(?:sport|tg24)\.sky\.it(?:/[^/]+)*/\d{4}/\d{2}/\d{2}/(?P<id>[^/?&#]+)'
    _TESTS = [{
        'url': 'https://sport.sky.it/calcio/serie-a/2020/11/21/juventus-cagliari-risultato-gol',
        'info_dict': {
            'id': '631201',
            'ext': 'mp4',
            'title': 'Un rosso alla violenza: in campo per i diritti delle donne',
            'upload_date': '20201121',
            'timestamp': 1605995753,
        },
        'expected_warnings': ['Unable to download f4m manifest'],
    }, {
        'url': 'https://tg24.sky.it/mondo/2020/11/22/australia-squalo-uccide-uomo',
        'md5': 'fe5c91e59a84a3437eaa0bca6e134ccd',
        'info_dict': {
            'id': '631227',
            'ext': 'mp4',
            'title': 'Uomo ucciso da uno squalo in Australia',
            'timestamp': 1606036192,
            'upload_date': '20201122',
        },
    }]
    _VIDEO_ID_REGEX = r'data-videoid="(\d+)"'

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        video_id = self._search_regex(
            self._VIDEO_ID_REGEX, webpage, 'video id')
        return self._player_url_result(video_id)


class SkyItAcademyIE(SkyItIE):
    IE_NAME = 'skyacademy.it'
    _VALID_URL = r'https?://(?:www\.)?skyacademy\.it(?:/[^/]+)*/\d{4}/\d{2}/\d{2}/(?P<id>[^/?&#]+)'
    _TESTS = [{
        'url': 'https://www.skyacademy.it/eventi-speciali/2019/07/05/a-lezione-di-cinema-con-sky-academy-/',
        'md5': 'ced5c26638b7863190cbc44dd6f6ba08',
        'info_dict': {
            'id': '523458',
            'ext': 'mp4',
            'title': 'Sky Academy "The Best CineCamp 2019"',
            'timestamp': 1562843784,
            'upload_date': '20190711',
        }
    }]
    _DOMAIN = 'skyacademy'
    _VIDEO_ID_REGEX = r'id="news-videoId_(\d+)"'


class SkyItArteIE(SkyItIE):
    IE_NAME = 'arte.sky.it'
    _VALID_URL = r'https?://arte\.sky\.it/video/(?P<id>[^/?&#]+)'
    _TESTS = [{
        'url': 'https://arte.sky.it/video/serie-musei-venezia-collezionismo-12-novembre/',
        'md5': '515aee97b87d7a018b6c80727d3e7e17',
        'info_dict': {
            'id': '627926',
            'ext': 'mp4',
            'title': "Musei Galleria Franchetti alla Ca' d'Oro Palazzo Grimani",
            'upload_date': '20201106',
            'timestamp': 1604664493,
        }
    }]
    _DOMAIN = 'skyarte'
    _VIDEO_ID_REGEX = r'(?s)<iframe[^>]+src="(?:https:)?//player\.sky\.it/player/external\.html\?[^"]*\bid=(\d+)'


class CieloTVItIE(SkyItIE):
    IE_NAME = 'cielotv.it'
    _VALID_URL = r'https?://(?:www\.)?cielotv\.it/video/(?P<id>[^.]+)\.html'
    _TESTS = [{
        'url': 'https://www.cielotv.it/video/Il-lunedi-e-sempre-un-dramma.html',
        'md5': 'c4deed77552ba901c2a0d9258320304b',
        'info_dict': {
            'id': '499240',
            'ext': 'mp4',
            'title': 'Il lunedì è sempre un dramma',
            'upload_date': '20190329',
            'timestamp': 1553862178,
        }
    }]
    _DOMAIN = 'cielo'
    _VIDEO_ID_REGEX = r'videoId\s*=\s*"(\d+)"'


class TV8ItIE(SkyItVideoIE):
    IE_NAME = 'tv8.it'
    _VALID_URL = r'https?://tv8\.it/showvideo/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://tv8.it/showvideo/630529/ogni-mattina-ucciso-asino-di-andrea-lo-cicero/18-11-2020/',
        'md5': '9ab906a3f75ea342ed928442f9dabd21',
        'info_dict': {
            'id': '630529',
            'ext': 'mp4',
            'title': 'Ogni mattina - Ucciso asino di Andrea Lo Cicero',
            'timestamp': 1605721374,
            'upload_date': '20201118',
        }
    }]
    _DOMAIN = 'mtv8'
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/ehow.py
youtube_dl/extractor/ehow.py
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote


class EHowIE(InfoExtractor):
    IE_NAME = 'eHow'
    _VALID_URL = r'https?://(?:www\.)?ehow\.com/[^/_?]*_(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.ehow.com/video_12245069_hardwood-flooring-basics.html',
        'md5': '9809b4e3f115ae2088440bcb4efbf371',
        'info_dict': {
            'id': '12245069',
            'ext': 'flv',
            'title': 'Hardwood Flooring Basics',
            'description': 'Hardwood flooring may be time consuming, but its ultimately a pretty straightforward concept. Learn about hardwood flooring basics with help from a hardware flooring business owner in this free video...',
            'uploader': 'Erick Nathan',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        video_url = self._search_regex(
            r'(?:file|source)=(http[^\'"&]*)', webpage, 'video URL')
        final_url = compat_urllib_parse_unquote(video_url)
        uploader = self._html_search_meta('uploader', webpage)
        title = self._og_search_title(webpage).replace(' | eHow', '')

        return {
            'id': video_id,
            'url': final_url,
            'title': title,
            'thumbnail': self._og_search_thumbnail(webpage),
            'description': self._og_search_description(webpage),
            'uploader': uploader,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/movieclips.py
youtube_dl/extractor/movieclips.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    smuggle_url,
    float_or_none,
    parse_iso8601,
    update_url_query,
)


class MovieClipsIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?movieclips\.com/videos/.+-(?P<id>\d+)(?:\?|$)'
    _TEST = {
        'url': 'http://www.movieclips.com/videos/warcraft-trailer-1-561180739597',
        'md5': '42b5a0352d4933a7bd54f2104f481244',
        'info_dict': {
            'id': 'pKIGmG83AqD9',
            'ext': 'mp4',
            'title': 'Warcraft Trailer 1',
            'description': 'Watch Trailer 1 from Warcraft (2016). Legendary’s WARCRAFT is a 3D epic adventure of world-colliding conflict based.',
            'thumbnail': r're:^https?://.*\.jpg$',
            'timestamp': 1446843055,
            'upload_date': '20151106',
            'uploader': 'Movieclips',
        },
        'add_ie': ['ThePlatform'],
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        video = next(v for v in self._parse_json(self._search_regex(
            r'var\s+__REACT_ENGINE__\s*=\s*({.+});',
            webpage, 'react engine'), video_id)['playlist']['videos'] if v['id'] == video_id)

        return {
            '_type': 'url_transparent',
            'ie_key': 'ThePlatform',
            'url': smuggle_url(update_url_query(
                video['contentUrl'], {'mbr': 'true'}), {'force_smil_url': True}),
            'title': self._og_search_title(webpage),
            'description': self._html_search_meta('description', webpage),
            'duration': float_or_none(video.get('duration')),
            'timestamp': parse_iso8601(video.get('dateCreated')),
            'thumbnail': video.get('defaultImage'),
            'uploader': video.get('provider'),
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/mgtv.py
youtube_dl/extractor/mgtv.py
# coding: utf-8
from __future__ import unicode_literals

import base64
import time
import uuid

from .common import InfoExtractor
from ..compat import (
    compat_HTTPError,
    compat_str,
)
from ..utils import (
    ExtractorError,
    int_or_none,
)


class MGTVIE(InfoExtractor):
    _VALID_URL = r'https?://(?:w(?:ww)?\.)?mgtv\.com/(v|b)/(?:[^/]+/)*(?P<id>\d+)\.html'
    IE_DESC = '芒果TV'

    _TESTS = [{
        'url': 'http://www.mgtv.com/v/1/290525/f/3116640.html',
        'info_dict': {
            'id': '3116640',
            'ext': 'mp4',
            'title': '我是歌手 第四季',
            'description': '我是歌手第四季双年巅峰会',
            'duration': 7461,
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }, {
        'url': 'http://www.mgtv.com/b/301817/3826653.html',
        'only_matching': True,
    }, {
        'url': 'https://w.mgtv.com/b/301817/3826653.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # tk2: urlsafe-base64 of 'did=<uuid>|pno=1030|ver=0.3.0301|clit=<timestamp>',
        # reversed, passed as a query parameter to the player APIs below
        tk2 = base64.urlsafe_b64encode(b'did=%s|pno=1030|ver=0.3.0301|clit=%d' % (compat_str(uuid.uuid4()).encode(), time.time()))[::-1]
        try:
            api_data = self._download_json(
                'https://pcweb.api.mgtv.com/player/video', video_id, query={
                    'tk2': tk2,
                    'video_id': video_id,
                }, headers=self.geo_verification_headers())['data']
        except ExtractorError as e:
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
                error = self._parse_json(e.cause.read().decode(), None)
                if error.get('code') == 40005:
                    self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
                raise ExtractorError(error['msg'], expected=True)
            raise
        info = api_data['info']
        title = info['title'].strip()
        stream_data = self._download_json(
            'https://pcweb.api.mgtv.com/player/getSource', video_id, query={
                'pm2': api_data['atc']['pm2'],
                'tk2': tk2,
                'video_id': video_id,
            }, headers=self.geo_verification_headers())['data']
        stream_domain = stream_data['stream_domain'][0]

        formats = []
        for idx, stream in enumerate(stream_data['stream']):
            stream_path = stream.get('url')
            if not stream_path:
                continue
            format_data = self._download_json(
                stream_domain + stream_path, video_id,
                note='Download video info for format #%d' % idx)
            format_url = format_data.get('info')
            if not format_url:
                continue
            tbr = int_or_none(stream.get('filebitrate') or self._search_regex(
                r'_(\d+)_mp4/', format_url, 'tbr', default=None))
            formats.append({
                'format_id': compat_str(tbr or idx),
                'url': format_url,
                'ext': 'mp4',
                'tbr': tbr,
                'protocol': 'm3u8_native',
                'http_headers': {
                    'Referer': url,
                },
                'format_note': stream.get('name'),
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'description': info.get('desc'),
            'duration': int_or_none(info.get('duration')),
            'thumbnail': info.get('thumb'),
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/ctsnews.py
youtube_dl/extractor/ctsnews.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import unified_timestamp
from .youtube import YoutubeIE


class CtsNewsIE(InfoExtractor):
    IE_DESC = '華視新聞'
    _VALID_URL = r'https?://news\.cts\.com\.tw/[a-z]+/[a-z]+/\d+/(?P<id>\d+)\.html'
    _TESTS = [{
        'url': 'http://news.cts.com.tw/cts/international/201501/201501291578109.html',
        'md5': 'a9875cb790252b08431186d741beaabe',
        'info_dict': {
            'id': '201501291578109',
            'ext': 'mp4',
            'title': '以色列.真主黨交火 3人死亡 - 華視新聞網',
            'description': '以色列和黎巴嫩真主黨,爆發五年最嚴重衝突,雙方砲轟交火,兩名以軍死亡,還有一名西班牙籍的聯合國維和人員也不幸罹難。大陸陝西、河南、安徽、江蘇和湖北五個省份出現大暴雪,嚴重影響陸空交通,不過九華山卻出現...',
            'timestamp': 1422528540,
            'upload_date': '20150129',
        }
    }, {
        # News count not appear on page but still available in database
        'url': 'http://news.cts.com.tw/cts/international/201309/201309031304098.html',
        'md5': '3aee7e0df7cdff94e43581f54c22619e',
        'info_dict': {
            'id': '201309031304098',
            'ext': 'mp4',
            'title': '韓國31歲童顏男 貌如十多歲小孩 - 華視新聞網',
            'description': '越有年紀的人,越希望看起來年輕一點,而南韓卻有一位31歲的男子,看起來像是11、12歲的小孩,身...',
            'thumbnail': r're:^https?://.*\.jpg$',
            'timestamp': 1378205880,
            'upload_date': '20130903',
        }
    }, {
        # With Youtube embedded video
        'url': 'http://news.cts.com.tw/cts/money/201501/201501291578003.html',
        'md5': 'e4726b2ccd70ba2c319865e28f0a91d1',
        'info_dict': {
            'id': 'OVbfO7d0_hQ',
            'ext': 'mp4',
            'title': 'iPhone6熱銷 蘋果財報亮眼',
            'description': 'md5:f395d4f485487bb0f992ed2c4b07aa7d',
            'thumbnail': r're:^https?://.*\.jpg$',
            'upload_date': '20150128',
            'uploader_id': 'TBSCTS',
            'uploader': '中華電視公司',
        },
        'add_ie': ['Youtube'],
    }]

    def _real_extract(self, url):
        news_id = self._match_id(url)
        page = self._download_webpage(url, news_id)

        news_id = self._hidden_inputs(page).get('get_id')

        if news_id:
            mp4_feed = self._download_json(
                'http://news.cts.com.tw/action/test_mp4feed.php',
                news_id, note='Fetching feed', query={'news_id': news_id})
            video_url = mp4_feed['source_url']
        else:
            self.to_screen('Not CTSPlayer video, trying Youtube...')
            youtube_url = YoutubeIE._extract_url(page)

            return self.url_result(youtube_url, ie='Youtube')

        description = self._html_search_meta('description', page)
        title = self._html_search_meta('title', page, fatal=True)
        thumbnail = self._html_search_meta('image', page)

        datetime_str = self._html_search_regex(
            r'(\d{4}/\d{2}/\d{2} \d{2}:\d{2})', page, 'date and time', fatal=False)
        timestamp = None
        if datetime_str:
            timestamp = unified_timestamp(datetime_str) - 8 * 3600

        return {
            'id': news_id,
            'url': video_url,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/cnbc.py
youtube_dl/extractor/cnbc.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import smuggle_url


class CNBCIE(InfoExtractor):
    _VALID_URL = r'https?://video\.cnbc\.com/gallery/\?video=(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://video.cnbc.com/gallery/?video=3000503714',
        'info_dict': {
            'id': '3000503714',
            'ext': 'mp4',
            'title': 'Fighting zombies is big business',
            'description': 'md5:0c100d8e1a7947bd2feec9a5550e519e',
            'timestamp': 1459332000,
            'upload_date': '20160330',
            'uploader': 'NBCU-CNBC',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        return {
            '_type': 'url_transparent',
            'ie_key': 'ThePlatform',
            'url': smuggle_url(
                'http://link.theplatform.com/s/gZWlPC/media/guid/2408950221/%s?mbr=true&manifest=m3u' % video_id,
                {'force_smil_url': True}),
            'id': video_id,
        }


class CNBCVideoIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?cnbc\.com(?P<path>/video/(?:[^/]+/)+(?P<id>[^./?#&]+)\.html)'
    _TEST = {
        'url': 'https://www.cnbc.com/video/2018/07/19/trump-i-dont-necessarily-agree-with-raising-rates.html',
        'info_dict': {
            'id': '7000031301',
            'ext': 'mp4',
            'title': "Trump: I don't necessarily agree with raising rates",
            'description': 'md5:878d8f0b4ebb5bb1dda3514b91b49de3',
            'timestamp': 1531958400,
            'upload_date': '20180719',
            'uploader': 'NBCU-CNBC',
        },
        'params': {
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        path, display_id = re.match(self._VALID_URL, url).groups()
        video_id = self._download_json(
            'https://webql-redesign.cnbcfm.com/graphql', display_id, query={
                'query': '''{
  page(path: "%s") {
    vcpsId
  }
}''' % path,
            })['data']['page']['vcpsId']
        return self.url_result(
            'http://video.cnbc.com/gallery/?video=%d' % video_id,
            CNBCIE.ie_key())
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/alphaporno.py
youtube_dl/extractor/alphaporno.py
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    parse_iso8601,
    parse_duration,
    parse_filesize,
    int_or_none,
)


class AlphaPornoIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?alphaporno\.com/videos/(?P<id>[^/]+)'
    _TEST = {
        'url': 'http://www.alphaporno.com/videos/sensual-striptease-porn-with-samantha-alexandra/',
        'md5': 'feb6d3bba8848cd54467a87ad34bd38e',
        'info_dict': {
            'id': '258807',
            'display_id': 'sensual-striptease-porn-with-samantha-alexandra',
            'ext': 'mp4',
            'title': 'Sensual striptease porn with Samantha Alexandra',
            'thumbnail': r're:https?://.*\.jpg$',
            'timestamp': 1418694611,
            'upload_date': '20141216',
            'duration': 387,
            'filesize_approx': 54120000,
            'tbr': 1145,
            'categories': list,
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        video_id = self._search_regex(
            r"video_id\s*:\s*'([^']+)'", webpage, 'video id', default=None)

        video_url = self._search_regex(
            r"video_url\s*:\s*'([^']+)'", webpage, 'video url')
        ext = self._html_search_meta(
            'encodingFormat', webpage, 'ext', default='.mp4')[1:]

        title = self._search_regex(
            [r'<meta content="([^"]+)" itemprop="description">',
             r'class="title" itemprop="name">([^<]+)<'],
            webpage, 'title')
        thumbnail = self._html_search_meta('thumbnail', webpage, 'thumbnail')
        timestamp = parse_iso8601(self._html_search_meta(
            'uploadDate', webpage, 'upload date'))
        duration = parse_duration(self._html_search_meta(
            'duration', webpage, 'duration'))
        filesize_approx = parse_filesize(self._html_search_meta(
            'contentSize', webpage, 'file size'))
        bitrate = int_or_none(self._html_search_meta(
            'bitrate', webpage, 'bitrate'))
        categories = self._html_search_meta(
            'keywords', webpage, 'categories', default='').split(',')

        age_limit = self._rta_search(webpage)

        return {
            'id': video_id,
            'display_id': display_id,
            'url': video_url,
            'ext': ext,
            'title': title,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'duration': duration,
            'filesize_approx': filesize_approx,
            'tbr': bitrate,
            'categories': categories,
            'age_limit': age_limit,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/cloudflarestream.py
youtube_dl/extractor/cloudflarestream.py
# coding: utf-8
from __future__ import unicode_literals

import base64
import re

from .common import InfoExtractor


class CloudflareStreamIE(InfoExtractor):
    _DOMAIN_RE = r'(?:cloudflarestream\.com|(?:videodelivery|bytehighway)\.net)'
    _EMBED_RE = r'embed\.%s/embed/[^/]+\.js\?.*?\bvideo=' % _DOMAIN_RE
    _ID_RE = r'[\da-f]{32}|[\w-]+\.[\w-]+\.[\w-]+'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            (?:watch\.)?%s/|
                            %s
                        )
                        (?P<id>%s)
                    ''' % (_DOMAIN_RE, _EMBED_RE, _ID_RE)
    _TESTS = [{
        'url': 'https://embed.cloudflarestream.com/embed/we4g.fla9.latest.js?video=31c9291ab41fac05471db4e73aa11717',
        'info_dict': {
            'id': '31c9291ab41fac05471db4e73aa11717',
            'ext': 'mp4',
            'title': '31c9291ab41fac05471db4e73aa11717',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://watch.cloudflarestream.com/9df17203414fd1db3e3ed74abbe936c1',
        'only_matching': True,
    }, {
        'url': 'https://cloudflarestream.com/31c9291ab41fac05471db4e73aa11717/manifest/video.mpd',
        'only_matching': True,
    }, {
        'url': 'https://embed.videodelivery.net/embed/r4xu.fla9.latest.js?video=81d80727f3022488598f68d323c1ad5e',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage):
        return [
            mobj.group('url')
            for mobj in re.finditer(
                r'<script[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//%s(?:%s).*?)\1' % (CloudflareStreamIE._EMBED_RE, CloudflareStreamIE._ID_RE),
                webpage)]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        domain = 'bytehighway.net' if 'bytehighway.net/' in url else 'videodelivery.net'
        base_url = 'https://%s/%s/' % (domain, video_id)
        if '.' in video_id:
            video_id = self._parse_json(base64.urlsafe_b64decode(
                video_id.split('.')[1]), video_id)['sub']
        manifest_base_url = base_url + 'manifest/video.'

        formats = self._extract_m3u8_formats(
            manifest_base_url + 'm3u8', video_id, 'mp4',
            'm3u8_native', m3u8_id='hls', fatal=False)
        formats.extend(self._extract_mpd_formats(
            manifest_base_url + 'mpd', video_id, mpd_id='dash', fatal=False))
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': video_id,
            'thumbnail': base_url + 'thumbnails/thumbnail.jpg',
            'formats': formats,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/veehd.py
youtube_dl/extractor/veehd.py
from __future__ import unicode_literals

import re
import json

from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse_unquote,
    compat_urlparse,
)
from ..utils import (
    ExtractorError,
    clean_html,
    get_element_by_id,
)


class VeeHDIE(InfoExtractor):
    _VALID_URL = r'https?://veehd\.com/video/(?P<id>\d+)'

    # Seems VeeHD videos have multiple copies on several servers, all of
    # whom have different MD5 checksums, so omit md5 field in all tests
    _TESTS = [{
        'url': 'http://veehd.com/video/4639434_Solar-Sinter',
        'info_dict': {
            'id': '4639434',
            'ext': 'mp4',
            'title': 'Solar Sinter',
            'uploader_id': 'VideoEyes',
            'description': 'md5:46a840e8692ddbaffb5f81d9885cb457',
        },
        'skip': 'Video deleted',
    }, {
        'url': 'http://veehd.com/video/4905758_Elysian-Fields-Channeling',
        'info_dict': {
            'id': '4905758',
            'ext': 'mp4',
            'title': 'Elysian Fields - Channeling',
            'description': 'md5:360e4e95fdab58aefbea0f2a19e5604b',
            'uploader_id': 'spotted',
        }
    }, {
        'url': 'http://veehd.com/video/2046729_2012-2009-DivX-Trailer',
        'info_dict': {
            'id': '2046729',
            'ext': 'avi',
            'title': '2012 (2009) DivX Trailer',
            'description': 'md5:75435ee95255e6a9838ac6f6f3a2396b',
            'uploader_id': 'Movie_Trailers',
        }
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # VeeHD seems to send garbage on the first request.
        # See https://github.com/ytdl-org/youtube-dl/issues/2102
        self._download_webpage(url, video_id, 'Requesting webpage')
        webpage = self._download_webpage(url, video_id)

        if 'This video has been removed<' in webpage:
            raise ExtractorError('Video %s has been removed' % video_id, expected=True)

        player_path = self._search_regex(
            r'\$\("#playeriframe"\).attr\({src : "(.+?)"',
            webpage, 'player path')
        player_url = compat_urlparse.urljoin(url, player_path)

        self._download_webpage(player_url, video_id, 'Requesting player page')
        player_page = self._download_webpage(
            player_url, video_id, 'Downloading player page')

        video_url = None

        config_json = self._search_regex(
            r'value=\'config=({.+?})\'', player_page, 'config json', default=None)

        if config_json:
            config = json.loads(config_json)
            video_url = compat_urllib_parse_unquote(config['clip']['url'])

        if not video_url:
            video_url = self._html_search_regex(
                r'<embed[^>]+type="video/divx"[^>]+src="([^"]+)"',
                player_page, 'video url', default=None)

        if not video_url:
            iframe_src = self._search_regex(
                r'<iframe[^>]+src="/?([^"]+)"', player_page, 'iframe url')
            iframe_url = 'http://veehd.com/%s' % iframe_src

            self._download_webpage(iframe_url, video_id, 'Requesting iframe page')
            iframe_page = self._download_webpage(
                iframe_url, video_id, 'Downloading iframe page')

            video_url = self._search_regex(
                r"file\s*:\s*'([^']+)'", iframe_page, 'video url')

        title = clean_html(get_element_by_id('videoName', webpage).rpartition('|')[0])
        uploader_id = self._html_search_regex(
            r'<a href="/profile/\d+">(.+?)</a>',
            webpage, 'uploader')
        thumbnail = self._search_regex(
            r'<img id="veehdpreview" src="(.+?)"',
            webpage, 'thumbnail')
        description = self._html_search_regex(
            r'<td class="infodropdown".*?<div>(.*?)<ul',
            webpage, 'description', flags=re.DOTALL)

        return {
            '_type': 'video',
            'id': video_id,
            'title': title,
            'url': video_url,
            'uploader_id': uploader_id,
            'thumbnail': thumbnail,
            'description': description,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/ooyala.py
youtube_dl/extractor/ooyala.py
from __future__ import unicode_literals

import base64
import re

from .common import InfoExtractor
from ..compat import (
    compat_b64decode,
    compat_str,
)
from ..utils import (
    determine_ext,
    ExtractorError,
    float_or_none,
    int_or_none,
    try_get,
    unsmuggle_url,
)


class OoyalaBaseIE(InfoExtractor):
    _PLAYER_BASE = 'http://player.ooyala.com/'
    _CONTENT_TREE_BASE = _PLAYER_BASE + 'player_api/v1/content_tree/'
    _AUTHORIZATION_URL_TEMPLATE = _PLAYER_BASE + 'sas/player_api/v2/authorization/embed_code/%s/%s'

    def _extract(self, content_tree_url, video_id, domain=None, supportedformats=None, embed_token=None):
        content_tree = self._download_json(content_tree_url, video_id)['content_tree']
        metadata = content_tree[list(content_tree)[0]]
        embed_code = metadata['embed_code']
        pcode = metadata.get('asset_pcode') or embed_code
        title = metadata['title']

        auth_data = self._download_json(
            self._AUTHORIZATION_URL_TEMPLATE % (pcode, embed_code),
            video_id, headers=self.geo_verification_headers(), query={
                'domain': domain or 'player.ooyala.com',
                'supportedFormats': supportedformats or 'mp4,rtmp,m3u8,hds,dash,smooth',
                'embedToken': embed_token,
            })['authorization_data'][embed_code]

        urls = []
        formats = []
        streams = auth_data.get('streams') or [{
            'delivery_type': 'hls',
            'url': {
                'data': base64.b64encode(('http://player.ooyala.com/hls/player/all/%s.m3u8' % embed_code).encode()).decode(),
            }
        }]
        for stream in streams:
            url_data = try_get(stream, lambda x: x['url']['data'], compat_str)
            if not url_data:
                continue
            s_url = compat_b64decode(url_data).decode('utf-8')
            if not s_url or s_url in urls:
                continue
            urls.append(s_url)
            ext = determine_ext(s_url, None)
            delivery_type = stream.get('delivery_type')
            if delivery_type == 'hls' or ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    re.sub(r'/ip(?:ad|hone)/', '/all/', s_url), embed_code, 'mp4',
                    'm3u8_native', m3u8_id='hls', fatal=False))
            elif delivery_type == 'hds' or ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    s_url + '?hdcore=3.7.0', embed_code, f4m_id='hds', fatal=False))
            elif delivery_type == 'dash' or ext == 'mpd':
                formats.extend(self._extract_mpd_formats(
                    s_url, embed_code, mpd_id='dash', fatal=False))
            elif delivery_type == 'smooth':
                formats.extend(self._extract_ism_formats(
                    s_url, embed_code, ism_id='mss', fatal=False))
            elif ext == 'smil':
                formats.extend(self._extract_smil_formats(
                    s_url, embed_code, fatal=False))
            else:
                formats.append({
                    'url': s_url,
                    'ext': ext or delivery_type,
                    'vcodec': stream.get('video_codec'),
                    'format_id': delivery_type,
                    'width': int_or_none(stream.get('width')),
                    'height': int_or_none(stream.get('height')),
                    'abr': int_or_none(stream.get('audio_bitrate')),
                    'vbr': int_or_none(stream.get('video_bitrate')),
                    'fps': float_or_none(stream.get('framerate')),
                })
        if not formats and not auth_data.get('authorized'):
            raise ExtractorError('%s said: %s' % (
                self.IE_NAME, auth_data['message']), expected=True)
        self._sort_formats(formats)

        subtitles = {}
        for lang, sub in metadata.get('closed_captions_vtt', {}).get('captions', {}).items():
            sub_url = sub.get('url')
            if not sub_url:
                continue
            subtitles[lang] = [{
                'url': sub_url,
            }]

        return {
            'id': embed_code,
            'title': title,
            'description': metadata.get('description'),
            'thumbnail': metadata.get('thumbnail_image') or metadata.get('promo_image'),
            'duration': float_or_none(metadata.get('duration'), 1000),
            'subtitles': subtitles,
            'formats': formats,
        }


class OoyalaIE(OoyalaBaseIE):
    _VALID_URL = r'(?:ooyala:|https?://.+?\.ooyala\.com/.*?(?:embedCode|ec)=)(?P<id>.+?)(&|$)'
    _TESTS = [
        {
            # From http://it.slashdot.org/story/13/04/25/178216/recovering-data-from-broken-hard-drives-and-ssds-video
            'url': 'http://player.ooyala.com/player.js?embedCode=pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8',
            'info_dict': {
                'id': 'pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8',
                'ext': 'mp4',
                'title': 'Explaining Data Recovery from Hard Drives and SSDs',
                'description': 'How badly damaged does a drive have to be to defeat Russell and his crew? Apparently, smashed to bits.',
                'duration': 853.386,
            },
            # The video in the original webpage now uses PlayWire
            'skip': 'Ooyala said: movie expired',
        }, {
            # Only available for ipad
            'url': 'http://player.ooyala.com/player.js?embedCode=x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0',
            'info_dict': {
                'id': 'x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0',
                'ext': 'mp4',
                'title': 'Simulation Overview - Levels of Simulation',
                'duration': 194.948,
            },
        },
        {
            # Information available only through SAS api
            # From http://community.plm.automation.siemens.com/t5/News-NX-Manufacturing/Tool-Path-Divide/ba-p/4187
            'url': 'http://player.ooyala.com/player.js?embedCode=FiOG81ZTrvckcchQxmalf4aQj590qTEx',
            'md5': 'a84001441b35ea492bc03736e59e7935',
            'info_dict': {
                'id': 'FiOG81ZTrvckcchQxmalf4aQj590qTEx',
                'ext': 'mp4',
                'title': 'Divide Tool Path.mp4',
                'duration': 204.405,
            }
        },
        {
            # empty stream['url']['data']
            'url': 'http://player.ooyala.com/player.js?embedCode=w2bnZtYjE6axZ_dw1Cd0hQtXd_ige2Is',
            'only_matching': True,
        }
    ]

    @staticmethod
    def _url_for_embed_code(embed_code):
        return 'http://player.ooyala.com/player.js?embedCode=%s' % embed_code

    @classmethod
    def _build_url_result(cls, embed_code):
        return cls.url_result(cls._url_for_embed_code(embed_code),
                              ie=cls.ie_key())

    def _real_extract(self, url):
        url, smuggled_data = unsmuggle_url(url, {})
        embed_code = self._match_id(url)
        domain = smuggled_data.get('domain')
        supportedformats = smuggled_data.get('supportedformats')
        embed_token = smuggled_data.get('embed_token')
        content_tree_url = self._CONTENT_TREE_BASE + 'embed_code/%s/%s' % (embed_code, embed_code)
        return self._extract(content_tree_url, embed_code, domain, supportedformats, embed_token)


class OoyalaExternalIE(OoyalaBaseIE):
    _VALID_URL = r'''(?x)
                    (?:
                        ooyalaexternal:|
                        https?://.+?\.ooyala\.com/.*?\bexternalId=
                    )
                    (?P<partner_id>[^:]+)
                    :
                    (?P<id>.+)
                    (?:
                        :|
                        .*?&pcode=
                    )
                    (?P<pcode>.+?)
                    (?:&|$)
                    '''
    _TEST = {
        'url': 'https://player.ooyala.com/player.js?externalId=espn:10365079&pcode=1kNG061cgaoolOncv54OAO1ceO-I&adSetCode=91cDU6NuXTGKz3OdjOxFdAgJVtQcKJnI&callback=handleEvents&hasModuleParams=1&height=968&playerBrandingId=7af3bd04449c444c964f347f11873075&targetReplaceId=videoPlayer&width=1656&wmode=opaque&allowScriptAccess=always',
        'info_dict': {
            'id': 'FkYWtmazr6Ed8xmvILvKLWjd4QvYZpzG',
            'ext': 'mp4',
            'title': 'dm_140128_30for30Shorts___JudgingJewellv2',
            'duration': 1302.0,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        partner_id, video_id, pcode = re.match(self._VALID_URL, url).groups()
        content_tree_url = self._CONTENT_TREE_BASE + 'external_id/%s/%s:%s' % (pcode, partner_id, video_id)
        return self._extract(content_tree_url, video_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/screencastomatic.py
youtube_dl/extractor/screencastomatic.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    get_element_by_class,
    int_or_none,
    remove_start,
    strip_or_none,
    unified_strdate,
)


class ScreencastOMaticIE(InfoExtractor):
    _VALID_URL = r'https?://screencast-o-matic\.com/(?:(?:watch|player)/|embed\?.*?\bsc=)(?P<id>[0-9a-zA-Z]+)'
    _TESTS = [{
        'url': 'http://screencast-o-matic.com/watch/c2lD3BeOPl',
        'md5': '483583cb80d92588f15ccbedd90f0c18',
        'info_dict': {
            'id': 'c2lD3BeOPl',
            'ext': 'mp4',
            'title': 'Welcome to 3-4 Philosophy @ DECV!',
            'thumbnail': r're:^https?://.*\.jpg$',
            'description': 'as the title says! also: some general info re 1) VCE philosophy and 2) distance learning.',
            'duration': 369,
            'upload_date': '20141216',
        }
    }, {
        'url': 'http://screencast-o-matic.com/player/c2lD3BeOPl',
        'only_matching': True,
    }, {
        'url': 'http://screencast-o-matic.com/embed?ff=true&sc=cbV2r4Q5TL&fromPH=true&a=1',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(
            'https://screencast-o-matic.com/player/' + video_id, video_id)

        info = self._parse_html5_media_entries(url, webpage, video_id)[0]
        info.update({
            'id': video_id,
            'title': get_element_by_class('overlayTitle', webpage),
            'description': strip_or_none(get_element_by_class('overlayDescription', webpage)) or None,
            'duration': int_or_none(self._search_regex(
                r'player\.duration\s*=\s*function\(\)\s*{\s*return\s+(\d+);\s*};',
                webpage, 'duration', default=None)),
            'upload_date': unified_strdate(remove_start(
                get_element_by_class('overlayPublished', webpage), 'Published: ')),
        })
        return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/behindkink.py
youtube_dl/extractor/behindkink.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import url_basename


class BehindKinkIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?behindkink\.com/(?P<year>[0-9]{4})/(?P<month>[0-9]{2})/(?P<day>[0-9]{2})/(?P<id>[^/#?_]+)'
    _TEST = {
        'url': 'http://www.behindkink.com/2014/12/05/what-are-you-passionate-about-marley-blaze/',
        'md5': '507b57d8fdcd75a41a9a7bdb7989c762',
        'info_dict': {
            'id': '37127',
            'ext': 'mp4',
            'title': 'What are you passionate about – Marley Blaze',
            'description': 'md5:aee8e9611b4ff70186f752975d9b94b4',
            'upload_date': '20141205',
            'thumbnail': 'http://www.behindkink.com/wp-content/uploads/2014/12/blaze-1.jpg',
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        display_id = mobj.group('id')

        webpage = self._download_webpage(url, display_id)

        video_url = self._search_regex(
            r'<source src="([^"]+)"', webpage, 'video URL')
        video_id = url_basename(video_url).split('_')[0]
        upload_date = mobj.group('year') + mobj.group('month') + mobj.group('day')

        return {
            'id': video_id,
            'display_id': display_id,
            'url': video_url,
            'title': self._og_search_title(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
            'description': self._og_search_description(webpage),
            'upload_date': upload_date,
            'age_limit': 18,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/huajiao.py
youtube_dl/extractor/huajiao.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    parse_duration,
    parse_iso8601,
)


class HuajiaoIE(InfoExtractor):
    IE_DESC = '花椒直播'
    _VALID_URL = r'https?://(?:www\.)?huajiao\.com/l/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.huajiao.com/l/38941232',
        'md5': 'd08bf9ac98787d24d1e4c0283f2d372d',
        'info_dict': {
            'id': '38941232',
            'ext': 'mp4',
            'title': '#新人求关注#',
            'description': 're:.*',
            'duration': 2424.0,
            'thumbnail': r're:^https?://.*\.jpg$',
            'timestamp': 1475866459,
            'upload_date': '20161007',
            'uploader': 'Penny_余姿昀',
            'uploader_id': '75206005',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        feed_json = self._search_regex(
            r'var\s+feed\s*=\s*({.+})', webpage, 'feed json')
        feed = self._parse_json(feed_json, video_id)

        description = self._html_search_meta(
            'description', webpage, 'description', fatal=False)

        def get(section, field):
            return feed.get(section, {}).get(field)

        return {
            'id': video_id,
            'title': feed['feed']['formated_title'],
            'description': description,
            'duration': parse_duration(get('feed', 'duration')),
            'thumbnail': get('feed', 'image'),
            'timestamp': parse_iso8601(feed.get('creatime'), ' '),
            'uploader': get('author', 'nickname'),
            'uploader_id': get('author', 'uid'),
            'formats': self._extract_m3u8_formats(
                feed['feed']['m3u8'], video_id, 'mp4', 'm3u8_native'),
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/baidu.py
youtube_dl/extractor/baidu.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import unescapeHTML


class BaiduVideoIE(InfoExtractor):
    IE_DESC = '百度视频'
    _VALID_URL = r'https?://v\.baidu\.com/(?P<type>[a-z]+)/(?P<id>\d+)\.htm'
    _TESTS = [{
        'url': 'http://v.baidu.com/comic/1069.htm?frp=bdbrand&q=%E4%B8%AD%E5%8D%8E%E5%B0%8F%E5%BD%93%E5%AE%B6',
        'info_dict': {
            'id': '1069',
            'title': '中华小当家 TV版国语',
            'description': 'md5:51be07afe461cf99fa61231421b5397c',
        },
        'playlist_count': 52,
    }, {
        'url': 'http://v.baidu.com/show/11595.htm?frp=bdbrand',
        'info_dict': {
            'id': '11595',
            'title': 're:^奔跑吧兄弟',
            'description': 'md5:1bf88bad6d850930f542d51547c089b8',
        },
        'playlist_mincount': 12,
    }]

    def _call_api(self, path, category, playlist_id, note):
        return self._download_json('http://app.video.baidu.com/%s/?worktype=adnative%s&id=%s' % (
            path, category, playlist_id), playlist_id, note)

    def _real_extract(self, url):
        category, playlist_id = re.match(self._VALID_URL, url).groups()
        if category == 'show':
            category = 'tvshow'
        if category == 'tv':
            category = 'tvplay'

        playlist_detail = self._call_api(
            'xqinfo', category, playlist_id, 'Download playlist JSON metadata')

        playlist_title = playlist_detail['title']
        playlist_description = unescapeHTML(playlist_detail.get('intro'))

        episodes_detail = self._call_api(
            'xqsingle', category, playlist_id, 'Download episodes JSON metadata')

        entries = [self.url_result(
            episode['url'], video_title=episode['title']
        ) for episode in episodes_detail['videos']]

        return self.playlist_result(
            entries, playlist_id, playlist_title, playlist_description)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/mitele.py
youtube_dl/extractor/mitele.py
# coding: utf-8
from __future__ import unicode_literals

from .telecinco import TelecincoIE
from ..utils import (
    int_or_none,
    parse_iso8601,
)


class MiTeleIE(TelecincoIE):
    IE_DESC = 'mitele.es'
    _VALID_URL = r'https?://(?:www\.)?mitele\.es/(?:[^/]+/)+(?P<id>[^/]+)/player'

    _TESTS = [{
        'url': 'http://www.mitele.es/programas-tv/diario-de/57b0dfb9c715da65618b4afa/player',
        'info_dict': {
            'id': 'FhYW1iNTE6J6H7NkQRIEzfne6t2quqPg',
            'ext': 'mp4',
            'title': 'Diario de La redacción Programa 144',
            'description': 'md5:07c35a7b11abb05876a6a79185b58d27',
            'series': 'Diario de',
            'season': 'Season 14',
            'season_number': 14,
            'episode': 'Tor, la web invisible',
            'episode_number': 3,
            'thumbnail': r're:(?i)^https?://.*\.jpg$',
            'duration': 2913,
            'age_limit': 16,
            'timestamp': 1471209401,
            'upload_date': '20160814',
        },
    }, {
        # no explicit title
        'url': 'http://www.mitele.es/programas-tv/cuarto-milenio/57b0de3dc915da14058b4876/player',
        'info_dict': {
            'id': 'oyNG1iNTE6TAPP-JmCjbwfwJqqMMX3Vq',
            'ext': 'mp4',
            'title': 'Cuarto Milenio Temporada 6 Programa 226',
            'description': 'md5:5ff132013f0cd968ffbf1f5f3538a65f',
            'series': 'Cuarto Milenio',
            'season': 'Season 6',
            'season_number': 6,
            'episode': 'Episode 24',
            'episode_number': 24,
            'thumbnail': r're:(?i)^https?://.*\.jpg$',
            'duration': 7313,
            'age_limit': 12,
            'timestamp': 1471209021,
            'upload_date': '20160814',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.mitele.es/series-online/la-que-se-avecina/57aac5c1c915da951a8b45ed/player',
        'only_matching': True,
    }, {
        'url': 'https://www.mitele.es/programas-tv/diario-de/la-redaccion/programa-144-40_1006364575251/player/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        pre_player = self._parse_json(self._search_regex(
            r'window\.\$REACTBASE_STATE\.prePlayer_mtweb\s*=\s*({.+})',
            webpage, 'Pre Player'), display_id)['prePlayer']
        title = pre_player['title']
        video_info = self._parse_content(pre_player['video'], url)
        content = pre_player.get('content') or {}
        info = content.get('info') or {}

        video_info.update({
            'title': title,
            'description': info.get('synopsis'),
            'series': content.get('title'),
            'season_number': int_or_none(info.get('season_number')),
            'episode': content.get('subtitle'),
            'episode_number': int_or_none(info.get('episode_number')),
            'duration': int_or_none(info.get('duration')),
            'age_limit': int_or_none(info.get('rating')),
            'timestamp': parse_iso8601(pre_player.get('publishedTime')),
        })
        return video_info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/rte.py
youtube_dl/extractor/rte.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
    float_or_none,
    parse_iso8601,
    str_or_none,
    try_get,
    unescapeHTML,
    url_or_none,
    ExtractorError,
)


class RteBaseIE(InfoExtractor):
    def _real_extract(self, url):
        item_id = self._match_id(url)

        info_dict = {}
        formats = []

        ENDPOINTS = (
            'https://feeds.rasset.ie/rteavgen/player/playlist?type=iptv&format=json&showId=',
            'http://www.rte.ie/rteavgen/getplaylist/?type=web&format=json&id=',
        )

        for num, ep_url in enumerate(ENDPOINTS, start=1):
            try:
                data = self._download_json(ep_url + item_id, item_id)
            except ExtractorError as ee:
                if num < len(ENDPOINTS) or formats:
                    continue
                if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404:
                    error_info = self._parse_json(ee.cause.read().decode(), item_id, fatal=False)
                    if error_info:
                        raise ExtractorError(
                            '%s said: %s' % (self.IE_NAME, error_info['message']),
                            expected=True)
                raise

            # NB the string values in the JSON are stored using XML escaping(!)
            show = try_get(data, lambda x: x['shows'][0], dict)
            if not show:
                continue

            if not info_dict:
                title = unescapeHTML(show['title'])
                description = unescapeHTML(show.get('description'))
                thumbnail = show.get('thumbnail')
                duration = float_or_none(show.get('duration'), 1000)
                timestamp = parse_iso8601(show.get('published'))
                info_dict = {
                    'id': item_id,
                    'title': title,
                    'description': description,
                    'thumbnail': thumbnail,
                    'timestamp': timestamp,
                    'duration': duration,
                }

            mg = try_get(show, lambda x: x['media:group'][0], dict)
            if not mg:
                continue

            if mg.get('url'):
                m = re.match(r'(?P<url>rtmpe?://[^/]+)/(?P<app>.+)/(?P<playpath>mp4:.*)', mg['url'])
                if m:
                    m = m.groupdict()
                    formats.append({
                        'url': m['url'] + '/' + m['app'],
                        'app': m['app'],
                        'play_path': m['playpath'],
                        'player_url': url,
                        'ext': 'flv',
                        'format_id': 'rtmp',
                    })

            if mg.get('hls_server') and mg.get('hls_url'):
                formats.extend(self._extract_m3u8_formats(
                    mg['hls_server'] + mg['hls_url'], item_id, 'mp4',
                    entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))

            if mg.get('hds_server') and mg.get('hds_url'):
                formats.extend(self._extract_f4m_formats(
                    mg['hds_server'] + mg['hds_url'], item_id,
                    f4m_id='hds', fatal=False))

            mg_rte_server = str_or_none(mg.get('rte:server'))
            mg_url = str_or_none(mg.get('url'))
            if mg_rte_server and mg_url:
                hds_url = url_or_none(mg_rte_server + mg_url)
                if hds_url:
                    formats.extend(self._extract_f4m_formats(
                        hds_url, item_id, f4m_id='hds', fatal=False))

        self._sort_formats(formats)

        info_dict['formats'] = formats
        return info_dict


class RteIE(RteBaseIE):
    IE_NAME = 'rte'
    IE_DESC = 'Raidió Teilifís Éireann TV'
    _VALID_URL = r'https?://(?:www\.)?rte\.ie/player/[^/]{2,3}/show/[^/]+/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.rte.ie/player/ie/show/iwitness-862/10478715/',
        'md5': '4a76eb3396d98f697e6e8110563d2604',
        'info_dict': {
            'id': '10478715',
            'ext': 'mp4',
            'title': 'iWitness',
            'thumbnail': r're:^https?://.*\.jpg$',
            'description': 'The spirit of Ireland, one voice and one minute at a time.',
            'duration': 60.046,
            'upload_date': '20151012',
            'timestamp': 1444694160,
        },
    }


class RteRadioIE(RteBaseIE):
    IE_NAME = 'rte:radio'
    IE_DESC = 'Raidió Teilifís Éireann radio'
    # Radioplayer URLs have two distinct specifier formats,
    # the old format #!rii=<channel_id>:<id>:<playable_item_id>:<date>:
    # the new format #!rii=b<channel_id>_<id>_<playable_item_id>_<date>_
    # where the IDs are int/empty, the date is DD-MM-YYYY, and the specifier may be truncated.
    # An <id> uniquely defines an individual recording, and is the only part we require.
    _VALID_URL = r'https?://(?:www\.)?rte\.ie/radio/utils/radioplayer/rteradioweb\.html#!rii=(?:b?[0-9]*)(?:%3A|:|%5F|_)(?P<id>[0-9]+)'

    _TESTS = [{
        # Old-style player URL; HLS and RTMPE formats
        'url': 'http://www.rte.ie/radio/utils/radioplayer/rteradioweb.html#!rii=16:10507902:2414:27-12-2015:',
        'md5': 'c79ccb2c195998440065456b69760411',
        'info_dict': {
            'id': '10507902',
            'ext': 'mp4',
            'title': 'Gloria',
            'thumbnail': r're:^https?://.*\.jpg$',
            'description': 'md5:9ce124a7fb41559ec68f06387cabddf0',
            'timestamp': 1451203200,
            'upload_date': '20151227',
            'duration': 7230.0,
        },
    }, {
        # New-style player URL; RTMPE formats only
        'url': 'http://rte.ie/radio/utils/radioplayer/rteradioweb.html#!rii=b16_3250678_8861_06-04-2012_',
        'info_dict': {
            'id': '3250678',
            'ext': 'flv',
            'title': 'The Lyric Concert with Paul Herriott',
            'thumbnail': r're:^https?://.*\.jpg$',
            'description': '',
            'timestamp': 1333742400,
            'upload_date': '20120406',
            'duration': 7199.016,
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }]
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/lbry.py
youtube_dl/extractor/lbry.py
# coding: utf-8
from __future__ import unicode_literals

import functools
import json

from .common import InfoExtractor
from ..compat import (
    compat_parse_qs,
    compat_str,
    compat_urllib_parse_unquote,
    compat_urllib_parse_urlparse,
)
from ..utils import (
    determine_ext,
    ExtractorError,
    int_or_none,
    mimetype2ext,
    OnDemandPagedList,
    try_get,
    urljoin,
)


class LBRYBaseIE(InfoExtractor):
    _BASE_URL_REGEX = r'https?://(?:www\.)?(?:lbry\.tv|odysee\.com)/'
    _CLAIM_ID_REGEX = r'[0-9a-f]{1,40}'
    _OPT_CLAIM_ID = '[^:/?#&]+(?::%s)?' % _CLAIM_ID_REGEX
    _SUPPORTED_STREAM_TYPES = ['video', 'audio']

    def _call_api_proxy(self, method, display_id, params, resource):
        return self._download_json(
            'https://api.lbry.tv/api/v1/proxy',
            display_id, 'Downloading %s JSON metadata' % resource,
            headers={'Content-Type': 'application/json-rpc'},
            data=json.dumps({
                'method': method,
                'params': params,
            }).encode())['result']

    def _resolve_url(self, url, display_id, resource):
        return self._call_api_proxy(
            'resolve', display_id, {'urls': url}, resource)[url]

    def _permanent_url(self, url, claim_name, claim_id):
        return urljoin(url, '/%s:%s' % (claim_name, claim_id))

    def _parse_stream(self, stream, url):
        stream_value = stream.get('value') or {}
        stream_type = stream_value.get('stream_type')
        source = stream_value.get('source') or {}
        media = stream_value.get(stream_type) or {}
        signing_channel = stream.get('signing_channel') or {}
        channel_name = signing_channel.get('name')
        channel_claim_id = signing_channel.get('claim_id')
        channel_url = None
        if channel_name and channel_claim_id:
            channel_url = self._permanent_url(url, channel_name, channel_claim_id)

        info = {
            'thumbnail': try_get(stream_value, lambda x: x['thumbnail']['url'], compat_str),
            'description': stream_value.get('description'),
            'license': stream_value.get('license'),
            'timestamp': int_or_none(stream.get('timestamp')),
            'release_timestamp': int_or_none(stream_value.get('release_time')),
            'tags': stream_value.get('tags'),
            'duration': int_or_none(media.get('duration')),
            'channel': try_get(signing_channel, lambda x: x['value']['title']),
            'channel_id': channel_claim_id,
            'channel_url': channel_url,
            'ext': determine_ext(source.get('name')) or mimetype2ext(source.get('media_type')),
            'filesize': int_or_none(source.get('size')),
        }
        if stream_type == 'audio':
            info['vcodec'] = 'none'
        else:
            info.update({
                'width': int_or_none(media.get('width')),
                'height': int_or_none(media.get('height')),
            })
        return info


class LBRYIE(LBRYBaseIE):
    IE_NAME = 'lbry'
    _VALID_URL = LBRYBaseIE._BASE_URL_REGEX + r'(?P<id>\$/[^/]+/[^/]+/{1}|@{0}/{0}|(?!@){0})'.format(LBRYBaseIE._OPT_CLAIM_ID, LBRYBaseIE._CLAIM_ID_REGEX)
    _TESTS = [{
        # Video
        'url': 'https://lbry.tv/@Mantega:1/First-day-LBRY:1',
        'md5': '65bd7ec1f6744ada55da8e4c48a2edf9',
        'info_dict': {
            'id': '17f983b61f53091fb8ea58a9c56804e4ff8cff4d',
            'ext': 'mp4',
            'title': 'First day in LBRY? Start HERE!',
            'description': 'md5:f6cb5c704b332d37f5119313c2c98f51',
            'timestamp': 1595694354,
            'upload_date': '20200725',
            'release_timestamp': 1595340697,
            'release_date': '20200721',
            'width': 1280,
            'height': 720,
        }
    }, {
        # Audio
        'url': 'https://lbry.tv/@LBRYFoundation:0/Episode-1:e',
        'md5': 'c94017d3eba9b49ce085a8fad6b98d00',
        'info_dict': {
            'id': 'e7d93d772bd87e2b62d5ab993c1c3ced86ebb396',
            'ext': 'mp3',
            'title': 'The LBRY Foundation Community Podcast Episode 1 - Introduction, Streaming on LBRY, Transcoding',
            'description': 'md5:661ac4f1db09f31728931d7b88807a61',
            'timestamp': 1591312601,
            'upload_date': '20200604',
            'release_timestamp': 1591312421,
            'release_date': '20200604',
            'tags': list,
            'duration': 2570,
            'channel': 'The LBRY Foundation',
            'channel_id': '0ed629d2b9c601300cacf7eabe9da0be79010212',
            'channel_url': 'https://lbry.tv/@LBRYFoundation:0ed629d2b9c601300cacf7eabe9da0be79010212',
            'vcodec': 'none',
        }
    }, {
        # HLS
        'url': 'https://odysee.com/@gardeningincanada:b/plants-i-will-never-grow-again.-the:e',
        'md5': 'fc82f45ea54915b1495dd7cb5cc1289f',
        'info_dict': {
            'id': 'e51671357333fe22ae88aad320bde2f6f96b1410',
            'ext': 'mp4',
            'title': 'PLANTS I WILL NEVER GROW AGAIN. THE BLACK LIST PLANTS FOR A CANADIAN GARDEN | Gardening in Canada 🍁',
            'description': 'md5:9c539c6a03fb843956de61a4d5288d5e',
            'timestamp': 1618254123,
            'upload_date': '20210412',
            'release_timestamp': 1618254002,
            'release_date': '20210412',
            'tags': list,
            'duration': 554,
            'channel': 'Gardening In Canada',
            'channel_id': 'b8be0e93b423dad221abe29545fbe8ec36e806bc',
            'channel_url': 'https://odysee.com/@gardeningincanada:b8be0e93b423dad221abe29545fbe8ec36e806bc',
            'formats': 'mincount:3',
        }
    }, {
        'url': 'https://odysee.com/@BrodieRobertson:5/apple-is-tracking-everything-you-do-on:e',
        'only_matching': True,
    }, {
        'url': "https://odysee.com/@ScammerRevolts:b0/I-SYSKEY'D-THE-SAME-SCAMMERS-3-TIMES!:b",
        'only_matching': True,
    }, {
        'url': 'https://lbry.tv/Episode-1:e7d93d772bd87e2b62d5ab993c1c3ced86ebb396',
        'only_matching': True,
    }, {
        'url': 'https://lbry.tv/$/embed/Episode-1/e7d93d772bd87e2b62d5ab993c1c3ced86ebb396',
        'only_matching': True,
    }, {
        'url': 'https://lbry.tv/Episode-1:e7',
        'only_matching': True,
    }, {
        'url': 'https://lbry.tv/@LBRYFoundation/Episode-1',
        'only_matching': True,
    }, {
        'url': 'https://lbry.tv/$/download/Episode-1/e7d93d772bd87e2b62d5ab993c1c3ced86ebb396',
        'only_matching': True,
    }, {
        'url': 'https://lbry.tv/@lacajadepandora:a/TRUMP-EST%C3%81-BIEN-PUESTO-con-Pilar-Baselga,-Carlos-Senra,-Luis-Palacios-(720p_30fps_H264-192kbit_AAC):1',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        if display_id.startswith('$/'):
            display_id = display_id.split('/', 2)[-1].replace('/', ':')
        else:
            display_id = display_id.replace(':', '#')
        display_id = compat_urllib_parse_unquote(display_id)
        uri = 'lbry://' + display_id
        result = self._resolve_url(uri, display_id, 'stream')
        result_value = result['value']
        if result_value.get('stream_type') not in self._SUPPORTED_STREAM_TYPES:
            raise ExtractorError('Unsupported URL', expected=True)
        claim_id = result['claim_id']
        title = result_value['title']
        streaming_url = self._call_api_proxy(
            'get', claim_id, {'uri': uri}, 'streaming url')['streaming_url']
        info = self._parse_stream(result, url)
        urlh = self._request_webpage(
            streaming_url, display_id,
            note='Downloading streaming redirect url info')
        if determine_ext(urlh.geturl()) == 'm3u8':
            info['formats'] = self._extract_m3u8_formats(
                urlh.geturl(), display_id, 'mp4', entry_protocol='m3u8_native',
                m3u8_id='hls')
            self._sort_formats(info['formats'])
        else:
            info['url'] = streaming_url
        info.update({
            'id': claim_id,
            'title': title,
        })
        return info


class LBRYChannelIE(LBRYBaseIE):
    IE_NAME = 'lbry:channel'
    _VALID_URL = LBRYBaseIE._BASE_URL_REGEX + r'(?P<id>@%s)/?(?:[?#&]|$)' % LBRYBaseIE._OPT_CLAIM_ID
    _TESTS = [{
        'url': 'https://lbry.tv/@LBRYFoundation:0',
        'info_dict': {
            'id': '0ed629d2b9c601300cacf7eabe9da0be79010212',
            'title': 'The LBRY Foundation',
            'description': 'Channel for the LBRY Foundation. Follow for updates and news.',
        },
        'playlist_count': 29,
    }, {
        'url': 'https://lbry.tv/@LBRYFoundation',
        'only_matching': True,
    }]
    _PAGE_SIZE = 50

    def _fetch_page(self, claim_id, url, params, page):
        page += 1
        page_params = {
            'channel_ids': [claim_id],
            'claim_type': 'stream',
            'no_totals': True,
            'page': page,
            'page_size': self._PAGE_SIZE,
        }
        page_params.update(params)
        result = self._call_api_proxy(
            'claim_search', claim_id, page_params, 'page %d' % page)
        for item in (result.get('items') or []):
            stream_claim_name = item.get('name')
            stream_claim_id = item.get('claim_id')
            if not (stream_claim_name and stream_claim_id):
                continue

            info = self._parse_stream(item, url)
            info.update({
                '_type': 'url',
                'id': stream_claim_id,
                'title': try_get(item, lambda x: x['value']['title']),
                'url': self._permanent_url(url, stream_claim_name, stream_claim_id),
            })
            yield info

    def _real_extract(self, url):
        display_id = self._match_id(url).replace(':', '#')
        result = self._resolve_url(
            'lbry://' + display_id, display_id, 'channel')
        claim_id = result['claim_id']
        qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
        content = qs.get('content', [None])[0]
        params = {
            'fee_amount': qs.get('fee_amount', ['>=0'])[0],
            'order_by': {
                'new': ['release_time'],
                'top': ['effective_amount'],
                'trending': ['trending_group', 'trending_mixed'],
            }[qs.get('order', ['new'])[0]],
            'stream_types': [content] if content in ['audio', 'video'] else self._SUPPORTED_STREAM_TYPES,
        }
        duration = qs.get('duration', [None])[0]
        if duration:
            params['duration'] = {
                'long': '>=1200',
                'short': '<=240',
            }[duration]
        language = qs.get('language', ['all'])[0]
        if language != 'all':
            languages = [language]
            if language == 'en':
                languages.append('none')
            params['any_languages'] = languages
        entries = OnDemandPagedList(
            functools.partial(self._fetch_page, claim_id, url, params),
            self._PAGE_SIZE)
        result_value = result.get('value') or {}
        return self.playlist_result(
            entries, claim_id, result_value.get('title'),
            result_value.get('description'))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
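A minimal Python 3 sketch of the JSON-RPC proxy call that LBRYBaseIE._call_api_proxy wraps above. The endpoint, header, and payload shape are taken from that code; the helper name, the example claim URL, and the bare-bones error handling are illustrative assumptions, and live network access is assumed.

import json
from urllib.request import Request, urlopen

def lbry_resolve(claim_url, api='https://api.lbry.tv/api/v1/proxy'):
    # POST a JSON-RPC body exactly as _call_api_proxy does, then unwrap 'result'
    payload = json.dumps({'method': 'resolve', 'params': {'urls': claim_url}}).encode()
    req = Request(api, data=payload, headers={'Content-Type': 'application/json-rpc'})
    with urlopen(req) as resp:  # network access assumed
        return json.load(resp)['result'][claim_url]

# e.g. lbry_resolve('lbry://Episode-1#e7d93d772bd87e2b62d5ab993c1c3ced86ebb396')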
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/unistra.py
youtube_dl/extractor/unistra.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import qualities class UnistraIE(InfoExtractor): _VALID_URL = r'https?://utv\.unistra\.fr/(?:index|video)\.php\?id_video\=(?P<id>\d+)' _TESTS = [ { 'url': 'http://utv.unistra.fr/video.php?id_video=154', 'md5': '736f605cfdc96724d55bb543ab3ced24', 'info_dict': { 'id': '154', 'ext': 'mp4', 'title': 'M!ss Yella', 'description': 'md5:104892c71bd48e55d70b902736b81bbf', }, }, { 'url': 'http://utv.unistra.fr/index.php?id_video=437', 'md5': '1ddddd6cccaae76f622ce29b8779636d', 'info_dict': { 'id': '437', 'ext': 'mp4', 'title': 'Prix Louise Weiss 2014', 'description': 'md5:cc3a8735f079f4fb6b0b570fc10c135a', }, } ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) files = set(re.findall(r'file\s*:\s*"(/[^"]+)"', webpage)) quality = qualities(['SD', 'HD']) formats = [] for file_path in files: format_id = 'HD' if file_path.endswith('-HD.mp4') else 'SD' formats.append({ 'url': 'http://vod-flash.u-strasbg.fr:8080%s' % file_path, 'format_id': format_id, 'quality': quality(format_id) }) self._sort_formats(formats) title = self._html_search_regex( r'<title>UTV - (.*?)</', webpage, 'title') description = self._html_search_regex( r'<meta name="Description" content="(.*?)"', webpage, 'description', flags=re.DOTALL) thumbnail = self._search_regex( r'image: "(.*?)"', webpage, 'thumbnail') return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'formats': formats }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
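UnistraIE above ranks its two variants with ..utils.qualities. That helper is small enough to restate: it returns a closure mapping a format id to its index in a preference list (-1 if unknown), which then feeds the 'quality' key consumed by _sort_formats. A self-contained sketch:

def qualities(preference):
    def q(format_id):
        try:
            return preference.index(format_id)
        except ValueError:
            return -1
    return q

rank = qualities(['SD', 'HD'])
formats = [{'format_id': fid, 'quality': rank(fid)} for fid in ('HD', 'SD')]
formats.sort(key=lambda f: f['quality'])  # SD first, HD (preferred) last
print([f['format_id'] for f in formats])  # ['SD', 'HD']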
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/sapo.py
youtube_dl/extractor/sapo.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( parse_duration, unified_strdate, ) class SapoIE(InfoExtractor): IE_DESC = 'SAPO Vídeos' _VALID_URL = r'https?://(?:(?:v2|www)\.)?videos\.sapo\.(?:pt|cv|ao|mz|tl)/(?P<id>[\da-zA-Z]{20})' _TESTS = [ { 'url': 'http://videos.sapo.pt/UBz95kOtiWYUMTA5Ghfi', 'md5': '79ee523f6ecb9233ac25075dee0eda83', 'note': 'SD video', 'info_dict': { 'id': 'UBz95kOtiWYUMTA5Ghfi', 'ext': 'mp4', 'title': 'Benfica - Marcas na Hitória', 'description': 'md5:c9082000a128c3fd57bf0299e1367f22', 'duration': 264, 'uploader': 'tiago_1988', 'upload_date': '20080229', 'categories': ['benfica', 'cabral', 'desporto', 'futebol', 'geovanni', 'hooijdonk', 'joao', 'karel', 'lisboa', 'miccoli'], }, }, { 'url': 'http://videos.sapo.pt/IyusNAZ791ZdoCY5H5IF', 'md5': '90a2f283cfb49193fe06e861613a72aa', 'note': 'HD video', 'info_dict': { 'id': 'IyusNAZ791ZdoCY5H5IF', 'ext': 'mp4', 'title': 'Codebits VII - Report', 'description': 'md5:6448d6fd81ce86feac05321f354dbdc8', 'duration': 144, 'uploader': 'codebits', 'upload_date': '20140427', 'categories': ['codebits', 'codebits2014'], }, }, { 'url': 'http://v2.videos.sapo.pt/yLqjzPtbTimsn2wWBKHz', 'md5': 'e5aa7cc0bdc6db9b33df1a48e49a15ac', 'note': 'v2 video', 'info_dict': { 'id': 'yLqjzPtbTimsn2wWBKHz', 'ext': 'mp4', 'title': 'Hipnose Condicionativa 4', 'description': 'md5:ef0481abf8fb4ae6f525088a6dadbc40', 'duration': 692, 'uploader': 'sapozen', 'upload_date': '20090609', 'categories': ['condicionativa', 'heloisa', 'hipnose', 'miranda', 'sapo', 'zen'], }, }, ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') item = self._download_xml( 'http://rd3.videos.sapo.pt/%s/rss2' % video_id, video_id).find('./channel/item') title = item.find('./title').text description = item.find('./{http://videos.sapo.pt/mrss/}synopse').text thumbnail = item.find('./{http://search.yahoo.com/mrss/}content').get('url') duration = parse_duration(item.find('./{http://videos.sapo.pt/mrss/}time').text) uploader = item.find('./{http://videos.sapo.pt/mrss/}author').text upload_date = unified_strdate(item.find('./pubDate').text) view_count = int(item.find('./{http://videos.sapo.pt/mrss/}views').text) comment_count = int(item.find('./{http://videos.sapo.pt/mrss/}comment_count').text) tags = item.find('./{http://videos.sapo.pt/mrss/}tags').text categories = tags.split() if tags else [] age_limit = 18 if item.find('./{http://videos.sapo.pt/mrss/}m18').text == 'true' else 0 video_url = item.find('./{http://videos.sapo.pt/mrss/}videoFile').text video_size = item.find('./{http://videos.sapo.pt/mrss/}videoSize').text.split('x') formats = [{ 'url': video_url, 'ext': 'mp4', 'format_id': 'sd', 'width': int(video_size[0]), 'height': int(video_size[1]), }] if item.find('./{http://videos.sapo.pt/mrss/}HD').text == 'true': formats.append({ 'url': re.sub(r'/mov/1$', '/mov/39', video_url), 'ext': 'mp4', 'format_id': 'hd', 'width': 1280, 'height': 720, }) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'uploader': uploader, 'upload_date': upload_date, 'view_count': view_count, 'comment_count': comment_count, 'categories': categories, 'age_limit': age_limit, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
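The SAPO extractor above leans on ElementTree lookups with an explicit XML namespace prefix. A standalone sketch of that pattern; the feed below is a fabricated stand-in for the real rss2 document:

import xml.etree.ElementTree as ET

SAPO_NS = '{http://videos.sapo.pt/mrss/}'  # namespace used throughout SapoIE
feed = ET.fromstring(
    '<rss><channel><item>'
    '<title>Example clip</title>'
    '<s:time xmlns:s="http://videos.sapo.pt/mrss/">0:04:24</s:time>'
    '</item></channel></rss>')
item = feed.find('./channel/item')
print(item.find('./title').text)             # Example clip
print(item.find('./%stime' % SAPO_NS).text)  # 0:04:24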
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/megaphone.py
youtube_dl/extractor/megaphone.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import js_to_json class MegaphoneIE(InfoExtractor): IE_NAME = 'megaphone.fm' IE_DESC = 'megaphone.fm embedded players' _VALID_URL = r'https://player\.megaphone\.fm/(?P<id>[A-Z0-9]+)' _TEST = { 'url': 'https://player.megaphone.fm/GLT9749789991?"', 'md5': '4816a0de523eb3e972dc0dda2c191f96', 'info_dict': { 'id': 'GLT9749789991', 'ext': 'mp3', 'title': '#97 What Kind Of Idiot Gets Phished?', 'thumbnail': r're:^https://.*\.png.*$', 'duration': 1776.26375, 'author': 'Reply All', }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._og_search_property('audio:title', webpage) author = self._og_search_property('audio:artist', webpage) thumbnail = self._og_search_thumbnail(webpage) episode_json = self._search_regex(r'(?s)var\s+episode\s*=\s*(\{.+?\});', webpage, 'episode JSON') episode_data = self._parse_json(episode_json, video_id, js_to_json) video_url = self._proto_relative_url(episode_data['mediaUrl'], 'https:') formats = [{ 'url': video_url, }] return { 'id': video_id, 'thumbnail': thumbnail, 'title': title, 'author': author, 'duration': episode_data['duration'], 'formats': formats, } @classmethod def _extract_urls(cls, webpage): return [m[0] for m in re.findall( r'<iframe[^>]*?\ssrc=["\'](%s)' % cls._VALID_URL, webpage)]
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
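MegaphoneIE above pulls a JavaScript object literal out of the page and parses it with js_to_json. A sketch of that extraction step; the page snippet is fabricated and deliberately kept JSON-shaped so the stdlib parser suffices (the real extractor tolerates looser JS syntax via js_to_json):

import json
import re

page = 'var episode = {"mediaUrl": "//example.com/ep.mp3", "duration": 1776.26};'
blob = re.search(r'(?s)var\s+episode\s*=\s*(\{.+?\});', page).group(1)
episode = json.loads(blob)
# mirror _proto_relative_url: give protocol-relative URLs an https scheme
media_url = episode['mediaUrl']
if media_url.startswith('//'):
    media_url = 'https:' + media_url
print(media_url, episode['duration'])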
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/markiza.py
youtube_dl/extractor/markiza.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_str from ..utils import ( orderedSet, parse_duration, try_get, ) class MarkizaIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?videoarchiv\.markiza\.sk/(?:video/(?:[^/]+/)*|embed/)(?P<id>\d+)(?:[_/]|$)' _TESTS = [{ 'url': 'http://videoarchiv.markiza.sk/video/oteckovia/84723_oteckovia-109', 'md5': 'ada4e9fad038abeed971843aa028c7b0', 'info_dict': { 'id': '139078', 'ext': 'mp4', 'title': 'Oteckovia 109', 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 2760, }, }, { 'url': 'http://videoarchiv.markiza.sk/video/televizne-noviny/televizne-noviny/85430_televizne-noviny', 'info_dict': { 'id': '85430', 'title': 'Televízne noviny', }, 'playlist_count': 23, }, { 'url': 'http://videoarchiv.markiza.sk/video/oteckovia/84723', 'only_matching': True, }, { 'url': 'http://videoarchiv.markiza.sk/video/84723', 'only_matching': True, }, { 'url': 'http://videoarchiv.markiza.sk/video/filmy/85190_kamenak', 'only_matching': True, }, { 'url': 'http://videoarchiv.markiza.sk/video/reflex/zo-zakulisia/84651_pribeh-alzbetky', 'only_matching': True, }, { 'url': 'http://videoarchiv.markiza.sk/embed/85295', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) data = self._download_json( 'http://videoarchiv.markiza.sk/json/video_jwplayer7.json', video_id, query={'id': video_id}) info = self._parse_jwplayer_data(data, m3u8_id='hls', mpd_id='dash') if info.get('_type') == 'playlist': info.update({ 'id': video_id, 'title': try_get( data, lambda x: x['details']['name'], compat_str), }) else: info['duration'] = parse_duration( try_get(data, lambda x: x['details']['duration'], compat_str)) return info class MarkizaPageIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?(?:(?:[^/]+\.)?markiza|tvnoviny)\.sk/(?:[^/]+/)*(?P<id>\d+)_' _TESTS = [{ 'url': 'http://www.markiza.sk/soubiz/zahranicny/1923705_oteckovia-maju-svoj-den-ti-slavni-nie-su-o-nic-menej-rozkosni', 'md5': 'ada4e9fad038abeed971843aa028c7b0', 'info_dict': { 'id': '139355', 'ext': 'mp4', 'title': 'Oteckovia 110', 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 2604, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://dajto.markiza.sk/filmy-a-serialy/1774695_frajeri-vo-vegas', 'only_matching': True, }, { 'url': 'http://superstar.markiza.sk/aktualne/1923870_to-je-ale-telo-spevacka-ukazala-sexy-postavicku-v-bikinach', 'only_matching': True, }, { 'url': 'http://hybsa.markiza.sk/aktualne/1923790_uzasna-atmosfera-na-hybsa-v-poprade-superstaristi-si-prve-koncerty-pred-davom-ludi-poriadne-uzili', 'only_matching': True, }, { 'url': 'http://doma.markiza.sk/filmy/1885250_moja-vysnivana-svadba', 'only_matching': True, }, { 'url': 'http://www.tvnoviny.sk/domace/1923887_po-smrti-manzela-ju-cakalo-poriadne-prekvapenie', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if MarkizaIE.suitable(url) else super(MarkizaPageIE, cls).suitable(url) def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage( # Downloading for some hosts (e.g. dajto, doma) fails with 500 # although everything seems to be OK, so considering 500 # status code to be expected. 
url, playlist_id, expected_status=500) entries = [ self.url_result('http://videoarchiv.markiza.sk/video/%s' % video_id) for video_id in orderedSet(re.findall( r'(?:initPlayer_|data-entity=["\']|id=["\']player_)(\d+)', webpage))] return self.playlist_result(entries, playlist_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
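MarkizaPageIE above dedupes the scraped ids with ..utils.orderedSet, which keeps first-seen order, unlike a plain set(). A sketch of the equivalent behaviour:

def ordered_set(iterable):
    seen, out = set(), []
    for item in iterable:
        if item not in seen:
            seen.add(item)
            out.append(item)
    return out

# a page can embed the same player id several times; keep each once, in order
print(ordered_set(['139078', '139355', '139078']))  # ['139078', '139355']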
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/pornoxo.py
youtube_dl/extractor/pornoxo.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( str_to_int, ) class PornoXOIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?pornoxo\.com/videos/(?P<id>\d+)/(?P<display_id>[^/]+)\.html' _TEST = { 'url': 'http://www.pornoxo.com/videos/7564/striptease-from-sexy-secretary.html', 'md5': '582f28ecbaa9e6e24cb90f50f524ce87', 'info_dict': { 'id': '7564', 'ext': 'flv', 'title': 'Striptease From Sexy Secretary!', 'display_id': 'striptease-from-sexy-secretary', 'description': 'md5:0ee35252b685b3883f4a1d38332f9980', 'categories': list, # NSFW 'thumbnail': r're:https?://.*\.jpg$', 'age_limit': 18, } } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id, display_id = mobj.groups() webpage = self._download_webpage(url, video_id) video_data = self._extract_jwplayer_data(webpage, video_id, require_title=False) title = self._html_search_regex( r'<title>([^<]+)\s*-\s*PornoXO', webpage, 'title') view_count = str_to_int(self._html_search_regex( r'[vV]iews:\s*([0-9,]+)', webpage, 'view count', fatal=False)) categories_str = self._html_search_regex( r'<meta name="description" content=".*featuring\s*([^"]+)"', webpage, 'categories', fatal=False) categories = ( None if categories_str is None else categories_str.split(',')) video_data.update({ 'id': video_id, 'title': title, 'display_id': display_id, 'description': self._html_search_meta('description', webpage), 'categories': categories, 'view_count': view_count, 'age_limit': 18, }) return video_data
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
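The view-count parsing above goes through ..utils.str_to_int, which strips digit-group separators before converting. A simplified stand-in (the real helper also accepts ints already and a few more edge cases):

import re

def str_to_int(int_str):
    if int_str is None:
        return None
    # drop thousands separators such as '1,234,567' or '1.234.567'
    return int(re.sub(r'[,\.\+]', '', int_str))

print(str_to_int('1,234,567'))  # 1234567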
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/gfycat.py
youtube_dl/extractor/gfycat.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( int_or_none, float_or_none, qualities, ExtractorError, ) class GfycatIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:www|giant|thumbs)\.)?gfycat\.com/(?:ru/|ifr/|gifs/detail/)?(?P<id>[^-/?#\.]+)' _TESTS = [{ 'url': 'http://gfycat.com/DeadlyDecisiveGermanpinscher', 'info_dict': { 'id': 'DeadlyDecisiveGermanpinscher', 'ext': 'mp4', 'title': 'Ghost in the Shell', 'timestamp': 1410656006, 'upload_date': '20140914', 'uploader': 'anonymous', 'duration': 10.4, 'view_count': int, 'like_count': int, 'dislike_count': int, 'categories': list, 'age_limit': 0, } }, { 'url': 'http://gfycat.com/ifr/JauntyTimelyAmazontreeboa', 'info_dict': { 'id': 'JauntyTimelyAmazontreeboa', 'ext': 'mp4', 'title': 'JauntyTimelyAmazontreeboa', 'timestamp': 1411720126, 'upload_date': '20140926', 'uploader': 'anonymous', 'duration': 3.52, 'view_count': int, 'like_count': int, 'dislike_count': int, 'categories': list, 'age_limit': 0, } }, { 'url': 'https://gfycat.com/ru/RemarkableDrearyAmurstarfish', 'only_matching': True }, { 'url': 'https://gfycat.com/gifs/detail/UnconsciousLankyIvorygull', 'only_matching': True }, { 'url': 'https://gfycat.com/acceptablehappygoluckyharborporpoise-baseball', 'only_matching': True }, { 'url': 'https://thumbs.gfycat.com/acceptablehappygoluckyharborporpoise-size_restricted.gif', 'only_matching': True }, { 'url': 'https://giant.gfycat.com/acceptablehappygoluckyharborporpoise.mp4', 'only_matching': True }] def _real_extract(self, url): video_id = self._match_id(url) gfy = self._download_json( 'https://api.gfycat.com/v1/gfycats/%s' % video_id, video_id, 'Downloading video info') if 'error' in gfy: raise ExtractorError('Gfycat said: ' + gfy['error'], expected=True) gfy = gfy['gfyItem'] title = gfy.get('title') or gfy['gfyName'] description = gfy.get('description') timestamp = int_or_none(gfy.get('createDate')) uploader = gfy.get('userName') view_count = int_or_none(gfy.get('views')) like_count = int_or_none(gfy.get('likes')) dislike_count = int_or_none(gfy.get('dislikes')) age_limit = 18 if gfy.get('nsfw') == '1' else 0 width = int_or_none(gfy.get('width')) height = int_or_none(gfy.get('height')) fps = int_or_none(gfy.get('frameRate')) num_frames = int_or_none(gfy.get('numFrames')) duration = float_or_none(num_frames, fps) if num_frames and fps else None categories = gfy.get('tags') or gfy.get('extraLemmas') or [] FORMATS = ('gif', 'webm', 'mp4') quality = qualities(FORMATS) formats = [] for format_id in FORMATS: video_url = gfy.get('%sUrl' % format_id) if not video_url: continue filesize = int_or_none(gfy.get('%sSize' % format_id)) formats.append({ 'url': video_url, 'format_id': format_id, 'width': width, 'height': height, 'fps': fps, 'filesize': filesize, 'quality': quality(format_id), }) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': description, 'timestamp': timestamp, 'uploader': uploader, 'duration': duration, 'view_count': view_count, 'like_count': like_count, 'dislike_count': dislike_count, 'categories': categories, 'age_limit': age_limit, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
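GfycatIE derives duration as numFrames divided by frameRate via float_or_none(num_frames, fps). A simplified sketch of that helper's divide-with-fallback contract (the real one also supports invscale and a default value); the numbers are illustrative:

def float_or_none(v, scale=1):
    try:
        return float(v) / scale
    except (TypeError, ValueError, ZeroDivisionError):
        return None

num_frames, fps = 104, 10
print(float_or_none(num_frames, fps))  # 10.4 seconds
print(float_or_none(None, fps))        # None, rather than an exception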
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tinypic.py
youtube_dl/extractor/tinypic.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ExtractorError class TinyPicIE(InfoExtractor): IE_NAME = 'tinypic' IE_DESC = 'tinypic.com videos' _VALID_URL = r'https?://(?:.+?\.)?tinypic\.com/player\.php\?v=(?P<id>[^&]+)&s=\d+' _TESTS = [ { 'url': 'http://tinypic.com/player.php?v=6xw7tc%3E&s=5#.UtqZmbRFCM8', 'md5': '609b74432465364e72727ebc6203f044', 'info_dict': { 'id': '6xw7tc', 'ext': 'flv', 'title': 'shadow phenomenon weird', }, }, { 'url': 'http://de.tinypic.com/player.php?v=dy90yh&s=8', 'only_matching': True, } ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') webpage = self._download_webpage(url, video_id, 'Downloading page') mobj = re.search(r'(?m)fo\.addVariable\("file",\s"(?P<fileid>[\da-z]+)"\);\n' r'\s+fo\.addVariable\("s",\s"(?P<serverid>\d+)"\);', webpage) if mobj is None: raise ExtractorError('Video %s does not exist' % video_id, expected=True) file_id = mobj.group('fileid') server_id = mobj.group('serverid') KEYWORDS_SUFFIX = ', Video, images, photos, videos, myspace, ebay, video hosting, photo hosting' keywords = self._html_search_meta('keywords', webpage, 'title') title = keywords[:-len(KEYWORDS_SUFFIX)] if keywords.endswith(KEYWORDS_SUFFIX) else '' video_url = 'http://v%s.tinypic.com/%s.flv' % (server_id, file_id) thumbnail = 'http://v%s.tinypic.com/%s_th.jpg' % (server_id, file_id) return { 'id': file_id, 'url': video_url, 'thumbnail': thumbnail, 'title': title }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
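TinyPicIE recovers the title by chopping a fixed boilerplate suffix off the keywords meta tag. The slicing idiom in isolation, with fabricated meta content:

SUFFIX = ', Video, images, photos, videos, myspace, ebay, video hosting, photo hosting'
keywords = 'shadow phenomenon weird' + SUFFIX  # fabricated meta content
title = keywords[:-len(SUFFIX)] if keywords.endswith(SUFFIX) else ''
print(title)  # shadow phenomenon weird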
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/amcnetworks.py
youtube_dl/extractor/amcnetworks.py
# coding: utf-8 from __future__ import unicode_literals import re from .theplatform import ThePlatformIE from ..utils import ( int_or_none, parse_age_limit, try_get, update_url_query, ) class AMCNetworksIE(ThePlatformIE): _VALID_URL = r'https?://(?:www\.)?(?P<site>amc|bbcamerica|ifc|(?:we|sundance)tv)\.com/(?P<id>(?:movies|shows(?:/[^/]+)+)/[^/?#&]+)' _TESTS = [{ 'url': 'https://www.bbcamerica.com/shows/the-graham-norton-show/videos/tina-feys-adorable-airline-themed-family-dinner--51631', 'info_dict': { 'id': '4Lq1dzOnZGt0', 'ext': 'mp4', 'title': "The Graham Norton Show - Season 28 - Tina Fey's Adorable Airline-Themed Family Dinner", 'description': "It turns out child stewardesses are very generous with the wine! All-new episodes of 'The Graham Norton Show' premiere Fridays at 11/10c on BBC America.", 'upload_date': '20201120', 'timestamp': 1605904350, 'uploader': 'AMCN', }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://www.bbcamerica.com/shows/the-hunt/full-episodes/season-1/episode-01-the-hardest-challenge', 'only_matching': True, }, { 'url': 'http://www.amc.com/shows/preacher/full-episodes/season-01/episode-00/pilot', 'only_matching': True, }, { 'url': 'http://www.wetv.com/shows/million-dollar-matchmaker/season-01/episode-06-the-dumped-dj-and-shallow-hal', 'only_matching': True, }, { 'url': 'http://www.ifc.com/movies/chaos', 'only_matching': True, }, { 'url': 'http://www.bbcamerica.com/shows/doctor-who/full-episodes/the-power-of-the-daleks/episode-01-episode-1-color-version', 'only_matching': True, }, { 'url': 'http://www.wetv.com/shows/mama-june-from-not-to-hot/full-episode/season-01/thin-tervention', 'only_matching': True, }, { 'url': 'http://www.wetv.com/shows/la-hair/videos/season-05/episode-09-episode-9-2/episode-9-sneak-peek-3', 'only_matching': True, }, { 'url': 'https://www.sundancetv.com/shows/riviera/full-episodes/season-1/episode-01-episode-1', 'only_matching': True, }] _REQUESTOR_ID_MAP = { 'amc': 'AMC', 'bbcamerica': 'BBCA', 'ifc': 'IFC', 'sundancetv': 'SUNDANCE', 'wetv': 'WETV', } def _real_extract(self, url): site, display_id = re.match(self._VALID_URL, url).groups() requestor_id = self._REQUESTOR_ID_MAP[site] properties = self._download_json( 'https://content-delivery-gw.svc.ds.amcn.com/api/v2/content/amcn/%s/url/%s' % (requestor_id.lower(), display_id), display_id)['data']['properties'] query = { 'mbr': 'true', 'manifest': 'm3u', } tp_path = 'M_UwQC/media/' + properties['videoPid'] media_url = 'https://link.theplatform.com/s/' + tp_path theplatform_metadata = self._download_theplatform_metadata(tp_path, display_id) info = self._parse_theplatform_metadata(theplatform_metadata) video_id = theplatform_metadata['pid'] title = theplatform_metadata['title'] rating = try_get( theplatform_metadata, lambda x: x['ratings'][0]['rating']) video_category = properties.get('videoCategory') if video_category and video_category.endswith('-Auth'): resource = self._get_mvpd_resource( requestor_id, title, video_id, rating) query['auth'] = self._extract_mvpd_auth( url, video_id, requestor_id, resource) media_url = update_url_query(media_url, query) formats, subtitles = self._extract_theplatform_smil( media_url, video_id) self._sort_formats(formats) info.update({ 'id': video_id, 'subtitles': subtitles, 'formats': formats, 'age_limit': parse_age_limit(parse_age_limit(rating)), }) ns_keys = theplatform_metadata.get('$xmlns', {}).keys() if ns_keys: ns = list(ns_keys)[0] series = theplatform_metadata.get(ns + '$show') season_number = int_or_none( 
theplatform_metadata.get(ns + '$season')) episode = theplatform_metadata.get(ns + '$episodeTitle') episode_number = int_or_none( theplatform_metadata.get(ns + '$episode')) if season_number: title = 'Season %d - %s' % (season_number, title) if series: title = '%s - %s' % (series, title) info.update({ 'title': title, 'series': series, 'season_number': season_number, 'episode': episode, 'episode_number': episode_number, }) return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
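The tail of AMCNetworksIE assembles the display title from theplatform metadata in two optional layers. Restated as a small pure function with fabricated inputs:

def build_title(title, series=None, season_number=None):
    if season_number:
        title = 'Season %d - %s' % (season_number, title)
    if series:
        title = '%s - %s' % (series, title)
    return title

print(build_title('Pilot', series='Preacher', season_number=1))
# Preacher - Season 1 - Pilot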
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/condenast.py
youtube_dl/extractor/condenast.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_urllib_parse_urlparse, compat_urlparse, ) from ..utils import ( determine_ext, extract_attributes, int_or_none, js_to_json, mimetype2ext, orderedSet, parse_iso8601, strip_or_none, try_get, ) class CondeNastIE(InfoExtractor): """ Condé Nast is a media group, some of its sites use a custom HTML5 player that works the same in all of them. """ # The keys are the supported sites and the values are the name to be shown # to the user and in the extractor description. _SITES = { 'allure': 'Allure', 'architecturaldigest': 'Architectural Digest', 'arstechnica': 'Ars Technica', 'bonappetit': 'Bon Appétit', 'brides': 'Brides', 'cnevids': 'Condé Nast', 'cntraveler': 'Condé Nast Traveler', 'details': 'Details', 'epicurious': 'Epicurious', 'glamour': 'Glamour', 'golfdigest': 'Golf Digest', 'gq': 'GQ', 'newyorker': 'The New Yorker', 'self': 'SELF', 'teenvogue': 'Teen Vogue', 'vanityfair': 'Vanity Fair', 'vogue': 'Vogue', 'wired': 'WIRED', 'wmagazine': 'W Magazine', } _VALID_URL = r'''(?x)https?://(?:video|www|player(?:-backend)?)\.(?:%s)\.com/ (?: (?: embed(?:js)?| (?:script|inline)/video )/(?P<id>[0-9a-f]{24})(?:/(?P<player_id>[0-9a-f]{24}))?(?:.+?\btarget=(?P<target>[^&]+))?| (?P<type>watch|series|video)/(?P<display_id>[^/?#]+) )''' % '|'.join(_SITES.keys()) IE_DESC = 'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values())) EMBED_URL = r'(?:https?:)?//player(?:-backend)?\.(?:%s)\.com/(?:embed(?:js)?|(?:script|inline)/video)/.+?' % '|'.join(_SITES.keys()) _TESTS = [{ 'url': 'http://video.wired.com/watch/3d-printed-speakers-lit-with-led', 'md5': '1921f713ed48aabd715691f774c451f7', 'info_dict': { 'id': '5171b343c2b4c00dd0c1ccb3', 'ext': 'mp4', 'title': '3D Printed Speakers Lit With LED', 'description': 'Check out these beautiful 3D printed LED speakers. You can\'t actually buy them, but LumiGeek is working on a board that will let you make you\'re own.', 'uploader': 'wired', 'upload_date': '20130314', 'timestamp': 1363219200, } }, { 'url': 'http://video.gq.com/watch/the-closer-with-keith-olbermann-the-only-true-surprise-trump-s-an-idiot?c=series', 'info_dict': { 'id': '58d1865bfd2e6126e2000015', 'ext': 'mp4', 'title': 'The Only True Surprise? 
Trump’s an Idiot', 'uploader': 'gq', 'upload_date': '20170321', 'timestamp': 1490126427, 'description': 'How much grimmer would things be if these people were competent?', }, }, { # JS embed 'url': 'http://player.cnevids.com/embedjs/55f9cf8b61646d1acf00000c/5511d76261646d5566020000.js', 'md5': 'f1a6f9cafb7083bab74a710f65d08999', 'info_dict': { 'id': '55f9cf8b61646d1acf00000c', 'ext': 'mp4', 'title': '3D printed TSA Travel Sentry keys really do open TSA locks', 'uploader': 'arstechnica', 'upload_date': '20150916', 'timestamp': 1442434920, } }, { 'url': 'https://player.cnevids.com/inline/video/59138decb57ac36b83000005.js?target=js-cne-player', 'only_matching': True, }, { 'url': 'http://player-backend.cnevids.com/script/video/59138decb57ac36b83000005.js', 'only_matching': True, }] def _extract_series(self, url, webpage): title = self._html_search_regex( r'(?s)<div class="cne-series-info">.*?<h1>(.+?)</h1>', webpage, 'series title') url_object = compat_urllib_parse_urlparse(url) base_url = '%s://%s' % (url_object.scheme, url_object.netloc) m_paths = re.finditer( r'(?s)<p class="cne-thumb-title">.*?<a href="(/watch/.+?)["\?]', webpage) paths = orderedSet(m.group(1) for m in m_paths) build_url = lambda path: compat_urlparse.urljoin(base_url, path) entries = [self.url_result(build_url(path), 'CondeNast') for path in paths] return self.playlist_result(entries, playlist_title=title) def _extract_video_params(self, webpage, display_id): query = self._parse_json( self._search_regex( r'(?s)var\s+params\s*=\s*({.+?})[;,]', webpage, 'player params', default='{}'), display_id, transform_source=js_to_json, fatal=False) if query: query['videoId'] = self._search_regex( r'(?:data-video-id=|currentVideoId\s*=\s*)["\']([\da-f]+)', webpage, 'video id', default=None) else: params = extract_attributes(self._search_regex( r'(<[^>]+data-js="video-player"[^>]+>)', webpage, 'player params element')) query.update({ 'videoId': params['data-video'], 'playerId': params['data-player'], 'target': params['id'], }) return query def _extract_video(self, params): video_id = params['videoId'] video_info = None # New API path query = params.copy() query['embedType'] = 'inline' info_page = self._download_json( 'http://player.cnevids.com/embed-api.json', video_id, 'Downloading embed info', fatal=False, query=query) # Old fallbacks if not info_page: if params.get('playerId'): info_page = self._download_json( 'http://player.cnevids.com/player/video.js', video_id, 'Downloading video info', fatal=False, query=params) if info_page: video_info = info_page.get('video') if not video_info: info_page = self._download_webpage( 'http://player.cnevids.com/player/loader.js', video_id, 'Downloading loader info', query=params) if not video_info: info_page = self._download_webpage( 'https://player.cnevids.com/inline/video/%s.js' % video_id, video_id, 'Downloading inline info', query={ 'target': params.get('target', 'embedplayer') }) if not video_info: video_info = self._parse_json( self._search_regex( r'(?s)var\s+config\s*=\s*({.+?});', info_page, 'config'), video_id, transform_source=js_to_json)['video'] title = video_info['title'] formats = [] for fdata in video_info['sources']: src = fdata.get('src') if not src: continue ext = mimetype2ext(fdata.get('type')) or determine_ext(src) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( src, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) continue quality = fdata.get('quality') formats.append({ 'format_id': ext + ('-%s' % quality if quality else ''), 'url': src, 
'ext': ext, 'quality': 1 if quality == 'high' else 0, }) self._sort_formats(formats) subtitles = {} for t, caption in video_info.get('captions', {}).items(): caption_url = caption.get('src') if not (t in ('vtt', 'srt', 'tml') and caption_url): continue subtitles.setdefault('en', []).append({'url': caption_url}) return { 'id': video_id, 'formats': formats, 'title': title, 'thumbnail': video_info.get('poster_frame'), 'uploader': video_info.get('brand'), 'duration': int_or_none(video_info.get('duration')), 'tags': video_info.get('tags'), 'series': video_info.get('series_title'), 'season': video_info.get('season_title'), 'timestamp': parse_iso8601(video_info.get('premiere_date')), 'categories': video_info.get('categories'), 'subtitles': subtitles, } def _real_extract(self, url): video_id, player_id, target, url_type, display_id = re.match(self._VALID_URL, url).groups() if video_id: return self._extract_video({ 'videoId': video_id, 'playerId': player_id, 'target': target, }) webpage = self._download_webpage(url, display_id) if url_type == 'series': return self._extract_series(url, webpage) else: video = try_get(self._parse_json(self._search_regex( r'__PRELOADED_STATE__\s*=\s*({.+?});', webpage, 'preload state', '{}'), display_id), lambda x: x['transformed']['video']) if video: params = {'videoId': video['id']} info = {'description': strip_or_none(video.get('description'))} else: params = self._extract_video_params(webpage, display_id) info = self._search_json_ld( webpage, display_id, fatal=False) info.update(self._extract_video(params)) return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
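Per-source container detection in CondeNastIE chains mimetype2ext with determine_ext. A sketch of that fallback order, with deliberately partial stand-ins for both helpers:

MIME_MAP = {'video/mp4': 'mp4', 'application/x-mpegURL': 'm3u8'}  # partial table

def pick_ext(mime_type, src_url):
    ext = MIME_MAP.get(mime_type)             # prefer the declared MIME type...
    return ext or src_url.rpartition('.')[2]  # ...fall back to the URL suffix

print(pick_ext('application/x-mpegURL', 'https://example.com/clip.mp4'))  # m3u8
print(pick_ext(None, 'https://example.com/clip.mp4'))                     # mp4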
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/vtm.py
youtube_dl/extractor/vtm.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( int_or_none, parse_iso8601, try_get, ) class VTMIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?vtm\.be/([^/?&#]+)~v(?P<id>[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12})' _TEST = { 'url': 'https://vtm.be/gast-vernielt-genkse-hotelkamer~ve7534523-279f-4b4d-a5c9-a33ffdbe23e1', 'md5': '37dca85fbc3a33f2de28ceb834b071f8', 'info_dict': { 'id': '192445', 'ext': 'mp4', 'title': 'Gast vernielt Genkse hotelkamer', 'timestamp': 1611060180, 'upload_date': '20210119', 'duration': 74, # TODO: fix url _type result processing # 'series': 'Op Interventie', } } def _real_extract(self, url): uuid = self._match_id(url) video = self._download_json( 'https://omc4vm23offuhaxx6hekxtzspi.appsync-api.eu-west-1.amazonaws.com/graphql', uuid, query={ 'query': '''{ getComponent(type: Video, uuid: "%s") { ... on Video { description duration myChannelsVideo program { title } publishedAt title } } }''' % uuid, }, headers={ 'x-api-key': 'da2-lz2cab4tfnah3mve6wiye4n77e', })['data']['getComponent'] return { '_type': 'url', 'id': uuid, 'title': video.get('title'), 'url': 'http://mychannels.video/embed/%d' % video['myChannelsVideo'], 'description': video.get('description'), 'timestamp': parse_iso8601(video.get('publishedAt')), 'duration': int_or_none(video.get('duration')), 'series': try_get(video, lambda x: x['program']['title']), 'ie_key': 'Medialaan', }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
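VTMIE sends its GraphQL document as a plain GET query parameter rather than a POST body. A standalone Python 3 sketch of that request; the endpoint, API key, and query shape come from the extractor above, while the trimmed field list and any expectation that the service still answers are assumptions.

import json
from urllib.parse import urlencode
from urllib.request import Request, urlopen

def fetch_vtm_component(uuid):
    query = ('{ getComponent(type: Video, uuid: "%s") '
             '{ ... on Video { title duration } } }' % uuid)
    url = ('https://omc4vm23offuhaxx6hekxtzspi.appsync-api.eu-west-1.amazonaws.com'
           '/graphql?' + urlencode({'query': query}))
    req = Request(url, headers={'x-api-key': 'da2-lz2cab4tfnah3mve6wiye4n77e'})
    with urlopen(req) as resp:  # live network access assumed
        return json.load(resp)['data']['getComponent']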
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/kuwo.py
youtube_dl/extractor/kuwo.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_urlparse from ..utils import ( get_element_by_id, clean_html, ExtractorError, InAdvancePagedList, remove_start, ) class KuwoBaseIE(InfoExtractor): _FORMATS = [ {'format': 'ape', 'ext': 'ape', 'preference': 100}, {'format': 'mp3-320', 'ext': 'mp3', 'br': '320kmp3', 'abr': 320, 'preference': 80}, {'format': 'mp3-192', 'ext': 'mp3', 'br': '192kmp3', 'abr': 192, 'preference': 70}, {'format': 'mp3-128', 'ext': 'mp3', 'br': '128kmp3', 'abr': 128, 'preference': 60}, {'format': 'wma', 'ext': 'wma', 'preference': 20}, {'format': 'aac', 'ext': 'aac', 'abr': 48, 'preference': 10} ] def _get_formats(self, song_id, tolerate_ip_deny=False): formats = [] for file_format in self._FORMATS: query = { 'format': file_format['ext'], 'br': file_format.get('br', ''), 'rid': 'MUSIC_%s' % song_id, 'type': 'convert_url', 'response': 'url' } song_url = self._download_webpage( 'http://antiserver.kuwo.cn/anti.s', song_id, note='Download %s url info' % file_format['format'], query=query, headers=self.geo_verification_headers(), ) if song_url == 'IPDeny' and not tolerate_ip_deny: raise ExtractorError('This song is blocked in this region', expected=True) if song_url.startswith('http://') or song_url.startswith('https://'): formats.append({ 'url': song_url, 'format_id': file_format['format'], 'format': file_format['format'], 'preference': file_format['preference'], 'abr': file_format.get('abr'), }) return formats class KuwoIE(KuwoBaseIE): IE_NAME = 'kuwo:song' IE_DESC = '酷我音乐' _VALID_URL = r'https?://(?:www\.)?kuwo\.cn/yinyue/(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.kuwo.cn/yinyue/635632/', 'info_dict': { 'id': '635632', 'ext': 'ape', 'title': '爱我别走', 'creator': '张震岳', 'upload_date': '20080122', 'description': 'md5:ed13f58e3c3bf3f7fd9fbc4e5a7aa75c' }, 'skip': 'this song has been offline because of copyright issues', }, { 'url': 'http://www.kuwo.cn/yinyue/6446136/', 'info_dict': { 'id': '6446136', 'ext': 'mp3', 'title': '心', 'description': 'md5:5d0e947b242c35dc0eb1d2fce9fbf02c', 'creator': 'IU', 'upload_date': '20150518', }, 'params': { 'format': 'mp3-320', }, }, { 'url': 'http://www.kuwo.cn/yinyue/3197154?catalog=yueku2016', 'only_matching': True, }] def _real_extract(self, url): song_id = self._match_id(url) webpage, urlh = self._download_webpage_handle( url, song_id, note='Download song detail info', errnote='Unable to get song detail info') if song_id not in urlh.geturl() or '对不起,该歌曲由于版权问题已被下线,将返回网站首页' in webpage: raise ExtractorError('this song has been offline because of copyright issues', expected=True) song_name = self._html_search_regex( r'<p[^>]+id="lrcName">([^<]+)</p>', webpage, 'song name') singer_name = remove_start(self._html_search_regex( r'<a[^>]+href="http://www\.kuwo\.cn/artist/content\?name=([^"]+)">', webpage, 'singer name', fatal=False), '歌手') lrc_content = clean_html(get_element_by_id('lrcContent', webpage)) if lrc_content == '暂无': # indicates no lyrics lrc_content = None formats = self._get_formats(song_id) self._sort_formats(formats) album_id = self._html_search_regex( r'<a[^>]+href="http://www\.kuwo\.cn/album/(\d+)/"', webpage, 'album id', fatal=False) publish_time = None if album_id is not None: album_info_page = self._download_webpage( 'http://www.kuwo.cn/album/%s/' % album_id, song_id, note='Download album detail info', errnote='Unable to get album detail info') publish_time = self._html_search_regex( r'发行时间:(\d{4}-\d{2}-\d{2})', album_info_page, 'publish time', 
fatal=False) if publish_time: publish_time = publish_time.replace('-', '') return { 'id': song_id, 'title': song_name, 'creator': singer_name, 'upload_date': publish_time, 'description': lrc_content, 'formats': formats, } class KuwoAlbumIE(InfoExtractor): IE_NAME = 'kuwo:album' IE_DESC = '酷我音乐 - 专辑' _VALID_URL = r'https?://(?:www\.)?kuwo\.cn/album/(?P<id>\d+?)/' _TEST = { 'url': 'http://www.kuwo.cn/album/502294/', 'info_dict': { 'id': '502294', 'title': 'Made\xa0Series\xa0《M》', 'description': 'md5:d463f0d8a0ff3c3ea3d6ed7452a9483f', }, 'playlist_count': 2, } def _real_extract(self, url): album_id = self._match_id(url) webpage = self._download_webpage( url, album_id, note='Download album info', errnote='Unable to get album info') album_name = self._html_search_regex( r'<div[^>]+class="comm"[^<]+<h1[^>]+title="([^"]+)"', webpage, 'album name') album_intro = remove_start( clean_html(get_element_by_id('intro', webpage)), '%s简介:' % album_name) entries = [ self.url_result(song_url, 'Kuwo') for song_url in re.findall( r'<p[^>]+class="listen"><a[^>]+href="(http://www\.kuwo\.cn/yinyue/\d+/)"', webpage) ] return self.playlist_result(entries, album_id, album_name, album_intro) class KuwoChartIE(InfoExtractor): IE_NAME = 'kuwo:chart' IE_DESC = '酷我音乐 - 排行榜' _VALID_URL = r'https?://yinyue\.kuwo\.cn/billboard_(?P<id>[^.]+).htm' _TEST = { 'url': 'http://yinyue.kuwo.cn/billboard_香港中文龙虎榜.htm', 'info_dict': { 'id': '香港中文龙虎榜', }, 'playlist_mincount': 7, } def _real_extract(self, url): chart_id = self._match_id(url) webpage = self._download_webpage( url, chart_id, note='Download chart info', errnote='Unable to get chart info') entries = [ self.url_result(song_url, 'Kuwo') for song_url in re.findall( r'<a[^>]+href="(http://www\.kuwo\.cn/yinyue/\d+)', webpage) ] return self.playlist_result(entries, chart_id) class KuwoSingerIE(InfoExtractor): IE_NAME = 'kuwo:singer' IE_DESC = '酷我音乐 - 歌手' _VALID_URL = r'https?://(?:www\.)?kuwo\.cn/mingxing/(?P<id>[^/]+)' _TESTS = [{ 'url': 'http://www.kuwo.cn/mingxing/bruno+mars/', 'info_dict': { 'id': 'bruno+mars', 'title': 'Bruno\xa0Mars', }, 'playlist_mincount': 329, }, { 'url': 'http://www.kuwo.cn/mingxing/Ali/music.htm', 'info_dict': { 'id': 'Ali', 'title': 'Ali', }, 'playlist_mincount': 95, 'skip': 'Regularly stalls travis build', # See https://travis-ci.org/ytdl-org/youtube-dl/jobs/78878540 }] PAGE_SIZE = 15 def _real_extract(self, url): singer_id = self._match_id(url) webpage = self._download_webpage( url, singer_id, note='Download singer info', errnote='Unable to get singer info') singer_name = self._html_search_regex( r'<h1>([^<]+)</h1>', webpage, 'singer name') artist_id = self._html_search_regex( r'data-artistid="(\d+)"', webpage, 'artist id') page_count = int(self._html_search_regex( r'data-page="(\d+)"', webpage, 'page count')) def page_func(page_num): webpage = self._download_webpage( 'http://www.kuwo.cn/artist/contentMusicsAjax', singer_id, note='Download song list page #%d' % (page_num + 1), errnote='Unable to get song list page #%d' % (page_num + 1), query={'artistId': artist_id, 'pn': page_num, 'rn': self.PAGE_SIZE}) return [ self.url_result(compat_urlparse.urljoin(url, song_url), 'Kuwo') for song_url in re.findall( r'<div[^>]+class="name"><a[^>]+href="(/yinyue/\d+)', webpage) ] entries = InAdvancePagedList(page_func, page_count, self.PAGE_SIZE) return self.playlist_result(entries, singer_id, singer_name) class KuwoCategoryIE(InfoExtractor): IE_NAME = 'kuwo:category' IE_DESC = '酷我音乐 - 分类' _VALID_URL = r'https?://yinyue\.kuwo\.cn/yy/cinfo_(?P<id>\d+?).htm' _TEST 
= { 'url': 'http://yinyue.kuwo.cn/yy/cinfo_86375.htm', 'info_dict': { 'id': '86375', 'title': '八十年代精选', 'description': '这些都是属于八十年代的回忆!', }, 'playlist_mincount': 24, } def _real_extract(self, url): category_id = self._match_id(url) webpage = self._download_webpage( url, category_id, note='Download category info', errnote='Unable to get category info') category_name = self._html_search_regex( r'<h1[^>]+title="([^<>]+?)">[^<>]+?</h1>', webpage, 'category name') category_desc = remove_start( get_element_by_id('intro', webpage).strip(), '%s简介:' % category_name) if category_desc == '暂无': category_desc = None jsonm = self._parse_json(self._html_search_regex( r'var\s+jsonm\s*=\s*([^;]+);', webpage, 'category songs'), category_id) entries = [ self.url_result('http://www.kuwo.cn/yinyue/%s/' % song['musicrid'], 'Kuwo') for song in jsonm['musiclist'] ] return self.playlist_result(entries, category_id, category_name, category_desc) class KuwoMvIE(KuwoBaseIE): IE_NAME = 'kuwo:mv' IE_DESC = '酷我音乐 - MV' _VALID_URL = r'https?://(?:www\.)?kuwo\.cn/mv/(?P<id>\d+?)/' _TEST = { 'url': 'http://www.kuwo.cn/mv/6480076/', 'info_dict': { 'id': '6480076', 'ext': 'mp4', 'title': 'My HouseMV', 'creator': '2PM', }, # In this video, music URLs (anti.s) are blocked outside China and # USA, while the MV URL (mvurl) is available globally, so force the MV # URL for consistent results in different countries 'params': { 'format': 'mv', }, } _FORMATS = KuwoBaseIE._FORMATS + [ {'format': 'mkv', 'ext': 'mkv', 'preference': 250}, {'format': 'mp4', 'ext': 'mp4', 'preference': 200}, ] def _real_extract(self, url): song_id = self._match_id(url) webpage = self._download_webpage( url, song_id, note='Download mv detail info: %s' % song_id, errnote='Unable to get mv detail info: %s' % song_id) mobj = re.search( r'<h1[^>]+title="(?P<song>[^"]+)">[^<]+<span[^>]+title="(?P<singer>[^"]+)"', webpage) if mobj: song_name = mobj.group('song') singer_name = mobj.group('singer') else: raise ExtractorError('Unable to find song or singer names') formats = self._get_formats(song_id, tolerate_ip_deny=True) mv_url = self._download_webpage( 'http://www.kuwo.cn/yy/st/mvurl?rid=MUSIC_%s' % song_id, song_id, note='Download %s MV URL' % song_id) formats.append({ 'url': mv_url, 'format_id': 'mv', }) self._sort_formats(formats) return { 'id': song_id, 'title': song_name, 'creator': singer_name, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
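KuwoSingerIE pages through a singer's songs with InAdvancePagedList: the page count is known up front and each page is fetched by index. A minimal sketch of that eager-paging pattern, with a canned page_func standing in for the contentMusicsAjax download:

PAGE_SIZE = 15

def page_func(page_num):  # stand-in for the HTTP page fetch
    start = page_num * PAGE_SIZE
    return ['/yinyue/%d' % i for i in range(start, start + PAGE_SIZE)]

def iter_pages(fetch, page_count):
    for n in range(page_count):
        for entry in fetch(n):
            yield entry

print(sum(1 for _ in iter_pages(page_func, 3)))  # 45 entries over 3 pages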
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/gazeta.py
youtube_dl/extractor/gazeta.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor class GazetaIE(InfoExtractor): _VALID_URL = r'(?P<url>https?://(?:www\.)?gazeta\.ru/(?:[^/]+/)?video/(?:main/)*(?:\d{4}/\d{2}/\d{2}/)?(?P<id>[A-Za-z0-9-_.]+)\.s?html)' _TESTS = [{ 'url': 'http://www.gazeta.ru/video/main/zadaite_vopros_vladislavu_yurevichu.shtml', 'md5': 'd49c9bdc6e5a7888f27475dc215ee789', 'info_dict': { 'id': '205566', 'ext': 'mp4', 'title': '«70–80 процентов гражданских в Донецке на грани голода»', 'description': 'md5:38617526050bd17b234728e7f9620a71', 'thumbnail': r're:^https?://.*\.jpg', }, 'skip': 'video not found', }, { 'url': 'http://www.gazeta.ru/lifestyle/video/2015/03/08/master-klass_krasivoi_byt._delaem_vesennii_makiyazh.shtml', 'only_matching': True, }, { 'url': 'http://www.gazeta.ru/video/main/main/2015/06/22/platit_ili_ne_platit_po_isku_yukosa.shtml', 'md5': '37f19f78355eb2f4256ee1688359f24c', 'info_dict': { 'id': '252048', 'ext': 'mp4', 'title': '"Если по иску ЮКОСа придется платить, это будет большой удар по бюджету"', }, 'add_ie': ['EaglePlatform'], }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) display_id = mobj.group('id') embed_url = '%s?p=embed' % mobj.group('url') embed_page = self._download_webpage( embed_url, display_id, 'Downloading embed page') video_id = self._search_regex( r'<div[^>]*?class="eagleplayer"[^>]*?data-id="([^"]+)"', embed_page, 'video id') return self.url_result( 'eagleplatform:gazeta.media.eagleplatform.com:%s' % video_id, 'EaglePlatform')
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
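GazetaIE is a pure hand-off extractor: scrape one id from the embed page, then delegate through an internal eagleplatform: pseudo-URL. The two steps in isolation, with a fabricated HTML snippet:

import re

embed_page = '<div class="eagleplayer" data-id="252048"></div>'  # fabricated
video_id = re.search(
    r'<div[^>]*?class="eagleplayer"[^>]*?data-id="([^"]+)"', embed_page).group(1)
print('eagleplatform:gazeta.media.eagleplatform.com:%s' % video_id)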
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/youku.py
youtube_dl/extractor/youku.py
# coding: utf-8 from __future__ import unicode_literals import random import re import string import time from .common import InfoExtractor from ..utils import ( ExtractorError, get_element_by_class, js_to_json, str_or_none, strip_jsonp, ) class YoukuIE(InfoExtractor): IE_NAME = 'youku' IE_DESC = '优酷' _VALID_URL = r'''(?x) (?: https?://( (?:v|player)\.youku\.com/(?:v_show/id_|player\.php/sid/)| video\.tudou\.com/v/)| youku:) (?P<id>[A-Za-z0-9]+)(?:\.html|/v\.swf|) ''' _TESTS = [{ # MD5 is unstable 'url': 'http://v.youku.com/v_show/id_XMTc1ODE5Njcy.html', 'info_dict': { 'id': 'XMTc1ODE5Njcy', 'title': '★Smile﹗♡ Git Fresh -Booty Music舞蹈.', 'ext': 'mp4', 'duration': 74.73, 'thumbnail': r're:^https?://.*', 'uploader': '。躲猫猫、', 'uploader_id': '36017967', 'uploader_url': 'http://i.youku.com/u/UMTQ0MDcxODY4', 'tags': list, } }, { 'url': 'http://player.youku.com/player.php/sid/XNDgyMDQ2NTQw/v.swf', 'only_matching': True, }, { 'url': 'http://v.youku.com/v_show/id_XODgxNjg1Mzk2_ev_1.html', 'info_dict': { 'id': 'XODgxNjg1Mzk2', 'ext': 'mp4', 'title': '武媚娘传奇 85', 'duration': 1999.61, 'thumbnail': r're:^https?://.*', 'uploader': '疯狂豆花', 'uploader_id': '62583473', 'uploader_url': 'http://i.youku.com/u/UMjUwMzMzODky', 'tags': list, }, }, { 'url': 'http://v.youku.com/v_show/id_XMTI1OTczNDM5Mg==.html', 'info_dict': { 'id': 'XMTI1OTczNDM5Mg', 'ext': 'mp4', 'title': '花千骨 04', 'duration': 2363, 'thumbnail': r're:^https?://.*', 'uploader': '放剧场-花千骨', 'uploader_id': '772849359', 'uploader_url': 'http://i.youku.com/u/UMzA5MTM5NzQzNg==', 'tags': list, }, }, { 'url': 'http://v.youku.com/v_show/id_XNjA1NzA2Njgw.html', 'note': 'Video protected with password', 'info_dict': { 'id': 'XNjA1NzA2Njgw', 'ext': 'mp4', 'title': '邢義田复旦讲座之想象中的胡人—从“左衽孔子”说起', 'duration': 7264.5, 'thumbnail': r're:^https?://.*', 'uploader': 'FoxJin1006', 'uploader_id': '322014285', 'uploader_url': 'http://i.youku.com/u/UMTI4ODA1NzE0MA==', 'tags': list, }, 'params': { 'videopassword': '100600', }, }, { # /play/get.json contains streams with "channel_type":"tail" 'url': 'http://v.youku.com/v_show/id_XOTUxMzg4NDMy.html', 'info_dict': { 'id': 'XOTUxMzg4NDMy', 'ext': 'mp4', 'title': '我的世界☆明月庄主☆车震猎杀☆杀人艺术Minecraft', 'duration': 702.08, 'thumbnail': r're:^https?://.*', 'uploader': '明月庄主moon', 'uploader_id': '38465621', 'uploader_url': 'http://i.youku.com/u/UMTUzODYyNDg0', 'tags': list, }, }, { 'url': 'http://video.tudou.com/v/XMjIyNzAzMTQ4NA==.html?f=46177805', 'info_dict': { 'id': 'XMjIyNzAzMTQ4NA', 'ext': 'mp4', 'title': '卡马乔国足开大脚长传冲吊集锦', 'duration': 289, 'thumbnail': r're:^https?://.*', 'uploader': '阿卜杜拉之星', 'uploader_id': '2382249', 'uploader_url': 'http://i.youku.com/u/UOTUyODk5Ng==', 'tags': list, }, }, { 'url': 'http://video.tudou.com/v/XMjE4ODI3OTg2MA==.html', 'only_matching': True, }] @staticmethod def get_ysuid(): return '%d%s' % (int(time.time()), ''.join([ random.choice(string.ascii_letters) for i in range(3)])) def get_format_name(self, fm): _dict = { '3gp': 'h6', '3gphd': 'h5', 'flv': 'h4', 'flvhd': 'h4', 'mp4': 'h3', 'mp4hd': 'h3', 'mp4hd2': 'h4', 'mp4hd3': 'h4', 'hd2': 'h2', 'hd3': 'h1', } return _dict.get(fm) def _real_extract(self, url): video_id = self._match_id(url) self._set_cookie('youku.com', '__ysuid', self.get_ysuid()) self._set_cookie('youku.com', 'xreferrer', 'http://www.youku.com') _, urlh = self._download_webpage_handle( 'https://log.mmstat.com/eg.js', video_id, 'Retrieving cna info') # The etag header is '"foobar"'; let's remove the double quotes cna = urlh.headers['etag'][1:-1] # request basic data basic_data_params = { 
'vid': video_id, 'ccode': '0532', 'client_ip': '192.168.1.1', 'utid': cna, 'client_ts': time.time() / 1000, } video_password = self._downloader.params.get('videopassword') if video_password: basic_data_params['password'] = video_password headers = { 'Referer': url, } headers.update(self.geo_verification_headers()) data = self._download_json( 'https://ups.youku.com/ups/get.json', video_id, 'Downloading JSON metadata', query=basic_data_params, headers=headers)['data'] error = data.get('error') if error: error_note = error.get('note') if error_note is not None and '因版权原因无法观看此视频' in error_note: raise ExtractorError( 'Youku said: Sorry, this video is available in China only', expected=True) elif error_note and '该视频被设为私密' in error_note: raise ExtractorError( 'Youku said: Sorry, this video is private', expected=True) else: msg = 'Youku server reported error %i' % error.get('code') if error_note is not None: msg += ': ' + error_note raise ExtractorError(msg) # get video title video_data = data['video'] title = video_data['title'] formats = [{ 'url': stream['m3u8_url'], 'format_id': self.get_format_name(stream.get('stream_type')), 'ext': 'mp4', 'protocol': 'm3u8_native', 'filesize': int(stream.get('size')), 'width': stream.get('width'), 'height': stream.get('height'), } for stream in data['stream'] if stream.get('channel_type') != 'tail'] self._sort_formats(formats) return { 'id': video_id, 'title': title, 'formats': formats, 'duration': video_data.get('seconds'), 'thumbnail': video_data.get('logo'), 'uploader': video_data.get('username'), 'uploader_id': str_or_none(video_data.get('userid')), 'uploader_url': data.get('uploader', {}).get('homepage'), 'tags': video_data.get('tags'), } class YoukuShowIE(InfoExtractor): _VALID_URL = r'https?://list\.youku\.com/show/id_(?P<id>[0-9a-z]+)\.html' IE_NAME = 'youku:show' _TESTS = [{ 'url': 'http://list.youku.com/show/id_zc7c670be07ff11e48b3f.html', 'info_dict': { 'id': 'zc7c670be07ff11e48b3f', 'title': '花千骨 DVD版', 'description': 'md5:a1ae6f5618571bbeb5c9821f9c81b558', }, 'playlist_count': 50, }, { # Episode number not starting from 1 'url': 'http://list.youku.com/show/id_zefbfbd70efbfbd780bef.html', 'info_dict': { 'id': 'zefbfbd70efbfbd780bef', 'title': '超级飞侠3', 'description': 'md5:275715156abebe5ccc2a1992e9d56b98', }, 'playlist_count': 24, }, { # Ongoing playlist. The initial page is the last one 'url': 'http://list.youku.com/show/id_za7c275ecd7b411e1a19e.html', 'only_matching': True, }, { # No data-id value. 'url': 'http://list.youku.com/show/id_zefbfbd61237fefbfbdef.html', 'only_matching': True, }, { # Wrong number of reload_id. 
'url': 'http://list.youku.com/show/id_z20eb4acaf5c211e3b2ad.html', 'only_matching': True, }] def _extract_entries(self, playlist_data_url, show_id, note, query): query['callback'] = 'cb' playlist_data = self._download_json( playlist_data_url, show_id, query=query, note=note, transform_source=lambda s: js_to_json(strip_jsonp(s))).get('html') if playlist_data is None: return [None, None] drama_list = (get_element_by_class('p-drama-grid', playlist_data) or get_element_by_class('p-drama-half-row', playlist_data)) if drama_list is None: raise ExtractorError('No episodes found') video_urls = re.findall(r'<a[^>]+href="([^"]+)"', drama_list) return playlist_data, [ self.url_result(self._proto_relative_url(video_url, 'http:'), YoukuIE.ie_key()) for video_url in video_urls] def _real_extract(self, url): show_id = self._match_id(url) webpage = self._download_webpage(url, show_id) entries = [] page_config = self._parse_json(self._search_regex( r'var\s+PageConfig\s*=\s*({.+});', webpage, 'page config'), show_id, transform_source=js_to_json) first_page, initial_entries = self._extract_entries( 'http://list.youku.com/show/module', show_id, note='Downloading initial playlist data page', query={ 'id': page_config['showid'], 'tab': 'showInfo', }) first_page_reload_id = self._html_search_regex( r'<div[^>]+id="(reload_\d+)', first_page, 'first page reload id') # The first reload_id has the same items as first_page reload_ids = re.findall('<li[^>]+data-id="([^"]+)">', first_page) entries.extend(initial_entries) for idx, reload_id in enumerate(reload_ids): if reload_id == first_page_reload_id: continue _, new_entries = self._extract_entries( 'http://list.youku.com/show/episode', show_id, note='Downloading playlist data page %d' % (idx + 1), query={ 'id': page_config['showid'], 'stage': reload_id, }) if new_entries is not None: entries.extend(new_entries) desc = self._html_search_meta('description', webpage, fatal=False) playlist_title = desc.split(',')[0] if desc else None detail_li = get_element_by_class('p-intro', webpage) playlist_description = get_element_by_class( 'intro-more', detail_li) if detail_li else None return self.playlist_result( entries, show_id, playlist_title, playlist_description)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
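YoukuIE seeds a __ysuid cookie before touching the API; the get_ysuid helper above is just the current Unix time plus three random ASCII letters. Standalone, for clarity:

import random
import string
import time

def get_ysuid():
    return '%d%s' % (int(time.time()),
                     ''.join(random.choice(string.ascii_letters) for _ in range(3)))

print(get_ysuid())  # e.g. '1611060180abc'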
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/peekvids.py
youtube_dl/extractor/peekvids.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    get_element_by_class,
    int_or_none,
    merge_dicts,
    url_or_none,
)


class PeekVidsIE(InfoExtractor):
    _VALID_URL = r'''(?x)
        https?://(?:www\.)?peekvids\.com/
        (?:(?:[^/?#]+/){2}|embed/?\?(?:[^#]*&)?v=)
        (?P<id>[^/?&#]*)
    '''
    _TESTS = [{
        'url': 'https://peekvids.com/pc/dane-jones-cute-redhead-with-perfect-tits-with-mini-vamp/BSyLMbN0YCd',
        'md5': '2ff6a357a9717dc9dc9894b51307e9a2',
        'info_dict': {
            'id': '1262717',
            'display_id': 'BSyLMbN0YCd',
            'title': ' Dane Jones - Cute redhead with perfect tits with Mini Vamp',
            'ext': 'mp4',
            'thumbnail': r're:^https?://.*\.jpg$',
            'description': 'md5:0a61df3620de26c0af8963b1a730cd69',
            'timestamp': 1642579329,
            'upload_date': '20220119',
            'duration': 416,
            'view_count': int,
            'age_limit': 18,
            'uploader': 'SEXYhub.com',
            'categories': list,
            'tags': list,
        },
    }]
    _DOMAIN = 'www.peekvids.com'

    def _get_detail(self, html):
        return get_element_by_class('detail-video-block', html)

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id, expected_status=429)
        if '>Rate Limit Exceeded' in webpage:
            raise ExtractorError(
                '[%s] %s: %s' % (
                    self.IE_NAME, video_id,
                    'You are suspected as a bot. Wait, or pass the captcha test on the site and provide --cookies.'),
                expected=True)

        title = self._html_search_regex(r'(?s)<h1\b[^>]*>(.+?)</h1>', webpage, 'title')

        display_id = video_id
        video_id = self._search_regex(r'(?s)<video\b[^>]+\bdata-id\s*=\s*["\']?([\w-]+)', webpage, 'short video ID')
        srcs = self._download_json(
            'https://%s/v-alt/%s' % (self._DOMAIN, video_id), video_id,
            note='Downloading list of source files')
        formats = [{
            'url': f_url,
            'format_id': f_id,
            'height': int_or_none(f_id),
        } for f_url, f_id in (
            (url_or_none(f_v), f_match.group(1))
            for f_v, f_match in (
                (v, re.match(r'^data-src(\d{3,})$', k))
                for k, v in srcs.items() if v) if f_match)
            if f_url
        ]
        if not formats:
            formats = [{'url': url} for url in srcs.values()]
        self._sort_formats(formats)

        info = self._search_json_ld(webpage, video_id, expected_type='VideoObject', default={})
        info.pop('url', None)
        # may not have found the thumbnail if it was in a list in the ld+json
        info.setdefault('thumbnail', self._og_search_thumbnail(webpage))
        detail = self._get_detail(webpage) or ''
        info['description'] = self._html_search_regex(
            r'(?s)(.+?)(?:%s\s*<|<ul\b)' % (re.escape(info.get('description', '')), ),
            detail, 'description', default=None) or None
        info['title'] = re.sub(r'\s*[,-][^,-]+$', '', info.get('title') or title) or self._generic_title(url)

        def cat_tags(name, html):
            l = self._html_search_regex(
                r'(?s)<span\b[^>]*>\s*%s\s*:\s*</span>(.+?)</li>' % (re.escape(name), ),
                html, name, default='')
            return [x for x in re.split(r'\s+', l) if x]

        return merge_dicts({
            'id': video_id,
            'display_id': display_id,
            'age_limit': 18,
            'formats': formats,
            'categories': cat_tags('Categories', detail),
            'tags': cat_tags('Tags', detail),
            'uploader': self._html_search_regex(r'[Uu]ploaded\s+by\s(.+?)"', webpage, 'uploader', default=None),
        }, info)


class PlayVidsIE(PeekVidsIE):
    _VALID_URL = r'https?://(?:www\.)?playvids\.com/(?:embed/|\w\w?/)?(?P<id>[^/?#]*)'
    _TESTS = [{
        'url': 'https://www.playvids.com/U3pBrYhsjXM/pc/dane-jones-cute-redhead-with-perfect-tits-with-mini-vamp',
        'md5': '2f12e50213dd65f142175da633c4564c',
        'info_dict': {
            'id': '1978030',
            'display_id': 'U3pBrYhsjXM',
            'title': ' Dane Jones - Cute redhead with perfect tits with Mini Vamp',
            'ext': 'mp4',
            'thumbnail': r're:^https?://.*\.jpg$',
            'description': 'md5:0a61df3620de26c0af8963b1a730cd69',
            'timestamp': 1640435839,
            'upload_date': '20211225',
            'duration': 416,
            'view_count': int,
            'age_limit': 18,
            'uploader': 'SEXYhub.com',
            'categories': list,
            'tags': list,
        },
    }, {
        'url': 'https://www.playvids.com/es/U3pBrYhsjXM/pc/dane-jones-cute-redhead-with-perfect-tits-with-mini-vamp',
        'only_matching': True,
    }, {
        'url': 'https://www.playvids.com/embed/U3pBrYhsjXM',
        'only_matching': True,
    }, {
        'url': 'https://www.playvids.com/bKmGLe3IwjZ/sv/brazzers-800-phone-sex-madison-ivy-always-on-the-line',
        'md5': 'e783986e596cafbf46411a174ab42ba6',
        'info_dict': {
            'id': '762385',
            'display_id': 'bKmGLe3IwjZ',
            'ext': 'mp4',
            'title': 'Brazzers - 1 800 Phone Sex: Madison Ivy Always On The Line 6',
            'description': 'md5:bdcd2db2b8ad85831a491d7c8605dcef',
            'timestamp': 1516958544,
            'upload_date': '20180126',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 480,
            'uploader': 'Brazzers',
            'age_limit': 18,
            'view_count': int,
            'categories': list,
            'tags': list,
        },
    }, {
        'url': 'https://www.playvids.com/v/47iUho33toY',
        'md5': 'b056b5049d34b648c1e86497cf4febce',
        'info_dict': {
            'id': '700621',
            'display_id': '47iUho33toY',
            'ext': 'mp4',
            'title': 'KATEE OWEN STRIPTIASE IN SEXY RED LINGERIE',
            'description': None,
            'timestamp': 1507052209,
            'upload_date': '20171003',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 332,
            'uploader': 'Cacerenele',
            'age_limit': 18,
            'view_count': int,
            'categories': list,
            'tags': list,
        }
    }, {
        'url': 'https://www.playvids.com/z3_7iwWCmqt/sexy-teen-filipina-striptease-beautiful-pinay-bargirl-strips-and-dances',
        'md5': 'efa09be9f031314b7b7e3bc6510cd0df',
        'info_dict': {
            'id': '1523518',
            'display_id': 'z3_7iwWCmqt',
            'ext': 'mp4',
            'title': 'SEXY TEEN FILIPINA STRIPTEASE - Beautiful Pinay Bargirl Strips and Dances',
            'description': None,
            'timestamp': 1607470323,
            'upload_date': '20201208',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 593,
            'uploader': 'yorours',
            'age_limit': 18,
            'view_count': int,
            'categories': list,
            'tags': list,
        },
    }]
    _DOMAIN = 'www.playvids.com'

    def _get_detail(self, html):
        return get_element_by_class('detail-block', html)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
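Note on PeekVidsIE above: the nested generator expression that turns the '/v-alt/<id>' JSON object into formats is dense, so here is a minimal standalone sketch of the same mapping (the sample srcs dict is invented for illustration; only keys shaped like data-src<height> survive):

# illustrative only -- not part of the scraped file
import re

srcs = {
    'data-src480': 'https://example.com/v480.mp4',
    'data-src720': 'https://example.com/v720.mp4',
    'poster': 'https://example.com/p.jpg',  # dropped: key does not match
}
formats = [
    {'url': v, 'format_id': m.group(1), 'height': int(m.group(1))}
    for k, v in srcs.items()
    for m in [re.match(r'^data-src(\d{3,})$', k)]
    if m and v]
print(sorted(f['height'] for f in formats))  # [480, 720]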
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/rbmaradio.py
youtube_dl/extractor/rbmaradio.py
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    clean_html,
    int_or_none,
    unified_timestamp,
    update_url_query,
)


class RBMARadioIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?(?:rbmaradio|redbullradio)\.com/shows/(?P<show_id>[^/]+)/episodes/(?P<id>[^/?#&]+)'
    _TEST = {
        'url': 'https://www.rbmaradio.com/shows/main-stage/episodes/ford-lopatin-live-at-primavera-sound-2011',
        'md5': '6bc6f9bcb18994b4c983bc3bf4384d95',
        'info_dict': {
            'id': 'ford-lopatin-live-at-primavera-sound-2011',
            'ext': 'mp3',
            'title': 'Main Stage - Ford & Lopatin at Primavera Sound',
            'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 2452,
            'timestamp': 1307103164,
            'upload_date': '20110603',
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        show_id = mobj.group('show_id')
        episode_id = mobj.group('id')

        webpage = self._download_webpage(url, episode_id)

        episode = self._parse_json(
            self._search_regex(
                r'__INITIAL_STATE__\s*=\s*({.+?})\s*</script>',
                webpage, 'json data'),
            episode_id)['episodes'][show_id][episode_id]

        title = episode['title']

        show_title = episode.get('showTitle')
        if show_title:
            title = '%s - %s' % (show_title, title)

        formats = [{
            'url': update_url_query(episode['audioURL'], query={'cbr': abr}),
            'format_id': compat_str(abr),
            'abr': abr,
            'vcodec': 'none',
        } for abr in (96, 128, 192, 256)]
        self._check_formats(formats, episode_id)

        description = clean_html(episode.get('longTeaser'))
        thumbnail = self._proto_relative_url(episode.get('imageURL', {}).get('landscape'))
        duration = int_or_none(episode.get('duration'))
        timestamp = unified_timestamp(episode.get('publishedAt'))

        return {
            'id': episode_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'timestamp': timestamp,
            'formats': formats,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/cliphunter.py
youtube_dl/extractor/cliphunter.py
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    url_or_none,
)


class CliphunterIE(InfoExtractor):
    IE_NAME = 'cliphunter'

    _VALID_URL = r'''(?x)https?://(?:www\.)?cliphunter\.com/w/
        (?P<id>[0-9]+)/
        (?P<seo>.+?)(?:$|[#\?])
    '''
    _TESTS = [{
        'url': 'http://www.cliphunter.com/w/1012420/Fun_Jynx_Maze_solo',
        'md5': 'b7c9bbd4eb3a226ab91093714dcaa480',
        'info_dict': {
            'id': '1012420',
            'ext': 'flv',
            'title': 'Fun Jynx Maze solo',
            'thumbnail': r're:^https?://.*\.jpg$',
            'age_limit': 18,
        },
        'skip': 'Video gone',
    }, {
        'url': 'http://www.cliphunter.com/w/2019449/ShesNew__My_booty_girlfriend_Victoria_Paradices_pussy_filled_with_jizz',
        'md5': '55a723c67bfc6da6b0cfa00d55da8a27',
        'info_dict': {
            'id': '2019449',
            'ext': 'mp4',
            'title': 'ShesNew - My booty girlfriend, Victoria Paradice\'s pussy filled with jizz',
            'thumbnail': r're:^https?://.*\.jpg$',
            'age_limit': 18,
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        video_title = self._search_regex(
            r'mediaTitle = "([^"]+)"', webpage, 'title')

        gexo_files = self._parse_json(
            self._search_regex(
                r'var\s+gexoFiles\s*=\s*({.+?});', webpage, 'gexo files'),
            video_id)

        formats = []
        for format_id, f in gexo_files.items():
            video_url = url_or_none(f.get('url'))
            if not video_url:
                continue
            fmt = f.get('fmt')
            height = f.get('h')
            format_id = '%s_%sp' % (fmt, height) if fmt and height else format_id
            formats.append({
                'url': video_url,
                'format_id': format_id,
                'width': int_or_none(f.get('w')),
                'height': int_or_none(height),
                'tbr': int_or_none(f.get('br')),
            })
        self._sort_formats(formats)

        thumbnail = self._search_regex(
            r"var\s+mov_thumb\s*=\s*'([^']+)';",
            webpage, 'thumbnail', fatal=False)

        return {
            'id': video_id,
            'title': video_title,
            'formats': formats,
            'age_limit': self._rta_search(webpage),
            'thumbnail': thumbnail,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tdslifeway.py
youtube_dl/extractor/tdslifeway.py
from __future__ import unicode_literals

from .common import InfoExtractor


class TDSLifewayIE(InfoExtractor):
    _VALID_URL = r'https?://tds\.lifeway\.com/v1/trainingdeliverysystem/courses/(?P<id>\d+)/index\.html'

    _TEST = {
        # From http://www.ministrygrid.com/training-viewer/-/training/t4g-2014-conference/the-gospel-by-numbers-4/the-gospel-by-numbers
        'url': 'http://tds.lifeway.com/v1/trainingdeliverysystem/courses/3453494717001/index.html?externalRegistration=AssetId%7C34F466F1-78F3-4619-B2AB-A8EFFA55E9E9%21InstanceId%7C0%21UserId%7Caaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa&grouping=http%3A%2F%2Flifeway.com%2Fvideo%2F3453494717001&activity_id=http%3A%2F%2Flifeway.com%2Fvideo%2F3453494717001&content_endpoint=http%3A%2F%2Ftds.lifeway.com%2Fv1%2Ftrainingdeliverysystem%2FScormEngineInterface%2FTCAPI%2Fcontent%2F&actor=%7B%22name%22%3A%5B%22Guest%20Guest%22%5D%2C%22account%22%3A%5B%7B%22accountServiceHomePage%22%3A%22http%3A%2F%2Fscorm.lifeway.com%2F%22%2C%22accountName%22%3A%22aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa%22%7D%5D%2C%22objectType%22%3A%22Agent%22%7D&content_token=462a50b2-b6f9-4970-99b1-930882c499fb&registration=93d6ec8e-7f7b-4ed3-bbc8-a857913c0b2a&externalConfiguration=access%7CFREE%21adLength%7C-1%21assignOrgId%7C4AE36F78-299A-425D-91EF-E14A899B725F%21assignOrgParentId%7C%21courseId%7C%21isAnonymous%7Cfalse%21previewAsset%7Cfalse%21previewLength%7C-1%21previewMode%7Cfalse%21royalty%7CFREE%21sessionId%7C671422F9-8E79-48D4-9C2C-4EE6111EA1CD%21trackId%7C&auth=Basic%20OjhmZjk5MDBmLTBlYTMtNDJhYS04YjFlLWE4MWQ3NGNkOGRjYw%3D%3D&endpoint=http%3A%2F%2Ftds.lifeway.com%2Fv1%2Ftrainingdeliverysystem%2FScormEngineInterface%2FTCAPI%2F',
        'info_dict': {
            'id': '3453494717001',
            'ext': 'mp4',
            'title': 'The Gospel by Numbers',
            'thumbnail': r're:^https?://.*\.jpg',
            'upload_date': '20140410',
            'description': 'Coming soon from T4G 2014!',
            'uploader_id': '2034960640001',
            'timestamp': 1397145591,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'add_ie': ['BrightcoveNew'],
    }
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/2034960640001/default_default/index.html?videoId=%s'

    def _real_extract(self, url):
        brightcove_id = self._match_id(url)
        return self.url_result(self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, 'BrightcoveNew', brightcove_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/webcaster.py
youtube_dl/extractor/webcaster.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    xpath_text,
)


class WebcasterIE(InfoExtractor):
    _VALID_URL = r'https?://bl\.webcaster\.pro/(?:quote|media)/start/free_(?P<id>[^/]+)'
    _TESTS = [{
        # http://video.khl.ru/quotes/393859
        'url': 'http://bl.webcaster.pro/quote/start/free_c8cefd240aa593681c8d068cff59f407_hd/q393859/eb173f99dd5f558674dae55f4ba6806d/1480289104?sr%3D105%26fa%3D1%26type_id%3D18',
        'md5': '0c162f67443f30916ff1c89425dcd4cd',
        'info_dict': {
            'id': 'c8cefd240aa593681c8d068cff59f407_hd',
            'ext': 'mp4',
            'title': 'Сибирь - Нефтехимик. Лучшие моменты первого периода',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }, {
        'url': 'http://bl.webcaster.pro/media/start/free_6246c7a4453ac4c42b4398f840d13100_hd/2_2991109016/e8d0d82587ef435480118f9f9c41db41/4635726126',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        video = self._download_xml(url, video_id)

        title = xpath_text(video, './/event_name', 'event name', fatal=True)

        def make_id(parts, separator):
            return separator.join(filter(None, parts))

        formats = []
        for format_id in (None, 'noise'):
            track_tag = make_id(('track', format_id), '_')
            for track in video.findall('.//iphone/%s' % track_tag):
                track_url = track.text
                if not track_url:
                    continue
                if determine_ext(track_url) == 'm3u8':
                    m3u8_formats = self._extract_m3u8_formats(
                        track_url, video_id, 'mp4',
                        entry_protocol='m3u8_native',
                        m3u8_id=make_id(('hls', format_id), '-'), fatal=False)
                    for f in m3u8_formats:
                        f.update({
                            'source_preference': 0 if format_id == 'noise' else 1,
                            'format_note': track.get('title'),
                        })
                    formats.extend(m3u8_formats)
        self._sort_formats(formats)

        thumbnail = xpath_text(video, './/image', 'thumbnail')

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'formats': formats,
        }


class WebcasterFeedIE(InfoExtractor):
    _VALID_URL = r'https?://bl\.webcaster\.pro/feed/start/free_(?P<id>[^/]+)'
    _TEST = {
        'url': 'http://bl.webcaster.pro/feed/start/free_c8cefd240aa593681c8d068cff59f407_hd/q393859/eb173f99dd5f558674dae55f4ba6806d/1480289104',
        'only_matching': True,
    }

    @staticmethod
    def _extract_url(ie, webpage):
        mobj = re.search(
            r'<(?:object|a[^>]+class=["\']webcaster-player["\'])[^>]+data(?:-config)?=(["\']).*?config=(?P<url>https?://bl\.webcaster\.pro/feed/start/free_.*?)(?:[?&]|\1)',
            webpage)
        if mobj:
            return mobj.group('url')
        for secure in (True, False):
            video_url = ie._og_search_video_url(
                webpage, secure=secure, default=None)
            if video_url:
                mobj = re.search(
                    r'config=(?P<url>https?://bl\.webcaster\.pro/feed/start/free_[^?&=]+)',
                    video_url)
                if mobj:
                    return mobj.group('url')

    def _real_extract(self, url):
        video_id = self._match_id(url)

        feed = self._download_xml(url, video_id)

        video_url = xpath_text(
            feed, ('video_hd', 'video'), 'video url', fatal=True)

        return self.url_result(video_url, WebcasterIE.ie_key())
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/miaopai.py
youtube_dl/extractor/miaopai.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor


class MiaoPaiIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?miaopai\.com/show/(?P<id>[-A-Za-z0-9~_]+)'
    _TEST = {
        'url': 'http://www.miaopai.com/show/n~0hO7sfV1nBEw4Y29-Hqg__.htm',
        'md5': '095ed3f1cd96b821add957bdc29f845b',
        'info_dict': {
            'id': 'n~0hO7sfV1nBEw4Y29-Hqg__',
            'ext': 'mp4',
            'title': '西游记音乐会的秒拍视频',
            'thumbnail': 're:^https?://.*/n~0hO7sfV1nBEw4Y29-Hqg___m.jpg',
        }
    }

    _USER_AGENT_IPAD = 'Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(
            url, video_id, headers={'User-Agent': self._USER_AGENT_IPAD})

        title = self._html_search_regex(
            r'<title>([^<]+)</title>', webpage, 'title')
        thumbnail = self._html_search_regex(
            r'<div[^>]+class=(?P<q1>[\'"]).*\bvideo_img\b.*(?P=q1)[^>]+data-url=(?P<q2>[\'"])(?P<url>[^\'"]+)(?P=q2)',
            webpage, 'thumbnail', fatal=False, group='url')
        videos = self._parse_html5_media_entries(url, webpage, video_id)
        info = videos[0]

        info.update({
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
        })
        return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/sportbox.py
youtube_dl/extractor/sportbox.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    int_or_none,
    js_to_json,
    merge_dicts,
)


class SportBoxIE(InfoExtractor):
    _VALID_URL = r'https?://(?:news\.sportbox|matchtv)\.ru/vdl/player(?:/[^/]+/|\?.*?\bn?id=)(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://news.sportbox.ru/vdl/player/ci/211355',
        'info_dict': {
            'id': '109158',
            'ext': 'mp4',
            'title': 'В Новороссийске прошел детский турнир «Поле славы боевой»',
            'description': 'В Новороссийске прошел детский турнир «Поле славы боевой»',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 292,
            'view_count': int,
            'timestamp': 1426237001,
            'upload_date': '20150313',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'http://news.sportbox.ru/vdl/player?nid=370908&only_player=1&autostart=false&playeri=2&height=340&width=580',
        'only_matching': True,
    }, {
        'url': 'https://news.sportbox.ru/vdl/player/media/193095',
        'only_matching': True,
    }, {
        'url': 'https://news.sportbox.ru/vdl/player/media/109158',
        'only_matching': True,
    }, {
        'url': 'https://matchtv.ru/vdl/player/media/109158',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage):
        return re.findall(
            r'<iframe[^>]+src="(https?://(?:news\.sportbox|matchtv)\.ru/vdl/player[^"]+)"',
            webpage)

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        sources = self._parse_json(
            self._search_regex(
                r'(?s)playerOptions\.sources(?:WithRes)?\s*=\s*(\[.+?\])\s*;\s*\n',
                webpage, 'sources'),
            video_id, transform_source=js_to_json)

        formats = []
        for source in sources:
            src = source.get('src')
            if not src:
                continue
            if determine_ext(src) == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    src, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id='hls', fatal=False))
            else:
                formats.append({
                    'url': src,
                })
        self._sort_formats(formats)

        player = self._parse_json(
            self._search_regex(
                r'(?s)playerOptions\s*=\s*({.+?})\s*;\s*\n', webpage,
                'player options', default='{}'),
            video_id, transform_source=js_to_json)
        media_id = player['mediaId']

        info = self._search_json_ld(webpage, media_id, default={})

        view_count = int_or_none(self._search_regex(
            r'Просмотров\s*:\s*(\d+)', webpage, 'view count', default=None))

        return merge_dicts(info, {
            'id': media_id,
            'title': self._og_search_title(webpage, default=None) or media_id,
            'thumbnail': player.get('poster'),
            'duration': int_or_none(player.get('duration')),
            'view_count': view_count,
            'formats': formats,
        })
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/motherless.py
youtube_dl/extractor/motherless.py
# coding: utf-8
from __future__ import unicode_literals

import datetime
import re

from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
    ExtractorError,
    InAdvancePagedList,
    orderedSet,
    str_to_int,
    unified_strdate,
)


class MotherlessIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?motherless\.com/(?:g/[a-z0-9_]+/)?(?P<id>[A-Z0-9]+)'
    _TESTS = [{
        'url': 'http://motherless.com/AC3FFE1',
        'md5': '310f62e325a9fafe64f68c0bccb6e75f',
        'info_dict': {
            'id': 'AC3FFE1',
            'ext': 'mp4',
            'title': 'Fucked in the ass while playing PS3',
            'categories': ['Gaming', 'anal', 'reluctant', 'rough', 'Wife'],
            'upload_date': '20100913',
            'uploader_id': 'famouslyfuckedup',
            'thumbnail': r're:https?://.*\.jpg',
            'age_limit': 18,
        }
    }, {
        'url': 'http://motherless.com/532291B',
        'md5': 'bc59a6b47d1f958e61fbd38a4d31b131',
        'info_dict': {
            'id': '532291B',
            'ext': 'mp4',
            'title': 'Amazing girl playing the omegle game, PERFECT!',
            'categories': ['Amateur', 'webcam', 'omegle', 'pink', 'young', 'masturbate', 'teen', 'game', 'hairy'],
            'upload_date': '20140622',
            'uploader_id': 'Sulivana7x',
            'thumbnail': r're:https?://.*\.jpg',
            'age_limit': 18,
        },
        'skip': '404',
    }, {
        'url': 'http://motherless.com/g/cosplay/633979F',
        'md5': '0b2a43f447a49c3e649c93ad1fafa4a0',
        'info_dict': {
            'id': '633979F',
            'ext': 'mp4',
            'title': 'Turtlette',
            'categories': ['superheroine heroine superher'],
            'upload_date': '20140827',
            'uploader_id': 'shade0230',
            'thumbnail': r're:https?://.*\.jpg',
            'age_limit': 18,
        }
    }, {
        # no keywords
        'url': 'http://motherless.com/8B4BBC1',
        'only_matching': True,
    }, {
        # see https://motherless.com/videos/recent for recent videos with
        # uploaded date in "ago" format
        'url': 'https://motherless.com/3C3E2CF',
        'info_dict': {
            'id': '3C3E2CF',
            'ext': 'mp4',
            'title': 'a/ Hot Teens',
            'categories': list,
            'upload_date': '20210104',
            'uploader_id': 'anonymous',
            'thumbnail': r're:https?://.*\.jpg',
            'age_limit': 18,
        },
        'params': {
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        if any(p in webpage for p in (
                '<title>404 - MOTHERLESS.COM<',
                ">The page you're looking for cannot be found.<")):
            raise ExtractorError('Video %s does not exist' % video_id, expected=True)

        if '>The content you are trying to view is for friends only.' in webpage:
            raise ExtractorError('Video %s is for friends only' % video_id, expected=True)

        title = self._html_search_regex(
            (r'(?s)<div[^>]+\bclass=["\']media-meta-title[^>]+>(.+?)</div>',
             r'id="view-upload-title">\s+([^<]+)<'),
            webpage, 'title')
        video_url = (self._html_search_regex(
            (r'setup\(\{\s*["\']file["\']\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1',
             r'fileurl\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1'),
            webpage, 'video URL', default=None, group='url')
            or 'http://cdn4.videos.motherlessmedia.com/videos/%s.mp4?fs=opencloud' % video_id)
        age_limit = self._rta_search(webpage)
        view_count = str_to_int(self._html_search_regex(
            (r'>([\d,.]+)\s+Views<', r'<strong>Views</strong>\s+([^<]+)<'),
            webpage, 'view count', fatal=False))
        like_count = str_to_int(self._html_search_regex(
            (r'>([\d,.]+)\s+Favorites<',
             r'<strong>Favorited</strong>\s+([^<]+)<'),
            webpage, 'like count', fatal=False))

        upload_date = unified_strdate(self._search_regex(
            r'class=["\']count[^>]+>(\d+\s+[a-zA-Z]{3}\s+\d{4})<', webpage,
            'upload date', default=None))
        if not upload_date:
            uploaded_ago = self._search_regex(
                r'>\s*(\d+[hd])\s+[aA]go\b', webpage, 'uploaded ago',
                default=None)
            if uploaded_ago:
                delta = int(uploaded_ago[:-1])
                _AGO_UNITS = {
                    'h': 'hours',
                    'd': 'days',
                }
                kwargs = {_AGO_UNITS.get(uploaded_ago[-1]): delta}
                upload_date = (datetime.datetime.utcnow() - datetime.timedelta(**kwargs)).strftime('%Y%m%d')

        comment_count = len(re.findall(r'''class\s*=\s*['"]media-comment-contents\b''', webpage))
        uploader_id = self._html_search_regex(
            (r'''<span\b[^>]+\bclass\s*=\s*["']username\b[^>]*>([^<]+)</span>''',
             r'''(?s)['"](?:media-meta-member|thumb-member-username)\b[^>]+>\s*<a\b[^>]+\bhref\s*=\s*['"]/m/([^"']+)'''),
            webpage, 'uploader_id')

        categories = self._html_search_meta('keywords', webpage, default=None)
        if categories:
            categories = [cat.strip() for cat in categories.split(',')]

        return {
            'id': video_id,
            'title': title,
            'upload_date': upload_date,
            'uploader_id': uploader_id,
            'thumbnail': self._og_search_thumbnail(webpage),
            'categories': categories,
            'view_count': view_count,
            'like_count': like_count,
            'comment_count': comment_count,
            'age_limit': age_limit,
            'url': video_url,
        }


class MotherlessGroupIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?motherless\.com/gv?/(?P<id>[a-z0-9_]+)'
    _TESTS = [{
        'url': 'http://motherless.com/g/movie_scenes',
        'info_dict': {
            'id': 'movie_scenes',
            'title': 'Movie Scenes',
            'description': 'Hot and sexy scenes from "regular" movies... '
                           'Beautiful actresses fully nude... A looot of '
                           'skin! :)Enjoy!',
        },
        'playlist_mincount': 662,
    }, {
        'url': 'http://motherless.com/gv/sex_must_be_funny',
        'info_dict': {
            'id': 'sex_must_be_funny',
            'title': 'Sex must be funny',
            'description': 'Sex can be funny. Wide smiles,laugh, games, fun of '
                           'any kind!'
        },
        'playlist_mincount': 0,
        'expected_warnings': [
            'This group has no videos.',
        ]
    }, {
        'url': 'https://motherless.com/g/beautiful_cock',
        'info_dict': {
            'id': 'beautiful_cock',
            'title': 'Beautiful Cock',
            'description': 'Group for lovely cocks yours, mine, a friends anything human',
        },
        'playlist_mincount': 2500,
    }]

    @classmethod
    def suitable(cls, url):
        return (False if MotherlessIE.suitable(url)
                else super(MotherlessGroupIE, cls).suitable(url))

    def _extract_entries(self, webpage, base):
        entries = []
        for mobj in re.finditer(
                r'href="(?P<href>/[^"]+)"[^>]*>(?:\s*<img[^>]+alt="[^-]+-\s(?P<title>[^"]+)")?',
                webpage):
            video_url = compat_urlparse.urljoin(base, mobj.group('href'))
            if not MotherlessIE.suitable(video_url):
                continue
            video_id = MotherlessIE._match_id(video_url)
            title = mobj.group('title')
            entries.append(self.url_result(
                video_url, ie=MotherlessIE.ie_key(), video_id=video_id,
                video_title=title))
        # Alternative fallback
        if not entries:
            entries = [
                self.url_result(
                    compat_urlparse.urljoin(base, '/' + entry_id),
                    ie=MotherlessIE.ie_key(), video_id=entry_id)
                for entry_id in orderedSet(re.findall(
                    r'data-codename=["\']([A-Z0-9]+)', webpage))]
        return entries

    def _real_extract(self, url):
        group_id = self._match_id(url)
        page_url = compat_urlparse.urljoin(url, '/gv/%s' % group_id)
        webpage = self._download_webpage(page_url, group_id)
        title = self._search_regex(
            r'<title>([\w\s]+\w)\s+-', webpage, 'title', fatal=False)
        description = self._html_search_meta(
            'description', webpage, fatal=False)
        page_count = str_to_int(self._search_regex(
            r'(\d+)\s*</(?:a|span)>\s*<(?:a|span)[^>]+(?:>\s*NEXT|\brel\s*=\s*["\']?next)\b',
            webpage, 'page_count', default=0))
        if not page_count:
            message = self._search_regex(
                r'''class\s*=\s*['"]error-page\b[^>]*>\s*<p[^>]*>\s*(?P<error_msg>[^<]+)(?<=\S)\s*''',
                webpage, 'error_msg', default=None) or 'This group has no videos.'
            self.report_warning(message, group_id)
            page_count = 1
        PAGE_SIZE = 80

        def _get_page(idx):
            if idx > 0:
                webpage = self._download_webpage(
                    page_url, group_id, query={'page': idx + 1},
                    note='Downloading page %d/%d' % (idx + 1, page_count)
                )
            for entry in self._extract_entries(webpage, url):
                yield entry

        playlist = InAdvancePagedList(_get_page, page_count, PAGE_SIZE)

        return {
            '_type': 'playlist',
            'id': group_id,
            'title': title,
            'description': description,
            'entries': playlist
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
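A quick standalone sketch of the '"ago" format' upload-date handling in MotherlessIE above (self-contained; the sample input '3d' is invented):

# illustrative only -- not part of the scraped file
import datetime

def upload_date_from_ago(uploaded_ago):
    # '12h' -> hours, '3d' -> days, measured back from the current UTC time
    delta = int(uploaded_ago[:-1])
    kwargs = {{'h': 'hours', 'd': 'days'}[uploaded_ago[-1]]: delta}
    return (datetime.datetime.utcnow() - datetime.timedelta(**kwargs)).strftime('%Y%m%d')

print(upload_date_from_ago('3d'))  # the date three days ago, e.g. '20260101'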
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/vzaar.py
youtube_dl/extractor/vzaar.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    int_or_none,
    float_or_none,
    unified_timestamp,
    url_or_none,
)


class VzaarIE(InfoExtractor):
    _VALID_URL = r'https?://(?:(?:www|view)\.)?vzaar\.com/(?:videos/)?(?P<id>\d+)'
    _TESTS = [{
        # HTTP and HLS
        'url': 'https://vzaar.com/videos/1152805',
        'md5': 'bde5ddfeb104a6c56a93a06b04901dbf',
        'info_dict': {
            'id': '1152805',
            'ext': 'mp4',
            'title': 'sample video (public)',
        },
    }, {
        'url': 'https://view.vzaar.com/27272/player',
        'md5': '3b50012ac9bbce7f445550d54e0508f2',
        'info_dict': {
            'id': '27272',
            'ext': 'mp3',
            'title': 'MP3',
        },
    }, {
        # hlsAes = true
        'url': 'https://view.vzaar.com/11379930/player',
        'info_dict': {
            'id': '11379930',
            'ext': 'mp4',
            'title': 'Videoaula',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        # with null videoTitle
        'url': 'https://view.vzaar.com/20313539/download',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage):
        return re.findall(
            r'<iframe[^>]+src=["\']((?:https?:)?//(?:view\.vzaar\.com)/[0-9]+)',
            webpage)

    def _real_extract(self, url):
        video_id = self._match_id(url)
        video_data = self._download_json(
            'http://view.vzaar.com/v2/%s/video' % video_id, video_id)

        title = video_data.get('videoTitle') or video_id

        formats = []

        source_url = url_or_none(video_data.get('sourceUrl'))
        if source_url:
            f = {
                'url': source_url,
                'format_id': 'http',
                'preference': 1,
            }
            if 'audio' in source_url:
                f.update({
                    'vcodec': 'none',
                    'ext': 'mp3',
                })
            else:
                f.update({
                    'width': int_or_none(video_data.get('width')),
                    'height': int_or_none(video_data.get('height')),
                    'ext': 'mp4',
                    'fps': float_or_none(video_data.get('fps')),
                })
            formats.append(f)

        video_guid = video_data.get('guid')
        usp = video_data.get('usp')
        if video_data.get('uspEnabled') and isinstance(video_guid, compat_str) and isinstance(usp, dict):
            hls_aes = video_data.get('hlsAes')
            qs = '&'.join('%s=%s' % (k, v) for k, v in usp.items())
            url_templ = 'http://%%s.vzaar.com/v5/usp%s/%s/%s.ism%%s?' % ('aes' if hls_aes else '', video_guid, video_id)
            m3u8_formats = self._extract_m3u8_formats(
                url_templ % ('fable', '/.m3u8') + qs, video_id, 'mp4',
                'm3u8_native', m3u8_id='hls', fatal=False)
            if hls_aes:
                for f in m3u8_formats:
                    f['_decryption_key_url'] = url_templ % ('goose', '') + qs
            formats.extend(m3u8_formats)

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'thumbnail': self._proto_relative_url(video_data.get('poster')),
            'duration': float_or_none(video_data.get('videoDuration')),
            'timestamp': unified_timestamp(video_data.get('ts')),
            'formats': formats,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
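The double-percent escaping in VzaarIE's url_templ above is easy to misread; a minimal sketch with invented values shows how one template serves both the playlist URL and the decryption-key URL:

# illustrative only -- not part of the scraped file
video_guid, video_id, hls_aes = 'abc123', '11379930', True
qs = 'k=v'
url_templ = 'http://%%s.vzaar.com/v5/usp%s/%s/%s.ism%%s?' % (
    'aes' if hls_aes else '', video_guid, video_id)
print(url_templ % ('fable', '/.m3u8') + qs)
# http://fable.vzaar.com/v5/uspaes/abc123/11379930.ism/.m3u8?k=v
print(url_templ % ('goose', '') + qs)
# http://goose.vzaar.com/v5/uspaes/abc123/11379930.ism?k=v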
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/inc.py
youtube_dl/extractor/inc.py
from __future__ import unicode_literals

from .common import InfoExtractor
from .kaltura import KalturaIE


class IncIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?inc\.com/(?:[^/]+/)+(?P<id>[^.]+).html'
    _TESTS = [{
        'url': 'http://www.inc.com/tip-sheet/bill-gates-says-these-5-books-will-make-you-smarter.html',
        'md5': '7416739c9c16438c09fa35619d6ba5cb',
        'info_dict': {
            'id': '1_wqig47aq',
            'ext': 'mov',
            'title': 'Bill Gates Says These 5 Books Will Make You Smarter',
            'description': 'md5:bea7ff6cce100886fc1995acb743237e',
            'timestamp': 1474414430,
            'upload_date': '20160920',
            'uploader_id': 'video@inc.com',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # div with id=kaltura_player_1_kqs38cgm
        'url': 'https://www.inc.com/oscar-raymundo/richard-branson-young-entrepeneurs.html',
        'info_dict': {
            'id': '1_kqs38cgm',
            'ext': 'mp4',
            'title': 'Branson: "In the end, you have to say, Screw it. Just do it."',
            'description': 'md5:21b832d034f9af5191ca5959da5e9cb6',
            'timestamp': 1364403232,
            'upload_date': '20130327',
            'uploader_id': 'incdigital@inc.com',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.inc.com/video/david-whitford/founders-forum-tripadvisor-steve-kaufer-most-enjoyable-moment-for-entrepreneur.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        partner_id = self._search_regex(
            r'var\s+_?bizo_data_partner_id\s*=\s*["\'](\d+)', webpage,
            'partner id', default='1034971')

        kaltura_id = self._search_regex(
            r'id=(["\'])kaltura_player_(?P<id>.+?)\1', webpage, 'kaltura id',
            default=None, group='id') or self._parse_json(self._search_regex(
                r'pageInfo\.videos\s*=\s*\[(.+)\];', webpage, 'kaltura id'),
            display_id)['vid_kaltura_id']

        return self.url_result(
            'kaltura:%s:%s' % (partner_id, kaltura_id), KalturaIE.ie_key())
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/ceskatelevize.py
youtube_dl/extractor/ceskatelevize.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse_unquote,
    compat_urllib_parse_urlparse,
)
from ..utils import (
    ExtractorError,
    float_or_none,
    sanitized_Request,
    str_or_none,
    traverse_obj,
    urlencode_postdata,
    USER_AGENTS,
)


class CeskaTelevizeIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?ceskatelevize\.cz/(?:ivysilani|porady|zive)/(?:[^/?#&]+/)*(?P<id>[^/#?]+)'
    _TESTS = [{
        'url': 'http://www.ceskatelevize.cz/ivysilani/10441294653-hyde-park-civilizace/215411058090502/bonus/20641-bonus-01-en',
        'info_dict': {
            'id': '61924494877028507',
            'ext': 'mp4',
            'title': 'Bonus 01 - En - Hyde Park Civilizace',
            'description': 'English Subtittles',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 81.3,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        # live stream
        'url': 'http://www.ceskatelevize.cz/zive/ct1/',
        'info_dict': {
            'id': '102',
            'ext': 'mp4',
            'title': r'ČT1 - živé vysílání online',
            'description': 'Sledujte živé vysílání kanálu ČT1 online. Vybírat si můžete i z dalších kanálů České televize na kterémkoli z vašich zařízení.',
            'is_live': True,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        # another
        'url': 'http://www.ceskatelevize.cz/ivysilani/zive/ct4/',
        'only_matching': True,
        'info_dict': {
            'id': 402,
            'ext': 'mp4',
            'title': r're:^ČT Sport \d{4}-\d{2}-\d{2} \d{2}:\d{2}$',
            'is_live': True,
        },
        # 'skip': 'Georestricted to Czech Republic',
    }, {
        'url': 'http://www.ceskatelevize.cz/ivysilani/embed/iFramePlayer.php?hash=d6a3e1370d2e4fa76296b90bad4dfc19673b641e&IDEC=217 562 22150/0004&channelID=1&width=100%25',
        'only_matching': True,
    }, {
        # video with 18+ caution trailer
        'url': 'http://www.ceskatelevize.cz/porady/10520528904-queer/215562210900007-bogotart/',
        'info_dict': {
            'id': '215562210900007-bogotart',
            'title': 'Bogotart - Queer',
            'description': 'Hlavní město Kolumbie v doprovodu queer umělců. Vroucí svět plný vášně, sebevědomí, ale i násilí a bolesti',
        },
        'playlist': [{
            'info_dict': {
                'id': '61924494877311053',
                'ext': 'mp4',
                'title': 'Bogotart - Queer (Varování 18+)',
                'duration': 11.9,
            },
        }, {
            'info_dict': {
                'id': '61924494877068022',
                'ext': 'mp4',
                'title': 'Bogotart - Queer (Queer)',
                'thumbnail': r're:^https?://.*\.jpg',
                'duration': 1558.3,
            },
        }],
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        # iframe embed
        'url': 'http://www.ceskatelevize.cz/porady/10614999031-neviditelni/21251212048/',
        'only_matching': True,
    }]

    def _search_nextjs_data(self, webpage, video_id, **kw):
        return self._parse_json(
            self._search_regex(
                r'(?s)<script[^>]+id=[\'"]__NEXT_DATA__[\'"][^>]*>([^<]+)</script>',
                webpage, 'next.js data', **kw),
            video_id, **kw)

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage, urlh = self._download_webpage_handle(url, playlist_id)
        parsed_url = compat_urllib_parse_urlparse(urlh.geturl())
        site_name = self._og_search_property('site_name', webpage, fatal=False, default='Česká televize')
        playlist_title = self._og_search_title(webpage, default=None)
        if site_name and playlist_title:
            playlist_title = re.split(r'\s*[—|]\s*%s' % (site_name, ), playlist_title, 1)[0]
        playlist_description = self._og_search_description(webpage, default=None)
        if playlist_description:
            playlist_description = playlist_description.replace('\xa0', ' ')

        type_ = 'IDEC'
        if re.search(r'(^/porady|/zive)/', parsed_url.path):
            next_data = self._search_nextjs_data(webpage, playlist_id)
            if '/zive/' in parsed_url.path:
                idec = traverse_obj(next_data, ('props', 'pageProps', 'data', 'liveBroadcast', 'current', 'idec'), get_all=False)
            else:
                idec = traverse_obj(next_data, ('props', 'pageProps', 'data', ('show', 'mediaMeta'), 'idec'), get_all=False)
                if not idec:
                    idec = traverse_obj(next_data, ('props', 'pageProps', 'data', 'videobonusDetail', 'bonusId'), get_all=False)
                    if idec:
                        type_ = 'bonus'
            if not idec:
                raise ExtractorError('Failed to find IDEC id')
            iframe_hash = self._download_webpage(
                'https://www.ceskatelevize.cz/v-api/iframe-hash/',
                playlist_id, note='Getting IFRAME hash')
            query = {'hash': iframe_hash, 'origin': 'iVysilani', 'autoStart': 'true', type_: idec, }
            webpage = self._download_webpage(
                'https://www.ceskatelevize.cz/ivysilani/embed/iFramePlayer.php',
                playlist_id, note='Downloading player', query=query)

        NOT_AVAILABLE_STRING = 'This content is not available at your territory due to limited copyright.'
        if '%s</p>' % NOT_AVAILABLE_STRING in webpage:
            self.raise_geo_restricted(NOT_AVAILABLE_STRING)
        if any(not_found in webpage for not_found in ('Neplatný parametr pro videopřehrávač', 'IDEC nebyl nalezen', )):
            raise ExtractorError('no video with IDEC available', video_id=idec, expected=True)

        type_ = None
        episode_id = None

        playlist = self._parse_json(
            self._search_regex(
                r'getPlaylistUrl\(\[({.+?})\]', webpage, 'playlist',
                default='{}'), playlist_id)
        if playlist:
            type_ = playlist.get('type')
            episode_id = playlist.get('id')

        if not type_:
            type_ = self._html_search_regex(
                r'getPlaylistUrl\(\[\{"type":"(.+?)","id":".+?"\}\],',
                webpage, 'type')
        if not episode_id:
            episode_id = self._html_search_regex(
                r'getPlaylistUrl\(\[\{"type":".+?","id":"(.+?)"\}\],',
                webpage, 'episode_id')

        data = {
            'playlist[0][type]': type_,
            'playlist[0][id]': episode_id,
            'requestUrl': parsed_url.path,
            'requestSource': 'iVysilani',
        }

        entries = []

        for user_agent in (None, USER_AGENTS['Safari']):
            req = sanitized_Request(
                'https://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist/',
                data=urlencode_postdata(data))

            req.add_header('Content-type', 'application/x-www-form-urlencoded')
            req.add_header('x-addr', '127.0.0.1')
            req.add_header('X-Requested-With', 'XMLHttpRequest')
            if user_agent:
                req.add_header('User-Agent', user_agent)
            req.add_header('Referer', url)

            playlistpage = self._download_json(req, playlist_id, fatal=False)

            if not playlistpage:
                continue

            playlist_url = playlistpage['url']
            if playlist_url == 'error_region':
                raise ExtractorError(NOT_AVAILABLE_STRING, expected=True)

            req = sanitized_Request(compat_urllib_parse_unquote(playlist_url))
            req.add_header('Referer', url)

            playlist = self._download_json(req, playlist_id, fatal=False)
            if not playlist:
                continue

            playlist = playlist.get('playlist')
            if not isinstance(playlist, list):
                continue

            playlist_len = len(playlist)

            for num, item in enumerate(playlist):
                is_live = item.get('type') == 'LIVE'
                formats = []
                for format_id, stream_url in item.get('streamUrls', {}).items():
                    if 'drmOnly=true' in stream_url:
                        continue
                    if 'playerType=flash' in stream_url:
                        stream_formats = self._extract_m3u8_formats(
                            stream_url, playlist_id, 'mp4', 'm3u8_native',
                            m3u8_id='hls-%s' % format_id, fatal=False)
                    else:
                        stream_formats = self._extract_mpd_formats(
                            stream_url, playlist_id,
                            mpd_id='dash-%s' % format_id, fatal=False)
                    # See https://github.com/ytdl-org/youtube-dl/issues/12119#issuecomment-280037031
                    if format_id == 'audioDescription':
                        for f in stream_formats:
                            f['source_preference'] = -10
                    formats.extend(stream_formats)

                if user_agent and len(entries) == playlist_len:
                    entries[num]['formats'].extend(formats)
                    continue

                item_id = str_or_none(item.get('id') or item['assetId'])
                title = item['title']

                duration = float_or_none(item.get('duration'))
                thumbnail = item.get('previewImageUrl')

                subtitles = {}
                if item.get('type') == 'VOD':
                    subs = item.get('subtitles')
                    if subs:
                        subtitles = self.extract_subtitles(episode_id, subs)

                if playlist_len == 1:
                    final_title = playlist_title or title
                else:
                    final_title = '%s (%s)' % (playlist_title, title)

                entries.append({
                    'id': item_id,
                    'title': final_title,
                    'description': playlist_description if playlist_len == 1 else None,
                    'thumbnail': thumbnail,
                    'duration': duration,
                    'formats': formats,
                    'subtitles': subtitles,
                    'is_live': is_live,
                })

        for e in entries:
            self._sort_formats(e['formats'])

        if len(entries) == 1:
            return entries[0]
        return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)

    def _get_subtitles(self, episode_id, subs):
        original_subtitles = self._download_webpage(
            subs[0]['url'], episode_id, 'Downloading subtitles')
        srt_subs = self._fix_subtitles(original_subtitles)
        return {
            'cs': [{
                'ext': 'srt',
                'data': srt_subs,
            }]
        }

    @staticmethod
    def _fix_subtitles(subtitles):
        """ Convert millisecond-based subtitles to SRT """

        def _msectotimecode(msec):
            """ Helper utility to convert milliseconds to timecode """
            components = []
            for divider in [1000, 60, 60, 100]:
                components.append(msec % divider)
                msec //= divider
            return '{3:02}:{2:02}:{1:02},{0:03}'.format(*components)

        def _fix_subtitle(subtitle):
            for line in subtitle.splitlines():
                m = re.match(r'^\s*([0-9]+);\s*([0-9]+)\s+([0-9]+)\s*$', line)
                if m:
                    yield m.group(1)
                    start, stop = (_msectotimecode(int(t)) for t in m.groups()[1:])
                    yield '{0} --> {1}'.format(start, stop)
                else:
                    yield line

        return '\r\n'.join(_fix_subtitle(subtitles))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
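The _msectotimecode helper in CeskaTelevizeIE._fix_subtitles above assembles the SRT timecode back-to-front from successive remainders; a standalone check (input value invented):

# illustrative only -- not part of the scraped file
def _msectotimecode(msec):
    components = []
    for divider in [1000, 60, 60, 100]:
        components.append(msec % divider)
        msec //= divider
    # components = [ms, s, min, h]; the format string reverses the order
    return '{3:02}:{2:02}:{1:02},{0:03}'.format(*components)

print(_msectotimecode(3723456))  # 01:02:03,456 -- i.e. 1 h 2 min 3.456 s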
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/watchindianporn.py
youtube_dl/extractor/watchindianporn.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import parse_duration


class WatchIndianPornIE(InfoExtractor):
    IE_DESC = 'Watch Indian Porn'
    _VALID_URL = r'https?://(?:www\.)?watchindianporn\.net/(?:[^/]+/)*video/(?P<display_id>[^/]+)-(?P<id>[a-zA-Z0-9]+)\.html'
    _TEST = {
        'url': 'http://www.watchindianporn.net/video/hot-milf-from-kerala-shows-off-her-gorgeous-large-breasts-on-camera-RZa2avywNPa.html',
        'md5': '249589a164dde236ec65832bfce17440',
        'info_dict': {
            'id': 'RZa2avywNPa',
            'display_id': 'hot-milf-from-kerala-shows-off-her-gorgeous-large-breasts-on-camera',
            'ext': 'mp4',
            'title': 'Hot milf from kerala shows off her gorgeous large breasts on camera',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 226,
            'view_count': int,
            'categories': list,
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        display_id = mobj.group('display_id')

        webpage = self._download_webpage(url, display_id)

        info_dict = self._parse_html5_media_entries(url, webpage, video_id)[0]

        title = self._html_search_regex(
            (r'<title>(.+?)\s*-\s*Indian\s+Porn</title>',
             r'<h4>(.+?)</h4>'),
            webpage, 'title')

        duration = parse_duration(self._search_regex(
            r'Time:\s*<strong>\s*(.+?)\s*</strong>',
            webpage, 'duration', fatal=False))

        view_count = int(self._search_regex(
            r'(?s)Time:\s*<strong>.*?</strong>.*?<strong>\s*(\d+)\s*</strong>',
            webpage, 'view count', fatal=False))

        categories = re.findall(
            r'<a[^>]+class=[\'"]categories[\'"][^>]*>\s*([^<]+)\s*</a>',
            webpage)

        info_dict.update({
            'id': video_id,
            'display_id': display_id,
            'http_headers': {
                'Referer': url,
            },
            'title': title,
            'duration': duration,
            'view_count': view_count,
            'categories': categories,
            'age_limit': 18,
        })

        return info_dict
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/vvvvid.py
youtube_dl/extractor/vvvvid.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from .youtube import YoutubeIE
from ..utils import (
    ExtractorError,
    int_or_none,
    str_or_none,
)


class VVVVIDIE(InfoExtractor):
    _VALID_URL_BASE = r'https?://(?:www\.)?vvvvid\.it/(?:#!)?(?:show|anime|film|series)/'
    _VALID_URL = r'%s(?P<show_id>\d+)/[^/]+/(?P<season_id>\d+)/(?P<id>[0-9]+)' % _VALID_URL_BASE
    _TESTS = [{
        # video_type == 'video/vvvvid'
        'url': 'https://www.vvvvid.it/#!show/434/perche-dovrei-guardarlo-di-dario-moccia/437/489048/ping-pong',
        'md5': 'b8d3cecc2e981adc3835adf07f6df91b',
        'info_dict': {
            'id': '489048',
            'ext': 'mp4',
            'title': 'Ping Pong',
            'duration': 239,
            'series': '"Perché dovrei guardarlo?" di Dario Moccia',
            'season_id': '437',
            'episode': 'Ping Pong',
            'episode_number': 1,
            'episode_id': '3334',
            'view_count': int,
            'like_count': int,
            'repost_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # video_type == 'video/rcs'
        'url': 'https://www.vvvvid.it/#!show/376/death-note-live-action/377/482493/episodio-01',
        'md5': '33e0edfba720ad73a8782157fdebc648',
        'info_dict': {
            'id': '482493',
            'ext': 'mp4',
            'title': 'Episodio 01',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # video_type == 'video/youtube'
        'url': 'https://www.vvvvid.it/show/404/one-punch-man/406/486683/trailer',
        'md5': '33e0edfba720ad73a8782157fdebc648',
        'info_dict': {
            'id': 'RzmFKUDOUgw',
            'ext': 'mp4',
            'title': 'Trailer',
            'upload_date': '20150906',
            'description': 'md5:a5e802558d35247fee285875328c0b80',
            'uploader_id': 'BandaiVisual',
            'uploader': 'BANDAI NAMCO Arts Channel',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # video_type == 'video/dash'
        'url': 'https://www.vvvvid.it/show/683/made-in-abyss/1542/693786/nanachi',
        'info_dict': {
            'id': '693786',
            'ext': 'mp4',
            'title': 'Nanachi',
        },
        'params': {
            'skip_download': True,
            'format': 'mp4',
        },
    }, {
        'url': 'https://www.vvvvid.it/show/434/perche-dovrei-guardarlo-di-dario-moccia/437/489048',
        'only_matching': True
    }]
    _conn_id = None

    def _real_initialize(self):
        self._conn_id = self._download_json(
            'https://www.vvvvid.it/user/login',
            None, headers=self.geo_verification_headers())['data']['conn_id']

    def _download_info(self, show_id, path, video_id, fatal=True, query=None):
        q = {
            'conn_id': self._conn_id,
        }
        if query:
            q.update(query)
        response = self._download_json(
            'https://www.vvvvid.it/vvvvid/ondemand/%s/%s' % (show_id, path),
            video_id, headers=self.geo_verification_headers(), query=q, fatal=fatal)
        if not (response or fatal):
            return
        if response.get('result') == 'error':
            raise ExtractorError('%s said: %s' % (
                self.IE_NAME, response['message']), expected=True)
        return response['data']

    def _extract_common_video_info(self, video_data):
        return {
            'thumbnail': video_data.get('thumbnail'),
            'episode_id': str_or_none(video_data.get('id')),
        }

    def _real_extract(self, url):
        show_id, season_id, video_id = re.match(self._VALID_URL, url).groups()

        response = self._download_info(
            show_id, 'season/%s' % season_id,
            video_id, query={'video_id': video_id})

        vid = int(video_id)
        video_data = list(filter(
            lambda episode: episode.get('video_id') == vid, response))[0]
        title = video_data['title']
        formats = []

        # vvvvid embed_info decryption algorithm is reverse engineered from function $ds(h) at vvvvid.js
        def ds(h):
            g = "MNOPIJKL89+/4567UVWXQRSTEFGHABCDcdefYZabstuvopqr0123wxyzklmnghij"

            def f(m):
                l = []
                o = 0
                b = False
                m_len = len(m)
                while ((not b) and o < m_len):
                    n = m[o] << 2
                    o += 1
                    k = -1
                    j = -1
                    if o < m_len:
                        n += m[o] >> 4
                        o += 1
                        if o < m_len:
                            k = (m[o - 1] << 4) & 255
                            k += m[o] >> 2
                            o += 1
                            if o < m_len:
                                j = (m[o - 1] << 6) & 255
                                j += m[o]
                                o += 1
                            else:
                                b = True
                        else:
                            b = True
                    else:
                        b = True
                    l.append(n)
                    if k != -1:
                        l.append(k)
                    if j != -1:
                        l.append(j)
                return l

            c = []
            for e in h:
                c.append(g.index(e))

            c_len = len(c)
            for e in range(c_len * 2 - 1, -1, -1):
                a = c[e % c_len] ^ c[(e + 1) % c_len]
                c[e % c_len] = a

            c = f(c)
            d = ''
            for e in c:
                d += chr(e)

            return d

        info = {}

        def metadata_from_url(r_url):
            if not info and r_url:
                mobj = re.search(r'_(?:S(\d+))?Ep(\d+)', r_url)
                if mobj:
                    info['episode_number'] = int(mobj.group(2))
                    season_number = mobj.group(1)
                    if season_number:
                        info['season_number'] = int(season_number)

        video_type = video_data.get('video_type')
        is_youtube = False
        for quality in ('', '_sd'):
            embed_code = video_data.get('embed_info' + quality)
            if not embed_code:
                continue
            embed_code = ds(embed_code)
            if video_type == 'video/kenc':
                embed_code = re.sub(
                    r'https?(://[^/]+)/z/',
                    r'https\1/i/', embed_code).replace('/manifest.f4m', '/master.m3u8')
                kenc = self._download_json(
                    'https://www.vvvvid.it/kenc', video_id, query={
                        'action': 'kt',
                        'conn_id': self._conn_id,
                        'url': embed_code,
                    }, fatal=False) or {}
                kenc_message = kenc.get('message')
                if kenc_message:
                    embed_code += '?' + ds(kenc_message)
                formats.extend(self._extract_m3u8_formats(
                    embed_code, video_id, 'mp4', m3u8_id='hls', fatal=False))
            elif video_type == 'video/rcs':
                formats.extend(self._extract_akamai_formats(embed_code, video_id))
            elif video_type == 'video/youtube':
                info.update({
                    '_type': 'url_transparent',
                    'ie_key': YoutubeIE.ie_key(),
                    'url': embed_code,
                })
                is_youtube = True
                break
            elif video_type == 'video/dash':
                formats.extend(self._extract_m3u8_formats(
                    embed_code, video_id, 'mp4', m3u8_id='hls', fatal=False))
            else:
                formats.extend(self._extract_wowza_formats(
                    'http://sb.top-ix.org/videomg/_definst_/mp4:%s/playlist.m3u8' % embed_code, video_id))
            metadata_from_url(embed_code)

        if not is_youtube:
            self._sort_formats(formats)
            info['formats'] = formats

        metadata_from_url(video_data.get('thumbnail'))
        info.update(self._extract_common_video_info(video_data))
        info.update({
            'id': video_id,
            'title': title,
            'duration': int_or_none(video_data.get('length')),
            'series': video_data.get('show_title'),
            'season_id': season_id,
            'episode': title,
            'view_count': int_or_none(video_data.get('views')),
            'like_count': int_or_none(video_data.get('video_likes')),
            'repost_count': int_or_none(video_data.get('video_shares')),
        })
        return info


class VVVVIDShowIE(VVVVIDIE):
    _VALID_URL = r'(?P<base_url>%s(?P<id>\d+)(?:/(?P<show_title>[^/?&#]+))?)/?(?:[?#&]|$)' % VVVVIDIE._VALID_URL_BASE
    _TESTS = [{
        'url': 'https://www.vvvvid.it/show/156/psyco-pass',
        'info_dict': {
            'id': '156',
            'title': 'Psycho-Pass',
            'description': 'md5:94d572c0bd85894b193b8aebc9a3a806',
        },
        'playlist_count': 46,
    }, {
        'url': 'https://www.vvvvid.it/show/156',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        base_url, show_id, show_title = re.match(self._VALID_URL, url).groups()

        seasons = self._download_info(
            show_id, 'seasons/', show_title)

        show_info = self._download_info(
            show_id, 'info/', show_title, fatal=False)

        if not show_title:
            base_url += "/title"

        entries = []
        for season in (seasons or []):
            episodes = season.get('episodes') or []
            playlist_title = season.get('name') or show_info.get('title')
            for episode in episodes:
                if episode.get('playable') is False:
                    continue
                season_id = str_or_none(episode.get('season_id'))
                video_id = str_or_none(episode.get('video_id'))
                if not (season_id and video_id):
                    continue
                info = self._extract_common_video_info(episode)
                info.update({
                    '_type': 'url_transparent',
                    'ie_key': VVVVIDIE.ie_key(),
                    'url': '/'.join([base_url, season_id, video_id]),
                    'title': episode.get('title'),
                    'description': episode.get('description'),
                    'season_id': season_id,
                    'playlist_title': playlist_title,
                })
                entries.append(info)

        return self.playlist_result(
            entries, show_id, show_info.get('title'), show_info.get('description'))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
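The unscrambling pass at the heart of VVVVIDIE's reverse-engineered ds() above is a cyclic XOR walked from the top index down over two laps of the list; a toy run on invented values:

# illustrative only -- not part of the scraped file
c = [12, 34, 56]
for e in range(len(c) * 2 - 1, -1, -1):
    # each element is XORed with its right-hand neighbour, cyclically
    c[e % len(c)] ^= c[(e + 1) % len(c)]
print(c)  # [34, 56, 46]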
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/meipai.py
youtube_dl/extractor/meipai.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    parse_duration,
    unified_timestamp,
)


class MeipaiIE(InfoExtractor):
    IE_DESC = '美拍'
    _VALID_URL = r'https?://(?:www\.)?meipai\.com/media/(?P<id>[0-9]+)'
    _TESTS = [{
        # regular uploaded video
        'url': 'http://www.meipai.com/media/531697625',
        'md5': 'e3e9600f9e55a302daecc90825854b4f',
        'info_dict': {
            'id': '531697625',
            'ext': 'mp4',
            'title': '#葉子##阿桑##余姿昀##超級女聲#',
            'description': '#葉子##阿桑##余姿昀##超級女聲#',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 152,
            'timestamp': 1465492420,
            'upload_date': '20160609',
            'view_count': 35511,
            'creator': '她她-TATA',
            'tags': ['葉子', '阿桑', '余姿昀', '超級女聲'],
        }
    }, {
        # record of live streaming
        'url': 'http://www.meipai.com/media/585526361',
        'md5': 'ff7d6afdbc6143342408223d4f5fb99a',
        'info_dict': {
            'id': '585526361',
            'ext': 'mp4',
            'title': '姿昀和善願 練歌練琴啦😁😁😁',
            'description': '姿昀和善願 練歌練琴啦😁😁😁',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 5975,
            'timestamp': 1474311799,
            'upload_date': '20160919',
            'view_count': 1215,
            'creator': '她她-TATA',
        }
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        title = self._og_search_title(
            webpage, default=None) or self._html_search_regex(
            r'<title[^>]*>([^<]+)</title>', webpage, 'title')

        formats = []

        # recorded playback of live streaming
        m3u8_url = self._html_search_regex(
            r'file:\s*encodeURIComponent\((["\'])(?P<url>(?:(?!\1).)+)\1\)',
            webpage, 'm3u8 url', group='url', default=None)
        if m3u8_url:
            formats.extend(self._extract_m3u8_formats(
                m3u8_url, video_id, 'mp4',
                entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))

        if not formats:
            # regular uploaded video
            video_url = self._search_regex(
                r'data-video=(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 'video url',
                group='url', default=None)
            if video_url:
                formats.append({
                    'url': video_url,
                    'format_id': 'http',
                })

        timestamp = unified_timestamp(self._og_search_property(
            'video:release_date', webpage, 'release date', fatal=False))

        tags = self._og_search_property(
            'video:tag', webpage, 'tags', default='').split(',')

        view_count = int_or_none(self._html_search_meta(
            'interactionCount', webpage, 'view count'))
        duration = parse_duration(self._html_search_meta(
            'duration', webpage, 'duration'))
        creator = self._og_search_property(
            'video:director', webpage, 'creator', fatal=False)

        return {
            'id': video_id,
            'title': title,
            'description': self._og_search_description(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
            'duration': duration,
            'timestamp': timestamp,
            'view_count': view_count,
            'creator': creator,
            'tags': tags,
            'formats': formats,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/fox9.py
youtube_dl/extractor/fox9.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor


class FOX9IE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?fox9\.com/video/(?P<id>\d+)'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        return self.url_result(
            'anvato:anvato_epfox_app_web_prod_b3373168e12f423f41504f207000188daf88251b:' + video_id,
            'Anvato', video_id)


class FOX9NewsIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?fox9\.com/news/(?P<id>[^/?&#]+)'
    _TEST = {
        'url': 'https://www.fox9.com/news/black-bear-in-tree-draws-crowd-in-downtown-duluth-minnesota',
        'md5': 'd6e1b2572c3bab8a849c9103615dd243',
        'info_dict': {
            'id': '314473',
            'ext': 'mp4',
            'title': 'Bear climbs tree in downtown Duluth',
            'description': 'md5:6a36bfb5073a411758a752455408ac90',
            'duration': 51,
            'timestamp': 1478123580,
            'upload_date': '20161102',
            'uploader': 'EPFOX',
            'categories': ['News', 'Sports'],
            'tags': ['news', 'video'],
        },
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        anvato_id = self._search_regex(
            r'anvatoId\s*:\s*[\'"](\d+)', webpage, 'anvato id')
        return self.url_result('https://www.fox9.com/video/' + anvato_id, 'FOX9')
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/cloudy.py
youtube_dl/extractor/cloudy.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    str_to_int,
    unified_strdate,
)


class CloudyIE(InfoExtractor):
    IE_DESC = 'cloudy.ec'
    _VALID_URL = r'https?://(?:www\.)?cloudy\.ec/(?:v/|embed\.php\?.*?\bid=)(?P<id>[A-Za-z0-9]+)'
    _TESTS = [{
        'url': 'https://www.cloudy.ec/v/af511e2527aac',
        'md5': '29832b05028ead1b58be86bf319397ca',
        'info_dict': {
            'id': 'af511e2527aac',
            'ext': 'mp4',
            'title': 'Funny Cats and Animals Compilation june 2013',
            'upload_date': '20130913',
            'view_count': int,
        }
    }, {
        'url': 'http://www.cloudy.ec/embed.php?autoplay=1&id=af511e2527aac',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(
            'https://www.cloudy.ec/embed.php', video_id, query={
                'id': video_id,
                'playerPage': 1,
                'autoplay': 1,
            })

        info = self._parse_html5_media_entries(url, webpage, video_id)[0]

        webpage = self._download_webpage(
            'https://www.cloudy.ec/v/%s' % video_id, video_id, fatal=False)

        if webpage:
            info.update({
                'title': self._search_regex(
                    r'<h\d[^>]*>([^<]+)<', webpage, 'title'),
                'upload_date': unified_strdate(self._search_regex(
                    r'>Published at (\d{4}-\d{1,2}-\d{1,2})', webpage,
                    'upload date', fatal=False)),
                'view_count': str_to_int(self._search_regex(
                    r'([\d,.]+) views<', webpage, 'view count', fatal=False)),
            })

        if not info.get('title'):
            info['title'] = video_id

        info['id'] = video_id

        return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/vevo.py
youtube_dl/extractor/vevo.py
from __future__ import unicode_literals

import re
import json

from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_urlparse,
    compat_HTTPError,
)
from ..utils import (
    ExtractorError,
    int_or_none,
    parse_iso8601,
)


class VevoBaseIE(InfoExtractor):
    def _extract_json(self, webpage, video_id):
        return self._parse_json(
            self._search_regex(
                r'window\.__INITIAL_STORE__\s*=\s*({.+?});\s*</script>',
                webpage, 'initial store'),
            video_id)


class VevoIE(VevoBaseIE):
    '''
    Accepts urls from vevo.com or in the format 'vevo:{id}'
    (currently used by MTVIE and MySpaceIE)
    '''
    _VALID_URL = r'''(?x)
        (?:https?://(?:www\.)?vevo\.com/watch/(?!playlist|genre)(?:[^/]+/(?:[^/]+/)?)?|
           https?://cache\.vevo\.com/m/html/embed\.html\?video=|
           https?://videoplayer\.vevo\.com/embed/embedded\?videoId=|
           https?://embed\.vevo\.com/.*?[?&]isrc=|
           vevo:)
        (?P<id>[^&?#]+)'''

    _TESTS = [{
        'url': 'http://www.vevo.com/watch/hurts/somebody-to-die-for/GB1101300280',
        'md5': '95ee28ee45e70130e3ab02b0f579ae23',
        'info_dict': {
            'id': 'GB1101300280',
            'ext': 'mp4',
            'title': 'Hurts - Somebody to Die For',
            'timestamp': 1372057200,
            'upload_date': '20130624',
            'uploader': 'Hurts',
            'track': 'Somebody to Die For',
            'artist': 'Hurts',
            'genre': 'Pop',
        },
        'expected_warnings': ['Unable to download SMIL file', 'Unable to download info'],
    }, {
        'note': 'v3 SMIL format',
        'url': 'http://www.vevo.com/watch/cassadee-pope/i-wish-i-could-break-your-heart/USUV71302923',
        'md5': 'f6ab09b034f8c22969020b042e5ac7fc',
        'info_dict': {
            'id': 'USUV71302923',
            'ext': 'mp4',
            'title': 'Cassadee Pope - I Wish I Could Break Your Heart',
            'timestamp': 1392796919,
            'upload_date': '20140219',
            'uploader': 'Cassadee Pope',
            'track': 'I Wish I Could Break Your Heart',
            'artist': 'Cassadee Pope',
            'genre': 'Country',
        },
        'expected_warnings': ['Unable to download SMIL file', 'Unable to download info'],
    }, {
        'note': 'Age-limited video',
        'url': 'https://www.vevo.com/watch/justin-timberlake/tunnel-vision-explicit/USRV81300282',
        'info_dict': {
            'id': 'USRV81300282',
            'ext': 'mp4',
            'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
            'age_limit': 18,
            'timestamp': 1372888800,
            'upload_date': '20130703',
            'uploader': 'Justin Timberlake',
            'track': 'Tunnel Vision (Explicit)',
            'artist': 'Justin Timberlake',
            'genre': 'Pop',
        },
        'expected_warnings': ['Unable to download SMIL file', 'Unable to download info'],
    }, {
        'note': 'No video_info',
        'url': 'http://www.vevo.com/watch/k-camp-1/Till-I-Die/USUV71503000',
        'md5': '8b83cc492d72fc9cf74a02acee7dc1b0',
        'info_dict': {
            'id': 'USUV71503000',
            'ext': 'mp4',
            'title': 'K Camp ft. T.I. - Till I Die',
            'age_limit': 18,
            'timestamp': 1449468000,
            'upload_date': '20151207',
            'uploader': 'K Camp',
            'track': 'Till I Die',
            'artist': 'K Camp',
            'genre': 'Hip-Hop',
        },
        'expected_warnings': ['Unable to download SMIL file', 'Unable to download info'],
    }, {
        'note': 'Featured test',
        'url': 'https://www.vevo.com/watch/lemaitre/Wait/USUV71402190',
        'md5': 'd28675e5e8805035d949dc5cf161071d',
        'info_dict': {
            'id': 'USUV71402190',
            'ext': 'mp4',
            'title': 'Lemaitre ft. LoLo - Wait',
            'age_limit': 0,
            'timestamp': 1413432000,
            'upload_date': '20141016',
            'uploader': 'Lemaitre',
            'track': 'Wait',
            'artist': 'Lemaitre',
            'genre': 'Electronic',
        },
        'expected_warnings': ['Unable to download SMIL file', 'Unable to download info'],
    }, {
        'note': 'Only available via webpage',
        'url': 'http://www.vevo.com/watch/GBUV71600656',
        'md5': '67e79210613865b66a47c33baa5e37fe',
        'info_dict': {
            'id': 'GBUV71600656',
            'ext': 'mp4',
            'title': 'ABC - Viva Love',
            'age_limit': 0,
            'timestamp': 1461830400,
            'upload_date': '20160428',
            'uploader': 'ABC',
            'track': 'Viva Love',
            'artist': 'ABC',
            'genre': 'Pop',
        },
        'expected_warnings': ['Failed to download video versions info'],
    }, {
        # no genres available
        'url': 'http://www.vevo.com/watch/INS171400764',
        'only_matching': True,
    }, {
        # Another case available only via the webpage; using streams/streamsV3 formats
        # Geo-restricted to Netherlands/Germany
        'url': 'http://www.vevo.com/watch/boostee/pop-corn-clip-officiel/FR1A91600909',
        'only_matching': True,
    }, {
        'url': 'https://embed.vevo.com/?isrc=USH5V1923499&partnerId=4d61b777-8023-4191-9ede-497ed6c24647&partnerAdCode=',
        'only_matching': True,
    }]
    _VERSIONS = {
        0: 'youtube',  # only in AuthenticateVideo videoVersions
        1: 'level3',
        2: 'akamai',
        3: 'level3',
        4: 'amazon',
    }

    def _initialize_api(self, video_id):
        webpage = self._download_webpage(
            'https://accounts.vevo.com/token', None,
            note='Retrieving oauth token',
            errnote='Unable to retrieve oauth token',
            data=json.dumps({
                'client_id': 'SPupX1tvqFEopQ1YS6SS',
                'grant_type': 'urn:vevo:params:oauth:grant-type:anonymous',
            }).encode('utf-8'),
            headers={
                'Content-Type': 'application/json',
            })

        if re.search(r'(?i)THIS PAGE IS CURRENTLY UNAVAILABLE IN YOUR REGION', webpage):
            self.raise_geo_restricted(
                '%s said: This page is currently unavailable in your region' % self.IE_NAME)

        auth_info = self._parse_json(webpage, video_id)
        self._api_url_template = self.http_scheme() + '//apiv2.vevo.com/%s?token=' + auth_info['legacy_token']

    def _call_api(self, path, *args, **kwargs):
        try:
            data = self._download_json(self._api_url_template % path, *args, **kwargs)
        except ExtractorError as e:
            if isinstance(e.cause, compat_HTTPError):
                errors = self._parse_json(e.cause.read().decode(), None)['errors']
                error_message = ', '.join([error['message'] for error in errors])
                raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message), expected=True)
            raise
        return data

    def _real_extract(self, url):
        video_id = self._match_id(url)

        self._initialize_api(video_id)

        video_info = self._call_api(
            'video/%s' % video_id, video_id, 'Downloading api video info',
            'Failed to download video info')

        video_versions = self._call_api(
            'video/%s/streams' % video_id, video_id,
            'Downloading video versions info',
            'Failed to download video versions info',
            fatal=False)

        # Some videos are only available via webpage (e.g.
        # https://github.com/ytdl-org/youtube-dl/issues/9366)
        if not video_versions:
            webpage = self._download_webpage(url, video_id)
            json_data = self._extract_json(webpage, video_id)
            if 'streams' in json_data.get('default', {}):
                video_versions = json_data['default']['streams'][video_id][0]
            else:
                video_versions = [
                    value
                    for key, value in json_data['apollo']['data'].items()
                    if key.startswith('%s.streams' % video_id)]

        uploader = None
        artist = None
        featured_artist = None
        artists = video_info.get('artists')
        for curr_artist in artists:
            if curr_artist.get('role') == 'Featured':
                featured_artist = curr_artist['name']
            else:
                artist = uploader = curr_artist['name']

        formats = []
        for video_version in video_versions:
            version = self._VERSIONS.get(video_version.get('version'), 'generic')
            version_url = video_version.get('url')
            if not version_url:
                continue

            if '.ism' in version_url:
                continue
            elif '.mpd' in version_url:
                formats.extend(self._extract_mpd_formats(
                    version_url, video_id, mpd_id='dash-%s' % version,
                    note='Downloading %s MPD information' % version,
                    errnote='Failed to download %s MPD information' % version,
                    fatal=False))
            elif '.m3u8' in version_url:
                formats.extend(self._extract_m3u8_formats(
                    version_url, video_id, 'mp4', 'm3u8_native',
                    m3u8_id='hls-%s' % version,
                    note='Downloading %s m3u8 information' % version,
                    errnote='Failed to download %s m3u8 information' % version,
                    fatal=False))
            else:
                m = re.search(r'''(?xi)
                    _(?P<width>[0-9]+)x(?P<height>[0-9]+)
                    _(?P<vcodec>[a-z0-9]+)
                    _(?P<vbr>[0-9]+)
                    _(?P<acodec>[a-z0-9]+)
                    _(?P<abr>[0-9]+)
                    \.(?P<ext>[a-z0-9]+)''', version_url)
                if not m:
                    continue

                formats.append({
                    'url': version_url,
                    'format_id': 'http-%s-%s' % (version, video_version['quality']),
                    'vcodec': m.group('vcodec'),
                    'acodec': m.group('acodec'),
                    'vbr': int(m.group('vbr')),
                    'abr': int(m.group('abr')),
                    'ext': m.group('ext'),
                    'width': int(m.group('width')),
                    'height': int(m.group('height')),
                })
        self._sort_formats(formats)

        track = video_info['title']
        if featured_artist:
            artist = '%s ft. %s' % (artist, featured_artist)
        title = '%s - %s' % (artist, track) if artist else track

        genres = video_info.get('genres')
        genre = (
            genres[0] if genres and isinstance(genres, list)
            and isinstance(genres[0], compat_str) else None)

        is_explicit = video_info.get('isExplicit')
        if is_explicit is True:
            age_limit = 18
        elif is_explicit is False:
            age_limit = 0
        else:
            age_limit = None

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': video_info.get('imageUrl') or video_info.get('thumbnailUrl'),
            'timestamp': parse_iso8601(video_info.get('releaseDate')),
            'uploader': uploader,
            'duration': int_or_none(video_info.get('duration')),
            'view_count': int_or_none(video_info.get('views', {}).get('total')),
            'age_limit': age_limit,
            'track': track,
            'artist': uploader,
            'genre': genre,
        }


class VevoPlaylistIE(VevoBaseIE):
    _VALID_URL = r'https?://(?:www\.)?vevo\.com/watch/(?P<kind>playlist|genre)/(?P<id>[^/?#&]+)'

    _TESTS = [{
        'url': 'http://www.vevo.com/watch/playlist/dadbf4e7-b99f-4184-9670-6f0e547b6a29',
        'info_dict': {
            'id': 'dadbf4e7-b99f-4184-9670-6f0e547b6a29',
            'title': 'Best-Of: Birdman',
        },
        'playlist_count': 10,
    }, {
        'url': 'http://www.vevo.com/watch/genre/rock',
        'info_dict': {
            'id': 'rock',
            'title': 'Rock',
        },
        'playlist_count': 20,
    }, {
        'url': 'http://www.vevo.com/watch/playlist/dadbf4e7-b99f-4184-9670-6f0e547b6a29?index=0',
        'md5': '32dcdfddddf9ec6917fc88ca26d36282',
        'info_dict': {
            'id': 'USCMV1100073',
            'ext': 'mp4',
            'title': 'Birdman - Y.U. MAD',
            'timestamp': 1323417600,
            'upload_date': '20111209',
            'uploader': 'Birdman',
            'track': 'Y.U. MAD',
            'artist': 'Birdman',
            'genre': 'Rap/Hip-Hop',
        },
        'expected_warnings': ['Unable to download SMIL file'],
    }, {
        'url': 'http://www.vevo.com/watch/genre/rock?index=0',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        playlist_id = mobj.group('id')
        playlist_kind = mobj.group('kind')

        webpage = self._download_webpage(url, playlist_id)

        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
        index = qs.get('index', [None])[0]

        if index:
            video_id = self._search_regex(
                r'<meta[^>]+content=(["\'])vevo://video/(?P<id>.+?)\1[^>]*>',
                webpage, 'video id', default=None, group='id')
            if video_id:
                return self.url_result('vevo:%s' % video_id, VevoIE.ie_key())

        playlists = self._extract_json(webpage, playlist_id)['default']['%ss' % playlist_kind]

        playlist = (list(playlists.values())[0]
                    if playlist_kind == 'playlist' else playlists[playlist_id])

        entries = [
            self.url_result('vevo:%s' % src, VevoIE.ie_key())
            for src in playlist['isrcs']]

        return self.playlist_result(
            entries, playlist.get('playlistId') or playlist_id,
            playlist.get('name'), playlist.get('description'))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
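Note: the verbose regex in VevoIE._real_extract above derives per-format metadata (resolution, codecs, bitrates) from the media URL itself. A self-contained sketch of that parsing step; the sample URL here is hypothetical, made up to fit the pattern:

import re

FORMAT_RE = re.compile(r'''(?xi)
    _(?P<width>[0-9]+)x(?P<height>[0-9]+)
    _(?P<vcodec>[a-z0-9]+)
    _(?P<vbr>[0-9]+)
    _(?P<acodec>[a-z0-9]+)
    _(?P<abr>[0-9]+)
    \.(?P<ext>[a-z0-9]+)''')

# Hypothetical URL shaped like the CDN URLs the extractor encounters.
url = 'http://example.com/video_1280x720_h264_2000_aac_128.mp4'
m = FORMAT_RE.search(url)
if m:
    # -> 1280 720 2000 128 mp4
    print(m.group('width'), m.group('height'),
          m.group('vbr'), m.group('abr'), m.group('ext'))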
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/giga.py
youtube_dl/extractor/giga.py
# coding: utf-8
from __future__ import unicode_literals

import itertools

from .common import InfoExtractor
from ..utils import (
    qualities,
    compat_str,
    parse_duration,
    parse_iso8601,
    str_to_int,
)


class GigaIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?giga\.de/(?:[^/]+/)*(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'http://www.giga.de/filme/anime-awesome/trailer/anime-awesome-chihiros-reise-ins-zauberland-das-beste-kommt-zum-schluss/',
        'md5': '6bc5535e945e724640664632055a584f',
        'info_dict': {
            'id': '2622086',
            'display_id': 'anime-awesome-chihiros-reise-ins-zauberland-das-beste-kommt-zum-schluss',
            'ext': 'mp4',
            'title': 'Anime Awesome: Chihiros Reise ins Zauberland – Das Beste kommt zum Schluss',
            'description': 'md5:afdf5862241aded4718a30dff6a57baf',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 578,
            'timestamp': 1414749706,
            'upload_date': '20141031',
            'uploader': 'Robin Schweiger',
            'view_count': int,
        },
    }, {
        'url': 'http://www.giga.de/games/channel/giga-top-montag/giga-topmontag-die-besten-serien-2014/',
        'only_matching': True,
    }, {
        'url': 'http://www.giga.de/extra/netzkultur/videos/giga-games-tom-mats-robin-werden-eigene-wege-gehen-eine-ankuendigung/',
        'only_matching': True,
    }, {
        'url': 'http://www.giga.de/tv/jonas-liest-spieletitel-eingedeutscht-episode-2/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        video_id = self._search_regex(
            [r'data-video-id="(\d+)"', r'/api/video/jwplayer/#v=(\d+)'],
            webpage, 'video id')

        playlist = self._download_json(
            'http://www.giga.de/api/syndication/video/video_id/%s/playlist.json?content=syndication/key/368b5f151da4ae05ced7fa296bdff65a/'
            % video_id, video_id)[0]

        quality = qualities(['normal', 'hd720'])

        formats = []
        for format_id in itertools.count(0):
            fmt = playlist.get(compat_str(format_id))
            if not fmt:
                break
            formats.append({
                'url': fmt['src'],
                'format_id': '%s-%s' % (fmt['quality'], fmt['type'].split('/')[-1]),
                'quality': quality(fmt['quality']),
            })
        self._sort_formats(formats)

        title = self._html_search_meta(
            'title', webpage, 'title', fatal=True)
        description = self._html_search_meta(
            'description', webpage, 'description')
        thumbnail = self._og_search_thumbnail(webpage)

        duration = parse_duration(self._search_regex(
            r'(?s)(?:data-video-id="{0}"|data-video="[^"]*/api/video/jwplayer/#v={0}[^"]*")[^>]*>.+?<span class="duration">([^<]+)</span>'.format(video_id),
            webpage, 'duration', fatal=False))

        timestamp = parse_iso8601(self._search_regex(
            r'datetime="([^"]+)"', webpage, 'upload date', fatal=False))
        uploader = self._search_regex(
            r'class="author">([^<]+)</a>', webpage, 'uploader', fatal=False)

        view_count = str_to_int(self._search_regex(
            r'<span class="views"><strong>([\d.,]+)</strong>',
            webpage, 'view count', fatal=False))

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'timestamp': timestamp,
            'uploader': uploader,
            'view_count': view_count,
            'formats': formats,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/ctvnews.py
youtube_dl/extractor/ctvnews.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import orderedSet


class CTVNewsIE(InfoExtractor):
    _VALID_URL = r'https?://(?:.+?\.)?ctvnews\.ca/(?:video\?(?:clip|playlist|bin)Id=|.*?)(?P<id>[0-9.]+)'
    _TESTS = [{
        'url': 'http://www.ctvnews.ca/video?clipId=901995',
        'md5': '9b8624ba66351a23e0b6e1391971f9af',
        'info_dict': {
            'id': '901995',
            'ext': 'flv',
            'title': 'Extended: \'That person cannot be me\' Johnson says',
            'description': 'md5:958dd3b4f5bbbf0ed4d045c790d89285',
            'timestamp': 1467286284,
            'upload_date': '20160630',
        }
    }, {
        'url': 'http://www.ctvnews.ca/video?playlistId=1.2966224',
        'info_dict': {
            'id': '1.2966224',
        },
        'playlist_mincount': 19,
    }, {
        'url': 'http://www.ctvnews.ca/video?binId=1.2876780',
        'info_dict': {
            'id': '1.2876780',
        },
        'playlist_mincount': 100,
    }, {
        'url': 'http://www.ctvnews.ca/1.810401',
        'only_matching': True,
    }, {
        'url': 'http://www.ctvnews.ca/canadiens-send-p-k-subban-to-nashville-in-blockbuster-trade-1.2967231',
        'only_matching': True,
    }, {
        'url': 'http://vancouverisland.ctvnews.ca/video?clipId=761241',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        page_id = self._match_id(url)

        def ninecninemedia_url_result(clip_id):
            return {
                '_type': 'url_transparent',
                'id': clip_id,
                'url': '9c9media:ctvnews_web:%s' % clip_id,
                'ie_key': 'NineCNineMedia',
            }

        if page_id.isdigit():
            return ninecninemedia_url_result(page_id)
        else:
            webpage = self._download_webpage('http://www.ctvnews.ca/%s' % page_id, page_id, query={
                'ot': 'example.AjaxPageLayout.ot',
                'maxItemsPerPage': 1000000,
            })
            entries = [ninecninemedia_url_result(clip_id) for clip_id in orderedSet(
                re.findall(r'clip\.id\s*=\s*(\d+);', webpage))]
            return self.playlist_result(entries, page_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/canvas.py
youtube_dl/extractor/canvas.py
from __future__ import unicode_literals

import re
import json

from .common import InfoExtractor
from .gigya import GigyaBaseIE
from ..compat import compat_HTTPError
from ..utils import (
    ExtractorError,
    clean_html,
    extract_attributes,
    float_or_none,
    get_element_by_class,
    int_or_none,
    merge_dicts,
    str_or_none,
    strip_or_none,
    url_or_none,
)


class CanvasIE(InfoExtractor):
    _VALID_URL = r'https?://mediazone\.vrt\.be/api/v1/(?P<site_id>canvas|een|ketnet|vrt(?:video|nieuws)|sporza|dako)/assets/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://mediazone.vrt.be/api/v1/ketnet/assets/md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
        'md5': '68993eda72ef62386a15ea2cf3c93107',
        'info_dict': {
            'id': 'md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
            'display_id': 'md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
            'ext': 'mp4',
            'title': 'Nachtwacht: De Greystook',
            'description': 'Nachtwacht: De Greystook',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 1468.04,
        },
        'expected_warnings': ['is not a supported codec', 'Unknown MIME type'],
    }, {
        'url': 'https://mediazone.vrt.be/api/v1/canvas/assets/mz-ast-5e5f90b6-2d72-4c40-82c2-e134f884e93e',
        'only_matching': True,
    }]
    _GEO_BYPASS = False
    _HLS_ENTRY_PROTOCOLS_MAP = {
        'HLS': 'm3u8_native',
        'HLS_AES': 'm3u8',
    }
    _REST_API_BASE = 'https://media-services-public.vrt.be/vualto-video-aggregator-web/rest/external/v1'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        site_id, video_id = mobj.group('site_id'), mobj.group('id')

        data = None
        if site_id != 'vrtvideo':
            # Old API endpoint, serves more formats but may fail for some videos
            data = self._download_json(
                'https://mediazone.vrt.be/api/v1/%s/assets/%s'
                % (site_id, video_id), video_id, 'Downloading asset JSON',
                'Unable to download asset JSON', fatal=False)

        # New API endpoint
        if not data:
            headers = self.geo_verification_headers()
            headers.update({'Content-Type': 'application/json'})
            token = self._download_json(
                '%s/tokens' % self._REST_API_BASE, video_id,
                'Downloading token', data=b'', headers=headers)['vrtPlayerToken']
            data = self._download_json(
                '%s/videos/%s' % (self._REST_API_BASE, video_id),
                video_id, 'Downloading video JSON', query={
                    'vrtPlayerToken': token,
                    'client': '%s@PROD' % site_id,
                }, expected_status=400)
            if not data.get('title'):
                code = data.get('code')
                if code == 'AUTHENTICATION_REQUIRED':
                    self.raise_login_required()
                elif code == 'INVALID_LOCATION':
                    self.raise_geo_restricted(countries=['BE'])
                raise ExtractorError(data.get('message') or code, expected=True)

        title = data['title']
        description = data.get('description')

        formats = []
        for target in data['targetUrls']:
            format_url, format_type = url_or_none(target.get('url')), str_or_none(target.get('type'))
            if not format_url or not format_type:
                continue
            format_type = format_type.upper()
            if format_type in self._HLS_ENTRY_PROTOCOLS_MAP:
                formats.extend(self._extract_m3u8_formats(
                    format_url, video_id, 'mp4', self._HLS_ENTRY_PROTOCOLS_MAP[format_type],
                    m3u8_id=format_type, fatal=False))
            elif format_type == 'HDS':
                formats.extend(self._extract_f4m_formats(
                    format_url, video_id, f4m_id=format_type, fatal=False))
            elif format_type == 'MPEG_DASH':
                formats.extend(self._extract_mpd_formats(
                    format_url, video_id, mpd_id=format_type, fatal=False))
            elif format_type == 'HSS':
                formats.extend(self._extract_ism_formats(
                    format_url, video_id, ism_id='mss', fatal=False))
            else:
                formats.append({
                    'format_id': format_type,
                    'url': format_url,
                })
        self._sort_formats(formats)

        subtitles = {}
        subtitle_urls = data.get('subtitleUrls')
        if isinstance(subtitle_urls, list):
            for subtitle in subtitle_urls:
                subtitle_url = subtitle.get('url')
                if subtitle_url and subtitle.get('type') == 'CLOSED':
                    subtitles.setdefault('nl', []).append({'url': subtitle_url})

        return {
            'id': video_id,
            'display_id': video_id,
            'title': title,
            'description': description,
            'formats': formats,
            'duration': float_or_none(data.get('duration'), 1000),
            'thumbnail': data.get('posterImageUrl'),
            'subtitles': subtitles,
        }


class CanvasEenIE(InfoExtractor):
    IE_DESC = 'canvas.be and een.be'
    _VALID_URL = r'https?://(?:www\.)?(?P<site_id>canvas|een)\.be/(?:[^/]+/)*(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'http://www.canvas.be/video/de-afspraak/najaar-2015/de-afspraak-veilt-voor-de-warmste-week',
        'md5': 'ed66976748d12350b118455979cca293',
        'info_dict': {
            'id': 'mz-ast-5e5f90b6-2d72-4c40-82c2-e134f884e93e',
            'display_id': 'de-afspraak-veilt-voor-de-warmste-week',
            'ext': 'flv',
            'title': 'De afspraak veilt voor de Warmste Week',
            'description': 'md5:24cb860c320dc2be7358e0e5aa317ba6',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 49.02,
        },
        'expected_warnings': ['is not a supported codec'],
    }, {
        # with subtitles
        'url': 'http://www.canvas.be/video/panorama/2016/pieter-0167',
        'info_dict': {
            'id': 'mz-ast-5240ff21-2d30-4101-bba6-92b5ec67c625',
            'display_id': 'pieter-0167',
            'ext': 'mp4',
            'title': 'Pieter 0167',
            'description': 'md5:943cd30f48a5d29ba02c3a104dc4ec4e',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 2553.08,
            'subtitles': {
                'nl': [{
                    'ext': 'vtt',
                }],
            },
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'Pagina niet gevonden',
    }, {
        'url': 'https://www.een.be/thuis/emma-pakt-thilly-aan',
        'info_dict': {
            'id': 'md-ast-3a24ced2-64d7-44fb-b4ed-ed1aafbf90b8',
            'display_id': 'emma-pakt-thilly-aan',
            'ext': 'mp4',
            'title': 'Emma pakt Thilly aan',
            'description': 'md5:c5c9b572388a99b2690030afa3f3bad7',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 118.24,
        },
        'params': {
            'skip_download': True,
        },
        'expected_warnings': ['is not a supported codec'],
    }, {
        'url': 'https://www.canvas.be/check-point/najaar-2016/de-politie-uw-vriend',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        site_id, display_id = mobj.group('site_id'), mobj.group('id')

        webpage = self._download_webpage(url, display_id)

        title = strip_or_none(self._search_regex(
            r'<h1[^>]+class="video__body__header__title"[^>]*>(.+?)</h1>',
            webpage, 'title', default=None) or self._og_search_title(
            webpage, default=None))

        video_id = self._html_search_regex(
            r'data-video=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 'video id',
            group='id')

        return {
            '_type': 'url_transparent',
            'url': 'https://mediazone.vrt.be/api/v1/%s/assets/%s' % (site_id, video_id),
            'ie_key': CanvasIE.ie_key(),
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': self._og_search_description(webpage),
        }


class VrtNUIE(GigyaBaseIE):
    IE_DESC = 'VrtNU.be'
    _VALID_URL = r'https?://(?:www\.)?vrt\.be/vrtnu/a-z/(?:[^/]+/){2}(?P<id>[^/?#&]+)'
    _TESTS = [{
        # Available via old API endpoint
        'url': 'https://www.vrt.be/vrtnu/a-z/postbus-x/1989/postbus-x-s1989a1/',
        'info_dict': {
            'id': 'pbs-pub-e8713dac-899e-41de-9313-81269f4c04ac$vid-90c932b1-e21d-4fb8-99b1-db7b49cf74de',
            'ext': 'mp4',
            'title': 'Postbus X - Aflevering 1 (Seizoen 1989)',
            'description': 'md5:b704f669eb9262da4c55b33d7c6ed4b7',
            'duration': 1457.04,
            'thumbnail': r're:^https?://.*\.jpg$',
            'series': 'Postbus X',
            'season': 'Seizoen 1989',
            'season_number': 1989,
            'episode': 'De zwarte weduwe',
            'episode_number': 1,
            'timestamp': 1595822400,
            'upload_date': '20200727',
        },
        'skip': 'This video is only available for registered users',
        'params': {
            'username': '<snip>',
            'password': '<snip>',
        },
        'expected_warnings': ['is not a supported codec'],
    }, {
        # Only available via new API endpoint
        'url': 'https://www.vrt.be/vrtnu/a-z/kamp-waes/1/kamp-waes-s1a5/',
        'info_dict': {
            'id': 'pbs-pub-0763b56c-64fb-4d38-b95b-af60bf433c71$vid-ad36a73c-4735-4f1f-b2c0-a38e6e6aa7e1',
            'ext': 'mp4',
            'title': 'Aflevering 5',
            'description': 'Wie valt door de mand tijdens een missie?',
            'duration': 2967.06,
            'season': 'Season 1',
            'season_number': 1,
            'episode_number': 5,
        },
        'skip': 'This video is only available for registered users',
        'params': {
            'username': '<snip>',
            'password': '<snip>',
        },
        'expected_warnings': ['Unable to download asset JSON', 'is not a supported codec', 'Unknown MIME type'],
    }]
    _NETRC_MACHINE = 'vrtnu'
    _APIKEY = '3_0Z2HujMtiWq_pkAjgnS2Md2E11a1AwZjYiBETtwNE-EoEHDINgtnvcAOpNgmrVGy'
    _CONTEXT_ID = 'R3595707040'

    def _real_initialize(self):
        self._login()

    def _login(self):
        username, password = self._get_login_info()
        if username is None:
            return

        auth_data = {
            'APIKey': self._APIKEY,
            'targetEnv': 'jssdk',
            'loginID': username,
            'password': password,
            'authMode': 'cookie',
        }

        auth_info = self._gigya_login(auth_data)

        # Sometimes authentication fails for no good reason, retry
        login_attempt = 1
        while login_attempt <= 3:
            try:
                # When requesting a token, no actual token is returned, but the
                # necessary cookies are set.
                self._request_webpage(
                    'https://token.vrt.be',
                    None, note='Requesting a token', errnote='Could not get a token',
                    headers={
                        'Content-Type': 'application/json',
                        'Referer': 'https://www.vrt.be/vrtnu/',
                    },
                    data=json.dumps({
                        'uid': auth_info['UID'],
                        'uidsig': auth_info['UIDSignature'],
                        'ts': auth_info['signatureTimestamp'],
                        'email': auth_info['profile']['email'],
                    }).encode('utf-8'))
            except ExtractorError as e:
                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
                    login_attempt += 1
                    self.report_warning('Authentication failed')
                    self._sleep(1, None, msg_template='Waiting for %(timeout)s seconds before trying again')
                else:
                    raise e
            else:
                break

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        attrs = extract_attributes(self._search_regex(
            r'(<nui-media[^>]+>)', webpage, 'media element'))
        video_id = attrs['videoid']
        publication_id = attrs.get('publicationid')
        if publication_id:
            video_id = publication_id + '$' + video_id

        page = (self._parse_json(self._search_regex(
            r'digitalData\s*=\s*({.+?});', webpage, 'digial data',
            default='{}'), video_id, fatal=False) or {}).get('page') or {}

        info = self._search_json_ld(webpage, display_id, default={})
        return merge_dicts(info, {
            '_type': 'url_transparent',
            'url': 'https://mediazone.vrt.be/api/v1/vrtvideo/assets/%s' % video_id,
            'ie_key': CanvasIE.ie_key(),
            'id': video_id,
            'display_id': display_id,
            'season_number': int_or_none(page.get('episode_season')),
        })


class DagelijkseKostIE(InfoExtractor):
    IE_DESC = 'dagelijksekost.een.be'
    _VALID_URL = r'https?://dagelijksekost\.een\.be/gerechten/(?P<id>[^/?#&]+)'
    _TEST = {
        'url': 'https://dagelijksekost.een.be/gerechten/hachis-parmentier-met-witloof',
        'md5': '30bfffc323009a3e5f689bef6efa2365',
        'info_dict': {
            'id': 'md-ast-27a4d1ff-7d7b-425e-b84f-a4d227f592fa',
            'display_id': 'hachis-parmentier-met-witloof',
            'ext': 'mp4',
            'title': 'Hachis parmentier met witloof',
            'description': 'md5:9960478392d87f63567b5b117688cdc5',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 283.02,
        },
        'expected_warnings': ['is not a supported codec'],
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        title = strip_or_none(get_element_by_class(
            'dish-metadata__title', webpage
        ) or self._html_search_meta(
            'twitter:title', webpage))

        description = clean_html(get_element_by_class(
            'dish-description', webpage)
        ) or self._html_search_meta(
            ('description', 'twitter:description', 'og:description'),
            webpage)

        video_id = self._html_search_regex(
            r'data-url=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 'video id',
            group='id')

        return {
            '_type': 'url_transparent',
            'url': 'https://mediazone.vrt.be/api/v1/dako/assets/%s' % video_id,
            'ie_key': CanvasIE.ie_key(),
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/viki.py
youtube_dl/extractor/viki.py
# coding: utf-8
from __future__ import unicode_literals

import hashlib
import hmac
import json
import time

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    parse_age_limit,
    parse_iso8601,
    try_get,
)


class VikiBaseIE(InfoExtractor):
    _VALID_URL_BASE = r'https?://(?:www\.)?viki\.(?:com|net|mx|jp|fr)/'
    _API_URL_TEMPLATE = 'https://api.viki.io%s'

    _DEVICE_ID = '112395910d'
    _APP = '100005a'
    _APP_VERSION = '6.11.3'
    _APP_SECRET = 'd96704b180208dbb2efa30fe44c48bd8690441af9f567ba8fd710a72badc85198f7472'

    _GEO_BYPASS = False
    _NETRC_MACHINE = 'viki'

    _token = None

    _ERRORS = {
        'geo': 'Sorry, this content is not available in your region.',
        'upcoming': 'Sorry, this content is not yet available.',
        'paywall': 'Sorry, this content is only available to Viki Pass Plus subscribers',
    }

    def _stream_headers(self, timestamp, sig):
        return {
            'X-Viki-manufacturer': 'vivo',
            'X-Viki-device-model': 'vivo 1606',
            'X-Viki-device-os-ver': '6.0.1',
            'X-Viki-connection-type': 'WIFI',
            'X-Viki-carrier': '',
            'X-Viki-as-id': '100005a-1625321982-3932',
            'timestamp': str(timestamp),
            'signature': str(sig),
            'x-viki-app-ver': self._APP_VERSION
        }

    def _api_query(self, path, version=4, **kwargs):
        path += '?' if '?' not in path else '&'
        app = self._APP
        query = '/v{version}/{path}app={app}'.format(**locals())
        if self._token:
            query += '&token=%s' % self._token
        return query + ''.join('&{name}={val}'.format(**locals()) for name, val in kwargs.items())

    def _sign_query(self, path):
        timestamp = int(time.time())
        query = self._api_query(path, version=5)
        sig = hmac.new(
            self._APP_SECRET.encode('ascii'),
            '{query}&t={timestamp}'.format(**locals()).encode('ascii'),
            hashlib.sha1).hexdigest()
        return timestamp, sig, self._API_URL_TEMPLATE % query

    def _call_api(
            self, path, video_id, note='Downloading JSON metadata', data=None,
            query=None, fatal=True):
        if query is None:
            timestamp, sig, url = self._sign_query(path)
        else:
            url = self._API_URL_TEMPLATE % self._api_query(path, version=4)
        resp = self._download_json(
            url, video_id, note, fatal=fatal, query=query,
            data=json.dumps(data).encode('utf-8') if data else None,
            headers=({'x-viki-app-ver': self._APP_VERSION} if data
                     else self._stream_headers(timestamp, sig) if query is None
                     else None), expected_status=400) or {}

        self._raise_error(resp.get('error'), fatal)

        return resp

    def _raise_error(self, error, fatal=True):
        if error is None:
            return
        msg = '%s said: %s' % (self.IE_NAME, error)
        if fatal:
            raise ExtractorError(msg, expected=True)
        else:
            self.report_warning(msg)

    def _check_errors(self, data):
        for reason, status in (data.get('blocking') or {}).items():
            if status and reason in self._ERRORS:
                message = self._ERRORS[reason]
                if reason == 'geo':
                    self.raise_geo_restricted(msg=message)
                elif reason == 'paywall':
                    if try_get(data, lambda x: x['paywallable']['tvod']):
                        self._raise_error('This video is for rent only or TVOD (Transactional Video On demand)')
                    self.raise_login_required(message)
                self._raise_error(message)

    def _real_initialize(self):
        self._login()

    def _login(self):
        username, password = self._get_login_info()
        if username is None:
            return

        self._token = self._call_api(
            'sessions.json', None, 'Logging in', fatal=False,
            data={'username': username, 'password': password}).get('token')
        if not self._token:
            self.report_warning('Login Failed: Unable to get session token')

    @staticmethod
    def dict_selection(dict_obj, preferred_key):
        if preferred_key in dict_obj:
            return dict_obj[preferred_key]
        return (list(filter(None, dict_obj.values())) or [None])[0]


class VikiIE(VikiBaseIE):
    IE_NAME = 'viki'
    _VALID_URL = r'%s(?:videos|player)/(?P<id>[0-9]+v)' % VikiBaseIE._VALID_URL_BASE
    _TESTS = [{
        'note': 'Free non-DRM video with storyboards in MPD',
        'url': 'https://www.viki.com/videos/1175236v-choosing-spouse-by-lottery-episode-1',
        'info_dict': {
            'id': '1175236v',
            'ext': 'mp4',
            'title': 'Choosing Spouse by Lottery - Episode 1',
            'timestamp': 1606463239,
            'age_limit': 12,
            'uploader': 'FCC',
            'upload_date': '20201127',
        },
        'expected_warnings': ['Unknown MIME type image/jpeg in DASH manifest'],
        'params': {
            'format': 'bestvideo',
        },
    }, {
        'url': 'http://www.viki.com/videos/1023585v-heirs-episode-14',
        'info_dict': {
            'id': '1023585v',
            'ext': 'mp4',
            'title': 'Heirs - Episode 14',
            'uploader': 'SBS Contents Hub',
            'timestamp': 1385047627,
            'upload_date': '20131121',
            'age_limit': 13,
            'duration': 3570,
            'episode_number': 14,
        },
        'params': {
            'format': 'bestvideo',
        },
        'skip': 'Content is only available to Viki Pass Plus subscribers',
        'expected_warnings': ['Unknown MIME type image/jpeg in DASH manifest'],
    }, {
        # clip
        'url': 'http://www.viki.com/videos/1067139v-the-avengers-age-of-ultron-press-conference',
        'md5': '86c0b5dbd4d83a6611a79987cc7a1989',
        'info_dict': {
            'id': '1067139v',
            'ext': 'mp4',
            'title': "'The Avengers: Age of Ultron' Press Conference",
            'description': 'md5:d70b2f9428f5488321bfe1db10d612ea',
            'duration': 352,
            'timestamp': 1430380829,
            'upload_date': '20150430',
            'uploader': 'Arirang TV',
            'like_count': int,
            'age_limit': 0,
        },
        'skip': 'Sorry. There was an error loading this video',
    }, {
        'url': 'http://www.viki.com/videos/1048879v-ankhon-dekhi',
        'info_dict': {
            'id': '1048879v',
            'ext': 'mp4',
            'title': 'Ankhon Dekhi',
            'duration': 6512,
            'timestamp': 1408532356,
            'upload_date': '20140820',
            'uploader': 'Spuul',
            'like_count': int,
            'age_limit': 13,
        },
        'skip': 'Page not found!',
    }, {
        # episode
        'url': 'http://www.viki.com/videos/44699v-boys-over-flowers-episode-1',
        'md5': '670440c79f7109ca6564d4c7f24e3e81',
        'info_dict': {
            'id': '44699v',
            'ext': 'mp4',
            'title': 'Boys Over Flowers - Episode 1',
            'description': 'md5:b89cf50038b480b88b5b3c93589a9076',
            'duration': 4172,
            'timestamp': 1270496524,
            'upload_date': '20100405',
            'uploader': 'group8',
            'like_count': int,
            'age_limit': 15,
            'episode_number': 1,
        },
        'params': {
            'format': 'bestvideo',
        },
        'expected_warnings': ['Unknown MIME type image/jpeg in DASH manifest'],
    }, {
        # youtube external
        'url': 'http://www.viki.com/videos/50562v-poor-nastya-complete-episode-1',
        'md5': '63f8600c1da6f01b7640eee7eca4f1da',
        'info_dict': {
            'id': '50562v',
            'ext': 'webm',
            'title': 'Poor Nastya [COMPLETE] - Episode 1',
            'description': '',
            'duration': 606,
            'timestamp': 1274949505,
            'upload_date': '20101213',
            'uploader': 'ad14065n',
            'uploader_id': 'ad14065n',
            'like_count': int,
            'age_limit': 13,
        },
        'skip': 'Page not found!',
    }, {
        'url': 'http://www.viki.com/player/44699v',
        'only_matching': True,
    }, {
        # non-English description
        'url': 'http://www.viki.com/videos/158036v-love-in-magic',
        'md5': '78bf49fdaa51f9e7f9150262a9ef9bdf',
        'info_dict': {
            'id': '158036v',
            'ext': 'mp4',
            'uploader': 'I Planet Entertainment',
            'upload_date': '20111122',
            'timestamp': 1321985454,
            'description': 'md5:44b1e46619df3a072294645c770cef36',
            'title': 'Love in Magic',
            'age_limit': 15,
        },
        'params': {
            'format': 'bestvideo',
        },
        'expected_warnings': ['Unknown MIME type image/jpeg in DASH manifest'],
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        video = self._call_api(
            'videos/{0}.json'.format(video_id), video_id, 'Downloading video JSON', query={})

        self._check_errors(video)

        title = try_get(video, lambda x: x['titles']['en'], str)
        episode_number = int_or_none(video.get('number'))
        if not title:
            title = 'Episode %d' % episode_number if video.get('type') == 'episode' else video.get('id') or video_id
            container_titles = try_get(video, lambda x: x['container']['titles'], dict) or {}
            container_title = self.dict_selection(container_titles, 'en')
            if container_title and title == video_id:
                title = container_title
            else:
                title = '%s - %s' % (container_title, title)

        resp = self._call_api(
            'playback_streams/%s.json?drms=dt3&device_id=%s' % (video_id, self._DEVICE_ID),
            video_id, 'Downloading video streams JSON')['main'][0]

        mpd_url = resp['url']
        # 720p is hidden in another MPD which can be found in the current manifest content
        mpd_content = self._download_webpage(mpd_url, video_id, note='Downloading initial MPD manifest')
        mpd_url = self._search_regex(
            r'(?mi)<BaseURL>(http.+.mpd)', mpd_content, 'new manifest', default=mpd_url)
        if 'mpdhd_high' not in mpd_url:
            # Modify the URL to get 1080p
            mpd_url = mpd_url.replace('mpdhd', 'mpdhd_high')
        formats = self._extract_mpd_formats(mpd_url, video_id)
        self._sort_formats(formats)

        description = self.dict_selection(video.get('descriptions', {}), 'en')

        thumbnails = [{
            'id': thumbnail_id,
            'url': thumbnail['url'],
        } for thumbnail_id, thumbnail in (video.get('images') or {}).items() if thumbnail.get('url')]

        like_count = int_or_none(try_get(video, lambda x: x['likes']['count']))

        stream_id = try_get(resp, lambda x: x['properties']['track']['stream_id'])
        subtitles = dict((lang, [{
            'ext': ext,
            'url': self._API_URL_TEMPLATE % self._api_query(
                'videos/{0}/auth_subtitles/{1}.{2}'.format(video_id, lang, ext), stream_id=stream_id)
        } for ext in ('srt', 'vtt')]) for lang in (video.get('subtitle_completions') or {}).keys())

        return {
            'id': video_id,
            'formats': formats,
            'title': title,
            'description': description,
            'duration': int_or_none(video.get('duration')),
            'timestamp': parse_iso8601(video.get('created_at')),
            'uploader': video.get('author'),
            'uploader_url': video.get('author_url'),
            'like_count': like_count,
            'age_limit': parse_age_limit(video.get('rating')),
            'thumbnails': thumbnails,
            'subtitles': subtitles,
            'episode_number': episode_number,
        }


class VikiChannelIE(VikiBaseIE):
    IE_NAME = 'viki:channel'
    _VALID_URL = r'%s(?:tv|news|movies|artists)/(?P<id>[0-9]+c)' % VikiBaseIE._VALID_URL_BASE
    _TESTS = [{
        'url': 'http://www.viki.com/tv/50c-boys-over-flowers',
        'info_dict': {
            'id': '50c',
            'title': 'Boys Over Flowers',
            'description': 'md5:f08b679c200e1a273c695fe9986f21d7',
        },
        'playlist_mincount': 51,
    }, {
        'url': 'http://www.viki.com/tv/1354c-poor-nastya-complete',
        'info_dict': {
            'id': '1354c',
            'title': 'Poor Nastya [COMPLETE]',
            'description': 'md5:05bf5471385aa8b21c18ad450e350525',
        },
        'playlist_count': 127,
        'skip': 'Page not found',
    }, {
        'url': 'http://www.viki.com/news/24569c-showbiz-korea',
        'only_matching': True,
    }, {
        'url': 'http://www.viki.com/movies/22047c-pride-and-prejudice-2005',
        'only_matching': True,
    }, {
        'url': 'http://www.viki.com/artists/2141c-shinee',
        'only_matching': True,
    }]

    _video_types = ('episodes', 'movies', 'clips', 'trailers')

    def _entries(self, channel_id):
        params = {
            'app': self._APP, 'token': self._token, 'only_ids': 'true',
            'direction': 'asc', 'sort': 'number', 'per_page': 30
        }
        video_types = self._video_types
        for video_type in video_types:
            if video_type not in self._video_types:
                self.report_warning('Unknown video_type: ' + video_type)
            page_num = 0
            while True:
                page_num += 1
                params['page'] = page_num
                res = self._call_api(
                    'containers/{channel_id}/{video_type}.json'.format(**locals()), channel_id,
                    query=params, fatal=False,
                    note='Downloading %s JSON page %d' % (video_type.title(), page_num))

                for video_id in res.get('response') or []:
                    yield self.url_result('https://www.viki.com/videos/' + video_id, VikiIE.ie_key(), video_id)
                if not res.get('more'):
                    break

    def _real_extract(self, url):
        channel_id = self._match_id(url)

        channel = self._call_api('containers/%s.json' % channel_id, channel_id, 'Downloading channel JSON')

        self._check_errors(channel)

        return self.playlist_result(
            self._entries(channel_id), channel_id,
            self.dict_selection(channel['titles'], 'en'),
            self.dict_selection(channel['descriptions'], 'en'))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
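Note: VikiBaseIE._sign_query above signs each API path with HMAC-SHA1 over the query string plus a Unix timestamp. A standalone sketch of the same scheme using only the stdlib; the secret below is a placeholder, not Viki's real key:

import hashlib
import hmac
import time

APP_SECRET = 'not-the-real-secret'  # placeholder; the extractor ships its own value


def sign_query(query):
    # Compute HMAC-SHA1 over '<query>&t=<timestamp>' with the app secret,
    # mirroring the structure of VikiBaseIE._sign_query.
    timestamp = int(time.time())
    sig = hmac.new(
        APP_SECRET.encode('ascii'),
        '{0}&t={1}'.format(query, timestamp).encode('ascii'),
        hashlib.sha1).hexdigest()
    return timestamp, sig


timestamp, sig = sign_query('/v5/videos/1175236v.json?app=100005a')
print(timestamp, sig)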
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/parliamentliveuk.py
youtube_dl/extractor/parliamentliveuk.py
from __future__ import unicode_literals

from .common import InfoExtractor


class ParliamentLiveUKIE(InfoExtractor):
    IE_NAME = 'parliamentlive.tv'
    IE_DESC = 'UK parliament videos'
    _VALID_URL = r'(?i)https?://(?:www\.)?parliamentlive\.tv/Event/Index/(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'

    _TESTS = [{
        'url': 'http://parliamentlive.tv/Event/Index/c1e9d44d-fd6c-4263-b50f-97ed26cc998b',
        'info_dict': {
            'id': '1_af9nv9ym',
            'ext': 'mp4',
            'title': 'Home Affairs Committee',
            'uploader_id': 'FFMPEG-01',
            'timestamp': 1422696664,
            'upload_date': '20150131',
        },
    }, {
        'url': 'http://parliamentlive.tv/event/index/3f24936f-130f-40bf-9a5d-b3d6479da6a4',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(
            'http://vodplayer.parliamentlive.tv/?mid=' + video_id, video_id)
        widget_config = self._parse_json(self._search_regex(
            r'(?s)kWidgetConfig\s*=\s*({.+});',
            webpage, 'kaltura widget config'), video_id)
        kaltura_url = 'kaltura:%s:%s' % (
            widget_config['wid'][1:], widget_config['entry_id'])
        event_title = self._download_json(
            'http://parliamentlive.tv/Event/GetShareVideo/' + video_id, video_id)['event']['title']

        return {
            '_type': 'url_transparent',
            'title': event_title,
            'description': '',
            'url': kaltura_url,
            'ie_key': 'Kaltura',
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/wistia.py
youtube_dl/extractor/wistia.py
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    float_or_none,
    int_or_none,
    try_get,
    unescapeHTML,
)


class WistiaBaseIE(InfoExtractor):
    _VALID_ID_REGEX = r'(?P<id>[a-z0-9]{10})'
    _VALID_URL_BASE = r'https?://(?:fast\.)?wistia\.(?:net|com)/embed/'
    _EMBED_BASE_URL = 'http://fast.wistia.com/embed/'

    def _download_embed_config(self, config_type, config_id, referer):
        base_url = self._EMBED_BASE_URL + '%ss/%s' % (config_type, config_id)
        embed_config = self._download_json(
            base_url + '.json', config_id, headers={
                'Referer': referer if referer.startswith('http') else base_url,  # Some videos require this.
            })

        if isinstance(embed_config, dict) and embed_config.get('error'):
            raise ExtractorError(
                'Error while getting the playlist', expected=True)

        return embed_config

    def _extract_media(self, embed_config):
        data = embed_config['media']
        video_id = data['hashedId']
        title = data['name']

        formats = []
        thumbnails = []
        for a in data['assets']:
            aurl = a.get('url')
            if not aurl:
                continue
            astatus = a.get('status')
            atype = a.get('type')
            if (astatus is not None and astatus != 2) or atype in ('preview', 'storyboard'):
                continue
            elif atype in ('still', 'still_image'):
                thumbnails.append({
                    'url': aurl,
                    'width': int_or_none(a.get('width')),
                    'height': int_or_none(a.get('height')),
                    'filesize': int_or_none(a.get('size')),
                })
            else:
                aext = a.get('ext')
                display_name = a.get('display_name')
                format_id = atype
                if atype and atype.endswith('_video') and display_name:
                    format_id = '%s-%s' % (atype[:-6], display_name)
                f = {
                    'format_id': format_id,
                    'url': aurl,
                    'tbr': int_or_none(a.get('bitrate')) or None,
                    'preference': 1 if atype == 'original' else None,
                }
                if display_name == 'Audio':
                    f.update({
                        'vcodec': 'none',
                    })
                else:
                    f.update({
                        'width': int_or_none(a.get('width')),
                        'height': int_or_none(a.get('height')),
                        'vcodec': a.get('codec'),
                    })
                if a.get('container') == 'm3u8' or aext == 'm3u8':
                    ts_f = f.copy()
                    ts_f.update({
                        'ext': 'ts',
                        'format_id': f['format_id'].replace('hls-', 'ts-'),
                        'url': f['url'].replace('.bin', '.ts'),
                    })
                    formats.append(ts_f)
                    f.update({
                        'ext': 'mp4',
                        'protocol': 'm3u8_native',
                    })
                else:
                    f.update({
                        'container': a.get('container'),
                        'ext': aext,
                        'filesize': int_or_none(a.get('size')),
                    })
                formats.append(f)

        self._sort_formats(formats)

        subtitles = {}
        for caption in data.get('captions', []):
            language = caption.get('language')
            if not language:
                continue
            subtitles[language] = [{
                'url': self._EMBED_BASE_URL + 'captions/' + video_id + '.vtt?language=' + language,
            }]

        return {
            'id': video_id,
            'title': title,
            'description': data.get('seoDescription'),
            'formats': formats,
            'thumbnails': thumbnails,
            'duration': float_or_none(data.get('duration')),
            'timestamp': int_or_none(data.get('createdAt')),
            'subtitles': subtitles,
        }


class WistiaIE(WistiaBaseIE):
    _VALID_URL = r'(?:wistia:|%s(?:iframe|medias)/)%s' % (WistiaBaseIE._VALID_URL_BASE, WistiaBaseIE._VALID_ID_REGEX)

    _TESTS = [{
        # with hls video
        'url': 'wistia:807fafadvk',
        'md5': 'daff0f3687a41d9a71b40e0e8c2610fe',
        'info_dict': {
            'id': '807fafadvk',
            'ext': 'mp4',
            'title': 'Drip Brennan Dunn Workshop',
            'description': 'a JV Webinars video',
            'upload_date': '20160518',
            'timestamp': 1463607249,
            'duration': 4987.11,
        },
    }, {
        'url': 'wistia:sh7fpupwlt',
        'only_matching': True,
    }, {
        'url': 'http://fast.wistia.net/embed/iframe/sh7fpupwlt',
        'only_matching': True,
    }, {
        'url': 'http://fast.wistia.com/embed/iframe/sh7fpupwlt',
        'only_matching': True,
    }, {
        'url': 'http://fast.wistia.net/embed/medias/sh7fpupwlt.json',
        'only_matching': True,
    }]

    # https://wistia.com/support/embed-and-share/video-on-your-website
    @staticmethod
    def _extract_url(webpage):
        urls = WistiaIE._extract_urls(webpage)
        return urls[0] if urls else None

    @staticmethod
    def _extract_urls(webpage):
        urls = []
        for match in re.finditer(
                r'<(?:meta[^>]+?content|(?:iframe|script)[^>]+?src)=["\'](?P<url>(?:https?:)?//(?:fast\.)?wistia\.(?:net|com)/embed/(?:iframe|medias)/[a-z0-9]{10})',
                webpage):
            urls.append(unescapeHTML(match.group('url')))
        for match in re.finditer(
                r'''(?sx)
                    <div[^>]+class=(["'])(?:(?!\1).)*?\bwistia_async_(?P<id>[a-z0-9]{10})\b(?:(?!\1).)*?\1
                ''', webpage):
            urls.append('wistia:%s' % match.group('id'))
        for match in re.finditer(r'(?:data-wistia-?id=["\']|Wistia\.embed\(["\']|id=["\']wistia_)(?P<id>[a-z0-9]{10})', webpage):
            urls.append('wistia:%s' % match.group('id'))
        return urls

    def _real_extract(self, url):
        video_id = self._match_id(url)
        embed_config = self._download_embed_config('media', video_id, url)
        return self._extract_media(embed_config)


class WistiaPlaylistIE(WistiaBaseIE):
    _VALID_URL = r'%splaylists/%s' % (WistiaIE._VALID_URL_BASE, WistiaIE._VALID_ID_REGEX)

    _TEST = {
        'url': 'https://fast.wistia.net/embed/playlists/aodt9etokc',
        'info_dict': {
            'id': 'aodt9etokc',
        },
        'playlist_count': 3,
    }

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        playlist = self._download_embed_config('playlist', playlist_id, url)

        entries = []
        for media in (try_get(playlist, lambda x: x[0]['medias']) or []):
            embed_config = media.get('embed_config')
            if not embed_config:
                continue
            entries.append(self._extract_media(embed_config))

        return self.playlist_result(entries, playlist_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
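Note: WistiaIE._extract_urls above recognizes three embed styles (iframe/script src, async div class, and inline JS ids). A minimal sketch of the async-div case against a hand-written HTML snippet; the snippet is illustrative, not taken from a real page:

import re

webpage = '<div class="wistia_embed wistia_async_sh7fpupwlt videoFoam=true"></div>'

ids = [
    m.group('id') for m in re.finditer(
        r'''(?sx)
            <div[^>]+class=(["'])(?:(?!\1).)*?\bwistia_async_(?P<id>[a-z0-9]{10})\b(?:(?!\1).)*?\1
        ''', webpage)]
print(ids)  # -> ['sh7fpupwlt']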
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/twentythreevideo.py
youtube_dl/extractor/twentythreevideo.py
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import int_or_none


class TwentyThreeVideoIE(InfoExtractor):
    IE_NAME = '23video'
    _VALID_URL = r'https?://(?P<domain>[^.]+\.(?:twentythree\.net|23video\.com|filmweb\.no))/v\.ihtml/player\.html\?(?P<query>.*?\bphoto(?:_|%5f)id=(?P<id>\d+).*)'
    _TESTS = [{
        'url': 'https://video.twentythree.net/v.ihtml/player.html?showDescriptions=0&source=site&photo%5fid=20448876&autoPlay=1',
        'md5': '75fcf216303eb1dae9920d651f85ced4',
        'info_dict': {
            'id': '20448876',
            'ext': 'mp4',
            'title': 'Video Marketing Minute: Personalized Video',
            'timestamp': 1513855354,
            'upload_date': '20171221',
            'uploader_id': '12258964',
            'uploader': 'Rasmus Bysted',
        }
    }, {
        'url': 'https://bonnier-publications-danmark.23video.com/v.ihtml/player.html?token=f0dc46476e06e13afd5a1f84a29e31e8&source=embed&photo%5fid=36137620',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        domain, query, photo_id = re.match(self._VALID_URL, url).groups()
        base_url = 'https://%s' % domain
        photo_data = self._download_json(
            base_url + '/api/photo/list?' + query, photo_id, query={
                'format': 'json',
            }, transform_source=lambda s: self._search_regex(r'(?s)({.+})', s, 'photo data'))['photo']
        title = photo_data['title']

        formats = []

        audio_path = photo_data.get('audio_download')
        if audio_path:
            formats.append({
                'format_id': 'audio',
                'url': base_url + audio_path,
                'filesize': int_or_none(photo_data.get('audio_size')),
                'vcodec': 'none',
            })

        def add_common_info_to_list(l, template, id_field, id_value):
            f_base = template % id_value
            f_path = photo_data.get(f_base + 'download')
            if not f_path:
                return
            l.append({
                id_field: id_value,
                'url': base_url + f_path,
                'width': int_or_none(photo_data.get(f_base + 'width')),
                'height': int_or_none(photo_data.get(f_base + 'height')),
                'filesize': int_or_none(photo_data.get(f_base + 'size')),
            })

        for f in ('mobile_high', 'medium', 'hd', '1080p', '4k'):
            add_common_info_to_list(formats, 'video_%s_', 'format_id', f)

        thumbnails = []
        for t in ('quad16', 'quad50', 'quad75', 'quad100', 'small', 'portrait', 'standard', 'medium', 'large', 'original'):
            add_common_info_to_list(thumbnails, '%s_', 'id', t)

        return {
            'id': photo_id,
            'title': title,
            'timestamp': int_or_none(photo_data.get('creation_date_epoch')),
            'duration': int_or_none(photo_data.get('video_length')),
            'view_count': int_or_none(photo_data.get('view_count')),
            'comment_count': int_or_none(photo_data.get('number_of_comments')),
            'uploader_id': photo_data.get('user_id'),
            'uploader': photo_data.get('display_name'),
            'thumbnails': thumbnails,
            'formats': formats,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/npr.py
youtube_dl/extractor/npr.py
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    qualities,
    url_or_none,
)


class NprIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?npr\.org/(?:sections/[^/]+/)?\d{4}/\d{2}/\d{2}/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.npr.org/sections/allsongs/2015/10/21/449974205/new-music-from-beach-house-chairlift-cmj-discoveries-and-more',
        'info_dict': {
            'id': '449974205',
            'title': 'New Music From Beach House, Chairlift, CMJ Discoveries And More'
        },
        'playlist_count': 7,
    }, {
        'url': 'https://www.npr.org/sections/deceptivecadence/2015/10/09/446928052/music-from-the-shadows-ancient-armenian-hymns-and-piano-jazz',
        'info_dict': {
            'id': '446928052',
            'title': "Songs We Love: Tigran Hamasyan, 'Your Mercy is Boundless'"
        },
        'playlist': [{
            'md5': '12fa60cb2d3ed932f53609d4aeceabf1',
            'info_dict': {
                'id': '446929930',
                'ext': 'mp3',
                'title': 'Your Mercy is Boundless (Bazum en Qo gtutyunqd)',
                'duration': 402,
            },
        }],
    }, {
        # multimedia, not media title
        'url': 'https://www.npr.org/2017/06/19/533198237/tigers-jaw-tiny-desk-concert',
        'info_dict': {
            'id': '533198237',
            'title': 'Tigers Jaw: Tiny Desk Concert',
        },
        'playlist': [{
            'md5': '12fa60cb2d3ed932f53609d4aeceabf1',
            'info_dict': {
                'id': '533201718',
                'ext': 'mp4',
                'title': 'Tigers Jaw: Tiny Desk Concert',
                'duration': 402,
            },
        }],
        'expected_warnings': ['Failed to download m3u8 information'],
    }, {
        # multimedia, no formats, stream
        'url': 'https://www.npr.org/2020/02/14/805476846/laura-stevenson-tiny-desk-concert',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        story = self._download_json(
            'http://api.npr.org/query', playlist_id, query={
                'id': playlist_id,
                'fields': 'audio,multimedia,title',
                'format': 'json',
                'apiKey': 'MDAzMzQ2MjAyMDEyMzk4MTU1MDg3ZmM3MQ010',
            })['list']['story'][0]
        playlist_title = story.get('title', {}).get('$text')

        KNOWN_FORMATS = ('threegp', 'm3u8', 'smil', 'mp4', 'mp3')
        quality = qualities(KNOWN_FORMATS)

        entries = []
        for media in story.get('audio', []) + story.get('multimedia', []):
            media_id = media['id']

            formats = []
            for format_id, formats_entry in media.get('format', {}).items():
                if not formats_entry:
                    continue
                if isinstance(formats_entry, list):
                    formats_entry = formats_entry[0]
                format_url = formats_entry.get('$text')
                if not format_url:
                    continue
                if format_id in KNOWN_FORMATS:
                    if format_id == 'm3u8':
                        formats.extend(self._extract_m3u8_formats(
                            format_url, media_id, 'mp4', 'm3u8_native',
                            m3u8_id='hls', fatal=False))
                    elif format_id == 'smil':
                        smil_formats = self._extract_smil_formats(
                            format_url, media_id, transform_source=lambda s: s.replace(
                                'rtmp://flash.npr.org/ondemand/', 'https://ondemand.npr.org/'))
                        self._check_formats(smil_formats, media_id)
                        formats.extend(smil_formats)
                    else:
                        formats.append({
                            'url': format_url,
                            'format_id': format_id,
                            'quality': quality(format_id),
                        })
            for stream_id, stream_entry in media.get('stream', {}).items():
                if not isinstance(stream_entry, dict):
                    continue
                if stream_id != 'hlsUrl':
                    continue
                stream_url = url_or_none(stream_entry.get('$text'))
                if not stream_url:
                    continue
                formats.extend(self._extract_m3u8_formats(
                    stream_url, stream_id, 'mp4', 'm3u8_native',
                    m3u8_id='hls', fatal=False))
            self._sort_formats(formats)

            entries.append({
                'id': media_id,
                'title': media.get('title', {}).get('$text') or playlist_title,
                'thumbnail': media.get('altImageUrl', {}).get('$text'),
                'duration': int_or_none(media.get('duration', {}).get('$text')),
                'formats': formats,
            })

        return self.playlist_result(entries, playlist_id, playlist_title)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
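Note: the qualities() helper used by NprIE (and GigaIE above) maps a format_id to its position in a preference-ordered list, with unknown ids ranked lowest. A stdlib-only sketch of equivalent behavior, assuming the semantics the call sites above rely on (unknown ids map to -1):

def qualities(quality_ids):
    # Return a ranking function: higher index in quality_ids = higher quality.
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q


quality = qualities(('threegp', 'm3u8', 'smil', 'mp4', 'mp3'))
print(quality('mp3'), quality('threegp'), quality('unknown'))  # -> 4 0 -1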
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/udn.py
youtube_dl/extractor/udn.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    int_or_none,
    js_to_json,
)
from ..compat import compat_urlparse


class UDNEmbedIE(InfoExtractor):
    IE_DESC = '聯合影音'
    _PROTOCOL_RELATIVE_VALID_URL = r'//video\.udn\.com/(?:embed|play)/news/(?P<id>\d+)'
    _VALID_URL = r'https?:' + _PROTOCOL_RELATIVE_VALID_URL
    _TESTS = [{
        'url': 'http://video.udn.com/embed/news/300040',
        'info_dict': {
            'id': '300040',
            'ext': 'mp4',
            'title': '生物老師男變女 全校挺"做自己"',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'expected_warnings': ['Failed to parse JSON Expecting value'],
    }, {
        'url': 'https://video.udn.com/embed/news/300040',
        'only_matching': True,
    }, {
        # From https://video.udn.com/news/303776
        'url': 'https://video.udn.com/play/news/303776',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        page = self._download_webpage(url, video_id)

        options_str = self._html_search_regex(
            r'var\s+options\s*=\s*([^;]+);', page, 'options')
        trans_options_str = js_to_json(options_str)
        options = self._parse_json(trans_options_str, 'options', fatal=False) or {}
        if options:
            video_urls = options['video']
            title = options['title']
            poster = options.get('poster')
        else:
            video_urls = self._parse_json(self._html_search_regex(
                r'"video"\s*:\s*({.+?})\s*,', trans_options_str, 'video urls'), 'video urls')
            title = self._html_search_regex(
                r"title\s*:\s*'(.+?)'\s*,", options_str, 'title')
            poster = self._html_search_regex(
                r"poster\s*:\s*'(.+?)'\s*,", options_str, 'poster', default=None)

        if video_urls.get('youtube'):
            return self.url_result(video_urls.get('youtube'), 'Youtube')

        formats = []
        for video_type, api_url in video_urls.items():
            if not api_url:
                continue

            video_url = self._download_webpage(
                compat_urlparse.urljoin(url, api_url), video_id,
                note='retrieve url for %s video' % video_type)

            ext = determine_ext(video_url)
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    video_url, video_id, ext='mp4', m3u8_id='hls'))
            elif ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    video_url, video_id, f4m_id='hds'))
            else:
                mobj = re.search(r'_(?P<height>\d+)p_(?P<tbr>\d+)\.mp4', video_url)
                a_format = {
                    'url': video_url,
                    # video_type may be 'mp4', which confuses YoutubeDL
                    'format_id': 'http-' + video_type,
                }
                if mobj:
                    a_format.update({
                        'height': int_or_none(mobj.group('height')),
                        'tbr': int_or_none(mobj.group('tbr')),
                    })
                formats.append(a_format)

        self._sort_formats(formats)

        return {
            'id': video_id,
            'formats': formats,
            'title': title,
            'thumbnail': poster,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/ninecninemedia.py
youtube_dl/extractor/ninecninemedia.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    float_or_none,
    int_or_none,
    parse_iso8601,
    try_get,
)


class NineCNineMediaIE(InfoExtractor):
    IE_NAME = '9c9media'
    _GEO_COUNTRIES = ['CA']
    _VALID_URL = r'9c9media:(?P<destination_code>[^:]+):(?P<id>\d+)'
    _API_BASE_TEMPLATE = 'http://capi.9c9media.com/destinations/%s/platforms/desktop/contents/%s/'

    def _real_extract(self, url):
        destination_code, content_id = re.match(self._VALID_URL, url).groups()
        api_base_url = self._API_BASE_TEMPLATE % (destination_code, content_id)
        content = self._download_json(api_base_url, content_id, query={
            '$include': '[Media.Name,Season,ContentPackages.Duration,ContentPackages.Id]',
        })
        title = content['Name']
        content_package = content['ContentPackages'][0]
        package_id = content_package['Id']
        content_package_url = api_base_url + 'contentpackages/%s/' % package_id
        content_package = self._download_json(
            content_package_url, content_id, query={
                '$include': '[HasClosedCaptions]',
            })

        if try_get(content_package, lambda x: x['Constraints']['Security']['Type']):
            raise ExtractorError('This video is DRM protected.', expected=True)

        manifest_base_url = content_package_url + 'manifest.'
        formats = []
        formats.extend(self._extract_m3u8_formats(
            manifest_base_url + 'm3u8', content_id, 'mp4',
            'm3u8_native', m3u8_id='hls', fatal=False))
        formats.extend(self._extract_f4m_formats(
            manifest_base_url + 'f4m', content_id,
            f4m_id='hds', fatal=False))
        formats.extend(self._extract_mpd_formats(
            manifest_base_url + 'mpd', content_id,
            mpd_id='dash', fatal=False))
        self._sort_formats(formats)

        thumbnails = []
        for image in (content.get('Images') or []):
            image_url = image.get('Url')
            if not image_url:
                continue
            thumbnails.append({
                'url': image_url,
                'width': int_or_none(image.get('Width')),
                'height': int_or_none(image.get('Height')),
            })

        tags, categories = [], []
        for source_name, container in (('Tags', tags), ('Genres', categories)):
            for e in content.get(source_name, []):
                e_name = e.get('Name')
                if not e_name:
                    continue
                container.append(e_name)

        season = content.get('Season') or {}

        info = {
            'id': content_id,
            'title': title,
            'description': content.get('Desc') or content.get('ShortDesc'),
            'timestamp': parse_iso8601(content.get('BroadcastDateTime')),
            'episode_number': int_or_none(content.get('Episode')),
            'season': season.get('Name'),
            'season_number': int_or_none(season.get('Number')),
            'season_id': season.get('Id'),
            'series': try_get(content, lambda x: x['Media']['Name']),
            'tags': tags,
            'categories': categories,
            'duration': float_or_none(content_package.get('Duration')),
            'formats': formats,
            'thumbnails': thumbnails,
        }

        if content_package.get('HasClosedCaptions'):
            info['subtitles'] = {
                'en': [{
                    'url': manifest_base_url + 'vtt',
                    'ext': 'vtt',
                }, {
                    'url': manifest_base_url + 'srt',
                    'ext': 'srt',
                }]
            }

        return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/scte.py
youtube_dl/extractor/scte.py
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    decode_packed_codes,
    ExtractorError,
    urlencode_postdata,
)


class SCTEBaseIE(InfoExtractor):
    _LOGIN_URL = 'https://www.scte.org/SCTE/Sign_In.aspx'
    _NETRC_MACHINE = 'scte'

    def _real_initialize(self):
        self._login()

    def _login(self):
        username, password = self._get_login_info()
        if username is None:
            return

        login_popup = self._download_webpage(
            self._LOGIN_URL, None, 'Downloading login popup')

        def is_logged(webpage):
            return any(re.search(p, webpage) for p in (
                r'class=["\']welcome\b', r'>Sign Out<'))

        # already logged in
        if is_logged(login_popup):
            return

        login_form = self._hidden_inputs(login_popup)

        login_form.update({
            'ctl01$TemplateBody$WebPartManager1$gwpciNewContactSignInCommon$ciNewContactSignInCommon$signInUserName': username,
            'ctl01$TemplateBody$WebPartManager1$gwpciNewContactSignInCommon$ciNewContactSignInCommon$signInPassword': password,
            'ctl01$TemplateBody$WebPartManager1$gwpciNewContactSignInCommon$ciNewContactSignInCommon$RememberMe': 'on',
        })

        response = self._download_webpage(
            self._LOGIN_URL, None, 'Logging in',
            data=urlencode_postdata(login_form))

        if '|pageRedirect|' not in response and not is_logged(response):
            error = self._html_search_regex(
                r'(?s)<[^>]+class=["\']AsiError["\'][^>]*>(.+?)</',
                response, 'error message', default=None)
            if error:
                raise ExtractorError('Unable to login: %s' % error, expected=True)
            raise ExtractorError('Unable to log in')


class SCTEIE(SCTEBaseIE):
    _VALID_URL = r'https?://learning\.scte\.org/mod/scorm/view\.php?.*?\bid=(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://learning.scte.org/mod/scorm/view.php?id=31484',
        'info_dict': {
            'title': 'Introduction to DOCSIS Engineering Professional',
            'id': '31484',
        },
        'playlist_count': 5,
        'skip': 'Requires account credentials',
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        title = self._search_regex(r'<h1>(.+?)</h1>', webpage, 'title')

        context_id = self._search_regex(r'context-(\d+)', webpage, video_id)
        content_base = 'https://learning.scte.org/pluginfile.php/%s/mod_scorm/content/8/' % context_id
        context = decode_packed_codes(self._download_webpage(
            '%smobile/data.js' % content_base, video_id))

        data = self._parse_xml(
            self._search_regex(
                r'CreateData\(\s*"(.+?)"', context, 'data').replace(r"\'", "'"),
            video_id)

        entries = []
        for asset in data.findall('.//asset'):
            asset_url = asset.get('url')
            if not asset_url or not asset_url.endswith('.mp4'):
                continue
            asset_id = self._search_regex(
                r'video_([^_]+)_', asset_url, 'asset id', default=None)
            if not asset_id:
                continue
            entries.append({
                'id': asset_id,
                'title': title,
                'url': content_base + asset_url,
            })

        return self.playlist_result(entries, video_id, title)


class SCTECourseIE(SCTEBaseIE):
    _VALID_URL = r'https?://learning\.scte\.org/(?:mod/sub)?course/view\.php?.*?\bid=(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://learning.scte.org/mod/subcourse/view.php?id=31491',
        'only_matching': True,
    }, {
        'url': 'https://learning.scte.org/course/view.php?id=3639',
        'only_matching': True,
    }, {
        'url': 'https://learning.scte.org/course/view.php?id=3073',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        course_id = self._match_id(url)

        webpage = self._download_webpage(url, course_id)

        title = self._search_regex(
            r'<h1>(.+?)</h1>', webpage, 'title', default=None)

        entries = []
        for mobj in re.finditer(
                r'''(?x)
                    <a[^>]+
                        href=(["\'])
                        (?P<url>
                            https?://learning\.scte\.org/mod/
                            (?P<kind>scorm|subcourse)/view\.php?(?:(?!\1).)*?
                            \bid=\d+
                        )
                ''', webpage):
            item_url = mobj.group('url')
            if item_url == url:
                continue
            ie = (SCTEIE.ie_key() if mobj.group('kind') == 'scorm'
                  else SCTECourseIE.ie_key())
            entries.append(self.url_result(item_url, ie=ie))

        return self.playlist_result(entries, course_id, title)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/bloomberg.py
youtube_dl/extractor/bloomberg.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor


class BloombergIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?bloomberg\.com/(?:[^/]+/)*(?P<id>[^/?#]+)'

    _TESTS = [{
        'url': 'http://www.bloomberg.com/news/videos/b/aaeae121-5949-481e-a1ce-4562db6f5df2',
        # The md5 checksum changes
        'info_dict': {
            'id': 'qurhIVlJSB6hzkVi229d8g',
            'ext': 'flv',
            'title': 'Shah\'s Presentation on Foreign-Exchange Strategies',
            'description': 'md5:a8ba0302912d03d246979735c17d2761',
        },
        'params': {
            'format': 'best[format_id^=hds]',
        },
    }, {
        # video ID in BPlayer(...)
        'url': 'http://www.bloomberg.com/features/2016-hello-world-new-zealand/',
        'info_dict': {
            'id': '938c7e72-3f25-4ddb-8b85-a9be731baa74',
            'ext': 'flv',
            'title': 'Meet the Real-Life Tech Wizards of Middle Earth',
            'description': 'Hello World, Episode 1: New Zealand’s freaky AI babies, robot exoskeletons, and a virtual you.',
        },
        'params': {
            'format': 'best[format_id^=hds]',
        },
    }, {
        # data-bmmrid=
        'url': 'https://www.bloomberg.com/politics/articles/2017-02-08/le-pen-aide-briefed-french-central-banker-on-plan-to-print-money',
        'only_matching': True,
    }, {
        'url': 'http://www.bloomberg.com/news/articles/2015-11-12/five-strange-things-that-have-been-happening-in-financial-markets',
        'only_matching': True,
    }, {
        'url': 'http://www.bloomberg.com/politics/videos/2015-11-25/karl-rove-on-jeb-bush-s-struggles-stopping-trump',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        name = self._match_id(url)
        webpage = self._download_webpage(url, name)
        video_id = self._search_regex(
            (r'["\']bmmrId["\']\s*:\s*(["\'])(?P<id>(?:(?!\1).)+)\1',
             r'videoId\s*:\s*(["\'])(?P<id>(?:(?!\1).)+)\1',
             r'data-bmmrid=(["\'])(?P<id>(?:(?!\1).)+)\1'),
            webpage, 'id', group='id', default=None)
        if not video_id:
            bplayer_data = self._parse_json(self._search_regex(
                r'BPlayer\(null,\s*({[^;]+})\);', webpage, 'id'), name)
            video_id = bplayer_data['id']
        title = re.sub(': Video$', '', self._og_search_title(webpage))

        embed_info = self._download_json(
            'http://www.bloomberg.com/api/embed?id=%s' % video_id, video_id)
        formats = []
        for stream in embed_info['streams']:
            stream_url = stream.get('url')
            if not stream_url:
                continue
            if stream['muxing_format'] == 'TS':
                formats.extend(self._extract_m3u8_formats(
                    stream_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
            else:
                formats.extend(self._extract_f4m_formats(
                    stream_url, video_id, f4m_id='hds', fatal=False))
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'description': self._og_search_description(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/medialaan.py
youtube_dl/extractor/medialaan.py
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    extract_attributes,
    int_or_none,
    mimetype2ext,
    parse_iso8601,
)


class MedialaanIE(InfoExtractor):
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            (?:embed\.)?mychannels.video/embed/|
                            embed\.mychannels\.video/(?:s(?:dk|cript)/)?production/|
                            (?:www\.)?(?:
                                (?:
                                    7sur7|
                                    demorgen|
                                    hln|
                                    joe|
                                    qmusic
                                )\.be|
                                (?:
                                    [abe]d|
                                    bndestem|
                                    destentor|
                                    gelderlander|
                                    pzc|
                                    tubantia|
                                    volkskrant
                                )\.nl
                            )/video/(?:[^/]+/)*[^/?&#]+~p
                        )
                        (?P<id>\d+)
                    '''
    _TESTS = [{
        'url': 'https://www.bndestem.nl/video/de-terugkeer-van-ally-de-aap-en-wie-vertrekt-er-nog-bij-nac~p193993',
        'info_dict': {
            'id': '193993',
            'ext': 'mp4',
            'title': 'De terugkeer van Ally de Aap en wie vertrekt er nog bij NAC?',
            'timestamp': 1611663540,
            'upload_date': '20210126',
            'duration': 238,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://www.gelderlander.nl/video/kanalen/degelderlander~c320/series/snel-nieuws~s984/noodbevel-in-doetinchem-politie-stuurt-mensen-centrum-uit~p194093',
        'only_matching': True,
    }, {
        'url': 'https://embed.mychannels.video/sdk/production/193993?options=TFTFF_default',
        'only_matching': True,
    }, {
        'url': 'https://embed.mychannels.video/script/production/193993',
        'only_matching': True,
    }, {
        'url': 'https://embed.mychannels.video/production/193993',
        'only_matching': True,
    }, {
        'url': 'https://mychannels.video/embed/193993',
        'only_matching': True,
    }, {
        'url': 'https://embed.mychannels.video/embed/193993',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage):
        entries = []
        for element in re.findall(r'(<div[^>]+data-mychannels-type="video"[^>]*>)', webpage):
            mychannels_id = extract_attributes(element).get('data-mychannels-id')
            if mychannels_id:
                entries.append('https://mychannels.video/embed/' + mychannels_id)
        return entries

    def _real_extract(self, url):
        production_id = self._match_id(url)
        production = self._download_json(
            'https://embed.mychannels.video/sdk/production/' + production_id,
            production_id, query={'options': 'UUUU_default'})['productions'][0]
        title = production['title']

        formats = []
        for source in (production.get('sources') or []):
            src = source.get('src')
            if not src:
                continue
            ext = mimetype2ext(source.get('type'))
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    src, production_id, 'mp4', 'm3u8_native',
                    m3u8_id='hls', fatal=False))
            else:
                formats.append({
                    'ext': ext,
                    'url': src,
                })
        self._sort_formats(formats)

        return {
            'id': production_id,
            'title': title,
            'formats': formats,
            'thumbnail': production.get('posterUrl'),
            'timestamp': parse_iso8601(production.get('publicationDate'), ' '),
            'duration': int_or_none(production.get('duration')) or None,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/storyfire.py
youtube_dl/extractor/storyfire.py
# coding: utf-8
from __future__ import unicode_literals

import functools

from .common import InfoExtractor
from ..utils import (
    # HEADRequest,
    int_or_none,
    OnDemandPagedList,
    smuggle_url,
)


class StoryFireBaseIE(InfoExtractor):
    _VALID_URL_BASE = r'https?://(?:www\.)?storyfire\.com/'

    def _call_api(self, path, video_id, resource, query=None):
        return self._download_json(
            'https://storyfire.com/app/%s/%s' % (path, video_id), video_id,
            'Downloading %s JSON metadata' % resource, query=query)

    def _parse_video(self, video):
        title = video['title']
        vimeo_id = self._search_regex(
            r'https?://player\.vimeo\.com/external/(\d+)',
            video['vimeoVideoURL'], 'vimeo id')

        # video_url = self._request_webpage(
        #     HEADRequest(video['vimeoVideoURL']), video_id).geturl()
        # formats = []
        # for v_url, suffix in [(video_url, '_sep'), (video_url.replace('/sep/video/', '/video/'), '')]:
        #     formats.extend(self._extract_m3u8_formats(
        #         v_url, video_id, 'mp4', 'm3u8_native',
        #         m3u8_id='hls' + suffix, fatal=False))
        #     formats.extend(self._extract_mpd_formats(
        #         v_url.replace('.m3u8', '.mpd'), video_id,
        #         mpd_id='dash' + suffix, fatal=False))
        # self._sort_formats(formats)

        uploader_id = video.get('hostID')

        return {
            '_type': 'url_transparent',
            'id': vimeo_id,
            'title': title,
            'description': video.get('description'),
            'url': smuggle_url(
                'https://player.vimeo.com/video/' + vimeo_id, {
                    'http_headers': {
                        'Referer': 'https://storyfire.com/',
                    }
                }),
            # 'formats': formats,
            'thumbnail': video.get('storyImage'),
            'view_count': int_or_none(video.get('views')),
            'like_count': int_or_none(video.get('likesCount')),
            'comment_count': int_or_none(video.get('commentsCount')),
            'duration': int_or_none(video.get('videoDuration')),
            'timestamp': int_or_none(video.get('publishDate')),
            'uploader': video.get('username'),
            'uploader_id': uploader_id,
            'uploader_url': 'https://storyfire.com/user/%s/video' % uploader_id if uploader_id else None,
            'episode_number': int_or_none(video.get('episodeNumber') or video.get('episode_number')),
        }


class StoryFireIE(StoryFireBaseIE):
    _VALID_URL = StoryFireBaseIE._VALID_URL_BASE + r'video-details/(?P<id>[0-9a-f]{24})'
    _TEST = {
        'url': 'https://storyfire.com/video-details/5df1d132b6378700117f9181',
        'md5': 'caec54b9e4621186d6079c7ec100c1eb',
        'info_dict': {
            'id': '378954662',
            'ext': 'mp4',
            'title': 'Buzzfeed Teaches You About Memes',
            'uploader_id': 'ntZAJFECERSgqHSxzonV5K2E89s1',
            'timestamp': 1576129028,
            'description': 'md5:0b4e28021548e144bed69bb7539e62ea',
            'uploader': 'whang!',
            'upload_date': '20191212',
            'duration': 418,
            'view_count': int,
            'like_count': int,
            'comment_count': int,
        },
        'params': {
            'skip_download': True,
        },
        'expected_warnings': ['Unable to download JSON metadata']
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        video = self._call_api(
            'generic/video-detail', video_id, 'video')['video']
        return self._parse_video(video)


class StoryFireUserIE(StoryFireBaseIE):
    _VALID_URL = StoryFireBaseIE._VALID_URL_BASE + r'user/(?P<id>[^/]+)/video'
    _TEST = {
        'url': 'https://storyfire.com/user/UQ986nFxmAWIgnkZQ0ftVhq4nOk2/video',
        'info_dict': {
            'id': 'UQ986nFxmAWIgnkZQ0ftVhq4nOk2',
        },
        'playlist_mincount': 151,
    }
    _PAGE_SIZE = 20

    def _fetch_page(self, user_id, page):
        videos = self._call_api(
            'publicVideos', user_id, 'page %d' % (page + 1), {
                'skip': page * self._PAGE_SIZE,
            })['videos']
        for video in videos:
            yield self._parse_video(video)

    def _real_extract(self, url):
        user_id = self._match_id(url)
        entries = OnDemandPagedList(functools.partial(
            self._fetch_page, user_id), self._PAGE_SIZE)
        return self.playlist_result(entries, user_id)


class StoryFireSeriesIE(StoryFireBaseIE):
    _VALID_URL = StoryFireBaseIE._VALID_URL_BASE + r'write/series/stories/(?P<id>[^/?&#]+)'
    _TESTS = [{
        'url': 'https://storyfire.com/write/series/stories/-Lq6MsuIHLODO6d2dDkr/',
        'info_dict': {
            'id': '-Lq6MsuIHLODO6d2dDkr',
        },
        'playlist_mincount': 13,
    }, {
        'url': 'https://storyfire.com/write/series/stories/the_mortal_one/',
        'info_dict': {
            'id': 'the_mortal_one',
        },
        'playlist_count': 0,
    }]

    def _extract_videos(self, stories):
        for story in stories.values():
            if story.get('hasVideo'):
                yield self._parse_video(story)

    def _real_extract(self, url):
        series_id = self._match_id(url)
        stories = self._call_api(
            'seriesStories', series_id, 'series stories')
        return self.playlist_result(self._extract_videos(stories), series_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/myvidster.py
youtube_dl/extractor/myvidster.py
from __future__ import unicode_literals

from .common import InfoExtractor


class MyVidsterIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?myvidster\.com/video/(?P<id>\d+)/'

    _TEST = {
        'url': 'http://www.myvidster.com/video/32059805/Hot_chemistry_with_raw_love_making',
        'md5': '95296d0231c1363222c3441af62dc4ca',
        'info_dict': {
            'id': '3685814',
            'title': 'md5:7d8427d6d02c4fbcef50fe269980c749',
            'upload_date': '20141027',
            'uploader': 'utkualp',
            'ext': 'mp4',
            'age_limit': 18,
        },
        'add_ie': ['XHamster'],
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        return self.url_result(self._html_search_regex(
            r'rel="videolink" href="(?P<real_url>.*)">', webpage,
            'real video url'))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/miomio.py
youtube_dl/extractor/miomio.py
# coding: utf-8
from __future__ import unicode_literals

import random

from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
    xpath_text,
    int_or_none,
    ExtractorError,
    sanitized_Request,
)


class MioMioIE(InfoExtractor):
    IE_NAME = 'miomio.tv'
    _VALID_URL = r'https?://(?:www\.)?miomio\.tv/watch/cc(?P<id>[0-9]+)'
    _TESTS = [{
        # "type=video" in flashvars
        'url': 'http://www.miomio.tv/watch/cc88912/',
        'info_dict': {
            'id': '88912',
            'ext': 'flv',
            'title': '【SKY】字幕 铠武昭和VS平成 假面骑士大战FEAT战队 魔星字幕组 字幕',
            'duration': 5923,
        },
        'skip': 'Unable to load videos',
    }, {
        'url': 'http://www.miomio.tv/watch/cc184024/',
        'info_dict': {
            'id': '43729',
            'title': '《动漫同人插画绘制》',
        },
        'playlist_mincount': 86,
        'skip': 'Unable to load videos',
    }, {
        'url': 'http://www.miomio.tv/watch/cc173113/',
        'info_dict': {
            'id': '173113',
            'title': 'The New Macbook 2015 上手试玩与简评'
        },
        'playlist_mincount': 2,
        'skip': 'Unable to load videos',
    }, {
        # new 'h5' player
        'url': 'http://www.miomio.tv/watch/cc273997/',
        'md5': '0b27a4b4495055d826813f8c3a6b2070',
        'info_dict': {
            'id': '273997',
            'ext': 'mp4',
            'title': 'マツコの知らない世界【劇的進化SP!ビニール傘&冷凍食品2016】 1_2 - 16 05 31',
        },
        'skip': 'Unable to load videos',
    }]

    def _extract_mioplayer(self, webpage, video_id, title, http_headers):
        xml_config = self._search_regex(
            r'flashvars="type=(?:sina|video)&amp;(.+?)&amp;',
            webpage, 'xml config')

        # skipping the following page causes lags and eventually connection drop-outs
        self._request_webpage(
            'http://www.miomio.tv/mioplayer/mioplayerconfigfiles/xml.php?id=%s&r=%s' % (video_id, random.randint(100, 999)),
            video_id)

        vid_config_request = sanitized_Request(
            'http://www.miomio.tv/mioplayer/mioplayerconfigfiles/sina.php?{0}'.format(xml_config),
            headers=http_headers)

        # the following xml contains the actual configuration information on the video file(s)
        vid_config = self._download_xml(vid_config_request, video_id)

        if not int_or_none(xpath_text(vid_config, 'timelength')):
            raise ExtractorError('Unable to load videos!', expected=True)

        entries = []
        for f in vid_config.findall('./durl'):
            segment_url = xpath_text(f, 'url', 'video url')
            if not segment_url:
                continue
            order = xpath_text(f, 'order', 'order')
            segment_id = video_id
            segment_title = title
            if order:
                segment_id += '-%s' % order
                segment_title += ' part %s' % order
            entries.append({
                'id': segment_id,
                'url': segment_url,
                'title': segment_title,
                'duration': int_or_none(xpath_text(f, 'length', 'duration'), 1000),
                'http_headers': http_headers,
            })

        return entries

    def _download_chinese_webpage(self, *args, **kwargs):
        # Requests with English locales return garbage
        headers = {
            'Accept-Language': 'zh-TW,en-US;q=0.7,en;q=0.3',
        }
        kwargs.setdefault('headers', {}).update(headers)
        return self._download_webpage(*args, **kwargs)

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_chinese_webpage(
            url, video_id)

        title = self._html_search_meta(
            'description', webpage, 'title', fatal=True)

        mioplayer_path = self._search_regex(
            r'src="(/mioplayer(?:_h5)?/[^"]+)"', webpage, 'ref_path')

        if '_h5' in mioplayer_path:
            player_url = compat_urlparse.urljoin(url, mioplayer_path)
            player_webpage = self._download_chinese_webpage(
                player_url, video_id,
                note='Downloading player webpage', headers={'Referer': url})
            entries = self._parse_html5_media_entries(player_url, player_webpage, video_id)
            http_headers = {'Referer': player_url}
        else:
            http_headers = {'Referer': 'http://www.miomio.tv%s' % mioplayer_path}
            entries = self._extract_mioplayer(webpage, video_id, title, http_headers)

        if len(entries) == 1:
            segment = entries[0]
            segment['id'] = video_id
            segment['title'] = title
            segment['http_headers'] = http_headers
            return segment

        return {
            '_type': 'multi_video',
            'id': video_id,
            'entries': entries,
            'title': title,
            'http_headers': http_headers,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/springboardplatform.py
youtube_dl/extractor/springboardplatform.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    xpath_attr,
    xpath_text,
    xpath_element,
    unescapeHTML,
    unified_timestamp,
)


class SpringboardPlatformIE(InfoExtractor):
    _VALID_URL = r'''(?x)
                    https?://
                        cms\.springboardplatform\.com/
                        (?:
                            (?:previews|embed_iframe)/(?P<index>\d+)/video/(?P<id>\d+)|
                            xml_feeds_advanced/index/(?P<index_2>\d+)/rss3/(?P<id_2>\d+)
                        )
                    '''
    _TESTS = [{
        'url': 'http://cms.springboardplatform.com/previews/159/video/981017/0/0/1',
        'md5': '5c3cb7b5c55740d482561099e920f192',
        'info_dict': {
            'id': '981017',
            'ext': 'mp4',
            'title': 'Redman "BUD like YOU" "Usher Good Kisser" REMIX',
            'description': 'Redman "BUD like YOU" "Usher Good Kisser" REMIX',
            'thumbnail': r're:^https?://.*\.jpg$',
            'timestamp': 1409132328,
            'upload_date': '20140827',
            'duration': 193,
        },
    }, {
        'url': 'http://cms.springboardplatform.com/embed_iframe/159/video/981017/rab007/rapbasement.com/1/1',
        'only_matching': True,
    }, {
        'url': 'http://cms.springboardplatform.com/embed_iframe/20/video/1731611/ki055/kidzworld.com/10',
        'only_matching': True,
    }, {
        'url': 'http://cms.springboardplatform.com/xml_feeds_advanced/index/159/rss3/981017/0/0/1/',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage):
        return [
            mobj.group('url')
            for mobj in re.finditer(
                r'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//cms\.springboardplatform\.com/embed_iframe/\d+/video/\d+.*?)\1',
                webpage)]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id') or mobj.group('id_2')
        index = mobj.group('index') or mobj.group('index_2')

        video = self._download_xml(
            'http://cms.springboardplatform.com/xml_feeds_advanced/index/%s/rss3/%s' % (index, video_id),
            video_id)

        item = xpath_element(video, './/item', 'item', fatal=True)
        content = xpath_element(
            item, './{http://search.yahoo.com/mrss/}content', 'content',
            fatal=True)
        title = unescapeHTML(xpath_text(item, './title', 'title', fatal=True))

        video_url = content.attrib['url']

        if 'error_video.mp4' in video_url:
            raise ExtractorError(
                'Video %s no longer exists' % video_id, expected=True)

        duration = int_or_none(content.get('duration'))
        tbr = int_or_none(content.get('bitrate'))
        filesize = int_or_none(content.get('fileSize'))
        width = int_or_none(content.get('width'))
        height = int_or_none(content.get('height'))

        description = unescapeHTML(xpath_text(
            item, './description', 'description'))
        thumbnail = xpath_attr(
            item, './{http://search.yahoo.com/mrss/}thumbnail', 'url',
            'thumbnail')

        timestamp = unified_timestamp(xpath_text(
            item, './{http://cms.springboardplatform.com/namespaces.html}created',
            'timestamp'))

        formats = [{
            'url': video_url,
            'format_id': 'http',
            'tbr': tbr,
            'filesize': filesize,
            'width': width,
            'height': height,
        }]

        m3u8_format = formats[0].copy()
        m3u8_format.update({
            'url': re.sub(r'(https?://)cdn\.', r'\1hls.', video_url) + '.m3u8',
            'ext': 'mp4',
            'format_id': 'hls',
            'protocol': 'm3u8_native',
        })
        formats.append(m3u8_format)

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'duration': duration,
            'formats': formats,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/polskieradio.py
youtube_dl/extractor/polskieradio.py
# coding: utf-8
from __future__ import unicode_literals

import itertools
import re

from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_urllib_parse_unquote,
    compat_urlparse
)
from ..utils import (
    extract_attributes,
    int_or_none,
    strip_or_none,
    unified_timestamp,
)


class PolskieRadioIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?polskieradio\.pl/\d+/\d+/Artykul/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://www.polskieradio.pl/7/5102/Artykul/1587943,Prof-Andrzej-Nowak-o-historii-nie-da-sie-myslec-beznamietnie',
        'info_dict': {
            'id': '1587943',
            'title': 'Prof. Andrzej Nowak: o historii nie da się myśleć beznamiętnie',
            'description': 'md5:12f954edbf3120c5e7075e17bf9fc5c5',
        },
        'playlist': [{
            'md5': '2984ee6ce9046d91fc233bc1a864a09a',
            'info_dict': {
                'id': '1540576',
                'ext': 'mp3',
                'title': 'md5:d4623290d4ac983bf924061c75c23a0d',
                'timestamp': 1456594200,
                'upload_date': '20160227',
                'duration': 2364,
                'thumbnail': r're:^https?://static\.prsa\.pl/images/.*\.jpg$'
            },
        }],
    }, {
        'url': 'http://www.polskieradio.pl/265/5217/Artykul/1635803,Euro-2016-nie-ma-miejsca-na-blad-Polacy-graja-ze-Szwajcaria-o-cwiercfinal',
        'info_dict': {
            'id': '1635803',
            'title': 'Euro 2016: nie ma miejsca na błąd. Polacy grają ze Szwajcarią o ćwierćfinał',
            'description': 'md5:01cb7d0cad58664095d72b51a1ebada2',
        },
        'playlist_mincount': 12,
    }, {
        'url': 'http://polskieradio.pl/9/305/Artykul/1632955,Bardzo-popularne-slowo-remis',
        'only_matching': True,
    }, {
        'url': 'http://www.polskieradio.pl/7/5102/Artykul/1587943',
        'only_matching': True,
    }, {
        # with mp4 video
        'url': 'http://www.polskieradio.pl/9/299/Artykul/1634903,Brexit-Leszek-Miller-swiat-sie-nie-zawali-Europa-bedzie-trwac-dalej',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)

        content = self._search_regex(
            r'(?s)<div[^>]+class="\s*this-article\s*"[^>]*>(.+?)<div[^>]+class="tags"[^>]*>',
            webpage, 'content')

        timestamp = unified_timestamp(self._html_search_regex(
            r'(?s)<span[^>]+id="datetime2"[^>]*>(.+?)</span>',
            webpage, 'timestamp', fatal=False))

        thumbnail_url = self._og_search_thumbnail(webpage)

        entries = []

        media_urls = set()

        for data_media in re.findall(r'<[^>]+data-media=({[^>]+})', content):
            media = self._parse_json(data_media, playlist_id, fatal=False)
            if not media.get('file') or not media.get('desc'):
                continue
            media_url = self._proto_relative_url(media['file'], 'http:')
            if media_url in media_urls:
                continue
            media_urls.add(media_url)
            entries.append({
                'id': compat_str(media['id']),
                'url': media_url,
                'title': compat_urllib_parse_unquote(media['desc']),
                'duration': int_or_none(media.get('length')),
                'vcodec': 'none' if media.get('provider') == 'audio' else None,
                'timestamp': timestamp,
                'thumbnail': thumbnail_url
            })

        title = self._og_search_title(webpage).strip()
        description = strip_or_none(self._og_search_description(webpage))

        return self.playlist_result(entries, playlist_id, title, description)


class PolskieRadioCategoryIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?polskieradio\.pl/\d+(?:,[^/]+)?/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.polskieradio.pl/7/5102,HISTORIA-ZYWA',
        'info_dict': {
            'id': '5102',
            'title': 'HISTORIA ŻYWA',
        },
        'playlist_mincount': 38,
    }, {
        'url': 'http://www.polskieradio.pl/7/4807',
        'info_dict': {
            'id': '4807',
            'title': 'Vademecum 1050. rocznicy Chrztu Polski'
        },
        'playlist_mincount': 5
    }, {
        'url': 'http://www.polskieradio.pl/7/129,Sygnaly-dnia?ref=source',
        'only_matching': True
    }, {
        'url': 'http://www.polskieradio.pl/37,RedakcjaKatolicka/4143,Kierunek-Krakow',
        'info_dict': {
            'id': '4143',
            'title': 'Kierunek Kraków',
        },
        'playlist_mincount': 61
    }, {
        'url': 'http://www.polskieradio.pl/10,czworka/214,muzyka',
        'info_dict': {
            'id': '214',
            'title': 'Muzyka',
        },
        'playlist_mincount': 61
    }, {
        'url': 'http://www.polskieradio.pl/7,Jedynka/5102,HISTORIA-ZYWA',
        'only_matching': True,
    }, {
        'url': 'http://www.polskieradio.pl/8,Dwojka/196,Publicystyka',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        return False if PolskieRadioIE.suitable(url) else super(PolskieRadioCategoryIE, cls).suitable(url)

    def _entries(self, url, page, category_id):
        content = page
        for page_num in itertools.count(2):
            for a_entry, entry_id in re.findall(
                    r'(?s)<article[^>]+>.*?(<a[^>]+href=["\']/\d+/\d+/Artykul/(\d+)[^>]+>).*?</article>',
                    content):
                entry = extract_attributes(a_entry)
                href = entry.get('href')
                if not href:
                    continue
                yield self.url_result(
                    compat_urlparse.urljoin(url, href), PolskieRadioIE.ie_key(),
                    entry_id, entry.get('title'))
            mobj = re.search(
                r'<div[^>]+class=["\']next["\'][^>]*>\s*<a[^>]+href=(["\'])(?P<url>(?:(?!\1).)+)\1',
                content)
            if not mobj:
                break
            next_url = compat_urlparse.urljoin(url, mobj.group('url'))
            content = self._download_webpage(
                next_url, category_id,
                'Downloading page %s' % page_num)

    def _real_extract(self, url):
        category_id = self._match_id(url)
        webpage = self._download_webpage(url, category_id)
        title = self._html_search_regex(
            r'<title>([^<]+) - [^<]+ - [^<]+</title>',
            webpage, 'title', fatal=False)
        return self.playlist_result(
            self._entries(url, webpage, category_id),
            category_id, title)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/walla.py
youtube_dl/extractor/walla.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    xpath_text,
    int_or_none,
)


class WallaIE(InfoExtractor):
    _VALID_URL = r'https?://vod\.walla\.co\.il/[^/]+/(?P<id>\d+)/(?P<display_id>.+)'
    _TEST = {
        'url': 'http://vod.walla.co.il/movie/2642630/one-direction-all-for-one',
        'info_dict': {
            'id': '2642630',
            'display_id': 'one-direction-all-for-one',
            'ext': 'flv',
            'title': 'וואן דיירקשן: ההיסטריה',
            'description': 'md5:de9e2512a92442574cdb0913c49bc4d8',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 3600,
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }

    _SUBTITLE_LANGS = {
        'עברית': 'heb',
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        display_id = mobj.group('display_id')

        video = self._download_xml(
            'http://video2.walla.co.il/?w=null/null/%s/@@/video/flv_pl' % video_id,
            display_id)

        item = video.find('./items/item')

        title = xpath_text(item, './title', 'title')
        description = xpath_text(item, './synopsis', 'description')
        thumbnail = xpath_text(item, './preview_pic', 'thumbnail')
        duration = int_or_none(xpath_text(item, './duration', 'duration'))

        subtitles = {}
        for subtitle in item.findall('./subtitles/subtitle'):
            lang = xpath_text(subtitle, './title')
            subtitles[self._SUBTITLE_LANGS.get(lang, lang)] = [{
                'ext': 'srt',
                'url': xpath_text(subtitle, './src'),
            }]

        formats = []
        for quality in item.findall('./qualities/quality'):
            format_id = xpath_text(quality, './title')
            fmt = {
                'url': 'rtmp://wafla.walla.co.il/vod',
                'play_path': xpath_text(quality, './src'),
                'player_url': 'http://isc.walla.co.il/w9/swf/video_swf/vod/WallaMediaPlayerAvod.swf',
                'page_url': url,
                'ext': 'flv',
                'format_id': xpath_text(quality, './title'),
            }
            m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
            if m:
                fmt['height'] = int(m.group('height'))
            formats.append(fmt)
        self._sort_formats(formats)

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
            'subtitles': subtitles,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/americastestkitchen.py
youtube_dl/extractor/americastestkitchen.py
# coding: utf-8
from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor
from ..utils import (
    clean_html,
    int_or_none,
    try_get,
    unified_strdate,
    unified_timestamp,
)


class AmericasTestKitchenIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?(?:americastestkitchen|cooks(?:country|illustrated))\.com/(?:cooks(?:country|illustrated)/)?(?P<resource_type>episode|videos)/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.americastestkitchen.com/episode/582-weeknight-japanese-suppers',
        'md5': 'b861c3e365ac38ad319cfd509c30577f',
        'info_dict': {
            'id': '5b400b9ee338f922cb06450c',
            'title': 'Japanese Suppers',
            'ext': 'mp4',
            'display_id': 'weeknight-japanese-suppers',
            'description': 'md5:64e606bfee910627efc4b5f050de92b3',
            'timestamp': 1523304000,
            'upload_date': '20180409',
            'release_date': '20180409',
            'series': "America's Test Kitchen",
            'season': 'Season 18',
            'season_number': 18,
            'episode': 'Japanese Suppers',
            'episode_number': 15,
            'duration': 1376,
            'thumbnail': r're:^https?://',
            'average_rating': 0,
            'view_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # Metadata parsing behaves differently for newer episodes (705) as opposed to older episodes (582 above)
        'url': 'https://www.americastestkitchen.com/episode/705-simple-chicken-dinner',
        'md5': '06451608c57651e985a498e69cec17e5',
        'info_dict': {
            'id': '5fbe8c61bda2010001c6763b',
            'title': 'Simple Chicken Dinner',
            'ext': 'mp4',
            'display_id': 'atktv_2103_simple-chicken-dinner_full-episode_web-mp4',
            'description': 'md5:eb68737cc2fd4c26ca7db30139d109e7',
            'timestamp': 1610737200,
            'upload_date': '20210115',
            'release_date': '20210115',
            'series': "America's Test Kitchen",
            'season': 'Season 21',
            'season_number': 21,
            'episode': 'Simple Chicken Dinner',
            'episode_number': 3,
            'duration': 1397,
            'thumbnail': r're:^https?://',
            'view_count': int,
            'average_rating': 0,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://www.americastestkitchen.com/videos/3420-pan-seared-salmon',
        'only_matching': True,
    }, {
        'url': 'https://www.americastestkitchen.com/cookscountry/episode/564-when-only-chocolate-will-do',
        'only_matching': True,
    }, {
        'url': 'https://www.americastestkitchen.com/cooksillustrated/videos/4478-beef-wellington',
        'only_matching': True,
    }, {
        'url': 'https://www.cookscountry.com/episode/564-when-only-chocolate-will-do',
        'only_matching': True,
    }, {
        'url': 'https://www.cooksillustrated.com/videos/4478-beef-wellington',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        resource_type, video_id = re.match(self._VALID_URL, url).groups()
        is_episode = resource_type == 'episode'
        if is_episode:
            resource_type = 'episodes'

        resource = self._download_json(
            'https://www.americastestkitchen.com/api/v6/%s/%s' % (resource_type, video_id), video_id)
        video = resource['video'] if is_episode else resource
        episode = resource if is_episode else resource.get('episode') or {}

        return {
            '_type': 'url_transparent',
            'url': 'https://player.zype.com/embed/%s.js?api_key=jZ9GUhRmxcPvX7M3SlfejB6Hle9jyHTdk2jVxG7wOHPLODgncEKVdPYBhuz9iWXQ' % video['zypeId'],
            'ie_key': 'Zype',
            'description': clean_html(video.get('description')),
            'timestamp': unified_timestamp(video.get('publishDate')),
            'release_date': unified_strdate(video.get('publishDate')),
            'episode_number': int_or_none(episode.get('number')),
            'season_number': int_or_none(episode.get('season')),
            'series': try_get(episode, lambda x: x['show']['title']),
            'episode': episode.get('title'),
        }


class AmericasTestKitchenSeasonIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?(?P<show>americastestkitchen|(?P<cooks>cooks(?:country|illustrated)))\.com(?:(?:/(?P<show2>cooks(?:country|illustrated)))?(?:/?$|(?<!ated)(?<!ated\.com)/episodes/browse/season_(?P<season>\d+)))'
    _TESTS = [{
        # ATK Season
        'url': 'https://www.americastestkitchen.com/episodes/browse/season_1',
        'info_dict': {
            'id': 'season_1',
            'title': 'Season 1',
        },
        'playlist_count': 13,
    }, {
        # Cooks Country Season
        'url': 'https://www.americastestkitchen.com/cookscountry/episodes/browse/season_12',
        'info_dict': {
            'id': 'season_12',
            'title': 'Season 12',
        },
        'playlist_count': 13,
    }, {
        # America's Test Kitchen Series
        'url': 'https://www.americastestkitchen.com/',
        'info_dict': {
            'id': 'americastestkitchen',
            'title': 'America\'s Test Kitchen',
        },
        'playlist_count': 558,
    }, {
        # Cooks Country Series
        'url': 'https://www.americastestkitchen.com/cookscountry',
        'info_dict': {
            'id': 'cookscountry',
            'title': 'Cook\'s Country',
        },
        'playlist_count': 199,
    }, {
        'url': 'https://www.americastestkitchen.com/cookscountry/',
        'only_matching': True,
    }, {
        'url': 'https://www.cookscountry.com/episodes/browse/season_12',
        'only_matching': True,
    }, {
        'url': 'https://www.cookscountry.com',
        'only_matching': True,
    }, {
        'url': 'https://www.americastestkitchen.com/cooksillustrated/',
        'only_matching': True,
    }, {
        'url': 'https://www.cooksillustrated.com',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        match = re.match(self._VALID_URL, url).groupdict()
        show = match.get('show2')
        show_path = ('/' + show) if show else ''
        show = show or match['show']
        season_number = int_or_none(match.get('season'))

        slug, title = {
            'americastestkitchen': ('atk', 'America\'s Test Kitchen'),
            'cookscountry': ('cco', 'Cook\'s Country'),
            'cooksillustrated': ('cio', 'Cook\'s Illustrated'),
        }[show]

        facet_filters = [
            'search_document_klass:episode',
            'search_show_slug:' + slug,
        ]

        if season_number:
            playlist_id = 'season_%d' % season_number
            playlist_title = 'Season %d' % season_number
            facet_filters.append('search_season_list:' + playlist_title)
        else:
            playlist_id = show
            playlist_title = title

        season_search = self._download_json(
            'https://y1fnzxui30-dsn.algolia.net/1/indexes/everest_search_%s_season_desc_production' % slug,
            playlist_id, headers={
                'Origin': 'https://www.americastestkitchen.com',
                'X-Algolia-API-Key': '8d504d0099ed27c1b73708d22871d805',
                'X-Algolia-Application-Id': 'Y1FNZXUI30',
            }, query={
                'facetFilters': json.dumps(facet_filters),
                'attributesToRetrieve': 'description,search_%s_episode_number,search_document_date,search_url,title,search_atk_episode_season' % slug,
                'attributesToHighlight': '',
                'hitsPerPage': 1000,
            })

        def entries():
            for episode in (season_search.get('hits') or []):
                # search_url is always formatted like '/episode/123-title-of-episode'
                search_url = episode.get('search_url')
                if not search_url:
                    continue
                yield {
                    '_type': 'url',
                    'url': 'https://www.americastestkitchen.com%s%s' % (show_path, search_url),
                    'id': try_get(episode, lambda e: e['objectID'].rsplit('_', 1)[-1]),
                    'title': episode.get('title'),
                    'description': episode.get('description'),
                    'timestamp': unified_timestamp(episode.get('search_document_date')),
                    'season_number': season_number,
                    'episode_number': int_or_none(episode.get('search_%s_episode_number' % slug)),
                    'ie_key': AmericasTestKitchenIE.ie_key(),
                }

        return self.playlist_result(
            entries(), playlist_id, playlist_title)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tennistv.py
youtube_dl/extractor/tennistv.py
# coding: utf-8
from __future__ import unicode_literals

import json

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    unified_timestamp,
)


class TennisTVIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?tennistv\.com/videos/(?P<id>[-a-z0-9]+)'
    _TEST = {
        'url': 'https://www.tennistv.com/videos/indian-wells-2018-verdasco-fritz',
        'info_dict': {
            'id': 'indian-wells-2018-verdasco-fritz',
            'ext': 'mp4',
            'title': 'Fernando Verdasco v Taylor Fritz',
            'description': 're:^After his stunning victory.{174}$',
            'thumbnail': 'https://atp-prod.akamaized.net/api/images/v1/images/112831/landscape/1242/0',
            'timestamp': 1521017381,
            'upload_date': '20180314',
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'Requires email and password of a subscribed account',
    }
    _NETRC_MACHINE = 'tennistv'

    def _login(self):
        username, password = self._get_login_info()
        if not username or not password:
            raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)

        login_form = {
            'Email': username,
            'Password': password,
        }
        login_json = json.dumps(login_form).encode('utf-8')
        headers = {
            'content-type': 'application/json',
            'Referer': 'https://www.tennistv.com/login',
            'Origin': 'https://www.tennistv.com',
        }

        login_result = self._download_json(
            'https://www.tennistv.com/api/users/v1/login', None,
            note='Logging in',
            errnote='Login failed (wrong password?)',
            headers=headers,
            data=login_json)

        if login_result['error']['errorCode']:
            raise ExtractorError('Login failed, %s said: %r' % (self.IE_NAME, login_result['error']['errorMessage']))

        if login_result['entitlement'] != 'SUBSCRIBED':
            self.report_warning('%s may not be subscribed to %s.' % (username, self.IE_NAME))

        self._session_token = login_result['sessionToken']

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        internal_id = self._search_regex(r'video=([0-9]+)', webpage, 'internal video id')

        headers = {
            'Origin': 'https://www.tennistv.com',
            'authorization': 'ATP %s' % self._session_token,
            'content-type': 'application/json',
            'Referer': url,
        }
        check_data = {
            'videoID': internal_id,
            'VideoUrlType': 'HLSV3',
        }
        check_json = json.dumps(check_data).encode('utf-8')
        check_result = self._download_json(
            'https://www.tennistv.com/api/users/v1/entitlementchecknondiva',
            video_id, note='Checking video authorization',
            headers=headers, data=check_json)
        formats = self._extract_m3u8_formats(check_result['contentUrl'], video_id, ext='mp4')

        vdata_url = 'https://www.tennistv.com/api/channels/v1/de/none/video/%s' % video_id
        vdata = self._download_json(vdata_url, video_id)

        timestamp = unified_timestamp(vdata['timestamp'])
        thumbnail = vdata['video']['thumbnailUrl']
        description = vdata['displayText']['description']
        title = vdata['video']['title']

        series = vdata['tour']
        venue = vdata['displayText']['venue']
        round_str = vdata['seo']['round']

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'formats': formats,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'series': series,
            'season': venue,
            'episode': round_str,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/picarto.py
youtube_dl/extractor/picarto.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    js_to_json,
)


class PicartoIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www.)?picarto\.tv/(?P<id>[a-zA-Z0-9]+)'
    _TEST = {
        'url': 'https://picarto.tv/Setz',
        'info_dict': {
            'id': 'Setz',
            'ext': 'mp4',
            'title': 're:^Setz [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'timestamp': int,
            'is_live': True
        },
        'skip': 'Stream is offline',
    }

    @classmethod
    def suitable(cls, url):
        return False if PicartoVodIE.suitable(url) else super(PicartoIE, cls).suitable(url)

    def _real_extract(self, url):
        channel_id = self._match_id(url)

        data = self._download_json(
            'https://ptvintern.picarto.tv/ptvapi', channel_id, query={
                'query': '''{
  channel(name: "%s") {
    adult
    id
    online
    stream_name
    title
  }
  getLoadBalancerUrl(channel_name: "%s") {
    url
  }
}''' % (channel_id, channel_id),
            })['data']
        metadata = data['channel']

        if metadata.get('online') == 0:
            raise ExtractorError('Stream is offline', expected=True)
        title = metadata['title']

        cdn_data = self._download_json(
            data['getLoadBalancerUrl']['url'] + '/stream/json_' + metadata['stream_name'] + '.js',
            channel_id, 'Downloading load balancing info')

        formats = []
        for source in (cdn_data.get('source') or []):
            source_url = source.get('url')
            if not source_url:
                continue
            source_type = source.get('type')
            if source_type == 'html5/application/vnd.apple.mpegurl':
                formats.extend(self._extract_m3u8_formats(
                    source_url, channel_id, 'mp4', m3u8_id='hls', fatal=False))
            elif source_type == 'html5/video/mp4':
                formats.append({
                    'url': source_url,
                })
        self._sort_formats(formats)

        mature = metadata.get('adult')
        if mature is None:
            age_limit = None
        else:
            age_limit = 18 if mature is True else 0

        return {
            'id': channel_id,
            'title': self._live_title(title.strip()),
            'is_live': True,
            'channel': channel_id,
            'channel_id': metadata.get('id'),
            'channel_url': 'https://picarto.tv/%s' % channel_id,
            'age_limit': age_limit,
            'formats': formats,
        }


class PicartoVodIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www.)?picarto\.tv/videopopout/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://picarto.tv/videopopout/ArtofZod_2017.12.12.00.13.23.flv',
        'md5': '3ab45ba4352c52ee841a28fb73f2d9ca',
        'info_dict': {
            'id': 'ArtofZod_2017.12.12.00.13.23.flv',
            'ext': 'mp4',
            'title': 'ArtofZod_2017.12.12.00.13.23.flv',
            'thumbnail': r're:^https?://.*\.jpg'
        },
    }, {
        'url': 'https://picarto.tv/videopopout/Plague',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        vod_info = self._parse_json(
            self._search_regex(
                r'(?s)#vod-player["\']\s*,\s*(\{.+?\})\s*\)', webpage,
                video_id),
            video_id, transform_source=js_to_json)

        formats = self._extract_m3u8_formats(
            vod_info['vod'], video_id, 'mp4', entry_protocol='m3u8_native',
            m3u8_id='hls')
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': video_id,
            'thumbnail': vod_info.get('vodThumb'),
            'formats': formats,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/snotr.py
youtube_dl/extractor/snotr.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    parse_duration,
    parse_filesize,
    str_to_int,
)


class SnotrIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?snotr\.com/video/(?P<id>\d+)/([\w]+)'
    _TESTS = [{
        'url': 'http://www.snotr.com/video/13708/Drone_flying_through_fireworks',
        'info_dict': {
            'id': '13708',
            'ext': 'mp4',
            'title': 'Drone flying through fireworks!',
            'duration': 248,
            'filesize_approx': 40700000,
            'description': 'A drone flying through Fourth of July Fireworks',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
        'expected_warnings': ['description'],
    }, {
        'url': 'http://www.snotr.com/video/530/David_Letteman_-_George_W_Bush_Top_10',
        'info_dict': {
            'id': '530',
            'ext': 'mp4',
            'title': 'David Letteman - George W. Bush Top 10',
            'duration': 126,
            'filesize_approx': 8500000,
            'description': 'The top 10 George W. Bush moments, brought to you by David Letterman!',
            'thumbnail': r're:^https?://.*\.jpg$',
        }
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)

        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage)
        info_dict = self._parse_html5_media_entries(
            url, webpage, video_id, m3u8_entry_protocol='m3u8_native')[0]

        view_count = str_to_int(self._html_search_regex(
            r'<p[^>]*>\s*<strong[^>]*>Views:</strong>\s*<span[^>]*>([\d,\.]+)',
            webpage, 'view count', fatal=False))

        duration = parse_duration(self._html_search_regex(
            r'<p[^>]*>\s*<strong[^>]*>Length:</strong>\s*<span[^>]*>([\d:]+)',
            webpage, 'duration', fatal=False))

        filesize_approx = parse_filesize(self._html_search_regex(
            r'<p[^>]*>\s*<strong[^>]*>Filesize:</strong>\s*<span[^>]*>([^<]+)',
            webpage, 'filesize', fatal=False))

        info_dict.update({
            'id': video_id,
            'description': description,
            'title': title,
            'view_count': view_count,
            'duration': duration,
            'filesize_approx': filesize_approx,
        })

        return info_dict
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/myspass.py
youtube_dl/extractor/myspass.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    int_or_none,
    parse_duration,
    xpath_text,
)


class MySpassIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?myspass\.de/([^/]+/)*(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.myspass.de/myspass/shows/tvshows/absolute-mehrheit/Absolute-Mehrheit-vom-17022013-Die-Highlights-Teil-2--/11741/',
        'md5': '0b49f4844a068f8b33f4b7c88405862b',
        'info_dict': {
            'id': '11741',
            'ext': 'mp4',
            'description': 'Wer kann in die Fußstapfen von Wolfgang Kubicki treten und die Mehrheit der Zuschauer hinter sich versammeln? Wird vielleicht sogar die Absolute Mehrheit geknackt und der Jackpot von 200.000 Euro mit nach Hause genommen?',
            'title': '17.02.2013 - Die Highlights, Teil 2',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        metadata = self._download_xml(
            'http://www.myspass.de/myspass/includes/apps/video/getvideometadataxml.php?id=' + video_id,
            video_id)

        title = xpath_text(metadata, 'title', fatal=True)
        video_url = xpath_text(metadata, 'url_flv', 'download url', True)
        video_id_int = int(video_id)
        grps = re.search(r'/myspass2009/\d+/(\d+)/(\d+)/(\d+)/', video_url)
        for group in grps.groups() if grps else []:
            group_int = int(group)
            if group_int > video_id_int:
                video_url = video_url.replace(
                    group, compat_str(group_int // video_id_int))

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'thumbnail': xpath_text(metadata, 'imagePreview'),
            'description': xpath_text(metadata, 'description'),
            'duration': parse_duration(xpath_text(metadata, 'duration')),
            'series': xpath_text(metadata, 'format'),
            'season_number': int_or_none(xpath_text(metadata, 'season')),
            'season_id': xpath_text(metadata, 'season_id'),
            'episode': title,
            'episode_number': int_or_none(xpath_text(metadata, 'episode')),
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/adobepass.py
youtube_dl/extractor/adobepass.py
# coding: utf-8
from __future__ import unicode_literals

import re
import time
import xml.etree.ElementTree as etree

from .common import InfoExtractor
from ..compat import (
    compat_kwargs,
    compat_urlparse,
)
from ..utils import (
    unescapeHTML,
    urlencode_postdata,
    unified_timestamp,
    ExtractorError,
    NO_DEFAULT,
)


MSO_INFO = {
    'DTV': {
        'name': 'DIRECTV',
        'username_field': 'username',
        'password_field': 'password',
    },
    'ATT': {
        'name': 'AT&T U-verse',
        'username_field': 'userid',
        'password_field': 'password',
    },
    'ATTOTT': {
        'name': 'DIRECTV NOW',
        'username_field': 'email',
        'password_field': 'loginpassword',
    },
    'Rogers': {
        'name': 'Rogers',
        'username_field': 'UserName',
        'password_field': 'UserPassword',
    },
    'Comcast_SSO': {
        'name': 'Comcast XFINITY',
        'username_field': 'user',
        'password_field': 'passwd',
    },
    'TWC': {
        'name': 'Time Warner Cable | Spectrum',
        'username_field': 'Ecom_User_ID',
        'password_field': 'Ecom_Password',
    },
    'Brighthouse': {
        'name': 'Bright House Networks | Spectrum',
        'username_field': 'j_username',
        'password_field': 'j_password',
    },
    'Charter_Direct': {
        'name': 'Charter Spectrum',
        'username_field': 'IDToken1',
        'password_field': 'IDToken2',
    },
    'Verizon': {
        'name': 'Verizon FiOS',
        'username_field': 'IDToken1',
        'password_field': 'IDToken2',
    },
    'thr030': {'name': '3 Rivers Communications'},
    'com140': {'name': 'Access Montana'},
    'acecommunications': {'name': 'AcenTek'},
    'acm010': {'name': 'Acme Communications'},
    'ada020': {'name': 'Adams Cable Service'},
    'alb020': {'name': 'Albany Mutual Telephone'},
    'algona': {'name': 'Algona Municipal Utilities'},
    'allwest': {'name': 'All West Communications'},
    'all025': {'name': 'Allen\'s Communications'},
    'spl010': {'name': 'Alliance Communications'},
    'all070': {'name': 'ALLO Communications'},
    'alpine': {'name': 'Alpine Communications'},
    'hun015': {'name': 'American Broadband'},
    'nwc010': {'name': 'American Broadband Missouri'},
    'com130-02': {'name': 'American Community Networks'},
    'com130-01': {'name': 'American Warrior Networks'},
    'tom020': {'name': 'Amherst Telephone/Tomorrow Valley'},
    'tvc020': {'name': 'Andycable'},
    'arkwest': {'name': 'Arkwest Communications'},
    'art030': {'name': 'Arthur Mutual Telephone Company'},
    'arvig': {'name': 'Arvig'},
    'nttcash010': {'name': 'Ashland Home Net'},
    'astound': {'name': 'Astound (now Wave)'},
    'dix030': {'name': 'ATC Broadband'},
    'ara010': {'name': 'ATC Communications'},
    'she030-02': {'name': 'Ayersville Communications'},
    'baldwin': {'name': 'Baldwin Lightstream'},
    'bal040': {'name': 'Ballard TV'},
    'cit025': {'name': 'Bardstown Cable TV'},
    'bay030': {'name': 'Bay Country Communications'},
    'tel095': {'name': 'Beaver Creek Cooperative Telephone'},
    'bea020': {'name': 'Beaver Valley Cable'},
    'bee010': {'name': 'Bee Line Cable'},
    'wir030': {'name': 'Beehive Broadband'},
    'bra020': {'name': 'BELD'},
    'bel020': {'name': 'Bellevue Municipal Cable'},
    'vol040-01': {'name': 'Ben Lomand Connect / BLTV'},
    'bev010': {'name': 'BEVCOMM'},
    'big020': {'name': 'Big Sandy Broadband'},
    'ble020': {'name': 'Bledsoe Telephone Cooperative'},
    'bvt010': {'name': 'Blue Valley Tele-Communications'},
    'bra050': {'name': 'Brandenburg Telephone Co.'},
    'bte010': {'name': 'Bristol Tennessee Essential Services'},
    'annearundel': {'name': 'Broadstripe'},
    'btc010': {'name': 'BTC Communications'},
    'btc040': {'name': 'BTC Vision - Nahunta'},
    'bul010': {'name': 'Bulloch Telephone Cooperative'},
    'but010': {'name': 'Butler-Bremer Communications'},
    'tel160-csp': {'name': 'C Spire SNAP'},
    'csicable': {'name': 'Cable Services Inc.'},
    'cableamerica': {'name': 'CableAmerica'},
    'cab038': {'name': 'CableSouth Media 3'},
    'weh010-camtel': {'name': 'Cam-Tel Company'},
    'car030': {'name': 'Cameron Communications'},
    'canbytel': {'name': 'Canby Telcom'},
    'crt020': {'name': 'CapRock Tv'},
    'car050': {'name': 'Carnegie Cable'},
    'cas': {'name': 'CAS Cable'},
    'casscomm': {'name': 'CASSCOMM'},
    'mid180-02': {'name': 'Catalina Broadband Solutions'},
    'cccomm': {'name': 'CC Communications'},
    'nttccde010': {'name': 'CDE Lightband'},
    'cfunet': {'name': 'Cedar Falls Utilities'},
    'dem010-01': {'name': 'Celect-Bloomer Telephone Area'},
    'dem010-02': {'name': 'Celect-Bruce Telephone Area'},
    'dem010-03': {'name': 'Celect-Citizens Connected Area'},
    'dem010-04': {'name': 'Celect-Elmwood/Spring Valley Area'},
    'dem010-06': {'name': 'Celect-Mosaic Telecom'},
    'dem010-05': {'name': 'Celect-West WI Telephone Area'},
    'net010-02': {'name': 'Cellcom/Nsight Telservices'},
    'cen100': {'name': 'CentraCom'},
    'nttccst010': {'name': 'Central Scott / CSTV'},
    'cha035': {'name': 'Chaparral CableVision'},
    'cha050': {'name': 'Chariton Valley Communication Corporation, Inc.'},
    'cha060': {'name': 'Chatmoss Cablevision'},
    'nttcche010': {'name': 'Cherokee Communications'},
    'che050': {'name': 'Chesapeake Bay Communications'},
    'cimtel': {'name': 'Cim-Tel Cable, LLC.'},
    'cit180': {'name': 'Citizens Cablevision - Floyd, VA'},
    'cit210': {'name': 'Citizens Cablevision, Inc.'},
    'cit040': {'name': 'Citizens Fiber'},
    'cit250': {'name': 'Citizens Mutual'},
    'war040': {'name': 'Citizens Telephone Corporation'},
    'wat025': {'name': 'City Of Monroe'},
    'wadsworth': {'name': 'CityLink'},
    'nor100': {'name': 'CL Tel'},
    'cla010': {'name': 'Clarence Telephone and Cedar Communications'},
    'ser060': {'name': 'Clear Choice Communications'},
    'tac020': {'name': 'Click! Cable TV'},
    'war020': {'name': 'CLICK1.NET'},
    'cml010': {'name': 'CML Telephone Cooperative Association'},
    'cns': {'name': 'CNS'},
    'com160': {'name': 'Co-Mo Connect'},
    'coa020': {'name': 'Coast Communications'},
    'coa030': {'name': 'Coaxial Cable TV'},
    'mid055': {'name': 'Cobalt TV (Mid-State Community TV)'},
    'col070': {'name': 'Columbia Power & Water Systems'},
    'col080': {'name': 'Columbus Telephone'},
    'nor105': {'name': 'Communications 1 Cablevision, Inc.'},
    'com150': {'name': 'Community Cable & Broadband'},
    'com020': {'name': 'Community Communications Company'},
    'coy010': {'name': 'commZoom'},
    'com025': {'name': 'Complete Communication Services'},
    'cat020': {'name': 'Comporium'},
    'com071': {'name': 'ComSouth Telesys'},
    'consolidatedcable': {'name': 'Consolidated'},
    'conwaycorp': {'name': 'Conway Corporation'},
    'coo050': {'name': 'Coon Valley Telecommunications Inc'},
    'coo080': {'name': 'Cooperative Telephone Company'},
    'cpt010': {'name': 'CP-TEL'},
    'cra010': {'name': 'Craw-Kan Telephone'},
    'crestview': {'name': 'Crestview Cable Communications'},
    'cross': {'name': 'Cross TV'},
    'cro030': {'name': 'Crosslake Communications'},
    'ctc040': {'name': 'CTC - Brainerd MN'},
    'phe030': {'name': 'CTV-Beam - East Alabama'},
    'cun010': {'name': 'Cunningham Telephone & Cable'},
    'dpc010': {'name': 'D & P Communications'},
    'dak030': {'name': 'Dakota Central Telecommunications'},
    'nttcdel010': {'name': 'Delcambre Telephone LLC'},
    'tel160-del': {'name': 'Delta Telephone Company'},
    'sal040': {'name': 'DiamondNet'},
    'ind060-dc': {'name': 'Direct Communications'},
    'doy010': {'name': 'Doylestown Cable TV'},
    'dic010': {'name': 'DRN'},
    'dtc020': {'name': 'DTC'},
    'dtc010': {'name': 'DTC Cable (Delhi)'},
    'dum010': {'name': 'Dumont Telephone Company'},
    'dun010': {'name': 'Dunkerton Telephone Cooperative'},
    'cci010': {'name': 'Duo County Telecom'},
    'eagle': {'name': 'Eagle Communications'},
    'weh010-east': {'name': 'East Arkansas Cable TV'},
    'eatel': {'name': 'EATEL Video, LLC'},
    'ell010': {'name': 'ECTA'},
    'emerytelcom': {'name': 'Emery Telcom Video LLC'},
    'nor200': {'name': 'Empire Access'},
    'endeavor': {'name': 'Endeavor Communications'},
    'sun045': {'name': 'Enhanced Telecommunications Corporation'},
    'mid030': {'name': 'enTouch'},
    'epb020': {'name': 'EPB Smartnet'},
    'jea010': {'name': 'EPlus Broadband'},
    'com065': {'name': 'ETC'},
    'ete010': {'name': 'Etex Communications'},
    'fbc-tele': {'name': 'F&B Communications'},
    'fal010': {'name': 'Falcon Broadband'},
    'fam010': {'name': 'FamilyView CableVision'},
    'far020': {'name': 'Farmers Mutual Telephone Company'},
    'fay010': {'name': 'Fayetteville Public Utilities'},
    'sal060': {'name': 'fibrant'},
    'fid010': {'name': 'Fidelity Communications'},
    'for030': {'name': 'FJ Communications'},
    'fli020': {'name': 'Flint River Communications'},
    'far030': {'name': 'FMT - Jesup'},
    'foo010': {'name': 'Foothills Communications'},
    'for080': {'name': 'Forsyth CableNet'},
    'fbcomm': {'name': 'Frankfort Plant Board'},
    'tel160-fra': {'name': 'Franklin Telephone Company'},
    'nttcftc010': {'name': 'FTC'},
    'fullchannel': {'name': 'Full Channel, Inc.'},
    'gar040': {'name': 'Gardonville Cooperative Telephone Association'},
    'gbt010': {'name': 'GBT Communications, Inc.'},
    'tec010': {'name': 'Genuine Telecom'},
    'clr010': {'name': 'Giant Communications'},
    'gla010': {'name': 'Glasgow EPB'},
    'gle010': {'name': 'Glenwood Telecommunications'},
    'gra060': {'name': 'GLW Broadband Inc.'},
    'goldenwest': {'name': 'Golden West Cablevision'},
    'vis030': {'name': 'Grantsburg Telcom'},
    'gpcom': {'name': 'Great Plains Communications'},
    'gri010': {'name': 'Gridley Cable Inc'},
    'hbc010': {'name': 'H&B Cable Services'},
    'hae010': {'name': 'Haefele TV Inc.'},
    'htc010': {'name': 'Halstad Telephone Company'},
    'har005': {'name': 'Harlan Municipal Utilities'},
    'har020': {'name': 'Hart Communications'},
    'ced010': {'name': 'Hartelco TV'},
    'hea040': {'name': 'Heart of Iowa Communications Cooperative'},
    'htc020': {'name': 'Hickory Telephone Company'},
    'nttchig010': {'name': 'Highland Communication Services'},
    'hig030': {'name': 'Highland Media'},
    'spc010': {'name': 'Hilliary Communications'},
    'hin020': {'name': 'Hinton CATV Co.'},
    'hometel': {'name': 'HomeTel Entertainment, Inc.'},
    'hoodcanal': {'name': 'Hood Canal Communications'},
    'weh010-hope': {'name': 'Hope - Prescott Cable TV'},
    'horizoncable': {'name': 'Horizon Cable TV, Inc.'},
    'hor040': {'name': 'Horizon Chillicothe Telephone'},
    'htc030': {'name': 'HTC Communications Co. - IL'},
    'htccomm': {'name': 'HTC Communications, Inc. - IA'},
    'wal005': {'name': 'Huxley Communications'},
    'imon': {'name': 'ImOn Communications'},
    'ind040': {'name': 'Independence Telecommunications'},
    'rrc010': {'name': 'Inland Networks'},
    'stc020': {'name': 'Innovative Cable TV St Croix'},
    'car100': {'name': 'Innovative Cable TV St Thomas-St John'},
    'icc010': {'name': 'Inside Connect Cable'},
    'int100': {'name': 'Integra Telecom'},
    'int050': {'name': 'Interstate Telecommunications Coop'},
    'irv010': {'name': 'Irvine Cable'},
    'k2c010': {'name': 'K2 Communications'},
    'kal010': {'name': 'Kalida Telephone Company, Inc.'},
    'kal030': {'name': 'Kalona Cooperative Telephone Company'},
    'kmt010': {'name': 'KMTelecom'},
    'kpu010': {'name': 'KPU Telecommunications'},
    'kuh010': {'name': 'Kuhn Communications, Inc.'},
    'lak130': {'name': 'Lakeland Communications'},
    'lan010': {'name': 'Langco'},
    'lau020': {'name': 'Laurel Highland Total Communications, Inc.'},
    'leh010': {'name': 'Lehigh Valley Cooperative Telephone'},
    'bra010': {'name': 'Limestone Cable/Bracken Cable'},
    'loc020': {'name': 'LISCO'},
    'lit020': {'name': 'Litestream'},
    'tel140': {'name': 'LivCom'},
    'loc010': {'name': 'LocalTel Communications'},
    'weh010-longview': {'name': 'Longview - Kilgore Cable TV'},
    'lon030': {'name': 'Lonsdale Video Ventures, LLC'},
    'lns010': {'name': 'Lost Nation-Elwood Telephone Co.'},
    'nttclpc010': {'name': 'LPC Connect'},
    'lumos': {'name': 'Lumos Networks'},
    'madison': {'name': 'Madison Communications'},
    'mad030': {'name': 'Madison County Cable Inc.'},
    'nttcmah010': {'name': 'Mahaska Communication Group'},
    'mar010': {'name': 'Marne & Elk Horn Telephone Company'},
    'mcc040': {'name': 'McClure Telephone Co.'},
    'mctv': {'name': 'MCTV'},
    'merrimac': {'name': 'Merrimac Communications Ltd.'},
    'metronet': {'name': 'Metronet'},
    'mhtc': {'name': 'MHTC'},
    'midhudson': {'name': 'Mid-Hudson Cable'},
    'midrivers': {'name': 'Mid-Rivers Communications'},
    'mid045': {'name': 'Midstate Communications'},
    'mil080': {'name': 'Milford Communications'},
    'min030': {'name': 'MINET'},
    'nttcmin010': {'name': 'Minford TV'},
    'san040-02': {'name': 'Mitchell Telecom'},
    'mlg010': {'name': 'MLGC'},
    'mon060': {'name': 'Mon-Cre TVE'},
    'mou110': {'name': 'Mountain Telephone'},
    'mou050': {'name': 'Mountain Village Cable'},
    'mtacomm': {'name': 'MTA Communications, LLC'},
    'mtc010': {'name': 'MTC Cable'},
    'med040': {'name': 'MTC Technologies'},
    'man060': {'name': 'MTCC'},
    'mtc030': {'name': 'MTCO Communications'},
    'mul050': {'name': 'Mulberry Telecommunications'},
    'mur010': {'name': 'Murray Electric System'},
    'musfiber': {'name': 'MUS FiberNET'},
    'mpw': {'name': 'Muscatine Power & Water'},
    'nttcsli010': {'name': 'myEVTV.com'},
    'nor115': {'name': 'NCC'},
    'nor260': {'name': 'NDTC'},
    'nctc': {'name': 'Nebraska Central Telecom, Inc.'},
    'nel020': {'name': 'Nelsonville TV Cable'},
    'nem010': {'name': 'Nemont'},
    'new075': {'name': 'New Hope Telephone Cooperative'},
    'nor240': {'name': 'NICP'},
    'cic010': {'name': 'NineStar Connect'},
    'nktelco': {'name': 'NKTelco'},
    'nortex': {'name': 'Nortex Communications'},
    'nor140': {'name': 'North Central Telephone Cooperative'},
    'nor030': {'name': 'Northland Communications'},
    'nor075': {'name': 'Northwest Communications'},
    'nor125': {'name': 'Norwood Light Broadband'},
    'net010': {'name': 'Nsight Telservices'},
    'dur010': {'name': 'Ntec'},
    'nts010': {'name': 'NTS Communications'},
    'new045': {'name': 'NU-Telecom'},
    'nulink': {'name': 'NuLink'},
    'jam030': {'name': 'NVC'},
    'far035': {'name': 'OmniTel Communications'},
    'onesource': {'name': 'OneSource Communications'},
    'cit230': {'name': 'Opelika Power Services'},
    'daltonutilities': {'name': 'OptiLink'},
    'mid140': {'name': 'OPTURA'},
    'ote010': {'name': 'OTEC Communication Company'},
    'cci020': {'name': 'Packerland Broadband'},
    'pan010': {'name': 'Panora Telco/Guthrie Center Communications'},
    'otter': {'name': 'Park Region Telephone & Otter Tail Telcom'},
    'mid050': {'name': 'Partner Communications Cooperative'},
    'fib010': {'name': 'Pathway'},
    'paulbunyan': {'name': 'Paul Bunyan Communications'},
    'pem020': {'name': 'Pembroke Telephone Company'},
    'mck010': {'name': 'Peoples Rural Telephone Cooperative'},
    'pul010': {'name': 'PES Energize'},
    'phi010': {'name': 'Philippi Communications System'},
    'phonoscope': {'name': 'Phonoscope Cable'},
    'pin070': {'name': 'Pine Belt Communications, Inc.'},
    'weh010-pine': {'name': 'Pine Bluff Cable TV'},
    'pin060': {'name': 'Pineland Telephone Cooperative'},
    'cam010': {'name': 'Pinpoint Communications'},
    'pio060': {'name': 'Pioneer Broadband'},
    'pioncomm': {'name': 'Pioneer Communications'},
    'pioneer': {'name': 'Pioneer DTV'},
    'pla020': {'name': 'Plant TiftNet, Inc.'},
    'par010': {'name': 'PLWC'},
    'pro035': {'name': 'PMT'},
    'vik011': {'name': 'Polar Cablevision'},
    'pottawatomie': {'name': 'Pottawatomie Telephone Co.'},
    'premiercomm': {'name': 'Premier Communications'},
    'psc010': {'name': 'PSC'},
    'pan020': {'name': 'PTCI'},
    'qco010': {'name': 'QCOL'},
    'qua010': {'name': 'Quality Cablevision'},
    'rad010': {'name': 'Radcliffe Telephone Company'},
    'car040': {'name': 'Rainbow Communications'},
    'rai030': {'name': 'Rainier Connect'},
    'ral010': {'name': 'Ralls Technologies'},
    'rct010': {'name': 'RC Technologies'},
    'red040': {'name': 'Red River Communications'},
    'ree010': {'name': 'Reedsburg Utility Commission'},
    'mol010': {'name': 'Reliance Connects- Oregon'},
    'res020': {'name': 'Reserve Telecommunications'},
    'weh010-resort': {'name': 'Resort TV Cable'},
    'rld010': {'name': 'Richland Grant Telephone Cooperative, Inc.'},
    'riv030': {'name': 'River Valley Telecommunications Coop'},
    'rockportcable': {'name': 'Rock Port Cablevision'},
    'rsf010': {'name': 'RS Fiber'},
    'rtc': {'name': 'RTC Communication Corp'},
    'res040': {'name': 'RTC-Reservation Telephone Coop.'},
    'rte010': {'name': 'RTEC Communications'},
    'stc010': {'name': 'S&T'},
    'san020': {'name': 'San Bruno Cable TV'},
    'san040-01': {'name': 'Santel'},
    'sav010': {'name': 'SCI Broadband-Savage Communications Inc.'},
    'sco050': {'name': 'Scottsboro Electric Power Board'},
    'scr010': {'name': 'Scranton Telephone Company'},
    'selco': {'name': 'SELCO'},
    'she010': {'name': 'Shentel'},
    'she030': {'name': 'Sherwood Mutual Telephone Association, Inc.'},
    'ind060-ssc': {'name': 'Silver Star Communications'},
    'sjoberg': {'name': 'Sjoberg\'s Inc.'},
    'sou025': {'name': 'SKT'},
    'sky050': {'name': 'SkyBest TV'},
    'nttcsmi010': {'name': 'Smithville Communications'},
    'woo010': {'name': 'Solarus'},
    'sou075': {'name': 'South Central Rural Telephone Cooperative'},
    'sou065': {'name': 'South Holt Cablevision, Inc.'},
    'sou035': {'name': 'South Slope Cooperative Communications'},
    'spa020': {'name': 'Spanish Fork Community Network'},
    'spe010': {'name': 'Spencer Municipal Utilities'},
    'spi005': {'name': 'Spillway Communications, Inc.'},
    'srt010': {'name': 'SRT'},
    'cccsmc010': {'name': 'St. Maarten Cable TV'},
    'sta025': {'name': 'Star Communications'},
    'sco020': {'name': 'STE'},
    'uin010': {'name': 'STRATA Networks'},
    'sum010': {'name': 'Sumner Cable TV'},
    'pie010': {'name': 'Surry TV/PCSI TV'},
    'swa010': {'name': 'Swayzee Communications'},
    'sweetwater': {'name': 'Sweetwater Cable Television Co'},
    'weh010-talequah': {'name': 'Tahlequah Cable TV'},
    'tct': {'name': 'TCT'},
    'tel050': {'name': 'Tele-Media Company'},
    'com050': {'name': 'The Community Agency'},
    'thr020': {'name': 'Three River'},
    'cab140': {'name': 'Town & Country Technologies'},
    'tra010': {'name': 'Trans-Video'},
    'tre010': {'name': 'Trenton TV Cable Company'},
    'tcc': {'name': 'Tri County Communications Cooperative'},
    'tri025': {'name': 'TriCounty Telecom'},
    'tri110': {'name': 'TrioTel Communications, Inc.'},
    'tro010': {'name': 'Troy Cablevision, Inc.'
}, 'tsc': { 'name': 'TSC' }, 'cit220': { 'name': 'Tullahoma Utilities Board' }, 'tvc030': { 'name': 'TV Cable of Rensselaer' }, 'tvc015': { 'name': 'TVC Cable' }, 'cab180': { 'name': 'TVision' }, 'twi040': { 'name': 'Twin Lakes' }, 'tvtinc': { 'name': 'Twin Valley' }, 'uis010': { 'name': 'Union Telephone Company' }, 'uni110': { 'name': 'United Communications - TN' }, 'uni120': { 'name': 'United Services' }, 'uss020': { 'name': 'US Sonet' }, 'cab060': { 'name': 'USA Communications' }, 'she005': { 'name': 'USA Communications/Shellsburg, IA' }, 'val040': { 'name': 'Valley TeleCom Group' }, 'val025': { 'name': 'Valley Telecommunications' }, 'val030': { 'name': 'Valparaiso Broadband' }, 'cla050': { 'name': 'Vast Broadband' }, 'sul015': { 'name': 'Venture Communications Cooperative, Inc.' }, 'ver025': { 'name': 'Vernon Communications Co-op' }, 'weh010-vicksburg': { 'name': 'Vicksburg Video' }, 'vis070': { 'name': 'Vision Communications' }, 'volcanotel': { 'name': 'Volcano Vision, Inc.' }, 'vol040-02': { 'name': 'VolFirst / BLTV' }, 'ver070': { 'name': 'VTel' }, 'nttcvtx010': { 'name': 'VTX1' }, 'bci010-02': { 'name': 'Vyve Broadband' }, 'wab020': { 'name': 'Wabash Mutual Telephone' }, 'waitsfield': { 'name': 'Waitsfield Cable' }, 'wal010': { 'name': 'Walnut Communications' }, 'wavebroadband': { 'name': 'Wave' }, 'wav030': { 'name': 'Waverly Communications Utility' }, 'wbi010': { 'name': 'WBI' }, 'web020': { 'name': 'Webster-Calhoun Cooperative Telephone Association' }, 'wes005': { 'name': 'West Alabama TV Cable' }, 'carolinata': { 'name': 'West Carolina Communications' }, 'wct010': { 'name': 'West Central Telephone Association' }, 'wes110': { 'name': 'West River Cooperative Telephone Company' }, 'ani030': { 'name': 'WesTel Systems' }, 'westianet': { 'name': 'Western Iowa Networks' }, 'nttcwhi010': { 'name': 'Whidbey Telecom' }, 'weh010-white': { 'name': 'White County Cable TV' }, 'wes130': { 'name': 'Wiatel' }, 'wik010': { 'name': 'Wiktel' }, 'wil070': { 'name': 'Wilkes Communications, Inc./RiverStreet Networks' }, 'wil015': { 'name': 'Wilson Communications' }, 'win010': { 'name': 'Windomnet/SMBS' }, 'win090': { 'name': 'Windstream Cable TV' }, 'wcta': { 'name': 'Winnebago Cooperative Telecom Association' }, 'wtc010': { 'name': 'WTC' }, 'wil040': { 'name': 'WTC Communications, Inc.' 
}, 'wya010': { 'name': 'Wyandotte Cable' }, 'hin020-02': { 'name': 'X-Stream Services' }, 'xit010': { 'name': 'XIT Communications' }, 'yel010': { 'name': 'Yelcot Communications' }, 'mid180-01': { 'name': 'yondoo' }, 'cou060': { 'name': 'Zito Media' }, } class AdobePassIE(InfoExtractor): _SERVICE_PROVIDER_TEMPLATE = 'https://sp.auth.adobe.com/adobe-services/%s' _USER_AGENT = 'Mozilla/5.0 (X11; Linux i686; rv:47.0) Gecko/20100101 Firefox/47.0' _MVPD_CACHE = 'ap-mvpd' _DOWNLOADING_LOGIN_PAGE = 'Downloading Provider Login Page' def _download_webpage_handle(self, *args, **kwargs): headers = self.geo_verification_headers() headers.update(kwargs.get('headers', {})) kwargs['headers'] = headers return super(AdobePassIE, self)._download_webpage_handle( *args, **compat_kwargs(kwargs)) @staticmethod def _get_mvpd_resource(provider_id, title, guid, rating): channel = etree.Element('channel') channel_title = etree.SubElement(channel, 'title') channel_title.text = provider_id item = etree.SubElement(channel, 'item') resource_title = etree.SubElement(item, 'title') resource_title.text = title resource_guid = etree.SubElement(item, 'guid') resource_guid.text = guid resource_rating = etree.SubElement(item, 'media:rating') resource_rating.attrib = {'scheme': 'urn:v-chip'} resource_rating.text = rating return '<rss version="2.0" xmlns:media="http://search.yahoo.com/mrss/">' + etree.tostring(channel).decode() + '</rss>' def _extract_mvpd_auth(self, url, video_id, requestor_id, resource): def xml_text(xml_str, tag): return self._search_regex( '<%s>(.+?)</%s>' % (tag, tag), xml_str, tag) def is_expired(token, date_ele): token_expires = unified_timestamp(re.sub(r'[_ ]GMT', '', xml_text(token, date_ele))) return token_expires and token_expires <= int(time.time()) def post_form(form_page_res, note, data={}): form_page, urlh = form_page_res post_url = self._html_search_regex(r'<form[^>]+action=(["\'])(?P<url>.+?)\1', form_page, 'post url', group='url') if not re.match(r'https?://', post_url): post_url = compat_urlparse.urljoin(urlh.geturl(), post_url) form_data = self._hidden_inputs(form_page) form_data.update(data) return self._download_webpage_handle( post_url, video_id, note, data=urlencode_postdata(form_data), headers={ 'Content-Type': 'application/x-www-form-urlencoded', }) def raise_mvpd_required(): raise ExtractorError( 'This video is only available for users of participating TV providers. 
' 'Use --ap-mso to specify Adobe Pass Multiple-system operator Identifier ' 'and --ap-username and --ap-password or --netrc to provide account credentials.', expected=True) def extract_redirect_url(html, url=None, fatal=False): # TODO: eliminate code duplication with generic extractor and move # redirection code into _download_webpage_handle REDIRECT_REGEX = r'[0-9]{,2};\s*(?:URL|url)=\'?([^\'"]+)' redirect_url = self._search_regex( r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")' r'(?:[a-z-]+="[^"]+"\s+)*?content="%s' % REDIRECT_REGEX, html, 'meta refresh redirect', default=NO_DEFAULT if fatal else None, fatal=fatal) if not redirect_url: return None if url: redirect_url = compat_urlparse.urljoin(url, unescapeHTML(redirect_url)) return redirect_url mvpd_headers = { 'ap_42': 'anonymous', 'ap_11': 'Linux i686', 'ap_z': self._USER_AGENT, 'User-Agent': self._USER_AGENT, } guid = xml_text(resource, 'guid') if '<' in resource else resource count = 0 while count < 2: requestor_info = self._downloader.cache.load(self._MVPD_CACHE, requestor_id) or {} authn_token = requestor_info.get('authn_token') if authn_token and is_expired(authn_token, 'simpleTokenExpires'): authn_token = None if not authn_token: # TODO add support for other TV Providers mso_id = self._downloader.params.get('ap_mso') if not mso_id: raise_mvpd_required() username, password = self._get_login_info('ap_username', 'ap_password', mso_id) if not username or not password: raise_mvpd_required() mso_info = MSO_INFO[mso_id] provider_redirect_page_res = self._download_webpage_handle( self._SERVICE_PROVIDER_TEMPLATE % 'authenticate/saml', video_id, 'Downloading Provider Redirect Page', query={ 'noflash': 'true', 'mso_id': mso_id, 'requestor_id': requestor_id,
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
true
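Note: the _get_mvpd_resource() helper in the (truncated) AdobePassIE content above builds the Adobe Pass resource payload as a hand-rolled RSS fragment. A minimal standalone sketch of the same construction using only the standard library (the provider/title/guid/rating values below are made up for illustration):

import xml.etree.ElementTree as etree

def build_mvpd_resource(provider_id, title, guid, rating):
    # Mirrors _get_mvpd_resource above: a channel/item RSS fragment with a
    # media:rating element carrying the urn:v-chip rating scheme.
    channel = etree.Element('channel')
    etree.SubElement(channel, 'title').text = provider_id
    item = etree.SubElement(channel, 'item')
    etree.SubElement(item, 'title').text = title
    etree.SubElement(item, 'guid').text = guid
    rating_el = etree.SubElement(item, 'media:rating', scheme='urn:v-chip')
    rating_el.text = rating
    return ('<rss version="2.0" xmlns:media="http://search.yahoo.com/mrss/">'
            + etree.tostring(channel).decode() + '</rss>')

print(build_mvpd_resource('SomeProvider', 'Some Episode', 'abc123', 'TV-14'))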
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/muenchentv.py
youtube_dl/extractor/muenchentv.py
# coding: utf-8 from __future__ import unicode_literals import json from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, js_to_json, ) class MuenchenTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?muenchen\.tv/livestream' IE_DESC = 'münchen.tv' _TEST = { 'url': 'http://www.muenchen.tv/livestream/', 'info_dict': { 'id': '5334', 'display_id': 'live', 'ext': 'mp4', 'title': 're:^münchen.tv-Livestream [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'is_live': True, 'thumbnail': r're:^https?://.*\.jpg$' }, 'params': { 'skip_download': True, } } def _real_extract(self, url): display_id = 'live' webpage = self._download_webpage(url, display_id) title = self._live_title(self._og_search_title(webpage)) data_js = self._search_regex( r'(?s)\nplaylist:\s*(\[.*?}\]),', webpage, 'playlist configuration') data_json = js_to_json(data_js) data = json.loads(data_json)[0] video_id = data['mediaid'] thumbnail = data.get('image') formats = [] for format_num, s in enumerate(data['sources']): ext = determine_ext(s['file'], None) label_str = s.get('label') if label_str is None: label_str = '_%d' % format_num if ext is None: format_id = label_str else: format_id = '%s-%s' % (ext, label_str) formats.append({ 'url': s['file'], 'tbr': int_or_none(s.get('label')), 'ext': 'mp4', 'format_id': format_id, 'preference': -100 if '.smil' in s['file'] else 0, }) self._sort_formats(formats) return { 'id': video_id, 'display_id': display_id, 'title': title, 'formats': formats, 'is_live': True, 'thumbnail': thumbnail, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
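Note: MuenchenTVIE scrapes a jwplayer config out of inline JavaScript and runs it through js_to_json() before json.loads(). A sketch of the format-list handling that follows, with the page data replaced by a hypothetical, already-valid JSON literal:

import json

data = json.loads(
    '[{"mediaid": "5334", "sources": ['
    '{"file": "https://example.invalid/live.smil", "label": "720"},'
    '{"file": "https://example.invalid/live.mp4"}]}]')[0]

formats = []
for format_num, s in enumerate(data['sources']):
    label = s.get('label')
    if label is None:
        label = '_%d' % format_num
    formats.append({
        'url': s['file'],
        'format_id': label,
        # .smil entries are demoted, as in the extractor above
        'preference': -100 if '.smil' in s['file'] else 0,
    })
print(formats)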
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/karrierevideos.py
youtube_dl/extractor/karrierevideos.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_urlparse from ..utils import ( fix_xml_ampersands, float_or_none, xpath_with_ns, xpath_text, ) class KarriereVideosIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?karrierevideos\.at(?:/[^/]+)+/(?P<id>[^/]+)' _TESTS = [{ 'url': 'http://www.karrierevideos.at/berufsvideos/mittlere-hoehere-schulen/altenpflegerin', 'info_dict': { 'id': '32c91', 'ext': 'flv', 'title': 'AltenpflegerIn', 'description': 'md5:dbadd1259fde2159a9b28667cb664ae2', 'thumbnail': r're:^http://.*\.png', }, 'params': { # rtmp download 'skip_download': True, } }, { # broken ampersands 'url': 'http://www.karrierevideos.at/orientierung/vaeterkarenz-und-neue-chancen-fuer-muetter-baby-was-nun', 'info_dict': { 'id': '5sniu', 'ext': 'flv', 'title': 'Väterkarenz und neue Chancen für Mütter - "Baby - was nun?"', 'description': 'md5:97092c6ad1fd7d38e9d6a5fdeb2bcc33', 'thumbnail': r're:^http://.*\.png', }, 'params': { # rtmp download 'skip_download': True, } }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = (self._html_search_meta('title', webpage, default=None) or self._search_regex(r'<h1 class="title">([^<]+)</h1>', webpage, 'video title')) video_id = self._search_regex( r'/config/video/(.+?)\.xml', webpage, 'video id') # Server returns malformed headers # Force Accept-Encoding: * to prevent gzipped results playlist = self._download_xml( 'http://www.karrierevideos.at/player-playlist.xml.php?p=%s' % video_id, video_id, transform_source=fix_xml_ampersands, headers={'Accept-Encoding': '*'}) NS_MAP = { 'jwplayer': 'http://developer.longtailvideo.com/trac/wiki/FlashFormats' } def ns(path): return xpath_with_ns(path, NS_MAP) item = playlist.find('./tracklist/item') video_file = xpath_text( item, ns('./jwplayer:file'), 'video url', fatal=True) streamer = xpath_text( item, ns('./jwplayer:streamer'), 'streamer', fatal=True) uploader = xpath_text( item, ns('./jwplayer:author'), 'uploader') duration = float_or_none( xpath_text(item, ns('./jwplayer:duration'), 'duration')) description = self._html_search_regex( r'(?s)<div class="leadtext">(.+?)</div>', webpage, 'description') thumbnail = self._html_search_meta( 'thumbnail', webpage, 'thumbnail') if thumbnail: thumbnail = compat_urlparse.urljoin(url, thumbnail) return { 'id': video_id, 'url': streamer.replace('rtmpt', 'rtmp'), 'play_path': 'mp4:%s' % video_file, 'ext': 'flv', 'title': title, 'description': description, 'thumbnail': thumbnail, 'uploader': uploader, 'duration': duration, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
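Note: the xpath_with_ns()/xpath_text() calls in KarriereVideosIE boil down to namespace-aware XPath lookups. The same lookups with plain xml.etree (the XML blob here is a made-up stand-in for the real playlist):

import xml.etree.ElementTree as ET

NS_MAP = {'jwplayer': 'http://developer.longtailvideo.com/trac/wiki/FlashFormats'}

playlist = ET.fromstring(
    '<playlist xmlns:jwplayer="http://developer.longtailvideo.com/trac/wiki/FlashFormats">'
    '<tracklist><item>'
    '<jwplayer:file>mp4/clip.mp4</jwplayer:file>'
    '<jwplayer:streamer>rtmpt://example.invalid/app</jwplayer:streamer>'
    '</item></tracklist></playlist>')

item = playlist.find('./tracklist/item')
video_file = item.findtext('jwplayer:file', namespaces=NS_MAP)
streamer = item.findtext('jwplayer:streamer', namespaces=NS_MAP)
# The extractor then downgrades rtmpt to rtmp and prefixes the play path
print(streamer.replace('rtmpt', 'rtmp'), 'mp4:%s' % video_file)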
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/turbo.py
youtube_dl/extractor/turbo.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_str from ..utils import ( ExtractorError, int_or_none, qualities, xpath_text, ) class TurboIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?turbo\.fr/videos-voiture/(?P<id>[0-9]+)-' _API_URL = 'http://www.turbo.fr/api/tv/xml.php?player_generique=player_generique&id={0:}' _TEST = { 'url': 'http://www.turbo.fr/videos-voiture/454443-turbo-du-07-09-2014-renault-twingo-3-bentley-continental-gt-speed-ces-guide-achat-dacia.html', 'md5': '33f4b91099b36b5d5a91f84b5bcba600', 'info_dict': { 'id': '454443', 'ext': 'mp4', 'duration': 3715, 'title': 'Turbo du 07/09/2014 : Renault Twingo 3, Bentley Continental GT Speed, CES, Guide Achat Dacia... ', 'description': 'Turbo du 07/09/2014 : Renault Twingo 3, Bentley Continental GT Speed, CES, Guide Achat Dacia...', 'thumbnail': r're:^https?://.*\.jpg$', } } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) playlist = self._download_xml(self._API_URL.format(video_id), video_id) item = playlist.find('./channel/item') if item is None: raise ExtractorError('Playlist item was not found', expected=True) title = xpath_text(item, './title', 'title') duration = int_or_none(xpath_text(item, './durate', 'duration')) thumbnail = xpath_text(item, './visuel_clip', 'thumbnail') description = self._html_search_meta('description', webpage) formats = [] get_quality = qualities(['3g', 'sd', 'hq']) for child in item: m = re.search(r'url_video_(?P<quality>.+)', child.tag) if m: quality = compat_str(m.group('quality')) formats.append({ 'format_id': quality, 'url': child.text, 'quality': get_quality(quality), }) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'duration': duration, 'thumbnail': thumbnail, 'description': description, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
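Note: TurboIE ranks formats with the qualities() helper from youtube_dl.utils. A plausible minimal reimplementation (a sketch, not the canonical source), showing why 'hq' outranks 'sd':

def qualities(quality_ids):
    # Later entries in the preference list get higher scores;
    # unknown ids sort below everything.
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q

get_quality = qualities(['3g', 'sd', 'hq'])
assert get_quality('hq') > get_quality('sd') > get_quality('3g')
assert get_quality('unknown') == -1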
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/mwave.py
youtube_dl/extractor/mwave.py
from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_str from ..utils import ( int_or_none, parse_duration, ) class MwaveIE(InfoExtractor): _VALID_URL = r'https?://mwave\.interest\.me/(?:[^/]+/)?mnettv/videodetail\.m\?searchVideoDetailVO\.clip_id=(?P<id>[0-9]+)' _URL_TEMPLATE = 'http://mwave.interest.me/mnettv/videodetail.m?searchVideoDetailVO.clip_id=%s' _TESTS = [{ 'url': 'http://mwave.interest.me/mnettv/videodetail.m?searchVideoDetailVO.clip_id=168859', # md5 is unstable 'info_dict': { 'id': '168859', 'ext': 'flv', 'title': '[M COUNTDOWN] SISTAR - SHAKE IT', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'M COUNTDOWN', 'duration': 206, 'view_count': int, } }, { 'url': 'http://mwave.interest.me/en/mnettv/videodetail.m?searchVideoDetailVO.clip_id=176199', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) vod_info = self._download_json( 'http://mwave.interest.me/onair/vod_info.m?vodtype=CL&sectorid=&endinfo=Y&id=%s' % video_id, video_id, 'Download vod JSON') formats = [] for num, cdn_info in enumerate(vod_info['cdn']): stream_url = cdn_info.get('url') if not stream_url: continue stream_name = cdn_info.get('name') or compat_str(num) f4m_stream = self._download_json( stream_url, video_id, 'Download %s stream JSON' % stream_name) f4m_url = f4m_stream.get('fileurl') if not f4m_url: continue formats.extend( self._extract_f4m_formats(f4m_url + '&hdcore=3.0.3', video_id, f4m_id=stream_name)) self._sort_formats(formats) return { 'id': video_id, 'title': vod_info['title'], 'thumbnail': vod_info.get('cover'), 'uploader': vod_info.get('program_title'), 'duration': parse_duration(vod_info.get('time')), 'view_count': int_or_none(vod_info.get('hit')), 'formats': formats, } class MwaveMeetGreetIE(InfoExtractor): _VALID_URL = r'https?://mwave\.interest\.me/(?:[^/]+/)?meetgreet/view/(?P<id>\d+)' _TESTS = [{ 'url': 'http://mwave.interest.me/meetgreet/view/256', 'info_dict': { 'id': '173294', 'ext': 'flv', 'title': '[MEET&GREET] Park BoRam', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'Mwave', 'duration': 3634, 'view_count': int, } }, { 'url': 'http://mwave.interest.me/en/meetgreet/view/256', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) clip_id = self._html_search_regex( r'<iframe[^>]+src="/mnettv/ifr_clip\.m\?searchVideoDetailVO\.clip_id=(\d+)', webpage, 'clip ID') clip_url = MwaveIE._URL_TEMPLATE % clip_id return self.url_result(clip_url, 'Mwave', clip_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
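Note: MwaveIE resolves formats in two hops: each CDN entry in the VOD JSON points at another JSON document whose fileurl is the actual f4m manifest. A sketch of that hop with the network call stubbed out (URLs and the download_json callable are hypothetical):

def resolve_f4m_urls(vod_info, download_json):
    urls = []
    for cdn_info in vod_info.get('cdn') or []:
        stream_url = cdn_info.get('url')
        if not stream_url:
            continue
        f4m_stream = download_json(stream_url)
        f4m_url = f4m_stream.get('fileurl')
        if f4m_url:
            # hdcore query parameter is appended exactly as above
            urls.append(f4m_url + '&hdcore=3.0.3')
    return urls

print(resolve_f4m_urls(
    {'cdn': [{'name': 'cdn1', 'url': 'https://example.invalid/stream.json'}]},
    lambda url: {'fileurl': 'https://example.invalid/manifest.f4m?v=1'}))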
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/eitb.py
youtube_dl/extractor/eitb.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( float_or_none, int_or_none, parse_iso8601, sanitized_Request, ) class EitbIE(InfoExtractor): IE_NAME = 'eitb.tv' _VALID_URL = r'https?://(?:www\.)?eitb\.tv/(?:eu/bideoa|es/video)/[^/]+/\d+/(?P<id>\d+)' _TEST = { 'url': 'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/4104995148001/4090227752001/lasa-y-zabala-30-anos/', 'md5': 'edf4436247185adee3ea18ce64c47998', 'info_dict': { 'id': '4090227752001', 'ext': 'mp4', 'title': '60 minutos (Lasa y Zabala, 30 años)', 'description': 'Programa de reportajes de actualidad.', 'duration': 3996.76, 'timestamp': 1381789200, 'upload_date': '20131014', 'tags': list, }, } def _real_extract(self, url): video_id = self._match_id(url) video = self._download_json( 'http://mam.eitb.eus/mam/REST/ServiceMultiweb/Video/MULTIWEBTV/%s/' % video_id, video_id, 'Downloading video JSON') media = video['web_media'][0] formats = [] for rendition in media['RENDITIONS']: video_url = rendition.get('PMD_URL') if not video_url: continue tbr = float_or_none(rendition.get('ENCODING_RATE'), 1000) format_id = 'http' if tbr: format_id += '-%d' % int(tbr) formats.append({ 'url': rendition['PMD_URL'], 'format_id': format_id, 'width': int_or_none(rendition.get('FRAME_WIDTH')), 'height': int_or_none(rendition.get('FRAME_HEIGHT')), 'tbr': tbr, }) hls_url = media.get('HLS_SURL') if hls_url: request = sanitized_Request( 'http://mam.eitb.eus/mam/REST/ServiceMultiweb/DomainRestrictedSecurity/TokenAuth/', headers={'Referer': url}) token_data = self._download_json( request, video_id, 'Downloading auth token', fatal=False) if token_data: token = token_data.get('token') if token: formats.extend(self._extract_m3u8_formats( '%s?hdnts=%s' % (hls_url, token), video_id, m3u8_id='hls', fatal=False)) hds_url = media.get('HDS_SURL') if hds_url: formats.extend(self._extract_f4m_formats( '%s?hdcore=3.7.0' % hds_url.replace('euskalsvod', 'euskalvod'), video_id, f4m_id='hds', fatal=False)) self._sort_formats(formats) return { 'id': video_id, 'title': media.get('NAME_ES') or media.get('name') or media['NAME_EU'], 'description': media.get('SHORT_DESC_ES') or video.get('desc_group') or media.get('SHORT_DESC_EU'), 'thumbnail': media.get('STILL_URL') or media.get('THUMBNAIL_URL'), 'duration': float_or_none(media.get('LENGTH'), 1000), 'timestamp': parse_iso8601(media.get('BROADCST_DATE'), ' '), 'tags': media.get('TAGS'), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
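Note: the HLS branch in EitbIE only works once a short-lived token is appended as an hdnts query parameter. A sketch of that URL assembly with the token fetch injected as a callable (all values hypothetical):

def tokenized_hls_url(hls_url, fetch_token):
    # fetch_token stands in for the Referer-authenticated JSON request
    # against the TokenAuth endpoint in the extractor above.
    token = (fetch_token() or {}).get('token')
    return '%s?hdnts=%s' % (hls_url, token) if token else None

print(tokenized_hls_url('https://example.invalid/master.m3u8',
                        lambda: {'token': 'abc123'}))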
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/m6.py
youtube_dl/extractor/m6.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor


class M6IE(InfoExtractor):
    IE_NAME = 'm6'
    _VALID_URL = r'https?://(?:www\.)?m6\.fr/[^/]+/videos/(?P<id>\d+)-[^\.]+\.html'

    _TEST = {
        'url': 'http://www.m6.fr/emission-les_reines_du_shopping/videos/11323908-emeline_est_la_reine_du_shopping_sur_le_theme_ma_fete_d_8217_anniversaire.html',
        'md5': '242994a87de2c316891428e0176bcb77',
        'info_dict': {
            'id': '11323908',
            'ext': 'mp4',
            'title': 'Emeline est la Reine du Shopping sur le thème « Ma fête d’anniversaire ! »',
            'description': 'md5:1212ae8fb4b7baa4dc3886c5676007c2',
            'duration': 100,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        return self.url_result('6play:%s' % video_id, 'SixPlay', video_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
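Note: M6IE is a pure delegation extractor: it never downloads anything itself, it just hands a 6play: URL to SixPlayIE via url_result(). A sketch of the dictionary that helper effectively returns (field names as used throughout these extractors):

def url_result(url, ie=None, video_id=None):
    info = {'_type': 'url', 'url': url}
    if ie is not None:
        info['ie_key'] = ie
    if video_id is not None:
        info['id'] = video_id
    return info

print(url_result('6play:11323908', 'SixPlay', '11323908'))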
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/frontendmasters.py
youtube_dl/extractor/frontendmasters.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_str, compat_urlparse, ) from ..utils import ( ExtractorError, parse_duration, url_or_none, urlencode_postdata, ) class FrontendMastersBaseIE(InfoExtractor): _API_BASE = 'https://api.frontendmasters.com/v1/kabuki' _LOGIN_URL = 'https://frontendmasters.com/login/' _NETRC_MACHINE = 'frontendmasters' _QUALITIES = { 'low': {'width': 480, 'height': 360}, 'mid': {'width': 1280, 'height': 720}, 'high': {'width': 1920, 'height': 1080} } def _real_initialize(self): self._login() def _login(self): (username, password) = self._get_login_info() if username is None: return login_page = self._download_webpage( self._LOGIN_URL, None, 'Downloading login page') login_form = self._hidden_inputs(login_page) login_form.update({ 'username': username, 'password': password }) post_url = self._search_regex( r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page, 'post_url', default=self._LOGIN_URL, group='url') if not post_url.startswith('http'): post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url) response = self._download_webpage( post_url, None, 'Logging in', data=urlencode_postdata(login_form), headers={'Content-Type': 'application/x-www-form-urlencoded'}) # Successful login if any(p in response for p in ( 'wp-login.php?action=logout', '>Logout')): return error = self._html_search_regex( r'class=(["\'])(?:(?!\1).)*\bMessageAlert\b(?:(?!\1).)*\1[^>]*>(?P<error>[^<]+)<', response, 'error message', default=None, group='error') if error: raise ExtractorError('Unable to login: %s' % error, expected=True) raise ExtractorError('Unable to log in') class FrontendMastersPageBaseIE(FrontendMastersBaseIE): def _download_course(self, course_name, url): return self._download_json( '%s/courses/%s' % (self._API_BASE, course_name), course_name, 'Downloading course JSON', headers={'Referer': url}) @staticmethod def _extract_chapters(course): chapters = [] lesson_elements = course.get('lessonElements') if isinstance(lesson_elements, list): chapters = [url_or_none(e) for e in lesson_elements if url_or_none(e)] return chapters @staticmethod def _extract_lesson(chapters, lesson_id, lesson): title = lesson.get('title') or lesson_id display_id = lesson.get('slug') description = lesson.get('description') thumbnail = lesson.get('thumbnail') chapter_number = None index = lesson.get('index') element_index = lesson.get('elementIndex') if (isinstance(index, int) and isinstance(element_index, int) and index < element_index): chapter_number = element_index - index chapter = (chapters[chapter_number - 1] if chapter_number - 1 < len(chapters) else None) duration = None timestamp = lesson.get('timestamp') if isinstance(timestamp, compat_str): mobj = re.search( r'(?P<start>\d{1,2}:\d{1,2}:\d{1,2})\s*-(?P<end>\s*\d{1,2}:\d{1,2}:\d{1,2})', timestamp) if mobj: duration = parse_duration(mobj.group('end')) - parse_duration( mobj.group('start')) return { '_type': 'url_transparent', 'url': 'frontendmasters:%s' % lesson_id, 'ie_key': FrontendMastersIE.ie_key(), 'id': lesson_id, 'display_id': display_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'chapter': chapter, 'chapter_number': chapter_number, } class FrontendMastersIE(FrontendMastersBaseIE): _VALID_URL = r'(?:frontendmasters:|https?://api\.frontendmasters\.com/v\d+/kabuki/video/)(?P<id>[^/]+)' _TESTS = [{ 'url': 'https://api.frontendmasters.com/v1/kabuki/video/a2qogef6ba', 'md5': '7f161159710d6b7016a4f4af6fcb05e2', 
'info_dict': { 'id': 'a2qogef6ba', 'ext': 'mp4', 'title': 'a2qogef6ba', }, 'skip': 'Requires FrontendMasters account credentials', }, { 'url': 'frontendmasters:a2qogef6ba', 'only_matching': True, }] def _real_extract(self, url): lesson_id = self._match_id(url) source_url = '%s/video/%s/source' % (self._API_BASE, lesson_id) formats = [] for ext in ('webm', 'mp4'): for quality in ('low', 'mid', 'high'): resolution = self._QUALITIES[quality].copy() format_id = '%s-%s' % (ext, quality) format_url = self._download_json( source_url, lesson_id, 'Downloading %s source JSON' % format_id, query={ 'f': ext, 'r': resolution['height'], }, headers={ 'Referer': url, }, fatal=False)['url'] if not format_url: continue f = resolution.copy() f.update({ 'url': format_url, 'ext': ext, 'format_id': format_id, }) formats.append(f) self._sort_formats(formats) subtitles = { 'en': [{ 'url': '%s/transcripts/%s.vtt' % (self._API_BASE, lesson_id), }] } return { 'id': lesson_id, 'title': lesson_id, 'formats': formats, 'subtitles': subtitles } class FrontendMastersLessonIE(FrontendMastersPageBaseIE): _VALID_URL = r'https?://(?:www\.)?frontendmasters\.com/courses/(?P<course_name>[^/]+)/(?P<lesson_name>[^/]+)' _TEST = { 'url': 'https://frontendmasters.com/courses/web-development/tools', 'info_dict': { 'id': 'a2qogef6ba', 'display_id': 'tools', 'ext': 'mp4', 'title': 'Tools', 'description': 'md5:82c1ea6472e88ed5acd1829fe992e4f7', 'thumbnail': r're:^https?://.*\.jpg$', 'chapter': 'Introduction', 'chapter_number': 1, }, 'params': { 'skip_download': True, }, 'skip': 'Requires FrontendMasters account credentials', } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) course_name, lesson_name = mobj.group('course_name', 'lesson_name') course = self._download_course(course_name, url) lesson_id, lesson = next( (video_id, data) for video_id, data in course['lessonData'].items() if data.get('slug') == lesson_name) chapters = self._extract_chapters(course) return self._extract_lesson(chapters, lesson_id, lesson) class FrontendMastersCourseIE(FrontendMastersPageBaseIE): _VALID_URL = r'https?://(?:www\.)?frontendmasters\.com/courses/(?P<id>[^/]+)' _TEST = { 'url': 'https://frontendmasters.com/courses/web-development/', 'info_dict': { 'id': 'web-development', 'title': 'Introduction to Web Development', 'description': 'md5:9317e6e842098bf725d62360e52d49a6', }, 'playlist_count': 81, 'skip': 'Requires FrontendMasters account credentials', } @classmethod def suitable(cls, url): return False if FrontendMastersLessonIE.suitable(url) else super( FrontendMastersBaseIE, cls).suitable(url) def _real_extract(self, url): course_name = self._match_id(url) course = self._download_course(course_name, url) chapters = self._extract_chapters(course) lessons = sorted( course['lessonData'].values(), key=lambda data: data['index']) entries = [] for lesson in lessons: lesson_name = lesson.get('slug') if not lesson_name: continue lesson_id = lesson.get('hash') or lesson.get('statsId') entries.append(self._extract_lesson(chapters, lesson_id, lesson)) title = course.get('title') description = course.get('description') return self.playlist_result(entries, course_name, title, description)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
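Note: FrontendMastersIE enumerates a fixed ext-by-quality grid, one source-JSON request per cell. The grid walk in isolation, with the network request stubbed out:

QUALITIES = {
    'low': {'width': 480, 'height': 360},
    'mid': {'width': 1280, 'height': 720},
    'high': {'width': 1920, 'height': 1080},
}

for ext in ('webm', 'mp4'):
    for quality in ('low', 'mid', 'high'):
        f = QUALITIES[quality].copy()
        f.update({'ext': ext, 'format_id': '%s-%s' % (ext, quality)})
        # In the extractor, f['height'] becomes the "r" query parameter
        print(f)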
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/charlierose.py
youtube_dl/extractor/charlierose.py
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import remove_end


class CharlieRoseIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?charlierose\.com/(?:video|episode)(?:s|/player)/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://charlierose.com/videos/27996',
        'md5': 'fda41d49e67d4ce7c2411fd2c4702e09',
        'info_dict': {
            'id': '27996',
            'ext': 'mp4',
            'title': 'Remembering Zaha Hadid',
            'thumbnail': r're:^https?://.*\.jpg\?\d+',
            'description': 'We revisit past conversations with Zaha Hadid, in memory of the world renowned Iraqi architect.',
            'subtitles': {
                'en': [{
                    'ext': 'vtt',
                }],
            },
        },
    }, {
        'url': 'https://charlierose.com/videos/27996',
        'only_matching': True,
    }, {
        'url': 'https://charlierose.com/episodes/30887?autoplay=true',
        'only_matching': True,
    }]

    _PLAYER_BASE = 'https://charlierose.com/video/player/%s'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(self._PLAYER_BASE % video_id, video_id)

        title = remove_end(self._og_search_title(webpage), ' - Charlie Rose')

        info_dict = self._parse_html5_media_entries(
            self._PLAYER_BASE % video_id, webpage, video_id,
            m3u8_entry_protocol='m3u8_native')[0]

        self._sort_formats(info_dict['formats'])
        self._remove_duplicate_formats(info_dict['formats'])

        info_dict.update({
            'id': video_id,
            'title': title,
            'thumbnail': self._og_search_thumbnail(webpage),
            'description': self._og_search_description(webpage),
        })

        return info_dict
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
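Note: remove_end(), used above to strip the site suffix from og:title, is a one-liner in youtube_dl.utils; a sketch:

def remove_end(s, end):
    return s[:-len(end)] if s and s.endswith(end) else s

print(remove_end('Remembering Zaha Hadid - Charlie Rose', ' - Charlie Rose'))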
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/funk.py
youtube_dl/extractor/funk.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from .nexx import NexxIE
from ..utils import (
    int_or_none,
    str_or_none,
)


class FunkIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?funk\.net/(?:channel|playlist)/[^/]+/(?P<display_id>[0-9a-z-]+)-(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.funk.net/channel/ba-793/die-lustigsten-instrumente-aus-dem-internet-teil-2-1155821',
        'md5': '8dd9d9ab59b4aa4173b3197f2ea48e81',
        'info_dict': {
            'id': '1155821',
            'ext': 'mp4',
            'title': 'Die LUSTIGSTEN INSTRUMENTE aus dem Internet - Teil 2',
            'description': 'md5:a691d0413ef4835588c5b03ded670c1f',
            'timestamp': 1514507395,
            'upload_date': '20171229',
        },
    }, {
        'url': 'https://www.funk.net/playlist/neuesteVideos/kameras-auf-dem-fusion-festival-1618699',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id, nexx_id = re.match(self._VALID_URL, url).groups()
        video = self._download_json(
            'https://www.funk.net/api/v4.0/videos/' + nexx_id, nexx_id)
        return {
            '_type': 'url_transparent',
            'url': 'nexx:741:' + nexx_id,
            'ie_key': NexxIE.ie_key(),
            'id': nexx_id,
            'title': video.get('title'),
            'description': video.get('description'),
            'duration': int_or_none(video.get('duration')),
            'channel_id': str_or_none(video.get('channelId')),
            'display_id': display_id,
            'tags': video.get('tags'),
            'thumbnail': video.get('imageUrlLandscape'),
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
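Note: FunkIE returns a url_transparent result, so the fields it supplies are layered over whatever NexxIE later extracts. An illustrative (not literal) sketch of that merge, with hypothetical values:

nexx_info = {'id': '1155821', 'ext': 'mp4', 'title': 'nexx-side title'}
funk_fields = {'title': 'Die LUSTIGSTEN INSTRUMENTE aus dem Internet - Teil 2',
               'display_id': 'die-lustigsten-instrumente-aus-dem-internet-teil-2'}

merged = dict(nexx_info)
merged.update((k, v) for k, v in funk_fields.items() if v is not None)
print(merged['title'])  # funk.net metadata wins where present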
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/pluralsight.py
youtube_dl/extractor/pluralsight.py
from __future__ import unicode_literals import collections import json import os import random import re from .common import InfoExtractor from ..compat import ( compat_str, compat_urlparse, ) from ..utils import ( dict_get, ExtractorError, float_or_none, int_or_none, parse_duration, qualities, srt_subtitles_timecode, try_get, update_url_query, urlencode_postdata, ) class PluralsightBaseIE(InfoExtractor): _API_BASE = 'https://app.pluralsight.com' _GRAPHQL_EP = '%s/player/api/graphql' % _API_BASE _GRAPHQL_HEADERS = { 'Content-Type': 'application/json;charset=UTF-8', } _GRAPHQL_COURSE_TMPL = ''' query BootstrapPlayer { rpc { bootstrapPlayer { profile { firstName lastName email username userHandle authed isAuthed plan } course(courseId: "%s") { name title courseHasCaptions translationLanguages { code name } supportsWideScreenVideoFormats timestamp modules { name title duration formattedDuration author authorized clips { authorized clipId duration formattedDuration id index moduleIndex moduleTitle name title watched } } } } } }''' def _download_course(self, course_id, url, display_id): try: return self._download_course_rpc(course_id, url, display_id) except ExtractorError: # Old API fallback return self._download_json( 'https://app.pluralsight.com/player/user/api/v1/player/payload', display_id, data=urlencode_postdata({'courseId': course_id}), headers={'Referer': url}) def _download_course_rpc(self, course_id, url, display_id): response = self._download_json( self._GRAPHQL_EP, display_id, data=json.dumps({ 'query': self._GRAPHQL_COURSE_TMPL % course_id, 'variables': {} }).encode('utf-8'), headers=self._GRAPHQL_HEADERS) course = try_get( response, lambda x: x['data']['rpc']['bootstrapPlayer']['course'], dict) if course: return course raise ExtractorError( '%s said: %s' % (self.IE_NAME, response['error']['message']), expected=True) class PluralsightIE(PluralsightBaseIE): IE_NAME = 'pluralsight' _VALID_URL = r'https?://(?:(?:www|app)\.)?pluralsight\.com/(?:training/)?player\?' 
_LOGIN_URL = 'https://app.pluralsight.com/id/' _NETRC_MACHINE = 'pluralsight' _TESTS = [{ 'url': 'http://www.pluralsight.com/training/player?author=mike-mckeown&name=hosting-sql-server-windows-azure-iaas-m7-mgmt&mode=live&clip=3&course=hosting-sql-server-windows-azure-iaas', 'md5': '4d458cf5cf4c593788672419a8dd4cf8', 'info_dict': { 'id': 'hosting-sql-server-windows-azure-iaas-m7-mgmt-04', 'ext': 'mp4', 'title': 'Demo Monitoring', 'duration': 338, }, 'skip': 'Requires pluralsight account credentials', }, { 'url': 'https://app.pluralsight.com/training/player?course=angularjs-get-started&author=scott-allen&name=angularjs-get-started-m1-introduction&clip=0&mode=live', 'only_matching': True, }, { # available without pluralsight account 'url': 'http://app.pluralsight.com/training/player?author=scott-allen&name=angularjs-get-started-m1-introduction&mode=live&clip=0&course=angularjs-get-started', 'only_matching': True, }, { 'url': 'https://app.pluralsight.com/player?course=ccna-intro-networking&author=ross-bagurdes&name=ccna-intro-networking-m06&clip=0', 'only_matching': True, }] GRAPHQL_VIEWCLIP_TMPL = ''' query viewClip { viewClip(input: { author: "%(author)s", clipIndex: %(clipIndex)d, courseName: "%(courseName)s", includeCaptions: %(includeCaptions)s, locale: "%(locale)s", mediaType: "%(mediaType)s", moduleName: "%(moduleName)s", quality: "%(quality)s" }) { urls { url cdn rank source }, status } }''' def _real_initialize(self): self._login() def _login(self): username, password = self._get_login_info() if username is None: return login_page = self._download_webpage( self._LOGIN_URL, None, 'Downloading login page') login_form = self._hidden_inputs(login_page) login_form.update({ 'Username': username, 'Password': password, }) post_url = self._search_regex( r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page, 'post url', default=self._LOGIN_URL, group='url') if not post_url.startswith('http'): post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url) response = self._download_webpage( post_url, None, 'Logging in', data=urlencode_postdata(login_form), headers={'Content-Type': 'application/x-www-form-urlencoded'}) error = self._search_regex( r'<span[^>]+class="field-validation-error"[^>]*>([^<]+)</span>', response, 'error message', default=None) if error: raise ExtractorError('Unable to login: %s' % error, expected=True) if all(not re.search(p, response) for p in ( r'__INITIAL_STATE__', r'["\']currentUser["\']', # new layout? r'>\s*Sign out\s*<')): BLOCKED = 'Your account has been blocked due to suspicious activity' if BLOCKED in response: raise ExtractorError( 'Unable to login: %s' % BLOCKED, expected=True) MUST_AGREE = 'To continue using Pluralsight, you must agree to' if any(p in response for p in (MUST_AGREE, '>Disagree<', '>Agree<')): raise ExtractorError( 'Unable to login: %s some documents. Go to pluralsight.com, ' 'log in and agree with what Pluralsight requires.' 
% MUST_AGREE, expected=True) raise ExtractorError('Unable to log in') def _get_subtitles(self, author, clip_idx, clip_id, lang, name, duration, video_id): captions = None if clip_id: captions = self._download_json( '%s/transcript/api/v1/caption/json/%s/%s' % (self._API_BASE, clip_id, lang), video_id, 'Downloading captions JSON', 'Unable to download captions JSON', fatal=False) if not captions: captions_post = { 'a': author, 'cn': int(clip_idx), 'lc': lang, 'm': name, } captions = self._download_json( '%s/player/retrieve-captions' % self._API_BASE, video_id, 'Downloading captions JSON', 'Unable to download captions JSON', fatal=False, data=json.dumps(captions_post).encode('utf-8'), headers={'Content-Type': 'application/json;charset=utf-8'}) if captions: return { lang: [{ 'ext': 'json', 'data': json.dumps(captions), }, { 'ext': 'srt', 'data': self._convert_subtitles(duration, captions), }] } @staticmethod def _convert_subtitles(duration, subs): srt = '' TIME_OFFSET_KEYS = ('displayTimeOffset', 'DisplayTimeOffset') TEXT_KEYS = ('text', 'Text') for num, current in enumerate(subs): current = subs[num] start, text = ( float_or_none(dict_get(current, TIME_OFFSET_KEYS, skip_false_values=False)), dict_get(current, TEXT_KEYS)) if start is None or text is None: continue end = duration if num == len(subs) - 1 else float_or_none( dict_get(subs[num + 1], TIME_OFFSET_KEYS, skip_false_values=False)) if end is None: continue srt += os.linesep.join( ( '%d' % num, '%s --> %s' % ( srt_subtitles_timecode(start), srt_subtitles_timecode(end)), text, os.linesep, )) return srt def _real_extract(self, url): qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query) author = qs.get('author', [None])[0] name = qs.get('name', [None])[0] clip_idx = qs.get('clip', [None])[0] course_name = qs.get('course', [None])[0] if any(not f for f in (author, name, clip_idx, course_name,)): raise ExtractorError('Invalid URL', expected=True) display_id = '%s-%s' % (name, clip_idx) course = self._download_course(course_name, url, display_id) collection = course['modules'] clip = None for module_ in collection: if name in (module_.get('moduleName'), module_.get('name')): for clip_ in module_.get('clips', []): clip_index = clip_.get('clipIndex') if clip_index is None: clip_index = clip_.get('index') if clip_index is None: continue if compat_str(clip_index) == clip_idx: clip = clip_ break if not clip: raise ExtractorError('Unable to resolve clip') title = clip['title'] clip_id = clip.get('clipName') or clip.get('name') or clip['clipId'] QUALITIES = { 'low': {'width': 640, 'height': 480}, 'medium': {'width': 848, 'height': 640}, 'high': {'width': 1024, 'height': 768}, 'high-widescreen': {'width': 1280, 'height': 720}, } QUALITIES_PREFERENCE = ('low', 'medium', 'high', 'high-widescreen',) quality_key = qualities(QUALITIES_PREFERENCE) AllowedQuality = collections.namedtuple('AllowedQuality', ['ext', 'qualities']) ALLOWED_QUALITIES = ( AllowedQuality('webm', ['high', ]), AllowedQuality('mp4', ['low', 'medium', 'high', ]), ) # Some courses also offer widescreen resolution for high quality (see # https://github.com/ytdl-org/youtube-dl/issues/7766) widescreen = course.get('supportsWideScreenVideoFormats') is True best_quality = 'high-widescreen' if widescreen else 'high' if widescreen: for allowed_quality in ALLOWED_QUALITIES: allowed_quality.qualities.append(best_quality) # In order to minimize the number of calls to ViewClip API and reduce # the probability of being throttled or banned by Pluralsight we will request # only single 
format until formats listing was explicitly requested. if self._downloader.params.get('listformats', False): allowed_qualities = ALLOWED_QUALITIES else: def guess_allowed_qualities(): req_format = self._downloader.params.get('format') or 'best' req_format_split = req_format.split('-', 1) if len(req_format_split) > 1: req_ext, req_quality = req_format_split req_quality = '-'.join(req_quality.split('-')[:2]) for allowed_quality in ALLOWED_QUALITIES: if req_ext == allowed_quality.ext and req_quality in allowed_quality.qualities: return (AllowedQuality(req_ext, (req_quality, )), ) req_ext = 'webm' if self._downloader.params.get('prefer_free_formats') else 'mp4' return (AllowedQuality(req_ext, (best_quality, )), ) allowed_qualities = guess_allowed_qualities() formats = [] for ext, qualities_ in allowed_qualities: for quality in qualities_: f = QUALITIES[quality].copy() clip_post = { 'author': author, 'includeCaptions': 'false', 'clipIndex': int(clip_idx), 'courseName': course_name, 'locale': 'en', 'moduleName': name, 'mediaType': ext, 'quality': '%dx%d' % (f['width'], f['height']), } format_id = '%s-%s' % (ext, quality) try: viewclip = self._download_json( self._GRAPHQL_EP, display_id, 'Downloading %s viewclip graphql' % format_id, data=json.dumps({ 'query': self.GRAPHQL_VIEWCLIP_TMPL % clip_post, 'variables': {} }).encode('utf-8'), headers=self._GRAPHQL_HEADERS)['data']['viewClip'] except ExtractorError: # Still works but most likely will go soon viewclip = self._download_json( '%s/video/clips/viewclip' % self._API_BASE, display_id, 'Downloading %s viewclip JSON' % format_id, fatal=False, data=json.dumps(clip_post).encode('utf-8'), headers={'Content-Type': 'application/json;charset=utf-8'}) # Pluralsight tracks multiple sequential calls to ViewClip API and start # to return 429 HTTP errors after some time (see # https://github.com/ytdl-org/youtube-dl/pull/6989). Moreover it may even lead # to account ban (see https://github.com/ytdl-org/youtube-dl/issues/6842). # To somewhat reduce the probability of these consequences # we will sleep random amount of time before each call to ViewClip. self._sleep( random.randint(5, 10), display_id, '%(video_id)s: Waiting for %(timeout)s seconds to avoid throttling') if not viewclip: continue clip_urls = viewclip.get('urls') if not isinstance(clip_urls, list): continue for clip_url_data in clip_urls: clip_url = clip_url_data.get('url') if not clip_url: continue cdn = clip_url_data.get('cdn') clip_f = f.copy() clip_f.update({ 'url': clip_url, 'ext': ext, 'format_id': '%s-%s' % (format_id, cdn) if cdn else format_id, 'quality': quality_key(quality), 'source_preference': int_or_none(clip_url_data.get('rank')), }) formats.append(clip_f) self._sort_formats(formats) duration = int_or_none( clip.get('duration')) or parse_duration(clip.get('formattedDuration')) # TODO: other languages? 
subtitles = self.extract_subtitles( author, clip_idx, clip.get('clipId'), 'en', name, duration, display_id) return { 'id': clip_id, 'title': title, 'duration': duration, 'creator': author, 'formats': formats, 'subtitles': subtitles, } class PluralsightCourseIE(PluralsightBaseIE): IE_NAME = 'pluralsight:course' _VALID_URL = r'https?://(?:(?:www|app)\.)?pluralsight\.com/(?:library/)?courses/(?P<id>[^/]+)' _TESTS = [{ # Free course from Pluralsight Starter Subscription for Microsoft TechNet # https://offers.pluralsight.com/technet?loc=zTS3z&prod=zOTprodz&tech=zOttechz&prog=zOTprogz&type=zSOz&media=zOTmediaz&country=zUSz 'url': 'http://www.pluralsight.com/courses/hosting-sql-server-windows-azure-iaas', 'info_dict': { 'id': 'hosting-sql-server-windows-azure-iaas', 'title': 'Hosting SQL Server in Microsoft Azure IaaS Fundamentals', 'description': 'md5:61b37e60f21c4b2f91dc621a977d0986', }, 'playlist_count': 31, }, { # available without pluralsight account 'url': 'https://www.pluralsight.com/courses/angularjs-get-started', 'only_matching': True, }, { 'url': 'https://app.pluralsight.com/library/courses/understanding-microsoft-azure-amazon-aws/table-of-contents', 'only_matching': True, }] def _real_extract(self, url): course_id = self._match_id(url) # TODO: PSM cookie course = self._download_course(course_id, url, course_id) title = course['title'] course_name = course['name'] course_data = course['modules'] description = course.get('description') or course.get('shortDescription') entries = [] for num, module in enumerate(course_data, 1): author = module.get('author') module_name = module.get('name') if not author or not module_name: continue for clip in module.get('clips', []): clip_index = int_or_none(clip.get('index')) if clip_index is None: continue clip_url = update_url_query( '%s/player' % self._API_BASE, query={ 'mode': 'live', 'course': course_name, 'author': author, 'name': module_name, 'clip': clip_index, }) entries.append({ '_type': 'url_transparent', 'url': clip_url, 'ie_key': PluralsightIE.ie_key(), 'chapter': module.get('title'), 'chapter_number': num, 'chapter_id': module.get('moduleRef'), }) return self.playlist_result(entries, course_id, title, description)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
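Note: _convert_subtitles() above leans on srt_subtitles_timecode() from youtube_dl.utils to render SRT cue times; a sketch of that helper, assuming the usual hh:mm:ss,mmm layout:

def srt_subtitles_timecode(seconds):
    return '%02d:%02d:%02d,%03d' % (
        seconds / 3600, (seconds % 3600) / 60, seconds % 60,
        (seconds % 1) * 1000)

print(srt_subtitles_timecode(3723.5))  # 01:02:03,500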
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/cbs.py
youtube_dl/extractor/cbs.py
from __future__ import unicode_literals from .theplatform import ThePlatformFeedIE from ..utils import ( ExtractorError, int_or_none, find_xpath_attr, xpath_element, xpath_text, update_url_query, ) class CBSBaseIE(ThePlatformFeedIE): def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'): subtitles = {} for k, ext in [('sMPTE-TTCCURL', 'tt'), ('ClosedCaptionURL', 'ttml'), ('webVTTCaptionURL', 'vtt')]: cc_e = find_xpath_attr(smil, self._xpath_ns('.//param', namespace), 'name', k) if cc_e is not None: cc_url = cc_e.get('value') if cc_url: subtitles.setdefault(subtitles_lang, []).append({ 'ext': ext, 'url': cc_url, }) return subtitles class CBSIE(CBSBaseIE): _VALID_URL = r'(?:cbs:|https?://(?:www\.)?(?:(?:cbs|paramountplus)\.com/shows/[^/]+/video|colbertlateshow\.com/(?:video|podcasts))/)(?P<id>[\w-]+)' _TESTS = [{ 'url': 'http://www.cbs.com/shows/garth-brooks/video/_u7W953k6la293J7EPTd9oHkSPs6Xn6_/connect-chat-feat-garth-brooks/', 'info_dict': { 'id': '_u7W953k6la293J7EPTd9oHkSPs6Xn6_', 'ext': 'mp4', 'title': 'Connect Chat feat. Garth Brooks', 'description': 'Connect with country music singer Garth Brooks, as he chats with fans on Wednesday November 27, 2013. Be sure to tune in to Garth Brooks: Live from Las Vegas, Friday November 29, at 9/8c on CBS!', 'duration': 1495, 'timestamp': 1385585425, 'upload_date': '20131127', 'uploader': 'CBSI-NEW', }, 'params': { # m3u8 download 'skip_download': True, }, '_skip': 'Blocked outside the US', }, { 'url': 'http://colbertlateshow.com/video/8GmB0oY0McANFvp2aEffk9jZZZ2YyXxy/the-colbeard/', 'only_matching': True, }, { 'url': 'http://www.colbertlateshow.com/podcasts/dYSwjqPs_X1tvbV_P2FcPWRa_qT6akTC/in-the-bad-room-with-stephen/', 'only_matching': True, }, { 'url': 'https://www.paramountplus.com/shows/all-rise/video/QmR1WhNkh1a_IrdHZrbcRklm176X_rVc/all-rise-space/', 'only_matching': True, }] def _extract_video_info(self, content_id, site='cbs', mpx_acc=2198311517): items_data = self._download_xml( 'http://can.cbs.com/thunder/player/videoPlayerService.php', content_id, query={'partner': site, 'contentId': content_id}) video_data = xpath_element(items_data, './/item') title = xpath_text(video_data, 'videoTitle', 'title', True) tp_path = 'dJ5BDC/media/guid/%d/%s' % (mpx_acc, content_id) tp_release_url = 'http://link.theplatform.com/s/' + tp_path asset_types = [] subtitles = {} formats = [] last_e = None for item in items_data.findall('.//item'): asset_type = xpath_text(item, 'assetType') if not asset_type or asset_type in asset_types or 'HLS_FPS' in asset_type or 'DASH_CENC' in asset_type: continue asset_types.append(asset_type) query = { 'mbr': 'true', 'assetTypes': asset_type, } if asset_type.startswith('HLS') or asset_type in ('OnceURL', 'StreamPack'): query['formats'] = 'MPEG4,M3U' elif asset_type in ('RTMP', 'WIFI', '3G'): query['formats'] = 'MPEG4,FLV' try: tp_formats, tp_subtitles = self._extract_theplatform_smil( update_url_query(tp_release_url, query), content_id, 'Downloading %s SMIL data' % asset_type) except ExtractorError as e: last_e = e continue formats.extend(tp_formats) subtitles = self._merge_subtitles(subtitles, tp_subtitles) if last_e and not formats: raise last_e self._sort_formats(formats) info = self._extract_theplatform_metadata(tp_path, content_id) info.update({ 'id': content_id, 'title': title, 'series': xpath_text(video_data, 'seriesTitle'), 'season_number': int_or_none(xpath_text(video_data, 'seasonNumber')), 'episode_number': int_or_none(xpath_text(video_data, 'episodeNumber')), 'duration': 
int_or_none(xpath_text(video_data, 'videoLength'), 1000), 'thumbnail': xpath_text(video_data, 'previewImageURL'), 'formats': formats, 'subtitles': subtitles, }) return info def _real_extract(self, url): content_id = self._match_id(url) return self._extract_video_info(content_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
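Note: CBSBaseIE._parse_smil_subtitles() scans SMIL <param> elements for three known caption keys. The same scan over a namespace-free toy document (the real SMIL is namespaced, which is what find_xpath_attr/_xpath_ns handle above):

import xml.etree.ElementTree as ET

smil = ET.fromstring(
    '<smil><head>'
    '<param name="ClosedCaptionURL" value="https://example.invalid/cc.ttml"/>'
    '</head></smil>')

subtitles = {}
for k, ext in [('sMPTE-TTCCURL', 'tt'), ('ClosedCaptionURL', 'ttml'),
               ('webVTTCaptionURL', 'vtt')]:
    cc_e = smil.find(".//param[@name='%s']" % k)
    if cc_e is not None and cc_e.get('value'):
        subtitles.setdefault('en', []).append({'ext': ext, 'url': cc_e.get('value')})
print(subtitles)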
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/postprocessor/common.py
youtube_dl/postprocessor/common.py
from __future__ import unicode_literals

import os

from ..utils import (
    PostProcessingError,
    cli_configuration_args,
    encodeFilename,
)


class PostProcessor(object):
    """Post Processor class.

    PostProcessor objects can be added to downloaders with their
    add_post_processor() method. When the downloader has finished a
    successful download, it will take its internal chain of PostProcessors
    and start calling the run() method on each one of them, first with an
    initial argument and then with the returned value of the previous
    PostProcessor. The chain is stopped if one of them ever returns None
    or the end of the chain is reached.

    PostProcessor objects follow a "mutual registration" process similar
    to InfoExtractor objects.

    Optionally PostProcessor can use a list of additional command-line
    arguments with self._configuration_args.
    """

    _downloader = None

    def __init__(self, downloader=None):
        self._downloader = downloader

    def set_downloader(self, downloader):
        """Sets the downloader for this PP."""
        self._downloader = downloader

    def run(self, information):
        """Run the PostProcessor.

        The "information" argument is a dictionary like the ones
        composed by InfoExtractors. The only difference is that this
        one has an extra field called "filepath" that points to the
        downloaded file.

        This method returns a tuple: the first element is a list of
        files that can be deleted, and the second is the updated
        information.

        In addition, this method may raise a PostProcessingError
        exception if post processing fails.
        """
        return [], information  # by default, keep file and do nothing

    def try_utime(self, path, atime, mtime, errnote='Cannot update utime of file'):
        try:
            os.utime(encodeFilename(path), (atime, mtime))
        except Exception:
            self._downloader.report_warning(errnote)

    def _configuration_args(self, default=[]):
        return cli_configuration_args(self._downloader.params, 'postprocessor_args', default)


class AudioConversionError(PostProcessingError):
    pass
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
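Note: the docstring above describes the downloader's post-processing chain: each run() returns (files_to_delete, info) and the info dict feeds the next PostProcessor. A toy chain that exercises that contract:

class LoggingPP(object):
    # Minimal PostProcessor-shaped object; only the run() contract matters.
    def run(self, information):
        print('post-processing %s' % information.get('filepath'))
        return [], information

info = {'filepath': 'video.mp4'}
for pp in (LoggingPP(), LoggingPP()):
    files_to_delete, info = pp.run(info)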
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/postprocessor/ffmpeg.py
youtube_dl/postprocessor/ffmpeg.py
from __future__ import unicode_literals

import os
import subprocess
import time
import re

from .common import AudioConversionError, PostProcessor

from ..compat import compat_open as open
from ..utils import (
    encodeArgument,
    encodeFilename,
    get_exe_version,
    is_outdated_version,
    PostProcessingError,
    prepend_extension,
    process_communicate_or_kill,
    shell_quote,
    subtitles_filename,
    dfxp2srt,
    ISO639Utils,
    replace_extension,
)


EXT_TO_OUT_FORMATS = {
    'aac': 'adts',
    'flac': 'flac',
    'm4a': 'ipod',
    'mka': 'matroska',
    'mkv': 'matroska',
    'mpg': 'mpeg',
    'ogv': 'ogg',
    'ts': 'mpegts',
    'wma': 'asf',
    'wmv': 'asf',
}
ACODECS = {
    'mp3': 'libmp3lame',
    'aac': 'aac',
    'flac': 'flac',
    'm4a': 'aac',
    'opus': 'libopus',
    'vorbis': 'libvorbis',
    'wav': None,
}


class FFmpegPostProcessorError(PostProcessingError):
    pass


class FFmpegPostProcessor(PostProcessor):
    def __init__(self, downloader=None):
        PostProcessor.__init__(self, downloader)
        self._determine_executables()

    def check_version(self):
        if not self.available:
            raise FFmpegPostProcessorError('ffmpeg or avconv not found. Please install one.')

        required_version = '10-0' if self.basename == 'avconv' else '1.0'
        if is_outdated_version(
                self._versions[self.basename], required_version):
            warning = 'Your copy of %s is outdated, update %s to version %s or newer if you encounter any errors.' % (
                self.basename, self.basename, required_version)
            if self._downloader:
                self._downloader.report_warning(warning)

    @staticmethod
    def get_versions(downloader=None):
        return FFmpegPostProcessor(downloader)._versions

    def _determine_executables(self):
        # ordered to match prefer_ffmpeg!
        convs = ['ffmpeg', 'avconv']
        probes = ['ffprobe', 'avprobe']
        prefer_ffmpeg = True

        programs = convs + probes

        def get_ffmpeg_version(path):
            ver = get_exe_version(path, args=['-version'])
            if ver:
                regexs = [
                    r'(?:\d+:)?([0-9.]+)-[0-9]+ubuntu[0-9.]+$',  # Ubuntu, see [1]
                    r'n([0-9.]+)$',  # Arch Linux
                    # 1. http://www.ducea.com/2006/06/17/ubuntu-package-version-naming-explanation/
                ]
                for regex in regexs:
                    mobj = re.match(regex, ver)
                    if mobj:
                        ver = mobj.group(1)
            return ver

        self.basename = None
        self.probe_basename = None

        self._paths = None
        self._versions = None
        location = None
        if self._downloader:
            prefer_ffmpeg = self._downloader.params.get('prefer_ffmpeg', True)
            location = self._downloader.params.get('ffmpeg_location')
        if location is not None:
            if not os.path.exists(location):
                self._downloader.report_warning(
                    'ffmpeg-location %s does not exist! '
                    'Continuing without avconv/ffmpeg.' % (location))
                self._versions = {}
                return
            elif not os.path.isdir(location):
                basename = os.path.splitext(os.path.basename(location))[0]
                if basename not in programs:
                    self._downloader.report_warning(
                        'Cannot identify executable %s, its basename should be one of %s. '
                        'Continuing without avconv/ffmpeg.'
                        % (location, ', '.join(programs)))
                    self._versions = {}
                    return None
                location = os.path.dirname(os.path.abspath(location))
                if basename in ('ffmpeg', 'ffprobe'):
                    prefer_ffmpeg = True

        self._paths = dict(
            (p, p if location is None else os.path.join(location, p))
            for p in programs)
        self._versions = dict(
            x for x in (
                (p, get_ffmpeg_version(self._paths[p])) for p in programs)
            if x[1] is not None)

        basenames = [None, None]
        for i, progs in enumerate((convs, probes)):
            for p in progs[::-1 if prefer_ffmpeg is False else 1]:
                if self._versions.get(p):
                    basenames[i] = p
                    break
        self.basename, self.probe_basename = basenames

    @property
    def available(self):
        return self.basename is not None

    @property
    def executable(self):
        return self._paths[self.basename]

    @property
    def probe_available(self):
        return self.probe_basename is not None

    @property
    def probe_executable(self):
        return self._paths[self.probe_basename]

    def get_audio_codec(self, path):
        if not self.probe_available and not self.available:
            raise PostProcessingError('ffprobe/avprobe and ffmpeg/avconv not found. Please install one.')
        try:
            if self.probe_available:
                cmd = [
                    encodeFilename(self.probe_executable, True),
                    encodeArgument('-show_streams')]
            else:
                cmd = [
                    encodeFilename(self.executable, True),
                    encodeArgument('-i')]
            cmd.append(encodeFilename(self._ffmpeg_filename_argument(path), True))
            if self._downloader.params.get('verbose', False):
                self._downloader.to_screen(
                    '[debug] %s command line: %s' % (self.basename, shell_quote(cmd)))
            handle = subprocess.Popen(
                cmd, stderr=subprocess.PIPE,
                stdout=subprocess.PIPE, stdin=subprocess.PIPE)
            stdout_data, stderr_data = process_communicate_or_kill(handle)
            expected_ret = 0 if self.probe_available else 1
            if handle.wait() != expected_ret:
                return None
        except (IOError, OSError):
            return None
        output = (stdout_data if self.probe_available else stderr_data).decode('ascii', 'ignore')
        if self.probe_available:
            audio_codec = None
            for line in output.split('\n'):
                if line.startswith('codec_name='):
                    audio_codec = line.split('=')[1].strip()
                elif line.strip() == 'codec_type=audio' and audio_codec is not None:
                    return audio_codec
        else:
            # Stream #FILE_INDEX:STREAM_INDEX[STREAM_ID](LANGUAGE): CODEC_TYPE: CODEC_NAME
            mobj = re.search(
                r'Stream\s*#\d+:\d+(?:\[0x[0-9a-f]+\])?(?:\([a-z]{3}\))?:\s*Audio:\s*([0-9a-z]+)',
                output)
            if mobj:
                return mobj.group(1)
        return None

    def run_ffmpeg_multiple_files(self, input_paths, out_path, opts):
        self.check_version()

        oldest_mtime = min(
            os.stat(encodeFilename(path)).st_mtime for path in input_paths)

        opts += self._configuration_args()

        files_cmd = []
        for path in input_paths:
            files_cmd.extend([
                encodeArgument('-i'),
                encodeFilename(self._ffmpeg_filename_argument(path), True)
            ])
        cmd = [encodeFilename(self.executable, True), encodeArgument('-y')]
        # avconv does not have repeat option
        if self.basename == 'ffmpeg':
            cmd += [encodeArgument('-loglevel'), encodeArgument('repeat+info')]
        cmd += (files_cmd
                + [encodeArgument(o) for o in opts]
                + [encodeFilename(self._ffmpeg_filename_argument(out_path), True)])

        if self._downloader.params.get('verbose', False):
            self._downloader.to_screen('[debug] ffmpeg command line: %s' % shell_quote(cmd))
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
        stdout, stderr = process_communicate_or_kill(p)
        if p.returncode != 0:
            stderr = stderr.decode('utf-8', 'replace')
            msgs = stderr.strip().split('\n')
            msg = msgs[-1]
            if self._downloader.params.get('verbose', False):
                self._downloader.to_screen('[debug] ' + '\n'.join(msgs[:-1]))
            raise FFmpegPostProcessorError(msg)
        self.try_utime(out_path, oldest_mtime, oldest_mtime)

    def run_ffmpeg(self, path, out_path, opts):
        self.run_ffmpeg_multiple_files([path], out_path, opts)

    def _ffmpeg_filename_argument(self, fn):
        # Always use 'file:' because the filename may contain ':' (ffmpeg
        # interprets that as a protocol) or can start with '-' (-- is broken in
        # ffmpeg, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details)
        # Also leave '-' intact in order not to break streaming to stdout.
        return 'file:' + fn if fn != '-' else fn


class FFmpegExtractAudioPP(FFmpegPostProcessor):
    def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, nopostoverwrites=False):
        FFmpegPostProcessor.__init__(self, downloader)
        if preferredcodec is None:
            preferredcodec = 'best'
        self._preferredcodec = preferredcodec
        self._preferredquality = preferredquality
        self._nopostoverwrites = nopostoverwrites

    def run_ffmpeg(self, path, out_path, codec, more_opts):
        if codec is None:
            acodec_opts = []
        else:
            acodec_opts = ['-acodec', codec]
        opts = ['-vn'] + acodec_opts + more_opts
        try:
            FFmpegPostProcessor.run_ffmpeg(self, path, out_path, opts)
        except FFmpegPostProcessorError as err:
            raise AudioConversionError(err.msg)

    def run(self, information):
        path = information['filepath']

        filecodec = self.get_audio_codec(path)
        if filecodec is None:
            raise PostProcessingError('WARNING: unable to obtain file audio codec with ffprobe')

        more_opts = []
        if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
            if filecodec == 'aac' and self._preferredcodec in ['m4a', 'best']:
                # Lossless, but in another container
                acodec = 'copy'
                extension = 'm4a'
                more_opts = ['-bsf:a', 'aac_adtstoasc']
            elif filecodec in ['aac', 'flac', 'mp3', 'vorbis', 'opus']:
                # Lossless if possible
                acodec = 'copy'
                extension = filecodec
                if filecodec == 'aac':
                    more_opts = ['-f', 'adts']
                if filecodec == 'vorbis':
                    extension = 'ogg'
            else:
                # MP3 otherwise.
                acodec = 'libmp3lame'
                extension = 'mp3'
                more_opts = []
                if self._preferredquality is not None:
                    if int(self._preferredquality) < 10:
                        more_opts += ['-q:a', self._preferredquality]
                    else:
                        more_opts += ['-b:a', self._preferredquality + 'k']
        else:
            # We convert the audio (lossy if codec is lossy)
            acodec = ACODECS[self._preferredcodec]
            extension = self._preferredcodec
            more_opts = []
            if self._preferredquality is not None:
                # The opus codec doesn't support the -aq option
                if int(self._preferredquality) < 10 and extension != 'opus':
                    more_opts += ['-q:a', self._preferredquality]
                else:
                    more_opts += ['-b:a', self._preferredquality + 'k']
            if self._preferredcodec == 'aac':
                more_opts += ['-f', 'adts']
            if self._preferredcodec == 'm4a':
                more_opts += ['-bsf:a', 'aac_adtstoasc']
            if self._preferredcodec == 'vorbis':
                extension = 'ogg'
            if self._preferredcodec == 'wav':
                extension = 'wav'
                more_opts += ['-f', 'wav']

        prefix, sep, ext = path.rpartition('.')  # not os.path.splitext, since the latter does not work on unicode in all setups
        new_path = prefix + sep + extension

        information['filepath'] = new_path
        information['ext'] = extension

        # If we download foo.mp3 and convert it to... foo.mp3, then don't delete foo.mp3, silly.
        if (new_path == path
                or (self._nopostoverwrites and os.path.exists(encodeFilename(new_path)))):
            self._downloader.to_screen('[ffmpeg] Post-process file %s exists, skipping' % new_path)
            return [], information

        try:
            self._downloader.to_screen('[ffmpeg] Destination: ' + new_path)
            self.run_ffmpeg(path, new_path, acodec, more_opts)
        except AudioConversionError as e:
            raise PostProcessingError(
                'audio conversion failed: ' + e.msg)
        except Exception:
            raise PostProcessingError('error running ' + self.basename)

        # Try to update the date time for extracted audio file.
        if information.get('filetime') is not None:
            self.try_utime(
                new_path, time.time(), information['filetime'],
                errnote='Cannot update utime of audio file')

        return [path], information


class FFmpegVideoConvertorPP(FFmpegPostProcessor):
    def __init__(self, downloader=None, preferedformat=None):
        super(FFmpegVideoConvertorPP, self).__init__(downloader)
        self._preferedformat = preferedformat

    def run(self, information):
        path = information['filepath']
        if information['ext'] == self._preferedformat:
            self._downloader.to_screen('[ffmpeg] Not converting video file %s - already is in target format %s' % (path, self._preferedformat))
            return [], information
        options = []
        if self._preferedformat == 'avi':
            options.extend(['-c:v', 'libxvid', '-vtag', 'XVID'])
        prefix, sep, ext = path.rpartition('.')
        outpath = prefix + sep + self._preferedformat
        self._downloader.to_screen('[' + 'ffmpeg' + '] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) + outpath)
        self.run_ffmpeg(path, outpath, options)
        information['filepath'] = outpath
        information['format'] = self._preferedformat
        information['ext'] = self._preferedformat
        return [path], information


class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
    def run(self, information):
        if information['ext'] not in ('mp4', 'webm', 'mkv'):
            self._downloader.to_screen('[ffmpeg] Subtitles can only be embedded in mp4, webm or mkv files')
            return [], information
        subtitles = information.get('requested_subtitles')
        if not subtitles:
            self._downloader.to_screen('[ffmpeg] There aren\'t any subtitles to embed')
            return [], information

        filename = information['filepath']

        ext = information['ext']
        sub_langs = []
        sub_filenames = []
        webm_vtt_warn = False

        for lang, sub_info in subtitles.items():
            sub_ext = sub_info['ext']
            if ext != 'webm' or ext == 'webm' and sub_ext == 'vtt':
                sub_langs.append(lang)
                sub_filenames.append(subtitles_filename(filename, lang, sub_ext, ext))
            else:
                if not webm_vtt_warn and ext == 'webm' and sub_ext != 'vtt':
                    webm_vtt_warn = True
                    self._downloader.to_screen('[ffmpeg] Only WebVTT subtitles can be embedded in webm files')

        if not sub_langs:
            return [], information

        input_files = [filename] + sub_filenames

        opts = [
            '-map', '0',
            '-c', 'copy',
            # Don't copy the existing subtitles, we may be running the
            # postprocessor a second time
            '-map', '-0:s',
            # Don't copy Apple TV chapters track, bin_data (see #19042, #19024,
            # https://trac.ffmpeg.org/ticket/6016)
            '-map', '-0:d',
        ]
        if information['ext'] == 'mp4':
            opts += ['-c:s', 'mov_text']
        for (i, lang) in enumerate(sub_langs):
            opts.extend(['-map', '%d:0' % (i + 1)])
            lang_code = ISO639Utils.short2long(lang) or lang
            opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code])

        temp_filename = prepend_extension(filename, 'temp')
        self._downloader.to_screen('[ffmpeg] Embedding subtitles in \'%s\'' % filename)
        self.run_ffmpeg_multiple_files(input_files, temp_filename, opts)
        os.remove(encodeFilename(filename))
        os.rename(encodeFilename(temp_filename), encodeFilename(filename))

        return sub_filenames, information


class FFmpegMetadataPP(FFmpegPostProcessor):
    def run(self, info):
        metadata = {}

        def add(meta_list, info_list=None):
            if not info_list:
                info_list = meta_list
            if not isinstance(meta_list, (list, tuple)):
                meta_list = (meta_list,)
            if not isinstance(info_list, (list, tuple)):
                info_list = (info_list,)
            for info_f in info_list:
                if info.get(info_f) is not None:
                    for meta_f in meta_list:
                        metadata[meta_f] = info[info_f]
                    break

        # See [1-4] for some info on media metadata/metadata supported
        # by ffmpeg.
        # 1. https://kdenlive.org/en/project/adding-meta-data-to-mp4-video/
        # 2. https://wiki.multimedia.cx/index.php/FFmpeg_Metadata
        # 3. https://kodi.wiki/view/Video_file_tagging
        # 4. http://atomicparsley.sourceforge.net/mpeg-4files.html

        add('title', ('track', 'title'))
        add('date', 'upload_date')
        add(('description', 'comment'), 'description')
        add('purl', 'webpage_url')
        add('track', 'track_number')
        add('artist', ('artist', 'creator', 'uploader', 'uploader_id'))
        add('genre')
        add('album')
        add('album_artist')
        add('disc', 'disc_number')
        add('show', 'series')
        add('season_number')
        add('episode_id', ('episode', 'episode_id'))
        add('episode_sort', 'episode_number')

        if not metadata:
            self._downloader.to_screen('[ffmpeg] There isn\'t any metadata to add')
            return [], info

        filename = info['filepath']
        temp_filename = prepend_extension(filename, 'temp')
        in_filenames = [filename]
        options = []

        if info['ext'] == 'm4a':
            options.extend(['-vn', '-acodec', 'copy'])
        else:
            options.extend(['-c', 'copy'])

        for (name, value) in metadata.items():
            options.extend(['-metadata', '%s=%s' % (name, value)])

        chapters = info.get('chapters', [])
        if chapters:
            metadata_filename = replace_extension(filename, 'meta')
            with open(metadata_filename, 'w', encoding='utf-8') as f:
                def ffmpeg_escape(text):
                    return re.sub(r'(=|;|#|\\|\n)', r'\\\1', text)

                metadata_file_content = ';FFMETADATA1\n'
                for chapter in chapters:
                    metadata_file_content += '[CHAPTER]\nTIMEBASE=1/1000\n'
                    metadata_file_content += 'START=%d\n' % (chapter['start_time'] * 1000)
                    metadata_file_content += 'END=%d\n' % (chapter['end_time'] * 1000)
                    chapter_title = chapter.get('title')
                    if chapter_title:
                        metadata_file_content += 'title=%s\n' % ffmpeg_escape(chapter_title)
                f.write(metadata_file_content)
                in_filenames.append(metadata_filename)
                options.extend(['-map_metadata', '1'])

        self._downloader.to_screen('[ffmpeg] Adding metadata to \'%s\'' % filename)
        self.run_ffmpeg_multiple_files(in_filenames, temp_filename, options)
        if chapters:
            os.remove(metadata_filename)
        os.remove(encodeFilename(filename))
        os.rename(encodeFilename(temp_filename), encodeFilename(filename))
        return [], info


class FFmpegMergerPP(FFmpegPostProcessor):
    def run(self, info):
        filename = info['filepath']
        temp_filename = prepend_extension(filename, 'temp')
        args = ['-c', 'copy', '-map', '0:v:0', '-map', '1:a:0']
        self._downloader.to_screen('[ffmpeg] Merging formats into "%s"' % filename)
        self.run_ffmpeg_multiple_files(info['__files_to_merge'], temp_filename, args)
        os.rename(encodeFilename(temp_filename), encodeFilename(filename))
        return info['__files_to_merge'], info

    def can_merge(self):
        # TODO: figure out merge-capable ffmpeg version
        if self.basename != 'avconv':
            return True

        required_version = '10-0'
        if is_outdated_version(
                self._versions[self.basename], required_version):
            warning = ('Your copy of %s is outdated and unable to properly mux separate video and audio files, '
                       'youtube-dl will download single file media. '
                       'Update %s to version %s or newer to fix this.') % (
                           self.basename, self.basename, required_version)
            if self._downloader:
                self._downloader.report_warning(warning)
            return False

        return True


class FFmpegFixupStretchedPP(FFmpegPostProcessor):
    def run(self, info):
        stretched_ratio = info.get('stretched_ratio')
        if stretched_ratio is None or stretched_ratio == 1:
            return [], info

        filename = info['filepath']
        temp_filename = prepend_extension(filename, 'temp')

        options = ['-c', 'copy', '-aspect', '%f' % stretched_ratio]
        self._downloader.to_screen('[ffmpeg] Fixing aspect ratio in "%s"' % filename)
        self.run_ffmpeg(filename, temp_filename, options)

        os.remove(encodeFilename(filename))
        os.rename(encodeFilename(temp_filename), encodeFilename(filename))

        return [], info


class FFmpegFixupM4aPP(FFmpegPostProcessor):
    def run(self, info):
        if info.get('container') != 'm4a_dash':
            return [], info

        filename = info['filepath']
        temp_filename = prepend_extension(filename, 'temp')

        options = ['-c', 'copy', '-f', 'mp4']
        self._downloader.to_screen('[ffmpeg] Correcting container in "%s"' % filename)
        self.run_ffmpeg(filename, temp_filename, options)

        os.remove(encodeFilename(filename))
        os.rename(encodeFilename(temp_filename), encodeFilename(filename))

        return [], info


class FFmpegFixupM3u8PP(FFmpegPostProcessor):
    def run(self, info):
        filename = info['filepath']
        if self.get_audio_codec(filename) == 'aac':
            temp_filename = prepend_extension(filename, 'temp')

            options = ['-c', 'copy', '-f', 'mp4', '-bsf:a', 'aac_adtstoasc']
            self._downloader.to_screen('[ffmpeg] Fixing malformed AAC bitstream in "%s"' % filename)
            self.run_ffmpeg(filename, temp_filename, options)

            os.remove(encodeFilename(filename))
            os.rename(encodeFilename(temp_filename), encodeFilename(filename))
        return [], info


class FFmpegSubtitlesConvertorPP(FFmpegPostProcessor):
    def __init__(self, downloader=None, format=None):
        super(FFmpegSubtitlesConvertorPP, self).__init__(downloader)
        self.format = format

    def run(self, info):
        subs = info.get('requested_subtitles')
        filename = info['filepath']
        new_ext = self.format
        new_format = new_ext
        if new_format == 'vtt':
            new_format = 'webvtt'
        if subs is None:
            self._downloader.to_screen('[ffmpeg] There aren\'t any subtitles to convert')
            return [], info
        self._downloader.to_screen('[ffmpeg] Converting subtitles')
        sub_filenames = []
        for lang, sub in subs.items():
            ext = sub['ext']
            if ext == new_ext:
                self._downloader.to_screen(
                    '[ffmpeg] Subtitle file for %s is already in the requested format' % new_ext)
                continue
            old_file = subtitles_filename(filename, lang, ext, info.get('ext'))
            sub_filenames.append(old_file)
            new_file = subtitles_filename(filename, lang, new_ext, info.get('ext'))

            if ext in ('dfxp', 'ttml', 'tt'):
                self._downloader.report_warning(
                    'You have requested to convert dfxp (TTML) subtitles into another format, '
                    'which results in style information loss')

                dfxp_file = old_file
                srt_file = subtitles_filename(filename, lang, 'srt', info.get('ext'))

                with open(dfxp_file, 'rb') as f:
                    srt_data = dfxp2srt(f.read())

                with open(srt_file, 'w', encoding='utf-8') as f:
                    f.write(srt_data)
                old_file = srt_file

                subs[lang] = {
                    'ext': 'srt',
                    'data': srt_data
                }

                if new_ext == 'srt':
                    continue
                else:
                    sub_filenames.append(srt_file)

            self.run_ffmpeg(old_file, new_file, ['-f', new_format])

            with open(new_file, 'r', encoding='utf-8') as f:
                subs[lang] = {
                    'ext': new_ext,
                    'data': f.read(),
                }

        return sub_filenames, info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
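Illustration (not part of the record above): FFmpegMetadataPP writes chapters through an FFMETADATA1 file; this standalone sketch reuses the same escaping rule on made-up chapter data to show the file layout.

import re


def ffmpeg_escape(text):
    # same escaping as in FFmpegMetadataPP: '=', ';', '#', '\' and newline
    return re.sub(r'(=|;|#|\\|\n)', r'\\\1', text)


chapters = [{'start_time': 0, 'end_time': 90, 'title': 'Intro; part #1'}]  # hypothetical data
content = ';FFMETADATA1\n'
for chapter in chapters:
    content += '[CHAPTER]\nTIMEBASE=1/1000\n'
    content += 'START=%d\n' % (chapter['start_time'] * 1000)
    content += 'END=%d\n' % (chapter['end_time'] * 1000)
    content += 'title=%s\n' % ffmpeg_escape(chapter['title'])
print(content)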
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/postprocessor/xattrpp.py
youtube_dl/postprocessor/xattrpp.py
from __future__ import unicode_literals

from .common import PostProcessor
from ..compat import compat_os_name
from ..utils import (
    hyphenate_date,
    write_xattr,
    XAttrMetadataError,
    XAttrUnavailableError,
)


class XAttrMetadataPP(PostProcessor):

    #
    # More info about extended attributes for media:
    #   http://freedesktop.org/wiki/CommonExtendedAttributes/
    #   http://www.freedesktop.org/wiki/PhreedomDraft/
    #   http://dublincore.org/documents/usageguide/elements.shtml
    #
    # TODO:
    #  * capture youtube keywords and put them in 'user.dublincore.subject' (comma-separated)
    #  * figure out which xattrs can be used for 'duration', 'thumbnail', 'resolution'
    #

    def run(self, info):
        """ Set extended attributes on downloaded file (if xattr support is found). """

        # Write the metadata to the file's xattrs
        self._downloader.to_screen('[metadata] Writing metadata to file\'s xattrs')

        filename = info['filepath']

        try:
            xattr_mapping = {
                'user.xdg.referrer.url': 'webpage_url',
                # 'user.xdg.comment': 'description',
                'user.dublincore.title': 'title',
                'user.dublincore.date': 'upload_date',
                'user.dublincore.description': 'description',
                'user.dublincore.contributor': 'uploader',
                'user.dublincore.format': 'format',
            }

            num_written = 0
            for xattrname, infoname in xattr_mapping.items():

                value = info.get(infoname)

                if value:
                    if infoname == 'upload_date':
                        value = hyphenate_date(value)

                    byte_value = value.encode('utf-8')
                    write_xattr(filename, xattrname, byte_value)
                    num_written += 1

            return [], info

        except XAttrUnavailableError as e:
            self._downloader.report_error(str(e))
            return [], info

        except XAttrMetadataError as e:
            if e.reason == 'NO_SPACE':
                self._downloader.report_warning(
                    'There\'s no disk space left, disk quota exceeded or filesystem xattr limit exceeded. '
                    + (('Some ' if num_written else '') + 'extended attributes are not written.').capitalize())
            elif e.reason == 'VALUE_TOO_LONG':
                self._downloader.report_warning(
                    'Unable to write extended attributes due to too long values.')
            else:
                msg = 'This filesystem doesn\'t support extended attributes. '
                if compat_os_name == 'nt':
                    msg += 'You need to use NTFS.'
                else:
                    msg += '(You may have to enable them in your /etc/fstab)'
                self._downloader.report_error(msg)
            return [], info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
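Illustration (not part of the record above): the user.dublincore.date xattr stores upload_date after it passes through the hyphenate_date helper from ..utils; a standalone sketch of that behavior, reimplemented here and assuming the usual YYYYMMDD input.

import re


def hyphenate_date(date_str):
    # sketch of the ..utils helper: '20160503' -> '2016-05-03'
    match = re.match(r'^(\d{4})(\d{2})(\d{2})$', date_str)
    return '-'.join(match.groups()) if match else date_str


assert hyphenate_date('20160503') == '2016-05-03'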
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/postprocessor/metadatafromtitle.py
youtube_dl/postprocessor/metadatafromtitle.py
from __future__ import unicode_literals

import re

from .common import PostProcessor


class MetadataFromTitlePP(PostProcessor):
    def __init__(self, downloader, titleformat):
        super(MetadataFromTitlePP, self).__init__(downloader)
        self._titleformat = titleformat
        self._titleregex = (self.format_to_regex(titleformat)
                            if re.search(r'%\(\w+\)s', titleformat)
                            else titleformat)

    def format_to_regex(self, fmt):
        r"""
        Converts a string like
           '%(title)s - %(artist)s'
        to a regex like
           '(?P<title>.+)\ \-\ (?P<artist>.+)'
        """
        lastpos = 0
        regex = ''
        # replace %(..)s with regex group and escape other string parts
        for match in re.finditer(r'%\((\w+)\)s', fmt):
            regex += re.escape(fmt[lastpos:match.start()])
            regex += r'(?P<' + match.group(1) + '>.+)'
            lastpos = match.end()
        if lastpos < len(fmt):
            regex += re.escape(fmt[lastpos:])
        return regex

    def run(self, info):
        title = info['title']
        match = re.match(self._titleregex, title)
        if match is None:
            self._downloader.to_screen(
                '[fromtitle] Could not interpret title of video as "%s"'
                % self._titleformat)
            return [], info
        for attribute, value in match.groupdict().items():
            if value is None:
                continue
            info[attribute] = value
            self._downloader.to_screen(
                '[fromtitle] parsed %s: %s'
                % (attribute, value if value is not None else 'NA'))

        return [], info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
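Demonstration (not part of the record above) of the conversion format_to_regex performs, using a standalone copy of the function and a made-up title:

import re


def format_to_regex(fmt):
    # same algorithm as MetadataFromTitlePP.format_to_regex
    lastpos = 0
    regex = ''
    for match in re.finditer(r'%\((\w+)\)s', fmt):
        regex += re.escape(fmt[lastpos:match.start()])
        regex += r'(?P<' + match.group(1) + '>.+)'
        lastpos = match.end()
    if lastpos < len(fmt):
        regex += re.escape(fmt[lastpos:])
    return regex


pattern = format_to_regex('%(artist)s - %(title)s')
m = re.match(pattern, 'Some Artist - Some Song')
print(m.groupdict())  # {'artist': 'Some Artist', 'title': 'Some Song'}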
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/postprocessor/__init__.py
youtube_dl/postprocessor/__init__.py
from __future__ import unicode_literals

from .embedthumbnail import EmbedThumbnailPP
from .ffmpeg import (
    FFmpegPostProcessor,
    FFmpegEmbedSubtitlePP,
    FFmpegExtractAudioPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupM3u8PP,
    FFmpegFixupM4aPP,
    FFmpegMergerPP,
    FFmpegMetadataPP,
    FFmpegVideoConvertorPP,
    FFmpegSubtitlesConvertorPP,
)
from .xattrpp import XAttrMetadataPP
from .execafterdownload import ExecAfterDownloadPP
from .metadatafromtitle import MetadataFromTitlePP


def get_postprocessor(key):
    return globals()[key + 'PP']


__all__ = [
    'EmbedThumbnailPP',
    'ExecAfterDownloadPP',
    'FFmpegEmbedSubtitlePP',
    'FFmpegExtractAudioPP',
    'FFmpegFixupM3u8PP',
    'FFmpegFixupM4aPP',
    'FFmpegFixupStretchedPP',
    'FFmpegMergerPP',
    'FFmpegMetadataPP',
    'FFmpegPostProcessor',
    'FFmpegSubtitlesConvertorPP',
    'FFmpegVideoConvertorPP',
    'MetadataFromTitlePP',
    'XAttrMetadataPP',
]
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
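Usage sketch (not part of the record above): get_postprocessor() resolves a bare name such as 'FFmpegMetadata' to the FFmpegMetadataPP class through the module globals; this assumes youtube_dl is importable in the current environment.

from youtube_dl.postprocessor import get_postprocessor

pp_class = get_postprocessor('FFmpegMetadata')
print(pp_class.__name__)  # FFmpegMetadataPP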
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/postprocessor/embedthumbnail.py
youtube_dl/postprocessor/embedthumbnail.py
# coding: utf-8
from __future__ import unicode_literals

import os
import subprocess

from .ffmpeg import FFmpegPostProcessor

from ..utils import (
    check_executable,
    encodeArgument,
    encodeFilename,
    PostProcessingError,
    prepend_extension,
    process_communicate_or_kill,
    replace_extension,
    shell_quote,
)
from ..compat import compat_open as open


class EmbedThumbnailPPError(PostProcessingError):
    pass


class EmbedThumbnailPP(FFmpegPostProcessor):
    def __init__(self, downloader=None, already_have_thumbnail=False):
        super(EmbedThumbnailPP, self).__init__(downloader)
        self._already_have_thumbnail = already_have_thumbnail

    def run(self, info):
        filename = info['filepath']
        temp_filename = prepend_extension(filename, 'temp')

        if not info.get('thumbnails'):
            self._downloader.to_screen('[embedthumbnail] There aren\'t any thumbnails to embed')
            return [], info

        thumbnail_filename = info['thumbnails'][-1]['filename']

        if not os.path.exists(encodeFilename(thumbnail_filename)):
            self._downloader.report_warning(
                'Skipping embedding the thumbnail because the file is missing.')
            return [], info

        def is_webp(path):
            with open(encodeFilename(path), 'rb') as f:
                b = f.read(12)
            return b[0:4] == b'RIFF' and b[8:] == b'WEBP'

        # Correct extension for WebP file with wrong extension (see #25687, #25717)
        _, thumbnail_ext = os.path.splitext(thumbnail_filename)
        if thumbnail_ext:
            thumbnail_ext = thumbnail_ext[1:].lower()
            if thumbnail_ext != 'webp' and is_webp(thumbnail_filename):
                self._downloader.to_screen(
                    '[ffmpeg] Correcting extension to webp and escaping path for thumbnail "%s"' % thumbnail_filename)
                thumbnail_webp_filename = replace_extension(thumbnail_filename, 'webp')
                os.rename(encodeFilename(thumbnail_filename), encodeFilename(thumbnail_webp_filename))
                thumbnail_filename = thumbnail_webp_filename
                thumbnail_ext = 'webp'

        # Convert unsupported thumbnail formats to JPEG (see #25687, #25717)
        if thumbnail_ext not in ['jpg', 'png']:
            # NB: % is supposed to be escaped with %% but this does not work
            # for input files so working around with standard substitution
            escaped_thumbnail_filename = thumbnail_filename.replace('%', '#')
            os.rename(encodeFilename(thumbnail_filename), encodeFilename(escaped_thumbnail_filename))
            escaped_thumbnail_jpg_filename = replace_extension(escaped_thumbnail_filename, 'jpg')
            self._downloader.to_screen('[ffmpeg] Converting thumbnail "%s" to JPEG' % escaped_thumbnail_filename)
            self.run_ffmpeg(escaped_thumbnail_filename, escaped_thumbnail_jpg_filename, ['-bsf:v', 'mjpeg2jpeg'])
            os.remove(encodeFilename(escaped_thumbnail_filename))
            thumbnail_jpg_filename = replace_extension(thumbnail_filename, 'jpg')
            # Rename back to unescaped for further processing
            os.rename(encodeFilename(escaped_thumbnail_jpg_filename), encodeFilename(thumbnail_jpg_filename))
            thumbnail_filename = thumbnail_jpg_filename

        if info['ext'] == 'mp3':
            options = [
                '-c', 'copy', '-map', '0', '-map', '1',
                '-metadata:s:v', 'title="Album cover"', '-metadata:s:v', 'comment="Cover (Front)"']

            self._downloader.to_screen('[ffmpeg] Adding thumbnail to "%s"' % filename)

            self.run_ffmpeg_multiple_files([filename, thumbnail_filename], temp_filename, options)

            if not self._already_have_thumbnail:
                os.remove(encodeFilename(thumbnail_filename))
            os.remove(encodeFilename(filename))
            os.rename(encodeFilename(temp_filename), encodeFilename(filename))

        elif info['ext'] in ['m4a', 'mp4']:
            atomicparsley = next((
                x for x in ['AtomicParsley', 'atomicparsley']
                if check_executable(x, ['-v'])), None)

            if atomicparsley is None:
                raise EmbedThumbnailPPError('AtomicParsley was not found. Please install.')

            cmd = [encodeFilename(atomicparsley, True),
                   encodeFilename(filename, True),
                   encodeArgument('--artwork'),
                   encodeFilename(thumbnail_filename, True),
                   encodeArgument('-o'),
                   encodeFilename(temp_filename, True)]

            self._downloader.to_screen('[atomicparsley] Adding thumbnail to "%s"' % filename)

            if self._downloader.params.get('verbose', False):
                self._downloader.to_screen('[debug] AtomicParsley command line: %s' % shell_quote(cmd))

            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = process_communicate_or_kill(p)

            if p.returncode != 0:
                msg = stderr.decode('utf-8', 'replace').strip()
                raise EmbedThumbnailPPError(msg)

            if not self._already_have_thumbnail:
                os.remove(encodeFilename(thumbnail_filename))
            # for formats that don't support thumbnails (like 3gp) AtomicParsley
            # won't create to the temporary file
            if b'No changes' in stdout:
                self._downloader.report_warning('The file format doesn\'t support embedding a thumbnail')
            else:
                os.remove(encodeFilename(filename))
                os.rename(encodeFilename(temp_filename), encodeFilename(filename))
        else:
            raise EmbedThumbnailPPError('Only mp3 and m4a/mp4 are supported for thumbnail embedding for now.')

        return [], info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
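Illustration (not part of the record above): the is_webp() check sniffs the RIFF/WEBP container header; here is the same test run against a synthetic 12-byte header instead of a real downloaded thumbnail.

def is_webp_header(b):
    # same header test as in EmbedThumbnailPP.is_webp
    return b[0:4] == b'RIFF' and b[8:12] == b'WEBP'


fake_header = b'RIFF' + b'\x00\x00\x00\x00' + b'WEBP'  # made-up bytes
print(is_webp_header(fake_header))  # True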
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/postprocessor/execafterdownload.py
youtube_dl/postprocessor/execafterdownload.py
from __future__ import unicode_literals

import subprocess

from .common import PostProcessor
from ..compat import compat_shlex_quote
from ..utils import (
    encodeArgument,
    PostProcessingError,
)


class ExecAfterDownloadPP(PostProcessor):
    def __init__(self, downloader, exec_cmd):
        super(ExecAfterDownloadPP, self).__init__(downloader)
        self.exec_cmd = exec_cmd

    def run(self, information):
        cmd = self.exec_cmd
        if '{}' not in cmd:
            cmd += ' {}'

        cmd = cmd.replace('{}', compat_shlex_quote(information['filepath']))

        self._downloader.to_screen('[exec] Executing command: %s' % cmd)
        retCode = subprocess.call(encodeArgument(cmd), shell=True)
        if retCode != 0:
            raise PostProcessingError(
                'Command returned error code %d' % retCode)

        return [], information
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
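Sketch (not part of the record above) of the '{}' substitution performed by ExecAfterDownloadPP; shlex.quote is the Python 3 equivalent of the compat_shlex_quote helper used in the original, and the command and filename are made up.

import shlex

exec_cmd = 'echo done:'  # hypothetical --exec template
filepath = "video file's name.mp4"
cmd = exec_cmd if '{}' in exec_cmd else exec_cmd + ' {}'
cmd = cmd.replace('{}', shlex.quote(filepath))
print(cmd)  # the filename is shell-quoted before substitution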
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/downloader/niconico.py
youtube_dl/downloader/niconico.py
# coding: utf-8
from __future__ import unicode_literals

try:
    import threading
except ImportError:
    threading = None

from .common import FileDownloader
from ..downloader import get_suitable_downloader
from ..extractor.niconico import NiconicoIE
from ..utils import sanitized_Request


class NiconicoDmcFD(FileDownloader):
    """ Downloading niconico douga from DMC with heartbeat """

    FD_NAME = 'niconico_dmc'

    def real_download(self, filename, info_dict):
        self.to_screen('[%s] Downloading from DMC' % self.FD_NAME)

        ie = NiconicoIE(self.ydl)
        info_dict, heartbeat_info_dict = ie._get_heartbeat_info(info_dict)

        fd = get_suitable_downloader(info_dict, params=self.params)(self.ydl, self.params)
        for ph in self._progress_hooks:
            fd.add_progress_hook(ph)

        if not threading:
            self.to_screen('[%s] Threading for Heartbeat not available' % self.FD_NAME)
            return fd.real_download(filename, info_dict)

        success = download_complete = False
        timer = [None]
        heartbeat_lock = threading.Lock()
        heartbeat_url = heartbeat_info_dict['url']
        heartbeat_data = heartbeat_info_dict['data'].encode()
        heartbeat_interval = heartbeat_info_dict.get('interval', 30)

        request = sanitized_Request(heartbeat_url, heartbeat_data)

        def heartbeat():
            try:
                self.ydl.urlopen(request).read()
            except Exception:
                self.to_screen('[%s] Heartbeat failed' % self.FD_NAME)

            with heartbeat_lock:
                if not download_complete:
                    timer[0] = threading.Timer(heartbeat_interval, heartbeat)
                    timer[0].start()

        heartbeat_info_dict['ping']()
        self.to_screen('[%s] Heartbeat with %d second interval ...' % (self.FD_NAME, heartbeat_interval))
        try:
            heartbeat()
            if type(fd).__name__ == 'HlsFD':
                info_dict.update(ie._extract_m3u8_formats(info_dict['url'], info_dict['id'])[0])
            success = fd.real_download(filename, info_dict)
        finally:
            if heartbeat_lock:
                with heartbeat_lock:
                    timer[0].cancel()
                    download_complete = True

        return success
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
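Sketch (not part of the record above) of the self-rescheduling threading.Timer pattern used for the DMC heartbeat, with a stub print in place of the real session request; interval and names are made up.

import threading

stop = threading.Event()  # stands in for the download_complete flag


def heartbeat(interval=30):
    print('ping')  # stands in for self.ydl.urlopen(request).read()
    if not stop.is_set():
        timer = threading.Timer(interval, heartbeat, args=(interval,))
        timer.daemon = True  # don't keep the process alive for the next beat
        timer.start()


heartbeat(1)  # first beat fires immediately, as in real_download()
stop.set()    # equivalent of setting download_complete = True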
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/downloader/fragment.py
youtube_dl/downloader/fragment.py
from __future__ import division, unicode_literals

import os
import time
import json

from .common import FileDownloader
from .http import HttpFD
from ..utils import (
    error_to_compat_str,
    encodeFilename,
    sanitize_open,
    sanitized_Request,
)


class HttpQuietDownloader(HttpFD):
    def to_screen(self, *args, **kargs):
        pass


class FragmentFD(FileDownloader):
    """
    A base file downloader class for fragmented media (e.g. f4m/m3u8 manifests).

    Available options:

    fragment_retries:   Number of times to retry a fragment for HTTP error (DASH
                        and hlsnative only)
    skip_unavailable_fragments:
                        Skip unavailable fragments (DASH and hlsnative only)
    keep_fragments:     Keep downloaded fragments on disk after downloading is
                        finished

    For each incomplete fragment download youtube-dl keeps on disk a special
    bookkeeping file with download state and metadata (in future such files will
    be used for any incomplete download handled by youtube-dl). This file is
    used to properly handle resuming, check download file consistency and detect
    potential errors. The file has a .ytdl extension and represents a standard
    JSON file of the following format:

    extractor:
        Dictionary of extractor related data. TBD.

    downloader:
        Dictionary of downloader related data. May contain following data:
            current_fragment:
                Dictionary with current (being downloaded) fragment data:
                index:  0-based index of current fragment among all fragments
            fragment_count:
                Total count of fragments

    This feature is experimental and file format may change in future.
    """

    def report_retry_fragment(self, err, frag_index, count, retries):
        self.to_screen(
            '[download] Got server HTTP error: %s. Retrying fragment %d (attempt %d of %s)...'
            % (error_to_compat_str(err), frag_index, count, self.format_retries(retries)))

    def report_skip_fragment(self, frag_index):
        self.to_screen('[download] Skipping fragment %d...' % frag_index)

    def _prepare_url(self, info_dict, url):
        headers = info_dict.get('http_headers')
        return sanitized_Request(url, None, headers) if headers else url

    def _prepare_and_start_frag_download(self, ctx):
        self._prepare_frag_download(ctx)
        self._start_frag_download(ctx)

    @staticmethod
    def __do_ytdl_file(ctx):
        return ctx['live'] is not True and ctx['tmpfilename'] != '-'

    def _read_ytdl_file(self, ctx):
        assert 'ytdl_corrupt' not in ctx
        stream, _ = sanitize_open(self.ytdl_filename(ctx['filename']), 'r')
        try:
            ctx['fragment_index'] = json.loads(stream.read())['downloader']['current_fragment']['index']
        except Exception:
            ctx['ytdl_corrupt'] = True
        finally:
            stream.close()

    def _write_ytdl_file(self, ctx):
        frag_index_stream, _ = sanitize_open(self.ytdl_filename(ctx['filename']), 'w')
        downloader = {
            'current_fragment': {
                'index': ctx['fragment_index'],
            },
        }
        if ctx.get('fragment_count') is not None:
            downloader['fragment_count'] = ctx['fragment_count']
        frag_index_stream.write(json.dumps({'downloader': downloader}))
        frag_index_stream.close()

    def _download_fragment(self, ctx, frag_url, info_dict, headers=None):
        fragment_filename = '%s-Frag%d' % (ctx['tmpfilename'], ctx['fragment_index'])
        fragment_info_dict = {
            'url': frag_url,
            'http_headers': headers or info_dict.get('http_headers'),
        }
        frag_resume_len = 0
        if ctx['dl'].params.get('continuedl', True):
            frag_resume_len = self.filesize_or_none(
                self.temp_name(fragment_filename))
        fragment_info_dict['frag_resume_len'] = frag_resume_len
        ctx['frag_resume_len'] = frag_resume_len or 0

        success = ctx['dl'].download(fragment_filename, fragment_info_dict)
        if not success:
            return False, None
        if fragment_info_dict.get('filetime'):
            ctx['fragment_filetime'] = fragment_info_dict.get('filetime')
        down, frag_sanitized = sanitize_open(fragment_filename, 'rb')
        ctx['fragment_filename_sanitized'] = frag_sanitized
        frag_content = down.read()
        down.close()
        return True, frag_content

    def _append_fragment(self, ctx, frag_content):
        try:
            ctx['dest_stream'].write(frag_content)
            ctx['dest_stream'].flush()
        finally:
            if self.__do_ytdl_file(ctx):
                self._write_ytdl_file(ctx)
            if not self.params.get('keep_fragments', False):
                os.remove(encodeFilename(ctx['fragment_filename_sanitized']))
            del ctx['fragment_filename_sanitized']

    def _prepare_frag_download(self, ctx):
        if not ctx.setdefault('live', False):
            total_frags_str = '%d' % ctx['total_frags']
            ad_frags = ctx.get('ad_frags', 0)
            if ad_frags:
                total_frags_str += ' (not including %d ad)' % ad_frags
        else:
            total_frags_str = 'unknown (live)'
        self.to_screen(
            '[%s] Total fragments: %s' % (self.FD_NAME, total_frags_str))
        self.report_destination(ctx['filename'])
        continuedl = self.params.get('continuedl', True)
        dl = HttpQuietDownloader(
            self.ydl,
            {
                'continuedl': continuedl,
                'quiet': True,
                'noprogress': True,
                'ratelimit': self.params.get('ratelimit'),
                'retries': self.params.get('retries', 0),
                'nopart': self.params.get('nopart', False),
                'test': self.params.get('test', False),
            }
        )
        tmpfilename = self.temp_name(ctx['filename'])
        open_mode = 'wb'

        # Establish possible resume length
        resume_len = self.filesize_or_none(tmpfilename) or 0
        if resume_len > 0:
            open_mode = 'ab'

        # Should be initialized before ytdl file check
        ctx.update({
            'tmpfilename': tmpfilename,
            'fragment_index': 0,
        })

        if self.__do_ytdl_file(ctx):
            ytdl_file_exists = os.path.isfile(encodeFilename(self.ytdl_filename(ctx['filename'])))
            if continuedl and ytdl_file_exists:
                self._read_ytdl_file(ctx)
                is_corrupt = ctx.get('ytdl_corrupt') is True
                is_inconsistent = ctx['fragment_index'] > 0 and resume_len == 0
                if is_corrupt or is_inconsistent:
                    message = (
                        '.ytdl file is corrupt' if is_corrupt else
                        'Inconsistent state of incomplete fragment download')
                    self.report_warning(
                        '%s. Restarting from the beginning...' % message)
                    ctx['fragment_index'] = resume_len = 0
                    if 'ytdl_corrupt' in ctx:
                        del ctx['ytdl_corrupt']
                    self._write_ytdl_file(ctx)
            else:
                if not continuedl:
                    if ytdl_file_exists:
                        self._read_ytdl_file(ctx)
                    ctx['fragment_index'] = resume_len = 0
                self._write_ytdl_file(ctx)
                assert ctx['fragment_index'] == 0

        dest_stream, tmpfilename = sanitize_open(tmpfilename, open_mode)

        ctx.update({
            'dl': dl,
            'dest_stream': dest_stream,
            'tmpfilename': tmpfilename,
            # Total complete fragments downloaded so far in bytes
            'complete_frags_downloaded_bytes': resume_len,
        })

    def _start_frag_download(self, ctx):
        resume_len = ctx['complete_frags_downloaded_bytes']
        total_frags = ctx['total_frags']
        # This dict stores the download progress, it's updated by the progress
        # hook
        state = {
            'status': 'downloading',
            'downloaded_bytes': resume_len,
            'fragment_index': ctx['fragment_index'],
            'fragment_count': total_frags,
            'filename': ctx['filename'],
            'tmpfilename': ctx['tmpfilename'],
        }

        start = time.time()
        ctx.update({
            'started': start,
            'fragment_started': start,
            # Amount of fragment's bytes downloaded by the time of the previous
            # frag progress hook invocation
            'prev_frag_downloaded_bytes': 0,
        })

        def frag_progress_hook(s):
            if s['status'] not in ('downloading', 'finished'):
                return

            if not total_frags and ctx.get('fragment_count'):
                state['fragment_count'] = ctx['fragment_count']

            time_now = time.time()
            state['elapsed'] = time_now - start
            frag_total_bytes = s.get('total_bytes') or 0
            if not ctx['live']:
                estimated_size = (
                    (ctx['complete_frags_downloaded_bytes'] + frag_total_bytes)
                    / (state['fragment_index'] + 1) * total_frags)
                state['total_bytes_estimate'] = estimated_size

            if s['status'] == 'finished':
                state['fragment_index'] += 1
                ctx['fragment_index'] = state['fragment_index']
                state['downloaded_bytes'] += frag_total_bytes - ctx['prev_frag_downloaded_bytes']
                ctx['complete_frags_downloaded_bytes'] = state['downloaded_bytes']
                ctx['speed'] = state['speed'] = self.calc_speed(
                    ctx['fragment_started'], time_now, frag_total_bytes)
                ctx['fragment_started'] = time.time()
                ctx['prev_frag_downloaded_bytes'] = 0
            else:
                frag_downloaded_bytes = s['downloaded_bytes']
                state['downloaded_bytes'] += frag_downloaded_bytes - ctx['prev_frag_downloaded_bytes']
                ctx['speed'] = state['speed'] = self.calc_speed(
                    ctx['fragment_started'], time_now, frag_downloaded_bytes - ctx['frag_resume_len'])
                if not ctx['live']:
                    state['eta'] = self.calc_eta(state['speed'], estimated_size - state['downloaded_bytes'])
                ctx['prev_frag_downloaded_bytes'] = frag_downloaded_bytes
            self._hook_progress(state)

        ctx['dl'].add_progress_hook(frag_progress_hook)

        return start

    def _finish_frag_download(self, ctx):
        ctx['dest_stream'].close()
        if self.__do_ytdl_file(ctx):
            ytdl_filename = encodeFilename(self.ytdl_filename(ctx['filename']))
            if os.path.isfile(ytdl_filename):
                os.remove(ytdl_filename)
        elapsed = time.time() - ctx['started']

        if ctx['tmpfilename'] == '-':
            downloaded_bytes = ctx['complete_frags_downloaded_bytes']
        else:
            self.try_rename(ctx['tmpfilename'], ctx['filename'])
            if self.params.get('updatetime', True):
                filetime = ctx.get('fragment_filetime')
                if filetime:
                    try:
                        os.utime(ctx['filename'], (time.time(), filetime))
                    except Exception:
                        pass
            downloaded_bytes = self.filesize_or_none(ctx['filename']) or 0

        self._hook_progress({
            'downloaded_bytes': downloaded_bytes,
            'total_bytes': downloaded_bytes,
            'filename': ctx['filename'],
            'status': 'finished',
            'elapsed': elapsed,
        })
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
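Illustration (not part of the record above): the .ytdl bookkeeping file described in the FragmentFD docstring is plain JSON; this sketch writes and reads one the same way _write_ytdl_file / _read_ytdl_file do (the filename is made up).

import json

state = {'downloader': {'current_fragment': {'index': 7}, 'fragment_count': 42}}
with open('clip.mp4.ytdl', 'w') as f:
    f.write(json.dumps(state))

with open('clip.mp4.ytdl') as f:
    fragment_index = json.loads(f.read())['downloader']['current_fragment']['index']
print(fragment_index)  # 7 -- resume from the 8th fragment (0-based index)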
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/downloader/hls.py
youtube_dl/downloader/hls.py
from __future__ import unicode_literals

import re
import binascii
try:
    from Crypto.Cipher import AES
    can_decrypt_frag = True
except ImportError:
    can_decrypt_frag = False

from .fragment import FragmentFD
from .external import FFmpegFD

from ..compat import (
    compat_urllib_error,
    compat_urlparse,
    compat_struct_pack,
)
from ..utils import (
    parse_m3u8_attributes,
    update_url_query,
)


class HlsFD(FragmentFD):
    """ A limited implementation that does not require ffmpeg """

    FD_NAME = 'hlsnative'

    @staticmethod
    def can_download(manifest, info_dict):
        UNSUPPORTED_FEATURES = (
            r'#EXT-X-KEY:METHOD=(?!NONE|AES-128)',  # encrypted streams [1]
            # r'#EXT-X-BYTERANGE',  # playlists composed of byte ranges of media files [2]

            # Live streams heuristic does not always work (e.g. geo restricted to Germany
            # http://hls-geo.daserste.de/i/videoportal/Film/c_620000/622873/format,716451,716457,716450,716458,716459,.mp4.csmil/index_4_av.m3u8?null=0)
            # r'#EXT-X-MEDIA-SEQUENCE:(?!0$)',  # live streams [3]

            # This heuristic also is not correct since segments may not be appended as well.
            # Twitch vods of finished streams have EXT-X-PLAYLIST-TYPE:EVENT despite
            # no segments will definitely be appended to the end of the playlist.
            # r'#EXT-X-PLAYLIST-TYPE:EVENT',  # media segments may be appended to the end of
            #                                 # event media playlists [4]
            r'#EXT-X-MAP:',  # media initialization [5]

            # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.2.4
            # 2. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.2.2
            # 3. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.2
            # 4. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.5
            # 5. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.2.5
        )
        check_results = [not re.search(feature, manifest) for feature in UNSUPPORTED_FEATURES]
        is_aes128_enc = '#EXT-X-KEY:METHOD=AES-128' in manifest
        check_results.append(can_decrypt_frag or not is_aes128_enc)
        check_results.append(not (is_aes128_enc and r'#EXT-X-BYTERANGE' in manifest))
        check_results.append(not info_dict.get('is_live'))
        return all(check_results)

    def real_download(self, filename, info_dict):
        man_url = info_dict['url']
        self.to_screen('[%s] Downloading m3u8 manifest' % self.FD_NAME)

        urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url))
        man_url = urlh.geturl()
        s = urlh.read().decode('utf-8', 'ignore')

        if not self.can_download(s, info_dict):
            if info_dict.get('extra_param_to_segment_url') or info_dict.get('_decryption_key_url'):
                self.report_error('pycrypto not found. Please install it.')
                return False
            self.report_warning(
                'hlsnative has detected features it does not support, '
                'extraction will be delegated to ffmpeg')
            fd = FFmpegFD(self.ydl, self.params)
            for ph in self._progress_hooks:
                fd.add_progress_hook(ph)
            return fd.real_download(filename, info_dict)

        def is_ad_fragment_start(s):
            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s
                    or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',ad'))

        def is_ad_fragment_end(s):
            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=master' in s
                    or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',segment'))

        media_frags = 0
        ad_frags = 0
        ad_frag_next = False
        for line in s.splitlines():
            line = line.strip()
            if not line:
                continue
            if line.startswith('#'):
                if is_ad_fragment_start(line):
                    ad_frag_next = True
                elif is_ad_fragment_end(line):
                    ad_frag_next = False
                continue
            if ad_frag_next:
                ad_frags += 1
                continue
            media_frags += 1

        ctx = {
            'filename': filename,
            'total_frags': media_frags,
            'ad_frags': ad_frags,
        }

        self._prepare_and_start_frag_download(ctx)

        fragment_retries = self.params.get('fragment_retries', 0)
        skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)
        test = self.params.get('test', False)

        extra_query = None
        extra_param_to_segment_url = info_dict.get('extra_param_to_segment_url')
        if extra_param_to_segment_url:
            extra_query = compat_urlparse.parse_qs(extra_param_to_segment_url)
        i = 0
        media_sequence = 0
        decrypt_info = {'METHOD': 'NONE'}
        byte_range = {}
        frag_index = 0
        ad_frag_next = False
        for line in s.splitlines():
            line = line.strip()
            if line:
                if not line.startswith('#'):
                    if ad_frag_next:
                        continue
                    frag_index += 1
                    if frag_index <= ctx['fragment_index']:
                        continue
                    frag_url = (
                        line
                        if re.match(r'^https?://', line)
                        else compat_urlparse.urljoin(man_url, line))
                    if extra_query:
                        frag_url = update_url_query(frag_url, extra_query)
                    count = 0
                    headers = info_dict.get('http_headers', {})
                    if byte_range:
                        headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'] - 1)
                    while count <= fragment_retries:
                        try:
                            success, frag_content = self._download_fragment(
                                ctx, frag_url, info_dict, headers)
                            if not success:
                                return False
                            break
                        except compat_urllib_error.HTTPError as err:
                            # Unavailable (possibly temporary) fragments may be served.
                            # First we try to retry then either skip or abort.
                            # See https://github.com/ytdl-org/youtube-dl/issues/10165,
                            # https://github.com/ytdl-org/youtube-dl/issues/10448).
                            count += 1
                            if count <= fragment_retries:
                                self.report_retry_fragment(err, frag_index, count, fragment_retries)
                    if count > fragment_retries:
                        if skip_unavailable_fragments:
                            i += 1
                            media_sequence += 1
                            self.report_skip_fragment(frag_index)
                            continue
                        self.report_error(
                            'giving up after %s fragment retries' % fragment_retries)
                        return False
                    if decrypt_info['METHOD'] == 'AES-128':
                        iv = decrypt_info.get('IV') or compat_struct_pack('>8xq', media_sequence)
                        decrypt_info['KEY'] = decrypt_info.get('KEY') or self.ydl.urlopen(
                            self._prepare_url(info_dict, info_dict.get('_decryption_key_url') or decrypt_info['URI'])).read()
                        # Don't decrypt the content in tests since the data is explicitly truncated and it's not to a valid block
                        # size (see https://github.com/ytdl-org/youtube-dl/pull/27660). Tests only care that the correct data downloaded,
                        # not what it decrypts to.
                        if not test:
                            frag_content = AES.new(
                                decrypt_info['KEY'], AES.MODE_CBC, iv).decrypt(frag_content)
                    self._append_fragment(ctx, frag_content)
                    # We only download the first fragment during the test
                    if test:
                        break
                    i += 1
                    media_sequence += 1
                elif line.startswith('#EXT-X-KEY'):
                    decrypt_url = decrypt_info.get('URI')
                    decrypt_info = parse_m3u8_attributes(line[11:])
                    if decrypt_info['METHOD'] == 'AES-128':
                        if 'IV' in decrypt_info:
                            decrypt_info['IV'] = binascii.unhexlify(decrypt_info['IV'][2:].zfill(32))
                        if not re.match(r'^https?://', decrypt_info['URI']):
                            decrypt_info['URI'] = compat_urlparse.urljoin(
                                man_url, decrypt_info['URI'])
                        if extra_query:
                            decrypt_info['URI'] = update_url_query(decrypt_info['URI'], extra_query)
                        if decrypt_url != decrypt_info['URI']:
                            decrypt_info['KEY'] = None
                elif line.startswith('#EXT-X-MEDIA-SEQUENCE'):
                    media_sequence = int(line[22:])
                elif line.startswith('#EXT-X-BYTERANGE'):
                    splitted_byte_range = line[17:].split('@')
                    sub_range_start = int(splitted_byte_range[1]) if len(splitted_byte_range) == 2 else byte_range['end']
                    byte_range = {
                        'start': sub_range_start,
                        'end': sub_range_start + int(splitted_byte_range[0]),
                    }
                elif is_ad_fragment_start(line):
                    ad_frag_next = True
                elif is_ad_fragment_end(line):
                    ad_frag_next = False

        self._finish_frag_download(ctx)

        return True
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
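Illustration (not part of the record above): when an AES-128 playlist omits the IV, hlsnative derives the 16-byte IV from the media sequence number as a big-endian integer, zero-padded on the left; compat_struct_pack('>8xq', n) in the original is plain struct.pack here.

import struct

media_sequence = 5  # made-up sequence number
iv = struct.pack('>8xq', media_sequence)  # 8 pad bytes + 8-byte big-endian int
print(len(iv), iv.hex())  # 16 00000000000000000000000000000005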
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/downloader/http.py
youtube_dl/downloader/http.py
from __future__ import unicode_literals

import errno
import os
import socket
import time
import random
import re

from .common import FileDownloader
from ..compat import (
    compat_str,
    compat_urllib_error,
)
from ..utils import (
    ContentTooShortError,
    encodeFilename,
    int_or_none,
    sanitize_open,
    sanitized_Request,
    write_xattr,
    XAttrMetadataError,
    XAttrUnavailableError,
)


class HttpFD(FileDownloader):
    def real_download(self, filename, info_dict):
        url = info_dict['url']

        class DownloadContext(dict):
            __getattr__ = dict.get
            __setattr__ = dict.__setitem__
            __delattr__ = dict.__delitem__

        ctx = DownloadContext()
        ctx.filename = filename
        ctx.tmpfilename = self.temp_name(filename)
        ctx.stream = None

        # Do not include the Accept-Encoding header
        headers = {'Youtubedl-no-compression': 'True'}
        add_headers = info_dict.get('http_headers')
        if add_headers:
            headers.update(add_headers)

        is_test = self.params.get('test', False)
        chunk_size = self._TEST_FILE_SIZE if is_test else (
            info_dict.get('downloader_options', {}).get('http_chunk_size')
            or self.params.get('http_chunk_size') or 0)

        ctx.open_mode = 'wb'
        ctx.resume_len = 0
        ctx.data_len = None
        ctx.block_size = self.params.get('buffersize', 1024)
        ctx.start_time = time.time()
        ctx.chunk_size = None

        if self.params.get('continuedl', True):
            # Establish possible resume length
            ctx.resume_len = info_dict.get('frag_resume_len')
            if ctx.resume_len is None:
                ctx.resume_len = self.filesize_or_none(ctx.tmpfilename) or 0

        ctx.is_resume = ctx.resume_len > 0

        count = 0
        retries = self.params.get('retries', 0)

        class SucceedDownload(Exception):
            pass

        class RetryDownload(Exception):
            def __init__(self, source_error):
                self.source_error = source_error

        class NextFragment(Exception):
            pass

        def set_range(req, start, end):
            range_header = 'bytes=%d-' % start
            if end:
                range_header += compat_str(end)
            req.add_header('Range', range_header)

        def establish_connection():
            ctx.chunk_size = (random.randint(int(chunk_size * 0.95), chunk_size)
                              if not is_test and chunk_size else chunk_size)
            if ctx.resume_len > 0:
                range_start = ctx.resume_len
                if ctx.is_resume:
                    self.report_resuming_byte(ctx.resume_len)
                ctx.open_mode = 'ab'
            elif ctx.chunk_size > 0:
                range_start = 0
            else:
                range_start = None
            ctx.is_resume = False
            range_end = range_start + ctx.chunk_size - 1 if ctx.chunk_size else None
            if range_end and ctx.data_len is not None and range_end >= ctx.data_len:
                range_end = ctx.data_len - 1
            has_range = range_start is not None
            ctx.has_range = has_range
            request = sanitized_Request(url, None, headers)
            if has_range:
                set_range(request, range_start, range_end)
            # Establish connection
            try:
                try:
                    ctx.data = self.ydl.urlopen(request)
                except (compat_urllib_error.URLError, ) as err:
                    # reason may not be available, e.g. for urllib2.HTTPError on python 2.6
                    reason = getattr(err, 'reason', None)
                    if isinstance(reason, socket.timeout):
                        raise RetryDownload(err)
                    raise err
                # When trying to resume, Content-Range HTTP header of response has to be checked
                # to match the value of requested Range HTTP header. This is due to webservers
                # that don't support resuming and serve a whole file with no Content-Range
                # set in response despite requested Range (see
                # https://github.com/ytdl-org/youtube-dl/issues/6057#issuecomment-126129799)
                if has_range:
                    content_range = ctx.data.headers.get('Content-Range')
                    if content_range:
                        content_range_m = re.search(r'bytes (\d+)-(\d+)?(?:/(\d+))?', content_range)
                        # Content-Range is present and matches requested Range, resume is possible
                        if content_range_m:
                            if range_start == int(content_range_m.group(1)):
                                content_range_end = int_or_none(content_range_m.group(2))
                                content_len = int_or_none(content_range_m.group(3))
                                accept_content_len = (
                                    # Non-chunked download
                                    not ctx.chunk_size
                                    # Chunked download and requested piece or
                                    # its part is promised to be served
                                    or content_range_end == range_end
                                    or content_len < range_end)
                                if accept_content_len:
                                    ctx.data_len = content_len
                                    return
                    # Content-Range is either not present or invalid. Assuming remote webserver is
                    # trying to send the whole file, resume is not possible, so wiping the local file
                    # and performing entire redownload
                    if range_start > 0:
                        self.report_unable_to_resume()
                    ctx.resume_len = 0
                    ctx.open_mode = 'wb'
                ctx.data_len = int_or_none(ctx.data.info().get('Content-length', None))
                return
            except (compat_urllib_error.HTTPError, ) as err:
                if err.code == 416:
                    # Unable to resume (requested range not satisfiable)
                    try:
                        # Open the connection again without the range header
                        ctx.data = self.ydl.urlopen(
                            sanitized_Request(url, None, headers))
                        content_length = ctx.data.info()['Content-Length']
                    except (compat_urllib_error.HTTPError, ) as err:
                        if err.code < 500 or err.code >= 600:
                            raise
                    else:
                        # Examine the reported length
                        if (content_length is not None
                                and (ctx.resume_len - 100 < int(content_length) < ctx.resume_len + 100)):
                            # The file had already been fully downloaded.
                            # Explanation to the above condition: in issue #175 it was revealed that
                            # YouTube sometimes adds or removes a few bytes from the end of the file,
                            # changing the file size slightly and causing problems for some users. So
                            # I decided to implement a suggested change and consider the file
                            # completely downloaded if the file size differs less than 100 bytes from
                            # the one in the hard drive.
                            self.report_file_already_downloaded(ctx.filename)
                            self.try_rename(ctx.tmpfilename, ctx.filename)
                            self._hook_progress({
                                'filename': ctx.filename,
                                'status': 'finished',
                                'downloaded_bytes': ctx.resume_len,
                                'total_bytes': ctx.resume_len,
                            })
                            raise SucceedDownload()
                        else:
                            # The length does not match, we start the download over
                            self.report_unable_to_resume()
                            ctx.resume_len = 0
                            ctx.open_mode = 'wb'
                            return
                elif err.code < 500 or err.code >= 600:
                    # Unexpected HTTP error
                    raise
                raise RetryDownload(err)
            except socket.error as err:
                if err.errno != errno.ECONNRESET:
                    # Connection reset is no problem, just retry
                    raise
                raise RetryDownload(err)

        def download():
            data_len = ctx.data.info().get('Content-length', None)

            # Range HTTP header may be ignored/unsupported by a webserver
            # (e.g. extractor/scivee.py, extractor/bambuser.py).
            # However, for a test we still would like to download just a piece of a file.
            # To achieve this we limit data_len to _TEST_FILE_SIZE and manually control
            # block size when downloading a file.
            if is_test and (data_len is None or int(data_len) > self._TEST_FILE_SIZE):
                data_len = self._TEST_FILE_SIZE

            if data_len is not None:
                data_len = int(data_len) + ctx.resume_len
                min_data_len = self.params.get('min_filesize')
                max_data_len = self.params.get('max_filesize')
                if min_data_len is not None and data_len < min_data_len:
                    self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
                    return False
                if max_data_len is not None and data_len > max_data_len:
                    self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
                    return False

            byte_counter = 0 + ctx.resume_len
            block_size = ctx.block_size
            start = time.time()

            # measure time over whole while-loop, so slow_down() and best_block_size() work together properly
            now = None  # needed for slow_down() in the first loop run
            before = start  # start measuring

            def retry(e):
                to_stdout = ctx.tmpfilename == '-'
                if ctx.stream is not None:
                    if not to_stdout:
                        ctx.stream.close()
                    ctx.stream = None
                ctx.resume_len = byte_counter if to_stdout else os.path.getsize(encodeFilename(ctx.tmpfilename))
                raise RetryDownload(e)

            while True:
                try:
                    # Download and write
                    data_block = ctx.data.read(block_size if data_len is None else min(block_size, data_len - byte_counter))
                # socket.timeout is a subclass of socket.error but may not have
                # errno set
                except socket.timeout as e:
                    retry(e)
                except socket.error as e:
                    # SSLError on python 2 (inherits socket.error) may have
                    # no errno set but this error message
                    if e.errno in (errno.ECONNRESET, errno.ETIMEDOUT) or getattr(e, 'message', None) == 'The read operation timed out':
                        retry(e)
                    raise

                byte_counter += len(data_block)

                # exit loop when download is finished
                if len(data_block) == 0:
                    break

                # Open destination file just in time
                if ctx.stream is None:
                    try:
                        ctx.stream, ctx.tmpfilename = sanitize_open(
                            ctx.tmpfilename, ctx.open_mode)
                        assert ctx.stream is not None
                        ctx.filename = self.undo_temp_name(ctx.tmpfilename)
                        self.report_destination(ctx.filename)
                    except (OSError, IOError) as err:
                        self.report_error('unable to open for writing: %s' % str(err))
                        return False

                    if self.params.get('xattr_set_filesize', False) and data_len is not None:
                        try:
                            write_xattr(ctx.tmpfilename, 'user.ytdl.filesize', str(data_len).encode('utf-8'))
                        except (XAttrUnavailableError, XAttrMetadataError) as err:
                            self.report_error('unable to set filesize xattr: %s' % str(err))

                try:
                    ctx.stream.write(data_block)
                except (IOError, OSError) as err:
                    self.to_stderr('\n')
                    self.report_error('unable to write data: %s' % str(err))
                    return False

                # Apply rate limit
                self.slow_down(start, now, byte_counter - ctx.resume_len)

                # end measuring of one loop run
                now = time.time()
                after = now

                # Adjust block size
                if not self.params.get('noresizebuffer', False):
                    block_size = self.best_block_size(after - before, len(data_block))

                before = after

                # Progress message
                speed = self.calc_speed(start, now, byte_counter - ctx.resume_len)
                eta = self.calc_eta(speed, ctx.data_len and (ctx.data_len - byte_counter))

                self._hook_progress({
                    'status': 'downloading',
                    'downloaded_bytes': byte_counter,
                    'total_bytes': ctx.data_len,
                    'tmpfilename': ctx.tmpfilename,
                    'filename': ctx.filename,
                    'eta': eta,
                    'speed': speed,
                    'elapsed': now - ctx.start_time,
                })

                if data_len is not None and byte_counter == data_len:
                    break

            if not is_test and ctx.chunk_size and ctx.data_len is not None and byte_counter < ctx.data_len:
                ctx.resume_len = byte_counter
                # ctx.block_size = block_size
                raise NextFragment()

            if ctx.stream is None:
                self.to_stderr('\n')
                self.report_error('Did not get any data blocks')
                return False
            if ctx.tmpfilename != '-':
                ctx.stream.close()

            if data_len is not None and byte_counter != data_len:
                err = ContentTooShortError(byte_counter, int(data_len))
                if count <= retries:
                    retry(err)
                raise err

            self.try_rename(ctx.tmpfilename, ctx.filename)

            # Update file modification time
            if self.params.get('updatetime', True):
                info_dict['filetime'] = self.try_utime(ctx.filename, ctx.data.info().get('last-modified', None))

            self._hook_progress({
                'downloaded_bytes': byte_counter,
                'total_bytes': byte_counter,
                'filename': ctx.filename,
                'status': 'finished',
                'elapsed': time.time() - ctx.start_time,
            })

            return True

        while count <= retries:
            try:
                establish_connection()
                return download()
            except RetryDownload as e:
                count += 1
                if count <= retries:
                    self.report_retry(e.source_error, count, retries)
                continue
            except NextFragment:
                continue
            except SucceedDownload:
                return True

        self.report_error('giving up after %s retries' % retries)
        return False
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
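The resume logic in the youtube_dl/downloader/http.py fragment above hinges on one check: the Content-Range of the response must echo the byte offset requested in the Range header, otherwise the partial file is wiped and the download restarts. A minimal standalone sketch of that check (function name and test values are illustrative, not part of youtube-dl's API):

import re

def range_matches(resume_len, content_range_header):
    """True if the Content-Range response header confirms the server honoured
    a 'Range: bytes=<resume_len>-' request, i.e. resuming is safe."""
    if not content_range_header:
        return False
    m = re.search(r'bytes (\d+)-(\d+)?(?:/(\d+))?', content_range_header)
    return bool(m) and int(m.group(1)) == resume_len

# The downloader wipes the .part file and restarts whenever this fails:
assert range_matches(1000, 'bytes 1000-4999/5000')   # proper partial reply
assert not range_matches(1000, 'bytes 0-4999/5000')  # whole file resent
assert not range_matches(1000, None)                 # header missing entirely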
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/downloader/common.py
youtube_dl/downloader/common.py
from __future__ import division, unicode_literals

import os
import re
import sys
import time
import random

from ..compat import compat_os_name
from ..utils import (
    decodeArgument,
    encodeFilename,
    error_to_compat_str,
    float_or_none,
    format_bytes,
    shell_quote,
    timeconvert,
)


class FileDownloader(object):
    """File Downloader class.

    File downloader objects are the ones responsible for downloading the
    actual video file and writing it to disk.

    File downloaders accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead.

    Available options:

    verbose:            Print additional info to stdout.
    quiet:              Do not print messages to stdout.
    ratelimit:          Download speed limit, in bytes/sec.
    retries:            Number of times to retry for HTTP error 5xx
    buffersize:         Size of download buffer in bytes.
    noresizebuffer:     Do not automatically resize the download buffer.
    continuedl:         Try to continue downloads if possible.
    noprogress:         Do not print the progress bar.
    logtostderr:        Log messages to stderr instead of stdout.
    consoletitle:       Display progress in console window's titlebar.
    nopart:             Do not use temporary .part files.
    updatetime:         Use the Last-modified header to set output file timestamps.
    test:               Download only first bytes to test the downloader.
    min_filesize:       Skip files smaller than this size
    max_filesize:       Skip files larger than this size
    xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
    external_downloader_args:  A list of additional command-line arguments for the
                        external downloader.
    hls_use_mpegts:     Use the mpegts container for HLS videos.
    http_chunk_size:    Size of a chunk for chunk-based HTTP downloading. May be
                        useful for bypassing bandwidth throttling imposed by
                        a webserver (experimental)

    Subclasses of this one must re-define the real_download method.
    """

    _TEST_FILE_SIZE = 10241
    params = None

    def __init__(self, ydl, params):
        """Create a FileDownloader object with the given options."""
        self.ydl = ydl
        self._progress_hooks = []
        self.params = params
        self.add_progress_hook(self.report_progress)

    @staticmethod
    def format_seconds(seconds):
        (mins, secs) = divmod(seconds, 60)
        (hours, mins) = divmod(mins, 60)
        if hours > 99:
            return '--:--:--'
        if hours == 0:
            return '%02d:%02d' % (mins, secs)
        else:
            return '%02d:%02d:%02d' % (hours, mins, secs)

    @staticmethod
    def calc_percent(byte_counter, data_len):
        if data_len is None:
            return None
        return float(byte_counter) / float(data_len) * 100.0

    @staticmethod
    def format_percent(percent):
        if percent is None:
            return '---.-%'
        return '%6s' % ('%3.1f%%' % percent)

    @classmethod
    def calc_eta(cls, start_or_rate, now_or_remaining, *args):
        if len(args) < 2:
            rate, remaining = (start_or_rate, now_or_remaining)
            if None in (rate, remaining):
                return None
            return int(float(remaining) / rate)
        start, now = (start_or_rate, now_or_remaining)
        total, current = args[:2]
        if total is None:
            return None
        if now is None:
            now = time.time()
        rate = cls.calc_speed(start, now, current)
        return rate and int((float(total) - float(current)) / rate)

    @staticmethod
    def format_eta(eta):
        if eta is None:
            return '--:--'
        return FileDownloader.format_seconds(eta)

    @staticmethod
    def calc_speed(start, now, bytes):
        dif = now - start
        if bytes == 0 or dif < 0.001:  # One millisecond
            return None
        return float(bytes) / dif

    @staticmethod
    def format_speed(speed):
        if speed is None:
            return '%10s' % '---b/s'
        return '%10s' % ('%s/s' % format_bytes(speed))

    @staticmethod
    def format_retries(retries):
        return 'inf' if retries == float('inf') else '%.0f' % retries

    @staticmethod
    def filesize_or_none(unencoded_filename):
        fn = encodeFilename(unencoded_filename)
        if os.path.isfile(fn):
            return os.path.getsize(fn)

    @staticmethod
    def best_block_size(elapsed_time, bytes):
        new_min = max(bytes / 2.0, 1.0)
        new_max = min(max(bytes * 2.0, 1.0), 4194304)  # Do not surpass 4 MB
        if elapsed_time < 0.001:
            return int(new_max)
        rate = bytes / elapsed_time
        if rate > new_max:
            return int(new_max)
        if rate < new_min:
            return int(new_min)
        return int(rate)

    @staticmethod
    def parse_bytes(bytestr):
        """Parse a string indicating a byte quantity into an integer."""
        matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
        if matchobj is None:
            return None
        number = float(matchobj.group(1))
        multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
        return int(round(number * multiplier))

    def to_screen(self, *args, **kargs):
        self.ydl.to_screen(*args, **kargs)

    def to_stderr(self, message):
        self.ydl.to_screen(message)

    def to_console_title(self, message):
        self.ydl.to_console_title(message)

    def trouble(self, *args, **kargs):
        self.ydl.trouble(*args, **kargs)

    def report_warning(self, *args, **kargs):
        self.ydl.report_warning(*args, **kargs)

    def report_error(self, *args, **kargs):
        self.ydl.report_error(*args, **kargs)

    def slow_down(self, start_time, now, byte_counter):
        """Sleep if the download speed is over the rate limit."""
        rate_limit = self.params.get('ratelimit')
        if rate_limit is None or byte_counter == 0:
            return
        if now is None:
            now = time.time()
        elapsed = now - start_time
        if elapsed <= 0.0:
            return
        speed = float(byte_counter) / elapsed
        if speed > rate_limit:
            sleep_time = float(byte_counter) / rate_limit - elapsed
            if sleep_time > 0:
                time.sleep(sleep_time)

    def temp_name(self, filename):
        """Returns a temporary filename for the given filename."""
        if self.params.get('nopart', False) or filename == '-' or \
                (os.path.exists(encodeFilename(filename))
                 and not os.path.isfile(encodeFilename(filename))):
            return filename
        return filename + '.part'

    def undo_temp_name(self, filename):
        if filename.endswith('.part'):
            return filename[:-len('.part')]
        return filename

    def ytdl_filename(self, filename):
        return filename + '.ytdl'

    def try_rename(self, old_filename, new_filename):
        try:
            if old_filename == new_filename:
                return
            os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
        except (IOError, OSError) as err:
            self.report_error('unable to rename file: %s' % error_to_compat_str(err))

    def try_utime(self, filename, last_modified_hdr):
        """Try to set the last-modified time of the given file."""
        if last_modified_hdr is None:
            return
        if not os.path.isfile(encodeFilename(filename)):
            return
        timestr = last_modified_hdr
        if timestr is None:
            return
        filetime = timeconvert(timestr)
        if filetime is None:
            return filetime
        # Ignore obviously invalid dates
        if filetime == 0:
            return
        try:
            os.utime(filename, (time.time(), filetime))
        except Exception:
            pass
        return filetime

    def report_destination(self, filename):
        """Report destination filename."""
        self.to_screen('[download] Destination: ' + filename)

    def _report_progress_status(self, msg, is_last_line=False):
        fullmsg = '[download] ' + msg
        if self.params.get('progress_with_newline', False):
            self.to_screen(fullmsg)
        else:
            if compat_os_name == 'nt':
                prev_len = getattr(self, '_report_progress_prev_line_length', 0)
                if prev_len > len(fullmsg):
                    fullmsg += ' ' * (prev_len - len(fullmsg))
                self._report_progress_prev_line_length = len(fullmsg)
                clear_line = '\r'
            else:
                clear_line = ('\r\x1b[K' if sys.stderr.isatty() else '\r')
            self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
        self.to_console_title('youtube-dl ' + msg)

    def report_progress(self, s):
        if s['status'] == 'finished':
            if self.params.get('noprogress', False):
                self.to_screen('[download] Download completed')
            else:
                msg_template = '100%%'
                if s.get('total_bytes') is not None:
                    s['_total_bytes_str'] = format_bytes(s['total_bytes'])
                    msg_template += ' of %(_total_bytes_str)s'
                if s.get('elapsed') is not None:
                    s['_elapsed_str'] = self.format_seconds(s['elapsed'])
                    msg_template += ' in %(_elapsed_str)s'
                self._report_progress_status(
                    msg_template % s, is_last_line=True)

        if self.params.get('noprogress'):
            return

        if s['status'] != 'downloading':
            return

        if s.get('eta') is not None:
            s['_eta_str'] = self.format_eta(s['eta'])
        else:
            s['_eta_str'] = 'Unknown ETA'

        if s.get('total_bytes') and s.get('downloaded_bytes') is not None:
            s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes'])
        elif s.get('total_bytes_estimate') and s.get('downloaded_bytes') is not None:
            s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes_estimate'])
        else:
            if s.get('downloaded_bytes') == 0:
                s['_percent_str'] = self.format_percent(0)
            else:
                s['_percent_str'] = 'Unknown %'

        if s.get('speed') is not None:
            s['_speed_str'] = self.format_speed(s['speed'])
        else:
            s['_speed_str'] = 'Unknown speed'

        if s.get('total_bytes') is not None:
            s['_total_bytes_str'] = format_bytes(s['total_bytes'])
            msg_template = '%(_percent_str)s of %(_total_bytes_str)s at %(_speed_str)s ETA %(_eta_str)s'
        elif s.get('total_bytes_estimate') is not None:
            s['_total_bytes_estimate_str'] = format_bytes(s['total_bytes_estimate'])
            msg_template = '%(_percent_str)s of ~%(_total_bytes_estimate_str)s at %(_speed_str)s ETA %(_eta_str)s'
        else:
            if s.get('downloaded_bytes') is not None:
                s['_downloaded_bytes_str'] = format_bytes(s['downloaded_bytes'])
                if s.get('elapsed'):
                    s['_elapsed_str'] = self.format_seconds(s['elapsed'])
                    msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s (%(_elapsed_str)s)'
                else:
                    msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s'
            else:
                msg_template = '%(_percent_str)s at %(_speed_str)s ETA %(_eta_str)s'

        self._report_progress_status(msg_template % s)

    def report_resuming_byte(self, resume_len):
        """Report attempt to resume at given byte."""
        self.to_screen('[download] Resuming download at byte %s' % resume_len)

    def report_retry(self, err, count, retries):
        """Report retry in case of HTTP error 5xx"""
        self.to_screen(
            '[download] Got server HTTP error: %s. Retrying (attempt %d of %s)...'
            % (error_to_compat_str(err), count, self.format_retries(retries)))

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_unable_to_resume(self):
        """Report it was impossible to resume download."""
        self.to_screen('[download] Unable to resume')

    def download(self, filename, info_dict):
        """Download to a filename using the info from info_dict
        Return True on success and False otherwise

        This method filters the `Cookie` header from the info_dict to prevent leaks.
        Downloaders have their own way of handling cookies.
        See: https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-v8mc-9377-rwjj
        """
        nooverwrites_and_exists = (
            self.params.get('nooverwrites', False)
            and os.path.exists(encodeFilename(filename))
        )

        if not hasattr(filename, 'write'):
            continuedl_and_exists = (
                self.params.get('continuedl', True)
                and os.path.isfile(encodeFilename(filename))
                and not self.params.get('nopart', False)
            )

            # Check file already present
            if filename != '-' and (nooverwrites_and_exists or continuedl_and_exists):
                self.report_file_already_downloaded(filename)
                self._hook_progress({
                    'filename': filename,
                    'status': 'finished',
                    'total_bytes': os.path.getsize(encodeFilename(filename)),
                })
                return True

        min_sleep_interval, max_sleep_interval = (
            float_or_none(self.params.get(interval), default=0)
            for interval in ('sleep_interval', 'max_sleep_interval'))
        sleep_note = ''
        available_at = info_dict.get('available_at')
        if available_at:
            forced_sleep_interval = available_at - int(time.time())
            if forced_sleep_interval > min_sleep_interval:
                sleep_note = 'as required by the site'
                min_sleep_interval = forced_sleep_interval
            if forced_sleep_interval > max_sleep_interval:
                max_sleep_interval = forced_sleep_interval
        sleep_interval = random.uniform(
            min_sleep_interval, max_sleep_interval or min_sleep_interval)
        if sleep_interval > 0:
            self.to_screen(
                '[download] Sleeping %.2f seconds %s...' % (
                    sleep_interval, sleep_note))
            time.sleep(sleep_interval)

        return self.real_download(filename, info_dict)

    def real_download(self, filename, info_dict):
        """Real download process. Redefine in subclasses."""
        raise NotImplementedError('This method must be implemented by subclasses')

    def _hook_progress(self, status):
        for ph in self._progress_hooks:
            ph(status)

    def add_progress_hook(self, ph):
        # See YoutubeDL.py (search for progress_hooks) for a description of
        # this interface
        self._progress_hooks.append(ph)

    def _debug_cmd(self, args, exe=None):
        if not self.params.get('verbose', False):
            return

        str_args = [decodeArgument(a) for a in args]

        if exe is None:
            exe = os.path.basename(str_args[0])

        self.to_screen('[debug] %s command line: %s' % (
            exe, shell_quote(str_args)))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
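Two of the static helpers in youtube_dl/downloader/common.py above are easy to sanity-check in isolation. The snippet below assumes the youtube-dl package is importable (an assumption about your environment, not part of the file itself); the expected values follow directly from parse_bytes()'s regex and its 1024 ** 'bkmgtpezy'.index(...) multiplier, and from best_block_size()'s doubling rule:

# Assumes youtube-dl is installed so this import resolves.
from youtube_dl.downloader.common import FileDownloader as FD

assert FD.parse_bytes('500k') == 512000     # 500 * 1024
assert FD.parse_bytes('4.2M') == 4404019    # round(4.2 * 1024 ** 2)
assert FD.parse_bytes('1G') == 1024 ** 3
assert FD.parse_bytes('bogus') is None      # regex mismatch -> None

# best_block_size() roughly doubles the buffer while throughput keeps up,
# hard-capped at 4194304 bytes (4 MB):
block = 1024
for elapsed in (0.25, 0.25, 0.25, 0.25):    # four reads at a steady pace
    block = FD.best_block_size(elapsed, block)
print(block)  # 16384 after four doublings from 1024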
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/downloader/f4m.py
youtube_dl/downloader/f4m.py
from __future__ import division, unicode_literals

import io
import itertools
import time

from .fragment import FragmentFD
from ..compat import (
    compat_b64decode,
    compat_etree_fromstring,
    compat_urlparse,
    compat_urllib_error,
    compat_urllib_parse_urlparse,
    compat_struct_pack,
    compat_struct_unpack,
)
from ..utils import (
    fix_xml_ampersands,
    xpath_text,
)


class DataTruncatedError(Exception):
    pass


class FlvReader(io.BytesIO):
    """
    Reader for Flv files
    The file format is documented in https://www.adobe.com/devnet/f4v.html
    """

    def read_bytes(self, n):
        data = self.read(n)
        if len(data) < n:
            raise DataTruncatedError(
                'FlvReader error: need %d bytes while only %d bytes got' % (
                    n, len(data)))
        return data

    # Utility functions for reading numbers and strings
    def read_unsigned_long_long(self):
        return compat_struct_unpack('!Q', self.read_bytes(8))[0]

    def read_unsigned_int(self):
        return compat_struct_unpack('!I', self.read_bytes(4))[0]

    def read_unsigned_char(self):
        return compat_struct_unpack('!B', self.read_bytes(1))[0]

    def read_string(self):
        res = b''
        while True:
            char = self.read_bytes(1)
            if char == b'\x00':
                break
            res += char
        return res

    def read_box_info(self):
        """
        Read a box and return the info as a tuple: (box_size, box_type, box_data)
        """
        real_size = size = self.read_unsigned_int()
        box_type = self.read_bytes(4)
        header_end = 8
        if size == 1:
            real_size = self.read_unsigned_long_long()
            header_end = 16
        return real_size, box_type, self.read_bytes(real_size - header_end)

    def read_asrt(self):
        # version
        self.read_unsigned_char()
        # flags
        self.read_bytes(3)
        quality_entry_count = self.read_unsigned_char()
        # QualityEntryCount
        for i in range(quality_entry_count):
            self.read_string()

        segment_run_count = self.read_unsigned_int()
        segments = []
        for i in range(segment_run_count):
            first_segment = self.read_unsigned_int()
            fragments_per_segment = self.read_unsigned_int()
            segments.append((first_segment, fragments_per_segment))

        return {
            'segment_run': segments,
        }

    def read_afrt(self):
        # version
        self.read_unsigned_char()
        # flags
        self.read_bytes(3)
        # time scale
        self.read_unsigned_int()

        quality_entry_count = self.read_unsigned_char()
        # QualitySegmentUrlModifiers
        for i in range(quality_entry_count):
            self.read_string()

        fragments_count = self.read_unsigned_int()
        fragments = []
        for i in range(fragments_count):
            first = self.read_unsigned_int()
            first_ts = self.read_unsigned_long_long()
            duration = self.read_unsigned_int()
            if duration == 0:
                discontinuity_indicator = self.read_unsigned_char()
            else:
                discontinuity_indicator = None
            fragments.append({
                'first': first,
                'ts': first_ts,
                'duration': duration,
                'discontinuity_indicator': discontinuity_indicator,
            })

        return {
            'fragments': fragments,
        }

    def read_abst(self):
        # version
        self.read_unsigned_char()
        # flags
        self.read_bytes(3)

        self.read_unsigned_int()  # BootstrapinfoVersion
        # Profile,Live,Update,Reserved
        flags = self.read_unsigned_char()
        live = flags & 0x20 != 0
        # time scale
        self.read_unsigned_int()
        # CurrentMediaTime
        self.read_unsigned_long_long()
        # SmpteTimeCodeOffset
        self.read_unsigned_long_long()

        self.read_string()  # MovieIdentifier
        server_count = self.read_unsigned_char()
        # ServerEntryTable
        for i in range(server_count):
            self.read_string()
        quality_count = self.read_unsigned_char()
        # QualityEntryTable
        for i in range(quality_count):
            self.read_string()
        # DrmData
        self.read_string()
        # MetaData
        self.read_string()

        segments_count = self.read_unsigned_char()
        segments = []
        for i in range(segments_count):
            box_size, box_type, box_data = self.read_box_info()
            assert box_type == b'asrt'
            segment = FlvReader(box_data).read_asrt()
            segments.append(segment)
        fragments_run_count = self.read_unsigned_char()
        fragments = []
        for i in range(fragments_run_count):
            box_size, box_type, box_data = self.read_box_info()
            assert box_type == b'afrt'
            fragments.append(FlvReader(box_data).read_afrt())

        return {
            'segments': segments,
            'fragments': fragments,
            'live': live,
        }

    def read_bootstrap_info(self):
        total_size, box_type, box_data = self.read_box_info()
        assert box_type == b'abst'
        return FlvReader(box_data).read_abst()


def read_bootstrap_info(bootstrap_bytes):
    return FlvReader(bootstrap_bytes).read_bootstrap_info()


def build_fragments_list(boot_info):
    """ Return a list of (segment, fragment) for each fragment in the video """
    res = []
    segment_run_table = boot_info['segments'][0]
    fragment_run_entry_table = boot_info['fragments'][0]['fragments']
    first_frag_number = fragment_run_entry_table[0]['first']
    fragments_counter = itertools.count(first_frag_number)
    for segment, fragments_count in segment_run_table['segment_run']:
        # In some live HDS streams (for example Rai), `fragments_count` is
        # abnormal and causing out-of-memory errors. It's OK to change the
        # number of fragments for live streams as they are updated periodically
        if fragments_count == 4294967295 and boot_info['live']:
            fragments_count = 2
        for _ in range(fragments_count):
            res.append((segment, next(fragments_counter)))

    if boot_info['live']:
        res = res[-2:]

    return res


def write_unsigned_int(stream, val):
    stream.write(compat_struct_pack('!I', val))


def write_unsigned_int_24(stream, val):
    stream.write(compat_struct_pack('!I', val)[1:])


def write_flv_header(stream):
    """Writes the FLV header to stream"""
    # FLV header
    stream.write(b'FLV\x01')
    stream.write(b'\x05')
    stream.write(b'\x00\x00\x00\x09')
    stream.write(b'\x00\x00\x00\x00')


def write_metadata_tag(stream, metadata):
    """Writes optional metadata tag to stream"""
    SCRIPT_TAG = b'\x12'
    FLV_TAG_HEADER_LEN = 11

    if metadata:
        stream.write(SCRIPT_TAG)
        write_unsigned_int_24(stream, len(metadata))
        stream.write(b'\x00\x00\x00\x00\x00\x00\x00')
        stream.write(metadata)
        write_unsigned_int(stream, FLV_TAG_HEADER_LEN + len(metadata))


def remove_encrypted_media(media):
    return list(filter(lambda e: 'drmAdditionalHeaderId' not in e.attrib
                       and 'drmAdditionalHeaderSetId' not in e.attrib,
                       media))


def _add_ns(prop, ver=1):
    return '{http://ns.adobe.com/f4m/%d.0}%s' % (ver, prop)


def get_base_url(manifest):
    base_url = xpath_text(
        manifest, [_add_ns('baseURL'), _add_ns('baseURL', 2)],
        'base URL', default=None)
    if base_url:
        base_url = base_url.strip()
    return base_url


class F4mFD(FragmentFD):
    """
    A downloader for f4m manifests or AdobeHDS.
    """

    FD_NAME = 'f4m'

    def _get_unencrypted_media(self, doc):
        media = doc.findall(_add_ns('media'))
        if not media:
            self.report_error('No media found')
        for e in (doc.findall(_add_ns('drmAdditionalHeader'))
                  + doc.findall(_add_ns('drmAdditionalHeaderSet'))):
            # If id attribute is missing it's valid for all media nodes
            # without drmAdditionalHeaderId or drmAdditionalHeaderSetId attribute
            if 'id' not in e.attrib:
                self.report_error('Missing ID in f4m DRM')
        media = remove_encrypted_media(media)
        if not media:
            self.report_error('Unsupported DRM')
        return media

    def _get_bootstrap_from_url(self, bootstrap_url):
        bootstrap = self.ydl.urlopen(bootstrap_url).read()
        return read_bootstrap_info(bootstrap)

    def _update_live_fragments(self, bootstrap_url, latest_fragment):
        fragments_list = []
        retries = 30
        while (not fragments_list) and (retries > 0):
            boot_info = self._get_bootstrap_from_url(bootstrap_url)
            fragments_list = build_fragments_list(boot_info)
            fragments_list = [f for f in fragments_list if f[1] > latest_fragment]
            if not fragments_list:
                # Retry after a while
                time.sleep(5.0)
                retries -= 1

        if not fragments_list:
            self.report_error('Failed to update fragments')

        return fragments_list

    def _parse_bootstrap_node(self, node, base_url):
        # Sometimes non empty inline bootstrap info can be specified along
        # with bootstrap url attribute (e.g. dummy inline bootstrap info
        # contains whitespace characters in [1]). We will prefer bootstrap
        # url over inline bootstrap info when present.
        # 1. http://live-1-1.rutube.ru/stream/1024/HDS/SD/C2NKsS85HQNckgn5HdEmOQ/1454167650/S-s604419906/move/four/dirs/upper/1024-576p.f4m
        bootstrap_url = node.get('url')
        if bootstrap_url:
            bootstrap_url = compat_urlparse.urljoin(
                base_url, bootstrap_url)
            boot_info = self._get_bootstrap_from_url(bootstrap_url)
        else:
            bootstrap_url = None
            bootstrap = compat_b64decode(node.text)
            boot_info = read_bootstrap_info(bootstrap)
        return boot_info, bootstrap_url

    def real_download(self, filename, info_dict):
        man_url = info_dict['url']
        requested_bitrate = info_dict.get('tbr')
        self.to_screen('[%s] Downloading f4m manifest' % self.FD_NAME)

        urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url))
        man_url = urlh.geturl()
        # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
        # (see https://github.com/ytdl-org/youtube-dl/issues/6215#issuecomment-121704244
        # and https://github.com/ytdl-org/youtube-dl/issues/7823)
        manifest = fix_xml_ampersands(urlh.read().decode('utf-8', 'ignore')).strip()

        doc = compat_etree_fromstring(manifest)
        formats = [(int(f.attrib.get('bitrate', -1)), f)
                   for f in self._get_unencrypted_media(doc)]
        if requested_bitrate is None or len(formats) == 1:
            # get the best format
            formats = sorted(formats, key=lambda f: f[0])
            rate, media = formats[-1]
        else:
            rate, media = list(filter(
                lambda f: int(f[0]) == requested_bitrate, formats))[0]

        # Prefer baseURL for relative URLs as per 11.2 of F4M 3.0 spec.
        man_base_url = get_base_url(doc) or man_url

        base_url = compat_urlparse.urljoin(man_base_url, media.attrib['url'])
        bootstrap_node = doc.find(_add_ns('bootstrapInfo'))
        boot_info, bootstrap_url = self._parse_bootstrap_node(
            bootstrap_node, man_base_url)
        live = boot_info['live']
        metadata_node = media.find(_add_ns('metadata'))
        if metadata_node is not None:
            metadata = compat_b64decode(metadata_node.text)
        else:
            metadata = None

        fragments_list = build_fragments_list(boot_info)
        test = self.params.get('test', False)
        if test:
            # We only download the first fragment
            fragments_list = fragments_list[:1]
        total_frags = len(fragments_list)
        # For some akamai manifests we'll need to add a query to the fragment url
        akamai_pv = xpath_text(doc, _add_ns('pv-2.0'))

        ctx = {
            'filename': filename,
            'total_frags': total_frags,
            'live': live,
        }

        self._prepare_frag_download(ctx)

        dest_stream = ctx['dest_stream']

        if ctx['complete_frags_downloaded_bytes'] == 0:
            write_flv_header(dest_stream)
            if not live:
                write_metadata_tag(dest_stream, metadata)

        base_url_parsed = compat_urllib_parse_urlparse(base_url)

        self._start_frag_download(ctx)

        frag_index = 0
        while fragments_list:
            seg_i, frag_i = fragments_list.pop(0)
            frag_index += 1
            if frag_index <= ctx['fragment_index']:
                continue
            name = 'Seg%d-Frag%d' % (seg_i, frag_i)
            query = []
            if base_url_parsed.query:
                query.append(base_url_parsed.query)
            if akamai_pv:
                query.append(akamai_pv.strip(';'))
            if info_dict.get('extra_param_to_segment_url'):
                query.append(info_dict['extra_param_to_segment_url'])
            url_parsed = base_url_parsed._replace(path=base_url_parsed.path + name, query='&'.join(query))
            try:
                success, down_data = self._download_fragment(ctx, url_parsed.geturl(), info_dict)
                if not success:
                    return False
                reader = FlvReader(down_data)
                while True:
                    try:
                        _, box_type, box_data = reader.read_box_info()
                    except DataTruncatedError:
                        if test:
                            # In tests, segments may be truncated, and thus
                            # FlvReader may not be able to parse the whole
                            # chunk. If so, write the segment as is
                            # See https://github.com/ytdl-org/youtube-dl/issues/9214
                            dest_stream.write(down_data)
                            break
                        raise
                    if box_type == b'mdat':
                        self._append_fragment(ctx, box_data)
                        break
            except (compat_urllib_error.HTTPError, ) as err:
                if live and (err.code == 404 or err.code == 410):
                    # We didn't keep up with the live window. Continue
                    # with the next available fragment.
                    msg = 'Fragment %d unavailable' % frag_i
                    self.report_warning(msg)
                    fragments_list = []
                else:
                    raise

            if not fragments_list and not test and live and bootstrap_url:
                fragments_list = self._update_live_fragments(bootstrap_url, frag_i)
                total_frags += len(fragments_list)
                if fragments_list and (fragments_list[0][1] > frag_i + 1):
                    msg = 'Missed %d fragments' % (fragments_list[0][1] - (frag_i + 1))
                    self.report_warning(msg)

        self._finish_frag_download(ctx)

        return True
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
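The heart of FlvReader in youtube_dl/downloader/f4m.py above is the box framing shared by F4V/MP4: a 32-bit big-endian size, a 4-byte type, and a 64-bit extended size when the 32-bit field equals 1. A self-contained sketch of what read_box_info() does, using the stdlib struct module directly (the compat_struct_* names above are thin wrappers around it on modern Pythons; the 'abst' box below is fabricated for the test):

import io
import struct

def read_box(stream):
    # 32-bit big-endian size followed by a 4-byte box type...
    size = struct.unpack('!I', stream.read(4))[0]
    box_type = stream.read(4)
    header_len = 8
    if size == 1:
        # ...unless size == 1, which signals a 64-bit extended size field
        size = struct.unpack('!Q', stream.read(8))[0]
        header_len = 16
    return box_type, stream.read(size - header_len)

# A fabricated 12-byte 'abst' box: 8 header bytes plus 4 bytes of payload
buf = io.BytesIO(struct.pack('!I', 12) + b'abst' + b'\x00' * 4)
assert read_box(buf) == (b'abst', b'\x00' * 4)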
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/downloader/dash.py
youtube_dl/downloader/dash.py
from __future__ import unicode_literals

import itertools

from .fragment import FragmentFD
from ..compat import compat_urllib_error
from ..utils import (
    DownloadError,
    urljoin,
)


class DashSegmentsFD(FragmentFD):
    """
    Download segments in a DASH manifest
    """

    FD_NAME = 'dashsegments'

    def real_download(self, filename, info_dict):
        fragment_base_url = info_dict.get('fragment_base_url')
        fragments = info_dict['fragments'][:1] if self.params.get(
            'test', False) else info_dict['fragments']

        ctx = {
            'filename': filename,
            'total_frags': len(fragments),
        }

        self._prepare_and_start_frag_download(ctx)

        fragment_retries = self.params.get('fragment_retries', 0)
        skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)

        for frag_index, fragment in enumerate(fragments, 1):
            if frag_index <= ctx['fragment_index']:
                continue
            success = False
            # In DASH, the first segment contains necessary headers to
            # generate a valid MP4 file, so always abort for the first segment
            fatal = frag_index == 1 or not skip_unavailable_fragments
            fragment_url = fragment.get('url')
            if not fragment_url:
                assert fragment_base_url
                fragment_url = urljoin(fragment_base_url, fragment['path'])
            headers = info_dict.get('http_headers')
            fragment_range = fragment.get('range')
            if fragment_range:
                headers = headers.copy() if headers else {}
                headers['Range'] = 'bytes=%s' % (fragment_range,)
            for count in itertools.count():
                try:
                    success, frag_content = self._download_fragment(ctx, fragment_url, info_dict, headers)
                    if not success:
                        return False
                    self._append_fragment(ctx, frag_content)
                except compat_urllib_error.HTTPError as err:
                    # YouTube may often return 404 HTTP error for a fragment causing the
                    # whole download to fail. However if the same fragment is immediately
                    # retried with the same request data this usually succeeds (1-2 attempts
                    # is usually enough) thus allowing to download the whole file successfully.
                    # To be future-proof we will retry all fragments that fail with any
                    # HTTP error.
                    if count < fragment_retries:
                        self.report_retry_fragment(err, frag_index, count + 1, fragment_retries)
                        continue
                except DownloadError:
                    # Don't retry fragment if error occurred during HTTP downloading
                    # itself since it has its own retry settings
                    if fatal:
                        raise
                break

            if not success:
                if not fatal:
                    self.report_skip_fragment(frag_index)
                    continue
                self.report_error('giving up after %s fragment retries' % count)
                return False

        self._finish_frag_download(ctx)

        return True
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
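The per-fragment retry loop in youtube_dl/downloader/dash.py above combines itertools.count() with a trailing break: `continue` re-attempts the same fragment, and the bare `break` fires only when no `continue` ran, i.e. on success or once retries are exhausted (leaving success False for the skip/abort logic). A stripped-down sketch of that control flow; fetch, flaky_fetch, and the IOError stand-in are illustrative placeholders, not youtube-dl API:

import itertools

def fetch_with_retries(fetch, fragment_retries):
    """Return (success, data), mirroring the loop above."""
    success, data = False, None
    for count in itertools.count():
        try:
            data = fetch()
            success = True
        except IOError:                # stand-in for compat_urllib_error.HTTPError
            if count < fragment_retries:
                continue               # immediate retry of the same fragment
        break                          # success, or retries exhausted
    return success, data

# Two failures, then success, with fragment_retries=2:
attempts = iter([IOError('404'), IOError('404'), b'segment-data'])

def flaky_fetch():
    item = next(attempts)
    if isinstance(item, Exception):
        raise item
    return item

print(fetch_with_retries(flaky_fetch, fragment_retries=2))  # (True, b'segment-data')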
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/downloader/rtsp.py
youtube_dl/downloader/rtsp.py
from __future__ import unicode_literals

import os
import subprocess

from .common import FileDownloader
from ..utils import (
    check_executable,
    encodeFilename,
)


class RtspFD(FileDownloader):
    def real_download(self, filename, info_dict):
        url = info_dict['url']
        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)

        if check_executable('mplayer', ['-h']):
            args = [
                'mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy',
                '-dumpstream', '-dumpfile', tmpfilename, url]
        elif check_executable('mpv', ['-h']):
            args = [
                'mpv', '-really-quiet', '--vo=null', '--stream-dump=' + tmpfilename, url]
        else:
            self.report_error('MMS or RTSP download detected but neither "mplayer" nor "mpv" could be run. Please install one.')
            return False

        self._debug_cmd(args)

        retval = subprocess.call(args)
        if retval == 0:
            fsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen('\r[%s] %s bytes' % (args[0], fsize))
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
                'downloaded_bytes': fsize,
                'total_bytes': fsize,
                'filename': filename,
                'status': 'finished',
            })
            return True
        else:
            self.to_stderr('\n')
            self.report_error('%s exited with code %d' % (args[0], retval))
            return False
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
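RtspFD above delegates the whole transfer to an external player, so the interesting pieces are the executable probe and the argv construction. Below is a rough, simplified approximation of the probing that check_executable() performs (the real helper lives in youtube_dl/utils.py; this standalone function is a sketch, not its actual implementation):

import subprocess

def find_stream_dumper():
    # Run each candidate with a harmless flag; OSError means the binary is
    # not on PATH at all, so move on to the next candidate.
    for exe in ('mplayer', 'mpv'):
        try:
            subprocess.Popen(
                [exe, '-h'],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        except OSError:
            continue
        return exe
    return None

print(find_stream_dumper())  # 'mplayer', 'mpv', or None if neither is installed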