Dataset schema (reconstructed from the viewer's column statistics):

    column        type      observed values
    repo          string    7-90 chars
    file_url      string    81-315 chars
    file_path     string    4-228 chars
    content       string    0-32.8k chars
    language      string    1 distinct value
    license       string    7 distinct values
    commit_sha    string    40 chars (fixed)
    retrieved_at  date      2026-01-04 14:38:15 to 2026-01-05 02:33:18
    truncated     bool      2 distinct values
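The rows below follow this schema. As a minimal sketch, such a dataset could be consumed with the `datasets` library; the hub id 'user/python-files' is a placeholder here, since the actual dataset name is not given in this dump:

# Sketch: stream a few rows of a dataset with the schema above.
# 'user/python-files' is a hypothetical hub id, not the real dataset name.
from datasets import load_dataset

ds = load_dataset('user/python-files', split='train', streaming=True)
for row in ds.take(3):
    print(row['repo'], row['file_path'], row['license'], row['truncated'])
    print(row['content'][:200])  # first 200 characters of the file body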
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/downloader/external.py
youtube_dl/downloader/external.py
from __future__ import unicode_literals

import os
import re
import subprocess
import sys
import tempfile
import time

from .common import FileDownloader
from ..compat import (
    compat_setenv,
    compat_str,
    compat_subprocess_Popen,
)

try:
    from ..postprocessor.ffmpeg import FFmpegPostProcessor, EXT_TO_OUT_FORMATS
except ImportError:
    FFmpegPostProcessor = None

from ..utils import (
    cli_option,
    cli_valueless_option,
    cli_bool_option,
    cli_configuration_args,
    encodeFilename,
    encodeArgument,
    handle_youtubedl_headers,
    check_executable,
    is_outdated_version,
    process_communicate_or_kill,
    T,
    traverse_obj,
)


class ExternalFD(FileDownloader):
    def real_download(self, filename, info_dict):
        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)
        self._cookies_tempfile = None

        try:
            started = time.time()
            retval = self._call_downloader(tmpfilename, info_dict)
        except KeyboardInterrupt:
            if not info_dict.get('is_live'):
                raise
            # Live stream downloading cancellation should be considered as
            # correct and expected termination thus all postprocessing
            # should take place
            retval = 0
            self.to_screen('[%s] Interrupted by user' % self.get_basename())
        finally:
            if self._cookies_tempfile and os.path.isfile(self._cookies_tempfile):
                try:
                    os.remove(self._cookies_tempfile)
                except OSError:
                    self.report_warning(
                        'Unable to delete temporary cookies file "{0}"'.format(self._cookies_tempfile))

        if retval == 0:
            status = {
                'filename': filename,
                'status': 'finished',
                'elapsed': time.time() - started,
            }
            if filename != '-':
                fsize = os.path.getsize(encodeFilename(tmpfilename))
                self.to_screen('\r[%s] Downloaded %s bytes' % (self.get_basename(), fsize))
                self.try_rename(tmpfilename, filename)
                status.update({
                    'downloaded_bytes': fsize,
                    'total_bytes': fsize,
                })
            self._hook_progress(status)
            return True
        else:
            self.to_stderr('\n')
            self.report_error('%s exited with code %d' % (
                self.get_basename(), retval))
            return False

    @classmethod
    def get_basename(cls):
        return cls.__name__[:-2].lower()

    @property
    def exe(self):
        return self.params.get('external_downloader')

    @classmethod
    def available(cls):
        return check_executable(cls.get_basename(), [cls.AVAILABLE_OPT])

    @classmethod
    def supports(cls, info_dict):
        return info_dict['protocol'] in ('http', 'https', 'ftp', 'ftps')

    @classmethod
    def can_download(cls, info_dict):
        return cls.available() and cls.supports(info_dict)

    def _option(self, command_option, param):
        return cli_option(self.params, command_option, param)

    def _bool_option(self, command_option, param, true_value='true', false_value='false', separator=None):
        return cli_bool_option(self.params, command_option, param, true_value, false_value, separator)

    def _valueless_option(self, command_option, param, expected_value=True):
        return cli_valueless_option(self.params, command_option, param, expected_value)

    def _configuration_args(self, default=[]):
        return cli_configuration_args(self.params, 'external_downloader_args', default)

    def _write_cookies(self):
        if not self.ydl.cookiejar.filename:
            tmp_cookies = tempfile.NamedTemporaryFile(suffix='.cookies', delete=False)
            tmp_cookies.close()
            self._cookies_tempfile = tmp_cookies.name
            self.to_screen('[download] Writing temporary cookies file to "{0}"'.format(self._cookies_tempfile))
        # real_download resets _cookies_tempfile; if it's None, save() will write to cookiejar.filename
        self.ydl.cookiejar.save(self._cookies_tempfile, ignore_discard=True, ignore_expires=True)
        return self.ydl.cookiejar.filename or self._cookies_tempfile

    def _call_downloader(self, tmpfilename, info_dict):
        """ Either overwrite this or implement _make_cmd """
        cmd = [encodeArgument(a) for a in self._make_cmd(tmpfilename, info_dict)]

        self._debug_cmd(cmd)

        p = subprocess.Popen(
            cmd, stderr=subprocess.PIPE)
        _, stderr = process_communicate_or_kill(p)
        if p.returncode != 0:
            self.to_stderr(stderr.decode('utf-8', 'replace'))
        return p.returncode

    @staticmethod
    def _header_items(info_dict):
        return traverse_obj(
            info_dict, ('http_headers', T(dict.items), Ellipsis))


class CurlFD(ExternalFD):
    AVAILABLE_OPT = '-V'

    def _make_cmd(self, tmpfilename, info_dict):
        cmd = [self.exe, '--location', '-o', tmpfilename, '--compressed']
        cookie_header = self.ydl.cookiejar.get_cookie_header(info_dict['url'])
        if cookie_header:
            cmd += ['--cookie', cookie_header]
        for key, val in self._header_items(info_dict):
            cmd += ['--header', '%s: %s' % (key, val)]
        cmd += self._bool_option('--continue-at', 'continuedl', '-', '0')
        cmd += self._valueless_option('--silent', 'noprogress')
        cmd += self._valueless_option('--verbose', 'verbose')
        cmd += self._option('--limit-rate', 'ratelimit')
        retry = self._option('--retry', 'retries')
        if len(retry) == 2:
            if retry[1] in ('inf', 'infinite'):
                retry[1] = '2147483647'
            cmd += retry
        cmd += self._option('--max-filesize', 'max_filesize')
        cmd += self._option('--interface', 'source_address')
        cmd += self._option('--proxy', 'proxy')
        cmd += self._valueless_option('--insecure', 'nocheckcertificate')
        cmd += self._configuration_args()
        cmd += ['--', info_dict['url']]
        return cmd

    def _call_downloader(self, tmpfilename, info_dict):
        cmd = [encodeArgument(a) for a in self._make_cmd(tmpfilename, info_dict)]

        self._debug_cmd(cmd)

        # curl writes the progress to stderr so don't capture it.
        p = subprocess.Popen(cmd)
        process_communicate_or_kill(p)
        return p.returncode


class AxelFD(ExternalFD):
    AVAILABLE_OPT = '-V'

    def _make_cmd(self, tmpfilename, info_dict):
        cmd = [self.exe, '-o', tmpfilename]
        for key, val in self._header_items(info_dict):
            cmd += ['-H', '%s: %s' % (key, val)]
        cookie_header = self.ydl.cookiejar.get_cookie_header(info_dict['url'])
        if cookie_header:
            cmd += ['-H', 'Cookie: {0}'.format(cookie_header), '--max-redirect=0']
        cmd += self._configuration_args()
        cmd += ['--', info_dict['url']]
        return cmd


class WgetFD(ExternalFD):
    AVAILABLE_OPT = '--version'

    def _make_cmd(self, tmpfilename, info_dict):
        cmd = [self.exe, '-O', tmpfilename, '-nv', '--compression=auto']
        if self.ydl.cookiejar.get_cookie_header(info_dict['url']):
            cmd += ['--load-cookies', self._write_cookies()]
        for key, val in self._header_items(info_dict):
            cmd += ['--header', '%s: %s' % (key, val)]
        cmd += self._option('--limit-rate', 'ratelimit')
        retry = self._option('--tries', 'retries')
        if len(retry) == 2:
            if retry[1] in ('inf', 'infinite'):
                retry[1] = '0'
            cmd += retry
        cmd += self._option('--bind-address', 'source_address')
        proxy = self.params.get('proxy')
        if proxy:
            for var in ('http_proxy', 'https_proxy'):
                cmd += ['--execute', '%s=%s' % (var, proxy)]
        cmd += self._valueless_option('--no-check-certificate', 'nocheckcertificate')
        cmd += self._configuration_args()
        cmd += ['--', info_dict['url']]
        return cmd


class Aria2cFD(ExternalFD):
    AVAILABLE_OPT = '-v'

    @staticmethod
    def _aria2c_filename(fn):
        return fn if os.path.isabs(fn) else os.path.join('.', fn)

    def _make_cmd(self, tmpfilename, info_dict):
        cmd = [self.exe, '-c',
               '--console-log-level=warn', '--summary-interval=0', '--download-result=hide',
               '--http-accept-gzip=true', '--file-allocation=none', '-x16', '-j16', '-s16']
        if 'fragments' in info_dict:
            cmd += ['--allow-overwrite=true', '--allow-piece-length-change=true']
        else:
            cmd += ['--min-split-size', '1M']

        if self.ydl.cookiejar.get_cookie_header(info_dict['url']):
            cmd += ['--load-cookies={0}'.format(self._write_cookies())]
        for key, val in self._header_items(info_dict):
            cmd += ['--header', '%s: %s' % (key, val)]
        cmd += self._configuration_args(['--max-connection-per-server', '4'])
        cmd += ['--out', os.path.basename(tmpfilename)]
        cmd += self._option('--max-overall-download-limit', 'ratelimit')
        cmd += self._option('--interface', 'source_address')
        cmd += self._option('--all-proxy', 'proxy')
        cmd += self._bool_option('--check-certificate', 'nocheckcertificate', 'false', 'true', '=')
        cmd += self._bool_option('--remote-time', 'updatetime', 'true', 'false', '=')
        cmd += self._bool_option('--show-console-readout', 'noprogress', 'false', 'true', '=')
        cmd += self._configuration_args()

        # aria2c strips out spaces from the beginning/end of filenames and paths.
        # We work around this issue by adding a "./" to the beginning of the
        # filename and relative path, and adding a "/" at the end of the path.
        # See: https://github.com/yt-dlp/yt-dlp/issues/276
        # https://github.com/ytdl-org/youtube-dl/issues/20312
        # https://github.com/aria2/aria2/issues/1373
        dn = os.path.dirname(tmpfilename)
        if dn:
            cmd += ['--dir', self._aria2c_filename(dn) + os.path.sep]
        if 'fragments' not in info_dict:
            cmd += ['--out', self._aria2c_filename(os.path.basename(tmpfilename))]
        cmd += ['--auto-file-renaming=false']

        if 'fragments' in info_dict:
            cmd += ['--file-allocation=none', '--uri-selector=inorder']
            url_list_file = '%s.frag.urls' % (tmpfilename, )
            url_list = []
            for frag_index, fragment in enumerate(info_dict['fragments']):
                fragment_filename = '%s-Frag%d' % (os.path.basename(tmpfilename), frag_index)
                url_list.append('%s\n\tout=%s' % (fragment['url'], self._aria2c_filename(fragment_filename)))
            stream, _ = self.sanitize_open(url_list_file, 'wb')
            stream.write('\n'.join(url_list).encode())
            stream.close()
            cmd += ['-i', self._aria2c_filename(url_list_file)]
        else:
            cmd += ['--', info_dict['url']]
        return cmd


class Aria2pFD(ExternalFD):
    ''' Aria2pFD class
    This class support to use aria2p as downloader.
    (Aria2p, a command-line tool and Python library to interact with an
    aria2c daemon process through JSON-RPC.)
    It can help you to get download progress more easily.
    To use aria2p as downloader, you need to install aria2c and aria2p,
    aria2p can download with pip.
    Then run aria2c in the background and enable with the --enable-rpc option.
    '''
    try:
        import aria2p
        __avail = True
    except ImportError:
        __avail = False

    @classmethod
    def available(cls):
        return cls.__avail

    def _call_downloader(self, tmpfilename, info_dict):
        aria2 = self.aria2p.API(
            self.aria2p.Client(
                host='http://localhost',
                port=6800,
                secret=''
            )
        )

        options = {
            'min-split-size': '1M',
            'max-connection-per-server': 4,
            'auto-file-renaming': 'false',
        }
        options['dir'] = os.path.dirname(tmpfilename) or os.path.abspath('.')
        options['out'] = os.path.basename(tmpfilename)
        if self.ydl.cookiejar.get_cookie_header(info_dict['url']):
            options['load-cookies'] = self._write_cookies()
        options['header'] = []
        for key, val in self._header_items(info_dict):
            options['header'].append('{0}: {1}'.format(key, val))
        download = aria2.add_uris([info_dict['url']], options)
        status = {
            'status': 'downloading',
            'tmpfilename': tmpfilename,
        }
        started = time.time()
        while download.status in ['active', 'waiting']:
            download = aria2.get_download(download.gid)
            status.update({
                'downloaded_bytes': download.completed_length,
                'total_bytes': download.total_length,
                'elapsed': time.time() - started,
                'eta': download.eta.total_seconds(),
                'speed': download.download_speed,
            })
            self._hook_progress(status)
            time.sleep(.5)
        return download.status != 'complete'


class HttpieFD(ExternalFD):
    @classmethod
    def available(cls):
        return check_executable('http', ['--version'])

    def _make_cmd(self, tmpfilename, info_dict):
        cmd = ['http', '--download', '--output', tmpfilename, info_dict['url']]
        for key, val in self._header_items(info_dict):
            cmd += ['%s:%s' % (key, val)]

        # httpie 3.1.0+ removes the Cookie header on redirect, so this should be safe for now. [1]
        # If we ever need cookie handling for redirects, we can export the cookiejar into a session. [2]
        # 1: https://github.com/httpie/httpie/security/advisories/GHSA-9w4w-cpc8-h2fq
        # 2: https://httpie.io/docs/cli/sessions
        cookie_header = self.ydl.cookiejar.get_cookie_header(info_dict['url'])
        if cookie_header:
            cmd += ['Cookie:%s' % cookie_header]
        return cmd


class FFmpegFD(ExternalFD):
    @classmethod
    def supports(cls, info_dict):
        return info_dict['protocol'] in ('http', 'https', 'ftp', 'ftps', 'm3u8', 'rtsp', 'rtmp', 'mms', 'http_dash_segments')

    @classmethod
    def available(cls):
        # actual availability can only be confirmed for an instance
        return bool(FFmpegPostProcessor)

    def _call_downloader(self, tmpfilename, info_dict):
        # `downloader` means the parent `YoutubeDL`
        ffpp = FFmpegPostProcessor(downloader=self.ydl)
        if not ffpp.available:
            self.report_error('ffmpeg required for download but no ffmpeg (nor avconv) executable could be found. Please install one.')
            return False
        ffpp.check_version()

        args = [ffpp.executable, '-y']

        for log_level in ('quiet', 'verbose'):
            if self.params.get(log_level, False):
                args += ['-loglevel', log_level]
                break

        seekable = info_dict.get('_seekable')
        if seekable is not None:
            # setting -seekable prevents ffmpeg from guessing if the server
            # supports seeking(by adding the header `Range: bytes=0-`), which
            # can cause problems in some cases
            # https://github.com/ytdl-org/youtube-dl/issues/11800#issuecomment-275037127
            # http://trac.ffmpeg.org/ticket/6125#comment:10
            args += ['-seekable', '1' if seekable else '0']

        args += self._configuration_args()

        # start_time = info_dict.get('start_time') or 0
        # if start_time:
        #     args += ['-ss', compat_str(start_time)]
        # end_time = info_dict.get('end_time')
        # if end_time:
        #     args += ['-t', compat_str(end_time - start_time)]

        url = info_dict['url']
        cookies = self.ydl.cookiejar.get_cookies_for_url(url)
        if cookies:
            args.extend(['-cookies', ''.join(
                '{0}={1}; path={2}; domain={3};\r\n'.format(
                    cookie.name, cookie.value, cookie.path, cookie.domain)
                for cookie in cookies)])

        if info_dict.get('http_headers') and re.match(r'^https?://', url):
            # Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv:
            # [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
            headers = handle_youtubedl_headers(info_dict['http_headers'])
            args += [
                '-headers',
                ''.join('%s: %s\r\n' % (key, val) for key, val in headers.items())]

        env = None
        proxy = self.params.get('proxy')
        if proxy:
            if not re.match(r'^[\da-zA-Z]+://', proxy):
                proxy = 'http://%s' % proxy

            if proxy.startswith('socks'):
                self.report_warning(
                    '%s does not support SOCKS proxies. Downloading is likely to fail. '
                    'Consider adding --hls-prefer-native to your command.' % self.get_basename())

            # Since December 2015 ffmpeg supports -http_proxy option (see
            # http://git.videolan.org/?p=ffmpeg.git;a=commit;h=b4eb1f29ebddd60c41a2eb39f5af701e38e0d3fd)
            # We could switch to the following code if we are able to detect version properly
            # args += ['-http_proxy', proxy]
            env = os.environ.copy()
            compat_setenv('HTTP_PROXY', proxy, env=env)
            compat_setenv('http_proxy', proxy, env=env)

        protocol = info_dict.get('protocol')

        if protocol == 'rtmp':
            player_url = info_dict.get('player_url')
            page_url = info_dict.get('page_url')
            app = info_dict.get('app')
            play_path = info_dict.get('play_path')
            tc_url = info_dict.get('tc_url')
            flash_version = info_dict.get('flash_version')
            live = info_dict.get('rtmp_live', False)
            conn = info_dict.get('rtmp_conn')
            if player_url is not None:
                args += ['-rtmp_swfverify', player_url]
            if page_url is not None:
                args += ['-rtmp_pageurl', page_url]
            if app is not None:
                args += ['-rtmp_app', app]
            if play_path is not None:
                args += ['-rtmp_playpath', play_path]
            if tc_url is not None:
                args += ['-rtmp_tcurl', tc_url]
            if flash_version is not None:
                args += ['-rtmp_flashver', flash_version]
            if live:
                args += ['-rtmp_live', 'live']
            if isinstance(conn, list):
                for entry in conn:
                    args += ['-rtmp_conn', entry]
            elif isinstance(conn, compat_str):
                args += ['-rtmp_conn', conn]

        args += ['-i', url, '-c', 'copy']

        if self.params.get('test', False):
            args += ['-fs', compat_str(self._TEST_FILE_SIZE)]

        if protocol in ('m3u8', 'm3u8_native'):
            if self.params.get('hls_use_mpegts', False) or tmpfilename == '-':
                args += ['-f', 'mpegts']
            else:
                args += ['-f', 'mp4']
                if (ffpp.basename == 'ffmpeg' and is_outdated_version(ffpp._versions['ffmpeg'], '3.2', False)) and (not info_dict.get('acodec') or info_dict['acodec'].split('.')[0] in ('aac', 'mp4a')):
                    args += ['-bsf:a', 'aac_adtstoasc']
        elif protocol == 'rtmp':
            args += ['-f', 'flv']
        else:
            args += ['-f', EXT_TO_OUT_FORMATS.get(info_dict['ext'], info_dict['ext'])]

        args = [encodeArgument(opt) for opt in args]
        args.append(encodeFilename(ffpp._ffmpeg_filename_argument(tmpfilename), True))
        self._debug_cmd(args)

        # From [1], a PIPE opened in Popen() should be closed, unless
        # .communicate() is called. Avoid leaking any PIPEs by using Popen
        # as a context manager (newer Python 3.x and compat)
        # Fixes "Resource Warning" in test/test_downloader_external.py
        # [1] https://devpress.csdn.net/python/62fde12d7e66823466192e48.html
        with compat_subprocess_Popen(args, stdin=subprocess.PIPE, env=env) as proc:
            try:
                retval = proc.wait()
            except BaseException as e:
                # subprocess.run would send the SIGKILL signal to ffmpeg and the
                # mp4 file couldn't be played, but if we ask ffmpeg to quit it
                # produces a file that is playable (this is mostly useful for live
                # streams). Note that Windows is not affected and produces playable
                # files (see https://github.com/ytdl-org/youtube-dl/issues/8300).
                if isinstance(e, KeyboardInterrupt) and sys.platform != 'win32':
                    process_communicate_or_kill(proc, b'q')
                else:
                    proc.kill()
                raise
        return retval


class AVconvFD(FFmpegFD):
    pass


_BY_NAME = dict(
    (klass.get_basename(), klass)
    for name, klass in globals().items()
    if name.endswith('FD') and name != 'ExternalFD'
)


def list_external_downloaders():
    return sorted(_BY_NAME.keys())


def get_external_downloader(external_downloader):
    """ Given the name of the executable, see whether we support the
        given downloader . """
    # Drop .exe extension on Windows
    bn = os.path.splitext(os.path.basename(external_downloader))[0]
    return _BY_NAME[bn]
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
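Worth noting in external.py above: the `_BY_NAME` registry is derived purely from class names (`CurlFD` maps to the executable name `curl`). A reduced, self-contained sketch of that pattern, not the module itself:

# Reduced sketch of the *FD registry pattern from external.py.
import os


class ExternalFD(object):
    @classmethod
    def get_basename(cls):
        return cls.__name__[:-2].lower()  # CurlFD -> 'curl'


class CurlFD(ExternalFD):
    pass


class WgetFD(ExternalFD):
    pass


_BY_NAME = dict(
    (klass.get_basename(), klass)
    for name, klass in globals().items()
    if name.endswith('FD') and name != 'ExternalFD'
)


def get_external_downloader(external_downloader):
    # Drop a .exe extension (Windows) before the lookup.
    bn = os.path.splitext(os.path.basename(external_downloader))[0]
    return _BY_NAME[bn]


assert get_external_downloader('/usr/bin/curl') is CurlFD
assert get_external_downloader('wget.exe') is WgetFD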
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/downloader/ism.py
youtube_dl/downloader/ism.py
from __future__ import unicode_literals

import time
import binascii
import io

from .fragment import FragmentFD
from ..compat import (
    compat_Struct,
    compat_urllib_error,
)


u8 = compat_Struct('>B')
u88 = compat_Struct('>Bx')
u16 = compat_Struct('>H')
u1616 = compat_Struct('>Hxx')
u32 = compat_Struct('>I')
u64 = compat_Struct('>Q')

s88 = compat_Struct('>bx')
s16 = compat_Struct('>h')
s1616 = compat_Struct('>hxx')
s32 = compat_Struct('>i')

unity_matrix = (s32.pack(0x10000) + s32.pack(0) * 3) * 2 + s32.pack(0x40000000)

TRACK_ENABLED = 0x1
TRACK_IN_MOVIE = 0x2
TRACK_IN_PREVIEW = 0x4

SELF_CONTAINED = 0x1


def box(box_type, payload):
    return u32.pack(8 + len(payload)) + box_type + payload


def full_box(box_type, version, flags, payload):
    return box(box_type, u8.pack(version) + u32.pack(flags)[1:] + payload)


def write_piff_header(stream, params):
    track_id = params['track_id']
    fourcc = params['fourcc']
    duration = params['duration']
    timescale = params.get('timescale', 10000000)
    language = params.get('language', 'und')
    height = params.get('height', 0)
    width = params.get('width', 0)
    is_audio = width == 0 and height == 0
    creation_time = modification_time = int(time.time())

    ftyp_payload = b'isml'  # major brand
    ftyp_payload += u32.pack(1)  # minor version
    ftyp_payload += b'piff' + b'iso2'  # compatible brands
    stream.write(box(b'ftyp', ftyp_payload))  # File Type Box

    mvhd_payload = u64.pack(creation_time)
    mvhd_payload += u64.pack(modification_time)
    mvhd_payload += u32.pack(timescale)
    mvhd_payload += u64.pack(duration)
    mvhd_payload += s1616.pack(1)  # rate
    mvhd_payload += s88.pack(1)  # volume
    mvhd_payload += u16.pack(0)  # reserved
    mvhd_payload += u32.pack(0) * 2  # reserved
    mvhd_payload += unity_matrix
    mvhd_payload += u32.pack(0) * 6  # pre defined
    mvhd_payload += u32.pack(0xffffffff)  # next track id
    moov_payload = full_box(b'mvhd', 1, 0, mvhd_payload)  # Movie Header Box

    tkhd_payload = u64.pack(creation_time)
    tkhd_payload += u64.pack(modification_time)
    tkhd_payload += u32.pack(track_id)  # track id
    tkhd_payload += u32.pack(0)  # reserved
    tkhd_payload += u64.pack(duration)
    tkhd_payload += u32.pack(0) * 2  # reserved
    tkhd_payload += s16.pack(0)  # layer
    tkhd_payload += s16.pack(0)  # alternate group
    tkhd_payload += s88.pack(1 if is_audio else 0)  # volume
    tkhd_payload += u16.pack(0)  # reserved
    tkhd_payload += unity_matrix
    tkhd_payload += u1616.pack(width)
    tkhd_payload += u1616.pack(height)
    trak_payload = full_box(b'tkhd', 1, TRACK_ENABLED | TRACK_IN_MOVIE | TRACK_IN_PREVIEW, tkhd_payload)  # Track Header Box

    mdhd_payload = u64.pack(creation_time)
    mdhd_payload += u64.pack(modification_time)
    mdhd_payload += u32.pack(timescale)
    mdhd_payload += u64.pack(duration)
    mdhd_payload += u16.pack(((ord(language[0]) - 0x60) << 10) | ((ord(language[1]) - 0x60) << 5) | (ord(language[2]) - 0x60))
    mdhd_payload += u16.pack(0)  # pre defined
    mdia_payload = full_box(b'mdhd', 1, 0, mdhd_payload)  # Media Header Box

    hdlr_payload = u32.pack(0)  # pre defined
    hdlr_payload += b'soun' if is_audio else b'vide'  # handler type
    hdlr_payload += u32.pack(0) * 3  # reserved
    hdlr_payload += (b'Sound' if is_audio else b'Video') + b'Handler\0'  # name
    mdia_payload += full_box(b'hdlr', 0, 0, hdlr_payload)  # Handler Reference Box

    if is_audio:
        smhd_payload = s88.pack(0)  # balance
        smhd_payload += u16.pack(0)  # reserved
        media_header_box = full_box(b'smhd', 0, 0, smhd_payload)  # Sound Media Header
    else:
        vmhd_payload = u16.pack(0)  # graphics mode
        vmhd_payload += u16.pack(0) * 3  # opcolor
        media_header_box = full_box(b'vmhd', 0, 1, vmhd_payload)  # Video Media Header
    minf_payload = media_header_box

    dref_payload = u32.pack(1)  # entry count
    dref_payload += full_box(b'url ', 0, SELF_CONTAINED, b'')  # Data Entry URL Box
    dinf_payload = full_box(b'dref', 0, 0, dref_payload)  # Data Reference Box
    minf_payload += box(b'dinf', dinf_payload)  # Data Information Box

    stsd_payload = u32.pack(1)  # entry count

    sample_entry_payload = u8.pack(0) * 6  # reserved
    sample_entry_payload += u16.pack(1)  # data reference index
    if is_audio:
        sample_entry_payload += u32.pack(0) * 2  # reserved
        sample_entry_payload += u16.pack(params.get('channels', 2))
        sample_entry_payload += u16.pack(params.get('bits_per_sample', 16))
        sample_entry_payload += u16.pack(0)  # pre defined
        sample_entry_payload += u16.pack(0)  # reserved
        sample_entry_payload += u1616.pack(params['sampling_rate'])

        if fourcc == 'AACL':
            sample_entry_box = box(b'mp4a', sample_entry_payload)
    else:
        sample_entry_payload += u16.pack(0)  # pre defined
        sample_entry_payload += u16.pack(0)  # reserved
        sample_entry_payload += u32.pack(0) * 3  # pre defined
        sample_entry_payload += u16.pack(width)
        sample_entry_payload += u16.pack(height)
        sample_entry_payload += u1616.pack(0x48)  # horiz resolution 72 dpi
        sample_entry_payload += u1616.pack(0x48)  # vert resolution 72 dpi
        sample_entry_payload += u32.pack(0)  # reserved
        sample_entry_payload += u16.pack(1)  # frame count
        sample_entry_payload += u8.pack(0) * 32  # compressor name
        sample_entry_payload += u16.pack(0x18)  # depth
        sample_entry_payload += s16.pack(-1)  # pre defined

        codec_private_data = binascii.unhexlify(params['codec_private_data'].encode('utf-8'))
        if fourcc in ('H264', 'AVC1'):
            sps, pps = codec_private_data.split(u32.pack(1))[1:]
            avcc_payload = u8.pack(1)  # configuration version
            avcc_payload += sps[1:4]  # avc profile indication + profile compatibility + avc level indication
            avcc_payload += u8.pack(0xfc | (params.get('nal_unit_length_field', 4) - 1))  # complete representation (1) + reserved (11111) + length size minus one
            avcc_payload += u8.pack(1)  # reserved (0) + number of sps (0000001)
            avcc_payload += u16.pack(len(sps))
            avcc_payload += sps
            avcc_payload += u8.pack(1)  # number of pps
            avcc_payload += u16.pack(len(pps))
            avcc_payload += pps
            sample_entry_payload += box(b'avcC', avcc_payload)  # AVC Decoder Configuration Record
            sample_entry_box = box(b'avc1', sample_entry_payload)  # AVC Simple Entry
    stsd_payload += sample_entry_box

    stbl_payload = full_box(b'stsd', 0, 0, stsd_payload)  # Sample Description Box

    stts_payload = u32.pack(0)  # entry count
    stbl_payload += full_box(b'stts', 0, 0, stts_payload)  # Decoding Time to Sample Box

    stsc_payload = u32.pack(0)  # entry count
    stbl_payload += full_box(b'stsc', 0, 0, stsc_payload)  # Sample To Chunk Box

    stco_payload = u32.pack(0)  # entry count
    stbl_payload += full_box(b'stco', 0, 0, stco_payload)  # Chunk Offset Box

    minf_payload += box(b'stbl', stbl_payload)  # Sample Table Box

    mdia_payload += box(b'minf', minf_payload)  # Media Information Box

    trak_payload += box(b'mdia', mdia_payload)  # Media Box

    moov_payload += box(b'trak', trak_payload)  # Track Box

    mehd_payload = u64.pack(duration)
    mvex_payload = full_box(b'mehd', 1, 0, mehd_payload)  # Movie Extends Header Box

    trex_payload = u32.pack(track_id)  # track id
    trex_payload += u32.pack(1)  # default sample description index
    trex_payload += u32.pack(0)  # default sample duration
    trex_payload += u32.pack(0)  # default sample size
    trex_payload += u32.pack(0)  # default sample flags
    mvex_payload += full_box(b'trex', 0, 0, trex_payload)  # Track Extends Box
    moov_payload += box(b'mvex', mvex_payload)  # Movie Extends Box

    stream.write(box(b'moov', moov_payload))  # Movie Box


def extract_box_data(data, box_sequence):
    data_reader = io.BytesIO(data)
    while True:
        box_size = u32.unpack(data_reader.read(4))[0]
        box_type = data_reader.read(4)
        if box_type == box_sequence[0]:
            box_data = data_reader.read(box_size - 8)
            if len(box_sequence) == 1:
                return box_data
            return extract_box_data(box_data, box_sequence[1:])
        data_reader.seek(box_size - 8, 1)


class IsmFD(FragmentFD):
    """
    Download segments in a ISM manifest
    """

    FD_NAME = 'ism'

    def real_download(self, filename, info_dict):
        segments = info_dict['fragments'][:1] if self.params.get(
            'test', False) else info_dict['fragments']

        ctx = {
            'filename': filename,
            'total_frags': len(segments),
        }

        self._prepare_and_start_frag_download(ctx)

        fragment_retries = self.params.get('fragment_retries', 0)
        skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)

        track_written = False
        frag_index = 0
        for i, segment in enumerate(segments):
            frag_index += 1
            if frag_index <= ctx['fragment_index']:
                continue
            count = 0
            while count <= fragment_retries:
                try:
                    success, frag_content = self._download_fragment(ctx, segment['url'], info_dict)
                    if not success:
                        return False
                    if not track_written:
                        tfhd_data = extract_box_data(frag_content, [b'moof', b'traf', b'tfhd'])
                        info_dict['_download_params']['track_id'] = u32.unpack(tfhd_data[4:8])[0]
                        write_piff_header(ctx['dest_stream'], info_dict['_download_params'])
                        track_written = True
                    self._append_fragment(ctx, frag_content)
                    break
                except compat_urllib_error.HTTPError as err:
                    count += 1
                    if count <= fragment_retries:
                        self.report_retry_fragment(err, frag_index, count, fragment_retries)
            if count > fragment_retries:
                if skip_unavailable_fragments:
                    self.report_skip_fragment(frag_index)
                    continue
                self.report_error('giving up after %s fragment retries' % fragment_retries)
                return False

        self._finish_frag_download(ctx)

        return True
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
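The `box`/`full_box` helpers above are the core of the PIFF writer in ism.py; here is a small round-trip check using plain `struct` in place of `compat_Struct` (same big-endian formats):

# Sketch: the ISOBMFF box framing used by ism.py, verified round-trip.
import struct

u8 = struct.Struct('>B')
u32 = struct.Struct('>I')


def box(box_type, payload):
    # 4-byte size + 4-byte type + payload
    return u32.pack(8 + len(payload)) + box_type + payload


def full_box(box_type, version, flags, payload):
    # a "full" box prepends a 1-byte version and 24-bit flags to the payload
    return box(box_type, u8.pack(version) + u32.pack(flags)[1:] + payload)


ftyp = box(b'ftyp', b'isml' + u32.pack(1) + b'piff' + b'iso2')
assert u32.unpack(ftyp[:4])[0] == len(ftyp)  # declared size matches
assert ftyp[4:8] == b'ftyp'                  # type field follows the size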
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/downloader/__init__.py
youtube_dl/downloader/__init__.py
from __future__ import unicode_literals

from ..utils import (
    determine_protocol,
)


def get_suitable_downloader(info_dict, params={}):
    info_dict['protocol'] = determine_protocol(info_dict)
    info_copy = info_dict.copy()
    return _get_suitable_downloader(info_copy, params)


# Some of these require get_suitable_downloader
from .common import FileDownloader
from .dash import DashSegmentsFD
from .f4m import F4mFD
from .hls import HlsFD
from .http import HttpFD
from .rtmp import RtmpFD
from .rtsp import RtspFD
from .ism import IsmFD
from .niconico import NiconicoDmcFD
from .external import (
    get_external_downloader,
    FFmpegFD,
)

PROTOCOL_MAP = {
    'rtmp': RtmpFD,
    'm3u8_native': HlsFD,
    'm3u8': FFmpegFD,
    'mms': RtspFD,
    'rtsp': RtspFD,
    'f4m': F4mFD,
    'http_dash_segments': DashSegmentsFD,
    'ism': IsmFD,
    'niconico_dmc': NiconicoDmcFD,
}


def _get_suitable_downloader(info_dict, params={}):
    """Get the downloader class that can handle the info dict."""

    # if (info_dict.get('start_time') or info_dict.get('end_time')) and not info_dict.get('requested_formats') and FFmpegFD.can_download(info_dict):
    #     return FFmpegFD

    external_downloader = params.get('external_downloader')
    if external_downloader is not None:
        ed = get_external_downloader(external_downloader)
        if ed.can_download(info_dict):
            return ed
        # Avoid using unwanted args since external_downloader was rejected
        if params.get('external_downloader_args'):
            params['external_downloader_args'] = None

    protocol = info_dict['protocol']
    if protocol.startswith('m3u8') and info_dict.get('is_live'):
        return FFmpegFD

    if protocol == 'm3u8' and params.get('hls_prefer_native') is True:
        return HlsFD

    if protocol == 'm3u8_native' and params.get('hls_prefer_native') is False:
        return FFmpegFD

    return PROTOCOL_MAP.get(protocol, HttpFD)


__all__ = [
    'get_suitable_downloader',
    'FileDownloader',
]
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
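The dispatch in `_get_suitable_downloader` above boils down to a protocol map plus a few HLS/live overrides; a toy restatement with stub classes standing in for the real downloader classes:

# Toy sketch of the protocol dispatch in downloader/__init__.py.
class HttpFD(object):
    pass


class HlsFD(object):
    pass


class FFmpegFD(object):
    pass


PROTOCOL_MAP = {'m3u8_native': HlsFD, 'm3u8': FFmpegFD}


def pick(info_dict, params={}):
    protocol = info_dict['protocol']
    if protocol.startswith('m3u8') and info_dict.get('is_live'):
        return FFmpegFD  # live HLS always goes through ffmpeg
    if protocol == 'm3u8' and params.get('hls_prefer_native') is True:
        return HlsFD
    if protocol == 'm3u8_native' and params.get('hls_prefer_native') is False:
        return FFmpegFD
    return PROTOCOL_MAP.get(protocol, HttpFD)  # plain http(s) falls through


assert pick({'protocol': 'https'}) is HttpFD
assert pick({'protocol': 'm3u8', 'is_live': True}) is FFmpegFD
assert pick({'protocol': 'm3u8_native'}, {'hls_prefer_native': False}) is FFmpegFD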
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/downloader/rtmp.py
youtube_dl/downloader/rtmp.py
from __future__ import unicode_literals

import os
import re
import subprocess
import time

from .common import FileDownloader
from ..compat import compat_str
from ..utils import (
    check_executable,
    encodeFilename,
    encodeArgument,
    get_exe_version,
)


def rtmpdump_version():
    return get_exe_version(
        'rtmpdump', ['--help'], r'(?i)RTMPDump\s*v?([0-9a-zA-Z._-]+)')


class RtmpFD(FileDownloader):
    def real_download(self, filename, info_dict):
        def run_rtmpdump(args):
            start = time.time()
            resume_percent = None
            resume_downloaded_data_len = None
            proc = subprocess.Popen(args, stderr=subprocess.PIPE)
            cursor_in_new_line = True
            proc_stderr_closed = False
            try:
                while not proc_stderr_closed:
                    # read line from stderr
                    line = ''
                    while True:
                        char = proc.stderr.read(1)
                        if not char:
                            proc_stderr_closed = True
                            break
                        if char in [b'\r', b'\n']:
                            break
                        line += char.decode('ascii', 'replace')
                    if not line:
                        # proc_stderr_closed is True
                        continue
                    mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
                    if mobj:
                        downloaded_data_len = int(float(mobj.group(1)) * 1024)
                        percent = float(mobj.group(2))
                        if not resume_percent:
                            resume_percent = percent
                            resume_downloaded_data_len = downloaded_data_len
                        time_now = time.time()
                        eta = self.calc_eta(start, time_now, 100 - resume_percent, percent - resume_percent)
                        speed = self.calc_speed(start, time_now, downloaded_data_len - resume_downloaded_data_len)
                        data_len = None
                        if percent > 0:
                            data_len = int(downloaded_data_len * 100 / percent)
                        self._hook_progress({
                            'status': 'downloading',
                            'downloaded_bytes': downloaded_data_len,
                            'total_bytes_estimate': data_len,
                            'tmpfilename': tmpfilename,
                            'filename': filename,
                            'eta': eta,
                            'elapsed': time_now - start,
                            'speed': speed,
                        })
                        cursor_in_new_line = False
                    else:
                        # no percent for live streams
                        mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
                        if mobj:
                            downloaded_data_len = int(float(mobj.group(1)) * 1024)
                            time_now = time.time()
                            speed = self.calc_speed(start, time_now, downloaded_data_len)
                            self._hook_progress({
                                'downloaded_bytes': downloaded_data_len,
                                'tmpfilename': tmpfilename,
                                'filename': filename,
                                'status': 'downloading',
                                'elapsed': time_now - start,
                                'speed': speed,
                            })
                            cursor_in_new_line = False
                        elif self.params.get('verbose', False):
                            if not cursor_in_new_line:
                                self.to_screen('')
                            cursor_in_new_line = True
                            self.to_screen('[rtmpdump] ' + line)
                if not cursor_in_new_line:
                    self.to_screen('')
                return proc.wait()
            except BaseException:  # Including KeyboardInterrupt
                proc.kill()
                proc.wait()
                raise

        url = info_dict['url']
        player_url = info_dict.get('player_url')
        page_url = info_dict.get('page_url')
        app = info_dict.get('app')
        play_path = info_dict.get('play_path')
        tc_url = info_dict.get('tc_url')
        flash_version = info_dict.get('flash_version')
        live = info_dict.get('rtmp_live', False)
        conn = info_dict.get('rtmp_conn')
        protocol = info_dict.get('rtmp_protocol')
        real_time = info_dict.get('rtmp_real_time', False)
        no_resume = info_dict.get('no_resume', False)
        continue_dl = self.params.get('continuedl', True)

        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)
        test = self.params.get('test', False)

        # Check for rtmpdump first
        if not check_executable('rtmpdump', ['-h']):
            self.report_error('RTMP download detected but "rtmpdump" could not be run. Please install it.')
            return False

        # Download using rtmpdump. rtmpdump returns exit code 2 when
        # the connection was interrupted and resuming appears to be
        # possible. This is part of rtmpdump's normal usage, AFAIK.
        basic_args = [
            'rtmpdump', '--verbose', '-r', url,
            '-o', tmpfilename]
        if player_url is not None:
            basic_args += ['--swfVfy', player_url]
        if page_url is not None:
            basic_args += ['--pageUrl', page_url]
        if app is not None:
            basic_args += ['--app', app]
        if play_path is not None:
            basic_args += ['--playpath', play_path]
        if tc_url is not None:
            basic_args += ['--tcUrl', tc_url]
        if test:
            basic_args += ['--stop', '1']
        if flash_version is not None:
            basic_args += ['--flashVer', flash_version]
        if live:
            basic_args += ['--live']
        if isinstance(conn, list):
            for entry in conn:
                basic_args += ['--conn', entry]
        elif isinstance(conn, compat_str):
            basic_args += ['--conn', conn]
        if protocol is not None:
            basic_args += ['--protocol', protocol]
        if real_time:
            basic_args += ['--realtime']

        args = basic_args
        if not no_resume and continue_dl and not live:
            args += ['--resume']
        if not live and continue_dl:
            args += ['--skip', '1']

        args = [encodeArgument(a) for a in args]

        self._debug_cmd(args, exe='rtmpdump')

        RD_SUCCESS = 0
        RD_FAILED = 1
        RD_INCOMPLETE = 2
        RD_NO_CONNECT = 3

        started = time.time()

        try:
            retval = run_rtmpdump(args)
        except KeyboardInterrupt:
            if not info_dict.get('is_live'):
                raise
            retval = RD_SUCCESS
            self.to_screen('\n[rtmpdump] Interrupted by user')

        if retval == RD_NO_CONNECT:
            self.report_error('[rtmpdump] Could not connect to RTMP server.')
            return False

        while retval in (RD_INCOMPLETE, RD_FAILED) and not test and not live:
            prevsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen('[rtmpdump] Downloaded %s bytes' % prevsize)
            time.sleep(5.0)  # This seems to be needed
            args = basic_args + ['--resume']
            if retval == RD_FAILED:
                args += ['--skip', '1']
            args = [encodeArgument(a) for a in args]
            retval = run_rtmpdump(args)
            cursize = os.path.getsize(encodeFilename(tmpfilename))
            if prevsize == cursize and retval == RD_FAILED:
                break
            # Some rtmp streams seem abort after ~ 99.8%. Don't complain for those
            if prevsize == cursize and retval == RD_INCOMPLETE and cursize > 1024:
                self.to_screen('[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
                retval = RD_SUCCESS
                break
        if retval == RD_SUCCESS or (test and retval == RD_INCOMPLETE):
            fsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen('[rtmpdump] Downloaded %s bytes' % fsize)
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
                'downloaded_bytes': fsize,
                'total_bytes': fsize,
                'filename': filename,
                'status': 'finished',
                'elapsed': time.time() - started,
            })
            return True
        else:
            self.to_stderr('\n')
            self.report_error('rtmpdump exited with code %d' % retval)
            return False
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
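Progress reporting in rtmp.py hinges on one regex over rtmpdump's stderr; a sketch applying the same pattern to a synthetic sample line (the line itself is made up, not captured output):

# Sketch: rtmp.py's stderr progress regex on a synthetic rtmpdump line.
import re

line = '3561.206 kB / 35.61 sec (9.7%)'  # synthetic example output
mobj = re.search(
    r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)',
    line)
assert mobj
downloaded_data_len = int(float(mobj.group(1)) * 1024)  # kB -> bytes
percent = float(mobj.group(2))
data_len = int(downloaded_data_len * 100 / percent)  # estimated total size
print(downloaded_data_len, percent, data_len)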
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_swfinterp.py
test/test_swfinterp.py
#!/usr/bin/env python
from __future__ import unicode_literals

# Allow direct execution
import os
import sys
import unittest

dirn = os.path.dirname

sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))

import errno
import json
import re
import subprocess

from youtube_dl.swfinterp import SWFInterpreter
from youtube_dl.compat import compat_open as open


TEST_DIR = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), 'swftests')


class TestSWFInterpreter(unittest.TestCase):
    pass


def _make_testfunc(testfile):
    m = re.match(r'^(.*)\.(as)$', testfile)
    if not m:
        return
    test_id = m.group(1)

    def test_func(self):
        as_file = os.path.join(TEST_DIR, testfile)
        swf_file = os.path.join(TEST_DIR, test_id + '.swf')
        if ((not os.path.exists(swf_file))
                or os.path.getmtime(swf_file) < os.path.getmtime(as_file)):
            # Recompile
            try:
                subprocess.check_call([
                    'mxmlc', '-output', swf_file,
                    '-static-link-runtime-shared-libraries', as_file])
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.skipTest('mxmlc not found!')
                    return
                raise

        with open(swf_file, 'rb') as swf_f:
            swf_content = swf_f.read()
        swfi = SWFInterpreter(swf_content)

        with open(as_file, 'r', encoding='utf-8') as as_f:
            as_content = as_f.read()

        def _find_spec(key):
            m = re.search(
                r'(?m)^//\s*%s:\s*(.*?)\n' % re.escape(key), as_content)
            if not m:
                raise ValueError('Cannot find %s in %s' % (key, testfile))
            return json.loads(m.group(1))

        input_args = _find_spec('input')
        output = _find_spec('output')

        swf_class = swfi.extract_class(test_id)
        func = swfi.extract_function(swf_class, 'main')
        res = func(input_args)
        self.assertEqual(res, output)

    test_func.__name__ = str('test_swf_' + test_id)
    setattr(TestSWFInterpreter, test_func.__name__, test_func)


for testfile in os.listdir(TEST_DIR):
    _make_testfunc(testfile)

if __name__ == '__main__':
    unittest.main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
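The `_find_spec` closure above pulls `// input:` / `// output:` JSON annotations out of the ActionScript source; the same extraction, standalone, on an inline sample (the ActionScript body is elided):

# Sketch: extracting the JSON test-spec comments used by test_swfinterp.py.
import json
import re

as_content = '''\
// input: [1, 2]
// output: 3
package { /* ActionScript body elided */ }
'''


def find_spec(key, source):
    m = re.search(r'(?m)^//\s*%s:\s*(.*?)\n' % re.escape(key), source)
    if not m:
        raise ValueError('Cannot find %s' % key)
    return json.loads(m.group(1))


assert find_spec('input', as_content) == [1, 2]
assert find_spec('output', as_content) == 3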
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/helper.py
test/helper.py
from __future__ import unicode_literals

import errno
import hashlib
import json
import os.path
import re
import ssl
import sys
import types
import unittest

import youtube_dl.extractor
from youtube_dl import YoutubeDL
from youtube_dl.compat import (
    compat_open as open,
    compat_os_name,
    compat_str,
)
from youtube_dl.utils import (
    IDENTITY,
    preferredencoding,
    write_string,
)


def get_params(override=None):
    PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                   "parameters.json")
    LOCAL_PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                         "local_parameters.json")
    with open(PARAMETERS_FILE, encoding='utf-8') as pf:
        parameters = json.load(pf)
    if os.path.exists(LOCAL_PARAMETERS_FILE):
        with open(LOCAL_PARAMETERS_FILE, encoding='utf-8') as pf:
            parameters.update(json.load(pf))
    if override:
        parameters.update(override)
    return parameters


def try_rm(filename):
    """ Remove a file if it exists """
    try:
        os.remove(filename)
    except OSError as ose:
        if ose.errno != errno.ENOENT:
            raise


def report_warning(message):
    '''
    Print the message to stderr, it will be prefixed with 'WARNING:'
    If stderr is a tty file the 'WARNING:' will be colored
    '''
    if sys.stderr.isatty() and compat_os_name != 'nt':
        _msg_header = '\033[0;33mWARNING:\033[0m'
    else:
        _msg_header = 'WARNING:'
    output = '%s %s\n' % (_msg_header, message)
    if 'b' in getattr(sys.stderr, 'mode', '') or sys.version_info[0] < 3:
        output = output.encode(preferredencoding())
    sys.stderr.write(output)


class FakeYDL(YoutubeDL):
    def __init__(self, override=None):
        # Different instances of the downloader can't share the same dictionary
        # some test set the "sublang" parameter, which would break the md5 checks.
        params = get_params(override=override)
        super(FakeYDL, self).__init__(params, auto_init=False)
        self.result = []

    def to_screen(self, s, skip_eol=None):
        print(s)

    def trouble(self, *args, **kwargs):
        s = args[0] if len(args) > 0 else kwargs.get('message', 'Missing message')
        raise Exception(s)

    def download(self, x):
        self.result.append(x)

    def expect_warning(self, regex):
        # Silence an expected warning matching a regex
        old_report_warning = self.report_warning

        def report_warning(self, message, *args, **kwargs):
            if re.match(regex, message):
                return
            old_report_warning(message, *args, **kwargs)
        self.report_warning = types.MethodType(report_warning, self)


class FakeLogger(object):
    def debug(self, msg):
        pass

    def warning(self, msg):
        pass

    def error(self, msg):
        pass


def gettestcases(include_onlymatching=False):
    for ie in youtube_dl.extractor.gen_extractors():
        for tc in ie.get_testcases(include_onlymatching):
            yield tc


md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()


def expect_value(self, got, expected, field):
    if isinstance(expected, compat_str) and expected.startswith('re:'):
        match_str = expected[len('re:'):]
        match_rex = re.compile(match_str)

        self.assertTrue(
            isinstance(got, compat_str),
            'Expected a %s object, but got %s for field %s' % (
                compat_str.__name__, type(got).__name__, field))
        self.assertTrue(
            match_rex.match(got),
            'field %s (value: %r) should match %r' % (field, got, match_str))
    elif isinstance(expected, compat_str) and expected.startswith('startswith:'):
        start_str = expected[len('startswith:'):]
        self.assertTrue(
            isinstance(got, compat_str),
            'Expected a %s object, but got %s for field %s' % (
                compat_str.__name__, type(got).__name__, field))
        self.assertTrue(
            got.startswith(start_str),
            'field %s (value: %r) should start with %r' % (field, got, start_str))
    elif isinstance(expected, compat_str) and expected.startswith('contains:'):
        contains_str = expected[len('contains:'):]
        self.assertTrue(
            isinstance(got, compat_str),
            'Expected a %s object, but got %s for field %s' % (
                compat_str.__name__, type(got).__name__, field))
        self.assertTrue(
            contains_str in got,
            'field %s (value: %r) should contain %r' % (field, got, contains_str))
    elif isinstance(expected, compat_str) and re.match(r'lambda \w+:', expected):
        fn = eval(expected)
        suite = expected.split(':', 1)[1].strip()
        self.assertTrue(
            fn(got),
            'Expected field %s to meet condition %s, but value %r failed ' % (field, suite, got))
    elif isinstance(expected, type):
        self.assertTrue(
            isinstance(got, expected),
            'Expected type %r for field %s, but got value %r of type %r' % (expected, field, got, type(got)))
    elif isinstance(expected, dict) and isinstance(got, dict):
        expect_dict(self, got, expected)
    elif isinstance(expected, list) and isinstance(got, list):
        self.assertEqual(
            len(expected), len(got),
            'Expected a list of length %d, but got a list of length %d for field %s' % (
                len(expected), len(got), field))
        for index, (item_got, item_expected) in enumerate(zip(got, expected)):
            type_got = type(item_got)
            type_expected = type(item_expected)
            self.assertEqual(
                type_expected, type_got,
                'Type mismatch for list item at index %d for field %s, expected %r, got %r' % (
                    index, field, type_expected, type_got))
            expect_value(self, item_got, item_expected, field)
    else:
        if isinstance(expected, compat_str) and expected.startswith('md5:'):
            self.assertTrue(
                isinstance(got, compat_str),
                'Expected field %s to be a unicode object, but got value %r of type %r' % (field, got, type(got)))
            got = 'md5:' + md5(got)
        elif isinstance(expected, compat_str) and re.match(r'^(?:min|max)?count:\d+', expected):
            self.assertTrue(
                isinstance(got, (list, dict)),
                'Expected field %s to be a list or a dict, but it is of type %s' % (
                    field, type(got).__name__))
            op, _, expected_num = expected.partition(':')
            expected_num = int(expected_num)
            if op == 'mincount':
                assert_func = self.assertGreaterEqual
                msg_tmpl = 'Expected %d items in field %s, but only got %d'
            elif op == 'maxcount':
                assert_func = self.assertLessEqual
                msg_tmpl = 'Expected maximum %d items in field %s, but got %d'
            elif op == 'count':
                assert_func = self.assertEqual
                msg_tmpl = 'Expected exactly %d items in field %s, but got %d'
            else:
                assert False
            assert_func(
                len(got), expected_num,
                msg_tmpl % (expected_num, field, len(got)))
            return
        self.assertEqual(
            expected, got,
            'Invalid value for field %s, expected %r, got %r' % (field, expected, got))


def expect_dict(self, got_dict, expected_dict):
    for info_field, expected in expected_dict.items():
        got = got_dict.get(info_field)
        expect_value(self, got, expected, info_field)


def expect_info_dict(self, got_dict, expected_dict):
    expect_dict(self, got_dict, expected_dict)
    # Check for the presence of mandatory fields
    if got_dict.get('_type') not in ('playlist', 'multi_video'):
        for key in ('id', 'url', 'title', 'ext'):
            self.assertTrue(got_dict.get(key), 'Missing mandatory field %s' % key)
    # Check for mandatory fields that are automatically set by YoutubeDL
    for key in ['webpage_url', 'extractor', 'extractor_key']:
        self.assertTrue(got_dict.get(key), 'Missing field: %s' % key)

    # Are checkable fields missing from the test case definition?
    test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
                          for key, value in got_dict.items()
                          if value and key in ('id', 'title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location', 'age_limit'))
    missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
    if missing_keys:
        def _repr(v):
            if isinstance(v, compat_str):
                return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'").replace('\n', '\\n')
            else:
                return repr(v)
        info_dict_str = ''
        if len(missing_keys) != len(expected_dict):
            info_dict_str += ''.join(
                '    %s: %s,\n' % (_repr(k), _repr(v))
                for k, v in test_info_dict.items() if k not in missing_keys)

            if info_dict_str:
                info_dict_str += '\n'
        info_dict_str += ''.join(
            '    %s: %s,\n' % (_repr(k), _repr(test_info_dict[k]))
            for k in missing_keys)
        write_string(
            '\n\'info_dict\': {\n' + info_dict_str + '},\n', out=sys.stderr)
        self.assertFalse(
            missing_keys,
            'Missing keys in test definition: %s' % (
                ', '.join(sorted(missing_keys))))


def assertRegexpMatches(self, text, regexp, msg=None):
    if hasattr(self, 'assertRegexp'):
        return self.assertRegexp(text, regexp, msg)
    else:
        m = re.match(regexp, text)
        if not m:
            note = 'Regexp didn\'t match: %r not found' % (regexp)
            if len(text) < 1000:
                note += ' in %r' % text
            if msg is None:
                msg = note
            else:
                msg = note + ', ' + msg
            self.assertTrue(m, msg)


def expect_warnings(ydl, warnings_re):
    real_warning = ydl.report_warning

    def _report_warning(self, w, *args, **kwargs):
        if not any(re.search(w_re, w) for w_re in warnings_re):
            real_warning(w)

    ydl.report_warning = types.MethodType(_report_warning, ydl)


def http_server_port(httpd):
    if os.name == 'java' and isinstance(httpd.socket, ssl.SSLSocket):
        # In Jython SSLSocket is not a subclass of socket.socket
        sock = httpd.socket.sock
    else:
        sock = httpd.socket
    return sock.getsockname()[1]


def expectedFailureIf(cond):
    return unittest.expectedFailure if cond else IDENTITY
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
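helper.py's `expect_value` interprets string expectations by prefix ('re:', 'startswith:', 'contains:', 'md5:', 'mincount:', ...); a minimal sketch of two of those conventions outside the unittest machinery:

# Sketch: the 'md5:' and 'mincount:' expectation prefixes from test/helper.py.
import hashlib

md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()


def check(got, expected):
    if isinstance(expected, str) and expected.startswith('md5:'):
        # long fields are compared by digest rather than full text
        return 'md5:' + md5(got) == expected
    if isinstance(expected, str) and expected.startswith('mincount:'):
        return len(got) >= int(expected.split(':', 1)[1])
    return got == expected


assert check('hello', 'md5:' + md5('hello'))
assert check([1, 2, 3], 'mincount:2')
assert not check([1], 'mincount:2')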
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_youtube_misc.py
test/test_youtube_misc.py
#!/usr/bin/env python
from __future__ import unicode_literals

# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from youtube_dl.extractor import YoutubeIE


class TestYoutubeMisc(unittest.TestCase):
    def test_youtube_extract(self):
        assertExtractId = lambda url, id: self.assertEqual(YoutubeIE.extract_id(url), id)
        assertExtractId('http://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
        assertExtractId('https://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
        assertExtractId('https://www.youtube.com/watch?feature=player_embedded&v=BaW_jenozKc', 'BaW_jenozKc')
        assertExtractId('https://www.youtube.com/watch_popup?v=BaW_jenozKc', 'BaW_jenozKc')
        assertExtractId('http://www.youtube.com/watch?v=BaW_jenozKcsharePLED17F32AD9753930', 'BaW_jenozKc')
        assertExtractId('BaW_jenozKc', 'BaW_jenozKc')


if __name__ == '__main__':
    unittest.main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
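The assertions above exercise `YoutubeIE.extract_id` on several URL shapes. A deliberately simplified stand-in (the real extractor regex is far more involved; this is illustration only) showing the property being checked:

# Sketch: simplified stand-in for YoutubeIE.extract_id; not the real regex.
import re

_ID_RE = r'[0-9A-Za-z_-]{11}'


def extract_id(url):
    if re.match(r'^%s$' % _ID_RE, url):  # bare 11-character ids pass through
        return url
    m = re.search(r'v=(%s)' % _ID_RE, url)
    return m.group(1) if m else None


for url in ('http://www.youtube.com/watch?&v=BaW_jenozKc',
            'https://www.youtube.com/watch_popup?v=BaW_jenozKc',
            'BaW_jenozKc'):
    assert extract_id(url) == 'BaW_jenozKc'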
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_verbose_output.py
test/test_verbose_output.py
#!/usr/bin/env python
# coding: utf-8

from __future__ import unicode_literals

import unittest

import sys
import os
import subprocess
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


class TestVerboseOutput(unittest.TestCase):
    def test_private_info_arg(self):
        outp = subprocess.Popen(
            [
                sys.executable, 'youtube_dl/__main__.py', '-v',
                '--username', 'johnsmith@gmail.com',
                '--password', 'secret',
            ], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        sout, serr = outp.communicate()
        self.assertTrue(b'--username' in serr)
        self.assertTrue(b'johnsmith' not in serr)
        self.assertTrue(b'--password' in serr)
        self.assertTrue(b'secret' not in serr)

    def test_private_info_shortarg(self):
        outp = subprocess.Popen(
            [
                sys.executable, 'youtube_dl/__main__.py', '-v',
                '-u', 'johnsmith@gmail.com',
                '-p', 'secret',
            ], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        sout, serr = outp.communicate()
        self.assertTrue(b'-u' in serr)
        self.assertTrue(b'johnsmith' not in serr)
        self.assertTrue(b'-p' in serr)
        self.assertTrue(b'secret' not in serr)

    def test_private_info_eq(self):
        outp = subprocess.Popen(
            [
                sys.executable, 'youtube_dl/__main__.py', '-v',
                '--username=johnsmith@gmail.com',
                '--password=secret',
            ], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        sout, serr = outp.communicate()
        self.assertTrue(b'--username' in serr)
        self.assertTrue(b'johnsmith' not in serr)
        self.assertTrue(b'--password' in serr)
        self.assertTrue(b'secret' not in serr)

    def test_private_info_shortarg_eq(self):
        outp = subprocess.Popen(
            [
                sys.executable, 'youtube_dl/__main__.py', '-v',
                '-u=johnsmith@gmail.com',
                '-p=secret',
            ], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        sout, serr = outp.communicate()
        self.assertTrue(b'-u' in serr)
        self.assertTrue(b'johnsmith' not in serr)
        self.assertTrue(b'-p' in serr)
        self.assertTrue(b'secret' not in serr)


if __name__ == '__main__':
    unittest.main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
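All four tests assert the same property: `-v` debug output keeps the option flags but never their values. A sketch of such argv scrubbing (youtube-dl implements this inside its option parser; the code below is only an illustration of the behavior the tests pin down):

# Sketch: hiding secret option values from a logged argv; illustrative only.
PRIVATE_OPTS = {'-u', '--username', '-p', '--password'}


def scrub(argv):
    out, hide_next = [], False
    for arg in argv:
        if hide_next:
            out.append('PRIVATE')
            hide_next = False
        elif arg.split('=', 1)[0] in PRIVATE_OPTS:
            if '=' in arg:
                out.append(arg.split('=', 1)[0] + '=PRIVATE')
            else:
                out.append(arg)
                hide_next = True
        else:
            out.append(arg)
    return out


scrubbed = scrub(['-v', '--username', 'johnsmith@gmail.com', '-p=secret'])
assert scrubbed == ['-v', '--username', 'PRIVATE', '-p=PRIVATE']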
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_postprocessors.py
test/test_postprocessors.py
#!/usr/bin/env python
from __future__ import unicode_literals

# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from youtube_dl.postprocessor import MetadataFromTitlePP


class TestMetadataFromTitle(unittest.TestCase):
    def test_format_to_regex(self):
        pp = MetadataFromTitlePP(None, '%(title)s - %(artist)s')
        self.assertEqual(pp._titleregex, r'(?P<title>.+)\ \-\ (?P<artist>.+)')
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
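The single assertion above pins down the template-to-regex conversion. A re-implementation for illustration (not the postprocessor's actual code), with a round-trip match:

# Sketch: turning '%(field)s' templates into named-group regexes, matching
# what the MetadataFromTitlePP test expects; re-implemented for illustration.
import re


def format_to_regex(fmt):
    lastpos, regex = 0, ''
    for m in re.finditer(r'%\((\w+)\)s', fmt):
        regex += re.escape(fmt[lastpos:m.start()])
        regex += r'(?P<%s>.+)' % m.group(1)
        lastpos = m.end()
    return regex + re.escape(fmt[lastpos:])


rx = format_to_regex('%(title)s - %(artist)s')
assert rx == r'(?P<title>.+)\ \-\ (?P<artist>.+)'
m = re.match(rx, 'Cool Song - Some Artist')
assert m.group('artist') == 'Some Artist'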
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_YoutubeDL.py
test/test_YoutubeDL.py
#!/usr/bin/env python # coding: utf-8 from __future__ import unicode_literals # Allow direct execution import os import sys import unittest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import copy import json from test.helper import ( FakeYDL, assertRegexpMatches, try_rm, ) from youtube_dl import YoutubeDL from youtube_dl.compat import ( compat_http_cookiejar_Cookie, compat_http_cookies_SimpleCookie, compat_kwargs, compat_open as open, compat_str, compat_urllib_error, ) from youtube_dl.extractor import YoutubeIE from youtube_dl.extractor.common import InfoExtractor from youtube_dl.postprocessor.common import PostProcessor from youtube_dl.utils import ( ExtractorError, match_filter_func, traverse_obj, ) TEST_URL = 'http://localhost/sample.mp4' class YDL(FakeYDL): def __init__(self, *args, **kwargs): super(YDL, self).__init__(*args, **kwargs) self.downloaded_info_dicts = [] self.msgs = [] def process_info(self, info_dict): self.downloaded_info_dicts.append(info_dict.copy()) def to_screen(self, msg): self.msgs.append(msg) def dl(self, *args, **kwargs): assert False, 'Downloader must not be invoked for test_YoutubeDL' def _make_result(formats, **kwargs): res = { 'formats': formats, 'id': 'testid', 'title': 'testttitle', 'extractor': 'testex', 'extractor_key': 'TestEx', 'webpage_url': 'http://example.com/watch?v=shenanigans', } res.update(**compat_kwargs(kwargs)) return res class TestFormatSelection(unittest.TestCase): def test_prefer_free_formats(self): # Same resolution => download webm ydl = YDL() ydl.params['prefer_free_formats'] = True formats = [ {'ext': 'webm', 'height': 460, 'url': TEST_URL}, {'ext': 'mp4', 'height': 460, 'url': TEST_URL}, ] info_dict = _make_result(formats) yie = YoutubeIE(ydl) yie._sort_formats(info_dict['formats']) ydl.process_ie_result(info_dict) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['ext'], 'webm') # Different resolution => download best quality (mp4) ydl = YDL() ydl.params['prefer_free_formats'] = True formats = [ {'ext': 'webm', 'height': 720, 'url': TEST_URL}, {'ext': 'mp4', 'height': 1080, 'url': TEST_URL}, ] info_dict['formats'] = formats yie = YoutubeIE(ydl) yie._sort_formats(info_dict['formats']) ydl.process_ie_result(info_dict) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['ext'], 'mp4') # No prefer_free_formats => prefer mp4 and flv for greater compatibility ydl = YDL() ydl.params['prefer_free_formats'] = False formats = [ {'ext': 'webm', 'height': 720, 'url': TEST_URL}, {'ext': 'mp4', 'height': 720, 'url': TEST_URL}, {'ext': 'flv', 'height': 720, 'url': TEST_URL}, ] info_dict['formats'] = formats yie = YoutubeIE(ydl) yie._sort_formats(info_dict['formats']) ydl.process_ie_result(info_dict) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['ext'], 'mp4') ydl = YDL() ydl.params['prefer_free_formats'] = False formats = [ {'ext': 'flv', 'height': 720, 'url': TEST_URL}, {'ext': 'webm', 'height': 720, 'url': TEST_URL}, ] info_dict['formats'] = formats yie = YoutubeIE(ydl) yie._sort_formats(info_dict['formats']) ydl.process_ie_result(info_dict) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['ext'], 'flv') def test_format_selection(self): formats = [ {'format_id': '35', 'ext': 'mp4', 'preference': 1, 'url': TEST_URL}, {'format_id': 'example-with-dashes', 'ext': 'webm', 'preference': 1, 'url': TEST_URL}, {'format_id': '45', 'ext': 'webm', 'preference': 2, 'url': TEST_URL}, {'format_id': '47', 'ext': 'webm', 'preference': 3, 'url': 
TEST_URL}, {'format_id': '2', 'ext': 'flv', 'preference': 4, 'url': TEST_URL}, ] info_dict = _make_result(formats) ydl = YDL({'format': '20/47'}) ydl.process_ie_result(info_dict.copy()) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], '47') ydl = YDL({'format': '20/71/worst'}) ydl.process_ie_result(info_dict.copy()) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], '35') ydl = YDL() ydl.process_ie_result(info_dict.copy()) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], '2') ydl = YDL({'format': 'webm/mp4'}) ydl.process_ie_result(info_dict.copy()) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], '47') ydl = YDL({'format': '3gp/40/mp4'}) ydl.process_ie_result(info_dict.copy()) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], '35') ydl = YDL({'format': 'example-with-dashes'}) ydl.process_ie_result(info_dict.copy()) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'example-with-dashes') def test_format_selection_audio(self): formats = [ {'format_id': 'audio-low', 'ext': 'webm', 'preference': 1, 'vcodec': 'none', 'url': TEST_URL}, {'format_id': 'audio-mid', 'ext': 'webm', 'preference': 2, 'vcodec': 'none', 'url': TEST_URL}, {'format_id': 'audio-high', 'ext': 'flv', 'preference': 3, 'vcodec': 'none', 'url': TEST_URL}, {'format_id': 'vid', 'ext': 'mp4', 'preference': 4, 'url': TEST_URL}, ] info_dict = _make_result(formats) ydl = YDL({'format': 'bestaudio'}) ydl.process_ie_result(info_dict.copy()) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'audio-high') ydl = YDL({'format': 'worstaudio'}) ydl.process_ie_result(info_dict.copy()) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'audio-low') formats = [ {'format_id': 'vid-low', 'ext': 'mp4', 'preference': 1, 'url': TEST_URL}, {'format_id': 'vid-high', 'ext': 'mp4', 'preference': 2, 'url': TEST_URL}, ] info_dict = _make_result(formats) ydl = YDL({'format': 'bestaudio/worstaudio/best'}) ydl.process_ie_result(info_dict.copy()) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'vid-high') def test_format_selection_audio_exts(self): formats = [ {'format_id': 'mp3-64', 'ext': 'mp3', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'}, {'format_id': 'ogg-64', 'ext': 'ogg', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'}, {'format_id': 'aac-64', 'ext': 'aac', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'}, {'format_id': 'mp3-32', 'ext': 'mp3', 'abr': 32, 'url': 'http://_', 'vcodec': 'none'}, {'format_id': 'aac-32', 'ext': 'aac', 'abr': 32, 'url': 'http://_', 'vcodec': 'none'}, ] info_dict = _make_result(formats) ydl = YDL({'format': 'best'}) ie = YoutubeIE(ydl) ie._sort_formats(info_dict['formats']) ydl.process_ie_result(copy.deepcopy(info_dict)) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'aac-64') ydl = YDL({'format': 'mp3'}) ie = YoutubeIE(ydl) ie._sort_formats(info_dict['formats']) ydl.process_ie_result(copy.deepcopy(info_dict)) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'mp3-64') ydl = YDL({'prefer_free_formats': True}) ie = YoutubeIE(ydl) ie._sort_formats(info_dict['formats']) ydl.process_ie_result(copy.deepcopy(info_dict)) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'ogg-64') def test_format_selection_video(self): formats 
= [ {'format_id': 'dash-video-low', 'ext': 'mp4', 'preference': 1, 'acodec': 'none', 'url': TEST_URL}, {'format_id': 'dash-video-high', 'ext': 'mp4', 'preference': 2, 'acodec': 'none', 'url': TEST_URL}, {'format_id': 'vid', 'ext': 'mp4', 'preference': 3, 'url': TEST_URL}, ] info_dict = _make_result(formats) ydl = YDL({'format': 'bestvideo'}) ydl.process_ie_result(info_dict.copy()) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'dash-video-high') ydl = YDL({'format': 'worstvideo'}) ydl.process_ie_result(info_dict.copy()) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'dash-video-low') ydl = YDL({'format': 'bestvideo[format_id^=dash][format_id$=low]'}) ydl.process_ie_result(info_dict.copy()) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'dash-video-low') formats = [ {'format_id': 'vid-vcodec-dot', 'ext': 'mp4', 'preference': 1, 'vcodec': 'avc1.123456', 'acodec': 'none', 'url': TEST_URL}, ] info_dict = _make_result(formats) ydl = YDL({'format': 'bestvideo[vcodec=avc1.123456]'}) ydl.process_ie_result(info_dict.copy()) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'vid-vcodec-dot') def test_format_selection_string_ops(self): formats = [ {'format_id': 'abc-cba', 'ext': 'mp4', 'url': TEST_URL}, {'format_id': 'zxc-cxz', 'ext': 'webm', 'url': TEST_URL}, ] info_dict = _make_result(formats) # equals (=) ydl = YDL({'format': '[format_id=abc-cba]'}) ydl.process_ie_result(info_dict.copy()) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'abc-cba') # does not equal (!=) ydl = YDL({'format': '[format_id!=abc-cba]'}) ydl.process_ie_result(info_dict.copy()) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'zxc-cxz') ydl = YDL({'format': '[format_id!=abc-cba][format_id!=zxc-cxz]'}) self.assertRaises(ExtractorError, ydl.process_ie_result, info_dict.copy()) # starts with (^=) ydl = YDL({'format': '[format_id^=abc]'}) ydl.process_ie_result(info_dict.copy()) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'abc-cba') # does not start with (!^=) ydl = YDL({'format': '[format_id!^=abc]'}) ydl.process_ie_result(info_dict.copy()) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'zxc-cxz') ydl = YDL({'format': '[format_id!^=abc][format_id!^=zxc]'}) self.assertRaises(ExtractorError, ydl.process_ie_result, info_dict.copy()) # ends with ($=) ydl = YDL({'format': '[format_id$=cba]'}) ydl.process_ie_result(info_dict.copy()) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'abc-cba') # does not end with (!$=) ydl = YDL({'format': '[format_id!$=cba]'}) ydl.process_ie_result(info_dict.copy()) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'zxc-cxz') ydl = YDL({'format': '[format_id!$=cba][format_id!$=cxz]'}) self.assertRaises(ExtractorError, ydl.process_ie_result, info_dict.copy()) # contains (*=) ydl = YDL({'format': '[format_id*=bc-cb]'}) ydl.process_ie_result(info_dict.copy()) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'abc-cba') # does not contain (!*=) ydl = YDL({'format': '[format_id!*=bc-cb]'}) ydl.process_ie_result(info_dict.copy()) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'zxc-cxz') ydl = YDL({'format': '[format_id!*=abc][format_id!*=zxc]'}) self.assertRaises(ExtractorError, 
ydl.process_ie_result, info_dict.copy()) ydl = YDL({'format': '[format_id!*=-]'}) self.assertRaises(ExtractorError, ydl.process_ie_result, info_dict.copy()) def test_youtube_format_selection(self): order = [ '38', '37', '46', '22', '45', '35', '44', '18', '34', '43', '6', '5', '17', '36', '13', # Apple HTTP Live Streaming '96', '95', '94', '93', '92', '132', '151', # 3D '85', '84', '102', '83', '101', '82', '100', # Dash video '137', '248', '136', '247', '135', '246', '245', '244', '134', '243', '133', '242', '160', # Dash audio '141', '172', '140', '171', '139', ] def format_info(f_id): info = YoutubeIE._formats[f_id].copy() # XXX: In real cases InfoExtractor._parse_mpd_formats() fills up 'acodec' # and 'vcodec', while in tests such information is incomplete since # commit a6c2c24479e5f4827ceb06f64d855329c0a6f593 # test_YoutubeDL.test_youtube_format_selection is broken without # this fix if 'acodec' in info and 'vcodec' not in info: info['vcodec'] = 'none' elif 'vcodec' in info and 'acodec' not in info: info['acodec'] = 'none' info['format_id'] = f_id info['url'] = 'url:' + f_id return info formats_order = [format_info(f_id) for f_id in order] info_dict = _make_result(list(formats_order), extractor='youtube') ydl = YDL({'format': 'bestvideo+bestaudio'}) yie = YoutubeIE(ydl) yie._sort_formats(info_dict['formats']) ydl.process_ie_result(info_dict) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], '137+141') self.assertEqual(downloaded['ext'], 'mp4') info_dict = _make_result(list(formats_order), extractor='youtube') ydl = YDL({'format': 'bestvideo[height>=999999]+bestaudio/best'}) yie = YoutubeIE(ydl) yie._sort_formats(info_dict['formats']) ydl.process_ie_result(info_dict) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], '38') info_dict = _make_result(list(formats_order), extractor='youtube') ydl = YDL({'format': 'bestvideo/best,bestaudio'}) yie = YoutubeIE(ydl) yie._sort_formats(info_dict['formats']) ydl.process_ie_result(info_dict) downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts] self.assertEqual(downloaded_ids, ['137', '141']) info_dict = _make_result(list(formats_order), extractor='youtube') ydl = YDL({'format': '(bestvideo[ext=mp4],bestvideo[ext=webm])+bestaudio'}) yie = YoutubeIE(ydl) yie._sort_formats(info_dict['formats']) ydl.process_ie_result(info_dict) downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts] self.assertEqual(downloaded_ids, ['137+141', '248+141']) info_dict = _make_result(list(formats_order), extractor='youtube') ydl = YDL({'format': '(bestvideo[ext=mp4],bestvideo[ext=webm])[height<=720]+bestaudio'}) yie = YoutubeIE(ydl) yie._sort_formats(info_dict['formats']) ydl.process_ie_result(info_dict) downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts] self.assertEqual(downloaded_ids, ['136+141', '247+141']) info_dict = _make_result(list(formats_order), extractor='youtube') ydl = YDL({'format': '(bestvideo[ext=none]/bestvideo[ext=webm])+bestaudio'}) yie = YoutubeIE(ydl) yie._sort_formats(info_dict['formats']) ydl.process_ie_result(info_dict) downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts] self.assertEqual(downloaded_ids, ['248+141']) for f1, f2 in zip(formats_order, formats_order[1:]): info_dict = _make_result([f1, f2], extractor='youtube') ydl = YDL({'format': 'best/bestvideo'}) yie = YoutubeIE(ydl) yie._sort_formats(info_dict['formats']) ydl.process_ie_result(info_dict) downloaded = ydl.downloaded_info_dicts[0] 
self.assertEqual(downloaded['format_id'], f1['format_id']) info_dict = _make_result([f2, f1], extractor='youtube') ydl = YDL({'format': 'best/bestvideo'}) yie = YoutubeIE(ydl) yie._sort_formats(info_dict['formats']) ydl.process_ie_result(info_dict) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], f1['format_id']) def test_audio_only_extractor_format_selection(self): # For extractors with incomplete formats (all formats are audio-only or # video-only) best and worst should fallback to corresponding best/worst # video-only or audio-only formats (as per # https://github.com/ytdl-org/youtube-dl/pull/5556) formats = [ {'format_id': 'low', 'ext': 'mp3', 'preference': 1, 'vcodec': 'none', 'url': TEST_URL}, {'format_id': 'high', 'ext': 'mp3', 'preference': 2, 'vcodec': 'none', 'url': TEST_URL}, ] info_dict = _make_result(formats) ydl = YDL({'format': 'best'}) ydl.process_ie_result(info_dict.copy()) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'high') ydl = YDL({'format': 'worst'}) ydl.process_ie_result(info_dict.copy()) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'low') def test_format_not_available(self): formats = [ {'format_id': 'regular', 'ext': 'mp4', 'height': 360, 'url': TEST_URL}, {'format_id': 'video', 'ext': 'mp4', 'height': 720, 'acodec': 'none', 'url': TEST_URL}, ] info_dict = _make_result(formats) # This must fail since complete video-audio format does not match filter # and extractor does not provide incomplete only formats (i.e. only # video-only or audio-only). ydl = YDL({'format': 'best[height>360]'}) self.assertRaises(ExtractorError, ydl.process_ie_result, info_dict.copy()) def test_format_selection_issue_10083(self): # See https://github.com/ytdl-org/youtube-dl/issues/10083 formats = [ {'format_id': 'regular', 'height': 360, 'url': TEST_URL}, {'format_id': 'video', 'height': 720, 'acodec': 'none', 'url': TEST_URL}, {'format_id': 'audio', 'vcodec': 'none', 'url': TEST_URL}, ] info_dict = _make_result(formats) ydl = YDL({'format': 'best[height>360]/bestvideo[height>360]+bestaudio'}) ydl.process_ie_result(info_dict.copy()) self.assertEqual(ydl.downloaded_info_dicts[0]['format_id'], 'video+audio') def test_invalid_format_specs(self): def assert_syntax_error(format_spec): ydl = YDL({'format': format_spec}) info_dict = _make_result([{'format_id': 'foo', 'url': TEST_URL}]) self.assertRaises(SyntaxError, ydl.process_ie_result, info_dict) assert_syntax_error('bestvideo,,best') assert_syntax_error('+bestaudio') assert_syntax_error('bestvideo+') assert_syntax_error('/') assert_syntax_error('bestvideo+bestvideo+bestaudio') def test_format_filtering(self): formats = [ {'format_id': 'A', 'filesize': 500, 'width': 1000}, {'format_id': 'B', 'filesize': 1000, 'width': 500}, {'format_id': 'C', 'filesize': 1000, 'width': 400}, {'format_id': 'D', 'filesize': 2000, 'width': 600}, {'format_id': 'E', 'filesize': 3000}, {'format_id': 'F'}, {'format_id': 'G', 'filesize': 1000000}, ] for f in formats: f['url'] = 'http://_/' f['ext'] = 'unknown' info_dict = _make_result(formats) ydl = YDL({'format': 'best[filesize<3000]'}) ydl.process_ie_result(info_dict) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'D') ydl = YDL({'format': 'best[filesize<=3000]'}) ydl.process_ie_result(info_dict) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'E') ydl = YDL({'format': 'best[filesize <= ? 
3000]'}) ydl.process_ie_result(info_dict) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'F') ydl = YDL({'format': 'best [filesize = 1000] [width>450]'}) ydl.process_ie_result(info_dict) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'B') ydl = YDL({'format': 'best [filesize = 1000] [width!=450]'}) ydl.process_ie_result(info_dict) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'C') ydl = YDL({'format': '[filesize>?1]'}) ydl.process_ie_result(info_dict) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'G') ydl = YDL({'format': '[filesize<1M]'}) ydl.process_ie_result(info_dict) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'E') ydl = YDL({'format': '[filesize<1MiB]'}) ydl.process_ie_result(info_dict) downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'G') ydl = YDL({'format': 'all[width>=400][width<=600]'}) ydl.process_ie_result(info_dict) downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts] self.assertEqual(downloaded_ids, ['B', 'C', 'D']) ydl = YDL({'format': 'best[height<40]'}) try: ydl.process_ie_result(info_dict) except ExtractorError: pass self.assertEqual(ydl.downloaded_info_dicts, []) def test_default_format_spec(self): ydl = YDL({'simulate': True}) self.assertEqual(ydl._default_format_spec({}), 'bestvideo+bestaudio/best') ydl = YDL({}) self.assertEqual(ydl._default_format_spec({'is_live': True}), 'best/bestvideo+bestaudio') ydl = YDL({'simulate': True}) self.assertEqual(ydl._default_format_spec({'is_live': True}), 'bestvideo+bestaudio/best') ydl = YDL({'outtmpl': '-'}) self.assertEqual(ydl._default_format_spec({}), 'best/bestvideo+bestaudio') ydl = YDL({}) self.assertEqual(ydl._default_format_spec({}, download=False), 'bestvideo+bestaudio/best') self.assertEqual(ydl._default_format_spec({'is_live': True}), 'best/bestvideo+bestaudio') class TestYoutubeDL(unittest.TestCase): def test_subtitles(self): def s_formats(lang, autocaption=False): return [{ 'ext': ext, 'url': 'http://localhost/video.%s.%s' % (lang, ext), '_auto': autocaption, } for ext in ['vtt', 'srt', 'ass']] subtitles = dict((l, s_formats(l)) for l in ['en', 'fr', 'es']) auto_captions = dict((l, s_formats(l, True)) for l in ['it', 'pt', 'es']) info_dict = { 'id': 'test', 'title': 'Test', 'url': 'http://localhost/video.mp4', 'subtitles': subtitles, 'automatic_captions': auto_captions, 'extractor': 'TEST', } def get_info(params={}): params.setdefault('simulate', True) ydl = YDL(params) ydl.report_warning = lambda *args, **kargs: None return ydl.process_video_result(info_dict, download=False) result = get_info() self.assertFalse(result.get('requested_subtitles')) self.assertEqual(result['subtitles'], subtitles) self.assertEqual(result['automatic_captions'], auto_captions) result = get_info({'writesubtitles': True}) subs = result['requested_subtitles'] self.assertTrue(subs) self.assertEqual(set(subs.keys()), set(['en'])) self.assertTrue(subs['en'].get('data') is None) self.assertEqual(subs['en']['ext'], 'ass') result = get_info({'writesubtitles': True, 'subtitlesformat': 'foo/srt'}) subs = result['requested_subtitles'] self.assertEqual(subs['en']['ext'], 'srt') result = get_info({'writesubtitles': True, 'subtitleslangs': ['es', 'fr', 'it']}) subs = result['requested_subtitles'] self.assertTrue(subs) self.assertEqual(set(subs.keys()), set(['es', 'fr'])) result = get_info({'writesubtitles': 
True, 'writeautomaticsub': True, 'subtitleslangs': ['es', 'pt']}) subs = result['requested_subtitles'] self.assertTrue(subs) self.assertEqual(set(subs.keys()), set(['es', 'pt'])) self.assertFalse(subs['es']['_auto']) self.assertTrue(subs['pt']['_auto']) result = get_info({'writeautomaticsub': True, 'subtitleslangs': ['es', 'pt']}) subs = result['requested_subtitles'] self.assertTrue(subs) self.assertEqual(set(subs.keys()), set(['es', 'pt'])) self.assertTrue(subs['es']['_auto']) self.assertTrue(subs['pt']['_auto']) def test_add_extra_info(self): test_dict = { 'extractor': 'Foo', } extra_info = { 'extractor': 'Bar', 'playlist': 'funny videos', } YDL.add_extra_info(test_dict, extra_info) self.assertEqual(test_dict['extractor'], 'Foo') self.assertEqual(test_dict['playlist'], 'funny videos') def test_prepare_filename(self): info = { 'id': '1234', 'ext': 'mp4', 'width': None, 'height': 1080, 'title1': '$PATH', 'title2': '%PATH%', } def fname(templ, na_placeholder='NA'): params = {'outtmpl': templ} if na_placeholder != 'NA': params['outtmpl_na_placeholder'] = na_placeholder ydl = YoutubeDL(params) return ydl.prepare_filename(info) self.assertEqual(fname('%(id)s.%(ext)s'), '1234.mp4') self.assertEqual(fname('%(id)s-%(width)s.%(ext)s'), '1234-NA.mp4') NA_TEST_OUTTMPL = '%(uploader_date)s-%(width)d-%(id)s.%(ext)s' # Replace missing fields with 'NA' by default self.assertEqual(fname(NA_TEST_OUTTMPL), 'NA-NA-1234.mp4') # Or by provided placeholder self.assertEqual(fname(NA_TEST_OUTTMPL, na_placeholder='none'), 'none-none-1234.mp4') self.assertEqual(fname(NA_TEST_OUTTMPL, na_placeholder=''), '--1234.mp4') self.assertEqual(fname('%(height)d.%(ext)s'), '1080.mp4') self.assertEqual(fname('%(height)6d.%(ext)s'), ' 1080.mp4') self.assertEqual(fname('%(height)-6d.%(ext)s'), '1080 .mp4') self.assertEqual(fname('%(height)06d.%(ext)s'), '001080.mp4') self.assertEqual(fname('%(height) 06d.%(ext)s'), ' 01080.mp4') self.assertEqual(fname('%(height) 06d.%(ext)s'), ' 01080.mp4') self.assertEqual(fname('%(height)0 6d.%(ext)s'), ' 01080.mp4') self.assertEqual(fname('%(height)0 6d.%(ext)s'), ' 01080.mp4') self.assertEqual(fname('%(height) 0 6d.%(ext)s'), ' 01080.mp4') self.assertEqual(fname('%%'), '%') self.assertEqual(fname('%%%%'), '%%') self.assertEqual(fname('%%(height)06d.%(ext)s'), '%(height)06d.mp4') self.assertEqual(fname('%(width)06d.%(ext)s'), 'NA.mp4') self.assertEqual(fname('%(width)06d.%%(ext)s'), 'NA.%(ext)s') self.assertEqual(fname('%%(width)06d.%(ext)s'), '%(width)06d.mp4') self.assertEqual(fname('Hello %(title1)s'), 'Hello $PATH') self.assertEqual(fname('Hello %(title2)s'), 'Hello %PATH%') def test_format_note(self): ydl = YoutubeDL() self.assertEqual(ydl._format_note({}), '') assertRegexpMatches(self, ydl._format_note({ 'vbr': 10, }), r'^\s*10k$') assertRegexpMatches(self, ydl._format_note({ 'fps': 30, }), r'^30fps$') def test_postprocessors(self): filename = 'post-processor-testfile.mp4' audiofile = filename + '.mp3' class SimplePP(PostProcessor): def run(self, info): with open(audiofile, 'w') as f: f.write('EXAMPLE') return [info['filepath']], info def run_pp(params, PP): with open(filename, 'w') as f: f.write('EXAMPLE') ydl = YoutubeDL(params) ydl.add_post_processor(PP()) ydl.post_process(filename, {'filepath': filename}) run_pp({'keepvideo': True}, SimplePP) self.assertTrue(os.path.exists(filename), '%s doesn\'t exist' % filename) self.assertTrue(os.path.exists(audiofile), '%s doesn\'t exist' % audiofile) os.unlink(filename) os.unlink(audiofile) run_pp({'keepvideo': False}, SimplePP) 
self.assertFalse(os.path.exists(filename), '%s exists' % filename) self.assertTrue(os.path.exists(audiofile), '%s doesn\'t exist' % audiofile) os.unlink(audiofile) class ModifierPP(PostProcessor): def run(self, info): with open(info['filepath'], 'w') as f: f.write('MODIFIED') return [], info run_pp({'keepvideo': False}, ModifierPP) self.assertTrue(os.path.exists(filename), '%s doesn\'t exist' % filename) os.unlink(filename) def test_match_filter(self): class FilterYDL(YDL): def __init__(self, *args, **kwargs): super(FilterYDL, self).__init__(*args, **kwargs) self.params['simulate'] = True def process_info(self, info_dict): super(YDL, self).process_info(info_dict) def _match_entry(self, info_dict, incomplete): res = super(FilterYDL, self)._match_entry(info_dict, incomplete) if res is None: self.downloaded_info_dicts.append(info_dict) return res first = { 'id': '1', 'url': TEST_URL, 'title': 'one', 'extractor': 'TEST', 'duration': 30, 'filesize': 10 * 1024, 'playlist_id': '42', 'uploader': "變態妍字幕版 太妍 тест", 'creator': "тест ' 123 ' тест--", } second = { 'id': '2', 'url': TEST_URL, 'title': 'two', 'extractor': 'TEST', 'duration': 10, 'description': 'foo', 'filesize': 5 * 1024, 'playlist_id': '43', 'uploader': "тест 123", } videos = [first, second] def get_videos(filter_=None): ydl = FilterYDL({'match_filter': filter_}) for v in videos: ydl.process_ie_result(v, download=True) return [v['id'] for v in ydl.downloaded_info_dicts] res = get_videos() self.assertEqual(res, ['1', '2']) def f(v): if v['id'] == '1': return None else: return 'Video id is not 1' res = get_videos(f) self.assertEqual(res, ['1']) f = match_filter_func('duration < 30') res = get_videos(f) self.assertEqual(res, ['2']) f = match_filter_func('description = foo') res = get_videos(f) self.assertEqual(res, ['2']) f = match_filter_func('description =? foo') res = get_videos(f) self.assertEqual(res, ['1', '2'])
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
true
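The format-selection tests above all follow one pattern: build a synthetic info_dict, hand it to a YoutubeDL configured with a 'format' spec, and inspect which format gets picked. A minimal standalone sketch of that pattern follows; the CapturingYDL helper, format ids and URLs are illustrative stand-ins for the suite's YDL helper and TEST_URL, not part of the repository.

#!/usr/bin/env python
from __future__ import unicode_literals

import youtube_dl


class CapturingYDL(youtube_dl.YoutubeDL):
    # Stand-in for the YDL test helper: record what would have been
    # downloaded instead of fetching it.
    def __init__(self, *args, **kwargs):
        super(CapturingYDL, self).__init__(*args, **kwargs)
        self.downloaded_info_dicts = []

    def process_info(self, info_dict):
        self.downloaded_info_dicts.append(info_dict)


info_dict = {
    'id': 'testid',
    'title': 'testtitle',
    'extractor': 'testex',
    'formats': [
        {'format_id': 'low', 'ext': 'mp4', 'preference': 1, 'url': 'http://example.com/low'},
        {'format_id': 'high', 'ext': 'webm', 'preference': 2, 'url': 'http://example.com/high'},
    ],
}

ydl = CapturingYDL({'format': 'webm/mp4'})
ydl.process_ie_result(dict(info_dict))
print(ydl.downloaded_info_dicts[0]['format_id'])  # -> 'high' (the webm branch wins)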
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_write_annotations.py
test/test_write_annotations.py
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals

# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from test.helper import get_params, try_rm

import xml.etree.ElementTree

import youtube_dl.YoutubeDL
import youtube_dl.extractor
from youtube_dl.compat import compat_open as open


class YoutubeDL(youtube_dl.YoutubeDL):
    def __init__(self, *args, **kwargs):
        super(YoutubeDL, self).__init__(*args, **kwargs)
        self.to_stderr = self.to_screen


params = get_params({
    'writeannotations': True,
    'skip_download': True,
    'writeinfojson': False,
    'format': 'flv',
})


TEST_ID = 'gr51aVj-mLg'
ANNOTATIONS_FILE = TEST_ID + '.annotations.xml'
EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label']


class TestAnnotations(unittest.TestCase):
    def setUp(self):
        # Clear old files
        self.tearDown()

    def test_annotations(self):
        expected = list(EXPECTED_ANNOTATIONS)  # Two annotations could have the same text.
        ie = youtube_dl.extractor.YoutubeIE()
        ydl = YoutubeDL(params)
        ydl.add_info_extractor(ie)
        ydl.download([TEST_ID])
        self.assertTrue(os.path.exists(ANNOTATIONS_FILE))
        annoxml = None
        with open(ANNOTATIONS_FILE, 'r', encoding='utf-8') as annof:
            annoxml = xml.etree.ElementTree.parse(annof)
        self.assertTrue(annoxml is not None, 'Failed to parse annotations XML')
        root = annoxml.getroot()
        self.assertEqual(root.tag, 'document')
        annotationsTag = root.find('annotations')
        self.assertEqual(annotationsTag.tag, 'annotations')
        annotations = annotationsTag.findall('annotation')
        # Not all the annotations have TEXT children and the annotations are returned unsorted.
        for a in annotations:
            self.assertEqual(a.tag, 'annotation')
            if a.get('type') == 'text':
                textTag = a.find('TEXT')
                text = textTag.text
                self.assertTrue(text in expected)  # assertIn only added in python 2.7
                # Remove the first occurrence: there could be more than one annotation with the same text.
                expected.remove(text)
        # We should have seen (and removed) all the expected annotation texts.
        self.assertEqual(len(expected), 0, 'Not all expected annotations were found.')

    def tearDown(self):
        try_rm(ANNOTATIONS_FILE)


if __name__ == '__main__':
    unittest.main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
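The XML traversal in test_write_annotations.py can be exercised without downloading anything; this sketch inlines a made-up annotations document instead of fetching one, and uses only the standard library:

from __future__ import unicode_literals

import xml.etree.ElementTree

SAMPLE = '''<document>
  <annotations>
    <annotation type="text"><TEXT>Note</TEXT></annotation>
    <annotation type="highlight"/>
  </annotations>
</document>'''

root = xml.etree.ElementTree.fromstring(SAMPLE)
# Same shape as the test: only 'text'-typed annotations carry a TEXT child.
texts = [a.find('TEXT').text
         for a in root.find('annotations').findall('annotation')
         if a.get('type') == 'text']
print(texts)  # ['Note']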
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_youtube_lists.py
test/test_youtube_lists.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from test.helper import FakeYDL

from youtube_dl.extractor import (
    YoutubeIE,
    YoutubePlaylistIE,
    YoutubeTabIE,
)


class TestYoutubeLists(unittest.TestCase):
    def assertIsPlaylist(self, info):
        """Make sure the info has '_type' set to 'playlist'"""
        self.assertEqual(info['_type'], 'playlist')

    def test_youtube_playlist_noplaylist(self):
        dl = FakeYDL()
        dl.params['noplaylist'] = True
        dl.params['format'] = 'best'
        ie = YoutubePlaylistIE(dl)
        result = ie.extract('https://www.youtube.com/watch?v=FXxLjLQi3Fg&list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
        self.assertEqual(result['_type'], 'url')
        result = dl.extract_info(result['url'], download=False, ie_key=result.get('ie_key'), process=False)
        self.assertEqual(YoutubeIE().extract_id(result['url']), 'FXxLjLQi3Fg')

    def test_youtube_mix(self):
        dl = FakeYDL()
        dl.params['format'] = 'best'
        ie = YoutubeTabIE(dl)
        result = dl.extract_info('https://www.youtube.com/watch?v=tyITL_exICo&list=RDCLAK5uy_kLWIr9gv1XLlPbaDS965-Db4TrBoUTxQ8', download=False, ie_key=ie.ie_key(), process=True)
        entries = (result or {}).get('entries', [{'id': 'not_found', }])
        self.assertTrue(len(entries) >= 25)
        original_video = entries[0]
        self.assertEqual(original_video['id'], 'tyITL_exICo')

    def test_youtube_flat_playlist_extraction(self):
        dl = FakeYDL()
        dl.params['extract_flat'] = True
        ie = YoutubeTabIE(dl)
        result = ie.extract('https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc')
        self.assertIsPlaylist(result)
        entries = list(result['entries'])
        self.assertTrue(len(entries) == 1)
        video = entries[0]
        self.assertEqual(video['_type'], 'url')
        self.assertEqual(video['ie_key'], 'Youtube')
        self.assertEqual(video['id'], 'BaW_jenozKc')
        self.assertEqual(video['url'], 'BaW_jenozKc')
        self.assertEqual(video['title'], 'youtube-dl test video "\'/\\ä↭𝕐')
        self.assertEqual(video['duration'], 10)
        self.assertEqual(video['uploader'], 'Philipp Hagemeister')


if __name__ == '__main__':
    unittest.main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
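Of the list tests above, only the id-extraction step runs offline: extract_id() (used by test_youtube_playlist_noplaylist) pulls the 11-character video id out of a watch URL without network access. A quick sketch with an illustrative URL:

from __future__ import unicode_literals

from youtube_dl.extractor import YoutubeIE

url = 'https://www.youtube.com/watch?v=BaW_jenozKc'
print(YoutubeIE().extract_id(url))  # -> 'BaW_jenozKc'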
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_iqiyi_sdk_interpreter.py
test/test_iqiyi_sdk_interpreter.py
#!/usr/bin/env python
from __future__ import unicode_literals

# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from test.helper import FakeYDL
from youtube_dl.extractor import IqiyiIE


class IqiyiIEWithCredentials(IqiyiIE):
    def _get_login_info(self):
        return 'foo', 'bar'


class WarningLogger(object):
    def __init__(self):
        self.messages = []

    def warning(self, msg):
        self.messages.append(msg)

    def debug(self, msg):
        pass

    def error(self, msg):
        pass


class TestIqiyiSDKInterpreter(unittest.TestCase):
    def test_iqiyi_sdk_interpreter(self):
        '''
        Test the functionality of IqiyiSDKInterpreter by trying to log in

        If `sign` is incorrect, /validate call throws an HTTP 556 error
        '''
        logger = WarningLogger()
        ie = IqiyiIEWithCredentials(FakeYDL({'logger': logger}))
        ie._login()
        self.assertTrue('unable to log in:' in logger.messages[0])


if __name__ == '__main__':
    unittest.main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
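The WarningLogger above works because the 'logger' param accepted by YoutubeDL only needs debug/warning/error methods. A self-contained sketch of that contract; the ListLogger class and the warning text are illustrative:

from __future__ import unicode_literals

import youtube_dl


class ListLogger(object):
    def __init__(self):
        self.messages = []

    def debug(self, msg):
        pass

    def warning(self, msg):
        self.messages.append(msg)

    def error(self, msg):
        self.messages.append(msg)


logger = ListLogger()
ydl = youtube_dl.YoutubeDL({'logger': logger})
ydl.report_warning('something looks off')
# The warning was routed to the logger object instead of stderr.
print(logger.messages)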
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_utils.py
test/test_utils.py
#!/usr/bin/env python # coding: utf-8 from __future__ import unicode_literals # Allow direct execution import os import sys import unittest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # Various small unit tests import io import itertools import json import types import xml.etree.ElementTree from youtube_dl.utils import ( _UnsafeExtensionError, age_restricted, args_to_str, base_url, caesar, clean_html, clean_podcast_url, date_from_str, DateRange, detect_exe_version, determine_ext, encode_base_n, encode_compat_str, encodeFilename, escape_rfc3986, escape_url, expand_path, extract_attributes, ExtractorError, find_xpath_attr, fix_xml_ampersands, float_or_none, get_element_by_class, get_element_by_attribute, get_elements_by_class, get_elements_by_attribute, InAdvancePagedList, int_or_none, intlist_to_bytes, is_html, join_nonempty, js_to_json, LazyList, limit_length, lowercase_escape, merge_dicts, mimetype2ext, month_by_name, multipart_encode, ohdave_rsa_encrypt, OnDemandPagedList, orderedSet, parse_age_limit, parse_bitrate, parse_duration, parse_filesize, parse_codecs, parse_count, parse_iso8601, parse_resolution, parse_qs, partial_application, pkcs1pad, prepend_extension, read_batch_urls, remove_start, remove_end, remove_quotes, replace_extension, rot47, sanitize_filename, sanitize_path, sanitize_url, sanitized_Request, shell_quote, smuggle_url, str_to_int, strip_jsonp, strip_or_none, subtitles_filename, timeconvert, try_call, unescapeHTML, unified_strdate, unified_timestamp, unsmuggle_url, uppercase_escape, url_basename, url_or_none, urljoin, urlencode_postdata, urshift, update_url_query, variadic, version_tuple, xpath_with_ns, xpath_element, xpath_text, xpath_attr, render_table, match_str, parse_dfxp_time_expr, dfxp2srt, cli_option, cli_valueless_option, cli_bool_option, YoutubeDLHandler, ) from youtube_dl.compat import ( compat_chr, compat_etree_fromstring, compat_getenv, compat_os_name, compat_setenv, compat_str, compat_urlparse, ) class TestUtil(unittest.TestCase): def test_timeconvert(self): self.assertTrue(timeconvert('') is None) self.assertTrue(timeconvert('bougrg') is None) def test_sanitize_filename(self): self.assertEqual(sanitize_filename('abc'), 'abc') self.assertEqual(sanitize_filename('abc_d-e'), 'abc_d-e') self.assertEqual(sanitize_filename('123'), '123') self.assertEqual('abc_de', sanitize_filename('abc/de')) self.assertFalse('/' in sanitize_filename('abc/de///')) self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de')) self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|')) self.assertEqual('yes no', sanitize_filename('yes? 
no')) self.assertEqual('this - that', sanitize_filename('this: that')) self.assertEqual(sanitize_filename('AT&T'), 'AT&T') aumlaut = 'ä' self.assertEqual(sanitize_filename(aumlaut), aumlaut) tests = '\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430' self.assertEqual(sanitize_filename(tests), tests) self.assertEqual( sanitize_filename('New World record at 0:12:34'), 'New World record at 0_12_34') self.assertEqual(sanitize_filename('--gasdgf'), '_-gasdgf') self.assertEqual(sanitize_filename('--gasdgf', is_id=True), '--gasdgf') self.assertEqual(sanitize_filename('.gasdgf'), 'gasdgf') self.assertEqual(sanitize_filename('.gasdgf', is_id=True), '.gasdgf') forbidden = '"\0\\/' for fc in forbidden: for fbc in forbidden: self.assertTrue(fbc not in sanitize_filename(fc)) def test_sanitize_filename_restricted(self): self.assertEqual(sanitize_filename('abc', restricted=True), 'abc') self.assertEqual(sanitize_filename('abc_d-e', restricted=True), 'abc_d-e') self.assertEqual(sanitize_filename('123', restricted=True), '123') self.assertEqual('abc_de', sanitize_filename('abc/de', restricted=True)) self.assertFalse('/' in sanitize_filename('abc/de///', restricted=True)) self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', restricted=True)) self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', restricted=True)) self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True)) self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True)) tests = 'aäb\u4e2d\u56fd\u7684c' self.assertEqual(sanitize_filename(tests, restricted=True), 'aab_c') self.assertTrue(sanitize_filename('\xf6', restricted=True) != '') # No empty filename forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#' for fc in forbidden: for fbc in forbidden: self.assertTrue(fbc not in sanitize_filename(fc, restricted=True)) # Handle a common case more neatly self.assertEqual(sanitize_filename('\u5927\u58f0\u5e26 - Song', restricted=True), 'Song') self.assertEqual(sanitize_filename('\u603b\u7edf: Speech', restricted=True), 'Speech') # .. 
but make sure the file name is never empty self.assertTrue(sanitize_filename('-', restricted=True) != '') self.assertTrue(sanitize_filename(':', restricted=True) != '') self.assertEqual(sanitize_filename( 'ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', restricted=True), 'AAAAAAAECEEEEIIIIDNOOOOOOOOEUUUUUYTHssaaaaaaaeceeeeiiiionooooooooeuuuuuythy') def test_sanitize_ids(self): self.assertEqual(sanitize_filename('_n_cd26wFpw', is_id=True), '_n_cd26wFpw') self.assertEqual(sanitize_filename('_BD_eEpuzXw', is_id=True), '_BD_eEpuzXw') self.assertEqual(sanitize_filename('N0Y__7-UOdI', is_id=True), 'N0Y__7-UOdI') def test_sanitize_path(self): if sys.platform != 'win32': return self.assertEqual(sanitize_path('abc'), 'abc') self.assertEqual(sanitize_path('abc/def'), 'abc\\def') self.assertEqual(sanitize_path('abc\\def'), 'abc\\def') self.assertEqual(sanitize_path('abc|def'), 'abc#def') self.assertEqual(sanitize_path('<>:"|?*'), '#######') self.assertEqual(sanitize_path('C:/abc/def'), 'C:\\abc\\def') self.assertEqual(sanitize_path('C?:/abc/def'), 'C##\\abc\\def') self.assertEqual(sanitize_path('\\\\?\\UNC\\ComputerName\\abc'), '\\\\?\\UNC\\ComputerName\\abc') self.assertEqual(sanitize_path('\\\\?\\UNC/ComputerName/abc'), '\\\\?\\UNC\\ComputerName\\abc') self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc') self.assertEqual(sanitize_path('\\\\?\\C:/abc'), '\\\\?\\C:\\abc') self.assertEqual(sanitize_path('\\\\?\\C:\\ab?c\\de:f'), '\\\\?\\C:\\ab#c\\de#f') self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc') self.assertEqual( sanitize_path('youtube/%(uploader)s/%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s'), 'youtube\\%(uploader)s\\%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s') self.assertEqual( sanitize_path('youtube/TheWreckingYard ./00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part'), 'youtube\\TheWreckingYard #\\00001-Not bad, Especially for Free! 
(1987 Yamaha 700)-20141116.mp4.part') self.assertEqual(sanitize_path('abc/def...'), 'abc\\def..#') self.assertEqual(sanitize_path('abc.../def'), 'abc..#\\def') self.assertEqual(sanitize_path('abc.../def...'), 'abc..#\\def..#') self.assertEqual(sanitize_path('../abc'), '..\\abc') self.assertEqual(sanitize_path('../../abc'), '..\\..\\abc') self.assertEqual(sanitize_path('./abc'), 'abc') self.assertEqual(sanitize_path('./../abc'), '..\\abc') def test_sanitize_url(self): self.assertEqual(sanitize_url('//foo.bar'), 'http://foo.bar') self.assertEqual(sanitize_url('httpss://foo.bar'), 'https://foo.bar') self.assertEqual(sanitize_url('rmtps://foo.bar'), 'rtmps://foo.bar') self.assertEqual(sanitize_url('https://foo.bar'), 'https://foo.bar') self.assertEqual(sanitize_url('foo bar'), 'foo bar') def test_sanitized_Request(self): self.assertFalse(sanitized_Request('http://foo.bar').has_header('Authorization')) self.assertFalse(sanitized_Request('http://:foo.bar').has_header('Authorization')) self.assertEqual(sanitized_Request('http://@foo.bar').get_header('Authorization'), 'Basic Og==') self.assertEqual(sanitized_Request('http://:pass@foo.bar').get_header('Authorization'), 'Basic OnBhc3M=') self.assertEqual(sanitized_Request('http://user:@foo.bar').get_header('Authorization'), 'Basic dXNlcjo=') self.assertEqual(sanitized_Request('http://user:pass@foo.bar').get_header('Authorization'), 'Basic dXNlcjpwYXNz') def test_expand_path(self): def env(var): return '%{0}%'.format(var) if sys.platform == 'win32' else '${0}'.format(var) compat_setenv('YOUTUBE_DL_EXPATH_PATH', 'expanded') self.assertEqual(expand_path(env('YOUTUBE_DL_EXPATH_PATH')), 'expanded') self.assertEqual(expand_path(env('HOME')), compat_getenv('HOME')) self.assertEqual(expand_path('~'), compat_getenv('HOME')) self.assertEqual( expand_path('~/%s' % env('YOUTUBE_DL_EXPATH_PATH')), '%s/expanded' % compat_getenv('HOME')) _uncommon_extensions = [ ('exe', 'abc.exe.ext'), ('de', 'abc.de.ext'), ('../.mp4', None), ('..\\.mp4', None), ] def assertUnsafeExtension(self, ext=None): assert_raises = self.assertRaises(_UnsafeExtensionError) assert_raises.ext = ext orig_exit = assert_raises.__exit__ def my_exit(self_, exc_type, exc_val, exc_tb): did_raise = orig_exit(exc_type, exc_val, exc_tb) if did_raise and assert_raises.ext is not None: self.assertEqual(assert_raises.ext, assert_raises.exception.extension, 'Unsafe extension not as unexpected') return did_raise assert_raises.__exit__ = types.MethodType(my_exit, assert_raises) return assert_raises def test_prepend_extension(self): self.assertEqual(prepend_extension('abc.ext', 'temp'), 'abc.temp.ext') self.assertEqual(prepend_extension('abc.ext', 'temp', 'ext'), 'abc.temp.ext') self.assertEqual(prepend_extension('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp') self.assertEqual(prepend_extension('abc', 'temp'), 'abc.temp') self.assertEqual(prepend_extension('.abc', 'temp'), '.abc.temp') self.assertEqual(prepend_extension('.abc.ext', 'temp'), '.abc.temp.ext') # Test uncommon extensions self.assertEqual(prepend_extension('abc.ext', 'bin'), 'abc.bin.ext') for ext, result in self._uncommon_extensions: with self.assertUnsafeExtension(ext): prepend_extension('abc', ext) if result: self.assertEqual(prepend_extension('abc.ext', ext, 'ext'), result) else: with self.assertUnsafeExtension(ext): prepend_extension('abc.ext', ext, 'ext') with self.assertUnsafeExtension(ext): prepend_extension('abc.unexpected_ext', ext, 'ext') def test_replace_extension(self): self.assertEqual(replace_extension('abc.ext', 
'temp'), 'abc.temp') self.assertEqual(replace_extension('abc.ext', 'temp', 'ext'), 'abc.temp') self.assertEqual(replace_extension('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp') self.assertEqual(replace_extension('abc', 'temp'), 'abc.temp') self.assertEqual(replace_extension('.abc', 'temp'), '.abc.temp') self.assertEqual(replace_extension('.abc.ext', 'temp'), '.abc.temp') # Test uncommon extensions self.assertEqual(replace_extension('abc.ext', 'bin'), 'abc.unknown_video') for ext, _ in self._uncommon_extensions: with self.assertUnsafeExtension(ext): replace_extension('abc', ext) with self.assertUnsafeExtension(ext): replace_extension('abc.ext', ext, 'ext') with self.assertUnsafeExtension(ext): replace_extension('abc.unexpected_ext', ext, 'ext') def test_subtitles_filename(self): self.assertEqual(subtitles_filename('abc.ext', 'en', 'vtt'), 'abc.en.vtt') self.assertEqual(subtitles_filename('abc.ext', 'en', 'vtt', 'ext'), 'abc.en.vtt') self.assertEqual(subtitles_filename('abc.unexpected_ext', 'en', 'vtt', 'ext'), 'abc.unexpected_ext.en.vtt') def test_remove_start(self): self.assertEqual(remove_start(None, 'A - '), None) self.assertEqual(remove_start('A - B', 'A - '), 'B') self.assertEqual(remove_start('B - A', 'A - '), 'B - A') def test_remove_end(self): self.assertEqual(remove_end(None, ' - B'), None) self.assertEqual(remove_end('A - B', ' - B'), 'A') self.assertEqual(remove_end('B - A', ' - B'), 'B - A') def test_remove_quotes(self): self.assertEqual(remove_quotes(None), None) self.assertEqual(remove_quotes('"'), '"') self.assertEqual(remove_quotes("'"), "'") self.assertEqual(remove_quotes(';'), ';') self.assertEqual(remove_quotes('";'), '";') self.assertEqual(remove_quotes('""'), '') self.assertEqual(remove_quotes('";"'), ';') def test_ordered_set(self): self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7]) self.assertEqual(orderedSet([]), []) self.assertEqual(orderedSet([1]), [1]) # keep the list ordered self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1]) def test_unescape_html(self): self.assertEqual(unescapeHTML('%20;'), '%20;') self.assertEqual(unescapeHTML('&#x2F;'), '/') self.assertEqual(unescapeHTML('&#47;'), '/') self.assertEqual(unescapeHTML('&eacute;'), 'é') self.assertEqual(unescapeHTML('&#2013266066;'), '&#2013266066;') self.assertEqual(unescapeHTML('&a&quot;'), '&a"') # HTML5 entities self.assertEqual(unescapeHTML('&period;&apos;'), '.\'') def test_date_from_str(self): self.assertEqual(date_from_str('yesterday'), date_from_str('now-1day')) self.assertEqual(date_from_str('now+7day'), date_from_str('now+1week')) self.assertEqual(date_from_str('now+14day'), date_from_str('now+2week')) self.assertEqual(date_from_str('now+365day'), date_from_str('now+1year')) self.assertEqual(date_from_str('now+30day'), date_from_str('now+1month')) def test_daterange(self): _20century = DateRange("19000101", "20000101") self.assertFalse("17890714" in _20century) _ac = DateRange("00010101") self.assertTrue("19690721" in _ac) _firstmilenium = DateRange(end="10000101") self.assertTrue("07110427" in _firstmilenium) def test_unified_dates(self): self.assertEqual(unified_strdate('December 21, 2010'), '20101221') self.assertEqual(unified_strdate('8/7/2009'), '20090708') self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214') self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011') self.assertEqual(unified_strdate('1968 12 10'), '19681210') self.assertEqual(unified_strdate('1968-12-10'), '19681210') 
self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128') self.assertEqual( unified_strdate('11/26/2014 11:30:00 AM PST', day_first=False), '20141126') self.assertEqual( unified_strdate('2/2/2015 6:47:40 PM', day_first=False), '20150202') self.assertEqual(unified_strdate('Feb 14th 2016 5:45PM'), '20160214') self.assertEqual(unified_strdate('25-09-2014'), '20140925') self.assertEqual(unified_strdate('27.02.2016 17:30'), '20160227') self.assertEqual(unified_strdate('UNKNOWN DATE FORMAT'), None) self.assertEqual(unified_strdate('Feb 7, 2016 at 6:35 pm'), '20160207') self.assertEqual(unified_strdate('July 15th, 2013'), '20130715') self.assertEqual(unified_strdate('September 1st, 2013'), '20130901') self.assertEqual(unified_strdate('Sep 2nd, 2013'), '20130902') self.assertEqual(unified_strdate('November 3rd, 2019'), '20191103') self.assertEqual(unified_strdate('October 23rd, 2005'), '20051023') def test_unified_timestamps(self): self.assertEqual(unified_timestamp('December 21, 2010'), 1292889600) self.assertEqual(unified_timestamp('8/7/2009'), 1247011200) self.assertEqual(unified_timestamp('Dec 14, 2012'), 1355443200) self.assertEqual(unified_timestamp('2012/10/11 01:56:38 +0000'), 1349920598) self.assertEqual(unified_timestamp('1968 12 10'), -33436800) self.assertEqual(unified_timestamp('1968-12-10'), -33436800) self.assertEqual(unified_timestamp('28/01/2014 21:00:00 +0100'), 1390939200) self.assertEqual( unified_timestamp('11/26/2014 11:30:00 AM PST', day_first=False), 1417001400) self.assertEqual( unified_timestamp('2/2/2015 6:47:40 PM', day_first=False), 1422902860) self.assertEqual(unified_timestamp('Feb 14th 2016 5:45PM'), 1455471900) self.assertEqual(unified_timestamp('25-09-2014'), 1411603200) self.assertEqual(unified_timestamp('27.02.2016 17:30'), 1456594200) self.assertEqual(unified_timestamp('UNKNOWN DATE FORMAT'), None) self.assertEqual(unified_timestamp('May 16, 2016 11:15 PM'), 1463440500) self.assertEqual(unified_timestamp('Feb 7, 2016 at 6:35 pm'), 1454870100) self.assertEqual(unified_timestamp('2017-03-30T17:52:41Q'), 1490896361) self.assertEqual(unified_timestamp('Sep 11, 2013 | 5:49 AM'), 1378878540) self.assertEqual(unified_timestamp('December 15, 2017 at 7:49 am'), 1513324140) self.assertEqual(unified_timestamp('2018-03-14T08:32:43.1493874+00:00'), 1521016363) self.assertEqual(unified_timestamp('December 31 1969 20:00:01 EDT'), 1) self.assertEqual(unified_timestamp('Wednesday 31 December 1969 18:01:26 MDT'), 86) self.assertEqual(unified_timestamp('12/31/1969 20:01:18 EDT', False), 78) def test_determine_ext(self): self.assertEqual(determine_ext('http://example.com/foo/bar.mp4/?download'), 'mp4') self.assertEqual(determine_ext('http://example.com/foo/bar/?download', None), None) self.assertEqual(determine_ext('http://example.com/foo/bar.nonext/?download', None), None) self.assertEqual(determine_ext('http://example.com/foo/bar/mp4?download', None), None) self.assertEqual(determine_ext('http://example.com/foo/bar.m3u8//?download'), 'm3u8') self.assertEqual(determine_ext('foobar', None), None) def test_find_xpath_attr(self): testxml = '''<root> <node/> <node x="a"/> <node x="a" y="c" /> <node x="b" y="d" /> <node x="" /> </root>''' doc = compat_etree_fromstring(testxml) self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n'), None) self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n', 'v'), None) self.assertEqual(find_xpath_attr(doc, './/node', 'n'), None) self.assertEqual(find_xpath_attr(doc, './/node', 'n', 'v'), None) 
self.assertEqual(find_xpath_attr(doc, './/node', 'x'), doc[1]) self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1]) self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'b'), doc[3]) self.assertEqual(find_xpath_attr(doc, './/node', 'y'), doc[2]) self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2]) self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'd'), doc[3]) self.assertEqual(find_xpath_attr(doc, './/node', 'x', ''), doc[4]) def test_xpath_with_ns(self): testxml = '''<root xmlns:media="http://example.com/"> <media:song> <media:author>The Author</media:author> <url>http://server.com/download.mp3</url> </media:song> </root>''' doc = compat_etree_fromstring(testxml) find = lambda p: doc.find(xpath_with_ns(p, {'media': 'http://example.com/'})) self.assertTrue(find('media:song') is not None) self.assertEqual(find('media:song/media:author').text, 'The Author') self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3') def test_xpath_element(self): doc = xml.etree.ElementTree.Element('root') div = xml.etree.ElementTree.SubElement(doc, 'div') p = xml.etree.ElementTree.SubElement(div, 'p') p.text = 'Foo' self.assertEqual(xpath_element(doc, 'div/p'), p) self.assertEqual(xpath_element(doc, ['div/p']), p) self.assertEqual(xpath_element(doc, ['div/bar', 'div/p']), p) self.assertEqual(xpath_element(doc, 'div/bar', default='default'), 'default') self.assertEqual(xpath_element(doc, ['div/bar'], default='default'), 'default') self.assertTrue(xpath_element(doc, 'div/bar') is None) self.assertTrue(xpath_element(doc, ['div/bar']) is None) self.assertTrue(xpath_element(doc, ['div/bar'], 'div/baz') is None) self.assertRaises(ExtractorError, xpath_element, doc, 'div/bar', fatal=True) self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar'], fatal=True) self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar', 'div/baz'], fatal=True) def test_xpath_text(self): testxml = '''<root> <div> <p>Foo</p> </div> </root>''' doc = compat_etree_fromstring(testxml) self.assertEqual(xpath_text(doc, 'div/p'), 'Foo') self.assertEqual(xpath_text(doc, 'div/bar', default='default'), 'default') self.assertTrue(xpath_text(doc, 'div/bar') is None) self.assertRaises(ExtractorError, xpath_text, doc, 'div/bar', fatal=True) def test_xpath_attr(self): testxml = '''<root> <div> <p x="a">Foo</p> </div> </root>''' doc = compat_etree_fromstring(testxml) self.assertEqual(xpath_attr(doc, 'div/p', 'x'), 'a') self.assertEqual(xpath_attr(doc, 'div/bar', 'x'), None) self.assertEqual(xpath_attr(doc, 'div/p', 'y'), None) self.assertEqual(xpath_attr(doc, 'div/bar', 'x', default='default'), 'default') self.assertEqual(xpath_attr(doc, 'div/p', 'y', default='default'), 'default') self.assertRaises(ExtractorError, xpath_attr, doc, 'div/bar', 'x', fatal=True) self.assertRaises(ExtractorError, xpath_attr, doc, 'div/p', 'y', fatal=True) def test_smuggle_url(self): data = {"ö": "ö", "abc": [3]} url = 'https://foo.bar/baz?x=y#a' smug_url = smuggle_url(url, data) unsmug_url, unsmug_data = unsmuggle_url(smug_url) self.assertEqual(url, unsmug_url) self.assertEqual(data, unsmug_data) res_url, res_data = unsmuggle_url(url) self.assertEqual(res_url, url) self.assertEqual(res_data, None) smug_url = smuggle_url(url, {'a': 'b'}) smug_smug_url = smuggle_url(smug_url, {'c': 'd'}) res_url, res_data = unsmuggle_url(smug_smug_url) self.assertEqual(res_url, url) self.assertEqual(res_data, {'a': 'b', 'c': 'd'}) def test_shell_quote(self): args = ['ffmpeg', '-i', encodeFilename('ñ€ß\'.mp4')] 
self.assertEqual( shell_quote(args), """ffmpeg -i 'ñ€ß'"'"'.mp4'""" if compat_os_name != 'nt' else '''ffmpeg -i "ñ€ß'.mp4"''') def test_float_or_none(self): self.assertEqual(float_or_none('42.42'), 42.42) self.assertEqual(float_or_none('42'), 42.0) self.assertEqual(float_or_none(''), None) self.assertEqual(float_or_none(None), None) self.assertEqual(float_or_none([]), None) self.assertEqual(float_or_none(set()), None) def test_int_or_none(self): self.assertEqual(int_or_none(42), 42) self.assertEqual(int_or_none('42'), 42) self.assertEqual(int_or_none(''), None) self.assertEqual(int_or_none(None), None) self.assertEqual(int_or_none([]), None) self.assertEqual(int_or_none(set()), None) self.assertEqual(int_or_none('42', base=8), 34) self.assertRaises(TypeError, int_or_none(42, base=8)) def test_str_to_int(self): self.assertEqual(str_to_int('123,456'), 123456) self.assertEqual(str_to_int('123.456'), 123456) self.assertEqual(str_to_int(523), 523) # Python 3 has no long if sys.version_info < (3, 0): eval('self.assertEqual(str_to_int(123456L), 123456)') self.assertEqual(str_to_int('noninteger'), None) self.assertEqual(str_to_int([]), None) def test_url_basename(self): self.assertEqual(url_basename('http://foo.de/'), '') self.assertEqual(url_basename('http://foo.de/bar/baz'), 'baz') self.assertEqual(url_basename('http://foo.de/bar/baz?x=y'), 'baz') self.assertEqual(url_basename('http://foo.de/bar/baz#x=y'), 'baz') self.assertEqual(url_basename('http://foo.de/bar/baz/'), 'baz') self.assertEqual( url_basename('http://media.w3.org/2010/05/sintel/trailer.mp4'), 'trailer.mp4') def test_base_url(self): self.assertEqual(base_url('http://foo.de/'), 'http://foo.de/') self.assertEqual(base_url('http://foo.de/bar'), 'http://foo.de/') self.assertEqual(base_url('http://foo.de/bar/'), 'http://foo.de/bar/') self.assertEqual(base_url('http://foo.de/bar/baz'), 'http://foo.de/bar/') self.assertEqual(base_url('http://foo.de/bar/baz?x=z/x/c'), 'http://foo.de/bar/') def test_urljoin(self): self.assertEqual(urljoin('http://foo.de/', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt') self.assertEqual(urljoin(b'http://foo.de/', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt') self.assertEqual(urljoin('http://foo.de/', b'/a/b/c.txt'), 'http://foo.de/a/b/c.txt') self.assertEqual(urljoin(b'http://foo.de/', b'/a/b/c.txt'), 'http://foo.de/a/b/c.txt') self.assertEqual(urljoin('//foo.de/', '/a/b/c.txt'), '//foo.de/a/b/c.txt') self.assertEqual(urljoin('http://foo.de/', 'a/b/c.txt'), 'http://foo.de/a/b/c.txt') self.assertEqual(urljoin('http://foo.de', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt') self.assertEqual(urljoin('http://foo.de', 'a/b/c.txt'), 'http://foo.de/a/b/c.txt') self.assertEqual(urljoin('http://foo.de/', 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt') self.assertEqual(urljoin('http://foo.de/', '//foo.de/a/b/c.txt'), '//foo.de/a/b/c.txt') self.assertEqual(urljoin(None, 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt') self.assertEqual(urljoin(None, '//foo.de/a/b/c.txt'), '//foo.de/a/b/c.txt') self.assertEqual(urljoin('', 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt') self.assertEqual(urljoin(['foobar'], 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt') self.assertEqual(urljoin('http://foo.de/', None), None) self.assertEqual(urljoin('http://foo.de/', ''), None) self.assertEqual(urljoin('http://foo.de/', ['foobar']), None) self.assertEqual(urljoin('http://foo.de/a/b/c.txt', '.././../d.txt'), 'http://foo.de/d.txt') self.assertEqual(urljoin('http://foo.de/a/b/c.txt', 'rtmp://foo.de'), 'rtmp://foo.de') 
self.assertEqual(urljoin(None, 'rtmp://foo.de'), 'rtmp://foo.de') def test_url_or_none(self): self.assertEqual(url_or_none(None), None) self.assertEqual(url_or_none(''), None) self.assertEqual(url_or_none('foo'), None) self.assertEqual(url_or_none('http://foo.de'), 'http://foo.de') self.assertEqual(url_or_none('https://foo.de'), 'https://foo.de') self.assertEqual(url_or_none('http$://foo.de'), None) self.assertEqual(url_or_none('http://foo.de'), 'http://foo.de') self.assertEqual(url_or_none('//foo.de'), '//foo.de') self.assertEqual(url_or_none('s3://foo.de'), None) self.assertEqual(url_or_none('rtmpte://foo.de'), 'rtmpte://foo.de') self.assertEqual(url_or_none('mms://foo.de'), 'mms://foo.de') self.assertEqual(url_or_none('rtspu://foo.de'), 'rtspu://foo.de') self.assertEqual(url_or_none('ftps://foo.de'), 'ftps://foo.de') def test_parse_age_limit(self): self.assertEqual(parse_age_limit(None), None) self.assertEqual(parse_age_limit(False), None) self.assertEqual(parse_age_limit('invalid'), None) self.assertEqual(parse_age_limit(0), 0) self.assertEqual(parse_age_limit(18), 18) self.assertEqual(parse_age_limit(21), 21) self.assertEqual(parse_age_limit(22), None) self.assertEqual(parse_age_limit('18'), 18) self.assertEqual(parse_age_limit('18+'), 18) self.assertEqual(parse_age_limit('PG-13'), 13) self.assertEqual(parse_age_limit('TV-14'), 14) self.assertEqual(parse_age_limit('TV-MA'), 17) self.assertEqual(parse_age_limit('TV14'), 14) self.assertEqual(parse_age_limit('TV_G'), 0) def test_parse_duration(self): self.assertEqual(parse_duration(None), None) self.assertEqual(parse_duration(False), None) self.assertEqual(parse_duration('invalid'), None) self.assertEqual(parse_duration('1'), 1) self.assertEqual(parse_duration('1337:12'), 80232) self.assertEqual(parse_duration('9:12:43'), 33163) self.assertEqual(parse_duration('12:00'), 720) self.assertEqual(parse_duration('00:01:01'), 61) self.assertEqual(parse_duration('x:y'), None) self.assertEqual(parse_duration('3h11m53s'), 11513) self.assertEqual(parse_duration('3h 11m 53s'), 11513) self.assertEqual(parse_duration('3 hours 11 minutes 53 seconds'), 11513) self.assertEqual(parse_duration('3 hours 11 mins 53 secs'), 11513) self.assertEqual(parse_duration('3 hours, 11 minutes, 53 seconds'), 11513) self.assertEqual(parse_duration('3 hours, 11 mins, 53 secs'), 11513) self.assertEqual(parse_duration('62m45s'), 3765) self.assertEqual(parse_duration('6m59s'), 419) self.assertEqual(parse_duration('49s'), 49) self.assertEqual(parse_duration('0h0m0s'), 0) self.assertEqual(parse_duration('0m0s'), 0) self.assertEqual(parse_duration('0s'), 0) self.assertEqual(parse_duration('01:02:03.05'), 3723.05) self.assertEqual(parse_duration('T30M38S'), 1838) self.assertEqual(parse_duration('5 s'), 5) self.assertEqual(parse_duration('3 min'), 180) self.assertEqual(parse_duration('2.5 hours'), 9000) self.assertEqual(parse_duration('02:03:04'), 7384) self.assertEqual(parse_duration('01:02:03:04'), 93784) self.assertEqual(parse_duration('1 hour 3 minutes'), 3780) self.assertEqual(parse_duration('87 Min.'), 5220) self.assertEqual(parse_duration('PT1H0.040S'), 3600.04) self.assertEqual(parse_duration('PT00H03M30SZ'), 210) self.assertEqual(parse_duration('P0Y0M0DT0H4M20.880S'), 260.88) self.assertEqual(parse_duration('01:02:03:050'), 3723.05) self.assertEqual(parse_duration('103:050'), 103.05) self.assertEqual(parse_duration('1HR 3MIN'), 3780) self.assertEqual(parse_duration('2hrs 3mins'), 7380) def test_fix_xml_ampersands(self): self.assertEqual( fix_xml_ampersands('"&x=y&z=a'), 
'"&amp;x=y&amp;z=a') self.assertEqual( fix_xml_ampersands('"&amp;x=y&wrong;&z=a'),
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
true
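Most helpers exercised in test_utils.py are pure functions and can be tried directly; the expected values in the comments below are taken from the assertions above:

from __future__ import unicode_literals

from youtube_dl.utils import (
    parse_duration,
    sanitize_filename,
    unified_strdate,
    url_basename,
)

print(sanitize_filename('this: that', restricted=True))  # 'this_-_that'
print(parse_duration('3h11m53s'))                        # 11513
print(unified_strdate('December 21, 2010'))              # '20101221'
print(url_basename('http://foo.de/bar/baz?x=y'))         # 'baz'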
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_http.py
test/test_http.py
#!/usr/bin/env python # coding: utf-8 from __future__ import unicode_literals # Allow direct execution import os import sys import unittest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import contextlib import gzip import io import ssl import tempfile import threading import zlib # avoid deprecated alias assertRaisesRegexp if hasattr(unittest.TestCase, 'assertRaisesRegex'): unittest.TestCase.assertRaisesRegexp = unittest.TestCase.assertRaisesRegex try: import brotli except ImportError: brotli = None try: from urllib.request import pathname2url except ImportError: from urllib import pathname2url from youtube_dl.compat import ( compat_http_cookiejar_Cookie, compat_http_server, compat_str as str, compat_urllib_error, compat_urllib_HTTPError, compat_urllib_parse, compat_urllib_request, ) from youtube_dl.utils import ( sanitized_Request, update_Request, urlencode_postdata, ) from test.helper import ( expectedFailureIf, FakeYDL, FakeLogger, http_server_port, ) from youtube_dl import YoutubeDL TEST_DIR = os.path.dirname(os.path.abspath(__file__)) class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler): protocol_version = 'HTTP/1.1' # work-around old/new -style class inheritance def super(self, meth_name, *args, **kwargs): from types import MethodType try: super() fn = lambda s, m, *a, **k: getattr(super(), m)(*a, **k) except TypeError: fn = lambda s, m, *a, **k: getattr(compat_http_server.BaseHTTPRequestHandler, m)(s, *a, **k) self.super = MethodType(fn, self) return self.super(meth_name, *args, **kwargs) def log_message(self, format, *args): pass def _headers(self): payload = str(self.headers).encode('utf-8') self.send_response(200) self.send_header('Content-Type', 'application/json') self.send_header('Content-Length', str(len(payload))) self.end_headers() self.wfile.write(payload) def _redirect(self): self.send_response(int(self.path[len('/redirect_'):])) self.send_header('Location', '/method') self.send_header('Content-Length', '0') self.end_headers() def _method(self, method, payload=None): self.send_response(200) self.send_header('Content-Length', str(len(payload or ''))) self.send_header('Method', method) self.end_headers() if payload: self.wfile.write(payload) def _status(self, status): payload = '<html>{0} NOT FOUND</html>'.format(status).encode('utf-8') self.send_response(int(status)) self.send_header('Content-Type', 'text/html; charset=utf-8') self.send_header('Content-Length', str(len(payload))) self.end_headers() self.wfile.write(payload) def _read_data(self): if 'Content-Length' in self.headers: return self.rfile.read(int(self.headers['Content-Length'])) def _test_url(self, path, host='127.0.0.1', scheme='http', port=None): return '{0}://{1}:{2}/{3}'.format( scheme, host, port if port is not None else http_server_port(self.server), path) def do_POST(self): data = self._read_data() if self.path.startswith('/redirect_'): self._redirect() elif self.path.startswith('/method'): self._method('POST', data) elif self.path.startswith('/headers'): self._headers() else: self._status(404) def do_HEAD(self): if self.path.startswith('/redirect_'): self._redirect() elif self.path.startswith('/method'): self._method('HEAD') else: self._status(404) def do_PUT(self): data = self._read_data() if self.path.startswith('/redirect_'): self._redirect() elif self.path.startswith('/method'): self._method('PUT', data) else: self._status(404) def do_GET(self): def respond(payload=b'<html><video src="/vid.mp4" /></html>', payload_type='text/html; charset=utf-8', 
payload_encoding=None, resp_code=200): self.send_response(resp_code) self.send_header('Content-Type', payload_type) if payload_encoding: self.send_header('Content-Encoding', payload_encoding) self.send_header('Content-Length', str(len(payload))) # required for persistent connections self.end_headers() self.wfile.write(payload) def gzip_compress(p): buf = io.BytesIO() with contextlib.closing(gzip.GzipFile(fileobj=buf, mode='wb')) as f: f.write(p) return buf.getvalue() if self.path == '/video.html': respond() elif self.path == '/vid.mp4': respond(b'\x00\x00\x00\x00\x20\x66\x74[video]', 'video/mp4') elif self.path == '/302': if sys.version_info[0] == 3: # XXX: Python 3 http server does not allow non-ASCII header values self.send_response(404) self.end_headers() return new_url = self._test_url('中文.html') self.send_response(302) self.send_header(b'Location', new_url.encode('utf-8')) self.end_headers() elif self.path == '/%E4%B8%AD%E6%96%87.html': respond() elif self.path == '/%c7%9f': respond() elif self.path == '/redirect_dotsegments': self.send_response(301) # redirect to /headers but with dot segments before self.send_header('Location', '/a/b/./../../headers') self.send_header('Content-Length', '0') self.end_headers() elif self.path.startswith('/redirect_'): self._redirect() elif self.path.startswith('/method'): self._method('GET') elif self.path.startswith('/headers'): self._headers() elif self.path.startswith('/308-to-headers'): self.send_response(308) self.send_header('Location', '/headers') self.send_header('Content-Length', '0') self.end_headers() elif self.path == '/trailing_garbage': payload = b'<html><video src="/vid.mp4" /></html>' compressed = gzip_compress(payload) + b'trailing garbage' respond(compressed, payload_encoding='gzip') elif self.path == '/302-non-ascii-redirect': new_url = self._test_url('中文.html') # actually respond with permanent redirect self.send_response(301) self.send_header('Location', new_url) self.send_header('Content-Length', '0') self.end_headers() elif self.path == '/content-encoding': encodings = self.headers.get('ytdl-encoding', '') payload = b'<html><video src="/vid.mp4" /></html>' for encoding in filter(None, (e.strip() for e in encodings.split(','))): if encoding == 'br' and brotli: payload = brotli.compress(payload) elif encoding == 'gzip': payload = gzip_compress(payload) elif encoding == 'deflate': payload = zlib.compress(payload) elif encoding == 'unsupported': payload = b'raw' break else: self._status(415) return respond(payload, payload_encoding=encodings) else: self._status(404) def send_header(self, keyword, value): """ Forcibly allow HTTP server to send non percent-encoded non-ASCII characters in headers. This is against what is defined in RFC 3986: but we need to test that we support this since some sites incorrectly do this. """ if keyword.lower() == 'connection': return self.super('send_header', keyword, value) if not hasattr(self, '_headers_buffer'): self._headers_buffer = [] self._headers_buffer.append('{0}: {1}\r\n'.format(keyword, value).encode('utf-8')) def end_headers(self): if hasattr(self, '_headers_buffer'): self.wfile.write(b''.join(self._headers_buffer)) self._headers_buffer = [] self.super('end_headers') class TestHTTP(unittest.TestCase): # when does it make sense to check the SSL certificate? 
_check_cert = ( sys.version_info >= (3, 2) or (sys.version_info[0] == 2 and sys.version_info[1:] >= (7, 19))) def setUp(self): # HTTP server self.http_httpd = compat_http_server.HTTPServer( ('127.0.0.1', 0), HTTPTestRequestHandler) self.http_port = http_server_port(self.http_httpd) self.http_server_thread = threading.Thread(target=self.http_httpd.serve_forever) self.http_server_thread.daemon = True self.http_server_thread.start() try: from http.server import ThreadingHTTPServer except ImportError: try: from socketserver import ThreadingMixIn except ImportError: from SocketServer import ThreadingMixIn class ThreadingHTTPServer(ThreadingMixIn, compat_http_server.HTTPServer): pass # HTTPS server certfn = os.path.join(TEST_DIR, 'testcert.pem') self.https_httpd = ThreadingHTTPServer( ('127.0.0.1', 0), HTTPTestRequestHandler) try: sslctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) sslctx.verify_mode = ssl.CERT_NONE sslctx.check_hostname = False sslctx.load_cert_chain(certfn, None) self.https_httpd.socket = sslctx.wrap_socket( self.https_httpd.socket, server_side=True) except AttributeError: self.https_httpd.socket = ssl.wrap_socket( self.https_httpd.socket, certfile=certfn, server_side=True) self.https_port = http_server_port(self.https_httpd) self.https_server_thread = threading.Thread(target=self.https_httpd.serve_forever) self.https_server_thread.daemon = True self.https_server_thread.start() def tearDown(self): def closer(svr): def _closer(): svr.shutdown() svr.server_close() return _closer shutdown_thread = threading.Thread(target=closer(self.http_httpd)) shutdown_thread.start() self.http_server_thread.join(2.0) shutdown_thread = threading.Thread(target=closer(self.https_httpd)) shutdown_thread.start() self.https_server_thread.join(2.0) def _test_url(self, path, host='127.0.0.1', scheme='http', port=None): return '{0}://{1}:{2}/{3}'.format( scheme, host, port if port is not None else self.https_port if scheme == 'https' else self.http_port, path) @unittest.skipUnless(_check_cert, 'No support for certificate check in SSL') def test_nocheckcertificate(self): with FakeYDL({'logger': FakeLogger()}) as ydl: with self.assertRaises(compat_urllib_error.URLError): ydl.urlopen(sanitized_Request(self._test_url('headers', scheme='https'))) with FakeYDL({'logger': FakeLogger(), 'nocheckcertificate': True}) as ydl: r = ydl.urlopen(sanitized_Request(self._test_url('headers', scheme='https'))) self.assertEqual(r.getcode(), 200) r.close() def test_percent_encode(self): with FakeYDL() as ydl: # Unicode characters should be encoded with uppercase percent-encoding res = ydl.urlopen(sanitized_Request(self._test_url('中文.html'))) self.assertEqual(res.getcode(), 200) res.close() # don't normalize existing percent encodings res = ydl.urlopen(sanitized_Request(self._test_url('%c7%9f'))) self.assertEqual(res.getcode(), 200) res.close() def test_unicode_path_redirection(self): with FakeYDL() as ydl: r = ydl.urlopen(sanitized_Request(self._test_url('302-non-ascii-redirect'))) self.assertEqual(r.url, self._test_url('%E4%B8%AD%E6%96%87.html')) r.close() def test_redirect(self): with FakeYDL() as ydl: def do_req(redirect_status, method, check_no_content=False): data = b'testdata' if method in ('POST', 'PUT') else None res = ydl.urlopen(sanitized_Request( self._test_url('redirect_{0}'.format(redirect_status)), method=method, data=data)) if check_no_content: self.assertNotIn('Content-Type', res.headers) return res.read().decode('utf-8'), res.headers.get('method', '') # A 303 must either use GET or HEAD for subsequent request 
self.assertEqual(do_req(303, 'POST'), ('', 'GET')) self.assertEqual(do_req(303, 'HEAD'), ('', 'HEAD')) self.assertEqual(do_req(303, 'PUT'), ('', 'GET')) # 301 and 302 turn POST only into a GET, with no Content-Type self.assertEqual(do_req(301, 'POST', True), ('', 'GET')) self.assertEqual(do_req(301, 'HEAD'), ('', 'HEAD')) self.assertEqual(do_req(302, 'POST', True), ('', 'GET')) self.assertEqual(do_req(302, 'HEAD'), ('', 'HEAD')) self.assertEqual(do_req(301, 'PUT'), ('testdata', 'PUT')) self.assertEqual(do_req(302, 'PUT'), ('testdata', 'PUT')) # 307 and 308 should not change method for m in ('POST', 'PUT'): self.assertEqual(do_req(307, m), ('testdata', m)) self.assertEqual(do_req(308, m), ('testdata', m)) self.assertEqual(do_req(307, 'HEAD'), ('', 'HEAD')) self.assertEqual(do_req(308, 'HEAD'), ('', 'HEAD')) # These should not redirect and instead raise an HTTPError for code in (300, 304, 305, 306): with self.assertRaises(compat_urllib_HTTPError): do_req(code, 'GET') # Jython 2.7.1 times out for some reason @expectedFailureIf(sys.platform.startswith('java') and sys.version_info < (2, 7, 2)) def test_content_type(self): # https://github.com/yt-dlp/yt-dlp/commit/379a4f161d4ad3e40932dcf5aca6e6fb9715ab28 with FakeYDL({'nocheckcertificate': True}) as ydl: # method should be auto-detected as POST r = sanitized_Request(self._test_url('headers', scheme='https'), data=urlencode_postdata({'test': 'test'})) headers = ydl.urlopen(r).read().decode('utf-8') self.assertIn('Content-Type: application/x-www-form-urlencoded', headers) # test http r = sanitized_Request(self._test_url('headers'), data=urlencode_postdata({'test': 'test'})) headers = ydl.urlopen(r).read().decode('utf-8') self.assertIn('Content-Type: application/x-www-form-urlencoded', headers) def test_update_req(self): req = sanitized_Request('http://example.com') assert req.data is None assert req.get_method() == 'GET' assert not req.has_header('Content-Type') # Test that zero-byte payloads will be sent req = update_Request(req, data=b'') assert req.data == b'' assert req.get_method() == 'POST' # yt-dl expects data to be encoded and Content-Type to be added by sender # assert req.get_header('Content-Type') == 'application/x-www-form-urlencoded' def test_cookiejar(self): with FakeYDL() as ydl: ydl.cookiejar.set_cookie(compat_http_cookiejar_Cookie( 0, 'test', 'ytdl', None, False, '127.0.0.1', True, False, '/headers', True, False, None, False, None, None, {})) data = ydl.urlopen(sanitized_Request( self._test_url('headers'))).read().decode('utf-8') self.assertIn('Cookie: test=ytdl', data) def test_passed_cookie_header(self): # We should accept a Cookie header being passed as in normal headers and handle it appropriately. 
with FakeYDL() as ydl: # Specified Cookie header should be used res = ydl.urlopen(sanitized_Request( self._test_url('headers'), headers={'Cookie': 'test=test'})).read().decode('utf-8') self.assertIn('Cookie: test=test', res) # Specified Cookie header should be removed on any redirect res = ydl.urlopen(sanitized_Request( self._test_url('308-to-headers'), headers={'Cookie': 'test=test'})).read().decode('utf-8') self.assertNotIn('Cookie: test=test', res) # Specified Cookie header should override global cookiejar for that request ydl.cookiejar.set_cookie(compat_http_cookiejar_Cookie( 0, 'test', 'ytdlp', None, False, '127.0.0.1', True, False, '/headers', True, False, None, False, None, None, {})) data = ydl.urlopen(sanitized_Request( self._test_url('headers'), headers={'Cookie': 'test=test'})).read().decode('utf-8') self.assertNotIn('Cookie: test=ytdlp', data) self.assertIn('Cookie: test=test', data) def test_no_compression_compat_header(self): with FakeYDL() as ydl: data = ydl.urlopen( sanitized_Request( self._test_url('headers'), headers={'Youtubedl-no-compression': True})).read() self.assertIn(b'Accept-Encoding: identity', data) self.assertNotIn(b'youtubedl-no-compression', data.lower()) def test_gzip_trailing_garbage(self): # https://github.com/ytdl-org/youtube-dl/commit/aa3e950764337ef9800c936f4de89b31c00dfcf5 # https://github.com/ytdl-org/youtube-dl/commit/6f2ec15cee79d35dba065677cad9da7491ec6e6f with FakeYDL() as ydl: data = ydl.urlopen(sanitized_Request(self._test_url('trailing_garbage'))).read().decode('utf-8') self.assertEqual(data, '<html><video src="/vid.mp4" /></html>') def __test_compression(self, encoding): with FakeYDL() as ydl: res = ydl.urlopen( sanitized_Request( self._test_url('content-encoding'), headers={'ytdl-encoding': encoding})) # decoded encodings are removed: only check for valid decompressed data self.assertEqual(res.read(), b'<html><video src="/vid.mp4" /></html>') @unittest.skipUnless(brotli, 'brotli support is not installed') def test_brotli(self): self.__test_compression('br') def test_deflate(self): self.__test_compression('deflate') def test_gzip(self): self.__test_compression('gzip') def test_multiple_encodings(self): # https://www.rfc-editor.org/rfc/rfc9110.html#section-8.4 for pair in ('gzip,deflate', 'deflate, gzip', 'gzip, gzip', 'deflate, deflate'): self.__test_compression(pair) def test_unsupported_encoding(self): # it should return the raw content with FakeYDL() as ydl: res = ydl.urlopen( sanitized_Request( self._test_url('content-encoding'), headers={'ytdl-encoding': 'unsupported'})) self.assertEqual(res.headers.get('Content-Encoding'), 'unsupported') self.assertEqual(res.read(), b'raw') def test_remove_dot_segments(self): with FakeYDL() as ydl: res = ydl.urlopen(sanitized_Request(self._test_url('a/b/./../../headers'))) self.assertEqual(compat_urllib_parse.urlparse(res.geturl()).path, '/headers') res = ydl.urlopen(sanitized_Request(self._test_url('redirect_dotsegments'))) self.assertEqual(compat_urllib_parse.urlparse(res.geturl()).path, '/headers') def _build_proxy_handler(name): class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler): proxy_name = name def log_message(self, format, *args): pass def do_GET(self): self.send_response(200) self.send_header('Content-Type', 'text/plain; charset=utf-8') self.end_headers() self.wfile.write('{0}: {1}'.format(self.proxy_name, self.path).encode('utf-8')) return HTTPTestRequestHandler class TestProxy(unittest.TestCase): def setUp(self): self.proxy = compat_http_server.HTTPServer( ('127.0.0.1', 
0), _build_proxy_handler('normal')) self.port = http_server_port(self.proxy) self.proxy_thread = threading.Thread(target=self.proxy.serve_forever) self.proxy_thread.daemon = True self.proxy_thread.start() self.geo_proxy = compat_http_server.HTTPServer( ('127.0.0.1', 0), _build_proxy_handler('geo')) self.geo_port = http_server_port(self.geo_proxy) self.geo_proxy_thread = threading.Thread(target=self.geo_proxy.serve_forever) self.geo_proxy_thread.daemon = True self.geo_proxy_thread.start() def tearDown(self): def closer(svr): def _closer(): svr.shutdown() svr.server_close() return _closer shutdown_thread = threading.Thread(target=closer(self.proxy)) shutdown_thread.start() self.proxy_thread.join(2.0) shutdown_thread = threading.Thread(target=closer(self.geo_proxy)) shutdown_thread.start() self.geo_proxy_thread.join(2.0) def _test_proxy(self, host='127.0.0.1', port=None): return '{0}:{1}'.format( host, port if port is not None else self.port) def test_proxy(self): geo_proxy = self._test_proxy(port=self.geo_port) ydl = YoutubeDL({ 'proxy': self._test_proxy(), 'geo_verification_proxy': geo_proxy, }) url = 'http://foo.com/bar' response = ydl.urlopen(url).read().decode('utf-8') self.assertEqual(response, 'normal: {0}'.format(url)) req = compat_urllib_request.Request(url) req.add_header('Ytdl-request-proxy', geo_proxy) response = ydl.urlopen(req).read().decode('utf-8') self.assertEqual(response, 'geo: {0}'.format(url)) def test_proxy_with_idn(self): ydl = YoutubeDL({ 'proxy': self._test_proxy(), }) url = 'http://中文.tw/' response = ydl.urlopen(url).read().decode('utf-8') # b'xn--fiq228c' is '中文'.encode('idna') self.assertEqual(response, 'normal: http://xn--fiq228c.tw/') class TestFileURL(unittest.TestCase): # See https://github.com/ytdl-org/youtube-dl/issues/8227 def test_file_urls(self): tf = tempfile.NamedTemporaryFile(delete=False) tf.write(b'foobar') tf.close() url = compat_urllib_parse.urljoin('file://', pathname2url(tf.name)) with FakeYDL() as ydl: self.assertRaisesRegexp( compat_urllib_error.URLError, 'file:// scheme is explicitly disabled in youtube-dl for security reasons', ydl.urlopen, url) # not yet implemented """ with FakeYDL({'enable_file_urls': True}) as ydl: res = ydl.urlopen(url) self.assertEqual(res.read(), b'foobar') res.close() """ os.unlink(tf.name) if __name__ == '__main__': unittest.main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
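The `/trailing_garbage` route in test/test_http.py above appends junk bytes after a complete gzip stream, and test_gzip_trailing_garbage expects the payload to still be recovered. Below is a minimal, self-contained Python 3 sketch of that tolerance; it is an illustration of the behaviour under test, not youtube-dl's actual response-decoding code.

import gzip
import io
import zlib


def gunzip_ignore_trailing(blob):
    # wbits=47 (32 + 15) auto-detects gzip/zlib headers; a decompressobj
    # stops at the end of the first complete stream and leaves any junk
    # in .unused_data instead of raising.
    d = zlib.decompressobj(47)
    out = d.decompress(blob)
    assert d.eof  # the embedded stream itself must be complete (Python 3.3+)
    return out


payload = b'<html><video src="/vid.mp4" /></html>'
buf = io.BytesIO()
with gzip.GzipFile(fileobj=buf, mode='wb') as f:
    f.write(payload)
blob = buf.getvalue() + b'trailing garbage'

assert gunzip_ignore_trailing(blob) == payload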
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_downloader_external.py
test/test_downloader_external.py
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals

# Allow direct execution
import os
import re
import sys
import subprocess
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from test.helper import (
    FakeLogger,
    FakeYDL,
    http_server_port,
    try_rm,
)
from youtube_dl import YoutubeDL
from youtube_dl.compat import (
    compat_contextlib_suppress,
    compat_http_cookiejar_Cookie,
    compat_http_server,
    compat_kwargs,
)
from youtube_dl.utils import (
    encodeFilename,
    join_nonempty,
)
from youtube_dl.downloader.external import (
    Aria2cFD,
    Aria2pFD,
    AxelFD,
    CurlFD,
    FFmpegFD,
    HttpieFD,
    WgetFD,
)
from youtube_dl.postprocessor import (
    FFmpegPostProcessor,
)
import threading

TEST_SIZE = 10 * 1024

TEST_COOKIE = {
    'version': 0,
    'name': 'test',
    'value': 'ytdlp',
    'port': None,
    'port_specified': False,
    'domain': '.example.com',
    'domain_specified': True,
    'domain_initial_dot': False,
    'path': '/',
    'path_specified': True,
    'secure': False,
    'expires': None,
    'discard': False,
    'comment': None,
    'comment_url': None,
    'rest': {},
}

TEST_COOKIE_VALUE = join_nonempty('name', 'value', delim='=', from_dict=TEST_COOKIE)

TEST_INFO = {'url': 'http://www.example.com/'}


def cookiejar_Cookie(**cookie_args):
    return compat_http_cookiejar_Cookie(**compat_kwargs(cookie_args))


def ifExternalFDAvailable(externalFD):
    return unittest.skipUnless(externalFD.available(),
                               externalFD.get_basename() + ' not found')


class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
    def log_message(self, format, *args):
        pass

    def send_content_range(self, total=None):
        range_header = self.headers.get('Range')
        start = end = None
        if range_header:
            mobj = re.match(r'bytes=(\d+)-(\d+)', range_header)
            if mobj:
                start, end = (int(mobj.group(i)) for i in (1, 2))
        valid_range = start is not None and end is not None
        if valid_range:
            content_range = 'bytes %d-%d' % (start, end)
            if total:
                content_range += '/%d' % total
            self.send_header('Content-Range', content_range)
        return (end - start + 1) if valid_range else total

    def serve(self, range=True, content_length=True):
        self.send_response(200)
        self.send_header('Content-Type', 'video/mp4')
        size = TEST_SIZE
        if range:
            size = self.send_content_range(TEST_SIZE)
        if content_length:
            self.send_header('Content-Length', size)
        self.end_headers()
        self.wfile.write(b'#' * size)

    def do_GET(self):
        if self.path == '/regular':
            self.serve()
        elif self.path == '/no-content-length':
            self.serve(content_length=False)
        elif self.path == '/no-range':
            self.serve(range=False)
        elif self.path == '/no-range-no-content-length':
            self.serve(range=False, content_length=False)
        else:
            assert False, 'unrecognised server path'


@ifExternalFDAvailable(Aria2pFD)
class TestAria2pFD(unittest.TestCase):
    def setUp(self):
        self.httpd = compat_http_server.HTTPServer(
            ('127.0.0.1', 0), HTTPTestRequestHandler)
        self.port = http_server_port(self.httpd)
        self.server_thread = threading.Thread(target=self.httpd.serve_forever)
        self.server_thread.daemon = True
        self.server_thread.start()

    def download(self, params, ep):
        with subprocess.Popen(
            ['aria2c', '--enable-rpc'],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL
        ) as process:
            if not process.poll():
                filename = 'testfile.mp4'
                params['logger'] = FakeLogger()
                params['outtmpl'] = filename
                ydl = YoutubeDL(params)
                try_rm(encodeFilename(filename))
                self.assertEqual(ydl.download(['http://127.0.0.1:%d/%s' % (self.port, ep)]), 0)
                self.assertEqual(os.path.getsize(encodeFilename(filename)), TEST_SIZE)
                try_rm(encodeFilename(filename))
            process.kill()

    def download_all(self, params):
        for ep in ('regular', 'no-content-length', 'no-range',
                   'no-range-no-content-length'):
            self.download(params, ep)

    def test_regular(self):
        self.download_all({'external_downloader': 'aria2p'})

    def test_chunked(self):
        self.download_all({
            'external_downloader': 'aria2p',
            'http_chunk_size': 1000,
        })


@ifExternalFDAvailable(HttpieFD)
class TestHttpieFD(unittest.TestCase):
    def test_make_cmd(self):
        with FakeYDL() as ydl:
            downloader = HttpieFD(ydl, {})
            self.assertEqual(
                downloader._make_cmd('test', TEST_INFO),
                ['http', '--download', '--output', 'test', 'http://www.example.com/'])

            # Test cookie header is added
            ydl.cookiejar.set_cookie(cookiejar_Cookie(**TEST_COOKIE))
            self.assertEqual(
                downloader._make_cmd('test', TEST_INFO),
                ['http', '--download', '--output', 'test',
                 'http://www.example.com/', 'Cookie:' + TEST_COOKIE_VALUE])


@ifExternalFDAvailable(AxelFD)
class TestAxelFD(unittest.TestCase):
    def test_make_cmd(self):
        with FakeYDL() as ydl:
            downloader = AxelFD(ydl, {})
            self.assertEqual(
                downloader._make_cmd('test', TEST_INFO),
                ['axel', '-o', 'test', '--', 'http://www.example.com/'])

            # Test cookie header is added
            ydl.cookiejar.set_cookie(cookiejar_Cookie(**TEST_COOKIE))
            self.assertEqual(
                downloader._make_cmd('test', TEST_INFO),
                ['axel', '-o', 'test', '-H', 'Cookie: ' + TEST_COOKIE_VALUE,
                 '--max-redirect=0', '--', 'http://www.example.com/'])


@ifExternalFDAvailable(WgetFD)
class TestWgetFD(unittest.TestCase):
    def test_make_cmd(self):
        with FakeYDL() as ydl:
            downloader = WgetFD(ydl, {})
            self.assertNotIn('--load-cookies', downloader._make_cmd('test', TEST_INFO))
            # Test cookiejar tempfile arg is added
            ydl.cookiejar.set_cookie(cookiejar_Cookie(**TEST_COOKIE))
            self.assertIn('--load-cookies', downloader._make_cmd('test', TEST_INFO))


@ifExternalFDAvailable(CurlFD)
class TestCurlFD(unittest.TestCase):
    def test_make_cmd(self):
        with FakeYDL() as ydl:
            downloader = CurlFD(ydl, {})
            self.assertNotIn('--cookie', downloader._make_cmd('test', TEST_INFO))
            # Test cookie header is added
            ydl.cookiejar.set_cookie(cookiejar_Cookie(**TEST_COOKIE))
            self.assertIn('--cookie', downloader._make_cmd('test', TEST_INFO))
            self.assertIn(TEST_COOKIE_VALUE, downloader._make_cmd('test', TEST_INFO))


@ifExternalFDAvailable(Aria2cFD)
class TestAria2cFD(unittest.TestCase):
    def test_make_cmd(self):
        with FakeYDL() as ydl:
            downloader = Aria2cFD(ydl, {})
            downloader._make_cmd('test', TEST_INFO)
            self.assertFalse(hasattr(downloader, '_cookies_tempfile'))

            # Test cookiejar tempfile arg is added
            ydl.cookiejar.set_cookie(cookiejar_Cookie(**TEST_COOKIE))
            cmd = downloader._make_cmd('test', TEST_INFO)
            self.assertIn('--load-cookies=%s' % downloader._cookies_tempfile, cmd)


# Handle delegated availability
def ifFFmpegFDAvailable(externalFD):
    # raise SkipTest, or set False!
    avail = ifExternalFDAvailable(externalFD) and False
    with compat_contextlib_suppress(Exception):
        avail = FFmpegPostProcessor(downloader=None).available
    return unittest.skipUnless(
        avail, externalFD.get_basename() + ' not found')


@ifFFmpegFDAvailable(FFmpegFD)
class TestFFmpegFD(unittest.TestCase):
    _args = []

    def _test_cmd(self, args):
        self._args = args

    def test_make_cmd(self):
        with FakeYDL() as ydl:
            downloader = FFmpegFD(ydl, {})
            downloader._debug_cmd = self._test_cmd
            info_dict = TEST_INFO.copy()
            info_dict['ext'] = 'mp4'

            downloader._call_downloader('test', info_dict)
            self.assertEqual(self._args, [
                'ffmpeg', '-y', '-i', 'http://www.example.com/',
                '-c', 'copy', '-f', 'mp4', 'file:test'])

            # Test cookies arg is added
            ydl.cookiejar.set_cookie(cookiejar_Cookie(**TEST_COOKIE))
            downloader._call_downloader('test', info_dict)
            self.assertEqual(self._args, [
                'ffmpeg', '-y',
                '-cookies', TEST_COOKIE_VALUE + '; path=/; domain=.example.com;\r\n',
                '-i', 'http://www.example.com/',
                '-c', 'copy', '-f', 'mp4', 'file:test'])


if __name__ == '__main__':
    unittest.main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
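The cookie assertions in test/test_downloader_external.py above all pin down one behaviour: when the cookiejar is non-empty, the generated command line must carry the cookies. A hedged sketch of that pattern for a curl-style command follows; the function name and flag choices here are illustrative, not the real CurlFD._make_cmd internals.

try:
    from http.cookiejar import Cookie, CookieJar  # Python 3
except ImportError:
    from cookielib import Cookie, CookieJar  # Python 2


def make_curl_cmd(url, outfile, cookiejar):
    # Mirror the shape the tests assert: base command first, then a
    # '--cookie name=value' pair only when cookies exist.
    cmd = ['curl', '--location', '--output', outfile]
    pairs = '; '.join('%s=%s' % (c.name, c.value) for c in cookiejar)
    if pairs:
        cmd += ['--cookie', pairs]
    cmd.append(url)
    return cmd


jar = CookieJar()
jar.set_cookie(Cookie(
    0, 'test', 'ytdlp', None, False, '.example.com', True, False,
    '/', True, False, None, False, None, None, {}))
print(make_curl_cmd('http://www.example.com/', 'test', jar))
# -> [..., '--cookie', 'test=ytdlp', 'http://www.example.com/']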
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_subtitles.py
test/test_subtitles.py
#!/usr/bin/env python from __future__ import unicode_literals # Allow direct execution import os import sys import unittest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from test.helper import FakeYDL, md5 from youtube_dl.extractor import ( YoutubeIE, DailymotionIE, TEDIE, VimeoIE, WallaIE, CeskaTelevizeIE, LyndaIE, NPOIE, ComedyCentralIE, NRKTVIE, RaiPlayIE, VikiIE, ThePlatformIE, ThePlatformFeedIE, RTVEALaCartaIE, DemocracynowIE, ) class BaseTestSubtitles(unittest.TestCase): url = None IE = None def setUp(self): self.DL = FakeYDL() self.ie = self.IE() self.DL.add_info_extractor(self.ie) if not self.IE.working(): print('Skipping: %s marked as not _WORKING' % self.IE.ie_key()) self.skipTest('IE marked as not _WORKING') def getInfoDict(self): info_dict = self.DL.extract_info(self.url, download=False) return info_dict def getSubtitles(self): info_dict = self.getInfoDict() subtitles = info_dict['requested_subtitles'] if not subtitles: return subtitles for sub_info in subtitles.values(): if sub_info.get('data') is None: uf = self.DL.urlopen(sub_info['url']) sub_info['data'] = uf.read().decode('utf-8') return dict((l, sub_info['data']) for l, sub_info in subtitles.items()) class TestYoutubeSubtitles(BaseTestSubtitles): # Available subtitles for QRS8MkLhQmM: # Language formats # ru vtt, ttml, srv3, srv2, srv1, json3 # fr vtt, ttml, srv3, srv2, srv1, json3 # en vtt, ttml, srv3, srv2, srv1, json3 # nl vtt, ttml, srv3, srv2, srv1, json3 # de vtt, ttml, srv3, srv2, srv1, json3 # ko vtt, ttml, srv3, srv2, srv1, json3 # it vtt, ttml, srv3, srv2, srv1, json3 # zh-Hant vtt, ttml, srv3, srv2, srv1, json3 # hi vtt, ttml, srv3, srv2, srv1, json3 # pt-BR vtt, ttml, srv3, srv2, srv1, json3 # es-MX vtt, ttml, srv3, srv2, srv1, json3 # ja vtt, ttml, srv3, srv2, srv1, json3 # pl vtt, ttml, srv3, srv2, srv1, json3 url = 'QRS8MkLhQmM' IE = YoutubeIE def test_youtube_allsubtitles(self): self.DL.params['writesubtitles'] = True self.DL.params['allsubtitles'] = True subtitles = self.getSubtitles() self.assertEqual(len(subtitles.keys()), 13) self.assertEqual(md5(subtitles['en']), 'ae1bd34126571a77aabd4d276b28044d') self.assertEqual(md5(subtitles['it']), '0e0b667ba68411d88fd1c5f4f4eab2f9') for lang in ['fr', 'de']: self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang) def _test_subtitles_format(self, fmt, md5_hash, lang='en'): self.DL.params['writesubtitles'] = True self.DL.params['subtitlesformat'] = fmt subtitles = self.getSubtitles() self.assertEqual(md5(subtitles[lang]), md5_hash) def test_youtube_subtitles_ttml_format(self): self._test_subtitles_format('ttml', 'c97ddf1217390906fa9fbd34901f3da2') def test_youtube_subtitles_vtt_format(self): self._test_subtitles_format('vtt', 'ae1bd34126571a77aabd4d276b28044d') def test_youtube_subtitles_json3_format(self): self._test_subtitles_format('json3', '688dd1ce0981683867e7fe6fde2a224b') def _test_automatic_captions(self, url, lang): self.url = url self.DL.params['writeautomaticsub'] = True self.DL.params['subtitleslangs'] = [lang] subtitles = self.getSubtitles() self.assertTrue(subtitles[lang] is not None) def test_youtube_automatic_captions(self): # Available automatic captions for 8YoUxe5ncPo: # Language formats (all in vtt, ttml, srv3, srv2, srv1, json3) # gu, zh-Hans, zh-Hant, gd, ga, gl, lb, la, lo, tt, tr, # lv, lt, tk, th, tg, te, fil, haw, yi, ceb, yo, de, da, # el, eo, en, eu, et, es, ru, rw, ro, bn, be, bg, uk, jv, # bs, ja, or, xh, co, ca, cy, cs, ps, pt, pa, vi, pl, hy, # hr, ht, hu, hmn, 
hi, ha, mg, uz, ml, mn, mi, mk, ur, # mt, ms, mr, ug, ta, my, af, sw, is, am, # *it*, iw, sv, ar, # su, zu, az, id, ig, nl, no, ne, ny, fr, ku, fy, fa, fi, # ka, kk, sr, sq, ko, kn, km, st, sk, si, so, sn, sm, sl, # ky, sd # ... self._test_automatic_captions('8YoUxe5ncPo', 'it') @unittest.skip('ASR subs all in all supported langs now') def test_youtube_translated_subtitles(self): # This video has a subtitles track, which can be translated (#4555) self._test_automatic_captions('Ky9eprVWzlI', 'it') def test_youtube_nosubtitles(self): self.DL.expect_warning('video doesn\'t have subtitles') # Available automatic captions for 8YoUxe5ncPo: # ... # 8YoUxe5ncPo has no subtitles self.url = '8YoUxe5ncPo' self.DL.params['writesubtitles'] = True self.DL.params['allsubtitles'] = True subtitles = self.getSubtitles() self.assertFalse(subtitles) class TestDailymotionSubtitles(BaseTestSubtitles): url = 'http://www.dailymotion.com/video/xczg00' IE = DailymotionIE def test_allsubtitles(self): self.DL.params['writesubtitles'] = True self.DL.params['allsubtitles'] = True subtitles = self.getSubtitles() self.assertTrue(len(subtitles.keys()) >= 6) self.assertEqual(md5(subtitles['en']), '976553874490cba125086bbfea3ff76f') self.assertEqual(md5(subtitles['fr']), '594564ec7d588942e384e920e5341792') for lang in ['es', 'fr', 'de']: self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang) def test_nosubtitles(self): self.DL.expect_warning('video doesn\'t have subtitles') self.url = 'http://www.dailymotion.com/video/x12u166_le-zapping-tele-star-du-08-aout-2013_tv' self.DL.params['writesubtitles'] = True self.DL.params['allsubtitles'] = True subtitles = self.getSubtitles() self.assertFalse(subtitles) @unittest.skip('IE broken') class TestTedSubtitles(BaseTestSubtitles): url = 'http://www.ted.com/talks/dan_dennett_on_our_consciousness.html' IE = TEDIE def test_allsubtitles(self): self.DL.params['writesubtitles'] = True self.DL.params['allsubtitles'] = True subtitles = self.getSubtitles() self.assertTrue(len(subtitles.keys()) >= 28) self.assertEqual(md5(subtitles['en']), '4262c1665ff928a2dada178f62cb8d14') self.assertEqual(md5(subtitles['fr']), '66a63f7f42c97a50f8c0e90bc7797bb5') for lang in ['es', 'fr', 'de']: self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang) class TestVimeoSubtitles(BaseTestSubtitles): url = 'http://vimeo.com/76979871' IE = VimeoIE def test_allsubtitles(self): self.DL.params['writesubtitles'] = True self.DL.params['allsubtitles'] = True subtitles = self.getSubtitles() self.assertEqual(set(subtitles.keys()), set(['de', 'en', 'es', 'fr'])) self.assertEqual(md5(subtitles['en']), '386cbc9320b94e25cb364b97935e5dd1') self.assertEqual(md5(subtitles['fr']), 'c9b69eef35bc6641c0d4da8a04f9dfac') def test_nosubtitles(self): self.DL.expect_warning('video doesn\'t have subtitles') self.url = 'http://vimeo.com/68093876' self.DL.params['writesubtitles'] = True self.DL.params['allsubtitles'] = True subtitles = self.getSubtitles() self.assertFalse(subtitles) @unittest.skip('IE broken') class TestWallaSubtitles(BaseTestSubtitles): url = 'http://vod.walla.co.il/movie/2705958/the-yes-men' IE = WallaIE def test_allsubtitles(self): self.DL.expect_warning('Automatic Captions not supported by this server') self.DL.params['writesubtitles'] = True self.DL.params['allsubtitles'] = True subtitles = self.getSubtitles() self.assertEqual(set(subtitles.keys()), set(['heb'])) self.assertEqual(md5(subtitles['heb']), 'e758c5d7cb982f6bef14f377ec7a3920') def 
test_nosubtitles(self): self.DL.expect_warning('video doesn\'t have subtitles') self.url = 'http://vod.walla.co.il/movie/2642630/one-direction-all-for-one' self.DL.params['writesubtitles'] = True self.DL.params['allsubtitles'] = True subtitles = self.getSubtitles() self.assertFalse(subtitles) @unittest.skip('IE broken') class TestCeskaTelevizeSubtitles(BaseTestSubtitles): url = 'http://www.ceskatelevize.cz/ivysilani/10600540290-u6-uzasny-svet-techniky' IE = CeskaTelevizeIE def test_allsubtitles(self): self.DL.expect_warning('Automatic Captions not supported by this server') self.DL.params['writesubtitles'] = True self.DL.params['allsubtitles'] = True subtitles = self.getSubtitles() self.assertEqual(set(subtitles.keys()), set(['cs'])) self.assertTrue(len(subtitles['cs']) > 20000) def test_nosubtitles(self): self.DL.expect_warning('video doesn\'t have subtitles') self.url = 'http://www.ceskatelevize.cz/ivysilani/ivysilani/10441294653-hyde-park-civilizace/214411058091220' self.DL.params['writesubtitles'] = True self.DL.params['allsubtitles'] = True subtitles = self.getSubtitles() self.assertFalse(subtitles) @unittest.skip('IE broken') class TestLyndaSubtitles(BaseTestSubtitles): url = 'http://www.lynda.com/Bootstrap-tutorials/Using-exercise-files/110885/114408-4.html' IE = LyndaIE def test_allsubtitles(self): self.DL.params['writesubtitles'] = True self.DL.params['allsubtitles'] = True subtitles = self.getSubtitles() self.assertEqual(set(subtitles.keys()), set(['en'])) self.assertEqual(md5(subtitles['en']), '09bbe67222259bed60deaa26997d73a7') @unittest.skip('IE broken') class TestNPOSubtitles(BaseTestSubtitles): url = 'http://www.npo.nl/nos-journaal/28-08-2014/POW_00722860' IE = NPOIE def test_allsubtitles(self): self.DL.params['writesubtitles'] = True self.DL.params['allsubtitles'] = True subtitles = self.getSubtitles() self.assertEqual(set(subtitles.keys()), set(['nl'])) self.assertEqual(md5(subtitles['nl']), 'fc6435027572b63fb4ab143abd5ad3f4') @unittest.skip('IE broken') class TestMTVSubtitles(BaseTestSubtitles): url = 'http://www.cc.com/video-clips/p63lk0/adam-devine-s-house-party-chasing-white-swans' IE = ComedyCentralIE def getInfoDict(self): return super(TestMTVSubtitles, self).getInfoDict()['entries'][0] def test_allsubtitles(self): self.DL.params['writesubtitles'] = True self.DL.params['allsubtitles'] = True subtitles = self.getSubtitles() self.assertEqual(set(subtitles.keys()), set(['en'])) self.assertEqual(md5(subtitles['en']), '78206b8d8a0cfa9da64dc026eea48961') class TestNRKSubtitles(BaseTestSubtitles): url = 'http://tv.nrk.no/serie/ikke-gjoer-dette-hjemme/DMPV73000411/sesong-2/episode-1' IE = NRKTVIE def test_allsubtitles(self): self.DL.params['writesubtitles'] = True self.DL.params['allsubtitles'] = True self.DL.params['format'] = 'best/bestvideo' subtitles = self.getSubtitles() self.assertEqual(set(subtitles.keys()), set(['nb-ttv'])) self.assertEqual(md5(subtitles['nb-ttv']), '67e06ff02d0deaf975e68f6cb8f6a149') class TestRaiPlaySubtitles(BaseTestSubtitles): IE = RaiPlayIE def test_subtitles_key(self): self.url = 'http://www.raiplay.it/video/2014/04/Report-del-07042014-cb27157f-9dd0-4aee-b788-b1f67643a391.html' self.DL.params['writesubtitles'] = True self.DL.params['allsubtitles'] = True subtitles = self.getSubtitles() self.assertEqual(set(subtitles.keys()), set(['it'])) self.assertEqual(md5(subtitles['it']), 'b1d90a98755126b61e667567a1f6680a') def test_subtitles_array_key(self): self.url = 
'https://www.raiplay.it/video/2020/12/Report---04-01-2021-2e90f1de-8eee-4de4-ac0e-78d21db5b600.html' self.DL.params['writesubtitles'] = True self.DL.params['allsubtitles'] = True subtitles = self.getSubtitles() self.assertEqual(set(subtitles.keys()), set(['it'])) self.assertEqual(md5(subtitles['it']), '4b3264186fbb103508abe5311cfcb9cd') @unittest.skip('IE broken - DRM only') class TestVikiSubtitles(BaseTestSubtitles): url = 'http://www.viki.com/videos/1060846v-punch-episode-18' IE = VikiIE def test_allsubtitles(self): self.DL.params['writesubtitles'] = True self.DL.params['allsubtitles'] = True subtitles = self.getSubtitles() self.assertEqual(set(subtitles.keys()), set(['en'])) self.assertEqual(md5(subtitles['en']), '53cb083a5914b2d84ef1ab67b880d18a') class TestThePlatformSubtitles(BaseTestSubtitles): # from http://www.3playmedia.com/services-features/tools/integrations/theplatform/ # (see http://theplatform.com/about/partners/type/subtitles-closed-captioning/) url = 'theplatform:JFUjUE1_ehvq' IE = ThePlatformIE def test_allsubtitles(self): self.DL.params['writesubtitles'] = True self.DL.params['allsubtitles'] = True subtitles = self.getSubtitles() self.assertEqual(set(subtitles.keys()), set(['en'])) self.assertEqual(md5(subtitles['en']), '97e7670cbae3c4d26ae8bcc7fdd78d4b') @unittest.skip('IE broken') class TestThePlatformFeedSubtitles(BaseTestSubtitles): url = 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207' IE = ThePlatformFeedIE def test_allsubtitles(self): self.DL.params['writesubtitles'] = True self.DL.params['allsubtitles'] = True subtitles = self.getSubtitles() self.assertEqual(set(subtitles.keys()), set(['en'])) self.assertEqual(md5(subtitles['en']), '48649a22e82b2da21c9a67a395eedade') class TestRtveSubtitles(BaseTestSubtitles): url = 'http://www.rtve.es/alacarta/videos/los-misterios-de-laura/misterios-laura-capitulo-32-misterio-del-numero-17-2-parte/2428621/' IE = RTVEALaCartaIE def test_allsubtitles(self): print('Skipping, only available from Spain') return self.DL.params['writesubtitles'] = True self.DL.params['allsubtitles'] = True subtitles = self.getSubtitles() self.assertEqual(set(subtitles.keys()), set(['es'])) self.assertEqual(md5(subtitles['es']), '69e70cae2d40574fb7316f31d6eb7fca') class TestDemocracynowSubtitles(BaseTestSubtitles): url = 'http://www.democracynow.org/shows/2015/7/3' IE = DemocracynowIE def test_allsubtitles(self): self.DL.params['writesubtitles'] = True self.DL.params['allsubtitles'] = True subtitles = self.getSubtitles() self.assertEqual(set(subtitles.keys()), set(['en'])) self.assertEqual(md5(subtitles['en']), 'a3cc4c0b5eadd74d9974f1c1f5101045') def test_subtitles_in_page(self): self.url = 'http://www.democracynow.org/2015/7/3/this_flag_comes_down_today_bree' self.DL.params['writesubtitles'] = True self.DL.params['allsubtitles'] = True subtitles = self.getSubtitles() self.assertEqual(set(subtitles.keys()), set(['en'])) self.assertEqual(md5(subtitles['en']), 'a3cc4c0b5eadd74d9974f1c1f5101045') if __name__ == '__main__': unittest.main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
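Every case in test/test_subtitles.py above follows one recipe: set writesubtitles/allsubtitles, extract metadata without downloading, then read requested_subtitles. A hedged usage sketch of that flow outside the test harness (requires live network access; the Vimeo URL is the one TestVimeoSubtitles uses, and the expected language set is taken from that test):

from youtube_dl import YoutubeDL

params = {
    'writesubtitles': True,   # opt in to manual subtitles
    'allsubtitles': True,     # ... in every available language
    'skip_download': True,    # metadata only, no media download
}
with YoutubeDL(params) as ydl:
    info = ydl.extract_info('http://vimeo.com/76979871', download=False)
    subs = info.get('requested_subtitles') or {}
    # per TestVimeoSubtitles, expect ['de', 'en', 'es', 'fr']
    print(sorted(subs.keys()))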
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_unicode_literals.py
test/test_unicode_literals.py
from __future__ import unicode_literals

# Allow direct execution
import os
import re
import sys
import unittest

dirn = os.path.dirname

rootDir = dirn(dirn(os.path.abspath(__file__)))

sys.path.insert(0, rootDir)

IGNORED_FILES = [
    'setup.py',  # http://bugs.python.org/issue13943
    'conf.py',
    'buildserver.py',
    'get-pip.py',
]

IGNORED_DIRS = [
    '.git',
    '.tox',
]

from test.helper import assertRegexpMatches
from youtube_dl.compat import compat_open as open


class TestUnicodeLiterals(unittest.TestCase):
    def test_all_files(self):
        for dirpath, dirnames, filenames in os.walk(rootDir):
            for ignore_dir in IGNORED_DIRS:
                if ignore_dir in dirnames:
                    # If we remove the directory from dirnames os.walk won't
                    # recurse into it
                    dirnames.remove(ignore_dir)
            for basename in filenames:
                if not basename.endswith('.py'):
                    continue
                if basename in IGNORED_FILES:
                    continue

                fn = os.path.join(dirpath, basename)
                with open(fn, encoding='utf-8') as inf:
                    code = inf.read()

                if "'" not in code and '"' not in code:
                    continue
                assertRegexpMatches(
                    self,
                    code,
                    r'(?:(?:#.*?|\s*)\n)*from __future__ import (?:[a-z_]+,\s*)*unicode_literals',
                    'unicode_literals import missing in %s' % fn)

                m = re.search(r'(?<=\s)u[\'"](?!\)|,|$)', code)
                if m is not None:
                    self.assertTrue(
                        m is None,
                        'u present in %s, around %s' % (
                            fn, code[m.start() - 10:m.end() + 10]))


if __name__ == '__main__':
    unittest.main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
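Both checks in test_all_files above are plain regular expressions: one requires a `from __future__ import ... unicode_literals` near the top of each file, the other rejects redundant u'' string prefixes. A small self-contained demonstration of the second pattern:

import re

# Same pattern as the test: a u string prefix preceded by whitespace that is
# not immediately followed by ')', ',' or end-of-line (to limit false hits).
U_PREFIX = re.compile(r'(?<=\s)u[\'"](?!\)|,|$)')

assert U_PREFIX.search("x = u'redundant'") is not None
assert U_PREFIX.search("y = 'fine'") is None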
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_options.py
test/test_options.py
# coding: utf-8
from __future__ import unicode_literals

# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from youtube_dl.options import _hide_login_info


class TestOptions(unittest.TestCase):
    def test_hide_login_info(self):
        self.assertEqual(_hide_login_info(['-u', 'foo', '-p', 'bar']),
                         ['-u', 'PRIVATE', '-p', 'PRIVATE'])
        self.assertEqual(_hide_login_info(['-u']), ['-u'])
        self.assertEqual(_hide_login_info(['-u', 'foo', '-u', 'bar']),
                         ['-u', 'PRIVATE', '-u', 'PRIVATE'])
        self.assertEqual(_hide_login_info(['--username=foo']),
                         ['--username=PRIVATE'])


if __name__ == '__main__':
    unittest.main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
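For context, here is a hedged reimplementation of the behaviour test_hide_login_info pins down: mask the value that follows a credential option, whether passed as a separate token or as `--opt=value`. This is illustrative only; the real logic lives in youtube_dl/options.py and its option list may differ.

PRIVATE_OPTS = {'-u', '--username', '-p', '--password', '--video-password'}


def hide_login_info(opts):
    masked = list(opts)
    for i, opt in enumerate(masked):
        if opt.split('=', 1)[0] in PRIVATE_OPTS:
            if '=' in opt:
                # --username=foo -> --username=PRIVATE
                masked[i] = opt.split('=', 1)[0] + '=PRIVATE'
            elif i + 1 < len(masked):
                # -u foo -> -u PRIVATE; a trailing bare option is left alone
                masked[i + 1] = 'PRIVATE'
    return masked


assert hide_login_info(['-u', 'foo', '-p', 'bar']) == ['-u', 'PRIVATE', '-p', 'PRIVATE']
assert hide_login_info(['--username=foo']) == ['--username=PRIVATE']
assert hide_login_info(['-u']) == ['-u']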
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_youtube_signature.py
test/test_youtube_signature.py
#!/usr/bin/env python # coding: utf-8 from __future__ import unicode_literals # Allow direct execution import os import sys import unittest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import re import string from youtube_dl.compat import ( compat_contextlib_suppress, compat_open as open, compat_str, compat_urlretrieve, ) from test.helper import FakeYDL from youtube_dl.extractor import YoutubeIE from youtube_dl.jsinterp import JSInterpreter _SIG_TESTS = [ ( 'https://s.ytimg.com/yts/jsbin/html5player-vflHOr_nV.js', 86, '>=<;:/.-[+*)(\'&%$#"!ZYX0VUTSRQPONMLKJIHGFEDCBA\\yxwvutsrqponmlkjihgfedcba987654321', ), ( 'https://s.ytimg.com/yts/jsbin/html5player-vfldJ8xgI.js', 85, '3456789a0cdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS[UVWXYZ!"#$%&\'()*+,-./:;<=>?@', ), ( 'https://s.ytimg.com/yts/jsbin/html5player-vfle-mVwz.js', 90, ']\\[@?>=<;:/.-,+*)(\'&%$#"hZYXWVUTSRQPONMLKJIHGFEDCBAzyxwvutsrqponmlkjiagfedcb39876', ), ( 'https://s.ytimg.com/yts/jsbin/html5player-en_US-vfl0Cbn9e.js', 84, 'O1I3456789abcde0ghijklmnopqrstuvwxyzABCDEFGHfJKLMN2PQRSTUVW@YZ!"#$%&\'()*+,-./:;<=', ), ( 'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflXGBaUN.js', '2ACFC7A61CA478CD21425E5A57EBD73DDC78E22A.2094302436B2D377D14A3BBA23022D023B8BC25AA', 'A52CB8B320D22032ABB3A41D773D2B6342034902.A22E87CDD37DBE75A5E52412DC874AC16A7CFCA2', ), ( 'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflBb0OQx.js', 84, '123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ0STUVWXYZ!"#$%&\'()*+,@./:;<=>', ), ( 'https://s.ytimg.com/yts/jsbin/html5player-en_US-vfl9FYC6l.js', 83, '123456789abcdefghijklmnopqr0tuvwxyzABCDETGHIJKLMNOPQRS>UVWXYZ!"#$%&\'()*+,-./:;<=F', ), ( 'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflCGk6yw/html5player.js', '4646B5181C6C3020DF1D9C7FCFEA.AD80ABF70C39BD369CCCAE780AFBB98FA6B6CB42766249D9488C288', '82C8849D94266724DC6B6AF89BBFA087EACCD963.B93C07FBA084ACAEFCF7C9D1FD0203C6C1815B6B', ), ( 'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflKjOTVq/html5player.js', '312AA52209E3623129A412D56A40F11CB0AF14AE.3EE09501CB14E3BCDC3B2AE808BF3F1D14E7FBF12', '112AA5220913623229A412D56A40F11CB0AF14AE.3EE0950FCB14EEBCDC3B2AE808BF331D14E7FBF3', ), ( 'https://www.youtube.com/s/player/6ed0d907/player_ias.vflset/en_US/base.js', '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA', 'AOq0QJ8wRAIgXmPlOPSBkkUs1bYFYlJCfe29xx8j7v1pDL2QwbdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJoOySqa0', ), ( 'https://www.youtube.com/s/player/3bb1f723/player_ias.vflset/en_US/base.js', '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA', 'MyOSJXtKI3m-uME_jv7-pT12gOFC02RFkGoqWpzE0Cs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA', ), ( 'https://www.youtube.com/s/player/2f1832d2/player_ias.vflset/en_US/base.js', '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA', '0QJ8wRAIgXmPlOPSBkkUs1bYFYlJCfe29xxAj7v1pDL0QwbdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJ2OySqa0q', ), ( 'https://www.youtube.com/s/player/643afba4/tv-player-ias.vflset/tv-player-ias.js', '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA', 'AAOAOq0QJ8wRAIgXmPlOPSBkkUs1bYFYlJCfe29xx8j7vgpDL0QwbdV06sCIEzpWqMGkFR20CFOS21Tp-7vj_EMu-m37KtXJoOy1', ), ( 'https://www.youtube.com/s/player/363db69b/player_ias.vflset/en_US/base.js', 
'2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA', '0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpz2ICs6EVdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA', ), ( 'https://www.youtube.com/s/player/363db69b/player_ias_tce.vflset/en_US/base.js', '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA', '0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpz2ICs6EVdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA', ), ( 'https://www.youtube.com/s/player/4fcd6e4a/player_ias.vflset/en_US/base.js', '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA', 'wAOAOq0QJ8ARAIgXmPlOPSBkkUs1bYFYlJCfe29xx8q7v1pDL0QwbdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJoOySqa0', ), ( 'https://www.youtube.com/s/player/4fcd6e4a/player_ias_tce.vflset/en_US/base.js', '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA', 'wAOAOq0QJ8ARAIgXmPlOPSBkkUs1bYFYlJCfe29xx8q7v1pDL0QwbdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJoOySqa0', ), ( 'https://www.youtube.com/s/player/20830619/player_ias.vflset/en_US/base.js', '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA', '7AOq0QJ8wRAIgXmPlOPSBkkAs1bYFYlJCfe29xx8jOv1pDL0Q2bdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJoOySqa0qaw', ), ( 'https://www.youtube.com/s/player/20830619/player_ias_tce.vflset/en_US/base.js', '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA', '7AOq0QJ8wRAIgXmPlOPSBkkAs1bYFYlJCfe29xx8jOv1pDL0Q2bdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJoOySqa0qaw', ), ( 'https://www.youtube.com/s/player/20830619/player-plasma-ias-phone-en_US.vflset/base.js', '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA', '7AOq0QJ8wRAIgXmPlOPSBkkAs1bYFYlJCfe29xx8jOv1pDL0Q2bdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJoOySqa0qaw', ), ( 'https://www.youtube.com/s/player/20830619/player-plasma-ias-tablet-en_US.vflset/base.js', '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA', '7AOq0QJ8wRAIgXmPlOPSBkkAs1bYFYlJCfe29xx8jOv1pDL0Q2bdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJoOySqa0qaw', ), ( 'https://www.youtube.com/s/player/8a8ac953/player_ias_tce.vflset/en_US/base.js', '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA', 'IAOAOq0QJ8wRAAgXmPlOPSBkkUs1bYFYlJCfe29xx8j7v1pDL0QwbdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_E2u-m37KtXJoOySqa0', ), ( 'https://www.youtube.com/s/player/8a8ac953/tv-player-es6.vflset/tv-player-es6.js', '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA', 'IAOAOq0QJ8wRAAgXmPlOPSBkkUs1bYFYlJCfe29xx8j7v1pDL0QwbdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_E2u-m37KtXJoOySqa0', ), ] _NSIG_TESTS = [ ( 'https://www.youtube.com/s/player/7862ca1f/player_ias.vflset/en_US/base.js', 'X_LCxVDjAavgE5t', 'yxJ1dM6iz5ogUg', ), ( 'https://www.youtube.com/s/player/9216d1f7/player_ias.vflset/en_US/base.js', 'SLp9F5bwjAdhE9F-', 'gWnb9IK2DJ8Q1w', ), ( 'https://www.youtube.com/s/player/f8cb7a3b/player_ias.vflset/en_US/base.js', 'oBo2h5euWy6osrUt', 'ivXHpm7qJjJN', ), ( 'https://www.youtube.com/s/player/2dfe380c/player_ias.vflset/en_US/base.js', 'oBo2h5euWy6osrUt', '3DIBbn3qdQ', ), ( 'https://www.youtube.com/s/player/f1ca6900/player_ias.vflset/en_US/base.js', 'cu3wyu6LQn2hse', 
'jvxetvmlI9AN9Q', ), ( 'https://www.youtube.com/s/player/8040e515/player_ias.vflset/en_US/base.js', 'wvOFaY-yjgDuIEg5', 'HkfBFDHmgw4rsw', ), ( 'https://www.youtube.com/s/player/e06dea74/player_ias.vflset/en_US/base.js', 'AiuodmaDDYw8d3y4bf', 'ankd8eza2T6Qmw', ), ( 'https://www.youtube.com/s/player/5dd88d1d/player-plasma-ias-phone-en_US.vflset/base.js', 'kSxKFLeqzv_ZyHSAt', 'n8gS8oRlHOxPFA', ), ( 'https://www.youtube.com/s/player/324f67b9/player_ias.vflset/en_US/base.js', 'xdftNy7dh9QGnhW', '22qLGxrmX8F1rA', ), ( 'https://www.youtube.com/s/player/4c3f79c5/player_ias.vflset/en_US/base.js', 'TDCstCG66tEAO5pR9o', 'dbxNtZ14c-yWyw', ), ( 'https://www.youtube.com/s/player/c81bbb4a/player_ias.vflset/en_US/base.js', 'gre3EcLurNY2vqp94', 'Z9DfGxWP115WTg', ), ( 'https://www.youtube.com/s/player/1f7d5369/player_ias.vflset/en_US/base.js', 'batNX7sYqIJdkJ', 'IhOkL_zxbkOZBw', ), ( 'https://www.youtube.com/s/player/009f1d77/player_ias.vflset/en_US/base.js', '5dwFHw8aFWQUQtffRq', 'audescmLUzI3jw', ), ( 'https://www.youtube.com/s/player/dc0c6770/player_ias.vflset/en_US/base.js', '5EHDMgYLV6HPGk_Mu-kk', 'n9lUJLHbxUI0GQ', ), ( 'https://www.youtube.com/s/player/c2199353/player_ias.vflset/en_US/base.js', '5EHDMgYLV6HPGk_Mu-kk', 'AD5rgS85EkrE7', ), ( 'https://www.youtube.com/s/player/113ca41c/player_ias.vflset/en_US/base.js', 'cgYl-tlYkhjT7A', 'hI7BBr2zUgcmMg', ), ( 'https://www.youtube.com/s/player/c57c113c/player_ias.vflset/en_US/base.js', 'M92UUMHa8PdvPd3wyM', '3hPqLJsiNZx7yA', ), ( 'https://www.youtube.com/s/player/5a3b6271/player_ias.vflset/en_US/base.js', 'B2j7f_UPT4rfje85Lu_e', 'm5DmNymaGQ5RdQ', ), ( 'https://www.youtube.com/s/player/7a062b77/player_ias.vflset/en_US/base.js', 'NRcE3y3mVtm_cV-W', 'VbsCYUATvqlt5w', ), ( 'https://www.youtube.com/s/player/dac945fd/player_ias.vflset/en_US/base.js', 'o8BkRxXhuYsBCWi6RplPdP', '3Lx32v_hmzTm6A', ), ( 'https://www.youtube.com/s/player/6f20102c/player_ias.vflset/en_US/base.js', 'lE8DhoDmKqnmJJ', 'pJTTX6XyJP2BYw', ), ( 'https://www.youtube.com/s/player/cfa9e7cb/player_ias.vflset/en_US/base.js', 'aCi3iElgd2kq0bxVbQ', 'QX1y8jGb2IbZ0w', ), ( 'https://www.youtube.com/s/player/8c7583ff/player_ias.vflset/en_US/base.js', '1wWCVpRR96eAmMI87L', 'KSkWAVv1ZQxC3A', ), ( 'https://www.youtube.com/s/player/b7910ca8/player_ias.vflset/en_US/base.js', '_hXMCwMt9qE310D', 'LoZMgkkofRMCZQ', ), ( 'https://www.youtube.com/s/player/590f65a6/player_ias.vflset/en_US/base.js', '1tm7-g_A9zsI8_Lay_', 'xI4Vem4Put_rOg', ), ( 'https://www.youtube.com/s/player/b22ef6e7/player_ias.vflset/en_US/base.js', 'b6HcntHGkvBLk_FRf', 'kNPW6A7FyP2l8A', ), ( 'https://www.youtube.com/s/player/3400486c/player_ias.vflset/en_US/base.js', 'lL46g3XifCKUZn1Xfw', 'z767lhet6V2Skl', ), ( 'https://www.youtube.com/s/player/5604538d/player_ias.vflset/en_US/base.js', '7X-he4jjvMx7BCX', 'sViSydX8IHtdWA', ), ( 'https://www.youtube.com/s/player/20dfca59/player_ias.vflset/en_US/base.js', '-fLCxedkAk4LUTK2', 'O8kfRq1y1eyHGw', ), ( 'https://www.youtube.com/s/player/b12cc44b/player_ias.vflset/en_US/base.js', 'keLa5R2U00sR9SQK', 'N1OGyujjEwMnLw', ), ( 'https://www.youtube.com/s/player/3bb1f723/player_ias.vflset/en_US/base.js', 'gK15nzVyaXE9RsMP3z', 'ZFFWFLPWx9DEgQ', ), ( 'https://www.youtube.com/s/player/f8f53e1a/player_ias.vflset/en_US/base.js', 'VTQOUOv0mCIeJ7i8kZB', 'kcfD8wy0sNLyNQ', ), ( 'https://www.youtube.com/s/player/2f1832d2/player_ias.vflset/en_US/base.js', 'YWt1qdbe8SAfkoPHW5d', 'RrRjWQOJmBiP', ), ( 'https://www.youtube.com/s/player/9c6dfc4a/player_ias.vflset/en_US/base.js', 'jbu7ylIosQHyJyJV', 'uwI0ESiynAmhNg', ), ( 
'https://www.youtube.com/s/player/f6e09c70/player_ias.vflset/en_US/base.js', 'W9HJZKktxuYoDTqW', 'jHbbkcaxm54', ), ( 'https://www.youtube.com/s/player/f6e09c70/player_ias_tce.vflset/en_US/base.js', 'W9HJZKktxuYoDTqW', 'jHbbkcaxm54', ), ( 'https://www.youtube.com/s/player/e7567ecf/player_ias_tce.vflset/en_US/base.js', 'Sy4aDGc0VpYRR9ew_', '5UPOT1VhoZxNLQ', ), ( 'https://www.youtube.com/s/player/d50f54ef/player_ias_tce.vflset/en_US/base.js', 'Ha7507LzRmH3Utygtj', 'XFTb2HoeOE5MHg', ), ( 'https://www.youtube.com/s/player/074a8365/player_ias_tce.vflset/en_US/base.js', 'Ha7507LzRmH3Utygtj', 'ufTsrE0IVYrkl8v', ), ( 'https://www.youtube.com/s/player/643afba4/player_ias.vflset/en_US/base.js', 'N5uAlLqm0eg1GyHO', 'dCBQOejdq5s-ww', ), ( 'https://www.youtube.com/s/player/69f581a5/tv-player-ias.vflset/tv-player-ias.js', '-qIP447rVlTTwaZjY', 'KNcGOksBAvwqQg', ), ( 'https://www.youtube.com/s/player/643afba4/tv-player-ias.vflset/tv-player-ias.js', 'ir9-V6cdbCiyKxhr', '2PL7ZDYAALMfmA', ), ( 'https://www.youtube.com/s/player/643afba4/player_ias.vflset/en_US/base.js', 'ir9-V6cdbCiyKxhr', '2PL7ZDYAALMfmA', ), ( 'https://www.youtube.com/s/player/363db69b/player_ias.vflset/en_US/base.js', 'eWYu5d5YeY_4LyEDc', 'XJQqf-N7Xra3gg', ), ( 'https://www.youtube.com/s/player/4fcd6e4a/player_ias.vflset/en_US/base.js', 'o_L251jm8yhZkWtBW', 'lXoxI3XvToqn6A', ), ( 'https://www.youtube.com/s/player/4fcd6e4a/tv-player-ias.vflset/tv-player-ias.js', 'o_L251jm8yhZkWtBW', 'lXoxI3XvToqn6A', ), ( 'https://www.youtube.com/s/player/20830619/tv-player-ias.vflset/tv-player-ias.js', 'ir9-V6cdbCiyKxhr', '9YE85kNjZiS4', ), ( 'https://www.youtube.com/s/player/20830619/player-plasma-ias-phone-en_US.vflset/base.js', 'ir9-V6cdbCiyKxhr', '9YE85kNjZiS4', ), ( 'https://www.youtube.com/s/player/20830619/player-plasma-ias-tablet-en_US.vflset/base.js', 'ir9-V6cdbCiyKxhr', '9YE85kNjZiS4', ), ( 'https://www.youtube.com/s/player/8a8ac953/player_ias_tce.vflset/en_US/base.js', 'MiBYeXx_vRREbiCCmh', 'RtZYMVvmkE0JE', ), ( 'https://www.youtube.com/s/player/8a8ac953/tv-player-es6.vflset/tv-player-es6.js', 'MiBYeXx_vRREbiCCmh', 'RtZYMVvmkE0JE', ), ( 'https://www.youtube.com/s/player/aa3fc80b/player_ias.vflset/en_US/base.js', '0qY9dal2uzOnOGwa-48hha', 'VSh1KDfQMk-eag', ), ] class TestPlayerInfo(unittest.TestCase): def test_youtube_extract_player_info(self): PLAYER_URLS = ( ('https://www.youtube.com/s/player/4c3f79c5/player_ias.vflset/en_US/base.js', '4c3f79c5'), ('https://www.youtube.com/s/player/64dddad9/player_ias.vflset/en_US/base.js', '64dddad9'), ('https://www.youtube.com/s/player/64dddad9/player_ias.vflset/fr_FR/base.js', '64dddad9'), ('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-phone-en_US.vflset/base.js', '64dddad9'), ('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-phone-de_DE.vflset/base.js', '64dddad9'), ('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-tablet-en_US.vflset/base.js', '64dddad9'), ('https://www.youtube.com/s/player/e7567ecf/player_ias_tce.vflset/en_US/base.js', 'e7567ecf'), ('https://www.youtube.com/s/player/643afba4/tv-player-ias.vflset/tv-player-ias.js', '643afba4'), # obsolete ('https://www.youtube.com/yts/jsbin/player_ias-vfle4-e03/en_US/base.js', 'vfle4-e03'), ('https://www.youtube.com/yts/jsbin/player_ias-vfl49f_g4/en_US/base.js', 'vfl49f_g4'), ('https://www.youtube.com/yts/jsbin/player_ias-vflCPQUIL/en_US/base.js', 'vflCPQUIL'), ('https://www.youtube.com/yts/jsbin/player-vflzQZbt7/en_US/base.js', 'vflzQZbt7'), ('https://www.youtube.com/yts/jsbin/player-en_US-vflaxXRn1/base.js', 
'vflaxXRn1'), ('https://s.ytimg.com/yts/jsbin/html5player-en_US-vflXGBaUN.js', 'vflXGBaUN'), ('https://s.ytimg.com/yts/jsbin/html5player-en_US-vflKjOTVq/html5player.js', 'vflKjOTVq'), ) ie = YoutubeIE(FakeYDL({'cachedir': False})) for player_url, expected_player_id in PLAYER_URLS: player_id = ie._extract_player_info(player_url) self.assertEqual(player_id, expected_player_id) class TestSignature(unittest.TestCase): def setUp(self): TEST_DIR = os.path.dirname(os.path.abspath(__file__)) self.TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata/sigs') if not os.path.exists(self.TESTDATA_DIR): os.mkdir(self.TESTDATA_DIR) def tearDown(self): with compat_contextlib_suppress(OSError): for f in os.listdir(self.TESTDATA_DIR): os.remove(f) def t_factory(name, sig_func, url_pattern): def make_tfunc(url, sig_input, expected_sig): m = url_pattern.match(url) assert m, '{0!r} should follow URL format'.format(url) test_id = re.sub(r'[/.-]', '_', m.group('id') or m.group('compat_id')) def test_func(self): basename = 'player-{0}.js'.format(test_id) fn = os.path.join(self.TESTDATA_DIR, basename) if not os.path.exists(fn): compat_urlretrieve(url, fn) with open(fn, encoding='utf-8') as testf: jscode = testf.read() self.assertEqual(sig_func(jscode, sig_input), expected_sig) test_func.__name__ = str('test_{0}_js_{1}'.format(name, test_id)) setattr(TestSignature, test_func.__name__, test_func) return make_tfunc def signature(jscode, sig_input): func = YoutubeIE(FakeYDL({'cachedir': False}))._parse_sig_js(jscode) src_sig = ( compat_str(string.printable[:sig_input]) if isinstance(sig_input, int) else sig_input) return func(src_sig) def n_sig(jscode, sig_input): ie = YoutubeIE(FakeYDL({'cachedir': False})) jsi = JSInterpreter(jscode) jsi, _, func_code = ie._extract_n_function_code_jsi(sig_input, jsi) return ie._extract_n_function_from_code(jsi, func_code)(sig_input) make_sig_test = t_factory( 'signature', signature, re.compile(r'''(?x) .+/(?P<h5>html5)?player(?(h5)(?:-en_US)?-|/)(?P<id>[a-zA-Z0-9/._-]+) (?(h5)/(?:watch_as3|html5player))?\.js$ ''')) for test_spec in _SIG_TESTS: make_sig_test(*test_spec) make_nsig_test = t_factory( 'nsig', n_sig, re.compile(r'.+/player/(?P<id>[a-zA-Z0-9_/.-]+)\.js$')) for test_spec in _NSIG_TESTS: make_nsig_test(*test_spec) if __name__ == '__main__': unittest.main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
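The t_factory/make_tfunc machinery in test/test_youtube_signature.py above is a generic pattern: build one test method per data tuple and attach it to the TestCase class at import time, so every player signature gets its own pass/fail line in the test run. A minimal self-contained version of that pattern:

import unittest

DATA = [('a', 1), ('b', 2)]


class TestDynamic(unittest.TestCase):
    pass


def _make_test(name, value):
    def test_func(self):
        self.assertEqual(len(name), 1)
        self.assertGreater(value, 0)
    # unittest discovers methods by name, so give each a unique one
    test_func.__name__ = str('test_%s' % name)
    return test_func


for name, value in DATA:
    t = _make_test(name, value)
    setattr(TestDynamic, t.__name__, t)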
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_cache.py
test/test_cache.py
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals

# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import shutil

from test.helper import FakeYDL
from youtube_dl.cache import Cache
from youtube_dl.utils import version_tuple
from youtube_dl.version import __version__


def _is_empty(d):
    return not bool(os.listdir(d))


def _mkdir(d):
    if not os.path.exists(d):
        os.mkdir(d)


class TestCache(unittest.TestCase):
    def setUp(self):
        TEST_DIR = os.path.dirname(os.path.abspath(__file__))
        TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata')
        _mkdir(TESTDATA_DIR)
        self.test_dir = os.path.join(TESTDATA_DIR, 'cache_test')
        self.tearDown()

    def tearDown(self):
        if os.path.exists(self.test_dir):
            shutil.rmtree(self.test_dir)

    def test_cache(self):
        ydl = FakeYDL({
            'cachedir': self.test_dir,
        })
        c = Cache(ydl)
        obj = {'x': 1, 'y': ['ä', '\\a', True]}
        self.assertEqual(c.load('test_cache', 'k.'), None)
        c.store('test_cache', 'k.', obj)
        self.assertEqual(c.load('test_cache', 'k2'), None)
        self.assertFalse(_is_empty(self.test_dir))
        self.assertEqual(c.load('test_cache', 'k.'), obj)
        self.assertEqual(c.load('test_cache', 'y'), None)
        self.assertEqual(c.load('test_cache2', 'k.'), None)
        c.remove()
        self.assertFalse(os.path.exists(self.test_dir))
        self.assertEqual(c.load('test_cache', 'k.'), None)

    def test_cache_validation(self):
        ydl = FakeYDL({
            'cachedir': self.test_dir,
        })
        c = Cache(ydl)
        obj = {'x': 1, 'y': ['ä', '\\a', True]}
        c.store('test_cache', 'k.', obj)
        self.assertEqual(c.load('test_cache', 'k.', min_ver='1970.01.01'), obj)
        new_version = '.'.join(('%0.2d' % ((v + 1) if i == 0 else v, ))
                               for i, v in enumerate(version_tuple(__version__)))
        self.assertIs(c.load('test_cache', 'k.', min_ver=new_version), None)

    def test_cache_clear(self):
        ydl = FakeYDL({
            'cachedir': self.test_dir,
        })
        c = Cache(ydl)
        c.store('test_cache', 'k.', 'kay')
        c.store('test_cache', 'l.', 'ell')
        self.assertEqual(c.load('test_cache', 'k.'), 'kay')
        c.clear('test_cache', 'k.')
        self.assertEqual(c.load('test_cache', 'k.'), None)
        self.assertEqual(c.load('test_cache', 'l.'), 'ell')


if __name__ == '__main__':
    unittest.main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
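Based only on the Cache calls the test above exercises (store/load/clear/remove), here is a short usage sketch; the section and key names are arbitrary examples.

# Usage sketch for youtube_dl.cache.Cache, limited to the API exercised by
# the test above; section/key names and the temp dir are arbitrary examples.
import tempfile

from youtube_dl import YoutubeDL
from youtube_dl.cache import Cache

ydl = YoutubeDL({'cachedir': tempfile.mkdtemp()})
cache = Cache(ydl)

cache.store('example_section', 'some-key', {'token': 'abc'})
print(cache.load('example_section', 'some-key'))   # -> {'token': 'abc'}
print(cache.load('example_section', 'missing'))    # -> None
cache.clear('example_section', 'some-key')         # drop one entry
cache.remove()                                     # drop the whole cache dir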
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_aes.py
test/test_aes.py
#!/usr/bin/env python
from __future__ import unicode_literals

# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from youtube_dl.aes import aes_decrypt, aes_encrypt, aes_cbc_decrypt, aes_cbc_encrypt, aes_decrypt_text, aes_ecb_encrypt
from youtube_dl.utils import bytes_to_intlist, intlist_to_bytes
import base64

# the encrypted data can be generated with 'devscripts/generate_aes_testdata.py'


class TestAES(unittest.TestCase):
    def setUp(self):
        self.key = self.iv = [0x20, 0x15] + 14 * [0]
        self.secret_msg = b'Secret message goes here'

    def test_encrypt(self):
        msg = b'message'
        key = list(range(16))
        encrypted = aes_encrypt(bytes_to_intlist(msg), key)
        decrypted = intlist_to_bytes(aes_decrypt(encrypted, key))
        self.assertEqual(decrypted, msg)

    def test_cbc_decrypt(self):
        data = bytes_to_intlist(
            b"\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6'\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd"
        )
        decrypted = intlist_to_bytes(aes_cbc_decrypt(data, self.key, self.iv))
        self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg)

    def test_cbc_encrypt(self):
        data = bytes_to_intlist(self.secret_msg)
        encrypted = intlist_to_bytes(aes_cbc_encrypt(data, self.key, self.iv))
        self.assertEqual(
            encrypted,
            b"\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6'\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd")

    def test_decrypt_text(self):
        password = intlist_to_bytes(self.key).decode('utf-8')
        encrypted = base64.b64encode(
            intlist_to_bytes(self.iv[:8])
            + b'\x17\x15\x93\xab\x8d\x80V\xcdV\xe0\t\xcdo\xc2\xa5\xd8ksM\r\xe27N\xae'
        ).decode('utf-8')
        decrypted = (aes_decrypt_text(encrypted, password, 16))
        self.assertEqual(decrypted, self.secret_msg)

        password = intlist_to_bytes(self.key).decode('utf-8')
        encrypted = base64.b64encode(
            intlist_to_bytes(self.iv[:8])
            + b'\x0b\xe6\xa4\xd9z\x0e\xb8\xb9\xd0\xd4i_\x85\x1d\x99\x98_\xe5\x80\xe7.\xbf\xa5\x83'
        ).decode('utf-8')
        decrypted = (aes_decrypt_text(encrypted, password, 32))
        self.assertEqual(decrypted, self.secret_msg)

    def test_ecb_encrypt(self):
        data = bytes_to_intlist(self.secret_msg)
        encrypted = intlist_to_bytes(aes_ecb_encrypt(data, self.key))
        self.assertEqual(
            encrypted,
            b'\xaa\x86]\x81\x97>\x02\x92\x9d\x1bR[[L/u\xd3&\xd1(h\xde{\x81\x94\xba\x02\xae\xbd\xa6\xd0:')


if __name__ == '__main__':
    unittest.main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
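The AES helpers above work on int lists rather than bytes, so every call is wrapped in bytes_to_intlist/intlist_to_bytes. A round-trip sketch reusing the test's own key, IV and 24-byte message, so the \x08 pad-stripping assumption from the test holds:

# CBC round-trip sketch with the intlist-based helpers; key/IV/message are
# the same example values the test uses.
from youtube_dl.aes import aes_cbc_decrypt, aes_cbc_encrypt
from youtube_dl.utils import bytes_to_intlist, intlist_to_bytes

key = iv = [0x20, 0x15] + 14 * [0]      # 16-byte key/IV as int lists
msg = b'Secret message goes here'       # 24 bytes -> 8 bytes of \x08 padding

ciphertext = aes_cbc_encrypt(bytes_to_intlist(msg), key, iv)
plaintext = intlist_to_bytes(aes_cbc_decrypt(ciphertext, key, iv))

# strip the PKCS#7-style pad, as the test does for this message length
assert plaintext.rstrip(b'\x08') == msg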
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_download.py
test/test_download.py
#!/usr/bin/env python
from __future__ import unicode_literals

# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from test.helper import (
    expect_warnings,
    get_params,
    gettestcases,
    expect_info_dict,
    try_rm,
    report_warning,
)

import hashlib
import json
import socket

import youtube_dl.YoutubeDL
from youtube_dl.compat import (
    compat_http_client,
    compat_HTTPError,
    compat_open as open,
    compat_urllib_error,
)
from youtube_dl.utils import (
    DownloadError,
    ExtractorError,
    error_to_compat_str,
    format_bytes,
    IDENTITY,
    preferredencoding,
    UnavailableVideoError,
)
from youtube_dl.extractor import get_info_extractor

RETRIES = 3

# Some unittest APIs require actual str
if not isinstance('TEST', str):
    _encode_str = lambda s: s.encode(preferredencoding())
else:
    _encode_str = IDENTITY


class YoutubeDL(youtube_dl.YoutubeDL):
    def __init__(self, *args, **kwargs):
        self.to_stderr = self.to_screen
        self.processed_info_dicts = []
        super(YoutubeDL, self).__init__(*args, **kwargs)

    def report_warning(self, message):
        # Don't accept warnings during tests
        raise ExtractorError(message)

    def process_info(self, info_dict):
        self.processed_info_dicts.append(info_dict)
        return super(YoutubeDL, self).process_info(info_dict)


def _file_md5(fn):
    with open(fn, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()


defs = gettestcases()


class TestDownload(unittest.TestCase):
    # Parallel testing in nosetests. See
    # http://nose.readthedocs.org/en/latest/doc_tests/test_multiprocess/multiprocess.html
    _multiprocess_shared_ = True

    maxDiff = None

    def __str__(self):
        """Identify each test with the `add_ie` attribute, if available."""

        def strclass(cls):
            """From 2.7's unittest; 2.6 had _strclass so we can't import it."""
            return '%s.%s' % (cls.__module__, cls.__name__)

        add_ie = getattr(self, self._testMethodName).add_ie
        return '%s (%s)%s:' % (self._testMethodName,
                               strclass(self.__class__),
                               ' [%s]' % add_ie if add_ie else '')

    def setUp(self):
        self.defs = defs


# Dynamically generate tests
def generator(test_case, tname):
    def test_template(self):
        ie = youtube_dl.extractor.get_info_extractor(test_case['name'])()
        other_ies = [get_info_extractor(ie_key)() for ie_key in test_case.get('add_ie', [])]
        is_playlist = any(k.startswith('playlist') for k in test_case)
        test_cases = test_case.get(
            'playlist', [] if is_playlist else [test_case])

        def print_skipping(reason):
            print('Skipping %s: %s' % (test_case['name'], reason))
            self.skipTest(_encode_str(reason))

        if not ie.working():
            print_skipping('IE marked as not _WORKING')

        for tc in test_cases:
            info_dict = tc.get('info_dict', {})
            if not (info_dict.get('id') and info_dict.get('ext')):
                raise Exception('Test definition (%s) requires both \'id\' and \'ext\' keys present to define the output file' % (tname, ))

        if 'skip' in test_case:
            print_skipping(test_case['skip'])

        for other_ie in other_ies:
            if not other_ie.working():
                print_skipping('test depends on %sIE, marked as not WORKING' % other_ie.ie_key())

        params = get_params(test_case.get('params', {}))
        params['outtmpl'] = tname + '_' + params['outtmpl']
        if is_playlist and 'playlist' not in test_case:
            params.setdefault('extract_flat', 'in_playlist')
            params.setdefault('playlistend',
                              test_case['playlist_maxcount'] + 1
                              if test_case.get('playlist_maxcount')
                              else test_case.get('playlist_mincount'))
            params.setdefault('skip_download', True)

        ydl = YoutubeDL(params, auto_init=False)
        ydl.add_default_info_extractors()
        finished_hook_called = set()

        def _hook(status):
            if status['status'] == 'finished':
                finished_hook_called.add(status['filename'])
        ydl.add_progress_hook(_hook)
        expect_warnings(ydl, test_case.get('expected_warnings', []))

        def get_tc_filename(tc):
            return ydl.prepare_filename(tc.get('info_dict', {}))

        res_dict = None

        def try_rm_tcs_files(tcs=None):
            if tcs is None:
                tcs = test_cases
            for tc in tcs:
                tc_filename = get_tc_filename(tc)
                try_rm(tc_filename)
                try_rm(tc_filename + '.part')
                try_rm(os.path.splitext(tc_filename)[0] + '.info.json')
        try_rm_tcs_files()
        try:
            try_num = 1
            while True:
                try:
                    # We're not using .download here since that is just a shim
                    # for outside error handling, and returns the exit code
                    # instead of the result dict.
                    res_dict = ydl.extract_info(
                        test_case['url'],
                        force_generic_extractor=params.get('force_generic_extractor', False))
                except (DownloadError, ExtractorError) as err:
                    # Check if the exception is not a network related one
                    if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError, compat_http_client.BadStatusLine) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503):
                        msg = getattr(err, 'msg', error_to_compat_str(err))
                        err.msg = '%s (%s)' % (msg, tname, )
                        raise err

                    if try_num == RETRIES:
                        report_warning('%s failed due to network errors, skipping...' % tname)
                        return

                    print('Retrying: {0} failed tries\n\n##########\n\n'.format(try_num))

                    try_num += 1
                else:
                    break

            if is_playlist:
                self.assertTrue(res_dict['_type'] in ['playlist', 'multi_video'])
                self.assertTrue('entries' in res_dict)
                expect_info_dict(self, res_dict, test_case.get('info_dict', {}))

            if 'playlist_mincount' in test_case:
                self.assertGreaterEqual(
                    len(res_dict['entries']),
                    test_case['playlist_mincount'],
                    'Expected at least %d in playlist %s, but got only %d' % (
                        test_case['playlist_mincount'], test_case['url'],
                        len(res_dict['entries'])))
            if 'playlist_maxcount' in test_case:
                self.assertLessEqual(
                    len(res_dict['entries']),
                    test_case['playlist_maxcount'],
                    'Expected at most %d in playlist %s, but got %d' % (
                        test_case['playlist_maxcount'], test_case['url'],
                        len(res_dict['entries'])))
            if 'playlist_count' in test_case:
                self.assertEqual(
                    len(res_dict['entries']),
                    test_case['playlist_count'],
                    'Expected %d entries in playlist %s, but got %d.' % (
                        test_case['playlist_count'],
                        test_case['url'],
                        len(res_dict['entries']),
                    ))
            if 'playlist_duration_sum' in test_case:
                got_duration = sum(e['duration'] for e in res_dict['entries'])
                self.assertEqual(
                    test_case['playlist_duration_sum'], got_duration)

            # Generalize both playlists and single videos to unified format for
            # simplicity
            if 'entries' not in res_dict:
                res_dict['entries'] = [res_dict]

            for tc_num, tc in enumerate(test_cases):
                tc_res_dict = res_dict['entries'][tc_num]
                # First, check test cases' data against extracted data alone
                expect_info_dict(self, tc_res_dict, tc.get('info_dict', {}))
                # Now, check downloaded file consistency
                # support test-case with volatile ID, signalled by regexp value
                if tc.get('info_dict', {}).get('id', '').startswith('re:'):
                    test_id = tc['info_dict']['id']
                    tc['info_dict']['id'] = tc_res_dict['id']
                else:
                    test_id = None
                tc_filename = get_tc_filename(tc)
                if test_id:
                    tc['info_dict']['id'] = test_id
                if not test_case.get('params', {}).get('skip_download', False):
                    self.assertTrue(os.path.exists(tc_filename), msg='Missing file ' + tc_filename)
                    self.assertTrue(tc_filename in finished_hook_called)
                    expected_minsize = tc.get('file_minsize', 10000)
                    if expected_minsize is not None:
                        if params.get('test'):
                            expected_minsize = max(expected_minsize, 10000)
                        got_fsize = os.path.getsize(tc_filename)
                        self.assertGreaterEqual(
                            got_fsize, expected_minsize,
                            'Expected %s to be at least %s, but it\'s only %s ' %
                            (tc_filename, format_bytes(expected_minsize),
                             format_bytes(got_fsize)))
                    if 'md5' in tc:
                        md5_for_file = _file_md5(tc_filename)
                        self.assertEqual(tc['md5'], md5_for_file)
                # Finally, check test cases' data again but this time against
                # extracted data from info JSON file written during processing
                info_json_fn = os.path.splitext(tc_filename)[0] + '.info.json'
                self.assertTrue(
                    os.path.exists(info_json_fn),
                    'Missing info file %s' % info_json_fn)
                with open(info_json_fn, encoding='utf-8') as infof:
                    info_dict = json.load(infof)
                expect_info_dict(self, info_dict, tc.get('info_dict', {}))
        finally:
            try_rm_tcs_files()
            if is_playlist and res_dict is not None and res_dict.get('entries'):
                # Remove all other files that may have been extracted if the
                # extractor returns full results even with extract_flat
                res_tcs = [{'info_dict': e} for e in res_dict['entries']]
                try_rm_tcs_files(res_tcs)

    return test_template


# And add them to TestDownload
for n, test_case in enumerate(defs):
    tname = 'test_' + str(test_case['name'])
    i = 1
    while hasattr(TestDownload, tname):
        tname = 'test_%s_%d' % (test_case['name'], i)
        i += 1
    test_method = generator(test_case, tname)
    test_method.__name__ = str(tname)
    ie_list = test_case.get('add_ie')
    test_method.add_ie = ie_list and ','.join(ie_list)
    setattr(TestDownload, test_method.__name__, test_method)
    del test_method


if __name__ == '__main__':
    unittest.main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
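test_download.py's core control flow is the retry loop around ydl.extract_info(): network-ish failures are retried up to RETRIES times, anything else re-raises at once. A standalone sketch of just that loop; flaky_fetch is a hypothetical stand-in for the extraction call.

# Retry-loop sketch mirroring the control flow in test_download.py.
import socket

RETRIES = 3


def flaky_fetch():
    # hypothetical stand-in that always hits a network error
    raise socket.timeout('simulated network hiccup')


def fetch_with_retries():
    try_num = 1
    while True:
        try:
            return flaky_fetch()
        except socket.timeout:
            # a non-network error would simply propagate out of this except
            if try_num == RETRIES:
                print('failed due to network errors, skipping...')
                return None
            print('Retrying: {0} failed tries'.format(try_num))
            try_num += 1


fetch_with_retries()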
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_socks.py
test/test_socks.py
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals

# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import random
import subprocess

from test.helper import (
    FakeYDL,
    get_params,
)
from youtube_dl.compat import (
    compat_str,
    compat_urllib_request,
)


class TestMultipleSocks(unittest.TestCase):
    @staticmethod
    def _check_params(attrs):
        params = get_params()
        for attr in attrs:
            if attr not in params:
                print('Missing %s. Skipping.' % attr)
                return
        return params

    def test_proxy_http(self):
        params = self._check_params(['primary_proxy', 'primary_server_ip'])
        if params is None:
            return
        ydl = FakeYDL({
            'proxy': params['primary_proxy']
        })
        self.assertEqual(
            ydl.urlopen('http://yt-dl.org/ip').read().decode('utf-8'),
            params['primary_server_ip'])

    def test_proxy_https(self):
        params = self._check_params(['primary_proxy', 'primary_server_ip'])
        if params is None:
            return
        ydl = FakeYDL({
            'proxy': params['primary_proxy']
        })
        self.assertEqual(
            ydl.urlopen('https://yt-dl.org/ip').read().decode('utf-8'),
            params['primary_server_ip'])

    def test_secondary_proxy_http(self):
        params = self._check_params(['secondary_proxy', 'secondary_server_ip'])
        if params is None:
            return
        ydl = FakeYDL()
        req = compat_urllib_request.Request('http://yt-dl.org/ip')
        req.add_header('Ytdl-request-proxy', params['secondary_proxy'])
        self.assertEqual(
            ydl.urlopen(req).read().decode('utf-8'),
            params['secondary_server_ip'])

    def test_secondary_proxy_https(self):
        params = self._check_params(['secondary_proxy', 'secondary_server_ip'])
        if params is None:
            return
        ydl = FakeYDL()
        req = compat_urllib_request.Request('https://yt-dl.org/ip')
        req.add_header('Ytdl-request-proxy', params['secondary_proxy'])
        self.assertEqual(
            ydl.urlopen(req).read().decode('utf-8'),
            params['secondary_server_ip'])


class TestSocks(unittest.TestCase):
    _SKIP_SOCKS_TEST = True

    def setUp(self):
        if self._SKIP_SOCKS_TEST:
            return

        self.port = random.randint(20000, 30000)
        self.server_process = subprocess.Popen([
            'srelay', '-f', '-i', '127.0.0.1:%d' % self.port],
            stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    def tearDown(self):
        if self._SKIP_SOCKS_TEST:
            return

        self.server_process.terminate()
        self.server_process.communicate()

    def _get_ip(self, protocol):
        if self._SKIP_SOCKS_TEST:
            return '127.0.0.1'

        ydl = FakeYDL({
            'proxy': '%s://127.0.0.1:%d' % (protocol, self.port),
        })
        return ydl.urlopen('http://yt-dl.org/ip').read().decode('utf-8')

    def test_socks4(self):
        self.assertTrue(isinstance(self._get_ip('socks4'), compat_str))

    def test_socks4a(self):
        self.assertTrue(isinstance(self._get_ip('socks4a'), compat_str))

    def test_socks5(self):
        self.assertTrue(isinstance(self._get_ip('socks5'), compat_str))


if __name__ == '__main__':
    unittest.main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
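The test above relies on two proxy mechanisms: the global 'proxy' option and the per-request Ytdl-request-proxy header. A sketch of both, with placeholder proxy URLs; the actual urlopen call is left commented out since it needs a live proxy to succeed.

# Proxy-configuration sketch; the SOCKS URLs are placeholders.
from test.helper import FakeYDL
from youtube_dl.compat import compat_urllib_request

# global proxy for every request made through this YoutubeDL instance
ydl = FakeYDL({'proxy': 'socks5://127.0.0.1:1080'})

# per-request override via the Ytdl-request-proxy header
req = compat_urllib_request.Request('http://yt-dl.org/ip')
req.add_header('Ytdl-request-proxy', 'socks5://127.0.0.1:1081')
# ydl.urlopen(req).read() would be routed through the per-request proxy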
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_compat.py
test/test_compat.py
#!/usr/bin/env python
# coding: utf-8

from __future__ import unicode_literals

# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


from youtube_dl.compat import (
    compat_casefold,
    compat_getenv,
    compat_setenv,
    compat_etree_Element,
    compat_etree_fromstring,
    compat_expanduser,
    compat_shlex_split,
    compat_str,
    compat_struct_unpack,
    compat_urllib_parse_unquote,
    compat_urllib_parse_unquote_plus,
    compat_urllib_parse_urlencode,
    compat_urllib_request,
)


class TestCompat(unittest.TestCase):
    def test_compat_getenv(self):
        test_str = 'тест'
        compat_setenv('YOUTUBE_DL_COMPAT_GETENV', test_str)
        self.assertEqual(compat_getenv('YOUTUBE_DL_COMPAT_GETENV'), test_str)

    def test_compat_setenv(self):
        test_var = 'YOUTUBE_DL_COMPAT_SETENV'
        test_str = 'тест'
        compat_setenv(test_var, test_str)
        compat_getenv(test_var)
        self.assertEqual(compat_getenv(test_var), test_str)

    def test_compat_expanduser(self):
        old_home = os.environ.get('HOME')
        test_str = r'C:\Documents and Settings\тест\Application Data'
        compat_setenv('HOME', test_str)
        self.assertEqual(compat_expanduser('~'), test_str)
        compat_setenv('HOME', old_home or '')

    def test_all_present(self):
        import youtube_dl.compat
        all_names = sorted(
            youtube_dl.compat.__all__ + youtube_dl.compat.legacy)
        present_names = set(map(compat_str, filter(
            lambda c: '_' in c and not c.startswith('_'),
            dir(youtube_dl.compat)))) - set(['unicode_literals'])
        self.assertEqual(all_names, sorted(present_names))

    def test_compat_urllib_parse_unquote(self):
        self.assertEqual(compat_urllib_parse_unquote('abc%20def'), 'abc def')
        self.assertEqual(compat_urllib_parse_unquote('%7e/abc+def'), '~/abc+def')
        self.assertEqual(compat_urllib_parse_unquote(''), '')
        self.assertEqual(compat_urllib_parse_unquote('%'), '%')
        self.assertEqual(compat_urllib_parse_unquote('%%'), '%%')
        self.assertEqual(compat_urllib_parse_unquote('%%%'), '%%%')
        self.assertEqual(compat_urllib_parse_unquote('%2F'), '/')
        self.assertEqual(compat_urllib_parse_unquote('%2f'), '/')
        self.assertEqual(compat_urllib_parse_unquote('%E6%B4%A5%E6%B3%A2'), '津波')
        self.assertEqual(
            compat_urllib_parse_unquote('''<meta property="og:description" content="%E2%96%81%E2%96%82%E2%96%83%E2%96%84%25%E2%96%85%E2%96%86%E2%96%87%E2%96%88" />
%<a href="https://ar.wikipedia.org/wiki/%D8%AA%D8%B3%D9%88%D9%86%D8%A7%D9%85%D9%8A">%a'''),
            '''<meta property="og:description" content="▁▂▃▄%▅▆▇█" />
%<a href="https://ar.wikipedia.org/wiki/تسونامي">%a''')
        self.assertEqual(
            compat_urllib_parse_unquote('''%28%5E%E2%97%A3_%E2%97%A2%5E%29%E3%81%A3%EF%B8%BB%E3%83%87%E2%95%90%E4%B8%80 %E2%87%80 %E2%87%80 %E2%87%80 %E2%87%80 %E2%87%80 %E2%86%B6%I%Break%25Things%'''),
            '''(^◣_◢^)っ︻デ═一 ⇀ ⇀ ⇀ ⇀ ⇀ ↶%I%Break%Things%''')

    def test_compat_urllib_parse_unquote_plus(self):
        self.assertEqual(compat_urllib_parse_unquote_plus('abc%20def'), 'abc def')
        self.assertEqual(compat_urllib_parse_unquote_plus('%7e/abc+def'), '~/abc def')

    def test_compat_urllib_parse_urlencode(self):
        self.assertEqual(compat_urllib_parse_urlencode({'abc': 'def'}), 'abc=def')
        self.assertEqual(compat_urllib_parse_urlencode({'abc': b'def'}), 'abc=def')
        self.assertEqual(compat_urllib_parse_urlencode({b'abc': 'def'}), 'abc=def')
        self.assertEqual(compat_urllib_parse_urlencode({b'abc': b'def'}), 'abc=def')
        self.assertEqual(compat_urllib_parse_urlencode([('abc', 'def')]), 'abc=def')
        self.assertEqual(compat_urllib_parse_urlencode([('abc', b'def')]), 'abc=def')
        self.assertEqual(compat_urllib_parse_urlencode([(b'abc', 'def')]), 'abc=def')
        self.assertEqual(compat_urllib_parse_urlencode([(b'abc', b'def')]), 'abc=def')

    def test_compat_shlex_split(self):
        self.assertEqual(compat_shlex_split('-option "one two"'), ['-option', 'one two'])
        self.assertEqual(compat_shlex_split('-option "one\ntwo" \n -flag'), ['-option', 'one\ntwo', '-flag'])
        self.assertEqual(compat_shlex_split('-val 中文'), ['-val', '中文'])

    def test_compat_etree_Element(self):
        try:
            compat_etree_Element.items
        except AttributeError:
            self.fail('compat_etree_Element is not a type')

    def test_compat_etree_fromstring(self):
        xml = '''
            <root foo="bar" spam="中文">
                <normal>foo</normal>
                <chinese>中文</chinese>
                <foo><bar>spam</bar></foo>
            </root>
        '''
        doc = compat_etree_fromstring(xml.encode('utf-8'))
        self.assertTrue(isinstance(doc.attrib['foo'], compat_str))
        self.assertTrue(isinstance(doc.attrib['spam'], compat_str))
        self.assertTrue(isinstance(doc.find('normal').text, compat_str))
        self.assertTrue(isinstance(doc.find('chinese').text, compat_str))
        self.assertTrue(isinstance(doc.find('foo/bar').text, compat_str))

    def test_compat_etree_fromstring_doctype(self):
        xml = '''<?xml version="1.0"?>
<!DOCTYPE smil PUBLIC "-//W3C//DTD SMIL 2.0//EN" "http://www.w3.org/2001/SMIL20/SMIL20.dtd">
<smil xmlns="http://www.w3.org/2001/SMIL20/Language"></smil>'''
        compat_etree_fromstring(xml)

    def test_compat_struct_unpack(self):
        self.assertEqual(compat_struct_unpack('!B', b'\x00'), (0,))

    def test_compat_casefold(self):
        if hasattr(compat_str, 'casefold'):
            # don't bother to test str.casefold() (again)
            return
        # thanks https://bugs.python.org/file24232/casefolding.patch
        self.assertEqual(compat_casefold('hello'), 'hello')
        self.assertEqual(compat_casefold('hELlo'), 'hello')
        self.assertEqual(compat_casefold('ß'), 'ss')
        self.assertEqual(compat_casefold('ﬁ'), 'fi')
        self.assertEqual(compat_casefold('\u03a3'), '\u03c3')
        self.assertEqual(compat_casefold('A\u0345\u03a3'), 'a\u03b9\u03c3')

    def test_compat_urllib_request_Request(self):
        self.assertEqual(
            compat_urllib_request.Request('http://127.0.0.1', method='PUT').get_method(),
            'PUT')

        class PUTrequest(compat_urllib_request.Request):
            def get_method(self):
                return 'PUT'

        self.assertEqual(
            PUTrequest('http://127.0.0.1').get_method(),
            'PUT')


if __name__ == '__main__':
    unittest.main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
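A few of the compat shims exercised above, run standalone; each line mirrors an assertion from the test.

# Standalone checks mirroring assertions in test_compat.py.
from youtube_dl.compat import (
    compat_shlex_split,
    compat_urllib_parse_unquote,
    compat_urllib_parse_urlencode,
)

assert compat_urllib_parse_unquote('abc%20def') == 'abc def'
assert compat_urllib_parse_urlencode({'abc': 'def'}) == 'abc=def'
assert compat_shlex_split('-option "one two"') == ['-option', 'one two']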
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_age_restriction.py
test/test_age_restriction.py
#!/usr/bin/env python
from __future__ import unicode_literals

# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from test.helper import try_rm
from youtube_dl import YoutubeDL
from youtube_dl.utils import DownloadError


def _download_restricted(url, filename, age):
    """ Returns true if the file has been downloaded """

    params = {
        'age_limit': age,
        'skip_download': True,
        'writeinfojson': True,
        'outtmpl': '%(id)s.%(ext)s',
    }
    ydl = YoutubeDL(params)
    ydl.add_default_info_extractors()
    json_filename = os.path.splitext(filename)[0] + '.info.json'
    try_rm(json_filename)
    try:
        ydl.download([url])
    except DownloadError:
        try_rm(json_filename)
    res = os.path.exists(json_filename)
    try_rm(json_filename)
    return res


class TestAgeRestriction(unittest.TestCase):
    def _assert_restricted(self, url, filename, age, old_age=None):
        self.assertTrue(_download_restricted(url, filename, old_age))
        self.assertFalse(_download_restricted(url, filename, age))

    def test_youtube(self):
        self._assert_restricted('HtVdAasjOgU', 'HtVdAasjOgU.mp4', 10)

    def test_youporn(self):
        self._assert_restricted(
            'https://www.youporn.com/watch/16715086/sex-ed-in-detention-18-asmr/',
            '16715086.mp4', 2, old_age=25)


if __name__ == '__main__':
    unittest.main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
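The helper above boils down to: configure age_limit, attempt the extraction, and check whether the .info.json side effect appeared. A sketch of the same configuration; the download call is commented out because for the test's age-restricted video it would raise DownloadError, which is exactly the behavior the test asserts.

# age_limit configuration sketch, mirroring the params used by the test.
from youtube_dl import YoutubeDL

ydl = YoutubeDL({
    'age_limit': 10,          # pretend the user is 10 years old
    'skip_download': True,
    'writeinfojson': True,
    'outtmpl': '%(id)s.%(ext)s',
})
ydl.add_default_info_extractors()
# ydl.download(['HtVdAasjOgU']) would raise DownloadError for this
# age-restricted video instead of writing HtVdAasjOgU.info.json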
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_downloader_http.py
test/test_downloader_http.py
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals

# Allow direct execution
import os
import re
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from test.helper import (
    FakeLogger,
    http_server_port,
    try_rm,
)
from youtube_dl import YoutubeDL
from youtube_dl.compat import compat_http_server
from youtube_dl.downloader.http import HttpFD
from youtube_dl.utils import encodeFilename
import threading

TEST_DIR = os.path.dirname(os.path.abspath(__file__))

TEST_SIZE = 10 * 1024


class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
    def log_message(self, format, *args):
        pass

    def send_content_range(self, total=None):
        range_header = self.headers.get('Range')
        start = end = None
        if range_header:
            mobj = re.search(r'^bytes=(\d+)-(\d+)', range_header)
            if mobj:
                start = int(mobj.group(1))
                end = int(mobj.group(2))
        valid_range = start is not None and end is not None
        if valid_range:
            content_range = 'bytes %d-%d' % (start, end)
            if total:
                content_range += '/%d' % total
            self.send_header('Content-Range', content_range)
        return (end - start + 1) if valid_range else total

    def serve(self, range=True, content_length=True):
        self.send_response(200)
        self.send_header('Content-Type', 'video/mp4')
        size = TEST_SIZE
        if range:
            size = self.send_content_range(TEST_SIZE)
        if content_length:
            self.send_header('Content-Length', size)
        self.end_headers()
        self.wfile.write(b'#' * size)

    def do_GET(self):
        if self.path == '/regular':
            self.serve()
        elif self.path == '/no-content-length':
            self.serve(content_length=False)
        elif self.path == '/no-range':
            self.serve(range=False)
        elif self.path == '/no-range-no-content-length':
            self.serve(range=False, content_length=False)
        else:
            assert False


class TestHttpFD(unittest.TestCase):
    def setUp(self):
        self.httpd = compat_http_server.HTTPServer(
            ('127.0.0.1', 0), HTTPTestRequestHandler)
        self.port = http_server_port(self.httpd)
        self.server_thread = threading.Thread(target=self.httpd.serve_forever)
        self.server_thread.daemon = True
        self.server_thread.start()

    def download(self, params, ep):
        params['logger'] = FakeLogger()
        ydl = YoutubeDL(params)
        downloader = HttpFD(ydl, params)
        filename = 'testfile.mp4'
        try_rm(encodeFilename(filename))
        self.assertTrue(downloader.real_download(filename, {
            'url': 'http://127.0.0.1:%d/%s' % (self.port, ep),
        }))
        self.assertEqual(os.path.getsize(encodeFilename(filename)), TEST_SIZE, ep)
        try_rm(encodeFilename(filename))

    def download_all(self, params):
        for ep in ('regular', 'no-content-length', 'no-range', 'no-range-no-content-length'):
            self.download(params, ep)

    def test_regular(self):
        self.download_all({})

    def test_chunked(self):
        self.download_all({
            'http_chunk_size': 1000,
        })


if __name__ == '__main__':
    unittest.main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
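send_content_range() is the heart of the test server above: parse "bytes=start-end" from the Range header and reply with exactly that many bytes. The same bookkeeping as a standalone function, checked with example values consistent with the test's TEST_SIZE of 10 * 1024:

# Content-Range bookkeeping extracted into a standalone sketch.
import re


def content_length_for(range_header, total):
    mobj = re.search(r'^bytes=(\d+)-(\d+)', range_header or '')
    if not mobj:
        return total  # no (usable) Range header: serve the whole resource
    start, end = int(mobj.group(1)), int(mobj.group(2))
    return end - start + 1  # an inclusive byte range spans end - start + 1 bytes


assert content_length_for(None, 10240) == 10240
assert content_length_for('bytes=0-999', 10240) == 1000     # first chunk
assert content_length_for('bytes=1000-1999', 10240) == 1000  # second chunk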
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/__init__.py
test/__init__.py
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_execution.py
test/test_execution.py
#!/usr/bin/env python
# coding: utf-8

from __future__ import unicode_literals

import unittest

import sys
import os
import subprocess

rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, rootDir)

from youtube_dl.compat import compat_register_utf8, compat_subprocess_get_DEVNULL
from youtube_dl.utils import encodeArgument

compat_register_utf8()

_DEV_NULL = compat_subprocess_get_DEVNULL()


class TestExecution(unittest.TestCase):
    def setUp(self):
        self.module = 'youtube_dl'
        if sys.version_info < (2, 7):
            self.module += '.__main__'

    def test_import(self):
        subprocess.check_call([sys.executable, '-c', 'import youtube_dl'], cwd=rootDir)

    def test_module_exec(self):
        subprocess.check_call([sys.executable, '-m', self.module, '--version'], cwd=rootDir, stdout=_DEV_NULL)

    def test_main_exec(self):
        subprocess.check_call([sys.executable, os.path.normpath('youtube_dl/__main__.py'), '--version'], cwd=rootDir, stdout=_DEV_NULL)

    def test_cmdline_umlauts(self):
        os.environ['PYTHONIOENCODING'] = 'utf-8'
        p = subprocess.Popen(
            [sys.executable, '-m', self.module, encodeArgument('ä'), '--version'],
            cwd=rootDir, stdout=_DEV_NULL, stderr=subprocess.PIPE)
        _, stderr = p.communicate()
        self.assertFalse(stderr)

    def test_lazy_extractors(self):
        lazy_extractors = os.path.normpath('youtube_dl/extractor/lazy_extractors.py')
        try:
            subprocess.check_call([sys.executable, os.path.normpath('devscripts/make_lazy_extractors.py'), lazy_extractors], cwd=rootDir, stdout=_DEV_NULL)
            subprocess.check_call([sys.executable, os.path.normpath('test/test_all_urls.py')], cwd=rootDir, stdout=_DEV_NULL)
        finally:
            for x in ('', 'c') if sys.version_info[0] < 3 else ('',):
                try:
                    os.remove(lazy_extractors + x)
                except OSError:
                    pass


if __name__ == '__main__':
    unittest.main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
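The two cheapest invocation checks from the test above, runnable from a repository checkout (both assume the youtube_dl package is importable from the current directory):

# Minimal invocation sketch: import the package, then run it as a module.
import subprocess
import sys

subprocess.check_call([sys.executable, '-c', 'import youtube_dl'])
subprocess.check_call([sys.executable, '-m', 'youtube_dl', '--version'])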
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_netrc.py
test/test_netrc.py
# coding: utf-8
from __future__ import unicode_literals

import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


from youtube_dl.extractor import (
    gen_extractors,
)


class TestNetRc(unittest.TestCase):
    def test_netrc_present(self):
        for ie in gen_extractors():
            if not hasattr(ie, '_login'):
                continue
            self.assertTrue(
                hasattr(ie, '_NETRC_MACHINE'),
                'Extractor %s supports login, but is missing a _NETRC_MACHINE property' % ie.IE_NAME)


if __name__ == '__main__':
    unittest.main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
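The same sweep as the test, but reporting instead of asserting: list every extractor that defines _login together with its _NETRC_MACHINE (a None here flags exactly the inconsistency the test guards against).

# Report sketch over the same extractor attributes the test checks.
from youtube_dl.extractor import gen_extractors

for ie in gen_extractors():
    if hasattr(ie, '_login'):
        print('%s -> %s' % (ie.IE_NAME, getattr(ie, '_NETRC_MACHINE', None)))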
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_InfoExtractor.py
test/test_InfoExtractor.py
#!/usr/bin/env python
from __future__ import unicode_literals

# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import threading

from test.helper import (
    expect_dict,
    expect_value,
    FakeYDL,
    http_server_port,
)
from youtube_dl.compat import (
    compat_etree_fromstring,
    compat_http_server,
    compat_open as open,
)
from youtube_dl.extractor.common import InfoExtractor
from youtube_dl.extractor import (
    get_info_extractor,
    YoutubeIE,
)
from youtube_dl.utils import (
    encode_data_uri,
    ExtractorError,
    RegexNotFoundError,
    strip_jsonp,
)


TEAPOT_RESPONSE_STATUS = 418
TEAPOT_RESPONSE_BODY = "<h1>418 I'm a teapot</h1>"


class InfoExtractorTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
    def log_message(self, format, *args):
        pass

    def do_GET(self):
        if self.path == '/teapot':
            self.send_response(TEAPOT_RESPONSE_STATUS)
            self.send_header('Content-Type', 'text/html; charset=utf-8')
            self.end_headers()
            self.wfile.write(TEAPOT_RESPONSE_BODY.encode())
        else:
            assert False


class DummyIE(InfoExtractor):
    pass


class TestInfoExtractor(unittest.TestCase):
    def setUp(self):
        self.ie = DummyIE(FakeYDL())

    def test_ie_key(self):
        self.assertEqual(get_info_extractor(YoutubeIE.ie_key()), YoutubeIE)

    def test_html_search_regex(self):
        html = '<p id="foo">Watch this <a href="http://www.youtube.com/watch?v=BaW_jenozKc">video</a></p>'
        search = lambda re, *args: self.ie._html_search_regex(re, html, *args)
        self.assertEqual(search(r'<p id="foo">(.+?)</p>', 'foo'), 'Watch this video')

    def test_opengraph(self):
        ie = self.ie
        html = '''
            <meta name="og:title" content='Foo'/>
            <meta content="Some video's description " name="og:description"/>
            <meta property='og:image' content='http://domain.com/pic.jpg?key1=val1&amp;key2=val2'/>
            <meta content='application/x-shockwave-flash' property='og:video:type'>
            <meta content='Foo' property=og:foobar>
            <meta name="og:test1" content='foo > < bar'/>
            <meta name="og:test2" content="foo >//< bar"/>
            <meta property=og-test3 content='Ill-formatted opengraph'/>
            <meta property=og:test4 content=unquoted-value/>
            '''
        self.assertEqual(ie._og_search_title(html), 'Foo')
        self.assertEqual(ie._og_search_description(html), 'Some video\'s description ')
        self.assertEqual(ie._og_search_thumbnail(html), 'http://domain.com/pic.jpg?key1=val1&key2=val2')
        self.assertEqual(ie._og_search_video_url(html, default=None), None)
        self.assertEqual(ie._og_search_property('foobar', html), 'Foo')
        self.assertEqual(ie._og_search_property('test1', html), 'foo > < bar')
        self.assertEqual(ie._og_search_property('test2', html), 'foo >//< bar')
        self.assertEqual(ie._og_search_property('test3', html), 'Ill-formatted opengraph')
        self.assertEqual(ie._og_search_property(('test0', 'test1'), html), 'foo > < bar')
        self.assertRaises(RegexNotFoundError, ie._og_search_property, 'test0', html, None, fatal=True)
        self.assertRaises(RegexNotFoundError, ie._og_search_property, ('test0', 'test00'), html, None, fatal=True)
        self.assertEqual(ie._og_search_property('test4', html), 'unquoted-value')

    def test_html_search_meta(self):
        ie = self.ie
        html = '''
            <meta name="a" content="1" />
            <meta name='b' content='2'>
            <meta name="c" content='3'>
            <meta name=d content='4'>
            <meta property="e" content='5' >
            <meta content="6" name="f">
        '''
        self.assertEqual(ie._html_search_meta('a', html), '1')
        self.assertEqual(ie._html_search_meta('b', html), '2')
        self.assertEqual(ie._html_search_meta('c', html), '3')
        self.assertEqual(ie._html_search_meta('d', html), '4')
        self.assertEqual(ie._html_search_meta('e', html), '5')
        self.assertEqual(ie._html_search_meta('f', html), '6')
        self.assertEqual(ie._html_search_meta(('a', 'b', 'c'), html), '1')
        self.assertEqual(ie._html_search_meta(('c', 'b', 'a'), html), '3')
        self.assertEqual(ie._html_search_meta(('z', 'x', 'c'), html), '3')
        self.assertRaises(RegexNotFoundError, ie._html_search_meta, 'z', html, None, fatal=True)
        self.assertRaises(RegexNotFoundError, ie._html_search_meta, ('z', 'x'), html, None, fatal=True)

    def test_search_nextjs_data(self):
        html = '''
        <!DOCTYPE html>
        <html>
        <head>
        <meta http-equiv="content-type" content=
        "text/html; charset=utf-8">
        <meta name="viewport" content="width=device-width">
        <title>Test _search_nextjs_data()</title>
        </head>
        <body>
        <div id="__next">
        <div style="background-color:#17171E" class="FU" dir="ltr">
        <div class="sc-93de261d-0 dyzzYE">
        <div>
        <header class="HD"></header>
        <main class="MN">
        <div style="height:0" class="HT0">
        <div style="width:NaN%" data-testid=
        "stream-container" class="WDN"></div>
        </div>
        </main>
        </div>
        <footer class="sc-6e5faf91-0 dEGaHS"></footer>
        </div>
        </div>
        </div>
        <script id="__NEXT_DATA__" type="application/json">
        {"props":{"pageProps":{"video":{"id":"testid"}}}}
        </script>
        </body>
        </html>
        '''
        search = self.ie._search_nextjs_data(html, 'testID')
        self.assertEqual(search['props']['pageProps']['video']['id'], 'testid')
        search = self.ie._search_nextjs_data(
            'no next.js data here, move along', 'testID', default={'status': 0})
        self.assertEqual(search['status'], 0)

    def test_search_nuxt_data(self):
        html = '''
        <!DOCTYPE html>
        <html>
        <head>
        <meta http-equiv="content-type" content=
        "text/html; charset=utf-8">
        <title>Nuxt.js Test Page</title>
        <meta name="viewport" content=
        "width=device-width, initial-scale=1">
        <meta data-hid="robots" name="robots" content="all">
        </head>
        <body class="BD">
        <div id="__layout">
        <h1 class="H1">Example heading</h1>
        <div class="IN">
        <p>Decoy text</p>
        </div>
        </div>
        <script>
        window.__NUXT__=(function(a,b,c,d,e,f,g,h){return {decoy:" default",data:[{track:{id:f,title:g}}]}}(null,null,"c",null,null,"testid","Nuxt.js title",null));
        </script>
        <script src="/_nuxt/a12345b.js" defer="defer"></script>
        </body>
        </html>
        '''
        search = self.ie._search_nuxt_data(html, 'testID')
        self.assertEqual(search['track']['id'], 'testid')

    def test_search_json_ld_realworld(self):
        # https://github.com/ytdl-org/youtube-dl/issues/23306
        expect_dict(
            self,
            self.ie._search_json_ld(r'''<script type="application/ld+json">
{
"@context": "http://schema.org/",
"@type": "VideoObject",
"name": "1 On 1 With Kleio",
"url": "https://www.eporner.com/hd-porn/xN49A1cT3eB/1-On-1-With-Kleio/",
"duration": "PT0H12M23S",
"thumbnailUrl": ["https://static-eu-cdn.eporner.com/thumbs/static4/7/78/780/780814/9_360.jpg", "https://imggen.eporner.com/780814/1920/1080/9.jpg"],
"contentUrl": "https://gvideo.eporner.com/xN49A1cT3eB/xN49A1cT3eB.mp4",
"embedUrl": "https://www.eporner.com/embed/xN49A1cT3eB/1-On-1-With-Kleio/",
"image": "https://static-eu-cdn.eporner.com/thumbs/static4/7/78/780/780814/9_360.jpg",
"width": "1920",
"height": "1080",
"encodingFormat": "mp4",
"bitrate": "6617kbps",
"isFamilyFriendly": "False",
"description": "Kleio Valentien",
"uploadDate": "2015-12-05T21:24:35+01:00",
"interactionStatistic": {
"@type": "InteractionCounter",
"interactionType": { "@type": "http://schema.org/WatchAction" },
"userInteractionCount": 1120958
},
"aggregateRating": {
"@type": "AggregateRating",
"ratingValue": "88",
"ratingCount": "630",
"bestRating": "100",
"worstRating": "0"
},
"actor": [{
"@type": "Person",
"name": "Kleio Valentien",
"url": "https://www.eporner.com/pornstar/kleio-valentien/"
}]}
</script>''', None),
            {
                'title': '1 On 1 With Kleio',
                'description': 'Kleio Valentien',
                'url': 'https://gvideo.eporner.com/xN49A1cT3eB/xN49A1cT3eB.mp4',
                'timestamp': 1449347075,
                'duration': 743.0,
                'view_count': 1120958,
                'width': 1920,
                'height': 1080,
            })

    def test_download_json(self):
        uri = encode_data_uri(b'{"foo": "blah"}', 'application/json')
        self.assertEqual(self.ie._download_json(uri, None), {'foo': 'blah'})
        uri = encode_data_uri(b'callback({"foo": "blah"})', 'application/javascript')
        self.assertEqual(self.ie._download_json(uri, None, transform_source=strip_jsonp), {'foo': 'blah'})
        uri = encode_data_uri(b'{"foo": invalid}', 'application/json')
        self.assertRaises(ExtractorError, self.ie._download_json, uri, None)
        self.assertEqual(self.ie._download_json(uri, None, fatal=False), None)

    def test_parse_html5_media_entries(self):
        # inline video tag
        expect_dict(
            self,
            self.ie._parse_html5_media_entries(
                'https://127.0.0.1/video.html',
                r'<html><video src="/vid.mp4" /></html>', None)[0],
            {
                'formats': [{
                    'url': 'https://127.0.0.1/vid.mp4',
                }],
            })

        # from https://www.r18.com/
        # with kpbs in label
        expect_dict(
            self,
            self.ie._parse_html5_media_entries(
                'https://www.r18.com/',
                r'''
                <video id="samplevideo_amateur" class="js-samplevideo video-js vjs-default-skin vjs-big-play-centered" controls preload="auto" width="400" height="225" poster="//pics.r18.com/digital/amateur/mgmr105/mgmr105jp.jpg">
                    <source id="video_source" src="https://awscc3001.r18.com/litevideo/freepv/m/mgm/mgmr105/mgmr105_sm_w.mp4" type="video/mp4" res="240" label="300kbps">
                    <source id="video_source" src="https://awscc3001.r18.com/litevideo/freepv/m/mgm/mgmr105/mgmr105_dm_w.mp4" type="video/mp4" res="480" label="1000kbps">
                    <source id="video_source" src="https://awscc3001.r18.com/litevideo/freepv/m/mgm/mgmr105/mgmr105_dmb_w.mp4" type="video/mp4" res="740" label="1500kbps">
                    <p>Your browser does not support the video tag.</p>
                </video>
                ''', None)[0],
            {
                'formats': [{
                    'url': 'https://awscc3001.r18.com/litevideo/freepv/m/mgm/mgmr105/mgmr105_sm_w.mp4',
                    'ext': 'mp4',
                    'format_id': '300kbps',
                    'height': 240,
                    'tbr': 300,
                }, {
                    'url': 'https://awscc3001.r18.com/litevideo/freepv/m/mgm/mgmr105/mgmr105_dm_w.mp4',
                    'ext': 'mp4',
                    'format_id': '1000kbps',
                    'height': 480,
                    'tbr': 1000,
                }, {
                    'url': 'https://awscc3001.r18.com/litevideo/freepv/m/mgm/mgmr105/mgmr105_dmb_w.mp4',
                    'ext': 'mp4',
                    'format_id': '1500kbps',
                    'height': 740,
                    'tbr': 1500,
                }],
                'thumbnail': '//pics.r18.com/digital/amateur/mgmr105/mgmr105jp.jpg'
            })

        # from https://www.csfd.cz/
        # with width and height
        expect_dict(
            self,
            self.ie._parse_html5_media_entries(
                'https://www.csfd.cz/',
                r'''
                <video width="770" height="328" preload="none" controls poster="https://img.csfd.cz/files/images/film/video/preview/163/344/163344118_748d20.png?h360" >
                    <source src="https://video.csfd.cz/files/videos/157/750/157750813/163327358_eac647.mp4" type="video/mp4" width="640" height="360">
                    <source src="https://video.csfd.cz/files/videos/157/750/157750813/163327360_3d2646.mp4" type="video/mp4" width="1280" height="720">
                    <source src="https://video.csfd.cz/files/videos/157/750/157750813/163327356_91f258.mp4" type="video/mp4" width="1920" height="1080">
                    <source src="https://video.csfd.cz/files/videos/157/750/157750813/163327359_962b4a.webm" type="video/webm" width="640" height="360">
                    <source src="https://video.csfd.cz/files/videos/157/750/157750813/163327361_6feee0.webm" type="video/webm" width="1280" height="720">
                    <source src="https://video.csfd.cz/files/videos/157/750/157750813/163327357_8ab472.webm" type="video/webm" width="1920" height="1080">
                    <track src="https://video.csfd.cz/files/subtitles/163/344/163344115_4c388b.srt" type="text/x-srt" kind="subtitles" srclang="cs" label="cs">
                </video>
                ''', None)[0],
            {
                'formats': [{
                    'url': 'https://video.csfd.cz/files/videos/157/750/157750813/163327358_eac647.mp4',
                    'ext': 'mp4',
                    'width': 640,
                    'height': 360,
                }, {
                    'url': 'https://video.csfd.cz/files/videos/157/750/157750813/163327360_3d2646.mp4',
                    'ext': 'mp4',
                    'width': 1280,
                    'height': 720,
                }, {
                    'url': 'https://video.csfd.cz/files/videos/157/750/157750813/163327356_91f258.mp4',
                    'ext': 'mp4',
                    'width': 1920,
                    'height': 1080,
                }, {
                    'url': 'https://video.csfd.cz/files/videos/157/750/157750813/163327359_962b4a.webm',
                    'ext': 'webm',
                    'width': 640,
                    'height': 360,
                }, {
                    'url': 'https://video.csfd.cz/files/videos/157/750/157750813/163327361_6feee0.webm',
                    'ext': 'webm',
                    'width': 1280,
                    'height': 720,
                }, {
                    'url': 'https://video.csfd.cz/files/videos/157/750/157750813/163327357_8ab472.webm',
                    'ext': 'webm',
                    'width': 1920,
                    'height': 1080,
                }],
                'subtitles': {
                    'cs': [{'url': 'https://video.csfd.cz/files/subtitles/163/344/163344115_4c388b.srt'}]
                },
                'thumbnail': 'https://img.csfd.cz/files/images/film/video/preview/163/344/163344118_748d20.png?h360'
            })

        # from https://tamasha.com/v/Kkdjw
        # with height in label
        expect_dict(
            self,
            self.ie._parse_html5_media_entries(
                'https://tamasha.com/v/Kkdjw',
                r'''
                <video crossorigin="anonymous">
                    <source src="https://s-v2.tamasha.com/statics/videos_file/19/8f/Kkdjw_198feff8577d0057536e905cce1fb61438dd64e0_n_240.mp4" type="video/mp4" label="AUTO" res="0"/>
                    <source src="https://s-v2.tamasha.com/statics/videos_file/19/8f/Kkdjw_198feff8577d0057536e905cce1fb61438dd64e0_n_240.mp4" type="video/mp4" label="240p" res="240"/>
                    <source src="https://s-v2.tamasha.com/statics/videos_file/20/00/Kkdjw_200041c66f657fc967db464d156eafbc1ed9fe6f_n_144.mp4" type="video/mp4" label="144p" res="144"/>
                </video>
                ''', None)[0],
            {
                'formats': [{
                    'url': 'https://s-v2.tamasha.com/statics/videos_file/19/8f/Kkdjw_198feff8577d0057536e905cce1fb61438dd64e0_n_240.mp4',
                }, {
                    'url': 'https://s-v2.tamasha.com/statics/videos_file/19/8f/Kkdjw_198feff8577d0057536e905cce1fb61438dd64e0_n_240.mp4',
                    'ext': 'mp4',
                    'format_id': '240p',
                    'height': 240,
                }, {
                    'url': 'https://s-v2.tamasha.com/statics/videos_file/20/00/Kkdjw_200041c66f657fc967db464d156eafbc1ed9fe6f_n_144.mp4',
                    'ext': 'mp4',
                    'format_id': '144p',
                    'height': 144,
                }]
            })

        # from https://www.directvnow.com
        # with data-src
        expect_dict(
            self,
            self.ie._parse_html5_media_entries(
                'https://www.directvnow.com',
                r'''
                <video id="vid1" class="header--video-masked active" muted playsinline>
                    <source data-src="https://cdn.directv.com/content/dam/dtv/prod/website_directvnow-international/videos/DTVN_hdr_HBO_v3.mp4" type="video/mp4" />
                </video>
                ''', None)[0],
            {
                'formats': [{
                    'ext': 'mp4',
                    'url': 'https://cdn.directv.com/content/dam/dtv/prod/website_directvnow-international/videos/DTVN_hdr_HBO_v3.mp4',
                }]
            })

        # from https://www.directvnow.com
        # with data-src
        expect_dict(
            self,
            self.ie._parse_html5_media_entries(
                'https://www.directvnow.com',
                r'''
                <video id="vid1" class="header--video-masked active" muted playsinline>
                    <source data-src="https://cdn.directv.com/content/dam/dtv/prod/website_directvnow-international/videos/DTVN_hdr_HBO_v3.mp4" type="video/mp4" />
                </video>
                ''', None)[0],
            {
                'formats': [{
                    'url': 'https://cdn.directv.com/content/dam/dtv/prod/website_directvnow-international/videos/DTVN_hdr_HBO_v3.mp4',
                    'ext': 'mp4',
                }]
            })

        # from https://www.klarna.com/uk/
        # with data-video-src
        expect_dict(
            self,
            self.ie._parse_html5_media_entries(
                'https://www.directvnow.com',
                r'''
                <video loop autoplay muted class="responsive-video block-kl__video video-on-medium">
                    <source src="" data-video-desktop data-video-src="https://www.klarna.com/uk/wp-content/uploads/sites/11/2019/01/KL062_Smooth3_0_DogWalking_5s_920x080_.mp4" type="video/mp4" />
                </video>
                ''', None)[0],
            {
                'formats': [{
                    'url': 'https://www.klarna.com/uk/wp-content/uploads/sites/11/2019/01/KL062_Smooth3_0_DogWalking_5s_920x080_.mp4',
                    'ext': 'mp4',
                }],
            })

        # from https://0000.studio/
        # with type attribute but without extension in URL
        expect_dict(
            self,
            self.ie._parse_html5_media_entries(
                'https://0000.studio',
                r'''
                <video src="https://d1ggyt9m8pwf3g.cloudfront.net/protected/ap-northeast-1:1864af40-28d5-492b-b739-b32314b1a527/archive/clip/838db6a7-8973-4cd6-840d-8517e4093c92"
                    controls="controls" type="video/mp4" preload="metadata" autoplay="autoplay" playsinline class="object-contain">
                </video>
                ''', None)[0],
            {
                'formats': [{
                    'url': 'https://d1ggyt9m8pwf3g.cloudfront.net/protected/ap-northeast-1:1864af40-28d5-492b-b739-b32314b1a527/archive/clip/838db6a7-8973-4cd6-840d-8517e4093c92',
                    'ext': 'mp4',
                }],
            })

    def test_extract_jwplayer_data_realworld(self):
        # from http://www.suffolk.edu/sjc/
        expect_dict(
            self,
            self.ie._extract_jwplayer_data(r'''
                <script type='text/javascript'>
                    jwplayer('my-video').setup({
                        file: 'rtmp://192.138.214.154/live/sjclive',
                        fallback: 'true',
                        width: '95%',
                        aspectratio: '16:9',
                        primary: 'flash',
                        mediaid:'XEgvuql4'
                    });
                </script>
                ''', None, require_title=False),
            {
                'id': 'XEgvuql4',
                'formats': [{
                    'url': 'rtmp://192.138.214.154/live/sjclive',
                    'ext': 'flv'
                }]
            })

        # from https://www.pornoxo.com/videos/7564/striptease-from-sexy-secretary/
        expect_dict(
            self,
            self.ie._extract_jwplayer_data(r'''
<script type="text/javascript">
    jwplayer("mediaplayer").setup({
        'videoid': "7564",
        'width': "100%",
        'aspectratio': "16:9",
        'stretching': "exactfit",
        'autostart': 'false',
        'flashplayer': "https://t04.vipstreamservice.com/jwplayer/v5.10/player.swf",
        'file': "https://cdn.pornoxo.com/key=MF+oEbaxqTKb50P-w9G3nA,end=1489689259,ip=104.199.146.27/ip=104.199.146.27/speed=6573765/buffer=3.0/2009-12/4b2157147afe5efa93ce1978e0265289c193874e02597.flv",
        'image': "https://t03.vipstreamservice.com/thumbs/pxo-full/2009-12/14/a4b2157147afe5efa93ce1978e0265289c193874e02597.flv-full-13.jpg",
        'filefallback': "https://cdn.pornoxo.com/key=9ZPsTR5EvPLQrBaak2MUGA,end=1489689259,ip=104.199.146.27/ip=104.199.146.27/speed=6573765/buffer=3.0/2009-12/m_4b2157147afe5efa93ce1978e0265289c193874e02597.mp4",
        'logo.hide': true,
        'skin': "https://t04.vipstreamservice.com/jwplayer/skin/modieus-blk.zip",
        'plugins': "https://t04.vipstreamservice.com/jwplayer/dock/dockableskinnableplugin.swf",
        'dockableskinnableplugin.piclink': "/index.php?key=ajax-videothumbsn&vid=7564&data=2009-12--14--4b2157147afe5efa93ce1978e0265289c193874e02597.flv--17370",
        'controlbar': 'bottom',
        'modes': [
            {type: 'flash', src: 'https://t04.vipstreamservice.com/jwplayer/v5.10/player.swf'}
        ],
        'provider': 'http'
    });
    //noinspection JSAnnotator
    invideo.setup({
        adsUrl: "/banner-iframe/?zoneId=32",
        adsUrl2: "",
        autostart: false
    });
</script>
            ''', 'dummy', require_title=False),
            {
                'thumbnail': 'https://t03.vipstreamservice.com/thumbs/pxo-full/2009-12/14/a4b2157147afe5efa93ce1978e0265289c193874e02597.flv-full-13.jpg',
                'formats': [{
                    'url': 'https://cdn.pornoxo.com/key=MF+oEbaxqTKb50P-w9G3nA,end=1489689259,ip=104.199.146.27/ip=104.199.146.27/speed=6573765/buffer=3.0/2009-12/4b2157147afe5efa93ce1978e0265289c193874e02597.flv',
                    'ext': 'flv'
                }]
            })

        # from http://www.indiedb.com/games/king-machine/videos
        expect_dict(
            self,
            self.ie._extract_jwplayer_data(r'''
<script>
jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/\/www.indiedb.com\/","displaytitle":false,"autostart":false,"repeat":false,"title":"king machine trailer 1","sharing":{"link":"http:\/\/www.indiedb.com\/games\/king-machine\/videos\/king-machine-trailer-1","code":"<iframe width=\"560\" height=\"315\" src=\"http:\/\/www.indiedb.com\/media\/iframe\/1522983\" frameborder=\"0\" allowfullscreen><\/iframe><br><a href=\"http:\/\/www.indiedb.com\/games\/king-machine\/videos\/king-machine-trailer-1\">king machine trailer 1 - Indie DB<\/a>"},"related":{"file":"http:\/\/rss.indiedb.com\/media\/recommended\/1522983\/feed\/rss.xml","dimensions":"160x120","onclick":"link"},"sources":[{"file":"http:\/\/cdn.dbolical.com\/cache\/videos\/games\/1\/50\/49678\/encode_mp4\/king-machine-trailer.mp4","label":"360p SD","default":"true"},{"file":"http:\/\/cdn.dbolical.com\/cache\/videos\/games\/1\/50\/49678\/encode720p_mp4\/king-machine-trailer.mp4","label":"720p HD"}],"image":"http:\/\/media.indiedb.com\/cache\/images\/games\/1\/50\/49678\/thumb_620x2000\/king-machine-trailer.mp4.jpg","advertising":{"client":"vast","tag":"http:\/\/ads.intergi.com\/adrawdata\/3.0\/5205\/4251742\/0\/1013\/ADTECH;cors=yes;width=560;height=315;referring_url=http:\/\/www.indiedb.com\/games\/king-machine\/videos\/king-machine-trailer-1;content_url=http:\/\/www.indiedb.com\/games\/king-machine\/videos\/king-machine-trailer-1;media_id=1522983;title=king+machine+trailer+1;device=__DEVICE__;model=__MODEL__;os=Windows+OS;osversion=__OSVERSION__;ua=__UA__;ip=109.171.17.81;uniqueid=1522983;tags=__TAGS__;number=58cac25928151;time=1489683033"},"width":620,"height":349}).once("play", function(event) { videoAnalytics("play"); }).once("complete", function(event) { videoAnalytics("completed"); });
</script>
            ''', 'dummy'),
            {
                'title': 'king machine trailer 1',
                'thumbnail': 'http://media.indiedb.com/cache/images/games/1/50/49678/thumb_620x2000/king-machine-trailer.mp4.jpg',
                'formats': [{
                    'url': 'http://cdn.dbolical.com/cache/videos/games/1/50/49678/encode_mp4/king-machine-trailer.mp4',
                    'height': 360,
                    'ext': 'mp4'
                }, {
                    'url': 'http://cdn.dbolical.com/cache/videos/games/1/50/49678/encode720p_mp4/king-machine-trailer.mp4',
                    'height': 720,
                    'ext': 'mp4'
                }]
            })

    def test_parse_m3u8_formats(self):
        _TEST_CASES = [
            (
                # https://github.com/ytdl-org/youtube-dl/issues/11507
                # http://pluzz.francetv.fr/videos/le_ministere.html
                'pluzz_francetv_11507',
                'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/master.m3u8?caption=2017%2F16%2F156589847-1492488987.m3u8%3Afra%3AFrancais&audiotrack=0%3Afra%3AFrancais',
                [{
                    'url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/index_0_av.m3u8?null=0',
                    'manifest_url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/master.m3u8?caption=2017%2F16%2F156589847-1492488987.m3u8%3Afra%3AFrancais&audiotrack=0%3Afra%3AFrancais',
                    'ext': 'mp4',
                    'format_id': '180',
                    'protocol': 'm3u8',
                    'acodec': 'mp4a.40.2',
                    'vcodec': 'avc1.66.30',
                    'tbr': 180,
                    'width': 256,
                    'height': 144,
                }, {
                    'url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/index_1_av.m3u8?null=0',
                    'manifest_url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/master.m3u8?caption=2017%2F16%2F156589847-1492488987.m3u8%3Afra%3AFrancais&audiotrack=0%3Afra%3AFrancais',
                    'ext': 'mp4',
                    'format_id': '303',
                    'protocol': 'm3u8',
                    'acodec': 'mp4a.40.2',
                    'vcodec': 'avc1.66.30',
                    'tbr': 303,
                    'width': 320,
                    'height': 180,
                }, {
                    'url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/index_2_av.m3u8?null=0',
                    'manifest_url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/master.m3u8?caption=2017%2F16%2F156589847-1492488987.m3u8%3Afra%3AFrancais&audiotrack=0%3Afra%3AFrancais',
                    'ext': 'mp4',
                    'format_id': '575',
                    'protocol': 'm3u8',
                    'acodec': 'mp4a.40.2',
                    'vcodec': 'avc1.66.30',
                    'tbr': 575,
                    'width': 512,
                    'height': 288,
                }, {
                    'url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/index_3_av.m3u8?null=0',
                    'manifest_url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/master.m3u8?caption=2017%2F16%2F156589847-1492488987.m3u8%3Afra%3AFrancais&audiotrack=0%3Afra%3AFrancais',
                    'ext': 'mp4',
                    'format_id': '831',
                    'protocol': 'm3u8',
                    'acodec': 'mp4a.40.2',
                    'vcodec': 'avc1.77.30',
                    'tbr': 831,
                    'width': 704,
                    'height': 396,
                }, {
                    'url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/index_4_av.m3u8?null=0',
                    'manifest_url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/master.m3u8?caption=2017%2F16%2F156589847-1492488987.m3u8%3Afra%3AFrancais&audiotrack=0%3Afra%3AFrancais',
                    'ext': 'mp4',
                    'protocol': 'm3u8',
                    'format_id': '1467',
                    'acodec': 'mp4a.40.2',
                    'vcodec': 'avc1.77.30',
                    'tbr': 1467,
                    'width': 1024,
                    'height': 576,
                }]
            ),
            (
                # https://github.com/ytdl-org/youtube-dl/issues/11995
                # http://teamcoco.com/video/clueless-gamer-super-bowl-for-honor
                'teamcoco_11995',
                'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/main.m3u8',
                [{
                    'url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/hls/CONAN_020217_Highlight_show-audio-160k_v4.m3u8',
                    'manifest_url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/main.m3u8',
                    'ext': 'mp4',
                    'format_id': 'audio-0-Default',
                    'protocol': 'm3u8',
                    'vcodec': 'none',
                }, {
                    'url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/hls/CONAN_020217_Highlight_show-audio-64k_v4.m3u8',
                    'manifest_url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/main.m3u8',
                    'ext': 'mp4',
                    'format_id': 'audio-1-Default',
                    'protocol': 'm3u8',
                    'vcodec': 'none',
                }, {
                    'url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/hls/CONAN_020217_Highlight_show-audio-64k_v4.m3u8',
                    'manifest_url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/main.m3u8',
                    'ext': 'mp4',
                    'format_id': '71',
                    'protocol': 'm3u8',
                    'acodec': 'mp4a.40.5',
                    'vcodec': 'none',
                    'tbr': 71,
                }, {
                    'url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/hls/CONAN_020217_Highlight_show-400k_v4.m3u8',
                    'manifest_url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/main.m3u8',
                    'ext': 'mp4',
                    'format_id': '413',
                    'protocol': 'm3u8',
                    'acodec': 'none',
                    'vcodec': 'avc1.42001e',
                    'tbr': 413,
                    'width': 400,
                    'height': 224,
                }, {
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
true
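The (truncated) test file above uses one pattern throughout: instantiate a do-nothing InfoExtractor subclass with FakeYDL and drive its protected helper methods directly on literal HTML. A minimal sketch with a made-up snippet:

# Driving an InfoExtractor helper directly, as the test does.
from test.helper import FakeYDL
from youtube_dl.extractor.common import InfoExtractor


class DummyIE(InfoExtractor):
    pass


ie = DummyIE(FakeYDL())
html = '<meta property="og:title" content="Foo"/>'
assert ie._og_search_title(html) == 'Foo'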
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_traversal.py
test/test_traversal.py
#!/usr/bin/env python
# coding: utf-8

from __future__ import unicode_literals

# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import itertools
import re

from youtube_dl.traversal import (
    dict_get,
    get_first,
    require,
    subs_list_to_dict,
    T,
    traverse_obj,
    unpack,
    value,
)
from youtube_dl.compat import (
    compat_chr as chr,
    compat_etree_fromstring,
    compat_http_cookies,
    compat_map as map,
    compat_str,
    compat_zip as zip,
)
from youtube_dl.utils import (
    determine_ext,
    ExtractorError,
    int_or_none,
    join_nonempty,
    str_or_none,
)

_TEST_DATA = {
    100: 100,
    1.2: 1.2,
    'str': 'str',
    'None': None,
    '...': Ellipsis,
    'urls': [
        {'index': 0, 'url': 'https://www.example.com/0'},
        {'index': 1, 'url': 'https://www.example.com/1'},
    ],
    'data': (
        {'index': 2},
        {'index': 3},
    ),
    'dict': {},
}

if sys.version_info < (3, 0):
    class _TestCase(unittest.TestCase):

        def assertCountEqual(self, *args, **kwargs):
            return self.assertItemsEqual(*args, **kwargs)
else:
    _TestCase = unittest.TestCase


class TestTraversal(_TestCase):

    def assertMaybeCountEqual(self, *args, **kwargs):
        if sys.version_info < (3, 7):
            # random dict order
            return self.assertCountEqual(*args, **kwargs)
        else:
            return self.assertEqual(*args, **kwargs)

    def test_traverse_obj(self):
        # instant compat
        str = compat_str

        # define a pukka Iterable
        def iter_range(stop):
            for from_ in range(stop):
                yield from_

        # Test base functionality
        self.assertEqual(traverse_obj(_TEST_DATA, ('str',)), 'str',
                         msg='allow tuple path')
        self.assertEqual(traverse_obj(_TEST_DATA, ['str']), 'str',
                         msg='allow list path')
        self.assertEqual(traverse_obj(_TEST_DATA, (value for value in ("str",))), 'str',
                         msg='allow iterable path')
        self.assertEqual(traverse_obj(_TEST_DATA, 'str'), 'str',
                         msg='single items should be treated as a path')
        self.assertEqual(traverse_obj(_TEST_DATA, None), _TEST_DATA)
        self.assertEqual(traverse_obj(_TEST_DATA, 100), 100)
        self.assertEqual(traverse_obj(_TEST_DATA, 1.2), 1.2)

        # Test Ellipsis behavior
        self.assertCountEqual(traverse_obj(_TEST_DATA, Ellipsis),
                              (item for item in _TEST_DATA.values() if item not in (None, {})),
                              msg='`...` should give all non-discarded values')
        self.assertCountEqual(traverse_obj(_TEST_DATA, ('urls', 0, Ellipsis)),
                              _TEST_DATA['urls'][0].values(),
                              msg='`...` selection for dicts should select all values')
        self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, Ellipsis, 'url')),
                         ['https://www.example.com/0', 'https://www.example.com/1'],
                         msg='nested `...` queries should work')
        self.assertCountEqual(traverse_obj(_TEST_DATA, (Ellipsis, Ellipsis, 'index')), iter_range(4),
                              msg='`...` query result should be flattened')
        self.assertEqual(traverse_obj(iter(range(4)), Ellipsis), list(range(4)),
                         msg='`...` should accept iterables')

        # Test function as key
        self.assertEqual(traverse_obj(_TEST_DATA, lambda x, y: x == 'urls' and isinstance(y, list)),
                         [_TEST_DATA['urls']],
                         msg='function as query key should perform a filter based on (key, value)')
        self.assertCountEqual(traverse_obj(_TEST_DATA, lambda _, x: isinstance(x[0], str)), set(('str',)),
                              msg='exceptions in the query function should be caught')
        self.assertEqual(traverse_obj(iter(range(4)), lambda _, x: x % 2 == 0), [0, 2],
                         msg='function key should accept iterables')
        if __debug__:
            with self.assertRaises(Exception, msg='Wrong function signature should raise in debug'):
                traverse_obj(_TEST_DATA, lambda a: Ellipsis)
            with self.assertRaises(Exception, msg='Wrong function signature should raise in debug'):
                traverse_obj(_TEST_DATA, lambda a, b, c: Ellipsis)

        # Test set as key (transformation/type, like `expected_type`)
        self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str.upper), )), ['STR'],
                         msg='Function in set should be a transformation')
        self.assertEqual(traverse_obj(_TEST_DATA, ('fail', T(lambda _: 'const'))), 'const',
                         msg='Function in set should always be called')
        self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str))), ['str'],
                         msg='Type in set should be a type filter')
        self.assertMaybeCountEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str, int))), [100, 'str'],
                                   msg='Multiple types in set should be a type filter')
        self.assertEqual(traverse_obj(_TEST_DATA, T(dict)), _TEST_DATA,
                         msg='A single set should be wrapped into a path')
        self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str.upper))), ['STR'],
                         msg='Transformation function should not raise')
        self.assertMaybeCountEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str_or_none))),
                                   [item for item in map(str_or_none, _TEST_DATA.values()) if item is not None],
                                   msg='Function in set should be a transformation')
        if __debug__:
            with self.assertRaises(Exception, msg='Sets with length != 1 should raise in debug'):
                traverse_obj(_TEST_DATA, set())
            with self.assertRaises(Exception, msg='Sets with length != 1 should raise in debug'):
                traverse_obj(_TEST_DATA, set((str.upper, str)))

        # Test `slice` as a key
        _SLICE_DATA = [0, 1, 2, 3, 4]
        self.assertEqual(traverse_obj(_TEST_DATA, ('dict', slice(1))), None,
                         msg='slice on a dictionary should not throw')
        self.assertEqual(traverse_obj(_SLICE_DATA, slice(1)), _SLICE_DATA[:1],
                         msg='slice key should apply slice to sequence')
        self.assertEqual(traverse_obj(_SLICE_DATA, slice(1, 2)), _SLICE_DATA[1:2],
                         msg='slice key should apply slice to sequence')
        self.assertEqual(traverse_obj(_SLICE_DATA, slice(1, 4, 2)), _SLICE_DATA[1:4:2],
                         msg='slice key should apply slice to sequence')

        # Test alternative paths
        self.assertEqual(traverse_obj(_TEST_DATA, 'fail', 'str'), 'str',
                         msg='multiple `paths` should be treated as alternative paths')
        self.assertEqual(traverse_obj(_TEST_DATA, 'str', 100), 'str',
                         msg='alternatives should exit early')
        self.assertEqual(traverse_obj(_TEST_DATA, 'fail', 'fail'), None,
                         msg='alternatives should return `default` if exhausted')
        self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, 'fail'), 100), 100,
                         msg='alternatives should track their own branching return')
        self.assertEqual(traverse_obj(_TEST_DATA, ('dict', Ellipsis), ('data', Ellipsis)), list(_TEST_DATA['data']),
                         msg='alternatives on empty objects should search further')

        # Test branch and path nesting
        self.assertEqual(traverse_obj(_TEST_DATA, ('urls', (3, 0), 'url')), ['https://www.example.com/0'],
                         msg='tuple as key should be treated as branches')
        self.assertEqual(traverse_obj(_TEST_DATA, ('urls', [3, 0], 'url')), ['https://www.example.com/0'],
                         msg='list as key should be treated as branches')
        self.assertEqual(traverse_obj(_TEST_DATA, ('urls', ((1, 'fail'), (0, 'url')))), ['https://www.example.com/0'],
                         msg='double nesting in path should be treated as paths')
        self.assertEqual(traverse_obj(['0', [1, 2]], [(0, 1), 0]), [1],
                         msg='do not fail early on branching')
        self.assertCountEqual(traverse_obj(_TEST_DATA, ('urls', ((1, ('fail', 'url')), (0, 'url')))),
                              ['https://www.example.com/0', 'https://www.example.com/1'],
                              msg='triple nesting in path should be treated as branches')
        self.assertEqual(traverse_obj(_TEST_DATA, ('urls', ('fail', (Ellipsis, 'url')))),
                         ['https://www.example.com/0', 'https://www.example.com/1'],
                         msg='ellipsis as branch path start gets flattened')

        # Test dictionary as key
        self.assertEqual(traverse_obj(_TEST_DATA, {0: 100, 1: 1.2}), {0: 100, 1: 1.2},
                         msg='dict key should result in a dict with the same keys')
        self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', 0, 'url')}),
                         {0: 'https://www.example.com/0'},
                         msg='dict key should allow paths')
        self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', (3, 0), 'url')}),
                         {0: ['https://www.example.com/0']},
                         msg='tuple in dict path should be treated as branches')
        self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', ((1, 'fail'), (0, 'url')))}),
                         {0: ['https://www.example.com/0']},
                         msg='double nesting in dict path should be treated as paths')
        self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', ((1, ('fail', 'url')), (0, 'url')))}),
                         {0: ['https://www.example.com/1', 'https://www.example.com/0']},
                         msg='triple nesting in dict path should be treated as branches')
        self.assertEqual(traverse_obj(_TEST_DATA, {0: 'fail'}), {},
                         msg='remove `None` values when top level dict key fails')
        self.assertEqual(traverse_obj(_TEST_DATA, {0: 'fail'}, default=Ellipsis), {0: Ellipsis},
                         msg='use `default` if key fails and `default`')
        self.assertEqual(traverse_obj(_TEST_DATA, {0: 'dict'}), {},
                         msg='remove empty values when dict key')
        self.assertEqual(traverse_obj(_TEST_DATA, {0: 'dict'}, default=Ellipsis), {0: Ellipsis},
                         msg='use `default` when dict key and a default')
        self.assertEqual(traverse_obj(_TEST_DATA, {0: {0: 'fail'}}), {},
                         msg='remove empty values when nested dict key fails')
        self.assertEqual(traverse_obj(None, {0: 'fail'}), {},
                         msg='default to dict if pruned')
        self.assertEqual(traverse_obj(None, {0: 'fail'}, default=Ellipsis), {0: Ellipsis},
                         msg='default to dict if pruned and default is given')
        self.assertEqual(traverse_obj(_TEST_DATA, {0: {0: 'fail'}}, default=Ellipsis), {0: {0: Ellipsis}},
                         msg='use nested `default` when nested dict key fails and `default`')
        self.assertEqual(traverse_obj(_TEST_DATA, {0: ('dict', Ellipsis)}), {},
                         msg='remove key if branch in dict key not successful')

        # Testing default parameter behavior
        _DEFAULT_DATA = {'None': None, 'int': 0, 'list': []}
        self.assertEqual(traverse_obj(_DEFAULT_DATA, 'fail'), None,
                         msg='default value should be `None`')
        self.assertEqual(traverse_obj(_DEFAULT_DATA, 'fail', 'fail', default=Ellipsis), Ellipsis,
                         msg='chained fails should result in default')
        self.assertEqual(traverse_obj(_DEFAULT_DATA, 'None', 'int'), 0,
                         msg='should not short circuit on `None`')
        self.assertEqual(traverse_obj(_DEFAULT_DATA, 'fail', default=1), 1,
                         msg='invalid dict key should result in `default`')
        self.assertEqual(traverse_obj(_DEFAULT_DATA, 'None', default=1), 1,
                         msg='`None` is a deliberate sentinel and should become `default`')
        self.assertEqual(traverse_obj(_DEFAULT_DATA, ('list', 10)), None,
                         msg='`IndexError` should result in `default`')
        self.assertEqual(traverse_obj(_DEFAULT_DATA, (Ellipsis, 'fail'), default=1), 1,
                         msg='if branched but not successful return `default` if defined, not `[]`')
        self.assertEqual(traverse_obj(_DEFAULT_DATA, (Ellipsis, 'fail'), default=None), None,
                         msg='if branched but not successful return `default` even if `default` is `None`')
        self.assertEqual(traverse_obj(_DEFAULT_DATA, (Ellipsis, 'fail')), [],
                         msg='if branched but not successful return `[]`, not `default`')
        self.assertEqual(traverse_obj(_DEFAULT_DATA, ('list', Ellipsis)), [],
                         msg='if branched but object is empty return `[]`, not `default`')
        self.assertEqual(traverse_obj(None, Ellipsis), [],
                         msg='if branched but object is `None` return `[]`, not `default`')
        self.assertEqual(traverse_obj({0: None}, (0, Ellipsis)), [],
                         msg='if branched but state is `None` return `[]`, not `default`')

        branching_paths = [
            ('fail', Ellipsis),
            (Ellipsis, 'fail'),
            100 * ('fail',) + (Ellipsis,),
            (Ellipsis,) + 100 * ('fail',),
        ]
        for branching_path in branching_paths:
            self.assertEqual(traverse_obj({}, branching_path), [],
                             msg='if branched but state is `None`, return `[]` (not `default`)')
            self.assertEqual(traverse_obj({}, 'fail', branching_path), [],
                             msg='if branching in last alternative and previous did not match, return `[]` (not `default`)')
            self.assertEqual(traverse_obj({0: 'x'}, 0, branching_path), 'x',
                             msg='if branching in last alternative and previous did match, return single value')
            self.assertEqual(traverse_obj({0: 'x'}, branching_path, 0), 'x',
                             msg='if branching in first alternative and non-branching path does match, return single value')
            self.assertEqual(traverse_obj({}, branching_path, 'fail'), None,
                             msg='if branching in first alternative and non-branching path does not match, return `default`')

        # Testing expected_type behavior
        _EXPECTED_TYPE_DATA = {'str': 'str', 'int': 0}
        self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=str), 'str',
                         msg='accept matching `expected_type` type')
        self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=int), None,
                         msg='reject non-matching `expected_type` type')
        self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'int', expected_type=lambda x: str(x)), '0',
                         msg='transform type using type function')
        self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=lambda _: 1 / 0), None,
                         msg='wrap expected_type function in try_call')
        self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, Ellipsis, expected_type=str), ['str'],
                         msg='eliminate items that expected_type fails on')
        self.assertEqual(traverse_obj(_TEST_DATA, {0: 100, 1: 1.2}, expected_type=int), {0: 100},
                         msg='type as expected_type should filter dict values')
        self.assertEqual(traverse_obj(_TEST_DATA, {0: 100, 1: 1.2, 2: 'None'}, expected_type=str_or_none),
                         {0: '100', 1: '1.2'},
                         msg='function as expected_type should transform dict values')
        self.assertEqual(traverse_obj(_TEST_DATA, ({0: 1.2}, 0, set((int_or_none,))), expected_type=int), 1,
                         msg='expected_type should not filter non-final dict values')
        self.assertEqual(traverse_obj(_TEST_DATA, {0: {0: 100, 1: 'str'}}, expected_type=int), {0: {0: 100}},
                         msg='expected_type should transform deep dict values')
        self.assertEqual(traverse_obj(_TEST_DATA, [({0: '...'}, {0: '...'})], expected_type=type(Ellipsis)),
                         [{0: Ellipsis}, {0: Ellipsis}],
                         msg='expected_type should transform branched dict values')
        self.assertEqual(traverse_obj({1: {3: 4}}, [(1, 2), 3], expected_type=int), [4],
                         msg='expected_type regression for type matching in tuple branching')
        self.assertEqual(traverse_obj(_TEST_DATA, ['data', Ellipsis], expected_type=int), [],
                         msg='expected_type regression for type matching in dict result')

        # Test get_all behavior
        _GET_ALL_DATA = {'key': [0, 1, 2]}
        self.assertEqual(traverse_obj(_GET_ALL_DATA, ('key', Ellipsis), get_all=False), 0,
                         msg='if not `get_all`, return only first matching value')
        self.assertEqual(traverse_obj(_GET_ALL_DATA, Ellipsis, get_all=False), [0, 1, 2],
                         msg='do not overflatten if not `get_all`')

        # Test casesense behavior
        _CASESENSE_DATA = {
            'KeY': 'value0',
            0: {
                'KeY': 'value1',
                0: {'KeY': 'value2'},
            },
            # FULLWIDTH LATIN CAPITAL LETTER K
            '\uff2bey': 'value3',
        }
        self.assertEqual(traverse_obj(_CASESENSE_DATA, 'key'), None,
                         msg='dict keys should be case sensitive unless `casesense`')
        self.assertEqual(traverse_obj(_CASESENSE_DATA, 'keY', casesense=False), 'value0',
                         msg='allow non matching key case if `casesense`')
        self.assertEqual(traverse_obj(_CASESENSE_DATA, '\uff4bey',  # FULLWIDTH LATIN SMALL LETTER K
                                      casesense=False), 'value3',
                         msg='allow non matching Unicode key case if `casesense`')
        self.assertEqual(traverse_obj(_CASESENSE_DATA, (0, ('keY',)), casesense=False), ['value1'],
                         msg='allow non matching key case in branch if `casesense`')
        self.assertEqual(traverse_obj(_CASESENSE_DATA, (0, ((0, 'keY'),)), casesense=False), ['value2'],
                         msg='allow non matching key case in branch path if `casesense`')

        # Test traverse_string behavior
        _TRAVERSE_STRING_DATA = {'str': 'str', 1.2: 1.2}
        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', 0)), None,
                         msg='do not traverse into string if not `traverse_string`')
        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', 0), _traverse_string=True), 's',
                         msg='traverse into string if `traverse_string`')
        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, (1.2, 1), _traverse_string=True), '.',
                         msg='traverse into converted data if `traverse_string`')
        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', Ellipsis), _traverse_string=True), 'str',
                         msg='`...` should result in string (same value) if `traverse_string`')
        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', slice(0, None, 2)), _traverse_string=True), 'sr',
                         msg='`slice` should result in string if `traverse_string`')
        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', lambda i, v: i or v == 's'), _traverse_string=True), 'str',
                         msg='function should result in string if `traverse_string`')
        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', (0, 2)), _traverse_string=True), ['s', 'r'],
                         msg='branching should result in list if `traverse_string`')
        self.assertEqual(traverse_obj({}, (0, Ellipsis), _traverse_string=True), [],
                         msg='branching should result in list if `traverse_string`')
        self.assertEqual(traverse_obj({}, (0, lambda x, y: True), _traverse_string=True), [],
                         msg='branching should result in list if `traverse_string`')
        self.assertEqual(traverse_obj({}, (0, slice(1)), _traverse_string=True), [],
                         msg='branching should result in list if `traverse_string`')

        # Test re.Match as input obj
        mobj = re.match(r'^0(12)(?P<group>3)(4)?$', '0123')
        self.assertEqual(traverse_obj(mobj, Ellipsis), [x for x in mobj.groups() if x is not None],
                         msg='`...` on a `re.Match` should give its `groups()`')
        self.assertEqual(traverse_obj(mobj, lambda k, _: k in (0, 2)), ['0123', '3'],
                         msg='function on a `re.Match` should give groupno, value starting at 0')
        self.assertEqual(traverse_obj(mobj, 'group'), '3',
                         msg='str key on a `re.Match` should give group with that name')
        self.assertEqual(traverse_obj(mobj, 2), '3',
                         msg='int key on a `re.Match` should give group with that name')
        self.assertEqual(traverse_obj(mobj, 'gRoUp', casesense=False), '3',
                         msg='str key on a `re.Match` should respect casesense')
        self.assertEqual(traverse_obj(mobj, 'fail'), None,
                         msg='failing str key on a `re.Match` should return `default`')
        self.assertEqual(traverse_obj(mobj, 'gRoUpS', casesense=False), None,
                         msg='failing str key on a `re.Match` should return `default`')
        self.assertEqual(traverse_obj(mobj, 8), None,
                         msg='failing int key on a `re.Match` should return `default`')
        self.assertEqual(traverse_obj(mobj, lambda k, _: k in (0, 'group')), ['0123', '3'],
                         msg='function on a `re.Match` should give group name as well')

        # Test xml.etree.ElementTree.Element as input obj
        etree = compat_etree_fromstring('''<?xml version="1.0"?>
        <data>
            <country name="Liechtenstein">
                <rank>1</rank>
                <year>2008</year>
                <gdppc>141100</gdppc>
                <neighbor name="Austria" direction="E"/>
                <neighbor name="Switzerland" direction="W"/>
            </country>
            <country name="Singapore">
                <rank>4</rank>
                <year>2011</year>
                <gdppc>59900</gdppc>
                <neighbor name="Malaysia" direction="N"/>
            </country>
            <country name="Panama">
                <rank>68</rank>
                <year>2011</year>
                <gdppc>13600</gdppc>
                <neighbor name="Costa Rica" direction="W"/>
                <neighbor name="Colombia" direction="E"/>
            </country>
        </data>''')
        self.assertEqual(traverse_obj(etree, ''), etree,
                         msg='empty str key should return the element itself')
        self.assertEqual(traverse_obj(etree, 'country'), list(etree),
                         msg='str key should return all children with that tag name')
        self.assertEqual(traverse_obj(etree, Ellipsis), list(etree),
                         msg='`...` as key should return all children')
        self.assertEqual(traverse_obj(etree, lambda _, x: x[0].text == '4'), [etree[1]],
                         msg='function as key should get element as value')
        self.assertEqual(traverse_obj(etree, lambda i, _: i == 1), [etree[1]],
                         msg='function as key should get index as key')
        self.assertEqual(traverse_obj(etree, 0), etree[0],
                         msg='int key should return the nth child')
        self.assertEqual(traverse_obj(etree, './/neighbor/@name'),
                         ['Austria', 'Switzerland', 'Malaysia', 'Costa Rica', 'Colombia'],
                         msg='`@<attribute>` at end of path should give that attribute')
        self.assertEqual(traverse_obj(etree, '//neighbor/@fail'), [None, None, None, None, None],
                         msg='`@<nonexistent>` at end of path should give `None`')
        self.assertEqual(traverse_obj(etree, ('//neighbor/@', 2)), {'name': 'Malaysia', 'direction': 'N'},
                         msg='`@` should give the full attribute dict')
        self.assertEqual(traverse_obj(etree, '//year/text()'), ['2008', '2011', '2011'],
                         msg='`text()` at end of path should give the inner text')
        self.assertEqual(traverse_obj(etree, '//*[@direction]/@direction'), ['E', 'W', 'N', 'W', 'E'],
                         msg='full python xpath features should be supported')
        self.assertEqual(traverse_obj(etree, (0, '@name')), 'Liechtenstein',
                         msg='special transformations should act on current element')
        self.assertEqual(traverse_obj(etree, ('country', 0, Ellipsis, 'text()', T(int_or_none))), [1, 2008, 141100],
                         msg='special transformations should act on current element')

    def test_traversal_unbranching(self):
        self.assertEqual(traverse_obj(_TEST_DATA, [(100, 1.2), all]), [100, 1.2],
                         msg='`all` should give all results as list')
        self.assertEqual(traverse_obj(_TEST_DATA, [(100, 1.2), any]), 100,
                         msg='`any` should give the first result')
        self.assertEqual(traverse_obj(_TEST_DATA, [100, all]), [100],
                         msg='`all` should give list if non branching')
        self.assertEqual(traverse_obj(_TEST_DATA, [100, any]), 100,
                         msg='`any` should give single item if non branching')
        self.assertEqual(traverse_obj(_TEST_DATA, [('dict', 'None', 100), all]), [100],
                         msg='`all` should filter `None` and empty dict')
        self.assertEqual(traverse_obj(_TEST_DATA, [('dict', 'None', 100), any]), 100,
                         msg='`any` should filter `None` and empty dict')
        self.assertEqual(traverse_obj(_TEST_DATA, [{
            'all': [('dict', 'None', 100, 1.2), all],
            'any': [('dict', 'None', 100, 1.2), any],
        }]), {'all': [100, 1.2], 'any': 100},
            msg='`all`/`any` should apply to each dict path separately')
        self.assertEqual(traverse_obj(_TEST_DATA, [{
            'all': [('dict', 'None', 100, 1.2), all],
            'any': [('dict', 'None', 100, 1.2), any],
        }], get_all=False), {'all': [100, 1.2], 'any': 100},
            msg='`all`/`any` should apply to dict regardless of `get_all`')
        self.assertIs(traverse_obj(_TEST_DATA, [('dict', 'None', 100, 1.2), all, T(float)]), None,
                      msg='`all` should reset branching status')
        self.assertIs(traverse_obj(_TEST_DATA, [('dict', 'None', 100, 1.2), any, T(float)]), None,
                      msg='`any` should reset branching status')
        self.assertEqual(traverse_obj(_TEST_DATA, [('dict', 'None', 100, 1.2), all, Ellipsis, T(float)]), [1.2],
                         msg='`all` should allow further branching')
        self.assertEqual(traverse_obj(_TEST_DATA, [('dict', 'None', 'urls', 'data'), any, Ellipsis, 'index']), [0, 1],
                         msg='`any` should allow further branching')

    def test_traversal_morsel(self):
        morsel = compat_http_cookies.Morsel()
        # SameSite added in Py3.8, breaks .update for 3.5-3.7
        # Similarly Partitioned, Py3.14, thx Grub4k
        values = dict(zip(morsel, map(chr, itertools.count(ord('a')))))
        morsel.set(str('item_key'), 'item_value', 'coded_value')
        morsel.update(values)
        values.update({
            'key': str('item_key'),
            'value': 'item_value',
        }),
        values = dict((str(k), v) for k, v in values.items())

        for key, val in values.items():
            self.assertEqual(traverse_obj(morsel, key), val,
                             msg='Morsel should provide access to all values')
        values = list(values.values())
        self.assertMaybeCountEqual(traverse_obj(morsel, Ellipsis), values,
                                   msg='`...` should yield all values')
        self.assertMaybeCountEqual(traverse_obj(morsel, lambda k, v: True), values,
                                   msg='function key should yield all values')
        self.assertIs(traverse_obj(morsel, [(None,), any]), morsel,
                      msg='Morsel should not be implicitly changed to dict on usage')

    def test_traversal_filter(self):
        data = [None, False, True, 0, 1, 0.0, 1.1, '', 'str', {}, {0: 0}, [], [1]]

        self.assertEqual(
            traverse_obj(data, (Ellipsis, filter)),
            [True, 1, 1.1, 'str', {0: 0}, [1]],
            '`filter` should filter falsy values')


class TestTraversalHelpers(_TestCase):
    def test_traversal_require(self):
        with self.assertRaises(ExtractorError, msg='Missing `value` should raise'):
            traverse_obj(_TEST_DATA, ('None', T(require('value'))))
        self.assertEqual(
            traverse_obj(_TEST_DATA, ('str', T(require('value')))),
            'str',
            '`require` should pass through non-`None` values')

    def test_subs_list_to_dict(self):
        self.assertEqual(traverse_obj([
            {'name': 'de', 'url': 'https://example.com/subs/de.vtt'},
            {'name': 'en', 'url': 'https://example.com/subs/en1.ass'},
            {'name': 'en', 'url': 'https://example.com/subs/en2.ass'},
        ], [Ellipsis, {
            'id': 'name',
            'url': 'url',
        }, all, T(subs_list_to_dict)]), {
            'de': [{'url': 'https://example.com/subs/de.vtt'}],
            'en': [
                {'url': 'https://example.com/subs/en1.ass'},
                {'url': 'https://example.com/subs/en2.ass'},
            ],
        }, 'function should build subtitle dict from list of subtitles')
        self.assertEqual(traverse_obj([
            {'name': 'de', 'url': 'https://example.com/subs/de.ass'},
            {'name': 'de'},
            {'name': 'en', 'content': 'content'},
            {'url': 'https://example.com/subs/en'},
        ], [Ellipsis, {
            'id': 'name',
            'data': 'content',
            'url': 'url',
        }, all, T(subs_list_to_dict(lang=None))]), {
            'de': [{'url': 'https://example.com/subs/de.ass'}],
            'en': [{'data': 'content'}],
        }, 'subs with mandatory items missing should be filtered')
        self.assertEqual(traverse_obj([
            {'url': 'https://example.com/subs/de.ass', 'name': 'de'},
            {'url': 'https://example.com/subs/en', 'name': 'en'},
        ], [Ellipsis, {
            'id': 'name',
            'ext': ['url', T(determine_ext(default_ext=None))],
            'url': 'url',
        }, all, T(subs_list_to_dict(ext='ext'))]), {
            'de': [{'url': 'https://example.com/subs/de.ass', 'ext': 'ass'}],
            'en': [{'url': 'https://example.com/subs/en', 'ext': 'ext'}],
        }, '`ext` should set default ext but leave existing value untouched')
        self.assertEqual(traverse_obj([
            {'name': 'en', 'url': 'https://example.com/subs/en2', 'prio': True},
            {'name': 'en', 'url': 'https://example.com/subs/en1', 'prio': False},
        ], [Ellipsis, {
            'id': 'name',
            'quality': ['prio', T(int)],
            'url': 'url',
        }, all, T(subs_list_to_dict(ext='ext'))]), {'en': [
            {'url': 'https://example.com/subs/en1', 'ext': 'ext'},
            {'url': 'https://example.com/subs/en2', 'ext': 'ext'},
        ]}, '`quality` key should sort subtitle list accordingly')
        self.assertEqual(traverse_obj([
            {'name': 'de', 'url': 'https://example.com/subs/de.ass'},
            {'name': 'de'},
            {'name': 'en', 'content': 'content'},
            {'url': 'https://example.com/subs/en'},
        ], [Ellipsis, {
            'id': 'name',
            'url': 'url',
            'data': 'content',
        }, all, T(subs_list_to_dict(lang='en'))]), {
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
true
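A minimal usage sketch of the traverse_obj API that the tests above exercise; the data dict below is illustrative and not taken from the repository:

from youtube_dl.traversal import T, traverse_obj
from youtube_dl.utils import int_or_none

# illustrative input, not from the test suite
data = {'items': [{'id': '1', 'meta': {'views': '42'}}, {'id': '2'}]}

# Ellipsis branches over every list item; failing branches are pruned,
# and T() applies a transformation to each surviving value
views = traverse_obj(data, ('items', Ellipsis, 'meta', 'views', T(int_or_none)))
assert views == [42]

# a non-branching path that fails falls back to `default`
assert traverse_obj(data, ('items', 0, 'missing'), default='n/a') == 'n/a'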
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_update.py
test/test_update.py
#!/usr/bin/env python

from __future__ import unicode_literals

# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import json

from youtube_dl.update import rsa_verify


class TestUpdate(unittest.TestCase):
    def test_rsa_verify(self):
        UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
        with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'versions.json'), 'rb') as f:
            versions_info = f.read().decode()
        versions_info = json.loads(versions_info)
        signature = versions_info['signature']
        del versions_info['signature']
        self.assertTrue(rsa_verify(
            json.dumps(versions_info, sort_keys=True).encode('utf-8'),
            signature, UPDATES_RSA_KEY))


if __name__ == '__main__':
    unittest.main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
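A condensed sketch of the manifest check performed in the test above: drop the signature field, re-serialize the remaining JSON deterministically, and verify it against the embedded public key (`verify_manifest` is a hypothetical helper name):

import json

from youtube_dl.update import rsa_verify

def verify_manifest(versions_info, public_key):
    # `versions_info` is the parsed versions.json; the signature covers
    # everything except the 'signature' field itself
    signature = versions_info.pop('signature')
    payload = json.dumps(versions_info, sort_keys=True).encode('utf-8')
    return rsa_verify(payload, signature, public_key)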
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_YoutubeDLCookieJar.py
test/test_YoutubeDLCookieJar.py
#!/usr/bin/env python
# coding: utf-8

from __future__ import unicode_literals

import os
import re
import sys
import tempfile
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from youtube_dl.utils import YoutubeDLCookieJar


class TestYoutubeDLCookieJar(unittest.TestCase):
    def test_keep_session_cookies(self):
        cookiejar = YoutubeDLCookieJar('./test/testdata/cookies/session_cookies.txt')
        cookiejar.load(ignore_discard=True, ignore_expires=True)
        tf = tempfile.NamedTemporaryFile(delete=False)
        try:
            cookiejar.save(filename=tf.name, ignore_discard=True, ignore_expires=True)
            temp = tf.read().decode('utf-8')
            self.assertTrue(re.search(
                r'www\.foobar\.foobar\s+FALSE\s+/\s+TRUE\s+0\s+YoutubeDLExpiresEmpty\s+YoutubeDLExpiresEmptyValue', temp))
            self.assertTrue(re.search(
                r'www\.foobar\.foobar\s+FALSE\s+/\s+TRUE\s+0\s+YoutubeDLExpires0\s+YoutubeDLExpires0Value', temp))
        finally:
            tf.close()
            os.remove(tf.name)

    def test_strip_httponly_prefix(self):
        cookiejar = YoutubeDLCookieJar('./test/testdata/cookies/httponly_cookies.txt')
        cookiejar.load(ignore_discard=True, ignore_expires=True)

        def assert_cookie_has_value(key):
            self.assertEqual(cookiejar._cookies['www.foobar.foobar']['/'][key].value, key + '_VALUE')

        assert_cookie_has_value('HTTPONLY_COOKIE')
        assert_cookie_has_value('JS_ACCESSIBLE_COOKIE')

    def test_malformed_cookies(self):
        cookiejar = YoutubeDLCookieJar('./test/testdata/cookies/malformed_cookies.txt')
        cookiejar.load(ignore_discard=True, ignore_expires=True)
        # Cookies should be empty since all malformed cookie file entries
        # will be ignored
        self.assertFalse(cookiejar._cookies)

    def test_get_cookie_header(self):
        cookiejar = YoutubeDLCookieJar('./test/testdata/cookies/httponly_cookies.txt')
        cookiejar.load(ignore_discard=True, ignore_expires=True)
        header = cookiejar.get_cookie_header('https://www.foobar.foobar')
        self.assertIn('HTTPONLY_COOKIE', header)

    def test_get_cookies_for_url(self):
        cookiejar = YoutubeDLCookieJar('./test/testdata/cookies/session_cookies.txt')
        cookiejar.load(ignore_discard=True, ignore_expires=True)
        cookies = cookiejar.get_cookies_for_url('https://www.foobar.foobar/')
        self.assertEqual(len(cookies), 2)
        cookies = cookiejar.get_cookies_for_url('https://foobar.foobar/')
        self.assertFalse(cookies)


if __name__ == '__main__':
    unittest.main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
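A minimal sketch of the YoutubeDLCookieJar round trip exercised above; 'cookies.txt' is a hypothetical Netscape-format cookies file, not one of the repo's test fixtures:

from youtube_dl.utils import YoutubeDLCookieJar

jar = YoutubeDLCookieJar('cookies.txt')  # hypothetical path
jar.load(ignore_discard=True, ignore_expires=True)

# build the Cookie header that would be sent for this URL
header = jar.get_cookie_header('https://www.example.com/')

# save() without a filename writes back to the jar's own file
jar.save(ignore_discard=True, ignore_expires=True)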
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_jsinterp.py
test/test_jsinterp.py
#!/usr/bin/env python
# coding: utf-8

from __future__ import unicode_literals

# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import math
import re
import time

from youtube_dl.compat import compat_str as str
from youtube_dl.jsinterp import JS_Undefined, JSInterpreter

NaN = object()


class TestJSInterpreter(unittest.TestCase):
    def _test(self, jsi_or_code, expected, func='f', args=()):
        if isinstance(jsi_or_code, str):
            jsi_or_code = JSInterpreter(jsi_or_code)
        got = jsi_or_code.call_function(func, *args)
        if expected is NaN:
            self.assertTrue(math.isnan(got), '{0} is not NaN'.format(got))
        else:
            self.assertEqual(got, expected)

    def test_basic(self):
        jsi = JSInterpreter('function f(){;}')
        self.assertEqual(repr(jsi.extract_function('f')), 'F<f>')
        self._test(jsi, None)

        self._test('function f(){return 42;}', 42)
        self._test('function f(){42}', None)
        self._test('var f = function(){return 42;}', 42)

    def test_add(self):
        self._test('function f(){return 42 + 7;}', 49)
        self._test('function f(){return 42 + undefined;}', NaN)
        self._test('function f(){return 42 + null;}', 42)
        self._test('function f(){return 1 + "";}', '1')
        self._test('function f(){return 42 + "7";}', '427')
        self._test('function f(){return false + true;}', 1)
        self._test('function f(){return "false" + true;}', 'falsetrue')
        self._test('function f(){return '
                   '1 + "2" + [3,4] + {k: 56} + null + undefined + Infinity;}',
                   '123,4[object Object]nullundefinedInfinity')

    def test_sub(self):
        self._test('function f(){return 42 - 7;}', 35)
        self._test('function f(){return 42 - undefined;}', NaN)
        self._test('function f(){return 42 - null;}', 42)
        self._test('function f(){return 42 - "7";}', 35)
        self._test('function f(){return 42 - "spam";}', NaN)

    def test_mul(self):
        self._test('function f(){return 42 * 7;}', 294)
        self._test('function f(){return 42 * undefined;}', NaN)
        self._test('function f(){return 42 * null;}', 0)
        self._test('function f(){return 42 * "7";}', 294)
        self._test('function f(){return 42 * "eggs";}', NaN)

    def test_div(self):
        jsi = JSInterpreter('function f(a, b){return a / b;}')
        self._test(jsi, NaN, args=(0, 0))
        self._test(jsi, NaN, args=(JS_Undefined, 1))
        self._test(jsi, float('inf'), args=(2, 0))
        self._test(jsi, 0, args=(0, 3))
        self._test(jsi, 6, args=(42, 7))
        self._test(jsi, 0, args=(42, float('inf')))
        self._test(jsi, 6, args=("42", 7))
        self._test(jsi, NaN, args=("spam", 7))

    def test_mod(self):
        self._test('function f(){return 42 % 7;}', 0)
        self._test('function f(){return 42 % 0;}', NaN)
        self._test('function f(){return 42 % undefined;}', NaN)
        self._test('function f(){return 42 % "7";}', 0)
        self._test('function f(){return 42 % "beans";}', NaN)

    def test_exp(self):
        self._test('function f(){return 42 ** 2;}', 1764)
        self._test('function f(){return 42 ** undefined;}', NaN)
        self._test('function f(){return 42 ** null;}', 1)
        self._test('function f(){return undefined ** 0;}', 1)
        self._test('function f(){return undefined ** 42;}', NaN)
        self._test('function f(){return 42 ** "2";}', 1764)
        self._test('function f(){return 42 ** "spam";}', NaN)

    def test_calc(self):
        self._test('function f(a){return 2*a+1;}', 7, args=[3])

    def test_empty_return(self):
        self._test('function f(){return; y()}', None)

    def test_morespace(self):
        self._test('function f (a) { return 2 * a + 1 ; }', 7, args=[3])
        self._test('function f () { x = 2 ; return x; }', 2)

    def test_strange_chars(self):
        self._test('function $_xY1 ($_axY1) { var $_axY2 = $_axY1 + 1; return $_axY2; }',
                   21, args=[20], func='$_xY1')

    def test_operators(self):
        self._test('function f(){return 1 << 5;}', 32)
        self._test('function f(){return 2 ** 5}', 32)
        self._test('function f(){return 19 & 21;}', 17)
        self._test('function f(){return 11 >> 2;}', 2)
        self._test('function f(){return []? 2+3: 4;}', 5)
        # equality
        self._test('function f(){return 1 == 1}', True)
        self._test('function f(){return 1 == 1.0}', True)
        self._test('function f(){return 1 == "1"}', True)
        self._test('function f(){return 1 == 2}', False)
        self._test('function f(){return 1 != "1"}', False)
        self._test('function f(){return 1 != 2}', True)
        self._test('function f(){var x = {a: 1}; var y = x; return x == y}', True)
        self._test('function f(){var x = {a: 1}; return x == {a: 1}}', False)
        self._test('function f(){return NaN == NaN}', False)
        self._test('function f(){return null == undefined}', True)
        self._test('function f(){return "spam, eggs" == "spam, eggs"}', True)
        # strict equality
        self._test('function f(){return 1 === 1}', True)
        self._test('function f(){return 1 === 1.0}', True)
        self._test('function f(){return 1 === "1"}', False)
        self._test('function f(){return 1 === 2}', False)
        self._test('function f(){var x = {a: 1}; var y = x; return x === y}', True)
        self._test('function f(){var x = {a: 1}; return x === {a: 1}}', False)
        self._test('function f(){return NaN === NaN}', False)
        self._test('function f(){return null === undefined}', False)
        self._test('function f(){return null === null}', True)
        self._test('function f(){return undefined === undefined}', True)
        self._test('function f(){return "uninterned" === "uninterned"}', True)
        self._test('function f(){return 1 === 1}', True)
        self._test('function f(){return 1 === "1"}', False)
        self._test('function f(){return 1 !== 1}', False)
        self._test('function f(){return 1 !== "1"}', True)
        # expressions
        self._test('function f(){return 0 && 1 || 2;}', 2)
        self._test('function f(){return 0 ?? 42;}', 0)
        self._test('function f(){return "life, the universe and everything" < 42;}', False)
        # https://github.com/ytdl-org/youtube-dl/issues/32815
        self._test('function f(){return 0 - 7 * - 6;}', 42)

    def test_bitwise_operators_typecast(self):
        # madness
        self._test('function f(){return null << 5}', 0)
        self._test('function f(){return undefined >> 5}', 0)
        self._test('function f(){return 42 << NaN}', 42)
        self._test('function f(){return 42 << Infinity}', 42)
        self._test('function f(){return 0.0 << null}', 0)
        self._test('function f(){return NaN << 42}', 0)
        self._test('function f(){return "21.9" << 1}', 42)
        self._test('function f(){return true << "5";}', 32)
        self._test('function f(){return true << true;}', 2)
        self._test('function f(){return "19" & "21.9";}', 17)
        self._test('function f(){return "19" & false;}', 0)
        self._test('function f(){return "11.0" >> "2.1";}', 2)
        self._test('function f(){return 5 ^ 9;}', 12)
        self._test('function f(){return 0.0 << NaN}', 0)
        self._test('function f(){return null << undefined}', 0)
        self._test('function f(){return 21 << 4294967297}', 42)

    def test_array_access(self):
        self._test('function f(){var x = [1,2,3]; x[0] = 4; x[0] = 5; x[2.0] = 7; return x;}', [5, 2, 7])

    def test_parens(self):
        self._test('function f(){return (1) + (2) * ((( (( (((((3)))))) )) ));}', 7)
        self._test('function f(){return (1 + 2) * 3;}', 9)

    def test_quotes(self):
        self._test(r'function f(){return "a\"\\("}', r'a"\(')

    def test_assignments(self):
        self._test('function f(){var x = 20; x = 30 + 1; return x;}', 31)
        self._test('function f(){var x = 20; x += 30 + 1; return x;}', 51)
        self._test('function f(){var x = 20; x -= 30 + 1; return x;}', -11)
        self._test('function f(){var x = 2; var y = ["a", "b"]; y[x%y["length"]]="z"; return y}', ['z', 'b'])

    def test_comments(self):
        self._test('''
            function f() {
                var x = /* 1 + */ 2;
                var y = /* 30 * 40 */ 50;
                return x + y;
            }
            ''', 52)
        self._test('''
            function f() {
                var x = "/*";
                var y = 1 /* comment */ + 2;
                return y;
            }
            ''', 3)
        self._test('''
            function f() {
                var x = ( /* 1 + */ 2 +
                          /* 30 * 40 */ 50);
                return x;
            }
            ''', 52)

    def test_precedence(self):
        self._test('''
            function f() {
                var a = [10, 20, 30, 40, 50];
                var b = 6;
                a[0]=a[b%a.length];
                return a;
            }
            ''', [20, 20, 30, 40, 50])

    def test_builtins(self):
        self._test('function f() { return NaN }', NaN)

    def test_Date(self):
        self._test('function f() { return new Date("Wednesday 31 December 1969 18:01:26 MDT") - 0; }', 86000)

        jsi = JSInterpreter('function f(dt) { return new Date(dt) - 0; }')
        # date format m/d/y
        self._test(jsi, 86000, args=['12/31/1969 18:01:26 MDT'])
        # epoch 0
        self._test(jsi, 0, args=['1 January 1970 00:00:00 UTC'])
        # undefined
        self._test(jsi, NaN, args=[JS_Undefined])
        # y,m,d, ... - may fail with older dates lacking DST data
        jsi = JSInterpreter(
            'function f() { return new Date(%s); }'
            % ('2024, 5, 29, 2, 52, 12, 42',))
        self._test(jsi, (
            1719625932042  # UK value
            + (
                + 3600  # back to GMT
                + (time.altzone if time.daylight  # host's DST
                   else time.timezone)
            ) * 1000))
        # no arg
        self.assertAlmostEqual(JSInterpreter(
            'function f() { return new Date() - 0; }').call_function('f'),
            time.time() * 1000, delta=100)
        # Date.now()
        self.assertAlmostEqual(JSInterpreter(
            'function f() { return Date.now(); }').call_function('f'),
            time.time() * 1000, delta=100)
        # Date.parse()
        jsi = JSInterpreter('function f(dt) { return Date.parse(dt); }')
        self._test(jsi, 0, args=['1 January 1970 00:00:00 UTC'])
        # Date.UTC()
        jsi = JSInterpreter('function f() { return Date.UTC(%s); }'
                            % ('1970, 0, 1, 0, 0, 0, 0',))
        self._test(jsi, 0)

    def test_call(self):
        jsi = JSInterpreter('''
            function x() { return 2; }
            function y(a) { return x() + (a?a:0); }
            function z() { return y(3); }
            ''')
        self._test(jsi, 5, func='z')
        self._test(jsi, 2, func='y')

    def test_if(self):
        self._test('''
            function f() {
                let a = 9;
                if (0==0) {a++}
                return a
            }
            ''', 10)
        self._test('''
            function f() {
                if (0==0) {return 10}
            }
            ''', 10)
        self._test('''
            function f() {
                if (0!=0) {return 1}
                else {return 10}
            }
            ''', 10)

    def test_elseif(self):
        self._test('''
            function f() {
                if (0!=0) {return 1}
                else if (1==0) {return 2}
                else {return 10}
            }
            ''', 10)

    def test_for_loop(self):
        self._test('function f() { a=0; for (i=0; i-10; i++) {a++} return a }', 10)

    def test_while_loop(self):
        self._test('function f() { a=0; while (a<10) {a++} return a }', 10)

    def test_switch(self):
        jsi = JSInterpreter('''
            function f(x) { switch(x){
                case 1:x+=1;
                case 2:x+=2;
                case 3:x+=3;break;
                case 4:x+=4;
                default:x=0;
            } return x }
            ''')
        self._test(jsi, 7, args=[1])
        self._test(jsi, 6, args=[3])
        self._test(jsi, 0, args=[5])

    def test_switch_default(self):
        jsi = JSInterpreter('''
            function f(x) { switch(x){
                case 2: x+=2;
                default: x-=1;
                case 5:
                case 6: x+=6;
                case 0: break;
                case 1: x+=1;
            } return x }
            ''')
        self._test(jsi, 2, args=[1])
        self._test(jsi, 11, args=[5])
        self._test(jsi, 14, args=[9])

    def test_try(self):
        self._test('function f() { try{return 10} catch(e){return 5} }', 10)

    def test_catch(self):
        self._test('function f() { try{throw 10} catch(e){return 5} }', 5)

    def test_finally(self):
        self._test('function f() { try{throw 10} finally {return 42} }', 42)
        self._test('function f() { try{throw 10} catch(e){return 5} finally {return 42} }', 42)

    def test_nested_try(self):
        self._test('''
            function f() {try {
                try{throw 10} finally {throw 42}
                } catch(e){return 5} }
            ''', 5)

    def test_for_loop_continue(self):
        self._test('function f() { a=0; for (i=0; i-10; i++) { continue; a++ } return a }', 0)

    def test_for_loop_break(self):
        self._test('function f() { a=0; for (i=0; i-10; i++) { break; a++ } return a }', 0)

    def test_for_loop_try(self):
        self._test('''
            function f() {
                for (i=0; i-10; i++) { try { if (i == 5) throw i} catch {return 10} finally {break} };
                return 42 }
            ''', 42)

    def test_literal_list(self):
        self._test('function f() { return [1, 2, "asdf", [5, 6, 7]][3] }', [5, 6, 7])

    def test_comma(self):
        self._test('function f() { a=5; a -= 1, a+=3; return a }', 7)
        self._test('function f() { a=5; return (a -= 1, a+=3, a); }', 7)
        self._test('function f() { return (l=[0,1,2,3], function(a, b){return a+b})((l[1], l[2]), l[3]) }', 5)

    def test_not(self):
        self._test('function f() { return ! undefined; }', True)
        self._test('function f() { return !0; }', True)
        self._test('function f() { return !!0; }', False)
        self._test('function f() { return ![]; }', False)
        self._test('function f() { return !0 !== false; }', True)

    def test_void(self):
        self._test('function f() { return void 42; }', JS_Undefined)

    def test_typeof(self):
        self._test('function f() { return typeof undefined; }', 'undefined')
        self._test('function f() { return typeof NaN; }', 'number')
        self._test('function f() { return typeof Infinity; }', 'number')
        self._test('function f() { return typeof true; }', 'boolean')
        self._test('function f() { return typeof null; }', 'object')
        self._test('function f() { return typeof "a string"; }', 'string')
        self._test('function f() { return typeof 42; }', 'number')
        self._test('function f() { return typeof 42.42; }', 'number')
        self._test('function f() { var g = function(){}; return typeof g; }', 'function')
        self._test('function f() { return typeof {key: "value"}; }', 'object')
        # not yet implemented: Symbol, BigInt

    def test_return_function(self):
        jsi = JSInterpreter('''
            function x() { return [1, function(){return 1}][1] }
            ''')
        self.assertEqual(jsi.call_function('x')([]), 1)

    def test_null(self):
        self._test('function f() { return null; }', None)
        self._test('function f() { return [null > 0, null < 0, null == 0, null === 0]; }',
                   [False, False, False, False])
        self._test('function f() { return [null >= 0, null <= 0]; }', [True, True])

    def test_undefined(self):
        self._test('function f() { return undefined === undefined; }', True)
        self._test('function f() { return undefined; }', JS_Undefined)
        self._test('function f() { return undefined ?? 42; }', 42)
        self._test('function f() { let v; return v; }', JS_Undefined)
        self._test('function f() { let v; return v**0; }', 1)
        self._test('function f() { let v; return [v>42, v<=42, v&&42, 42&&v]; }',
                   [False, False, JS_Undefined, JS_Undefined])
        self._test('''
            function f() { return [
                undefined === undefined,
                undefined == undefined,
                undefined == null
            ]; }
            ''', [True] * 3)
        self._test('''
            function f() { return [
                undefined < undefined,
                undefined > undefined,
                undefined === 0,
                undefined == 0,
                undefined < 0,
                undefined > 0,
                undefined >= 0,
                undefined <= 0,
                undefined > null,
                undefined < null,
                undefined === null
            ]; }
            ''', [False] * 11)
        jsi = JSInterpreter('''
            function x() { let v; return [42+v, v+42, v**42, 42**v, 0**v]; }
            ''')
        for y in jsi.call_function('x'):
            self.assertTrue(math.isnan(y))

    def test_object(self):
        self._test('function f() { return {}; }', {})
        self._test('function f() { let a = {m1: 42, m2: 0 }; return [a["m1"], a.m2]; }', [42, 0])
        self._test('function f() { let a; return a?.qq; }', JS_Undefined)
        self._test('function f() { let a = {m1: 42, m2: 0 }; return a?.qq; }', JS_Undefined)

    def test_indexing(self):
        self._test('function f() { return [1, 2, 3, 4][3]}', 4)
        self._test('function f() { return [1, [2, [3, [4]]]][1][1][1][0]}', 4)
        self._test('function f() { var o = {1: 2, 3: 4}; return o[3]}', 4)
        self._test('function f() { var o = {1: 2, 3: 4}; return o["3"]}', 4)
        self._test('function f() { return [1, [2, {3: [4]}]][1][1]["3"][0]}', 4)
        self._test('function f() { return [1, 2, 3, 4].length}', 4)
        self._test('function f() { var o = {1: 2, 3: 4}; return o.length}', JS_Undefined)
        self._test('function f() { var o = {1: 2, 3: 4}; o["length"] = 42; return o.length}', 42)

    def test_regex(self):
        self._test('function f() { let a=/,,[/,913,/](,)}/; }', None)
        self._test('function f() { let a=/,,[/,913,/](,)}/; return a.source; }', ',,[/,913,/](,)}')

        jsi = JSInterpreter('''
            function x() { let a=/,,[/,913,/](,)}/; "".replace(a, ""); return a; }
            ''')
        attrs = set(('findall', 'finditer', 'match', 'scanner', 'search',
                     'split', 'sub', 'subn'))
        if sys.version_info >= (2, 7):
            # documented for 2.6 but may not be found
            attrs.update(('flags', 'groupindex', 'groups', 'pattern'))
        self.assertSetEqual(set(dir(jsi.call_function('x'))) & attrs, attrs)

        jsi = JSInterpreter('''
            function x() { let a=/,,[/,913,/](,)}/i; return a; }
            ''')
        self.assertEqual(jsi.call_function('x').flags & ~re.U, re.I)

        jsi = JSInterpreter(r'function f() { let a=/,][}",],()}(\[)/; return a; }')
        self.assertEqual(jsi.call_function('f').pattern, r',][}",],()}(\[)')

        jsi = JSInterpreter(r'function f() { let a=[/[)\\]/]; return a[0]; }')
        self.assertEqual(jsi.call_function('f').pattern, r'[)\\]')

    def test_replace(self):
        self._test('function f() { let a="data-name".replace("data-", ""); return a }',
                   'name')
        self._test('function f() { let a="data-name".replace(new RegExp("^.+-"), ""); return a; }',
                   'name')
        self._test('function f() { let a="data-name".replace(/^.+-/, ""); return a; }',
                   'name')
        self._test('function f() { let a="data-name".replace(/a/g, "o"); return a; }',
                   'doto-nome')
        self._test('function f() { let a="data-name".replaceAll("a", "o"); return a; }',
                   'doto-nome')

    def test_char_code_at(self):
        jsi = JSInterpreter('function f(i){return "test".charCodeAt(i)}')
        self._test(jsi, 116, args=[0])
        self._test(jsi, 101, args=[1])
        self._test(jsi, 115, args=[2])
        self._test(jsi, 116, args=[3])
        self._test(jsi, None, args=[4])
        self._test(jsi, 116, args=['not_a_number'])

    def test_bitwise_operators_overflow(self):
        self._test('function f(){return -524999584 << 5}', 379882496)
        self._test('function f(){return 1236566549 << 5}', 915423904)

    def test_negative(self):
        self._test('function f(){return 2 * -2.0 ;}', -4)
        self._test('function f(){return 2 - - -2 ;}', 0)
        self._test('function f(){return 2 - - - -2 ;}', 4)
        self._test('function f(){return 2 - + + - -2;}', 0)
        self._test('function f(){return 2 + - + - -2;}', 0)

    def test_32066(self):
        self._test(
            "function f(){return Math.pow(3, 5) + new Date('1970-01-01T08:01:42.000+08:00') / 1000 * -239 - -24205;}",
            70)

    @unittest.skip('Not yet working')
    def test_packed(self):
        self._test(
            '''function f(p,a,c,k,e,d){while(c--)if(k[c])p=p.replace(new RegExp('\\b'+c.toString(a)+'\\b','g'),k[c]);return p}''',
            '''h 7=g("1j");7.7h({7g:[{33:"w://7f-7e-7d-7c.v.7b/7a/79/78/77/76.74?t=73&s=2s&e=72&f=2t&71=70.0.0.1&6z=6y&6x=6w"}],6v:"w://32.v.u/6u.31",16:"r%",15:"r%",6t:"6s",6r:"",6q:"l",6p:"l",6o:"6n",6m:\'6l\',6k:"6j",9:[{33:"/2u?b=6i&n=50&6h=w://32.v.u/6g.31",6f:"6e"}],1y:{6d:1,6c:\'#6b\',6a:\'#69\',68:"67",66:30,65:r,},"64":{63:"%62 2m%m%61%5z%5y%5x.u%5w%5v%5u.2y%22 2k%m%1o%22 5t%m%1o%22 5s%m%1o%22 2j%m%5r%22 16%m%5q%22 15%m%5p%22 5o%2z%5n%5m%2z",5l:"w://v.u/d/1k/5k.2y",5j:[]},\'5i\':{"5h":"5g"},5f:"5e",5d:"w://v.u",5c:{},5b:l,1x:[0.25,0.50,0.75,1,1.25,1.5,2]});h 1m,1n,5a;h 59=0,58=0;h 7=g("1j");h 2x=0,57=0,56=0;$.55({54:{\'53-52\':\'2i-51\'}});7.j(\'4z\',6(x){c(5>0&&x.1l>=5&&1n!=1){1n=1;$(\'q.4y\').4x(\'4w\')}});7.j(\'13\',6(x){2x=x.1l});7.j(\'2g\',6(x){2w(x)});7.j(\'4v\',6(){$(\'q.2v\').4u()});6 2w(x){$(\'q.2v\').4t();c(1m)19;1m=1;17=0;c(4s.4r===l){17=1}$.4q(\'/2u?b=4p&2l=1k&4o=2t-4n-4m-2s-4l&4k=&4j=&4i=&17=\'+17,6(2r){$(\'#4h\').4g(2r)});$(\'.3-8-4f-4e:4d("4c")\').2h(6(e){2q();g().4b(0);g().4a(l)});6 2q(){h $14=$("<q />").2p({1l:"49",16:"r%",15:"r%",48:0,2n:0,2o:47,46:"45(10%, 10%, 10%, 0.4)","44-43":"42"});$("<41 />").2p({16:"60%",15:"60%",2o:40,"3z-2n":"3y"}).3x({\'2m\':\'/?b=3w&2l=1k\',\'2k\':\'0\',\'2j\':\'2i\'}).2f($14);$14.2h(6(){$(3v).3u();g().2g()});$14.2f($(\'#1j\'))}g().13(0);}6 3t(){h 9=7.1b(2e);2d.2c(9);c(9.n>1){1r(i=0;i<9.n;i++){c(9[i].1a==2e){2d.2c(\'!!=\'+i);7.1p(i)}}}}7.j(\'3s\',6(){g().1h("/2a/3r.29","3q 10 28",6(){g().13(g().27()+10)},"2b");$("q[26=2b]").23().21(\'.3-20-1z\');g().1h("/2a/3p.29","3o 10 28",6(){h 12=g().27()-10;c(12<0)12=0;g().13(12)},"24");$("q[26=24]").23().21(\'.3-20-1z\');});6 1i(){}7.j(\'3n\',6(){1i()});7.j(\'3m\',6(){1i()});7.j("k",6(y){h 9=7.1b();c(9.n<2)19;$(\'.3-8-3l-3k\').3j(6(){$(\'#3-8-a-k\').1e(\'3-8-a-z\');$(\'.3-a-k\').p(\'o-1f\',\'11\')});7.1h("/3i/3h.3g","3f 3e",6(){$(\'.3-1w\').3d(\'3-8-1v\');$(\'.3-8-1y, .3-8-1x\').p(\'o-1g\',\'11\');c($(\'.3-1w\').3c(\'3-8-1v\')){$(\'.3-a-k\').p(\'o-1g\',\'l\');$(\'.3-a-k\').p(\'o-1f\',\'l\');$(\'.3-8-a\').1e(\'3-8-a-z\');$(\'.3-8-a:1u\').3b(\'3-8-a-z\')}3a{$(\'.3-a-k\').p(\'o-1g\',\'11\');$(\'.3-a-k\').p(\'o-1f\',\'11\');$(\'.3-8-a:1u\').1e(\'3-8-a-z\')}},"39");7.j("38",6(y){1d.37(\'1c\',y.9[y.36].1a)});c(1d.1t(\'1c\')){35("1s(1d.1t(\'1c\'));",34)}});h 18;6 1s(1q){h 9=7.1b();c(9.n>1){1r(i=0;i<9.n;i++){c(9[i].1a==1q){c(i==18){19}18=i;7.1p(i)}}}}',36,270,'|||jw|||function|player|settings|tracks|submenu||if||||jwplayer|var||on|audioTracks|true|3D|length|aria|attr|div|100|||sx|filemoon|https||event|active||false|tt|seek|dd|height|width|adb|current_audio|return|name|getAudioTracks|default_audio|localStorage|removeClass|expanded|checked|addButton|callMeMaybe|vplayer|0fxcyc2ajhp1|position|vvplay|vvad|220|setCurrentAudioTrack|audio_name|for|audio_set|getItem|last|open|controls|playbackRates|captions|rewind|icon|insertAfter||detach|ff00||button|getPosition|sec|png|player8|ff11|log|console|track_name|appendTo|play|click|no|scrolling|frameborder|file_code|src|top|zIndex|css|showCCform|data|1662367683|383371|dl|video_ad|doPlay|prevt|mp4|3E||jpg|thumbs|file|300|setTimeout|currentTrack|setItem|audioTrackChanged|dualSound|else|addClass|hasClass|toggleClass|Track|Audio|svg|dualy|images|mousedown|buttons|topbar|playAttemptFailed|beforePlay|Rewind|fr|Forward|ff|ready|set_audio_track|remove|this|upload_srt|prop|50px|margin|1000001|iframe|center|align|text|rgba|background|1000000|left|absolute|pause|setCurrentCaptions|Upload|contains|item|content|html|fviews|referer|prem|embed|3e57249ef633e0d03bf76ceb8d8a4b65|216|83|hash|view|get|TokenZir|window|hide|show|complete|slow|fadeIn|video_ad_fadein|time||cache|Cache|Content|headers|ajaxSetup|v2done|tott|vastdone2|vastdone1|vvbefore|playbackRateControls|cast|aboutlink|FileMoon|abouttext|UHD|1870|qualityLabels|sites|GNOME_POWER|link|2Fiframe|3C|allowfullscreen|22360|22640|22no|marginheight|marginwidth|2FGNOME_POWER|2F0fxcyc2ajhp1|2Fe|2Ffilemoon|2F|3A||22https|3Ciframe|code|sharing|fontOpacity|backgroundOpacity|Tahoma|fontFamily|303030|backgroundColor|FFFFFF|color|userFontScale|thumbnails|kind|0fxcyc2ajhp10000|url|get_slides|start|startparam|none|preload|html5|primary|hlshtml|androidhls|duration|uniform|stretching|0fxcyc2ajhp1_xt|image|2048|sp|6871|asn|127|srv|43200|_g3XlBcu2lmD9oDexD2NLWSmah2Nu3XcDrl93m9PwXY|m3u8||master|0fxcyc2ajhp1_x|00076|01|hls2|to|s01|delivery|storage|moon|sources|setup'''.split('|'))

    def test_join(self):
        test_input = list('test')
        tests = [
            'function f(a, b){return a.join(b)}',
            'function f(a, b){return Array.prototype.join.call(a, b)}',
            'function f(a, b){return Array.prototype.join.apply(a, [b])}',
        ]
        for test in tests:
            jsi = JSInterpreter(test)
            self._test(jsi, 'test', args=[test_input, ''])
            self._test(jsi, 't-e-s-t', args=[test_input, '-'])
            self._test(jsi, '', args=[[], '-'])

        self._test('function f(){return '
                   '[1, 1.0, "abc", {a: 1}, null, undefined, Infinity, NaN].join()}',
                   '1,1,abc,[object Object],,,Infinity,NaN')
        self._test('function f(){return '
                   '[1, 1.0, "abc", {a: 1}, null, undefined, Infinity, NaN].join("~")}',
                   '1~1~abc~[object Object]~~~Infinity~NaN')

    def test_split(self):
        test_result = list('test')
        tests = [
            'function f(a, b){return a.split(b)}',
            'function f(a, b){return a["split"](b)}',
            'function f(a, b){let x = ["split"]; return a[x[0]](b)}',
            'function f(a, b){return String.prototype.split.call(a, b)}',
            'function f(a, b){return String.prototype.split.apply(a, [b])}',
        ]
        for test in tests:
            jsi = JSInterpreter(test)
            self._test(jsi, test_result, args=['test', ''])
            self._test(jsi, test_result, args=['t-e-s-t', '-'])
            self._test(jsi, [''], args=['', '-'])
            self._test(jsi, [], args=['', ''])
        # RegExp split
        self._test('function f(){return "test".split(/(?:)/)}',
                   ['t', 'e', 's', 't'])
        self._test('function f(){return "t-e-s-t".split(/[es-]+/)}',
                   ['t', 't'])
        # from MDN: surrogate pairs aren't handled: case 1 fails
        # self._test('function f(){return "😄😄".split(/(?:)/)}',
        #            ['\ud83d', '\ude04', '\ud83d', '\ude04'])
        # case 2 beats Py3.2: it gets the case 1 result
        if sys.version_info >= (2, 6) and not ((3, 0) <= sys.version_info < (3, 3)):
            self._test('function f(){return "😄😄".split(/(?:)/u)}',
                       ['😄', '😄'])

    def test_slice(self):
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice()}', [0, 1, 2, 3, 4, 5, 6, 7, 8])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(0)}', [0, 1, 2, 3, 4, 5, 6, 7, 8])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(5)}', [5, 6, 7, 8])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(99)}', [])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(-2)}', [7, 8])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(-99)}', [0, 1, 2, 3, 4, 5, 6, 7, 8])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(0, 0)}', [])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(1, 0)}', [])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(0, 1)}', [0])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(3, 6)}', [3, 4, 5])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(1, -1)}', [1, 2, 3, 4, 5, 6, 7])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(-1, 1)}', [])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(-3, -1)}', [6, 7])
        self._test('function f(){return "012345678".slice()}', '012345678')
        self._test('function f(){return "012345678".slice(0)}', '012345678')
        self._test('function f(){return "012345678".slice(5)}', '5678')
        self._test('function f(){return "012345678".slice(99)}', '')
        self._test('function f(){return "012345678".slice(-2)}', '78')
        self._test('function f(){return "012345678".slice(-99)}', '012345678')
        self._test('function f(){return "012345678".slice(0, 0)}', '')
        self._test('function f(){return "012345678".slice(1, 0)}', '')
        self._test('function f(){return "012345678".slice(0, 1)}', '0')
        self._test('function f(){return "012345678".slice(3, 6)}', '345')
        self._test('function f(){return "012345678".slice(1, -1)}', '1234567')
        self._test('function f(){return "012345678".slice(-1, 1)}', '')
        self._test('function f(){return "012345678".slice(-3, -1)}', '67')

    def test_splice(self):
        self._test('function f(){var T = ["0", "1", "2"]; T["splice"](2, 1, "0")[0]; return T }', ['0', '1', '0'])

    def test_pop(self):
        # pop
        self._test('function f(){var a = [0, 1, 2, 3, 4, 5, 6, 7, 8]; return [a.pop(), a]}',
                   [8, [0, 1, 2, 3, 4, 5, 6, 7]])
        self._test('function f(){return [].pop()}', JS_Undefined)
        # push
        self._test('function f(){var a = [0, 1, 2]; return [a.push(3, 4), a]}',
                   [5, [0, 1, 2, 3, 4]])
        self._test('function f(){var a = [0, 1, 2]; return [a.push(), a]}',
                   [3, [0, 1, 2]])

    def test_shift(self):
        # shift
        self._test('function f(){var a = [0, 1, 2, 3, 4, 5, 6, 7, 8]; return [a.shift(), a]}',
                   [0, [1, 2, 3, 4, 5, 6, 7, 8]])
        self._test('function f(){return [].shift()}', JS_Undefined)
        # unshift
        self._test('function f(){var a = [0, 1, 2]; return [a.unshift(3, 4), a]}',
                   [5, [3, 4, 0, 1, 2]])
        self._test('function f(){var a = [0, 1, 2]; return [a.unshift(), a]}',
                   [3, [0, 1, 2]])

    def test_forEach(self):
        self._test('function f(){var ret = []; var l = [4, 2]; '
                   'var log = function(e,i,a){ret.push([e,i,a]);}; '
                   'l.forEach(log); '
                   'return [ret.length, ret[0][0], ret[1][1], ret[0][2]]}',
                   [2, 4, 1, [4, 2]])
        self._test('function f(){var ret = []; var l = [4, 2]; '
                   'var log = function(e,i,a){this.push([e,i,a]);}; '
                   'l.forEach(log, ret); '
                   'return [ret.length, ret[0][0], ret[1][1], ret[0][2]]}',
                   [2, 4, 1, [4, 2]])

    def test_extract_function(self):
        jsi = JSInterpreter('function a(b) { return b + 1; }')
        func = jsi.extract_function('a')
        self.assertEqual(func([2]), 3)

    def test_extract_function_with_global_stack(self):
        jsi = JSInterpreter('function c(d) { return d + e + f + g; }')
        func = jsi.extract_function('c', {'e': 10}, {'f': 100, 'g': 1000})
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
true
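A minimal sketch of the JSInterpreter entry points used throughout the tests above; `triple` is an arbitrary example function, not from the test suite:

from youtube_dl.jsinterp import JSInterpreter

jsi = JSInterpreter('function triple(x) { return 3 * x; }')

# call a function by name with positional arguments
assert jsi.call_function('triple', 7) == 21

# or extract a callable; it takes its arguments as a single list
triple = jsi.extract_function('triple')
assert triple([7]) == 21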
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/test/test_all_urls.py
test/test_all_urls.py
#!/usr/bin/env python

from __future__ import unicode_literals

# Allow direct execution
import os
import sys
import unittest
import collections
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from test.helper import gettestcases

from youtube_dl.extractor import (
    FacebookIE,
    gen_extractors,
    YoutubeIE,
)


class TestAllURLsMatching(unittest.TestCase):
    def setUp(self):
        self.ies = gen_extractors()

    def matching_ies(self, url):
        return [ie.IE_NAME for ie in self.ies if ie.suitable(url) and ie.IE_NAME != 'generic']

    def assertMatch(self, url, ie_list):
        self.assertEqual(self.matching_ies(url), ie_list)

    def test_youtube_playlist_matching(self):
        assertPlaylist = lambda url: self.assertMatch(url, ['youtube:playlist'])
        assertTab = lambda url: self.assertMatch(url, ['youtube:tab'])

        assertPlaylist('ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
        assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q')  # 585
        assertPlaylist('PL63F0C78739B09958')
        assertTab('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
        assertTab('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
        assertTab('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
        assertTab('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')  # 668
        self.assertFalse('youtube:playlist' in self.matching_ies('PLtS2H6bU1M'))
        # Top tracks
        assertTab('https://www.youtube.com/playlist?list=MCUS.20142101')

    def test_youtube_matching(self):
        self.assertTrue(YoutubeIE.suitable('PLtS2H6bU1M'))
        self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012'))  # 668
        self.assertMatch('http://youtu.be/BaW_jenozKc', ['youtube'])
        self.assertMatch('http://www.youtube.com/v/BaW_jenozKc', ['youtube'])
        self.assertMatch('https://youtube.googleapis.com/v/BaW_jenozKc', ['youtube'])
        self.assertMatch('http://www.cleanvideosearch.com/media/action/yt/watch?videoId=8v_4O44sfjM', ['youtube'])

    def test_youtube_channel_matching(self):
        assertChannel = lambda url: self.assertMatch(url, ['youtube:tab'])
        assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM')
        assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM?feature=gb_ch_rec')
        assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM/videos')

    def test_youtube_user_matching(self):
        self.assertMatch('http://www.youtube.com/NASAgovVideo/videos', ['youtube:tab'])

    def test_youtube_feeds(self):
        self.assertMatch('https://www.youtube.com/feed/library', ['youtube:tab'])
        self.assertMatch('https://www.youtube.com/feed/history', ['youtube:tab'])
        self.assertMatch('https://www.youtube.com/feed/watch_later', ['youtube:tab'])
        self.assertMatch('https://www.youtube.com/feed/subscriptions', ['youtube:tab'])

    def test_youtube_search_matching(self):
        self.assertMatch('http://www.youtube.com/results?search_query=making+mustard', ['youtube:search_url'])
        self.assertMatch('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video', ['youtube:search_url'])

    def test_facebook_matching(self):
        self.assertTrue(FacebookIE.suitable('https://www.facebook.com/Shiniknoh#!/photo.php?v=10153317450565268'))
        self.assertTrue(FacebookIE.suitable('https://www.facebook.com/cindyweather?fref=ts#!/photo.php?v=10152183998945793'))

    def test_no_duplicates(self):
        ies = gen_extractors()
        for tc in gettestcases(include_onlymatching=True):
            url = tc['url']
            for ie in ies:
                if type(ie).__name__ in ('GenericIE', tc['name'] + 'IE'):
                    self.assertTrue(ie.suitable(url), '%s should match URL %r' % (type(ie).__name__, url))
                else:
                    self.assertFalse(
                        ie.suitable(url),
                        '%s should not match URL %r . That URL belongs to %s.' % (type(ie).__name__, url, tc['name']))

    def test_keywords(self):
        self.assertMatch(':ytsubs', ['youtube:subscriptions'])
        self.assertMatch(':ytsubscriptions', ['youtube:subscriptions'])
        self.assertMatch(':ythistory', ['youtube:history'])

    def test_vimeo_matching(self):
        self.assertMatch('https://vimeo.com/channels/tributes', ['vimeo:channel'])
        self.assertMatch('https://vimeo.com/channels/31259', ['vimeo:channel'])
        self.assertMatch('https://vimeo.com/channels/31259/53576664', ['vimeo'])
        self.assertMatch('https://vimeo.com/user7108434', ['vimeo:user'])
        self.assertMatch('https://vimeo.com/user7108434/videos', ['vimeo:user'])
        self.assertMatch('https://vimeo.com/user21297594/review/75524534/3c257a1b5d', ['vimeo:review'])

    # https://github.com/ytdl-org/youtube-dl/issues/1930
    def test_soundcloud_not_matching_sets(self):
        self.assertMatch('http://soundcloud.com/floex/sets/gone-ep', ['soundcloud:set'])

    def test_tumblr(self):
        self.assertMatch('http://tatianamaslanydaily.tumblr.com/post/54196191430/orphan-black-dvd-extra-behind-the-scenes', ['Tumblr'])
        self.assertMatch('http://tatianamaslanydaily.tumblr.com/post/54196191430', ['Tumblr'])

    def test_pbs(self):
        # https://github.com/ytdl-org/youtube-dl/issues/2350
        self.assertMatch('http://video.pbs.org/viralplayer/2365173446/', ['pbs'])
        self.assertMatch('http://video.pbs.org/widget/partnerplayer/980042464/', ['pbs'])

    def test_no_duplicated_ie_names(self):
        name_accu = collections.defaultdict(list)
        for ie in self.ies:
            name_accu[ie.IE_NAME.lower()].append(type(ie).__name__)
        for (ie_name, ie_list) in name_accu.items():
            self.assertEqual(
                len(ie_list), 1,
                'Multiple extractors with the same IE_NAME "%s" (%s)' % (ie_name, ', '.join(ie_list)))


if __name__ == '__main__':
    unittest.main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
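All of the assertions above go through each extractor's suitable() classmethod, which tests the URL against the extractor's _VALID_URL regular expression. A minimal self-contained sketch of that mechanism (DemoIE and its pattern are illustrative, not part of youtube-dl):

import re


class DemoIE(object):
    # illustrative pattern in the style of youtube-dl's _VALID_URL attributes
    _VALID_URL = r'https?://(?:www\.)?demo\.example/watch\?v=(?P<id>[0-9A-Za-z_-]{11})'

    @classmethod
    def suitable(cls, url):
        # youtube-dl caches the compiled pattern; compiling on every call
        # keeps this sketch short
        return re.match(cls._VALID_URL, url) is not None


assert DemoIE.suitable('https://www.demo.example/watch?v=BaW_jenozKc')
assert not DemoIE.suitable('https://www.demo.example/playlist?list=PL123')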
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/devscripts/make_contributing.py
devscripts/make_contributing.py
#!/usr/bin/env python
from __future__ import unicode_literals

import optparse
import re

from utils import read_file, write_file


def main():
    parser = optparse.OptionParser(usage='%prog INFILE OUTFILE')
    options, args = parser.parse_args()
    if len(args) != 2:
        parser.error('Expected an input and an output filename')

    infile, outfile = args

    readme = read_file(infile)

    bug_text = re.search(
        r'(?s)#\s*BUGS\s*[^\n]*\s*(.*?)#\s*COPYRIGHT', readme).group(1)
    dev_text = re.search(
        r'(?s)(#\s*DEVELOPER INSTRUCTIONS.*?)#\s*EMBEDDING YOUTUBE-DL',
        readme).group(1)

    out = bug_text + dev_text

    write_file(outfile, out)


if __name__ == '__main__':
    main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/devscripts/make_readme.py
devscripts/make_readme.py
from __future__ import unicode_literals

import os.path
import re
import sys

dirn = os.path.dirname

sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))

from utils import read_file
from youtube_dl.compat import compat_open as open

README_FILE = 'README.md'

helptext = sys.stdin.read()
if isinstance(helptext, bytes):
    helptext = helptext.decode('utf-8')

oldreadme = read_file(README_FILE)

header = oldreadme[:oldreadme.index('# OPTIONS')]
footer = oldreadme[oldreadme.index('# CONFIGURATION'):]

options = helptext[helptext.index('  General Options:') + 19:]
options = re.sub(r'(?m)^  (\w.+)$', r'## \1', options)
options = '# OPTIONS\n' + options + '\n'

with open(README_FILE, 'w', encoding='utf-8') as f:
    f.write(header)
    f.write(options)
    f.write(footer)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/devscripts/prepare_manpage.py
devscripts/prepare_manpage.py
from __future__ import unicode_literals

import optparse
import os.path
import re

from utils import read_file, write_file

ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
README_FILE = os.path.join(ROOT_DIR, 'README.md')

PREFIX = r'''%YOUTUBE-DL(1)

# NAME

youtube\-dl \- download videos from youtube.com or other video platforms

# SYNOPSIS

**youtube-dl** \[OPTIONS\] URL [URL...]

'''


def main():
    parser = optparse.OptionParser(usage='%prog OUTFILE.md')
    options, args = parser.parse_args()
    if len(args) != 1:
        parser.error('Expected an output filename')

    outfile, = args

    readme = read_file(README_FILE)

    readme = re.sub(r'(?s)^.*?(?=# DESCRIPTION)', '', readme)
    readme = re.sub(r'\s+youtube-dl \[OPTIONS\] URL \[URL\.\.\.\]', '', readme)
    readme = PREFIX + readme

    readme = filter_options(readme)

    write_file(outfile, readme)


def filter_options(readme):
    ret = ''
    in_options = False
    for line in readme.split('\n'):
        if line.startswith('# '):
            if line[2:].startswith('OPTIONS'):
                in_options = True
            else:
                in_options = False

        if in_options:
            if line.lstrip().startswith('-'):
                split = re.split(r'\s{2,}', line.lstrip())
                # Description string may start with `-` as well. If there is
                # only one piece then it's a description bit, but not an option.
                if len(split) > 1:
                    option, description = split
                    split_option = option.split(' ')

                    if not split_option[-1].startswith('-'):  # metavar
                        option = ' '.join(split_option[:-1] + ['*%s*' % split_option[-1]])

                    # Pandoc's definition_lists. See http://pandoc.org/README.html
                    # for more information.
                    ret += '\n%s\n: %s\n' % (option, description)
                    continue
            ret += line.lstrip() + '\n'
        else:
            ret += line + '\n'

    return ret


if __name__ == '__main__':
    main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
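To make filter_options() above concrete, here is its rewrite applied by hand to one README-style option line; the sample line is made up, and the output shape is Pandoc's definition-list syntax:

import re

line = '    -h, --help                       Print this help text and exit'

split = re.split(r'\s{2,}', line.lstrip())
option, description = split
# '-h, --help' ends in a flag, not a metavar, so it is kept as-is
print('\n%s\n: %s' % (option, description))
# Output:
#
# -h, --help
# : Print this help text and exit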
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/devscripts/lazy_load_template.py
devscripts/lazy_load_template.py
# coding: utf-8
from __future__ import unicode_literals

import re


class LazyLoadExtractor(object):
    _module = None

    @classmethod
    def ie_key(cls):
        return cls.__name__[:-2]

    def __new__(cls, *args, **kwargs):
        mod = __import__(cls._module, fromlist=(cls.__name__,))
        real_cls = getattr(mod, cls.__name__)
        instance = real_cls.__new__(real_cls)
        instance.__init__(*args, **kwargs)
        return instance
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
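The template above swaps the class out at instantiation time: __new__ imports the real module only when an extractor object is actually created. A standalone sketch of the same trick, using the stdlib json module as a stand-in for an extractor module (all names here are illustrative):

class LazyJSONDecoder(object):
    # stand-in for a generated LazyLoad* class; the real template stores
    # the dotted module path of the concrete extractor here
    _module = 'json'

    def __new__(cls, *args, **kwargs):
        mod = __import__(cls._module, fromlist=('JSONDecoder',))
        real_cls = getattr(mod, 'JSONDecoder')
        instance = real_cls.__new__(real_cls)
        instance.__init__(*args, **kwargs)
        return instance


d = LazyJSONDecoder()
print(type(d).__name__)  # JSONDecoder, not LazyJSONDecoder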
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/devscripts/bash-completion.py
devscripts/bash-completion.py
#!/usr/bin/env python
from __future__ import unicode_literals

import os
from os.path import dirname as dirn
import sys

sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
import youtube_dl
from youtube_dl.compat import compat_open as open

from utils import read_file

BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in"


def build_completion(opt_parser):
    opts_flag = []
    for group in opt_parser.option_groups:
        for option in group.option_list:
            # for every long flag
            opts_flag.append(option.get_opt_string())
    template = read_file(BASH_COMPLETION_TEMPLATE)
    with open(BASH_COMPLETION_FILE, "w", encoding='utf-8') as f:
        # just using the special char
        filled_template = template.replace("{{flags}}", " ".join(opts_flag))
        f.write(filled_template)


parser = youtube_dl.parseOpts()[0]
build_completion(parser)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/devscripts/zsh-completion.py
devscripts/zsh-completion.py
#!/usr/bin/env python
from __future__ import unicode_literals

import os
from os.path import dirname as dirn
import sys

sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
import youtube_dl

from utils import read_file, write_file

ZSH_COMPLETION_FILE = "youtube-dl.zsh"
ZSH_COMPLETION_TEMPLATE = "devscripts/zsh-completion.in"


def build_completion(opt_parser):
    opts = [opt for group in opt_parser.option_groups
            for opt in group.option_list]
    opts_file = [opt for opt in opts if opt.metavar == "FILE"]
    opts_dir = [opt for opt in opts if opt.metavar == "DIR"]

    fileopts = []
    for opt in opts_file:
        if opt._short_opts:
            fileopts.extend(opt._short_opts)
        if opt._long_opts:
            fileopts.extend(opt._long_opts)

    diropts = []
    for opt in opts_dir:
        if opt._short_opts:
            diropts.extend(opt._short_opts)
        if opt._long_opts:
            diropts.extend(opt._long_opts)

    flags = [opt.get_opt_string() for opt in opts]

    template = read_file(ZSH_COMPLETION_TEMPLATE)

    template = template.replace("{{fileopts}}", "|".join(fileopts))
    template = template.replace("{{diropts}}", "|".join(diropts))
    template = template.replace("{{flags}}", " ".join(flags))

    write_file(ZSH_COMPLETION_FILE, template)


parser = youtube_dl.parseOpts()[0]
build_completion(parser)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/devscripts/make_issue_template.py
devscripts/make_issue_template.py
#!/usr/bin/env python
from __future__ import unicode_literals

import optparse
import os.path
import sys

from utils import read_file, read_version, write_file


def main():
    parser = optparse.OptionParser(usage='%prog INFILE OUTFILE')
    options, args = parser.parse_args()
    if len(args) != 2:
        parser.error('Expected an input and an output filename')

    infile, outfile = args

    issue_template_tmpl = read_file(infile)

    out = issue_template_tmpl % {'version': read_version()}

    write_file(outfile, out)


if __name__ == '__main__':
    main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/devscripts/fish-completion.py
devscripts/fish-completion.py
#!/usr/bin/env python
from __future__ import unicode_literals

import optparse
import os
from os.path import dirname as dirn
import sys

sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
import youtube_dl
from youtube_dl.utils import shell_quote

from utils import read_file, write_file

FISH_COMPLETION_FILE = 'youtube-dl.fish'
FISH_COMPLETION_TEMPLATE = 'devscripts/fish-completion.in'

EXTRA_ARGS = {
    'recode-video': ['--arguments', 'mp4 flv ogg webm mkv', '--exclusive'],

    # Options that need a file parameter
    'download-archive': ['--require-parameter'],
    'cookies': ['--require-parameter'],
    'load-info': ['--require-parameter'],
    'batch-file': ['--require-parameter'],
}


def build_completion(opt_parser):
    commands = []

    for group in opt_parser.option_groups:
        for option in group.option_list:
            long_option = option.get_opt_string().strip('-')
            complete_cmd = ['complete', '--command', 'youtube-dl', '--long-option', long_option]
            if option._short_opts:
                complete_cmd += ['--short-option', option._short_opts[0].strip('-')]
            if option.help != optparse.SUPPRESS_HELP:
                complete_cmd += ['--description', option.help]
            complete_cmd.extend(EXTRA_ARGS.get(long_option, []))
            commands.append(shell_quote(complete_cmd))

    template = read_file(FISH_COMPLETION_TEMPLATE)
    filled_template = template.replace('{{commands}}', '\n'.join(commands))
    write_file(FISH_COMPLETION_FILE, filled_template)


parser = youtube_dl.parseOpts()[0]
build_completion(parser)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/devscripts/generate_aes_testdata.py
devscripts/generate_aes_testdata.py
from __future__ import unicode_literals

import codecs
import subprocess

import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from youtube_dl.utils import intlist_to_bytes
from youtube_dl.aes import aes_encrypt, key_expansion

secret_msg = b'Secret message goes here'


def hex_str(int_list):
    return codecs.encode(intlist_to_bytes(int_list), 'hex')


def openssl_encode(algo, key, iv):
    cmd = ['openssl', 'enc', '-e', '-' + algo, '-K', hex_str(key), '-iv', hex_str(iv)]
    prog = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, _ = prog.communicate(secret_msg)
    return out


iv = key = [0x20, 0x15] + 14 * [0]

r = openssl_encode('aes-128-cbc', key, iv)
print('aes_cbc_decrypt')
print(repr(r))

password = key
new_key = aes_encrypt(password, key_expansion(password))
r = openssl_encode('aes-128-ctr', new_key, iv)
print('aes_decrypt_text 16')
print(repr(r))

password = key + 16 * [0]
new_key = aes_encrypt(password, key_expansion(password)) * (32 // 16)
r = openssl_encode('aes-256-ctr', new_key, iv)
print('aes_decrypt_text 32')
print(repr(r))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
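The printed ciphertexts become fixtures in test/test_aes.py. As a sanity check, the CBC case can be decrypted again with youtube-dl's pure-Python AES; this sketch assumes it is appended to the script above, since it reuses openssl_encode, secret_msg and the key/iv values:

from youtube_dl.aes import aes_cbc_decrypt
from youtube_dl.utils import bytes_to_intlist, intlist_to_bytes

ciphertext = bytes_to_intlist(openssl_encode('aes-128-cbc', key, iv))
plaintext = intlist_to_bytes(aes_cbc_decrypt(ciphertext, key, iv))
# openssl applies PKCS#7 padding, so the original message is a prefix
assert plaintext.startswith(secret_msg)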
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/devscripts/check-porn.py
devscripts/check-porn.py
#!/usr/bin/env python
from __future__ import unicode_literals

"""
This script employs a VERY basic heuristic ('porn' in webpage.lower()) to check
whether we are failing to 'age_limit'-tag some porn site.

A second approach relies on a list of porn domains; to activate it, pass the
list's filename as the only argument.
"""

# Allow direct execution
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from test.helper import gettestcases
from youtube_dl.utils import compat_urllib_parse_urlparse
from youtube_dl.utils import compat_urllib_request

if len(sys.argv) > 1:
    METHOD = 'LIST'
    # read as bytes so the explicit decode works on both Python 2 and 3
    with open(sys.argv[1], 'rb') as list_file:
        LIST = list_file.read().decode('utf8').strip()
else:
    METHOD = 'HEURISTIC'

for test in gettestcases():
    if METHOD == 'HEURISTIC':
        try:
            webpage = compat_urllib_request.urlopen(test['url'], timeout=10).read()
        except Exception:
            print('\nFail: {0}'.format(test['name']))
            continue

        webpage = webpage.decode('utf8', 'replace')
        RESULT = 'porn' in webpage.lower()

    elif METHOD == 'LIST':
        domain = compat_urllib_parse_urlparse(test['url']).netloc
        if not domain:
            print('\nFail: {0}'.format(test['name']))
            continue
        domain = '.'.join(domain.split('.')[-2:])

        RESULT = ('.' + domain + '\n' in LIST or '\n' + domain + '\n' in LIST)

    if RESULT and ('info_dict' not in test or 'age_limit' not in test['info_dict']
                   or test['info_dict']['age_limit'] != 18):
        print('\nPotential missing age_limit check: {0}'.format(test['name']))
    elif not RESULT and ('info_dict' in test and 'age_limit' in test['info_dict']
                         and test['info_dict']['age_limit'] == 18):
        print('\nPotential false negative: {0}'.format(test['name']))
    else:
        sys.stdout.write('.')
        sys.stdout.flush()

print()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
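The LIST branch above reduces each URL's hostname to its last two labels before the lookup, deliberately ignoring subdomains. The same reduction in isolation (the URLs are made-up examples):

from youtube_dl.utils import compat_urllib_parse_urlparse

for url in ('http://media.sub.example.com/clip/1', 'https://example.org/v/2'):
    netloc = compat_urllib_parse_urlparse(url).netloc
    # keep only the registered domain: the last two dot-separated labels
    print('.'.join(netloc.split('.')[-2:]))
# example.com
# example.org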
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/devscripts/utils.py
devscripts/utils.py
# coding: utf-8
from __future__ import unicode_literals

import argparse
import functools
import os.path
import subprocess
import sys

dirn = os.path.dirname

sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))

from youtube_dl.compat import (
    compat_kwargs,
    compat_open as open,
)


def read_file(fname):
    with open(fname, encoding='utf-8') as f:
        return f.read()


def write_file(fname, content, mode='w'):
    with open(fname, mode, encoding='utf-8') as f:
        return f.write(content)


def read_version(fname='youtube_dl/version.py'):
    """Get the version without importing the package"""
    exec(compile(read_file(fname), fname, 'exec'))
    return locals()['__version__']


def get_filename_args(has_infile=False, default_outfile=None):
    parser = argparse.ArgumentParser()
    if has_infile:
        parser.add_argument('infile', help='Input file')
    kwargs = {'nargs': '?', 'default': default_outfile} if default_outfile else {}
    kwargs['help'] = 'Output file'
    parser.add_argument('outfile', **compat_kwargs(kwargs))

    opts = parser.parse_args()
    if has_infile:
        return opts.infile, opts.outfile
    return opts.outfile


def compose_functions(*functions):
    return lambda x: functools.reduce(lambda y, f: f(y), functions, x)


def run_process(*args, **kwargs):
    kwargs.setdefault('text', True)
    kwargs.setdefault('check', True)
    kwargs.setdefault('capture_output', True)
    if kwargs['text']:
        kwargs.setdefault('encoding', 'utf-8')
        kwargs.setdefault('errors', 'replace')
    kwargs = compat_kwargs(kwargs)
    return subprocess.run(args, **kwargs)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
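compose_functions() applies its arguments left to right, i.e. in pipeline order rather than mathematical composition order. A quick illustration, assuming the repository root is importable:

from devscripts.utils import compose_functions

# applied left to right: strip first, then upper-case
strip_then_upper = compose_functions(str.strip, str.upper)
assert strip_then_upper('  youtube-dl  ') == 'YOUTUBE-DL'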
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/devscripts/create-github-release.py
devscripts/create-github-release.py
#!/usr/bin/env python
from __future__ import unicode_literals

import json
import mimetypes
import netrc
import optparse
import os
import re
import sys

dirn = os.path.dirname

sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))

from youtube_dl.compat import (
    compat_basestring,
    compat_getpass,
    compat_print,
    compat_urllib_request,
)
from youtube_dl.utils import (
    make_HTTPS_handler,
    sanitized_Request,
)
from utils import read_file


class GitHubReleaser(object):
    _API_URL = 'https://api.github.com/repos/ytdl-org/youtube-dl/releases'
    _UPLOADS_URL = 'https://uploads.github.com/repos/ytdl-org/youtube-dl/releases/%s/assets?name=%s'
    _NETRC_MACHINE = 'github.com'

    def __init__(self, debuglevel=0):
        self._init_github_account()
        https_handler = make_HTTPS_handler({}, debuglevel=debuglevel)
        self._opener = compat_urllib_request.build_opener(https_handler)

    def _init_github_account(self):
        try:
            info = netrc.netrc().authenticators(self._NETRC_MACHINE)
            if info is not None:
                self._token = info[2]
                compat_print('Using GitHub credentials found in .netrc...')
                return
            else:
                compat_print('No GitHub credentials found in .netrc')
        except (IOError, netrc.NetrcParseError):
            compat_print('Unable to parse .netrc')
        self._token = compat_getpass(
            'Type your GitHub PAT (personal access token) and press [Return]: ')

    def _call(self, req):
        if isinstance(req, compat_basestring):
            req = sanitized_Request(req)
        req.add_header('Authorization', 'token %s' % self._token)
        response = self._opener.open(req).read().decode('utf-8')
        return json.loads(response)

    def list_releases(self):
        return self._call(self._API_URL)

    def create_release(self, tag_name, name=None, body='', draft=False, prerelease=False):
        data = {
            'tag_name': tag_name,
            'target_commitish': 'master',
            'name': name,
            'body': body,
            'draft': draft,
            'prerelease': prerelease,
        }
        req = sanitized_Request(self._API_URL, json.dumps(data).encode('utf-8'))
        return self._call(req)

    def create_asset(self, release_id, asset):
        asset_name = os.path.basename(asset)
        url = self._UPLOADS_URL % (release_id, asset_name)
        # Our files are small enough to be loaded directly into memory.
        data = open(asset, 'rb').read()
        req = sanitized_Request(url, data)
        mime_type, _ = mimetypes.guess_type(asset_name)
        req.add_header('Content-Type', mime_type or 'application/octet-stream')
        return self._call(req)


def main():
    parser = optparse.OptionParser(usage='%prog CHANGELOG VERSION BUILDPATH')
    options, args = parser.parse_args()
    if len(args) != 3:
        parser.error('Expected a changelog, a version and a build directory')

    changelog_file, version, build_path = args

    changelog = read_file(changelog_file)

    mobj = re.search(r'(?s)version %s\n{2}(.+?)\n{3}' % version, changelog)
    body = mobj.group(1) if mobj else ''

    releaser = GitHubReleaser()

    new_release = releaser.create_release(
        version, name='youtube-dl %s' % version, body=body)
    release_id = new_release['id']

    for asset in os.listdir(build_path):
        compat_print('Uploading %s...' % asset)
        releaser.create_asset(release_id, os.path.join(build_path, asset))


if __name__ == '__main__':
    main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/devscripts/__init__.py
devscripts/__init__.py
# Empty file needed to make devscripts.utils properly importable from outside
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/devscripts/buildserver.py
devscripts/buildserver.py
#!/usr/bin/python3

import argparse
import ctypes
import functools
import shutil
import subprocess
import sys
import tempfile
import threading
import traceback
import os.path

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from youtube_dl.compat import (
    compat_input,
    compat_http_server,
    compat_str,
    compat_urlparse,
)

# These are not used outside of buildserver.py thus not in compat.py

try:
    import winreg as compat_winreg
except ImportError:  # Python 2
    import _winreg as compat_winreg

try:
    import socketserver as compat_socketserver
except ImportError:  # Python 2
    import SocketServer as compat_socketserver


class BuildHTTPServer(compat_socketserver.ThreadingMixIn, compat_http_server.HTTPServer):
    allow_reuse_address = True


advapi32 = ctypes.windll.advapi32

SC_MANAGER_ALL_ACCESS = 0xf003f
SC_MANAGER_CREATE_SERVICE = 0x02
SERVICE_WIN32_OWN_PROCESS = 0x10
SERVICE_AUTO_START = 0x2
SERVICE_ERROR_NORMAL = 0x1
DELETE = 0x00010000
SERVICE_STATUS_START_PENDING = 0x00000002
SERVICE_STATUS_RUNNING = 0x00000004
SERVICE_ACCEPT_STOP = 0x1

SVCNAME = 'youtubedl_builder'

LPTSTR = ctypes.c_wchar_p
START_CALLBACK = ctypes.WINFUNCTYPE(None, ctypes.c_int, ctypes.POINTER(LPTSTR))


class SERVICE_TABLE_ENTRY(ctypes.Structure):
    _fields_ = [
        ('lpServiceName', LPTSTR),
        ('lpServiceProc', START_CALLBACK)
    ]


class SERVICE_STATUS(ctypes.Structure):
    # standard WinAPI SERVICE_STATUS layout (winsvc.h), referenced by
    # win_service_set_status() below
    _fields_ = [
        ('dwServiceType', ctypes.c_ulong),
        ('dwCurrentState', ctypes.c_ulong),
        ('dwControlsAccepted', ctypes.c_ulong),
        ('dwWin32ExitCode', ctypes.c_ulong),
        ('dwServiceSpecificExitCode', ctypes.c_ulong),
        ('dwCheckPoint', ctypes.c_ulong),
        ('dwWaitHint', ctypes.c_ulong),
    ]


HandlerEx = ctypes.WINFUNCTYPE(
    ctypes.c_int,     # return
    ctypes.c_int,     # dwControl
    ctypes.c_int,     # dwEventType
    ctypes.c_void_p,  # lpEventData,
    ctypes.c_void_p,  # lpContext,
)


def _ctypes_array(c_type, py_array):
    ar = (c_type * len(py_array))()
    ar[:] = py_array
    return ar


def win_OpenSCManager():
    res = advapi32.OpenSCManagerW(None, None, SC_MANAGER_ALL_ACCESS)
    if not res:
        raise Exception('Opening service manager failed - '
                        'are you running this as administrator?')
    return res


def win_install_service(service_name, cmdline):
    manager = win_OpenSCManager()
    try:
        h = advapi32.CreateServiceW(
            manager, service_name, None,
            SC_MANAGER_CREATE_SERVICE, SERVICE_WIN32_OWN_PROCESS,
            SERVICE_AUTO_START, SERVICE_ERROR_NORMAL,
            cmdline, None, None, None, None, None)
        if not h:
            raise OSError('Service creation failed: %s' % ctypes.FormatError())

        advapi32.CloseServiceHandle(h)
    finally:
        advapi32.CloseServiceHandle(manager)


def win_uninstall_service(service_name):
    manager = win_OpenSCManager()
    try:
        h = advapi32.OpenServiceW(manager, service_name, DELETE)
        if not h:
            raise OSError('Could not find service %s: %s' % (
                service_name, ctypes.FormatError()))

        try:
            if not advapi32.DeleteService(h):
                raise OSError('Deletion failed: %s' % ctypes.FormatError())
        finally:
            advapi32.CloseServiceHandle(h)
    finally:
        advapi32.CloseServiceHandle(manager)


def win_service_report_event(service_name, msg, is_error=True):
    with open('C:/sshkeys/log', 'a', encoding='utf-8') as f:
        f.write(msg + '\n')

    event_log = advapi32.RegisterEventSourceW(None, service_name)
    if not event_log:
        raise OSError('Could not report event: %s' % ctypes.FormatError())

    try:
        type_id = 0x0001 if is_error else 0x0004
        event_id = 0xc0000000 if is_error else 0x40000000
        lines = _ctypes_array(LPTSTR, [msg])

        if not advapi32.ReportEventW(
                event_log, type_id, 0, event_id, None,
                len(lines), 0, lines, None):
            raise OSError('Event reporting failed: %s' % ctypes.FormatError())
    finally:
        advapi32.DeregisterEventSource(event_log)


def win_service_handler(stop_event, *args):
    try:
        raise ValueError('Handler called with args ' + repr(args))
        # TODO
    except Exception as e:
        tb = traceback.format_exc()
        msg = str(e) + '\n' + tb
        win_service_report_event(service_name, msg, is_error=True)
        raise


def win_service_set_status(handle, status_code):
    svcStatus = SERVICE_STATUS()
    svcStatus.dwServiceType = SERVICE_WIN32_OWN_PROCESS
    svcStatus.dwCurrentState = status_code
    svcStatus.dwControlsAccepted = SERVICE_ACCEPT_STOP

    svcStatus.dwServiceSpecificExitCode = 0

    if not advapi32.SetServiceStatus(handle, ctypes.byref(svcStatus)):
        raise OSError('SetServiceStatus failed: %r' % ctypes.FormatError())


def win_service_main(service_name, real_main, argc, argv_raw):
    try:
        # args = [argv_raw[i].value for i in range(argc)]
        stop_event = threading.Event()
        # bind stop_event as the handler's first argument (the original had
        # the partial() arguments the wrong way round)
        handler = HandlerEx(functools.partial(win_service_handler, stop_event))
        h = advapi32.RegisterServiceCtrlHandlerExW(service_name, handler, None)
        if not h:
            raise OSError('Handler registration failed: %s' %
                          ctypes.FormatError())

        # TODO
    except Exception as e:
        tb = traceback.format_exc()
        msg = str(e) + '\n' + tb
        win_service_report_event(service_name, msg, is_error=True)
        raise


def win_service_start(service_name, real_main):
    try:
        cb = START_CALLBACK(
            functools.partial(win_service_main, service_name, real_main))
        dispatch_table = _ctypes_array(SERVICE_TABLE_ENTRY, [
            SERVICE_TABLE_ENTRY(
                service_name,
                cb
            ),
            SERVICE_TABLE_ENTRY(None, ctypes.cast(None, START_CALLBACK))
        ])

        if not advapi32.StartServiceCtrlDispatcherW(dispatch_table):
            raise OSError('ctypes start failed: %s' % ctypes.FormatError())
    except Exception as e:
        tb = traceback.format_exc()
        msg = str(e) + '\n' + tb
        win_service_report_event(service_name, msg, is_error=True)
        raise


def main(args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--install',
                        action='store_const', dest='action', const='install',
                        help='Launch at Windows startup')
    parser.add_argument('-u', '--uninstall',
                        action='store_const', dest='action', const='uninstall',
                        help='Remove Windows service')
    parser.add_argument('-s', '--service',
                        action='store_const', dest='action', const='service',
                        help='Run as a Windows service')
    parser.add_argument('-b', '--bind', metavar='<host:port>',
                        action='store', default='0.0.0.0:8142',
                        help='Bind to host:port (default %(default)s)')
    options = parser.parse_args(args=args)

    if options.action == 'install':
        fn = os.path.abspath(__file__).replace('v:', '\\\\vboxsrv\\vbox')
        cmdline = '%s %s -s -b %s' % (sys.executable, fn, options.bind)
        win_install_service(SVCNAME, cmdline)
        return

    if options.action == 'uninstall':
        win_uninstall_service(SVCNAME)
        return

    if options.action == 'service':
        win_service_start(SVCNAME, main)
        return

    host, port_str = options.bind.split(':')
    port = int(port_str)

    print('Listening on %s:%d' % (host, port))
    srv = BuildHTTPServer((host, port), BuildHTTPRequestHandler)
    thr = threading.Thread(target=srv.serve_forever)
    thr.start()
    compat_input('Press ENTER to shut down')
    srv.shutdown()
    thr.join()


def rmtree(path):
    for name in os.listdir(path):
        fname = os.path.join(path, name)
        if os.path.isdir(fname):
            rmtree(fname)
        else:
            os.chmod(fname, 0o666)
            os.remove(fname)
    os.rmdir(path)


class BuildError(Exception):
    def __init__(self, output, code=500):
        self.output = output
        self.code = code

    def __str__(self):
        return self.output


class HTTPError(BuildError):
    pass


class PythonBuilder(object):
    def __init__(self, **kwargs):
        python_version = kwargs.pop('python', '3.4')
        python_path = None
        for node in ('Wow6432Node\\', ''):
            try:
                key = compat_winreg.OpenKey(
                    compat_winreg.HKEY_LOCAL_MACHINE,
                    r'SOFTWARE\%sPython\PythonCore\%s\InstallPath' % (node, python_version))
                try:
                    python_path, _ = compat_winreg.QueryValueEx(key, '')
                finally:
                    compat_winreg.CloseKey(key)
                break
            except Exception:
                pass

        if not python_path:
            raise BuildError('No such Python version: %s' % python_version)

        self.pythonPath = python_path

        super(PythonBuilder, self).__init__(**kwargs)


class GITInfoBuilder(object):
    def __init__(self, **kwargs):
        try:
            self.user, self.repoName = kwargs['path'][:2]
            self.rev = kwargs.pop('rev')
        except ValueError:
            raise BuildError('Invalid path')
        except KeyError as e:
            raise BuildError('Missing mandatory parameter "%s"' % e.args[0])

        path = os.path.join(os.environ['APPDATA'], 'Build archive', self.repoName, self.user)
        if not os.path.exists(path):
            os.makedirs(path)
        self.basePath = tempfile.mkdtemp(dir=path)
        self.buildPath = os.path.join(self.basePath, 'build')

        super(GITInfoBuilder, self).__init__(**kwargs)


class GITBuilder(GITInfoBuilder):
    def build(self):
        try:
            subprocess.check_output(['git', 'clone', 'git://github.com/%s/%s.git' % (self.user, self.repoName), self.buildPath])
            subprocess.check_output(['git', 'checkout', self.rev], cwd=self.buildPath)
        except subprocess.CalledProcessError as e:
            raise BuildError(e.output)

        super(GITBuilder, self).build()


class YoutubeDLBuilder(object):
    authorizedUsers = ['fraca7', 'phihag', 'rg3', 'FiloSottile', 'ytdl-org']

    def __init__(self, **kwargs):
        if self.repoName != 'youtube-dl':
            raise BuildError('Invalid repository "%s"' % self.repoName)
        if self.user not in self.authorizedUsers:
            raise HTTPError('Unauthorized user "%s"' % self.user, 401)

        super(YoutubeDLBuilder, self).__init__(**kwargs)

    def build(self):
        try:
            proc = subprocess.Popen([os.path.join(self.pythonPath, 'python.exe'), 'setup.py', 'py2exe'], stdin=subprocess.PIPE, cwd=self.buildPath)
            proc.wait()
            # subprocess.check_output([os.path.join(self.pythonPath, 'python.exe'), 'setup.py', 'py2exe'],
            #                         cwd=self.buildPath)
        except subprocess.CalledProcessError as e:
            raise BuildError(e.output)

        super(YoutubeDLBuilder, self).build()


class DownloadBuilder(object):
    def __init__(self, **kwargs):
        self.handler = kwargs.pop('handler')
        self.srcPath = os.path.join(self.buildPath, *tuple(kwargs['path'][2:]))
        self.srcPath = os.path.abspath(os.path.normpath(self.srcPath))
        if not self.srcPath.startswith(self.buildPath):
            raise HTTPError(self.srcPath, 401)

        super(DownloadBuilder, self).__init__(**kwargs)

    def build(self):
        if not os.path.exists(self.srcPath):
            raise HTTPError('No such file', 404)
        if os.path.isdir(self.srcPath):
            raise HTTPError('Is a directory: %s' % self.srcPath, 401)

        self.handler.send_response(200)
        self.handler.send_header('Content-Type', 'application/octet-stream')
        self.handler.send_header('Content-Disposition', 'attachment; filename=%s' % os.path.split(self.srcPath)[-1])
        self.handler.send_header('Content-Length', str(os.stat(self.srcPath).st_size))
        self.handler.end_headers()

        with open(self.srcPath, 'rb') as src:
            shutil.copyfileobj(src, self.handler.wfile)

        super(DownloadBuilder, self).build()


class CleanupTempDir(object):
    def build(self):
        try:
            rmtree(self.basePath)
        except Exception as e:
            print('WARNING deleting "%s": %s' % (self.basePath, e))

        super(CleanupTempDir, self).build()


class Null(object):
    def __init__(self, **kwargs):
        pass

    def start(self):
        pass

    def close(self):
        pass

    def build(self):
        pass


class Builder(PythonBuilder, GITBuilder, YoutubeDLBuilder, DownloadBuilder, CleanupTempDir, Null):
    pass


class BuildHTTPRequestHandler(compat_http_server.BaseHTTPRequestHandler):
    actionDict = {'build': Builder, 'download': Builder}  # They're the same, no more caching.

    def do_GET(self):
        path = compat_urlparse.urlparse(self.path)
        paramDict = dict([(key, value[0]) for key, value in compat_urlparse.parse_qs(path.query).items()])
        action, _, path = path.path.strip('/').partition('/')
        if path:
            path = path.split('/')
            if action in self.actionDict:
                try:
                    builder = self.actionDict[action](path=path, handler=self, **paramDict)
                    builder.start()
                    try:
                        builder.build()
                    finally:
                        builder.close()
                except BuildError as e:
                    self.send_response(e.code)
                    msg = compat_str(e).encode('UTF-8')
                    self.send_header('Content-Type', 'text/plain; charset=UTF-8')
                    self.send_header('Content-Length', len(msg))
                    self.end_headers()
                    self.wfile.write(msg)
            else:
                self.send_response(500, 'Unknown build method "%s"' % action)
        else:
            self.send_response(500, 'Malformed URL')


if __name__ == '__main__':
    main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/devscripts/show-downloads-statistics.py
devscripts/show-downloads-statistics.py
#!/usr/bin/env python
from __future__ import unicode_literals

import itertools
import json
import os
import re
import sys

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from youtube_dl.compat import (
    compat_print,
    compat_urllib_request,
)
from youtube_dl.utils import format_bytes


def format_size(bytes):
    return '%s (%d bytes)' % (format_bytes(bytes), bytes)


total_bytes = 0

for page in itertools.count(1):
    releases = json.loads(compat_urllib_request.urlopen(
        'https://api.github.com/repos/ytdl-org/youtube-dl/releases?page=%s' % page
    ).read().decode('utf-8'))

    if not releases:
        break

    for release in releases:
        compat_print(release['name'])
        for asset in release['assets']:
            asset_name = asset['name']
            total_bytes += asset['download_count'] * asset['size']
            if all(not re.match(p, asset_name) for p in (
                    r'^youtube-dl$',
                    r'^youtube-dl-\d{4}\.\d{2}\.\d{2}(?:\.\d+)?\.tar\.gz$',
                    r'^youtube-dl\.exe$')):
                continue
            compat_print(
                ' %s size: %s downloads: %d'
                % (asset_name, format_size(asset['size']), asset['download_count']))

compat_print('total downloads traffic: %s' % format_size(total_bytes))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
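The script relies on the GitHub releases API returning an empty page past the last one. The same itertools.count() pagination pattern, isolated into a generator (the endpoint is the one used above):

import itertools
import json

from youtube_dl.compat import compat_urllib_request


def iter_releases():
    # pages are 1-based; an empty page marks the end of the listing
    for page in itertools.count(1):
        page_data = json.loads(compat_urllib_request.urlopen(
            'https://api.github.com/repos/ytdl-org/youtube-dl/releases?page=%d' % page
        ).read().decode('utf-8'))
        if not page_data:
            return
        for release in page_data:
            yield release


for release in iter_releases():
    print(release['tag_name'])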
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/devscripts/cli_to_api.py
devscripts/cli_to_api.py
 
#!/usr/bin/env python
# coding: utf-8

from __future__ import unicode_literals

"""
This script displays the API parameters corresponding to a yt-dl command line

Example:
$ ./cli_to_api.py -f best
{u'format': 'best'}
$
"""

# Allow direct execution
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import youtube_dl
from types import MethodType


def cli_to_api(*opts):
    YDL = youtube_dl.YoutubeDL

    # to extract the parsed options, break out of YoutubeDL instantiation

    # return options via this Exception
    class ParseYTDLResult(Exception):
        def __init__(self, result):
            super(ParseYTDLResult, self).__init__('result')
            self.opts = result

    # replacement constructor that raises ParseYTDLResult
    def ytdl_init(ydl, ydl_opts):
        super(YDL, ydl).__init__(ydl_opts)
        raise ParseYTDLResult(ydl_opts)

    # patch in the constructor
    YDL.__init__ = MethodType(ytdl_init, YDL)

    # core parser
    def parsed_options(argv):
        try:
            youtube_dl._real_main(list(argv))
        except ParseYTDLResult as result:
            return result.opts

    # from https://github.com/yt-dlp/yt-dlp/issues/5859#issuecomment-1363938900
    default = parsed_options([])

    def neq_opt(a, b):
        if a == b:
            return False
        # an unset DateRange default is equivalent to the all-inclusive range
        # (the original tested type(object), which can never match)
        if a is None and repr(type(b)).endswith(".utils.DateRange'>"):
            return '0001-01-01 - 9999-12-31' != '{0}'.format(b)
        return a != b

    diff = dict((k, v) for k, v in parsed_options(opts).items() if neq_opt(default[k], v))
    if 'postprocessors' in diff:
        diff['postprocessors'] = [pp for pp in diff['postprocessors'] if pp not in default['postprocessors']]
    return diff


def main():
    from pprint import PrettyPrinter

    pprint = PrettyPrinter()
    super_format = pprint.format

    def format(object, context, maxlevels, level):
        if repr(type(object)).endswith(".utils.DateRange'>"):
            return '{0}: {1}>'.format(repr(object)[:-2], object), True, False
        return super_format(object, context, maxlevels, level)

    pprint.format = format

    # skip argv[0] (the script name) so only the yt-dl options are parsed
    pprint.pprint(cli_to_api(*sys.argv[1:]))


if __name__ == '__main__':
    main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
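cli_to_api() is also usable as a plain function. Since it permanently patches YoutubeDL.__init__, keep this to a throwaway interpreter; a sketch, assuming a prompt started in devscripts/:

# run from the devscripts/ directory
from cli_to_api import cli_to_api

# only the options that differ from the defaults come back
assert cli_to_api('-f', 'best') == {'format': 'best'}
print(cli_to_api('--no-check-certificate'))  # {'nocheckcertificate': True}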
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/devscripts/make_supportedsites.py
devscripts/make_supportedsites.py
#!/usr/bin/env python
from __future__ import unicode_literals

import optparse
import os.path
import sys

# Import youtube_dl
dirn = os.path.dirname

sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
import youtube_dl

from utils import write_file


def main():
    parser = optparse.OptionParser(usage='%prog OUTFILE.md')
    options, args = parser.parse_args()
    if len(args) != 1:
        parser.error('Expected an output filename')

    outfile, = args

    def gen_ies_md(ies):
        for ie in ies:
            ie_md = '**{0}**'.format(ie.IE_NAME)
            ie_desc = getattr(ie, 'IE_DESC', None)
            if ie_desc is False:
                continue
            if ie_desc is not None:
                ie_md += ': {0}'.format(ie.IE_DESC)
            if not ie.working():
                ie_md += ' (Currently broken)'
            yield ie_md

    ies = sorted(youtube_dl.gen_extractors(), key=lambda i: i.IE_NAME.lower())

    out = '# Supported sites\n' + ''.join(
        ' - ' + md + '\n'
        for md in gen_ies_md(ies))

    write_file(outfile, out)


if __name__ == '__main__':
    main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/devscripts/make_lazy_extractors.py
devscripts/make_lazy_extractors.py
from __future__ import unicode_literals, print_function

from inspect import getsource
import os
from os.path import dirname as dirn
import re
import sys

print('WARNING: Lazy loading extractors is an experimental feature that may not always work', file=sys.stderr)

sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))

lazy_extractors_filename = sys.argv[1]
if os.path.exists(lazy_extractors_filename):
    os.remove(lazy_extractors_filename)

# Py2: may be confused by leftover lazy_extractors.pyc/.pyo
if sys.version_info[0] < 3:
    for c in ('c', 'o'):
        try:
            # the original appended the literal 'c' for both suffixes
            os.remove(lazy_extractors_filename + c)
        except OSError:
            pass

from devscripts.utils import read_file, write_file
from youtube_dl.compat import compat_register_utf8

compat_register_utf8()

from youtube_dl.extractor import _ALL_CLASSES
from youtube_dl.extractor.common import InfoExtractor, SearchInfoExtractor

module_template = read_file('devscripts/lazy_load_template.py')


def get_source(m):
    return re.sub(r'(?m)^\s*#.*\n', '', getsource(m))


module_contents = [
    module_template,
    get_source(InfoExtractor.suitable),
    get_source(InfoExtractor._match_valid_url) + '\n',
    'class LazyLoadSearchExtractor(LazyLoadExtractor):\n    pass\n',
    # needed for suitable() methods of Youtube extractor (see #28780)
    'from youtube_dl.utils import parse_qs, variadic\n',
]

ie_template = '''
class {name}({bases}):
    _VALID_URL = {valid_url!r}
    _module = '{module}'
'''

make_valid_template = '''
    @classmethod
    def _make_valid_url(cls):
        return {valid_url!r}
'''


def get_base_name(base):
    if base is InfoExtractor:
        return 'LazyLoadExtractor'
    elif base is SearchInfoExtractor:
        return 'LazyLoadSearchExtractor'
    else:
        return base.__name__


def build_lazy_ie(ie, name):
    valid_url = getattr(ie, '_VALID_URL', None)
    s = ie_template.format(
        name=name,
        bases=', '.join(map(get_base_name, ie.__bases__)),
        valid_url=valid_url,
        module=ie.__module__)
    if ie.suitable.__func__ is not InfoExtractor.suitable.__func__:
        s += '\n' + get_source(ie.suitable)
    if hasattr(ie, '_make_valid_url'):
        # search extractors
        s += make_valid_template.format(valid_url=ie._make_valid_url())
    return s


# find the correct sorting and add the required base classes so that subclasses
# can be correctly created
classes = _ALL_CLASSES[:-1]
ordered_cls = []
while classes:
    for c in classes[:]:
        bases = set(c.__bases__) - set((object, InfoExtractor, SearchInfoExtractor))
        stop = False
        for b in bases:
            if b not in classes and b not in ordered_cls:
                if b.__name__ == 'GenericIE':
                    exit()
                classes.insert(0, b)
                stop = True
        if stop:
            break
        if all(b in ordered_cls for b in bases):
            ordered_cls.append(c)
            classes.remove(c)
            break
ordered_cls.append(_ALL_CLASSES[-1])

names = []
for ie in ordered_cls:
    name = ie.__name__
    src = build_lazy_ie(ie, name)
    module_contents.append(src)
    if ie in _ALL_CLASSES:
        names.append(name)

module_contents.append(
    '_ALL_CLASSES = [{0}]'.format(', '.join(names)))

module_src = '\n'.join(module_contents)

write_file(lazy_extractors_filename, module_src + '\n')

# work around JVM byte code module limit in Jython
if sys.platform.startswith('java') and sys.version_info[:2] == (2, 7):
    import subprocess
    from youtube_dl.compat import compat_subprocess_get_DEVNULL
    # if Python 2.7 is available, use it to compile the module for Jython
    try:
        subprocess.check_call(['python2.7', '-m', 'py_compile', lazy_extractors_filename],
                              stdout=compat_subprocess_get_DEVNULL())
    except Exception:
        pass
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
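For a concrete picture of the generated module, this is roughly what build_lazy_ie() emits for a simple extractor; DemoIE, the pattern and the module path are illustrative, and the stub base class only stands in for the template above so the excerpt runs on its own:

# minimal stand-in for the base class from lazy_load_template.py
class LazyLoadExtractor(object):
    _module = None


# roughly what build_lazy_ie() generates for a hypothetical DemoIE
class DemoIE(LazyLoadExtractor):
    _VALID_URL = 'https?://(?:www\\.)?demo\\.example/watch\\?v=(?P<id>\\w+)'
    _module = 'youtube_dl.extractor.demo'


print(DemoIE.__name__[:-2])  # 'Demo', i.e. what ie_key() would return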
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/devscripts/gh-pages/update-sites.py
devscripts/gh-pages/update-sites.py
#!/usr/bin/env python3
from __future__ import unicode_literals

import sys
import os
import textwrap

dirn = os.path.dirname

# We must be able to import youtube_dl
sys.path.insert(0, dirn(dirn(dirn(os.path.abspath(__file__)))))

import youtube_dl
from devscripts.utils import read_file, write_file


def main():
    template = read_file('supportedsites.html.in')

    ie_htmls = []
    for ie in youtube_dl.list_extractors(age_limit=None):
        ie_html = '<b>{}</b>'.format(ie.IE_NAME)
        ie_desc = getattr(ie, 'IE_DESC', None)
        if ie_desc is False:
            continue
        elif ie_desc is not None:
            ie_html += ': {}'.format(ie.IE_DESC)
        if not ie.working():
            ie_html += ' (Currently broken)'
        ie_htmls.append('<li>{}</li>'.format(ie_html))

    template = template.replace('@SITES@', textwrap.indent('\n'.join(ie_htmls), '\t'))

    write_file('supportedsites.html', template)


if __name__ == '__main__':
    main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/devscripts/gh-pages/update-copyright.py
devscripts/gh-pages/update-copyright.py
#!/usr/bin/env python
# coding: utf-8

from __future__ import with_statement, unicode_literals

import datetime
import glob
import os
import re
import sys

dirn = os.path.dirname

sys.path.insert(0, dirn(dirn(dirn(os.path.abspath(__file__)))))

from devscripts.utils import read_file, write_file
from youtube_dl import compat_str

year = compat_str(datetime.datetime.now().year)
for fn in glob.glob('*.html*'):
    content = read_file(fn)
    newc = re.sub(r'(?P<copyright>Copyright © 2011-)(?P<year>[0-9]{4})', 'Copyright © 2011-' + year, content)
    if content != newc:
        tmpFn = fn + '.part'
        write_file(tmpFn, newc)
        os.rename(tmpFn, fn)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/devscripts/gh-pages/generate-download.py
devscripts/gh-pages/generate-download.py
#!/usr/bin/env python3
from __future__ import unicode_literals

import json
import os.path
import sys

dirn = os.path.dirname

sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))

from utils import read_file, write_file

versions_info = json.loads(read_file('update/versions.json'))
version = versions_info['latest']
version_dict = versions_info['versions'][version]

# Read template page
template = read_file('download.html.in')

template = template.replace('@PROGRAM_VERSION@', version)
template = template.replace('@PROGRAM_URL@', version_dict['bin'][0])
template = template.replace('@PROGRAM_SHA256SUM@', version_dict['bin'][1])
template = template.replace('@EXE_URL@', version_dict['exe'][0])
template = template.replace('@EXE_SHA256SUM@', version_dict['exe'][1])
template = template.replace('@TAR_URL@', version_dict['tar'][0])
template = template.replace('@TAR_SHA256SUM@', version_dict['tar'][1])

write_file('download.html', template)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/devscripts/gh-pages/sign-versions.py
devscripts/gh-pages/sign-versions.py
#!/usr/bin/env python3
from __future__ import unicode_literals, with_statement

import rsa
import json
from binascii import hexlify

try:
    input = raw_input
except NameError:
    pass

versions_info = json.load(open('update/versions.json'))
if 'signature' in versions_info:
    del versions_info['signature']

print('Enter the PKCS1 private key, followed by a blank line:')
privkey = b''
while True:
    try:
        line = input()
    except EOFError:
        break
    if line == '':
        break
    privkey += line.encode('ascii') + b'\n'
privkey = rsa.PrivateKey.load_pkcs1(privkey)

signature = hexlify(rsa.pkcs1.sign(json.dumps(versions_info, sort_keys=True).encode('utf-8'), privkey, 'SHA-256')).decode()
print('signature: ' + signature)

versions_info['signature'] = signature
with open('update/versions.json', 'w') as versionsf:
    json.dump(versions_info, versionsf, indent=4, sort_keys=True)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
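The hex signature written above is meant to be checked by the updater before trusting versions.json. A sketch of the verifying side using the same rsa library, with a freshly generated demo key pair standing in for youtube-dl's real published public key:

import json

import rsa

# demo key pair; the real updater ships a fixed public key
pubkey, privkey = rsa.newkeys(512)

payload = {'latest': '2021.12.17', 'versions': {}}  # stand-in for versions.json
message = json.dumps(payload, sort_keys=True).encode('utf-8')
signature = rsa.pkcs1.sign(message, privkey, 'SHA-256')

# rsa.verify raises rsa.pkcs1.VerificationError if anything was tampered with
rsa.verify(message, signature, pubkey)
print('signature OK')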
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/devscripts/gh-pages/update-feed.py
devscripts/gh-pages/update-feed.py
#!/usr/bin/env python3
from __future__ import unicode_literals

import datetime
import json
import os.path
import textwrap
import sys

dirn = os.path.dirname

sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))

from utils import write_file

atom_template = textwrap.dedent("""\
    <?xml version="1.0" encoding="utf-8"?>
    <feed xmlns="http://www.w3.org/2005/Atom">
        <link rel="self" href="http://ytdl-org.github.io/youtube-dl/update/releases.atom" />
        <title>youtube-dl releases</title>
        <id>https://yt-dl.org/feed/youtube-dl-updates-feed</id>
        <updated>@TIMESTAMP@</updated>
        @ENTRIES@
    </feed>""")

entry_template = textwrap.dedent("""
    <entry>
        <id>https://yt-dl.org/feed/youtube-dl-updates-feed/youtube-dl-@VERSION@</id>
        <title>New version @VERSION@</title>
        <link href="http://ytdl-org.github.io/youtube-dl" />
        <content type="xhtml">
            <div xmlns="http://www.w3.org/1999/xhtml">
                Downloads available at <a href="https://yt-dl.org/downloads/@VERSION@/">https://yt-dl.org/downloads/@VERSION@/</a>
            </div>
        </content>
        <author>
            <name>The youtube-dl maintainers</name>
        </author>
        <updated>@TIMESTAMP@</updated>
    </entry>
    """)

now = datetime.datetime.now()
now_iso = now.isoformat() + 'Z'

atom_template = atom_template.replace('@TIMESTAMP@', now_iso)

versions_info = json.load(open('update/versions.json'))
versions = list(versions_info['versions'].keys())
versions.sort()

entries = []
for v in versions:
    fields = v.split('.')
    year, month, day = map(int, fields[:3])
    faked = 0
    patchlevel = 0
    while True:
        try:
            datetime.date(year, month, day)
        except ValueError:
            day -= 1
            faked += 1
            assert day > 0
            continue
        break
    if len(fields) >= 4:
        try:
            patchlevel = int(fields[3])
        except ValueError:
            patchlevel = 1

    timestamp = '%04d-%02d-%02dT00:%02d:%02dZ' % (year, month, day, faked, patchlevel)

    entry = entry_template.replace('@TIMESTAMP@', timestamp)
    entry = entry.replace('@VERSION@', v)
    entries.append(entry)

entries_str = textwrap.indent(''.join(entries), '\t')
atom_template = atom_template.replace('@ENTRIES@', entries_str)

write_file('update/releases.atom', atom_template)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
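The day-decrementing loop above exists because date-based version strings such as 2013.02.30 need not be valid calendar dates. A standalone re-implementation showing what one such version turns into; encoding the faked-day count into the minutes field presumably keeps same-day entries distinct and correctly ordered:

import datetime

version = '2013.02.30'  # real-looking, but not a valid calendar date
year, month, day = map(int, version.split('.')[:3])

faked = 0
while True:
    try:
        datetime.date(year, month, day)
        break
    except ValueError:
        day -= 1
        faked += 1

print('%04d-%02d-%02dT00:%02d:00Z' % (year, month, day, faked))
# 2013-02-28T00:02:00Z (two days were faked: the 30th and the 29th)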
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/devscripts/gh-pages/add-version.py
devscripts/gh-pages/add-version.py
#!/usr/bin/env python3
from __future__ import unicode_literals

import json
import sys
import hashlib
import os.path

dirn = os.path.dirname

sys.path.insert(0, dirn(dirn(dirn(os.path.abspath(__file__)))))

from devscripts.utils import read_file, write_file
from youtube_dl.compat import compat_open as open

if len(sys.argv) <= 1:
    print('Specify the version number as parameter')
    sys.exit()
version = sys.argv[1]

write_file('update/LATEST_VERSION', version)

versions_info = json.loads(read_file('update/versions.json'))
if 'signature' in versions_info:
    del versions_info['signature']

new_version = {}

filenames = {
    'bin': 'youtube-dl',
    'exe': 'youtube-dl.exe',
    'tar': 'youtube-dl-%s.tar.gz' % version}
build_dir = os.path.join('..', '..', 'build', version)
for key, filename in filenames.items():
    url = 'https://yt-dl.org/downloads/%s/%s' % (version, filename)
    fn = os.path.join(build_dir, filename)
    with open(fn, 'rb') as f:
        data = f.read()
    if not data:
        raise ValueError('File %s is empty!' % fn)
    sha256sum = hashlib.sha256(data).hexdigest()
    new_version[key] = (url, sha256sum)

versions_info['versions'][version] = new_version
versions_info['latest'] = version

with open('update/versions.json', 'w', encoding='utf-8') as jsonf:
    # json.dump (not json.dumps) writes directly to the file object
    json.dump(versions_info, jsonf, indent=4, sort_keys=True)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/docs/conf.py
docs/conf.py
# coding: utf-8
#
# youtube-dl documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 14 21:05:43 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os
# Allow importing youtube_dl
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# -- General configuration ------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'youtube-dl'
copyright = u'2014, Ricardo Garcia Gonzalez'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from youtube_dl.version import __version__
version = __version__
# The full version, including alpha/beta/rc tags.
release = version

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Output file base name for HTML help builder.
htmlhelp_basename = 'youtube-dldoc'
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_post_hooks.py
test/test_post_hooks.py
#!/usr/bin/env python3

# Allow direct execution
import os
import sys
import unittest

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from test.helper import get_params, is_download_test, try_rm
import yt_dlp.YoutubeDL  # isort: split
from yt_dlp.utils import DownloadError


class YoutubeDL(yt_dlp.YoutubeDL):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.to_stderr = self.to_screen


TEST_ID = 'gr51aVj-mLg'
EXPECTED_NAME = 'gr51aVj-mLg'


@is_download_test
class TestPostHooks(unittest.TestCase):
    def setUp(self):
        self.stored_name_1 = None
        self.stored_name_2 = None
        self.params = get_params({
            'skip_download': False,
            'writeinfojson': False,
            'quiet': True,
            'verbose': False,
            'cachedir': False,
        })
        self.files = []

    def test_post_hooks(self):
        self.params['post_hooks'] = [self.hook_one, self.hook_two]
        ydl = YoutubeDL(self.params)
        ydl.download([TEST_ID])
        self.assertEqual(self.stored_name_1, EXPECTED_NAME, 'Not the expected name from hook 1')
        self.assertEqual(self.stored_name_2, EXPECTED_NAME, 'Not the expected name from hook 2')

    def test_post_hook_exception(self):
        self.params['post_hooks'] = [self.hook_three]
        ydl = YoutubeDL(self.params)
        self.assertRaises(DownloadError, ydl.download, [TEST_ID])

    def hook_one(self, filename):
        self.stored_name_1, _ = os.path.splitext(os.path.basename(filename))
        self.files.append(filename)

    def hook_two(self, filename):
        self.stored_name_2, _ = os.path.splitext(os.path.basename(filename))
        self.files.append(filename)

    def hook_three(self, filename):
        self.files.append(filename)
        raise Exception(f'Test exception for \'{filename}\'')

    def tearDown(self):
        for f in self.files:
            try_rm(f)


if __name__ == '__main__':
    unittest.main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
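Outside the test suite, the post_hooks parameter exercised above works in any embedding of yt-dlp: each hook is called once per final file after all postprocessing. A minimal sketch (the URL is just an example):

import yt_dlp


def on_file_done(filename):
    # receives the final filename once all postprocessors have run
    print('finished:', filename)


with yt_dlp.YoutubeDL({'post_hooks': [on_file_done], 'quiet': True}) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])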
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_plugins.py
test/test_plugins.py
import importlib
import os
import shutil
import sys
import unittest
from pathlib import Path

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

TEST_DATA_DIR = Path(os.path.dirname(os.path.abspath(__file__)), 'testdata')
sys.path.append(str(TEST_DATA_DIR))
importlib.invalidate_caches()

from yt_dlp.plugins import (
    PACKAGE_NAME,
    PluginSpec,
    directories,
    load_plugins,
    load_all_plugins,
    register_plugin_spec,
)

from yt_dlp.globals import (
    extractors,
    postprocessors,
    plugin_dirs,
    plugin_ies,
    plugin_pps,
    all_plugins_loaded,
    plugin_specs,
)


EXTRACTOR_PLUGIN_SPEC = PluginSpec(
    module_name='extractor',
    suffix='IE',
    destination=extractors,
    plugin_destination=plugin_ies,
)

POSTPROCESSOR_PLUGIN_SPEC = PluginSpec(
    module_name='postprocessor',
    suffix='PP',
    destination=postprocessors,
    plugin_destination=plugin_pps,
)


def reset_plugins():
    plugin_ies.value = {}
    plugin_pps.value = {}
    plugin_dirs.value = ['default']
    plugin_specs.value = {}
    all_plugins_loaded.value = False
    # Clearing override plugins is probably difficult
    for module_name in tuple(sys.modules):
        for plugin_type in ('extractor', 'postprocessor'):
            if module_name.startswith(f'{PACKAGE_NAME}.{plugin_type}.'):
                del sys.modules[module_name]

    importlib.invalidate_caches()


class TestPlugins(unittest.TestCase):

    TEST_PLUGIN_DIR = TEST_DATA_DIR / PACKAGE_NAME

    def setUp(self):
        reset_plugins()

    def tearDown(self):
        reset_plugins()

    def test_directories_containing_plugins(self):
        self.assertIn(self.TEST_PLUGIN_DIR, map(Path, directories()))

    def test_extractor_classes(self):
        plugins_ie = load_plugins(EXTRACTOR_PLUGIN_SPEC)
        self.assertIn(f'{PACKAGE_NAME}.extractor.normal', sys.modules.keys())
        self.assertIn('NormalPluginIE', plugins_ie.keys())

        # don't load modules with underscore prefix
        self.assertFalse(
            f'{PACKAGE_NAME}.extractor._ignore' in sys.modules,
            'loaded module beginning with underscore')
        self.assertNotIn('IgnorePluginIE', plugins_ie.keys())
        self.assertNotIn('IgnorePluginIE', plugin_ies.value)

        # Don't load extractors with underscore prefix
        self.assertNotIn('_IgnoreUnderscorePluginIE', plugins_ie.keys())
        self.assertNotIn('_IgnoreUnderscorePluginIE', plugin_ies.value)

        # Don't load extractors not specified in __all__ (if supplied)
        self.assertNotIn('IgnoreNotInAllPluginIE', plugins_ie.keys())
        self.assertNotIn('IgnoreNotInAllPluginIE', plugin_ies.value)
        self.assertIn('InAllPluginIE', plugins_ie.keys())
        self.assertIn('InAllPluginIE', plugin_ies.value)

        # Don't load override extractors
        self.assertNotIn('OverrideGenericIE', plugins_ie.keys())
        self.assertNotIn('OverrideGenericIE', plugin_ies.value)
        self.assertNotIn('_UnderscoreOverrideGenericIE', plugins_ie.keys())
        self.assertNotIn('_UnderscoreOverrideGenericIE', plugin_ies.value)

    def test_postprocessor_classes(self):
        plugins_pp = load_plugins(POSTPROCESSOR_PLUGIN_SPEC)
        self.assertIn('NormalPluginPP', plugins_pp.keys())
        self.assertIn(f'{PACKAGE_NAME}.postprocessor.normal', sys.modules.keys())
        self.assertIn('NormalPluginPP', plugin_pps.value)

    def test_importing_zipped_module(self):
        zip_path = TEST_DATA_DIR / 'zipped_plugins.zip'
        shutil.make_archive(str(zip_path)[:-4], 'zip', str(zip_path)[:-4])
        sys.path.append(str(zip_path))  # add zip to search paths
        importlib.invalidate_caches()  # reset the import caches

        try:
            for plugin_type in ('extractor', 'postprocessor'):
                package = importlib.import_module(f'{PACKAGE_NAME}.{plugin_type}')
                self.assertIn(zip_path / PACKAGE_NAME / plugin_type, map(Path, package.__path__))

            plugins_ie = load_plugins(EXTRACTOR_PLUGIN_SPEC)
            self.assertIn('ZippedPluginIE', plugins_ie.keys())

            plugins_pp = load_plugins(POSTPROCESSOR_PLUGIN_SPEC)
            self.assertIn('ZippedPluginPP', plugins_pp.keys())

        finally:
            sys.path.remove(str(zip_path))
            os.remove(zip_path)
            importlib.invalidate_caches()  # reset the import caches

    def test_reloading_plugins(self):
        reload_plugins_path = TEST_DATA_DIR / 'reload_plugins'
        load_plugins(EXTRACTOR_PLUGIN_SPEC)
        load_plugins(POSTPROCESSOR_PLUGIN_SPEC)

        # Remove default folder and add reload_plugin path
        sys.path.remove(str(TEST_DATA_DIR))
        sys.path.append(str(reload_plugins_path))
        importlib.invalidate_caches()
        try:
            for plugin_type in ('extractor', 'postprocessor'):
                package = importlib.import_module(f'{PACKAGE_NAME}.{plugin_type}')
                self.assertIn(reload_plugins_path / PACKAGE_NAME / plugin_type, map(Path, package.__path__))

            plugins_ie = load_plugins(EXTRACTOR_PLUGIN_SPEC)
            self.assertIn('NormalPluginIE', plugins_ie.keys())
            self.assertTrue(
                plugins_ie['NormalPluginIE'].REPLACED,
                msg='Reloading has not replaced original extractor plugin')
            self.assertTrue(
                extractors.value['NormalPluginIE'].REPLACED,
                msg='Reloading has not replaced original extractor plugin globally')

            plugins_pp = load_plugins(POSTPROCESSOR_PLUGIN_SPEC)
            self.assertIn('NormalPluginPP', plugins_pp.keys())
            self.assertTrue(plugins_pp['NormalPluginPP'].REPLACED,
                            msg='Reloading has not replaced original postprocessor plugin')
            self.assertTrue(
                postprocessors.value['NormalPluginPP'].REPLACED,
                msg='Reloading has not replaced original postprocessor plugin globally')
        finally:
            sys.path.remove(str(reload_plugins_path))
            sys.path.append(str(TEST_DATA_DIR))
            importlib.invalidate_caches()

    def test_extractor_override_plugin(self):
        load_plugins(EXTRACTOR_PLUGIN_SPEC)

        from yt_dlp.extractor.generic import GenericIE

        self.assertEqual(GenericIE.TEST_FIELD, 'override')
        self.assertEqual(GenericIE.SECONDARY_TEST_FIELD, 'underscore-override')

        self.assertEqual(GenericIE.IE_NAME, 'generic+override+underscore-override')
        importlib.invalidate_caches()
        # test that loading a second time doesn't wrap a second time
        load_plugins(EXTRACTOR_PLUGIN_SPEC)
        from yt_dlp.extractor.generic import GenericIE
        self.assertEqual(GenericIE.IE_NAME, 'generic+override+underscore-override')

    def test_load_all_plugin_types(self):
        # no plugin specs registered
        load_all_plugins()

        self.assertNotIn(f'{PACKAGE_NAME}.extractor.normal', sys.modules.keys())
        self.assertNotIn(f'{PACKAGE_NAME}.postprocessor.normal', sys.modules.keys())

        register_plugin_spec(EXTRACTOR_PLUGIN_SPEC)
        register_plugin_spec(POSTPROCESSOR_PLUGIN_SPEC)
        load_all_plugins()
        self.assertTrue(all_plugins_loaded.value)

        self.assertIn(f'{PACKAGE_NAME}.extractor.normal', sys.modules.keys())
        self.assertIn(f'{PACKAGE_NAME}.postprocessor.normal', sys.modules.keys())

    def test_no_plugin_dirs(self):
        register_plugin_spec(EXTRACTOR_PLUGIN_SPEC)
        register_plugin_spec(POSTPROCESSOR_PLUGIN_SPEC)

        plugin_dirs.value = []
        load_all_plugins()

        self.assertNotIn(f'{PACKAGE_NAME}.extractor.normal', sys.modules.keys())
        self.assertNotIn(f'{PACKAGE_NAME}.postprocessor.normal', sys.modules.keys())

    def test_set_plugin_dirs(self):
        custom_plugin_dir = str(TEST_DATA_DIR / 'plugin_packages')
        plugin_dirs.value = [custom_plugin_dir]

        load_plugins(EXTRACTOR_PLUGIN_SPEC)

        self.assertIn(f'{PACKAGE_NAME}.extractor.package', sys.modules.keys())
        self.assertIn('PackagePluginIE', plugin_ies.value)

    def test_invalid_plugin_dir(self):
        plugin_dirs.value = ['invalid_dir']
        with self.assertRaises(ValueError):
            load_plugins(EXTRACTOR_PLUGIN_SPEC)

    def test_append_plugin_dirs(self):
        custom_plugin_dir = str(TEST_DATA_DIR / 'plugin_packages')

        self.assertEqual(plugin_dirs.value, ['default'])
        plugin_dirs.value.append(custom_plugin_dir)
        self.assertEqual(plugin_dirs.value, ['default', custom_plugin_dir])

        load_plugins(EXTRACTOR_PLUGIN_SPEC)

        self.assertIn(f'{PACKAGE_NAME}.extractor.package', sys.modules.keys())
        self.assertIn('PackagePluginIE', plugin_ies.value)

    def test_get_plugin_spec(self):
        register_plugin_spec(EXTRACTOR_PLUGIN_SPEC)
        register_plugin_spec(POSTPROCESSOR_PLUGIN_SPEC)

        self.assertEqual(plugin_specs.value.get('extractor'), EXTRACTOR_PLUGIN_SPEC)
        self.assertEqual(plugin_specs.value.get('postprocessor'), POSTPROCESSOR_PLUGIN_SPEC)
        self.assertIsNone(plugin_specs.value.get('invalid'))


if __name__ == '__main__':
    unittest.main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
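The record above exercises yt-dlp's plugin loader. For orientation, here is a minimal sketch of a plugin module that the naming rules asserted in test_extractor_classes would accept, assuming the conventional yt_dlp_plugins namespace package; the file path, class names and URL here are hypothetical:

# Hypothetical yt_dlp_plugins/extractor/example.py -- a sketch only.
# Only names ending in the spec suffix ('IE') and not starting with '_'
# are collected, and __all__ (when present) further restricts the set.
from yt_dlp.extractor.common import InfoExtractor

__all__ = ['ExamplePluginIE']  # IgnoredPluginIE below is filtered out


class ExamplePluginIE(InfoExtractor):  # loaded: matching suffix, listed in __all__
    _VALID_URL = r'https?://example\.invalid/(?P<id>\d+)'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        return {'id': video_id, 'title': f'example {video_id}', 'url': url}


class IgnoredPluginIE(InfoExtractor):  # skipped: not listed in __all__
    pass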
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/helper.py
test/helper.py
import errno
import hashlib
import json
import os.path
import re
import ssl
import sys
import types

import yt_dlp.extractor
from yt_dlp import YoutubeDL
from yt_dlp.utils import preferredencoding, try_call, write_string, find_available_port

if 'pytest' in sys.modules:
    import pytest
    is_download_test = pytest.mark.download
else:
    def is_download_test(test_class):
        return test_class


def get_params(override=None):
    PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'parameters.json')
    LOCAL_PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'local_parameters.json')
    with open(PARAMETERS_FILE, encoding='utf-8') as pf:
        parameters = json.load(pf)
    if os.path.exists(LOCAL_PARAMETERS_FILE):
        with open(LOCAL_PARAMETERS_FILE, encoding='utf-8') as pf:
            parameters.update(json.load(pf))
    if override:
        parameters.update(override)
    return parameters


def try_rm(filename):
    """ Remove a file if it exists """
    try:
        os.remove(filename)
    except OSError as ose:
        if ose.errno != errno.ENOENT:
            raise


def report_warning(message, *args, **kwargs):
    """
    Print the message to stderr, it will be prefixed with 'WARNING:'
    If stderr is a tty file the 'WARNING:' will be colored
    """
    if sys.stderr.isatty() and os.name != 'nt':
        _msg_header = '\033[0;33mWARNING:\033[0m'
    else:
        _msg_header = 'WARNING:'
    output = f'{_msg_header} {message}\n'
    if 'b' in getattr(sys.stderr, 'mode', ''):
        output = output.encode(preferredencoding())
    sys.stderr.write(output)


class FakeYDL(YoutubeDL):
    def __init__(self, override=None):
        # Different instances of the downloader can't share the same dictionary
        # some test set the "sublang" parameter, which would break the md5 checks.
        params = get_params(override=override)
        super().__init__(params, auto_init=False)
        self.result = []

    def to_screen(self, s, *args, **kwargs):
        print(s)

    def trouble(self, s, *args, **kwargs):
        raise Exception(s)

    def download(self, x):
        self.result.append(x)

    def expect_warning(self, regex):
        # Silence an expected warning matching a regex
        old_report_warning = self.report_warning

        def report_warning(self, message, *args, **kwargs):
            if re.match(regex, message):
                return
            old_report_warning(message, *args, **kwargs)
        self.report_warning = types.MethodType(report_warning, self)


def gettestcases(include_onlymatching=False):
    for ie in yt_dlp.extractor.gen_extractors():
        yield from ie.get_testcases(include_onlymatching)


def getwebpagetestcases():
    for ie in yt_dlp.extractor.gen_extractors():
        for tc in ie.get_webpage_testcases():
            tc.setdefault('add_ie', []).append('Generic')
            yield tc


md5 = lambda s: hashlib.md5(s.encode()).hexdigest()


def _iter_differences(got, expected, field):
    if isinstance(expected, str):
        op, _, val = expected.partition(':')
        if op in ('mincount', 'maxcount', 'count'):
            if not isinstance(got, (list, dict)):
                yield field, f'expected either {list.__name__} or {dict.__name__}, got {type(got).__name__}'
                return

            expected_num = int(val)
            got_num = len(got)
            if op == 'mincount':
                if got_num < expected_num:
                    yield field, f'expected at least {val} items, got {got_num}'
                return

            if op == 'maxcount':
                if got_num > expected_num:
                    yield field, f'expected at most {val} items, got {got_num}'
                return

            assert op == 'count'
            if got_num != expected_num:
                yield field, f'expected exactly {val} items, got {got_num}'
            return

        if not isinstance(got, str):
            yield field, f'expected {str.__name__}, got {type(got).__name__}'
            return

        if op == 're':
            if not re.match(val, got):
                yield field, f'should match {val!r}, got {got!r}'
            return

        if op == 'startswith':
            if not got.startswith(val):
                yield field, f'should start with {val!r}, got {got!r}'
            return

        if op == 'contains':
            if val not in got:
                yield field, f'should contain {val!r}, got {got!r}'
            return

        if op == 'md5':
            hash_val = md5(got)
            if hash_val != val:
                yield field, f'expected hash {val}, got {hash_val}'
            return

        if got != expected:
            yield field, f'expected {expected!r}, got {got!r}'
        return

    if isinstance(expected, dict) and isinstance(got, dict):
        for key, expected_val in expected.items():
            if key not in got:
                yield field, f'missing key: {key!r}'
                continue

            field_name = key if field is None else f'{field}.{key}'
            yield from _iter_differences(got[key], expected_val, field_name)
        return

    if isinstance(expected, type):
        if not isinstance(got, expected):
            yield field, f'expected {expected.__name__}, got {type(got).__name__}'
        return

    if isinstance(expected, list) and isinstance(got, list):
        # TODO: clever diffing algorithm lmao
        if len(expected) != len(got):
            yield field, f'expected length of {len(expected)}, got {len(got)}'
            return

        for index, (got_val, expected_val) in enumerate(zip(got, expected, strict=True)):
            field_name = str(index) if field is None else f'{field}.{index}'
            yield from _iter_differences(got_val, expected_val, field_name)
        return

    if got != expected:
        yield field, f'expected {expected!r}, got {got!r}'


def _expect_value(message, got, expected, field):
    mismatches = list(_iter_differences(got, expected, field))
    if not mismatches:
        return

    fields = [field for field, _ in mismatches if field is not None]
    return ''.join((
        message, f' ({", ".join(fields)})' if fields else '',
        *(f'\n\t{field}: {message}' for field, message in mismatches)))


def expect_value(self, got, expected, field):
    if message := _expect_value('values differ', got, expected, field):
        self.fail(message)


def expect_dict(self, got_dict, expected_dict):
    if message := _expect_value('dictionaries differ', got_dict, expected_dict, None):
        self.fail(message)


def sanitize_got_info_dict(got_dict):
    IGNORED_FIELDS = (
        *YoutubeDL._format_fields,

        # Lists
        'formats', 'thumbnails', 'subtitles', 'automatic_captions', 'comments', 'entries',

        # Auto-generated
        'autonumber', 'playlist', 'format_index', 'video_ext', 'audio_ext', 'duration_string', 'epoch', 'n_entries',
        'fulltitle', 'extractor', 'extractor_key', 'filename', 'filepath', 'infojson_filename', 'original_url',

        # Only live_status needs to be checked
        'is_live', 'was_live',
    )

    IGNORED_PREFIXES = ('', 'playlist', 'requested', 'webpage')

    def sanitize(key, value):
        if isinstance(value, str) and len(value) > 100 and key != 'thumbnail':
            return f'md5:{md5(value)}'
        elif isinstance(value, list) and len(value) > 10:
            return f'count:{len(value)}'
        elif key.endswith('_count') and isinstance(value, int):
            return int
        return value

    test_info_dict = {
        key: sanitize(key, value) for key, value in got_dict.items()
        if value is not None and key not in IGNORED_FIELDS and (
            not any(key.startswith(f'{prefix}_') for prefix in IGNORED_PREFIXES)
            or key == '_old_archive_ids')
    }

    # display_id may be generated from id
    if test_info_dict.get('display_id') == test_info_dict.get('id'):
        test_info_dict.pop('display_id')

    # Remove deprecated fields
    for old in YoutubeDL._deprecated_multivalue_fields:
        test_info_dict.pop(old, None)

    # release_year may be generated from release_date
    if try_call(lambda: test_info_dict['release_year'] == int(test_info_dict['release_date'][:4])):
        test_info_dict.pop('release_year')

    # Check url for flat entries
    if got_dict.get('_type', 'video') != 'video' and got_dict.get('url'):
        test_info_dict['url'] = got_dict['url']

    return test_info_dict


def expect_info_dict(self, got_dict, expected_dict):
    ALLOWED_KEYS_SORT_ORDER = (
        # NB: Keep in sync with the docstring of extractor/common.py
        'ie_key', 'url', 'id', 'ext', 'direct', 'display_id', 'title', 'alt_title', 'description', 'media_type',
        'uploader', 'uploader_id', 'uploader_url', 'channel', 'channel_id', 'channel_url', 'channel_is_verified',
        'channel_follower_count', 'comment_count', 'view_count', 'concurrent_view_count', 'save_count',
        'like_count', 'dislike_count', 'repost_count', 'average_rating', 'age_limit', 'duration', 'thumbnail', 'heatmap',
        'chapters', 'chapter', 'chapter_number', 'chapter_id', 'start_time', 'end_time', 'section_start', 'section_end',
        'categories', 'tags', 'cast', 'composers', 'artists', 'album_artists', 'creators', 'genres',
        'track', 'track_number', 'track_id', 'album', 'album_type', 'disc_number',
        'series', 'series_id', 'season', 'season_number', 'season_id', 'episode', 'episode_number', 'episode_id',
        'timestamp', 'upload_date', 'release_timestamp', 'release_date', 'release_year', 'modified_timestamp', 'modified_date',
        'playable_in_embed', 'availability', 'live_status', 'location', 'license', '_old_archive_ids',
    )

    expect_dict(self, got_dict, expected_dict)
    # Check for the presence of mandatory fields
    if got_dict.get('_type') not in ('playlist', 'multi_video'):
        mandatory_fields = ['id', 'title']
        if expected_dict.get('ext'):
            mandatory_fields.extend(('url', 'ext'))
        for key in mandatory_fields:
            self.assertTrue(got_dict.get(key), f'Missing mandatory field {key}')
    # Check for mandatory fields that are automatically set by YoutubeDL
    if got_dict.get('_type', 'video') == 'video':
        for key in ['webpage_url', 'extractor', 'extractor_key']:
            self.assertTrue(got_dict.get(key), f'Missing field: {key}')

    test_info_dict = sanitize_got_info_dict(got_dict)

    # Check for invalid/misspelled field names being returned by the extractor
    invalid_keys = sorted(test_info_dict.keys() - ALLOWED_KEYS_SORT_ORDER)
    self.assertFalse(invalid_keys, f'Invalid fields returned by the extractor: {", ".join(invalid_keys)}')

    missing_keys = sorted(
        test_info_dict.keys() - expected_dict.keys(),
        key=lambda x: ALLOWED_KEYS_SORT_ORDER.index(x))
    if missing_keys:
        def _repr(v):
            if isinstance(v, str):
                return "'{}'".format(v.replace('\\', '\\\\').replace("'", "\\'").replace('\n', '\\n'))
            elif isinstance(v, type):
                return v.__name__
            else:
                return repr(v)
        info_dict_str = ''.join(
            f'    {_repr(k)}: {_repr(v)},\n'
            for k, v in test_info_dict.items() if k not in missing_keys)
        if info_dict_str:
            info_dict_str += '\n'
        info_dict_str += ''.join(
            f'    {_repr(k)}: {_repr(test_info_dict[k])},\n'
            for k in missing_keys)
        info_dict_str = '\n\'info_dict\': {\n' + info_dict_str + '},\n'
        write_string(info_dict_str.replace('\n', '\n        '), out=sys.stderr)
        self.assertFalse(
            missing_keys,
            'Missing keys in test definition: {}'.format(', '.join(sorted(missing_keys))))


def assertRegexpMatches(self, text, regexp, msg=None):
    if hasattr(self, 'assertRegexp'):
        return self.assertRegexp(text, regexp, msg)
    else:
        m = re.match(regexp, text)
        if not m:
            note = f'Regexp didn\'t match: {regexp!r} not found'
            if len(text) < 1000:
                note += f' in {text!r}'
            if msg is None:
                msg = note
            else:
                msg = note + ', ' + msg
            self.assertTrue(m, msg)


def assertGreaterEqual(self, got, expected, msg=None):
    if not (got >= expected):
        if msg is None:
            msg = f'{got!r} not greater than or equal to {expected!r}'
        self.assertTrue(got >= expected, msg)


def assertLessEqual(self, got, expected, msg=None):
    if not (got <= expected):
        if msg is None:
            msg = f'{got!r} not less than or equal to {expected!r}'
        self.assertTrue(got <= expected, msg)


def assertEqual(self, got, expected, msg=None):
    if got != expected:
        if msg is None:
            msg = f'{got!r} not equal to {expected!r}'
        self.assertTrue(got == expected, msg)


def expect_warnings(ydl, warnings_re):
    real_warning = ydl.report_warning

    def _report_warning(w, *args, **kwargs):
        if not any(re.search(w_re, w) for w_re in warnings_re):
            real_warning(w, *args, **kwargs)

    ydl.report_warning = _report_warning


def http_server_port(httpd):
    if os.name == 'java' and isinstance(httpd.socket, ssl.SSLSocket):
        # In Jython SSLSocket is not a subclass of socket.socket
        sock = httpd.socket.sock
    else:
        sock = httpd.socket
    return sock.getsockname()[1]


def verify_address_availability(address):
    if find_available_port(address) is None:
        pytest.skip(f'Unable to bind to source address {address} (address may not exist)')


def validate_and_send(rh, req):
    rh.validate(req)
    return rh.send(req)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
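The _iter_differences() helper in the record above implements a small '<op>:<value>' assertion language for expected test fields (re, startswith, contains, md5, mincount, maxcount, count). A minimal usage sketch, assuming it is run from a yt-dlp checkout so that test.helper is importable; the test class and values are made up:

import unittest

from test.helper import expect_dict


class ExpectDictMiniLanguage(unittest.TestCase):
    def test_operators(self):
        got = {'title': 'Example video', 'tags': ['a', 'b', 'c', 'd']}
        expect_dict(self, got, {
            'title': 're:^Example',  # value must match the regex
            'tags': 'mincount:3',    # list must have at least 3 items
        })


if __name__ == '__main__':
    unittest.main()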
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_http_proxy.py
test/test_http_proxy.py
import abc
import base64
import contextlib
import functools
import json
import os
import random
import ssl
import threading
from http.server import BaseHTTPRequestHandler
from socketserver import ThreadingTCPServer

import pytest

from test.helper import http_server_port, verify_address_availability
from test.test_networking import TEST_DIR
from test.test_socks import IPv6ThreadingTCPServer
from yt_dlp.dependencies import urllib3
from yt_dlp.networking import Request
from yt_dlp.networking.exceptions import HTTPError, ProxyError, SSLError


class HTTPProxyAuthMixin:
    def proxy_auth_error(self):
        self.send_response(407)
        self.send_header('Proxy-Authenticate', 'Basic realm="test http proxy"')
        self.end_headers()
        return False

    def do_proxy_auth(self, username, password):
        if username is None and password is None:
            return True

        proxy_auth_header = self.headers.get('Proxy-Authorization', None)
        if proxy_auth_header is None:
            return self.proxy_auth_error()

        if not proxy_auth_header.startswith('Basic '):
            return self.proxy_auth_error()

        auth = proxy_auth_header[6:]

        try:
            auth_username, auth_password = base64.b64decode(auth).decode().split(':', 1)
        except Exception:
            return self.proxy_auth_error()

        if auth_username != (username or '') or auth_password != (password or ''):
            return self.proxy_auth_error()
        return True


class HTTPProxyHandler(BaseHTTPRequestHandler, HTTPProxyAuthMixin):
    def __init__(self, *args, proxy_info=None, username=None, password=None, request_handler=None, **kwargs):
        self.username = username
        self.password = password
        self.proxy_info = proxy_info
        super().__init__(*args, **kwargs)

    def do_GET(self):
        if not self.do_proxy_auth(self.username, self.password):
            self.server.close_request(self.request)
            return
        if self.path.endswith('/proxy_info'):
            payload = json.dumps(self.proxy_info or {
                'client_address': self.client_address,
                'connect': False,
                'connect_host': None,
                'connect_port': None,
                'headers': dict(self.headers),
                'path': self.path,
                'proxy': ':'.join(str(y) for y in self.connection.getsockname()),
            })
            self.send_response(200)
            self.send_header('Content-Type', 'application/json; charset=utf-8')
            self.send_header('Content-Length', str(len(payload)))
            self.end_headers()
            self.wfile.write(payload.encode())
        else:
            self.send_response(404)
            self.end_headers()

        self.server.close_request(self.request)


if urllib3:
    import urllib3.util.ssltransport

    class SSLTransport(urllib3.util.ssltransport.SSLTransport):
        """
        Modified version of urllib3 SSLTransport to support server side SSL

        This allows us to chain multiple TLS connections.
        """

        def __init__(self, socket, ssl_context, server_hostname=None, suppress_ragged_eofs=True, server_side=False):
            self.incoming = ssl.MemoryBIO()
            self.outgoing = ssl.MemoryBIO()

            self.suppress_ragged_eofs = suppress_ragged_eofs
            self.socket = socket

            self.sslobj = ssl_context.wrap_bio(
                self.incoming,
                self.outgoing,
                server_hostname=server_hostname,
                server_side=server_side,
            )
            self._ssl_io_loop(self.sslobj.do_handshake)

        @property
        def _io_refs(self):
            return self.socket._io_refs

        @_io_refs.setter
        def _io_refs(self, value):
            self.socket._io_refs = value

        def shutdown(self, *args, **kwargs):
            self.socket.shutdown(*args, **kwargs)
else:
    SSLTransport = None


class HTTPSProxyHandler(HTTPProxyHandler):
    def __init__(self, request, *args, **kwargs):
        certfn = os.path.join(TEST_DIR, 'testcert.pem')
        sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        sslctx.load_cert_chain(certfn, None)
        if isinstance(request, ssl.SSLSocket):
            request = SSLTransport(request, ssl_context=sslctx, server_side=True)
        else:
            request = sslctx.wrap_socket(request, server_side=True)
        super().__init__(request, *args, **kwargs)


class HTTPConnectProxyHandler(BaseHTTPRequestHandler, HTTPProxyAuthMixin):
    protocol_version = 'HTTP/1.1'
    default_request_version = 'HTTP/1.1'

    def __init__(self, *args, username=None, password=None, request_handler=None, **kwargs):
        self.username = username
        self.password = password
        self.request_handler = request_handler
        super().__init__(*args, **kwargs)

    def do_CONNECT(self):
        if not self.do_proxy_auth(self.username, self.password):
            self.server.close_request(self.request)
            return
        self.send_response(200)
        self.end_headers()
        proxy_info = {
            'client_address': self.client_address,
            'connect': True,
            'connect_host': self.path.split(':')[0],
            'connect_port': int(self.path.split(':')[1]),
            'headers': dict(self.headers),
            'path': self.path,
            'proxy': ':'.join(str(y) for y in self.connection.getsockname()),
        }
        self.request_handler(self.request, self.client_address, self.server, proxy_info=proxy_info)
        self.server.close_request(self.request)


class HTTPSConnectProxyHandler(HTTPConnectProxyHandler):
    def __init__(self, request, *args, **kwargs):
        certfn = os.path.join(TEST_DIR, 'testcert.pem')
        sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        sslctx.load_cert_chain(certfn, None)
        request = sslctx.wrap_socket(request, server_side=True)
        self._original_request = request
        super().__init__(request, *args, **kwargs)

    def do_CONNECT(self):
        super().do_CONNECT()
        self.server.close_request(self._original_request)


@contextlib.contextmanager
def proxy_server(proxy_server_class, request_handler, bind_ip=None, **proxy_server_kwargs):
    server = server_thread = None
    try:
        bind_address = bind_ip or '127.0.0.1'
        server_type = ThreadingTCPServer if '.' in bind_address else IPv6ThreadingTCPServer
        server = server_type(
            (bind_address, 0), functools.partial(proxy_server_class, request_handler=request_handler, **proxy_server_kwargs))
        server_port = http_server_port(server)
        server_thread = threading.Thread(target=server.serve_forever)
        server_thread.daemon = True
        server_thread.start()
        if '.' not in bind_address:
            yield f'[{bind_address}]:{server_port}'
        else:
            yield f'{bind_address}:{server_port}'
    finally:
        server.shutdown()
        server.server_close()
        server_thread.join(2.0)


class HTTPProxyTestContext(abc.ABC):
    REQUEST_HANDLER_CLASS = None
    REQUEST_PROTO = None

    def http_server(self, server_class, *args, **kwargs):
        return proxy_server(server_class, self.REQUEST_HANDLER_CLASS, *args, **kwargs)

    @abc.abstractmethod
    def proxy_info_request(self, handler, target_domain=None, target_port=None, **req_kwargs) -> dict:
        """return a dict of proxy_info"""


class HTTPProxyHTTPTestContext(HTTPProxyTestContext):
    # Standard HTTP Proxy for http requests
    REQUEST_HANDLER_CLASS = HTTPProxyHandler
    REQUEST_PROTO = 'http'

    def proxy_info_request(self, handler, target_domain=None, target_port=None, **req_kwargs):
        request = Request(f'http://{target_domain or "127.0.0.1"}:{target_port or "40000"}/proxy_info', **req_kwargs)
        handler.validate(request)
        return json.loads(handler.send(request).read().decode())


class HTTPProxyHTTPSTestContext(HTTPProxyTestContext):
    # HTTP Connect proxy, for https requests
    REQUEST_HANDLER_CLASS = HTTPSProxyHandler
    REQUEST_PROTO = 'https'

    def proxy_info_request(self, handler, target_domain=None, target_port=None, **req_kwargs):
        request = Request(f'https://{target_domain or "127.0.0.1"}:{target_port or "40000"}/proxy_info', **req_kwargs)
        handler.validate(request)
        return json.loads(handler.send(request).read().decode())


CTX_MAP = {
    'http': HTTPProxyHTTPTestContext,
    'https': HTTPProxyHTTPSTestContext,
}


@pytest.fixture(scope='module')
def ctx(request):
    return CTX_MAP[request.param]()


@pytest.mark.parametrize(
    'handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
@pytest.mark.handler_flaky('CurlCFFI', reason='segfaults')
@pytest.mark.parametrize('ctx', ['http'], indirect=True)  # pure http proxy can only support http
class TestHTTPProxy:
    def test_http_no_auth(self, handler, ctx):
        with ctx.http_server(HTTPProxyHandler) as server_address:
            with handler(proxies={ctx.REQUEST_PROTO: f'http://{server_address}'}) as rh:
                proxy_info = ctx.proxy_info_request(rh)
                assert proxy_info['proxy'] == server_address
                assert proxy_info['connect'] is False
                assert 'Proxy-Authorization' not in proxy_info['headers']

    def test_http_auth(self, handler, ctx):
        with ctx.http_server(HTTPProxyHandler, username='test', password='test') as server_address:
            with handler(proxies={ctx.REQUEST_PROTO: f'http://test:test@{server_address}'}) as rh:
                proxy_info = ctx.proxy_info_request(rh)
                assert proxy_info['proxy'] == server_address
                assert 'Proxy-Authorization' in proxy_info['headers']

    def test_http_bad_auth(self, handler, ctx):
        with ctx.http_server(HTTPProxyHandler, username='test', password='test') as server_address:
            with handler(proxies={ctx.REQUEST_PROTO: f'http://test:bad@{server_address}'}) as rh:
                with pytest.raises(HTTPError) as exc_info:
                    ctx.proxy_info_request(rh)
                assert exc_info.value.response.status == 407
                exc_info.value.response.close()

    def test_http_source_address(self, handler, ctx):
        with ctx.http_server(HTTPProxyHandler) as server_address:
            source_address = f'127.0.0.{random.randint(5, 255)}'
            verify_address_availability(source_address)
            with handler(proxies={ctx.REQUEST_PROTO: f'http://{server_address}'},
                         source_address=source_address) as rh:
                proxy_info = ctx.proxy_info_request(rh)
                assert proxy_info['proxy'] == server_address
                assert proxy_info['client_address'][0] == source_address

    @pytest.mark.skip_handler('Urllib', 'urllib does not support https proxies')
    def test_https(self, handler, ctx):
        with ctx.http_server(HTTPSProxyHandler) as server_address:
            with handler(verify=False, proxies={ctx.REQUEST_PROTO: f'https://{server_address}'}) as rh:
                proxy_info = ctx.proxy_info_request(rh)
                assert proxy_info['proxy'] == server_address
                assert proxy_info['connect'] is False
                assert 'Proxy-Authorization' not in proxy_info['headers']

    @pytest.mark.skip_handler('Urllib', 'urllib does not support https proxies')
    def test_https_verify_failed(self, handler, ctx):
        with ctx.http_server(HTTPSProxyHandler) as server_address:
            with handler(verify=True, proxies={ctx.REQUEST_PROTO: f'https://{server_address}'}) as rh:
                # Accept SSLError as may not be feasible to tell if it is proxy or request error.
                # note: if request proto also does ssl verification, this may also be the error of the request.
                # Until we can support passing custom cacerts to handlers, we cannot properly test this for all cases.
                with pytest.raises((ProxyError, SSLError)):
                    ctx.proxy_info_request(rh)

    def test_http_with_idn(self, handler, ctx):
        with ctx.http_server(HTTPProxyHandler) as server_address:
            with handler(proxies={ctx.REQUEST_PROTO: f'http://{server_address}'}) as rh:
                proxy_info = ctx.proxy_info_request(rh, target_domain='中文.tw')
                assert proxy_info['proxy'] == server_address
                assert proxy_info['path'].startswith('http://xn--fiq228c.tw')
                assert proxy_info['headers']['Host'].split(':', 1)[0] == 'xn--fiq228c.tw'


@pytest.mark.parametrize(
    'handler,ctx', [
        ('Requests', 'https'),
        ('CurlCFFI', 'https'),
    ], indirect=True)
@pytest.mark.handler_flaky('CurlCFFI', reason='segfaults')
class TestHTTPConnectProxy:
    def test_http_connect_no_auth(self, handler, ctx):
        with ctx.http_server(HTTPConnectProxyHandler) as server_address:
            with handler(verify=False, proxies={ctx.REQUEST_PROTO: f'http://{server_address}'}) as rh:
                proxy_info = ctx.proxy_info_request(rh)
                assert proxy_info['proxy'] == server_address
                assert proxy_info['connect'] is True
                assert 'Proxy-Authorization' not in proxy_info['headers']

    def test_http_connect_auth(self, handler, ctx):
        with ctx.http_server(HTTPConnectProxyHandler, username='test', password='test') as server_address:
            with handler(verify=False, proxies={ctx.REQUEST_PROTO: f'http://test:test@{server_address}'}) as rh:
                proxy_info = ctx.proxy_info_request(rh)
                assert proxy_info['proxy'] == server_address
                assert 'Proxy-Authorization' in proxy_info['headers']

    def test_http_connect_bad_auth(self, handler, ctx):
        with ctx.http_server(HTTPConnectProxyHandler, username='test', password='test') as server_address:
            with handler(verify=False, proxies={ctx.REQUEST_PROTO: f'http://test:bad@{server_address}'}) as rh:
                with pytest.raises(ProxyError):
                    ctx.proxy_info_request(rh)

    def test_http_connect_source_address(self, handler, ctx):
        with ctx.http_server(HTTPConnectProxyHandler) as server_address:
            source_address = f'127.0.0.{random.randint(5, 255)}'
            verify_address_availability(source_address)
            with handler(proxies={ctx.REQUEST_PROTO: f'http://{server_address}'},
                         source_address=source_address,
                         verify=False) as rh:
                proxy_info = ctx.proxy_info_request(rh)
                assert proxy_info['proxy'] == server_address
                assert proxy_info['client_address'][0] == source_address

    @pytest.mark.skipif(urllib3 is None, reason='requires urllib3 to test')
    def test_https_connect_proxy(self, handler, ctx):
        with ctx.http_server(HTTPSConnectProxyHandler) as server_address:
            with handler(verify=False, proxies={ctx.REQUEST_PROTO: f'https://{server_address}'}) as rh:
                proxy_info = ctx.proxy_info_request(rh)
                assert proxy_info['proxy'] == server_address
                assert proxy_info['connect'] is True
                assert 'Proxy-Authorization' not in proxy_info['headers']

    @pytest.mark.skipif(urllib3 is None, reason='requires urllib3 to test')
    def test_https_connect_verify_failed(self, handler, ctx):
        with ctx.http_server(HTTPSConnectProxyHandler) as server_address:
            with handler(verify=True, proxies={ctx.REQUEST_PROTO: f'https://{server_address}'}) as rh:
                # Accept SSLError as may not be feasible to tell if it is proxy or request error.
                # note: if request proto also does ssl verification, this may also be the error of the request.
                # Until we can support passing custom cacerts to handlers, we cannot properly test this for all cases.
                with pytest.raises((ProxyError, SSLError)):
                    ctx.proxy_info_request(rh)

    @pytest.mark.skipif(urllib3 is None, reason='requires urllib3 to test')
    def test_https_connect_proxy_auth(self, handler, ctx):
        with ctx.http_server(HTTPSConnectProxyHandler, username='test', password='test') as server_address:
            with handler(verify=False, proxies={ctx.REQUEST_PROTO: f'https://test:test@{server_address}'}) as rh:
                proxy_info = ctx.proxy_info_request(rh)
                assert proxy_info['proxy'] == server_address
                assert 'Proxy-Authorization' in proxy_info['headers']
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
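The HTTPProxyAuthMixin in the record above validates HTTP Basic proxy credentials. A self-contained sketch of how the Proxy-Authorization header it parses is built and decoded (credential values are made up):

import base64

# Build the header: 'Basic ' + base64('username:password')
username, password = 'test', 'test'
token = base64.b64encode(f'{username}:{password}'.encode()).decode()
header = f'Basic {token}'

# The mixin reverses this: strip 'Basic ', base64-decode, split on the first ':'
decoded_user, decoded_pass = base64.b64decode(header[6:]).decode().split(':', 1)
assert (decoded_user, decoded_pass) == (username, password)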
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_youtube_misc.py
test/test_youtube_misc.py
#!/usr/bin/env python3

# Allow direct execution
import os
import sys
import unittest

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


from yt_dlp.extractor import YoutubeIE


class TestYoutubeMisc(unittest.TestCase):
    def test_youtube_extract(self):
        assertExtractId = lambda url, video_id: self.assertEqual(YoutubeIE.extract_id(url), video_id)
        assertExtractId('http://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
        assertExtractId('https://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
        assertExtractId('https://www.youtube.com/watch?feature=player_embedded&v=BaW_jenozKc', 'BaW_jenozKc')
        assertExtractId('https://www.youtube.com/watch_popup?v=BaW_jenozKc', 'BaW_jenozKc')
        assertExtractId('http://www.youtube.com/watch?v=BaW_jenozKcsharePLED17F32AD9753930', 'BaW_jenozKc')
        assertExtractId('BaW_jenozKc', 'BaW_jenozKc')


if __name__ == '__main__':
    unittest.main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_verbose_output.py
test/test_verbose_output.py
#!/usr/bin/env python3

# Allow direct execution
import os
import sys
import unittest

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


import subprocess

rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


class TestVerboseOutput(unittest.TestCase):
    def test_private_info_arg(self):
        outp = subprocess.Popen(
            [
                sys.executable, 'yt_dlp/__main__.py',
                '-v', '--ignore-config',
                '--username', 'johnsmith@gmail.com',
                '--password', 'my_secret_password',
            ], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        _, serr = outp.communicate()
        self.assertTrue(b'--username' in serr)
        self.assertTrue(b'johnsmith' not in serr)
        self.assertTrue(b'--password' in serr)
        self.assertTrue(b'my_secret_password' not in serr)

    def test_private_info_shortarg(self):
        outp = subprocess.Popen(
            [
                sys.executable, 'yt_dlp/__main__.py',
                '-v', '--ignore-config',
                '-u', 'johnsmith@gmail.com',
                '-p', 'my_secret_password',
            ], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        _, serr = outp.communicate()
        self.assertTrue(b'-u' in serr)
        self.assertTrue(b'johnsmith' not in serr)
        self.assertTrue(b'-p' in serr)
        self.assertTrue(b'my_secret_password' not in serr)

    def test_private_info_eq(self):
        outp = subprocess.Popen(
            [
                sys.executable, 'yt_dlp/__main__.py',
                '-v', '--ignore-config',
                '--username=johnsmith@gmail.com',
                '--password=my_secret_password',
            ], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        _, serr = outp.communicate()
        self.assertTrue(b'--username' in serr)
        self.assertTrue(b'johnsmith' not in serr)
        self.assertTrue(b'--password' in serr)
        self.assertTrue(b'my_secret_password' not in serr)

    def test_private_info_shortarg_eq(self):
        outp = subprocess.Popen(
            [
                sys.executable, 'yt_dlp/__main__.py',
                '-v', '--ignore-config',
                '-u=johnsmith@gmail.com',
                '-p=my_secret_password',
            ], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        _, serr = outp.communicate()
        self.assertTrue(b'-u' in serr)
        self.assertTrue(b'johnsmith' not in serr)
        self.assertTrue(b'-p' in serr)
        self.assertTrue(b'my_secret_password' not in serr)


if __name__ == '__main__':
    unittest.main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
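The same redaction behaviour the record above asserts can be checked more compactly with subprocess.run; a sketch, assuming yt-dlp is importable as a module from the working directory (the credentials here are made up):

import subprocess
import sys

proc = subprocess.run(
    [sys.executable, '-m', 'yt_dlp', '-v', '--ignore-config',
     '--username', 'user@example.com', '--password', 'hunter2'],
    capture_output=True)
# The option name survives in the verbose debug output; its value must not
assert b'--password' in proc.stderr
assert b'hunter2' not in proc.stderr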
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_networking.py
test/test_networking.py
#!/usr/bin/env python3

# Allow direct execution
import os
import sys

from unittest.mock import MagicMock

import pytest

from yt_dlp.networking.common import Features, DEFAULT_TIMEOUT

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import gzip
import http.client
import http.cookiejar
import http.server
import io
import logging
import pathlib
import random
import ssl
import tempfile
import threading
import time
import urllib.request
import warnings
import zlib
from email.message import Message
from http.cookiejar import CookieJar

from test.helper import (
    FakeYDL,
    http_server_port,
    validate_and_send,
    verify_address_availability,
)
from yt_dlp.cookies import YoutubeDLCookieJar
from yt_dlp.dependencies import brotli, curl_cffi, requests, urllib3
from yt_dlp.networking import (
    HEADRequest,
    PATCHRequest,
    PUTRequest,
    Request,
    RequestDirector,
    RequestHandler,
    Response,
)
from yt_dlp.networking._urllib import UrllibRH
from yt_dlp.networking.exceptions import (
    CertificateVerifyError,
    HTTPError,
    IncompleteRead,
    NoSupportingHandlers,
    ProxyError,
    RequestError,
    SSLError,
    TransportError,
    UnsupportedRequest,
)
from yt_dlp.networking.impersonate import (
    ImpersonateRequestHandler,
    ImpersonateTarget,
)
from yt_dlp.utils import YoutubeDLError
from yt_dlp.utils._utils import _YDLLogger as FakeLogger
from yt_dlp.utils.networking import HTTPHeaderDict, std_headers

TEST_DIR = os.path.dirname(os.path.abspath(__file__))


class HTTPTestRequestHandler(http.server.BaseHTTPRequestHandler):
    protocol_version = 'HTTP/1.1'
    default_request_version = 'HTTP/1.1'

    def log_message(self, format, *args):
        pass

    def _headers(self):
        payload = str(self.headers).encode()
        self.send_response(200)
        self.send_header('Content-Type', 'application/json')
        self.send_header('Content-Length', str(len(payload)))
        self.end_headers()
        self.wfile.write(payload)

    def _redirect(self):
        self.send_response(int(self.path[len('/redirect_'):]))
        self.send_header('Location', '/method')
        self.send_header('Content-Length', '0')
        self.end_headers()

    def _method(self, method, payload=None):
        self.send_response(200)
        self.send_header('Content-Length', str(len(payload or '')))
        self.send_header('Method', method)
        self.end_headers()
        if payload:
            self.wfile.write(payload)

    def _status(self, status):
        payload = f'<html>{status} NOT FOUND</html>'.encode()
        self.send_response(int(status))
        self.send_header('Content-Type', 'text/html; charset=utf-8')
        self.send_header('Content-Length', str(len(payload)))
        self.end_headers()
        self.wfile.write(payload)

    def _read_data(self):
        if 'Content-Length' in self.headers:
            return self.rfile.read(int(self.headers['Content-Length']))
        else:
            return b''

    def do_POST(self):
        data = self._read_data() + str(self.headers).encode()
        if self.path.startswith('/redirect_'):
            self._redirect()
        elif self.path.startswith('/method'):
            self._method('POST', data)
        elif self.path.startswith('/headers'):
            self._headers()
        else:
            self._status(404)

    def do_HEAD(self):
        if self.path.startswith('/redirect_'):
            self._redirect()
        elif self.path.startswith('/method'):
            self._method('HEAD')
        else:
            self._status(404)

    def do_PUT(self):
        data = self._read_data() + str(self.headers).encode()
        if self.path.startswith('/redirect_'):
            self._redirect()
        elif self.path.startswith('/method'):
            self._method('PUT', data)
        else:
            self._status(404)

    def do_GET(self):
        if self.path == '/video.html':
            payload = b'<html><video src="/vid.mp4" /></html>'
            self.send_response(200)
            self.send_header('Content-Type', 'text/html; charset=utf-8')
            self.send_header('Content-Length', str(len(payload)))
            self.end_headers()
            self.wfile.write(payload)
        elif self.path == '/vid.mp4':
            payload = b'\x00\x00\x00\x00\x20\x66\x74[video]'
            self.send_response(200)
            self.send_header('Content-Type', 'video/mp4')
            self.send_header('Content-Length', str(len(payload)))
            self.end_headers()
            self.wfile.write(payload)
        elif self.path == '/%E4%B8%AD%E6%96%87.html':
            payload = b'<html><video src="/vid.mp4" /></html>'
            self.send_response(200)
            self.send_header('Content-Type', 'text/html; charset=utf-8')
            self.send_header('Content-Length', str(len(payload)))
            self.end_headers()
            self.wfile.write(payload)
        elif self.path == '/%c7%9f':
            payload = b'<html><video src="/vid.mp4" /></html>'
            self.send_response(200)
            self.send_header('Content-Type', 'text/html; charset=utf-8')
            self.send_header('Content-Length', str(len(payload)))
            self.end_headers()
            self.wfile.write(payload)
        elif self.path.startswith('/redirect_loop'):
            self.send_response(301)
            self.send_header('Location', self.path)
            self.send_header('Content-Length', '0')
            self.end_headers()
        elif self.path == '/redirect_dotsegments':
            self.send_response(301)
            # redirect to /headers but with dot segments before
            self.send_header('Location', '/a/b/./../../headers')
            self.send_header('Content-Length', '0')
            self.end_headers()
        elif self.path == '/redirect_dotsegments_absolute':
            self.send_response(301)
            # redirect to /headers but with dot segments before - absolute url
            self.send_header('Location', f'http://127.0.0.1:{http_server_port(self.server)}/a/b/./../../headers')
            self.send_header('Content-Length', '0')
            self.end_headers()
        elif self.path.startswith('/redirect_'):
            self._redirect()
        elif self.path.startswith('/method'):
            self._method('GET', str(self.headers).encode())
        elif self.path.startswith('/headers'):
            self._headers()
        elif self.path.startswith('/308-to-headers'):
            self.send_response(308)
            # redirect to "localhost" for testing cookie redirection handling
            self.send_header('Location', f'http://localhost:{self.connection.getsockname()[1]}/headers')
            self.send_header('Content-Length', '0')
            self.end_headers()
        elif self.path == '/trailing_garbage':
            payload = b'<html><video src="/vid.mp4" /></html>'
            self.send_response(200)
            self.send_header('Content-Type', 'text/html; charset=utf-8')
            self.send_header('Content-Encoding', 'gzip')
            buf = io.BytesIO()
            with gzip.GzipFile(fileobj=buf, mode='wb') as f:
                f.write(payload)
            compressed = buf.getvalue() + b'trailing garbage'
            self.send_header('Content-Length', str(len(compressed)))
            self.end_headers()
            self.wfile.write(compressed)
        elif self.path == '/302-non-ascii-redirect':
            new_url = f'http://127.0.0.1:{http_server_port(self.server)}/中文.html'
            self.send_response(301)
            self.send_header('Location', new_url)
            self.send_header('Content-Length', '0')
            self.end_headers()
        elif self.path == '/content-encoding':
            encodings = self.headers.get('ytdl-encoding', '')
            payload = b'<html><video src="/vid.mp4" /></html>'
            for encoding in filter(None, (e.strip() for e in encodings.split(','))):
                if encoding == 'br' and brotli:
                    payload = brotli.compress(payload)
                elif encoding == 'gzip':
                    payload = gzip.compress(payload, mtime=0)
                elif encoding == 'deflate':
                    payload = zlib.compress(payload)
                elif encoding == 'unsupported':
                    payload = b'raw'
                    break
                else:
                    self._status(415)
                    return
            self.send_response(200)
            self.send_header('Content-Encoding', encodings)
            self.send_header('Content-Length', str(len(payload)))
            self.end_headers()
            self.wfile.write(payload)
        elif self.path.startswith('/gen_'):
            payload = b'<html></html>'
            self.send_response(int(self.path[len('/gen_'):]))
            self.send_header('Content-Type', 'text/html; charset=utf-8')
            self.send_header('Content-Length', str(len(payload)))
            self.end_headers()
            self.wfile.write(payload)
        elif self.path.startswith('/incompleteread'):
            payload = b'<html></html>'
            self.send_response(200)
            self.send_header('Content-Type', 'text/html; charset=utf-8')
            self.send_header('Content-Length', '234234')
            self.end_headers()
            self.wfile.write(payload)
            self.finish()
        elif self.path.startswith('/timeout_'):
            time.sleep(int(self.path[len('/timeout_'):]))
            self._headers()
        elif self.path == '/source_address':
            payload = str(self.client_address[0]).encode()
            self.send_response(200)
            self.send_header('Content-Type', 'text/html; charset=utf-8')
            self.send_header('Content-Length', str(len(payload)))
            self.end_headers()
            self.wfile.write(payload)
            self.finish()
        elif self.path == '/get_cookie':
            self.send_response(200)
            self.send_header('Set-Cookie', 'test=ytdlp; path=/')
            self.end_headers()
            self.finish()
        else:
            self._status(404)

    def send_header(self, keyword, value):
        """
        Forcibly allow HTTP server to send non percent-encoded non-ASCII characters in headers.
        This is against what is defined in RFC 3986, however we need to test we support this
        since some sites incorrectly do this.
        """
        if keyword.lower() == 'connection':
            return super().send_header(keyword, value)

        if not hasattr(self, '_headers_buffer'):
            self._headers_buffer = []

        self._headers_buffer.append(f'{keyword}: {value}\r\n'.encode())


class TestRequestHandlerBase:
    @classmethod
    def setup_class(cls):
        cls.http_httpd = http.server.ThreadingHTTPServer(
            ('127.0.0.1', 0), HTTPTestRequestHandler)
        cls.http_port = http_server_port(cls.http_httpd)
        cls.http_server_thread = threading.Thread(target=cls.http_httpd.serve_forever)
        # FIXME: we should probably stop the http server thread after each test
        # See: https://github.com/yt-dlp/yt-dlp/pull/7094#discussion_r1199746041
        cls.http_server_thread.daemon = True
        cls.http_server_thread.start()

        # HTTPS server
        certfn = os.path.join(TEST_DIR, 'testcert.pem')
        cls.https_httpd = http.server.ThreadingHTTPServer(
            ('127.0.0.1', 0), HTTPTestRequestHandler)
        sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        sslctx.load_cert_chain(certfn, None)
        cls.https_httpd.socket = sslctx.wrap_socket(cls.https_httpd.socket, server_side=True)
        cls.https_port = http_server_port(cls.https_httpd)
        cls.https_server_thread = threading.Thread(target=cls.https_httpd.serve_forever)
        cls.https_server_thread.daemon = True
        cls.https_server_thread.start()


@pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
@pytest.mark.handler_flaky('CurlCFFI', os.name == 'nt', reason='segfaults')
class TestHTTPRequestHandler(TestRequestHandlerBase):
    def test_verify_cert(self, handler):
        with handler() as rh:
            with pytest.raises(CertificateVerifyError):
                validate_and_send(rh, Request(f'https://127.0.0.1:{self.https_port}/headers'))

        with handler(verify=False) as rh:
            r = validate_and_send(rh, Request(f'https://127.0.0.1:{self.https_port}/headers'))
            assert r.status == 200
            r.close()

    def test_ssl_error(self, handler):
        # HTTPS server with too old TLS version
        # XXX: is there a better way to test this than to create a new server?
        https_httpd = http.server.ThreadingHTTPServer(
            ('127.0.0.1', 0), HTTPTestRequestHandler)
        sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        sslctx.maximum_version = ssl.TLSVersion.TLSv1_1
        sslctx.load_cert_chain(os.path.join(TEST_DIR, 'testcert.pem'), None)
        https_httpd.socket = sslctx.wrap_socket(https_httpd.socket, server_side=True)
        https_port = http_server_port(https_httpd)
        https_server_thread = threading.Thread(target=https_httpd.serve_forever)
        https_server_thread.daemon = True
        https_server_thread.start()

        with handler(verify=False) as rh:
            with pytest.raises(SSLError, match=r'(?i)ssl(?:v3|/tls).alert.handshake.failure') as exc_info:
                validate_and_send(rh, Request(f'https://127.0.0.1:{https_port}/headers'))
            assert not issubclass(exc_info.type, CertificateVerifyError)

    @pytest.mark.skip_handler('CurlCFFI', 'legacy_ssl ignored by CurlCFFI')
    def test_legacy_ssl_extension(self, handler):
        # HTTPS server with old ciphers
        # XXX: is there a better way to test this than to create a new server?
        https_httpd = http.server.ThreadingHTTPServer(
            ('127.0.0.1', 0), HTTPTestRequestHandler)
        sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        sslctx.maximum_version = ssl.TLSVersion.TLSv1_2
        sslctx.set_ciphers('SHA1:AESCCM:aDSS:eNULL:aNULL')
        sslctx.load_cert_chain(os.path.join(TEST_DIR, 'testcert.pem'), None)
        https_httpd.socket = sslctx.wrap_socket(https_httpd.socket, server_side=True)
        https_port = http_server_port(https_httpd)
        https_server_thread = threading.Thread(target=https_httpd.serve_forever)
        https_server_thread.daemon = True
        https_server_thread.start()

        with handler(verify=False) as rh:
            res = validate_and_send(rh, Request(f'https://127.0.0.1:{https_port}/headers', extensions={'legacy_ssl': True}))
            assert res.status == 200
            res.close()

            # Ensure only applies to request extension
            with pytest.raises(SSLError):
                validate_and_send(rh, Request(f'https://127.0.0.1:{https_port}/headers'))

    @pytest.mark.skip_handler('CurlCFFI', 'legacy_ssl ignored by CurlCFFI')
    def test_legacy_ssl_support(self, handler):
        # HTTPS server with old ciphers
        # XXX: is there a better way to test this than to create a new server?
        https_httpd = http.server.ThreadingHTTPServer(
            ('127.0.0.1', 0), HTTPTestRequestHandler)
        sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        sslctx.maximum_version = ssl.TLSVersion.TLSv1_2
        sslctx.set_ciphers('SHA1:AESCCM:aDSS:eNULL:aNULL')
        sslctx.load_cert_chain(os.path.join(TEST_DIR, 'testcert.pem'), None)
        https_httpd.socket = sslctx.wrap_socket(https_httpd.socket, server_side=True)
        https_port = http_server_port(https_httpd)
        https_server_thread = threading.Thread(target=https_httpd.serve_forever)
        https_server_thread.daemon = True
        https_server_thread.start()

        with handler(verify=False, legacy_ssl_support=True) as rh:
            res = validate_and_send(rh, Request(f'https://127.0.0.1:{https_port}/headers'))
            assert res.status == 200
            res.close()

    def test_percent_encode(self, handler):
        with handler() as rh:
            # Unicode characters should be encoded with uppercase percent-encoding
            res = validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/中文.html'))
            assert res.status == 200
            res.close()
            # don't normalize existing percent encodings
            res = validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/%c7%9f'))
            assert res.status == 200
            res.close()

    @pytest.mark.parametrize('path', [
        '/a/b/./../../headers',
        '/redirect_dotsegments',
        # https://github.com/yt-dlp/yt-dlp/issues/9020
        '/redirect_dotsegments_absolute',
    ])
    def test_remove_dot_segments(self, handler, path):
        with handler(verbose=True) as rh:
            # This isn't a comprehensive test,
            # but it should be enough to check whether the handler is removing dot segments in required scenarios
            res = validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}{path}'))
            assert res.status == 200
            assert res.url == f'http://127.0.0.1:{self.http_port}/headers'
            res.close()

    @pytest.mark.skip_handler('CurlCFFI', 'not supported by curl-cffi (non-standard)')
    def test_unicode_path_redirection(self, handler):
        with handler() as rh:
            r = validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/302-non-ascii-redirect'))
            assert r.url == f'http://127.0.0.1:{self.http_port}/%E4%B8%AD%E6%96%87.html'
            r.close()

    def test_raise_http_error(self, handler):
        with handler() as rh:
            for bad_status in (400, 500, 599, 302):
                with pytest.raises(HTTPError):
                    validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/gen_{bad_status}'))

            # Should not raise an error
            validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/gen_200')).close()

    def test_response_url(self, handler):
        with handler() as rh:
            # Response url should be that of the last url in redirect chain
            res = validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/redirect_301'))
            assert res.url == f'http://127.0.0.1:{self.http_port}/method'
            res.close()
            res2 = validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/gen_200'))
            assert res2.url == f'http://127.0.0.1:{self.http_port}/gen_200'
            res2.close()

    # Covers some basic cases we expect some level of consistency between request handlers for
    @pytest.mark.parametrize('redirect_status,method,expected', [
        # A 303 must either use GET or HEAD for subsequent request
        (303, 'POST', ('', 'GET', False)),
        (303, 'HEAD', ('', 'HEAD', False)),

        # 301 and 302 turn POST only into a GET
        (301, 'POST', ('', 'GET', False)),
        (301, 'HEAD', ('', 'HEAD', False)),
        (302, 'POST', ('', 'GET', False)),
        (302, 'HEAD', ('', 'HEAD', False)),

        # 307 and 308 should not change method
        (307, 'POST', ('testdata', 'POST', True)),
        (308, 'POST', ('testdata', 'POST', True)),
        (307, 'HEAD', ('', 'HEAD', False)),
        (308, 'HEAD', ('', 'HEAD', False)),
    ])
    def test_redirect(self, handler, redirect_status, method, expected):
        with handler() as rh:
            data = b'testdata' if method == 'POST' else None
            headers = {}
            if data is not None:
                headers['Content-Type'] = 'application/test'
            res = validate_and_send(
                rh, Request(f'http://127.0.0.1:{self.http_port}/redirect_{redirect_status}', method=method, data=data,
                            headers=headers))

            headers = b''
            data_recv = b''
            if data is not None:
                data_recv += res.read(len(data))
                if data_recv != data:
                    headers += data_recv
                    data_recv = b''

            headers += res.read()

            assert expected[0] == data_recv.decode()
            assert expected[1] == res.headers.get('method')
            assert expected[2] == ('content-length' in headers.decode().lower())

    def test_request_cookie_header(self, handler):
        # We should accept a Cookie header being passed as in normal headers and handle it appropriately.
        with handler() as rh:
            # Specified Cookie header should be used
            res = validate_and_send(
                rh, Request(
                    f'http://127.0.0.1:{self.http_port}/headers',
                    headers={'Cookie': 'test=test'})).read().decode()
            assert 'cookie: test=test' in res.lower()

            # Specified Cookie header should be removed on any redirect
            res = validate_and_send(
                rh, Request(
                    f'http://127.0.0.1:{self.http_port}/308-to-headers',
                    headers={'Cookie': 'test=test2'})).read().decode()
            assert 'cookie: test=test2' not in res.lower()

        # Specified Cookie header should override global cookiejar for that request
        # Whether cookies from the cookiejar is applied on the redirect is considered undefined for now
        cookiejar = YoutubeDLCookieJar()
        cookiejar.set_cookie(http.cookiejar.Cookie(
            version=0, name='test', value='ytdlp', port=None, port_specified=False,
            domain='127.0.0.1', domain_specified=True, domain_initial_dot=False, path='/',
            path_specified=True, secure=False, expires=None, discard=False, comment=None,
            comment_url=None, rest={}))

        with handler(cookiejar=cookiejar) as rh:
            data = validate_and_send(
                rh, Request(f'http://127.0.0.1:{self.http_port}/headers', headers={'cookie': 'test=test3'})).read()
            assert b'cookie: test=ytdlp' not in data.lower()
            assert b'cookie: test=test3' in data.lower()

    def test_redirect_loop(self, handler):
        with handler() as rh:
            with pytest.raises(HTTPError, match='redirect loop'):
                validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/redirect_loop'))

    def test_incompleteread(self, handler):
        with handler(timeout=2) as rh:
            with pytest.raises(IncompleteRead, match='13 bytes read, 234221 more expected'):
                validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/incompleteread')).read()

    def test_cookies(self, handler):
        cookiejar = YoutubeDLCookieJar()
        cookiejar.set_cookie(http.cookiejar.Cookie(
            0, 'test', 'ytdlp', None, False, '127.0.0.1', True,
            False, '/headers', True, False, None, False, None, None, {}))

        with handler(cookiejar=cookiejar) as rh:
            data = validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/headers')).read()
            assert b'cookie: test=ytdlp' in data.lower()

        # Per request
        with handler() as rh:
            data = validate_and_send(
                rh, Request(f'http://127.0.0.1:{self.http_port}/headers', extensions={'cookiejar': cookiejar})).read()
            assert b'cookie: test=ytdlp' in data.lower()

    def test_cookie_sync_only_cookiejar(self, handler):
        # Ensure that cookies are ONLY being handled by the cookiejar
        with handler() as rh:
            validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/get_cookie', extensions={'cookiejar': YoutubeDLCookieJar()}))
            data = validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/headers', extensions={'cookiejar': YoutubeDLCookieJar()})).read()
            assert b'cookie: test=ytdlp' not in data.lower()

    def test_cookie_sync_delete_cookie(self, handler):
        # Ensure that cookies are ONLY being handled by the cookiejar
        cookiejar = YoutubeDLCookieJar()
        with handler(cookiejar=cookiejar) as rh:
            validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/get_cookie'))
            data = validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/headers')).read()
            assert b'cookie: test=ytdlp' in data.lower()
            cookiejar.clear_session_cookies()
            data = validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/headers')).read()
            assert b'cookie: test=ytdlp' not in data.lower()

    def test_headers(self, handler):
        with handler(headers=HTTPHeaderDict({'test1': 'test', 'test2': 'test2'})) as rh:
            # Global Headers
            data = validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/headers')).read().lower()
            assert b'test1: test' in data

            # Per request headers, merged with global
            data = validate_and_send(rh, Request(
                f'http://127.0.0.1:{self.http_port}/headers', headers={'test2': 'changed', 'test3': 'test3'})).read().lower()
            assert b'test1: test' in data
            assert b'test2: changed' in data
            assert b'test2: test2' not in data
            assert b'test3: test3' in data

    def test_read_timeout(self, handler):
        with handler() as rh:
            # Default timeout is 20 seconds, so this should go through
            validate_and_send(
                rh, Request(f'http://127.0.0.1:{self.http_port}/timeout_1'))

        with handler(timeout=0.1) as rh:
            with pytest.raises(TransportError):
                validate_and_send(
                    rh, Request(f'http://127.0.0.1:{self.http_port}/timeout_5'))

            # Per request timeout, should override handler timeout
            validate_and_send(
                rh, Request(f'http://127.0.0.1:{self.http_port}/timeout_1', extensions={'timeout': 4}))

    def test_connect_timeout(self, handler):
        # nothing should be listening on this port
        connect_timeout_url = 'http://10.255.255.255'
        with handler(timeout=0.01) as rh, pytest.raises(TransportError):
            now = time.time()
            validate_and_send(rh, Request(connect_timeout_url))
        assert time.time() - now < DEFAULT_TIMEOUT

        # Per request timeout, should override handler timeout
        request = Request(connect_timeout_url, extensions={'timeout': 0.01})
        with handler() as rh, pytest.raises(TransportError):
            now = time.time()
            validate_and_send(rh, request)
        assert time.time() - now < DEFAULT_TIMEOUT

    def test_source_address(self, handler):
        source_address = f'127.0.0.{random.randint(5, 255)}'
        # on some systems these loopback addresses we need for testing may not be available
        # see: https://github.com/yt-dlp/yt-dlp/issues/8890
        verify_address_availability(source_address)
        with handler(source_address=source_address) as rh:
            data = validate_and_send(
                rh, Request(f'http://127.0.0.1:{self.http_port}/source_address')).read().decode()
            assert source_address == data

    @pytest.mark.skip_handler('CurlCFFI', 'not supported by curl-cffi')
    def test_gzip_trailing_garbage(self, handler):
        with handler() as rh:
            res = validate_and_send(rh, Request(f'http://localhost:{self.http_port}/trailing_garbage'))
            data = res.read().decode()
            assert data == '<html><video src="/vid.mp4" /></html>'
            # Should auto-close and mark the response adaptor as closed
            assert res.closed

    @pytest.mark.skip_handler('CurlCFFI', 'not applicable to curl-cffi')
    @pytest.mark.skipif(not brotli, reason='brotli support is not installed')
    def test_brotli(self, handler):
        with handler() as rh:
            res = validate_and_send(
                rh, Request(
                    f'http://127.0.0.1:{self.http_port}/content-encoding',
                    headers={'ytdl-encoding': 'br'}))
            assert res.headers.get('Content-Encoding') == 'br'
            assert res.read() == b'<html><video src="/vid.mp4" /></html>'
            # Should auto-close and mark the response adaptor as closed
            assert res.closed

    def test_deflate(self, handler):
        with handler() as rh:
            res = validate_and_send(
                rh, Request(
                    f'http://127.0.0.1:{self.http_port}/content-encoding',
                    headers={'ytdl-encoding': 'deflate'}))
            assert res.headers.get('Content-Encoding') == 'deflate'
            assert res.read() == b'<html><video src="/vid.mp4" /></html>'
            # Should auto-close and mark the response adaptor as closed
            assert res.closed

    def test_gzip(self, handler):
        with handler() as rh:
            res = validate_and_send(
                rh, Request(
                    f'http://127.0.0.1:{self.http_port}/content-encoding',
                    headers={'ytdl-encoding': 'gzip'}))
            assert res.headers.get('Content-Encoding') == 'gzip'
            assert res.read() == b'<html><video src="/vid.mp4" /></html>'
            # Should auto-close and mark the response adaptor as closed
            assert res.closed

    def test_multiple_encodings(self, handler):
        with handler() as rh:
            for pair in ('gzip,deflate', 'deflate, gzip', 'gzip, gzip', 'deflate, deflate'):
                res = validate_and_send(
                    rh, Request(
                        f'http://127.0.0.1:{self.http_port}/content-encoding',
                        headers={'ytdl-encoding': pair}))
                assert res.headers.get('Content-Encoding') == pair
                assert res.read() == b'<html><video src="/vid.mp4" /></html>'
                # Should auto-close and mark the response adaptor as closed
                assert res.closed

    @pytest.mark.skip_handler('CurlCFFI', 'not supported by curl-cffi')
    def test_unsupported_encoding(self, handler):
        with handler() as rh:
            res = validate_and_send(
                rh, Request(
                    f'http://127.0.0.1:{self.http_port}/content-encoding',
                    headers={'ytdl-encoding': 'unsupported', 'Accept-Encoding': '*'}))
            assert res.headers.get('Content-Encoding') == 'unsupported'
            assert res.read() == b'raw'
            # Should auto-close and mark the response adaptor as closed
            assert res.closed

    def test_read(self, handler):
        with handler() as rh:
            res = validate_and_send(
                rh, Request(f'http://127.0.0.1:{self.http_port}/headers'))
            assert res.readable()
            assert res.read(1) == b'H'
            # Ensure we don't close the adaptor yet
            assert not res.closed
            assert res.read(3) == b'ost'
            assert res.read().decode().endswith('\n\n')
            assert res.read() == b''
            # Should auto-close and mark the response adaptor as closed
            assert res.closed

    def test_request_disable_proxy(self, handler):
        for proxy_proto in handler._SUPPORTED_PROXY_SCHEMES or ['http']:
            # Given the handler is configured with a proxy
            with handler(proxies={'http': f'{proxy_proto}://10.255.255.255'}, timeout=5) as rh:
                # When a proxy is explicitly set to None for the request
                res = validate_and_send(
                    rh, Request(f'http://127.0.0.1:{self.http_port}/headers', proxies={'http': None}))
                # Then no proxy should be used
                res.close()
                assert res.status == 200

    @pytest.mark.skip_handlers_if(
        lambda _, handler: Features.NO_PROXY not in handler._SUPPORTED_FEATURES, 'handler does not support NO_PROXY')
    def test_noproxy(self, handler):
        for proxy_proto in handler._SUPPORTED_PROXY_SCHEMES or ['http']:
            # Given the handler is configured with a proxy
            with handler(proxies={'http': f'{proxy_proto}://10.255.255.255'}, timeout=5) as rh:
                for no_proxy in (f'127.0.0.1:{self.http_port}', '127.0.0.1', 'localhost'):
                    # When request no proxy includes the request url host
                    nop_response = validate_and_send(
                        rh, Request(f'http://127.0.0.1:{self.http_port}/headers', proxies={'no': no_proxy}))
                    # Then the proxy should not be used
                    assert nop_response.status == 200
                    nop_response.close()

    @pytest.mark.skip_handlers_if(
        lambda _, handler: Features.ALL_PROXY not in handler._SUPPORTED_FEATURES, 'handler does not support ALL_PROXY')
    def test_allproxy(self, handler):
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
true
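The /content-encoding route in the (truncated) record above compresses its payload according to the ytdl-encoding request header. A standalone round-trip of the two stdlib codecs it uses:

import gzip
import zlib

payload = b'<html><video src="/vid.mp4" /></html>'

# gzip.compress(..., mtime=0) keeps the output deterministic, as in the handler
assert gzip.decompress(gzip.compress(payload, mtime=0)) == payload
assert zlib.decompress(zlib.compress(payload)) == payload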
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_config.py
test/test_config.py
#!/usr/bin/env python3

# Allow direct execution
import os
import sys
import unittest
import unittest.mock

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import contextlib
import itertools
from pathlib import Path

from yt_dlp.compat import compat_expanduser
from yt_dlp.options import create_parser, parseOpts
from yt_dlp.utils import Config, get_executable_path

ENVIRON_DEFAULTS = {
    'HOME': None,
    'XDG_CONFIG_HOME': '/_xdg_config_home/',
    'USERPROFILE': 'C:/Users/testing/',
    'APPDATA': 'C:/Users/testing/AppData/Roaming/',
    'HOMEDRIVE': 'C:/',
    'HOMEPATH': 'Users/testing/',
}


@contextlib.contextmanager
def set_environ(**kwargs):
    saved_environ = os.environ.copy()
    for name, value in {**ENVIRON_DEFAULTS, **kwargs}.items():
        if value is None:
            os.environ.pop(name, None)
        else:
            os.environ[name] = value
    yield
    os.environ.clear()
    os.environ.update(saved_environ)


def _generate_expected_groups():
    xdg_config_home = os.getenv('XDG_CONFIG_HOME') or compat_expanduser('~/.config')
    appdata_dir = os.getenv('appdata')
    home_dir = compat_expanduser('~')
    return {
        'Portable': [
            Path(get_executable_path(), 'yt-dlp.conf'),
        ],
        'Home': [
            Path('yt-dlp.conf'),
        ],
        'User': [
            Path(xdg_config_home, 'yt-dlp.conf'),
            Path(xdg_config_home, 'yt-dlp', 'config'),
            Path(xdg_config_home, 'yt-dlp', 'config.txt'),
            *((
                Path(appdata_dir, 'yt-dlp.conf'),
                Path(appdata_dir, 'yt-dlp', 'config'),
                Path(appdata_dir, 'yt-dlp', 'config.txt'),
            ) if appdata_dir else ()),
            Path(home_dir, 'yt-dlp.conf'),
            Path(home_dir, 'yt-dlp.conf.txt'),
            Path(home_dir, '.yt-dlp', 'config'),
            Path(home_dir, '.yt-dlp', 'config.txt'),
        ],
        'System': [
            Path('/etc/yt-dlp.conf'),
            Path('/etc/yt-dlp/config'),
            Path('/etc/yt-dlp/config.txt'),
        ],
    }


class TestConfig(unittest.TestCase):
    maxDiff = None

    @set_environ()
    def test_config__ENVIRON_DEFAULTS_sanity(self):
        expected = make_expected()
        self.assertCountEqual(
            set(expected), expected,
            'ENVIRON_DEFAULTS produces non unique names')

    def test_config_all_environ_values(self):
        for name, value in ENVIRON_DEFAULTS.items():
            for new_value in (None, '', '.', value or '/some/dir'):
                with set_environ(**{name: new_value}):
                    self._simple_grouping_test()

    def test_config_default_expected_locations(self):
        files, _ = self._simple_config_test()
        self.assertEqual(
            files, make_expected(),
            'Not all expected locations have been checked')

    def test_config_default_grouping(self):
        self._simple_grouping_test()

    def _simple_grouping_test(self):
        expected_groups = make_expected_groups()
        for name, group in expected_groups.items():
            for index, existing_path in enumerate(group):
                result, opts = self._simple_config_test(existing_path)
                expected = expected_from_expected_groups(expected_groups, existing_path)
                self.assertEqual(
                    result, expected,
                    f'The checked locations do not match the expected ({name}, {index})')
                self.assertEqual(
                    opts.outtmpl['default'], '1',
                    f'The used result value was incorrect ({name}, {index})')

    def _simple_config_test(self, *stop_paths):
        encountered = 0
        paths = []

        def read_file(filename, default=[]):
            nonlocal encountered
            path = Path(filename)
            paths.append(path)
            if path in stop_paths:
                encountered += 1
                return ['-o', f'{encountered}']

        with ConfigMock(read_file):
            _, opts, _ = parseOpts([], False)

        return paths, opts

    @set_environ()
    def test_config_early_exit_commandline(self):
        self._early_exit_test(0, '--ignore-config')

    @set_environ()
    def test_config_early_exit_files(self):
        for index, _ in enumerate(make_expected(), 1):
            self._early_exit_test(index)

    def _early_exit_test(self, allowed_reads, *args):
        reads = 0

        def read_file(filename, default=[]):
            nonlocal reads
            reads += 1

            if reads > allowed_reads:
                self.fail('The remaining config was not ignored')
            elif reads == allowed_reads:
                return ['--ignore-config']

        with ConfigMock(read_file):
            parseOpts(args, False)

    @set_environ()
    def test_config_override_commandline(self):
        self._override_test(0, '-o', 'pass')

    @set_environ()
    def test_config_override_files(self):
        for index, _ in enumerate(make_expected(), 1):
            self._override_test(index)

    def _override_test(self, start_index, *args):
        index = 0

        def read_file(filename, default=[]):
            nonlocal index
            index += 1

            if index > start_index:
                return ['-o', 'fail']
            elif index == start_index:
                return ['-o', 'pass']

        with ConfigMock(read_file):
            _, opts, _ = parseOpts(args, False)

        self.assertEqual(
            opts.outtmpl['default'], 'pass',
            'The earlier group did not override the later ones')


@contextlib.contextmanager
def ConfigMock(read_file=None):
    with unittest.mock.patch('yt_dlp.options.Config') as mock:
        mock.return_value = Config(create_parser())
        if read_file is not None:
            mock.read_file = read_file

        yield mock


def make_expected(*filepaths):
    return expected_from_expected_groups(_generate_expected_groups(), *filepaths)


def make_expected_groups(*filepaths):
    return _filter_expected_groups(_generate_expected_groups(), filepaths)


def expected_from_expected_groups(expected_groups, *filepaths):
    return list(itertools.chain.from_iterable(
        _filter_expected_groups(expected_groups, filepaths).values()))


def _filter_expected_groups(expected, filepaths):
    if not filepaths:
        return expected

    result = {}
    for group, paths in expected.items():
        new_paths = []
        for path in paths:
            new_paths.append(path)
            if path in filepaths:
                break

        result[group] = new_paths

    return result


if __name__ == '__main__':
    unittest.main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
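The set_environ helper in test_config.py above is a general pattern for tests that must control process environment variables and restore them afterwards. A minimal standalone sketch of the same pattern follows; the DEFAULTS table and the DEMO_VAR name are illustrative, not part of the test suite, and the try/finally is an extra safeguard so the environment is restored even if the body raises.

import contextlib
import os

DEFAULTS = {'DEMO_VAR': None}  # hypothetical defaults; None means "make sure it is unset"


@contextlib.contextmanager
def patched_environ(**overrides):
    saved = os.environ.copy()
    for name, value in {**DEFAULTS, **overrides}.items():
        if value is None:
            os.environ.pop(name, None)  # remove the variable entirely
        else:
            os.environ[name] = value
    try:
        yield
    finally:
        # Restore the original environment even on failure
        os.environ.clear()
        os.environ.update(saved)


with patched_environ(DEMO_VAR='x'):
    assert os.environ['DEMO_VAR'] == 'x'
assert 'DEMO_VAR' not in os.environ  # assuming it was not set beforehand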
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_postprocessors.py
test/test_postprocessors.py
#!/usr/bin/env python3

# Allow direct execution
import os
import sys
import unittest

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import subprocess

from yt_dlp import YoutubeDL
from yt_dlp.utils import shell_quote
from yt_dlp.postprocessor import (
    ExecPP,
    FFmpegThumbnailsConvertorPP,
    MetadataFromFieldPP,
    MetadataParserPP,
    ModifyChaptersPP,
    SponsorBlockPP,
)


class TestMetadataFromField(unittest.TestCase):

    def test_format_to_regex(self):
        self.assertEqual(
            MetadataParserPP.format_to_regex('%(title)s - %(artist)s'),
            r'(?P<title>.+)\ \-\ (?P<artist>.+)')
        self.assertEqual(MetadataParserPP.format_to_regex(r'(?P<x>.+)'), r'(?P<x>.+)')

    def test_field_to_template(self):
        self.assertEqual(MetadataParserPP.field_to_template('title'), '%(title)s')
        self.assertEqual(MetadataParserPP.field_to_template('1'), '1')
        self.assertEqual(MetadataParserPP.field_to_template('foo bar'), 'foo bar')
        self.assertEqual(MetadataParserPP.field_to_template(' literal'), ' literal')

    def test_metadatafromfield(self):
        self.assertEqual(
            MetadataFromFieldPP.to_action('%(title)s \\: %(artist)s:%(title)s : %(artist)s'),
            (MetadataParserPP.Actions.INTERPRET, '%(title)s : %(artist)s', '%(title)s : %(artist)s'))


class TestConvertThumbnail(unittest.TestCase):
    def test_escaping(self):
        pp = FFmpegThumbnailsConvertorPP()
        if not pp.available:
            print('Skipping: ffmpeg not found')
            return

        test_data_dir = 'test/testdata/thumbnails'
        generated_file = f'{test_data_dir}/empty.webp'

        subprocess.check_call([
            pp.executable, '-y', '-f', 'lavfi', '-i', 'color=c=black:s=320x320',
            '-c:v', 'libwebp', '-pix_fmt', 'yuv420p', '-vframes', '1', generated_file,
        ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

        file = test_data_dir + '/foo %d bar/foo_%d.{}'
        initial_file = file.format('webp')
        os.replace(generated_file, initial_file)

        tests = (('webp', 'png'), ('png', 'jpg'))

        for inp, out in tests:
            out_file = file.format(out)
            if os.path.exists(out_file):
                os.remove(out_file)
            pp.convert_thumbnail(file.format(inp), out)
            self.assertTrue(os.path.exists(out_file))

        for _, out in tests:
            os.remove(file.format(out))
        os.remove(initial_file)


class TestExec(unittest.TestCase):
    def test_parse_cmd(self):
        pp = ExecPP(YoutubeDL(), '')
        info = {'filepath': 'file name'}
        cmd = 'echo {}'.format(shell_quote(info['filepath']))

        self.assertEqual(pp.parse_cmd('echo', info), cmd)
        self.assertEqual(pp.parse_cmd('echo {}', info), cmd)
        self.assertEqual(pp.parse_cmd('echo %(filepath)q', info), cmd)


class TestModifyChaptersPP(unittest.TestCase):
    def setUp(self):
        self._pp = ModifyChaptersPP(YoutubeDL())

    @staticmethod
    def _sponsor_chapter(start, end, cat, remove=False, title=None):
        if title is None:
            title = SponsorBlockPP.CATEGORIES[cat]
        return {
            'start_time': start,
            'end_time': end,
            '_categories': [(cat, start, end, title)],
            **({'remove': True} if remove else {}),
        }

    @staticmethod
    def _chapter(start, end, title=None, remove=False):
        c = {'start_time': start, 'end_time': end}
        if title is not None:
            c['title'] = title
        if remove:
            c['remove'] = True
        return c

    def _chapters(self, ends, titles):
        self.assertEqual(len(ends), len(titles))
        start = 0
        chapters = []
        for e, t in zip(ends, titles, strict=True):
            chapters.append(self._chapter(start, e, t))
            start = e
        return chapters

    def _remove_marked_arrange_sponsors_test_impl(
            self, chapters, expected_chapters, expected_removed):
        actual_chapters, actual_removed = (
            self._pp._remove_marked_arrange_sponsors(chapters))
        for c in actual_removed:
            c.pop('title', None)
            c.pop('_categories', None)
        actual_chapters = [{
            'start_time': c['start_time'],
            'end_time': c['end_time'],
            'title': c['title'],
        } for c in actual_chapters]
        self.assertSequenceEqual(expected_chapters, actual_chapters)
        self.assertSequenceEqual(expected_removed, actual_removed)

    def test_remove_marked_arrange_sponsors_CanGetThroughUnaltered(self):
        chapters = self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, chapters, [])

    def test_remove_marked_arrange_sponsors_ChapterWithSponsors(self):
        chapters = [
            *self._chapters([70], ['c']),
            self._sponsor_chapter(10, 20, 'sponsor'),
            self._sponsor_chapter(30, 40, 'preview'),
            self._sponsor_chapter(50, 60, 'filler')]
        expected = self._chapters(
            [10, 20, 30, 40, 50, 60, 70],
            ['c', '[SponsorBlock]: Sponsor', 'c', '[SponsorBlock]: Preview/Recap',
             'c', '[SponsorBlock]: Filler Tangent', 'c'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_SponsorBlockChapters(self):
        chapters = [
            *self._chapters([70], ['c']),
            self._sponsor_chapter(10, 20, 'chapter', title='sb c1'),
            self._sponsor_chapter(15, 16, 'chapter', title='sb c2'),
            self._sponsor_chapter(30, 40, 'preview'),
            self._sponsor_chapter(50, 60, 'filler')]
        expected = self._chapters(
            [10, 15, 16, 20, 30, 40, 50, 60, 70],
            ['c', '[SponsorBlock]: sb c1', '[SponsorBlock]: sb c1, sb c2', '[SponsorBlock]: sb c1',
             'c', '[SponsorBlock]: Preview/Recap', 'c', '[SponsorBlock]: Filler Tangent', 'c'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_UniqueNamesForOverlappingSponsors(self):
        chapters = [
            *self._chapters([120], ['c']),
            self._sponsor_chapter(10, 45, 'sponsor'), self._sponsor_chapter(20, 40, 'selfpromo'),
            self._sponsor_chapter(50, 70, 'sponsor'), self._sponsor_chapter(60, 85, 'selfpromo'),
            self._sponsor_chapter(90, 120, 'selfpromo'), self._sponsor_chapter(100, 110, 'sponsor')]
        expected = self._chapters(
            [10, 20, 40, 45, 50, 60, 70, 85, 90, 100, 110, 120],
            ['c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Sponsor, Unpaid/Self Promotion',
             '[SponsorBlock]: Sponsor', 'c', '[SponsorBlock]: Sponsor',
             '[SponsorBlock]: Sponsor, Unpaid/Self Promotion', '[SponsorBlock]: Unpaid/Self Promotion',
             'c', '[SponsorBlock]: Unpaid/Self Promotion',
             '[SponsorBlock]: Unpaid/Self Promotion, Sponsor', '[SponsorBlock]: Unpaid/Self Promotion'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_ChapterWithCuts(self):
        cuts = [self._chapter(10, 20, remove=True),
                self._sponsor_chapter(30, 40, 'sponsor', remove=True),
                self._chapter(50, 60, remove=True)]
        chapters = self._chapters([70], ['c']) + cuts
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, self._chapters([40], ['c']), cuts)

    def test_remove_marked_arrange_sponsors_ChapterWithSponsorsAndCuts(self):
        chapters = [
            *self._chapters([70], ['c']),
            self._sponsor_chapter(10, 20, 'sponsor'),
            self._sponsor_chapter(30, 40, 'selfpromo', remove=True),
            self._sponsor_chapter(50, 60, 'interaction')]
        expected = self._chapters(
            [10, 20, 40, 50, 60],
            ['c', '[SponsorBlock]: Sponsor', 'c', '[SponsorBlock]: Interaction Reminder', 'c'])
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, expected, [self._chapter(30, 40, remove=True)])

    def test_remove_marked_arrange_sponsors_ChapterWithSponsorCutInTheMiddle(self):
        cuts = [self._sponsor_chapter(20, 30, 'selfpromo', remove=True),
                self._chapter(40, 50, remove=True)]
        chapters = [
            *self._chapters([70], ['c']),
            self._sponsor_chapter(10, 60, 'sponsor'),
            *cuts]
        expected = self._chapters(
            [10, 40, 50], ['c', '[SponsorBlock]: Sponsor', 'c'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_ChapterWithCutHidingSponsor(self):
        cuts = [self._sponsor_chapter(20, 50, 'selfpromo', remove=True)]
        chapters = [
            *self._chapters([60], ['c']),
            self._sponsor_chapter(10, 20, 'intro'),
            self._sponsor_chapter(30, 40, 'sponsor'),
            self._sponsor_chapter(50, 60, 'outro'),
            *cuts]
        expected = self._chapters(
            [10, 20, 30], ['c', '[SponsorBlock]: Intermission/Intro Animation',
                           '[SponsorBlock]: Endcards/Credits'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_ChapterWithAdjacentSponsors(self):
        chapters = [
            *self._chapters([70], ['c']),
            self._sponsor_chapter(10, 20, 'sponsor'),
            self._sponsor_chapter(20, 30, 'selfpromo'),
            self._sponsor_chapter(30, 40, 'interaction')]
        expected = self._chapters(
            [10, 20, 30, 40, 70],
            ['c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Unpaid/Self Promotion',
             '[SponsorBlock]: Interaction Reminder', 'c'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_ChapterWithAdjacentCuts(self):
        chapters = [
            *self._chapters([70], ['c']),
            self._sponsor_chapter(10, 20, 'sponsor'),
            self._sponsor_chapter(20, 30, 'interaction', remove=True),
            self._chapter(30, 40, remove=True),
            self._sponsor_chapter(40, 50, 'selfpromo', remove=True),
            self._sponsor_chapter(50, 60, 'interaction')]
        expected = self._chapters(
            [10, 20, 30, 40],
            ['c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Interaction Reminder', 'c'])
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, expected, [self._chapter(20, 50, remove=True)])

    def test_remove_marked_arrange_sponsors_ChapterWithOverlappingSponsors(self):
        chapters = [
            *self._chapters([70], ['c']),
            self._sponsor_chapter(10, 30, 'sponsor'),
            self._sponsor_chapter(20, 50, 'selfpromo'),
            self._sponsor_chapter(40, 60, 'interaction')]
        expected = self._chapters(
            [10, 20, 30, 40, 50, 60, 70],
            ['c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Sponsor, Unpaid/Self Promotion',
             '[SponsorBlock]: Unpaid/Self Promotion',
             '[SponsorBlock]: Unpaid/Self Promotion, Interaction Reminder',
             '[SponsorBlock]: Interaction Reminder', 'c'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_ChapterWithOverlappingCuts(self):
        chapters = [
            *self._chapters([70], ['c']),
            self._sponsor_chapter(10, 30, 'sponsor', remove=True),
            self._sponsor_chapter(20, 50, 'selfpromo', remove=True),
            self._sponsor_chapter(40, 60, 'interaction', remove=True)]
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, self._chapters([20], ['c']), [self._chapter(10, 60, remove=True)])

    def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingSponsors(self):
        chapters = [
            *self._chapters([170], ['c']),
            self._sponsor_chapter(0, 30, 'intro'),
            self._sponsor_chapter(20, 50, 'sponsor'),
            self._sponsor_chapter(40, 60, 'selfpromo'),
            self._sponsor_chapter(70, 90, 'sponsor'),
            self._sponsor_chapter(80, 100, 'sponsor'),
            self._sponsor_chapter(90, 110, 'sponsor'),
            self._sponsor_chapter(120, 140, 'selfpromo'),
            self._sponsor_chapter(130, 160, 'interaction'),
            self._sponsor_chapter(150, 170, 'outro')]
        expected = self._chapters(
            [20, 30, 40, 50, 60, 70, 110, 120, 130, 140, 150, 160, 170],
            ['[SponsorBlock]: Intermission/Intro Animation',
             '[SponsorBlock]: Intermission/Intro Animation, Sponsor', '[SponsorBlock]: Sponsor',
             '[SponsorBlock]: Sponsor, Unpaid/Self Promotion', '[SponsorBlock]: Unpaid/Self Promotion',
             'c', '[SponsorBlock]: Sponsor', 'c', '[SponsorBlock]: Unpaid/Self Promotion',
             '[SponsorBlock]: Unpaid/Self Promotion, Interaction Reminder',
             '[SponsorBlock]: Interaction Reminder',
             '[SponsorBlock]: Interaction Reminder, Endcards/Credits', '[SponsorBlock]: Endcards/Credits'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingCuts(self):
        chapters = [
            *self._chapters([170], ['c']),
            self._chapter(0, 30, remove=True),
            self._sponsor_chapter(20, 50, 'sponsor', remove=True),
            self._chapter(40, 60, remove=True),
            self._sponsor_chapter(70, 90, 'sponsor', remove=True),
            self._chapter(80, 100, remove=True),
            self._chapter(90, 110, remove=True),
            self._sponsor_chapter(120, 140, 'sponsor', remove=True),
            self._sponsor_chapter(130, 160, 'selfpromo', remove=True),
            self._chapter(150, 170, remove=True)]
        expected_cuts = [self._chapter(0, 60, remove=True),
                         self._chapter(70, 110, remove=True),
                         self._chapter(120, 170, remove=True)]
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, self._chapters([20], ['c']), expected_cuts)

    def test_remove_marked_arrange_sponsors_OverlappingSponsorsDifferentTitlesAfterCut(self):
        chapters = [
            *self._chapters([60], ['c']),
            self._sponsor_chapter(10, 60, 'sponsor'),
            self._sponsor_chapter(10, 40, 'intro'),
            self._sponsor_chapter(30, 50, 'interaction'),
            self._sponsor_chapter(30, 50, 'selfpromo', remove=True),
            self._sponsor_chapter(40, 50, 'interaction'),
            self._sponsor_chapter(50, 60, 'outro')]
        expected = self._chapters(
            [10, 30, 40], ['c', '[SponsorBlock]: Sponsor, Intermission/Intro Animation',
                           '[SponsorBlock]: Sponsor, Endcards/Credits'])
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, expected, [self._chapter(30, 50, remove=True)])

    def test_remove_marked_arrange_sponsors_SponsorsNoLongerOverlapAfterCut(self):
        chapters = [
            *self._chapters([70], ['c']),
            self._sponsor_chapter(10, 30, 'sponsor'),
            self._sponsor_chapter(20, 50, 'interaction'),
            self._sponsor_chapter(30, 50, 'selfpromo', remove=True),
            self._sponsor_chapter(40, 60, 'sponsor'),
            self._sponsor_chapter(50, 60, 'interaction')]
        expected = self._chapters(
            [10, 20, 40, 50],
            ['c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Sponsor, Interaction Reminder', 'c'])
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, expected, [self._chapter(30, 50, remove=True)])

    def test_remove_marked_arrange_sponsors_SponsorsStillOverlapAfterCut(self):
        chapters = [
            *self._chapters([70], ['c']),
            self._sponsor_chapter(10, 60, 'sponsor'),
            self._sponsor_chapter(20, 60, 'interaction'),
            self._sponsor_chapter(30, 50, 'selfpromo', remove=True)]
        expected = self._chapters(
            [10, 20, 40, 50],
            ['c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Sponsor, Interaction Reminder', 'c'])
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, expected, [self._chapter(30, 50, remove=True)])

    def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingSponsorsAndCuts(self):
        chapters = [
            *self._chapters([200], ['c']),
            self._sponsor_chapter(10, 40, 'sponsor'),
            self._sponsor_chapter(10, 30, 'intro'),
            self._chapter(20, 30, remove=True),
            self._sponsor_chapter(30, 40, 'selfpromo'),
            self._sponsor_chapter(50, 70, 'sponsor'),
            self._sponsor_chapter(60, 80, 'interaction'),
            self._chapter(70, 80, remove=True),
            self._sponsor_chapter(70, 90, 'sponsor'),
            self._sponsor_chapter(80, 100, 'interaction'),
            self._sponsor_chapter(120, 170, 'selfpromo'),
            self._sponsor_chapter(130, 180, 'outro'),
            self._chapter(140, 150, remove=True),
            self._chapter(150, 160, remove=True)]
        expected = self._chapters(
            [10, 20, 30, 40, 50, 70, 80, 100, 110, 130, 140, 160],
            ['c', '[SponsorBlock]: Sponsor, Intermission/Intro Animation',
             '[SponsorBlock]: Sponsor, Unpaid/Self Promotion', 'c', '[SponsorBlock]: Sponsor',
             '[SponsorBlock]: Sponsor, Interaction Reminder', '[SponsorBlock]: Interaction Reminder',
             'c', '[SponsorBlock]: Unpaid/Self Promotion',
             '[SponsorBlock]: Unpaid/Self Promotion, Endcards/Credits',
             '[SponsorBlock]: Endcards/Credits', 'c'])
        expected_cuts = [self._chapter(20, 30, remove=True),
                         self._chapter(70, 80, remove=True),
                         self._chapter(140, 160, remove=True)]
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, expected_cuts)

    def test_remove_marked_arrange_sponsors_SponsorOverlapsMultipleChapters(self):
        chapters = [
            *self._chapters([20, 40, 60, 80, 100], ['c1', 'c2', 'c3', 'c4', 'c5']),
            self._sponsor_chapter(10, 90, 'sponsor')]
        expected = self._chapters([10, 90, 100], ['c1', '[SponsorBlock]: Sponsor', 'c5'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_CutOverlapsMultipleChapters(self):
        cuts = [self._chapter(10, 90, remove=True)]
        chapters = self._chapters([20, 40, 60, 80, 100], ['c1', 'c2', 'c3', 'c4', 'c5']) + cuts
        expected = self._chapters([10, 20], ['c1', 'c5'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_SponsorsWithinSomeChaptersAndOverlappingOthers(self):
        chapters = [
            *self._chapters([10, 40, 60, 80], ['c1', 'c2', 'c3', 'c4']),
            self._sponsor_chapter(20, 30, 'sponsor'),
            self._sponsor_chapter(50, 70, 'selfpromo')]
        expected = self._chapters(
            [10, 20, 30, 40, 50, 70, 80],
            ['c1', 'c2', '[SponsorBlock]: Sponsor', 'c2', 'c3',
             '[SponsorBlock]: Unpaid/Self Promotion', 'c4'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_CutsWithinSomeChaptersAndOverlappingOthers(self):
        cuts = [self._chapter(20, 30, remove=True), self._chapter(50, 70, remove=True)]
        chapters = self._chapters([10, 40, 60, 80], ['c1', 'c2', 'c3', 'c4']) + cuts
        expected = self._chapters([10, 30, 40, 50], ['c1', 'c2', 'c3', 'c4'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_ChaptersAfterLastSponsor(self):
        chapters = [
            *self._chapters([20, 40, 50, 60], ['c1', 'c2', 'c3', 'c4']),
            self._sponsor_chapter(10, 30, 'music_offtopic')]
        expected = self._chapters(
            [10, 30, 40, 50, 60],
            ['c1', '[SponsorBlock]: Non-Music Section', 'c2', 'c3', 'c4'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_ChaptersAfterLastCut(self):
        cuts = [self._chapter(10, 30, remove=True)]
        chapters = self._chapters([20, 40, 50, 60], ['c1', 'c2', 'c3', 'c4']) + cuts
        expected = self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_SponsorStartsAtChapterStart(self):
        chapters = [
            *self._chapters([10, 20, 40], ['c1', 'c2', 'c3']),
            self._sponsor_chapter(20, 30, 'sponsor')]
        expected = self._chapters([10, 20, 30, 40], ['c1', 'c2', '[SponsorBlock]: Sponsor', 'c3'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_CutStartsAtChapterStart(self):
        cuts = [self._chapter(20, 30, remove=True)]
        chapters = self._chapters([10, 20, 40], ['c1', 'c2', 'c3']) + cuts
        expected = self._chapters([10, 20, 30], ['c1', 'c2', 'c3'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_SponsorEndsAtChapterEnd(self):
        chapters = [
            *self._chapters([10, 30, 40], ['c1', 'c2', 'c3']),
            self._sponsor_chapter(20, 30, 'sponsor')]
        expected = self._chapters([10, 20, 30, 40], ['c1', 'c2', '[SponsorBlock]: Sponsor', 'c3'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_CutEndsAtChapterEnd(self):
        cuts = [self._chapter(20, 30, remove=True)]
        chapters = self._chapters([10, 30, 40], ['c1', 'c2', 'c3']) + cuts
        expected = self._chapters([10, 20, 30], ['c1', 'c2', 'c3'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_SponsorCoincidesWithChapters(self):
        chapters = [
            *self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4']),
            self._sponsor_chapter(10, 30, 'sponsor')]
        expected = self._chapters([10, 30, 40], ['c1', '[SponsorBlock]: Sponsor', 'c4'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_CutCoincidesWithChapters(self):
        cuts = [self._chapter(10, 30, remove=True)]
        chapters = self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4']) + cuts
        expected = self._chapters([10, 20], ['c1', 'c4'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_SponsorsAtVideoBoundaries(self):
        chapters = [
            *self._chapters([20, 40, 60], ['c1', 'c2', 'c3']),
            self._sponsor_chapter(0, 10, 'intro'), self._sponsor_chapter(50, 60, 'outro')]
        expected = self._chapters(
            [10, 20, 40, 50, 60],
            ['[SponsorBlock]: Intermission/Intro Animation', 'c1', 'c2', 'c3',
             '[SponsorBlock]: Endcards/Credits'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_CutsAtVideoBoundaries(self):
        cuts = [self._chapter(0, 10, remove=True), self._chapter(50, 60, remove=True)]
        chapters = self._chapters([20, 40, 60], ['c1', 'c2', 'c3']) + cuts
        expected = self._chapters([10, 30, 40], ['c1', 'c2', 'c3'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_SponsorsOverlapChaptersAtVideoBoundaries(self):
        chapters = [
            *self._chapters([10, 40, 50], ['c1', 'c2', 'c3']),
            self._sponsor_chapter(0, 20, 'intro'), self._sponsor_chapter(30, 50, 'outro')]
        expected = self._chapters(
            [20, 30, 50],
            ['[SponsorBlock]: Intermission/Intro Animation', 'c2', '[SponsorBlock]: Endcards/Credits'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_CutsOverlapChaptersAtVideoBoundaries(self):
        cuts = [self._chapter(0, 20, remove=True), self._chapter(30, 50, remove=True)]
        chapters = self._chapters([10, 40, 50], ['c1', 'c2', 'c3']) + cuts
        expected = self._chapters([10], ['c2'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_EverythingSponsored(self):
        chapters = [
            *self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4']),
            self._sponsor_chapter(0, 20, 'intro'), self._sponsor_chapter(20, 40, 'outro')]
        expected = self._chapters(
            [20, 40],
            ['[SponsorBlock]: Intermission/Intro Animation', '[SponsorBlock]: Endcards/Credits'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_EverythingCut(self):
        cuts = [self._chapter(0, 20, remove=True), self._chapter(20, 40, remove=True)]
        chapters = self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4']) + cuts
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, [], [self._chapter(0, 40, remove=True)])

    def test_remove_marked_arrange_sponsors_TinyChaptersInTheOriginalArePreserved(self):
        chapters = self._chapters([0.1, 0.2, 0.3, 0.4], ['c1', 'c2', 'c3', 'c4'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, chapters, [])

    def test_remove_marked_arrange_sponsors_TinySponsorsAreIgnored(self):
        chapters = [self._sponsor_chapter(0, 0.1, 'intro'), self._chapter(0.1, 0.2, 'c1'),
                    self._sponsor_chapter(0.2, 0.3, 'sponsor'), self._chapter(0.3, 0.4, 'c2'),
                    self._sponsor_chapter(0.4, 0.5, 'outro')]
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, self._chapters([0.3, 0.5], ['c1', 'c2']), [])

    def test_remove_marked_arrange_sponsors_TinyChaptersResultingFromCutsAreIgnored(self):
        cuts = [self._chapter(1.5, 2.5, remove=True)]
        chapters = self._chapters([2, 3, 3.5], ['c1', 'c2', 'c3']) + cuts
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, self._chapters([2, 2.5], ['c1', 'c3']), cuts)

    def test_remove_marked_arrange_sponsors_SingleTinyChapterIsPreserved(self):
        cuts = [self._chapter(0.5, 2, remove=True)]
        chapters = self._chapters([2], ['c']) + cuts
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, self._chapters([0.5], ['c']), cuts)

    def test_remove_marked_arrange_sponsors_TinyChapterAtTheStartPrependedToTheNext(self):
        cuts = [self._chapter(0.5, 2, remove=True)]
        chapters = self._chapters([2, 4], ['c1', 'c2']) + cuts
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, self._chapters([2.5], ['c2']), cuts)

    def test_remove_marked_arrange_sponsors_TinyChaptersResultingFromSponsorOverlapAreIgnored(self):
        chapters = [
            *self._chapters([1, 3, 4], ['c1', 'c2', 'c3']),
            self._sponsor_chapter(1.5, 2.5, 'sponsor')]
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, self._chapters([1.5, 2.5, 4], ['c1', '[SponsorBlock]: Sponsor', 'c3']), [])

    def test_remove_marked_arrange_sponsors_TinySponsorsOverlapsAreIgnored(self):
        chapters = [
            *self._chapters([2, 3, 5], ['c1', 'c2', 'c3']),
            self._sponsor_chapter(1, 3, 'sponsor'),
            self._sponsor_chapter(2.5, 4, 'selfpromo')]
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, self._chapters([1, 3, 4, 5], [
                'c1', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Unpaid/Self Promotion', 'c3']), [])

    def test_remove_marked_arrange_sponsors_TinySponsorsPrependedToTheNextSponsor(self):
        chapters = [
            *self._chapters([4], ['c']),
            self._sponsor_chapter(1.5, 2, 'sponsor'),
            self._sponsor_chapter(2, 4, 'selfpromo')]
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, self._chapters([1.5, 4], ['c', '[SponsorBlock]: Unpaid/Self Promotion']), [])

    def test_remove_marked_arrange_sponsors_SmallestSponsorInTheOverlapGetsNamed(self):
        self._pp._sponsorblock_chapter_title = '[SponsorBlock]: %(name)s'
        chapters = [
            *self._chapters([10], ['c']),
            self._sponsor_chapter(2, 8, 'sponsor'),
            self._sponsor_chapter(4, 6, 'selfpromo')]
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, self._chapters([2, 4, 6, 8, 10], [
                'c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Unpaid/Self Promotion',
                '[SponsorBlock]: Sponsor', 'c',
            ]), [])

    def test_make_concat_opts_CommonCase(self):
        sponsor_chapters = [self._chapter(1, 2, 's1'), self._chapter(10, 20, 's2')]
        expected = '''ffconcat version 1.0
file 'file:test'
outpoint 1.000000
file 'file:test'
inpoint 2.000000
outpoint 10.000000
file 'file:test'
inpoint 20.000000
'''
        opts = self._pp._make_concat_opts(sponsor_chapters, 30)
        self.assertEqual(expected, ''.join(self._pp._concat_spec(['test'] * len(opts), opts)))

    def test_make_concat_opts_NoZeroDurationChunkAtVideoStart(self):
        sponsor_chapters = [self._chapter(0, 1, 's1'), self._chapter(10, 20, 's2')]
        expected = '''ffconcat version 1.0
file 'file:test'
inpoint 1.000000
outpoint 10.000000
file 'file:test'
inpoint 20.000000
'''
        opts = self._pp._make_concat_opts(sponsor_chapters, 30)
        self.assertEqual(expected, ''.join(self._pp._concat_spec(['test'] * len(opts), opts)))

    def test_make_concat_opts_NoZeroDurationChunkAtVideoEnd(self):
        sponsor_chapters = [self._chapter(1, 2, 's1'), self._chapter(10, 20, 's2')]
        expected = '''ffconcat version 1.0
file 'file:test'
outpoint 1.000000
file 'file:test'
inpoint 2.000000
outpoint 10.000000
'''
        opts = self._pp._make_concat_opts(sponsor_chapters, 20)
        self.assertEqual(expected, ''.join(self._pp._concat_spec(['test'] * len(opts), opts)))

    def test_quote_for_concat_RunsOfQuotes(self):
        self.assertEqual(
            r"'special '\'' '\'\''characters'\'\'\''galore'",
            self._pp._quote_for_ffmpeg("special ' ''characters'''galore"))

    def test_quote_for_concat_QuotesAtStart(self):
        self.assertEqual(
            r"\'\'\''special '\'' characters '\'' galore'",
            self._pp._quote_for_ffmpeg("'''special ' characters ' galore"))

    def test_quote_for_concat_QuotesAtEnd(self):
        self.assertEqual(
            r"'special '\'' characters '\'' galore'\'\'\'",
            self._pp._quote_for_ffmpeg("special ' characters ' galore'''"))


if __name__ == '__main__':
    unittest.main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
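The ModifyChaptersPP tests above model chapters as plain dicts keyed by start_time/end_time, built from a list of chapter end times. A minimal standalone sketch of that construction, mirroring the _chapters() helper (the function name here is illustrative):

def chapters_from_ends(ends, titles):
    # Consecutive chapters share boundaries: each end time becomes
    # the next chapter's start time; the first chapter starts at 0
    assert len(ends) == len(titles)
    start, chapters = 0, []
    for end, title in zip(ends, titles):
        chapters.append({'start_time': start, 'end_time': end, 'title': title})
        start = end
    return chapters


print(chapters_from_ends([10, 20], ['c1', 'c2']))
# [{'start_time': 0, 'end_time': 10, 'title': 'c1'},
#  {'start_time': 10, 'end_time': 20, 'title': 'c2'}]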
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_YoutubeDL.py
test/test_YoutubeDL.py
#!/usr/bin/env python3

# Allow direct execution
import os
import sys
import unittest
from unittest.mock import patch

from yt_dlp.globals import all_plugins_loaded

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import contextlib
import copy
import json

from test.helper import FakeYDL, assertRegexpMatches, try_rm
from yt_dlp import YoutubeDL
from yt_dlp.extractor.common import InfoExtractor
from yt_dlp.postprocessor.common import PostProcessor
from yt_dlp.utils import (
    ExtractorError,
    LazyList,
    OnDemandPagedList,
    int_or_none,
    match_filter_func,
)
from yt_dlp.utils.traversal import traverse_obj

TEST_URL = 'http://localhost/sample.mp4'


class YDL(FakeYDL):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.downloaded_info_dicts = []
        self.msgs = []

    def process_info(self, info_dict):
        self.downloaded_info_dicts.append(info_dict.copy())

    def to_screen(self, msg, *args, **kwargs):
        self.msgs.append(msg)

    def dl(self, *args, **kwargs):
        assert False, 'Downloader must not be invoked for test_YoutubeDL'


def _make_result(formats, **kwargs):
    res = {
        'formats': formats,
        'id': 'testid',
        'title': 'testttitle',
        'extractor': 'testex',
        'extractor_key': 'TestEx',
        'webpage_url': 'http://example.com/watch?v=shenanigans',
    }
    res.update(**kwargs)
    return res


class TestFormatSelection(unittest.TestCase):
    def test_prefer_free_formats(self):
        # Same resolution => download webm
        ydl = YDL()
        ydl.params['prefer_free_formats'] = True
        formats = [
            {'ext': 'webm', 'height': 460, 'url': TEST_URL},
            {'ext': 'mp4', 'height': 460, 'url': TEST_URL},
        ]
        info_dict = _make_result(formats)
        ydl.sort_formats(info_dict)
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['ext'], 'webm')

        # Different resolution => download best quality (mp4)
        ydl = YDL()
        ydl.params['prefer_free_formats'] = True
        formats = [
            {'ext': 'webm', 'height': 720, 'url': TEST_URL},
            {'ext': 'mp4', 'height': 1080, 'url': TEST_URL},
        ]
        info_dict['formats'] = formats
        ydl.sort_formats(info_dict)
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['ext'], 'mp4')

        # No prefer_free_formats => prefer mp4 and webm
        ydl = YDL()
        ydl.params['prefer_free_formats'] = False
        formats = [
            {'ext': 'webm', 'height': 720, 'url': TEST_URL},
            {'ext': 'mp4', 'height': 720, 'url': TEST_URL},
            {'ext': 'flv', 'height': 720, 'url': TEST_URL},
        ]
        info_dict['formats'] = formats
        ydl.sort_formats(info_dict)
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['ext'], 'mp4')

        ydl = YDL()
        ydl.params['prefer_free_formats'] = False
        formats = [
            {'ext': 'flv', 'height': 720, 'url': TEST_URL},
            {'ext': 'webm', 'height': 720, 'url': TEST_URL},
        ]
        info_dict['formats'] = formats
        ydl.sort_formats(info_dict)
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['ext'], 'webm')

    def test_format_selection(self):
        formats = [
            {'format_id': '35', 'ext': 'mp4', 'preference': 0, 'url': TEST_URL},
            {'format_id': 'example-with-dashes', 'ext': 'webm', 'preference': 1, 'url': TEST_URL},
            {'format_id': '45', 'ext': 'webm', 'preference': 2, 'url': TEST_URL},
            {'format_id': '47', 'ext': 'webm', 'preference': 3, 'url': TEST_URL},
            {'format_id': '2', 'ext': 'flv', 'preference': 4, 'url': TEST_URL},
        ]
        info_dict = _make_result(formats)

        def test(inp, *expected, multi=False):
            ydl = YDL({
                'format': inp,
                'allow_multiple_video_streams': multi,
                'allow_multiple_audio_streams': multi,
            })
            ydl.process_ie_result(info_dict.copy())
            downloaded = [x['format_id'] for x in ydl.downloaded_info_dicts]
            self.assertEqual(downloaded, list(expected))

        test('20/47', '47')
        test('20/71/worst', '35')
        test(None, '2')
        test('webm/mp4', '47')
        test('3gp/40/mp4', '35')
        test('example-with-dashes', 'example-with-dashes')
        test('all', '2', '47', '45', 'example-with-dashes', '35')
        test('mergeall', '2+47+45+example-with-dashes+35', multi=True)
        # See: https://github.com/yt-dlp/yt-dlp/pulls/8797
        test('7_a/worst', '35')

    def test_format_selection_audio(self):
        formats = [
            {'format_id': 'audio-low', 'ext': 'webm', 'preference': 1, 'vcodec': 'none', 'url': TEST_URL},
            {'format_id': 'audio-mid', 'ext': 'webm', 'preference': 2, 'vcodec': 'none', 'url': TEST_URL},
            {'format_id': 'audio-high', 'ext': 'flv', 'preference': 3, 'vcodec': 'none', 'url': TEST_URL},
            {'format_id': 'vid', 'ext': 'mp4', 'preference': 4, 'url': TEST_URL},
        ]
        info_dict = _make_result(formats)

        ydl = YDL({'format': 'bestaudio'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'audio-high')

        ydl = YDL({'format': 'worstaudio'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'audio-low')

        formats = [
            {'format_id': 'vid-low', 'ext': 'mp4', 'preference': 1, 'url': TEST_URL},
            {'format_id': 'vid-high', 'ext': 'mp4', 'preference': 2, 'url': TEST_URL},
        ]
        info_dict = _make_result(formats)

        ydl = YDL({'format': 'bestaudio/worstaudio/best'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'vid-high')

    def test_format_selection_audio_exts(self):
        formats = [
            {'format_id': 'mp3-64', 'ext': 'mp3', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'},
            {'format_id': 'ogg-64', 'ext': 'ogg', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'},
            {'format_id': 'aac-64', 'ext': 'aac', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'},
            {'format_id': 'mp3-32', 'ext': 'mp3', 'abr': 32, 'url': 'http://_', 'vcodec': 'none'},
            {'format_id': 'aac-32', 'ext': 'aac', 'abr': 32, 'url': 'http://_', 'vcodec': 'none'},
        ]

        info_dict = _make_result(formats)
        ydl = YDL({'format': 'best', 'format_sort': ['abr', 'ext']})
        ydl.sort_formats(info_dict)
        ydl.process_ie_result(copy.deepcopy(info_dict))
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'aac-64')

        ydl = YDL({'format': 'mp3'})
        ydl.sort_formats(info_dict)
        ydl.process_ie_result(copy.deepcopy(info_dict))
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'mp3-64')

        ydl = YDL({'prefer_free_formats': True, 'format_sort': ['abr', 'ext']})
        ydl.sort_formats(info_dict)
        ydl.process_ie_result(copy.deepcopy(info_dict))
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'ogg-64')

    def test_format_selection_video(self):
        formats = [
            {'format_id': 'dash-video-low', 'ext': 'mp4', 'preference': 1, 'acodec': 'none', 'url': TEST_URL},
            {'format_id': 'dash-video-high', 'ext': 'mp4', 'preference': 2, 'acodec': 'none', 'url': TEST_URL},
            {'format_id': 'vid', 'ext': 'mp4', 'preference': 3, 'url': TEST_URL},
        ]
        info_dict = _make_result(formats)

        ydl = YDL({'format': 'bestvideo'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'dash-video-high')

        ydl = YDL({'format': 'worstvideo'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'dash-video-low')

        ydl = YDL({'format': 'bestvideo[format_id^=dash][format_id$=low]'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'dash-video-low')

        formats = [
            {'format_id': 'vid-vcodec-dot', 'ext': 'mp4', 'preference': 1, 'vcodec': 'avc1.123456', 'acodec': 'none', 'url': TEST_URL},
        ]
        info_dict = _make_result(formats)

        ydl = YDL({'format': 'bestvideo[vcodec=avc1.123456]'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'vid-vcodec-dot')

    def test_format_selection_by_vcodec_sort(self):
        formats = [
            {'format_id': 'av1-format', 'ext': 'mp4', 'vcodec': 'av1', 'acodec': 'none', 'url': TEST_URL},
            {'format_id': 'vp9-hdr-format', 'ext': 'mp4', 'vcodec': 'vp09.02.50.10.01.09.18.09.00', 'acodec': 'none', 'url': TEST_URL},
            {'format_id': 'vp9-sdr-format', 'ext': 'mp4', 'vcodec': 'vp09.00.50.08', 'acodec': 'none', 'url': TEST_URL},
            {'format_id': 'h265-format', 'ext': 'mp4', 'vcodec': 'h265', 'acodec': 'none', 'url': TEST_URL},
        ]
        info_dict = _make_result(formats)

        ydl = YDL({'format': 'bestvideo', 'format_sort': ['vcodec:vp9.2']})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'vp9-hdr-format')

        ydl = YDL({'format': 'bestvideo', 'format_sort': ['vcodec:vp9']})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'vp9-sdr-format')

        ydl = YDL({'format': 'bestvideo', 'format_sort': ['+vcodec:vp9.2']})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'vp9-hdr-format')

        ydl = YDL({'format': 'bestvideo', 'format_sort': ['+vcodec:vp9']})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'vp9-sdr-format')

    def test_format_selection_string_ops(self):
        formats = [
            {'format_id': 'abc-cba', 'ext': 'mp4', 'url': TEST_URL},
            {'format_id': 'zxc-cxz', 'ext': 'webm', 'url': TEST_URL},
        ]
        info_dict = _make_result(formats)

        # equals (=)
        ydl = YDL({'format': '[format_id=abc-cba]'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'abc-cba')

        # does not equal (!=)
        ydl = YDL({'format': '[format_id!=abc-cba]'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'zxc-cxz')

        ydl = YDL({'format': '[format_id!=abc-cba][format_id!=zxc-cxz]'})
        self.assertRaises(ExtractorError, ydl.process_ie_result, info_dict.copy())

        # starts with (^=)
        ydl = YDL({'format': '[format_id^=abc]'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'abc-cba')

        # does not start with (!^=)
        ydl = YDL({'format': '[format_id!^=abc]'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'zxc-cxz')

        ydl = YDL({'format': '[format_id!^=abc][format_id!^=zxc]'})
        self.assertRaises(ExtractorError, ydl.process_ie_result, info_dict.copy())

        # ends with ($=)
        ydl = YDL({'format': '[format_id$=cba]'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'abc-cba')

        # does not end with (!$=)
        ydl = YDL({'format': '[format_id!$=cba]'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'zxc-cxz')

        ydl = YDL({'format': '[format_id!$=cba][format_id!$=cxz]'})
        self.assertRaises(ExtractorError, ydl.process_ie_result, info_dict.copy())

        # contains (*=)
        ydl = YDL({'format': '[format_id*=bc-cb]'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'abc-cba')

        # does not contain (!*=)
        ydl = YDL({'format': '[format_id!*=bc-cb]'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'zxc-cxz')

        ydl = YDL({'format': '[format_id!*=abc][format_id!*=zxc]'})
        self.assertRaises(ExtractorError, ydl.process_ie_result, info_dict.copy())

        ydl = YDL({'format': '[format_id!*=-]'})
        self.assertRaises(ExtractorError, ydl.process_ie_result, info_dict.copy())

    def test_audio_only_extractor_format_selection(self):
        # For extractors with incomplete formats (all formats are audio-only or
        # video-only) best and worst should fallback to corresponding best/worst
        # video-only or audio-only formats (as per
        # https://github.com/ytdl-org/youtube-dl/pull/5556)
        formats = [
            {'format_id': 'low', 'ext': 'mp3', 'preference': 1, 'vcodec': 'none', 'url': TEST_URL},
            {'format_id': 'high', 'ext': 'mp3', 'preference': 2, 'vcodec': 'none', 'url': TEST_URL},
        ]
        info_dict = _make_result(formats)

        ydl = YDL({'format': 'best'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'high')

        ydl = YDL({'format': 'worst'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'low')

    def test_format_not_available(self):
        formats = [
            {'format_id': 'regular', 'ext': 'mp4', 'height': 360, 'url': TEST_URL},
            {'format_id': 'video', 'ext': 'mp4', 'height': 720, 'acodec': 'none', 'url': TEST_URL},
        ]
        info_dict = _make_result(formats)

        # This must fail since complete video-audio format does not match filter
        # and extractor does not provide incomplete only formats (i.e. only
        # video-only or audio-only).
        ydl = YDL({'format': 'best[height>360]'})
        self.assertRaises(ExtractorError, ydl.process_ie_result, info_dict.copy())

    def test_format_selection_issue_10083(self):
        # See https://github.com/ytdl-org/youtube-dl/issues/10083
        formats = [
            {'format_id': 'regular', 'height': 360, 'url': TEST_URL},
            {'format_id': 'video', 'height': 720, 'acodec': 'none', 'url': TEST_URL},
            {'format_id': 'audio', 'vcodec': 'none', 'url': TEST_URL},
        ]
        info_dict = _make_result(formats)

        ydl = YDL({'format': 'best[height>360]/bestvideo[height>360]+bestaudio'})
        ydl.process_ie_result(info_dict.copy())
        self.assertEqual(ydl.downloaded_info_dicts[0]['format_id'], 'video+audio')

    def test_invalid_format_specs(self):
        def assert_syntax_error(format_spec):
            self.assertRaises(SyntaxError, YDL, {'format': format_spec})

        assert_syntax_error('bestvideo,,best')
        assert_syntax_error('+bestaudio')
        assert_syntax_error('bestvideo+')
        assert_syntax_error('/')
        assert_syntax_error('[720<height]')

    def test_format_filtering(self):
        formats = [
            {'format_id': 'A', 'filesize': 500, 'width': 1000, 'aspect_ratio': 1.0},
            {'format_id': 'B', 'filesize': 1000, 'width': 500, 'aspect_ratio': 1.33},
            {'format_id': 'C', 'filesize': 1000, 'width': 400, 'aspect_ratio': 1.5},
            {'format_id': 'D', 'filesize': 2000, 'width': 600, 'aspect_ratio': 1.78},
            {'format_id': 'E', 'filesize': 3000, 'aspect_ratio': 0.56},
            {'format_id': 'F'},
            {'format_id': 'G', 'filesize': 1000000},
        ]
        for f in formats:
            f['url'] = 'http://_/'
            f['ext'] = 'unknown'
        info_dict = _make_result(formats, _format_sort_fields=('id', ))

        ydl = YDL({'format': 'best[filesize<3000]'})
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'D')

        ydl = YDL({'format': 'best[filesize<=3000]'})
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'E')

        ydl = YDL({'format': 'best[filesize <= ? 3000]'})
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'F')

        ydl = YDL({'format': 'best [filesize = 1000] [width>450]'})
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'B')

        ydl = YDL({'format': 'best [filesize = 1000] [width!=450]'})
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'C')

        ydl = YDL({'format': '[filesize>?1]'})
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'G')

        ydl = YDL({'format': '[filesize<1M]'})
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'E')

        ydl = YDL({'format': '[filesize<1MiB]'})
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'G')

        ydl = YDL({'format': 'all[width>=400][width<=600]'})
        ydl.process_ie_result(info_dict)
        downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
        self.assertEqual(downloaded_ids, ['D', 'C', 'B'])

        ydl = YDL({'format': 'best[height<40]'})
        with contextlib.suppress(ExtractorError):
            ydl.process_ie_result(info_dict)
        self.assertEqual(ydl.downloaded_info_dicts, [])

        ydl = YDL({'format': 'best[aspect_ratio=1]'})
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'A')

        ydl = YDL({'format': 'all[aspect_ratio > 1.00]'})
        ydl.process_ie_result(info_dict)
        downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
        self.assertEqual(downloaded_ids, ['D', 'C', 'B'])

        ydl = YDL({'format': 'all[aspect_ratio < 1.00]'})
        ydl.process_ie_result(info_dict)
        downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
        self.assertEqual(downloaded_ids, ['E'])

        ydl = YDL({'format': 'best[aspect_ratio=1.5]'})
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'C')

        ydl = YDL({'format': 'all[aspect_ratio!=1]'})
        ydl.process_ie_result(info_dict)
        downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
        self.assertEqual(downloaded_ids, ['E', 'D', 'C', 'B'])

    @patch('yt_dlp.postprocessor.ffmpeg.FFmpegMergerPP.available', False)
    def test_default_format_spec_without_ffmpeg(self):
        ydl = YDL({})
        self.assertEqual(ydl._default_format_spec({}), 'best/bestvideo+bestaudio')

        ydl = YDL({'simulate': True})
        self.assertEqual(ydl._default_format_spec({}), 'best/bestvideo+bestaudio')

        ydl = YDL({})
        self.assertEqual(ydl._default_format_spec({'is_live': True}), 'best/bestvideo+bestaudio')

        ydl = YDL({'simulate': True})
        self.assertEqual(ydl._default_format_spec({'is_live': True}), 'best/bestvideo+bestaudio')

        ydl = YDL({'outtmpl': '-'})
        self.assertEqual(ydl._default_format_spec({}), 'best/bestvideo+bestaudio')

        ydl = YDL({})
        self.assertEqual(ydl._default_format_spec({}), 'best/bestvideo+bestaudio')
        self.assertEqual(ydl._default_format_spec({'is_live': True}), 'best/bestvideo+bestaudio')

    @patch('yt_dlp.postprocessor.ffmpeg.FFmpegMergerPP.available', True)
    @patch('yt_dlp.postprocessor.ffmpeg.FFmpegMergerPP.can_merge', lambda _: True)
    def test_default_format_spec_with_ffmpeg(self):
        ydl = YDL({})
        self.assertEqual(ydl._default_format_spec({}), 'bestvideo*+bestaudio/best')

        ydl = YDL({'simulate': True})
        self.assertEqual(ydl._default_format_spec({}), 'bestvideo*+bestaudio/best')

        ydl = YDL({})
        self.assertEqual(ydl._default_format_spec({'is_live': True}), 'best/bestvideo+bestaudio')

        ydl = YDL({'simulate': True})
        self.assertEqual(ydl._default_format_spec({'is_live': True}), 'best/bestvideo+bestaudio')

        ydl = YDL({'outtmpl': '-'})
        self.assertEqual(ydl._default_format_spec({}), 'best/bestvideo+bestaudio')

        ydl = YDL({})
        self.assertEqual(ydl._default_format_spec({}), 'bestvideo*+bestaudio/best')
        self.assertEqual(ydl._default_format_spec({'is_live': True}), 'best/bestvideo+bestaudio')


class TestYoutubeDL(unittest.TestCase):
    def test_subtitles(self):
        def s_formats(lang, autocaption=False):
            return [{
                'ext': ext,
                'url': f'http://localhost/video.{lang}.{ext}',
                '_auto': autocaption,
            } for ext in ['vtt', 'srt', 'ass']]

        subtitles = {l: s_formats(l) for l in ['en', 'fr', 'es']}
        auto_captions = {l: s_formats(l, True) for l in ['it', 'pt', 'es']}
        info_dict = {
            'id': 'test',
            'title': 'Test',
            'url': 'http://localhost/video.mp4',
            'subtitles': subtitles,
            'automatic_captions': auto_captions,
            'extractor': 'TEST',
            'webpage_url': 'http://example.com/watch?v=shenanigans',
        }

        def get_info(params={}):
            params.setdefault('simulate', True)
            ydl = YDL(params)
            ydl.report_warning = lambda *args, **kargs: None
            return ydl.process_video_result(info_dict, download=False)

        result = get_info()
        self.assertFalse(result.get('requested_subtitles'))
        self.assertEqual(result['subtitles'], subtitles)
        self.assertEqual(result['automatic_captions'], auto_captions)

        result = get_info({'writesubtitles': True})
        subs = result['requested_subtitles']
        self.assertTrue(subs)
        self.assertEqual(set(subs.keys()), {'en'})
        self.assertTrue(subs['en'].get('data') is None)
        self.assertEqual(subs['en']['ext'], 'ass')

        result = get_info({'writesubtitles': True, 'subtitlesformat': 'foo/srt'})
        subs = result['requested_subtitles']
        self.assertEqual(subs['en']['ext'], 'srt')

        result = get_info({'writesubtitles': True, 'subtitleslangs': ['es', 'fr', 'it']})
        subs = result['requested_subtitles']
        self.assertTrue(subs)
        self.assertEqual(set(subs.keys()), {'es', 'fr'})

        result = get_info({'writesubtitles': True, 'subtitleslangs': ['all', '-en']})
        subs = result['requested_subtitles']
        self.assertTrue(subs)
        self.assertEqual(set(subs.keys()), {'es', 'fr'})

        result = get_info({'writesubtitles': True, 'subtitleslangs': ['en', 'fr', '-en']})
        subs = result['requested_subtitles']
        self.assertTrue(subs)
        self.assertEqual(set(subs.keys()), {'fr'})

        result = get_info({'writesubtitles': True, 'subtitleslangs': ['-en', 'en']})
        subs = result['requested_subtitles']
        self.assertTrue(subs)
        self.assertEqual(set(subs.keys()), {'en'})

        result = get_info({'writesubtitles': True, 'subtitleslangs': ['e.+']})
        subs = result['requested_subtitles']
        self.assertTrue(subs)
        self.assertEqual(set(subs.keys()), {'es', 'en'})

        result = get_info({'writesubtitles': True, 'writeautomaticsub': True, 'subtitleslangs': ['es', 'pt']})
        subs = result['requested_subtitles']
        self.assertTrue(subs)
        self.assertEqual(set(subs.keys()), {'es', 'pt'})
        self.assertFalse(subs['es']['_auto'])
        self.assertTrue(subs['pt']['_auto'])

        result = get_info({'writeautomaticsub': True, 'subtitleslangs': ['es', 'pt']})
        subs = result['requested_subtitles']
        self.assertTrue(subs)
        self.assertEqual(set(subs.keys()), {'es', 'pt'})
        self.assertTrue(subs['es']['_auto'])
        self.assertTrue(subs['pt']['_auto'])

    def test_add_extra_info(self):
        test_dict = {
            'extractor': 'Foo',
        }
        extra_info = {
            'extractor': 'Bar',
            'playlist': 'funny videos',
        }
        YDL.add_extra_info(test_dict, extra_info)
        self.assertEqual(test_dict['extractor'], 'Foo')
        self.assertEqual(test_dict['playlist'], 'funny videos')

    outtmpl_info = {
        'id': '1234',
        'ext': 'mp4',
        'width': None,
        'height': 1080,
        'filesize': 1024,
        'title1': '$PATH',
        'title2': '%PATH%',
        'title3': 'foo/bar\\test',
        'title4': 'foo "bar" test',
        'title5': 'áéí 𝐀',
        'timestamp': 1618488000,
        'duration': 100000,
        'playlist_index': 1,
        'playlist_autonumber': 2,
        '__last_playlist_index': 100,
        'n_entries': 10,
        'formats': [
            {'id': 'id 1', 'height': 1080, 'width': 1920},
            {'id': 'id 2', 'height': 720},
            {'id': 'id 3'},
        ],
    }

    def test_prepare_outtmpl_and_filename(self):
        def test(tmpl, expected, *, info=None, **params):
            params['outtmpl'] = tmpl
            ydl = FakeYDL(params)
            ydl._num_downloads = 1
            self.assertEqual(ydl.validate_outtmpl(tmpl), None)

            out = ydl.evaluate_outtmpl(tmpl, info or self.outtmpl_info)
            fname = ydl.prepare_filename(info or self.outtmpl_info)

            if not isinstance(expected, (list, tuple)):
                expected = (expected, expected)
            for (name, got), expect in zip((('outtmpl', out), ('filename', fname)), expected, strict=True):
                if callable(expect):
                    self.assertTrue(expect(got), f'Wrong {name} from {tmpl}')
                elif expect is not None:
                    self.assertEqual(got, expect, f'Wrong {name} from {tmpl}')

        # Side-effects
        original_infodict = dict(self.outtmpl_info)
        test('foo.bar', 'foo.bar')
        original_infodict['epoch'] = self.outtmpl_info.get('epoch')
        self.assertTrue(isinstance(original_infodict['epoch'], int))
        test('%(epoch)d', int_or_none)
        self.assertEqual(original_infodict, self.outtmpl_info)

        # Auto-generated fields
        test('%(id)s.%(ext)s', '1234.mp4')
        test('%(duration_string)s', ('27:46:40', '27-46-40'))
        test('%(resolution)s', '1080p')
        test('%(playlist_index|)s', '001')
        test('%(playlist_index&{}!)s', '1!')
        test('%(playlist_autonumber)s', '02')
        test('%(autonumber)s', '00001')
        test('%(autonumber+2)03d', '005', autonumber_start=3)
        test('%(autonumber)s', '001', autonumber_size=3)

        # Escaping %
        test('%', '%')
        test('%%', '%')
        test('%%%%', '%%')
        test('%s', '%s')
        test('%%%s', '%%s')
        test('%d', '%d')
        test('%abc%', '%abc%')
        test('%%(width)06d.%(ext)s', '%(width)06d.mp4')
        test('%%%(height)s', '%1080')
        test('%(width)06d.%(ext)s', 'NA.mp4')
        test('%(width)06d.%%(ext)s', 'NA.%(ext)s')
        test('%%(width)06d.%(ext)s', '%(width)06d.mp4')

        # Sanitization options
        test('%(title3)s', (None, 'foo⧸bar⧹test'))
        test('%(title5)s', (None, 'aei_A'), restrictfilenames=True)
        test('%(title3)s', (None, 'foo_bar_test'), windowsfilenames=False, restrictfilenames=True)
        if sys.platform != 'win32':
            test('%(title3)s', (None, 'foo⧸bar\\test'), windowsfilenames=False)

        # ID sanitization
        test('%(id)s', '_abcd', info={'id': '_abcd'})
        test('%(some_id)s', '_abcd', info={'some_id': '_abcd'})
        test('%(formats.0.id)s', '_abcd', info={'formats': [{'id': '_abcd'}]})
        test('%(id)s', '-abcd', info={'id': '-abcd'})
        test('%(id)s', '.abcd', info={'id': '.abcd'})
        test('%(id)s', 'ab__cd', info={'id': 'ab__cd'})
        test('%(id)s', ('ab:cd', 'ab：cd'), info={'id': 'ab:cd'})
        test('%(id.0)s', '-', info={'id': '--'})

        # Invalid templates
        self.assertTrue(isinstance(YoutubeDL.validate_outtmpl('%(title)'), ValueError))
        test('%(invalid@tmpl|def)s', 'none', outtmpl_na_placeholder='none')
        test('%(..)s', 'NA')
        test('%(formats.{id)s', 'NA')

        # Entire info_dict
        def expect_same_infodict(out):
            got_dict = json.loads(out)
            for info_field, expected in self.outtmpl_info.items():
                self.assertEqual(got_dict.get(info_field), expected, info_field)
            return True

        test('%()j', (expect_same_infodict, None))

        # NA placeholder
        NA_TEST_OUTTMPL = '%(uploader_date)s-%(width)d-%(x|def)s-%(id)s.%(ext)s'
        test(NA_TEST_OUTTMPL, 'NA-NA-def-1234.mp4')
        test(NA_TEST_OUTTMPL, 'none-none-def-1234.mp4', outtmpl_na_placeholder='none')
        test(NA_TEST_OUTTMPL, '--def-1234.mp4', outtmpl_na_placeholder='')
        test('%(non_existent.0)s', 'NA')

        # String formatting
        FMT_TEST_OUTTMPL = '%%(height)%s.%%(ext)s'
        test(FMT_TEST_OUTTMPL % 's', '1080.mp4')
        test(FMT_TEST_OUTTMPL % 'd', '1080.mp4')
        test(FMT_TEST_OUTTMPL % '6d', '  1080.mp4')
        test(FMT_TEST_OUTTMPL % '-6d', '1080  .mp4')
        test(FMT_TEST_OUTTMPL % '06d', '001080.mp4')
        test(FMT_TEST_OUTTMPL % ' 06d', ' 01080.mp4')
        test(FMT_TEST_OUTTMPL % '   06d', ' 01080.mp4')
        test(FMT_TEST_OUTTMPL % '0 6d', ' 01080.mp4')
        test(FMT_TEST_OUTTMPL % '0   6d', ' 01080.mp4')
        test(FMT_TEST_OUTTMPL % '   0   6d', ' 01080.mp4')

        # Type casting
        test('%(id)d', '1234')
        test('%(height)c', '1')
        test('%(ext)c', 'm')
        test('%(id)d %(id)r', "1234 '1234'")
        test('%(id)r %(height)r', "'1234' 1080")
        test('%(title5)a %(height)a', (R"'\xe1\xe9\xed \U0001d400' 1080", None))
        test('%(ext)s-%(ext|def)d', 'mp4-def')
        test('%(width|0)04d', '0')
        test('a%(width|b)d', 'ab', outtmpl_na_placeholder='none')

        FORMATS = self.outtmpl_info['formats']

        # Custom type casting
        test('%(formats.:.id)l', 'id 1, id 2, id 3')
        test('%(formats.:.id)#l', ('id 1\nid 2\nid 3', 'id 1 id 2 id 3'))
        test('%(ext)l', 'mp4')
        test('%(formats.:.id) 18l', '  id 1, id 2, id 3')
        test('%(formats)j', (json.dumps(FORMATS), None))
        test('%(formats)#j', (
            json.dumps(FORMATS, indent=4),
            json.dumps(FORMATS, indent=4).replace(':', '：').replace('"', '＂').replace('\n', ' '),
        ))
        test('%(title5).3B', 'á')
        test('%(title5)U', 'áéí 𝐀')
        test('%(title5)#U', 'a\u0301e\u0301i\u0301 𝐀')
        test('%(title5)+U', 'áéí A')
        test('%(title5)+#U', 'a\u0301e\u0301i\u0301 A')
        test('%(height)D', '1k')
        test('%(filesize)#D', '1Ki')
        test('%(height)5.2D', ' 1.08k')
        test('%(title4)#S', 'foo_bar_test')
        test('%(title4).10S', ('foo "bar" ', 'foo "bar"' + ('#' if os.name == 'nt' else ' ')))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
true
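The format-selection expressions exercised by TestFormatSelection above ('bestvideo*+bestaudio/best', bracket filters such as '[height>360]', and 'format_sort') are the same strings accepted through the public YoutubeDL options. A hedged usage sketch follows; the URL is a placeholder and a real run needs network access:

from yt_dlp import YoutubeDL

ydl_opts = {
    'format': 'bestvideo*+bestaudio/best',  # same spec strings the tests exercise
    'simulate': True,                       # resolve formats without writing files
}
with YoutubeDL(ydl_opts) as ydl:
    # Placeholder URL for illustration only
    info = ydl.extract_info('https://example.com/watch?v=placeholder', download=False)
    print(info.get('format_id'))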
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_devalue.py
test/test_devalue.py
#!/usr/bin/env python3

# Allow direct execution
import os
import sys

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import datetime as dt
import json
import math
import re
import unittest

from yt_dlp.utils.jslib import devalue

TEST_CASES_EQUALS = [{
    'name': 'int',
    'unparsed': [-42],
    'parsed': -42,
}, {
    'name': 'str',
    'unparsed': ['woo!!!'],
    'parsed': 'woo!!!',
}, {
    'name': 'Number',
    'unparsed': [['Object', 42]],
    'parsed': 42,
}, {
    'name': 'String',
    'unparsed': [['Object', 'yar']],
    'parsed': 'yar',
}, {
    'name': 'Infinity',
    'unparsed': -4,
    'parsed': math.inf,
}, {
    'name': 'negative Infinity',
    'unparsed': -5,
    'parsed': -math.inf,
}, {
    'name': 'negative zero',
    'unparsed': -6,
    'parsed': -0.0,
}, {
    'name': 'RegExp',
    'unparsed': [['RegExp', 'regexp', 'gim']],  # XXX: flags are ignored
    'parsed': re.compile('regexp'),
}, {
    'name': 'Date',
    'unparsed': [['Date', '2001-09-09T01:46:40.000Z']],
    'parsed': dt.datetime.fromtimestamp(1e9, tz=dt.timezone.utc),
}, {
    'name': 'Array',
    'unparsed': [[1, 2, 3], 'a', 'b', 'c'],
    'parsed': ['a', 'b', 'c'],
}, {
    'name': 'Array (empty)',
    'unparsed': [[]],
    'parsed': [],
}, {
    'name': 'Array (sparse)',
    'unparsed': [[-2, 1, -2], 'b'],
    'parsed': [None, 'b', None],
}, {
    'name': 'Object',
    'unparsed': [{'foo': 1, 'x-y': 2}, 'bar', 'z'],
    'parsed': {'foo': 'bar', 'x-y': 'z'},
}, {
    'name': 'Set',
    'unparsed': [['Set', 1, 2, 3], 1, 2, 3],
    'parsed': [1, 2, 3],
}, {
    'name': 'Map',
    'unparsed': [['Map', 1, 2], 'a', 'b'],
    'parsed': [['a', 'b']],
}, {
    'name': 'BigInt',
    'unparsed': [['BigInt', '1']],
    'parsed': 1,
}, {
    'name': 'Uint8Array',
    'unparsed': [['Uint8Array', 'AQID']],
    'parsed': [1, 2, 3],
}, {
    'name': 'ArrayBuffer',
    'unparsed': [['ArrayBuffer', 'AQID']],
    'parsed': [1, 2, 3],
}, {
    'name': 'str (repetition)',
    'unparsed': [[1, 1], 'a string'],
    'parsed': ['a string', 'a string'],
}, {
    'name': 'None (repetition)',
    'unparsed': [[1, 1], None],
    'parsed': [None, None],
}, {
    'name': 'dict (repetition)',
    'unparsed': [[1, 1], {}],
    'parsed': [{}, {}],
}, {
    'name': 'Object without prototype',
    'unparsed': [['null']],
    'parsed': {},
}, {
    'name': 'cross-realm POJO',
    'unparsed': [{}],
    'parsed': {},
}]

TEST_CASES_IS = [{
    'name': 'bool',
    'unparsed': [True],
    'parsed': True,
}, {
    'name': 'Boolean',
    'unparsed': [['Object', False]],
    'parsed': False,
}, {
    'name': 'undefined',
    'unparsed': -1,
    'parsed': None,
}, {
    'name': 'null',
    'unparsed': [None],
    'parsed': None,
}, {
    'name': 'NaN',
    'unparsed': -3,
    'parsed': math.nan,
}]

TEST_CASES_INVALID = [{
    'name': 'empty string',
    'unparsed': '',
    'error': ValueError,
    'pattern': r'expected int or list as input',
}, {
    'name': 'hole',
    'unparsed': -2,
    'error': ValueError,
    'pattern': r'invalid integer input',
}, {
    'name': 'string',
    'unparsed': 'hello',
    'error': ValueError,
    'pattern': r'expected int or list as input',
}, {
    'name': 'number',
    'unparsed': 42,
    'error': ValueError,
    'pattern': r'invalid integer input',
}, {
    'name': 'boolean',
    'unparsed': True,
    'error': ValueError,
    'pattern': r'expected int or list as input',
}, {
    'name': 'null',
    'unparsed': None,
    'error': ValueError,
    'pattern': r'expected int or list as input',
}, {
    'name': 'object',
    'unparsed': {},
    'error': ValueError,
    'pattern': r'expected int or list as input',
}, {
    'name': 'empty array',
    'unparsed': [],
    'error': ValueError,
    'pattern': r'expected a non-empty list as input',
}, {
    'name': 'Python negative indexing',
    'unparsed': [[1, 2, 3, 4, 5, 6, 7, -7], 1, 2, 3, 4, 5, 6, 7],
    'error': IndexError,
    'pattern': r'invalid index: -7',
}]


class TestDevalue(unittest.TestCase):
    def test_devalue_parse_equals(self):
        for tc in TEST_CASES_EQUALS:
            self.assertEqual(devalue.parse(tc['unparsed']), tc['parsed'], tc['name'])

    def test_devalue_parse_is(self):
        for tc in TEST_CASES_IS:
            self.assertIs(devalue.parse(tc['unparsed']), tc['parsed'], tc['name'])

    def test_devalue_parse_invalid(self):
        for tc in TEST_CASES_INVALID:
            with self.assertRaisesRegex(tc['error'], tc['pattern'], msg=tc['name']):
                devalue.parse(tc['unparsed'])

    def test_devalue_parse_cyclical(self):
        name = 'Map (cyclical)'
        result = devalue.parse([['Map', 1, 0], 'self'])
        self.assertEqual(result[0][0], 'self', name)
        self.assertIs(result, result[0][1], name)

        name = 'Set (cyclical)'
        result = devalue.parse([['Set', 0, 1], 42])
        self.assertEqual(result[1], 42, name)
        self.assertIs(result, result[0], name)

        result = devalue.parse([[0]])
        self.assertIs(result, result[0], 'Array (cyclical)')

        name = 'Object (cyclical)'
        result = devalue.parse([{'self': 0}])
        self.assertIs(result, result['self'], name)

        name = 'Object with null prototype (cyclical)'
        result = devalue.parse([['null', 'self', 0]])
        self.assertIs(result, result['self'], name)

        name = 'Objects (cyclical)'
        result = devalue.parse([[1, 2], {'second': 2}, {'first': 1}])
        self.assertIs(result[0], result[1]['first'], name)
        self.assertIs(result[1], result[0]['second'], name)

    def test_devalue_parse_revivers(self):
        self.assertEqual(
            devalue.parse([['indirect', 1], {'a': 2}, 'b'], revivers={'indirect': lambda x: x}),
            {'a': 'b'}, 'revivers (indirect)')

        self.assertEqual(
            devalue.parse([['parse', 1], '{"a":0}'], revivers={'parse': lambda x: json.loads(x)}),
            {'a': 0}, 'revivers (parse)')


if __name__ == '__main__':
    unittest.main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_youtube_lists.py
test/test_youtube_lists.py
#!/usr/bin/env python3

# Allow direct execution
import os
import sys
import unittest

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from test.helper import FakeYDL, is_download_test
from yt_dlp.extractor import YoutubeIE, YoutubeTabIE
from yt_dlp.utils import ExtractorError


@is_download_test
class TestYoutubeLists(unittest.TestCase):
    def assertIsPlaylist(self, info):
        """Make sure the info has '_type' set to 'playlist'"""
        self.assertEqual(info['_type'], 'playlist')

    def test_youtube_playlist_noplaylist(self):
        dl = FakeYDL()
        dl.params['noplaylist'] = True
        ie = YoutubeTabIE(dl)
        result = ie.extract('https://www.youtube.com/watch?v=OmJ-4B-mS-Y&list=PLydZ2Hrp_gPRJViZjLFKaBMgCQOYEEkyp&index=2')
        self.assertEqual(result['_type'], 'url')
        self.assertEqual(result['ie_key'], YoutubeIE.ie_key())
        self.assertEqual(YoutubeIE.extract_id(result['url']), 'OmJ-4B-mS-Y')

    def test_youtube_mix(self):
        dl = FakeYDL()
        ie = YoutubeTabIE(dl)
        result = ie.extract('https://www.youtube.com/watch?v=tyITL_exICo&list=RDCLAK5uy_kLWIr9gv1XLlPbaDS965-Db4TrBoUTxQ8')
        entries = list(result['entries'])
        self.assertTrue(len(entries) >= 50)
        original_video = entries[0]
        self.assertEqual(original_video['id'], 'tyITL_exICo')

    def test_youtube_flat_playlist_extraction(self):
        dl = FakeYDL()
        dl.params['extract_flat'] = True
        ie = YoutubeTabIE(dl)
        result = ie.extract('https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc')
        self.assertIsPlaylist(result)
        entries = list(result['entries'])
        self.assertTrue(len(entries) == 1)
        video = entries[0]
        self.assertEqual(video['_type'], 'url')
        self.assertEqual(video['ie_key'], 'Youtube')
        self.assertEqual(video['id'], 'BaW_jenozKc')
        self.assertEqual(video['url'], 'https://www.youtube.com/watch?v=BaW_jenozKc')
        self.assertEqual(video['title'], 'youtube-dl test video "\'/\\ä↭𝕐')
        self.assertEqual(video['duration'], 10)
        self.assertEqual(video['uploader'], 'Philipp Hagemeister')

    def test_youtube_channel_no_uploads(self):
        dl = FakeYDL()
        dl.params['extract_flat'] = True
        ie = YoutubeTabIE(dl)
        # no uploads
        with self.assertRaisesRegex(ExtractorError, r'no uploads'):
            ie.extract('https://www.youtube.com/channel/UC2yXPzFejc422buOIzn_0CA')

        # no uploads and no UCID given
        with self.assertRaisesRegex(ExtractorError, r'no uploads'):
            ie.extract('https://www.youtube.com/news')


if __name__ == '__main__':
    unittest.main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_utils.py
test/test_utils.py
#!/usr/bin/env python3

# Allow direct execution
import os
import sys

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import contextlib
import datetime as dt
import io
import itertools
import json
import ntpath
import pickle
import subprocess
import unittest
import unittest.mock
import warnings
import xml.etree.ElementTree

from yt_dlp.compat import (
    compat_etree_fromstring,
    compat_HTMLParseError,
)
from yt_dlp.utils import (
    Config,
    DateRange,
    ExtractorError,
    InAdvancePagedList,
    LazyList,
    NO_DEFAULT,
    OnDemandPagedList,
    Popen,
    age_restricted,
    args_to_str,
    base_url,
    caesar,
    clean_html,
    clean_podcast_url,
    cli_bool_option,
    cli_option,
    cli_valueless_option,
    date_from_str,
    datetime_from_str,
    detect_exe_version,
    determine_ext,
    determine_file_encoding,
    dfxp2srt,
    encode_base_n,
    encode_compat_str,
    expand_path,
    extract_attributes,
    extract_basic_auth,
    find_xpath_attr,
    fix_xml_ampersands,
    float_or_none,
    format_bytes,
    get_compatible_ext,
    get_element_by_attribute,
    get_element_by_class,
    get_element_html_by_attribute,
    get_element_html_by_class,
    get_element_text_and_html_by_tag,
    get_elements_by_attribute,
    get_elements_by_class,
    get_elements_html_by_attribute,
    get_elements_html_by_class,
    get_elements_text_and_html_by_attribute,
    int_or_none,
    iri_to_uri,
    is_html,
    js_to_json,
    jwt_decode_hs256,
    jwt_encode,
    limit_length,
    locked_file,
    lowercase_escape,
    match_str,
    merge_dicts,
    mimetype2ext,
    month_by_name,
    multipart_encode,
    ohdave_rsa_encrypt,
    orderedSet,
    parse_age_limit,
    parse_bitrate,
    parse_codecs,
    parse_count,
    parse_dfxp_time_expr,
    parse_duration,
    parse_filesize,
    parse_iso8601,
    parse_qs,
    parse_resolution,
    pkcs1pad,
    prepend_extension,
    read_batch_urls,
    remove_end,
    remove_quotes,
    remove_start,
    render_table,
    replace_extension,
    datetime_round,
    rot47,
    sanitize_filename,
    sanitize_path,
    sanitize_url,
    shell_quote,
    strftime_or_none,
    smuggle_url,
    str_to_int,
    strip_jsonp,
    strip_or_none,
    subtitles_filename,
    timeconvert,
    try_call,
    unescapeHTML,
    unified_strdate,
    unified_timestamp,
    unsmuggle_url,
    update_url_query,
    uppercase_escape,
    url_basename,
    url_or_none,
    urlencode_postdata,
    urljoin,
    urshift,
    variadic,
    version_tuple,
    xpath_attr,
    xpath_element,
    xpath_text,
    xpath_with_ns,
)
from yt_dlp.utils._utils import _UnsafeExtensionError
from yt_dlp.utils.networking import (
    HTTPHeaderDict,
    escape_rfc3986,
    normalize_url,
    remove_dot_segments,
)


class TestUtil(unittest.TestCase):
    def test_timeconvert(self):
        self.assertTrue(timeconvert('') is None)
        self.assertTrue(timeconvert('bougrg') is None)

    def test_sanitize_filename(self):
        self.assertEqual(sanitize_filename(''), '')
        self.assertEqual(sanitize_filename('abc'), 'abc')
        self.assertEqual(sanitize_filename('abc_d-e'), 'abc_d-e')

        self.assertEqual(sanitize_filename('123'), '123')

        self.assertEqual('abc⧸de', sanitize_filename('abc/de'))
        self.assertFalse('/' in sanitize_filename('abc/de///'))

        self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', is_id=False))
        self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', is_id=False))
        self.assertEqual('yes no', sanitize_filename('yes? no', is_id=False))
        self.assertEqual('this - that', sanitize_filename('this: that', is_id=False))

        self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
        aumlaut = 'ä'
        self.assertEqual(sanitize_filename(aumlaut), aumlaut)
        tests = '\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430'
        self.assertEqual(sanitize_filename(tests), tests)

        self.assertEqual(
            sanitize_filename('New World record at 0:12:34'),
            'New World record at 0_12_34')

        self.assertEqual(sanitize_filename('--gasdgf'), '--gasdgf')
        self.assertEqual(sanitize_filename('--gasdgf', is_id=True), '--gasdgf')
        self.assertEqual(sanitize_filename('--gasdgf', is_id=False), '_-gasdgf')
        self.assertEqual(sanitize_filename('.gasdgf'), '.gasdgf')
        self.assertEqual(sanitize_filename('.gasdgf', is_id=True), '.gasdgf')
        self.assertEqual(sanitize_filename('.gasdgf', is_id=False), 'gasdgf')

        forbidden = '"\0\\/'
        for fc in forbidden:
            for fbc in forbidden:
                self.assertTrue(fbc not in sanitize_filename(fc))

    def test_sanitize_filename_restricted(self):
        self.assertEqual(sanitize_filename('abc', restricted=True), 'abc')
        self.assertEqual(sanitize_filename('abc_d-e', restricted=True), 'abc_d-e')

        self.assertEqual(sanitize_filename('123', restricted=True), '123')

        self.assertEqual('abc_de', sanitize_filename('abc/de', restricted=True))
        self.assertFalse('/' in sanitize_filename('abc/de///', restricted=True))

        self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', restricted=True))
        self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', restricted=True))
        self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True))
        self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True))

        tests = 'aäb\u4e2d\u56fd\u7684c'
        self.assertEqual(sanitize_filename(tests, restricted=True), 'aab_c')
        self.assertTrue(sanitize_filename('\xf6', restricted=True) != '')  # No empty filename

        forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#'
        for fc in forbidden:
            for fbc in forbidden:
                self.assertTrue(fbc not in sanitize_filename(fc, restricted=True))

        # Handle a common case more neatly
        self.assertEqual(sanitize_filename('\u5927\u58f0\u5e26 - Song', restricted=True), 'Song')
        self.assertEqual(sanitize_filename('\u603b\u7edf: Speech', restricted=True), 'Speech')
        # .. but make sure the file name is never empty
        self.assertTrue(sanitize_filename('-', restricted=True) != '')
        self.assertTrue(sanitize_filename(':', restricted=True) != '')

        self.assertEqual(sanitize_filename(
            'ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', restricted=True),
            'AAAAAAAECEEEEIIIIDNOOOOOOOOEUUUUUYTHssaaaaaaaeceeeeiiiionooooooooeuuuuuythy')

    def test_sanitize_ids(self):
        self.assertEqual(sanitize_filename('_n_cd26wFpw', is_id=True), '_n_cd26wFpw')
        self.assertEqual(sanitize_filename('_BD_eEpuzXw', is_id=True), '_BD_eEpuzXw')
        self.assertEqual(sanitize_filename('N0Y__7-UOdI', is_id=True), 'N0Y__7-UOdI')

    @unittest.mock.patch('sys.platform', 'win32')
    def test_sanitize_path(self):
        self.assertEqual(sanitize_path('abc'), 'abc')
        self.assertEqual(sanitize_path('abc/def'), 'abc\\def')
        self.assertEqual(sanitize_path('abc\\def'), 'abc\\def')
        self.assertEqual(sanitize_path('abc|def'), 'abc#def')
        self.assertEqual(sanitize_path('<>:"|?*'), '#######')
        self.assertEqual(sanitize_path('C:/abc/def'), 'C:\\abc\\def')
        self.assertEqual(sanitize_path('C?:/abc/def'), 'C##\\abc\\def')

        self.assertEqual(sanitize_path('\\\\?\\UNC\\ComputerName\\abc'), '\\\\?\\UNC\\ComputerName\\abc')
        self.assertEqual(sanitize_path('\\\\?\\UNC/ComputerName/abc'), '\\\\?\\UNC\\ComputerName\\abc')

        self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')
        self.assertEqual(sanitize_path('\\\\?\\C:/abc'), '\\\\?\\C:\\abc')
        self.assertEqual(sanitize_path('\\\\?\\C:\\ab?c\\de:f'), '\\\\?\\C:\\ab#c\\de#f')
        self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')

        self.assertEqual(
            sanitize_path('youtube/%(uploader)s/%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s'),
            'youtube\\%(uploader)s\\%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s')

        self.assertEqual(
            sanitize_path('youtube/TheWreckingYard ./00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part'),
            'youtube\\TheWreckingYard #\\00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part')
        self.assertEqual(sanitize_path('abc/def...'), 'abc\\def..#')
        self.assertEqual(sanitize_path('abc.../def'), 'abc..#\\def')
        self.assertEqual(sanitize_path('abc.../def...'), 'abc..#\\def..#')

        self.assertEqual(sanitize_path('C:\\abc:%(title)s.%(ext)s'), 'C:\\abc#%(title)s.%(ext)s')

        for test, expected in [
            ('C:\\', 'C:\\'),
            ('../abc', '..\\abc'),
            ('../../abc', '..\\..\\abc'),
            ('./abc', 'abc'),
            ('./../abc', '..\\abc'),
            ('\\abc', '\\abc'),
            ('C:abc', 'C:abc'),
            ('C:abc\\..\\', 'C:'),
            ('C:abc\\..\\def\\..\\..\\', 'C:..'),
            ('C:\\abc\\xyz///..\\def\\', 'C:\\abc\\def'),
            ('abc/../', '.'),
            ('./abc/../', '.'),
        ]:
            result = sanitize_path(test)
            assert result == expected, f'{test} was incorrectly resolved'
            assert result == sanitize_path(result), f'{test} changed after sanitizing again'
            assert result == ntpath.normpath(test), f'{test} does not match ntpath.normpath'

    def test_sanitize_url(self):
        self.assertEqual(sanitize_url('//foo.bar'), 'http://foo.bar')
        self.assertEqual(sanitize_url('httpss://foo.bar'), 'https://foo.bar')
        self.assertEqual(sanitize_url('rmtps://foo.bar'), 'rtmps://foo.bar')
        self.assertEqual(sanitize_url('https://foo.bar'), 'https://foo.bar')
        self.assertEqual(sanitize_url('foo bar'), 'foo bar')

    def test_expand_path(self):
        def env(var):
            return f'%{var}%' if sys.platform == 'win32' else f'${var}'

        os.environ['yt_dlp_EXPATH_PATH'] = 'expanded'
        self.assertEqual(expand_path(env('yt_dlp_EXPATH_PATH')), 'expanded')

        old_home = os.environ.get('HOME')
        test_str = R'C:\Documents and Settings\тест\Application Data'
        try:
            os.environ['HOME'] = test_str
            self.assertEqual(expand_path(env('HOME')), os.getenv('HOME'))
            self.assertEqual(expand_path('~'), os.getenv('HOME'))
            self.assertEqual(
                expand_path('~/{}'.format(env('yt_dlp_EXPATH_PATH'))),
                '{}/expanded'.format(os.getenv('HOME')))
        finally:
            os.environ['HOME'] = old_home or ''

    _uncommon_extensions = [
        ('exe', 'abc.exe.ext'),
        ('de', 'abc.de.ext'),
        ('../.mp4', None),
        ('..\\.mp4', None),
    ]

    def test_prepend_extension(self):
        self.assertEqual(prepend_extension('abc.ext', 'temp'), 'abc.temp.ext')
        self.assertEqual(prepend_extension('abc.ext', 'temp', 'ext'), 'abc.temp.ext')
        self.assertEqual(prepend_extension('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp')
        self.assertEqual(prepend_extension('abc', 'temp'), 'abc.temp')
        self.assertEqual(prepend_extension('.abc', 'temp'), '.abc.temp')
        self.assertEqual(prepend_extension('.abc.ext', 'temp'), '.abc.temp.ext')

        # Test uncommon extensions
        self.assertEqual(prepend_extension('abc.ext', 'bin'), 'abc.bin.ext')
        for ext, result in self._uncommon_extensions:
            with self.assertRaises(_UnsafeExtensionError):
                prepend_extension('abc', ext)
            if result:
                self.assertEqual(prepend_extension('abc.ext', ext, 'ext'), result)
            else:
                with self.assertRaises(_UnsafeExtensionError):
                    prepend_extension('abc.ext', ext, 'ext')
                with self.assertRaises(_UnsafeExtensionError):
                    prepend_extension('abc.unexpected_ext', ext, 'ext')

    def test_replace_extension(self):
        self.assertEqual(replace_extension('abc.ext', 'temp'), 'abc.temp')
        self.assertEqual(replace_extension('abc.ext', 'temp', 'ext'), 'abc.temp')
        self.assertEqual(replace_extension('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp')
        self.assertEqual(replace_extension('abc', 'temp'), 'abc.temp')
        self.assertEqual(replace_extension('.abc', 'temp'), '.abc.temp')
        self.assertEqual(replace_extension('.abc.ext', 'temp'), '.abc.temp')

        # Test uncommon extensions
        self.assertEqual(replace_extension('abc.ext', 'bin'), 'abc.unknown_video')
        for ext, _ in self._uncommon_extensions:
            with self.assertRaises(_UnsafeExtensionError):
                replace_extension('abc', ext)
            with self.assertRaises(_UnsafeExtensionError):
                replace_extension('abc.ext', ext, 'ext')
            with self.assertRaises(_UnsafeExtensionError):
                replace_extension('abc.unexpected_ext', ext, 'ext')

    def test_subtitles_filename(self):
        self.assertEqual(subtitles_filename('abc.ext', 'en', 'vtt'), 'abc.en.vtt')
        self.assertEqual(subtitles_filename('abc.ext', 'en', 'vtt', 'ext'), 'abc.en.vtt')
        self.assertEqual(subtitles_filename('abc.unexpected_ext', 'en', 'vtt', 'ext'), 'abc.unexpected_ext.en.vtt')

    def test_remove_start(self):
        self.assertEqual(remove_start(None, 'A - '), None)
        self.assertEqual(remove_start('A - B', 'A - '), 'B')
        self.assertEqual(remove_start('B - A', 'A - '), 'B - A')
        self.assertEqual(remove_start('non-empty', ''), 'non-empty')

    def test_remove_end(self):
        self.assertEqual(remove_end(None, ' - B'), None)
        self.assertEqual(remove_end('A - B', ' - B'), 'A')
        self.assertEqual(remove_end('B - A', ' - B'), 'B - A')
        self.assertEqual(remove_end('non-empty', ''), 'non-empty')

    def test_remove_quotes(self):
        self.assertEqual(remove_quotes(None), None)
        self.assertEqual(remove_quotes('"'), '"')
        self.assertEqual(remove_quotes("'"), "'")
        self.assertEqual(remove_quotes(';'), ';')
        self.assertEqual(remove_quotes('";'), '";')
        self.assertEqual(remove_quotes('""'), '')
        self.assertEqual(remove_quotes('";"'), ';')

    def test_ordered_set(self):
        self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
        self.assertEqual(orderedSet([]), [])
        self.assertEqual(orderedSet([1]), [1])
        # keep the list ordered
        self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])

    def test_unescape_html(self):
        self.assertEqual(unescapeHTML('%20;'), '%20;')
        self.assertEqual(unescapeHTML('&#x2F;'), '/')
        self.assertEqual(unescapeHTML('&#47;'), '/')
        self.assertEqual(unescapeHTML('&eacute;'), 'é')
        self.assertEqual(unescapeHTML('&#2013266066;'), '&#2013266066;')
        self.assertEqual(unescapeHTML('&a&quot;'), '&a"')
        # HTML5 entities
        self.assertEqual(unescapeHTML('&period;&apos;'), '.\'')

    def test_date_from_str(self):
        self.assertEqual(date_from_str('yesterday'), date_from_str('now-1day'))
        self.assertEqual(date_from_str('now+7day'), date_from_str('now+1week'))
        self.assertEqual(date_from_str('now+14day'), date_from_str('now+2week'))
        self.assertEqual(date_from_str('20200229+365day'), date_from_str('20200229+1year'))
        self.assertEqual(date_from_str('20210131+28day'), date_from_str('20210131+1month'))

    def test_datetime_from_str(self):
        self.assertEqual(datetime_from_str('yesterday', precision='day'), datetime_from_str('now-1day', precision='auto'))
        self.assertEqual(datetime_from_str('now+7day', precision='day'), datetime_from_str('now+1week', precision='auto'))
        self.assertEqual(datetime_from_str('now+14day', precision='day'), datetime_from_str('now+2week', precision='auto'))
        self.assertEqual(datetime_from_str('20200229+365day', precision='day'), datetime_from_str('20200229+1year', precision='auto'))
        self.assertEqual(datetime_from_str('20210131+28day', precision='day'), datetime_from_str('20210131+1month', precision='auto'))
        self.assertEqual(datetime_from_str('20210131+59day', precision='day'), datetime_from_str('20210131+2month', precision='auto'))
        self.assertEqual(datetime_from_str('now+1day', precision='hour'), datetime_from_str('now+24hours', precision='auto'))
        self.assertEqual(datetime_from_str('now+23hours', precision='hour'), datetime_from_str('now+23hours', precision='auto'))

    def test_datetime_round(self):
        self.assertEqual(
            datetime_round(dt.datetime.strptime('1820-05-12T01:23:45Z', '%Y-%m-%dT%H:%M:%SZ')),
            dt.datetime(1820, 5, 12, tzinfo=dt.timezone.utc))
        self.assertEqual(
            datetime_round(dt.datetime.strptime('1969-12-31T23:34:45Z', '%Y-%m-%dT%H:%M:%SZ'), 'hour'),
            dt.datetime(1970, 1, 1, 0, tzinfo=dt.timezone.utc))
        self.assertEqual(
            datetime_round(dt.datetime.strptime('2024-12-25T01:23:45Z', '%Y-%m-%dT%H:%M:%SZ'), 'minute'),
            dt.datetime(2024, 12, 25, 1, 24, tzinfo=dt.timezone.utc))
        self.assertEqual(
            datetime_round(dt.datetime.strptime('2024-12-25T01:23:45.123Z', '%Y-%m-%dT%H:%M:%S.%fZ'), 'second'),
            dt.datetime(2024, 12, 25, 1, 23, 45, tzinfo=dt.timezone.utc))
        self.assertEqual(
            datetime_round(dt.datetime.strptime('2024-12-25T01:23:45.678Z', '%Y-%m-%dT%H:%M:%S.%fZ'), 'second'),
            dt.datetime(2024, 12, 25, 1, 23, 46, tzinfo=dt.timezone.utc))

    def test_strftime_or_none(self):
        self.assertEqual(strftime_or_none(-4722192000), '18200512')
        self.assertEqual(strftime_or_none(0), '19700101')
        self.assertEqual(strftime_or_none(1735084800), '20241225')
        # Throws OverflowError
        self.assertEqual(strftime_or_none(1735084800000), None)

    def test_daterange(self):
        _20century = DateRange('19000101', '20000101')
        self.assertFalse('17890714' in _20century)
        _ac = DateRange('00010101')
        self.assertTrue('19690721' in _ac)
        _firstmilenium = DateRange(end='10000101')
        self.assertTrue('07110427' in _firstmilenium)

    def test_unified_dates(self):
        self.assertEqual(unified_strdate('December 21, 2010'), '20101221')
        self.assertEqual(unified_strdate('8/7/2009'), '20090708')
        self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
        self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
        self.assertEqual(unified_strdate('1968 12 10'), '19681210')
        self.assertEqual(unified_strdate('1968-12-10'), '19681210')
        self.assertEqual(unified_strdate('31-07-2022 20:00'), '20220731')
        self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128')
        self.assertEqual(
            unified_strdate('11/26/2014 11:30:00 AM PST', day_first=False),
            '20141126')
        self.assertEqual(
            unified_strdate('2/2/2015 6:47:40 PM', day_first=False),
            '20150202')
        self.assertEqual(unified_strdate('Feb 14th 2016 5:45PM'), '20160214')
        self.assertEqual(unified_strdate('25-09-2014'), '20140925')
        self.assertEqual(unified_strdate('27.02.2016 17:30'), '20160227')
        self.assertEqual(unified_strdate('UNKNOWN DATE FORMAT'), None)
        self.assertEqual(unified_strdate('Feb 7, 2016 at 6:35 pm'), '20160207')
        self.assertEqual(unified_strdate('July 15th, 2013'), '20130715')
        self.assertEqual(unified_strdate('September 1st, 2013'), '20130901')
        self.assertEqual(unified_strdate('Sep 2nd, 2013'), '20130902')
        self.assertEqual(unified_strdate('November 3rd, 2019'), '20191103')
        self.assertEqual(unified_strdate('October 23rd, 2005'), '20051023')

    def test_unified_timestamps(self):
        self.assertEqual(unified_timestamp('December 21, 2010'), 1292889600)
        self.assertEqual(unified_timestamp('8/7/2009'), 1247011200)
        self.assertEqual(unified_timestamp('Dec 14, 2012'), 1355443200)
        self.assertEqual(unified_timestamp('2012/10/11 01:56:38 +0000'), 1349920598)
        self.assertEqual(unified_timestamp('1968 12 10'), -33436800)
        self.assertEqual(unified_timestamp('1968-12-10'), -33436800)
        self.assertEqual(unified_timestamp('28/01/2014 21:00:00 +0100'), 1390939200)
        self.assertEqual(
            unified_timestamp('11/26/2014 11:30:00 AM PST', day_first=False),
            1417001400)
        self.assertEqual(
            unified_timestamp('2/2/2015 6:47:40 PM', day_first=False),
            1422902860)
        self.assertEqual(unified_timestamp('Feb 14th 2016 5:45PM'), 1455471900)
        self.assertEqual(unified_timestamp('25-09-2014'), 1411603200)
        self.assertEqual(unified_timestamp('27.02.2016 17:30'), 1456594200)
        self.assertEqual(unified_timestamp('UNKNOWN DATE FORMAT'), None)
        self.assertEqual(unified_timestamp('May 16, 2016 11:15 PM'), 1463440500)
        self.assertEqual(unified_timestamp('Feb 7, 2016 at 6:35 pm'), 1454870100)
        self.assertEqual(unified_timestamp('2017-03-30T17:52:41Q'), 1490896361)
        self.assertEqual(unified_timestamp('Sep 11, 2013 | 5:49 AM'), 1378878540)
        self.assertEqual(unified_timestamp('December 15, 2017 at 7:49 am'), 1513324140)
        self.assertEqual(unified_timestamp('2018-03-14T08:32:43.1493874+00:00'), 1521016363)
        self.assertEqual(unified_timestamp('Sunday, 26 Nov 2006, 19:00'), 1164567600)
        self.assertEqual(unified_timestamp('wed, aug 16, 2008, 12:00pm'), 1218931200)
        self.assertEqual(unified_timestamp('December 31 1969 20:00:01 EDT'), 1)
        self.assertEqual(unified_timestamp('Wednesday 31 December 1969 18:01:26 MDT'), 86)
        self.assertEqual(unified_timestamp('12/31/1969 20:01:18 EDT', False), 78)
        self.assertEqual(unified_timestamp('2026-01-01 00:00:00', tz_offset=0), 1767225600)
        self.assertEqual(unified_timestamp('2026-01-01 00:00:00', tz_offset=8), 1767196800)
        self.assertEqual(unified_timestamp('2026-01-01 00:00:00 +0800', tz_offset=-5), 1767196800)

    def test_determine_ext(self):
        self.assertEqual(determine_ext('http://example.com/foo/bar.mp4/?download'), 'mp4')
        self.assertEqual(determine_ext('http://example.com/foo/bar/?download', None), None)
        self.assertEqual(determine_ext('http://example.com/foo/bar.nonext/?download', None), None)
        self.assertEqual(determine_ext('http://example.com/foo/bar/mp4?download', None), None)
        self.assertEqual(determine_ext('http://example.com/foo/bar.m3u8//?download'), 'm3u8')
        self.assertEqual(determine_ext('foobar', None), None)

    def test_find_xpath_attr(self):
        testxml = '''<root> <node/> <node x="a"/> <node x="a" y="c" /> <node x="b" y="d" /> <node x="" /> </root>'''
        doc = compat_etree_fromstring(testxml)

        self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n'), None)
        self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n', 'v'), None)
        self.assertEqual(find_xpath_attr(doc, './/node', 'n'), None)
        self.assertEqual(find_xpath_attr(doc, './/node', 'n', 'v'), None)
        self.assertEqual(find_xpath_attr(doc, './/node', 'x'), doc[1])
        self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1])
        self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'b'), doc[3])
        self.assertEqual(find_xpath_attr(doc, './/node', 'y'), doc[2])
        self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])
        self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'd'), doc[3])
        self.assertEqual(find_xpath_attr(doc, './/node', 'x', ''), doc[4])

    def test_xpath_with_ns(self):
        testxml = '''<root xmlns:media="http://example.com/"> <media:song> <media:author>The Author</media:author> <url>http://server.com/download.mp3</url> </media:song> </root>'''
        doc = compat_etree_fromstring(testxml)
        find = lambda p: doc.find(xpath_with_ns(p, {'media': 'http://example.com/'}))
        self.assertTrue(find('media:song') is not None)
        self.assertEqual(find('media:song/media:author').text, 'The Author')
        self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3')

    def test_xpath_element(self):
        doc = xml.etree.ElementTree.Element('root')
        div = xml.etree.ElementTree.SubElement(doc, 'div')
        p = xml.etree.ElementTree.SubElement(div, 'p')
        p.text = 'Foo'
        self.assertEqual(xpath_element(doc, 'div/p'), p)
        self.assertEqual(xpath_element(doc, ['div/p']), p)
        self.assertEqual(xpath_element(doc, ['div/bar', 'div/p']), p)
        self.assertEqual(xpath_element(doc, 'div/bar', default='default'), 'default')
        self.assertEqual(xpath_element(doc, ['div/bar'], default='default'), 'default')
        self.assertTrue(xpath_element(doc, 'div/bar') is None)
        self.assertTrue(xpath_element(doc, ['div/bar']) is None)
        self.assertTrue(xpath_element(doc, ['div/bar'], 'div/baz') is None)
        self.assertRaises(ExtractorError, xpath_element, doc, 'div/bar', fatal=True)
        self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar'], fatal=True)
        self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar', 'div/baz'], fatal=True)

    def test_xpath_text(self):
        testxml = '''<root> <div> <p>Foo</p> </div> </root>'''
        doc = compat_etree_fromstring(testxml)
        self.assertEqual(xpath_text(doc, 'div/p'), 'Foo')
        self.assertEqual(xpath_text(doc, 'div/bar', default='default'), 'default')
        self.assertTrue(xpath_text(doc, 'div/bar') is None)
        self.assertRaises(ExtractorError, xpath_text, doc, 'div/bar', fatal=True)

    def test_xpath_attr(self):
        testxml = '''<root> <div> <p x="a">Foo</p> </div> </root>'''
        doc = compat_etree_fromstring(testxml)
        self.assertEqual(xpath_attr(doc, 'div/p', 'x'), 'a')
        self.assertEqual(xpath_attr(doc, 'div/bar', 'x'), None)
        self.assertEqual(xpath_attr(doc, 'div/p', 'y'), None)
        self.assertEqual(xpath_attr(doc, 'div/bar', 'x', default='default'), 'default')
        self.assertEqual(xpath_attr(doc, 'div/p', 'y', default='default'), 'default')
        self.assertRaises(ExtractorError, xpath_attr, doc, 'div/bar', 'x', fatal=True)
        self.assertRaises(ExtractorError, xpath_attr, doc, 'div/p', 'y', fatal=True)

    def test_smuggle_url(self):
        data = {'ö': 'ö', 'abc': [3]}
        url = 'https://foo.bar/baz?x=y#a'
        smug_url = smuggle_url(url, data)
        unsmug_url, unsmug_data = unsmuggle_url(smug_url)
        self.assertEqual(url, unsmug_url)
        self.assertEqual(data, unsmug_data)

        res_url, res_data = unsmuggle_url(url)
        self.assertEqual(res_url, url)
        self.assertEqual(res_data, None)

        smug_url = smuggle_url(url, {'a': 'b'})
        smug_smug_url = smuggle_url(smug_url, {'c': 'd'})
        res_url, res_data = unsmuggle_url(smug_smug_url)
        self.assertEqual(res_url, url)
        self.assertEqual(res_data, {'a': 'b', 'c': 'd'})

    def test_shell_quote(self):
        args = ['ffmpeg', '-i', 'ñ€ß\'.mp4']
        self.assertEqual(
            shell_quote(args),
            """ffmpeg -i 'ñ€ß'"'"'.mp4'""" if os.name != 'nt' else '''ffmpeg -i "ñ€ß'.mp4"''')

    def test_float_or_none(self):
        self.assertEqual(float_or_none('42.42'), 42.42)
        self.assertEqual(float_or_none('42'), 42.0)
        self.assertEqual(float_or_none(''), None)
        self.assertEqual(float_or_none(None), None)
        self.assertEqual(float_or_none([]), None)
        self.assertEqual(float_or_none(set()), None)

    def test_int_or_none(self):
        self.assertEqual(int_or_none('42'), 42)
        self.assertEqual(int_or_none(''), None)
        self.assertEqual(int_or_none(None), None)
        self.assertEqual(int_or_none([]), None)
        self.assertEqual(int_or_none(set()), None)

    def test_str_to_int(self):
        self.assertEqual(str_to_int('123,456'), 123456)
        self.assertEqual(str_to_int('123.456'), 123456)
        self.assertEqual(str_to_int(523), 523)
        self.assertEqual(str_to_int('noninteger'), None)
        self.assertEqual(str_to_int([]), None)

    def test_url_basename(self):
        self.assertEqual(url_basename('http://foo.de/'), '')
        self.assertEqual(url_basename('http://foo.de/bar/baz'), 'baz')
        self.assertEqual(url_basename('http://foo.de/bar/baz?x=y'), 'baz')
        self.assertEqual(url_basename('http://foo.de/bar/baz#x=y'), 'baz')
        self.assertEqual(url_basename('http://foo.de/bar/baz/'), 'baz')
        self.assertEqual(
            url_basename('http://media.w3.org/2010/05/sintel/trailer.mp4'),
            'trailer.mp4')

    def test_base_url(self):
        self.assertEqual(base_url('http://foo.de/'), 'http://foo.de/')
        self.assertEqual(base_url('http://foo.de/bar'), 'http://foo.de/')
        self.assertEqual(base_url('http://foo.de/bar/'), 'http://foo.de/bar/')
        self.assertEqual(base_url('http://foo.de/bar/baz'), 'http://foo.de/bar/')
        self.assertEqual(base_url('http://foo.de/bar/baz?x=z/x/c'), 'http://foo.de/bar/')
        self.assertEqual(base_url('http://foo.de/bar/baz&x=z&w=y/x/c'), 'http://foo.de/bar/baz&x=z&w=y/x/')

    def test_urljoin(self):
        self.assertEqual(urljoin('http://foo.de/', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
        self.assertEqual(urljoin(b'http://foo.de/', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
        self.assertEqual(urljoin('http://foo.de/', b'/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
        self.assertEqual(urljoin(b'http://foo.de/', b'/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
        self.assertEqual(urljoin('//foo.de/', '/a/b/c.txt'), '//foo.de/a/b/c.txt')
        self.assertEqual(urljoin('http://foo.de/', 'a/b/c.txt'), 'http://foo.de/a/b/c.txt')
        self.assertEqual(urljoin('http://foo.de', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
        self.assertEqual(urljoin('http://foo.de', 'a/b/c.txt'), 'http://foo.de/a/b/c.txt')
        self.assertEqual(urljoin('http://foo.de/', 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
        self.assertEqual(urljoin('http://foo.de/', '//foo.de/a/b/c.txt'), '//foo.de/a/b/c.txt')
        self.assertEqual(urljoin(None, 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
        self.assertEqual(urljoin(None, '//foo.de/a/b/c.txt'), '//foo.de/a/b/c.txt')
        self.assertEqual(urljoin('', 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
        self.assertEqual(urljoin(['foobar'], 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
        self.assertEqual(urljoin('http://foo.de/', None), None)
        self.assertEqual(urljoin('http://foo.de/', ''), None)
        self.assertEqual(urljoin('http://foo.de/', ['foobar']), None)
        self.assertEqual(urljoin('http://foo.de/a/b/c.txt', '.././../d.txt'), 'http://foo.de/d.txt')
        self.assertEqual(urljoin('http://foo.de/a/b/c.txt', 'rtmp://foo.de'), 'rtmp://foo.de')
        self.assertEqual(urljoin(None, 'rtmp://foo.de'), 'rtmp://foo.de')

    def test_url_or_none(self):
        self.assertEqual(url_or_none(None), None)
        self.assertEqual(url_or_none(''), None)
        self.assertEqual(url_or_none('foo'), None)
        self.assertEqual(url_or_none('http://foo.de'), 'http://foo.de')
        self.assertEqual(url_or_none('https://foo.de'), 'https://foo.de')
        self.assertEqual(url_or_none('http$://foo.de'), None)
        self.assertEqual(url_or_none('http://foo.de'), 'http://foo.de')
        self.assertEqual(url_or_none('//foo.de'), '//foo.de')
        self.assertEqual(url_or_none('s3://foo.de'), None)
        self.assertEqual(url_or_none('rtmpte://foo.de'), 'rtmpte://foo.de')
        self.assertEqual(url_or_none('mms://foo.de'), 'mms://foo.de')
        self.assertEqual(url_or_none('rtspu://foo.de'), 'rtspu://foo.de')
        self.assertEqual(url_or_none('ftps://foo.de'), 'ftps://foo.de')
        self.assertEqual(url_or_none('ws://foo.de'), 'ws://foo.de')
        self.assertEqual(url_or_none('wss://foo.de'), 'wss://foo.de')

    def test_parse_age_limit(self):
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
true
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_downloader_external.py
test/test_downloader_external.py
#!/usr/bin/env python3

# Allow direct execution
import os
import sys
import unittest

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import http.cookiejar

from test.helper import FakeYDL
from yt_dlp.downloader.external import (
    Aria2cFD,
    AxelFD,
    CurlFD,
    FFmpegFD,
    HttpieFD,
    WgetFD,
)

TEST_COOKIE = {
    'version': 0,
    'name': 'test',
    'value': 'ytdlp',
    'port': None,
    'port_specified': False,
    'domain': '.example.com',
    'domain_specified': True,
    'domain_initial_dot': False,
    'path': '/',
    'path_specified': True,
    'secure': False,
    'expires': None,
    'discard': False,
    'comment': None,
    'comment_url': None,
    'rest': {},
}

TEST_INFO = {'url': 'http://www.example.com/'}


class TestHttpieFD(unittest.TestCase):
    def test_make_cmd(self):
        with FakeYDL() as ydl:
            downloader = HttpieFD(ydl, {})
            self.assertEqual(
                downloader._make_cmd('test', TEST_INFO),
                ['http', '--download', '--output', 'test', 'http://www.example.com/'])

            # Test cookie header is added
            ydl.cookiejar.set_cookie(http.cookiejar.Cookie(**TEST_COOKIE))
            self.assertEqual(
                downloader._make_cmd('test', TEST_INFO),
                ['http', '--download', '--output', 'test', 'http://www.example.com/', 'Cookie:test=ytdlp'])


class TestAxelFD(unittest.TestCase):
    def test_make_cmd(self):
        with FakeYDL() as ydl:
            downloader = AxelFD(ydl, {})
            self.assertEqual(
                downloader._make_cmd('test', TEST_INFO),
                ['axel', '-o', 'test', '--', 'http://www.example.com/'])

            # Test cookie header is added
            ydl.cookiejar.set_cookie(http.cookiejar.Cookie(**TEST_COOKIE))
            self.assertEqual(
                downloader._make_cmd('test', TEST_INFO),
                ['axel', '-o', 'test', '-H', 'Cookie: test=ytdlp', '--max-redirect=0', '--', 'http://www.example.com/'])


class TestWgetFD(unittest.TestCase):
    def test_make_cmd(self):
        with FakeYDL() as ydl:
            downloader = WgetFD(ydl, {})
            self.assertNotIn('--load-cookies', downloader._make_cmd('test', TEST_INFO))
            # Test cookiejar tempfile arg is added
            ydl.cookiejar.set_cookie(http.cookiejar.Cookie(**TEST_COOKIE))
            self.assertIn('--load-cookies', downloader._make_cmd('test', TEST_INFO))


class TestCurlFD(unittest.TestCase):
    def test_make_cmd(self):
        with FakeYDL() as ydl:
            downloader = CurlFD(ydl, {})
            self.assertNotIn('--cookie', downloader._make_cmd('test', TEST_INFO))
            # Test cookie header is added
            ydl.cookiejar.set_cookie(http.cookiejar.Cookie(**TEST_COOKIE))
            self.assertIn('--cookie', downloader._make_cmd('test', TEST_INFO))
            self.assertIn('test=ytdlp', downloader._make_cmd('test', TEST_INFO))


class TestAria2cFD(unittest.TestCase):
    def test_make_cmd(self):
        with FakeYDL() as ydl:
            downloader = Aria2cFD(ydl, {})
            downloader._make_cmd('test', TEST_INFO)
            self.assertFalse(hasattr(downloader, '_cookies_tempfile'))

            # Test cookiejar tempfile arg is added
            ydl.cookiejar.set_cookie(http.cookiejar.Cookie(**TEST_COOKIE))
            cmd = downloader._make_cmd('test', TEST_INFO)
            self.assertIn(f'--load-cookies={downloader._cookies_tempfile}', cmd)


@unittest.skipUnless(FFmpegFD.available(), 'ffmpeg not found')
class TestFFmpegFD(unittest.TestCase):
    _args = []

    def _test_cmd(self, args):
        self._args = args

    def test_make_cmd(self):
        with FakeYDL() as ydl:
            downloader = FFmpegFD(ydl, {})
            downloader._debug_cmd = self._test_cmd

            downloader._call_downloader('test', {**TEST_INFO, 'ext': 'mp4'})
            self.assertEqual(self._args, [
                'ffmpeg', '-y', '-hide_banner', '-i', 'http://www.example.com/',
                '-c', 'copy', '-f', 'mp4', 'file:test'])

            # Test cookies arg is added
            ydl.cookiejar.set_cookie(http.cookiejar.Cookie(**TEST_COOKIE))
            downloader._call_downloader('test', {**TEST_INFO, 'ext': 'mp4'})
            self.assertEqual(self._args, [
                'ffmpeg', '-y', '-hide_banner',
                '-cookies', 'test=ytdlp; path=/; domain=.example.com;\r\n',
                '-i', 'http://www.example.com/', '-c', 'copy', '-f', 'mp4', 'file:test'])

            # Test with non-url input (ffmpeg reads from stdin '-' for websockets)
            downloader._call_downloader('test', {'url': 'x', 'ext': 'mp4'})
            self.assertEqual(self._args, [
                'ffmpeg', '-y', '-hide_banner', '-i', 'x', '-c', 'copy', '-f', 'mp4', 'file:test'])


if __name__ == '__main__':
    unittest.main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_overwrites.py
test/test_overwrites.py
#!/usr/bin/env python3

# Allow direct execution
import os
import sys
import unittest

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import subprocess

from test.helper import is_download_test, try_rm

root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
download_file = os.path.join(root_dir, 'test.webm')


@is_download_test
class TestOverwrites(unittest.TestCase):
    def setUp(self):
        # create an empty file
        open(download_file, 'a').close()

    def test_default_overwrites(self):
        outp = subprocess.Popen(
            [
                sys.executable, 'yt_dlp/__main__.py',
                '-o', 'test.webm',
                'https://www.youtube.com/watch?v=jNQXAC9IVRw',
            ], cwd=root_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        sout, _ = outp.communicate()
        self.assertTrue(b'has already been downloaded' in sout)
        # if the file has no content, it has not been redownloaded
        self.assertTrue(os.path.getsize(download_file) < 1)

    def test_yes_overwrites(self):
        outp = subprocess.Popen(
            [
                sys.executable, 'yt_dlp/__main__.py', '--yes-overwrites',
                '-o', 'test.webm',
                'https://www.youtube.com/watch?v=jNQXAC9IVRw',
            ], cwd=root_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        sout, _ = outp.communicate()
        self.assertTrue(b'has already been downloaded' not in sout)
        # if the file has no content, it has not been redownloaded
        self.assertTrue(os.path.getsize(download_file) > 1)

    def tearDown(self):
        try_rm(os.path.join(root_dir, 'test.webm'))


if __name__ == '__main__':
    unittest.main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_subtitles.py
test/test_subtitles.py
#!/usr/bin/env python3

# Allow direct execution
import os
import sys
import unittest

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from test.helper import FakeYDL, is_download_test, md5
from yt_dlp.extractor import (
    NPOIE,
    NRKTVIE,
    PBSIE,
    CeskaTelevizeIE,
    DailymotionIE,
    DemocracynowIE,
    LyndaIE,
    RaiPlayIE,
    RTVEALaCartaIE,
    TedTalkIE,
    ThePlatformFeedIE,
    ThePlatformIE,
    VimeoIE,
    WallaIE,
    YoutubeIE,
)


@is_download_test
class BaseTestSubtitles(unittest.TestCase):
    url = None
    IE = None

    def setUp(self):
        self.DL = FakeYDL()
        self.ie = self.IE()
        self.DL.add_info_extractor(self.ie)
        if not self.IE.working():
            print(f'Skipping: {self.IE.ie_key()} marked as not _WORKING')
            self.skipTest('IE marked as not _WORKING')

    def getInfoDict(self):
        return self.DL.extract_info(self.url, download=False)

    def getSubtitles(self):
        info_dict = self.getInfoDict()
        subtitles = info_dict['requested_subtitles']
        if not subtitles:
            return subtitles
        for sub_info in subtitles.values():
            if sub_info.get('data') is None:
                uf = self.DL.urlopen(sub_info['url'])
                sub_info['data'] = uf.read().decode()
        return {l: sub_info['data'] for l, sub_info in subtitles.items()}


@is_download_test
class TestYoutubeSubtitles(BaseTestSubtitles):
    # Available subtitles for QRS8MkLhQmM:
    # Language formats
    # ru vtt, ttml, srv3, srv2, srv1, json3
    # fr vtt, ttml, srv3, srv2, srv1, json3
    # en vtt, ttml, srv3, srv2, srv1, json3
    # nl vtt, ttml, srv3, srv2, srv1, json3
    # de vtt, ttml, srv3, srv2, srv1, json3
    # ko vtt, ttml, srv3, srv2, srv1, json3
    # it vtt, ttml, srv3, srv2, srv1, json3
    # zh-Hant vtt, ttml, srv3, srv2, srv1, json3
    # hi vtt, ttml, srv3, srv2, srv1, json3
    # pt-BR vtt, ttml, srv3, srv2, srv1, json3
    # es-MX vtt, ttml, srv3, srv2, srv1, json3
    # ja vtt, ttml, srv3, srv2, srv1, json3
    # pl vtt, ttml, srv3, srv2, srv1, json3
    url = 'QRS8MkLhQmM'
    IE = YoutubeIE

    def test_youtube_allsubtitles(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(len(subtitles.keys()), 13)
        self.assertEqual(md5(subtitles['en']), 'ae1bd34126571a77aabd4d276b28044d')
        self.assertEqual(md5(subtitles['it']), '0e0b667ba68411d88fd1c5f4f4eab2f9')
        for lang in ['fr', 'de']:
            self.assertTrue(subtitles.get(lang) is not None, f'Subtitles for \'{lang}\' not extracted')

    def _test_subtitles_format(self, fmt, md5_hash, lang='en'):
        self.DL.params['writesubtitles'] = True
        self.DL.params['subtitlesformat'] = fmt
        subtitles = self.getSubtitles()
        self.assertEqual(md5(subtitles[lang]), md5_hash)

    def test_youtube_subtitles_ttml_format(self):
        self._test_subtitles_format('ttml', 'c97ddf1217390906fa9fbd34901f3da2')

    def test_youtube_subtitles_vtt_format(self):
        self._test_subtitles_format('vtt', 'ae1bd34126571a77aabd4d276b28044d')

    def test_youtube_subtitles_json3_format(self):
        self._test_subtitles_format('json3', '688dd1ce0981683867e7fe6fde2a224b')

    def _test_automatic_captions(self, url, lang):
        self.url = url
        self.DL.params['writeautomaticsub'] = True
        self.DL.params['subtitleslangs'] = [lang]
        subtitles = self.getSubtitles()
        self.assertTrue(subtitles[lang] is not None)

    def test_youtube_automatic_captions(self):
        # Available automatic captions for 8YoUxe5ncPo:
        # Language formats (all in vtt, ttml, srv3, srv2, srv1, json3)
        # gu, zh-Hans, zh-Hant, gd, ga, gl, lb, la, lo, tt, tr,
        # lv, lt, tk, th, tg, te, fil, haw, yi, ceb, yo, de, da,
        # el, eo, en, eu, et, es, ru, rw, ro, bn, be, bg, uk, jv,
        # bs, ja, or, xh, co, ca, cy, cs, ps, pt, pa, vi, pl, hy,
        # hr, ht, hu, hmn, hi, ha, mg, uz, ml, mn, mi, mk, ur,
        # mt, ms, mr, ug, ta, my, af, sw, is, am,
        # *it*, iw, sv, ar,
        # su, zu, az, id, ig, nl, no, ne, ny, fr, ku, fy, fa, fi,
        # ka, kk, sr, sq, ko, kn, km, st, sk, si, so, sn, sm, sl,
        # ky, sd
        # ...
        self._test_automatic_captions('8YoUxe5ncPo', 'it')

    @unittest.skip('Video unavailable')
    def test_youtube_translated_subtitles(self):
        # This video has a subtitles track, which can be translated (#4555)
        self._test_automatic_captions('Ky9eprVWzlI', 'it')

    def test_youtube_nosubtitles(self):
        self.DL.expect_warning('video doesn\'t have subtitles')
        # Available automatic captions for 8YoUxe5ncPo:
        # ...
        # 8YoUxe5ncPo has no subtitles
        self.url = '8YoUxe5ncPo'
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertFalse(subtitles)


@is_download_test
class TestDailymotionSubtitles(BaseTestSubtitles):
    url = 'http://www.dailymotion.com/video/xczg00'
    IE = DailymotionIE

    def test_allsubtitles(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertTrue(len(subtitles.keys()) >= 6)
        self.assertEqual(md5(subtitles['en']), '976553874490cba125086bbfea3ff76f')
        self.assertEqual(md5(subtitles['fr']), '594564ec7d588942e384e920e5341792')
        for lang in ['es', 'fr', 'de']:
            self.assertTrue(subtitles.get(lang) is not None, f'Subtitles for \'{lang}\' not extracted')

    def test_nosubtitles(self):
        self.DL.expect_warning('video doesn\'t have subtitles')
        self.url = 'http://www.dailymotion.com/video/x12u166_le-zapping-tele-star-du-08-aout-2013_tv'
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertFalse(subtitles)


@is_download_test
@unittest.skip('IE broken')
class TestTedSubtitles(BaseTestSubtitles):
    url = 'http://www.ted.com/talks/dan_dennett_on_our_consciousness.html'
    IE = TedTalkIE

    def test_allsubtitles(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertTrue(len(subtitles.keys()) >= 28)
        self.assertEqual(md5(subtitles['en']), '4262c1665ff928a2dada178f62cb8d14')
        self.assertEqual(md5(subtitles['fr']), '66a63f7f42c97a50f8c0e90bc7797bb5')
        for lang in ['es', 'fr', 'de']:
            self.assertTrue(subtitles.get(lang) is not None, f'Subtitles for \'{lang}\' not extracted')


@is_download_test
class TestVimeoSubtitles(BaseTestSubtitles):
    url = 'http://vimeo.com/76979871'
    IE = VimeoIE

    def test_allsubtitles(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(set(subtitles.keys()), {'de', 'en', 'es', 'fr'})
        self.assertEqual(md5(subtitles['en']), '386cbc9320b94e25cb364b97935e5dd1')
        self.assertEqual(md5(subtitles['fr']), 'c9b69eef35bc6641c0d4da8a04f9dfac')

    def test_nosubtitles(self):
        self.DL.expect_warning('video doesn\'t have subtitles')
        self.url = 'http://vimeo.com/68093876'
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertFalse(subtitles)


@is_download_test
@unittest.skip('IE broken')
class TestWallaSubtitles(BaseTestSubtitles):
    url = 'http://vod.walla.co.il/movie/2705958/the-yes-men'
    IE = WallaIE

    def test_allsubtitles(self):
        self.DL.expect_warning('Automatic Captions not supported by this server')
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(set(subtitles.keys()), {'heb'})
        self.assertEqual(md5(subtitles['heb']), 'e758c5d7cb982f6bef14f377ec7a3920')

    def test_nosubtitles(self):
        self.DL.expect_warning('video doesn\'t have subtitles')
        self.url = 'http://vod.walla.co.il/movie/2642630/one-direction-all-for-one'
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertFalse(subtitles)


@is_download_test
@unittest.skip('IE broken')
class TestCeskaTelevizeSubtitles(BaseTestSubtitles):
    url = 'http://www.ceskatelevize.cz/ivysilani/10600540290-u6-uzasny-svet-techniky'
    IE = CeskaTelevizeIE

    def test_allsubtitles(self):
        self.DL.expect_warning('Automatic Captions not supported by this server')
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(set(subtitles.keys()), {'cs'})
        self.assertTrue(len(subtitles['cs']) > 20000)

    def test_nosubtitles(self):
        self.DL.expect_warning('video doesn\'t have subtitles')
        self.url = 'http://www.ceskatelevize.cz/ivysilani/ivysilani/10441294653-hyde-park-civilizace/214411058091220'
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertFalse(subtitles)


@is_download_test
@unittest.skip('IE broken')
class TestLyndaSubtitles(BaseTestSubtitles):
    url = 'http://www.lynda.com/Bootstrap-tutorials/Using-exercise-files/110885/114408-4.html'
    IE = LyndaIE

    def test_allsubtitles(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(set(subtitles.keys()), {'en'})
        self.assertEqual(md5(subtitles['en']), '09bbe67222259bed60deaa26997d73a7')


@is_download_test
@unittest.skip('IE broken')
class TestNPOSubtitles(BaseTestSubtitles):
    url = 'http://www.npo.nl/nos-journaal/28-08-2014/POW_00722860'
    IE = NPOIE

    def test_allsubtitles(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(set(subtitles.keys()), {'nl'})
        self.assertEqual(md5(subtitles['nl']), 'fc6435027572b63fb4ab143abd5ad3f4')


@is_download_test
class TestNRKSubtitles(BaseTestSubtitles):
    url = 'http://tv.nrk.no/serie/ikke-gjoer-dette-hjemme/DMPV73000411/sesong-2/episode-1'
    IE = NRKTVIE

    def test_allsubtitles(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(set(subtitles.keys()), {'nb-ttv'})
        self.assertEqual(md5(subtitles['nb-ttv']), '67e06ff02d0deaf975e68f6cb8f6a149')


@is_download_test
class TestRaiPlaySubtitles(BaseTestSubtitles):
    IE = RaiPlayIE

    def test_subtitles_key(self):
        self.url = 'http://www.raiplay.it/video/2014/04/Report-del-07042014-cb27157f-9dd0-4aee-b788-b1f67643a391.html'
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(set(subtitles.keys()), {'it'})
        self.assertEqual(md5(subtitles['it']), 'b1d90a98755126b61e667567a1f6680a')

    def test_subtitles_array_key(self):
        self.url = 'https://www.raiplay.it/video/2020/12/Report---04-01-2021-2e90f1de-8eee-4de4-ac0e-78d21db5b600.html'
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(set(subtitles.keys()), {'it'})
        self.assertEqual(md5(subtitles['it']), '4b3264186fbb103508abe5311cfcb9cd')


@is_download_test
class TestThePlatformSubtitles(BaseTestSubtitles):
    # from http://www.3playmedia.com/services-features/tools/integrations/theplatform/
    # (see http://theplatform.com/about/partners/type/subtitles-closed-captioning/)
    url = 'theplatform:JFUjUE1_ehvq'
    IE = ThePlatformIE

    def test_allsubtitles(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(set(subtitles.keys()), {'en'})
        self.assertEqual(md5(subtitles['en']), '97e7670cbae3c4d26ae8bcc7fdd78d4b')


@is_download_test
@unittest.skip('IE broken')
class TestThePlatformFeedSubtitles(BaseTestSubtitles):
    url = 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207'
    IE = ThePlatformFeedIE

    def test_allsubtitles(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(set(subtitles.keys()), {'en'})
        self.assertEqual(md5(subtitles['en']), '48649a22e82b2da21c9a67a395eedade')


@is_download_test
class TestRtveSubtitles(BaseTestSubtitles):
    url = 'http://www.rtve.es/alacarta/videos/los-misterios-de-laura/misterios-laura-capitulo-32-misterio-del-numero-17-2-parte/2428621/'
    IE = RTVEALaCartaIE

    def test_allsubtitles(self):
        print('Skipping, only available from Spain')
        return
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(set(subtitles.keys()), {'es'})
        self.assertEqual(md5(subtitles['es']), '69e70cae2d40574fb7316f31d6eb7fca')


@is_download_test
class TestDemocracynowSubtitles(BaseTestSubtitles):
    url = 'http://www.democracynow.org/shows/2015/7/3'
    IE = DemocracynowIE

    def test_allsubtitles(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(set(subtitles.keys()), {'en'})
        self.assertEqual(md5(subtitles['en']), 'a3cc4c0b5eadd74d9974f1c1f5101045')

    def test_subtitles_in_page(self):
        self.url = 'http://www.democracynow.org/2015/7/3/this_flag_comes_down_today_bree'
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(set(subtitles.keys()), {'en'})
        self.assertEqual(md5(subtitles['en']), 'a3cc4c0b5eadd74d9974f1c1f5101045')


@is_download_test
class TestPBSSubtitles(BaseTestSubtitles):
    url = 'https://www.pbs.org/video/how-fantasy-reflects-our-world-picecq/'
    IE = PBSIE

    def test_allsubtitles(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(set(subtitles.keys()), {'en'})

    def test_subtitles_dfxp_format(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['subtitlesformat'] = 'dfxp'
        subtitles = self.getSubtitles()
        self.assertIn(md5(subtitles['en']), ['643b034254cdc3768ff1e750b6b5873b'])

    def test_subtitles_vtt_format(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['subtitlesformat'] = 'vtt'
        subtitles = self.getSubtitles()
        self.assertIn(
            md5(subtitles['en']),
            ['937a05711555b165d4c55a9667017045', 'f49ea998d6824d94959c8152a368ff73'])

    def test_subtitles_srt_format(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['subtitlesformat'] = 'srt'
        subtitles = self.getSubtitles()
        self.assertIn(md5(subtitles['en']), ['2082c21b43759d9bf172931b2f2ca371'])

    def test_subtitles_sami_format(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['subtitlesformat'] = 'sami'
        subtitles = self.getSubtitles()
        self.assertIn(md5(subtitles['en']), ['4256b16ac7da6a6780fafd04294e85cd'])


if __name__ == '__main__':
    unittest.main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_cookies.py
test/test_cookies.py
import datetime as dt
import unittest

from yt_dlp import cookies
from yt_dlp.cookies import (
    LenientSimpleCookie,
    LinuxChromeCookieDecryptor,
    MacChromeCookieDecryptor,
    WindowsChromeCookieDecryptor,
    _get_linux_desktop_environment,
    _LinuxDesktopEnvironment,
    parse_safari_cookies,
    pbkdf2_sha1,
)


class Logger:
    def debug(self, message, *args, **kwargs):
        print(f'[verbose] {message}')

    def info(self, message, *args, **kwargs):
        print(message)

    def warning(self, message, *args, **kwargs):
        self.error(message)

    def error(self, message, *args, **kwargs):
        raise Exception(message)


class MonkeyPatch:
    def __init__(self, module, temporary_values):
        self._module = module
        self._temporary_values = temporary_values
        self._backup_values = {}

    def __enter__(self):
        for name, temp_value in self._temporary_values.items():
            self._backup_values[name] = getattr(self._module, name)
            setattr(self._module, name, temp_value)

    def __exit__(self, exc_type, exc_val, exc_tb):
        for name, backup_value in self._backup_values.items():
            setattr(self._module, name, backup_value)


class TestCookies(unittest.TestCase):
    def test_get_desktop_environment(self):
        """ based on https://chromium.googlesource.com/chromium/src/+/refs/heads/main/base/nix/xdg_util_unittest.cc """
        test_cases = [
            ({}, _LinuxDesktopEnvironment.OTHER),
            ({'DESKTOP_SESSION': 'my_custom_de'}, _LinuxDesktopEnvironment.OTHER),
            ({'XDG_CURRENT_DESKTOP': 'my_custom_de'}, _LinuxDesktopEnvironment.OTHER),
            ({'DESKTOP_SESSION': 'gnome'}, _LinuxDesktopEnvironment.GNOME),
            ({'DESKTOP_SESSION': 'mate'}, _LinuxDesktopEnvironment.GNOME),
            ({'DESKTOP_SESSION': 'kde4'}, _LinuxDesktopEnvironment.KDE4),
            ({'DESKTOP_SESSION': 'kde'}, _LinuxDesktopEnvironment.KDE3),
            ({'DESKTOP_SESSION': 'xfce'}, _LinuxDesktopEnvironment.XFCE),
            ({'XDG_CURRENT_DESKTOP': 'my_custom_de', 'DESKTOP_SESSION': 'gnome'}, _LinuxDesktopEnvironment.GNOME),
            ({'XDG_CURRENT_DESKTOP': 'my_custom_de', 'DESKTOP_SESSION': 'mate'}, _LinuxDesktopEnvironment.GNOME),
            ({'XDG_CURRENT_DESKTOP': 'my_custom_de', 'DESKTOP_SESSION': 'kde4'}, _LinuxDesktopEnvironment.KDE4),
            ({'XDG_CURRENT_DESKTOP': 'my_custom_de', 'DESKTOP_SESSION': 'kde'}, _LinuxDesktopEnvironment.KDE3),
            ({'XDG_CURRENT_DESKTOP': 'my_custom_de', 'DESKTOP_SESSION': 'xfce'}, _LinuxDesktopEnvironment.XFCE),
            ({'XDG_CURRENT_DESKTOP': 'my_custom_de', 'DESKTOP_SESSION': 'my_custom_de', 'GNOME_DESKTOP_SESSION_ID': 1}, _LinuxDesktopEnvironment.GNOME),
            ({'GNOME_DESKTOP_SESSION_ID': 1}, _LinuxDesktopEnvironment.GNOME),
            ({'KDE_FULL_SESSION': 1}, _LinuxDesktopEnvironment.KDE3),
            ({'KDE_FULL_SESSION': 1, 'DESKTOP_SESSION': 'kde4'}, _LinuxDesktopEnvironment.KDE4),
            ({'XDG_CURRENT_DESKTOP': 'X-Cinnamon'}, _LinuxDesktopEnvironment.CINNAMON),
            ({'XDG_CURRENT_DESKTOP': 'Deepin'}, _LinuxDesktopEnvironment.DEEPIN),
            ({'XDG_CURRENT_DESKTOP': 'GNOME'}, _LinuxDesktopEnvironment.GNOME),
            ({'XDG_CURRENT_DESKTOP': 'GNOME:GNOME-Classic'}, _LinuxDesktopEnvironment.GNOME),
            ({'XDG_CURRENT_DESKTOP': 'GNOME : GNOME-Classic'}, _LinuxDesktopEnvironment.GNOME),
            ({'XDG_CURRENT_DESKTOP': 'ubuntu:GNOME'}, _LinuxDesktopEnvironment.GNOME),
            ({'XDG_CURRENT_DESKTOP': 'Unity', 'DESKTOP_SESSION': 'gnome-fallback'}, _LinuxDesktopEnvironment.GNOME),
            ({'XDG_CURRENT_DESKTOP': 'KDE', 'KDE_SESSION_VERSION': '5'}, _LinuxDesktopEnvironment.KDE5),
            ({'XDG_CURRENT_DESKTOP': 'KDE', 'KDE_SESSION_VERSION': '6'}, _LinuxDesktopEnvironment.KDE6),
            ({'XDG_CURRENT_DESKTOP': 'KDE'}, _LinuxDesktopEnvironment.KDE4),
            ({'XDG_CURRENT_DESKTOP': 'Pantheon'}, _LinuxDesktopEnvironment.PANTHEON),
            ({'XDG_CURRENT_DESKTOP': 'UKUI'}, _LinuxDesktopEnvironment.UKUI),
            ({'XDG_CURRENT_DESKTOP': 'Unity'}, _LinuxDesktopEnvironment.UNITY),
            ({'XDG_CURRENT_DESKTOP': 'Unity:Unity7'}, _LinuxDesktopEnvironment.UNITY),
            ({'XDG_CURRENT_DESKTOP': 'Unity:Unity8'}, _LinuxDesktopEnvironment.UNITY),
        ]

        for env, expected_desktop_environment in test_cases:
            self.assertEqual(_get_linux_desktop_environment(env, Logger()), expected_desktop_environment)

    def test_chrome_cookie_decryptor_linux_derive_key(self):
        key = LinuxChromeCookieDecryptor.derive_key(b'abc')
        self.assertEqual(key, b'7\xa1\xec\xd4m\xfcA\xc7\xb19Z\xd0\x19\xdcM\x17')

    def test_chrome_cookie_decryptor_mac_derive_key(self):
        key = MacChromeCookieDecryptor.derive_key(b'abc')
        self.assertEqual(key, b'Y\xe2\xc0\xd0P\xf6\xf4\xe1l\xc1\x8cQ\xcb|\xcdY')

    def test_chrome_cookie_decryptor_linux_v10(self):
        with MonkeyPatch(cookies, {'_get_linux_keyring_password': lambda *args, **kwargs: b''}):
            encrypted_value = b'v10\xccW%\xcd\xe6\xe6\x9fM" \xa7\xb0\xca\xe4\x07\xd6'
            value = 'USD'
            decryptor = LinuxChromeCookieDecryptor('Chrome', Logger())
            self.assertEqual(decryptor.decrypt(encrypted_value), value)

    def test_chrome_cookie_decryptor_linux_v11(self):
        with MonkeyPatch(cookies, {'_get_linux_keyring_password': lambda *args, **kwargs: b''}):
            encrypted_value = b'v11#\x81\x10>`w\x8f)\xc0\xb2\xc1\r\xf4\x1al\xdd\x93\xfd\xf8\xf8N\xf2\xa9\x83\xf1\xe9o\x0elVQd'
            value = 'tz=Europe.London'
            decryptor = LinuxChromeCookieDecryptor('Chrome', Logger())
            self.assertEqual(decryptor.decrypt(encrypted_value), value)

    def test_chrome_cookie_decryptor_linux_v10_meta24(self):
        with MonkeyPatch(cookies, {'_get_linux_keyring_password': lambda *args, **kwargs: b''}):
            encrypted_value = b'v10\x1f\xe4\x0e[\x83\x0c\xcc*kPi \xce\x8d\x1d\xbb\x80\r\x11\t\xbb\x9e^Hy\x94\xf4\x963\x9f\x82\xba\xfe\xa1\xed\xb9\xf1)\x00710\x92\xc8/<\x96B'
            value = 'DE'
            decryptor = LinuxChromeCookieDecryptor('Chrome', Logger(), meta_version=24)
            self.assertEqual(decryptor.decrypt(encrypted_value), value)

    def test_chrome_cookie_decryptor_windows_v10(self):
        with MonkeyPatch(cookies, {
            '_get_windows_v10_key': lambda *args, **kwargs: b'Y\xef\xad\xad\xeerp\xf0Y\xe6\x9b\x12\xc2<z\x16]\n\xbb\xb8\xcb\xd7\x9bA\xc3\x14e\x99{\xd6\xf4&',
        }):
            encrypted_value = b'v10T\xb8\xf3\xb8\x01\xa7TtcV\xfc\x88\xb8\xb8\xef\x05\xb5\xfd\x18\xc90\x009\xab\xb1\x893\x85)\x87\xe1\xa9-\xa3\xad='
            value = '32101439'
            decryptor = WindowsChromeCookieDecryptor('', Logger())
            self.assertEqual(decryptor.decrypt(encrypted_value), value)

    def test_chrome_cookie_decryptor_windows_v10_meta24(self):
        with MonkeyPatch(cookies, {
            '_get_windows_v10_key': lambda *args, **kwargs: b'\xea\x8b\x02\xc3\xc6\xc5\x99\xc3\xa3[ j\xfa\xf6\xfcU\xac\x13u\xdc\x0c\x0e\xf1\x03\x90\xb6\xdf\xbb\x8fL\xb1\xb2',
        }):
            encrypted_value = b'v10dN\xe1\xacy\x84^\xe1I\xact\x03r\xfb\xe2\xce{^\x0e<(\xb0y\xeb\x01\xfb@"\x9e\x8c\xa53~\xdb*\x8f\xac\x8b\xe3\xfd3\x06\xe5\x93\x19OyOG\xb2\xfb\x1d$\xc0\xda\x13j\x9e\xfe\xc5\xa3\xa8\xfe\xd9'
            value = '1234'
            decryptor = WindowsChromeCookieDecryptor('', Logger(), meta_version=24)
            self.assertEqual(decryptor.decrypt(encrypted_value), value)

    def test_chrome_cookie_decryptor_mac_v10(self):
        with MonkeyPatch(cookies, {'_get_mac_keyring_password': lambda *args, **kwargs: b'6eIDUdtKAacvlHwBVwvg/Q=='}):
            encrypted_value = b'v10\xb3\xbe\xad\xa1[\x9fC\xa1\x98\xe0\x9a\x01\xd9\xcf\xbfc'
            value = '2021-06-01-22'
            decryptor = MacChromeCookieDecryptor('', Logger())
            self.assertEqual(decryptor.decrypt(encrypted_value), value)

    def test_safari_cookie_parsing(self):
        cookies = (
            b'cook\x00\x00\x00\x01\x00\x00\x00i\x00\x00\x01\x00\x01\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00Y'
            b'\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x008\x00\x00\x00B\x00\x00\x00F\x00\x00\x00H'
            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x03\xa5>\xc3A\x00\x00\x80\xc3\x07:\xc3A'
            b'localhost\x00foo\x00/\x00test%20%3Bcookie\x00\x00\x00\x054\x07\x17 \x05\x00\x00\x00Kbplist00\xd1\x01'
            b'\x02_\x10\x18NSHTTPCookieAcceptPolicy\x10\x02\x08\x0b&\x00\x00\x00\x00\x00\x00\x01\x01\x00\x00\x00'
            b'\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(')

        jar = parse_safari_cookies(cookies)
        self.assertEqual(len(jar), 1)
        cookie = next(iter(jar))
        self.assertEqual(cookie.domain, 'localhost')
        self.assertEqual(cookie.port, None)
        self.assertEqual(cookie.path, '/')
        self.assertEqual(cookie.name, 'foo')
        self.assertEqual(cookie.value, 'test%20%3Bcookie')
        self.assertFalse(cookie.secure)
        expected_expiration = dt.datetime(2021, 6, 18, 21, 39, 19, tzinfo=dt.timezone.utc)
        self.assertEqual(cookie.expires, int(expected_expiration.timestamp()))

    def test_pbkdf2_sha1(self):
        key = pbkdf2_sha1(b'peanuts', b' ' * 16, 1, 16)
        self.assertEqual(key, b'g\xe1\x8e\x0fQ\x1c\x9b\xf3\xc9`!\xaa\x90\xd9\xd34')


class TestLenientSimpleCookie(unittest.TestCase):
    def _run_tests(self, *cases):
        for message, raw_cookie, expected in cases:
            cookie = LenientSimpleCookie(raw_cookie)

            with self.subTest(message, expected=expected):
                self.assertEqual(cookie.keys(), expected.keys(), message)

                for key, expected_value in expected.items():
                    morsel = cookie[key]
                    if isinstance(expected_value, tuple):
                        expected_value, expected_attributes = expected_value
                    else:
                        expected_attributes = {}

                    attributes = {
                        key: value
                        for key, value in dict(morsel).items()
                        if value != ''
                    }
                    self.assertEqual(attributes, expected_attributes, message)
                    self.assertEqual(morsel.value, expected_value, message)

    def test_parsing(self):
        self._run_tests(
            # Copied from https://github.com/python/cpython/blob/v3.10.7/Lib/test/test_http_cookies.py
            (
                'Test basic cookie',
                'chips=ahoy; vienna=finger',
                {'chips': 'ahoy', 'vienna': 'finger'},
            ),
            (
                'Test quoted cookie',
                'keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"',
                {'keebler': 'E=mc2; L="Loves"; fudge=\012;'},
            ),
            (
                "Allow '=' in an unquoted value",
                'keebler=E=mc2',
                {'keebler': 'E=mc2'},
            ),
            (
                "Allow cookies with ':' in their name",
                'key:term=value:term',
                {'key:term': 'value:term'},
            ),
            (
                "Allow '[' and ']' in cookie values",
                'a=b; c=[; d=r; f=h',
                {'a': 'b', 'c': '[', 'd': 'r', 'f': 'h'},
            ),
            (
                'Test basic cookie attributes',
                'Customer="WILE_E_COYOTE"; Version=1; Path=/acme',
                {'Customer': ('WILE_E_COYOTE', {'version': '1', 'path': '/acme'})},
            ),
            (
                'Test flag only cookie attributes',
                'Customer="WILE_E_COYOTE"; HttpOnly; Secure',
                {'Customer': ('WILE_E_COYOTE', {'httponly': True, 'secure': True})},
            ),
            (
                'Test flag only attribute with values',
                'eggs=scrambled; httponly=foo; secure=bar; Path=/bacon',
                {'eggs': ('scrambled', {'httponly': 'foo', 'secure': 'bar', 'path': '/bacon'})},
            ),
            (
                "Test special case for 'expires' attribute, 4 digit year",
                'Customer="W"; expires=Wed, 01 Jan 2010 00:00:00 GMT',
                {'Customer': ('W', {'expires': 'Wed, 01 Jan 2010 00:00:00 GMT'})},
            ),
            (
                "Test special case for 'expires' attribute, 2 digit year",
                'Customer="W"; expires=Wed, 01 Jan 98 00:00:00 GMT',
                {'Customer': ('W', {'expires': 'Wed, 01 Jan 98 00:00:00 GMT'})},
            ),
            (
                'Test extra spaces in keys and values',
                'eggs = scrambled ; secure ; path = bar ; foo=foo ',
                {'eggs': ('scrambled', {'secure': True, 'path': 'bar'}), 'foo': 'foo'},
            ),
            (
                'Test quoted attributes',
                'Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"',
                {'Customer': ('WILE_E_COYOTE', {'version': '1', 'path': '/acme'})},
            ),
            # Our own tests that CPython passes
            (
                "Allow ';' in quoted value",
                'chips="a;hoy"; vienna=finger',
                {'chips': 'a;hoy', 'vienna': 'finger'},
            ),
            (
                'Keep only the last set value',
                'a=c; a=b',
                {'a': 'b'},
            ),
        )

    def test_lenient_parsing(self):
        self._run_tests(
            (
                'Ignore and try to skip invalid cookies',
                'chips={"ahoy;": 1}; vienna="finger;"',
                {'vienna': 'finger;'},
            ),
            (
                'Ignore cookies without a name',
                'a=b; unnamed; c=d',
                {'a': 'b', 'c': 'd'},
            ),
            (
                "Ignore '\"' cookie without name",
                'a=b; "; c=d',
                {'a': 'b', 'c': 'd'},
            ),
            (
                'Skip all space separated values',
                'x a=b c=d x; e=f',
                {'a': 'b', 'c': 'd', 'e': 'f'},
            ),
            (
                'Skip all space separated values',
                'x a=b; data={"complex": "json", "with": "key=value"}; x c=d x',
                {'a': 'b', 'c': 'd'},
            ),
            (
                'Expect quote mending',
                'a=b; invalid="; c=d',
                {'a': 'b', 'c': 'd'},
            ),
            (
                'Reset morsel after invalid to not capture attributes',
                'a=b; invalid; Version=1; c=d',
                {'a': 'b', 'c': 'd'},
            ),
            (
                'Reset morsel after invalid to not capture attributes',
                'a=b; $invalid; $Version=1; c=d',
                {'a': 'b', 'c': 'd'},
            ),
            (
                'Continue after non-flag attribute without value',
                'a=b; path; Version=1; c=d',
                {'a': 'b', 'c': 'd'},
            ),
            (
                'Allow cookie attributes with `$` prefix',
                'Customer="WILE_E_COYOTE"; $Version=1; $Secure; $Path=/acme',
                {'Customer': ('WILE_E_COYOTE', {'version': '1', 'secure': True, 'path': '/acme'})},
            ),
            (
                'Invalid Morsel keys should not result in an error',
                'Key=Value; [Invalid]=Value; Another=Value',
                {'Key': 'Value', 'Another': 'Value'},
            ),
        )
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
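A minimal cross-check sketch for the file above, assuming only that yt_dlp is importable: the pbkdf2_sha1 vector asserted in test_pbkdf2_sha1 should agree with the standard library's hashlib.pbkdf2_hmac for the same inputs, since the helper is a thin wrapper over it.

import hashlib

from yt_dlp.cookies import pbkdf2_sha1

# same inputs as test_pbkdf2_sha1; both sides should produce the identical 16-byte key
expected = hashlib.pbkdf2_hmac('sha1', b'peanuts', b' ' * 16, 1, 16)
assert pbkdf2_sha1(b'peanuts', b' ' * 16, 1, 16) == expected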
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_cache.py
test/test_cache.py
#!/usr/bin/env python3 # Allow direct execution import os import sys import unittest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import shutil from test.helper import FakeYDL from yt_dlp.cache import Cache def _is_empty(d): return not bool(os.listdir(d)) def _mkdir(d): if not os.path.exists(d): os.mkdir(d) class TestCache(unittest.TestCase): def setUp(self): TEST_DIR = os.path.dirname(os.path.abspath(__file__)) TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata') _mkdir(TESTDATA_DIR) self.test_dir = os.path.join(TESTDATA_DIR, 'cache_test') self.tearDown() def tearDown(self): if os.path.exists(self.test_dir): shutil.rmtree(self.test_dir) def test_cache(self): ydl = FakeYDL({ 'cachedir': self.test_dir, }) c = Cache(ydl) obj = {'x': 1, 'y': ['ä', '\\a', True]} self.assertEqual(c.load('test_cache', 'k.'), None) c.store('test_cache', 'k.', obj) self.assertEqual(c.load('test_cache', 'k2'), None) self.assertFalse(_is_empty(self.test_dir)) self.assertEqual(c.load('test_cache', 'k.'), obj) self.assertEqual(c.load('test_cache', 'y'), None) self.assertEqual(c.load('test_cache2', 'k.'), None) c.remove() self.assertFalse(os.path.exists(self.test_dir)) self.assertEqual(c.load('test_cache', 'k.'), None) if __name__ == '__main__': unittest.main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
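A minimal usage sketch of the store/load round-trip that TestCache exercises, against a throwaway directory; the section and key names here are arbitrary placeholders, not anything the test uses.

import tempfile

from yt_dlp import YoutubeDL
from yt_dlp.cache import Cache

with tempfile.TemporaryDirectory() as cachedir:
    cache = Cache(YoutubeDL({'cachedir': cachedir}))
    cache.store('example_section', 'some-key', {'x': 1})           # serialized as JSON on disk
    assert cache.load('example_section', 'some-key') == {'x': 1}   # round-trips intact
    assert cache.load('example_section', 'missing-key') is None    # absent keys return None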
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_aes.py
test/test_aes.py
#!/usr/bin/env python3 # Allow direct execution import os import sys import unittest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import base64 from yt_dlp.aes import ( aes_cbc_decrypt, aes_cbc_decrypt_bytes, aes_cbc_encrypt, aes_ctr_decrypt, aes_ctr_encrypt, aes_decrypt, aes_decrypt_text, aes_ecb_decrypt, aes_ecb_encrypt, aes_encrypt, aes_gcm_decrypt_and_verify, aes_gcm_decrypt_and_verify_bytes, key_expansion, pad_block, ) from yt_dlp.dependencies import Cryptodome # the encrypted data can be generated with 'devscripts/generate_aes_testdata.py' class TestAES(unittest.TestCase): def setUp(self): self.key = self.iv = [0x20, 0x15] + 14 * [0] self.secret_msg = b'Secret message goes here' def test_encrypt(self): msg = b'message' key = list(range(16)) encrypted = aes_encrypt(list(msg), key) decrypted = bytes(aes_decrypt(encrypted, key)) self.assertEqual(decrypted, msg) def test_cbc_decrypt(self): data = b'\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6\x27\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd' decrypted = bytes(aes_cbc_decrypt(list(data), self.key, self.iv)) self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg) if Cryptodome.AES: decrypted = aes_cbc_decrypt_bytes(data, bytes(self.key), bytes(self.iv)) self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg) def test_cbc_encrypt(self): data = list(self.secret_msg) encrypted = bytes(aes_cbc_encrypt(data, self.key, self.iv)) self.assertEqual( encrypted, b'\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6\'\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd') def test_ctr_decrypt(self): data = list(b'\x03\xc7\xdd\xd4\x8e\xb3\xbc\x1a*O\xdc1\x12+8Aio\xd1z\xb5#\xaf\x08') decrypted = bytes(aes_ctr_decrypt(data, self.key, self.iv)) self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg) def test_ctr_encrypt(self): data = list(self.secret_msg) encrypted = bytes(aes_ctr_encrypt(data, self.key, self.iv)) self.assertEqual( encrypted, b'\x03\xc7\xdd\xd4\x8e\xb3\xbc\x1a*O\xdc1\x12+8Aio\xd1z\xb5#\xaf\x08') def test_gcm_decrypt(self): data = b'\x159Y\xcf5eud\x90\x9c\x85&]\x14\x1d\x0f.\x08\xb4T\xe4/\x17\xbd' authentication_tag = b'\xe8&I\x80rI\x07\x9d}YWuU@:e' decrypted = bytes(aes_gcm_decrypt_and_verify( list(data), self.key, list(authentication_tag), self.iv[:12])) self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg) if Cryptodome.AES: decrypted = aes_gcm_decrypt_and_verify_bytes( data, bytes(self.key), authentication_tag, bytes(self.iv[:12])) self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg) def test_gcm_aligned_decrypt(self): data = b'\x159Y\xcf5eud\x90\x9c\x85&]\x14\x1d\x0f' authentication_tag = b'\x08\xb1\x9d!&\x98\xd0\xeaRq\x90\xe6;\xb5]\xd8' decrypted = bytes(aes_gcm_decrypt_and_verify( list(data), self.key, list(authentication_tag), self.iv[:12])) self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg[:16]) if Cryptodome.AES: decrypted = aes_gcm_decrypt_and_verify_bytes( data, bytes(self.key), authentication_tag, bytes(self.iv[:12])) self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg[:16]) def test_decrypt_text(self): password = bytes(self.key).decode() encrypted = base64.b64encode( bytes(self.iv[:8]) + b'\x17\x15\x93\xab\x8d\x80V\xcdV\xe0\t\xcdo\xc2\xa5\xd8ksM\r\xe27N\xae', ).decode() decrypted = (aes_decrypt_text(encrypted, password, 16)) self.assertEqual(decrypted, self.secret_msg) password = bytes(self.key).decode() encrypted = base64.b64encode( bytes(self.iv[:8]) + b'\x0b\xe6\xa4\xd9z\x0e\xb8\xb9\xd0\xd4i_\x85\x1d\x99\x98_\xe5\x80\xe7.\xbf\xa5\x83', ).decode() decrypted = 
(aes_decrypt_text(encrypted, password, 32)) self.assertEqual(decrypted, self.secret_msg) def test_ecb_encrypt(self): data = list(self.secret_msg) encrypted = bytes(aes_ecb_encrypt(data, self.key)) self.assertEqual( encrypted, b'\xaa\x86]\x81\x97>\x02\x92\x9d\x1bR[[L/u\xd3&\xd1(h\xde{\x81\x94\xba\x02\xae\xbd\xa6\xd0:') def test_ecb_decrypt(self): data = list(b'\xaa\x86]\x81\x97>\x02\x92\x9d\x1bR[[L/u\xd3&\xd1(h\xde{\x81\x94\xba\x02\xae\xbd\xa6\xd0:') decrypted = bytes(aes_ecb_decrypt(data, self.key, self.iv)) self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg) def test_key_expansion(self): key = '4f6bdaa39e2f8cb07f5e722d9edef314' self.assertEqual(key_expansion(list(bytearray.fromhex(key))), [ 0x4F, 0x6B, 0xDA, 0xA3, 0x9E, 0x2F, 0x8C, 0xB0, 0x7F, 0x5E, 0x72, 0x2D, 0x9E, 0xDE, 0xF3, 0x14, 0x53, 0x66, 0x20, 0xA8, 0xCD, 0x49, 0xAC, 0x18, 0xB2, 0x17, 0xDE, 0x35, 0x2C, 0xC9, 0x2D, 0x21, 0x8C, 0xBE, 0xDD, 0xD9, 0x41, 0xF7, 0x71, 0xC1, 0xF3, 0xE0, 0xAF, 0xF4, 0xDF, 0x29, 0x82, 0xD5, 0x2D, 0xAD, 0xDE, 0x47, 0x6C, 0x5A, 0xAF, 0x86, 0x9F, 0xBA, 0x00, 0x72, 0x40, 0x93, 0x82, 0xA7, 0xF9, 0xBE, 0x82, 0x4E, 0x95, 0xE4, 0x2D, 0xC8, 0x0A, 0x5E, 0x2D, 0xBA, 0x4A, 0xCD, 0xAF, 0x1D, 0x54, 0xC7, 0x26, 0x98, 0xC1, 0x23, 0x0B, 0x50, 0xCB, 0x7D, 0x26, 0xEA, 0x81, 0xB0, 0x89, 0xF7, 0x93, 0x60, 0x4E, 0x94, 0x52, 0x43, 0x45, 0xC4, 0x99, 0x3E, 0x63, 0x2E, 0x18, 0x8E, 0xEA, 0xD9, 0xCA, 0xE7, 0x7B, 0x39, 0x98, 0xA4, 0x3E, 0xFD, 0x01, 0x9A, 0x5D, 0xD3, 0x19, 0x14, 0xB7, 0x0A, 0xB0, 0x4E, 0x1C, 0xED, 0x28, 0xEA, 0x22, 0x10, 0x29, 0x70, 0x7F, 0xC3, 0x30, 0x64, 0xC8, 0xC9, 0xE8, 0xA6, 0xC1, 0xE9, 0xC0, 0x4C, 0xE3, 0xF9, 0xE9, 0x3C, 0x9C, 0x3A, 0xD9, 0x58, 0x54, 0xF3, 0xB4, 0x86, 0xCC, 0xDC, 0x74, 0xCA, 0x2F, 0x25, 0x9D, 0xF6, 0xB3, 0x1F, 0x44, 0xAE, 0xE7, 0xEC]) def test_pad_block(self): block = [0x21, 0xA0, 0x43, 0xFF] self.assertEqual(pad_block(block, 'pkcs7'), [*block, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C]) self.assertEqual(pad_block(block, 'iso7816'), [*block, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]) self.assertEqual(pad_block(block, 'whitespace'), [*block, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20]) self.assertEqual(pad_block(block, 'zero'), [*block, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]) block = list(range(16)) for mode in ('pkcs7', 'iso7816', 'whitespace', 'zero'): self.assertEqual(pad_block(block, mode), block, mode) if __name__ == '__main__': unittest.main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
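One detail worth spelling out about the file above: secret_msg is 24 bytes, so PKCS#7 pads it to two 16-byte blocks with eight 0x08 bytes, which is exactly what the repeated rstrip(b'\x08') strips. A round-trip sketch using the same fixtures, assuming a yt-dlp checkout on the path:

from yt_dlp.aes import aes_cbc_decrypt, aes_cbc_encrypt

key = iv = [0x20, 0x15] + 14 * [0]       # same 16-byte fixtures as TestAES.setUp
message = b'Secret message goes here'    # 24 bytes -> padded to 32 with b'\x08' * 8
ciphertext = aes_cbc_encrypt(list(message), key, iv)
plaintext = bytes(aes_cbc_decrypt(ciphertext, key, iv))
assert plaintext.rstrip(b'\x08') == message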
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/conftest.py
test/conftest.py
import inspect import pytest from yt_dlp.networking import RequestHandler from yt_dlp.networking.common import _REQUEST_HANDLERS from yt_dlp.utils._utils import _YDLLogger as FakeLogger @pytest.fixture def handler(request): RH_KEY = getattr(request, 'param', None) if not RH_KEY: return if inspect.isclass(RH_KEY) and issubclass(RH_KEY, RequestHandler): handler = RH_KEY elif RH_KEY in _REQUEST_HANDLERS: handler = _REQUEST_HANDLERS[RH_KEY] else: pytest.skip(f'{RH_KEY} request handler is not available') class HandlerWrapper(handler): RH_KEY = handler.RH_KEY def __init__(self, **kwargs): super().__init__(logger=FakeLogger, **kwargs) return HandlerWrapper @pytest.fixture(autouse=True) def skip_handler(request, handler): """usage: pytest.mark.skip_handler('my_handler', 'reason')""" for marker in request.node.iter_markers('skip_handler'): if marker.args[0] == handler.RH_KEY: pytest.skip(marker.args[1] if len(marker.args) > 1 else '') @pytest.fixture(autouse=True) def skip_handler_if(request, handler): """usage: pytest.mark.skip_handler_if('my_handler', lambda request: True, 'reason')""" for marker in request.node.iter_markers('skip_handler_if'): if marker.args[0] == handler.RH_KEY and marker.args[1](request): pytest.skip(marker.args[2] if len(marker.args) > 2 else '') @pytest.fixture(autouse=True) def skip_handlers_if(request, handler): """usage: pytest.mark.skip_handlers_if(lambda request, handler: True, 'reason')""" for marker in request.node.iter_markers('skip_handlers_if'): if handler and marker.args[0](request, handler): pytest.skip(marker.args[1] if len(marker.args) > 1 else '') @pytest.fixture(autouse=True) def handler_flaky(request, handler): """Mark a certain handler as being flaky. This will skip the test if pytest does not get run using `--allow-flaky` usage: pytest.mark.handler_flaky('my_handler', os.name != 'nt', reason='reason') """ for marker in request.node.iter_markers(handler_flaky.__name__): if ( marker.args[0] == handler.RH_KEY and (not marker.args[1:] or any(marker.args[1:])) and request.config.getoption('disallow_flaky') ): reason = marker.kwargs.get('reason') pytest.skip(f'flaky: {reason}' if reason else 'flaky') def pytest_addoption(parser, pluginmanager): parser.addoption( '--disallow-flaky', action='store_true', help='disallow flaky tests from running.', ) def pytest_configure(config): config.addinivalue_line( 'markers', 'skip_handler(handler): skip test for the given handler', ) config.addinivalue_line( 'markers', 'skip_handler_if(handler): skip test for the given handler if condition is true', ) config.addinivalue_line( 'markers', 'skip_handlers_if(handler): skip test for handlers when the condition is true', ) config.addinivalue_line( 'markers', 'handler_flaky(handler): mark handler as flaky if condition is true', )
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
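A sketch of how a test module consumes the fixtures above; the test name is hypothetical, and indirect parametrization is what routes the string keys through the handler fixture.

import pytest

@pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
def test_example(handler):
    # `handler` is the HandlerWrapper subclass built by the fixture, so
    # instantiating it injects the fake logger; unavailable keys get skipped
    with handler() as rh:
        assert rh.RH_KEY in ('Urllib', 'Requests')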
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_download.py
test/test_download.py
#!/usr/bin/env python3 # Allow direct execution import os import sys import unittest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import collections import hashlib import json from test.helper import ( assertGreaterEqual, assertLessEqual, expect_info_dict, expect_warnings, get_params, gettestcases, getwebpagetestcases, is_download_test, try_rm, ) import yt_dlp.YoutubeDL # isort: split from yt_dlp.extractor import get_info_extractor from yt_dlp.networking.exceptions import HTTPError, TransportError from yt_dlp.utils import ( DownloadError, ExtractorError, UnavailableVideoError, YoutubeDLError, format_bytes, join_nonempty, ) RETRIES = 3 class YoutubeDL(yt_dlp.YoutubeDL): def __init__(self, *args, **kwargs): self.to_stderr = self.to_screen self.processed_info_dicts = [] super().__init__(*args, **kwargs) def report_warning(self, message, *args, **kwargs): # Don't accept warnings during tests raise ExtractorError(message) def process_info(self, info_dict): self.processed_info_dicts.append(info_dict.copy()) return super().process_info(info_dict) def _file_md5(fn): with open(fn, 'rb') as f: return hashlib.md5(f.read()).hexdigest() normal_test_cases = gettestcases() webpage_test_cases = getwebpagetestcases() tests_counter = collections.defaultdict(collections.Counter) @is_download_test class TestDownload(unittest.TestCase): maxDiff = None COMPLETED_TESTS = {} def __str__(self): """Identify each test with the `add_ie` attribute, if available.""" cls, add_ie = type(self), getattr(self, self._testMethodName).add_ie return f'{self._testMethodName} ({cls.__module__}.{cls.__name__}){f" [{add_ie}]" if add_ie else ""}:' # Dynamically generate tests def generator(test_case, tname): def test_template(self): if self.COMPLETED_TESTS.get(tname): return self.COMPLETED_TESTS[tname] = True ie = yt_dlp.extractor.get_info_extractor(test_case['name'])() other_ies = [get_info_extractor(ie_key)() for ie_key in test_case.get('add_ie', [])] is_playlist = any(k.startswith('playlist') for k in test_case) test_cases = test_case.get( 'playlist', [] if is_playlist else [test_case]) def print_skipping(reason): print('Skipping {}: {}'.format(test_case['name'], reason)) self.skipTest(reason) if not ie.working(): print_skipping('IE marked as not _WORKING') for tc in test_cases: if tc.get('expected_exception'): continue info_dict = tc.get('info_dict', {}) params = tc.get('params', {}) if not info_dict.get('id'): raise Exception(f'Test {tname} definition incorrect - "id" key is not present') elif not info_dict.get('ext') and info_dict.get('_type', 'video') == 'video': if params.get('skip_download') and params.get('ignore_no_formats_error'): continue raise Exception(f'Test {tname} definition incorrect - "ext" key must be present to define the output file') if 'skip' in test_case: print_skipping(test_case['skip']) for other_ie in other_ies: if not other_ie.working(): print_skipping(f'test depends on {other_ie.ie_key()}IE, marked as not WORKING') params = get_params(test_case.get('params', {})) params['outtmpl'] = tname + '_' + params['outtmpl'] if is_playlist and 'playlist' not in test_case: params.setdefault('playlistend', max( test_case.get('playlist_mincount', -1), test_case.get('playlist_count', -2) + 1, test_case.get('playlist_maxcount', -2) + 1)) params.setdefault('skip_download', True) if 'playlist_duration_sum' not in test_case: params.setdefault('extract_flat', 'in_playlist') ydl = YoutubeDL(params, auto_init=False) ydl.add_default_info_extractors() finished_hook_called = set() def 
_hook(status): if status['status'] == 'finished': finished_hook_called.add(status['filename']) ydl.add_progress_hook(_hook) expect_warnings(ydl, test_case.get('expected_warnings', [])) def get_tc_filename(tc): return ydl.prepare_filename(dict(tc.get('info_dict', {}))) res_dict = None def match_exception(err): expected_exception = test_case.get('expected_exception') if not expected_exception: return False if err.__class__.__name__ == expected_exception: return True return any(exc.__class__.__name__ == expected_exception for exc in err.exc_info) def try_rm_tcs_files(tcs=None): if tcs is None: tcs = test_cases for tc in tcs: tc_filename = get_tc_filename(tc) try_rm(tc_filename) try_rm(tc_filename + '.part') try_rm(os.path.splitext(tc_filename)[0] + '.info.json') try_rm_tcs_files() try: test_url = test_case['url'] try_num = 1 while True: try: # We're not using .download here since that is just a shim # for outside error handling, and returns the exit code # instead of the result dict. res_dict = ydl.extract_info( test_url, force_generic_extractor=params.get('force_generic_extractor', False)) except (DownloadError, ExtractorError) as err: # Check if the exception is not a network related one if not isinstance(err.exc_info[1], (TransportError, UnavailableVideoError)) or (isinstance(err.exc_info[1], HTTPError) and err.exc_info[1].status == 503): if match_exception(err): return err.msg = f'{getattr(err, "msg", err)} ({tname})' raise if try_num == RETRIES: raise print(f'Retrying: {try_num} failed tries\n\n##########\n\n') try_num += 1 except YoutubeDLError as err: if match_exception(err): return raise else: break if is_playlist: self.assertTrue(res_dict['_type'] in ['playlist', 'multi_video']) self.assertTrue('entries' in res_dict) expect_info_dict(self, res_dict, test_case.get('info_dict', {})) num_entries = len(res_dict.get('entries', [])) if 'playlist_mincount' in test_case: mincount = test_case['playlist_mincount'] assertGreaterEqual( self, num_entries, mincount, f'Expected at least {mincount} entries in playlist {test_url}, but got only {num_entries}') if 'playlist_count' in test_case: count = test_case['playlist_count'] got = num_entries if num_entries <= count else 'more' self.assertEqual( num_entries, count, f'Expected exactly {count} entries in playlist {test_url}, but got {got}') if 'playlist_maxcount' in test_case: maxcount = test_case['playlist_maxcount'] assertLessEqual( self, num_entries, maxcount, f'Expected at most {maxcount} entries in playlist {test_url}, but got more') if 'playlist_duration_sum' in test_case: got_duration = sum(e['duration'] for e in res_dict['entries']) self.assertEqual( test_case['playlist_duration_sum'], got_duration) # Generalize both playlists and single videos to unified format for # simplicity if 'entries' not in res_dict: res_dict['entries'] = [res_dict] for tc_num, tc in enumerate(test_cases): tc_res_dict = res_dict['entries'][tc_num] # First, check test cases' data against extracted data alone expect_info_dict(self, tc_res_dict, tc.get('info_dict', {})) if tc_res_dict.get('_type', 'video') != 'video': continue # Now, check downloaded file consistency tc_filename = get_tc_filename(tc) if not test_case.get('params', {}).get('skip_download', False): self.assertTrue(os.path.exists(tc_filename), msg='Missing file ' + tc_filename) self.assertTrue(tc_filename in finished_hook_called) expected_minsize = tc.get('file_minsize', 10000) if expected_minsize is not None: if params.get('test'): expected_minsize = max(expected_minsize, 10000) got_fsize = 
os.path.getsize(tc_filename) assertGreaterEqual( self, got_fsize, expected_minsize, f'Expected {tc_filename} to be at least {format_bytes(expected_minsize)}, ' f'but it\'s only {format_bytes(got_fsize)} ') if 'md5' in tc: md5_for_file = _file_md5(tc_filename) self.assertEqual(tc['md5'], md5_for_file) # Finally, check test cases' data again but this time against # extracted data from info JSON file written during processing info_json_fn = os.path.splitext(tc_filename)[0] + '.info.json' self.assertTrue( os.path.exists(info_json_fn), f'Missing info file {info_json_fn}') with open(info_json_fn, encoding='utf-8') as infof: info_dict = json.load(infof) expect_info_dict(self, info_dict, tc.get('info_dict', {})) finally: try_rm_tcs_files() if is_playlist and res_dict is not None and res_dict.get('entries'): # Remove all other files that may have been extracted if the # extractor returns full results even with extract_flat res_tcs = [{'info_dict': e} for e in res_dict['entries']] try_rm_tcs_files(res_tcs) ydl.close() return test_template # And add them to TestDownload def inject_tests(test_cases, label=''): for test_case in test_cases: name = test_case['name'] tname = join_nonempty('test', name, label, tests_counter[name][label], delim='_') tests_counter[name][label] += 1 test_method = generator(test_case, tname) test_method.__name__ = tname test_method.add_ie = ','.join(test_case.get('add_ie', [])) setattr(TestDownload, test_method.__name__, test_method) inject_tests(normal_test_cases) # TODO: disable redirection to the IE to ensure we are actually testing the webpage extraction inject_tests(webpage_test_cases, 'webpage') def batch_generator(name): def test_template(self): for label, num_tests in tests_counter[name].items(): for i in range(num_tests): test_name = join_nonempty('test', name, label, i, delim='_') try: getattr(self, test_name)() except unittest.SkipTest: print(f'Skipped {test_name}') return test_template for name in tests_counter: test_method = batch_generator(name) test_method.__name__ = f'test_{name}_all' test_method.add_ie = '' setattr(TestDownload, test_method.__name__, test_method) del test_method if __name__ == '__main__': unittest.main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
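For reference, the minimal shape of a test-case dict the generator above consumes; the values are illustrative, and the 'name' key is what gettestcases() injects from the extractor.

test_case = {
    'name': 'Youtube',                 # extractor key
    'url': 'https://www.youtube.com/watch?v=BaW_jenozKc',
    'info_dict': {
        'id': 'BaW_jenozKc',           # required; validated before the test runs
        'ext': 'mp4',                  # required for single-video tests
    },
    'params': {'skip_download': True},
}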
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_socks.py
test/test_socks.py
#!/usr/bin/env python3 # Allow direct execution import os import sys import threading import unittest import pytest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import abc import contextlib import enum import functools import http.server import json import random import socket import struct import time from socketserver import ( BaseRequestHandler, StreamRequestHandler, ThreadingTCPServer, ) from test.helper import http_server_port, verify_address_availability from yt_dlp.networking import Request from yt_dlp.networking.exceptions import ProxyError, TransportError from yt_dlp.socks import ( SOCKS4_REPLY_VERSION, SOCKS4_VERSION, SOCKS5_USER_AUTH_SUCCESS, SOCKS5_USER_AUTH_VERSION, SOCKS5_VERSION, Socks5AddressType, Socks5Auth, ) SOCKS5_USER_AUTH_FAILURE = 0x1 class Socks4CD(enum.IntEnum): REQUEST_GRANTED = 90 REQUEST_REJECTED_OR_FAILED = 91 REQUEST_REJECTED_CANNOT_CONNECT_TO_IDENTD = 92 REQUEST_REJECTED_DIFFERENT_USERID = 93 class Socks5Reply(enum.IntEnum): SUCCEEDED = 0x0 GENERAL_FAILURE = 0x1 CONNECTION_NOT_ALLOWED = 0x2 NETWORK_UNREACHABLE = 0x3 HOST_UNREACHABLE = 0x4 CONNECTION_REFUSED = 0x5 TTL_EXPIRED = 0x6 COMMAND_NOT_SUPPORTED = 0x7 ADDRESS_TYPE_NOT_SUPPORTED = 0x8 class SocksTestRequestHandler(BaseRequestHandler): def __init__(self, *args, socks_info=None, **kwargs): self.socks_info = socks_info super().__init__(*args, **kwargs) class SocksProxyHandler(BaseRequestHandler): def __init__(self, request_handler_class, socks_server_kwargs, *args, **kwargs): self.socks_kwargs = socks_server_kwargs or {} self.request_handler_class = request_handler_class super().__init__(*args, **kwargs) class Socks5ProxyHandler(StreamRequestHandler, SocksProxyHandler): # SOCKS5 protocol https://tools.ietf.org/html/rfc1928 # SOCKS5 username/password authentication https://tools.ietf.org/html/rfc1929 def handle(self): sleep = self.socks_kwargs.get('sleep') if sleep: time.sleep(sleep) version, nmethods = self.connection.recv(2) assert version == SOCKS5_VERSION methods = list(self.connection.recv(nmethods)) auth = self.socks_kwargs.get('auth') if auth is not None and Socks5Auth.AUTH_USER_PASS not in methods: self.connection.sendall(struct.pack('!BB', SOCKS5_VERSION, Socks5Auth.AUTH_NO_ACCEPTABLE)) self.server.close_request(self.request) return elif Socks5Auth.AUTH_USER_PASS in methods: self.connection.sendall(struct.pack('!BB', SOCKS5_VERSION, Socks5Auth.AUTH_USER_PASS)) _, user_len = struct.unpack('!BB', self.connection.recv(2)) username = self.connection.recv(user_len).decode() pass_len = ord(self.connection.recv(1)) password = self.connection.recv(pass_len).decode() if username == auth[0] and password == auth[1]: self.connection.sendall(struct.pack('!BB', SOCKS5_USER_AUTH_VERSION, SOCKS5_USER_AUTH_SUCCESS)) else: self.connection.sendall(struct.pack('!BB', SOCKS5_USER_AUTH_VERSION, SOCKS5_USER_AUTH_FAILURE)) self.server.close_request(self.request) return elif Socks5Auth.AUTH_NONE in methods: self.connection.sendall(struct.pack('!BB', SOCKS5_VERSION, Socks5Auth.AUTH_NONE)) else: self.connection.sendall(struct.pack('!BB', SOCKS5_VERSION, Socks5Auth.AUTH_NO_ACCEPTABLE)) self.server.close_request(self.request) return version, command, _, address_type = struct.unpack('!BBBB', self.connection.recv(4)) socks_info = { 'version': version, 'auth_methods': methods, 'command': command, 'client_address': self.client_address, 'ipv4_address': None, 'domain_address': None, 'ipv6_address': None, } if address_type == Socks5AddressType.ATYP_IPV4: socks_info['ipv4_address'] = 
socket.inet_ntoa(self.connection.recv(4)) elif address_type == Socks5AddressType.ATYP_DOMAINNAME: socks_info['domain_address'] = self.connection.recv(ord(self.connection.recv(1))).decode() elif address_type == Socks5AddressType.ATYP_IPV6: socks_info['ipv6_address'] = socket.inet_ntop(socket.AF_INET6, self.connection.recv(16)) else: self.server.close_request(self.request) socks_info['port'] = struct.unpack('!H', self.connection.recv(2))[0] # dummy response, the returned IP is just a placeholder self.connection.sendall(struct.pack( '!BBBBIH', SOCKS5_VERSION, self.socks_kwargs.get('reply', Socks5Reply.SUCCEEDED), 0x0, 0x1, 0x7f000001, 40000)) self.request_handler_class(self.request, self.client_address, self.server, socks_info=socks_info) class Socks4ProxyHandler(StreamRequestHandler, SocksProxyHandler): # SOCKS4 protocol http://www.openssh.com/txt/socks4.protocol # SOCKS4A protocol http://www.openssh.com/txt/socks4a.protocol def _read_until_null(self): return b''.join(iter(functools.partial(self.connection.recv, 1), b'\x00')) def handle(self): sleep = self.socks_kwargs.get('sleep') if sleep: time.sleep(sleep) socks_info = { 'version': SOCKS4_VERSION, 'command': None, 'client_address': self.client_address, 'ipv4_address': None, 'port': None, 'domain_address': None, } version, command, dest_port, dest_ip = struct.unpack('!BBHI', self.connection.recv(8)) socks_info['port'] = dest_port socks_info['command'] = command if version != SOCKS4_VERSION: self.server.close_request(self.request) return use_remote_dns = False if 0x0 < dest_ip <= 0xFF: use_remote_dns = True else: socks_info['ipv4_address'] = socket.inet_ntoa(struct.pack('!I', dest_ip)) user_id = self._read_until_null().decode() if user_id != (self.socks_kwargs.get('user_id') or ''): self.connection.sendall(struct.pack( '!BBHI', SOCKS4_REPLY_VERSION, Socks4CD.REQUEST_REJECTED_DIFFERENT_USERID, 0x00, 0x00000000)) self.server.close_request(self.request) return if use_remote_dns: socks_info['domain_address'] = self._read_until_null().decode() # dummy response, the returned IP is just a placeholder self.connection.sendall( struct.pack( '!BBHI', SOCKS4_REPLY_VERSION, self.socks_kwargs.get('cd_reply', Socks4CD.REQUEST_GRANTED), 40000, 0x7f000001)) self.request_handler_class(self.request, self.client_address, self.server, socks_info=socks_info) class IPv6ThreadingTCPServer(ThreadingTCPServer): address_family = socket.AF_INET6 class SocksHTTPTestRequestHandler(http.server.BaseHTTPRequestHandler, SocksTestRequestHandler): def do_GET(self): if self.path == '/socks_info': payload = json.dumps(self.socks_info.copy()) self.send_response(200) self.send_header('Content-Type', 'application/json; charset=utf-8') self.send_header('Content-Length', str(len(payload))) self.end_headers() self.wfile.write(payload.encode()) class SocksWebSocketTestRequestHandler(SocksTestRequestHandler): def handle(self): import websockets.sync.server protocol = websockets.ServerProtocol() connection = websockets.sync.server.ServerConnection(socket=self.request, protocol=protocol, close_timeout=0) connection.handshake() for message in connection: if message == 'socks_info': connection.send(json.dumps(self.socks_info)) connection.close() @contextlib.contextmanager def socks_server(socks_server_class, request_handler, bind_ip=None, **socks_server_kwargs): server = server_thread = None try: bind_address = bind_ip or '127.0.0.1' server_type = ThreadingTCPServer if '.' 
in bind_address else IPv6ThreadingTCPServer server = server_type( (bind_address, 0), functools.partial(socks_server_class, request_handler, socks_server_kwargs)) server_port = http_server_port(server) server_thread = threading.Thread(target=server.serve_forever) server_thread.daemon = True server_thread.start() if '.' not in bind_address: yield f'[{bind_address}]:{server_port}' else: yield f'{bind_address}:{server_port}' finally: server.shutdown() server.server_close() server_thread.join(2.0) class SocksProxyTestContext(abc.ABC): REQUEST_HANDLER_CLASS = None def socks_server(self, server_class, *args, **kwargs): return socks_server(server_class, self.REQUEST_HANDLER_CLASS, *args, **kwargs) @abc.abstractmethod def socks_info_request(self, handler, target_domain=None, target_port=None, **req_kwargs) -> dict: """return a dict of socks_info""" class HTTPSocksTestProxyContext(SocksProxyTestContext): REQUEST_HANDLER_CLASS = SocksHTTPTestRequestHandler def socks_info_request(self, handler, target_domain=None, target_port=None, **req_kwargs): request = Request(f'http://{target_domain or "127.0.0.1"}:{target_port or "40000"}/socks_info', **req_kwargs) handler.validate(request) return json.loads(handler.send(request).read().decode()) class WebSocketSocksTestProxyContext(SocksProxyTestContext): REQUEST_HANDLER_CLASS = SocksWebSocketTestRequestHandler def socks_info_request(self, handler, target_domain=None, target_port=None, **req_kwargs): request = Request(f'ws://{target_domain or "127.0.0.1"}:{target_port or "40000"}', **req_kwargs) handler.validate(request) ws = handler.send(request) ws.send('socks_info') socks_info = ws.recv() ws.close() return json.loads(socks_info) CTX_MAP = { 'http': HTTPSocksTestProxyContext, 'ws': WebSocketSocksTestProxyContext, } @pytest.fixture(scope='module') def ctx(request): return CTX_MAP[request.param]() @pytest.mark.parametrize( 'handler,ctx', [ ('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws'), ('CurlCFFI', 'http'), ], indirect=True) @pytest.mark.handler_flaky('CurlCFFI', reason='segfaults') class TestSocks4Proxy: def test_socks4_no_auth(self, handler, ctx): with handler() as rh: with ctx.socks_server(Socks4ProxyHandler) as server_address: response = ctx.socks_info_request( rh, proxies={'all': f'socks4://{server_address}'}) assert response['version'] == 4 def test_socks4_auth(self, handler, ctx): with handler() as rh: with ctx.socks_server(Socks4ProxyHandler, user_id='user') as server_address: with pytest.raises(ProxyError): ctx.socks_info_request(rh, proxies={'all': f'socks4://{server_address}'}) response = ctx.socks_info_request( rh, proxies={'all': f'socks4://user:@{server_address}'}) assert response['version'] == 4 def test_socks4a_ipv4_target(self, handler, ctx): with ctx.socks_server(Socks4ProxyHandler) as server_address: with handler(proxies={'all': f'socks4a://{server_address}'}) as rh: response = ctx.socks_info_request(rh, target_domain='127.0.0.1') assert response['version'] == 4 assert (response['ipv4_address'] == '127.0.0.1') != (response['domain_address'] == '127.0.0.1') def test_socks4a_domain_target(self, handler, ctx): with ctx.socks_server(Socks4ProxyHandler) as server_address: with handler(proxies={'all': f'socks4a://{server_address}'}) as rh: response = ctx.socks_info_request(rh, target_domain='localhost') assert response['version'] == 4 assert response['ipv4_address'] is None assert response['domain_address'] == 'localhost' def test_ipv4_client_source_address(self, handler, ctx): with ctx.socks_server(Socks4ProxyHandler) as 
server_address: source_address = f'127.0.0.{random.randint(5, 255)}' verify_address_availability(source_address) with handler(proxies={'all': f'socks4://{server_address}'}, source_address=source_address) as rh: response = ctx.socks_info_request(rh) assert response['client_address'][0] == source_address assert response['version'] == 4 @pytest.mark.parametrize('reply_code', [ Socks4CD.REQUEST_REJECTED_OR_FAILED, Socks4CD.REQUEST_REJECTED_CANNOT_CONNECT_TO_IDENTD, Socks4CD.REQUEST_REJECTED_DIFFERENT_USERID, ]) def test_socks4_errors(self, handler, ctx, reply_code): with ctx.socks_server(Socks4ProxyHandler, cd_reply=reply_code) as server_address: with handler(proxies={'all': f'socks4://{server_address}'}) as rh: with pytest.raises(ProxyError): ctx.socks_info_request(rh) def test_ipv6_socks4_proxy(self, handler, ctx): with ctx.socks_server(Socks4ProxyHandler, bind_ip='::1') as server_address: with handler(proxies={'all': f'socks4://{server_address}'}) as rh: response = ctx.socks_info_request(rh, target_domain='127.0.0.1') assert response['client_address'][0] == '::1' assert response['ipv4_address'] == '127.0.0.1' assert response['version'] == 4 def test_timeout(self, handler, ctx): with ctx.socks_server(Socks4ProxyHandler, sleep=2) as server_address: with handler(proxies={'all': f'socks4://{server_address}'}, timeout=0.5) as rh: with pytest.raises(TransportError): ctx.socks_info_request(rh) @pytest.mark.parametrize( 'handler,ctx', [ ('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws'), ('CurlCFFI', 'http'), ], indirect=True) @pytest.mark.handler_flaky('CurlCFFI', reason='segfaults') class TestSocks5Proxy: def test_socks5_no_auth(self, handler, ctx): with ctx.socks_server(Socks5ProxyHandler) as server_address: with handler(proxies={'all': f'socks5://{server_address}'}) as rh: response = ctx.socks_info_request(rh) assert response['auth_methods'] == [0x0] assert response['version'] == 5 def test_socks5_user_pass(self, handler, ctx): with ctx.socks_server(Socks5ProxyHandler, auth=('test', 'testpass')) as server_address: with handler() as rh: with pytest.raises(ProxyError): ctx.socks_info_request(rh, proxies={'all': f'socks5://{server_address}'}) response = ctx.socks_info_request( rh, proxies={'all': f'socks5://test:testpass@{server_address}'}) assert response['auth_methods'] == [Socks5Auth.AUTH_NONE, Socks5Auth.AUTH_USER_PASS] assert response['version'] == 5 def test_socks5_ipv4_target(self, handler, ctx): with ctx.socks_server(Socks5ProxyHandler) as server_address: with handler(proxies={'all': f'socks5://{server_address}'}) as rh: response = ctx.socks_info_request(rh, target_domain='127.0.0.1') assert response['ipv4_address'] == '127.0.0.1' assert response['version'] == 5 def test_socks5_domain_target(self, handler, ctx): with ctx.socks_server(Socks5ProxyHandler) as server_address: with handler(proxies={'all': f'socks5://{server_address}'}) as rh: response = ctx.socks_info_request(rh, target_domain='localhost') assert (response['ipv4_address'] == '127.0.0.1') != (response['ipv6_address'] == '::1') assert response['version'] == 5 def test_socks5h_domain_target(self, handler, ctx): with ctx.socks_server(Socks5ProxyHandler) as server_address: with handler(proxies={'all': f'socks5h://{server_address}'}) as rh: response = ctx.socks_info_request(rh, target_domain='localhost') assert response['ipv4_address'] is None assert response['domain_address'] == 'localhost' assert response['version'] == 5 def test_socks5h_ip_target(self, handler, ctx): with ctx.socks_server(Socks5ProxyHandler) as 
server_address: with handler(proxies={'all': f'socks5h://{server_address}'}) as rh: response = ctx.socks_info_request(rh, target_domain='127.0.0.1') assert response['ipv4_address'] == '127.0.0.1' assert response['domain_address'] is None assert response['version'] == 5 def test_socks5_ipv6_destination(self, handler, ctx): with ctx.socks_server(Socks5ProxyHandler) as server_address: with handler(proxies={'all': f'socks5://{server_address}'}) as rh: response = ctx.socks_info_request(rh, target_domain='[::1]') assert response['ipv6_address'] == '::1' assert response['version'] == 5 def test_ipv6_socks5_proxy(self, handler, ctx): with ctx.socks_server(Socks5ProxyHandler, bind_ip='::1') as server_address: with handler(proxies={'all': f'socks5://{server_address}'}) as rh: response = ctx.socks_info_request(rh, target_domain='127.0.0.1') assert response['client_address'][0] == '::1' assert response['ipv4_address'] == '127.0.0.1' assert response['version'] == 5 # XXX: is there any feasible way of testing IPv6 source addresses? # Same would go for non-proxy source_address test... def test_ipv4_client_source_address(self, handler, ctx): with ctx.socks_server(Socks5ProxyHandler) as server_address: source_address = f'127.0.0.{random.randint(5, 255)}' verify_address_availability(source_address) with handler(proxies={'all': f'socks5://{server_address}'}, source_address=source_address) as rh: response = ctx.socks_info_request(rh) assert response['client_address'][0] == source_address assert response['version'] == 5 @pytest.mark.parametrize('reply_code', [ Socks5Reply.GENERAL_FAILURE, Socks5Reply.CONNECTION_NOT_ALLOWED, Socks5Reply.NETWORK_UNREACHABLE, Socks5Reply.HOST_UNREACHABLE, Socks5Reply.CONNECTION_REFUSED, Socks5Reply.TTL_EXPIRED, Socks5Reply.COMMAND_NOT_SUPPORTED, Socks5Reply.ADDRESS_TYPE_NOT_SUPPORTED, ]) def test_socks5_errors(self, handler, ctx, reply_code): with ctx.socks_server(Socks5ProxyHandler, reply=reply_code) as server_address: with handler(proxies={'all': f'socks5://{server_address}'}) as rh: with pytest.raises(ProxyError): ctx.socks_info_request(rh) def test_timeout(self, handler, ctx): with ctx.socks_server(Socks5ProxyHandler, sleep=2) as server_address: with handler(proxies={'all': f'socks5://{server_address}'}, timeout=1) as rh: with pytest.raises(TransportError): ctx.socks_info_request(rh) if __name__ == '__main__': unittest.main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
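The byte-level exchange that Socks5ProxyHandler parses is compact enough to sketch directly (RFC 1928 section 3); this is the client greeting that precedes the CONNECT request handled above.

import struct

SOCKS5_VERSION = 0x05
AUTH_NONE, AUTH_USER_PASS = 0x00, 0x02

# version byte, method count, then the advertised auth methods
greeting = struct.pack('!BB', SOCKS5_VERSION, 2) + bytes([AUTH_NONE, AUTH_USER_PASS])
# the server answers with struct.pack('!BB', SOCKS5_VERSION, chosen_method),
# which is exactly what Socks5ProxyHandler.handle() sends back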
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_compat.py
test/test_compat.py
#!/usr/bin/env python3 # Allow direct execution import datetime as dt import os import sys import unittest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import struct from yt_dlp import compat from yt_dlp.compat import urllib # isort: split from yt_dlp.compat import compat_etree_fromstring, compat_expanduser, compat_datetime_from_timestamp from yt_dlp.compat.urllib.request import getproxies class TestCompat(unittest.TestCase): def test_compat_passthrough(self): with self.assertWarns(DeprecationWarning): _ = compat.compat_basestring self.assertEqual(urllib.request.getproxies, getproxies) with self.assertWarns(DeprecationWarning): _ = compat.compat_pycrypto_AES # Must not raise error def test_compat_expanduser(self): old_home = os.environ.get('HOME') test_str = R'C:\Documents and Settings\тест\Application Data' try: os.environ['HOME'] = test_str self.assertEqual(compat_expanduser('~'), test_str) finally: os.environ['HOME'] = old_home or '' def test_compat_etree_fromstring(self): xml = ''' <root foo="bar" spam="中文"> <normal>foo</normal> <chinese>中文</chinese> <foo><bar>spam</bar></foo> </root> ''' doc = compat_etree_fromstring(xml.encode()) self.assertTrue(isinstance(doc.attrib['foo'], str)) self.assertTrue(isinstance(doc.attrib['spam'], str)) self.assertTrue(isinstance(doc.find('normal').text, str)) self.assertTrue(isinstance(doc.find('chinese').text, str)) self.assertTrue(isinstance(doc.find('foo/bar').text, str)) def test_compat_etree_fromstring_doctype(self): xml = '''<?xml version="1.0"?> <!DOCTYPE smil PUBLIC "-//W3C//DTD SMIL 2.0//EN" "http://www.w3.org/2001/SMIL20/SMIL20.dtd"> <smil xmlns="http://www.w3.org/2001/SMIL20/Language"></smil>''' compat_etree_fromstring(xml) def test_struct_unpack(self): self.assertEqual(struct.unpack('!B', b'\x00'), (0,)) def test_compat_datetime_from_timestamp(self): self.assertEqual( compat_datetime_from_timestamp(0), dt.datetime(1970, 1, 1, 0, 0, 0, tzinfo=dt.timezone.utc)) self.assertEqual( compat_datetime_from_timestamp(1), dt.datetime(1970, 1, 1, 0, 0, 1, tzinfo=dt.timezone.utc)) self.assertEqual( compat_datetime_from_timestamp(3600), dt.datetime(1970, 1, 1, 1, 0, 0, tzinfo=dt.timezone.utc)) self.assertEqual( compat_datetime_from_timestamp(-1), dt.datetime(1969, 12, 31, 23, 59, 59, tzinfo=dt.timezone.utc)) self.assertEqual( compat_datetime_from_timestamp(-86400), dt.datetime(1969, 12, 31, 0, 0, 0, tzinfo=dt.timezone.utc)) self.assertEqual( compat_datetime_from_timestamp(0.5), dt.datetime(1970, 1, 1, 0, 0, 0, 500000, tzinfo=dt.timezone.utc)) self.assertEqual( compat_datetime_from_timestamp(1.000001), dt.datetime(1970, 1, 1, 0, 0, 1, 1, tzinfo=dt.timezone.utc)) self.assertEqual( compat_datetime_from_timestamp(-1.25), dt.datetime(1969, 12, 31, 23, 59, 58, 750000, tzinfo=dt.timezone.utc)) self.assertEqual( compat_datetime_from_timestamp(-1577923200), dt.datetime(1920, 1, 1, 0, 0, 0, tzinfo=dt.timezone.utc)) self.assertEqual( compat_datetime_from_timestamp(4102444800), dt.datetime(2100, 1, 1, 0, 0, 0, tzinfo=dt.timezone.utc)) self.assertEqual( compat_datetime_from_timestamp(173568960000), dt.datetime(7470, 3, 8, 0, 0, 0, tzinfo=dt.timezone.utc)) if __name__ == '__main__': unittest.main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
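The expectations in test_compat_datetime_from_timestamp match dt.datetime.fromtimestamp(ts, tz=utc); the shim exists because fromtimestamp can raise OSError for negative or far-future values on some platforms (notably Windows). A quick equivalence sketch over the portable cases:

import datetime as dt

from yt_dlp.compat import compat_datetime_from_timestamp

for ts in (0, 1, 3600, 0.5, 1.000001):
    assert compat_datetime_from_timestamp(ts) == dt.datetime.fromtimestamp(ts, tz=dt.timezone.utc)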
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_age_restriction.py
test/test_age_restriction.py
#!/usr/bin/env python3 # Allow direct execution import os import sys import unittest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from test.helper import is_download_test, try_rm from yt_dlp import YoutubeDL from yt_dlp.utils import DownloadError def _download_restricted(url, filename, age): """ Returns true if the file has been downloaded """ params = { 'age_limit': age, 'skip_download': True, 'writeinfojson': True, 'outtmpl': '%(id)s.%(ext)s', } ydl = YoutubeDL(params) ydl.add_default_info_extractors() json_filename = os.path.splitext(filename)[0] + '.info.json' try_rm(json_filename) try: ydl.download([url]) except DownloadError: pass else: return os.path.exists(json_filename) finally: try_rm(json_filename) @is_download_test class TestAgeRestriction(unittest.TestCase): def _assert_restricted(self, url, filename, age, old_age=None): self.assertTrue(_download_restricted(url, filename, old_age)) self.assertFalse(_download_restricted(url, filename, age)) def test_youtube(self): self._assert_restricted('HtVdAasjOgU', 'HtVdAasjOgU.mp4', 10) def test_youporn(self): self._assert_restricted( 'https://www.youporn.com/watch/16715086/sex-ed-in-detention-18-asmr/', '16715086.mp4', 2, old_age=25) if __name__ == '__main__': unittest.main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
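The option under test maps directly onto a YoutubeDL parameter; a minimal sketch of the configuration _download_restricted builds (the age value here is arbitrary):

from yt_dlp import YoutubeDL

# videos whose extracted age_limit exceeds the configured viewer age are
# rejected during processing, so no .info.json gets written for them
ydl = YoutubeDL({'age_limit': 10, 'skip_download': True, 'writeinfojson': True})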
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_websockets.py
test/test_websockets.py
#!/usr/bin/env python3 # Allow direct execution import os import sys import time import pytest from test.helper import verify_address_availability from yt_dlp.networking.common import Features, DEFAULT_TIMEOUT sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import http.client import http.cookiejar import http.server import json import random import ssl import threading from yt_dlp import socks from yt_dlp.cookies import YoutubeDLCookieJar from yt_dlp.dependencies import websockets from yt_dlp.networking import Request from yt_dlp.networking.exceptions import ( CertificateVerifyError, HTTPError, ProxyError, RequestError, SSLError, TransportError, ) from yt_dlp.utils.traversal import traverse_obj from yt_dlp.utils.networking import HTTPHeaderDict TEST_DIR = os.path.dirname(os.path.abspath(__file__)) pytestmark = pytest.mark.handler_flaky( 'Websockets', os.name == 'nt' or sys.implementation.name == 'pypy', reason='segfaults', ) def websocket_handler(websocket): for message in websocket: if isinstance(message, bytes): if message == b'bytes': return websocket.send('2') elif isinstance(message, str): if message == 'headers': return websocket.send(json.dumps(dict(websocket.request.headers.raw_items()))) elif message == 'path': return websocket.send(websocket.request.path) elif message == 'source_address': return websocket.send(websocket.remote_address[0]) elif message == 'str': return websocket.send('1') return websocket.send(message) def process_request(self, request): if request.path.startswith('/gen_'): status = http.HTTPStatus(int(request.path[5:])) if 300 <= status.value <= 300: return websockets.http11.Response( status.value, status.phrase, websockets.datastructures.Headers([('Location', '/')]), b'') return self.protocol.reject(status.value, status.phrase) elif request.path.startswith('/get_cookie'): response = self.protocol.accept(request) response.headers['Set-Cookie'] = 'test=ytdlp' return response return self.protocol.accept(request) def create_websocket_server(**ws_kwargs): import websockets.sync.server wsd = websockets.sync.server.serve( websocket_handler, '127.0.0.1', 0, process_request=process_request, open_timeout=2, **ws_kwargs) ws_port = wsd.socket.getsockname()[1] ws_server_thread = threading.Thread(target=wsd.serve_forever) ws_server_thread.daemon = True ws_server_thread.start() return ws_server_thread, ws_port def create_ws_websocket_server(): return create_websocket_server() def create_wss_websocket_server(): certfn = os.path.join(TEST_DIR, 'testcert.pem') sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) sslctx.load_cert_chain(certfn, None) return create_websocket_server(ssl=sslctx) MTLS_CERT_DIR = os.path.join(TEST_DIR, 'testdata', 'certificate') def create_mtls_wss_websocket_server(): certfn = os.path.join(TEST_DIR, 'testcert.pem') cacertfn = os.path.join(MTLS_CERT_DIR, 'ca.crt') sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) sslctx.verify_mode = ssl.CERT_REQUIRED sslctx.load_verify_locations(cafile=cacertfn) sslctx.load_cert_chain(certfn, None) return create_websocket_server(ssl=sslctx) def create_legacy_wss_websocket_server(): certfn = os.path.join(TEST_DIR, 'testcert.pem') sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) sslctx.maximum_version = ssl.TLSVersion.TLSv1_2 sslctx.set_ciphers('SHA1:AESCCM:aDSS:eNULL:aNULL') sslctx.load_cert_chain(certfn, None) return create_websocket_server(ssl=sslctx) def ws_validate_and_send(rh, req): rh.validate(req) max_tries = 3 for i in range(max_tries): try: return rh.send(req) except TransportError as 
e:
            if i < (max_tries - 1) and 'connection closed during handshake' in str(e):
                # websockets server sometimes hangs on new connections
                continue
            raise


@pytest.mark.skipif(not websockets, reason='websockets must be installed to test websocket request handlers')
@pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
class TestWebsocketsRequestHandlerConformance:
    @classmethod
    def setup_class(cls):
        cls.ws_thread, cls.ws_port = create_ws_websocket_server()
        cls.ws_base_url = f'ws://127.0.0.1:{cls.ws_port}'

        cls.wss_thread, cls.wss_port = create_wss_websocket_server()
        cls.wss_base_url = f'wss://127.0.0.1:{cls.wss_port}'

        cls.bad_wss_thread, cls.bad_wss_port = create_websocket_server(ssl=ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER))
        cls.bad_wss_host = f'wss://127.0.0.1:{cls.bad_wss_port}'

        cls.mtls_wss_thread, cls.mtls_wss_port = create_mtls_wss_websocket_server()
        cls.mtls_wss_base_url = f'wss://127.0.0.1:{cls.mtls_wss_port}'

        cls.legacy_wss_thread, cls.legacy_wss_port = create_legacy_wss_websocket_server()
        cls.legacy_wss_host = f'wss://127.0.0.1:{cls.legacy_wss_port}'

    def test_basic_websockets(self, handler):
        with handler() as rh:
            ws = ws_validate_and_send(rh, Request(self.ws_base_url))
            assert 'upgrade' in ws.headers
            assert ws.status == 101
            ws.send('foo')
            assert ws.recv() == 'foo'
            ws.close()

    # https://www.rfc-editor.org/rfc/rfc6455.html#section-5.6
    @pytest.mark.parametrize('msg,opcode', [('str', 1), (b'bytes', 2)])
    def test_send_types(self, handler, msg, opcode):
        with handler() as rh:
            ws = ws_validate_and_send(rh, Request(self.ws_base_url))
            ws.send(msg)
            assert int(ws.recv()) == opcode
            ws.close()

    def test_verify_cert(self, handler):
        with handler() as rh:
            with pytest.raises(CertificateVerifyError):
                ws_validate_and_send(rh, Request(self.wss_base_url))

        with handler(verify=False) as rh:
            ws = ws_validate_and_send(rh, Request(self.wss_base_url))
            assert ws.status == 101
            ws.close()

    def test_ssl_error(self, handler):
        with handler(verify=False) as rh:
            with pytest.raises(SSLError, match=r'ssl(?:v3|/tls) alert handshake failure') as exc_info:
                ws_validate_and_send(rh, Request(self.bad_wss_host))
            assert not issubclass(exc_info.type, CertificateVerifyError)

    def test_legacy_ssl_extension(self, handler):
        with handler(verify=False) as rh:
            ws = ws_validate_and_send(rh, Request(self.legacy_wss_host, extensions={'legacy_ssl': True}))
            assert ws.status == 101
            ws.close()

            # Ensure only applies to request extension
            with pytest.raises(SSLError):
                ws_validate_and_send(rh, Request(self.legacy_wss_host))

    def test_legacy_ssl_support(self, handler):
        with handler(verify=False, legacy_ssl_support=True) as rh:
            ws = ws_validate_and_send(rh, Request(self.legacy_wss_host))
            assert ws.status == 101
            ws.close()

    @pytest.mark.parametrize('path,expected', [
        # Unicode characters should be encoded with uppercase percent-encoding
        ('/中文', '/%E4%B8%AD%E6%96%87'),
        # don't normalize existing percent encodings
        ('/%c7%9f', '/%c7%9f'),
    ])
    def test_percent_encode(self, handler, path, expected):
        with handler() as rh:
            ws = ws_validate_and_send(rh, Request(f'{self.ws_base_url}{path}'))
            ws.send('path')
            assert ws.recv() == expected
            assert ws.status == 101
            ws.close()

    def test_remove_dot_segments(self, handler):
        with handler() as rh:
            # This isn't a comprehensive test,
            # but it should be enough to check whether the handler is removing dot segments
            ws = ws_validate_and_send(rh, Request(f'{self.ws_base_url}/a/b/./../../test'))
            assert ws.status == 101
            ws.send('path')
            assert ws.recv() == '/test'
            ws.close()

    # We are restricted to known HTTP status codes in http.HTTPStatus
    # Redirects are not supported for websockets
    @pytest.mark.parametrize('status', (200, 204, 301, 302, 303, 400, 500, 511))
    def test_raise_http_error(self, handler, status):
        with handler() as rh:
            with pytest.raises(HTTPError) as exc_info:
                ws_validate_and_send(rh, Request(f'{self.ws_base_url}/gen_{status}'))
            assert exc_info.value.status == status

    @pytest.mark.parametrize('params,extensions', [
        ({'timeout': sys.float_info.min}, {}),
        ({}, {'timeout': sys.float_info.min}),
    ])
    def test_read_timeout(self, handler, params, extensions):
        with handler(**params) as rh:
            with pytest.raises(TransportError):
                ws_validate_and_send(rh, Request(self.ws_base_url, extensions=extensions))

    def test_connect_timeout(self, handler):
        # nothing should be listening on this port
        connect_timeout_url = 'ws://10.255.255.255'
        with handler(timeout=0.01) as rh, pytest.raises(TransportError):
            now = time.time()
            ws_validate_and_send(rh, Request(connect_timeout_url))
        assert time.time() - now < DEFAULT_TIMEOUT

        # Per request timeout, should override handler timeout
        request = Request(connect_timeout_url, extensions={'timeout': 0.01})
        with handler() as rh, pytest.raises(TransportError):
            now = time.time()
            ws_validate_and_send(rh, request)
        assert time.time() - now < DEFAULT_TIMEOUT

    def test_cookies(self, handler):
        cookiejar = YoutubeDLCookieJar()
        cookiejar.set_cookie(http.cookiejar.Cookie(
            version=0, name='test', value='ytdlp', port=None, port_specified=False,
            domain='127.0.0.1', domain_specified=True, domain_initial_dot=False, path='/',
            path_specified=True, secure=False, expires=None, discard=False, comment=None,
            comment_url=None, rest={}))

        with handler(cookiejar=cookiejar) as rh:
            ws = ws_validate_and_send(rh, Request(self.ws_base_url))
            ws.send('headers')
            assert HTTPHeaderDict(json.loads(ws.recv()))['cookie'] == 'test=ytdlp'
            ws.close()

        with handler() as rh:
            ws = ws_validate_and_send(rh, Request(self.ws_base_url))
            ws.send('headers')
            assert 'cookie' not in HTTPHeaderDict(json.loads(ws.recv()))
            ws.close()

            ws = ws_validate_and_send(rh, Request(self.ws_base_url, extensions={'cookiejar': cookiejar}))
            ws.send('headers')
            assert HTTPHeaderDict(json.loads(ws.recv()))['cookie'] == 'test=ytdlp'
            ws.close()

    @pytest.mark.skip_handler('Websockets', 'Set-Cookie not supported by websockets')
    def test_cookie_sync_only_cookiejar(self, handler):
        # Ensure that cookies are ONLY being handled by the cookiejar
        with handler() as rh:
            ws_validate_and_send(rh, Request(f'{self.ws_base_url}/get_cookie', extensions={'cookiejar': YoutubeDLCookieJar()}))
            ws = ws_validate_and_send(rh, Request(self.ws_base_url, extensions={'cookiejar': YoutubeDLCookieJar()}))
            ws.send('headers')
            assert 'cookie' not in HTTPHeaderDict(json.loads(ws.recv()))
            ws.close()

    @pytest.mark.skip_handler('Websockets', 'Set-Cookie not supported by websockets')
    def test_cookie_sync_delete_cookie(self, handler):
        # Ensure that cookies are ONLY being handled by the cookiejar
        cookiejar = YoutubeDLCookieJar()
        with handler(verbose=True, cookiejar=cookiejar) as rh:
            ws_validate_and_send(rh, Request(f'{self.ws_base_url}/get_cookie'))
            ws = ws_validate_and_send(rh, Request(self.ws_base_url))
            ws.send('headers')
            assert HTTPHeaderDict(json.loads(ws.recv()))['cookie'] == 'test=ytdlp'
            ws.close()
            cookiejar.clear_session_cookies()
            ws = ws_validate_and_send(rh, Request(self.ws_base_url))
            ws.send('headers')
            assert 'cookie' not in HTTPHeaderDict(json.loads(ws.recv()))
            ws.close()

    def test_source_address(self, handler):
        source_address = f'127.0.0.{random.randint(5, 255)}'
        verify_address_availability(source_address)
        with handler(source_address=source_address) as rh:
            ws = ws_validate_and_send(rh, Request(self.ws_base_url))
            ws.send('source_address')
            assert source_address == ws.recv()
            ws.close()

    def test_response_url(self, handler):
        with handler() as rh:
            url = f'{self.ws_base_url}/something'
            ws = ws_validate_and_send(rh, Request(url))
            assert ws.url == url
            ws.close()

    def test_request_headers(self, handler):
        with handler(headers=HTTPHeaderDict({'test1': 'test', 'test2': 'test2'})) as rh:
            # Global Headers
            ws = ws_validate_and_send(rh, Request(self.ws_base_url))
            ws.send('headers')
            headers = HTTPHeaderDict(json.loads(ws.recv()))
            assert headers['test1'] == 'test'
            ws.close()

            # Per request headers, merged with global
            ws = ws_validate_and_send(rh, Request(
                self.ws_base_url, headers={'test2': 'changed', 'test3': 'test3'}))
            ws.send('headers')
            headers = HTTPHeaderDict(json.loads(ws.recv()))
            assert headers['test1'] == 'test'
            assert headers['test2'] == 'changed'
            assert headers['test3'] == 'test3'
            ws.close()

    def test_keep_header_casing(self, handler):
        with handler(headers=HTTPHeaderDict({'x-TeSt1': 'test'})) as rh:
            ws = ws_validate_and_send(rh, Request(self.ws_base_url, headers={'x-TeSt2': 'test'}, extensions={'keep_header_casing': True}))
            ws.send('headers')
            headers = json.loads(ws.recv())
            assert 'x-TeSt1' in headers
            assert 'x-TeSt2' in headers

    @pytest.mark.parametrize('client_cert', (
        {'client_certificate': os.path.join(MTLS_CERT_DIR, 'clientwithkey.crt')},
        {
            'client_certificate': os.path.join(MTLS_CERT_DIR, 'client.crt'),
            'client_certificate_key': os.path.join(MTLS_CERT_DIR, 'client.key'),
        },
        {
            'client_certificate': os.path.join(MTLS_CERT_DIR, 'clientwithencryptedkey.crt'),
            'client_certificate_password': 'foobar',
        },
        {
            'client_certificate': os.path.join(MTLS_CERT_DIR, 'client.crt'),
            'client_certificate_key': os.path.join(MTLS_CERT_DIR, 'clientencrypted.key'),
            'client_certificate_password': 'foobar',
        },
    ))
    def test_mtls(self, handler, client_cert):
        with handler(
            # Disable client-side validation of unacceptable self-signed testcert.pem
            # The test is of a check on the server side, so unaffected
            verify=False,
            client_cert=client_cert,
        ) as rh:
            ws_validate_and_send(rh, Request(self.mtls_wss_base_url)).close()

    def test_request_disable_proxy(self, handler):
        for proxy_proto in handler._SUPPORTED_PROXY_SCHEMES or ['ws']:
            # Given handler is configured with a proxy
            with handler(proxies={'ws': f'{proxy_proto}://10.255.255.255'}, timeout=5) as rh:
                # When a proxy is explicitly set to None for the request
                ws = ws_validate_and_send(rh, Request(self.ws_base_url, proxies={'http': None}))
                # Then no proxy should be used
                assert ws.status == 101
                ws.close()

    @pytest.mark.skip_handlers_if(
        lambda _, handler: Features.NO_PROXY not in handler._SUPPORTED_FEATURES, 'handler does not support NO_PROXY')
    def test_noproxy(self, handler):
        for proxy_proto in handler._SUPPORTED_PROXY_SCHEMES or ['ws']:
            # Given the handler is configured with a proxy
            with handler(proxies={'ws': f'{proxy_proto}://10.255.255.255'}, timeout=5) as rh:
                for no_proxy in (f'127.0.0.1:{self.ws_port}', '127.0.0.1', 'localhost'):
                    # When request no proxy includes the request url host
                    ws = ws_validate_and_send(rh, Request(self.ws_base_url, proxies={'no': no_proxy}))
                    # Then the proxy should not be used
                    assert ws.status == 101
                    ws.close()

    @pytest.mark.skip_handlers_if(
        lambda _, handler: Features.ALL_PROXY not in handler._SUPPORTED_FEATURES, 'handler does not support ALL_PROXY')
    def test_allproxy(self, handler):
        supported_proto = traverse_obj(handler._SUPPORTED_PROXY_SCHEMES, 0, default='ws')
        # This is a bit of a hacky test, but it should be enough to check whether the handler is using the proxy.
        # 0.1s might not be enough of a timeout if proxy is not used in all cases, but should still get failures.
        with handler(proxies={'all': f'{supported_proto}://10.255.255.255'}, timeout=0.1) as rh:
            with pytest.raises(TransportError):
                ws_validate_and_send(rh, Request(self.ws_base_url)).close()

        with handler(timeout=0.1) as rh:
            with pytest.raises(TransportError):
                ws_validate_and_send(
                    rh, Request(self.ws_base_url, proxies={'all': f'{supported_proto}://10.255.255.255'})).close()


def create_fake_ws_connection(raised):
    import websockets.sync.client

    class FakeWsConnection(websockets.sync.client.ClientConnection):
        def __init__(self, *args, **kwargs):
            class FakeResponse:
                body = b''
                headers = {}
                status_code = 101
                reason_phrase = 'test'

            self.response = FakeResponse()

        def send(self, *args, **kwargs):
            raise raised()

        def recv(self, *args, **kwargs):
            raise raised()

        def close(self, *args, **kwargs):
            return

    return FakeWsConnection()


@pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
class TestWebsocketsRequestHandler:
    @pytest.mark.parametrize('raised,expected', [
        # https://websockets.readthedocs.io/en/stable/reference/exceptions.html
        (lambda: websockets.exceptions.InvalidURI(msg='test', uri='test://'), RequestError),
        # Requires a response object. Should be covered by HTTP error tests.
        # (lambda: websockets.exceptions.InvalidStatus(), TransportError),
        (lambda: websockets.exceptions.InvalidHandshake(), TransportError),
        # These are subclasses of InvalidHandshake
        (lambda: websockets.exceptions.InvalidHeader(name='test'), TransportError),
        (lambda: websockets.exceptions.NegotiationError(), TransportError),
        # Catch-all
        (lambda: websockets.exceptions.WebSocketException(), TransportError),
        (lambda: TimeoutError(), TransportError),
        # These may be raised by our create_connection implementation, which should also be caught
        (lambda: OSError(), TransportError),
        (lambda: ssl.SSLError(), SSLError),
        (lambda: ssl.SSLCertVerificationError(), CertificateVerifyError),
        (lambda: socks.ProxyError(), ProxyError),
    ])
    def test_request_error_mapping(self, handler, monkeypatch, raised, expected):
        import websockets.sync.client

        import yt_dlp.networking._websockets
        with handler() as rh:
            def fake_connect(*args, **kwargs):
                raise raised()
            monkeypatch.setattr(yt_dlp.networking._websockets, 'create_connection', lambda *args, **kwargs: None)
            monkeypatch.setattr(websockets.sync.client, 'connect', fake_connect)
            with pytest.raises(expected) as exc_info:
                rh.send(Request('ws://fake-url'))
            assert exc_info.type is expected

    @pytest.mark.parametrize('raised,expected,match', [
        # https://websockets.readthedocs.io/en/stable/reference/sync/client.html#websockets.sync.client.ClientConnection.send
        (lambda: websockets.exceptions.ConnectionClosed(None, None), TransportError, None),
        (lambda: RuntimeError(), TransportError, None),
        (lambda: TimeoutError(), TransportError, None),
        (lambda: TypeError(), RequestError, None),
        (lambda: socks.ProxyError(), ProxyError, None),
        # Catch-all
        (lambda: websockets.exceptions.WebSocketException(), TransportError, None),
    ])
    def test_ws_send_error_mapping(self, handler, monkeypatch, raised, expected, match):
        from yt_dlp.networking._websockets import WebsocketsResponseAdapter
        ws = WebsocketsResponseAdapter(create_fake_ws_connection(raised), url='ws://fake-url')
        with pytest.raises(expected, match=match) as exc_info:
            ws.send('test')
        assert exc_info.type is expected

    @pytest.mark.parametrize('raised,expected,match', [
        # https://websockets.readthedocs.io/en/stable/reference/sync/client.html#websockets.sync.client.ClientConnection.recv
        (lambda: websockets.exceptions.ConnectionClosed(None, None), TransportError, None),
        (lambda: RuntimeError(), TransportError, None),
        (lambda: TimeoutError(), TransportError, None),
        (lambda: socks.ProxyError(), ProxyError, None),
        # Catch-all
        (lambda: websockets.exceptions.WebSocketException(), TransportError, None),
    ])
    def test_ws_recv_error_mapping(self, handler, monkeypatch, raised, expected, match):
        from yt_dlp.networking._websockets import WebsocketsResponseAdapter
        ws = WebsocketsResponseAdapter(create_fake_ws_connection(raised), url='ws://fake-url')
        with pytest.raises(expected, match=match) as exc_info:
            ws.recv()
        assert exc_info.type is expected
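
# --- Illustrative sketch (editor's addition, not part of the original suite) ---
# The retry loop near the top of this file tolerates the test server
# occasionally hanging on new connections. The same pattern, generalized:
# `open_conn` is a hypothetical zero-argument connection factory supplied by
# the caller, not a name from this file.
def retry_flaky_handshake(open_conn, max_tries=3):
    for i in range(max_tries):
        try:
            return open_conn()
        except Exception as e:
            # Only retry the known-transient failure; re-raise everything else
            if i < (max_tries - 1) and 'connection closed during handshake' in str(e):
                continue
            raise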
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_downloader_http.py
test/test_downloader_http.py
#!/usr/bin/env python3

# Allow direct execution
import os
import sys
import unittest

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


import http.server
import re
import threading

from test.helper import http_server_port, try_rm
from yt_dlp import YoutubeDL
from yt_dlp.downloader.http import HttpFD
from yt_dlp.utils._utils import _YDLLogger as FakeLogger

TEST_DIR = os.path.dirname(os.path.abspath(__file__))

TEST_SIZE = 10 * 1024


class HTTPTestRequestHandler(http.server.BaseHTTPRequestHandler):
    def log_message(self, format, *args):
        pass

    def send_content_range(self, total=None):
        range_header = self.headers.get('Range')
        start = end = None
        if range_header:
            mobj = re.search(r'^bytes=(\d+)-(\d+)', range_header)
            if mobj:
                start = int(mobj.group(1))
                end = int(mobj.group(2))
        valid_range = start is not None and end is not None
        if valid_range:
            content_range = f'bytes {start}-{end}'
            if total:
                content_range += f'/{total}'
            self.send_header('Content-Range', content_range)
        return (end - start + 1) if valid_range else total

    def serve(self, range=True, content_length=True):
        self.send_response(200)
        self.send_header('Content-Type', 'video/mp4')
        size = TEST_SIZE
        if range:
            size = self.send_content_range(TEST_SIZE)
        if content_length:
            self.send_header('Content-Length', size)
        self.end_headers()
        self.wfile.write(b'#' * size)

    def do_GET(self):
        if self.path == '/regular':
            self.serve()
        elif self.path == '/no-content-length':
            self.serve(content_length=False)
        elif self.path == '/no-range':
            self.serve(range=False)
        elif self.path == '/no-range-no-content-length':
            self.serve(range=False, content_length=False)
        else:
            assert False


class TestHttpFD(unittest.TestCase):
    def setUp(self):
        self.httpd = http.server.HTTPServer(
            ('127.0.0.1', 0), HTTPTestRequestHandler)
        self.port = http_server_port(self.httpd)
        self.server_thread = threading.Thread(target=self.httpd.serve_forever)
        self.server_thread.daemon = True
        self.server_thread.start()

    def download(self, params, ep):
        params['logger'] = FakeLogger()
        ydl = YoutubeDL(params)
        downloader = HttpFD(ydl, params)
        filename = 'testfile.mp4'
        try_rm(filename)
        self.assertTrue(downloader.real_download(filename, {
            'url': f'http://127.0.0.1:{self.port}/{ep}',
        }), ep)
        self.assertEqual(os.path.getsize(filename), TEST_SIZE, ep)
        try_rm(filename)

    def download_all(self, params):
        for ep in ('regular', 'no-content-length', 'no-range', 'no-range-no-content-length'):
            self.download(params, ep)

    def test_regular(self):
        self.download_all({})

    def test_chunked(self):
        self.download_all({
            'http_chunk_size': 1000,
        })


if __name__ == '__main__':
    unittest.main()
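
# --- Illustrative sketch (editor's addition) ---
# The handler above answers "Range: bytes=start-end" requests; this is the
# same parsing step in isolation, assuming only the simple single-range form
# the handler itself supports. `_parse_byte_range` is a hypothetical helper,
# not a name used by the tests.
def _parse_byte_range(range_header):
    mobj = re.search(r'^bytes=(\d+)-(\d+)', range_header or '')
    if not mobj:
        return None
    start, end = int(mobj.group(1)), int(mobj.group(2))
    return start, end, end - start + 1  # offsets plus the resulting body size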
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/__init__.py
test/__init__.py
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_execution.py
test/test_execution.py
#!/usr/bin/env python3

# Allow direct execution
import os
import sys
import unittest

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


import contextlib
import subprocess

from yt_dlp.utils import Popen

rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

LAZY_EXTRACTORS = 'yt_dlp/extractor/lazy_extractors.py'


class TestExecution(unittest.TestCase):
    def run_yt_dlp(self, exe=(sys.executable, 'yt_dlp/__main__.py'), opts=('--version', )):
        stdout, stderr, returncode = Popen.run(
            [*exe, '--ignore-config', *opts], cwd=rootDir, text=True,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print(stderr, file=sys.stderr)
        self.assertEqual(returncode, 0)
        return stdout.strip(), stderr.strip()

    def test_main_exec(self):
        self.run_yt_dlp()

    def test_import(self):
        self.run_yt_dlp(exe=(sys.executable, '-c', 'import yt_dlp'))

    def test_module_exec(self):
        self.run_yt_dlp(exe=(sys.executable, '-m', 'yt_dlp'))

    def test_cmdline_umlauts(self):
        _, stderr = self.run_yt_dlp(opts=('ä', '--version'))
        self.assertFalse(stderr)

    def test_lazy_extractors(self):
        try:
            subprocess.check_call([sys.executable, 'devscripts/make_lazy_extractors.py', LAZY_EXTRACTORS],
                                  cwd=rootDir, stdout=subprocess.DEVNULL)
            self.assertTrue(os.path.exists(LAZY_EXTRACTORS))

            _, stderr = self.run_yt_dlp(opts=('-s', 'test:'))
            # `MIN_RECOMMENDED` emits a deprecated feature warning for deprecated Python versions
            if stderr and stderr.startswith('Deprecated Feature: Support for Python'):
                stderr = ''
            self.assertFalse(stderr)

            subprocess.check_call([sys.executable, 'test/test_all_urls.py'], cwd=rootDir, stdout=subprocess.DEVNULL)
        finally:
            with contextlib.suppress(OSError):
                os.remove(LAZY_EXTRACTORS)


if __name__ == '__main__':
    unittest.main()
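
# --- Illustrative sketch (editor's addition) ---
# A plain-stdlib equivalent of the `Popen.run` capture used in `run_yt_dlp`
# above, assuming only that the command's output should be captured as text.
# `_run_capture` is a hypothetical helper, not part of the original file.
def _run_capture(args, cwd=None):
    proc = subprocess.run(args, cwd=cwd, text=True, capture_output=True)
    return proc.stdout.strip(), proc.stderr.strip(), proc.returncode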
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_netrc.py
test/test_netrc.py
#!/usr/bin/env python3

# Allow direct execution
import os
import sys
import unittest

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


from yt_dlp.extractor import gen_extractor_classes
from yt_dlp.extractor.common import InfoExtractor

NO_LOGIN = InfoExtractor._perform_login


class TestNetRc(unittest.TestCase):
    def test_netrc_present(self):
        for ie in gen_extractor_classes():
            if ie._perform_login is NO_LOGIN:
                continue
            self.assertTrue(
                ie._NETRC_MACHINE,
                f'Extractor {ie.IE_NAME} supports login, but is missing a _NETRC_MACHINE property')


if __name__ == '__main__':
    unittest.main()
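
# --- Illustrative sketch (editor's addition) ---
# The test above detects "supports login" by comparing each extractor's
# `_perform_login` against the base implementation by identity. The same
# override-detection check in miniature, with hypothetical classes:
class _Base:
    def hook(self):
        pass


class _Override(_Base):
    def hook(self):
        pass


assert _Override.hook is not _Base.hook  # overridden in the subclass
assert _Base.hook is _Base.hook          # base implementation untouched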
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_InfoExtractor.py
test/test_InfoExtractor.py
#!/usr/bin/env python3 # Allow direct execution import os import sys import unittest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import http.server import threading from test.helper import FakeYDL, expect_dict, expect_value, http_server_port from yt_dlp.compat import compat_etree_fromstring from yt_dlp.extractor import YoutubeIE, get_info_extractor from yt_dlp.extractor.common import InfoExtractor from yt_dlp.utils import ( ExtractorError, RegexNotFoundError, encode_data_uri, strip_jsonp, ) TEAPOT_RESPONSE_STATUS = 418 TEAPOT_RESPONSE_BODY = "<h1>418 I'm a teapot</h1>" class InfoExtractorTestRequestHandler(http.server.BaseHTTPRequestHandler): def log_message(self, format, *args): pass def do_GET(self): if self.path == '/teapot': self.send_response(TEAPOT_RESPONSE_STATUS) self.send_header('Content-Type', 'text/html; charset=utf-8') self.end_headers() self.wfile.write(TEAPOT_RESPONSE_BODY.encode()) elif self.path == '/fake.m3u8': self.send_response(200) self.send_header('Content-Length', '1024') self.end_headers() self.wfile.write(1024 * b'\x00') elif self.path == '/bipbop.m3u8': with open('test/testdata/m3u8/bipbop_16x9.m3u8', 'rb') as f: data = f.read() self.send_response(200) self.send_header('Content-Length', str(len(data))) self.end_headers() self.wfile.write(data) else: assert False class DummyIE(InfoExtractor): def _sort_formats(self, formats, field_preference=[]): self._downloader.sort_formats( {'formats': formats, '_format_sort_fields': field_preference}) class TestInfoExtractor(unittest.TestCase): def setUp(self): self.ie = DummyIE(FakeYDL()) def test_ie_key(self): self.assertEqual(get_info_extractor(YoutubeIE.ie_key()), YoutubeIE) def test_get_netrc_login_info(self): for params in [ {'usenetrc': True, 'netrc_location': './test/testdata/netrc/netrc'}, {'netrc_cmd': f'{sys.executable} ./test/testdata/netrc/print_netrc.py'}, ]: ie = DummyIE(FakeYDL(params)) self.assertEqual(ie._get_netrc_login_info(netrc_machine='normal_use'), ('user', 'pass')) self.assertEqual(ie._get_netrc_login_info(netrc_machine='empty_user'), ('', 'pass')) self.assertEqual(ie._get_netrc_login_info(netrc_machine='empty_pass'), ('user', '')) self.assertEqual(ie._get_netrc_login_info(netrc_machine='both_empty'), ('', '')) self.assertEqual(ie._get_netrc_login_info(netrc_machine='nonexistent'), (None, None)) def test_html_search_regex(self): html = '<p id="foo">Watch this <a href="http://www.youtube.com/watch?v=BaW_jenozKc">video</a></p>' search = lambda re, *args: self.ie._html_search_regex(re, html, *args) self.assertEqual(search(r'<p id="foo">(.+?)</p>', 'foo'), 'Watch this video') def test_opengraph(self): ie = self.ie html = ''' <meta name="og:title" content='Foo'/> <meta content="Some video's description " name="og:description"/> <meta property='og:image' content='http://domain.com/pic.jpg?key1=val1&amp;key2=val2'/> <meta content='application/x-shockwave-flash' property='og:video:type'> <meta content='Foo' property=og:foobar> <meta name="og:test1" content='foo > < bar'/> <meta name="og:test2" content="foo >//< bar"/> <meta property=og-test3 content='Ill-formatted opengraph'/> <meta property=og:test4 content=unquoted-value/> ''' self.assertEqual(ie._og_search_title(html), 'Foo') self.assertEqual(ie._og_search_description(html), 'Some video\'s description ') self.assertEqual(ie._og_search_thumbnail(html), 'http://domain.com/pic.jpg?key1=val1&key2=val2') self.assertEqual(ie._og_search_video_url(html, default=None), None) self.assertEqual(ie._og_search_property('foobar', html), 
'Foo') self.assertEqual(ie._og_search_property('test1', html), 'foo > < bar') self.assertEqual(ie._og_search_property('test2', html), 'foo >//< bar') self.assertEqual(ie._og_search_property('test3', html), 'Ill-formatted opengraph') self.assertEqual(ie._og_search_property(('test0', 'test1'), html), 'foo > < bar') self.assertRaises(RegexNotFoundError, ie._og_search_property, 'test0', html, None, fatal=True) self.assertRaises(RegexNotFoundError, ie._og_search_property, ('test0', 'test00'), html, None, fatal=True) self.assertEqual(ie._og_search_property('test4', html), 'unquoted-value') def test_html_search_meta(self): ie = self.ie html = ''' <meta name="a" content="1" /> <meta name='b' content='2'> <meta name="c" content='3'> <meta name=d content='4'> <meta property="e" content='5' > <meta content="6" name="f"> ''' self.assertEqual(ie._html_search_meta('a', html), '1') self.assertEqual(ie._html_search_meta('b', html), '2') self.assertEqual(ie._html_search_meta('c', html), '3') self.assertEqual(ie._html_search_meta('d', html), '4') self.assertEqual(ie._html_search_meta('e', html), '5') self.assertEqual(ie._html_search_meta('f', html), '6') self.assertEqual(ie._html_search_meta(('a', 'b', 'c'), html), '1') self.assertEqual(ie._html_search_meta(('c', 'b', 'a'), html), '3') self.assertEqual(ie._html_search_meta(('z', 'x', 'c'), html), '3') self.assertRaises(RegexNotFoundError, ie._html_search_meta, 'z', html, None, fatal=True) self.assertRaises(RegexNotFoundError, ie._html_search_meta, ('z', 'x'), html, None, fatal=True) def test_search_json_ld_realworld(self): _TESTS = [ # https://github.com/ytdl-org/youtube-dl/issues/23306 ( r'''<script type="application/ld+json"> { "@context": "http://schema.org/", "@type": "VideoObject", "name": "1 On 1 With Kleio", "url": "https://www.eporner.com/hd-porn/xN49A1cT3eB/1-On-1-With-Kleio/", "duration": "PT0H12M23S", "thumbnailUrl": ["https://static-eu-cdn.eporner.com/thumbs/static4/7/78/780/780814/9_360.jpg", "https://imggen.eporner.com/780814/1920/1080/9.jpg"], "contentUrl": "https://gvideo.eporner.com/xN49A1cT3eB/xN49A1cT3eB.mp4", "embedUrl": "https://www.eporner.com/embed/xN49A1cT3eB/1-On-1-With-Kleio/", "image": "https://static-eu-cdn.eporner.com/thumbs/static4/7/78/780/780814/9_360.jpg", "width": "1920", "height": "1080", "encodingFormat": "mp4", "bitrate": "6617kbps", "isFamilyFriendly": "False", "description": "Kleio Valentien", "uploadDate": "2015-12-05T21:24:35+01:00", "interactionStatistic": { "@type": "InteractionCounter", "interactionType": { "@type": "http://schema.org/WatchAction" }, "userInteractionCount": 1120958 }, "aggregateRating": { "@type": "AggregateRating", "ratingValue": "88", "ratingCount": "630", "bestRating": "100", "worstRating": "0" }, "actor": [{ "@type": "Person", "name": "Kleio Valentien", "url": "https://www.eporner.com/pornstar/kleio-valentien/" }]} </script>''', { 'title': '1 On 1 With Kleio', 'description': 'Kleio Valentien', 'url': 'https://gvideo.eporner.com/xN49A1cT3eB/xN49A1cT3eB.mp4', 'timestamp': 1449347075, 'duration': 743.0, 'view_count': 1120958, 'width': 1920, 'height': 1080, }, {}, ), ( r'''<script type="application/ld+json"> { "@context": "https://schema.org", "@graph": [ { "@type": "NewsArticle", "mainEntityOfPage": { "@type": "WebPage", "@id": "https://www.ant1news.gr/Society/article/620286/symmoria-anilikon-dikigoros-thymaton-ithelan-na-toys-apoteleiosoyn" }, "headline": "Συμμορία ανηλίκων – δικηγόρος θυμάτων: ήθελαν να τους αποτελειώσουν", "name": "Συμμορία ανηλίκων – δικηγόρος θυμάτων: ήθελαν να τους 
αποτελειώσουν", "description": "Τα παιδιά δέχθηκαν την επίθεση επειδή αρνήθηκαν να γίνουν μέλη της συμμορίας, ανέφερε ο Γ. Ζαχαρόπουλος.", "image": { "@type": "ImageObject", "url": "https://ant1media.azureedge.net/imgHandler/1100/a635c968-be71-447c-bf9c-80d843ece21e.jpg", "width": 1100, "height": 756 }, "datePublished": "2021-11-10T08:50:00+03:00", "dateModified": "2021-11-10T08:52:53+03:00", "author": { "@type": "Person", "@id": "https://www.ant1news.gr/", "name": "Ant1news", "image": "https://www.ant1news.gr/images/logo-e5d7e4b3e714c88e8d2eca96130142f6.png", "url": "https://www.ant1news.gr/" }, "publisher": { "@type": "Organization", "@id": "https://www.ant1news.gr#publisher", "name": "Ant1news", "url": "https://www.ant1news.gr", "logo": { "@type": "ImageObject", "url": "https://www.ant1news.gr/images/logo-e5d7e4b3e714c88e8d2eca96130142f6.png", "width": 400, "height": 400 }, "sameAs": [ "https://www.facebook.com/Ant1news.gr", "https://twitter.com/antennanews", "https://www.youtube.com/channel/UC0smvAbfczoN75dP0Hw4Pzw", "https://www.instagram.com/ant1news/" ] }, "keywords": "μαχαίρωμα,συμμορία ανηλίκων,ΕΙΔΗΣΕΙΣ,ΕΙΔΗΣΕΙΣ ΣΗΜΕΡΑ,ΝΕΑ,Κοινωνία - Ant1news", "articleSection": "Κοινωνία" } ] } </script>''', { 'timestamp': 1636523400, 'title': 'md5:91fe569e952e4d146485740ae927662b', }, {'expected_type': 'NewsArticle'}, ), ( r'''<script type="application/ld+json"> {"url":"/vrtnu/a-z/het-journaal/2021/het-journaal-het-journaal-19u-20211231/", "name":"Het journaal 19u", "description":"Het journaal 19u van vrijdag 31 december 2021.", "potentialAction":{"url":"https://vrtnu.page.link/pfVy6ihgCAJKgHqe8","@type":"ShareAction"}, "mainEntityOfPage":{"@id":"1640092242445","@type":"WebPage"}, "publication":[{ "startDate":"2021-12-31T19:00:00.000+01:00", "endDate":"2022-01-30T23:55:00.000+01:00", "publishedBy":{"name":"een","@type":"Organization"}, "publishedOn":{"url":"https://www.vrt.be/vrtnu/","name":"VRT NU","@type":"BroadcastService"}, "@id":"pbs-pub-3a7ec233-da95-4c1e-9b2b-cf5fdfebcbe8", "@type":"BroadcastEvent" }], "video":{ "name":"Het journaal - Aflevering 365 (Seizoen 2021)", "description":"Het journaal 19u van vrijdag 31 december 2021. 
Bekijk aflevering 365 van seizoen 2021 met VRT NU via de site of app.", "thumbnailUrl":"//images.vrt.be/width1280/2021/12/31/80d5ed00-6a64-11ec-b07d-02b7b76bf47f.jpg", "expires":"2022-01-30T23:55:00.000+01:00", "hasPart":[ {"name":"Explosie Turnhout","startOffset":70,"@type":"Clip"}, {"name":"Jaarwisseling","startOffset":440,"@type":"Clip"}, {"name":"Natuurbranden Colorado","startOffset":1179,"@type":"Clip"}, {"name":"Klimaatverandering","startOffset":1263,"@type":"Clip"}, {"name":"Zacht weer","startOffset":1367,"@type":"Clip"}, {"name":"Financiële balans","startOffset":1383,"@type":"Clip"}, {"name":"Club Brugge","startOffset":1484,"@type":"Clip"}, {"name":"Mentale gezondheid bij topsporters","startOffset":1575,"@type":"Clip"}, {"name":"Olympische Winterspelen","startOffset":1728,"@type":"Clip"}, {"name":"Sober oudjaar in Nederland","startOffset":1873,"@type":"Clip"} ], "duration":"PT34M39.23S", "uploadDate":"2021-12-31T19:00:00.000+01:00", "@id":"vid-9457d0c6-b8ac-4aba-b5e1-15aa3a3295b5", "@type":"VideoObject" }, "genre":["Nieuws en actua"], "episodeNumber":365, "partOfSeries":{"name":"Het journaal","@id":"222831405527","@type":"TVSeries"}, "partOfSeason":{"name":"Seizoen 2021","@id":"961809365527","@type":"TVSeason"}, "@context":"https://schema.org","@id":"961685295527","@type":"TVEpisode"}</script> ''', { 'chapters': [ {'title': 'Explosie Turnhout', 'start_time': 70, 'end_time': 440}, {'title': 'Jaarwisseling', 'start_time': 440, 'end_time': 1179}, {'title': 'Natuurbranden Colorado', 'start_time': 1179, 'end_time': 1263}, {'title': 'Klimaatverandering', 'start_time': 1263, 'end_time': 1367}, {'title': 'Zacht weer', 'start_time': 1367, 'end_time': 1383}, {'title': 'Financiële balans', 'start_time': 1383, 'end_time': 1484}, {'title': 'Club Brugge', 'start_time': 1484, 'end_time': 1575}, {'title': 'Mentale gezondheid bij topsporters', 'start_time': 1575, 'end_time': 1728}, {'title': 'Olympische Winterspelen', 'start_time': 1728, 'end_time': 1873}, {'title': 'Sober oudjaar in Nederland', 'start_time': 1873, 'end_time': 2079.23}, ], 'title': 'Het journaal - Aflevering 365 (Seizoen 2021)', }, {}, ), ( # test multiple thumbnails in a list r''' <script type="application/ld+json"> {"@context":"https://schema.org", "@type":"VideoObject", "thumbnailUrl":["https://www.rainews.it/cropgd/640x360/dl/img/2021/12/30/1640886376927_GettyImages.jpg"]} </script>''', { 'thumbnails': [{'url': 'https://www.rainews.it/cropgd/640x360/dl/img/2021/12/30/1640886376927_GettyImages.jpg'}], }, {}, ), ( # test single thumbnail r''' <script type="application/ld+json"> {"@context":"https://schema.org", "@type":"VideoObject", "thumbnailUrl":"https://www.rainews.it/cropgd/640x360/dl/img/2021/12/30/1640886376927_GettyImages.jpg"} </script>''', { 'thumbnails': [{'url': 'https://www.rainews.it/cropgd/640x360/dl/img/2021/12/30/1640886376927_GettyImages.jpg'}], }, {}, ), ( # test thumbnail_url key without URL scheme r''' <script type="application/ld+json"> { "@context": "https://schema.org", "@type": "VideoObject", "thumbnail_url": "//www.nobelprize.org/images/12693-landscape-medium-gallery.jpg" }</script>''', { 'thumbnails': [{'url': 'https://www.nobelprize.org/images/12693-landscape-medium-gallery.jpg'}], }, {}, ), ] for html, expected_dict, search_json_ld_kwargs in _TESTS: expect_dict( self, self.ie._search_json_ld(html, None, **search_json_ld_kwargs), expected_dict, ) def test_download_json(self): uri = encode_data_uri(b'{"foo": "blah"}', 'application/json') self.assertEqual(self.ie._download_json(uri, None), {'foo': 
'blah'}) uri = encode_data_uri(b'callback({"foo": "blah"})', 'application/javascript') self.assertEqual(self.ie._download_json(uri, None, transform_source=strip_jsonp), {'foo': 'blah'}) uri = encode_data_uri(b'{"foo": invalid}', 'application/json') self.assertRaises(ExtractorError, self.ie._download_json, uri, None) self.assertEqual(self.ie._download_json(uri, None, fatal=False), None) def test_parse_html5_media_entries(self): # inline video tag expect_dict( self, self.ie._parse_html5_media_entries( 'https://127.0.0.1/video.html', r'<html><video src="/vid.mp4" /></html>', None)[0], { 'formats': [{ 'url': 'https://127.0.0.1/vid.mp4', }], }) # from https://www.r18.com/ # with kpbs in label expect_dict( self, self.ie._parse_html5_media_entries( 'https://www.r18.com/', r''' <video id="samplevideo_amateur" class="js-samplevideo video-js vjs-default-skin vjs-big-play-centered" controls preload="auto" width="400" height="225" poster="//pics.r18.com/digital/amateur/mgmr105/mgmr105jp.jpg"> <source id="video_source" src="https://awscc3001.r18.com/litevideo/freepv/m/mgm/mgmr105/mgmr105_sm_w.mp4" type="video/mp4" res="240" label="300kbps"> <source id="video_source" src="https://awscc3001.r18.com/litevideo/freepv/m/mgm/mgmr105/mgmr105_dm_w.mp4" type="video/mp4" res="480" label="1000kbps"> <source id="video_source" src="https://awscc3001.r18.com/litevideo/freepv/m/mgm/mgmr105/mgmr105_dmb_w.mp4" type="video/mp4" res="740" label="1500kbps"> <p>Your browser does not support the video tag.</p> </video> ''', None)[0], { 'formats': [{ 'url': 'https://awscc3001.r18.com/litevideo/freepv/m/mgm/mgmr105/mgmr105_sm_w.mp4', 'ext': 'mp4', 'format_id': '300kbps', 'height': 240, 'tbr': 300, }, { 'url': 'https://awscc3001.r18.com/litevideo/freepv/m/mgm/mgmr105/mgmr105_dm_w.mp4', 'ext': 'mp4', 'format_id': '1000kbps', 'height': 480, 'tbr': 1000, }, { 'url': 'https://awscc3001.r18.com/litevideo/freepv/m/mgm/mgmr105/mgmr105_dmb_w.mp4', 'ext': 'mp4', 'format_id': '1500kbps', 'height': 740, 'tbr': 1500, }], 'thumbnail': '//pics.r18.com/digital/amateur/mgmr105/mgmr105jp.jpg', }) # from https://www.csfd.cz/ # with width and height expect_dict( self, self.ie._parse_html5_media_entries( 'https://www.csfd.cz/', r''' <video width="770" height="328" preload="none" controls poster="https://img.csfd.cz/files/images/film/video/preview/163/344/163344118_748d20.png?h360" > <source src="https://video.csfd.cz/files/videos/157/750/157750813/163327358_eac647.mp4" type="video/mp4" width="640" height="360"> <source src="https://video.csfd.cz/files/videos/157/750/157750813/163327360_3d2646.mp4" type="video/mp4" width="1280" height="720"> <source src="https://video.csfd.cz/files/videos/157/750/157750813/163327356_91f258.mp4" type="video/mp4" width="1920" height="1080"> <source src="https://video.csfd.cz/files/videos/157/750/157750813/163327359_962b4a.webm" type="video/webm" width="640" height="360"> <source src="https://video.csfd.cz/files/videos/157/750/157750813/163327361_6feee0.webm" type="video/webm" width="1280" height="720"> <source src="https://video.csfd.cz/files/videos/157/750/157750813/163327357_8ab472.webm" type="video/webm" width="1920" height="1080"> <track src="https://video.csfd.cz/files/subtitles/163/344/163344115_4c388b.srt" type="text/x-srt" kind="subtitles" srclang="cs" label="cs"> </video> ''', None)[0], { 'formats': [{ 'url': 'https://video.csfd.cz/files/videos/157/750/157750813/163327358_eac647.mp4', 'ext': 'mp4', 'width': 640, 'height': 360, }, { 'url': 
'https://video.csfd.cz/files/videos/157/750/157750813/163327360_3d2646.mp4', 'ext': 'mp4', 'width': 1280, 'height': 720, }, { 'url': 'https://video.csfd.cz/files/videos/157/750/157750813/163327356_91f258.mp4', 'ext': 'mp4', 'width': 1920, 'height': 1080, }, { 'url': 'https://video.csfd.cz/files/videos/157/750/157750813/163327359_962b4a.webm', 'ext': 'webm', 'width': 640, 'height': 360, }, { 'url': 'https://video.csfd.cz/files/videos/157/750/157750813/163327361_6feee0.webm', 'ext': 'webm', 'width': 1280, 'height': 720, }, { 'url': 'https://video.csfd.cz/files/videos/157/750/157750813/163327357_8ab472.webm', 'ext': 'webm', 'width': 1920, 'height': 1080, }], 'subtitles': { 'cs': [{'url': 'https://video.csfd.cz/files/subtitles/163/344/163344115_4c388b.srt'}], }, 'thumbnail': 'https://img.csfd.cz/files/images/film/video/preview/163/344/163344118_748d20.png?h360', }) # from https://tamasha.com/v/Kkdjw # with height in label expect_dict( self, self.ie._parse_html5_media_entries( 'https://tamasha.com/v/Kkdjw', r''' <video crossorigin="anonymous"> <source src="https://s-v2.tamasha.com/statics/videos_file/19/8f/Kkdjw_198feff8577d0057536e905cce1fb61438dd64e0_n_240.mp4" type="video/mp4" label="AUTO" res="0"/> <source src="https://s-v2.tamasha.com/statics/videos_file/19/8f/Kkdjw_198feff8577d0057536e905cce1fb61438dd64e0_n_240.mp4" type="video/mp4" label="240p" res="240"/> <source src="https://s-v2.tamasha.com/statics/videos_file/20/00/Kkdjw_200041c66f657fc967db464d156eafbc1ed9fe6f_n_144.mp4" type="video/mp4" label="144p" res="144"/> </video> ''', None)[0], { 'formats': [{ 'url': 'https://s-v2.tamasha.com/statics/videos_file/19/8f/Kkdjw_198feff8577d0057536e905cce1fb61438dd64e0_n_240.mp4', }, { 'url': 'https://s-v2.tamasha.com/statics/videos_file/19/8f/Kkdjw_198feff8577d0057536e905cce1fb61438dd64e0_n_240.mp4', 'ext': 'mp4', 'format_id': '240p', 'height': 240, }, { 'url': 'https://s-v2.tamasha.com/statics/videos_file/20/00/Kkdjw_200041c66f657fc967db464d156eafbc1ed9fe6f_n_144.mp4', 'ext': 'mp4', 'format_id': '144p', 'height': 144, }], }) # from https://www.directvnow.com # with data-src expect_dict( self, self.ie._parse_html5_media_entries( 'https://www.directvnow.com', r''' <video id="vid1" class="header--video-masked active" muted playsinline> <source data-src="https://cdn.directv.com/content/dam/dtv/prod/website_directvnow-international/videos/DTVN_hdr_HBO_v3.mp4" type="video/mp4" /> </video> ''', None)[0], { 'formats': [{ 'ext': 'mp4', 'url': 'https://cdn.directv.com/content/dam/dtv/prod/website_directvnow-international/videos/DTVN_hdr_HBO_v3.mp4', }], }) # from https://www.directvnow.com # with data-src expect_dict( self, self.ie._parse_html5_media_entries( 'https://www.directvnow.com', r''' <video id="vid1" class="header--video-masked active" muted playsinline> <source data-src="https://cdn.directv.com/content/dam/dtv/prod/website_directvnow-international/videos/DTVN_hdr_HBO_v3.mp4" type="video/mp4" /> </video> ''', None)[0], { 'formats': [{ 'url': 'https://cdn.directv.com/content/dam/dtv/prod/website_directvnow-international/videos/DTVN_hdr_HBO_v3.mp4', 'ext': 'mp4', }], }) # from https://www.klarna.com/uk/ # with data-video-src expect_dict( self, self.ie._parse_html5_media_entries( 'https://www.directvnow.com', r''' <video loop autoplay muted class="responsive-video block-kl__video video-on-medium"> <source src="" data-video-desktop data-video-src="https://www.klarna.com/uk/wp-content/uploads/sites/11/2019/01/KL062_Smooth3_0_DogWalking_5s_920x080_.mp4" type="video/mp4" /> </video> ''', None)[0], { 
'formats': [{ 'url': 'https://www.klarna.com/uk/wp-content/uploads/sites/11/2019/01/KL062_Smooth3_0_DogWalking_5s_920x080_.mp4', 'ext': 'mp4', }], }) # from https://0000.studio/ # with type attribute but without extension in URL expect_dict( self, self.ie._parse_html5_media_entries( 'https://0000.studio', r''' <video src="https://d1ggyt9m8pwf3g.cloudfront.net/protected/ap-northeast-1:1864af40-28d5-492b-b739-b32314b1a527/archive/clip/838db6a7-8973-4cd6-840d-8517e4093c92" controls="controls" type="video/mp4" preload="metadata" autoplay="autoplay" playsinline class="object-contain"> </video> ''', None)[0], { 'formats': [{ 'url': 'https://d1ggyt9m8pwf3g.cloudfront.net/protected/ap-northeast-1:1864af40-28d5-492b-b739-b32314b1a527/archive/clip/838db6a7-8973-4cd6-840d-8517e4093c92', 'ext': 'mp4', }], }) def test_extract_jwplayer_data_realworld(self): # from http://www.suffolk.edu/sjc/ expect_dict( self, self.ie._extract_jwplayer_data(r''' <script type='text/javascript'> jwplayer('my-video').setup({ file: 'rtmp://192.138.214.154/live/sjclive', fallback: 'true', width: '95%', aspectratio: '16:9', primary: 'flash', mediaid:'XEgvuql4' }); </script> ''', None, require_title=False), { 'id': 'XEgvuql4', 'formats': [{ 'url': 'rtmp://192.138.214.154/live/sjclive', 'ext': 'flv', }], }) # from https://www.pornoxo.com/videos/7564/striptease-from-sexy-secretary/ expect_dict( self, self.ie._extract_jwplayer_data(r''' <script type="text/javascript"> jwplayer("mediaplayer").setup({ 'videoid': "7564", 'width': "100%", 'aspectratio': "16:9", 'stretching': "exactfit", 'autostart': 'false', 'flashplayer': "https://t04.vipstreamservice.com/jwplayer/v5.10/player.swf", 'file': "https://cdn.pornoxo.com/key=MF+oEbaxqTKb50P-w9G3nA,end=1489689259,ip=104.199.146.27/ip=104.199.146.27/speed=6573765/buffer=3.0/2009-12/4b2157147afe5efa93ce1978e0265289c193874e02597.flv", 'image': "https://t03.vipstreamservice.com/thumbs/pxo-full/2009-12/14/a4b2157147afe5efa93ce1978e0265289c193874e02597.flv-full-13.jpg", 'filefallback': "https://cdn.pornoxo.com/key=9ZPsTR5EvPLQrBaak2MUGA,end=1489689259,ip=104.199.146.27/ip=104.199.146.27/speed=6573765/buffer=3.0/2009-12/m_4b2157147afe5efa93ce1978e0265289c193874e02597.mp4", 'logo.hide': true, 'skin': "https://t04.vipstreamservice.com/jwplayer/skin/modieus-blk.zip", 'plugins': "https://t04.vipstreamservice.com/jwplayer/dock/dockableskinnableplugin.swf", 'dockableskinnableplugin.piclink': "/index.php?key=ajax-videothumbsn&vid=7564&data=2009-12--14--4b2157147afe5efa93ce1978e0265289c193874e02597.flv--17370", 'controlbar': 'bottom', 'modes': [ {type: 'flash', src: 'https://t04.vipstreamservice.com/jwplayer/v5.10/player.swf'} ], 'provider': 'http' }); //noinspection JSAnnotator invideo.setup({ adsUrl: "/banner-iframe/?zoneId=32", adsUrl2: "", autostart: false }); </script> ''', 'dummy', require_title=False), { 'thumbnail': 'https://t03.vipstreamservice.com/thumbs/pxo-full/2009-12/14/a4b2157147afe5efa93ce1978e0265289c193874e02597.flv-full-13.jpg', 'formats': [{ 'url': 'https://cdn.pornoxo.com/key=MF+oEbaxqTKb50P-w9G3nA,end=1489689259,ip=104.199.146.27/ip=104.199.146.27/speed=6573765/buffer=3.0/2009-12/4b2157147afe5efa93ce1978e0265289c193874e02597.flv', 'ext': 'flv', }], }) # from http://www.indiedb.com/games/king-machine/videos expect_dict( self, self.ie._extract_jwplayer_data(r''' <script> jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/\/www.indiedb.com\/","displaytitle":false,"autostart":false,"repeat":false,"title":"king machine trailer 
1","sharing":{"link":"http:\/\/www.indiedb.com\/games\/king-machine\/videos\/king-machine-trailer-1","code":"<iframe width=\"560\" height=\"315\" src=\"http:\/\/www.indiedb.com\/media\/iframe\/1522983\" frameborder=\"0\" allowfullscreen><\/iframe><br><a href=\"http:\/\/www.indiedb.com\/games\/king-machine\/videos\/king-machine-trailer-1\">king machine trailer 1 - Indie DB<\/a>"},"related":{"file":"http:\/\/rss.indiedb.com\/media\/recommended\/1522983\/feed\/rss.xml","dimensions":"160x120","onclick":"link"},"sources":[{"file":"http:\/\/cdn.dbolical.com\/cache\/videos\/games\/1\/50\/49678\/encode_mp4\/king-machine-trailer.mp4","label":"360p SD","default":"true"},{"file":"http:\/\/cdn.dbolical.com\/cache\/videos\/games\/1\/50\/49678\/encode720p_mp4\/king-machine-trailer.mp4","label":"720p HD"}],"image":"http:\/\/media.indiedb.com\/cache\/images\/games\/1\/50\/49678\/thumb_620x2000\/king-machine-trailer.mp4.jpg","advertising":{"client":"vast","tag":"http:\/\/ads.intergi.com\/adrawdata\/3.0\/5205\/4251742\/0\/1013\/ADTECH;cors=yes;width=560;height=315;referring_url=http:\/\/www.indiedb.com\/games\/king-machine\/videos\/king-machine-trailer-1;content_url=http:\/\/www.indiedb.com\/games\/king-machine\/videos\/king-machine-trailer-1;media_id=1522983;title=king+machine+trailer+1;device=__DEVICE__;model=__MODEL__;os=Windows+OS;osversion=__OSVERSION__;ua=__UA__;ip=109.171.17.81;uniqueid=1522983;tags=__TAGS__;number=58cac25928151;time=1489683033"},"width":620,"height":349}).once("play", function(event) { videoAnalytics("play"); }).once("complete", function(event) { videoAnalytics("completed"); }); </script> ''', 'dummy'), { 'title': 'king machine trailer 1',
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
true
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_traversal.py
test/test_traversal.py
import http.cookies
import re
import xml.etree.ElementTree

import pytest

from yt_dlp.utils import (
    ExtractorError,
    determine_ext,
    dict_get,
    int_or_none,
    join_nonempty,
    str_or_none,
)
from yt_dlp.utils.traversal import (
    find_element,
    find_elements,
    require,
    subs_list_to_dict,
    traverse_obj,
    trim_str,
    unpack,
)

_TEST_DATA = {
    100: 100,
    1.2: 1.2,
    'str': 'str',
    'None': None,
    '...': ...,
    'urls': [
        {'index': 0, 'url': 'https://www.example.com/0'},
        {'index': 1, 'url': 'https://www.example.com/1'},
    ],
    'data': (
        {'index': 2},
        {'index': 3},
    ),
    'dict': {},
}

_TEST_HTML = '''<html><body>
    <div class="a">1</div>
    <div class="a" id="x" custom="z">2</div>
    <div class="b" data-id="y" custom="z">3</div>
    <p class="a">4</p>
    <p id="d" custom="e">5</p>
</body></html>'''


class TestTraversal:
    def test_traversal_base(self):
        assert traverse_obj(_TEST_DATA, ('str',)) == 'str', \
            'allow tuple path'
        assert traverse_obj(_TEST_DATA, ['str']) == 'str', \
            'allow list path'
        assert traverse_obj(_TEST_DATA, (value for value in ('str',))) == 'str', \
            'allow iterable path'
        assert traverse_obj(_TEST_DATA, 'str') == 'str', \
            'single items should be treated as a path'
        assert traverse_obj(_TEST_DATA, 100) == 100, \
            'allow int path'
        assert traverse_obj(_TEST_DATA, 1.2) == 1.2, \
            'allow float path'
        assert traverse_obj(_TEST_DATA, None) == _TEST_DATA, \
            '`None` should not perform any modification'

    def test_traversal_ellipsis(self):
        assert traverse_obj(_TEST_DATA, ...) == [x for x in _TEST_DATA.values() if x not in (None, {})], \
            '`...` should give all non discarded values'
        assert traverse_obj(_TEST_DATA, ('urls', 0, ...)) == list(_TEST_DATA['urls'][0].values()), \
            '`...` selection for dicts should select all values'
        assert traverse_obj(_TEST_DATA, (..., ..., 'url')) == ['https://www.example.com/0', 'https://www.example.com/1'], \
            'nested `...` queries should work'
        assert traverse_obj(_TEST_DATA, (..., ..., 'index')) == list(range(4)), \
            '`...` query result should be flattened'
        assert traverse_obj(iter(range(4)), ...) == list(range(4)), \
            '`...` should accept iterables'

    def test_traversal_function(self):
        filter_func = lambda x, y: x == 'urls' and isinstance(y, list)
        assert traverse_obj(_TEST_DATA, filter_func) == [_TEST_DATA['urls']], \
            'function as query key should perform a filter based on (key, value)'
        assert traverse_obj(_TEST_DATA, lambda _, x: isinstance(x[0], str)) == ['str'], \
            'exceptions in the query function should be caught'
        assert traverse_obj(iter(range(4)), lambda _, x: x % 2 == 0) == [0, 2], \
            'function key should accept iterables'
        # Wrong function signature should raise (debug mode)
        with pytest.raises(Exception):
            traverse_obj(_TEST_DATA, lambda a: ...)
        with pytest.raises(Exception):
            traverse_obj(_TEST_DATA, lambda a, b, c: ...)

    def test_traversal_set(self):
        # transformation/type, like `expected_type`
        assert traverse_obj(_TEST_DATA, (..., {str.upper})) == ['STR'], \
            'Function in set should be a transformation'
        assert traverse_obj(_TEST_DATA, (..., {str})) == ['str'], \
            'Type in set should be a type filter'
        assert traverse_obj(_TEST_DATA, (..., {str, int})) == [100, 'str'], \
            'Multiple types in set should be a type filter'
        assert traverse_obj(_TEST_DATA, {dict}) == _TEST_DATA, \
            'A single set should be wrapped into a path'
        assert traverse_obj(_TEST_DATA, (..., {str.upper})) == ['STR'], \
            'Transformation function should not raise'
        expected = [x for x in map(str_or_none, _TEST_DATA.values()) if x is not None]
        assert traverse_obj(_TEST_DATA, (..., {str_or_none})) == expected, \
            'Function in set should be a transformation'
        assert traverse_obj(_TEST_DATA, ('fail', {lambda _: 'const'})) == 'const', \
            'Function in set should always be called'
        # Sets with length < 1 or > 1 not including only types should raise
        with pytest.raises(Exception):
            traverse_obj(_TEST_DATA, set())
        with pytest.raises(Exception):
            traverse_obj(_TEST_DATA, {str.upper, str})

    def test_traversal_slice(self):
        _SLICE_DATA = [0, 1, 2, 3, 4]

        assert traverse_obj(_TEST_DATA, ('dict', slice(1))) is None, \
            'slice on a dictionary should not throw'
        assert traverse_obj(_SLICE_DATA, slice(1)) == _SLICE_DATA[:1], \
            'slice key should apply slice to sequence'
        assert traverse_obj(_SLICE_DATA, slice(1, 2)) == _SLICE_DATA[1:2], \
            'slice key should apply slice to sequence'
        assert traverse_obj(_SLICE_DATA, slice(1, 4, 2)) == _SLICE_DATA[1:4:2], \
            'slice key should apply slice to sequence'

    def test_traversal_alternatives(self):
        assert traverse_obj(_TEST_DATA, 'fail', 'str') == 'str', \
            'multiple `paths` should be treated as alternative paths'
        assert traverse_obj(_TEST_DATA, 'str', 100) == 'str', \
            'alternatives should exit early'
        assert traverse_obj(_TEST_DATA, 'fail', 'fail') is None, \
            'alternatives should return `default` if exhausted'
        assert traverse_obj(_TEST_DATA, (..., 'fail'), 100) == 100, \
            'alternatives should track their own branching return'
        assert traverse_obj(_TEST_DATA, ('dict', ...), ('data', ...)) == list(_TEST_DATA['data']), \
            'alternatives on empty objects should search further'

    def test_traversal_branching_nesting(self):
        assert traverse_obj(_TEST_DATA, ('urls', (3, 0), 'url')) == ['https://www.example.com/0'], \
            'tuple as key should be treated as branches'
        assert traverse_obj(_TEST_DATA, ('urls', [3, 0], 'url')) == ['https://www.example.com/0'], \
            'list as key should be treated as branches'
        assert traverse_obj(_TEST_DATA, ('urls', ((1, 'fail'), (0, 'url')))) == ['https://www.example.com/0'], \
            'double nesting in path should be treated as paths'
        assert traverse_obj(['0', [1, 2]], [(0, 1), 0]) == [1], \
            'do not fail early on branching'
        expected = ['https://www.example.com/0', 'https://www.example.com/1']
        assert traverse_obj(_TEST_DATA, ('urls', ((0, ('fail', 'url')), (1, 'url')))) == expected, \
            'triple nesting in path should be treated as branches'
        assert traverse_obj(_TEST_DATA, ('urls', ('fail', (..., 'url')))) == expected, \
            'ellipsis as branch path start gets flattened'

    def test_traversal_dict(self):
        assert traverse_obj(_TEST_DATA, {0: 100, 1: 1.2}) == {0: 100, 1: 1.2}, \
            'dict key should result in a dict with the same keys'
        expected = {0: 'https://www.example.com/0'}
        assert traverse_obj(_TEST_DATA, {0: ('urls', 0, 'url')}) == expected, \
            'dict key should allow paths'
        expected = {0: ['https://www.example.com/0']}
        assert traverse_obj(_TEST_DATA, {0: ('urls', (3, 0), 'url')}) == expected, \
            'tuple in dict path should be treated as branches'
        assert traverse_obj(_TEST_DATA, {0: ('urls', ((1, 'fail'), (0, 'url')))}) == expected, \
            'double nesting in dict path should be treated as paths'
        expected = {0: ['https://www.example.com/1', 'https://www.example.com/0']}
        assert traverse_obj(_TEST_DATA, {0: ('urls', ((1, ('fail', 'url')), (0, 'url')))}) == expected, \
            'triple nesting in dict path should be treated as branches'
        assert traverse_obj(_TEST_DATA, {0: 'fail'}) == {}, \
            'remove `None` values when top level dict key fails'
        assert traverse_obj(_TEST_DATA, {0: 'fail'}, default=...) == {0: ...}, \
            'use `default` if key fails and `default`'
        assert traverse_obj(_TEST_DATA, {0: 'dict'}) == {}, \
            'remove empty values when dict key'
        assert traverse_obj(_TEST_DATA, {0: 'dict'}, default=...) == {0: ...}, \
            'use `default` when dict key and `default`'
        assert traverse_obj(_TEST_DATA, {0: {0: 'fail'}}) == {}, \
            'remove empty values when nested dict key fails'
        assert traverse_obj(None, {0: 'fail'}) == {}, \
            'default to dict if pruned'
        assert traverse_obj(None, {0: 'fail'}, default=...) == {0: ...}, \
            'default to dict if pruned and default is given'
        assert traverse_obj(_TEST_DATA, {0: {0: 'fail'}}, default=...) == {0: {0: ...}}, \
            'use nested `default` when nested dict key fails and `default`'
        assert traverse_obj(_TEST_DATA, {0: ('dict', ...)}) == {}, \
            'remove key if branch in dict key not successful'

    def test_traversal_default(self):
        _DEFAULT_DATA = {'None': None, 'int': 0, 'list': []}

        assert traverse_obj(_DEFAULT_DATA, 'fail') is None, \
            'default value should be `None`'
        assert traverse_obj(_DEFAULT_DATA, 'fail', 'fail', default=...) == ..., \
            'chained fails should result in default'
        assert traverse_obj(_DEFAULT_DATA, 'None', 'int') == 0, \
            'should not short-circuit on `None`'
        assert traverse_obj(_DEFAULT_DATA, 'fail', default=1) == 1, \
            'invalid dict key should result in `default`'
        assert traverse_obj(_DEFAULT_DATA, 'None', default=1) == 1, \
            '`None` is a deliberate sentinel and should become `default`'
        assert traverse_obj(_DEFAULT_DATA, ('list', 10)) is None, \
            '`IndexError` should result in `default`'
        assert traverse_obj(_DEFAULT_DATA, (..., 'fail'), default=1) == 1, \
            'if branched but not successful return `default` if defined, not `[]`'
        assert traverse_obj(_DEFAULT_DATA, (..., 'fail'), default=None) is None, \
            'if branched but not successful return `default` even if `default` is `None`'
        assert traverse_obj(_DEFAULT_DATA, (..., 'fail')) == [], \
            'if branched but not successful return `[]`, not `default`'
        assert traverse_obj(_DEFAULT_DATA, ('list', ...)) == [], \
            'if branched but object is empty return `[]`, not `default`'
        assert traverse_obj(None, ...) == [], \
            'if branched but object is `None` return `[]`, not `default`'
        assert traverse_obj({0: None}, (0, ...)) == [], \
            'if branched but state is `None` return `[]`, not `default`'

    @pytest.mark.parametrize('path', [
        ('fail', ...),
        (..., 'fail'),
        100 * ('fail',) + (...,),
        (...,) + 100 * ('fail',),
    ])
    def test_traversal_branching(self, path):
        assert traverse_obj({}, path) == [], \
            'if branched but state is `None`, return `[]` (not `default`)'
        assert traverse_obj({}, 'fail', path) == [], \
            'if branching in last alternative and previous did not match, return `[]` (not `default`)'
        assert traverse_obj({0: 'x'}, 0, path) == 'x', \
            'if branching in last alternative and previous did match, return single value'
        assert traverse_obj({0: 'x'}, path, 0) == 'x', \
            'if branching in first alternative and non-branching path does match, return single value'
        assert traverse_obj({}, path, 'fail') is None, \
            'if branching in first alternative and non-branching path does not match, return `default`'

    def test_traversal_expected_type(self):
        _EXPECTED_TYPE_DATA = {'str': 'str', 'int': 0}

        assert traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=str) == 'str', \
            'accept matching `expected_type` type'
        assert traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=int) is None, \
            'reject non matching `expected_type` type'
        assert traverse_obj(_EXPECTED_TYPE_DATA, 'int', expected_type=lambda x: str(x)) == '0', \
            'transform type using type function'
        assert traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=lambda _: 1 / 0) is None, \
            'wrap expected_type function in try_call'
        assert traverse_obj(_EXPECTED_TYPE_DATA, ..., expected_type=str) == ['str'], \
            'eliminate items that expected_type fails on'
        assert traverse_obj(_TEST_DATA, {0: 100, 1: 1.2}, expected_type=int) == {0: 100}, \
            'type as expected_type should filter dict values'
        assert traverse_obj(_TEST_DATA, {0: 100, 1: 1.2, 2: 'None'}, expected_type=str_or_none) == {0: '100', 1: '1.2'}, \
            'function as expected_type should transform dict values'
        assert traverse_obj(_TEST_DATA, ({0: 1.2}, 0, {int_or_none}), expected_type=int) == 1, \
            'expected_type should not filter non final dict values'
        assert traverse_obj(_TEST_DATA, {0: {0: 100, 1: 'str'}}, expected_type=int) == {0: {0: 100}}, \
            'expected_type should transform deep dict values'
        assert traverse_obj(_TEST_DATA, [({0: '...'}, {0: '...'})], expected_type=type(...)) == [{0: ...}, {0: ...}], \
            'expected_type should transform branched dict values'
        assert traverse_obj({1: {3: 4}}, [(1, 2), 3], expected_type=int) == [4], \
            'expected_type regression for type matching in tuple branching'
        assert traverse_obj(_TEST_DATA, ['data', ...], expected_type=int) == [], \
            'expected_type regression for type matching in dict result'

    def test_traversal_get_all(self):
        _GET_ALL_DATA = {'key': [0, 1, 2]}

        assert traverse_obj(_GET_ALL_DATA, ('key', ...), get_all=False) == 0, \
            'if not `get_all`, return only first matching value'
        assert traverse_obj(_GET_ALL_DATA, ..., get_all=False) == [0, 1, 2], \
            'do not overflatten if not `get_all`'

    def test_traversal_casesense(self):
        _CASESENSE_DATA = {
            'KeY': 'value0',
            0: {
                'KeY': 'value1',
                0: {'KeY': 'value2'},
            },
        }

        assert traverse_obj(_CASESENSE_DATA, 'key') is None, \
            'dict keys should be case sensitive unless `casesense`'
        assert traverse_obj(_CASESENSE_DATA, 'keY', casesense=False) == 'value0', \
            'allow non matching key case if `casesense`'
        assert traverse_obj(_CASESENSE_DATA, [0, ('keY',)], casesense=False) == ['value1'], \
            'allow non matching key case in branch if `casesense`'
        assert traverse_obj(_CASESENSE_DATA, [0, ([0, 'keY'],)], casesense=False) == ['value2'], \
            'allow non matching key case in branch path if `casesense`'

    def test_traversal_traverse_string(self):
        _TRAVERSE_STRING_DATA = {'str': 'str', 1.2: 1.2}

        assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', 0)) is None, \
            'do not traverse into string if not `traverse_string`'
        assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', 0), traverse_string=True) == 's', \
            'traverse into string if `traverse_string`'
        assert traverse_obj(_TRAVERSE_STRING_DATA, (1.2, 1), traverse_string=True) == '.', \
            'traverse into converted data if `traverse_string`'
        assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', ...), traverse_string=True) == 'str', \
            '`...` should result in string (same value) if `traverse_string`'
        assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', slice(0, None, 2)), traverse_string=True) == 'sr', \
            '`slice` should result in string if `traverse_string`'
        assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', lambda i, v: i or v == 's'), traverse_string=True) == 'str', \
            'function should result in string if `traverse_string`'
        assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', (0, 2)), traverse_string=True) == ['s', 'r'], \
            'branching should result in list if `traverse_string`'
        assert traverse_obj({}, (0, ...), traverse_string=True) == [], \
            'branching should result in list if `traverse_string`'
        assert traverse_obj({}, (0, lambda x, y: True), traverse_string=True) == [], \
            'branching should result in list if `traverse_string`'
        assert traverse_obj({}, (0, slice(1)), traverse_string=True) == [], \
            'branching should result in list if `traverse_string`'

    def test_traversal_re(self):
        mobj = re.fullmatch(r'0(12)(?P<group>3)(4)?', '0123')
        assert traverse_obj(mobj, ...) == [x for x in mobj.groups() if x is not None], \
            '`...` on a `re.Match` should give its `groups()`'
        assert traverse_obj(mobj, lambda k, _: k in (0, 2)) == ['0123', '3'], \
            'function on a `re.Match` should give groupno, value starting at 0'
        assert traverse_obj(mobj, 'group') == '3', \
            'str key on a `re.Match` should give group with that name'
        assert traverse_obj(mobj, 2) == '3', \
            'int key on a `re.Match` should give group with that name'
        assert traverse_obj(mobj, 'gRoUp', casesense=False) == '3', \
            'str key on a `re.Match` should respect casesense'
        assert traverse_obj(mobj, 'fail') is None, \
            'failing str key on a `re.Match` should return `default`'
        assert traverse_obj(mobj, 'gRoUpS', casesense=False) is None, \
            'failing str key on a `re.Match` should return `default`'
        assert traverse_obj(mobj, 8) is None, \
            'failing int key on a `re.Match` should return `default`'
        assert traverse_obj(mobj, lambda k, _: k in (0, 'group')) == ['0123', '3'], \
            'function on a `re.Match` should give group name as well'

    def test_traversal_xml_etree(self):
        etree = xml.etree.ElementTree.fromstring('''<?xml version="1.0"?>
        <data>
            <country name="Liechtenstein">
                <rank>1</rank>
                <year>2008</year>
                <gdppc>141100</gdppc>
                <neighbor name="Austria" direction="E"/>
                <neighbor name="Switzerland" direction="W"/>
            </country>
            <country name="Singapore">
                <rank>4</rank>
                <year>2011</year>
                <gdppc>59900</gdppc>
                <neighbor name="Malaysia" direction="N"/>
            </country>
            <country name="Panama">
                <rank>68</rank>
                <year>2011</year>
                <gdppc>13600</gdppc>
                <neighbor name="Costa Rica" direction="W"/>
                <neighbor name="Colombia" direction="E"/>
            </country>
        </data>''')
        assert traverse_obj(etree, '') == etree, \
            'empty str key should return the element itself'
        assert traverse_obj(etree, 'country') == list(etree), \
            'str key should lead all children with that tag name'
        assert traverse_obj(etree, ...) == list(etree), \
            '`...` as key should return all children'
        assert traverse_obj(etree, lambda _, x: x[0].text == '4') == [etree[1]], \
            'function as key should get element as value'
        assert traverse_obj(etree, lambda i, _: i == 1) == [etree[1]], \
            'function as key should get index as key'
        assert traverse_obj(etree, 0) == etree[0], \
            'int key should return the nth child'
        expected = ['Austria', 'Switzerland', 'Malaysia', 'Costa Rica', 'Colombia']
        assert traverse_obj(etree, './/neighbor/@name') == expected, \
            '`@<attribute>` at end of path should give that attribute'
        assert traverse_obj(etree, '//neighbor/@fail') == [None, None, None, None, None], \
            '`@<nonexistent>` at end of path should give `None`'
        assert traverse_obj(etree, ('//neighbor/@', 2)) == {'name': 'Malaysia', 'direction': 'N'}, \
            '`@` should give the full attribute dict'
        assert traverse_obj(etree, '//year/text()') == ['2008', '2011', '2011'], \
            '`text()` at end of path should give the inner text'
        assert traverse_obj(etree, '//*[@direction]/@direction') == ['E', 'W', 'N', 'W', 'E'], \
            'full Python xpath features should be supported'
        assert traverse_obj(etree, (0, '@name')) == 'Liechtenstein', \
            'special transformations should act on current element'
        assert traverse_obj(etree, ('country', 0, ..., 'text()', {int_or_none})) == [1, 2008, 141100], \
            'special transformations should act on current element'

    def test_traversal_unbranching(self):
        assert traverse_obj(_TEST_DATA, [(100, 1.2), all]) == [100, 1.2], \
            '`all` should give all results as list'
        assert traverse_obj(_TEST_DATA, [(100, 1.2), any]) == 100, \
            '`any` should give the first result'
        assert traverse_obj(_TEST_DATA, [100, all]) == [100], \
            '`all` should give list if non branching'
        assert traverse_obj(_TEST_DATA, [100, any]) == 100, \
            '`any` should give single item if non branching'
        assert traverse_obj(_TEST_DATA, [('dict', 'None', 100), all]) == [100], \
            '`all` should filter `None` and empty dict'
        assert traverse_obj(_TEST_DATA, [('dict', 'None', 100), any]) == 100, \
            '`any` should filter `None` and empty dict'
        assert traverse_obj(_TEST_DATA, [{
            'all': [('dict', 'None', 100, 1.2), all],
            'any': [('dict', 'None', 100, 1.2), any],
        }]) == {'all': [100, 1.2], 'any': 100}, \
            '`all`/`any` should apply to each dict path separately'
        assert traverse_obj(_TEST_DATA, [{
            'all': [('dict', 'None', 100, 1.2), all],
            'any': [('dict', 'None', 100, 1.2), any],
        }], get_all=False) == {'all': [100, 1.2], 'any': 100}, \
            '`all`/`any` should apply to dict regardless of `get_all`'
        assert traverse_obj(_TEST_DATA, [('dict', 'None', 100, 1.2), all, {float}]) is None, \
            '`all` should reset branching status'
        assert traverse_obj(_TEST_DATA, [('dict', 'None', 100, 1.2), any, {float}]) is None, \
            '`any` should reset branching status'
        assert traverse_obj(_TEST_DATA, [('dict', 'None', 100, 1.2), all, ..., {float}]) == [1.2], \
            '`all` should allow further branching'
        assert traverse_obj(_TEST_DATA, [('dict', 'None', 'urls', 'data'), any, ..., 'index']) == [0, 1], \
            '`any` should allow further branching'

    def test_traversal_morsel(self):
        morsel = http.cookies.Morsel()
        values = dict(zip(morsel, 'abcdefghijklmnop', strict=False))
        morsel.set('item_key', 'item_value', 'coded_value')
        morsel.update(values)
        values['key'] = 'item_key'
        values['value'] = 'item_value'

        for key, value in values.items():
            assert traverse_obj(morsel, key) == value, \
                'Morsel should provide access to all values'
        assert traverse_obj(morsel, ...) == list(values.values()), \
            '`...` should yield all values'
        assert traverse_obj(morsel, lambda k, v: True) == list(values.values()), \
            'function key should yield all values'
        assert traverse_obj(morsel, [(None,), any]) == morsel, \
            'Morsel should not be implicitly changed to dict on usage'

    def test_traversal_filter(self):
        data = [None, False, True, 0, 1, 0.0, 1.1, '', 'str', {}, {0: 0}, [], [1]]

        assert traverse_obj(data, [..., filter]) == [True, 1, 1.1, 'str', {0: 0}, [1]], \
            '`filter` should filter falsy values'


class TestTraversalHelpers:
    def test_traversal_require(self):
        with pytest.raises(ExtractorError):
            traverse_obj(_TEST_DATA, ['None', {require('value')}])
        assert traverse_obj(_TEST_DATA, ['str', {require('value')}]) == 'str', \
            '`require` should pass through non `None` values'

    def test_subs_list_to_dict(self):
        assert traverse_obj([
            {'name': 'de', 'url': 'https://example.com/subs/de.vtt'},
            {'name': 'en', 'url': 'https://example.com/subs/en1.ass'},
            {'name': 'en', 'url': 'https://example.com/subs/en2.ass'},
        ], [..., {
            'id': 'name',
            'url': 'url',
        }, all, {subs_list_to_dict}]) == {
            'de': [{'url': 'https://example.com/subs/de.vtt'}],
            'en': [
                {'url': 'https://example.com/subs/en1.ass'},
                {'url': 'https://example.com/subs/en2.ass'},
            ],
        }, 'function should build subtitle dict from list of subtitles'
        assert traverse_obj([
            {'name': 'de', 'url': 'https://example.com/subs/de.ass'},
            {'name': 'de'},
            {'name': 'en', 'content': 'content'},
            {'url': 'https://example.com/subs/en'},
        ], [..., {
            'id': 'name',
            'data': 'content',
            'url': 'url',
        }, all, {subs_list_to_dict(lang=None)}]) == {
            'de': [{'url': 'https://example.com/subs/de.ass'}],
            'en': [{'data': 'content'}],
        }, 'subs with mandatory items missing should be filtered'
        assert traverse_obj([
            {'url': 'https://example.com/subs/de.ass', 'name': 'de'},
            {'url': 'https://example.com/subs/en', 'name': 'en'},
        ], [..., {
            'id': 'name',
            'ext': ['url', {determine_ext(default_ext=None)}],
            'url': 'url',
        }, all, {subs_list_to_dict(ext='ext')}]) == {
            'de': [{'url': 'https://example.com/subs/de.ass', 'ext': 'ass'}],
            'en': [{'url': 'https://example.com/subs/en', 'ext': 'ext'}],
        }, '`ext` should set default ext but leave existing value untouched'
        assert traverse_obj([
            {'name': 'en', 'url': 'https://example.com/subs/en2', 'prio': True},
            {'name': 'en', 'url': 'https://example.com/subs/en1', 'prio': False},
        ], [..., {
            'id': 'name',
            'quality': ['prio', {int}],
            'url': 'url',
        }, all, {subs_list_to_dict(ext='ext')}]) == {'en': [
            {'url': 'https://example.com/subs/en1', 'ext': 'ext'},
            {'url': 'https://example.com/subs/en2', 'ext': 'ext'},
        ]}, '`quality` key should sort subtitle list accordingly'
        assert traverse_obj([
            {'name': 'de', 'url': 'https://example.com/subs/de.ass'},
            {'name': 'de'},
            {'name': 'en', 'content': 'content'},
            {'url': 'https://example.com/subs/en'},
        ], [..., {
            'id': 'name',
            'url': 'url',
            'data': 'content',
        }, all, {subs_list_to_dict(lang='en')}]) == {
            'de': [{'url': 'https://example.com/subs/de.ass'}],
            'en': [
                {'data': 'content'},
                {'url': 'https://example.com/subs/en'},
            ],
        }, 'optionally provided lang should be used if no id available'
        assert traverse_obj([
            {'name': 1, 'url': 'https://example.com/subs/de1'},
            {'name': {}, 'url': 'https://example.com/subs/de2'},
            {'name': 'de', 'ext': 1, 'url': 'https://example.com/subs/de3'},
            {'name': 'de', 'ext': {}, 'url': 'https://example.com/subs/de4'},
        ], [..., {
            'id': 'name',
            'url': 'url',
            'ext': 'ext',
        }, all, {subs_list_to_dict(lang=None)}]) == {
            'de': [
                {'url': 'https://example.com/subs/de3'},
                {'url': 'https://example.com/subs/de4'},
            ],
        }, 'non str types should be ignored for id and ext'
        assert traverse_obj([
            {'name': 1, 'url': 'https://example.com/subs/de1'},
            {'name': {}, 'url': 'https://example.com/subs/de2'},
            {'name': 'de', 'ext': 1, 'url': 'https://example.com/subs/de3'},
            {'name': 'de', 'ext': {}, 'url': 'https://example.com/subs/de4'},
        ], [..., {
            'id': 'name',
            'url': 'url',
            'ext': 'ext',
        }, all, {subs_list_to_dict(lang='de')}]) == {
            'de': [
                {'url': 'https://example.com/subs/de1'},
                {'url': 'https://example.com/subs/de2'},
                {'url': 'https://example.com/subs/de3'},
                {'url': 'https://example.com/subs/de4'},
            ],
        }, 'non str types should be replaced by default id'

    def test_trim_str(self):
        with pytest.raises(TypeError):
            trim_str('positional')

        assert callable(trim_str(start='a'))
        assert trim_str(start='ab')('abc') == 'c'
        assert trim_str(end='bc')('abc') == 'a'
        assert trim_str(start='a', end='c')('abc') == 'b'
        assert trim_str(start='ab', end='c')('abc') == ''
        assert trim_str(start='a', end='bc')('abc') == ''
        assert trim_str(start='ab', end='bc')('abc') == ''
        assert trim_str(start='abc', end='abc')('abc') == ''
        assert trim_str(start='', end='')('abc') == 'abc'

    def test_unpack(self):
        assert unpack(lambda *x: ''.join(map(str, x)))([1, 2, 3]) == '123'
        assert unpack(join_nonempty)([1, 2, 3]) == '1-2-3'
        assert unpack(join_nonempty, delim=' ')([1, 2, 3]) == '1 2 3'
        with pytest.raises(TypeError):
            unpack(join_nonempty)()
        with pytest.raises(TypeError):
            unpack()

    def test_find_element(self):
        for improper_kwargs in [
            dict(attr='data-id'),
            dict(value='y'),
            dict(attr='data-id', value='y', cls='a'),
            dict(attr='data-id', value='y', id='x'),
            dict(cls='a', id='x'),
            dict(cls='a', tag='p'),
            dict(cls='[ab]', regex=True),
        ]:
            with pytest.raises(AssertionError):
                find_element(**improper_kwargs)(_TEST_HTML)

        assert find_element(cls='a')(_TEST_HTML) == '1'
        assert find_element(cls='a', html=True)(_TEST_HTML) == '<div class="a">1</div>'
        assert find_element(id='x')(_TEST_HTML) == '2'
        assert find_element(id='[ex]')(_TEST_HTML) is None
        assert find_element(id='[ex]', regex=True)(_TEST_HTML) == '2'
        assert find_element(id='x', html=True)(_TEST_HTML) == '<div class="a" id="x" custom="z">2</div>'
        assert find_element(attr='data-id', value='y')(_TEST_HTML) == '3'
        assert find_element(attr='data-id', value='y(?:es)?')(_TEST_HTML) is None
        assert find_element(attr='data-id', value='y(?:es)?', regex=True)(_TEST_HTML) == '3'
        assert find_element(
            attr='data-id', value='y', html=True)(_TEST_HTML) == '<div class="b" data-id="y" custom="z">3</div>'

    def test_find_elements(self):
        for improper_kwargs in [
            dict(tag='p'),
            dict(attr='data-id'),
            dict(value='y'),
            dict(attr='data-id', value='y', cls='a'),
            dict(cls='a', tag='div'),
            dict(cls='[ab]', regex=True),
        ]:
            with pytest.raises(AssertionError):
                find_elements(**improper_kwargs)(_TEST_HTML)

        assert find_elements(cls='a')(_TEST_HTML) == ['1', '2', '4']
        assert find_elements(cls='a', html=True)(_TEST_HTML) == [
            '<div class="a">1</div>', '<div class="a" id="x" custom="z">2</div>', '<p class="a">4</p>']
        assert find_elements(attr='custom', value='z')(_TEST_HTML) == ['2', '3']
        assert find_elements(attr='custom', value='[ez]')(_TEST_HTML) == []
        assert find_elements(attr='custom', value='[ez]', regex=True)(_TEST_HTML) == ['2', '3', '5']


class TestDictGet:
    def test_dict_get(self):
        FALSE_VALUES = {
            'none': None,
            'false': False,
            'zero': 0,
            'empty_string': '',
            'empty_list': [],
        }
        d = {**FALSE_VALUES, 'a': 42}
        assert dict_get(d, 'a') == 42
        assert dict_get(d, 'b') is None
        assert dict_get(d, 'b', 42) == 42
        assert
dict_get(d, ('a',)) == 42 assert dict_get(d, ('b', 'a')) == 42 assert dict_get(d, ('b', 'c', 'a', 'd')) == 42 assert dict_get(d, ('b', 'c')) is None assert dict_get(d, ('b', 'c'), 42) == 42 for key, false_value in FALSE_VALUES.items(): assert dict_get(d, ('b', 'c', key)) is None assert dict_get(d, ('b', 'c', key), skip_false_values=False) == false_value
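# --- Illustrative sketch (added; not part of the original suite) ---
# Pulls together the `traverse_obj` behaviours asserted above: a plain path,
# `...` branching over a list, and a `{type}` filter that drops non-matching
# (and `None`) values from branch results. The sample data is hypothetical.
from yt_dlp.utils.traversal import traverse_obj as _traverse_obj_demo

_DEMO = {'items': [{'id': 1, 'title': 'a'}, {'id': None, 'title': 'b'}]}
assert _traverse_obj_demo(_DEMO, ('items', 0, 'title')) == 'a'        # plain path
assert _traverse_obj_demo(_DEMO, ('items', ..., 'id', {int})) == [1]  # branch + type filter
assert _traverse_obj_demo(_DEMO, 'missing', default='fallback') == 'fallback'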
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_update.py
test/test_update.py
#!/usr/bin/env python3 # Allow direct execution import os import sys import unittest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from test.helper import FakeYDL, report_warning from yt_dlp.update import UpdateInfo, Updater, UPDATE_SOURCES, _make_label # XXX: Keep in sync with yt_dlp.update.UPDATE_SOURCES TEST_UPDATE_SOURCES = { 'stable': 'yt-dlp/yt-dlp', 'nightly': 'yt-dlp/yt-dlp-nightly-builds', 'master': 'yt-dlp/yt-dlp-master-builds', } TEST_API_DATA = { 'yt-dlp/yt-dlp/latest': { 'tag_name': '2023.12.31', 'target_commitish': 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb', 'name': 'yt-dlp 2023.12.31', 'body': 'BODY', }, 'yt-dlp/yt-dlp-nightly-builds/latest': { 'tag_name': '2023.12.31.123456', 'target_commitish': 'master', 'name': 'yt-dlp nightly 2023.12.31.123456', 'body': 'Generated from: https://github.com/yt-dlp/yt-dlp/commit/cccccccccccccccccccccccccccccccccccccccc', }, 'yt-dlp/yt-dlp-master-builds/latest': { 'tag_name': '2023.12.31.987654', 'target_commitish': 'master', 'name': 'yt-dlp master 2023.12.31.987654', 'body': 'Generated from: https://github.com/yt-dlp/yt-dlp/commit/dddddddddddddddddddddddddddddddddddddddd', }, 'yt-dlp/yt-dlp/tags/testing': { 'tag_name': 'testing', 'target_commitish': '9999999999999999999999999999999999999999', 'name': 'testing', 'body': 'BODY', }, 'fork/yt-dlp/latest': { 'tag_name': '2050.12.31', 'target_commitish': 'eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee', 'name': '2050.12.31', 'body': 'BODY', }, 'fork/yt-dlp/tags/pr0000': { 'tag_name': 'pr0000', 'target_commitish': 'ffffffffffffffffffffffffffffffffffffffff', 'name': 'pr1234 2023.11.11.000000', 'body': 'BODY', }, 'fork/yt-dlp/tags/pr1234': { 'tag_name': 'pr1234', 'target_commitish': '0000000000000000000000000000000000000000', 'name': 'pr1234 2023.12.31.555555', 'body': 'BODY', }, 'fork/yt-dlp/tags/pr9999': { 'tag_name': 'pr9999', 'target_commitish': '1111111111111111111111111111111111111111', 'name': 'pr9999', 'body': 'BODY', }, 'fork/yt-dlp-satellite/tags/pr987': { 'tag_name': 'pr987', 'target_commitish': 'master', 'name': 'pr987', 'body': 'Generated from: https://github.com/yt-dlp/yt-dlp/commit/2222222222222222222222222222222222222222', }, } TEST_LOCKFILE_COMMENT = '# This file is used for regulating self-update' TEST_LOCKFILE_V1 = rf'''{TEST_LOCKFILE_COMMENT} lock 2022.08.18.36 .+ Python 3\.6 lock 2023.11.16 (?!win_x86_exe).+ Python 3\.7 lock 2023.11.16 win_x86_exe .+ Windows-(?:Vista|2008Server) lock 2024.10.22 py2exe .+ lock 2024.10.22 linux_(?:armv7l|aarch64)_exe .+-glibc2\.(?:[12]?\d|30)\b lock 2024.10.22 zip Python 3\.8 lock 2024.10.22 win(?:_x86)?_exe Python 3\.[78].+ Windows-(?:7-|2008ServerR2) lock 2025.08.11 darwin_legacy_exe .+ ''' TEST_LOCKFILE_V2_TMPL = r'''%s lockV2 yt-dlp/yt-dlp 2022.08.18.36 .+ Python 3\.6 lockV2 yt-dlp/yt-dlp 2023.11.16 (?!win_x86_exe).+ Python 3\.7 lockV2 yt-dlp/yt-dlp 2023.11.16 win_x86_exe .+ Windows-(?:Vista|2008Server) lockV2 yt-dlp/yt-dlp 2024.10.22 py2exe .+ lockV2 yt-dlp/yt-dlp 2024.10.22 linux_(?:armv7l|aarch64)_exe .+-glibc2\.(?:[12]?\d|30)\b lockV2 yt-dlp/yt-dlp 2024.10.22 zip Python 3\.8 lockV2 yt-dlp/yt-dlp 2024.10.22 win(?:_x86)?_exe Python 3\.[78].+ Windows-(?:7-|2008ServerR2) lockV2 yt-dlp/yt-dlp 2025.08.11 darwin_legacy_exe .+ lockV2 yt-dlp/yt-dlp-nightly-builds 2023.11.15.232826 (?!win_x86_exe).+ Python 3\.7 lockV2 yt-dlp/yt-dlp-nightly-builds 2023.11.15.232826 win_x86_exe .+ Windows-(?:Vista|2008Server) lockV2 yt-dlp/yt-dlp-nightly-builds 2024.10.22.051025 py2exe .+ lockV2 yt-dlp/yt-dlp-nightly-builds 
2024.10.22.051025 linux_(?:armv7l|aarch64)_exe .+-glibc2\.(?:[12]?\d|30)\b lockV2 yt-dlp/yt-dlp-nightly-builds 2024.10.22.051025 zip Python 3\.8 lockV2 yt-dlp/yt-dlp-nightly-builds 2024.10.22.051025 win(?:_x86)?_exe Python 3\.[78].+ Windows-(?:7-|2008ServerR2) lockV2 yt-dlp/yt-dlp-nightly-builds 2025.08.12.233030 darwin_legacy_exe .+ lockV2 yt-dlp/yt-dlp-master-builds 2023.11.15.232812 (?!win_x86_exe).+ Python 3\.7 lockV2 yt-dlp/yt-dlp-master-builds 2023.11.15.232812 win_x86_exe .+ Windows-(?:Vista|2008Server) lockV2 yt-dlp/yt-dlp-master-builds 2024.10.22.045052 py2exe .+ lockV2 yt-dlp/yt-dlp-master-builds 2024.10.22.060347 linux_(?:armv7l|aarch64)_exe .+-glibc2\.(?:[12]?\d|30)\b lockV2 yt-dlp/yt-dlp-master-builds 2024.10.22.060347 zip Python 3\.8 lockV2 yt-dlp/yt-dlp-master-builds 2024.10.22.060347 win(?:_x86)?_exe Python 3\.[78].+ Windows-(?:7-|2008ServerR2) lockV2 yt-dlp/yt-dlp-master-builds 2025.08.12.232447 darwin_legacy_exe .+ ''' TEST_LOCKFILE_V2 = TEST_LOCKFILE_V2_TMPL % TEST_LOCKFILE_COMMENT TEST_LOCKFILE_ACTUAL = TEST_LOCKFILE_V2_TMPL % TEST_LOCKFILE_V1.rstrip('\n') TEST_LOCKFILE_FORK = rf'''{TEST_LOCKFILE_ACTUAL}# Test if a fork blocks updates to non-numeric tags lockV2 fork/yt-dlp pr0000 .+ Python 3.6 lockV2 fork/yt-dlp pr1234 (?!win_x86_exe).+ Python 3\.7 lockV2 fork/yt-dlp pr1234 win_x86_exe .+ Windows-(?:Vista|2008Server) lockV2 fork/yt-dlp pr9999 .+ Python 3.11 ''' class FakeUpdater(Updater): current_version = '2022.01.01' current_commit = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' _channel = 'stable' _origin = 'yt-dlp/yt-dlp' _update_sources = TEST_UPDATE_SOURCES def _download_update_spec(self, *args, **kwargs): return TEST_LOCKFILE_ACTUAL def _call_api(self, tag): tag = f'tags/{tag}' if tag != 'latest' else tag return TEST_API_DATA[f'{self.requested_repo}/{tag}'] def _report_error(self, msg, *args, **kwargs): report_warning(msg) class TestUpdate(unittest.TestCase): maxDiff = None def test_update_spec(self): ydl = FakeYDL() updater = FakeUpdater(ydl, 'stable') def test(lockfile, identifier, input_tag, expect_tag, exact=False, repo='yt-dlp/yt-dlp'): updater._identifier = identifier updater._exact = exact updater.requested_repo = repo result = updater._process_update_spec(lockfile, input_tag) self.assertEqual( result, expect_tag, f'{identifier!r} requesting {repo}@{input_tag} (exact={exact}) ' f'returned {result!r} instead of {expect_tag!r}') for lockfile in (TEST_LOCKFILE_V1, TEST_LOCKFILE_V2, TEST_LOCKFILE_ACTUAL, TEST_LOCKFILE_FORK): # Normal operation test(lockfile, 'zip Python 3.12.0', '2023.12.31', '2023.12.31') test(lockfile, 'zip Python 3.12.0', '2023.12.31', '2023.12.31', exact=True) # py2exe should never update beyond 2024.10.22 test(lockfile, 'py2exe Python 3.8', '2025.01.01', '2024.10.22') test(lockfile, 'py2exe Python 3.8', '2025.01.01', None, exact=True) # Python 3.6 --update should update only to the py3.6 lock test(lockfile, 'zip Python 3.6.0', '2023.11.16', '2022.08.18.36') # Python 3.6 --update-to an exact version later than the py3.6 lock should return None test(lockfile, 'zip Python 3.6.0', '2023.11.16', None, exact=True) # Python 3.7 should be able to update to the py3.7 lock test(lockfile, 'zip Python 3.7.0', '2023.11.16', '2023.11.16') test(lockfile, 'zip Python 3.7.1', '2023.11.16', '2023.11.16', exact=True) # Non-win_x86_exe builds on py3.7 must be locked at py3.7 lock test(lockfile, 'zip Python 3.7.1', '2023.12.31', '2023.11.16') test(lockfile, 'zip Python 3.7.1', '2023.12.31', None, exact=True) # Python 3.8 should only update to the py3.8 lock 
test(lockfile, 'zip Python 3.8.10', '2025.01.01', '2024.10.22') test(lockfile, 'zip Python 3.8.110', '2025.01.01', None, exact=True) test( # Windows Vista w/ win_x86_exe must be locked at Vista lock lockfile, 'win_x86_exe Python 3.7.9 (CPython x86 32bit) - Windows-Vista-6.0.6003-SP2', '2023.12.31', '2023.11.16') test( # Windows 2008Server w/ win_x86_exe must be locked at Vista lock lockfile, 'win_x86_exe Python 3.7.9 (CPython x86 32bit) - Windows-2008Server', '2023.12.31', None, exact=True) test( # Windows 7 w/ win_x86_exe py3.7 build should be able to update beyond py3.7 lock lockfile, 'win_x86_exe Python 3.7.9 (CPython x86 32bit) - Windows-7-6.1.7601-SP1', '2023.12.31', '2023.12.31', exact=True) test( # Windows 7 win_x86_exe should only update to Win7 lock lockfile, 'win_x86_exe Python 3.7.9 (CPython x86 32bit) - Windows-7-6.1.7601-SP1', '2025.01.01', '2024.10.22') test( # Windows 2008ServerR2 win_exe should only update to Win7 lock lockfile, 'win_exe Python 3.8.10 (CPython x86 32bit) - Windows-2008ServerR2', '2025.12.31', '2024.10.22') test( # Windows 8.1 w/ '2008Server' in platform string should be able to update beyond py3.7 lock lockfile, 'win_x86_exe Python 3.7.9 (CPython x86 32bit) - Windows-post2008Server-6.2.9200', '2023.12.31', '2023.12.31', exact=True) test( # win_exe built w/Python 3.8 on Windows>=8 should be able to update beyond py3.8 lock lockfile, 'win_exe Python 3.8.10 (CPython AMD64 64bit) - Windows-10-10.0.20348-SP0', '2025.01.01', '2025.01.01', exact=True) test( # linux_armv7l_exe w/glibc2.7 should only update to glibc<2.31 lock lockfile, 'linux_armv7l_exe Python 3.8.0 (CPython armv7l 32bit) - Linux-6.5.0-1025-azure-armv7l-with-glibc2.7', '2025.01.01', '2024.10.22') test( # linux_armv7l_exe w/Python 3.8 and glibc>=2.31 should be able to update beyond py3.8 and glibc<2.31 locks lockfile, 'linux_armv7l_exe Python 3.8.0 (CPython armv7l 32bit) - Linux-6.5.0-1025-azure-armv7l-with-glibc2.31', '2025.01.01', '2025.01.01') test( # linux_armv7l_exe w/glibc2.30 should only update to glibc<2.31 lock lockfile, 'linux_armv7l_exe Python 3.8.0 (CPython armv7l 64bit) - Linux-6.5.0-1025-azure-aarch64-with-glibc2.30 (OpenSSL', '2025.01.01', '2024.10.22') test( # linux_aarch64_exe w/glibc2.17 should only update to glibc<2.31 lock lockfile, 'linux_aarch64_exe Python 3.8.0 (CPython aarch64 64bit) - Linux-6.5.0-1025-azure-aarch64-with-glibc2.17', '2025.01.01', '2024.10.22') test( # linux_aarch64_exe w/glibc2.40 and glibc>=2.31 should be able to update beyond py3.8 and glibc<2.31 locks lockfile, 'linux_aarch64_exe Python 3.8.0 (CPython aarch64 64bit) - Linux-6.5.0-1025-azure-aarch64-with-glibc2.40', '2025.01.01', '2025.01.01') test( # linux_aarch64_exe w/glibc2.3 should only update to glibc<2.31 lock lockfile, 'linux_aarch64_exe Python 3.8.0 (CPython aarch64 64bit) - Linux-6.5.0-1025-azure-aarch64-with-glibc2.3 (OpenSSL', '2025.01.01', '2024.10.22') test(lockfile, 'darwin_legacy_exe Python 3.10.5', '2025.08.11', '2025.08.11') test(lockfile, 'darwin_legacy_exe Python 3.10.5', '2025.08.11', '2025.08.11', exact=True) test(lockfile, 'darwin_legacy_exe Python 3.10.5', '2025.08.12', '2025.08.11') test(lockfile, 'darwin_legacy_exe Python 3.10.5', '2025.08.12', None, exact=True) # Forks can block updates to non-numeric tags rather than lock test(TEST_LOCKFILE_FORK, 'zip Python 3.6.3', 'pr0000', None, repo='fork/yt-dlp') test(TEST_LOCKFILE_FORK, 'zip Python 3.7.4', 'pr0000', 'pr0000', repo='fork/yt-dlp') test(TEST_LOCKFILE_FORK, 'zip Python 3.7.4', 'pr1234', None, repo='fork/yt-dlp') 
test(TEST_LOCKFILE_FORK, 'zip Python 3.8.1', 'pr1234', 'pr1234', repo='fork/yt-dlp', exact=True) test( TEST_LOCKFILE_FORK, 'win_x86_exe Python 3.7.9 (CPython x86 32bit) - Windows-Vista-6.0.6003-SP2', 'pr1234', None, repo='fork/yt-dlp') test( TEST_LOCKFILE_FORK, 'win_x86_exe Python 3.7.9 (CPython x86 32bit) - Windows-7-6.1.7601-SP1', '2023.12.31', '2023.12.31', repo='fork/yt-dlp') test(TEST_LOCKFILE_FORK, 'zip Python 3.11.2', 'pr9999', None, repo='fork/yt-dlp', exact=True) test(TEST_LOCKFILE_FORK, 'zip Python 3.12.0', 'pr9999', 'pr9999', repo='fork/yt-dlp') def test_query_update(self): ydl = FakeYDL() def test(target, expected, current_version=None, current_commit=None, identifier=None): updater = FakeUpdater(ydl, target) if current_version: updater.current_version = current_version if current_commit: updater.current_commit = current_commit updater._identifier = identifier or 'zip' update_info = updater.query_update(_output=True) self.assertDictEqual( update_info.__dict__ if update_info else {}, expected.__dict__ if expected else {}) test('yt-dlp/yt-dlp@latest', UpdateInfo( '2023.12.31', version='2023.12.31', requested_version='2023.12.31', commit='b' * 40)) test('yt-dlp/yt-dlp-nightly-builds@latest', UpdateInfo( '2023.12.31.123456', version='2023.12.31.123456', requested_version='2023.12.31.123456', commit='c' * 40)) test('yt-dlp/yt-dlp-master-builds@latest', UpdateInfo( '2023.12.31.987654', version='2023.12.31.987654', requested_version='2023.12.31.987654', commit='d' * 40)) test('fork/yt-dlp@latest', UpdateInfo( '2050.12.31', version='2050.12.31', requested_version='2050.12.31', commit='e' * 40)) test('fork/yt-dlp@pr0000', UpdateInfo( 'pr0000', version='2023.11.11.000000', requested_version='2023.11.11.000000', commit='f' * 40)) test('fork/yt-dlp@pr1234', UpdateInfo( 'pr1234', version='2023.12.31.555555', requested_version='2023.12.31.555555', commit='0' * 40)) test('fork/yt-dlp@pr9999', UpdateInfo( 'pr9999', version=None, requested_version=None, commit='1' * 40)) test('fork/yt-dlp-satellite@pr987', UpdateInfo( 'pr987', version=None, requested_version=None, commit='2' * 40)) test('yt-dlp/yt-dlp', None, current_version='2024.01.01') test('stable', UpdateInfo( '2023.12.31', version='2023.12.31', requested_version='2023.12.31', commit='b' * 40)) test('nightly', UpdateInfo( '2023.12.31.123456', version='2023.12.31.123456', requested_version='2023.12.31.123456', commit='c' * 40)) test('master', UpdateInfo( '2023.12.31.987654', version='2023.12.31.987654', requested_version='2023.12.31.987654', commit='d' * 40)) test('testing', None, current_commit='9' * 40) test('testing', UpdateInfo('testing', commit='9' * 40)) def test_make_label(self): STABLE_REPO = UPDATE_SOURCES['stable'] NIGHTLY_REPO = UPDATE_SOURCES['nightly'] MASTER_REPO = UPDATE_SOURCES['master'] for inputs, expected in [ ([STABLE_REPO, '2025.09.02', '2025.09.02'], f'stable@2025.09.02 from {STABLE_REPO}'), ([NIGHTLY_REPO, '2025.09.02.123456', '2025.09.02.123456'], f'nightly@2025.09.02.123456 from {NIGHTLY_REPO}'), ([MASTER_REPO, '2025.09.02.987654', '2025.09.02.987654'], f'master@2025.09.02.987654 from {MASTER_REPO}'), (['fork/yt-dlp', 'experimental', '2025.12.31.000000'], 'fork/yt-dlp@experimental build 2025.12.31.000000'), (['fork/yt-dlp', '2025.09.02', '2025.09.02'], 'fork/yt-dlp@2025.09.02'), ([STABLE_REPO, 'experimental', '2025.12.31.000000'], f'{STABLE_REPO}@experimental build 2025.12.31.000000'), ([STABLE_REPO, 'experimental'], f'{STABLE_REPO}@experimental'), (['fork/yt-dlp', 'experimental'], 'fork/yt-dlp@experimental'), ]: 
            result = _make_label(*inputs)
            self.assertEqual(
                result, expected,
                f'{inputs!r} returned {result!r} instead of {expected!r}')


if __name__ == '__main__':
    unittest.main()
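# --- Illustrative sketch (assumption; not part of the test file) ---
# The lockfile rules exercised above boil down to "identifiers matching REGEX
# may not update past VERSION". This hypothetical helper mirrors that rule for
# a single `lock` line; the real Updater._process_update_spec handles much more
# (lockV2, channels, exact pinning).
import re as _re


def _lock_demo(lock_line, identifier, requested_tag):
    _, version, pattern = lock_line.split(maxsplit=2)
    # Naive lexicographic tag comparison; adequate for zero-padded CalVer tags
    if _re.match(pattern, identifier) and requested_tag > version:
        return version  # locked: fall back to the newest allowed tag
    return requested_tag


assert _lock_demo(r'lock 2024.10.22 zip Python 3\.8', 'zip Python 3.8.10', '2025.01.01') == '2024.10.22'
assert _lock_demo(r'lock 2024.10.22 zip Python 3\.8', 'zip Python 3.12.0', '2025.01.01') == '2025.01.01'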
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_YoutubeDLCookieJar.py
test/test_YoutubeDLCookieJar.py
#!/usr/bin/env python3

# Allow direct execution
import os
import sys
import unittest

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import re
import tempfile

from yt_dlp.cookies import YoutubeDLCookieJar


class TestYoutubeDLCookieJar(unittest.TestCase):
    def test_keep_session_cookies(self):
        cookiejar = YoutubeDLCookieJar('./test/testdata/cookies/session_cookies.txt')
        cookiejar.load()
        tf = tempfile.NamedTemporaryFile(delete=False)
        try:
            cookiejar.save(filename=tf.name)
            temp = tf.read().decode()
            self.assertTrue(re.search(
                r'www\.foobar\.foobar\s+FALSE\s+/\s+TRUE\s+0\s+YoutubeDLExpiresEmpty\s+YoutubeDLExpiresEmptyValue',
                temp))
            self.assertTrue(re.search(
                r'www\.foobar\.foobar\s+FALSE\s+/\s+TRUE\s+0\s+YoutubeDLExpires0\s+YoutubeDLExpires0Value',
                temp))
        finally:
            tf.close()
            os.remove(tf.name)

    def test_strip_httponly_prefix(self):
        cookiejar = YoutubeDLCookieJar('./test/testdata/cookies/httponly_cookies.txt')
        cookiejar.load()

        def assert_cookie_has_value(key):
            self.assertEqual(cookiejar._cookies['www.foobar.foobar']['/'][key].value, key + '_VALUE')

        assert_cookie_has_value('HTTPONLY_COOKIE')
        assert_cookie_has_value('JS_ACCESSIBLE_COOKIE')

    def test_malformed_cookies(self):
        cookiejar = YoutubeDLCookieJar('./test/testdata/cookies/malformed_cookies.txt')
        cookiejar.load()
        # Cookies should be empty since all malformed cookie file entries
        # will be ignored
        self.assertFalse(cookiejar._cookies)

    def test_get_cookie_header(self):
        cookiejar = YoutubeDLCookieJar('./test/testdata/cookies/httponly_cookies.txt')
        cookiejar.load()
        header = cookiejar.get_cookie_header('https://www.foobar.foobar')
        self.assertIn('HTTPONLY_COOKIE', header)

    def test_get_cookies_for_url(self):
        cookiejar = YoutubeDLCookieJar('./test/testdata/cookies/session_cookies.txt')
        cookiejar.load()
        cookies = cookiejar.get_cookies_for_url('https://www.foobar.foobar/')
        self.assertEqual(len(cookies), 2)
        cookies = cookiejar.get_cookies_for_url('https://foobar.foobar/')
        self.assertFalse(cookies)


if __name__ == '__main__':
    unittest.main()
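# --- Illustrative sketch (added for clarity) ---
# The Netscape cookies.txt layout that the regexes in test_keep_session_cookies
# match against: seven tab-separated columns. The sample line is hypothetical
# but mirrors the fields asserted above.
_line = 'www.foobar.foobar\tFALSE\t/\tTRUE\t0\tHTTPONLY_COOKIE\tHTTPONLY_COOKIE_VALUE'
_domain, _subdomains, _path, _secure, _expires, _name, _value = _line.split('\t')
assert (_domain, _name, _value) == ('www.foobar.foobar', 'HTTPONLY_COOKIE', 'HTTPONLY_COOKIE_VALUE')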
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_networking_utils.py
test/test_networking_utils.py
#!/usr/bin/env python3

# Allow direct execution
import os
import sys

import pytest

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import io
import random
import ssl

from yt_dlp.cookies import YoutubeDLCookieJar
from yt_dlp.dependencies import certifi
from yt_dlp.networking import Response
from yt_dlp.networking._helper import (
    InstanceStoreMixin,
    add_accept_encoding_header,
    get_redirect_method,
    make_socks_proxy_opts,
    ssl_load_certs,
)
from yt_dlp.networking.exceptions import (
    HTTPError,
    IncompleteRead,
)
from yt_dlp.socks import ProxyType
from yt_dlp.utils.networking import HTTPHeaderDict, select_proxy

TEST_DIR = os.path.dirname(os.path.abspath(__file__))


class TestNetworkingUtils:

    def test_select_proxy(self):
        proxies = {
            'all': 'socks5://example.com',
            'http': 'http://example.com:1080',
            'no': 'bypass.example.com,yt-dl.org',
        }

        assert select_proxy('https://example.com', proxies) == proxies['all']
        assert select_proxy('http://example.com', proxies) == proxies['http']
        assert select_proxy('http://bypass.example.com', proxies) is None
        assert select_proxy('https://yt-dl.org', proxies) is None

    @pytest.mark.parametrize('socks_proxy,expected', [
        ('socks5h://example.com', {
            'proxytype': ProxyType.SOCKS5,
            'addr': 'example.com',
            'port': 1080,
            'rdns': True,
            'username': None,
            'password': None,
        }),
        ('socks5://user:@example.com:5555', {
            'proxytype': ProxyType.SOCKS5,
            'addr': 'example.com',
            'port': 5555,
            'rdns': False,
            'username': 'user',
            'password': '',
        }),
        ('socks4://u%40ser:pa%20ss@127.0.0.1:1080', {
            'proxytype': ProxyType.SOCKS4,
            'addr': '127.0.0.1',
            'port': 1080,
            'rdns': False,
            'username': 'u@ser',
            'password': 'pa ss',
        }),
        ('socks4a://:pa%20ss@127.0.0.1', {
            'proxytype': ProxyType.SOCKS4A,
            'addr': '127.0.0.1',
            'port': 1080,
            'rdns': True,
            'username': '',
            'password': 'pa ss',
        }),
    ])
    def test_make_socks_proxy_opts(self, socks_proxy, expected):
        assert make_socks_proxy_opts(socks_proxy) == expected

    def test_make_socks_proxy_unknown(self):
        with pytest.raises(ValueError, match='Unknown SOCKS proxy version: socks'):
            make_socks_proxy_opts('socks://127.0.0.1')

    @pytest.mark.skipif(not certifi, reason='certifi is not installed')
    def test_load_certifi(self):
        context_certifi = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        context_certifi.load_verify_locations(cafile=certifi.where())
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ssl_load_certs(context, use_certifi=True)
        assert context.get_ca_certs() == context_certifi.get_ca_certs()

        context_default = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        context_default.load_default_certs()
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ssl_load_certs(context, use_certifi=False)
        assert context.get_ca_certs() == context_default.get_ca_certs()

        if context_default.get_ca_certs() == context_certifi.get_ca_certs():
            pytest.skip('System uses certifi as default. The test is not valid')

    @pytest.mark.parametrize('method,status,expected', [
        ('GET', 303, 'GET'),
        ('HEAD', 303, 'HEAD'),
        ('PUT', 303, 'GET'),
        ('POST', 301, 'GET'),
        ('HEAD', 301, 'HEAD'),
        ('POST', 302, 'GET'),
        ('HEAD', 302, 'HEAD'),
        ('PUT', 302, 'PUT'),
        ('POST', 308, 'POST'),
        ('POST', 307, 'POST'),
        ('HEAD', 308, 'HEAD'),
        ('HEAD', 307, 'HEAD'),
    ])
    def test_get_redirect_method(self, method, status, expected):
        assert get_redirect_method(method, status) == expected

    @pytest.mark.parametrize('headers,supported_encodings,expected', [
        ({'Accept-Encoding': 'br'}, ['gzip', 'br'], {'Accept-Encoding': 'br'}),
        ({}, ['gzip', 'br'], {'Accept-Encoding': 'gzip, br'}),
        ({'Content-type': 'application/json'}, [], {'Content-type': 'application/json', 'Accept-Encoding': 'identity'}),
    ])
    def test_add_accept_encoding_header(self, headers, supported_encodings, expected):
        headers = HTTPHeaderDict(headers)
        add_accept_encoding_header(headers, supported_encodings)
        assert headers == HTTPHeaderDict(expected)


class TestInstanceStoreMixin:

    class FakeInstanceStoreMixin(InstanceStoreMixin):
        def _create_instance(self, **kwargs):
            return random.randint(0, 1000000)

        def _close_instance(self, instance):
            pass

    def test_mixin(self):
        mixin = self.FakeInstanceStoreMixin()
        assert mixin._get_instance(d={'a': 1, 'b': 2, 'c': {'d', 4}}) == mixin._get_instance(d={'a': 1, 'b': 2, 'c': {'d', 4}})

        assert mixin._get_instance(d={'a': 1, 'b': 2, 'c': {'e', 4}}) != mixin._get_instance(d={'a': 1, 'b': 2, 'c': {'d', 4}})

        # Fixed: the closing parenthesis was misplaced, so the whole comparison
        # was being passed as `d` (a bool) instead of being asserted
        assert mixin._get_instance(d={'a': 1, 'b': 2, 'c': {'d', 4}}) != mixin._get_instance(d={'a': 1, 'b': 2, 'g': {'d', 4}})

        assert mixin._get_instance(d={'a': 1}, e=[1, 2, 3]) == mixin._get_instance(d={'a': 1}, e=[1, 2, 3])

        assert mixin._get_instance(d={'a': 1}, e=[1, 2, 3]) != mixin._get_instance(d={'a': 1}, e=[1, 2, 3, 4])

        cookiejar = YoutubeDLCookieJar()
        assert mixin._get_instance(b=[1, 2], c=cookiejar) == mixin._get_instance(b=[1, 2], c=cookiejar)

        assert mixin._get_instance(b=[1, 2], c=cookiejar) != mixin._get_instance(b=[1, 2], c=YoutubeDLCookieJar())

        # Different order
        assert mixin._get_instance(c=cookiejar, b=[1, 2]) == mixin._get_instance(b=[1, 2], c=cookiejar)

        m = mixin._get_instance(t=1234)
        assert mixin._get_instance(t=1234) == m
        mixin._clear_instances()
        assert mixin._get_instance(t=1234) != m


class TestNetworkingExceptions:

    @staticmethod
    def create_response(status):
        return Response(fp=io.BytesIO(b'test'), url='http://example.com', headers={'tesT': 'test'}, status=status)

    def test_http_error(self):
        response = self.create_response(403)
        error = HTTPError(response)

        assert error.status == 403
        assert str(error) == error.msg == 'HTTP Error 403: Forbidden'
        assert error.reason == response.reason
        assert error.response is response

        data = error.response.read()
        assert data == b'test'
        assert repr(error) == '<HTTPError 403: Forbidden>'

    def test_redirect_http_error(self):
        response = self.create_response(301)
        error = HTTPError(response, redirect_loop=True)

        assert str(error) == error.msg == 'HTTP Error 301: Moved Permanently (redirect loop detected)'
        assert error.reason == 'Moved Permanently'

    def test_incomplete_read_error(self):
        error = IncompleteRead(4, 3, cause='test')
        assert isinstance(error, IncompleteRead)
        assert repr(error) == '<IncompleteRead: 4 bytes read, 3 more expected>'
        assert str(error) == error.msg == '4 bytes read, 3 more expected'
        assert error.partial == 4
        assert error.expected == 3
        assert error.cause == 'test'

        error = IncompleteRead(3)
        assert repr(error) == '<IncompleteRead: 3 bytes read>'
        assert str(error) == '3 bytes read'
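# --- Illustrative sketch (added; a toy, not the real helper) ---
# The redirect-method table in TestNetworkingUtils encodes the usual HTTP
# rules: 303 downgrades everything but HEAD to GET, 301/302 downgrade POST to
# GET, and 307/308 always preserve the method.
def _redirect_method_demo(method, status):
    if status == 303 and method != 'HEAD':
        return 'GET'
    if status in (301, 302) and method == 'POST':
        return 'GET'
    return method


assert _redirect_method_demo('POST', 302) == 'GET'
assert _redirect_method_demo('PUT', 307) == 'PUT'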
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_jsinterp.py
test/test_jsinterp.py
#!/usr/bin/env python3 # Allow direct execution import os import sys import unittest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import math from yt_dlp.jsinterp import JS_Undefined, JSInterpreter, js_number_to_string class NaN: pass class TestJSInterpreter(unittest.TestCase): def _test(self, jsi_or_code, expected, func='f', args=()): if isinstance(jsi_or_code, str): jsi_or_code = JSInterpreter(jsi_or_code) got = jsi_or_code.call_function(func, *args) if expected is NaN: self.assertTrue(math.isnan(got), f'{got} is not NaN') else: self.assertEqual(got, expected) def test_basic(self): jsi = JSInterpreter('function f(){;}') self.assertEqual(repr(jsi.extract_function('f')), 'F<f>') self._test(jsi, None) self._test('function f(){return 42;}', 42) self._test('function f(){42}', None) self._test('var f = function(){return 42;}', 42) def test_add(self): self._test('function f(){return 42 + 7;}', 49) self._test('function f(){return 42 + undefined;}', NaN) self._test('function f(){return 42 + null;}', 42) def test_sub(self): self._test('function f(){return 42 - 7;}', 35) self._test('function f(){return 42 - undefined;}', NaN) self._test('function f(){return 42 - null;}', 42) def test_mul(self): self._test('function f(){return 42 * 7;}', 294) self._test('function f(){return 42 * undefined;}', NaN) self._test('function f(){return 42 * null;}', 0) def test_div(self): jsi = JSInterpreter('function f(a, b){return a / b;}') self._test(jsi, NaN, args=(0, 0)) self._test(jsi, NaN, args=(JS_Undefined, 1)) self._test(jsi, float('inf'), args=(2, 0)) self._test(jsi, 0, args=(0, 3)) def test_mod(self): self._test('function f(){return 42 % 7;}', 0) self._test('function f(){return 42 % 0;}', NaN) self._test('function f(){return 42 % undefined;}', NaN) def test_exp(self): self._test('function f(){return 42 ** 2;}', 1764) self._test('function f(){return 42 ** undefined;}', NaN) self._test('function f(){return 42 ** null;}', 1) self._test('function f(){return undefined ** 42;}', NaN) def test_calc(self): self._test('function f(a){return 2*a+1;}', 7, args=[3]) def test_empty_return(self): self._test('function f(){return; y()}', None) def test_morespace(self): self._test('function f (a) { return 2 * a + 1 ; }', 7, args=[3]) self._test('function f () { x = 2 ; return x; }', 2) def test_strange_chars(self): self._test('function $_xY1 ($_axY1) { var $_axY2 = $_axY1 + 1; return $_axY2; }', 21, args=[20], func='$_xY1') def test_operators(self): self._test('function f(){return 1 << 5;}', 32) self._test('function f(){return 2 ** 5}', 32) self._test('function f(){return 19 & 21;}', 17) self._test('function f(){return 11 >> 2;}', 2) self._test('function f(){return []? 2+3: 4;}', 5) self._test('function f(){return 1 == 2}', False) self._test('function f(){return 0 && 1 || 2;}', 2) self._test('function f(){return 0 ?? 
42;}', 0) self._test('function f(){return "life, the universe and everything" < 42;}', False) self._test('function f(){return 0 - 7 * - 6;}', 42) self._test('function f(){return true << "5";}', 32) self._test('function f(){return true << true;}', 2) self._test('function f(){return "19" & "21.9";}', 17) self._test('function f(){return "19" & false;}', 0) self._test('function f(){return "11.0" >> "2.1";}', 2) self._test('function f(){return 5 ^ 9;}', 12) self._test('function f(){return 0.0 << NaN}', 0) self._test('function f(){return null << undefined}', 0) # TODO: Does not work due to number too large # self._test('function f(){return 21 << 4294967297}', 42) def test_array_access(self): self._test('function f(){var x = [1,2,3]; x[0] = 4; x[0] = 5; x[2.0] = 7; return x;}', [5, 2, 7]) def test_parens(self): self._test('function f(){return (1) + (2) * ((( (( (((((3)))))) )) ));}', 7) self._test('function f(){return (1 + 2) * 3;}', 9) def test_quotes(self): self._test(R'function f(){return "a\"\\("}', R'a"\(') def test_assignments(self): self._test('function f(){var x = 20; x = 30 + 1; return x;}', 31) self._test('function f(){var x = 20; x += 30 + 1; return x;}', 51) self._test('function f(){var x = 20; x -= 30 + 1; return x;}', -11) self._test('function f(){var x = 2; var y = ["a", "b"]; y[x%y["length"]]="z"; return y}', ['z', 'b']) @unittest.skip('Not implemented') def test_comments(self): self._test(''' function f() { var x = /* 1 + */ 2; var y = /* 30 * 40 */ 50; return x + y; } ''', 52) self._test(''' function f() { var x = "/*"; var y = 1 /* comment */ + 2; return y; } ''', 3) def test_precedence(self): self._test(''' function f() { var a = [10, 20, 30, 40, 50]; var b = 6; a[0]=a[b%a.length]; return a; } ''', [20, 20, 30, 40, 50]) def test_builtins(self): self._test('function f() { return NaN }', NaN) def test_date(self): self._test('function f() { return new Date("Wednesday 31 December 1969 18:01:26 MDT") - 0; }', 86000) jsi = JSInterpreter('function f(dt) { return new Date(dt) - 0; }') self._test(jsi, 86000, args=['Wednesday 31 December 1969 18:01:26 MDT']) self._test(jsi, 86000, args=['12/31/1969 18:01:26 MDT']) # m/d/y self._test(jsi, 0, args=['1 January 1970 00:00:00 UTC']) def test_call(self): jsi = JSInterpreter(''' function x() { return 2; } function y(a) { return x() + (a?a:0); } function z() { return y(3); } ''') self._test(jsi, 5, func='z') self._test(jsi, 2, func='y') def test_if(self): self._test(''' function f() { let a = 9; if (0==0) {a++} return a } ''', 10) self._test(''' function f() { if (0==0) {return 10} } ''', 10) self._test(''' function f() { if (0!=0) {return 1} else {return 10} } ''', 10) """ # Unsupported self._test(''' function f() { if (0!=0) {return 1} else if (1==0) {return 2} else {return 10} } ''', 10) """ def test_for_loop(self): self._test('function f() { a=0; for (i=0; i-10; i++) {a++} return a }', 10) def test_switch(self): jsi = JSInterpreter(''' function f(x) { switch(x){ case 1:x+=1; case 2:x+=2; case 3:x+=3;break; case 4:x+=4; default:x=0; } return x } ''') self._test(jsi, 7, args=[1]) self._test(jsi, 6, args=[3]) self._test(jsi, 0, args=[5]) def test_switch_default(self): jsi = JSInterpreter(''' function f(x) { switch(x){ case 2: x+=2; default: x-=1; case 5: case 6: x+=6; case 0: break; case 1: x+=1; } return x } ''') self._test(jsi, 2, args=[1]) self._test(jsi, 11, args=[5]) self._test(jsi, 14, args=[9]) def test_try(self): self._test('function f() { try{return 10} catch(e){return 5} }', 10) def test_catch(self): self._test('function f() { 
try{throw 10} catch(e){return 5} }', 5) def test_finally(self): self._test('function f() { try{throw 10} finally {return 42} }', 42) self._test('function f() { try{throw 10} catch(e){return 5} finally {return 42} }', 42) def test_nested_try(self): self._test(''' function f() {try { try{throw 10} finally {throw 42} } catch(e){return 5} } ''', 5) def test_for_loop_continue(self): self._test('function f() { a=0; for (i=0; i-10; i++) { continue; a++ } return a }', 0) def test_for_loop_break(self): self._test('function f() { a=0; for (i=0; i-10; i++) { break; a++ } return a }', 0) def test_for_loop_try(self): self._test(''' function f() { for (i=0; i-10; i++) { try { if (i == 5) throw i} catch {return 10} finally {break} }; return 42 } ''', 42) def test_literal_list(self): self._test('function f() { return [1, 2, "asdf", [5, 6, 7]][3] }', [5, 6, 7]) def test_comma(self): self._test('function f() { a=5; a -= 1, a+=3; return a }', 7) self._test('function f() { a=5; return (a -= 1, a+=3, a); }', 7) self._test('function f() { return (l=[0,1,2,3], function(a, b){return a+b})((l[1], l[2]), l[3]) }', 5) def test_void(self): self._test('function f() { return void 42; }', None) def test_return_function(self): jsi = JSInterpreter(''' function f() { return [1, function(){return 1}][1] } ''') self.assertEqual(jsi.call_function('f')([]), 1) def test_null(self): self._test('function f() { return null; }', None) self._test('function f() { return [null > 0, null < 0, null == 0, null === 0]; }', [False, False, False, False]) self._test('function f() { return [null >= 0, null <= 0]; }', [True, True]) def test_undefined(self): self._test('function f() { return undefined === undefined; }', True) self._test('function f() { return undefined; }', JS_Undefined) self._test('function f() {return undefined ?? 
42; }', 42) self._test('function f() { let v; return v; }', JS_Undefined) self._test('function f() { let v; return v**0; }', 1) self._test('function f() { let v; return [v>42, v<=42, v&&42, 42&&v]; }', [False, False, JS_Undefined, JS_Undefined]) self._test(''' function f() { return [ undefined === undefined, undefined == undefined, undefined == null, undefined < undefined, undefined > undefined, undefined === 0, undefined == 0, undefined < 0, undefined > 0, undefined >= 0, undefined <= 0, undefined > null, undefined < null, undefined === null ]; } ''', list(map(bool, (1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)))) jsi = JSInterpreter(''' function f() { let v; return [42+v, v+42, v**42, 42**v, 0**v]; } ''') for y in jsi.call_function('f'): self.assertTrue(math.isnan(y)) def test_object(self): self._test('function f() { return {}; }', {}) self._test('function f() { let a = {m1: 42, m2: 0 }; return [a["m1"], a.m2]; }', [42, 0]) self._test('function f() { let a; return a?.qq; }', JS_Undefined) self._test('function f() { let a = {m1: 42, m2: 0 }; return a?.qq; }', JS_Undefined) def test_regex(self): self._test('function f() { let a=/,,[/,913,/](,)}/; }', None) self._test('function f() { let a=/,,[/,913,/](,)}/; return a; }', R'/,,[/,913,/](,)}/0') R''' # We are not compiling regex jsi = JSInterpreter('function f() { let a=/,,[/,913,/](,)}/; return a; }') self.assertIsInstance(jsi.call_function('f'), re.Pattern) jsi = JSInterpreter('function f() { let a=/,,[/,913,/](,)}/i; return a; }') self.assertEqual(jsi.call_function('f').flags & re.I, re.I) jsi = JSInterpreter(R'function f() { let a=/,][}",],()}(\[)/; return a; }') self.assertEqual(jsi.call_function('f').pattern, r',][}",],()}(\[)') jsi = JSInterpreter(R'function f() { let a=[/[)\\]/]; return a[0]; }') self.assertEqual(jsi.call_function('f').pattern, r'[)\\]') ''' @unittest.skip('Not implemented') def test_replace(self): self._test('function f() { let a="data-name".replace("data-", ""); return a }', 'name') self._test('function f() { let a="data-name".replace(new RegExp("^.+-"), ""); return a; }', 'name') self._test('function f() { let a="data-name".replace(/^.+-/, ""); return a; }', 'name') self._test('function f() { let a="data-name".replace(/a/g, "o"); return a; }', 'doto-nome') self._test('function f() { let a="data-name".replaceAll("a", "o"); return a; }', 'doto-nome') def test_char_code_at(self): jsi = JSInterpreter('function f(i){return "test".charCodeAt(i)}') self._test(jsi, 116, args=[0]) self._test(jsi, 101, args=[1]) self._test(jsi, 115, args=[2]) self._test(jsi, 116, args=[3]) self._test(jsi, None, args=[4]) self._test(jsi, 116, args=['not_a_number']) def test_bitwise_operators_overflow(self): self._test('function f(){return -524999584 << 5}', 379882496) self._test('function f(){return 1236566549 << 5}', 915423904) def test_bitwise_operators_typecast(self): self._test('function f(){return null << 5}', 0) self._test('function f(){return undefined >> 5}', 0) self._test('function f(){return 42 << NaN}', 42) def test_negative(self): self._test('function f(){return 2 * -2.0 ;}', -4) self._test('function f(){return 2 - - -2 ;}', 0) self._test('function f(){return 2 - - - -2 ;}', 4) self._test('function f(){return 2 - + + - -2;}', 0) self._test('function f(){return 2 + - + - -2;}', 0) @unittest.skip('Not implemented') def test_packed(self): jsi = JSInterpreter('''function f(p,a,c,k,e,d){while(c--)if(k[c])p=p.replace(new RegExp('\\b'+c.toString(a)+'\\b','g'),k[c]);return p}''') self.assertEqual(jsi.call_function('f', '''h 
7=g("1j");7.7h({7g:[{33:"w://7f-7e-7d-7c.v.7b/7a/79/78/77/76.74?t=73&s=2s&e=72&f=2t&71=70.0.0.1&6z=6y&6x=6w"}],6v:"w://32.v.u/6u.31",16:"r%",15:"r%",6t:"6s",6r:"",6q:"l",6p:"l",6o:"6n",6m:\'6l\',6k:"6j",9:[{33:"/2u?b=6i&n=50&6h=w://32.v.u/6g.31",6f:"6e"}],1y:{6d:1,6c:\'#6b\',6a:\'#69\',68:"67",66:30,65:r,},"64":{63:"%62 2m%m%61%5z%5y%5x.u%5w%5v%5u.2y%22 2k%m%1o%22 5t%m%1o%22 5s%m%1o%22 2j%m%5r%22 16%m%5q%22 15%m%5p%22 5o%2z%5n%5m%2z",5l:"w://v.u/d/1k/5k.2y",5j:[]},\'5i\':{"5h":"5g"},5f:"5e",5d:"w://v.u",5c:{},5b:l,1x:[0.25,0.50,0.75,1,1.25,1.5,2]});h 1m,1n,5a;h 59=0,58=0;h 7=g("1j");h 2x=0,57=0,56=0;$.55({54:{\'53-52\':\'2i-51\'}});7.j(\'4z\',6(x){c(5>0&&x.1l>=5&&1n!=1){1n=1;$(\'q.4y\').4x(\'4w\')}});7.j(\'13\',6(x){2x=x.1l});7.j(\'2g\',6(x){2w(x)});7.j(\'4v\',6(){$(\'q.2v\').4u()});6 2w(x){$(\'q.2v\').4t();c(1m)19;1m=1;17=0;c(4s.4r===l){17=1}$.4q(\'/2u?b=4p&2l=1k&4o=2t-4n-4m-2s-4l&4k=&4j=&4i=&17=\'+17,6(2r){$(\'#4h\').4g(2r)});$(\'.3-8-4f-4e:4d("4c")\').2h(6(e){2q();g().4b(0);g().4a(l)});6 2q(){h $14=$("<q />").2p({1l:"49",16:"r%",15:"r%",48:0,2n:0,2o:47,46:"45(10%, 10%, 10%, 0.4)","44-43":"42"});$("<41 />").2p({16:"60%",15:"60%",2o:40,"3z-2n":"3y"}).3x({\'2m\':\'/?b=3w&2l=1k\',\'2k\':\'0\',\'2j\':\'2i\'}).2f($14);$14.2h(6(){$(3v).3u();g().2g()});$14.2f($(\'#1j\'))}g().13(0);}6 3t(){h 9=7.1b(2e);2d.2c(9);c(9.n>1){1r(i=0;i<9.n;i++){c(9[i].1a==2e){2d.2c(\'!!=\'+i);7.1p(i)}}}}7.j(\'3s\',6(){g().1h("/2a/3r.29","3q 10 28",6(){g().13(g().27()+10)},"2b");$("q[26=2b]").23().21(\'.3-20-1z\');g().1h("/2a/3p.29","3o 10 28",6(){h 12=g().27()-10;c(12<0)12=0;g().13(12)},"24");$("q[26=24]").23().21(\'.3-20-1z\');});6 1i(){}7.j(\'3n\',6(){1i()});7.j(\'3m\',6(){1i()});7.j("k",6(y){h 9=7.1b();c(9.n<2)19;$(\'.3-8-3l-3k\').3j(6(){$(\'#3-8-a-k\').1e(\'3-8-a-z\');$(\'.3-a-k\').p(\'o-1f\',\'11\')});7.1h("/3i/3h.3g","3f 3e",6(){$(\'.3-1w\').3d(\'3-8-1v\');$(\'.3-8-1y, .3-8-1x\').p(\'o-1g\',\'11\');c($(\'.3-1w\').3c(\'3-8-1v\')){$(\'.3-a-k\').p(\'o-1g\',\'l\');$(\'.3-a-k\').p(\'o-1f\',\'l\');$(\'.3-8-a\').1e(\'3-8-a-z\');$(\'.3-8-a:1u\').3b(\'3-8-a-z\')}3a{$(\'.3-a-k\').p(\'o-1g\',\'11\');$(\'.3-a-k\').p(\'o-1f\',\'11\');$(\'.3-8-a:1u\').1e(\'3-8-a-z\')}},"39");7.j("38",6(y){1d.37(\'1c\',y.9[y.36].1a)});c(1d.1t(\'1c\')){35("1s(1d.1t(\'1c\'));",34)}});h 18;6 1s(1q){h 
9=7.1b();c(9.n>1){1r(i=0;i<9.n;i++){c(9[i].1a==1q){c(i==18){19}18=i;7.1p(i)}}}}',36,270,'|||jw|||function|player|settings|tracks|submenu||if||||jwplayer|var||on|audioTracks|true|3D|length|aria|attr|div|100|||sx|filemoon|https||event|active||false|tt|seek|dd|height|width|adb|current_audio|return|name|getAudioTracks|default_audio|localStorage|removeClass|expanded|checked|addButton|callMeMaybe|vplayer|0fxcyc2ajhp1|position|vvplay|vvad|220|setCurrentAudioTrack|audio_name|for|audio_set|getItem|last|open|controls|playbackRates|captions|rewind|icon|insertAfter||detach|ff00||button|getPosition|sec|png|player8|ff11|log|console|track_name|appendTo|play|click|no|scrolling|frameborder|file_code|src|top|zIndex|css|showCCform|data|1662367683|383371|dl|video_ad|doPlay|prevt|mp4|3E||jpg|thumbs|file|300|setTimeout|currentTrack|setItem|audioTrackChanged|dualSound|else|addClass|hasClass|toggleClass|Track|Audio|svg|dualy|images|mousedown|buttons|topbar|playAttemptFailed|beforePlay|Rewind|fr|Forward|ff|ready|set_audio_track|remove|this|upload_srt|prop|50px|margin|1000001|iframe|center|align|text|rgba|background|1000000|left|absolute|pause|setCurrentCaptions|Upload|contains|item|content|html|fviews|referer|prem|embed|3e57249ef633e0d03bf76ceb8d8a4b65|216|83|hash|view|get|TokenZir|window|hide|show|complete|slow|fadeIn|video_ad_fadein|time||cache|Cache|Content|headers|ajaxSetup|v2done|tott|vastdone2|vastdone1|vvbefore|playbackRateControls|cast|aboutlink|FileMoon|abouttext|UHD|1870|qualityLabels|sites|GNOME_POWER|link|2Fiframe|3C|allowfullscreen|22360|22640|22no|marginheight|marginwidth|2FGNOME_POWER|2F0fxcyc2ajhp1|2Fe|2Ffilemoon|2F|3A||22https|3Ciframe|code|sharing|fontOpacity|backgroundOpacity|Tahoma|fontFamily|303030|backgroundColor|FFFFFF|color|userFontScale|thumbnails|kind|0fxcyc2ajhp10000|url|get_slides|start|startparam|none|preload|html5|primary|hlshtml|androidhls|duration|uniform|stretching|0fxcyc2ajhp1_xt|image|2048|sp|6871|asn|127|srv|43200|_g3XlBcu2lmD9oDexD2NLWSmah2Nu3XcDrl93m9PwXY|m3u8||master|0fxcyc2ajhp1_x|00076|01|hls2|to|s01|delivery|storage|moon|sources|setup'''.split('|'))) # noqa: SIM905 def test_join(self): test_input = list('test') tests = [ 'function f(a, b){return a.join(b)}', 'function f(a, b){return Array.prototype.join.call(a, b)}', 'function f(a, b){return Array.prototype.join.apply(a, [b])}', ] for test in tests: jsi = JSInterpreter(test) self._test(jsi, 'test', args=[test_input, '']) self._test(jsi, 't-e-s-t', args=[test_input, '-']) self._test(jsi, '', args=[[], '-']) def test_split(self): test_result = list('test') tests = [ 'function f(a, b){return a.split(b)}', 'function f(a, b){return a["split"](b)}', 'function f(a, b){let x = ["split"]; return a[x[0]](b)}', 'function f(a, b){return String.prototype.split.call(a, b)}', 'function f(a, b){return String.prototype.split.apply(a, [b])}', ] for test in tests: jsi = JSInterpreter(test) self._test(jsi, test_result, args=['test', '']) self._test(jsi, test_result, args=['t-e-s-t', '-']) self._test(jsi, [''], args=['', '-']) self._test(jsi, [], args=['', '']) def test_slice(self): self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice()}', [0, 1, 2, 3, 4, 5, 6, 7, 8]) self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(0)}', [0, 1, 2, 3, 4, 5, 6, 7, 8]) self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(5)}', [5, 6, 7, 8]) self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(99)}', []) self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(-2)}', [7, 8]) self._test('function 
f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(-99)}', [0, 1, 2, 3, 4, 5, 6, 7, 8]) self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(0, 0)}', []) self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(1, 0)}', []) self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(0, 1)}', [0]) self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(3, 6)}', [3, 4, 5]) self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(1, -1)}', [1, 2, 3, 4, 5, 6, 7]) self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(-1, 1)}', []) self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(-3, -1)}', [6, 7]) self._test('function f(){return "012345678".slice()}', '012345678') self._test('function f(){return "012345678".slice(0)}', '012345678') self._test('function f(){return "012345678".slice(5)}', '5678') self._test('function f(){return "012345678".slice(99)}', '') self._test('function f(){return "012345678".slice(-2)}', '78') self._test('function f(){return "012345678".slice(-99)}', '012345678') self._test('function f(){return "012345678".slice(0, 0)}', '') self._test('function f(){return "012345678".slice(1, 0)}', '') self._test('function f(){return "012345678".slice(0, 1)}', '0') self._test('function f(){return "012345678".slice(3, 6)}', '345') self._test('function f(){return "012345678".slice(1, -1)}', '1234567') self._test('function f(){return "012345678".slice(-1, 1)}', '') self._test('function f(){return "012345678".slice(-3, -1)}', '67') def test_splice(self): self._test('function f(){var T = ["0", "1", "2"]; T["splice"](2, 1, "0")[0]; return T }', ['0', '1', '0']) def test_js_number_to_string(self): for test, radix, expected in [ (0, None, '0'), (-0, None, '0'), (0.0, None, '0'), (-0.0, None, '0'), (math.nan, None, 'NaN'), (-math.nan, None, 'NaN'), (math.inf, None, 'Infinity'), (-math.inf, None, '-Infinity'), (10 ** 21.5, 8, '526665530627250154000000'), (6, 2, '110'), (254, 16, 'fe'), (-10, 2, '-1010'), (-0xff, 2, '-11111111'), (0.1 + 0.2, 16, '0.4cccccccccccd'), (1234.1234, 10, '1234.1234'), # (1000000000000000128, 10, '1000000000000000100') ]: assert js_number_to_string(test, radix) == expected def test_extract_function(self): jsi = JSInterpreter('function a(b) { return b + 1; }') func = jsi.extract_function('a') self.assertEqual(func([2]), 3) def test_extract_function_with_global_stack(self): jsi = JSInterpreter('function c(d) { return d + e + f + g; }') func = jsi.extract_function('c', {'e': 10}, {'f': 100, 'g': 1000}) self.assertEqual(func([1]), 1111) def test_extract_object(self): jsi = JSInterpreter('var a={};a.xy={};var xy;var zxy={};xy={z:function(){return "abc"}};') self.assertTrue('z' in jsi.extract_object('xy', None)) def test_increment_decrement(self): self._test('function f() { var x = 1; return ++x; }', 2) self._test('function f() { var x = 1; return x++; }', 1) self._test('function f() { var x = 1; x--; return x }', 0) self._test('function f() { var y; var x = 1; x++, --x, x--, x--, y="z", "abc", x++; return --x }', -1) self._test('function f() { var a = "test--"; return a; }', 'test--') self._test('function f() { var b = 1; var a = "b--"; return a; }', 'b--') def test_nested_function_scoping(self): self._test(R''' function f() { var g = function() { var P = 2; return P; }; var P = 1; g(); return P; } ''', 1) self._test(R''' function f() { var x = function() { for (var w = 1, M = []; w < 2; w++) switch (w) { case 1: M.push("a"); case 2: M.push("b"); } return M }; var w = "c"; var M = "d"; var y = x(); y.push(w); 
y.push(M); return y; } ''', ['a', 'b', 'c', 'd']) self._test(R''' function f() { var P, Q; var z = 100; var g = function() { var P, Q; P = 2; Q = 15; z = 0; return P+Q; }; P = 1; Q = 10; var x = g(), y = 3; return P+Q+x+y+z; } ''', 31) def test_undefined_varnames(self): jsi = JSInterpreter('function f(){ var a; return [a, b]; }') self._test(jsi, [JS_Undefined, JS_Undefined]) self.assertEqual(jsi._undefined_varnames, {'b'}) if __name__ == '__main__': unittest.main()
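# --- Illustrative usage sketch (added; mirrors the suite's _test helper) ---
# Interpreting a tiny, hypothetical JS function and calling it from Python:
from yt_dlp.jsinterp import JSInterpreter as _JSInterpreterDemo

_demo_jsi = _JSInterpreterDemo('function double(x) { return x * 2; }')
assert _demo_jsi.call_function('double', 21) == 42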
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_all_urls.py
test/test_all_urls.py
#!/usr/bin/env python3 # Allow direct execution import os import sys import unittest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import collections from test.helper import gettestcases from yt_dlp.extractor import FacebookIE, YoutubeIE, gen_extractors class TestAllURLsMatching(unittest.TestCase): def setUp(self): self.ies = gen_extractors() def matching_ies(self, url): return [ie.IE_NAME for ie in self.ies if ie.suitable(url) and ie.IE_NAME != 'generic'] def assertMatch(self, url, ie_list): self.assertEqual(self.matching_ies(url), ie_list) def test_youtube_playlist_matching(self): assertPlaylist = lambda url: self.assertMatch(url, ['youtube:playlist']) assertTab = lambda url: self.assertMatch(url, ['youtube:tab']) assertPlaylist('ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8') assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q') # 585 assertPlaylist('PL63F0C78739B09958') assertTab('https://www.youtube.com/AsapSCIENCE') assertTab('https://www.youtube.com/embedded') assertTab('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q') assertTab('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC') assertTab('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') # 668 self.assertFalse('youtube:playlist' in self.matching_ies('PLtS2H6bU1M')) # Top tracks assertTab('https://www.youtube.com/playlist?list=MCUS.20142101') def test_youtube_matching(self): self.assertTrue(YoutubeIE.suitable('PLtS2H6bU1M')) self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) # 668 self.assertMatch('http://youtu.be/BaW_jenozKc', ['youtube']) # self.assertMatch('http://www.youtube.com/v/BaW_jenozKc', ['youtube']) # /v/ is no longer valid self.assertMatch('https://youtube.googleapis.com/v/BaW_jenozKc', ['youtube']) self.assertMatch('http://www.cleanvideosearch.com/media/action/yt/watch?videoId=8v_4O44sfjM', ['youtube']) def test_youtube_channel_matching(self): assertChannel = lambda url: self.assertMatch(url, ['youtube:tab']) assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM') assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM?feature=gb_ch_rec') assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM/videos') def test_youtube_user_matching(self): self.assertMatch('http://www.youtube.com/NASAgovVideo/videos', ['youtube:tab']) def test_youtube_feeds(self): self.assertMatch('https://www.youtube.com/feed/library', ['youtube:tab']) self.assertMatch('https://www.youtube.com/feed/history', ['youtube:tab']) self.assertMatch('https://www.youtube.com/feed/watch_later', ['youtube:tab']) self.assertMatch('https://www.youtube.com/feed/subscriptions', ['youtube:tab']) def test_youtube_search_matching(self): self.assertMatch('http://www.youtube.com/results?search_query=making+mustard', ['youtube:search_url']) self.assertMatch('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video', ['youtube:search_url']) def test_facebook_matching(self): self.assertTrue(FacebookIE.suitable('https://www.facebook.com/Shiniknoh#!/photo.php?v=10153317450565268')) self.assertTrue(FacebookIE.suitable('https://www.facebook.com/cindyweather?fref=ts#!/photo.php?v=10152183998945793')) def test_no_duplicates(self): ies = gen_extractors() for tc in gettestcases(include_onlymatching=True): url = tc['url'] for ie in ies: if type(ie).__name__ in ('GenericIE', tc['name'] + 'IE'): self.assertTrue(ie.suitable(url), f'{type(ie).__name__} should match URL {url!r}') 
else: self.assertFalse( ie.suitable(url), f'{type(ie).__name__} should not match URL {url!r} . That URL belongs to {tc["name"]}.') def test_keywords(self): self.assertMatch(':ytsubs', ['youtube:subscriptions']) self.assertMatch(':ytsubscriptions', ['youtube:subscriptions']) self.assertMatch(':ythistory', ['youtube:history']) def test_vimeo_matching(self): self.assertMatch('https://vimeo.com/channels/tributes', ['vimeo:channel']) self.assertMatch('https://vimeo.com/channels/31259', ['vimeo:channel']) self.assertMatch('https://vimeo.com/channels/31259/53576664', ['vimeo']) self.assertMatch('https://vimeo.com/user7108434', ['vimeo:user']) self.assertMatch('https://vimeo.com/user7108434/videos', ['vimeo:user']) self.assertMatch('https://vimeo.com/user21297594/review/75524534/3c257a1b5d', ['vimeo:review']) # https://github.com/ytdl-org/youtube-dl/issues/1930 def test_soundcloud_not_matching_sets(self): self.assertMatch('http://soundcloud.com/floex/sets/gone-ep', ['soundcloud:set']) def test_tumblr(self): self.assertMatch('http://tatianamaslanydaily.tumblr.com/post/54196191430/orphan-black-dvd-extra-behind-the-scenes', ['Tumblr']) self.assertMatch('http://tatianamaslanydaily.tumblr.com/post/54196191430', ['Tumblr']) def test_pbs(self): # https://github.com/ytdl-org/youtube-dl/issues/2350 self.assertMatch('http://video.pbs.org/viralplayer/2365173446/', ['pbs']) self.assertMatch('http://video.pbs.org/widget/partnerplayer/980042464/', ['pbs']) def test_no_duplicated_ie_names(self): name_accu = collections.defaultdict(list) for ie in self.ies: name_accu[ie.IE_NAME.lower()].append(type(ie).__name__) for (ie_name, ie_list) in name_accu.items(): self.assertEqual( len(ie_list), 1, f'Multiple extractors with the same IE_NAME "{ie_name}" ({", ".join(ie_list)})') if __name__ == '__main__': unittest.main()
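# --- Illustrative sketch (added; not part of the original suite) ---
# suitable() is the classmethod these tests query: it answers whether an
# extractor's _VALID_URL pattern matches a URL, with no network access.
from yt_dlp.extractor import YoutubeIE as _YoutubeIEDemo

assert _YoutubeIEDemo.suitable('http://youtu.be/BaW_jenozKc')
assert not _YoutubeIEDemo.suitable('https://vimeo.com/channels/tributes')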
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
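The test file above exercises yt-dlp's URL-to-extractor routing. As a minimal sketch of the same check outside the test harness (assuming only that yt_dlp is importable; gen_extractors and suitable are the very APIs the file itself uses):

# Sketch: list which non-generic extractors claim a URL,
# mirroring TestAllURLsMatching.matching_ies above.
from yt_dlp.extractor import gen_extractors

def matching_extractors(url):
    return [ie.IE_NAME for ie in gen_extractors()
            if ie.suitable(url) and ie.IE_NAME != 'generic']

print(matching_extractors('https://vimeo.com/user7108434'))  # expect ['vimeo:user']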
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_pot/test_pot_builtin_webpospec.py
test/test_pot/test_pot_builtin_webpospec.py
import pytest

from yt_dlp.extractor.youtube.pot._provider import IEContentProvider, BuiltinIEContentProvider
from yt_dlp.extractor.youtube.pot.cache import CacheProviderWritePolicy
from yt_dlp.utils import bug_reports_message
from yt_dlp.extractor.youtube.pot.provider import (
    PoTokenRequest,
    PoTokenContext,
)
from yt_dlp.version import __version__

from yt_dlp.extractor.youtube.pot._builtin.webpo_cachespec import WebPoPCSP
from yt_dlp.extractor.youtube.pot._registry import _pot_pcs_providers


@pytest.fixture()
def pot_request(pot_request) -> PoTokenRequest:
    pot_request.visitor_data = 'CgsxMjNhYmNYWVpfLSiA4s%2DqBg%3D%3D'  # visitor_id=123abcXYZ_-
    return pot_request


class TestWebPoPCSP:
    def test_base_type(self):
        assert issubclass(WebPoPCSP, IEContentProvider)
        assert issubclass(WebPoPCSP, BuiltinIEContentProvider)

    def test_init(self, ie, logger):
        pcs = WebPoPCSP(ie=ie, logger=logger, settings={})
        assert pcs.PROVIDER_NAME == 'webpo'
        assert pcs.PROVIDER_VERSION == __version__
        assert pcs.BUG_REPORT_MESSAGE == bug_reports_message(before='')
        assert pcs.is_available()

    def test_is_registered(self):
        assert _pot_pcs_providers.value.get('WebPo') == WebPoPCSP

    @pytest.mark.parametrize('client_name, context, is_authenticated', [
        ('ANDROID', PoTokenContext.GVS, False),
        ('IOS', PoTokenContext.GVS, False),
        ('IOS', PoTokenContext.PLAYER, False),
    ])
    def test_not_supports(self, ie, logger, pot_request, client_name, context, is_authenticated):
        pcs = WebPoPCSP(ie=ie, logger=logger, settings={})
        pot_request.innertube_context['client']['clientName'] = client_name
        pot_request.context = context
        pot_request.is_authenticated = is_authenticated
        assert pcs.generate_cache_spec(pot_request) is None

    @pytest.mark.parametrize('client_name, context, is_authenticated, remote_host, source_address, request_proxy, expected', [
        *[(client, context, is_authenticated, remote_host, source_address, request_proxy, expected) for client in [
            'WEB', 'MWEB', 'TVHTML5', 'WEB_EMBEDDED_PLAYER', 'WEB_CREATOR', 'TVHTML5_SIMPLY_EMBEDDED_PLAYER', 'TVHTML5_SIMPLY']
            for context, is_authenticated, remote_host, source_address, request_proxy, expected in [
                (PoTokenContext.GVS, False, 'example-remote-host', 'example-source-address', 'example-request-proxy', {'t': 'webpo', 'ip': 'example-remote-host', 'sa': 'example-source-address', 'px': 'example-request-proxy', 'cb': '123abcXYZ_-', 'cbt': 'visitor_id'}),
                (PoTokenContext.PLAYER, False, 'example-remote-host', 'example-source-address', 'example-request-proxy', {'t': 'webpo', 'ip': 'example-remote-host', 'sa': 'example-source-address', 'px': 'example-request-proxy', 'cb': '123abcXYZ_-', 'cbt': 'video_id'}),
                (PoTokenContext.GVS, True, 'example-remote-host', 'example-source-address', 'example-request-proxy', {'t': 'webpo', 'ip': 'example-remote-host', 'sa': 'example-source-address', 'px': 'example-request-proxy', 'cb': 'example-data-sync-id', 'cbt': 'datasync_id'}),
        ]],
        ('WEB_REMIX', PoTokenContext.PLAYER, False, 'example-remote-host', 'example-source-address', 'example-request-proxy', {'t': 'webpo', 'ip': 'example-remote-host', 'sa': 'example-source-address', 'px': 'example-request-proxy', 'cb': '123abcXYZ_-', 'cbt': 'visitor_id'}),
        ('WEB', PoTokenContext.GVS, False, None, None, None, {'t': 'webpo', 'cb': '123abcXYZ_-', 'cbt': 'visitor_id', 'ip': None, 'sa': None, 'px': None}),
        ('TVHTML5', PoTokenContext.PLAYER, False, None, None, 'http://example.com', {'t': 'webpo', 'cb': '123abcXYZ_-', 'cbt': 'video_id', 'ip': None, 'sa': None, 'px': 'http://example.com'}),
    ])
    def test_generate_key_bindings(self, ie, logger, pot_request, client_name, context, is_authenticated, remote_host, source_address, request_proxy, expected):
        pcs = WebPoPCSP(ie=ie, logger=logger, settings={})
        pot_request.innertube_context['client']['clientName'] = client_name
        pot_request.context = context
        pot_request.is_authenticated = is_authenticated
        pot_request.innertube_context['client']['remoteHost'] = remote_host
        pot_request.request_source_address = source_address
        pot_request.request_proxy = request_proxy
        pot_request.video_id = '123abcXYZ_-'  # same as visitor id to test type

        assert pcs.generate_cache_spec(pot_request).key_bindings == expected

    def test_no_bind_visitor_id(self, ie, logger, pot_request):
        # Should not bind to visitor id if setting is set to False
        pcs = WebPoPCSP(ie=ie, logger=logger, settings={'bind_to_visitor_id': ['false']})
        pot_request.innertube_context['client']['clientName'] = 'WEB'
        pot_request.context = PoTokenContext.GVS
        pot_request.is_authenticated = False
        assert pcs.generate_cache_spec(pot_request).key_bindings == {'t': 'webpo', 'ip': None, 'sa': None, 'px': None, 'cb': 'CgsxMjNhYmNYWVpfLSiA4s%2DqBg%3D%3D', 'cbt': 'visitor_data'}

    def test_default_ttl(self, ie, logger, pot_request):
        pcs = WebPoPCSP(ie=ie, logger=logger, settings={})
        assert pcs.generate_cache_spec(pot_request).default_ttl == 6 * 60 * 60  # should default to 6 hours

    def test_write_policy(self, ie, logger, pot_request):
        pcs = WebPoPCSP(ie=ie, logger=logger, settings={})
        pot_request.context = PoTokenContext.GVS
        assert pcs.generate_cache_spec(pot_request).write_policy == CacheProviderWritePolicy.WRITE_ALL
        pot_request.context = PoTokenContext.PLAYER
        assert pcs.generate_cache_spec(pot_request).write_policy == CacheProviderWritePolicy.WRITE_FIRST
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
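A note on the fixture above: the tests expect visitor_data 'CgsxMjNhYmNYWVpfLSiA4s%2DqBg%3D%3D' to yield the visitor id '123abcXYZ_-'. That works because visitor_data is URL-encoded, base64url-encoded protobuf whose first (length-delimited) field is the visitor id string. The decoder below is an illustrative sketch of that relationship, not yt-dlp's own implementation:

import base64
import urllib.parse

def visitor_id_from_visitor_data(visitor_data):
    # Protobuf field 1 with wire type 2 (length-delimited) has tag byte 0x0a,
    # followed by a one-byte length and the visitor id itself.
    raw = base64.urlsafe_b64decode(urllib.parse.unquote(visitor_data))
    if raw[:1] != b'\x0a':
        return None  # extraction failed; callers would fall back to visitor_data
    return raw[2:2 + raw[1]].decode()

print(visitor_id_from_visitor_data('CgsxMjNhYmNYWVpfLSiA4s%2DqBg%3D%3D'))  # 123abcXYZ_-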
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_pot/test_pot_framework.py
test/test_pot/test_pot_framework.py
import pytest

from yt_dlp.extractor.youtube.pot._provider import IEContentProvider, configuration_arg
from yt_dlp.cookies import YoutubeDLCookieJar
from yt_dlp.utils.networking import HTTPHeaderDict
from yt_dlp.extractor.youtube.pot.provider import (
    PoTokenRequest,
    PoTokenContext,
    ExternalRequestFeature,
)

from yt_dlp.extractor.youtube.pot.cache import (
    PoTokenCacheProvider,
    PoTokenCacheSpec,
    PoTokenCacheSpecProvider,
    CacheProviderWritePolicy,
)

import yt_dlp.extractor.youtube.pot.cache as cache

from yt_dlp.networking import Request
from yt_dlp.extractor.youtube.pot.provider import (
    PoTokenResponse,
    PoTokenProvider,
    PoTokenProviderRejectedRequest,
    provider_bug_report_message,
    register_provider,
    register_preference,
)

from yt_dlp.extractor.youtube.pot._registry import _pot_providers, _ptp_preferences, _pot_pcs_providers, _pot_cache_providers, _pot_cache_provider_preferences


class ExamplePTP(PoTokenProvider):
    PROVIDER_NAME = 'example'
    PROVIDER_VERSION = '0.0.1'
    BUG_REPORT_LOCATION = 'https://example.com/issues'

    _SUPPORTED_CLIENTS = ('WEB',)
    _SUPPORTED_CONTEXTS = (PoTokenContext.GVS, )

    _SUPPORTED_EXTERNAL_REQUEST_FEATURES = (
        ExternalRequestFeature.PROXY_SCHEME_HTTP,
        ExternalRequestFeature.PROXY_SCHEME_SOCKS5H,
    )

    def is_available(self) -> bool:
        return True

    def _real_request_pot(self, request: PoTokenRequest) -> PoTokenResponse:
        return PoTokenResponse('example-token', expires_at=123)


class ExampleCacheProviderPCP(PoTokenCacheProvider):

    PROVIDER_NAME = 'example'
    PROVIDER_VERSION = '0.0.1'
    BUG_REPORT_LOCATION = 'https://example.com/issues'

    def is_available(self) -> bool:
        return True

    def get(self, key: str):
        return 'example-cache'

    def store(self, key: str, value: str, expires_at: int):
        pass

    def delete(self, key: str):
        pass


class ExampleCacheSpecProviderPCSP(PoTokenCacheSpecProvider):

    PROVIDER_NAME = 'example'
    PROVIDER_VERSION = '0.0.1'
    BUG_REPORT_LOCATION = 'https://example.com/issues'

    def generate_cache_spec(self, request: PoTokenRequest):
        return PoTokenCacheSpec(
            key_bindings={'field': 'example-key'},
            default_ttl=60,
            write_policy=CacheProviderWritePolicy.WRITE_FIRST,
        )


class TestPoTokenProvider:
    def test_base_type(self):
        assert issubclass(PoTokenProvider, IEContentProvider)

    def test_create_provider_missing_fetch_method(self, ie, logger):
        class MissingMethodsPTP(PoTokenProvider):
            def is_available(self) -> bool:
                return True

        with pytest.raises(TypeError):
            MissingMethodsPTP(ie=ie, logger=logger, settings={})

    def test_create_provider_missing_available_method(self, ie, logger):
        class MissingMethodsPTP(PoTokenProvider):
            def _real_request_pot(self, request: PoTokenRequest) -> PoTokenResponse:
                raise PoTokenProviderRejectedRequest('Not implemented')

        with pytest.raises(TypeError):
            MissingMethodsPTP(ie=ie, logger=logger, settings={})

    def test_barebones_provider(self, ie, logger):
        class BarebonesProviderPTP(PoTokenProvider):
            def is_available(self) -> bool:
                return True

            def _real_request_pot(self, request: PoTokenRequest) -> PoTokenResponse:
                raise PoTokenProviderRejectedRequest('Not implemented')

        provider = BarebonesProviderPTP(ie=ie, logger=logger, settings={})
        assert provider.PROVIDER_NAME == 'BarebonesProvider'
        assert provider.PROVIDER_KEY == 'BarebonesProvider'
        assert provider.PROVIDER_VERSION == '0.0.0'
        assert provider.BUG_REPORT_MESSAGE == 'please report this issue to the provider developer at (developer has not provided a bug report location) .'

    def test_example_provider_success(self, ie, logger, pot_request):
        provider = ExamplePTP(ie=ie, logger=logger, settings={})
        assert provider.PROVIDER_NAME == 'example'
        assert provider.PROVIDER_KEY == 'Example'
        assert provider.PROVIDER_VERSION == '0.0.1'
        assert provider.BUG_REPORT_MESSAGE == 'please report this issue to the provider developer at https://example.com/issues .'
        assert provider.is_available()

        response = provider.request_pot(pot_request)

        assert response.po_token == 'example-token'
        assert response.expires_at == 123

    def test_provider_unsupported_context(self, ie, logger, pot_request):
        provider = ExamplePTP(ie=ie, logger=logger, settings={})
        pot_request.context = PoTokenContext.PLAYER

        with pytest.raises(PoTokenProviderRejectedRequest):
            provider.request_pot(pot_request)

    def test_provider_unsupported_client(self, ie, logger, pot_request):
        provider = ExamplePTP(ie=ie, logger=logger, settings={})
        pot_request.innertube_context['client']['clientName'] = 'ANDROID'

        with pytest.raises(PoTokenProviderRejectedRequest):
            provider.request_pot(pot_request)

    def test_provider_unsupported_proxy_scheme(self, ie, logger, pot_request):
        provider = ExamplePTP(ie=ie, logger=logger, settings={})
        pot_request.request_proxy = 'socks4://example.com'

        with pytest.raises(
            PoTokenProviderRejectedRequest,
            match=r'External requests by "example" provider do not support proxy scheme "socks4"\. Supported proxy '
                  'schemes: http, socks5h',
        ):
            provider.request_pot(pot_request)

        pot_request.request_proxy = 'http://example.com'

        assert provider.request_pot(pot_request)

    def test_provider_ignore_external_request_features(self, ie, logger, pot_request):
        class InternalPTP(ExamplePTP):
            _SUPPORTED_EXTERNAL_REQUEST_FEATURES = None

        provider = InternalPTP(ie=ie, logger=logger, settings={})

        pot_request.request_proxy = 'socks5://example.com'
        assert provider.request_pot(pot_request)
        pot_request.request_source_address = '0.0.0.0'
        assert provider.request_pot(pot_request)

    def test_provider_unsupported_external_request_source_address(self, ie, logger, pot_request):
        class InternalPTP(ExamplePTP):
            _SUPPORTED_EXTERNAL_REQUEST_FEATURES = tuple()

        provider = InternalPTP(ie=ie, logger=logger, settings={})

        pot_request.request_source_address = None
        assert provider.request_pot(pot_request)

        pot_request.request_source_address = '0.0.0.0'
        with pytest.raises(
            PoTokenProviderRejectedRequest,
            match='External requests by "example" provider do not support setting source address',
        ):
            provider.request_pot(pot_request)

    def test_provider_supported_external_request_source_address(self, ie, logger, pot_request):
        class InternalPTP(ExamplePTP):
            _SUPPORTED_EXTERNAL_REQUEST_FEATURES = (
                ExternalRequestFeature.SOURCE_ADDRESS,
            )

        provider = InternalPTP(ie=ie, logger=logger, settings={})

        pot_request.request_source_address = None
        assert provider.request_pot(pot_request)

        pot_request.request_source_address = '0.0.0.0'
        assert provider.request_pot(pot_request)

    def test_provider_unsupported_external_request_tls_verification(self, ie, logger, pot_request):
        class InternalPTP(ExamplePTP):
            _SUPPORTED_EXTERNAL_REQUEST_FEATURES = tuple()

        provider = InternalPTP(ie=ie, logger=logger, settings={})

        pot_request.request_verify_tls = True
        assert provider.request_pot(pot_request)

        pot_request.request_verify_tls = False
        with pytest.raises(
            PoTokenProviderRejectedRequest,
            match='External requests by "example" provider do not support ignoring TLS certificate failures',
        ):
            provider.request_pot(pot_request)

    def test_provider_supported_external_request_tls_verification(self, ie, logger, pot_request):
        class InternalPTP(ExamplePTP):
            _SUPPORTED_EXTERNAL_REQUEST_FEATURES = (
                ExternalRequestFeature.DISABLE_TLS_VERIFICATION,
            )

        provider = InternalPTP(ie=ie, logger=logger, settings={})

        pot_request.request_verify_tls = True
        assert provider.request_pot(pot_request)

        pot_request.request_verify_tls = False
        assert provider.request_pot(pot_request)

    def test_provider_request_webpage(self, ie, logger, pot_request):
        provider = ExamplePTP(ie=ie, logger=logger, settings={})

        cookiejar = YoutubeDLCookieJar()
        pot_request.request_headers = HTTPHeaderDict({'User-Agent': 'example-user-agent'})
        pot_request.request_proxy = 'socks5://example-proxy.com'
        pot_request.request_cookiejar = cookiejar

        def mock_urlopen(request):
            return request

        ie._downloader.urlopen = mock_urlopen

        sent_request = provider._request_webpage(Request(
            'https://example.com',
        ), pot_request=pot_request)

        assert sent_request.url == 'https://example.com'
        assert sent_request.headers['User-Agent'] == 'example-user-agent'
        assert sent_request.proxies == {'all': 'socks5://example-proxy.com'}
        assert sent_request.extensions['cookiejar'] is cookiejar
        assert 'Requesting webpage' in logger.messages['info']

    def test_provider_request_webpage_override(self, ie, logger, pot_request):
        provider = ExamplePTP(ie=ie, logger=logger, settings={})

        cookiejar_request = YoutubeDLCookieJar()
        pot_request.request_headers = HTTPHeaderDict({'User-Agent': 'example-user-agent'})
        pot_request.request_proxy = 'socks5://example-proxy.com'
        pot_request.request_cookiejar = cookiejar_request

        def mock_urlopen(request):
            return request

        ie._downloader.urlopen = mock_urlopen

        sent_request = provider._request_webpage(Request(
            'https://example.com',
            headers={'User-Agent': 'override-user-agent-override'},
            proxies={'http': 'http://example-proxy-override.com'},
            extensions={'cookiejar': YoutubeDLCookieJar()},
        ), pot_request=pot_request, note='Custom requesting webpage')

        assert sent_request.url == 'https://example.com'
        assert sent_request.headers['User-Agent'] == 'override-user-agent-override'
        assert sent_request.proxies == {'http': 'http://example-proxy-override.com'}
        assert sent_request.extensions['cookiejar'] is not cookiejar_request
        assert 'Custom requesting webpage' in logger.messages['info']

    def test_provider_request_webpage_no_log(self, ie, logger, pot_request):
        provider = ExamplePTP(ie=ie, logger=logger, settings={})

        def mock_urlopen(request):
            return request

        ie._downloader.urlopen = mock_urlopen

        sent_request = provider._request_webpage(Request(
            'https://example.com',
        ), note=False)

        assert sent_request.url == 'https://example.com'
        assert 'info' not in logger.messages

    def test_provider_request_webpage_no_pot_request(self, ie, logger):
        provider = ExamplePTP(ie=ie, logger=logger, settings={})

        def mock_urlopen(request):
            return request

        ie._downloader.urlopen = mock_urlopen

        sent_request = provider._request_webpage(Request(
            'https://example.com',
        ), pot_request=None)

        assert sent_request.url == 'https://example.com'

    def test_get_config_arg(self, ie, logger):
        provider = ExamplePTP(ie=ie, logger=logger, settings={'abc': ['123D'], 'xyz': ['456a', '789B']})

        assert provider._configuration_arg('abc') == ['123d']
        assert provider._configuration_arg('abc', default=['default']) == ['123d']
        assert provider._configuration_arg('ABC', default=['default']) == ['default']
        assert provider._configuration_arg('abc', casesense=True) == ['123D']
        assert provider._configuration_arg('xyz', casesense=False) == ['456a', '789b']

    def test_require_class_end_with_suffix(self, ie, logger):
        class InvalidSuffix(PoTokenProvider):
            PROVIDER_NAME = 'invalid-suffix'

            def _real_request_pot(self, request: PoTokenRequest) -> PoTokenResponse:
                raise PoTokenProviderRejectedRequest('Not implemented')

            def is_available(self) -> bool:
                return True

        provider = InvalidSuffix(ie=ie, logger=logger, settings={})

        with pytest.raises(AssertionError):
            provider.PROVIDER_KEY  # noqa: B018


class TestPoTokenCacheProvider:
    def test_base_type(self):
        assert issubclass(PoTokenCacheProvider, IEContentProvider)

    def test_create_provider_missing_get_method(self, ie, logger):
        class MissingMethodsPCP(PoTokenCacheProvider):
            def store(self, key: str, value: str, expires_at: int):
                pass

            def delete(self, key: str):
                pass

            def is_available(self) -> bool:
                return True

        with pytest.raises(TypeError):
            MissingMethodsPCP(ie=ie, logger=logger, settings={})

    def test_create_provider_missing_store_method(self, ie, logger):
        class MissingMethodsPCP(PoTokenCacheProvider):
            def get(self, key: str):
                pass

            def delete(self, key: str):
                pass

            def is_available(self) -> bool:
                return True

        with pytest.raises(TypeError):
            MissingMethodsPCP(ie=ie, logger=logger, settings={})

    def test_create_provider_missing_delete_method(self, ie, logger):
        class MissingMethodsPCP(PoTokenCacheProvider):
            def get(self, key: str):
                pass

            def store(self, key: str, value: str, expires_at: int):
                pass

            def is_available(self) -> bool:
                return True

        with pytest.raises(TypeError):
            MissingMethodsPCP(ie=ie, logger=logger, settings={})

    def test_create_provider_missing_is_available_method(self, ie, logger):
        class MissingMethodsPCP(PoTokenCacheProvider):
            def get(self, key: str):
                pass

            def store(self, key: str, value: str, expires_at: int):
                pass

            def delete(self, key: str):
                pass

        with pytest.raises(TypeError):
            MissingMethodsPCP(ie=ie, logger=logger, settings={})

    def test_barebones_provider(self, ie, logger):
        class BarebonesProviderPCP(PoTokenCacheProvider):
            def is_available(self) -> bool:
                return True

            def get(self, key: str):
                return 'example-cache'

            def store(self, key: str, value: str, expires_at: int):
                pass

            def delete(self, key: str):
                pass

        provider = BarebonesProviderPCP(ie=ie, logger=logger, settings={})
        assert provider.PROVIDER_NAME == 'BarebonesProvider'
        assert provider.PROVIDER_KEY == 'BarebonesProvider'
        assert provider.PROVIDER_VERSION == '0.0.0'
        assert provider.BUG_REPORT_MESSAGE == 'please report this issue to the provider developer at (developer has not provided a bug report location) .'

    def test_create_provider_example(self, ie, logger):
        provider = ExampleCacheProviderPCP(ie=ie, logger=logger, settings={})
        assert provider.PROVIDER_NAME == 'example'
        assert provider.PROVIDER_KEY == 'ExampleCacheProvider'
        assert provider.PROVIDER_VERSION == '0.0.1'
        assert provider.BUG_REPORT_MESSAGE == 'please report this issue to the provider developer at https://example.com/issues .'
        assert provider.is_available()

    def test_get_config_arg(self, ie, logger):
        provider = ExampleCacheProviderPCP(ie=ie, logger=logger, settings={'abc': ['123D'], 'xyz': ['456a', '789B']})

        assert provider._configuration_arg('abc') == ['123d']
        assert provider._configuration_arg('abc', default=['default']) == ['123d']
        assert provider._configuration_arg('ABC', default=['default']) == ['default']
        assert provider._configuration_arg('abc', casesense=True) == ['123D']
        assert provider._configuration_arg('xyz', casesense=False) == ['456a', '789b']

    def test_require_class_end_with_suffix(self, ie, logger):
        class InvalidSuffix(PoTokenCacheProvider):
            def get(self, key: str):
                return 'example-cache'

            def store(self, key: str, value: str, expires_at: int):
                pass

            def delete(self, key: str):
                pass

            def is_available(self) -> bool:
                return True

        provider = InvalidSuffix(ie=ie, logger=logger, settings={})

        with pytest.raises(AssertionError):
            provider.PROVIDER_KEY  # noqa: B018


class TestPoTokenCacheSpecProvider:

    def test_base_type(self):
        assert issubclass(PoTokenCacheSpecProvider, IEContentProvider)

    def test_create_provider_missing_supports_method(self, ie, logger):
        class MissingMethodsPCS(PoTokenCacheSpecProvider):
            pass

        with pytest.raises(TypeError):
            MissingMethodsPCS(ie=ie, logger=logger, settings={})

    def test_create_provider_barebones(self, ie, pot_request, logger):
        class BarebonesProviderPCSP(PoTokenCacheSpecProvider):
            def generate_cache_spec(self, request: PoTokenRequest):
                return PoTokenCacheSpec(
                    default_ttl=100,
                    key_bindings={},
                )

        provider = BarebonesProviderPCSP(ie=ie, logger=logger, settings={})
        assert provider.PROVIDER_NAME == 'BarebonesProvider'
        assert provider.PROVIDER_KEY == 'BarebonesProvider'
        assert provider.PROVIDER_VERSION == '0.0.0'
        assert provider.BUG_REPORT_MESSAGE == 'please report this issue to the provider developer at (developer has not provided a bug report location) .'
        assert provider.is_available()
        assert provider.generate_cache_spec(request=pot_request).default_ttl == 100
        assert provider.generate_cache_spec(request=pot_request).key_bindings == {}
        assert provider.generate_cache_spec(request=pot_request).write_policy == CacheProviderWritePolicy.WRITE_ALL

    def test_create_provider_example(self, ie, pot_request, logger):
        provider = ExampleCacheSpecProviderPCSP(ie=ie, logger=logger, settings={})
        assert provider.PROVIDER_NAME == 'example'
        assert provider.PROVIDER_KEY == 'ExampleCacheSpecProvider'
        assert provider.PROVIDER_VERSION == '0.0.1'
        assert provider.BUG_REPORT_MESSAGE == 'please report this issue to the provider developer at https://example.com/issues .'
        assert provider.is_available()
        assert provider.generate_cache_spec(pot_request)
        assert provider.generate_cache_spec(pot_request).key_bindings == {'field': 'example-key'}
        assert provider.generate_cache_spec(pot_request).default_ttl == 60
        assert provider.generate_cache_spec(pot_request).write_policy == CacheProviderWritePolicy.WRITE_FIRST

    def test_get_config_arg(self, ie, logger):
        provider = ExampleCacheSpecProviderPCSP(ie=ie, logger=logger, settings={'abc': ['123D'], 'xyz': ['456a', '789B']})

        assert provider._configuration_arg('abc') == ['123d']
        assert provider._configuration_arg('abc', default=['default']) == ['123d']
        assert provider._configuration_arg('ABC', default=['default']) == ['default']
        assert provider._configuration_arg('abc', casesense=True) == ['123D']
        assert provider._configuration_arg('xyz', casesense=False) == ['456a', '789b']

    def test_require_class_end_with_suffix(self, ie, logger):
        class InvalidSuffix(PoTokenCacheSpecProvider):
            def generate_cache_spec(self, request: PoTokenRequest):
                return None

        provider = InvalidSuffix(ie=ie, logger=logger, settings={})

        with pytest.raises(AssertionError):
            provider.PROVIDER_KEY  # noqa: B018


class TestPoTokenRequest:
    def test_copy_request(self, pot_request):
        copied_request = pot_request.copy()

        assert copied_request is not pot_request
        assert copied_request.context == pot_request.context
        assert copied_request.innertube_context == pot_request.innertube_context
        assert copied_request.innertube_context is not pot_request.innertube_context
        copied_request.innertube_context['client']['clientName'] = 'ANDROID'
        assert pot_request.innertube_context['client']['clientName'] != 'ANDROID'
        assert copied_request.innertube_host == pot_request.innertube_host
        assert copied_request.session_index == pot_request.session_index
        assert copied_request.player_url == pot_request.player_url
        assert copied_request.is_authenticated == pot_request.is_authenticated
        assert copied_request.visitor_data == pot_request.visitor_data
        assert copied_request.data_sync_id == pot_request.data_sync_id
        assert copied_request.video_id == pot_request.video_id
        assert copied_request.request_cookiejar is pot_request.request_cookiejar
        assert copied_request.request_proxy == pot_request.request_proxy
        assert copied_request.request_headers == pot_request.request_headers
        assert copied_request.request_headers is not pot_request.request_headers
        assert copied_request.request_timeout == pot_request.request_timeout
        assert copied_request.request_source_address == pot_request.request_source_address
        assert copied_request.request_verify_tls == pot_request.request_verify_tls
        assert copied_request.bypass_cache == pot_request.bypass_cache


def test_provider_bug_report_message(ie, logger):
    provider = ExamplePTP(ie=ie, logger=logger, settings={})
    assert provider.BUG_REPORT_MESSAGE == 'please report this issue to the provider developer at https://example.com/issues .'

    message = provider_bug_report_message(provider)
    assert message == '; please report this issue to the provider developer at https://example.com/issues .'

    message_before = provider_bug_report_message(provider, before='custom message!')
    assert message_before == 'custom message! Please report this issue to the provider developer at https://example.com/issues .'


def test_register_provider(ie):

    @register_provider
    class UnavailableProviderPTP(PoTokenProvider):
        def is_available(self) -> bool:
            return False

        def _real_request_pot(self, request: PoTokenRequest) -> PoTokenResponse:
            raise PoTokenProviderRejectedRequest('Not implemented')

    assert _pot_providers.value.get('UnavailableProvider') == UnavailableProviderPTP
    _pot_providers.value.pop('UnavailableProvider')


def test_register_pot_preference(ie):
    before = len(_ptp_preferences.value)

    @register_preference(ExamplePTP)
    def unavailable_preference(provider: PoTokenProvider, request: PoTokenRequest):
        return 1

    assert len(_ptp_preferences.value) == before + 1


def test_register_cache_provider(ie):

    @cache.register_provider
    class UnavailableCacheProviderPCP(PoTokenCacheProvider):
        def is_available(self) -> bool:
            return False

        def get(self, key: str):
            return 'example-cache'

        def store(self, key: str, value: str, expires_at: int):
            pass

        def delete(self, key: str):
            pass

    assert _pot_cache_providers.value.get('UnavailableCacheProvider') == UnavailableCacheProviderPCP
    _pot_cache_providers.value.pop('UnavailableCacheProvider')


def test_register_cache_provider_spec(ie):

    @cache.register_spec
    class UnavailableCacheProviderPCSP(PoTokenCacheSpecProvider):
        def is_available(self) -> bool:
            return False

        def generate_cache_spec(self, request: PoTokenRequest):
            return None

    assert _pot_pcs_providers.value.get('UnavailableCacheProvider') == UnavailableCacheProviderPCSP
    _pot_pcs_providers.value.pop('UnavailableCacheProvider')


def test_register_cache_provider_preference(ie):
    before = len(_pot_cache_provider_preferences.value)

    @cache.register_preference(ExampleCacheProviderPCP)
    def unavailable_preference(provider: PoTokenCacheProvider, request: PoTokenRequest):
        return 1

    assert len(_pot_cache_provider_preferences.value) == before + 1


def test_logger_log_level(logger):
    assert logger.LogLevel('INFO') == logger.LogLevel.INFO
    assert logger.LogLevel('debuG') == logger.LogLevel.DEBUG
    assert logger.LogLevel(10) == logger.LogLevel.DEBUG
    assert logger.LogLevel('UNKNOWN') == logger.LogLevel.INFO


def test_configuration_arg():
    config = {'abc': ['123D'], 'xyz': ['456a', '789B']}

    assert configuration_arg(config, 'abc') == ['123d']
    assert configuration_arg(config, 'abc', default=['default']) == ['123d']
    assert configuration_arg(config, 'ABC', default=['default']) == ['default']
    assert configuration_arg(config, 'abc', casesense=True) == ['123D']
    assert configuration_arg(config, 'xyz', casesense=False) == ['456a', '789b']
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
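The suite above pins down the provider plugin contract: a subclass whose name ends in 'PTP', an is_available() method, a _real_request_pot() hook, and registration via @register_provider. A minimal sketch of a conforming provider, mirroring ExamplePTP and test_register_provider from the file (the class name 'SketchPTP' and its token value are hypothetical):

from yt_dlp.extractor.youtube.pot.provider import (
    PoTokenContext,
    PoTokenProvider,
    PoTokenRequest,
    PoTokenResponse,
    register_provider,
)


@register_provider
class SketchPTP(PoTokenProvider):  # class name must end in 'PTP', per the suffix tests
    PROVIDER_NAME = 'sketch'
    PROVIDER_VERSION = '0.0.1'
    BUG_REPORT_LOCATION = 'https://example.com/issues'

    _SUPPORTED_CLIENTS = ('WEB',)
    _SUPPORTED_CONTEXTS = (PoTokenContext.GVS,)

    def is_available(self) -> bool:
        return True

    def _real_request_pot(self, request: PoTokenRequest) -> PoTokenResponse:
        # request_pot() routes here only after the client/context/feature
        # support checks exercised by the tests above have passed
        return PoTokenResponse('sketch-token', expires_at=123)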
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_pot/test_pot_builtin_utils.py
test/test_pot/test_pot_builtin_utils.py
import pytest

from yt_dlp.extractor.youtube.pot.provider import (
    PoTokenContext,
)

from yt_dlp.extractor.youtube.pot.utils import get_webpo_content_binding, ContentBindingType


class TestGetWebPoContentBinding:

    @pytest.mark.parametrize('client_name, context, is_authenticated, expected', [
        *[(client, context, is_authenticated, expected) for client in [
            'WEB', 'MWEB', 'TVHTML5', 'WEB_EMBEDDED_PLAYER', 'WEB_CREATOR', 'TVHTML5_SIMPLY_EMBEDDED_PLAYER', 'TVHTML5_SIMPLY']
            for context, is_authenticated, expected in [
                (PoTokenContext.GVS, False, ('example-visitor-data', ContentBindingType.VISITOR_DATA)),
                (PoTokenContext.PLAYER, False, ('example-video-id', ContentBindingType.VIDEO_ID)),
                (PoTokenContext.SUBS, False, ('example-video-id', ContentBindingType.VIDEO_ID)),
                (PoTokenContext.GVS, True, ('example-data-sync-id', ContentBindingType.DATASYNC_ID)),
        ]],
        ('WEB_REMIX', PoTokenContext.GVS, False, ('example-visitor-data', ContentBindingType.VISITOR_DATA)),
        ('WEB_REMIX', PoTokenContext.PLAYER, False, ('example-visitor-data', ContentBindingType.VISITOR_DATA)),
        ('ANDROID', PoTokenContext.GVS, False, (None, None)),
        ('IOS', PoTokenContext.GVS, False, (None, None)),
    ])
    def test_get_webpo_content_binding(self, pot_request, client_name, context, is_authenticated, expected):
        pot_request.innertube_context['client']['clientName'] = client_name
        pot_request.context = context
        pot_request.is_authenticated = is_authenticated
        assert get_webpo_content_binding(pot_request) == expected

    def test_extract_visitor_id(self, pot_request):
        pot_request.visitor_data = 'CgsxMjNhYmNYWVpfLSiA4s%2DqBg%3D%3D'
        assert get_webpo_content_binding(pot_request, bind_to_visitor_id=True) == ('123abcXYZ_-', ContentBindingType.VISITOR_ID)

    def test_invalid_visitor_id(self, pot_request):
        # visitor id not alphanumeric (i.e. protobuf extraction failed)
        pot_request.visitor_data = 'CggxMjM0NTY3OCiA4s-qBg%3D%3D'
        assert get_webpo_content_binding(pot_request, bind_to_visitor_id=True) == (pot_request.visitor_data, ContentBindingType.VISITOR_DATA)

    def test_no_visitor_id(self, pot_request):
        pot_request.visitor_data = 'KIDiz6oG'
        assert get_webpo_content_binding(pot_request, bind_to_visitor_id=True) == (pot_request.visitor_data, ContentBindingType.VISITOR_DATA)

    def test_invalid_base64(self, pot_request):
        pot_request.visitor_data = 'invalid-base64'
        assert get_webpo_content_binding(pot_request, bind_to_visitor_id=True) == (pot_request.visitor_data, ContentBindingType.VISITOR_DATA)

    def test_gvs_video_id_binding_experiment(self, pot_request):
        pot_request.context = PoTokenContext.GVS
        pot_request._gvs_bind_to_video_id = True
        assert get_webpo_content_binding(pot_request) == ('example-video-id', ContentBindingType.VIDEO_ID)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
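For reference, the parametrized cases above reduce to a small rule set. The table below is a plain-Python restatement for illustration only, not an API of yt-dlp:

# WebPO content-binding rules implied by the tests above (illustrative):
# (client family, context, is_authenticated) -> binding type
WEBPO_BINDING_RULES = {
    ('web-based', 'GVS', False): 'visitor_data',  # or 'visitor_id' when bind_to_visitor_id extraction succeeds
    ('web-based', 'GVS', True): 'datasync_id',
    ('web-based', 'PLAYER', False): 'video_id',
    ('web-based', 'SUBS', False): 'video_id',
    ('WEB_REMIX', 'PLAYER', False): 'visitor_data',  # the music client binds player tokens to visitor data too
    ('native', 'GVS', False): None,  # e.g. ANDROID, IOS: no WebPO binding
}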