diff --git a/evalkit_cambrian/lib/python3.10/site-packages/cpuinfo/__init__.py b/evalkit_cambrian/lib/python3.10/site-packages/cpuinfo/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f0ae1cedef2202096faadc3ff0dd0614f8adf394 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/cpuinfo/__init__.py @@ -0,0 +1,5 @@ + +import sys +from cpuinfo.cpuinfo import * + + diff --git a/evalkit_cambrian/lib/python3.10/site-packages/cpuinfo/__main__.py b/evalkit_cambrian/lib/python3.10/site-packages/cpuinfo/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..09ebf764121cc9bae49421535e56d0c5a9394290 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/cpuinfo/__main__.py @@ -0,0 +1,5 @@ + +import cpuinfo + +cpuinfo.main() + diff --git a/evalkit_cambrian/lib/python3.10/site-packages/cpuinfo/__pycache__/__init__.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/cpuinfo/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7b43ae8ed6001af57547a965ee06ec16c33e37a Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/cpuinfo/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/cpuinfo/__pycache__/__main__.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/cpuinfo/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63f0022fb4c6b89d47bd18293e8b372bf4f9808a Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/cpuinfo/__pycache__/__main__.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/cpuinfo/__pycache__/cpuinfo.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/cpuinfo/__pycache__/cpuinfo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f528521b7f294d9fc72ddd5e29b7b8c29521e31 Binary files 
/dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/cpuinfo/__pycache__/cpuinfo.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/cpuinfo/cpuinfo.py b/evalkit_cambrian/lib/python3.10/site-packages/cpuinfo/cpuinfo.py new file mode 100644 index 0000000000000000000000000000000000000000..ea2f90e397404bb200d8c74a0331b9ede5c5b106 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/cpuinfo/cpuinfo.py @@ -0,0 +1,2827 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- + +# Copyright (c) 2014-2022 Matthew Brennan Jones +# Py-cpuinfo gets CPU info with pure Python +# It uses the MIT License +# It is hosted at: https://github.com/workhorsy/py-cpuinfo +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +CPUINFO_VERSION = (9, 0, 0) +CPUINFO_VERSION_STRING = '.'.join([str(n) for n in CPUINFO_VERSION]) + +import os, sys +import platform +import multiprocessing +import ctypes + + +CAN_CALL_CPUID_IN_SUBPROCESS = True + +g_trace = None + + +class Trace(object): + def __init__(self, is_active, is_stored_in_string): + self._is_active = is_active + if not self._is_active: + return + + from datetime import datetime + from io import StringIO + + if is_stored_in_string: + self._output = StringIO() + else: + date = datetime.now().strftime("%Y-%m-%d_%H-%M-%S-%f") + self._output = open('cpuinfo_trace_{0}.trace'.format(date), 'w') + + self._stdout = StringIO() + self._stderr = StringIO() + self._err = None + + def header(self, msg): + if not self._is_active: return + + from inspect import stack + frame = stack()[1] + file = frame[1] + line = frame[2] + self._output.write("{0} ({1} {2})\n".format(msg, file, line)) + self._output.flush() + + def success(self): + if not self._is_active: return + + from inspect import stack + frame = stack()[1] + file = frame[1] + line = frame[2] + + self._output.write("Success ... ({0} {1})\n\n".format(file, line)) + self._output.flush() + + def fail(self, msg): + if not self._is_active: return + + from inspect import stack + frame = stack()[1] + file = frame[1] + line = frame[2] + + if isinstance(msg, str): + msg = ''.join(['\t' + line for line in msg.split('\n')]) + '\n' + + self._output.write(msg) + self._output.write("Failed ... ({0} {1})\n\n".format(file, line)) + self._output.flush() + elif isinstance(msg, Exception): + from traceback import format_exc + err_string = format_exc() + self._output.write("\tFailed ... 
({0} {1})\n".format(file, line)) + self._output.write(''.join(['\t\t{0}\n'.format(n) for n in err_string.split('\n')]) + '\n') + self._output.flush() + + def command_header(self, msg): + if not self._is_active: return + + from inspect import stack + frame = stack()[3] + file = frame[1] + line = frame[2] + self._output.write("\t{0} ({1} {2})\n".format(msg, file, line)) + self._output.flush() + + def command_output(self, msg, output): + if not self._is_active: return + + self._output.write("\t\t{0}\n".format(msg)) + self._output.write(''.join(['\t\t\t{0}\n'.format(n) for n in output.split('\n')]) + '\n') + self._output.flush() + + def keys(self, keys, info, new_info): + if not self._is_active: return + + from inspect import stack + frame = stack()[2] + file = frame[1] + line = frame[2] + + # List updated keys + self._output.write("\tChanged keys ({0} {1})\n".format(file, line)) + changed_keys = [key for key in keys if key in info and key in new_info and info[key] != new_info[key]] + if changed_keys: + for key in changed_keys: + self._output.write('\t\t{0}: {1} to {2}\n'.format(key, info[key], new_info[key])) + else: + self._output.write('\t\tNone\n') + + # List new keys + self._output.write("\tNew keys ({0} {1})\n".format(file, line)) + new_keys = [key for key in keys if key in new_info and key not in info] + if new_keys: + for key in new_keys: + self._output.write('\t\t{0}: {1}\n'.format(key, new_info[key])) + else: + self._output.write('\t\tNone\n') + + self._output.write('\n') + self._output.flush() + + def write(self, msg): + if not self._is_active: return + + self._output.write(msg + '\n') + self._output.flush() + + def to_dict(self, info, is_fail): + return { + 'output' : self._output.getvalue(), + 'stdout' : self._stdout.getvalue(), + 'stderr' : self._stderr.getvalue(), + 'info' : info, + 'err' : self._err, + 'is_fail' : is_fail + } + +class DataSource(object): + bits = platform.architecture()[0] + cpu_count = multiprocessing.cpu_count() + is_windows = 
platform.system().lower() == 'windows' + arch_string_raw = platform.machine() + uname_string_raw = platform.uname()[5] + can_cpuid = True + + @staticmethod + def has_proc_cpuinfo(): + return os.path.exists('/proc/cpuinfo') + + @staticmethod + def has_dmesg(): + return len(_program_paths('dmesg')) > 0 + + @staticmethod + def has_var_run_dmesg_boot(): + uname = platform.system().strip().strip('"').strip("'").strip().lower() + return 'linux' in uname and os.path.exists('/var/run/dmesg.boot') + + @staticmethod + def has_cpufreq_info(): + return len(_program_paths('cpufreq-info')) > 0 + + @staticmethod + def has_sestatus(): + return len(_program_paths('sestatus')) > 0 + + @staticmethod + def has_sysctl(): + return len(_program_paths('sysctl')) > 0 + + @staticmethod + def has_isainfo(): + return len(_program_paths('isainfo')) > 0 + + @staticmethod + def has_kstat(): + return len(_program_paths('kstat')) > 0 + + @staticmethod + def has_sysinfo(): + uname = platform.system().strip().strip('"').strip("'").strip().lower() + is_beos = 'beos' in uname or 'haiku' in uname + return is_beos and len(_program_paths('sysinfo')) > 0 + + @staticmethod + def has_lscpu(): + return len(_program_paths('lscpu')) > 0 + + @staticmethod + def has_ibm_pa_features(): + return len(_program_paths('lsprop')) > 0 + + @staticmethod + def has_wmic(): + returncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version']) + return returncode == 0 and len(output) > 0 + + @staticmethod + def cat_proc_cpuinfo(): + return _run_and_get_stdout(['cat', '/proc/cpuinfo']) + + @staticmethod + def cpufreq_info(): + return _run_and_get_stdout(['cpufreq-info']) + + @staticmethod + def sestatus_b(): + return _run_and_get_stdout(['sestatus', '-b']) + + @staticmethod + def dmesg_a(): + return _run_and_get_stdout(['dmesg', '-a']) + + @staticmethod + def cat_var_run_dmesg_boot(): + return _run_and_get_stdout(['cat', '/var/run/dmesg.boot']) + + @staticmethod + def sysctl_machdep_cpu_hw_cpufrequency(): + return 
_run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency']) + + @staticmethod + def isainfo_vb(): + return _run_and_get_stdout(['isainfo', '-vb']) + + @staticmethod + def kstat_m_cpu_info(): + return _run_and_get_stdout(['kstat', '-m', 'cpu_info']) + + @staticmethod + def sysinfo_cpu(): + return _run_and_get_stdout(['sysinfo', '-cpu']) + + @staticmethod + def lscpu(): + return _run_and_get_stdout(['lscpu']) + + @staticmethod + def ibm_pa_features(): + import glob + + ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features') + if ibm_features: + return _run_and_get_stdout(['lsprop', ibm_features[0]]) + + @staticmethod + def wmic_cpu(): + return _run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list']) + + @staticmethod + def winreg_processor_brand(): + processor_brand = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "ProcessorNameString") + return processor_brand.strip() + + @staticmethod + def winreg_vendor_id_raw(): + vendor_id_raw = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "VendorIdentifier") + return vendor_id_raw + + @staticmethod + def winreg_arch_string_raw(): + arch_string_raw = _read_windows_registry_key(r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment", "PROCESSOR_ARCHITECTURE") + return arch_string_raw + + @staticmethod + def winreg_hz_actual(): + hz_actual = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "~Mhz") + hz_actual = _to_decimal_string(hz_actual) + return hz_actual + + @staticmethod + def winreg_feature_bits(): + feature_bits = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "FeatureSet") + return feature_bits + + +def _program_paths(program_name): + paths = [] + exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep)) + for p in os.environ['PATH'].split(os.pathsep): + p = os.path.join(p, 
program_name) + if os.access(p, os.X_OK): + paths.append(p) + for e in exts: + pext = p + e + if os.access(pext, os.X_OK): + paths.append(pext) + return paths + +def _run_and_get_stdout(command, pipe_command=None): + from subprocess import Popen, PIPE + + g_trace.command_header('Running command "' + ' '.join(command) + '" ...') + + # Run the command normally + if not pipe_command: + p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE) + # Run the command and pipe it into another command + else: + p2 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE) + p1 = Popen(pipe_command, stdin=p2.stdout, stdout=PIPE, stderr=PIPE) + p2.stdout.close() + + # Get the stdout and stderr + stdout_output, stderr_output = p1.communicate() + stdout_output = stdout_output.decode(encoding='UTF-8') + stderr_output = stderr_output.decode(encoding='UTF-8') + + # Send the result to the logger + g_trace.command_output('return code:', str(p1.returncode)) + g_trace.command_output('stdout:', stdout_output) + + # Return the return code and stdout + return p1.returncode, stdout_output + +def _read_windows_registry_key(key_name, field_name): + g_trace.command_header('Reading Registry key "{0}" field "{1}" ...'.format(key_name, field_name)) + + try: + import _winreg as winreg + except ImportError as err: + try: + import winreg + except ImportError as err: + pass + + key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, key_name) + value = winreg.QueryValueEx(key, field_name)[0] + winreg.CloseKey(key) + g_trace.command_output('value:', str(value)) + return value + +# Make sure we are running on a supported system +def _check_arch(): + arch, bits = _parse_arch(DataSource.arch_string_raw) + if not arch in ['X86_32', 'X86_64', 'ARM_7', 'ARM_8', + 'PPC_64', 'S390X', 'MIPS_32', 'MIPS_64', + "RISCV_32", "RISCV_64"]: + raise Exception("py-cpuinfo currently only works on X86 " + "and some ARM/PPC/S390X/MIPS/RISCV CPUs.") + +def _obj_to_b64(thing): + import pickle + import base64 + + a = thing + b = 
pickle.dumps(a) + c = base64.b64encode(b) + d = c.decode('utf8') + return d + +def _b64_to_obj(thing): + import pickle + import base64 + + try: + a = base64.b64decode(thing) + b = pickle.loads(a) + return b + except Exception: + return {} + +def _utf_to_str(input): + if isinstance(input, list): + return [_utf_to_str(element) for element in input] + elif isinstance(input, dict): + return {_utf_to_str(key): _utf_to_str(value) + for key, value in input.items()} + else: + return input + +def _copy_new_fields(info, new_info): + keys = [ + 'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly', + 'hz_advertised', 'hz_actual', 'arch', 'bits', 'count', + 'arch_string_raw', 'uname_string_raw', + 'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity', + 'stepping', 'model', 'family', + 'processor_type', 'flags', + 'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size' + ] + + g_trace.keys(keys, info, new_info) + + # Update the keys with new values + for key in keys: + if new_info.get(key, None) and not info.get(key, None): + info[key] = new_info[key] + elif key == 'flags' and new_info.get('flags'): + for f in new_info['flags']: + if f not in info['flags']: info['flags'].append(f) + info['flags'].sort() + +def _get_field_actual(cant_be_number, raw_string, field_names): + for line in raw_string.splitlines(): + for field_name in field_names: + field_name = field_name.lower() + if ':' in line: + left, right = line.split(':', 1) + left = left.strip().lower() + right = right.strip() + if left == field_name and len(right) > 0: + if cant_be_number: + if not right.isdigit(): + return right + else: + return right + + return None + +def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names): + retval = _get_field_actual(cant_be_number, raw_string, field_names) + + # Convert the return value + if retval and convert_to: + try: + retval = convert_to(retval) + except Exception: + retval = default_value + + 
# Return the default if there is no return value + if retval is None: + retval = default_value + + return retval + +def _to_decimal_string(ticks): + try: + # Convert to string + ticks = '{0}'.format(ticks) + # Sometimes ',' is used as a decimal separator + ticks = ticks.replace(',', '.') + + # Strip off non numbers and decimal places + ticks = "".join(n for n in ticks if n.isdigit() or n=='.').strip() + if ticks == '': + ticks = '0' + + # Add decimal if missing + if '.' not in ticks: + ticks = '{0}.0'.format(ticks) + + # Remove trailing zeros + ticks = ticks.rstrip('0') + + # Add one trailing zero for empty right side + if ticks.endswith('.'): + ticks = '{0}0'.format(ticks) + + # Make sure the number can be converted to a float + ticks = float(ticks) + ticks = '{0}'.format(ticks) + return ticks + except Exception: + return '0.0' + +def _hz_short_to_full(ticks, scale): + try: + # Make sure the number can be converted to a float + ticks = float(ticks) + ticks = '{0}'.format(ticks) + + # Scale the numbers + hz = ticks.lstrip('0') + old_index = hz.index('.') + hz = hz.replace('.', '') + hz = hz.ljust(scale + old_index+1, '0') + new_index = old_index + scale + hz = '{0}.{1}'.format(hz[:new_index], hz[new_index:]) + left, right = hz.split('.') + left, right = int(left), int(right) + return (left, right) + except Exception: + return (0, 0) + +def _hz_friendly_to_full(hz_string): + try: + hz_string = hz_string.strip().lower() + hz, scale = (None, None) + + if hz_string.endswith('ghz'): + scale = 9 + elif hz_string.endswith('mhz'): + scale = 6 + elif hz_string.endswith('hz'): + scale = 0 + + hz = "".join(n for n in hz_string if n.isdigit() or n=='.').strip() + if not '.' 
in hz: + hz += '.0' + + hz, scale = _hz_short_to_full(hz, scale) + + return (hz, scale) + except Exception: + return (0, 0) + +def _hz_short_to_friendly(ticks, scale): + try: + # Get the raw Hz as a string + left, right = _hz_short_to_full(ticks, scale) + result = '{0}.{1}'.format(left, right) + + # Get the location of the dot, and remove said dot + dot_index = result.index('.') + result = result.replace('.', '') + + # Get the Hz symbol and scale + symbol = "Hz" + scale = 0 + if dot_index > 9: + symbol = "GHz" + scale = 9 + elif dot_index > 6: + symbol = "MHz" + scale = 6 + elif dot_index > 3: + symbol = "KHz" + scale = 3 + + # Get the Hz with the dot at the new scaled point + result = '{0}.{1}'.format(result[:-scale-1], result[-scale-1:]) + + # Format the ticks to have 4 numbers after the decimal + # and remove any superfluous zeroes. + result = '{0:.4f} {1}'.format(float(result), symbol) + result = result.rstrip('0') + return result + except Exception: + return '0.0000 Hz' + +def _to_friendly_bytes(input): + import re + + if not input: + return input + input = "{0}".format(input) + + formats = { + r"^[0-9]+B$" : 'B', + r"^[0-9]+K$" : 'KB', + r"^[0-9]+M$" : 'MB', + r"^[0-9]+G$" : 'GB' + } + + for pattern, friendly_size in formats.items(): + if re.match(pattern, input): + return "{0} {1}".format(input[ : -1].strip(), friendly_size) + + return input + +def _friendly_bytes_to_int(friendly_bytes): + input = friendly_bytes.lower() + + formats = [ + {'gib' : 1024 * 1024 * 1024}, + {'mib' : 1024 * 1024}, + {'kib' : 1024}, + + {'gb' : 1024 * 1024 * 1024}, + {'mb' : 1024 * 1024}, + {'kb' : 1024}, + + {'g' : 1024 * 1024 * 1024}, + {'m' : 1024 * 1024}, + {'k' : 1024}, + {'b' : 1}, + ] + + try: + for entry in formats: + pattern = list(entry.keys())[0] + multiplier = list(entry.values())[0] + if input.endswith(pattern): + return int(input.split(pattern)[0].strip()) * multiplier + + except Exception as err: + pass + + return friendly_bytes + +def 
_parse_cpu_brand_string(cpu_string): + # Just return 0 if the processor brand does not have the Hz + if not 'hz' in cpu_string.lower(): + return ('0.0', 0) + + hz = cpu_string.lower() + scale = 0 + + if hz.endswith('mhz'): + scale = 6 + elif hz.endswith('ghz'): + scale = 9 + if '@' in hz: + hz = hz.split('@')[1] + else: + hz = hz.rsplit(None, 1)[1] + + hz = hz.rstrip('mhz').rstrip('ghz').strip() + hz = _to_decimal_string(hz) + + return (hz, scale) + +def _parse_cpu_brand_string_dx(cpu_string): + import re + + # Find all the strings inside brackets () + starts = [m.start() for m in re.finditer(r"\(", cpu_string)] + ends = [m.start() for m in re.finditer(r"\)", cpu_string)] + insides = {k: v for k, v in zip(starts, ends)} + insides = [cpu_string[start+1 : end] for start, end in insides.items()] + + # Find all the fields + vendor_id, stepping, model, family = (None, None, None, None) + for inside in insides: + for pair in inside.split(','): + pair = [n.strip() for n in pair.split(':')] + if len(pair) > 1: + name, value = pair[0], pair[1] + if name == 'origin': + vendor_id = value.strip('"') + elif name == 'stepping': + stepping = int(value.lstrip('0x'), 16) + elif name == 'model': + model = int(value.lstrip('0x'), 16) + elif name in ['fam', 'family']: + family = int(value.lstrip('0x'), 16) + + # Find the Processor Brand + # Strip off extra strings in brackets at end + brand = cpu_string.strip() + is_working = True + while is_working: + is_working = False + for inside in insides: + full = "({0})".format(inside) + if brand.endswith(full): + brand = brand[ :-len(full)].strip() + is_working = True + + # Find the Hz in the brand string + hz_brand, scale = _parse_cpu_brand_string(brand) + + # Find Hz inside brackets () after the brand string + if hz_brand == '0.0': + for inside in insides: + hz = inside + for entry in ['GHz', 'MHz', 'Hz']: + if entry in hz: + hz = "CPU @ " + hz[ : hz.find(entry) + len(entry)] + hz_brand, scale = _parse_cpu_brand_string(hz) + break + + 
return (hz_brand, scale, brand, vendor_id, stepping, model, family) + +def _parse_dmesg_output(output): + try: + # Get all the dmesg lines that might contain a CPU string + lines = output.split(' CPU0:')[1:] + \ + output.split(' CPU1:')[1:] + \ + output.split(' CPU:')[1:] + \ + output.split('\nCPU0:')[1:] + \ + output.split('\nCPU1:')[1:] + \ + output.split('\nCPU:')[1:] + lines = [l.split('\n')[0].strip() for l in lines] + + # Convert the lines to CPU strings + cpu_strings = [_parse_cpu_brand_string_dx(l) for l in lines] + + # Find the CPU string that has the most fields + best_string = None + highest_count = 0 + for cpu_string in cpu_strings: + count = sum([n is not None for n in cpu_string]) + if count > highest_count: + highest_count = count + best_string = cpu_string + + # If no CPU string was found, return {} + if not best_string: + return {} + + hz_actual, scale, processor_brand, vendor_id, stepping, model, family = best_string + + # Origin + if ' Origin=' in output: + fields = output[output.find(' Origin=') : ].split('\n')[0] + fields = fields.strip().split() + fields = [n.strip().split('=') for n in fields] + fields = [{n[0].strip().lower() : n[1].strip()} for n in fields] + + for field in fields: + name = list(field.keys())[0] + value = list(field.values())[0] + + if name == 'origin': + vendor_id = value.strip('"') + elif name == 'stepping': + stepping = int(value.lstrip('0x'), 16) + elif name == 'model': + model = int(value.lstrip('0x'), 16) + elif name in ['fam', 'family']: + family = int(value.lstrip('0x'), 16) + + # Features + flag_lines = [] + for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']: + if category in output: + flag_lines.append(output.split(category)[1].split('\n')[0]) + + flags = [] + for line in flag_lines: + line = line.split('<')[1].split('>')[0].lower() + for flag in line.split(','): + flags.append(flag) + flags.sort() + + # Convert from GHz/MHz string to Hz + hz_advertised, scale = 
_parse_cpu_brand_string(processor_brand) + + # If advertised hz not found, use the actual hz + if hz_advertised == '0.0': + scale = 6 + hz_advertised = _to_decimal_string(hz_actual) + + info = { + 'vendor_id_raw' : vendor_id, + 'brand_raw' : processor_brand, + + 'stepping' : stepping, + 'model' : model, + 'family' : family, + 'flags' : flags + } + + if hz_advertised and hz_advertised != '0.0': + info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale) + info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale) + + if hz_advertised and hz_advertised != '0.0': + info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale) + info['hz_actual'] = _hz_short_to_full(hz_actual, scale) + + return {k: v for k, v in info.items() if v} + except Exception as err: + g_trace.fail(err) + #raise + + return {} + +def _parse_arch(arch_string_raw): + import re + + arch, bits = None, None + arch_string_raw = arch_string_raw.lower() + + # X86 + if re.match(r'^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw): + arch = 'X86_32' + bits = 32 + elif re.match(r'^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw): + arch = 'X86_64' + bits = 64 + # ARM + elif re.match(r'^armv8-a|aarch64|arm64$', arch_string_raw): + arch = 'ARM_8' + bits = 64 + elif re.match(r'^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw): + arch = 'ARM_7' + bits = 32 + elif re.match(r'^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw): + arch = 'ARM_8' + bits = 32 + # PPC + elif re.match(r'^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw): + arch = 'PPC_32' + bits = 32 + elif re.match(r'^powerpc$|^ppc64$|^ppc64le$', arch_string_raw): + arch = 'PPC_64' + bits = 64 + # SPARC + elif re.match(r'^sparc32$|^sparc$', arch_string_raw): + arch = 'SPARC_32' + bits = 32 + elif re.match(r'^sparc64$|^sun4u$|^sun4v$', arch_string_raw): + arch = 'SPARC_64' + bits = 64 + # S390X + elif re.match(r'^s390x$', arch_string_raw): + arch = 
'S390X' + bits = 64 + elif arch_string_raw == 'mips': + arch = 'MIPS_32' + bits = 32 + elif arch_string_raw == 'mips64': + arch = 'MIPS_64' + bits = 64 + # RISCV + elif re.match(r'^riscv$|^riscv32$|^riscv32be$', arch_string_raw): + arch = 'RISCV_32' + bits = 32 + elif re.match(r'^riscv64$|^riscv64be$', arch_string_raw): + arch = 'RISCV_64' + bits = 64 + + return (arch, bits) + +def _is_bit_set(reg, bit): + mask = 1 << bit + is_set = reg & mask > 0 + return is_set + + +def _is_selinux_enforcing(trace): + # Just return if the SE Linux Status Tool is not installed + if not DataSource.has_sestatus(): + trace.fail('Failed to find sestatus.') + return False + + # Run the sestatus, and just return if it failed to run + returncode, output = DataSource.sestatus_b() + if returncode != 0: + trace.fail('Failed to run sestatus. Skipping ...') + return False + + # Figure out if explicitly in enforcing mode + for line in output.splitlines(): + line = line.strip().lower() + if line.startswith("current mode:"): + if line.endswith("enforcing"): + return True + else: + return False + + # Figure out if we can execute heap and execute memory + can_selinux_exec_heap = False + can_selinux_exec_memory = False + for line in output.splitlines(): + line = line.strip().lower() + if line.startswith("allow_execheap") and line.endswith("on"): + can_selinux_exec_heap = True + elif line.startswith("allow_execmem") and line.endswith("on"): + can_selinux_exec_memory = True + + trace.command_output('can_selinux_exec_heap:', can_selinux_exec_heap) + trace.command_output('can_selinux_exec_memory:', can_selinux_exec_memory) + + return (not can_selinux_exec_heap or not can_selinux_exec_memory) + +def _filter_dict_keys_with_empty_values(info, acceptable_values = {}): + filtered_info = {} + for key in info: + value = info[key] + + # Keep if value is acceptable + if key in acceptable_values: + if acceptable_values[key] == value: + filtered_info[key] = value + continue + + # Filter out None, 0, "", (), {}, 
[] + if not value: + continue + + # Filter out (0, 0) + if value == (0, 0): + continue + + # Filter out -1 + if value == -1: + continue + + # Filter out strings that start with "0.0" + if type(value) == str and value.startswith('0.0'): + continue + + filtered_info[key] = value + + return filtered_info + +class ASM(object): + def __init__(self, restype=None, argtypes=(), machine_code=[]): + self.restype = restype + self.argtypes = argtypes + self.machine_code = machine_code + self.prochandle = None + self.mm = None + self.func = None + self.address = None + self.size = 0 + + def compile(self): + machine_code = bytes.join(b'', self.machine_code) + self.size = ctypes.c_size_t(len(machine_code)) + + if DataSource.is_windows: + # Allocate a memory segment the size of the machine code, and make it executable + size = len(machine_code) + # Alloc at least 1 page to ensure we own all pages that we want to change protection on + if size < 0x1000: size = 0x1000 + MEM_COMMIT = ctypes.c_ulong(0x1000) + PAGE_READWRITE = ctypes.c_ulong(0x4) + pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc + pfnVirtualAlloc.restype = ctypes.c_void_p + self.address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE) + if not self.address: + raise Exception("Failed to VirtualAlloc") + + # Copy the machine code into the memory segment + memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr) + if memmove(self.address, machine_code, size) < 0: + raise Exception("Failed to memmove") + + # Enable execute permissions + PAGE_EXECUTE = ctypes.c_ulong(0x10) + old_protect = ctypes.c_ulong(0) + pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect + res = pfnVirtualProtect(ctypes.c_void_p(self.address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect)) + if not res: + raise Exception("Failed VirtualProtect") + + # Flush Instruction Cache + # First, get process Handle + if not self.prochandle: + 
pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess + pfnGetCurrentProcess.restype = ctypes.c_void_p + self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess()) + # Actually flush cache + res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(self.address), ctypes.c_size_t(size)) + if not res: + raise Exception("Failed FlushInstructionCache") + else: + from mmap import mmap, MAP_PRIVATE, MAP_ANONYMOUS, PROT_WRITE, PROT_READ, PROT_EXEC + + # Allocate a private and executable memory segment the size of the machine code + machine_code = bytes.join(b'', self.machine_code) + self.size = len(machine_code) + self.mm = mmap(-1, self.size, flags=MAP_PRIVATE | MAP_ANONYMOUS, prot=PROT_WRITE | PROT_READ | PROT_EXEC) + + # Copy the machine code into the memory segment + self.mm.write(machine_code) + self.address = ctypes.addressof(ctypes.c_int.from_buffer(self.mm)) + + # Cast the memory segment into a function + functype = ctypes.CFUNCTYPE(self.restype, *self.argtypes) + self.func = functype(self.address) + + def run(self): + # Call the machine code like a function + retval = self.func() + + return retval + + def free(self): + # Free the function memory segment + if DataSource.is_windows: + MEM_RELEASE = ctypes.c_ulong(0x8000) + ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(self.address), ctypes.c_size_t(0), MEM_RELEASE) + else: + self.mm.close() + + self.prochandle = None + self.mm = None + self.func = None + self.address = None + self.size = 0 + + +class CPUID(object): + def __init__(self, trace=None): + if trace is None: + trace = Trace(False, False) + + # Figure out if SE Linux is on and in enforcing mode + self.is_selinux_enforcing = _is_selinux_enforcing(trace) + + def _asm_func(self, restype=None, argtypes=(), machine_code=[]): + asm = ASM(restype, argtypes, machine_code) + asm.compile() + return asm + + def _run_asm(self, *machine_code): + asm = ASM(ctypes.c_uint32, (), machine_code) + asm.compile() + retval = asm.run() + 
asm.free() + return retval + + # http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID + def get_vendor_id(self): + # EBX + ebx = self._run_asm( + b"\x31\xC0", # xor eax,eax + b"\x0F\xA2" # cpuid + b"\x89\xD8" # mov ax,bx + b"\xC3" # ret + ) + + # ECX + ecx = self._run_asm( + b"\x31\xC0", # xor eax,eax + b"\x0f\xa2" # cpuid + b"\x89\xC8" # mov ax,cx + b"\xC3" # ret + ) + + # EDX + edx = self._run_asm( + b"\x31\xC0", # xor eax,eax + b"\x0f\xa2" # cpuid + b"\x89\xD0" # mov ax,dx + b"\xC3" # ret + ) + + # Each 4bits is a ascii letter in the name + vendor_id = [] + for reg in [ebx, edx, ecx]: + for n in [0, 8, 16, 24]: + vendor_id.append(chr((reg >> n) & 0xFF)) + vendor_id = ''.join(vendor_id) + + return vendor_id + + # http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits + def get_info(self): + # EAX + eax = self._run_asm( + b"\xB8\x01\x00\x00\x00", # mov eax,0x1" + b"\x0f\xa2" # cpuid + b"\xC3" # ret + ) + + # Get the CPU info + stepping_id = (eax >> 0) & 0xF # 4 bits + model = (eax >> 4) & 0xF # 4 bits + family_id = (eax >> 8) & 0xF # 4 bits + processor_type = (eax >> 12) & 0x3 # 2 bits + extended_model_id = (eax >> 16) & 0xF # 4 bits + extended_family_id = (eax >> 20) & 0xFF # 8 bits + family = 0 + + if family_id in [15]: + family = extended_family_id + family_id + else: + family = family_id + + if family_id in [6, 15]: + model = (extended_model_id << 4) + model + + return { + 'stepping' : stepping_id, + 'model' : model, + 'family' : family, + 'processor_type' : processor_type + } + + # http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported + def get_max_extension_support(self): + # Check for extension support + max_extension_support = self._run_asm( + b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000 + b"\x0f\xa2" # cpuid + b"\xC3" # ret + ) + + return max_extension_support + + # http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits + def get_flags(self, max_extension_support): + # EDX 
+ edx = self._run_asm( + b"\xB8\x01\x00\x00\x00", # mov eax,0x1" + b"\x0f\xa2" # cpuid + b"\x89\xD0" # mov ax,dx + b"\xC3" # ret + ) + + # ECX + ecx = self._run_asm( + b"\xB8\x01\x00\x00\x00", # mov eax,0x1" + b"\x0f\xa2" # cpuid + b"\x89\xC8" # mov ax,cx + b"\xC3" # ret + ) + + # Get the CPU flags + flags = { + 'fpu' : _is_bit_set(edx, 0), + 'vme' : _is_bit_set(edx, 1), + 'de' : _is_bit_set(edx, 2), + 'pse' : _is_bit_set(edx, 3), + 'tsc' : _is_bit_set(edx, 4), + 'msr' : _is_bit_set(edx, 5), + 'pae' : _is_bit_set(edx, 6), + 'mce' : _is_bit_set(edx, 7), + 'cx8' : _is_bit_set(edx, 8), + 'apic' : _is_bit_set(edx, 9), + #'reserved1' : _is_bit_set(edx, 10), + 'sep' : _is_bit_set(edx, 11), + 'mtrr' : _is_bit_set(edx, 12), + 'pge' : _is_bit_set(edx, 13), + 'mca' : _is_bit_set(edx, 14), + 'cmov' : _is_bit_set(edx, 15), + 'pat' : _is_bit_set(edx, 16), + 'pse36' : _is_bit_set(edx, 17), + 'pn' : _is_bit_set(edx, 18), + 'clflush' : _is_bit_set(edx, 19), + #'reserved2' : _is_bit_set(edx, 20), + 'dts' : _is_bit_set(edx, 21), + 'acpi' : _is_bit_set(edx, 22), + 'mmx' : _is_bit_set(edx, 23), + 'fxsr' : _is_bit_set(edx, 24), + 'sse' : _is_bit_set(edx, 25), + 'sse2' : _is_bit_set(edx, 26), + 'ss' : _is_bit_set(edx, 27), + 'ht' : _is_bit_set(edx, 28), + 'tm' : _is_bit_set(edx, 29), + 'ia64' : _is_bit_set(edx, 30), + 'pbe' : _is_bit_set(edx, 31), + + 'pni' : _is_bit_set(ecx, 0), + 'pclmulqdq' : _is_bit_set(ecx, 1), + 'dtes64' : _is_bit_set(ecx, 2), + 'monitor' : _is_bit_set(ecx, 3), + 'ds_cpl' : _is_bit_set(ecx, 4), + 'vmx' : _is_bit_set(ecx, 5), + 'smx' : _is_bit_set(ecx, 6), + 'est' : _is_bit_set(ecx, 7), + 'tm2' : _is_bit_set(ecx, 8), + 'ssse3' : _is_bit_set(ecx, 9), + 'cid' : _is_bit_set(ecx, 10), + #'reserved3' : _is_bit_set(ecx, 11), + 'fma' : _is_bit_set(ecx, 12), + 'cx16' : _is_bit_set(ecx, 13), + 'xtpr' : _is_bit_set(ecx, 14), + 'pdcm' : _is_bit_set(ecx, 15), + #'reserved4' : _is_bit_set(ecx, 16), + 'pcid' : _is_bit_set(ecx, 17), + 'dca' : _is_bit_set(ecx, 18), + 'sse4_1' : 
_is_bit_set(ecx, 19), + 'sse4_2' : _is_bit_set(ecx, 20), + 'x2apic' : _is_bit_set(ecx, 21), + 'movbe' : _is_bit_set(ecx, 22), + 'popcnt' : _is_bit_set(ecx, 23), + 'tscdeadline' : _is_bit_set(ecx, 24), + 'aes' : _is_bit_set(ecx, 25), + 'xsave' : _is_bit_set(ecx, 26), + 'osxsave' : _is_bit_set(ecx, 27), + 'avx' : _is_bit_set(ecx, 28), + 'f16c' : _is_bit_set(ecx, 29), + 'rdrnd' : _is_bit_set(ecx, 30), + 'hypervisor' : _is_bit_set(ecx, 31) + } + + # Get a list of only the flags that are true + flags = [k for k, v in flags.items() if v] + + # http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features + if max_extension_support >= 7: + # EBX + ebx = self._run_asm( + b"\x31\xC9", # xor ecx,ecx + b"\xB8\x07\x00\x00\x00" # mov eax,7 + b"\x0f\xa2" # cpuid + b"\x89\xD8" # mov ax,bx + b"\xC3" # ret + ) + + # ECX + ecx = self._run_asm( + b"\x31\xC9", # xor ecx,ecx + b"\xB8\x07\x00\x00\x00" # mov eax,7 + b"\x0f\xa2" # cpuid + b"\x89\xC8" # mov ax,cx + b"\xC3" # ret + ) + + # Get the extended CPU flags + extended_flags = { + #'fsgsbase' : _is_bit_set(ebx, 0), + #'IA32_TSC_ADJUST' : _is_bit_set(ebx, 1), + 'sgx' : _is_bit_set(ebx, 2), + 'bmi1' : _is_bit_set(ebx, 3), + 'hle' : _is_bit_set(ebx, 4), + 'avx2' : _is_bit_set(ebx, 5), + #'reserved' : _is_bit_set(ebx, 6), + 'smep' : _is_bit_set(ebx, 7), + 'bmi2' : _is_bit_set(ebx, 8), + 'erms' : _is_bit_set(ebx, 9), + 'invpcid' : _is_bit_set(ebx, 10), + 'rtm' : _is_bit_set(ebx, 11), + 'pqm' : _is_bit_set(ebx, 12), + #'FPU CS and FPU DS deprecated' : _is_bit_set(ebx, 13), + 'mpx' : _is_bit_set(ebx, 14), + 'pqe' : _is_bit_set(ebx, 15), + 'avx512f' : _is_bit_set(ebx, 16), + 'avx512dq' : _is_bit_set(ebx, 17), + 'rdseed' : _is_bit_set(ebx, 18), + 'adx' : _is_bit_set(ebx, 19), + 'smap' : _is_bit_set(ebx, 20), + 'avx512ifma' : _is_bit_set(ebx, 21), + 'pcommit' : _is_bit_set(ebx, 22), + 'clflushopt' : _is_bit_set(ebx, 23), + 'clwb' : _is_bit_set(ebx, 24), + 'intel_pt' : _is_bit_set(ebx, 25), + 'avx512pf' : _is_bit_set(ebx, 26), + 
'avx512er' : _is_bit_set(ebx, 27), + 'avx512cd' : _is_bit_set(ebx, 28), + 'sha' : _is_bit_set(ebx, 29), + 'avx512bw' : _is_bit_set(ebx, 30), + 'avx512vl' : _is_bit_set(ebx, 31), + + 'prefetchwt1' : _is_bit_set(ecx, 0), + 'avx512vbmi' : _is_bit_set(ecx, 1), + 'umip' : _is_bit_set(ecx, 2), + 'pku' : _is_bit_set(ecx, 3), + 'ospke' : _is_bit_set(ecx, 4), + #'reserved' : _is_bit_set(ecx, 5), + 'avx512vbmi2' : _is_bit_set(ecx, 6), + #'reserved' : _is_bit_set(ecx, 7), + 'gfni' : _is_bit_set(ecx, 8), + 'vaes' : _is_bit_set(ecx, 9), + 'vpclmulqdq' : _is_bit_set(ecx, 10), + 'avx512vnni' : _is_bit_set(ecx, 11), + 'avx512bitalg' : _is_bit_set(ecx, 12), + #'reserved' : _is_bit_set(ecx, 13), + 'avx512vpopcntdq' : _is_bit_set(ecx, 14), + #'reserved' : _is_bit_set(ecx, 15), + #'reserved' : _is_bit_set(ecx, 16), + #'mpx0' : _is_bit_set(ecx, 17), + #'mpx1' : _is_bit_set(ecx, 18), + #'mpx2' : _is_bit_set(ecx, 19), + #'mpx3' : _is_bit_set(ecx, 20), + #'mpx4' : _is_bit_set(ecx, 21), + 'rdpid' : _is_bit_set(ecx, 22), + #'reserved' : _is_bit_set(ecx, 23), + #'reserved' : _is_bit_set(ecx, 24), + #'reserved' : _is_bit_set(ecx, 25), + #'reserved' : _is_bit_set(ecx, 26), + #'reserved' : _is_bit_set(ecx, 27), + #'reserved' : _is_bit_set(ecx, 28), + #'reserved' : _is_bit_set(ecx, 29), + 'sgx_lc' : _is_bit_set(ecx, 30), + #'reserved' : _is_bit_set(ecx, 31) + } + + # Get a list of only the flags that are true + extended_flags = [k for k, v in extended_flags.items() if v] + flags += extended_flags + + # http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits + if max_extension_support >= 0x80000001: + # EBX + ebx = self._run_asm( + b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001 + b"\x0f\xa2" # cpuid + b"\x89\xD8" # mov ax,bx + b"\xC3" # ret + ) + + # ECX + ecx = self._run_asm( + b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001 + b"\x0f\xa2" # cpuid + b"\x89\xC8" # mov ax,cx + b"\xC3" # ret + ) + + # Get the extended CPU flags + extended_flags = { + 'fpu' : 
_is_bit_set(ebx, 0), + 'vme' : _is_bit_set(ebx, 1), + 'de' : _is_bit_set(ebx, 2), + 'pse' : _is_bit_set(ebx, 3), + 'tsc' : _is_bit_set(ebx, 4), + 'msr' : _is_bit_set(ebx, 5), + 'pae' : _is_bit_set(ebx, 6), + 'mce' : _is_bit_set(ebx, 7), + 'cx8' : _is_bit_set(ebx, 8), + 'apic' : _is_bit_set(ebx, 9), + #'reserved' : _is_bit_set(ebx, 10), + 'syscall' : _is_bit_set(ebx, 11), + 'mtrr' : _is_bit_set(ebx, 12), + 'pge' : _is_bit_set(ebx, 13), + 'mca' : _is_bit_set(ebx, 14), + 'cmov' : _is_bit_set(ebx, 15), + 'pat' : _is_bit_set(ebx, 16), + 'pse36' : _is_bit_set(ebx, 17), + #'reserved' : _is_bit_set(ebx, 18), + 'mp' : _is_bit_set(ebx, 19), + 'nx' : _is_bit_set(ebx, 20), + #'reserved' : _is_bit_set(ebx, 21), + 'mmxext' : _is_bit_set(ebx, 22), + 'mmx' : _is_bit_set(ebx, 23), + 'fxsr' : _is_bit_set(ebx, 24), + 'fxsr_opt' : _is_bit_set(ebx, 25), + 'pdpe1gp' : _is_bit_set(ebx, 26), + 'rdtscp' : _is_bit_set(ebx, 27), + #'reserved' : _is_bit_set(ebx, 28), + 'lm' : _is_bit_set(ebx, 29), + '3dnowext' : _is_bit_set(ebx, 30), + '3dnow' : _is_bit_set(ebx, 31), + + 'lahf_lm' : _is_bit_set(ecx, 0), + 'cmp_legacy' : _is_bit_set(ecx, 1), + 'svm' : _is_bit_set(ecx, 2), + 'extapic' : _is_bit_set(ecx, 3), + 'cr8_legacy' : _is_bit_set(ecx, 4), + 'abm' : _is_bit_set(ecx, 5), + 'sse4a' : _is_bit_set(ecx, 6), + 'misalignsse' : _is_bit_set(ecx, 7), + '3dnowprefetch' : _is_bit_set(ecx, 8), + 'osvw' : _is_bit_set(ecx, 9), + 'ibs' : _is_bit_set(ecx, 10), + 'xop' : _is_bit_set(ecx, 11), + 'skinit' : _is_bit_set(ecx, 12), + 'wdt' : _is_bit_set(ecx, 13), + #'reserved' : _is_bit_set(ecx, 14), + 'lwp' : _is_bit_set(ecx, 15), + 'fma4' : _is_bit_set(ecx, 16), + 'tce' : _is_bit_set(ecx, 17), + #'reserved' : _is_bit_set(ecx, 18), + 'nodeid_msr' : _is_bit_set(ecx, 19), + #'reserved' : _is_bit_set(ecx, 20), + 'tbm' : _is_bit_set(ecx, 21), + 'topoext' : _is_bit_set(ecx, 22), + 'perfctr_core' : _is_bit_set(ecx, 23), + 'perfctr_nb' : _is_bit_set(ecx, 24), + #'reserved' : _is_bit_set(ecx, 25), + 'dbx' : 
_is_bit_set(ecx, 26), + 'perftsc' : _is_bit_set(ecx, 27), + 'pci_l2i' : _is_bit_set(ecx, 28), + #'reserved' : _is_bit_set(ecx, 29), + #'reserved' : _is_bit_set(ecx, 30), + #'reserved' : _is_bit_set(ecx, 31) + } + + # Get a list of only the flags that are true + extended_flags = [k for k, v in extended_flags.items() if v] + flags += extended_flags + + flags.sort() + return flags + + # http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String + def get_processor_brand(self, max_extension_support): + processor_brand = "" + + # Processor brand string + if max_extension_support >= 0x80000004: + instructions = [ + b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002 + b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003 + b"\xB8\x04\x00\x00\x80" # mov ax,0x80000004 + ] + for instruction in instructions: + # EAX + eax = self._run_asm( + instruction, # mov ax,0x8000000? + b"\x0f\xa2" # cpuid + b"\x89\xC0" # mov ax,ax + b"\xC3" # ret + ) + + # EBX + ebx = self._run_asm( + instruction, # mov ax,0x8000000? + b"\x0f\xa2" # cpuid + b"\x89\xD8" # mov ax,bx + b"\xC3" # ret + ) + + # ECX + ecx = self._run_asm( + instruction, # mov ax,0x8000000? + b"\x0f\xa2" # cpuid + b"\x89\xC8" # mov ax,cx + b"\xC3" # ret + ) + + # EDX + edx = self._run_asm( + instruction, # mov ax,0x8000000? 
+ b"\x0f\xa2" # cpuid + b"\x89\xD0" # mov ax,dx + b"\xC3" # ret + ) + + # Combine each of the 4 bytes in each register into the string + for reg in [eax, ebx, ecx, edx]: + for n in [0, 8, 16, 24]: + processor_brand += chr((reg >> n) & 0xFF) + + # Strip off any trailing NULL terminators and white space + processor_brand = processor_brand.strip("\0").strip() + + return processor_brand + + # http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features + def get_cache(self, max_extension_support): + cache_info = {} + + # Just return if the cache feature is not supported + if max_extension_support < 0x80000006: + return cache_info + + # ECX + ecx = self._run_asm( + b"\xB8\x06\x00\x00\x80" # mov ax,0x80000006 + b"\x0f\xa2" # cpuid + b"\x89\xC8" # mov ax,cx + b"\xC3" # ret + ) + + cache_info = { + 'size_b' : (ecx & 0xFF) * 1024, + 'associativity' : (ecx >> 12) & 0xF, + 'line_size_b' : (ecx >> 16) & 0xFFFF + } + + return cache_info + + def get_ticks_func(self): + retval = None + + if DataSource.bits == '32bit': + # Works on x86_32 + restype = None + argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint)) + get_ticks_x86_32 = self._asm_func(restype, argtypes, + [ + b"\x55", # push bp + b"\x89\xE5", # mov bp,sp + b"\x31\xC0", # xor ax,ax + b"\x0F\xA2", # cpuid + b"\x0F\x31", # rdtsc + b"\x8B\x5D\x08", # mov bx,[di+0x8] + b"\x8B\x4D\x0C", # mov cx,[di+0xc] + b"\x89\x13", # mov [bp+di],dx + b"\x89\x01", # mov [bx+di],ax + b"\x5D", # pop bp + b"\xC3" # ret + ] + ) + + # Monkey patch func to combine high and low args into one return + old_func = get_ticks_x86_32.func + def new_func(): + # Pass two uint32s into function + high = ctypes.c_uint32(0) + low = ctypes.c_uint32(0) + old_func(ctypes.byref(high), ctypes.byref(low)) + + # Shift the two uint32s into one uint64 + retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value + return retval + get_ticks_x86_32.func = new_func + + retval = get_ticks_x86_32 + elif DataSource.bits == '64bit': 
+ # Works on x86_64 + restype = ctypes.c_uint64 + argtypes = () + get_ticks_x86_64 = self._asm_func(restype, argtypes, + [ + b"\x48", # dec ax + b"\x31\xC0", # xor ax,ax + b"\x0F\xA2", # cpuid + b"\x0F\x31", # rdtsc + b"\x48", # dec ax + b"\xC1\xE2\x20", # shl dx,byte 0x20 + b"\x48", # dec ax + b"\x09\xD0", # or ax,dx + b"\xC3", # ret + ] + ) + + retval = get_ticks_x86_64 + return retval + + def get_raw_hz(self): + from time import sleep + + ticks_fn = self.get_ticks_func() + + start = ticks_fn.func() + sleep(1) + end = ticks_fn.func() + + ticks = (end - start) + ticks_fn.free() + + return ticks + +def _get_cpu_info_from_cpuid_actual(): + ''' + Warning! This function has the potential to crash the Python runtime. + Do not call it directly. Use the _get_cpu_info_from_cpuid function instead. + It will safely call this function in another process. + ''' + + from io import StringIO + + trace = Trace(True, True) + info = {} + + # Pipe stdout and stderr to strings + sys.stdout = trace._stdout + sys.stderr = trace._stderr + + try: + # Get the CPU arch and bits + arch, bits = _parse_arch(DataSource.arch_string_raw) + + # Return none if this is not an X86 CPU + if not arch in ['X86_32', 'X86_64']: + trace.fail('Not running on X86_32 or X86_64. Skipping ...') + return trace.to_dict(info, True) + + # Return none if SE Linux is in enforcing mode + cpuid = CPUID(trace) + if cpuid.is_selinux_enforcing: + trace.fail('SELinux is enforcing. 
Skipping ...') + return trace.to_dict(info, True) + + # Get the cpu info from the CPUID register + max_extension_support = cpuid.get_max_extension_support() + cache_info = cpuid.get_cache(max_extension_support) + info = cpuid.get_info() + + processor_brand = cpuid.get_processor_brand(max_extension_support) + + # Get the Hz and scale + hz_actual = cpuid.get_raw_hz() + hz_actual = _to_decimal_string(hz_actual) + + # Get the Hz and scale + hz_advertised, scale = _parse_cpu_brand_string(processor_brand) + info = { + 'vendor_id_raw' : cpuid.get_vendor_id(), + 'hardware_raw' : '', + 'brand_raw' : processor_brand, + + 'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale), + 'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0), + 'hz_advertised' : _hz_short_to_full(hz_advertised, scale), + 'hz_actual' : _hz_short_to_full(hz_actual, 0), + + 'l2_cache_size' : cache_info['size_b'], + 'l2_cache_line_size' : cache_info['line_size_b'], + 'l2_cache_associativity' : cache_info['associativity'], + + 'stepping' : info['stepping'], + 'model' : info['model'], + 'family' : info['family'], + 'processor_type' : info['processor_type'], + 'flags' : cpuid.get_flags(max_extension_support) + } + + info = _filter_dict_keys_with_empty_values(info) + trace.success() + except Exception as err: + from traceback import format_exc + err_string = format_exc() + trace._err = ''.join(['\t\t{0}\n'.format(n) for n in err_string.split('\n')]) + '\n' + return trace.to_dict(info, True) + + return trace.to_dict(info, False) + +def _get_cpu_info_from_cpuid_subprocess_wrapper(queue): + orig_stdout = sys.stdout + orig_stderr = sys.stderr + + output = _get_cpu_info_from_cpuid_actual() + + sys.stdout = orig_stdout + sys.stderr = orig_stderr + + queue.put(_obj_to_b64(output)) + +def _get_cpu_info_from_cpuid(): + ''' + Returns the CPU info gathered by querying the X86 cpuid register in a new process. + Returns {} on non X86 cpus. + Returns {} if SELinux is in enforcing mode. 
+ ''' + + g_trace.header('Tying to get info from CPUID ...') + + from multiprocessing import Process, Queue + + # Return {} if can't cpuid + if not DataSource.can_cpuid: + g_trace.fail('Can\'t CPUID. Skipping ...') + return {} + + # Get the CPU arch and bits + arch, bits = _parse_arch(DataSource.arch_string_raw) + + # Return {} if this is not an X86 CPU + if not arch in ['X86_32', 'X86_64']: + g_trace.fail('Not running on X86_32 or X86_64. Skipping ...') + return {} + + try: + if CAN_CALL_CPUID_IN_SUBPROCESS: + # Start running the function in a subprocess + queue = Queue() + p = Process(target=_get_cpu_info_from_cpuid_subprocess_wrapper, args=(queue,)) + p.start() + + # Wait for the process to end, while it is still alive + while p.is_alive(): + p.join(0) + + # Return {} if it failed + if p.exitcode != 0: + g_trace.fail('Failed to run CPUID in process. Skipping ...') + return {} + + # Return {} if no results + if queue.empty(): + g_trace.fail('Failed to get anything from CPUID process. Skipping ...') + return {} + # Return the result, only if there is something to read + else: + output = _b64_to_obj(queue.get()) + import pprint + pp = pprint.PrettyPrinter(indent=4) + #pp.pprint(output) + + if 'output' in output and output['output']: + g_trace.write(output['output']) + + if 'stdout' in output and output['stdout']: + sys.stdout.write('{0}\n'.format(output['stdout'])) + sys.stdout.flush() + + if 'stderr' in output and output['stderr']: + sys.stderr.write('{0}\n'.format(output['stderr'])) + sys.stderr.flush() + + if 'is_fail' not in output: + g_trace.fail('Failed to get is_fail from CPUID process. Skipping ...') + return {} + + # Fail if there was an exception + if 'err' in output and output['err']: + g_trace.fail('Failed to run CPUID in process. 
 Skipping ...')
				g_trace.write(output['err'])
				g_trace.write('Failed ...')
				return {}

			# Subprocess reported a trace-level failure (no exception raised)
			if 'is_fail' in output and output['is_fail']:
				g_trace.write('Failed ...')
				return {}

			if 'info' not in output or not output['info']:
				g_trace.fail('Failed to get return info from CPUID process. Skipping ...')
				return {}

			return output['info']
		else:
			# In-process CPUID path: _get_cpu_info_from_cpuid_actual redirects
			# stdout/stderr, so the originals are saved and restored around it.
			# FIXME: This should write the values like in the above call to actual
			orig_stdout = sys.stdout
			orig_stderr = sys.stderr

			output = _get_cpu_info_from_cpuid_actual()

			sys.stdout = orig_stdout
			sys.stderr = orig_stderr

			g_trace.success()
			return output['info']
	except Exception as err:
		g_trace.fail(err)

	# Return {} if everything failed
	return {}

def _get_cpu_info_from_proc_cpuinfo():
	'''
	Returns the CPU info gathered from /proc/cpuinfo.
	Returns {} if /proc/cpuinfo is not found.
	'''

	g_trace.header('Tying to get info from /proc/cpuinfo ...')

	try:
		# Just return {} if there is no cpuinfo
		if not DataSource.has_proc_cpuinfo():
			g_trace.fail('Failed to find /proc/cpuinfo. Skipping ...')
			return {}

		returncode, output = DataSource.cat_proc_cpuinfo()
		if returncode != 0:
			g_trace.fail('Failed to run cat /proc/cpuinfo. Skipping ...')
			return {}

		# Various fields (several candidate key names are passed to _get_field
		# to cover different kernels/arches)
		vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
		processor_brand = _get_field(True, output, None, None, 'model name', 'cpu', 'processor', 'uarch')
		cache_size = _get_field(False, output, None, '', 'cache size')
		stepping = _get_field(False, output, int, -1, 'stepping')
		model = _get_field(False, output, int, -1, 'model')
		family = _get_field(False, output, int, -1, 'cpu family')
		hardware = _get_field(False, output, None, '', 'Hardware')

		# Flags: whitespace-separated list, sorted for stable output
		flags = _get_field(False, output, None, None, 'flags', 'Features', 'ASEs implemented')
		if flags:
			flags = flags.split()
			flags.sort()

		# Check for other cache format: "cacheN" entries holding space-separated
		# key=value pairs; only a level-3 entry is taken (feeds 'l3_cache_size'
		# below), and any parse error is deliberately swallowed (best effort).
		if not cache_size:
			try:
				for i in range(0, 10):
					name = "cache{0}".format(i)
					value = _get_field(False, output, None, None, name)
					if value:
						value = [entry.split('=') for entry in value.split(' ')]
						value = dict(value)
						if 'level' in value and value['level'] == '3' and 'size' in value:
							cache_size = value['size']
							break
			except Exception:
				pass

		# Convert from MHz string to Hz
		# NOTE(review): rstrip('mhz') removes a trailing *set* of characters,
		# not the literal suffix; safe here only because digits and '.' are
		# not in that set.
		hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock', 'cpu MHz dynamic', 'cpu MHz static')
		hz_actual = hz_actual.lower().rstrip('mhz').strip()
		hz_actual = _to_decimal_string(hz_actual)

		# Convert from GHz/MHz string to Hz — best effort, since the brand
		# string may be absent or unparsable
		hz_advertised, scale = (None, 0)
		try:
			hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		except Exception:
			pass

		info = {
			'hardware_raw' : hardware,
			'brand_raw' : processor_brand,

			'l3_cache_size' : _friendly_bytes_to_int(cache_size),
			'flags' : flags,
			'vendor_id_raw' : vendor_id,
			'stepping' : stepping,
			'model' : model,
			'family' : family,
		}

		# Make the Hz the same for actual and advertised if missing any
		if not hz_advertised or hz_advertised == '0.0':
			hz_advertised = hz_actual
			scale = 6
		elif not hz_actual or hz_actual == '0.0':
			hz_actual = hz_advertised

		# Add the Hz if there is one
		# NOTE(review): the guards use `scale`, but the stored *actual* values
		# use the literal 6 (MHz) — confirm the mismatch is intended.
		if _hz_short_to_full(hz_advertised, scale) > (0, 0):
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
		if _hz_short_to_full(hz_actual, scale) > (0, 0):
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6)
			info['hz_actual'] = _hz_short_to_full(hz_actual, 6)

		# Drop empty values, but keep 0 for stepping/model/family
		info = _filter_dict_keys_with_empty_values(info, {'stepping':0, 'model':0, 'family':0})
		g_trace.success()
		return info
	except Exception as err:
		g_trace.fail(err)
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}

def _get_cpu_info_from_cpufreq_info():
	'''
	Returns the CPU info gathered from cpufreq-info.
	Returns {} if cpufreq-info is not found.
	'''

	g_trace.header('Tying to get info from cpufreq-info ...')

	try:
		hz_brand, scale = '0.0', 0

		if not DataSource.has_cpufreq_info():
			g_trace.fail('Failed to find cpufreq-info. Skipping ...')
			return {}

		returncode, output = DataSource.cpufreq_info()
		if returncode != 0:
			g_trace.fail('Failed to run cpufreq-info. Skipping ...')
			return {}

		# Take the text after "current CPU frequency is" up to (and including)
		# the "Hz" marker, e.g. "2.40 GHz" -> "2.40 ghz"
		hz_brand = output.split('current CPU frequency is')[1].split('\n')[0]
		i = hz_brand.find('Hz')
		assert(i != -1)
		hz_brand = hz_brand[0 : i+2].strip().lower()

		if hz_brand.endswith('mhz'):
			scale = 6
		elif hz_brand.endswith('ghz'):
			scale = 9
		# NOTE(review): rstrip removes a trailing character *set*, not the
		# literal suffix; works here because digits/'.' are not in 'mhz'/'ghz'.
		hz_brand = hz_brand.rstrip('mhz').rstrip('ghz').strip()
		hz_brand = _to_decimal_string(hz_brand)

		# cpufreq-info only yields one frequency, so advertised == actual
		info = {
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_brand, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_brand, scale),
			'hz_advertised' : _hz_short_to_full(hz_brand, scale),
			'hz_actual' : _hz_short_to_full(hz_brand, scale),
		}

		info = _filter_dict_keys_with_empty_values(info)
		g_trace.success()
		return info
	except Exception as err:
		g_trace.fail(err)
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}

def _get_cpu_info_from_lscpu():
	'''
	Returns the CPU info gathered from lscpu.
	Returns {} if lscpu is not found.
	'''

	g_trace.header('Tying to get info from lscpu ...')

	try:
		if not DataSource.has_lscpu():
			g_trace.fail('Failed to find lscpu. Skipping ...')
			return {}

		returncode, output = DataSource.lscpu()
		if returncode != 0:
			g_trace.fail('Failed to run lscpu. 
Skipping ...') + return {} + + info = {} + + new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz') + if new_hz: + new_hz = _to_decimal_string(new_hz) + scale = 6 + info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale) + info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale) + info['hz_advertised'] = _hz_short_to_full(new_hz, scale) + info['hz_actual'] = _hz_short_to_full(new_hz, scale) + + new_hz = _get_field(False, output, None, None, 'CPU dynamic MHz', 'CPU static MHz') + if new_hz: + new_hz = _to_decimal_string(new_hz) + scale = 6 + info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale) + info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale) + info['hz_advertised'] = _hz_short_to_full(new_hz, scale) + info['hz_actual'] = _hz_short_to_full(new_hz, scale) + + vendor_id = _get_field(False, output, None, None, 'Vendor ID') + if vendor_id: + info['vendor_id_raw'] = vendor_id + + brand = _get_field(False, output, None, None, 'Model name') + if brand: + info['brand_raw'] = brand + else: + brand = _get_field(False, output, None, None, 'Model') + if brand and not brand.isdigit(): + info['brand_raw'] = brand + + family = _get_field(False, output, None, None, 'CPU family') + if family and family.isdigit(): + info['family'] = int(family) + + stepping = _get_field(False, output, None, None, 'Stepping') + if stepping and stepping.isdigit(): + info['stepping'] = int(stepping) + + model = _get_field(False, output, None, None, 'Model') + if model and model.isdigit(): + info['model'] = int(model) + + l1_data_cache_size = _get_field(False, output, None, None, 'L1d cache') + if l1_data_cache_size: + l1_data_cache_size = l1_data_cache_size.split('(')[0].strip() + info['l1_data_cache_size'] = _friendly_bytes_to_int(l1_data_cache_size) + + l1_instruction_cache_size = _get_field(False, output, None, None, 'L1i cache') + if l1_instruction_cache_size: + l1_instruction_cache_size = 
l1_instruction_cache_size.split('(')[0].strip() + info['l1_instruction_cache_size'] = _friendly_bytes_to_int(l1_instruction_cache_size) + + l2_cache_size = _get_field(False, output, None, None, 'L2 cache', 'L2d cache') + if l2_cache_size: + l2_cache_size = l2_cache_size.split('(')[0].strip() + info['l2_cache_size'] = _friendly_bytes_to_int(l2_cache_size) + + l3_cache_size = _get_field(False, output, None, None, 'L3 cache') + if l3_cache_size: + l3_cache_size = l3_cache_size.split('(')[0].strip() + info['l3_cache_size'] = _friendly_bytes_to_int(l3_cache_size) + + # Flags + flags = _get_field(False, output, None, None, 'flags', 'Features', 'ASEs implemented') + if flags: + flags = flags.split() + flags.sort() + info['flags'] = flags + + info = _filter_dict_keys_with_empty_values(info, {'stepping':0, 'model':0, 'family':0}) + g_trace.success() + return info + except Exception as err: + g_trace.fail(err) + #raise # NOTE: To have this throw on error, uncomment this line + return {} + +def _get_cpu_info_from_dmesg(): + ''' + Returns the CPU info gathered from dmesg. + Returns {} if dmesg is not found or does not have the desired info. + ''' + + g_trace.header('Tying to get info from the dmesg ...') + + # Just return {} if this arch has an unreliable dmesg log + arch, bits = _parse_arch(DataSource.arch_string_raw) + if arch in ['S390X']: + g_trace.fail('Running on S390X. Skipping ...') + return {} + + # Just return {} if there is no dmesg + if not DataSource.has_dmesg(): + g_trace.fail('Failed to find dmesg. Skipping ...') + return {} + + # If dmesg fails return {} + returncode, output = DataSource.dmesg_a() + if output is None or returncode != 0: + g_trace.fail('Failed to run \"dmesg -a\". 
Skipping ...') + return {} + + info = _parse_dmesg_output(output) + g_trace.success() + return info + + +# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf +# page 767 +def _get_cpu_info_from_ibm_pa_features(): + ''' + Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features + Returns {} if lsprop is not found or ibm,pa-features does not have the desired info. + ''' + + g_trace.header('Tying to get info from lsprop ...') + + try: + # Just return {} if there is no lsprop + if not DataSource.has_ibm_pa_features(): + g_trace.fail('Failed to find lsprop. Skipping ...') + return {} + + # If ibm,pa-features fails return {} + returncode, output = DataSource.ibm_pa_features() + if output is None or returncode != 0: + g_trace.fail('Failed to glob /proc/device-tree/cpus/*/ibm,pa-features. Skipping ...') + return {} + + # Filter out invalid characters from output + value = output.split("ibm,pa-features")[1].lower() + value = [s for s in value if s in list('0123456789abcfed')] + value = ''.join(value) + + # Get data converted to Uint32 chunks + left = int(value[0 : 8], 16) + right = int(value[8 : 16], 16) + + # Get the CPU flags + flags = { + # Byte 0 + 'mmu' : _is_bit_set(left, 0), + 'fpu' : _is_bit_set(left, 1), + 'slb' : _is_bit_set(left, 2), + 'run' : _is_bit_set(left, 3), + #'reserved' : _is_bit_set(left, 4), + 'dabr' : _is_bit_set(left, 5), + 'ne' : _is_bit_set(left, 6), + 'wtr' : _is_bit_set(left, 7), + + # Byte 1 + 'mcr' : _is_bit_set(left, 8), + 'dsisr' : _is_bit_set(left, 9), + 'lp' : _is_bit_set(left, 10), + 'ri' : _is_bit_set(left, 11), + 'dabrx' : _is_bit_set(left, 12), + 'sprg3' : _is_bit_set(left, 13), + 'rislb' : _is_bit_set(left, 14), + 'pp' : _is_bit_set(left, 15), + + # Byte 2 + 'vpm' : _is_bit_set(left, 16), + 'dss_2.05' : _is_bit_set(left, 17), + #'reserved' : _is_bit_set(left, 18), + 'dar' : _is_bit_set(left, 19), + #'reserved' : _is_bit_set(left, 20), + 'ppr' : _is_bit_set(left, 
21), + 'dss_2.02' : _is_bit_set(left, 22), + 'dss_2.06' : _is_bit_set(left, 23), + + # Byte 3 + 'lsd_in_dscr' : _is_bit_set(left, 24), + 'ugr_in_dscr' : _is_bit_set(left, 25), + #'reserved' : _is_bit_set(left, 26), + #'reserved' : _is_bit_set(left, 27), + #'reserved' : _is_bit_set(left, 28), + #'reserved' : _is_bit_set(left, 29), + #'reserved' : _is_bit_set(left, 30), + #'reserved' : _is_bit_set(left, 31), + + # Byte 4 + 'sso_2.06' : _is_bit_set(right, 0), + #'reserved' : _is_bit_set(right, 1), + #'reserved' : _is_bit_set(right, 2), + #'reserved' : _is_bit_set(right, 3), + #'reserved' : _is_bit_set(right, 4), + #'reserved' : _is_bit_set(right, 5), + #'reserved' : _is_bit_set(right, 6), + #'reserved' : _is_bit_set(right, 7), + + # Byte 5 + 'le' : _is_bit_set(right, 8), + 'cfar' : _is_bit_set(right, 9), + 'eb' : _is_bit_set(right, 10), + 'lsq_2.07' : _is_bit_set(right, 11), + #'reserved' : _is_bit_set(right, 12), + #'reserved' : _is_bit_set(right, 13), + #'reserved' : _is_bit_set(right, 14), + #'reserved' : _is_bit_set(right, 15), + + # Byte 6 + 'dss_2.07' : _is_bit_set(right, 16), + #'reserved' : _is_bit_set(right, 17), + #'reserved' : _is_bit_set(right, 18), + #'reserved' : _is_bit_set(right, 19), + #'reserved' : _is_bit_set(right, 20), + #'reserved' : _is_bit_set(right, 21), + #'reserved' : _is_bit_set(right, 22), + #'reserved' : _is_bit_set(right, 23), + + # Byte 7 + #'reserved' : _is_bit_set(right, 24), + #'reserved' : _is_bit_set(right, 25), + #'reserved' : _is_bit_set(right, 26), + #'reserved' : _is_bit_set(right, 27), + #'reserved' : _is_bit_set(right, 28), + #'reserved' : _is_bit_set(right, 29), + #'reserved' : _is_bit_set(right, 30), + #'reserved' : _is_bit_set(right, 31), + } + + # Get a list of only the flags that are true + flags = [k for k, v in flags.items() if v] + flags.sort() + + info = { + 'flags' : flags + } + info = _filter_dict_keys_with_empty_values(info) + g_trace.success() + return info + except Exception as err: + g_trace.fail(err) + return 
{}


def _get_cpu_info_from_cat_var_run_dmesg_boot():
	'''
	Returns the CPU info gathered from /var/run/dmesg.boot.
	Returns {} if dmesg is not found or does not have the desired info.
	'''

	g_trace.header('Tying to get info from the /var/run/dmesg.boot log ...')

	# Just return {} if there is no /var/run/dmesg.boot
	if not DataSource.has_var_run_dmesg_boot():
		g_trace.fail('Failed to find /var/run/dmesg.boot file. Skipping ...')
		return {}

	# If dmesg.boot fails return {}
	returncode, output = DataSource.cat_var_run_dmesg_boot()
	if output is None or returncode != 0:
		g_trace.fail('Failed to run \"cat /var/run/dmesg.boot\". Skipping ...')
		return {}

	# Shares the dmesg parser with _get_cpu_info_from_dmesg
	info = _parse_dmesg_output(output)
	g_trace.success()
	return info


def _get_cpu_info_from_sysctl():
	'''
	Returns the CPU info gathered from sysctl.
	Returns {} if sysctl is not found.
	'''

	g_trace.header('Tying to get info from sysctl ...')

	try:
		# Just return {} if there is no sysctl
		if not DataSource.has_sysctl():
			g_trace.fail('Failed to find sysctl. Skipping ...')
			return {}

		# If sysctl fails return {}
		returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
		if output is None or returncode != 0:
			g_trace.fail('Failed to run \"sysctl machdep.cpu hw.cpufrequency\". Skipping ...')
			return {}

		# Various fields
		vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
		processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
		cache_size = _get_field(False, output, int, 0, 'machdep.cpu.cache.size')
		stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
		model = _get_field(False, output, int, 0, 'machdep.cpu.model')
		family = _get_field(False, output, int, 0, 'machdep.cpu.family')

		# Flags: merged from three sysctl feature lists, lowercased and sorted
		flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()
		flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())
		flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())
		flags.sort()

		# Convert from GHz/MHz string to Hz
		# (hw.cpufrequency is used with scale 0 — presumably already in Hz;
		# TODO confirm)
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
		hz_actual = _to_decimal_string(hz_actual)

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 0),

			'l2_cache_size' : int(cache_size) * 1024,

			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		info = _filter_dict_keys_with_empty_values(info)
		g_trace.success()
		return info
	except Exception as err:
		g_trace.fail(err)
		return {}


def _get_cpu_info_from_sysinfo():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.
	'''

	# v2 results take precedence over v1 where both report a key
	info = _get_cpu_info_from_sysinfo_v1()
	info.update(_get_cpu_info_from_sysinfo_v2())
	return info

def _get_cpu_info_from_sysinfo_v1():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.
def _get_cpu_info_from_sysinfo_v2():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.
	'''

	# FIX: "Tying" typo in trace message corrected to "Trying"
	g_trace.header('Trying to get info from sysinfo version 2 ...')

	try:
		# Just return {} if there is no sysinfo
		if not DataSource.has_sysinfo():
			g_trace.fail('Failed to find sysinfo. Skipping ...')
			return {}

		# If sysinfo fails return {}
		returncode, output = DataSource.sysinfo_cpu()
		if output is None or returncode != 0:
			g_trace.fail('Failed to run "sysinfo -cpu". Skipping ...')
			return {}

		# Various fields
		vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
		processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
		cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
		signature = output.split('Signature:')[1].split('\n')[0].strip()
		#
		stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
		model = int(signature.split('model ')[1].split(',')[0].strip())
		family = int(signature.split('family ')[1].split(',')[0].strip())

		# Flags: collect tokens from an indented sub-section until the
		# first non-indented line.
		def get_subsection_flags(output):
			retval = []
			for line in output.split('\n')[1:]:
				# NOTE(review): both prefix checks appear collapsed to a
				# single space in this copy of the source -- presumably they
				# originally tested two different indent widths; confirm
				# against upstream before changing.
				if not line.startswith(' ') and not line.startswith(' '): break
				for entry in line.strip().lower().split(' '):
					retval.append(entry)
			return retval

		flags = get_subsection_flags(output.split('Features: ')[1]) + \
				get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
				get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
		flags.sort()

		# Convert from GHz/MHz string to Hz
		lines = [n for n in output.split('\n') if n]
		raw_hz = lines[0].split('running at ')[1].strip().lower()
		hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip()
		hz_advertised = _to_decimal_string(hz_advertised)
		hz_actual = hz_advertised

		scale = 0
		if raw_hz.endswith('mhz'):
			scale = 6
		elif raw_hz.endswith('ghz'):
			scale = 9

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, scale),

			'l2_cache_size' : _to_friendly_bytes(cache_size),

			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		info = _filter_dict_keys_with_empty_values(info)
		g_trace.success()
		return info
	except Exception as err:
		g_trace.fail(err)
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_wmic():
	'''
	Returns the CPU info gathered from WMI.
	Returns {} if not on Windows, or wmic is not installed.
	'''
	# FIX: "Tying" typo in trace message corrected to "Trying"
	g_trace.header('Trying to get info from wmic ...')

	try:
		# Just return {} if not Windows or there is no wmic
		if not DataSource.is_windows or not DataSource.has_wmic():
			g_trace.fail('Failed to find WMIC, or not on Windows. Skipping ...')
			return {}

		returncode, output = DataSource.wmic_cpu()
		if output is None or returncode != 0:
			g_trace.fail('Failed to run wmic. Skipping ...')
			return {}

		# Break the list into key values pairs
		value = output.split("\n")
		value = [s.rstrip().split('=') for s in value if '=' in s]
		value = {k: v for k, v in value if v}

		# Get the advertised MHz
		processor_brand = value.get('Name')
		hz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand)

		# Get the actual MHz (CurrentClockSpeed is reported in MHz, hence scale 6)
		hz_actual = value.get('CurrentClockSpeed')
		scale_actual = 6
		if hz_actual:
			hz_actual = _to_decimal_string(hz_actual)

		# Get cache sizes
		l2_cache_size = value.get('L2CacheSize') # NOTE: L2CacheSize is in kilobytes
		if l2_cache_size:
			l2_cache_size = int(l2_cache_size) * 1024

		l3_cache_size = value.get('L3CacheSize') # NOTE: L3CacheSize is in kilobytes
		if l3_cache_size:
			l3_cache_size = int(l3_cache_size) * 1024

		# Get family, model, and stepping
		family, model, stepping = '', '', ''
		description = value.get('Description') or value.get('Caption')
		# FIX: guard against both Description and Caption being absent.
		# Previously a missing description raised AttributeError on .split,
		# which the broad except below turned into discarding ALL wmic info.
		entries = description.split(' ') if description else []

		if 'Family' in entries and entries.index('Family') < len(entries)-1:
			i = entries.index('Family')
			family = int(entries[i + 1])

		if 'Model' in entries and entries.index('Model') < len(entries)-1:
			i = entries.index('Model')
			model = int(entries[i + 1])

		if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
			i = entries.index('Stepping')
			stepping = int(entries[i + 1])

		info = {
			'vendor_id_raw' : value.get('Manufacturer'),
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale_advertised),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised),
			'hz_actual' : _hz_short_to_full(hz_actual, scale_actual),

			'l2_cache_size' : l2_cache_size,
			'l3_cache_size' : l3_cache_size,

			'stepping' : stepping,
			'model' : model,
			'family' : family,
		}

		info = _filter_dict_keys_with_empty_values(info)
		g_trace.success()
		return info
	except Exception as err:
		g_trace.fail(err)
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_registry():
	'''
	Returns the CPU info gathered from the Windows Registry.
	Returns {} if not on Windows.
	'''

	# FIX: "Tying" typo in trace message corrected to "Trying"
	g_trace.header('Trying to get info from Windows registry ...')

	try:
		# Just return {} if not on Windows
		if not DataSource.is_windows:
			g_trace.fail('Not running on Windows. Skipping ...')
			return {}

		# Get the CPU name
		processor_brand = DataSource.winreg_processor_brand().strip()

		# Get the CPU vendor id
		vendor_id = DataSource.winreg_vendor_id_raw()

		# Get the CPU arch and bits
		arch_string_raw = DataSource.winreg_arch_string_raw()
		arch, bits = _parse_arch(arch_string_raw)

		# Get the actual CPU Hz
		hz_actual = DataSource.winreg_hz_actual()
		hz_actual = _to_decimal_string(hz_actual)

		# Get the advertised CPU Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

		# If advertised hz not found, use the actual hz
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)

		# Get the CPU features
		feature_bits = DataSource.winreg_feature_bits()

		# The registry stores the CPUID feature bits most-significant-bit first
		def is_set(bit):
			mask = 0x80000000 >> bit
			retval = mask & feature_bits > 0
			return retval

		# http://en.wikipedia.org/wiki/CPUID
		# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
		# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
		flags = {
			'fpu' : is_set(0), # Floating Point Unit
			'vme' : is_set(1), # V86 Mode Extensions
			'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
			'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
			'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
			'msr' : is_set(5), # Model Specific Registers
			'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
			'mce' : is_set(7), # Machine Check Exception supported
			'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
			'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
			'sepamd' : is_set(10), # Fast system calls (AMD only)
			'sep' : is_set(11), # Fast system calls
			'mtrr' : is_set(12), # Memory Type Range Registers
			'pge' : is_set(13), # Page Global Enable
			'mca' : is_set(14), # Machine Check Architecture
			'cmov' : is_set(15), # Conditional MOVe instructions
			'pat' : is_set(16), # Page Attribute Table
			'pse36' : is_set(17), # 36 bit Page Size Extensions
			'serial' : is_set(18), # Processor Serial Number
			'clflush' : is_set(19), # Cache Flush
			#'reserved1' : is_set(20), # reserved
			'dts' : is_set(21), # Debug Trace Store
			'acpi' : is_set(22), # ACPI support
			'mmx' : is_set(23), # MultiMedia Extensions
			'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
			'sse' : is_set(25), # SSE instructions
			'sse2' : is_set(26), # SSE2 (WNI) instructions
			'ss' : is_set(27), # self snoop
			#'reserved2' : is_set(28), # reserved
			'tm' : is_set(29), # Automatic clock control
			'ia64' : is_set(30), # IA64 instructions
			'3dnow' : is_set(31) # 3DNow! instructions available
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 6),

			'flags' : flags
		}

		info = _filter_dict_keys_with_empty_values(info)
		g_trace.success()
		return info
	except Exception as err:
		g_trace.fail(err)
		return {}

def _get_cpu_info_from_kstat():
	'''
	Returns the CPU info gathered from isainfo and kstat.
	Returns {} if isainfo or kstat are not found.
	'''

	# FIX: "Tying" typo in trace message corrected to "Trying"
	g_trace.header('Trying to get info from kstat ...')

	try:
		# Just return {} if there is no isainfo or kstat
		if not DataSource.has_isainfo() or not DataSource.has_kstat():
			# FIX: "isinfo" typo in failure message corrected to "isainfo"
			g_trace.fail('Failed to find isainfo or kstat. Skipping ...')
			return {}

		# If isainfo fails return {}
		returncode, flag_output = DataSource.isainfo_vb()
		if flag_output is None or returncode != 0:
			g_trace.fail('Failed to run "isainfo -vb". Skipping ...')
			return {}

		# If kstat fails return {}
		returncode, kstat = DataSource.kstat_m_cpu_info()
		if kstat is None or returncode != 0:
			g_trace.fail('Failed to run "kstat -m cpu_info". Skipping ...')
			return {}

		# Various fields
		vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
		processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
		stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
		model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
		family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())

		# Flags
		flags = flag_output.strip().split('\n')[-1].strip().lower().split()
		flags.sort()

		# Convert from GHz/MHz string to Hz (clock_MHz is in MHz, hence scale 6)
		scale = 6
		hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
		hz_advertised = _to_decimal_string(hz_advertised)

		# current_clock_Hz is already in Hz, hence scale 0 below
		hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
		hz_actual = _to_decimal_string(hz_actual)

		info = {
			'vendor_id_raw' : vendor_id,
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
			'hz_actual' : _hz_short_to_full(hz_actual, 0),

			'stepping' : stepping,
			'model' : model,
			'family' : family,
			'flags' : flags
		}

		info = _filter_dict_keys_with_empty_values(info)
		g_trace.success()
		return info
	except Exception as err:
		g_trace.fail(err)
		return {}
def _get_cpu_info_from_platform_uname():
	'''
	Returns the family, model, and stepping parsed out of platform.uname.
	Returns {} if they cannot be parsed.
	'''

	# FIX: "Tying" typo in trace message corrected to "Trying"
	g_trace.header('Trying to get info from platform.uname ...')

	try:
		uname = DataSource.uname_string_raw.split(',')[0]

		family, model, stepping = (None, None, None)
		entries = uname.split(' ')

		if 'Family' in entries and entries.index('Family') < len(entries)-1:
			i = entries.index('Family')
			family = int(entries[i + 1])

		if 'Model' in entries and entries.index('Model') < len(entries)-1:
			i = entries.index('Model')
			model = int(entries[i + 1])

		if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
			i = entries.index('Stepping')
			stepping = int(entries[i + 1])

		info = {
			'family' : family,
			'model' : model,
			'stepping' : stepping
		}
		info = _filter_dict_keys_with_empty_values(info)
		g_trace.success()
		return info
	except Exception as err:
		g_trace.fail(err)
		return {}

def _get_cpu_info_internal():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns {} if nothing is found.
	'''

	g_trace.write('!' * 80)

	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)

	friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
	friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
	PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)

	info = {
		'python_version' : PYTHON_VERSION,
		'cpuinfo_version' : CPUINFO_VERSION,
		'cpuinfo_version_string' : CPUINFO_VERSION_STRING,
		'arch' : arch,
		'bits' : bits,
		'count' : DataSource.cpu_count,
		'arch_string_raw' : DataSource.arch_string_raw,
	}

	g_trace.write("python_version: {0}".format(info['python_version']))
	g_trace.write("cpuinfo_version: {0}".format(info['cpuinfo_version']))
	g_trace.write("arch: {0}".format(info['arch']))
	g_trace.write("bits: {0}".format(info['bits']))
	g_trace.write("count: {0}".format(info['count']))
	g_trace.write("arch_string_raw: {0}".format(info['arch_string_raw']))

	# Each source below only fills in fields that are still missing,
	# so the order determines precedence.

	# Try the Windows wmic
	_copy_new_fields(info, _get_cpu_info_from_wmic())

	# Try the Windows registry
	_copy_new_fields(info, _get_cpu_info_from_registry())

	# Try /proc/cpuinfo
	_copy_new_fields(info, _get_cpu_info_from_proc_cpuinfo())

	# Try cpufreq-info
	_copy_new_fields(info, _get_cpu_info_from_cpufreq_info())

	# Try LSCPU
	_copy_new_fields(info, _get_cpu_info_from_lscpu())

	# Try sysctl
	_copy_new_fields(info, _get_cpu_info_from_sysctl())

	# Try kstat
	_copy_new_fields(info, _get_cpu_info_from_kstat())

	# Try dmesg
	_copy_new_fields(info, _get_cpu_info_from_dmesg())

	# Try /var/run/dmesg.boot
	_copy_new_fields(info, _get_cpu_info_from_cat_var_run_dmesg_boot())

	# Try lsprop ibm,pa-features
	_copy_new_fields(info, _get_cpu_info_from_ibm_pa_features())

	# Try sysinfo
	_copy_new_fields(info, _get_cpu_info_from_sysinfo())

	# Try querying the CPU cpuid register
	# FIXME: This should print stdout and stderr to trace log
	_copy_new_fields(info, _get_cpu_info_from_cpuid())

	# Try platform.uname
	_copy_new_fields(info, _get_cpu_info_from_platform_uname())

	g_trace.write('!' * 80)

	return info
def get_cpu_info_json():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns the result in a json string
	'''

	import json

	# Under pyinstaller there is no separate script file to re-run,
	# so gather the info directly in this process.
	if getattr(sys, 'frozen', False):
		return "{0}".format(json.dumps(_get_cpu_info_internal()))

	# If not running under pyinstaller, run in another process.
	# This is done because multiprocesing has a design flaw that
	# causes non main programs to run multiple times on Windows.
	from subprocess import Popen, PIPE

	child = Popen([sys.executable, __file__, '--json'], stdout=PIPE, stderr=PIPE, stdin=PIPE)
	raw_output = child.communicate()[0]

	if child.returncode != 0:
		return "{}"

	return raw_output.decode(encoding='UTF-8')

def get_cpu_info():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns the result in a dict
	'''

	import json

	# Round-trip through the JSON front end, converting any unicode
	# strings back to native str objects along the way.
	return json.loads(get_cpu_info_json(), object_hook = _utf_to_str)
def main():
	from argparse import ArgumentParser
	import json

	# Parse args
	parser = ArgumentParser(description='Gets CPU info with pure Python')
	parser.add_argument('--json', action='store_true', help='Return the info in JSON format')
	parser.add_argument('--version', action='store_true', help='Return the version of py-cpuinfo')
	parser.add_argument('--trace', action='store_true', help='Traces code paths used to find CPU info to file')
	args = parser.parse_args()

	global g_trace
	g_trace = Trace(args.trace, False)

	try:
		_check_arch()
	except Exception as err:
		sys.stderr.write(str(err) + "\n")
		sys.exit(1)

	info = _get_cpu_info_internal()

	if not info:
		sys.stderr.write("Failed to find cpu info\n")
		sys.exit(1)

	if args.json:
		print(json.dumps(info))
	elif args.version:
		print(CPUINFO_VERSION_STRING)
	else:
		# Human readable report: (label, info key) pairs printed in a fixed order
		fields = [
			('Python Version', 'python_version'),
			('Cpuinfo Version', 'cpuinfo_version_string'),
			('Vendor ID Raw', 'vendor_id_raw'),
			('Hardware Raw', 'hardware_raw'),
			('Brand Raw', 'brand_raw'),
			('Hz Advertised Friendly', 'hz_advertised_friendly'),
			('Hz Actual Friendly', 'hz_actual_friendly'),
			('Hz Advertised', 'hz_advertised'),
			('Hz Actual', 'hz_actual'),
			('Arch', 'arch'),
			('Bits', 'bits'),
			('Count', 'count'),
			('Arch String Raw', 'arch_string_raw'),
			('L1 Data Cache Size', 'l1_data_cache_size'),
			('L1 Instruction Cache Size', 'l1_instruction_cache_size'),
			('L2 Cache Size', 'l2_cache_size'),
			('L2 Cache Line Size', 'l2_cache_line_size'),
			('L2 Cache Associativity', 'l2_cache_associativity'),
			('L3 Cache Size', 'l3_cache_size'),
			('Stepping', 'stepping'),
			('Model', 'model'),
			('Family', 'family'),
			('Processor Type', 'processor_type'),
		]
		for label, key in fields:
			print('{0}: {1}'.format(label, info.get(key, '')))
		# Flags are joined into a single comma separated line
		print('Flags: {0}'.format(', '.join(info.get('flags', ''))))


if __name__ == '__main__':
	main()
else:
	g_trace = Trace(False, False)
	_check_arch()
/dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/ftfy/__pycache__/chardata.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/referencing/__init__.py b/evalkit_cambrian/lib/python3.10/site-packages/referencing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e09207d7e4b90aba221181d87886fd4f54038abf --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/referencing/__init__.py @@ -0,0 +1,7 @@ +""" +Cross-specification, implementation-agnostic JSON referencing. +""" + +from referencing._core import Anchor, Registry, Resource, Specification + +__all__ = ["Anchor", "Registry", "Resource", "Specification"] diff --git a/evalkit_cambrian/lib/python3.10/site-packages/referencing/__pycache__/_attrs.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/referencing/__pycache__/_attrs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b4e12b09d2a03bca0b3f4ffa1e8813fa26d0a5c Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/referencing/__pycache__/_attrs.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/referencing/__pycache__/exceptions.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/referencing/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3fe5dce42877b53976d3f9ad2da904c1f03a5e62 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/referencing/__pycache__/exceptions.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/referencing/__pycache__/jsonschema.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/referencing/__pycache__/jsonschema.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0277ff3e12acd72bcb64d616cae6e53d788df663 Binary files /dev/null and 
b/evalkit_cambrian/lib/python3.10/site-packages/referencing/__pycache__/jsonschema.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/referencing/__pycache__/retrieval.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/referencing/__pycache__/retrieval.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19d93d06992d418c0abbfa83acb5bf7b31eb86fc Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/referencing/__pycache__/retrieval.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/referencing/_attrs.py b/evalkit_cambrian/lib/python3.10/site-packages/referencing/_attrs.py new file mode 100644 index 0000000000000000000000000000000000000000..ae85b865fed622afe83e8d6b7b17a1f0d174aba3 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/referencing/_attrs.py @@ -0,0 +1,31 @@ +from __future__ import annotations + +from typing import NoReturn, TypeVar + +from attrs import define as _define, frozen as _frozen + +_T = TypeVar("_T") + + +def define(cls: type[_T]) -> type[_T]: # pragma: no cover + cls.__init_subclass__ = _do_not_subclass + return _define(cls) + + +def frozen(cls: type[_T]) -> type[_T]: + cls.__init_subclass__ = _do_not_subclass + return _frozen(cls) + + +class UnsupportedSubclassing(Exception): + def __str__(self): + return ( + "Subclassing is not part of referencing's public API. " + "If no other suitable API exists for what you're trying to do, " + "feel free to file an issue asking for one." 
+ ) + + +@staticmethod +def _do_not_subclass() -> NoReturn: # pragma: no cover + raise UnsupportedSubclassing() diff --git a/evalkit_cambrian/lib/python3.10/site-packages/referencing/_attrs.pyi b/evalkit_cambrian/lib/python3.10/site-packages/referencing/_attrs.pyi new file mode 100644 index 0000000000000000000000000000000000000000..278e4109b622dc3ecab7e3e0d0562ba594b80a33 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/referencing/_attrs.pyi @@ -0,0 +1,20 @@ +from typing import Any, Callable, TypeVar, Union + +from attr import attrib, field + +class UnsupportedSubclassing(Exception): ... + +_T = TypeVar("_T") + +def __dataclass_transform__( + *, + frozen_default: bool = False, + field_descriptors: tuple[Union[type, Callable[..., Any]], ...] = ..., +) -> Callable[[_T], _T]: ... +@__dataclass_transform__(field_descriptors=(attrib, field)) +def define(cls: type[_T]) -> type[_T]: ... +@__dataclass_transform__( + frozen_default=True, + field_descriptors=(attrib, field), +) +def frozen(cls: type[_T]) -> type[_T]: ... 
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/referencing/_core.py b/evalkit_cambrian/lib/python3.10/site-packages/referencing/_core.py new file mode 100644 index 0000000000000000000000000000000000000000..ec2d51bdc4c47e270502bdb22fe006135cd9c501 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/referencing/_core.py @@ -0,0 +1,739 @@ +from __future__ import annotations + +from collections.abc import Iterable, Iterator, Sequence +from enum import Enum +from typing import Any, Callable, ClassVar, Generic, Protocol +from urllib.parse import unquote, urldefrag, urljoin + +from attrs import evolve, field +from rpds import HashTrieMap, HashTrieSet, List + +try: + from typing_extensions import TypeVar +except ImportError: # pragma: no cover + from typing import TypeVar + +from referencing import exceptions +from referencing._attrs import frozen +from referencing.typing import URI, Anchor as AnchorType, D, Mapping, Retrieve + +EMPTY_UNCRAWLED: HashTrieSet[URI] = HashTrieSet() +EMPTY_PREVIOUS_RESOLVERS: List[URI] = List() + + +class _Unset(Enum): + """ + What sillyness... + """ + + SENTINEL = 1 + + +_UNSET = _Unset.SENTINEL + + +class _MaybeInSubresource(Protocol[D]): + def __call__( + self, + segments: Sequence[int | str], + resolver: Resolver[D], + subresource: Resource[D], + ) -> Resolver[D]: ... 
+ + +def _detect_or_error(contents: D) -> Specification[D]: + if not isinstance(contents, Mapping): + raise exceptions.CannotDetermineSpecification(contents) + + jsonschema_dialect_id = contents.get("$schema") # type: ignore[reportUnknownMemberType] + if not isinstance(jsonschema_dialect_id, str): + raise exceptions.CannotDetermineSpecification(contents) + + from referencing.jsonschema import specification_with + + return specification_with(jsonschema_dialect_id) + + +def _detect_or_default( + default: Specification[D], +) -> Callable[[D], Specification[D]]: + def _detect(contents: D) -> Specification[D]: + if not isinstance(contents, Mapping): + return default + + jsonschema_dialect_id = contents.get("$schema") # type: ignore[reportUnknownMemberType] + if jsonschema_dialect_id is None: + return default + + from referencing.jsonschema import specification_with + + return specification_with( + jsonschema_dialect_id, # type: ignore[reportUnknownArgumentType] + default=default, + ) + + return _detect + + +class _SpecificationDetector: + def __get__( + self, + instance: Specification[D] | None, + cls: type[Specification[D]], + ) -> Callable[[D], Specification[D]]: + if instance is None: + return _detect_or_error + else: + return _detect_or_default(instance) + + +@frozen +class Specification(Generic[D]): + """ + A specification which defines referencing behavior. + + The various methods of a `Specification` allow for varying referencing + behavior across JSON Schema specification versions, etc. + """ + + #: A short human-readable name for the specification, used for debugging. + name: str + + #: Find the ID of a given document. + id_of: Callable[[D], URI | None] + + #: Retrieve the subresources of the given document (without traversing into + #: the subresources themselves). + subresources_of: Callable[[D], Iterable[D]] + + #: While resolving a JSON pointer, conditionally enter a subresource + #: (if e.g. 
we have just entered a keyword whose value is a subresource) + maybe_in_subresource: _MaybeInSubresource[D] + + #: Retrieve the anchors contained in the given document. + _anchors_in: Callable[ + [Specification[D], D], + Iterable[AnchorType[D]], + ] = field(alias="anchors_in") + + #: An opaque specification where resources have no subresources + #: nor internal identifiers. + OPAQUE: ClassVar[Specification[Any]] + + #: Attempt to discern which specification applies to the given contents. + #: + #: May be called either as an instance method or as a class method, with + #: slightly different behavior in the following case: + #: + #: Recall that not all contents contains enough internal information about + #: which specification it is written for -- the JSON Schema ``{}``, + #: for instance, is valid under many different dialects and may be + #: interpreted as any one of them. + #: + #: When this method is used as an instance method (i.e. called on a + #: specific specification), that specification is used as the default + #: if the given contents are unidentifiable. + #: + #: On the other hand when called as a class method, an error is raised. + #: + #: To reiterate, ``DRAFT202012.detect({})`` will return ``DRAFT202012`` + #: whereas the class method ``Specification.detect({})`` will raise an + #: error. + #: + #: (Note that of course ``DRAFT202012.detect(...)`` may return some other + #: specification when given a schema which *does* identify as being for + #: another version). + #: + #: Raises: + #: + #: `CannotDetermineSpecification` + #: + #: if the given contents don't have any discernible + #: information which could be used to guess which + #: specification they identify as + detect = _SpecificationDetector() + + def __repr__(self) -> str: + return f"" + + def anchors_in(self, contents: D): + """ + Retrieve the anchors contained in the given document. 
+ """ + return self._anchors_in(self, contents) + + def create_resource(self, contents: D) -> Resource[D]: + """ + Create a resource which is interpreted using this specification. + """ + return Resource(contents=contents, specification=self) + + +Specification.OPAQUE = Specification( + name="opaque", + id_of=lambda contents: None, + subresources_of=lambda contents: [], + anchors_in=lambda specification, contents: [], + maybe_in_subresource=lambda segments, resolver, subresource: resolver, +) + + +@frozen +class Resource(Generic[D]): + r""" + A document (deserialized JSON) with a concrete interpretation under a spec. + + In other words, a Python object, along with an instance of `Specification` + which describes how the document interacts with referencing -- both + internally (how it refers to other `Resource`\ s) and externally (how it + should be identified such that it is referenceable by other documents). + """ + + contents: D + _specification: Specification[D] = field(alias="specification") + + @classmethod + def from_contents( + cls, + contents: D, + default_specification: ( + type[Specification[D]] | Specification[D] + ) = Specification, + ) -> Resource[D]: + """ + Create a resource guessing which specification applies to the contents. + + Raises: + + `CannotDetermineSpecification` + + if the given contents don't have any discernible + information which could be used to guess which + specification they identify as + + """ + specification = default_specification.detect(contents) + return specification.create_resource(contents=contents) + + @classmethod + def opaque(cls, contents: D) -> Resource[D]: + """ + Create an opaque `Resource` -- i.e. one with opaque specification. + + See `Specification.OPAQUE` for details. + """ + return Specification.OPAQUE.create_resource(contents=contents) + + def id(self) -> URI | None: + """ + Retrieve this resource's (specification-specific) identifier. 
+ """ + id = self._specification.id_of(self.contents) + if id is None: + return + return id.rstrip("#") + + def subresources(self) -> Iterable[Resource[D]]: + """ + Retrieve this resource's subresources. + """ + return ( + Resource.from_contents( + each, + default_specification=self._specification, + ) + for each in self._specification.subresources_of(self.contents) + ) + + def anchors(self) -> Iterable[AnchorType[D]]: + """ + Retrieve this resource's (specification-specific) identifier. + """ + return self._specification.anchors_in(self.contents) + + def pointer(self, pointer: str, resolver: Resolver[D]) -> Resolved[D]: + """ + Resolve the given JSON pointer. + + Raises: + + `exceptions.PointerToNowhere` + + if the pointer points to a location not present in the document + + """ + if not pointer: + return Resolved(contents=self.contents, resolver=resolver) + + contents = self.contents + segments: list[int | str] = [] + for segment in unquote(pointer[1:]).split("/"): + if isinstance(contents, Sequence): + segment = int(segment) + else: + segment = segment.replace("~1", "/").replace("~0", "~") + try: + contents = contents[segment] # type: ignore[reportUnknownArgumentType] + except LookupError as lookup_error: + error = exceptions.PointerToNowhere(ref=pointer, resource=self) + raise error from lookup_error + + segments.append(segment) + last = resolver + resolver = self._specification.maybe_in_subresource( + segments=segments, + resolver=resolver, + subresource=self._specification.create_resource(contents), + ) + if resolver is not last: + segments = [] + return Resolved(contents=contents, resolver=resolver) # type: ignore[reportUnknownArgumentType] + + +def _fail_to_retrieve(uri: URI): + raise exceptions.NoSuchResource(ref=uri) + + +@frozen +class Registry(Mapping[URI, Resource[D]]): + r""" + A registry of `Resource`\ s, each identified by their canonical URIs. 
+ + Registries store a collection of in-memory resources, and optionally + enable additional resources which may be stored elsewhere (e.g. in a + database, a separate set of files, over the network, etc.). + + They also lazily walk their known resources, looking for subresources + within them. In other words, subresources contained within any added + resources will be retrievable via their own IDs (though this discovery of + subresources will be delayed until necessary). + + Registries are immutable, and their methods return new instances of the + registry with the additional resources added to them. + + The ``retrieve`` argument can be used to configure retrieval of resources + dynamically, either over the network, from a database, or the like. + Pass it a callable which will be called if any URI not present in the + registry is accessed. It must either return a `Resource` or else raise a + `NoSuchResource` exception indicating that the resource does not exist + even according to the retrieval logic. + """ + + _resources: HashTrieMap[URI, Resource[D]] = field( + default=HashTrieMap(), + converter=HashTrieMap.convert, # type: ignore[reportGeneralTypeIssues] + alias="resources", + ) + _anchors: HashTrieMap[tuple[URI, str], AnchorType[D]] = HashTrieMap() + _uncrawled: HashTrieSet[URI] = EMPTY_UNCRAWLED + _retrieve: Retrieve[D] = field(default=_fail_to_retrieve, alias="retrieve") + + def __getitem__(self, uri: URI) -> Resource[D]: + """ + Return the (already crawled) `Resource` identified by the given URI. + """ + try: + return self._resources[uri.rstrip("#")] + except KeyError: + raise exceptions.NoSuchResource(ref=uri) from None + + def __iter__(self) -> Iterator[URI]: + """ + Iterate over all crawled URIs in the registry. + """ + return iter(self._resources) + + def __len__(self) -> int: + """ + Count the total number of fully crawled resources in this registry. 
+ """ + return len(self._resources) + + def __rmatmul__( + self, + new: Resource[D] | Iterable[Resource[D]], + ) -> Registry[D]: + """ + Create a new registry with resource(s) added using their internal IDs. + + Resources must have a internal IDs (e.g. the :kw:`$id` keyword in + modern JSON Schema versions), otherwise an error will be raised. + + Both a single resource as well as an iterable of resources works, i.e.: + + * ``resource @ registry`` or + + * ``[iterable, of, multiple, resources] @ registry`` + + which -- again, assuming the resources have internal IDs -- is + equivalent to calling `Registry.with_resources` as such: + + .. code:: python + + registry.with_resources( + (resource.id(), resource) for resource in new_resources + ) + + Raises: + + `NoInternalID` + + if the resource(s) in fact do not have IDs + + """ + if isinstance(new, Resource): + new = (new,) + + resources = self._resources + uncrawled = self._uncrawled + for resource in new: + id = resource.id() + if id is None: + raise exceptions.NoInternalID(resource=resource) + uncrawled = uncrawled.insert(id) + resources = resources.insert(id, resource) + return evolve(self, resources=resources, uncrawled=uncrawled) + + def __repr__(self) -> str: + size = len(self) + pluralized = "resource" if size == 1 else "resources" + if self._uncrawled: + uncrawled = len(self._uncrawled) + if uncrawled == size: + summary = f"uncrawled {pluralized}" + else: + summary = f"{pluralized}, {uncrawled} uncrawled" + else: + summary = f"{pluralized}" + return f"" + + def get_or_retrieve(self, uri: URI) -> Retrieved[D, Resource[D]]: + """ + Get a resource from the registry, crawling or retrieving if necessary. + + May involve crawling to find the given URI if it is not already known, + so the returned object is a `Retrieved` object which contains both the + resource value as well as the registry which ultimately contained it. 
+ """ + resource = self._resources.get(uri) + if resource is not None: + return Retrieved(registry=self, value=resource) + + registry = self.crawl() + resource = registry._resources.get(uri) + if resource is not None: + return Retrieved(registry=registry, value=resource) + + try: + resource = registry._retrieve(uri) + except ( + exceptions.CannotDetermineSpecification, + exceptions.NoSuchResource, + ): + raise + except Exception as error: + raise exceptions.Unretrievable(ref=uri) from error + else: + registry = registry.with_resource(uri, resource) + return Retrieved(registry=registry, value=resource) + + def remove(self, uri: URI): + """ + Return a registry with the resource identified by a given URI removed. + """ + if uri not in self._resources: + raise exceptions.NoSuchResource(ref=uri) + + return evolve( + self, + resources=self._resources.remove(uri), + uncrawled=self._uncrawled.discard(uri), + anchors=HashTrieMap( + (k, v) for k, v in self._anchors.items() if k[0] != uri + ), + ) + + def anchor(self, uri: URI, name: str): + """ + Retrieve a given anchor from a resource which must already be crawled. + """ + value = self._anchors.get((uri, name)) + if value is not None: + return Retrieved(value=value, registry=self) + + registry = self.crawl() + value = registry._anchors.get((uri, name)) + if value is not None: + return Retrieved(value=value, registry=registry) + + resource = self[uri] + canonical_uri = resource.id() + if canonical_uri is not None: + value = registry._anchors.get((canonical_uri, name)) + if value is not None: + return Retrieved(value=value, registry=registry) + + if "/" in name: + raise exceptions.InvalidAnchor( + ref=uri, + resource=resource, + anchor=name, + ) + raise exceptions.NoSuchAnchor(ref=uri, resource=resource, anchor=name) + + def contents(self, uri: URI) -> D: + """ + Retrieve the (already crawled) contents identified by the given URI. 
+ """ + return self[uri].contents + + def crawl(self) -> Registry[D]: + """ + Crawl all added resources, discovering subresources. + """ + resources = self._resources + anchors = self._anchors + uncrawled = [(uri, resources[uri]) for uri in self._uncrawled] + while uncrawled: + uri, resource = uncrawled.pop() + + id = resource.id() + if id is not None: + uri = urljoin(uri, id) + resources = resources.insert(uri, resource) + for each in resource.anchors(): + anchors = anchors.insert((uri, each.name), each) + uncrawled.extend((uri, each) for each in resource.subresources()) + return evolve( + self, + resources=resources, + anchors=anchors, + uncrawled=EMPTY_UNCRAWLED, + ) + + def with_resource(self, uri: URI, resource: Resource[D]): + """ + Add the given `Resource` to the registry, without crawling it. + """ + return self.with_resources([(uri, resource)]) + + def with_resources( + self, + pairs: Iterable[tuple[URI, Resource[D]]], + ) -> Registry[D]: + r""" + Add the given `Resource`\ s to the registry, without crawling them. + """ + resources = self._resources + uncrawled = self._uncrawled + for uri, resource in pairs: + # Empty fragment URIs are equivalent to URIs without the fragment. + # TODO: Is this true for non JSON Schema resources? Probably not. + uri = uri.rstrip("#") + uncrawled = uncrawled.insert(uri) + resources = resources.insert(uri, resource) + return evolve(self, resources=resources, uncrawled=uncrawled) + + def with_contents( + self, + pairs: Iterable[tuple[URI, D]], + **kwargs: Any, + ) -> Registry[D]: + r""" + Add the given contents to the registry, autodetecting when necessary. + """ + return self.with_resources( + (uri, Resource.from_contents(each, **kwargs)) + for uri, each in pairs + ) + + def combine(self, *registries: Registry[D]) -> Registry[D]: + """ + Combine together one or more other registries, producing a unified one. 
+ """ + if registries == (self,): + return self + resources = self._resources + anchors = self._anchors + uncrawled = self._uncrawled + retrieve = self._retrieve + for registry in registries: + resources = resources.update(registry._resources) + anchors = anchors.update(registry._anchors) + uncrawled = uncrawled.update(registry._uncrawled) + + if registry._retrieve is not _fail_to_retrieve: # type: ignore[reportUnnecessaryComparison] ??? + if registry._retrieve is not retrieve is not _fail_to_retrieve: # type: ignore[reportUnnecessaryComparison] ??? + raise ValueError( # noqa: TRY003 + "Cannot combine registries with conflicting retrieval " + "functions.", + ) + retrieve = registry._retrieve + return evolve( + self, + anchors=anchors, + resources=resources, + uncrawled=uncrawled, + retrieve=retrieve, + ) + + def resolver(self, base_uri: URI = "") -> Resolver[D]: + """ + Return a `Resolver` which resolves references against this registry. + """ + return Resolver(base_uri=base_uri, registry=self) + + def resolver_with_root(self, resource: Resource[D]) -> Resolver[D]: + """ + Return a `Resolver` with a specific root resource. + """ + uri = resource.id() or "" + return Resolver( + base_uri=uri, + registry=self.with_resource(uri, resource), + ) + + +#: An anchor or resource. +AnchorOrResource = TypeVar( + "AnchorOrResource", + AnchorType[Any], + Resource[Any], + default=Resource[Any], +) + + +@frozen +class Retrieved(Generic[D, AnchorOrResource]): + """ + A value retrieved from a `Registry`. + """ + + value: AnchorOrResource + registry: Registry[D] + + +@frozen +class Resolved(Generic[D]): + """ + A reference resolved to its contents by a `Resolver`. + """ + + contents: D + resolver: Resolver[D] + + +@frozen +class Resolver(Generic[D]): + """ + A reference resolver. + + Resolvers help resolve references (including relative ones) by + pairing a fixed base URI with a `Registry`. 
    def lookup(self, ref: URI) -> Resolved[D]:
        """
        Resolve the given reference to the resource it points to.

        Raises:

            `exceptions.Unresolvable`

                or a subclass thereof (see below) if the reference isn't
                resolvable

            `exceptions.NoSuchAnchor`

                if the reference is to a URI where a resource exists but
                contains a plain name fragment which does not exist within
                the resource

            `exceptions.PointerToNowhere`

                if the reference is to a URI where a resource exists but
                contains a JSON pointer to a location within the resource
                that does not exist

        """
        if ref.startswith("#"):
            # A bare fragment refers into the current base URI's resource.
            uri, fragment = self._base_uri, ref[1:]
        else:
            uri, fragment = urldefrag(urljoin(self._base_uri, ref))
        try:
            retrieved = self._registry.get_or_retrieve(uri)
        except exceptions.NoSuchResource:
            raise exceptions.Unresolvable(ref=ref) from None
        except exceptions.Unretrievable as error:
            raise exceptions.Unresolvable(ref=ref) from error

        if fragment.startswith("/"):
            # Slash-prefixed fragments are JSON pointers into the resource.
            resolver = self._evolve(registry=retrieved.registry, base_uri=uri)
            return retrieved.value.pointer(pointer=fragment, resolver=resolver)

        if fragment:
            # Any other non-empty fragment is a plain-name anchor lookup.
            retrieved = retrieved.registry.anchor(uri, fragment)
            resolver = self._evolve(registry=retrieved.registry, base_uri=uri)
            return retrieved.value.resolve(resolver=resolver)

        # No fragment: the retrieved resource itself is the target.
        resolver = self._evolve(registry=retrieved.registry, base_uri=uri)
        return Resolved(contents=retrieved.value.contents, resolver=resolver)
+""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any + +import attrs + +from referencing._attrs import frozen + +if TYPE_CHECKING: + from referencing import Resource + from referencing.typing import URI + + +@frozen +class NoSuchResource(KeyError): + """ + The given URI is not present in a registry. + + Unlike most exceptions, this class *is* intended to be publicly + instantiable and *is* part of the public API of the package. + """ + + ref: URI + + def __eq__(self, other: object) -> bool: + if self.__class__ is not other.__class__: + return NotImplemented + return attrs.astuple(self) == attrs.astuple(other) + + def __hash__(self) -> int: + return hash(attrs.astuple(self)) + + +@frozen +class NoInternalID(Exception): + """ + A resource has no internal ID, but one is needed. + + E.g. in modern JSON Schema drafts, this is the :kw:`$id` keyword. + + One might be needed if a resource was to-be added to a registry but no + other URI is available, and the resource doesn't declare its canonical URI. + """ + + resource: Resource[Any] + + def __eq__(self, other: object) -> bool: + if self.__class__ is not other.__class__: + return NotImplemented + return attrs.astuple(self) == attrs.astuple(other) + + def __hash__(self) -> int: + return hash(attrs.astuple(self)) + + +@frozen +class Unretrievable(KeyError): + """ + The given URI is not present in a registry, and retrieving it failed. + """ + + ref: URI + + def __eq__(self, other: object) -> bool: + if self.__class__ is not other.__class__: + return NotImplemented + return attrs.astuple(self) == attrs.astuple(other) + + def __hash__(self) -> int: + return hash(attrs.astuple(self)) + + +@frozen +class CannotDetermineSpecification(Exception): + """ + Attempting to detect the appropriate `Specification` failed. + + This happens if no discernible information is found in the contents of the + new resource which would help identify it. 
+ """ + + contents: Any + + def __eq__(self, other: object) -> bool: + if self.__class__ is not other.__class__: + return NotImplemented + return attrs.astuple(self) == attrs.astuple(other) + + def __hash__(self) -> int: + return hash(attrs.astuple(self)) + + +@attrs.frozen # Because here we allow subclassing below. +class Unresolvable(Exception): + """ + A reference was unresolvable. + """ + + ref: URI + + def __eq__(self, other: object) -> bool: + if self.__class__ is not other.__class__: + return NotImplemented + return attrs.astuple(self) == attrs.astuple(other) + + def __hash__(self) -> int: + return hash(attrs.astuple(self)) + + +@frozen +class PointerToNowhere(Unresolvable): + """ + A JSON Pointer leads to a part of a document that does not exist. + """ + + resource: Resource[Any] + + def __str__(self) -> str: + msg = f"{self.ref!r} does not exist within {self.resource.contents!r}" + if self.ref == "/": + msg += ( + ". The pointer '/' is a valid JSON Pointer but it points to " + "an empty string property ''. If you intended to point " + "to the entire resource, you should use '#'." + ) + return msg + + +@frozen +class NoSuchAnchor(Unresolvable): + """ + An anchor does not exist within a particular resource. + """ + + resource: Resource[Any] + anchor: str + + def __str__(self) -> str: + return ( + f"{self.anchor!r} does not exist within {self.resource.contents!r}" + ) + + +@frozen +class InvalidAnchor(Unresolvable): + """ + An anchor which could never exist in a resource was dereferenced. + + It is somehow syntactically invalid. + """ + + resource: Resource[Any] + anchor: str + + def __str__(self) -> str: + return ( + f"'#{self.anchor}' is not a valid anchor, neither as a " + "plain name anchor nor as a JSON Pointer. You may have intended " + f"to use '#/{self.anchor}', as the slash is required *before each " + "segment* of a JSON pointer." 
+ ) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/referencing/jsonschema.py b/evalkit_cambrian/lib/python3.10/site-packages/referencing/jsonschema.py new file mode 100644 index 0000000000000000000000000000000000000000..169e109d914e558ec3693cef5ecdcd4dc82aedaa --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/referencing/jsonschema.py @@ -0,0 +1,642 @@ +""" +Referencing implementations for JSON Schema specs (historic & current). +""" + +from __future__ import annotations + +from collections.abc import Iterable, Sequence, Set +from typing import Any, Union + +from referencing import Anchor, Registry, Resource, Specification, exceptions +from referencing._attrs import frozen +from referencing._core import ( + _UNSET, # type: ignore[reportPrivateUsage] + Resolved as _Resolved, + Resolver as _Resolver, + _Unset, # type: ignore[reportPrivateUsage] +) +from referencing.typing import URI, Anchor as AnchorType, Mapping + +#: A JSON Schema which is a JSON object +ObjectSchema = Mapping[str, Any] + +#: A JSON Schema of any kind +Schema = Union[bool, ObjectSchema] + +#: A Resource whose contents are JSON Schemas +SchemaResource = Resource[Schema] + +#: A JSON Schema Registry +SchemaRegistry = Registry[Schema] + +#: The empty JSON Schema Registry +EMPTY_REGISTRY: SchemaRegistry = Registry() + + +@frozen +class UnknownDialect(Exception): + """ + A dialect identifier was found for a dialect unknown by this library. + + If it's a custom ("unofficial") dialect, be sure you've registered it. 
+ """ + + uri: URI + + +def _dollar_id(contents: Schema) -> URI | None: + if isinstance(contents, bool): + return + return contents.get("$id") + + +def _legacy_dollar_id(contents: Schema) -> URI | None: + if isinstance(contents, bool) or "$ref" in contents: + return + id = contents.get("$id") + if id is not None and not id.startswith("#"): + return id + + +def _legacy_id(contents: ObjectSchema) -> URI | None: + if "$ref" in contents: + return + id = contents.get("id") + if id is not None and not id.startswith("#"): + return id + + +def _anchor( + specification: Specification[Schema], + contents: Schema, +) -> Iterable[AnchorType[Schema]]: + if isinstance(contents, bool): + return + anchor = contents.get("$anchor") + if anchor is not None: + yield Anchor( + name=anchor, + resource=specification.create_resource(contents), + ) + + dynamic_anchor = contents.get("$dynamicAnchor") + if dynamic_anchor is not None: + yield DynamicAnchor( + name=dynamic_anchor, + resource=specification.create_resource(contents), + ) + + +def _anchor_2019( + specification: Specification[Schema], + contents: Schema, +) -> Iterable[Anchor[Schema]]: + if isinstance(contents, bool): + return [] + anchor = contents.get("$anchor") + if anchor is None: + return [] + return [ + Anchor( + name=anchor, + resource=specification.create_resource(contents), + ), + ] + + +def _legacy_anchor_in_dollar_id( + specification: Specification[Schema], + contents: Schema, +) -> Iterable[Anchor[Schema]]: + if isinstance(contents, bool): + return [] + id = contents.get("$id", "") + if not id.startswith("#"): + return [] + return [ + Anchor( + name=id[1:], + resource=specification.create_resource(contents), + ), + ] + + +def _legacy_anchor_in_id( + specification: Specification[ObjectSchema], + contents: ObjectSchema, +) -> Iterable[Anchor[ObjectSchema]]: + id = contents.get("id", "") + if not id.startswith("#"): + return [] + return [ + Anchor( + name=id[1:], + resource=specification.create_resource(contents), + ), + 
] + + +def _subresources_of( + in_value: Set[str] = frozenset(), + in_subvalues: Set[str] = frozenset(), + in_subarray: Set[str] = frozenset(), +): + """ + Create a callable returning JSON Schema specification-style subschemas. + + Relies on specifying the set of keywords containing subschemas in their + values, in a subobject's values, or in a subarray. + """ + + def subresources_of(contents: Schema) -> Iterable[ObjectSchema]: + if isinstance(contents, bool): + return + for each in in_value: + if each in contents: + yield contents[each] + for each in in_subarray: + if each in contents: + yield from contents[each] + for each in in_subvalues: + if each in contents: + yield from contents[each].values() + + return subresources_of + + +def _subresources_of_with_crazy_items( + in_value: Set[str] = frozenset(), + in_subvalues: Set[str] = frozenset(), + in_subarray: Set[str] = frozenset(), +): + """ + Specifically handle older drafts where there are some funky keywords. + """ + + def subresources_of(contents: Schema) -> Iterable[ObjectSchema]: + if isinstance(contents, bool): + return + for each in in_value: + if each in contents: + yield contents[each] + for each in in_subarray: + if each in contents: + yield from contents[each] + for each in in_subvalues: + if each in contents: + yield from contents[each].values() + + items = contents.get("items") + if items is not None: + if isinstance(items, Sequence): + yield from items + else: + yield items + + return subresources_of + + +def _subresources_of_with_crazy_items_dependencies( + in_value: Set[str] = frozenset(), + in_subvalues: Set[str] = frozenset(), + in_subarray: Set[str] = frozenset(), +): + """ + Specifically handle older drafts where there are some funky keywords. 
+ """ + + def subresources_of(contents: Schema) -> Iterable[ObjectSchema]: + if isinstance(contents, bool): + return + for each in in_value: + if each in contents: + yield contents[each] + for each in in_subarray: + if each in contents: + yield from contents[each] + for each in in_subvalues: + if each in contents: + yield from contents[each].values() + + items = contents.get("items") + if items is not None: + if isinstance(items, Sequence): + yield from items + else: + yield items + dependencies = contents.get("dependencies") + if dependencies is not None: + values = iter(dependencies.values()) + value = next(values, None) + if isinstance(value, Mapping): + yield value + yield from values + + return subresources_of + + +def _subresources_of_with_crazy_aP_items_dependencies( + in_value: Set[str] = frozenset(), + in_subvalues: Set[str] = frozenset(), + in_subarray: Set[str] = frozenset(), +): + """ + Specifically handle even older drafts where there are some funky keywords. + """ + + def subresources_of(contents: ObjectSchema) -> Iterable[ObjectSchema]: + for each in in_value: + if each in contents: + yield contents[each] + for each in in_subarray: + if each in contents: + yield from contents[each] + for each in in_subvalues: + if each in contents: + yield from contents[each].values() + + items = contents.get("items") + if items is not None: + if isinstance(items, Sequence): + yield from items + else: + yield items + dependencies = contents.get("dependencies") + if dependencies is not None: + values = iter(dependencies.values()) + value = next(values, None) + if isinstance(value, Mapping): + yield value + yield from values + + for each in "additionalItems", "additionalProperties": + value = contents.get(each) + if isinstance(value, Mapping): + yield value + + return subresources_of + + +def _maybe_in_subresource( + in_value: Set[str] = frozenset(), + in_subvalues: Set[str] = frozenset(), + in_subarray: Set[str] = frozenset(), +): + in_child = in_subvalues | 
def _maybe_in_subresource_crazy_items_dependencies(
    in_value: Set[str] = frozenset(),
    in_subvalues: Set[str] = frozenset(),
    in_subarray: Set[str] = frozenset(),
):
    """
    Like the plain variant, but aware that in older drafts ``items`` and
    ``dependencies`` hold a subschema directly only when their value is a
    mapping.
    """
    in_child = in_subvalues | in_subarray

    def maybe_in_subresource(
        segments: Sequence[int | str],
        resolver: _Resolver[Any],
        subresource: Resource[Any],
    ) -> _Resolver[Any]:
        _segments = iter(segments)
        for segment in _segments:
            # ``items``/``dependencies`` contain a subresource directly
            # (rather than an array/map of them) when the walked-to value
            # is a mapping.
            if segment in {"items", "dependencies"} and isinstance(
                subresource.contents,
                Mapping,
            ):
                return resolver.in_subresource(subresource)
            # Keywords in ``in_value`` hold a subresource directly; those
            # in ``in_child`` hold one a level deeper, so a further segment
            # must follow (hence the lookahead via ``next``).
            if segment not in in_value and (
                segment not in in_child or next(_segments, None) is None
            ):
                return resolver
        return resolver.in_subresource(subresource)

    return maybe_in_subresource
subresources_of=_subresources_of( + in_value={ + "additionalProperties", + "contains", + "contentSchema", + "else", + "if", + "items", + "not", + "propertyNames", + "then", + "unevaluatedItems", + "unevaluatedProperties", + }, + in_subarray={"allOf", "anyOf", "oneOf", "prefixItems"}, + in_subvalues={ + "$defs", + "definitions", + "dependentSchemas", + "patternProperties", + "properties", + }, + ), + anchors_in=_anchor, + maybe_in_subresource=_maybe_in_subresource( + in_value={ + "additionalProperties", + "contains", + "contentSchema", + "else", + "if", + "items", + "not", + "propertyNames", + "then", + "unevaluatedItems", + "unevaluatedProperties", + }, + in_subarray={"allOf", "anyOf", "oneOf", "prefixItems"}, + in_subvalues={ + "$defs", + "definitions", + "dependentSchemas", + "patternProperties", + "properties", + }, + ), +) +#: JSON Schema draft 2019-09 +DRAFT201909 = Specification( + name="draft2019-09", + id_of=_dollar_id, + subresources_of=_subresources_of_with_crazy_items( + in_value={ + "additionalItems", + "additionalProperties", + "contains", + "contentSchema", + "else", + "if", + "not", + "propertyNames", + "then", + "unevaluatedItems", + "unevaluatedProperties", + }, + in_subarray={"allOf", "anyOf", "oneOf"}, + in_subvalues={ + "$defs", + "definitions", + "dependentSchemas", + "patternProperties", + "properties", + }, + ), + anchors_in=_anchor_2019, + maybe_in_subresource=_maybe_in_subresource_crazy_items( + in_value={ + "additionalItems", + "additionalProperties", + "contains", + "contentSchema", + "else", + "if", + "not", + "propertyNames", + "then", + "unevaluatedItems", + "unevaluatedProperties", + }, + in_subarray={"allOf", "anyOf", "oneOf"}, + in_subvalues={ + "$defs", + "definitions", + "dependentSchemas", + "patternProperties", + "properties", + }, + ), +) +#: JSON Schema draft 7 +DRAFT7 = Specification( + name="draft-07", + id_of=_legacy_dollar_id, + subresources_of=_subresources_of_with_crazy_items_dependencies( + in_value={ + 
"additionalItems", + "additionalProperties", + "contains", + "else", + "if", + "not", + "propertyNames", + "then", + }, + in_subarray={"allOf", "anyOf", "oneOf"}, + in_subvalues={"definitions", "patternProperties", "properties"}, + ), + anchors_in=_legacy_anchor_in_dollar_id, + maybe_in_subresource=_maybe_in_subresource_crazy_items_dependencies( + in_value={ + "additionalItems", + "additionalProperties", + "contains", + "else", + "if", + "not", + "propertyNames", + "then", + }, + in_subarray={"allOf", "anyOf", "oneOf"}, + in_subvalues={"definitions", "patternProperties", "properties"}, + ), +) +#: JSON Schema draft 6 +DRAFT6 = Specification( + name="draft-06", + id_of=_legacy_dollar_id, + subresources_of=_subresources_of_with_crazy_items_dependencies( + in_value={ + "additionalItems", + "additionalProperties", + "contains", + "not", + "propertyNames", + }, + in_subarray={"allOf", "anyOf", "oneOf"}, + in_subvalues={"definitions", "patternProperties", "properties"}, + ), + anchors_in=_legacy_anchor_in_dollar_id, + maybe_in_subresource=_maybe_in_subresource_crazy_items_dependencies( + in_value={ + "additionalItems", + "additionalProperties", + "contains", + "not", + "propertyNames", + }, + in_subarray={"allOf", "anyOf", "oneOf"}, + in_subvalues={"definitions", "patternProperties", "properties"}, + ), +) +#: JSON Schema draft 4 +DRAFT4 = Specification( + name="draft-04", + id_of=_legacy_id, + subresources_of=_subresources_of_with_crazy_aP_items_dependencies( + in_value={"not"}, + in_subarray={"allOf", "anyOf", "oneOf"}, + in_subvalues={"definitions", "patternProperties", "properties"}, + ), + anchors_in=_legacy_anchor_in_id, + maybe_in_subresource=_maybe_in_subresource_crazy_items_dependencies( + in_value={"additionalItems", "additionalProperties", "not"}, + in_subarray={"allOf", "anyOf", "oneOf"}, + in_subvalues={"definitions", "patternProperties", "properties"}, + ), +) +#: JSON Schema draft 3 +DRAFT3 = Specification( + name="draft-03", + id_of=_legacy_id, + 
subresources_of=_subresources_of_with_crazy_aP_items_dependencies( + in_subarray={"extends"}, + in_subvalues={"definitions", "patternProperties", "properties"}, + ), + anchors_in=_legacy_anchor_in_id, + maybe_in_subresource=_maybe_in_subresource_crazy_items_dependencies( + in_value={"additionalItems", "additionalProperties"}, + in_subarray={"extends"}, + in_subvalues={"definitions", "patternProperties", "properties"}, + ), +) + + +_SPECIFICATIONS: Registry[Specification[Schema]] = Registry( + { + dialect_id: Resource.opaque(specification) + for dialect_id, specification in [ + ("https://json-schema.org/draft/2020-12/schema", DRAFT202012), + ("https://json-schema.org/draft/2019-09/schema", DRAFT201909), + ("http://json-schema.org/draft-07/schema", DRAFT7), + ("http://json-schema.org/draft-06/schema", DRAFT6), + ("http://json-schema.org/draft-04/schema", DRAFT4), + ("http://json-schema.org/draft-03/schema", DRAFT3), + ] + }, +) + + +def specification_with( + dialect_id: URI, + default: Specification[Any] | _Unset = _UNSET, +) -> Specification[Any]: + """ + Retrieve the `Specification` with the given dialect identifier. + + Raises: + + `UnknownDialect` + + if the given ``dialect_id`` isn't known + + """ + resource = _SPECIFICATIONS.get(dialect_id.rstrip("#")) + if resource is not None: + return resource.contents + if default is _UNSET: + raise UnknownDialect(dialect_id) + return default + + +@frozen +class DynamicAnchor: + """ + Dynamic anchors, introduced in draft 2020. + """ + + name: str + resource: SchemaResource + + def resolve(self, resolver: _Resolver[Schema]) -> _Resolved[Schema]: + """ + Resolve this anchor dynamically. 
+ """ + last = self.resource + for uri, registry in resolver.dynamic_scope(): + try: + anchor = registry.anchor(uri, self.name).value + except exceptions.NoSuchAnchor: + continue + if isinstance(anchor, DynamicAnchor): + last = anchor.resource + return _Resolved( + contents=last.contents, + resolver=resolver.in_subresource(last), + ) + + +def lookup_recursive_ref(resolver: _Resolver[Schema]) -> _Resolved[Schema]: + """ + Recursive references (via recursive anchors), present only in draft 2019. + + As per the 2019 specification (§ 8.2.4.2.1), only the ``#`` recursive + reference is supported (and is therefore assumed to be the relevant + reference). + """ + resolved = resolver.lookup("#") + if isinstance(resolved.contents, Mapping) and resolved.contents.get( + "$recursiveAnchor", + ): + for uri, _ in resolver.dynamic_scope(): + next_resolved = resolver.lookup(uri) + if not isinstance( + next_resolved.contents, + Mapping, + ) or not next_resolved.contents.get("$recursiveAnchor"): + break + resolved = next_resolved + return resolved diff --git a/evalkit_cambrian/lib/python3.10/site-packages/referencing/py.typed b/evalkit_cambrian/lib/python3.10/site-packages/referencing/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_cambrian/lib/python3.10/site-packages/referencing/retrieval.py b/evalkit_cambrian/lib/python3.10/site-packages/referencing/retrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..53e0512b199fb014d11075ee3047c848ed7c2d69 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/referencing/retrieval.py @@ -0,0 +1,92 @@ +""" +Helpers related to (dynamic) resource retrieval. 
+""" + +from __future__ import annotations + +from functools import lru_cache +from typing import TYPE_CHECKING, Callable +import json + +try: + from typing_extensions import TypeVar +except ImportError: # pragma: no cover + from typing import TypeVar + +from referencing import Resource + +if TYPE_CHECKING: + from referencing.typing import URI, D, Retrieve + +#: A serialized document (e.g. a JSON string) +_T = TypeVar("_T", default=str) + + +def to_cached_resource( + cache: Callable[[Retrieve[D]], Retrieve[D]] | None = None, + loads: Callable[[_T], D] = json.loads, + from_contents: Callable[[D], Resource[D]] = Resource.from_contents, +) -> Callable[[Callable[[URI], _T]], Retrieve[D]]: + """ + Create a retriever which caches its return values from a simpler callable. + + Takes a function which returns things like serialized JSON (strings) and + returns something suitable for passing to `Registry` as a retrieve + function. + + This decorator both reduces a small bit of boilerplate for a common case + (deserializing JSON from strings and creating `Resource` objects from the + result) as well as makes the probable need for caching a bit easier. + Retrievers which otherwise do expensive operations (like hitting the + network) might otherwise be called repeatedly. + + Examples + -------- + + .. testcode:: + + from referencing import Registry + from referencing.typing import URI + import referencing.retrieval + + + @referencing.retrieval.to_cached_resource() + def retrieve(uri: URI): + print(f"Retrieved {uri}") + + # Normally, go get some expensive JSON from the network, a file ... 
+ return ''' + { + "$schema": "https://json-schema.org/draft/2020-12/schema", + "foo": "bar" + } + ''' + + one = Registry(retrieve=retrieve).get_or_retrieve("urn:example:foo") + print(one.value.contents["foo"]) + + # Retrieving the same URI again reuses the same value (and thus doesn't + # print another retrieval message here) + two = Registry(retrieve=retrieve).get_or_retrieve("urn:example:foo") + print(two.value.contents["foo"]) + + .. testoutput:: + + Retrieved urn:example:foo + bar + bar + + """ + if cache is None: + cache = lru_cache(maxsize=None) + + def decorator(retrieve: Callable[[URI], _T]): + @cache + def cached_retrieve(uri: URI): + response = retrieve(uri) + contents = loads(response) + return from_contents(contents) + + return cached_retrieve + + return decorator diff --git a/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/__init__.py b/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/__pycache__/__init__.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..169cadb279f3cef952b17adaa2284b1f512afac2 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/__pycache__/test_core.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/__pycache__/test_core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f6a2b6831d52115e2937e113f4c5a4c3ac06d35 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/__pycache__/test_core.cpython-310.pyc differ 
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/__pycache__/test_exceptions.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/__pycache__/test_exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6542f7ab6c366e89710ddc1e26959bf42d95429 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/__pycache__/test_exceptions.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/__pycache__/test_jsonschema.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/__pycache__/test_jsonschema.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bce47e2a0b4dcebd9e23e8bd24294fe2041cf71d Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/__pycache__/test_jsonschema.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/__pycache__/test_referencing_suite.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/__pycache__/test_referencing_suite.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac062fd83b249aaa2e9463b223cc754e1ad04be0 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/__pycache__/test_referencing_suite.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/__pycache__/test_retrieval.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/__pycache__/test_retrieval.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5518e4199c95113c379a95ddfad238d3eb586555 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/__pycache__/test_retrieval.cpython-310.pyc differ diff --git 
a/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/test_core.py b/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/test_core.py new file mode 100644 index 0000000000000000000000000000000000000000..3edddbc3d96581e1c74069baa873900495366bab --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/test_core.py @@ -0,0 +1,1057 @@ +from rpds import HashTrieMap +import pytest + +from referencing import Anchor, Registry, Resource, Specification, exceptions +from referencing.jsonschema import DRAFT202012 + +ID_AND_CHILDREN = Specification( + name="id-and-children", + id_of=lambda contents: contents.get("ID"), + subresources_of=lambda contents: contents.get("children", []), + anchors_in=lambda specification, contents: [ + Anchor( + name=name, + resource=specification.create_resource(contents=each), + ) + for name, each in contents.get("anchors", {}).items() + ], + maybe_in_subresource=lambda segments, resolver, subresource: ( + resolver.in_subresource(subresource) + if not len(segments) % 2 + and all(each == "children" for each in segments[::2]) + else resolver + ), +) + + +def blow_up(uri): # pragma: no cover + """ + A retriever suitable for use in tests which expect it never to be used. + """ + raise RuntimeError("This retrieve function expects to never be called!") + + +class TestRegistry: + def test_with_resource(self): + """ + Adding a resource to the registry then allows re-retrieving it. + """ + + resource = Resource.opaque(contents={"foo": "bar"}) + uri = "urn:example" + registry = Registry().with_resource(uri=uri, resource=resource) + assert registry[uri] is resource + + def test_with_resources(self): + """ + Adding multiple resources to the registry is like adding each one. 
+ """ + + one = Resource.opaque(contents={}) + two = Resource(contents={"foo": "bar"}, specification=ID_AND_CHILDREN) + registry = Registry().with_resources( + [ + ("http://example.com/1", one), + ("http://example.com/foo/bar", two), + ], + ) + assert registry == Registry().with_resource( + uri="http://example.com/1", + resource=one, + ).with_resource( + uri="http://example.com/foo/bar", + resource=two, + ) + + def test_matmul_resource(self): + uri = "urn:example:resource" + resource = ID_AND_CHILDREN.create_resource({"ID": uri, "foo": 12}) + registry = resource @ Registry() + assert registry == Registry().with_resource(uri, resource) + + def test_matmul_many_resources(self): + one_uri = "urn:example:one" + one = ID_AND_CHILDREN.create_resource({"ID": one_uri, "foo": 12}) + + two_uri = "urn:example:two" + two = ID_AND_CHILDREN.create_resource({"ID": two_uri, "foo": 12}) + + registry = [one, two] @ Registry() + assert registry == Registry().with_resources( + [(one_uri, one), (two_uri, two)], + ) + + def test_matmul_resource_without_id(self): + resource = Resource.opaque(contents={"foo": "bar"}) + with pytest.raises(exceptions.NoInternalID) as e: + resource @ Registry() + assert e.value == exceptions.NoInternalID(resource=resource) + + def test_with_contents_from_json_schema(self): + uri = "urn:example" + schema = {"$schema": "https://json-schema.org/draft/2020-12/schema"} + registry = Registry().with_contents([(uri, schema)]) + + expected = Resource(contents=schema, specification=DRAFT202012) + assert registry[uri] == expected + + def test_with_contents_and_default_specification(self): + uri = "urn:example" + registry = Registry().with_contents( + [(uri, {"foo": "bar"})], + default_specification=Specification.OPAQUE, + ) + assert registry[uri] == Resource.opaque({"foo": "bar"}) + + def test_len(self): + total = 5 + registry = Registry().with_contents( + [(str(i), {"foo": "bar"}) for i in range(total)], + default_specification=Specification.OPAQUE, + ) + assert 
len(registry) == total + + def test_bool_empty(self): + assert not Registry() + + def test_bool_not_empty(self): + registry = Registry().with_contents( + [(str(i), {"foo": "bar"}) for i in range(3)], + default_specification=Specification.OPAQUE, + ) + assert registry + + def test_iter(self): + registry = Registry().with_contents( + [(str(i), {"foo": "bar"}) for i in range(8)], + default_specification=Specification.OPAQUE, + ) + assert set(registry) == {str(i) for i in range(8)} + + def test_crawl_still_has_top_level_resource(self): + resource = Resource.opaque({"foo": "bar"}) + uri = "urn:example" + registry = Registry({uri: resource}).crawl() + assert registry[uri] is resource + + def test_crawl_finds_a_subresource(self): + child_id = "urn:child" + root = ID_AND_CHILDREN.create_resource( + {"ID": "urn:root", "children": [{"ID": child_id, "foo": 12}]}, + ) + registry = root @ Registry() + with pytest.raises(LookupError): + registry[child_id] + + expected = ID_AND_CHILDREN.create_resource({"ID": child_id, "foo": 12}) + assert registry.crawl()[child_id] == expected + + def test_crawl_finds_anchors_with_id(self): + resource = ID_AND_CHILDREN.create_resource( + {"ID": "urn:bar", "anchors": {"foo": 12}}, + ) + registry = resource @ Registry() + + assert registry.crawl().anchor(resource.id(), "foo").value == Anchor( + name="foo", + resource=ID_AND_CHILDREN.create_resource(12), + ) + + def test_crawl_finds_anchors_no_id(self): + resource = ID_AND_CHILDREN.create_resource({"anchors": {"foo": 12}}) + registry = Registry().with_resource("urn:root", resource) + + assert registry.crawl().anchor("urn:root", "foo").value == Anchor( + name="foo", + resource=ID_AND_CHILDREN.create_resource(12), + ) + + def test_contents(self): + resource = Resource.opaque({"foo": "bar"}) + uri = "urn:example" + registry = Registry().with_resource(uri, resource) + assert registry.contents(uri) == {"foo": "bar"} + + def test_getitem_strips_empty_fragments(self): + uri = "http://example.com/" + 
resource = ID_AND_CHILDREN.create_resource({"ID": uri + "#"}) + registry = resource @ Registry() + assert registry[uri] == registry[uri + "#"] == resource + + def test_contents_strips_empty_fragments(self): + uri = "http://example.com/" + resource = ID_AND_CHILDREN.create_resource({"ID": uri + "#"}) + registry = resource @ Registry() + assert ( + registry.contents(uri) + == registry.contents(uri + "#") + == {"ID": uri + "#"} + ) + + def test_contents_nonexistent_resource(self): + registry = Registry() + with pytest.raises(exceptions.NoSuchResource) as e: + registry.contents("urn:example") + assert e.value == exceptions.NoSuchResource(ref="urn:example") + + def test_crawled_anchor(self): + resource = ID_AND_CHILDREN.create_resource({"anchors": {"foo": "bar"}}) + registry = Registry().with_resource("urn:example", resource) + retrieved = registry.anchor("urn:example", "foo") + assert retrieved.value == Anchor( + name="foo", + resource=ID_AND_CHILDREN.create_resource("bar"), + ) + assert retrieved.registry == registry.crawl() + + def test_anchor_in_nonexistent_resource(self): + registry = Registry() + with pytest.raises(exceptions.NoSuchResource) as e: + registry.anchor("urn:example", "foo") + assert e.value == exceptions.NoSuchResource(ref="urn:example") + + def test_init(self): + one = Resource.opaque(contents={}) + two = ID_AND_CHILDREN.create_resource({"foo": "bar"}) + registry = Registry( + { + "http://example.com/1": one, + "http://example.com/foo/bar": two, + }, + ) + assert ( + registry + == Registry() + .with_resources( + [ + ("http://example.com/1", one), + ("http://example.com/foo/bar", two), + ], + ) + .crawl() + ) + + def test_dict_conversion(self): + """ + Passing a `dict` to `Registry` gets converted to a `HashTrieMap`. + + So continuing to use the registry works. 
+ """ + + one = Resource.opaque(contents={}) + two = ID_AND_CHILDREN.create_resource({"foo": "bar"}) + registry = Registry( + {"http://example.com/1": one}, + ).with_resource("http://example.com/foo/bar", two) + assert ( + registry.crawl() + == Registry() + .with_resources( + [ + ("http://example.com/1", one), + ("http://example.com/foo/bar", two), + ], + ) + .crawl() + ) + + def test_no_such_resource(self): + registry = Registry() + with pytest.raises(exceptions.NoSuchResource) as e: + registry["urn:bigboom"] + assert e.value == exceptions.NoSuchResource(ref="urn:bigboom") + + def test_combine(self): + one = Resource.opaque(contents={}) + two = ID_AND_CHILDREN.create_resource({"foo": "bar"}) + three = ID_AND_CHILDREN.create_resource({"baz": "quux"}) + four = ID_AND_CHILDREN.create_resource({"anchors": {"foo": 12}}) + + first = Registry({"http://example.com/1": one}) + second = Registry().with_resource("http://example.com/foo/bar", two) + third = Registry( + { + "http://example.com/1": one, + "http://example.com/baz": three, + }, + ) + fourth = ( + Registry() + .with_resource( + "http://example.com/foo/quux", + four, + ) + .crawl() + ) + assert first.combine(second, third, fourth) == Registry( + [ + ("http://example.com/1", one), + ("http://example.com/baz", three), + ("http://example.com/foo/quux", four), + ], + anchors=HashTrieMap( + { + ("http://example.com/foo/quux", "foo"): Anchor( + name="foo", + resource=ID_AND_CHILDREN.create_resource(12), + ), + }, + ), + ).with_resource("http://example.com/foo/bar", two) + + def test_combine_self(self): + """ + Combining a registry with itself short-circuits. + + This is a performance optimization -- otherwise we do lots more work + (in jsonschema this seems to correspond to making the test suite take + *3x* longer). 
+ """ + + registry = Registry({"urn:foo": "bar"}) + assert registry.combine(registry) is registry + + def test_combine_with_uncrawled_resources(self): + one = Resource.opaque(contents={}) + two = ID_AND_CHILDREN.create_resource({"foo": "bar"}) + three = ID_AND_CHILDREN.create_resource({"baz": "quux"}) + + first = Registry().with_resource("http://example.com/1", one) + second = Registry().with_resource("http://example.com/foo/bar", two) + third = Registry( + { + "http://example.com/1": one, + "http://example.com/baz": three, + }, + ) + expected = Registry( + [ + ("http://example.com/1", one), + ("http://example.com/foo/bar", two), + ("http://example.com/baz", three), + ], + ) + combined = first.combine(second, third) + assert combined != expected + assert combined.crawl() == expected + + def test_combine_with_single_retrieve(self): + one = Resource.opaque(contents={}) + two = ID_AND_CHILDREN.create_resource({"foo": "bar"}) + three = ID_AND_CHILDREN.create_resource({"baz": "quux"}) + + def retrieve(uri): # pragma: no cover + pass + + first = Registry().with_resource("http://example.com/1", one) + second = Registry( + retrieve=retrieve, + ).with_resource("http://example.com/2", two) + third = Registry().with_resource("http://example.com/3", three) + + assert first.combine(second, third) == Registry( + retrieve=retrieve, + ).with_resources( + [ + ("http://example.com/1", one), + ("http://example.com/2", two), + ("http://example.com/3", three), + ], + ) + assert second.combine(first, third) == Registry( + retrieve=retrieve, + ).with_resources( + [ + ("http://example.com/1", one), + ("http://example.com/2", two), + ("http://example.com/3", three), + ], + ) + + def test_combine_with_common_retrieve(self): + one = Resource.opaque(contents={}) + two = ID_AND_CHILDREN.create_resource({"foo": "bar"}) + three = ID_AND_CHILDREN.create_resource({"baz": "quux"}) + + def retrieve(uri): # pragma: no cover + pass + + first = Registry(retrieve=retrieve).with_resource( + 
"http://example.com/1", + one, + ) + second = Registry( + retrieve=retrieve, + ).with_resource("http://example.com/2", two) + third = Registry(retrieve=retrieve).with_resource( + "http://example.com/3", + three, + ) + + assert first.combine(second, third) == Registry( + retrieve=retrieve, + ).with_resources( + [ + ("http://example.com/1", one), + ("http://example.com/2", two), + ("http://example.com/3", three), + ], + ) + assert second.combine(first, third) == Registry( + retrieve=retrieve, + ).with_resources( + [ + ("http://example.com/1", one), + ("http://example.com/2", two), + ("http://example.com/3", three), + ], + ) + + def test_combine_conflicting_retrieve(self): + one = Resource.opaque(contents={}) + two = ID_AND_CHILDREN.create_resource({"foo": "bar"}) + three = ID_AND_CHILDREN.create_resource({"baz": "quux"}) + + def foo_retrieve(uri): # pragma: no cover + pass + + def bar_retrieve(uri): # pragma: no cover + pass + + first = Registry(retrieve=foo_retrieve).with_resource( + "http://example.com/1", + one, + ) + second = Registry().with_resource("http://example.com/2", two) + third = Registry(retrieve=bar_retrieve).with_resource( + "http://example.com/3", + three, + ) + + with pytest.raises(Exception, match="conflict.*retriev"): + first.combine(second, third) + + def test_remove(self): + one = Resource.opaque(contents={}) + two = ID_AND_CHILDREN.create_resource({"foo": "bar"}) + registry = Registry({"urn:foo": one, "urn:bar": two}) + assert registry.remove("urn:foo") == Registry({"urn:bar": two}) + + def test_remove_uncrawled(self): + one = Resource.opaque(contents={}) + two = ID_AND_CHILDREN.create_resource({"foo": "bar"}) + registry = Registry().with_resources( + [("urn:foo", one), ("urn:bar", two)], + ) + assert registry.remove("urn:foo") == Registry().with_resource( + "urn:bar", + two, + ) + + def test_remove_with_anchors(self): + one = Resource.opaque(contents={}) + two = ID_AND_CHILDREN.create_resource({"anchors": {"foo": "bar"}}) + registry = ( + 
Registry() + .with_resources( + [("urn:foo", one), ("urn:bar", two)], + ) + .crawl() + ) + assert ( + registry.remove("urn:bar") + == Registry() + .with_resource( + "urn:foo", + one, + ) + .crawl() + ) + + def test_remove_nonexistent_uri(self): + with pytest.raises(exceptions.NoSuchResource) as e: + Registry().remove("urn:doesNotExist") + assert e.value == exceptions.NoSuchResource(ref="urn:doesNotExist") + + def test_retrieve(self): + foo = Resource.opaque({"foo": "bar"}) + registry = Registry(retrieve=lambda uri: foo) + assert registry.get_or_retrieve("urn:example").value == foo + + def test_retrieve_arbitrary_exception(self): + foo = Resource.opaque({"foo": "bar"}) + + def retrieve(uri): + if uri == "urn:succeed": + return foo + raise Exception("Oh no!") + + registry = Registry(retrieve=retrieve) + assert registry.get_or_retrieve("urn:succeed").value == foo + with pytest.raises(exceptions.Unretrievable): + registry.get_or_retrieve("urn:uhoh") + + def test_retrieve_no_such_resource(self): + foo = Resource.opaque({"foo": "bar"}) + + def retrieve(uri): + if uri == "urn:succeed": + return foo + raise exceptions.NoSuchResource(ref=uri) + + registry = Registry(retrieve=retrieve) + assert registry.get_or_retrieve("urn:succeed").value == foo + with pytest.raises(exceptions.NoSuchResource): + registry.get_or_retrieve("urn:uhoh") + + def test_retrieve_cannot_determine_specification(self): + def retrieve(uri): + return Resource.from_contents({}) + + registry = Registry(retrieve=retrieve) + with pytest.raises(exceptions.CannotDetermineSpecification): + registry.get_or_retrieve("urn:uhoh") + + def test_retrieve_already_available_resource(self): + foo = Resource.opaque({"foo": "bar"}) + registry = Registry({"urn:example": foo}, retrieve=blow_up) + assert registry["urn:example"] == foo + assert registry.get_or_retrieve("urn:example").value == foo + + def test_retrieve_first_checks_crawlable_resource(self): + child = ID_AND_CHILDREN.create_resource({"ID": "urn:child", "foo": 
12}) + root = ID_AND_CHILDREN.create_resource({"children": [child.contents]}) + registry = Registry(retrieve=blow_up).with_resource("urn:root", root) + assert registry.crawl()["urn:child"] == child + + def test_resolver(self): + one = Resource.opaque(contents={}) + registry = Registry({"http://example.com": one}) + resolver = registry.resolver(base_uri="http://example.com") + assert resolver.lookup("#").contents == {} + + def test_resolver_with_root_identified(self): + root = ID_AND_CHILDREN.create_resource({"ID": "http://example.com"}) + resolver = Registry().resolver_with_root(root) + assert resolver.lookup("http://example.com").contents == root.contents + assert resolver.lookup("#").contents == root.contents + + def test_resolver_with_root_unidentified(self): + root = Resource.opaque(contents={}) + resolver = Registry().resolver_with_root(root) + assert resolver.lookup("#").contents == root.contents + + def test_repr(self): + one = Resource.opaque(contents={}) + two = ID_AND_CHILDREN.create_resource({"foo": "bar"}) + registry = Registry().with_resources( + [ + ("http://example.com/1", one), + ("http://example.com/foo/bar", two), + ], + ) + assert repr(registry) == "" + assert repr(registry.crawl()) == "" + + def test_repr_mixed_crawled(self): + one = Resource.opaque(contents={}) + two = ID_AND_CHILDREN.create_resource({"foo": "bar"}) + registry = ( + Registry( + {"http://example.com/1": one}, + ) + .crawl() + .with_resource(uri="http://example.com/foo/bar", resource=two) + ) + assert repr(registry) == "" + + def test_repr_one_resource(self): + registry = Registry().with_resource( + uri="http://example.com/1", + resource=Resource.opaque(contents={}), + ) + assert repr(registry) == "" + + def test_repr_empty(self): + assert repr(Registry()) == "" + + +class TestResource: + def test_from_contents_from_json_schema(self): + schema = {"$schema": "https://json-schema.org/draft/2020-12/schema"} + resource = Resource.from_contents(schema) + assert resource == 
Resource(contents=schema, specification=DRAFT202012) + + def test_from_contents_with_no_discernible_information(self): + """ + Creating a resource with no discernible way to see what + specification it belongs to (e.g. no ``$schema`` keyword for JSON + Schema) raises an error. + """ + + with pytest.raises(exceptions.CannotDetermineSpecification): + Resource.from_contents({"foo": "bar"}) + + def test_from_contents_with_no_discernible_information_and_default(self): + resource = Resource.from_contents( + {"foo": "bar"}, + default_specification=Specification.OPAQUE, + ) + assert resource == Resource.opaque(contents={"foo": "bar"}) + + def test_from_contents_unneeded_default(self): + schema = {"$schema": "https://json-schema.org/draft/2020-12/schema"} + resource = Resource.from_contents( + schema, + default_specification=Specification.OPAQUE, + ) + assert resource == Resource( + contents=schema, + specification=DRAFT202012, + ) + + def test_non_mapping_from_contents(self): + resource = Resource.from_contents( + True, + default_specification=ID_AND_CHILDREN, + ) + assert resource == Resource( + contents=True, + specification=ID_AND_CHILDREN, + ) + + def test_from_contents_with_fallback(self): + resource = Resource.from_contents( + {"foo": "bar"}, + default_specification=Specification.OPAQUE, + ) + assert resource == Resource.opaque(contents={"foo": "bar"}) + + def test_id_delegates_to_specification(self): + specification = Specification( + name="", + id_of=lambda contents: "urn:fixedID", + subresources_of=lambda contents: [], + anchors_in=lambda specification, contents: [], + maybe_in_subresource=( + lambda segments, resolver, subresource: resolver + ), + ) + resource = Resource( + contents={"foo": "baz"}, + specification=specification, + ) + assert resource.id() == "urn:fixedID" + + def test_id_strips_empty_fragment(self): + uri = "http://example.com/" + root = ID_AND_CHILDREN.create_resource({"ID": uri + "#"}) + assert root.id() == uri + + def 
test_subresources_delegates_to_specification(self): + resource = ID_AND_CHILDREN.create_resource({"children": [{}, 12]}) + assert list(resource.subresources()) == [ + ID_AND_CHILDREN.create_resource(each) for each in [{}, 12] + ] + + def test_subresource_with_different_specification(self): + schema = {"$schema": "https://json-schema.org/draft/2020-12/schema"} + resource = ID_AND_CHILDREN.create_resource({"children": [schema]}) + assert list(resource.subresources()) == [ + DRAFT202012.create_resource(schema), + ] + + def test_anchors_delegates_to_specification(self): + resource = ID_AND_CHILDREN.create_resource( + {"anchors": {"foo": {}, "bar": 1, "baz": ""}}, + ) + assert list(resource.anchors()) == [ + Anchor(name="foo", resource=ID_AND_CHILDREN.create_resource({})), + Anchor(name="bar", resource=ID_AND_CHILDREN.create_resource(1)), + Anchor(name="baz", resource=ID_AND_CHILDREN.create_resource("")), + ] + + def test_pointer_to_mapping(self): + resource = Resource.opaque(contents={"foo": "baz"}) + resolver = Registry().resolver() + assert resource.pointer("/foo", resolver=resolver).contents == "baz" + + def test_pointer_to_array(self): + resource = Resource.opaque(contents={"foo": {"bar": [3]}}) + resolver = Registry().resolver() + assert resource.pointer("/foo/bar/0", resolver=resolver).contents == 3 + + def test_root_pointer(self): + contents = {"foo": "baz"} + resource = Resource.opaque(contents=contents) + resolver = Registry().resolver() + assert resource.pointer("", resolver=resolver).contents == contents + + def test_opaque(self): + contents = {"foo": "bar"} + assert Resource.opaque(contents) == Resource( + contents=contents, + specification=Specification.OPAQUE, + ) + + +class TestResolver: + def test_lookup_exact_uri(self): + resource = Resource.opaque(contents={"foo": "baz"}) + resolver = Registry({"http://example.com/1": resource}).resolver() + resolved = resolver.lookup("http://example.com/1") + assert resolved.contents == resource.contents + + def 
test_lookup_subresource(self): + root = ID_AND_CHILDREN.create_resource( + { + "ID": "http://example.com/", + "children": [ + {"ID": "http://example.com/a", "foo": 12}, + ], + }, + ) + registry = root @ Registry() + resolved = registry.resolver().lookup("http://example.com/a") + assert resolved.contents == {"ID": "http://example.com/a", "foo": 12} + + def test_lookup_anchor_with_id(self): + root = ID_AND_CHILDREN.create_resource( + { + "ID": "http://example.com/", + "anchors": {"foo": 12}, + }, + ) + registry = root @ Registry() + resolved = registry.resolver().lookup("http://example.com/#foo") + assert resolved.contents == 12 + + def test_lookup_anchor_without_id(self): + root = ID_AND_CHILDREN.create_resource({"anchors": {"foo": 12}}) + resolver = Registry().with_resource("urn:example", root).resolver() + resolved = resolver.lookup("urn:example#foo") + assert resolved.contents == 12 + + def test_lookup_unknown_reference(self): + resolver = Registry().resolver() + ref = "http://example.com/does/not/exist" + with pytest.raises(exceptions.Unresolvable) as e: + resolver.lookup(ref) + assert e.value == exceptions.Unresolvable(ref=ref) + + def test_lookup_non_existent_pointer(self): + resource = Resource.opaque({"foo": {}}) + resolver = Registry({"http://example.com/1": resource}).resolver() + ref = "http://example.com/1#/foo/bar" + with pytest.raises(exceptions.Unresolvable) as e: + resolver.lookup(ref) + assert e.value == exceptions.PointerToNowhere( + ref="/foo/bar", + resource=resource, + ) + assert str(e.value) == "'/foo/bar' does not exist within {'foo': {}}" + + def test_lookup_non_existent_pointer_to_array_index(self): + resource = Resource.opaque([1, 2, 4, 8]) + resolver = Registry({"http://example.com/1": resource}).resolver() + ref = "http://example.com/1#/10" + with pytest.raises(exceptions.Unresolvable) as e: + resolver.lookup(ref) + assert e.value == exceptions.PointerToNowhere( + ref="/10", + resource=resource, + ) + + def 
test_lookup_pointer_to_empty_string(self): + resolver = Registry().resolver_with_root(Resource.opaque({"": {}})) + assert resolver.lookup("#/").contents == {} + + def test_lookup_non_existent_pointer_to_empty_string(self): + resource = Resource.opaque({"foo": {}}) + resolver = Registry().resolver_with_root(resource) + with pytest.raises( + exceptions.Unresolvable, + match="^'/' does not exist within {'foo': {}}.*'#'", + ) as e: + resolver.lookup("#/") + assert e.value == exceptions.PointerToNowhere( + ref="/", + resource=resource, + ) + + def test_lookup_non_existent_anchor(self): + root = ID_AND_CHILDREN.create_resource({"anchors": {}}) + resolver = Registry().with_resource("urn:example", root).resolver() + resolved = resolver.lookup("urn:example") + assert resolved.contents == root.contents + + ref = "urn:example#noSuchAnchor" + with pytest.raises(exceptions.Unresolvable) as e: + resolver.lookup(ref) + assert "'noSuchAnchor' does not exist" in str(e.value) + assert e.value == exceptions.NoSuchAnchor( + ref="urn:example", + resource=root, + anchor="noSuchAnchor", + ) + + def test_lookup_invalid_JSON_pointerish_anchor(self): + resolver = Registry().resolver_with_root( + ID_AND_CHILDREN.create_resource( + { + "ID": "http://example.com/", + "foo": {"bar": 12}, + }, + ), + ) + + valid = resolver.lookup("#/foo/bar") + assert valid.contents == 12 + + with pytest.raises(exceptions.InvalidAnchor) as e: + resolver.lookup("#foo/bar") + assert " '#/foo/bar'" in str(e.value) + + def test_lookup_retrieved_resource(self): + resource = Resource.opaque(contents={"foo": "baz"}) + resolver = Registry(retrieve=lambda uri: resource).resolver() + resolved = resolver.lookup("http://example.com/") + assert resolved.contents == resource.contents + + def test_lookup_failed_retrieved_resource(self): + """ + Unretrievable exceptions are also wrapped in Unresolvable. 
+ """ + + uri = "http://example.com/" + + registry = Registry(retrieve=blow_up) + with pytest.raises(exceptions.Unretrievable): + registry.get_or_retrieve(uri) + + resolver = registry.resolver() + with pytest.raises(exceptions.Unresolvable): + resolver.lookup(uri) + + def test_repeated_lookup_from_retrieved_resource(self): + """ + A (custom-)retrieved resource is added to the registry returned by + looking it up. + """ + resource = Resource.opaque(contents={"foo": "baz"}) + once = [resource] + + def retrieve(uri): + return once.pop() + + resolver = Registry(retrieve=retrieve).resolver() + resolved = resolver.lookup("http://example.com/") + assert resolved.contents == resource.contents + + resolved = resolved.resolver.lookup("http://example.com/") + assert resolved.contents == resource.contents + + def test_repeated_anchor_lookup_from_retrieved_resource(self): + resource = Resource.opaque(contents={"foo": "baz"}) + once = [resource] + + def retrieve(uri): + return once.pop() + + resolver = Registry(retrieve=retrieve).resolver() + resolved = resolver.lookup("http://example.com/") + assert resolved.contents == resource.contents + + resolved = resolved.resolver.lookup("#") + assert resolved.contents == resource.contents + + # FIXME: The tests below aren't really representable in the current + # suite, though we should probably think of ways to do so. 
+ + def test_in_subresource(self): + root = ID_AND_CHILDREN.create_resource( + { + "ID": "http://example.com/", + "children": [ + { + "ID": "child/", + "children": [{"ID": "grandchild"}], + }, + ], + }, + ) + registry = root @ Registry() + + resolver = registry.resolver() + first = resolver.lookup("http://example.com/") + assert first.contents == root.contents + + with pytest.raises(exceptions.Unresolvable): + first.resolver.lookup("grandchild") + + sub = first.resolver.in_subresource( + ID_AND_CHILDREN.create_resource(first.contents["children"][0]), + ) + second = sub.lookup("grandchild") + assert second.contents == {"ID": "grandchild"} + + def test_in_pointer_subresource(self): + root = ID_AND_CHILDREN.create_resource( + { + "ID": "http://example.com/", + "children": [ + { + "ID": "child/", + "children": [{"ID": "grandchild"}], + }, + ], + }, + ) + registry = root @ Registry() + + resolver = registry.resolver() + first = resolver.lookup("http://example.com/") + assert first.contents == root.contents + + with pytest.raises(exceptions.Unresolvable): + first.resolver.lookup("grandchild") + + second = first.resolver.lookup("#/children/0") + third = second.resolver.lookup("grandchild") + assert third.contents == {"ID": "grandchild"} + + def test_dynamic_scope(self): + one = ID_AND_CHILDREN.create_resource( + { + "ID": "http://example.com/", + "children": [ + { + "ID": "child/", + "children": [{"ID": "grandchild"}], + }, + ], + }, + ) + two = ID_AND_CHILDREN.create_resource( + { + "ID": "http://example.com/two", + "children": [{"ID": "two-child/"}], + }, + ) + registry = [one, two] @ Registry() + + resolver = registry.resolver() + first = resolver.lookup("http://example.com/") + second = first.resolver.lookup("#/children/0") + third = second.resolver.lookup("grandchild") + fourth = third.resolver.lookup("http://example.com/two") + assert list(fourth.resolver.dynamic_scope()) == [ + ("http://example.com/child/grandchild", fourth.resolver._registry), + 
("http://example.com/child/", fourth.resolver._registry), + ("http://example.com/", fourth.resolver._registry), + ] + assert list(third.resolver.dynamic_scope()) == [ + ("http://example.com/child/", third.resolver._registry), + ("http://example.com/", third.resolver._registry), + ] + assert list(second.resolver.dynamic_scope()) == [ + ("http://example.com/", second.resolver._registry), + ] + assert list(first.resolver.dynamic_scope()) == [] + + +class TestSpecification: + def test_create_resource(self): + specification = Specification( + name="", + id_of=lambda contents: "urn:fixedID", + subresources_of=lambda contents: [], + anchors_in=lambda specification, contents: [], + maybe_in_subresource=( + lambda segments, resolver, subresource: resolver + ), + ) + resource = specification.create_resource(contents={"foo": "baz"}) + assert resource == Resource( + contents={"foo": "baz"}, + specification=specification, + ) + assert resource.id() == "urn:fixedID" + + def test_detect_from_json_schema(self): + schema = {"$schema": "https://json-schema.org/draft/2020-12/schema"} + specification = Specification.detect(schema) + assert specification == DRAFT202012 + + def test_detect_with_no_discernible_information(self): + with pytest.raises(exceptions.CannotDetermineSpecification): + Specification.detect({"foo": "bar"}) + + def test_detect_with_non_URI_schema(self): + with pytest.raises(exceptions.CannotDetermineSpecification): + Specification.detect({"$schema": 37}) + + def test_detect_with_no_discernible_information_and_default(self): + specification = Specification.OPAQUE.detect({"foo": "bar"}) + assert specification is Specification.OPAQUE + + def test_detect_unneeded_default(self): + schema = {"$schema": "https://json-schema.org/draft/2020-12/schema"} + specification = Specification.OPAQUE.detect(schema) + assert specification == DRAFT202012 + + def test_non_mapping_detect(self): + with pytest.raises(exceptions.CannotDetermineSpecification): + Specification.detect(True) + + 
def test_non_mapping_detect_with_default(self): + specification = ID_AND_CHILDREN.detect(True) + assert specification is ID_AND_CHILDREN + + def test_detect_with_fallback(self): + specification = Specification.OPAQUE.detect({"foo": "bar"}) + assert specification is Specification.OPAQUE + + def test_repr(self): + assert ( + repr(ID_AND_CHILDREN) == "" + ) + + +class TestOpaqueSpecification: + THINGS = [{"foo": "bar"}, True, 37, "foo", object()] + + @pytest.mark.parametrize("thing", THINGS) + def test_no_id(self, thing): + """ + An arbitrary thing has no ID. + """ + + assert Specification.OPAQUE.id_of(thing) is None + + @pytest.mark.parametrize("thing", THINGS) + def test_no_subresources(self, thing): + """ + An arbitrary thing has no subresources. + """ + + assert list(Specification.OPAQUE.subresources_of(thing)) == [] + + @pytest.mark.parametrize("thing", THINGS) + def test_no_anchors(self, thing): + """ + An arbitrary thing has no anchors. + """ + + assert list(Specification.OPAQUE.anchors_in(thing)) == [] + + +@pytest.mark.parametrize( + "cls", + [Anchor, Registry, Resource, Specification, exceptions.PointerToNowhere], +) +def test_nonsubclassable(cls): + with pytest.raises(Exception, match="(?i)subclassing"): + + class Boom(cls): # pragma: no cover + pass diff --git a/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/test_exceptions.py b/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/test_exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..85cf99ecdd86c86e84df0b64f24aec6c447f4c08 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/test_exceptions.py @@ -0,0 +1,34 @@ +import itertools + +import pytest + +from referencing import Resource, exceptions + + +def pairs(choices): + return itertools.combinations(choices, 2) + + +TRUE = Resource.opaque(True) + + +thunks = ( + lambda: exceptions.CannotDetermineSpecification(TRUE), + lambda: 
exceptions.NoSuchResource("urn:example:foo"), + lambda: exceptions.NoInternalID(TRUE), + lambda: exceptions.InvalidAnchor(resource=TRUE, anchor="foo", ref="a#b"), + lambda: exceptions.NoSuchAnchor(resource=TRUE, anchor="foo", ref="a#b"), + lambda: exceptions.PointerToNowhere(resource=TRUE, ref="urn:example:foo"), + lambda: exceptions.Unresolvable("urn:example:foo"), + lambda: exceptions.Unretrievable("urn:example:foo"), +) + + +@pytest.mark.parametrize("one, two", pairs(each() for each in thunks)) +def test_eq_incompatible_types(one, two): + assert one != two + + +@pytest.mark.parametrize("thunk", thunks) +def test_hash(thunk): + assert thunk() in {thunk()} diff --git a/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/test_jsonschema.py b/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/test_jsonschema.py new file mode 100644 index 0000000000000000000000000000000000000000..c80714d0132bebbec33401f42a2e06aee3fed9c6 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/test_jsonschema.py @@ -0,0 +1,382 @@ +import pytest + +from referencing import Registry, Resource, Specification +import referencing.jsonschema + + +@pytest.mark.parametrize( + "uri, expected", + [ + ( + "https://json-schema.org/draft/2020-12/schema", + referencing.jsonschema.DRAFT202012, + ), + ( + "https://json-schema.org/draft/2019-09/schema", + referencing.jsonschema.DRAFT201909, + ), + ( + "http://json-schema.org/draft-07/schema#", + referencing.jsonschema.DRAFT7, + ), + ( + "http://json-schema.org/draft-06/schema#", + referencing.jsonschema.DRAFT6, + ), + ( + "http://json-schema.org/draft-04/schema#", + referencing.jsonschema.DRAFT4, + ), + ( + "http://json-schema.org/draft-03/schema#", + referencing.jsonschema.DRAFT3, + ), + ], +) +def test_schemas_with_explicit_schema_keywords_are_detected(uri, expected): + """ + The $schema keyword in JSON Schema is a dialect identifier. 
+ """ + contents = {"$schema": uri} + resource = Resource.from_contents(contents) + assert resource == Resource(contents=contents, specification=expected) + + +def test_unknown_dialect(): + dialect_id = "http://example.com/unknown-json-schema-dialect-id" + with pytest.raises(referencing.jsonschema.UnknownDialect) as excinfo: + Resource.from_contents({"$schema": dialect_id}) + assert excinfo.value.uri == dialect_id + + +@pytest.mark.parametrize( + "id, specification", + [ + ("$id", referencing.jsonschema.DRAFT202012), + ("$id", referencing.jsonschema.DRAFT201909), + ("$id", referencing.jsonschema.DRAFT7), + ("$id", referencing.jsonschema.DRAFT6), + ("id", referencing.jsonschema.DRAFT4), + ("id", referencing.jsonschema.DRAFT3), + ], +) +def test_id_of_mapping(id, specification): + uri = "http://example.com/some-schema" + assert specification.id_of({id: uri}) == uri + + +@pytest.mark.parametrize( + "specification", + [ + referencing.jsonschema.DRAFT202012, + referencing.jsonschema.DRAFT201909, + referencing.jsonschema.DRAFT7, + referencing.jsonschema.DRAFT6, + ], +) +@pytest.mark.parametrize("value", [True, False]) +def test_id_of_bool(specification, value): + assert specification.id_of(value) is None + + +@pytest.mark.parametrize( + "specification", + [ + referencing.jsonschema.DRAFT202012, + referencing.jsonschema.DRAFT201909, + referencing.jsonschema.DRAFT7, + referencing.jsonschema.DRAFT6, + ], +) +@pytest.mark.parametrize("value", [True, False]) +def test_anchors_in_bool(specification, value): + assert list(specification.anchors_in(value)) == [] + + +@pytest.mark.parametrize( + "specification", + [ + referencing.jsonschema.DRAFT202012, + referencing.jsonschema.DRAFT201909, + referencing.jsonschema.DRAFT7, + referencing.jsonschema.DRAFT6, + ], +) +@pytest.mark.parametrize("value", [True, False]) +def test_subresources_of_bool(specification, value): + assert list(specification.subresources_of(value)) == [] + + +@pytest.mark.parametrize( + "uri, expected", + [ + ( + 
"https://json-schema.org/draft/2020-12/schema", + referencing.jsonschema.DRAFT202012, + ), + ( + "https://json-schema.org/draft/2019-09/schema", + referencing.jsonschema.DRAFT201909, + ), + ( + "http://json-schema.org/draft-07/schema#", + referencing.jsonschema.DRAFT7, + ), + ( + "http://json-schema.org/draft-06/schema#", + referencing.jsonschema.DRAFT6, + ), + ( + "http://json-schema.org/draft-04/schema#", + referencing.jsonschema.DRAFT4, + ), + ( + "http://json-schema.org/draft-03/schema#", + referencing.jsonschema.DRAFT3, + ), + ], +) +def test_specification_with(uri, expected): + assert referencing.jsonschema.specification_with(uri) == expected + + +@pytest.mark.parametrize( + "uri, expected", + [ + ( + "http://json-schema.org/draft-07/schema", + referencing.jsonschema.DRAFT7, + ), + ( + "http://json-schema.org/draft-06/schema", + referencing.jsonschema.DRAFT6, + ), + ( + "http://json-schema.org/draft-04/schema", + referencing.jsonschema.DRAFT4, + ), + ( + "http://json-schema.org/draft-03/schema", + referencing.jsonschema.DRAFT3, + ), + ], +) +def test_specification_with_no_empty_fragment(uri, expected): + assert referencing.jsonschema.specification_with(uri) == expected + + +def test_specification_with_unknown_dialect(): + dialect_id = "http://example.com/unknown-json-schema-dialect-id" + with pytest.raises(referencing.jsonschema.UnknownDialect) as excinfo: + referencing.jsonschema.specification_with(dialect_id) + assert excinfo.value.uri == dialect_id + + +def test_specification_with_default(): + dialect_id = "http://example.com/unknown-json-schema-dialect-id" + specification = referencing.jsonschema.specification_with( + dialect_id, + default=Specification.OPAQUE, + ) + assert specification is Specification.OPAQUE + + +# FIXME: The tests below should move to the referencing suite but I haven't yet +# figured out how to represent dynamic (& recursive) ref lookups in it. 
+def test_lookup_trivial_dynamic_ref(): + one = referencing.jsonschema.DRAFT202012.create_resource( + {"$dynamicAnchor": "foo"}, + ) + resolver = Registry().with_resource("http://example.com", one).resolver() + resolved = resolver.lookup("http://example.com#foo") + assert resolved.contents == one.contents + + +def test_multiple_lookup_trivial_dynamic_ref(): + TRUE = referencing.jsonschema.DRAFT202012.create_resource(True) + root = referencing.jsonschema.DRAFT202012.create_resource( + { + "$id": "http://example.com", + "$dynamicAnchor": "fooAnchor", + "$defs": { + "foo": { + "$id": "foo", + "$dynamicAnchor": "fooAnchor", + "$defs": { + "bar": True, + "baz": { + "$dynamicAnchor": "fooAnchor", + }, + }, + }, + }, + }, + ) + resolver = ( + Registry() + .with_resources( + [ + ("http://example.com", root), + ("http://example.com/foo/", TRUE), + ("http://example.com/foo/bar", root), + ], + ) + .resolver() + ) + + first = resolver.lookup("http://example.com") + second = first.resolver.lookup("foo/") + resolver = second.resolver.lookup("bar").resolver + fourth = resolver.lookup("#fooAnchor") + assert fourth.contents == root.contents + + +def test_multiple_lookup_dynamic_ref_to_nondynamic_ref(): + one = referencing.jsonschema.DRAFT202012.create_resource( + {"$anchor": "fooAnchor"}, + ) + two = referencing.jsonschema.DRAFT202012.create_resource( + { + "$id": "http://example.com", + "$dynamicAnchor": "fooAnchor", + "$defs": { + "foo": { + "$id": "foo", + "$dynamicAnchor": "fooAnchor", + "$defs": { + "bar": True, + "baz": { + "$dynamicAnchor": "fooAnchor", + }, + }, + }, + }, + }, + ) + resolver = ( + Registry() + .with_resources( + [ + ("http://example.com", two), + ("http://example.com/foo/", one), + ("http://example.com/foo/bar", two), + ], + ) + .resolver() + ) + + first = resolver.lookup("http://example.com") + second = first.resolver.lookup("foo/") + resolver = second.resolver.lookup("bar").resolver + fourth = resolver.lookup("#fooAnchor") + assert fourth.contents == 
two.contents + + +def test_lookup_trivial_recursive_ref(): + one = referencing.jsonschema.DRAFT201909.create_resource( + {"$recursiveAnchor": True}, + ) + resolver = Registry().with_resource("http://example.com", one).resolver() + first = resolver.lookup("http://example.com") + resolved = referencing.jsonschema.lookup_recursive_ref( + resolver=first.resolver, + ) + assert resolved.contents == one.contents + + +def test_lookup_recursive_ref_to_bool(): + TRUE = referencing.jsonschema.DRAFT201909.create_resource(True) + registry = Registry({"http://example.com": TRUE}) + resolved = referencing.jsonschema.lookup_recursive_ref( + resolver=registry.resolver(base_uri="http://example.com"), + ) + assert resolved.contents == TRUE.contents + + +def test_multiple_lookup_recursive_ref_to_bool(): + TRUE = referencing.jsonschema.DRAFT201909.create_resource(True) + root = referencing.jsonschema.DRAFT201909.create_resource( + { + "$id": "http://example.com", + "$recursiveAnchor": True, + "$defs": { + "foo": { + "$id": "foo", + "$recursiveAnchor": True, + "$defs": { + "bar": True, + "baz": { + "$recursiveAnchor": True, + "$anchor": "fooAnchor", + }, + }, + }, + }, + }, + ) + resolver = ( + Registry() + .with_resources( + [ + ("http://example.com", root), + ("http://example.com/foo/", TRUE), + ("http://example.com/foo/bar", root), + ], + ) + .resolver() + ) + + first = resolver.lookup("http://example.com") + second = first.resolver.lookup("foo/") + resolver = second.resolver.lookup("bar").resolver + fourth = referencing.jsonschema.lookup_recursive_ref(resolver=resolver) + assert fourth.contents == root.contents + + +def test_multiple_lookup_recursive_ref_with_nonrecursive_ref(): + one = referencing.jsonschema.DRAFT201909.create_resource( + {"$recursiveAnchor": True}, + ) + two = referencing.jsonschema.DRAFT201909.create_resource( + { + "$id": "http://example.com", + "$recursiveAnchor": True, + "$defs": { + "foo": { + "$id": "foo", + "$recursiveAnchor": True, + "$defs": { + "bar": 
True, + "baz": { + "$recursiveAnchor": True, + "$anchor": "fooAnchor", + }, + }, + }, + }, + }, + ) + three = referencing.jsonschema.DRAFT201909.create_resource( + {"$recursiveAnchor": False}, + ) + resolver = ( + Registry() + .with_resources( + [ + ("http://example.com", three), + ("http://example.com/foo/", two), + ("http://example.com/foo/bar", one), + ], + ) + .resolver() + ) + + first = resolver.lookup("http://example.com") + second = first.resolver.lookup("foo/") + resolver = second.resolver.lookup("bar").resolver + fourth = referencing.jsonschema.lookup_recursive_ref(resolver=resolver) + assert fourth.contents == two.contents + + +def test_empty_registry(): + assert referencing.jsonschema.EMPTY_REGISTRY == Registry() diff --git a/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/test_referencing_suite.py b/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/test_referencing_suite.py new file mode 100644 index 0000000000000000000000000000000000000000..4b8ae9177c197456bb3bbd62d4c1875bc95ff28b --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/test_referencing_suite.py @@ -0,0 +1,66 @@ +from pathlib import Path +import json +import os + +import pytest + +from referencing import Registry +from referencing.exceptions import Unresolvable +import referencing.jsonschema + + +class SuiteNotFound(Exception): + def __str__(self): # pragma: no cover + return ( + "Cannot find the referencing suite. " + "Set the REFERENCING_SUITE environment variable to the path to " + "the suite, or run the test suite from alongside a full checkout " + "of the git repository." 
+ ) + + +if "REFERENCING_SUITE" in os.environ: # pragma: no cover + SUITE = Path(os.environ["REFERENCING_SUITE"]) / "tests" +else: + SUITE = Path(__file__).parent.parent.parent / "suite/tests" +if not SUITE.is_dir(): # pragma: no cover + raise SuiteNotFound() +DIALECT_IDS = json.loads(SUITE.joinpath("specifications.json").read_text()) + + +@pytest.mark.parametrize( + "test_path", + [ + pytest.param(each, id=f"{each.parent.name}-{each.stem}") + for each in SUITE.glob("*/**/*.json") + ], +) +def test_referencing_suite(test_path, subtests): + dialect_id = DIALECT_IDS[test_path.relative_to(SUITE).parts[0]] + specification = referencing.jsonschema.specification_with(dialect_id) + loaded = json.loads(test_path.read_text()) + registry = loaded["registry"] + registry = Registry().with_resources( + (uri, specification.create_resource(contents)) + for uri, contents in loaded["registry"].items() + ) + for test in loaded["tests"]: + with subtests.test(test=test): + if "normalization" in test_path.stem: + pytest.xfail("APIs need to change for proper URL support.") + + resolver = registry.resolver(base_uri=test.get("base_uri", "")) + + if test.get("error"): + with pytest.raises(Unresolvable): + resolver.lookup(test["ref"]) + else: + resolved = resolver.lookup(test["ref"]) + assert resolved.contents == test["target"] + + then = test.get("then") + while then: # pragma: no cover + with subtests.test(test=test, then=then): + resolved = resolved.resolver.lookup(then["ref"]) + assert resolved.contents == then["target"] + then = then.get("then") diff --git a/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/test_retrieval.py b/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/test_retrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..d0a8f8ad9975d1a760bca14dea7b60d41fb8ea75 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/referencing/tests/test_retrieval.py @@ -0,0 +1,106 @@ +from functools import lru_cache 
+import json + +import pytest + +from referencing import Registry, Resource, exceptions +from referencing.jsonschema import DRAFT202012 +from referencing.retrieval import to_cached_resource + + +class TestToCachedResource: + def test_it_caches_retrieved_resources(self): + contents = {"$schema": "https://json-schema.org/draft/2020-12/schema"} + stack = [json.dumps(contents)] + + @to_cached_resource() + def retrieve(uri): + return stack.pop() + + registry = Registry(retrieve=retrieve) + + expected = Resource.from_contents(contents) + + got = registry.get_or_retrieve("urn:example:schema") + assert got.value == expected + + # And a second time we get the same value. + again = registry.get_or_retrieve("urn:example:schema") + assert again.value is got.value + + def test_custom_loader(self): + contents = {"$schema": "https://json-schema.org/draft/2020-12/schema"} + stack = [json.dumps(contents)[::-1]] + + @to_cached_resource(loads=lambda s: json.loads(s[::-1])) + def retrieve(uri): + return stack.pop() + + registry = Registry(retrieve=retrieve) + + expected = Resource.from_contents(contents) + + got = registry.get_or_retrieve("urn:example:schema") + assert got.value == expected + + # And a second time we get the same value. + again = registry.get_or_retrieve("urn:example:schema") + assert again.value is got.value + + def test_custom_from_contents(self): + contents = {} + stack = [json.dumps(contents)] + + @to_cached_resource(from_contents=DRAFT202012.create_resource) + def retrieve(uri): + return stack.pop() + + registry = Registry(retrieve=retrieve) + + expected = DRAFT202012.create_resource(contents) + + got = registry.get_or_retrieve("urn:example:schema") + assert got.value == expected + + # And a second time we get the same value. 
+ again = registry.get_or_retrieve("urn:example:schema") + assert again.value is got.value + + def test_custom_cache(self): + schema = {"$schema": "https://json-schema.org/draft/2020-12/schema"} + mapping = { + "urn:example:1": dict(schema, foo=1), + "urn:example:2": dict(schema, foo=2), + "urn:example:3": dict(schema, foo=3), + } + + resources = { + uri: Resource.from_contents(contents) + for uri, contents in mapping.items() + } + + @to_cached_resource(cache=lru_cache(maxsize=2)) + def retrieve(uri): + return json.dumps(mapping.pop(uri)) + + registry = Registry(retrieve=retrieve) + + got = registry.get_or_retrieve("urn:example:1") + assert got.value == resources["urn:example:1"] + assert registry.get_or_retrieve("urn:example:1").value is got.value + assert registry.get_or_retrieve("urn:example:1").value is got.value + + got = registry.get_or_retrieve("urn:example:2") + assert got.value == resources["urn:example:2"] + assert registry.get_or_retrieve("urn:example:2").value is got.value + assert registry.get_or_retrieve("urn:example:2").value is got.value + + # This still succeeds, but evicts the first URI + got = registry.get_or_retrieve("urn:example:3") + assert got.value == resources["urn:example:3"] + assert registry.get_or_retrieve("urn:example:3").value is got.value + assert registry.get_or_retrieve("urn:example:3").value is got.value + + # And now this fails (as we popped the value out of `mapping`) + with pytest.raises(exceptions.Unretrievable): + registry.get_or_retrieve("urn:example:1") diff --git a/evalkit_cambrian/lib/python3.10/site-packages/referencing/typing.py b/evalkit_cambrian/lib/python3.10/site-packages/referencing/typing.py new file mode 100644 index 0000000000000000000000000000000000000000..a61446417e68e8e397346b14e8525cc9062f1c59 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/referencing/typing.py @@ -0,0 +1,61 @@ +""" +Type-annotation related support for the referencing library. 
+""" + +from __future__ import annotations + +from collections.abc import Mapping as Mapping +from typing import TYPE_CHECKING, Any, Protocol + +try: + from typing_extensions import TypeVar +except ImportError: # pragma: no cover + from typing import TypeVar + +if TYPE_CHECKING: + from referencing._core import Resolved, Resolver, Resource + +#: A URI which identifies a `Resource`. +URI = str + +#: The type of documents within a registry. +D = TypeVar("D", default=Any) + + +class Retrieve(Protocol[D]): + """ + A retrieval callable, usable within a `Registry` for resource retrieval. + + Does not make assumptions about where the resource might be coming from. + """ + + def __call__(self, uri: URI) -> Resource[D]: + """ + Retrieve the resource with the given URI. + + Raise `referencing.exceptions.NoSuchResource` if you wish to indicate + the retriever cannot lookup the given URI. + """ + ... + + +class Anchor(Protocol[D]): + """ + An anchor within a `Resource`. + + Beyond "simple" anchors, some specifications like JSON Schema's 2020 + version have dynamic anchors. + """ + + @property + def name(self) -> str: + """ + Return the name of this anchor. + """ + ... + + def resolve(self, resolver: Resolver[D]) -> Resolved[D]: + """ + Return the resource for this anchor. + """ + ... 
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/__init__.py b/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1236d017d4405a778b7e102be6ddfc570ec9e41c --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/__init__.py @@ -0,0 +1,59 @@ +from tomlkit.api import TOMLDocument +from tomlkit.api import aot +from tomlkit.api import array +from tomlkit.api import boolean +from tomlkit.api import comment +from tomlkit.api import date +from tomlkit.api import datetime +from tomlkit.api import document +from tomlkit.api import dump +from tomlkit.api import dumps +from tomlkit.api import float_ +from tomlkit.api import inline_table +from tomlkit.api import integer +from tomlkit.api import item +from tomlkit.api import key +from tomlkit.api import key_value +from tomlkit.api import load +from tomlkit.api import loads +from tomlkit.api import nl +from tomlkit.api import parse +from tomlkit.api import register_encoder +from tomlkit.api import string +from tomlkit.api import table +from tomlkit.api import time +from tomlkit.api import unregister_encoder +from tomlkit.api import value +from tomlkit.api import ws + + +__version__ = "0.12.0" +__all__ = [ + "aot", + "array", + "boolean", + "comment", + "date", + "datetime", + "document", + "dump", + "dumps", + "float_", + "inline_table", + "integer", + "item", + "key", + "key_value", + "load", + "loads", + "nl", + "parse", + "string", + "table", + "time", + "TOMLDocument", + "value", + "ws", + "register_encoder", + "unregister_encoder", +] diff --git a/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/__pycache__/api.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cdd888735c583c9dfa9d1d5bad6c9bf612b44c51 Binary files /dev/null and 
b/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/__pycache__/api.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/__pycache__/exceptions.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f51e3bb088290a406e9c0181d911f504475d7fb Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/__pycache__/exceptions.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/__pycache__/items.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/__pycache__/items.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0ff0b6aec5d3f712d6cc4c558078b0c5767d162 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/__pycache__/items.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/__pycache__/parser.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/__pycache__/parser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9a5566e693cabeca2e882a37f609bd49e4323a8 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/__pycache__/parser.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/__pycache__/toml_file.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/__pycache__/toml_file.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15f73dc0baff8f3fafe89cddfb2fb1642140c185 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/__pycache__/toml_file.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/_types.py b/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/_types.py new file mode 100644 index 
0000000000000000000000000000000000000000..cc1847b5e69447bb934076be14b66766aedb22c1 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/_types.py @@ -0,0 +1,65 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Any +from typing import TypeVar + + +WT = TypeVar("WT", bound="WrapperType") + +if TYPE_CHECKING: # pragma: no cover + # Define _CustomList and _CustomDict as a workaround for: + # https://github.com/python/mypy/issues/11427 + # + # According to this issue, the typeshed contains a "lie" + # (it adds MutableSequence to the ancestry of list and MutableMapping to + # the ancestry of dict) which completely messes with the type inference for + # Table, InlineTable, Array and Container. + # + # Importing from builtins is preferred over simple assignment, see issues: + # https://github.com/python/mypy/issues/8715 + # https://github.com/python/mypy/issues/10068 + from builtins import dict as _CustomDict # noqa: N812 + from builtins import float as _CustomFloat # noqa: N812 + from builtins import int as _CustomInt # noqa: N812 + from builtins import list as _CustomList # noqa: N812 + from typing import Callable + from typing import Concatenate + from typing import ParamSpec + from typing import Protocol + + P = ParamSpec("P") + + class WrapperType(Protocol): + def _new(self: WT, value: Any) -> WT: + ... 
+ +else: + from collections.abc import MutableMapping + from collections.abc import MutableSequence + from numbers import Integral + from numbers import Real + + class _CustomList(MutableSequence, list): + """Adds MutableSequence mixin while pretending to be a builtin list""" + + class _CustomDict(MutableMapping, dict): + """Adds MutableMapping mixin while pretending to be a builtin dict""" + + class _CustomInt(Integral, int): + """Adds Integral mixin while pretending to be a builtin int""" + + class _CustomFloat(Real, float): + """Adds Real mixin while pretending to be a builtin float""" + + +def wrap_method( + original_method: Callable[Concatenate[WT, P], Any] +) -> Callable[Concatenate[WT, P], Any]: + def wrapper(self: WT, *args: P.args, **kwargs: P.kwargs) -> Any: + result = original_method(self, *args, **kwargs) + if result is NotImplemented: + return result + return self._new(result) + + return wrapper diff --git a/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/parser.py b/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/parser.py new file mode 100644 index 0000000000000000000000000000000000000000..bdf0c4a2313b27e759822cd3df583d86c1a176c1 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/parser.py @@ -0,0 +1,1141 @@ +from __future__ import annotations + +import datetime +import re +import string + +from tomlkit._compat import decode +from tomlkit._utils import RFC_3339_LOOSE +from tomlkit._utils import _escaped +from tomlkit._utils import parse_rfc3339 +from tomlkit.container import Container +from tomlkit.exceptions import EmptyKeyError +from tomlkit.exceptions import EmptyTableNameError +from tomlkit.exceptions import InternalParserError +from tomlkit.exceptions import InvalidCharInStringError +from tomlkit.exceptions import InvalidControlChar +from tomlkit.exceptions import InvalidDateError +from tomlkit.exceptions import InvalidDateTimeError +from tomlkit.exceptions import InvalidNumberError +from tomlkit.exceptions 
import InvalidTimeError +from tomlkit.exceptions import InvalidUnicodeValueError +from tomlkit.exceptions import ParseError +from tomlkit.exceptions import UnexpectedCharError +from tomlkit.exceptions import UnexpectedEofError +from tomlkit.items import AoT +from tomlkit.items import Array +from tomlkit.items import Bool +from tomlkit.items import BoolType +from tomlkit.items import Comment +from tomlkit.items import Date +from tomlkit.items import DateTime +from tomlkit.items import Float +from tomlkit.items import InlineTable +from tomlkit.items import Integer +from tomlkit.items import Item +from tomlkit.items import Key +from tomlkit.items import KeyType +from tomlkit.items import Null +from tomlkit.items import SingleKey +from tomlkit.items import String +from tomlkit.items import StringType +from tomlkit.items import Table +from tomlkit.items import Time +from tomlkit.items import Trivia +from tomlkit.items import Whitespace +from tomlkit.source import Source +from tomlkit.toml_char import TOMLChar +from tomlkit.toml_document import TOMLDocument + + +CTRL_I = 0x09 # Tab +CTRL_J = 0x0A # Line feed +CTRL_M = 0x0D # Carriage return +CTRL_CHAR_LIMIT = 0x1F +CHR_DEL = 0x7F + + +class Parser: + """ + Parser for TOML documents. + """ + + def __init__(self, string: str | bytes) -> None: + # Input to parse + self._src = Source(decode(string)) + + self._aot_stack: list[Key] = [] + + @property + def _state(self): + return self._src.state + + @property + def _idx(self): + return self._src.idx + + @property + def _current(self): + return self._src.current + + @property + def _marker(self): + return self._src.marker + + def extract(self) -> str: + """ + Extracts the value between marker and index + """ + return self._src.extract() + + def inc(self, exception: type[ParseError] | None = None) -> bool: + """ + Increments the parser if the end of the input has not been reached. + Returns whether or not it was able to advance. 
+ """ + return self._src.inc(exception=exception) + + def inc_n(self, n: int, exception: type[ParseError] | None = None) -> bool: + """ + Increments the parser by n characters + if the end of the input has not been reached. + """ + return self._src.inc_n(n=n, exception=exception) + + def consume(self, chars, min=0, max=-1): + """ + Consume chars until min/max is satisfied is valid. + """ + return self._src.consume(chars=chars, min=min, max=max) + + def end(self) -> bool: + """ + Returns True if the parser has reached the end of the input. + """ + return self._src.end() + + def mark(self) -> None: + """ + Sets the marker to the index's current position + """ + self._src.mark() + + def parse_error(self, exception=ParseError, *args, **kwargs): + """ + Creates a generic "parse error" at the current position. + """ + return self._src.parse_error(exception, *args, **kwargs) + + def parse(self) -> TOMLDocument: + body = TOMLDocument(True) + + # Take all keyvals outside of tables/AoT's. + while not self.end(): + # Break out if a table is found + if self._current == "[": + break + + # Otherwise, take and append one KV + item = self._parse_item() + if not item: + break + + key, value = item + if (key is not None and key.is_multi()) or not self._merge_ws(value, body): + # We actually have a table + try: + body.append(key, value) + except Exception as e: + raise self.parse_error(ParseError, str(e)) from e + + self.mark() + + while not self.end(): + key, value = self._parse_table() + if isinstance(value, Table) and value.is_aot_element(): + # This is just the first table in an AoT. Parse the rest of the array + # along with it. 
+ value = self._parse_aot(value, key) + + try: + body.append(key, value) + except Exception as e: + raise self.parse_error(ParseError, str(e)) from e + + body.parsing(False) + + return body + + def _merge_ws(self, item: Item, container: Container) -> bool: + """ + Merges the given Item with the last one currently in the given Container if + both are whitespace items. + + Returns True if the items were merged. + """ + last = container.last_item() + if not last: + return False + + if not isinstance(item, Whitespace) or not isinstance(last, Whitespace): + return False + + start = self._idx - (len(last.s) + len(item.s)) + container.body[-1] = ( + container.body[-1][0], + Whitespace(self._src[start : self._idx]), + ) + + return True + + def _is_child(self, parent: Key, child: Key) -> bool: + """ + Returns whether a key is strictly a child of another key. + AoT siblings are not considered children of one another. + """ + parent_parts = tuple(parent) + child_parts = tuple(child) + + if parent_parts == child_parts: + return False + + return parent_parts == child_parts[: len(parent_parts)] + + def _parse_item(self) -> tuple[Key | None, Item] | None: + """ + Attempts to parse the next item and returns it, along with its key + if the item is value-like. + """ + self.mark() + with self._state as state: + while True: + c = self._current + if c == "\n": + # Found a newline; Return all whitespace found up to this point. + self.inc() + + return None, Whitespace(self.extract()) + elif c in " \t\r": + # Skip whitespace. + if not self.inc(): + return None, Whitespace(self.extract()) + elif c == "#": + # Found a comment, parse it + indent = self.extract() + cws, comment, trail = self._parse_comment_trail() + + return None, Comment(Trivia(indent, cws, comment, trail)) + elif c == "[": + # Found a table, delegate to the calling function. + return + else: + # Beginning of a KV pair. 
+ # Return to beginning of whitespace so it gets included + # as indentation for the KV about to be parsed. + state.restore = True + break + + return self._parse_key_value(True) + + def _parse_comment_trail(self, parse_trail: bool = True) -> tuple[str, str, str]: + """ + Returns (comment_ws, comment, trail) + If there is no comment, comment_ws and comment will + simply be empty. + """ + if self.end(): + return "", "", "" + + comment = "" + comment_ws = "" + self.mark() + + while True: + c = self._current + + if c == "\n": + break + elif c == "#": + comment_ws = self.extract() + + self.mark() + self.inc() # Skip # + + # The comment itself + while not self.end() and not self._current.is_nl(): + code = ord(self._current) + if code == CHR_DEL or code <= CTRL_CHAR_LIMIT and code != CTRL_I: + raise self.parse_error(InvalidControlChar, code, "comments") + + if not self.inc(): + break + + comment = self.extract() + self.mark() + + break + elif c in " \t\r": + self.inc() + else: + raise self.parse_error(UnexpectedCharError, c) + + if self.end(): + break + + trail = "" + if parse_trail: + while self._current.is_spaces() and self.inc(): + pass + + if self._current == "\r": + self.inc() + + if self._current == "\n": + self.inc() + + if self._idx != self._marker or self._current.is_ws(): + trail = self.extract() + + return comment_ws, comment, trail + + def _parse_key_value(self, parse_comment: bool = False) -> tuple[Key, Item]: + # Leading indent + self.mark() + + while self._current.is_spaces() and self.inc(): + pass + + indent = self.extract() + + # Key + key = self._parse_key() + + self.mark() + + found_equals = self._current == "=" + while self._current.is_kv_sep() and self.inc(): + if self._current == "=": + if found_equals: + raise self.parse_error(UnexpectedCharError, "=") + else: + found_equals = True + if not found_equals: + raise self.parse_error(UnexpectedCharError, self._current) + + if not key.sep: + key.sep = self.extract() + else: + key.sep += self.extract() + + 
# Value + val = self._parse_value() + # Comment + if parse_comment: + cws, comment, trail = self._parse_comment_trail() + meta = val.trivia + if not meta.comment_ws: + meta.comment_ws = cws + + meta.comment = comment + meta.trail = trail + else: + val.trivia.trail = "" + + val.trivia.indent = indent + + return key, val + + def _parse_key(self) -> Key: + """ + Parses a Key at the current position; + WS before the key must be exhausted first at the callsite. + """ + self.mark() + while self._current.is_spaces() and self.inc(): + # Skip any leading whitespace + pass + if self._current in "\"'": + return self._parse_quoted_key() + else: + return self._parse_bare_key() + + def _parse_quoted_key(self) -> Key: + """ + Parses a key enclosed in either single or double quotes. + """ + # Extract the leading whitespace + original = self.extract() + quote_style = self._current + key_type = next((t for t in KeyType if t.value == quote_style), None) + + if key_type is None: + raise RuntimeError("Should not have entered _parse_quoted_key()") + + key_str = self._parse_string( + StringType.SLB if key_type == KeyType.Basic else StringType.SLL + ) + if key_str._t.is_multiline(): + raise self.parse_error(UnexpectedCharError, key_str._t.value) + original += key_str.as_string() + self.mark() + while self._current.is_spaces() and self.inc(): + pass + original += self.extract() + key = SingleKey(str(key_str), t=key_type, sep="", original=original) + if self._current == ".": + self.inc() + key = key.concat(self._parse_key()) + + return key + + def _parse_bare_key(self) -> Key: + """ + Parses a bare key. 
+ """ + while ( + self._current.is_bare_key_char() or self._current.is_spaces() + ) and self.inc(): + pass + + original = self.extract() + key = original.strip() + if not key: + # Empty key + raise self.parse_error(EmptyKeyError) + + if " " in key: + # Bare key with spaces in it + raise self.parse_error(ParseError, f'Invalid key "{key}"') + + key = SingleKey(key, KeyType.Bare, "", original) + + if self._current == ".": + self.inc() + key = key.concat(self._parse_key()) + + return key + + def _parse_value(self) -> Item: + """ + Attempts to parse a value at the current position. + """ + self.mark() + c = self._current + trivia = Trivia() + + if c == StringType.SLB.value: + return self._parse_basic_string() + elif c == StringType.SLL.value: + return self._parse_literal_string() + elif c == BoolType.TRUE.value[0]: + return self._parse_true() + elif c == BoolType.FALSE.value[0]: + return self._parse_false() + elif c == "[": + return self._parse_array() + elif c == "{": + return self._parse_inline_table() + elif c in "+-" or self._peek(4) in { + "+inf", + "-inf", + "inf", + "+nan", + "-nan", + "nan", + }: + # Number + while self._current not in " \t\n\r#,]}" and self.inc(): + pass + + raw = self.extract() + + item = self._parse_number(raw, trivia) + if item is not None: + return item + + raise self.parse_error(InvalidNumberError) + elif c in string.digits: + # Integer, Float, Date, Time or DateTime + while self._current not in " \t\n\r#,]}" and self.inc(): + pass + + raw = self.extract() + + m = RFC_3339_LOOSE.match(raw) + if m: + if m.group(1) and m.group(5): + # datetime + try: + dt = parse_rfc3339(raw) + assert isinstance(dt, datetime.datetime) + return DateTime( + dt.year, + dt.month, + dt.day, + dt.hour, + dt.minute, + dt.second, + dt.microsecond, + dt.tzinfo, + trivia, + raw, + ) + except ValueError: + raise self.parse_error(InvalidDateTimeError) + + if m.group(1): + try: + dt = parse_rfc3339(raw) + assert isinstance(dt, datetime.date) + date = Date(dt.year, 
dt.month, dt.day, trivia, raw) + self.mark() + while self._current not in "\t\n\r#,]}" and self.inc(): + pass + + time_raw = self.extract() + time_part = time_raw.rstrip() + trivia.comment_ws = time_raw[len(time_part) :] + if not time_part: + return date + + dt = parse_rfc3339(raw + time_part) + assert isinstance(dt, datetime.datetime) + return DateTime( + dt.year, + dt.month, + dt.day, + dt.hour, + dt.minute, + dt.second, + dt.microsecond, + dt.tzinfo, + trivia, + raw + time_part, + ) + except ValueError: + raise self.parse_error(InvalidDateError) + + if m.group(5): + try: + t = parse_rfc3339(raw) + assert isinstance(t, datetime.time) + return Time( + t.hour, + t.minute, + t.second, + t.microsecond, + t.tzinfo, + trivia, + raw, + ) + except ValueError: + raise self.parse_error(InvalidTimeError) + + item = self._parse_number(raw, trivia) + if item is not None: + return item + + raise self.parse_error(InvalidNumberError) + else: + raise self.parse_error(UnexpectedCharError, c) + + def _parse_true(self): + return self._parse_bool(BoolType.TRUE) + + def _parse_false(self): + return self._parse_bool(BoolType.FALSE) + + def _parse_bool(self, style: BoolType) -> Bool: + with self._state: + style = BoolType(style) + + # only keep parsing for bool if the characters match the style + # try consuming rest of chars in style + for c in style: + self.consume(c, min=1, max=1) + + return Bool(style, Trivia()) + + def _parse_array(self) -> Array: + # Consume opening bracket, EOF here is an issue (middle of array) + self.inc(exception=UnexpectedEofError) + + elems: list[Item] = [] + prev_value = None + while True: + # consume whitespace + mark = self._idx + self.consume(TOMLChar.SPACES + TOMLChar.NL) + indent = self._src[mark : self._idx] + newline = set(TOMLChar.NL) & set(indent) + if newline: + elems.append(Whitespace(indent)) + continue + + # consume comment + if self._current == "#": + cws, comment, trail = self._parse_comment_trail(parse_trail=False) + 
elems.append(Comment(Trivia(indent, cws, comment, trail))) + continue + + # consume indent + if indent: + elems.append(Whitespace(indent)) + continue + + # consume value + if not prev_value: + try: + elems.append(self._parse_value()) + prev_value = True + continue + except UnexpectedCharError: + pass + + # consume comma + if prev_value and self._current == ",": + self.inc(exception=UnexpectedEofError) + elems.append(Whitespace(",")) + prev_value = False + continue + + # consume closing bracket + if self._current == "]": + # consume closing bracket, EOF here doesn't matter + self.inc() + break + + raise self.parse_error(UnexpectedCharError, self._current) + + try: + res = Array(elems, Trivia()) + except ValueError: + pass + else: + return res + + def _parse_inline_table(self) -> InlineTable: + # consume opening bracket, EOF here is an issue (middle of array) + self.inc(exception=UnexpectedEofError) + + elems = Container(True) + trailing_comma = None + while True: + # consume leading whitespace + mark = self._idx + self.consume(TOMLChar.SPACES) + raw = self._src[mark : self._idx] + if raw: + elems.add(Whitespace(raw)) + + if not trailing_comma: + # None: empty inline table + # False: previous key-value pair was not followed by a comma + if self._current == "}": + # consume closing bracket, EOF here doesn't matter + self.inc() + break + + if ( + trailing_comma is False + or trailing_comma is None + and self._current == "," + ): + # Either the previous key-value pair was not followed by a comma + # or the table has an unexpected leading comma. 
+ raise self.parse_error(UnexpectedCharError, self._current) + else: + # True: previous key-value pair was followed by a comma + if self._current == "}" or self._current == ",": + raise self.parse_error(UnexpectedCharError, self._current) + + key, val = self._parse_key_value(False) + elems.add(key, val) + + # consume trailing whitespace + mark = self._idx + self.consume(TOMLChar.SPACES) + raw = self._src[mark : self._idx] + if raw: + elems.add(Whitespace(raw)) + + # consume trailing comma + trailing_comma = self._current == "," + if trailing_comma: + # consume closing bracket, EOF here is an issue (middle of inline table) + self.inc(exception=UnexpectedEofError) + + return InlineTable(elems, Trivia()) + + def _parse_number(self, raw: str, trivia: Trivia) -> Item | None: + # Leading zeros are not allowed + sign = "" + if raw.startswith(("+", "-")): + sign = raw[0] + raw = raw[1:] + + if len(raw) > 1 and ( + raw.startswith("0") + and not raw.startswith(("0.", "0o", "0x", "0b", "0e")) + or sign + and raw.startswith(".") + ): + return None + + if raw.startswith(("0o", "0x", "0b")) and sign: + return None + + digits = "[0-9]" + base = 10 + if raw.startswith("0b"): + digits = "[01]" + base = 2 + elif raw.startswith("0o"): + digits = "[0-7]" + base = 8 + elif raw.startswith("0x"): + digits = "[0-9a-f]" + base = 16 + + # Underscores should be surrounded by digits + clean = re.sub(f"(?i)(?<={digits})_(?={digits})", "", raw).lower() + + if "_" in clean: + return None + + if ( + clean.endswith(".") + or not clean.startswith("0x") + and clean.split("e", 1)[0].endswith(".") + ): + return None + + try: + return Integer(int(sign + clean, base), trivia, sign + raw) + except ValueError: + try: + return Float(float(sign + clean), trivia, sign + raw) + except ValueError: + return None + + def _parse_literal_string(self) -> String: + with self._state: + return self._parse_string(StringType.SLL) + + def _parse_basic_string(self) -> String: + with self._state: + return 
self._parse_string(StringType.SLB) + + def _parse_escaped_char(self, multiline): + if multiline and self._current.is_ws(): + # When the last non-whitespace character on a line is + # a \, it will be trimmed along with all whitespace + # (including newlines) up to the next non-whitespace + # character or closing delimiter. + # """\ + # hello \ + # world""" + tmp = "" + while self._current.is_ws(): + tmp += self._current + # consume the whitespace, EOF here is an issue + # (middle of string) + self.inc(exception=UnexpectedEofError) + continue + + # the escape followed by whitespace must have a newline + # before any other chars + if "\n" not in tmp: + raise self.parse_error(InvalidCharInStringError, self._current) + + return "" + + if self._current in _escaped: + c = _escaped[self._current] + + # consume this char, EOF here is an issue (middle of string) + self.inc(exception=UnexpectedEofError) + + return c + + if self._current in {"u", "U"}: + # this needs to be a unicode + u, ue = self._peek_unicode(self._current == "U") + if u is not None: + # consume the U char and the unicode value + self.inc_n(len(ue) + 1) + + return u + + raise self.parse_error(InvalidUnicodeValueError) + + raise self.parse_error(InvalidCharInStringError, self._current) + + def _parse_string(self, delim: StringType) -> String: + # only keep parsing for string if the current character matches the delim + if self._current != delim.unit: + raise self.parse_error( + InternalParserError, + f"Invalid character for string type {delim}", + ) + + # consume the opening/first delim, EOF here is an issue + # (middle of string or middle of delim) + self.inc(exception=UnexpectedEofError) + + if self._current == delim.unit: + # consume the closing/second delim, we do not care if EOF occurs as + # that would simply imply an empty single line string + if not self.inc() or self._current != delim.unit: + # Empty string + return String(delim, "", "", Trivia()) + + # consume the third delim, EOF here is an issue 
(middle of string) + self.inc(exception=UnexpectedEofError) + + delim = delim.toggle() # convert delim to multi delim + + self.mark() # to extract the original string with whitespace and all + value = "" + + # A newline immediately following the opening delimiter will be trimmed. + if delim.is_multiline(): + if self._current == "\n": + # consume the newline, EOF here is an issue (middle of string) + self.inc(exception=UnexpectedEofError) + else: + cur = self._current + with self._state(restore=True): + if self.inc(): + cur += self._current + if cur == "\r\n": + self.inc_n(2, exception=UnexpectedEofError) + + escaped = False # whether the previous key was ESCAPE + while True: + code = ord(self._current) + if ( + delim.is_singleline() + and not escaped + and (code == CHR_DEL or code <= CTRL_CHAR_LIMIT and code != CTRL_I) + ) or ( + delim.is_multiline() + and not escaped + and ( + code == CHR_DEL + or code <= CTRL_CHAR_LIMIT + and code not in [CTRL_I, CTRL_J, CTRL_M] + ) + ): + raise self.parse_error(InvalidControlChar, code, "strings") + elif not escaped and self._current == delim.unit: + # try to process current as a closing delim + original = self.extract() + + close = "" + if delim.is_multiline(): + # Consume the delimiters to see if we are at the end of the string + close = "" + while self._current == delim.unit: + close += self._current + self.inc() + + if len(close) < 3: + # Not a triple quote, leave in result as-is. 
+ # Adding back the characters we already consumed + value += close + continue + + if len(close) == 3: + # We are at the end of the string + return String(delim, value, original, Trivia()) + + if len(close) >= 6: + raise self.parse_error(InvalidCharInStringError, self._current) + + value += close[:-3] + original += close[:-3] + + return String(delim, value, original, Trivia()) + else: + # consume the closing delim, we do not care if EOF occurs as + # that would simply imply the end of self._src + self.inc() + + return String(delim, value, original, Trivia()) + elif delim.is_basic() and escaped: + # attempt to parse the current char as an escaped value, an exception + # is raised if this fails + value += self._parse_escaped_char(delim.is_multiline()) + + # no longer escaped + escaped = False + elif delim.is_basic() and self._current == "\\": + # the next char is being escaped + escaped = True + + # consume this char, EOF here is an issue (middle of string) + self.inc(exception=UnexpectedEofError) + else: + # this is either a literal string where we keep everything as is, + # or this is not a special escaped char in a basic string + value += self._current + + # consume this char, EOF here is an issue (middle of string) + self.inc(exception=UnexpectedEofError) + + def _parse_table( + self, parent_name: Key | None = None, parent: Table | None = None + ) -> tuple[Key, Table | AoT]: + """ + Parses a table element. + """ + if self._current != "[": + raise self.parse_error( + InternalParserError, "_parse_table() called on non-bracket character." 
+ ) + + indent = self.extract() + self.inc() # Skip opening bracket + + if self.end(): + raise self.parse_error(UnexpectedEofError) + + is_aot = False + if self._current == "[": + if not self.inc(): + raise self.parse_error(UnexpectedEofError) + + is_aot = True + try: + key = self._parse_key() + except EmptyKeyError: + raise self.parse_error(EmptyTableNameError) from None + if self.end(): + raise self.parse_error(UnexpectedEofError) + elif self._current != "]": + raise self.parse_error(UnexpectedCharError, self._current) + + key.sep = "" + full_key = key + name_parts = tuple(key) + if any(" " in part.key.strip() and part.is_bare() for part in name_parts): + raise self.parse_error( + ParseError, f'Invalid table name "{full_key.as_string()}"' + ) + + missing_table = False + if parent_name: + parent_name_parts = tuple(parent_name) + else: + parent_name_parts = () + + if len(name_parts) > len(parent_name_parts) + 1: + missing_table = True + + name_parts = name_parts[len(parent_name_parts) :] + + values = Container(True) + + self.inc() # Skip closing bracket + if is_aot: + # TODO: Verify close bracket + self.inc() + + cws, comment, trail = self._parse_comment_trail() + + result = Null() + table = Table( + values, + Trivia(indent, cws, comment, trail), + is_aot, + name=name_parts[0].key if name_parts else key.key, + display_name=full_key.as_string(), + is_super_table=False, + ) + + if len(name_parts) > 1: + if missing_table: + # Missing super table + # i.e. 
a table initialized like this: [foo.bar] + # without initializing [foo] + # + # So we have to create the parent tables + table = Table( + Container(True), + Trivia("", cws, comment, trail), + is_aot and name_parts[0] in self._aot_stack, + is_super_table=True, + name=name_parts[0].key, + ) + + result = table + key = name_parts[0] + + for i, _name in enumerate(name_parts[1:]): + child = table.get( + _name, + Table( + Container(True), + Trivia(indent, cws, comment, trail), + is_aot and i == len(name_parts) - 2, + is_super_table=i < len(name_parts) - 2, + name=_name.key, + display_name=full_key.as_string() + if i == len(name_parts) - 2 + else None, + ), + ) + + if is_aot and i == len(name_parts) - 2: + table.raw_append(_name, AoT([child], name=table.name, parsed=True)) + else: + table.raw_append(_name, child) + + table = child + values = table.value + else: + if name_parts: + key = name_parts[0] + + while not self.end(): + item = self._parse_item() + if item: + _key, item = item + if not self._merge_ws(item, values): + table.raw_append(_key, item) + else: + if self._current == "[": + _, key_next = self._peek_table() + + if self._is_child(full_key, key_next): + key_next, table_next = self._parse_table(full_key, table) + + table.raw_append(key_next, table_next) + + # Picking up any sibling + while not self.end(): + _, key_next = self._peek_table() + + if not self._is_child(full_key, key_next): + break + + key_next, table_next = self._parse_table(full_key, table) + + table.raw_append(key_next, table_next) + + break + else: + raise self.parse_error( + InternalParserError, + "_parse_item() returned None on a non-bracket character.", + ) + + if isinstance(result, Null): + result = table + + if is_aot and (not self._aot_stack or full_key != self._aot_stack[-1]): + result = self._parse_aot(result, full_key) + + return key, result + + def _peek_table(self) -> tuple[bool, Key]: + """ + Peeks ahead non-intrusively by cloning then restoring the + initial state of the parser. 
+ + Returns the name of the table about to be parsed, + as well as whether it is part of an AoT. + """ + # we always want to restore after exiting this scope + with self._state(save_marker=True, restore=True): + if self._current != "[": + raise self.parse_error( + InternalParserError, + "_peek_table() entered on non-bracket character", + ) + + # AoT + self.inc() + is_aot = False + if self._current == "[": + self.inc() + is_aot = True + try: + return is_aot, self._parse_key() + except EmptyKeyError: + raise self.parse_error(EmptyTableNameError) from None + + def _parse_aot(self, first: Table, name_first: Key) -> AoT: + """ + Parses all siblings of the provided table first and bundles them into + an AoT. + """ + payload = [first] + self._aot_stack.append(name_first) + while not self.end(): + is_aot_next, name_next = self._peek_table() + if is_aot_next and name_next == name_first: + _, table = self._parse_table(name_first) + payload.append(table) + else: + break + + self._aot_stack.pop() + + return AoT(payload, parsed=True) + + def _peek(self, n: int) -> str: + """ + Peeks ahead n characters. + + n is the max number of characters that will be peeked. + """ + # we always want to restore after exiting this scope + with self._state(restore=True): + buf = "" + for _ in range(n): + if self._current not in " \t\n\r#,]}" + self._src.EOF: + buf += self._current + self.inc() + continue + + break + return buf + + def _peek_unicode(self, is_long: bool) -> tuple[str | None, str | None]: + """ + Peeks ahead non-intrusively by cloning then restoring the + initial state of the parser. + + Returns the unicode value is it's a valid one else None. 
+ """ + # we always want to restore after exiting this scope + with self._state(save_marker=True, restore=True): + if self._current not in {"u", "U"}: + raise self.parse_error( + InternalParserError, "_peek_unicode() entered on non-unicode value" + ) + + self.inc() # Dropping prefix + self.mark() + + if is_long: + chars = 8 + else: + chars = 4 + + if not self.inc_n(chars): + value, extracted = None, None + else: + extracted = self.extract() + + if extracted[0].lower() == "d" and extracted[1].strip("01234567"): + return None, None + + try: + value = chr(int(extracted, 16)) + except (ValueError, OverflowError): + value = None + + return value, extracted diff --git a/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/py.typed b/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/toml_char.py b/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/toml_char.py new file mode 100644 index 0000000000000000000000000000000000000000..b4bb4110c557de854a815c677a4f968be550c21d --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/toml_char.py @@ -0,0 +1,52 @@ +import string + + +class TOMLChar(str): + def __init__(self, c): + super().__init__() + + if len(self) > 1: + raise ValueError("A TOML character must be of length 1") + + BARE = string.ascii_letters + string.digits + "-_" + KV = "= \t" + NUMBER = string.digits + "+-_.e" + SPACES = " \t" + NL = "\n\r" + WS = SPACES + NL + + def is_bare_key_char(self) -> bool: + """ + Whether the character is a valid bare key name or not. + """ + return self in self.BARE + + def is_kv_sep(self) -> bool: + """ + Whether the character is a valid key/value separator or not. + """ + return self in self.KV + + def is_int_float_char(self) -> bool: + """ + Whether the character if a valid integer or float value character or not. 
+ """ + return self in self.NUMBER + + def is_ws(self) -> bool: + """ + Whether the character is a whitespace character or not. + """ + return self in self.WS + + def is_nl(self) -> bool: + """ + Whether the character is a new line character or not. + """ + return self in self.NL + + def is_spaces(self) -> bool: + """ + Whether the character is a space or not + """ + return self in self.SPACES diff --git a/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/toml_file.py b/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/toml_file.py new file mode 100644 index 0000000000000000000000000000000000000000..745913080399db59a7efcfd9d0c57ddc84fb72a4 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/tomlkit/toml_file.py @@ -0,0 +1,58 @@ +import os +import re + +from typing import TYPE_CHECKING + +from tomlkit.api import loads +from tomlkit.toml_document import TOMLDocument + + +if TYPE_CHECKING: + from _typeshed import StrPath as _StrPath +else: + from typing import Union + + _StrPath = Union[str, os.PathLike] + + +class TOMLFile: + """ + Represents a TOML file. + + :param path: path to the TOML file + """ + + def __init__(self, path: _StrPath) -> None: + self._path = path + self._linesep = os.linesep + + def read(self) -> TOMLDocument: + """Read the file content as a :class:`tomlkit.toml_document.TOMLDocument`.""" + with open(self._path, encoding="utf-8", newline="") as f: + content = f.read() + + # check if consistent line endings + num_newline = content.count("\n") + if num_newline > 0: + num_win_eol = content.count("\r\n") + if num_win_eol == num_newline: + self._linesep = "\r\n" + elif num_win_eol == 0: + self._linesep = "\n" + else: + self._linesep = "mixed" + + return loads(content) + + def write(self, data: TOMLDocument) -> None: + """Write the TOMLDocument to the file.""" + content = data.as_string() + + # apply linesep + if self._linesep == "\n": + content = content.replace("\r\n", "\n") + elif self._linesep == "\r\n": + content = re.sub(r"(? 
+#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor _adaptive_avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_add_relu_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_add_relu_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..71bd94fc06b6f7cf7b3cc5d61b2b079bc2fbd298 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_add_relu_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _add_relu_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_add_relu") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha); +}; + +struct TORCH_API _add_relu__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_add_relu_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_add_relu_.Tensor(Tensor(a!) 
self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha); +}; + +struct TORCH_API _add_relu_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_add_relu") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out); +}; + +struct TORCH_API _add_relu_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_add_relu") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha); +}; + +struct TORCH_API _add_relu__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &, const at::Scalar &); + using ptr_schema = 
schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_add_relu_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha); +}; + +struct TORCH_API _add_relu_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_add_relu") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation.h new file mode 100644 index 0000000000000000000000000000000000000000..60ecc85c8853f100fd8c9adb3d20c933e916b573 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _addmm_activation_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false) { + return at::_ops::_addmm_activation_out::call(self, mat1, mat2, beta, alpha, use_gelu, out); +} +// aten::_addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & _addmm_activation_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, at::Tensor & out) { + return at::_ops::_addmm_activation_out::call(self, mat1, mat2, beta, alpha, use_gelu, out); +} + +// aten::_addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor +inline at::Tensor _addmm_activation(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false) { + return at::_ops::_addmm_activation::call(self, mat1, mat2, beta, alpha, use_gelu); +} + +} diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_coalesced_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_coalesced_native.h new file mode 100644 index 0000000000000000000000000000000000000000..3538a667af6b5e324115d09aea940e06284e75bb --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_coalesced_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor _coalesced(const at::Tensor & self, bool coalesced); +TORCH_API at::Tensor & _coalesced_out(const at::Tensor & self, bool coalesced, at::Tensor & out); +TORCH_API at::Tensor & _coalesced_sparse_(at::Tensor & self, bool coalesced); +} // namespace native +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ebc1be64f5de8027d5dac1531237890bb423415b --- /dev/null +++ 
b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API int64_t _cufft_get_plan_cache_size(at::DeviceIndex device_index); +} // namespace native +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_max.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_max.h new file mode 100644 index 0000000000000000000000000000000000000000..c480bde5b6fa6395813d0d1073c15923ea05b36d --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_max.h @@ -0,0 +1,82 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] +inline ::std::vector _foreach_clamp_max(at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_clamp_max_Scalar::call(self, scalar); +} + +// aten::_foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () +inline void _foreach_clamp_max_(at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_clamp_max__Scalar::call(self, scalar); +} + +// aten::_foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[] +inline ::std::vector _foreach_clamp_max(at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_clamp_max_List::call(self, other); +} + +// aten::_foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> () +inline void _foreach_clamp_max_(at::TensorList self, at::TensorList other) { + return 
at::_ops::_foreach_clamp_max__List::call(self, other); +} + +// aten::_foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] +inline ::std::vector _foreach_clamp_max(at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_clamp_max_ScalarList::call(self, scalars); +} + +// aten::_foreach_clamp_max_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () +inline void _foreach_clamp_max_(at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_clamp_max__ScalarList::call(self, scalars); +} + +// aten::_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () +inline void _foreach_clamp_max_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_clamp_max_Scalar_out::call(self, scalar, out); +} +// aten::_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () +inline void _foreach_clamp_max_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) { + return at::_ops::_foreach_clamp_max_Scalar_out::call(self, scalar, out); +} + +// aten::_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () +inline void _foreach_clamp_max_out(at::TensorList out, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_clamp_max_List_out::call(self, other, out); +} +// aten::_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () +inline void _foreach_clamp_max_outf(at::TensorList self, at::TensorList other, at::TensorList out) { + return at::_ops::_foreach_clamp_max_List_out::call(self, other, out); +} + +// aten::_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () +inline void _foreach_clamp_max_out(at::TensorList out, at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_clamp_max_ScalarList_out::call(self, scalars, out); +} +// aten::_foreach_clamp_max.ScalarList_out(Tensor[] 
self, Scalar[] scalars, *, Tensor(a!)[] out) -> () +inline void _foreach_clamp_max_outf(at::TensorList self, at::ArrayRef scalars, at::TensorList out) { + return at::_ops::_foreach_clamp_max_ScalarList_out::call(self, scalars, out); +} + +} diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_expm1.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_expm1.h new file mode 100644 index 0000000000000000000000000000000000000000..6e7ad09ecdf59b82f1762a0de4f5d2b9fb3e92a0 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_expm1.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_foreach_expm1(Tensor[] self) -> Tensor[] +inline ::std::vector _foreach_expm1(at::TensorList self) { + return at::_ops::_foreach_expm1::call(self); +} + +// aten::_foreach_expm1_(Tensor(a!)[] self) -> () +inline void _foreach_expm1_(at::TensorList self) { + return at::_ops::_foreach_expm1_::call(self); +} + +// aten::_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> () +inline void _foreach_expm1_out(at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_expm1_out::call(self, out); +} +// aten::_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> () +inline void _foreach_expm1_outf(at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_expm1_out::call(self, out); +} + +} diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sgd_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sgd_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..3ebd1e59275902528b5cca34e40ee7fe45ca5592 --- /dev/null +++ 
b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sgd_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _fused_sgd_ { + using schema = void (at::TensorList, at::TensorList, at::TensorList, double, double, double, double, bool, bool, bool, const ::std::optional &, const ::std::optional &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fused_sgd_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fused_sgd_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? 
found_inf=None) -> ()") + static void call(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional & grad_scale, const ::std::optional & found_inf); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional & grad_scale, const ::std::optional & found_inf); +}; + +struct TORCH_API _fused_sgd__tensor_lr { + using schema = void (at::TensorList, at::TensorList, at::TensorList, double, double, const at::Tensor &, double, bool, bool, bool, const ::std::optional &, const ::std::optional &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fused_sgd_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "tensor_lr") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fused_sgd_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? 
found_inf=None) -> ()") + static void call(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional & grad_scale, const ::std::optional & found_inf); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional & grad_scale, const ::std::optional & found_inf); +}; + +struct TORCH_API _fused_sgd_out { + using schema = void (at::TensorList, at::TensorList, at::TensorList, double, double, double, double, bool, bool, bool, const ::std::optional &, const ::std::optional &, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fused_sgd") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fused_sgd.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? 
found_inf=None, Tensor(a!)[] out) -> ()") + static void call(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional & grad_scale, const ::std::optional & found_inf, at::TensorList out); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional & grad_scale, const ::std::optional & found_inf, at::TensorList out); +}; + +struct TORCH_API _fused_sgd { + using schema = ::std::tuple<::std::vector,::std::vector,::std::vector> (at::TensorList, at::TensorList, at::TensorList, double, double, double, double, bool, bool, bool, const ::std::optional &, const ::std::optional &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fused_sgd") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fused_sgd(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? 
found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out)") + static ::std::tuple<::std::vector,::std::vector,::std::vector> call(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional & grad_scale, const ::std::optional & found_inf); + static ::std::tuple<::std::vector,::std::vector,::std::vector> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional & grad_scale, const ::std::optional & found_inf); +}; + +struct TORCH_API _fused_sgd_tensor_lr_out { + using schema = void (at::TensorList, at::TensorList, at::TensorList, double, double, const at::Tensor &, double, bool, bool, bool, const ::std::optional &, const ::std::optional &, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fused_sgd") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "tensor_lr_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fused_sgd.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? 
found_inf=None, Tensor(a!)[] out) -> ()") + static void call(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional & grad_scale, const ::std::optional & found_inf, at::TensorList out); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional & grad_scale, const ::std::optional & found_inf, at::TensorList out); +}; + +struct TORCH_API _fused_sgd_tensor_lr { + using schema = ::std::tuple<::std::vector,::std::vector,::std::vector> (at::TensorList, at::TensorList, at::TensorList, double, double, const at::Tensor &, double, bool, bool, bool, const ::std::optional &, const ::std::optional &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fused_sgd") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "tensor_lr") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fused_sgd.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? 
found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out)") + static ::std::tuple<::std::vector,::std::vector,::std::vector> call(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional & grad_scale, const ::std::optional & found_inf); + static ::std::tuple<::std::vector,::std::vector,::std::vector> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional & grad_scale, const ::std::optional & found_inf); +}; + +}} // namespace at::_ops diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_with_dims_meta_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_with_dims_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c0da4e941db5590b372270ca7249e6803457c72b --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_with_dims_meta_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::TensorOptions options); +TORCH_API at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + +} // namespace meta +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_int8pack_mm_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_int8pack_mm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..bdd37fa2c2a89ca9b9c3ecba89144e3e8557fa80 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_int8pack_mm_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor _weight_int8pack_mm_cpu(const at::Tensor & self, const at::Tensor & mat2, const at::Tensor & scales); +} // namespace native +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax_meta_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f0b0c764f8e5997eef32d1187fb355df8d50b086 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types 
needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API ::std::tuple aminmax(const at::Tensor & self, ::std::optional dim=::std::nullopt, bool keepdim=false); +TORCH_API ::std::tuple aminmax_out(at::Tensor & min, at::Tensor & max, const at::Tensor & self, ::std::optional dim=::std::nullopt, bool keepdim=false); +TORCH_API ::std::tuple aminmax_outf(const at::Tensor & self, ::std::optional dim, bool keepdim, at::Tensor & min, at::Tensor & max); + +} // namespace meta +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/cos_meta_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/cos_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..16bf9233c1daeef3db4864f43d60175023d0953d --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/cos_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor cos(const at::Tensor & self); +TORCH_API at::Tensor & cos_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & cos_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & cos_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_backward.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..cb3610b9885098d2fe26b4ea1f79e7f1906451d3 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta +inline at::Tensor cudnn_affine_grid_generator_backward(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W) { + return at::_ops::cudnn_affine_grid_generator_backward::call(grad, N, C, H, W); +} + +// aten::cudnn_affine_grid_generator_backward.out(Tensor grad, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & cudnn_affine_grid_generator_backward_out(at::Tensor & out, const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W) { + return at::_ops::cudnn_affine_grid_generator_backward_out::call(grad, N, C, H, W, out); +} +// aten::cudnn_affine_grid_generator_backward.out(Tensor grad, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & cudnn_affine_grid_generator_backward_outf(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) { + return at::_ops::cudnn_affine_grid_generator_backward_out::call(grad, N, C, H, W, out); +} + +} diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_relu_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_relu_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..734eb2fe7cf4b33d6aadc4ef5bab8cab447a9f1e --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_relu_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API cudnn_convolution_relu { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymInt); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cudnn_convolution_relu") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups); +}; + +struct TORCH_API cudnn_convolution_relu_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymInt, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cudnn_convolution_relu") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/elu_compositeexplicitautogradnonfunctional_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/elu_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fc01c74e6acb77b8e705ddc3cc05e348bd5bb87c --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/elu_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor elu(const at::Tensor & self, const at::Scalar & alpha=1, const at::Scalar & scale=1, const at::Scalar & input_scale=1); +TORCH_API at::Tensor & elu_(at::Tensor & self, const at::Scalar & alpha=1, const at::Scalar & scale=1, const at::Scalar & input_scale=1); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_cpu_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..62684b1ac058212095c6435e0c28ed1d72b32125 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor hardshrink_backward(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); +TORCH_API at::Tensor & hardshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); +TORCH_API at::Tensor & hardshrink_backward_outf(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/histc.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/histc.h new file mode 100644 index 0000000000000000000000000000000000000000..e4744841bba93e429f84064d60f7c979aa718631 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/histc.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & histc_out(at::Tensor & out, const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0) { + return at::_ops::histc_out::call(self, bins, min, max, out); +} +// aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & histc_outf(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out) { + return at::_ops::histc_out::call(self, bins, min, max, out); +} + +// aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor +inline at::Tensor histc(const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0) { + return at::_ops::histc::call(self, bins, min, max); +} + +} diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_native.h new file mode 100644 index 0000000000000000000000000000000000000000..e296726ad92ca0571689f47687e0ef8cfeb2d595 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ex_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_linalg_inv_ex_out : public at::meta::structured_linalg_inv_ex { +void impl(const at::Tensor & A, bool check_errors, const at::Tensor & inverse, const at::Tensor & info); +}; +} // namespace native +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/median_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/median_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e9fe7c75da3605a27d8f51896e7b5938190124f3 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/median_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API median { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::median") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "median(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API median_dim { + using schema = ::std::tuple (const at::Tensor &, int64_t, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::median") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dim") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)") + static ::std::tuple call(const at::Tensor & self, int64_t dim, bool keepdim); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim); +}; + +struct TORCH_API median_dim_values { + using schema = ::std::tuple (const at::Tensor &, int64_t, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::median") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dim_values") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices)") + static ::std::tuple call(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +}; + +struct TORCH_API median_names_dim { + using schema = ::std::tuple (const at::Tensor &, at::Dimname, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::median") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "names_dim") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)") + static ::std::tuple call(const at::Tensor & self, at::Dimname dim, bool keepdim); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim); +}; + +struct TORCH_API median_names_dim_values { + using schema = ::std::tuple (const at::Tensor &, at::Dimname, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::median") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "names_dim_values") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices)") + static ::std::tuple call(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +}; + +struct TORCH_API median_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::median") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_rnn_backward_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_rnn_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..22fe29cea1cc16deb70cadb52786eeb74b3ad8ba --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_rnn_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API miopen_rnn_backward { + using schema = ::std::tuple> (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const at::Tensor &, const ::std::optional &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const ::std::optional &, const at::Tensor &, ::std::array); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::miopen_rnn_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? 
dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])") + static ::std::tuple> call(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional & cx, const at::Tensor & output, const ::std::optional & grad_output, const ::std::optional & grad_hy, const ::std::optional & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional & dropout_state, const at::Tensor & reserve, ::std::array output_mask); + static ::std::tuple> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional & cx, const at::Tensor & output, const ::std::optional & grad_output, const ::std::optional & grad_hy, const ::std::optional & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional & dropout_state, const at::Tensor & reserve, ::std::array output_mask); +}; + +struct TORCH_API miopen_rnn_backward_out { + using schema = void (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const at::Tensor &, const ::std::optional &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const ::std::optional &, const at::Tensor &, ::std::array, at::Tensor &, at::Tensor &, at::Tensor &, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::miopen_rnn_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + 
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()") + static void call(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional & cx, const at::Tensor & output, const ::std::optional & grad_output, const ::std::optional & grad_hy, const ::std::optional & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional & dropout_state, const at::Tensor & reserve, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3); + static void redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional & cx, const at::Tensor & output, const ::std::optional & grad_output, const ::std::optional & grad_hy, const ::std::optional & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional & dropout_state, const at::Tensor & reserve, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3); +}; + +}} // namespace at::_ops diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mps_convolution_transpose_backward.h 
b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mps_convolution_transpose_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..de0ffff2d5db05089b6c419d361c281fa8cf2458 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mps_convolution_transpose_backward.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask) -> (Tensor, Tensor) +inline ::std::tuple mps_convolution_transpose_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask) { + return at::_ops::mps_convolution_transpose_backward::call(self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask); +} +namespace symint { + template ::value>> + ::std::tuple mps_convolution_transpose_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask) { + return at::_ops::mps_convolution_transpose_backward::call(self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask); + } +} + +// aten::mps_convolution_transpose_backward(Tensor 
self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask) -> (Tensor, Tensor) +inline ::std::tuple mps_convolution_transpose_backward_symint(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask) { + return at::_ops::mps_convolution_transpose_backward::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask); +} +namespace symint { + template ::value>> + ::std::tuple mps_convolution_transpose_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask) { + return at::_ops::mps_convolution_transpose_backward::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask); + } +} + +// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple mps_convolution_transpose_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask) { + return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask, out0, out1); +} +namespace symint { + template ::value>> + ::std::tuple mps_convolution_transpose_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask) { + return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask, out0, out1); + } +} + +// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple mps_convolution_transpose_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask, out0, out1); +} +namespace symint { + template ::value>> + ::std::tuple mps_convolution_transpose_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask, out0, out1); + } +} + +// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple mps_convolution_transpose_backward_symint_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask) { + return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask, out0, out1); +} +namespace symint { + template ::value>> + ::std::tuple mps_convolution_transpose_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask) { + return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask, out0, out1); + } +} + +// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple mps_convolution_transpose_backward_symint_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask, out0, out1); +} +namespace symint { + template ::value>> + ::std::tuple mps_convolution_transpose_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask, out0, out1); + } +} + +} diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/nan_to_num.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/nan_to_num.h new file mode 100644 index 0000000000000000000000000000000000000000..bd537af44c5f8d3f9b9e1ad77c7340e6e6b18ed0 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/nan_to_num.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? 
neginf=None) -> Tensor +inline at::Tensor nan_to_num(const at::Tensor & self, ::std::optional nan=::std::nullopt, ::std::optional posinf=::std::nullopt, ::std::optional neginf=::std::nullopt) { + return at::_ops::nan_to_num::call(self, nan, posinf, neginf); +} + +// aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!) +inline at::Tensor & nan_to_num_(at::Tensor & self, ::std::optional nan=::std::nullopt, ::std::optional posinf=::std::nullopt, ::std::optional neginf=::std::nullopt) { + return at::_ops::nan_to_num_::call(self, nan, posinf, neginf); +} + +// aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & nan_to_num_out(at::Tensor & out, const at::Tensor & self, ::std::optional nan=::std::nullopt, ::std::optional posinf=::std::nullopt, ::std::optional neginf=::std::nullopt) { + return at::_ops::nan_to_num_out::call(self, nan, posinf, neginf, out); +} +// aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & nan_to_num_outf(const at::Tensor & self, ::std::optional nan, ::std::optional posinf, ::std::optional neginf, at::Tensor & out) { + return at::_ops::nan_to_num_out::call(self, nan, posinf, neginf, out); +} + +} diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/one_hot_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/one_hot_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..380e59fea9fe315e6d6186d4f6205150871f4d03 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/one_hot_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API one_hot { + using schema = at::Tensor (const at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::one_hot") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "one_hot(Tensor self, int num_classes=-1) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t num_classes); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t num_classes); +}; + +}} // namespace at::_ops diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/polar_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/polar_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b20e365abf3feee7e1a3e4b4c8269b01a09b0ff7 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/polar_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor polar(const at::Tensor & abs, const at::Tensor & angle); +TORCH_API at::Tensor & polar_out(const at::Tensor & abs, const at::Tensor & angle, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..beccf4d774614be3f8747bceb6d61b9669785e86 --- /dev/null +++ 
b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API rrelu { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rrelu") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional generator); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional generator); +}; + +struct TORCH_API rrelu_ { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &, const at::Scalar &, bool, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rrelu_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? 
generator=None) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional generator); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional generator); +}; + +}} // namespace at::_ops diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/silu_cuda_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/silu_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..10aba0595feec2dd5bbad3434ee1bd28c859bdf7 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/silu_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor silu(const at::Tensor & self); +TORCH_API at::Tensor & silu_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & silu_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & silu_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv3d_forward.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv3d_forward.h new file mode 100644 index 0000000000000000000000000000000000000000..805dbe39e11026207871ab8cc7221318dbca420c --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv3d_forward.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!) 
+inline at::Tensor & slow_conv3d_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::slow_conv3d_forward_output::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv3d_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::slow_conv3d_forward_output::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output); + } +} + +// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!) 
+inline at::Tensor & slow_conv3d_forward_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output) { + return at::_ops::slow_conv3d_forward_output::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv3d_forward_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output) { + return at::_ops::slow_conv3d_forward_output::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output); + } +} + +// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!) +inline at::Tensor & slow_conv3d_forward_symint_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) { + return at::_ops::slow_conv3d_forward_output::call(self, weight, kernel_size, bias, stride, padding, output); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv3d_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) { + return at::_ops::slow_conv3d_forward_output::call(self, weight, kernel_size, bias, stride, padding, output); + } +} + +// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!) 
+inline at::Tensor & slow_conv3d_forward_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & output) { + return at::_ops::slow_conv3d_forward_output::call(self, weight, kernel_size, bias, stride, padding, output); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv3d_forward_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & output) { + return at::_ops::slow_conv3d_forward_output::call(self, weight, kernel_size, bias, stride, padding, output); + } +} + +// aten::slow_conv3d_forward(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding) -> Tensor +inline at::Tensor slow_conv3d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::slow_conv3d_forward::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding)); +} +namespace symint { + template ::value>> + at::Tensor slow_conv3d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::slow_conv3d_forward::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding)); + } +} + +// aten::slow_conv3d_forward(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? 
bias, SymInt[3] stride, SymInt[3] padding) -> Tensor +inline at::Tensor slow_conv3d_forward_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) { + return at::_ops::slow_conv3d_forward::call(self, weight, kernel_size, bias, stride, padding); +} +namespace symint { + template ::value>> + at::Tensor slow_conv3d_forward(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) { + return at::_ops::slow_conv3d_forward::call(self, weight, kernel_size, bias, stride, padding); + } +} + +} diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i0.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i0.h new file mode 100644 index 0000000000000000000000000000000000000000..06c0c3fa37c06fd0cd8a9dcea8bb27c0c3ac4db5 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i0.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::special_modified_bessel_i0(Tensor self) -> Tensor +inline at::Tensor special_modified_bessel_i0(const at::Tensor & self) { + return at::_ops::special_modified_bessel_i0::call(self); +} + +// aten::special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_modified_bessel_i0_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_modified_bessel_i0_out::call(self, out); +} +// aten::special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & special_modified_bessel_i0_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_modified_bessel_i0_out::call(self, out); +} + +} diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k0_meta.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k0_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..bd859b3f4c84751c13a12817850caa7e2988ef9c --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k0_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_special_scaled_modified_bessel_k0 : public TensorIteratorBase { + + + void meta(const at::Tensor & x); +}; + +} // namespace native +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_cuda_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9fd6424235f370d0201706626414b1b13bf92116 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor upsample_bicubic2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional scales_h=::std::nullopt, ::std::optional scales_w=::std::nullopt); +TORCH_API at::Tensor upsample_bicubic2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional scales_h=::std::nullopt, ::std::optional scales_w=::std::nullopt); +TORCH_API at::Tensor & upsample_bicubic2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional scales_h=::std::nullopt, ::std::optional scales_w=::std::nullopt); +TORCH_API at::Tensor & upsample_bicubic2d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional scales_h, ::std::optional scales_w, at::Tensor & out); +TORCH_API at::Tensor & upsample_bicubic2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional scales_h=::std::nullopt, ::std::optional scales_w=::std::nullopt); +TORCH_API at::Tensor & upsample_bicubic2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional scales_h, ::std::optional scales_w, at::Tensor & out); + +} // namespace cuda +} // namespace at