language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
gevent__gevent
src/gevent/tests/test__greenlet.py
{ "start": 30654, "end": 31221 }
class ____(greentest.TestCase): def test_init(self): self.switch_expected = False # in python-dbg mode this will check that Greenlet() does not create any circular refs gevent.Greenlet() def test_kill_scheduled(self): gevent.spawn(gevent.sleep, timing.LARGE_TICK).kill() def test_kill_started(self): g = gevent.spawn(gevent.sleep, timing.LARGE_TICK) try: gevent.sleep(timing.SMALLEST_RELIABLE_DELAY) finally: g.kill() @greentest.skipOnPurePython("Needs C extension")
TestRef
python
astropy__astropy
astropy/visualization/wcsaxes/patches.py
{ "start": 4901, "end": 7724 }
class ____(Polygon): """ Create a patch representing a latitude-longitude quadrangle. The edges of the quadrangle lie on two lines of constant longitude and two lines of constant latitude (or the equivalent component names in the coordinate frame of interest, such as right ascension and declination). Note that lines of constant latitude are not great circles. Unlike `matplotlib.patches.Rectangle`, the edges of this patch will render as curved lines if appropriate for the WCS transformation. Parameters ---------- anchor : tuple or `~astropy.units.Quantity` ['angle'] This can be either a tuple of two `~astropy.units.Quantity` objects, or a single `~astropy.units.Quantity` array with two elements. width : `~astropy.units.Quantity` ['angle'] The width of the quadrangle in longitude (or, e.g., right ascension) height : `~astropy.units.Quantity` ['angle'] The height of the quadrangle in latitude (or, e.g., declination) resolution : int, optional The number of points that make up each side of the quadrangle - increase this to get a smoother quadrangle. vertex_unit : `~astropy.units.Unit` ['angle'] The units in which the resulting polygon should be defined - this should match the unit that the transformation (e.g. the WCS transformation) expects as input. Notes ----- Additional keyword arguments are passed to `~matplotlib.patches.Polygon` """ def __init__( self, anchor, width, height, resolution=100, vertex_unit=u.degree, **kwargs ): # Extract longitude/latitude, either from a tuple of two quantities, or # a single 2-element Quantity. 
longitude, latitude = u.Quantity(anchor).to_value(vertex_unit) # Convert the quadrangle dimensions to the appropriate units width = width.to_value(vertex_unit) height = height.to_value(vertex_unit) # Create progressions in longitude and latitude lon_seq = longitude + np.linspace(0, width, resolution + 1) lat_seq = latitude + np.linspace(0, height, resolution + 1) # Trace the path of the quadrangle lon = np.concatenate( [ lon_seq[:-1], np.repeat(lon_seq[-1], resolution), np.flip(lon_seq[1:]), np.repeat(lon_seq[0], resolution), ] ) lat = np.concatenate( [ np.repeat(lat_seq[0], resolution), lat_seq[:-1], np.repeat(lat_seq[-1], resolution), np.flip(lat_seq[1:]), ] ) # Create polygon vertices vertices = np.array([lon, lat]).transpose() super().__init__(vertices, **kwargs)
Quadrangle
python
tensorflow__tensorflow
tensorflow/python/keras/utils/generic_utils.py
{ "start": 28660, "end": 40600 }
class ____(object): """Displays a progress bar. Args: target: Total number of steps expected, None if unknown. width: Progress bar width on screen. verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose) stateful_metrics: Iterable of string names of metrics that should *not* be averaged over time. Metrics in this list will be displayed as-is. All others will be averaged by the progbar before display. interval: Minimum visual progress update interval (in seconds). unit_name: Display name for step counts (usually "step" or "sample"). """ def __init__(self, target, width=30, verbose=1, interval=0.05, stateful_metrics=None, unit_name='step'): self.target = target self.width = width self.verbose = verbose self.interval = interval self.unit_name = unit_name if stateful_metrics: self.stateful_metrics = set(stateful_metrics) else: self.stateful_metrics = set() self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()) or 'ipykernel' in sys.modules or 'posix' in sys.modules or 'PYCHARM_HOSTED' in os.environ) self._total_width = 0 self._seen_so_far = 0 # We use a dict + list to avoid garbage collection # issues found in OrderedDict self._values = {} self._values_order = [] self._start = time.time() self._last_update = 0 self._time_after_first_step = None def update(self, current, values=None, finalize=None): """Updates the progress bar. Args: current: Index of current step. values: List of tuples: `(name, value_for_last_step)`. If `name` is in `stateful_metrics`, `value_for_last_step` will be displayed as-is. Else, an average of the metric over time will be displayed. finalize: Whether this is the last update for the progress bar. If `None`, defaults to `current >= self.target`. 
""" if finalize is None: if self.target is None: finalize = False else: finalize = current >= self.target values = values or [] for k, v in values: if k not in self._values_order: self._values_order.append(k) if k not in self.stateful_metrics: # In the case that progress bar doesn't have a target value in the first # epoch, both on_batch_end and on_epoch_end will be called, which will # cause 'current' and 'self._seen_so_far' to have the same value. Force # the minimal value to 1 here, otherwise stateful_metric will be 0s. value_base = max(current - self._seen_so_far, 1) if k not in self._values: self._values[k] = [v * value_base, value_base] else: self._values[k][0] += v * value_base self._values[k][1] += value_base else: # Stateful metrics output a numeric value. This representation # means "take an average from a single value" but keeps the # numeric formatting. self._values[k] = [v, 1] self._seen_so_far = current now = time.time() info = ' - %.0fs' % (now - self._start) if self.verbose == 1: if now - self._last_update < self.interval and not finalize: return prev_total_width = self._total_width if self._dynamic_display: sys.stdout.write('\b' * prev_total_width) sys.stdout.write('\r') else: sys.stdout.write('\n') if self.target is not None: numdigits = int(np.log10(self.target)) + 1 bar = ('%' + str(numdigits) + 'd/%d [') % (current, self.target) prog = float(current) / self.target prog_width = int(self.width * prog) if prog_width > 0: bar += ('=' * (prog_width - 1)) if current < self.target: bar += '>' else: bar += '=' bar += ('.' 
* (self.width - prog_width)) bar += ']' else: bar = '%7d/Unknown' % current self._total_width = len(bar) sys.stdout.write(bar) time_per_unit = self._estimate_step_duration(current, now) if self.target is None or finalize: if time_per_unit >= 1 or time_per_unit == 0: info += ' %.0fs/%s' % (time_per_unit, self.unit_name) elif time_per_unit >= 1e-3: info += ' %.0fms/%s' % (time_per_unit * 1e3, self.unit_name) else: info += ' %.0fus/%s' % (time_per_unit * 1e6, self.unit_name) else: eta = time_per_unit * (self.target - current) if eta > 3600: eta_format = '%d:%02d:%02d' % (eta // 3600, (eta % 3600) // 60, eta % 60) elif eta > 60: eta_format = '%d:%02d' % (eta // 60, eta % 60) else: eta_format = '%ds' % eta info = ' - ETA: %s' % eta_format for k in self._values_order: info += ' - %s:' % k if isinstance(self._values[k], list): avg = np.mean(self._values[k][0] / max(1, self._values[k][1])) if abs(avg) > 1e-3: info += ' %.4f' % avg else: info += ' %.4e' % avg else: info += ' %s' % self._values[k] self._total_width += len(info) if prev_total_width > self._total_width: info += (' ' * (prev_total_width - self._total_width)) if finalize: info += '\n' sys.stdout.write(info) sys.stdout.flush() elif self.verbose == 2: if finalize: numdigits = int(np.log10(self.target)) + 1 count = ('%' + str(numdigits) + 'd/%d') % (current, self.target) info = count + info for k in self._values_order: info += ' - %s:' % k avg = np.mean(self._values[k][0] / max(1, self._values[k][1])) if avg > 1e-3: info += ' %.4f' % avg else: info += ' %.4e' % avg info += '\n' sys.stdout.write(info) sys.stdout.flush() self._last_update = now def add(self, n, values=None): self.update(self._seen_so_far + n, values) def _estimate_step_duration(self, current, now): """Estimate the duration of a single step. Given the step number `current` and the corresponding time `now` this function returns an estimate for how long a single step takes. If this is called before one step has been completed (i.e. 
`current == 0`) then zero is given as an estimate. The duration estimate ignores the duration of the (assumed to be non-representative) first step for estimates when more steps are available (i.e. `current>1`). Args: current: Index of current step. now: The current time. Returns: Estimate of the duration of a single step. """ if current: # there are a few special scenarios here: # 1) somebody is calling the progress bar without ever supplying step 1 # 2) somebody is calling the progress bar and supplies step one multiple # times, e.g. as part of a finalizing call # in these cases, we just fall back to the simple calculation if self._time_after_first_step is not None and current > 1: time_per_unit = (now - self._time_after_first_step) / (current - 1) else: time_per_unit = (now - self._start) / current if current == 1: self._time_after_first_step = now return time_per_unit else: return 0 def _update_stateful_metrics(self, stateful_metrics): self.stateful_metrics = self.stateful_metrics.union(stateful_metrics) def make_batches(size, batch_size): """Returns a list of batch indices (tuples of indices). Args: size: Integer, total size of the data to slice into batches. batch_size: Integer, batch size. Returns: A list of tuples of array indices. """ num_batches = int(np.ceil(size / float(batch_size))) return [(i * batch_size, min(size, (i + 1) * batch_size)) for i in range(0, num_batches)] def slice_arrays(arrays, start=None, stop=None): """Slice an array or list of arrays. This takes an array-like, or a list of array-likes, and outputs: - arrays[start:stop] if `arrays` is an array-like - [x[start:stop] for x in arrays] if `arrays` is a list Can also work on list/array of indices: `slice_arrays(x, indices)` Args: arrays: Single array or list of arrays. start: can be an integer index (start index) or a list/array of indices stop: integer (stop index); should be None if `start` was a list. Returns: A slice of the array(s). 
Raises: ValueError: If the value of start is a list and stop is not None. """ if arrays is None: return [None] if isinstance(start, list) and stop is not None: raise ValueError('The stop argument has to be None if the value of start ' 'is a list.') elif isinstance(arrays, list): if hasattr(start, '__len__'): # hdf5 datasets only support list objects as indices if hasattr(start, 'shape'): start = start.tolist() return [None if x is None else x[start] for x in arrays] return [ None if x is None else None if not hasattr(x, '__getitem__') else x[start:stop] for x in arrays ] else: if hasattr(start, '__len__'): if hasattr(start, 'shape'): start = start.tolist() return arrays[start] if hasattr(start, '__getitem__'): return arrays[start:stop] return [None] def to_list(x): """Normalizes a list/tensor into a list. If a tensor is passed, we return a list of size 1 containing the tensor. Args: x: target object to be normalized. Returns: A list. """ if isinstance(x, list): return x return [x] def to_snake_case(name): intermediate = re.sub('(.)([A-Z][a-z0-9]+)', r'\1_\2', name) insecure = re.sub('([a-z])([A-Z])', r'\1_\2', intermediate).lower() # If the class is private the name starts with "_" which is not secure # for creating scopes. We prefix the name with "private" in this case. if insecure[0] != '_': return insecure return 'private' + insecure def is_all_none(structure): iterable = nest.flatten(structure) # We cannot use Python's `any` because the iterable may return Tensors. for element in iterable: if element is not None: return False return True def check_for_unexpected_keys(name, input_dict, expected_values): unknown = set(input_dict.keys()).difference(expected_values) if unknown: raise ValueError('Unknown entries in {} dictionary: {}. 
Only expected ' 'following keys: {}'.format(name, list(unknown), expected_values)) def validate_kwargs(kwargs, allowed_kwargs, error_message='Keyword argument not understood:'): """Checks that all keyword arguments are in the set of allowed keys.""" for kwarg in kwargs: if kwarg not in allowed_kwargs: raise TypeError(error_message, kwarg) def validate_config(config): """Determines whether config appears to be a valid layer config.""" return isinstance(config, dict) and _LAYER_UNDEFINED_CONFIG_KEY not in config def default(method): """Decorates a method to detect overrides in subclasses.""" method._is_default = True # pylint: disable=protected-access return method def is_default(method): """Check if a method is decorated with the `default` wrapper.""" return getattr(method, '_is_default', False) def populate_dict_with_module_objects(target_dict, modules, obj_filter): for module in modules: for name in dir(module): obj = getattr(module, name) if obj_filter(obj): target_dict[name] = obj
Progbar
python
jazzband__tablib
src/tablib/core.py
{ "start": 25261, "end": 28380 }
class ____: """A book of :class:`Dataset` objects. """ def __init__(self, sets=None): self._datasets = sets or [] def __repr__(self): try: return f'<{self.title.lower()} databook>' except AttributeError: return '<databook object>' def wipe(self): """Removes all :class:`Dataset` objects from the :class:`Databook`.""" self._datasets = [] def sheets(self): return self._datasets def add_sheet(self, dataset): """Adds given :class:`Dataset` to the :class:`Databook`.""" if isinstance(dataset, Dataset): self._datasets.append(dataset) else: raise InvalidDatasetType def _package(self): """Packages :class:`Databook` for delivery.""" collector = [] for dset in self._datasets: collector.append({ 'title': dset.title, 'data': dset._package() }) return collector @property def size(self): """The number of the :class:`Dataset` objects within :class:`Databook`.""" return len(self._datasets) def load(self, in_stream, format, **kwargs): """ Import `in_stream` to the :class:`Databook` object using the `format`. `in_stream` can be a file-like object, a string, or a bytestring. :param \\*\\*kwargs: (optional) custom configuration to the format `import_book`. """ stream = normalize_input(in_stream) if not format: format = detect_format(stream) fmt = registry.get_format(format) if not hasattr(fmt, 'import_book'): raise UnsupportedFormat(f'Format {format} cannot be loaded.') fmt.import_book(self, stream, **kwargs) return self def export(self, format, **kwargs): """ Export :class:`Databook` object to `format`. :param \\*\\*kwargs: (optional) custom configuration to the format `export_book`. 
""" fmt = registry.get_format(format) if not hasattr(fmt, 'export_book'): raise UnsupportedFormat(f'Format {format} cannot be exported.') return fmt.export_book(self, **kwargs) def detect_format(stream): """Return format name of given stream (file-like object, string, or bytestring).""" stream = normalize_input(stream) fmt_title = None for fmt in registry.formats(): try: if fmt.detect(stream): fmt_title = fmt.title break except AttributeError: pass finally: if hasattr(stream, 'seek'): stream.seek(0) return fmt_title def import_set(stream, format=None, **kwargs): """Return dataset of given stream (file-like object, string, or bytestring).""" return Dataset().load(stream, format, **kwargs) def import_book(stream, format=None, **kwargs): """Return dataset of given stream (file-like object, string, or bytestring).""" return Databook().load(stream, format, **kwargs) registry.register_builtins()
Databook
python
rapidsai__cudf
python/dask_cudf/dask_cudf/_expr/expr.py
{ "start": 1764, "end": 2346 }
class ____(CumulativeBlockwise): @property def _args(self) -> list: return self.operands[:1] @property def _kwargs(self) -> dict: # Must pass axis and skipna as kwargs in cudf return {"axis": self.axis, "skipna": self.skipna} # The upstream Var code uses `Series.values`, and relies on numpy # for most of the logic. Unfortunately, cudf -> cupy conversion # is not supported for data containing null values. Therefore, # we must implement our own version of Var for now. This logic # is mostly copied from dask-cudf.
PatchCumulativeBlockwise
python
giampaolo__psutil
tests/__init__.py
{ "start": 34325, "end": 38730 }
class ____: """A container that lists all Process class method names + some reasonable parameters to be called with. Utility methods (parent(), children(), ...) are excluded. >>> ns = process_namespace(psutil.Process()) >>> for fun, name in ns.iter(ns.getters): ... fun() """ utils = [('cpu_percent', (), {}), ('memory_percent', (), {})] ignored = [ ('as_dict', (), {}), ('children', (), {'recursive': True}), ('connections', (), {}), # deprecated ('is_running', (), {}), ('oneshot', (), {}), ('parent', (), {}), ('parents', (), {}), ('pid', (), {}), ('wait', (0,), {}), ] getters = [ ('cmdline', (), {}), ('cpu_times', (), {}), ('create_time', (), {}), ('cwd', (), {}), ('exe', (), {}), ('memory_full_info', (), {}), ('memory_info', (), {}), ('name', (), {}), ('net_connections', (), {'kind': 'all'}), ('nice', (), {}), ('num_ctx_switches', (), {}), ('num_threads', (), {}), ('open_files', (), {}), ('ppid', (), {}), ('status', (), {}), ('threads', (), {}), ('username', (), {}), ] if POSIX: getters += [('uids', (), {})] getters += [('gids', (), {})] getters += [('terminal', (), {})] getters += [('num_fds', (), {})] if HAS_PROC_IO_COUNTERS: getters += [('io_counters', (), {})] if HAS_IONICE: getters += [('ionice', (), {})] if HAS_RLIMIT: getters += [('rlimit', (psutil.RLIMIT_NOFILE,), {})] if HAS_CPU_AFFINITY: getters += [('cpu_affinity', (), {})] if HAS_PROC_CPU_NUM: getters += [('cpu_num', (), {})] if HAS_ENVIRON: getters += [('environ', (), {})] if WINDOWS: getters += [('num_handles', (), {})] if HAS_MEMORY_MAPS: getters += [('memory_maps', (), {'grouped': False})] setters = [] if POSIX: setters += [('nice', (0,), {})] else: setters += [('nice', (psutil.NORMAL_PRIORITY_CLASS,), {})] if HAS_RLIMIT: setters += [('rlimit', (psutil.RLIMIT_NOFILE, (1024, 4096)), {})] if HAS_IONICE: if LINUX: setters += [('ionice', (psutil.IOPRIO_CLASS_NONE, 0), {})] else: setters += [('ionice', (psutil.IOPRIO_NORMAL,), {})] if HAS_CPU_AFFINITY: setters += [('cpu_affinity', 
([_get_eligible_cpu()],), {})] killers = [ ('send_signal', (signal.SIGTERM,), {}), ('suspend', (), {}), ('resume', (), {}), ('terminate', (), {}), ('kill', (), {}), ] if WINDOWS: killers += [('send_signal', (signal.CTRL_C_EVENT,), {})] killers += [('send_signal', (signal.CTRL_BREAK_EVENT,), {})] all = utils + getters + setters + killers def __init__(self, proc): self._proc = proc def iter(self, ls, clear_cache=True): """Given a list of tuples yields a set of (fun, fun_name) tuples in random order. """ ls = list(ls) random.shuffle(ls) for fun_name, args, kwds in ls: if clear_cache: self.clear_cache() fun = getattr(self._proc, fun_name) fun = functools.partial(fun, *args, **kwds) yield (fun, fun_name) def clear_cache(self): """Clear the cache of a Process instance.""" self._proc._init(self._proc.pid, _ignore_nsp=True) @classmethod def test_class_coverage(cls, test_class, ls): """Given a TestCase instance and a list of tuples checks that the class defines the required test method names. """ for fun_name, _, _ in ls: meth_name = 'test_' + fun_name if not hasattr(test_class, meth_name): msg = ( f"{test_class.__class__.__name__!r} class should define a" f" {meth_name!r} method" ) raise AttributeError(msg) @classmethod def test(cls): this = {x[0] for x in cls.all} ignored = {x[0] for x in cls.ignored} klass = {x for x in dir(psutil.Process) if x[0] != '_'} leftout = (this | ignored) ^ klass if leftout: raise ValueError(f"uncovered Process class names: {leftout!r}")
process_namespace
python
jina-ai__jina
jina/serve/runtimes/servers/grpc.py
{ "start": 474, "end": 9030 }
class ____(BaseServer): """GRPC Server implementation""" def __init__( self, grpc_server_options: Optional[dict] = None, ssl_keyfile: Optional[str] = None, ssl_certfile: Optional[str] = None, proxy: bool = False, **kwargs, ): """Initialize the gateway :param grpc_server_options: Dictionary of kwargs arguments that will be passed to the grpc server as options when starting the server, example : {'grpc.max_send_message_length': -1} :param ssl_keyfile: the path to the key file :param ssl_certfile: the path to the certificate file :param proxy: If set, respect the http_proxy and https_proxy environment variables, otherwise, it will unset these proxy variables before start. gRPC seems to prefer no proxy :param kwargs: keyword args """ super().__init__(**kwargs) if not proxy and os.name != 'nt': os.unsetenv('http_proxy') os.unsetenv('https_proxy') self.grpc_server_options = grpc_server_options self.grpc_tracing_server_interceptors = self.aio_tracing_server_interceptors() self.ssl_keyfile = ssl_keyfile self.ssl_certfile = ssl_certfile self.health_servicer = health.aio.HealthServicer() async def setup_server(self): """ setup GRPC server """ self.logger.debug(f'Setting up GRPC server') if docarray_v2: from jina.serve.runtimes.gateway.request_handling import ( GatewayRequestHandler, ) if isinstance(self._request_handler, GatewayRequestHandler): await self._request_handler.streamer._get_endpoints_input_output_models( is_cancel=self.is_cancel ) self._request_handler.streamer._validate_flow_docarray_compatibility() self.server = grpc.aio.server( options=get_server_side_grpc_options(self.grpc_server_options), interceptors=self.grpc_tracing_server_interceptors, ) jina_pb2_grpc.add_JinaRPCServicer_to_server(self._request_handler, self.server) jina_pb2_grpc.add_JinaSingleDataRequestRPCServicer_to_server( self._request_handler, self.server ) if hasattr(self._request_handler, 'stream_doc'): jina_pb2_grpc.add_JinaSingleDocumentRequestRPCServicer_to_server( self._request_handler, 
self.server ) if hasattr(self._request_handler, 'endpoint_discovery'): jina_pb2_grpc.add_JinaDiscoverEndpointsRPCServicer_to_server( self._request_handler, self.server ) if hasattr(self._request_handler, 'process_data'): jina_pb2_grpc.add_JinaDataRequestRPCServicer_to_server( self._request_handler, self.server ) if hasattr(self._request_handler, 'dry_run'): jina_pb2_grpc.add_JinaGatewayDryRunRPCServicer_to_server( self._request_handler, self.server ) if hasattr(self._request_handler, 'snapshot'): jina_pb2_grpc.add_JinaExecutorSnapshotServicer_to_server( self._request_handler, self.server ) if hasattr(self._request_handler, 'snapshot_status'): jina_pb2_grpc.add_JinaExecutorSnapshotProgressServicer_to_server( self._request_handler, self.server ) if hasattr(self._request_handler, 'restore'): jina_pb2_grpc.add_JinaExecutorRestoreServicer_to_server( self._request_handler, self.server ) if hasattr(self._request_handler, 'restore_status'): jina_pb2_grpc.add_JinaExecutorRestoreProgressServicer_to_server( self._request_handler, self.server ) jina_pb2_grpc.add_JinaInfoRPCServicer_to_server( self._request_handler, self.server ) service_names = ( jina_pb2.DESCRIPTOR.services_by_name['JinaRPC'].full_name, jina_pb2.DESCRIPTOR.services_by_name['JinaSingleDataRequestRPC'].full_name, jina_pb2.DESCRIPTOR.services_by_name['JinaDataRequestRPC'].full_name, jina_pb2.DESCRIPTOR.services_by_name['JinaGatewayDryRunRPC'].full_name, jina_pb2.DESCRIPTOR.services_by_name[ 'JinaSingleDocumentRequestRPC' ].full_name, jina_pb2.DESCRIPTOR.services_by_name['JinaDiscoverEndpointsRPC'].full_name, jina_pb2.DESCRIPTOR.services_by_name['JinaInfoRPC'].full_name, reflection.SERVICE_NAME, ) # Mark all services as healthy. 
health_pb2_grpc.add_HealthServicer_to_server(self.health_servicer, self.server) reflection.enable_server_reflection(service_names, self.server) bind_addr = f'{self.host}:{self.port}' if self.ssl_keyfile and self.ssl_certfile: with open(self.ssl_keyfile, 'rb') as f: private_key = f.read() with open(self.ssl_certfile, 'rb') as f: certificate_chain = f.read() server_credentials = grpc.ssl_server_credentials( ( ( private_key, certificate_chain, ), ) ) self.server.add_secure_port(bind_addr, server_credentials) elif ( self.ssl_keyfile != self.ssl_certfile ): # if we have only ssl_keyfile and not ssl_certfile or vice versa raise ValueError( f"you can't pass a ssl_keyfile without a ssl_certfile and vice versa" ) else: self.server.add_insecure_port(bind_addr) self.logger.info(f'start server bound to {bind_addr}') await self.server.start() self.logger.debug(f'server bound to {bind_addr} started') for service in service_names: await self.health_servicer.set( service, health_pb2.HealthCheckResponse.SERVING ) self.logger.debug(f'GRPC server setup successful') async def shutdown(self): """Free other resources allocated with the server, e.g, gateway object, ...""" self.logger.debug(f'Shutting down server') await super().shutdown() await self.health_servicer.enter_graceful_shutdown() await self._request_handler.close() # allow pending requests to be processed await self.server.stop(grace=None) self.logger.debug(f'Server shutdown finished') async def run_server(self): """Run GRPC server forever""" await self.server.wait_for_termination() @staticmethod def is_ready( ctrl_address: str, timeout: float = 1.0, logger=None, **kwargs ) -> bool: """ Check if status is ready. :param ctrl_address: the address where the control request needs to be sent :param timeout: timeout of the health check in seconds :param logger: JinaLogger to be used :param kwargs: extra keyword arguments :return: True if status is ready else False. 
""" try: from grpc_health.v1 import health_pb2, health_pb2_grpc response = send_health_check_sync(ctrl_address, timeout=timeout) return ( response.status == health_pb2.HealthCheckResponse.ServingStatus.SERVING ) except RpcError as exc: if logger: logger.debug(f'Exception: {exc}') return False @staticmethod async def async_is_ready( ctrl_address: str, timeout: float = 1.0, logger=None, **kwargs ) -> bool: """ Async Check if status is ready. :param ctrl_address: the address where the control request needs to be sent :param timeout: timeout of the health check in seconds :param logger: JinaLogger to be used :param kwargs: extra keyword arguments :return: True if status is ready else False. """ try: from grpc_health.v1 import health_pb2, health_pb2_grpc response = await send_health_check_async(ctrl_address, timeout=timeout) return ( response.status == health_pb2.HealthCheckResponse.ServingStatus.SERVING ) except RpcError as exc: if logger: logger.debug(f'Exception: {exc}') return False
GRPCServer
python
django-debug-toolbar__django-debug-toolbar
tests/panels/test_versions.py
{ "start": 235, "end": 1341 }
class ____(BaseTestCase): panel_id = VersionsPanel.panel_id def test_app_version_from_get_version_fn(self): class FakeApp: def get_version(self): return version_info_t(1, 2, 3, "", "") self.assertEqual(self.panel.get_app_version(FakeApp()), "1.2.3") def test_incompatible_app_version_fn(self): class FakeApp: def get_version(self, some_other_arg): # This should be ignored by the get_version_from_app return version_info_t(0, 0, 0, "", "") VERSION = version_info_t(1, 2, 3, "", "") self.assertEqual(self.panel.get_app_version(FakeApp()), "1.2.3") def test_app_version_from_VERSION(self): class FakeApp: VERSION = version_info_t(1, 2, 3, "", "") self.assertEqual(self.panel.get_app_version(FakeApp()), "1.2.3") def test_app_version_from_underscore_version(self): class FakeApp: __version__ = version_info_t(1, 2, 3, "", "") self.assertEqual(self.panel.get_app_version(FakeApp()), "1.2.3")
VersionsPanelTestCase
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/dataclassDescriptors1.py
{ "start": 160, "end": 659 }
class ____: @overload def __get__(self, __obj: None, __owner: Any) -> "MyDescriptor": ... @overload def __get__(self, __obj: object, __owner: Any) -> int: ... def __get__(self, __obj: object | None, __owner: Any) -> "int | MyDescriptor": if __obj is None: return self return cast(Any, __obj)._x def __set__(self, __obj: object, __value: int) -> None: if __obj is not None: cast(Any, __obj)._x = __value @dataclass
MyDescriptor
python
pydantic__pydantic
tests/benchmarks/basemodel_eq_performance.py
{ "start": 645, "end": 1642 }
class ____(pydantic.BaseModel, frozen=True): def __eq__(self, other: Any) -> bool: if isinstance(other, pydantic.BaseModel): # When comparing instances of generic types for equality, as long as all field values are equal, # only require their generic origin types to be equal, rather than exact type equality. # This prevents headaches like MyGeneric(x=1) != MyGeneric[Any](x=1). self_type = self.__pydantic_generic_metadata__['origin'] or self.__class__ other_type = other.__pydantic_generic_metadata__['origin'] or other.__class__ return ( self_type == other_type and self.__dict__ == other.__dict__ and self.__pydantic_private__ == other.__pydantic_private__ and self.__pydantic_extra__ == other.__pydantic_extra__ ) else: return NotImplemented # delegate to the other item in the comparison
OldImplementationModel
python
doocs__leetcode
solution/1600-1699/1652.Defuse the Bomb/Solution.py
{ "start": 0, "end": 431 }
class ____: def decrypt(self, code: List[int], k: int) -> List[int]: n = len(code) ans = [0] * n if k == 0: return ans for i in range(n): if k > 0: for j in range(i + 1, i + k + 1): ans[i] += code[j % n] else: for j in range(i + k, i): ans[i] += code[(j + n) % n] return ans
Solution
python
eth-brownie__brownie
brownie/_gui/console.py
{ "start": 420, "end": 1211 }
class ____(tk.Text): def __init__(self, parent): super().__init__(parent, height=1) self.configure(**TEXT_STYLE) self.configure(background="#161616") self._content = "" def write(self, text): self.configure(state="normal") self.delete(1.0, "end") self.insert(1.0, text) self.configure(state="disabled") self._content = text def append(self, text): self.configure(state="normal") self.insert("end", text) self.configure(state="disabled") self._content += text def clear(self): self.configure(state="normal") self.delete(1.0, "end") self.configure(state="disabled") self._content = "" def read(self): return self._content
Console
python
ray-project__ray
python/ray/autoscaler/v2/event_logger.py
{ "start": 528, "end": 8141 }
class ____: """ Logs events related to the autoscaler. # TODO: - Add more logging for other events. - Rate limit the events if too spammy. """ def __init__(self, logger: EventLoggerAdapter): self._logger = logger def log_cluster_scheduling_update( self, cluster_resources: Dict[str, float], launch_requests: Optional[List[LaunchRequest]] = None, terminate_requests: Optional[List[TerminationRequest]] = None, infeasible_requests: Optional[List[ResourceRequest]] = None, infeasible_gang_requests: Optional[List[GangResourceRequest]] = None, infeasible_cluster_resource_constraints: Optional[ List[ClusterResourceConstraint] ] = None, ) -> None: """ Log updates to the autoscaler scheduling state. Emits: - info logs for node launches and terminations (counts grouped by node type). - an info log summarizing the cluster size after a resize (CPUs/GPUs/TPUs). - warnings describing infeasible single resource requests, infeasible gang (placement group) requests, and infeasible cluster resource constraints. Args: cluster_resources: Mapping of resource name to total resources for the current cluster state. launch_requests: Node launch requests issued in this scheduling step. terminate_requests: Node termination requests issued in this scheduling step. infeasible_requests: Resource requests that could not be satisfied by any available node type. infeasible_gang_requests: Gang/placement group requests that could not be scheduled. infeasible_cluster_resource_constraints: Cluster-level resource constraints that could not be satisfied. Returns: None """ # Log any launch events. if launch_requests: launch_type_count = defaultdict(int) for req in launch_requests: launch_type_count[req.instance_type] += req.count for idx, (instance_type, count) in enumerate(launch_type_count.items()): log_str = f"Adding {count} node(s) of type {instance_type}." self._logger.info(f"{log_str}") logger.info(f"{log_str}") # Log any terminate events. 
if terminate_requests: termination_by_causes_and_type = defaultdict(int) for req in terminate_requests: termination_by_causes_and_type[(req.cause, req.instance_type)] += 1 cause_reason_map = { TerminationRequest.Cause.OUTDATED: "outdated", TerminationRequest.Cause.MAX_NUM_NODES: "max number of worker nodes reached", # noqa TerminationRequest.Cause.MAX_NUM_NODE_PER_TYPE: "max number of worker nodes per type reached", # noqa TerminationRequest.Cause.IDLE: "idle", } for idx, ((cause, instance_type), count) in enumerate( termination_by_causes_and_type.items() ): log_str = f"Removing {count} nodes of type {instance_type} ({cause_reason_map[cause]})." # noqa self._logger.info(f"{log_str}") logger.info(f"{log_str}") # Cluster shape changes. if launch_requests or terminate_requests: num_cpus = cluster_resources.get("CPU", 0) log_str = f"Resized to {int(num_cpus)} CPUs" if "GPU" in cluster_resources: log_str += f", {int(cluster_resources['GPU'])} GPUs" if "TPU" in cluster_resources: log_str += f", {int(cluster_resources['TPU'])} TPUs" self._logger.info(f"{log_str}.") self._logger.debug(f"Current cluster resources: {dict(cluster_resources)}.") # Log any infeasible requests. 
if infeasible_requests: requests_by_count = ResourceRequestUtil.group_by_count(infeasible_requests) log_str = "No available node types can fulfill resource requests " for idx, req_count in enumerate(requests_by_count): resource_map = ResourceRequestUtil.to_resource_map(req_count.request) log_str += f"{resource_map}*{req_count.count}" if idx < len(requests_by_count) - 1: log_str += ", " # Parse and log label selectors if present if req_count.request.label_selectors: selector_strs = [] for selector in req_count.request.label_selectors: for constraint in selector.label_constraints: op = LabelSelectorOperator.Name(constraint.operator) values = ",".join(constraint.label_values) selector_strs.append( f"{constraint.label_key} {op} [{values}]" ) if selector_strs: log_str += ( " with label selectors: [" + "; ".join(selector_strs) + "]" ) log_str += ( ". Add suitable node types to this cluster to resolve this issue." ) self._logger.warning(log_str) if infeasible_gang_requests: # Log for each placement group requests. for gang_request in infeasible_gang_requests: log_str = ( "No available node types can fulfill " "placement group requests (detail={details}): ".format( details=gang_request.details ) ) requests_by_count = ResourceRequestUtil.group_by_count( gang_request.requests ) for idx, req_count in enumerate(requests_by_count): resource_map = ResourceRequestUtil.to_resource_map( req_count.request ) log_str += f"{resource_map}*{req_count.count}" if idx < len(requests_by_count) - 1: log_str += ", " log_str += ( ". Add suitable node types to this cluster to resolve this issue." ) self._logger.warning(log_str) if infeasible_cluster_resource_constraints: # We will only have max 1 cluster resource constraint for now since it's # from `request_resources()` sdk, where the most recent call would override # the previous one. 
for infeasible_constraint in infeasible_cluster_resource_constraints: log_str = "No available node types can fulfill cluster constraint: " for i, requests_by_count in enumerate( infeasible_constraint.resource_requests ): resource_map = ResourceRequestUtil.to_resource_map( requests_by_count.request ) log_str += f"{resource_map}*{requests_by_count.count}" if i < len(infeasible_constraint.resource_requests) - 1: log_str += ", " log_str += ( ". Add suitable node types to this cluster to resolve this issue." ) self._logger.warning(log_str)
AutoscalerEventLogger
python
walkccc__LeetCode
solutions/3534. Path Existence Queries in a Graph II/3534.py
{ "start": 0, "end": 1635 }
class ____: def pathExistenceQueries( self, n: int, nums: list[int], maxDiff: int, queries: list[list[int]], ) -> list[int]: sortedNumAndIndexes = sorted((num, i) for i, num in enumerate(nums)) sortedNums = [num for num, _ in sortedNumAndIndexes] indexMap = {originalIndex: sortedIndex for sortedIndex, (_, originalIndex) in enumerate(sortedNumAndIndexes)} maxLevel = n.bit_length() + 1 # jump[i][j] is the index of the j-th ancestor of i jump = [[0] * maxLevel for _ in range(n)] right = 0 for i in range(n): while right + 1 < n and sortedNums[right + 1] - sortedNums[i] <= maxDiff: right += 1 jump[i][0] = right for level in range(1, maxLevel): for i in range(n): prevJump = jump[i][level - 1] jump[i][level] = jump[prevJump][level - 1] def minJumps(start: int, end: int, level: int) -> int: """ Returns the minimum number of jumps from `start` to `end` using binary lifting. """ if start == end: return 0 if jump[start][0] >= end: return 1 if jump[start][level] < end: return math.inf for j in range(level, -1, -1): if jump[start][j] < end: break return (1 << j) + minJumps(jump[start][j], end, j) def minDist(u: int, v: int) -> int: uIndex = indexMap[u] vIndex = indexMap[v] start = min(uIndex, vIndex) end = max(uIndex, vIndex) res = minJumps(start, end, maxLevel - 1) return res if res < math.inf else -1 return [minDist(u, v) for u, v in queries]
Solution
python
dagster-io__dagster
python_modules/libraries/dagster-dg-core/dagster_dg_core/utils/__init__.py
{ "start": 13614, "end": 14163 }
class ____(DgClickHelpMixin, click.Command): # pyright: ignore[reportIncompatibleMethodOverride] def __init__(self, *args, unlaunched: bool = False, **kwargs): """DgClickCommand with conditional hiding for unlaunched features. Args: unlaunched: If True, the command will be hidden unless DG_SHOW_UNLAUNCHED_COMMANDS environment variable is set. """ if unlaunched: kwargs["hidden"] = not show_dg_unlaunched_commands() super().__init__(*args, **kwargs)
DgClickCommand
python
scipy__scipy
scipy/sparse/_base.py
{ "start": 57043, "end": 58343 }
class ____: """A namespace class to separate sparray from spmatrix""" @classmethod def __class_getitem__(cls, arg, /): """ Return a parametrized wrapper around the `~scipy.sparse.sparray` type. .. versionadded:: 1.16.0 Returns ------- alias : types.GenericAlias A parametrized `~scipy.sparse.sparray` type. Examples -------- >>> import numpy as np >>> from scipy.sparse import coo_array >>> coo_array[np.int8, tuple[int]] scipy.sparse._coo.coo_array[numpy.int8, tuple[int]] """ from types import GenericAlias return GenericAlias(cls, arg) sparray.__doc__ = _spbase.__doc__ def isspmatrix(x): """Is `x` of a sparse matrix type? Parameters ---------- x object to check for being a sparse matrix Returns ------- bool True if `x` is a sparse matrix, False otherwise Examples -------- >>> import numpy as np >>> from scipy.sparse import csr_array, csr_matrix, isspmatrix >>> isspmatrix(csr_matrix([[5]])) True >>> isspmatrix(csr_array([[5]])) False >>> isspmatrix(np.array([[5]])) False >>> isspmatrix(5) False """ return isinstance(x, spmatrix)
sparray
python
spyder-ide__spyder
spyder/utils/stylesheet.py
{ "start": 19755, "end": 23017 }
class ____(BaseDockTabBarStyleSheet): """ Style for special tab bars. Notes ----- This is the base class for horizontal tab bars that follow the design discussed on issue spyder-ide/ux-improvements#4. """ SCROLL_BUTTONS_BORDER_POS = 'right' def set_stylesheet(self): super().set_stylesheet() # -- Main constants css = self.get_stylesheet() margin_size = AppStyle.MarginSize # -- Basic style css['QTabBar::tab'].setValues( # Only add margin to the bottom margin=f'0px 0px {2 * margin_size}px 0px', # Border radius is added for specific tabs (see below) borderRadius='0px', # Remove a colored border added by QDarkStyle borderBottom='0px', # Padding for text inside tabs padding='4px 10px', ) # -- Style for not selected tabs css['QTabBar::tab:!selected'].setValues( border='0px', backgroundColor=SpyderPalette.COLOR_BACKGROUND_4, borderLeft=f'1px solid {SpyderPalette.COLOR_BACKGROUND_4}', borderRight=f'1px solid {SpyderPalette.SPECIAL_TABS_SEPARATOR}', ) css['QTabBar::tab:!selected:hover'].setValues( backgroundColor=SpyderPalette.COLOR_BACKGROUND_5, borderLeftColor=SpyderPalette.COLOR_BACKGROUND_5 ) # -- Style for the not selected tabs to the right and left of the # selected one. # Note: For some strange reason, Qt uses the `next-selected` state for # the left tab. 
css['QTabBar::tab:next-selected'].setValues( borderRightColor=SpyderPalette.COLOR_BACKGROUND_4, ) css['QTabBar::tab:next-selected:hover'].setValues( borderRightColor=SpyderPalette.SPECIAL_TABS_SEPARATOR, backgroundColor=SpyderPalette.COLOR_BACKGROUND_5 ) css['QTabBar::tab:previous-selected'].setValues( borderLeftColor=SpyderPalette.COLOR_BACKGROUND_4, ) css['QTabBar::tab:previous-selected:hover'].setValues( borderLeftColor=SpyderPalette.SPECIAL_TABS_SEPARATOR, backgroundColor=SpyderPalette.COLOR_BACKGROUND_5 ) # -- First and last tabs have rounded borders css['QTabBar::tab:first'].setValues( borderTopLeftRadius=SpyderPalette.SIZE_BORDER_RADIUS, borderBottomLeftRadius=SpyderPalette.SIZE_BORDER_RADIUS, ) css['QTabBar::tab:last'].setValues( borderTopRightRadius=SpyderPalette.SIZE_BORDER_RADIUS, borderBottomRightRadius=SpyderPalette.SIZE_BORDER_RADIUS, ) # -- Last tab doesn't need to show the separator css['QTabBar::tab:last:!selected'].setValues( borderRightColor=SpyderPalette.COLOR_BACKGROUND_4 ) css['QTabBar::tab:last:!selected:hover'].setValues( borderRightColor=SpyderPalette.COLOR_BACKGROUND_5, backgroundColor=SpyderPalette.COLOR_BACKGROUND_5 ) # -- Set bottom margin for scroll buttons. css['QTabBar QToolButton'].setValues( marginBottom=f'{2 * margin_size}px', )
SpecialTabBarStyleSheet
python
scipy__scipy
benchmarks/benchmarks/go_benchmark_functions/go_funcs_E.py
{ "start": 1424, "end": 3875 }
class ____(Benchmark): r""" Eckerle4 objective function. Eckerle, K., NIST (1979). Circular Interference Transmittance Study. ..[1] https://www.itl.nist.gov/div898/strd/nls/data/eckerle4.shtml #TODO, this is a NIST regression standard dataset, docstring needs improving """ def __init__(self, dimensions=3): Benchmark.__init__(self, dimensions) self._bounds = list(zip([0., 1., 10.], [20, 20., 600.])) self.global_optimum = [[1.5543827178, 4.0888321754, 4.5154121844e2]] self.fglob = 1.4635887487E-03 self.a = asarray([1.5750000E-04, 1.6990000E-04, 2.3500000E-04, 3.1020000E-04, 4.9170000E-04, 8.7100000E-04, 1.7418000E-03, 4.6400000E-03, 6.5895000E-03, 9.7302000E-03, 1.4900200E-02, 2.3731000E-02, 4.0168300E-02, 7.1255900E-02, 1.2644580E-01, 2.0734130E-01, 2.9023660E-01, 3.4456230E-01, 3.6980490E-01, 3.6685340E-01, 3.1067270E-01, 2.0781540E-01, 1.1643540E-01, 6.1676400E-02, 3.3720000E-02, 1.9402300E-02, 1.1783100E-02, 7.4357000E-03, 2.2732000E-03, 8.8000000E-04, 4.5790000E-04, 2.3450000E-04, 1.5860000E-04, 1.1430000E-04, 7.1000000E-05]) self.b = asarray([4.0000000E+02, 4.0500000E+02, 4.1000000E+02, 4.1500000E+02, 4.2000000E+02, 4.2500000E+02, 4.3000000E+02, 4.3500000E+02, 4.3650000E+02, 4.3800000E+02, 4.3950000E+02, 4.4100000E+02, 4.4250000E+02, 4.4400000E+02, 4.4550000E+02, 4.4700000E+02, 4.4850000E+02, 4.5000000E+02, 4.5150000E+02, 4.5300000E+02, 4.5450000E+02, 4.5600000E+02, 4.5750000E+02, 4.5900000E+02, 4.6050000E+02, 4.6200000E+02, 4.6350000E+02, 4.6500000E+02, 4.7000000E+02, 4.7500000E+02, 4.8000000E+02, 4.8500000E+02, 4.9000000E+02, 4.9500000E+02, 5.0000000E+02]) def fun(self, x, *args): self.nfev += 1 vec = x[0] / x[1] * exp(-(self.b - x[2]) ** 2 / (2 * x[1] ** 2)) return sum((self.a - vec) ** 2)
Eckerle4
python
gevent__gevent
src/greentest/3.12/test_httplib.py
{ "start": 87192, "end": 99212 }
class ____(TestCase): def setUp(self): response_text = ( 'HTTP/1.1 200 OK\r\n\r\n' # Reply to CONNECT 'HTTP/1.1 200 OK\r\n' # Reply to HEAD 'Content-Length: 42\r\n\r\n' ) self.host = 'proxy.com' self.port = client.HTTP_PORT self.conn = client.HTTPConnection(self.host) self.conn._create_connection = self._create_connection(response_text) def tearDown(self): self.conn.close() def _create_connection(self, response_text): def create_connection(address, timeout=None, source_address=None): return FakeSocket(response_text, host=address[0], port=address[1]) return create_connection def test_set_tunnel_host_port_headers_add_host_missing(self): tunnel_host = 'destination.com' tunnel_port = 8888 tunnel_headers = {'User-Agent': 'Mozilla/5.0 (compatible, MSIE 11)'} tunnel_headers_after = tunnel_headers.copy() tunnel_headers_after['Host'] = '%s:%d' % (tunnel_host, tunnel_port) self.conn.set_tunnel(tunnel_host, port=tunnel_port, headers=tunnel_headers) self.conn.request('HEAD', '/', '') self.assertEqual(self.conn.sock.host, self.host) self.assertEqual(self.conn.sock.port, self.port) self.assertEqual(self.conn._tunnel_host, tunnel_host) self.assertEqual(self.conn._tunnel_port, tunnel_port) self.assertEqual(self.conn._tunnel_headers, tunnel_headers_after) def test_set_tunnel_host_port_headers_set_host_identical(self): tunnel_host = 'destination.com' tunnel_port = 8888 tunnel_headers = {'User-Agent': 'Mozilla/5.0 (compatible, MSIE 11)', 'Host': '%s:%d' % (tunnel_host, tunnel_port)} self.conn.set_tunnel(tunnel_host, port=tunnel_port, headers=tunnel_headers) self.conn.request('HEAD', '/', '') self.assertEqual(self.conn.sock.host, self.host) self.assertEqual(self.conn.sock.port, self.port) self.assertEqual(self.conn._tunnel_host, tunnel_host) self.assertEqual(self.conn._tunnel_port, tunnel_port) self.assertEqual(self.conn._tunnel_headers, tunnel_headers) def test_set_tunnel_host_port_headers_set_host_different(self): tunnel_host = 'destination.com' tunnel_port = 8888 tunnel_headers = 
{'User-Agent': 'Mozilla/5.0 (compatible, MSIE 11)', 'Host': '%s:%d' % ('example.com', 4200)} self.conn.set_tunnel(tunnel_host, port=tunnel_port, headers=tunnel_headers) self.conn.request('HEAD', '/', '') self.assertEqual(self.conn.sock.host, self.host) self.assertEqual(self.conn.sock.port, self.port) self.assertEqual(self.conn._tunnel_host, tunnel_host) self.assertEqual(self.conn._tunnel_port, tunnel_port) self.assertEqual(self.conn._tunnel_headers, tunnel_headers) def test_disallow_set_tunnel_after_connect(self): # Once connected, we shouldn't be able to tunnel anymore self.conn.connect() self.assertRaises(RuntimeError, self.conn.set_tunnel, 'destination.com') def test_connect_with_tunnel(self): d = { b'host': b'destination.com', b'port': client.HTTP_PORT, } self.conn.set_tunnel(d[b'host'].decode('ascii')) self.conn.request('HEAD', '/', '') self.assertEqual(self.conn.sock.host, self.host) self.assertEqual(self.conn.sock.port, self.port) self.assertIn(b'CONNECT %(host)s:%(port)d HTTP/1.1\r\n' b'Host: %(host)s:%(port)d\r\n\r\n' % d, self.conn.sock.data) self.assertIn(b'HEAD / HTTP/1.1\r\nHost: %(host)s\r\n' % d, self.conn.sock.data) def test_connect_with_tunnel_with_default_port(self): d = { b'host': b'destination.com', b'port': client.HTTP_PORT, } self.conn.set_tunnel(d[b'host'].decode('ascii'), port=d[b'port']) self.conn.request('HEAD', '/', '') self.assertEqual(self.conn.sock.host, self.host) self.assertEqual(self.conn.sock.port, self.port) self.assertIn(b'CONNECT %(host)s:%(port)d HTTP/1.1\r\n' b'Host: %(host)s:%(port)d\r\n\r\n' % d, self.conn.sock.data) self.assertIn(b'HEAD / HTTP/1.1\r\nHost: %(host)s\r\n' % d, self.conn.sock.data) def test_connect_with_tunnel_with_nonstandard_port(self): d = { b'host': b'destination.com', b'port': 8888, } self.conn.set_tunnel(d[b'host'].decode('ascii'), port=d[b'port']) self.conn.request('HEAD', '/', '') self.assertEqual(self.conn.sock.host, self.host) self.assertEqual(self.conn.sock.port, self.port) self.assertIn(b'CONNECT 
%(host)s:%(port)d HTTP/1.1\r\n' b'Host: %(host)s:%(port)d\r\n\r\n' % d, self.conn.sock.data) self.assertIn(b'HEAD / HTTP/1.1\r\nHost: %(host)s:%(port)d\r\n' % d, self.conn.sock.data) # This request is not RFC-valid, but it's been possible with the library # for years, so don't break it unexpectedly... This also tests # case-insensitivity when injecting Host: headers if they're missing. def test_connect_with_tunnel_with_different_host_header(self): d = { b'host': b'destination.com', b'tunnel_host_header': b'example.com:9876', b'port': client.HTTP_PORT, } self.conn.set_tunnel( d[b'host'].decode('ascii'), headers={'HOST': d[b'tunnel_host_header'].decode('ascii')}) self.conn.request('HEAD', '/', '') self.assertEqual(self.conn.sock.host, self.host) self.assertEqual(self.conn.sock.port, self.port) self.assertIn(b'CONNECT %(host)s:%(port)d HTTP/1.1\r\n' b'HOST: %(tunnel_host_header)s\r\n\r\n' % d, self.conn.sock.data) self.assertIn(b'HEAD / HTTP/1.1\r\nHost: %(host)s\r\n' % d, self.conn.sock.data) def test_connect_with_tunnel_different_host(self): d = { b'host': b'destination.com', b'port': client.HTTP_PORT, } self.conn.set_tunnel(d[b'host'].decode('ascii')) self.conn.request('HEAD', '/', '') self.assertEqual(self.conn.sock.host, self.host) self.assertEqual(self.conn.sock.port, self.port) self.assertIn(b'CONNECT %(host)s:%(port)d HTTP/1.1\r\n' b'Host: %(host)s:%(port)d\r\n\r\n' % d, self.conn.sock.data) self.assertIn(b'HEAD / HTTP/1.1\r\nHost: %(host)s\r\n' % d, self.conn.sock.data) def test_connect_with_tunnel_idna(self): dest = '\u03b4\u03c0\u03b8.gr' dest_port = b'%s:%d' % (dest.encode('idna'), client.HTTP_PORT) expected = b'CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n' % ( dest_port, dest_port) self.conn.set_tunnel(dest) self.conn.request('HEAD', '/', '') self.assertEqual(self.conn.sock.host, self.host) self.assertEqual(self.conn.sock.port, client.HTTP_PORT) self.assertIn(expected, self.conn.sock.data) def test_tunnel_connect_single_send_connection_setup(self): 
"""Regresstion test for https://bugs.python.org/issue43332.""" with mock.patch.object(self.conn, 'send') as mock_send: self.conn.set_tunnel('destination.com') self.conn.connect() self.conn.request('GET', '/') mock_send.assert_called() # Likely 2, but this test only cares about the first. self.assertGreater( len(mock_send.mock_calls), 1, msg=f'unexpected number of send calls: {mock_send.mock_calls}') proxy_setup_data_sent = mock_send.mock_calls[0][1][0] self.assertIn(b'CONNECT destination.com', proxy_setup_data_sent) self.assertTrue( proxy_setup_data_sent.endswith(b'\r\n\r\n'), msg=f'unexpected proxy data sent {proxy_setup_data_sent!r}') def test_connect_put_request(self): d = { b'host': b'destination.com', b'port': client.HTTP_PORT, } self.conn.set_tunnel(d[b'host'].decode('ascii')) self.conn.request('PUT', '/', '') self.assertEqual(self.conn.sock.host, self.host) self.assertEqual(self.conn.sock.port, self.port) self.assertIn(b'CONNECT %(host)s:%(port)d HTTP/1.1\r\n' b'Host: %(host)s:%(port)d\r\n\r\n' % d, self.conn.sock.data) self.assertIn(b'PUT / HTTP/1.1\r\nHost: %(host)s\r\n' % d, self.conn.sock.data) def test_connect_put_request_ipv6(self): self.conn.set_tunnel('[1:2:3::4]', 1234) self.conn.request('PUT', '/', '') self.assertEqual(self.conn.sock.host, self.host) self.assertEqual(self.conn.sock.port, client.HTTP_PORT) self.assertIn(b'CONNECT [1:2:3::4]:1234', self.conn.sock.data) self.assertIn(b'Host: [1:2:3::4]:1234', self.conn.sock.data) def test_connect_put_request_ipv6_port(self): self.conn.set_tunnel('[1:2:3::4]:1234') self.conn.request('PUT', '/', '') self.assertEqual(self.conn.sock.host, self.host) self.assertEqual(self.conn.sock.port, client.HTTP_PORT) self.assertIn(b'CONNECT [1:2:3::4]:1234', self.conn.sock.data) self.assertIn(b'Host: [1:2:3::4]:1234', self.conn.sock.data) def test_tunnel_debuglog(self): expected_header = 'X-Dummy: 1' response_text = 'HTTP/1.0 200 OK\r\n{}\r\n\r\n'.format(expected_header) self.conn.set_debuglevel(1) 
self.conn._create_connection = self._create_connection(response_text) self.conn.set_tunnel('destination.com') with support.captured_stdout() as output: self.conn.request('PUT', '/', '') lines = output.getvalue().splitlines() self.assertIn('header: {}'.format(expected_header), lines) def test_proxy_response_headers(self): expected_header = ('X-Dummy', '1') response_text = ( 'HTTP/1.0 200 OK\r\n' '{0}\r\n\r\n'.format(':'.join(expected_header)) ) self.conn._create_connection = self._create_connection(response_text) self.conn.set_tunnel('destination.com') self.conn.request('PUT', '/', '') headers = self.conn.get_proxy_response_headers() self.assertIn(expected_header, headers.items()) def test_no_proxy_response_headers(self): expected_header = ('X-Dummy', '1') response_text = ( 'HTTP/1.0 200 OK\r\n' '{0}\r\n\r\n'.format(':'.join(expected_header)) ) self.conn._create_connection = self._create_connection(response_text) self.conn.request('PUT', '/', '') headers = self.conn.get_proxy_response_headers() self.assertIsNone(headers) def test_tunnel_leak(self): sock = None def _create_connection(address, timeout=None, source_address=None): nonlocal sock sock = FakeSocket( 'HTTP/1.1 404 NOT FOUND\r\n\r\n', host=address[0], port=address[1], ) return sock self.conn._create_connection = _create_connection self.conn.set_tunnel('destination.com') exc = None try: self.conn.request('HEAD', '/', '') except OSError as e: # keeping a reference to exc keeps response alive in the traceback exc = e self.assertIsNotNone(exc) self.assertTrue(sock.file_closed) if __name__ == '__main__': unittest.main(verbosity=2)
TunnelTests
python
ansible__ansible
lib/ansible/_internal/_datatag/_tags.py
{ "start": 267, "end": 3259 }
class ____(AnsibleDatatagBase): """ A tag that stores origin metadata for a tagged value, intended for forensic/diagnostic use. Origin metadata should not be used to make runtime decisions, as it is not guaranteed to be present or accurate. Setting both `path` and `line_num` can result in diagnostic display of referenced file contents. Either `path` or `description` must be present. """ path: str | None = None """The path from which the tagged content originated.""" description: str | None = None """A description of the origin, for display to users.""" line_num: int | None = None """An optional line number, starting at 1.""" col_num: int | None = None """An optional column number, starting at 1.""" UNKNOWN: t.ClassVar[t.Self] @classmethod def get_or_create_tag(cls, value: t.Any, path: str | os.PathLike | None) -> Origin: """Return the tag from the given value, creating a tag from the provided path if no tag was found.""" if not (origin := cls.get_tag(value)): if path: origin = Origin(path=str(path)) # convert tagged strings and path-like values to a native str else: origin = Origin.UNKNOWN return origin def replace( self, path: str | types.EllipsisType = ..., description: str | types.EllipsisType = ..., line_num: int | None | types.EllipsisType = ..., col_num: int | None | types.EllipsisType = ..., ) -> t.Self: """Return a new origin based on an existing one, with the given fields replaced.""" return dataclasses.replace( self, **{ key: value for key, value in dict( path=path, description=description, line_num=line_num, col_num=col_num, ).items() if value is not ... 
}, # type: ignore[arg-type] ) def _post_validate(self) -> None: if self.path: if not self.path.startswith('/'): raise RuntimeError('The `src` field must be an absolute path.') elif not self.description: raise RuntimeError('The `src` or `description` field must be specified.') def __str__(self) -> str: """Renders the origin in the form of path:line_num:col_num, omitting missing/invalid elements from the right.""" if self.path: value = self.path else: value = self.description if self.line_num and self.line_num > 0: value += f':{self.line_num}' if self.col_num and self.col_num > 0: value += f':{self.col_num}' if self.path and self.description: value += f' ({self.description})' return value Origin.UNKNOWN = Origin(description='<unknown>') @dataclasses.dataclass(**_tag_dataclass_kwargs)
Origin
python
PyCQA__pylint
tests/functional/c/ctor_arguments.py
{ "start": 544, "end": 630 }
class ____(Class1Arg): def __init__(self, *args, **kwargs): pass
ClassAllArgs
python
getsentry__sentry
tests/snuba/search/test_backend.py
{ "start": 98282, "end": 98360 }
class ____(TestCase, EventsSnubaSearchTestCases): pass
EventsSnubaSearchTest
python
Lightning-AI__lightning
src/lightning/fabric/strategies/xla.py
{ "start": 1578, "end": 11443 }
class ____(ParallelStrategy): """Strategy for training multiple TPU devices using the :func:`torch_xla.distributed.xla_multiprocessing.spawn` method.""" def __init__( self, accelerator: Optional[Accelerator] = None, parallel_devices: Optional[list[torch.device]] = None, checkpoint_io: Optional[XLACheckpointIO] = None, precision: Optional[XLAPrecision] = None, sync_module_states: bool = True, ) -> None: super().__init__( accelerator=accelerator, parallel_devices=parallel_devices, cluster_environment=XLAEnvironment(), checkpoint_io=checkpoint_io, precision=precision, ) self._backward_sync_control = None # XLA synchronizes gradients in the optimizer.step() call self._launched = False self._sync_module_states = sync_module_states @property @override def root_device(self) -> torch.device: if not self._launched: raise RuntimeError("Accessing the XLA device before processes have spawned is not allowed.") import torch_xla.core.xla_model as xm return xm.xla_device() @property def num_processes(self) -> int: return len(self.parallel_devices) if self.parallel_devices is not None else 0 @property @override def checkpoint_io(self) -> XLACheckpointIO: plugin = self._checkpoint_io if plugin is not None: assert isinstance(plugin, XLACheckpointIO) return plugin return XLACheckpointIO() @checkpoint_io.setter @override def checkpoint_io(self, io: Optional[CheckpointIO]) -> None: if io is not None and not isinstance(io, XLACheckpointIO): raise TypeError(f"The XLA strategy can only work with the `XLACheckpointIO` plugin, found {io}") self._checkpoint_io = io @property @override def precision(self) -> XLAPrecision: plugin = self._precision if plugin is not None: assert isinstance(plugin, XLAPrecision) return plugin return XLAPrecision("32-true") @precision.setter @override def precision(self, precision: Optional[Precision]) -> None: if precision is not None and not isinstance(precision, XLAPrecision): raise TypeError(f"The XLA strategy can only work with the `XLAPrecision` plugin, found 
{precision}") self._precision = precision @property @override def global_rank(self) -> int: return super().global_rank if self._launched else 0 @property @override def local_rank(self) -> int: return super().local_rank if self._launched else 0 @property @override def node_rank(self) -> int: return super().node_rank if self._launched else 0 @property @override def world_size(self) -> int: return super().world_size if self._launched else 1 @override def _configure_launcher(self) -> None: self._launcher = _XLALauncher(self) @override def setup_environment(self) -> None: assert self.parallel_devices is not None if len(self.parallel_devices) == 1: # spawning only 1 device with PjRT is not supported: # https://github.com/Lightning-AI/pytorch-lightning/pull/17408#discussion_r1170671732 raise NotImplementedError( f"The {type(self).__name__} does not support running on a single device with the PjRT runtime." " Try using all devices or the `SingleDeviceXLAStrategy` strategy" ) self._launched = True rank_zero_only.rank = self.global_rank super().setup_environment() @override def setup_module(self, module: Module) -> Module: if self._sync_module_states: if _XLA_GREATER_EQUAL_2_1: from torch_xla.core.xla_model import broadcast_master_param else: from torch_xla.experimental.pjrt import broadcast_master_param broadcast_master_param(module) return module @override def module_to_device(self, module: Module) -> None: module.to(self.root_device) @override def process_dataloader(self, dataloader: DataLoader) -> "MpDeviceLoader": from torch_xla.distributed.parallel_loader import MpDeviceLoader if isinstance(dataloader, MpDeviceLoader): # dataloader is already wrapped by MpDeviceLoader return dataloader dataloader = MpDeviceLoader(dataloader, self.root_device) # Mimic interface to torch.utils.data.DataLoader dataloader.dataset = dataloader._loader.dataset dataloader.batch_sampler = getattr(dataloader._loader, "batch_sampler", None) return dataloader @override def all_gather(self, 
tensor: Tensor, group: Optional[Any] = None, sync_grads: bool = False) -> Tensor: """Function to gather a tensor from several distributed processes. Args: tensor: tensor to all-gather. group: unused. sync_grads: flag that allows users to synchronize gradients for the all-gather operation. Return: A tensor of shape (world_size, ...) """ if not self._launched: return tensor if not isinstance(tensor, Tensor): raise NotImplementedError( f"`{type(self).__name__}.all_gather` is only implemented for tensors. Given {tensor}" ) if tensor.dim() == 0: tensor = tensor.unsqueeze(0) original_device = tensor.device tensor = tensor.to(self.root_device) import torch_xla.core.functions as xf import torch_xla.core.xla_model as xm tensor = xf.all_gather(tensor) if sync_grads else xm.all_gather(tensor) tensor = tensor.to(original_device) return tensor @override def all_reduce( self, output: Union[Tensor, Any], group: Optional[Any] = None, reduce_op: Optional[Union[ReduceOp, str]] = None ) -> Tensor: if not isinstance(output, Tensor): output = torch.tensor(output, device=self.root_device) invalid_reduce_op = isinstance(reduce_op, ReduceOp) and reduce_op != ReduceOp.SUM invalid_reduce_op_str = isinstance(reduce_op, str) and reduce_op.lower() not in ("sum", "mean", "avg") if invalid_reduce_op or invalid_reduce_op_str: raise ValueError( "Currently, the XLAStrategy only supports `sum`, `mean`, `avg` for the reduce operation, got:" f" {reduce_op}" ) import torch_xla.core.xla_model as xm output = xm.mesh_reduce("reduce", output, sum) if isinstance(reduce_op, str) and reduce_op.lower() in ("avg", "mean"): output = output / self.world_size return output @override def barrier(self, name: Optional[str] = None, *args: Any, **kwargs: Any) -> None: if not self._launched: return import torch_xla.core.xla_model as xm if name is None: # `None` is not supported: "TypeError: _xla_rendezvous(): incompatible function arguments" name = "" xm.rendezvous(name) @override def broadcast(self, obj: TBroadcast, 
src: int = 0) -> TBroadcast: if not self._launched: return obj import torch_xla.core.xla_model as xm is_tensor = isinstance(obj, Tensor) if is_tensor: if obj.dim() == 0: obj = obj.unsqueeze(0) original_device = obj.device # XLA distributed requires that the data is on the XLA device obj = obj.to(self.root_device) else: # support for arbitrary pickle-ables buffer = io.BytesIO() torch.save(obj, buffer) obj = torch.tensor( # type: ignore[assignment] bytearray(buffer.getbuffer()), device=self.root_device, dtype=torch.float ) obj = [obj] xm.collective_broadcast(obj, root_ordinal=src) obj = obj[0] if not is_tensor: # this will preserve the dtype and device of any tensors buffer = io.BytesIO(obj.cpu().byte().numpy()) obj = torch.load(buffer) else: obj = obj.to(original_device) return obj @override def save_checkpoint( self, path: _PATH, state: dict[str, Union[Module, Optimizer, Any]], storage_options: Optional[Any] = None, filter: Optional[dict[str, Callable[[str, Any], bool]]] = None, ) -> None: """Save model, optimizer, and other state as a checkpoint file. Args: path: A path to where the file(s) should be saved state: A dictionary with contents to be saved. If the dict contains modules or optimizers, their state-dict will be retrieved and converted automatically. storage_options: Additional options for the ``CheckpointIO`` plugin filter: An optional dictionary of the same format as ``state`` mapping keys to callables that return a boolean indicating whether the given parameter should be saved (``True``) or filtered out (``False``). """ import torch_xla.core.xla_model as xm # sync any pending lazy tensors on all ranks before saving to prevent potential collective hangs xm.mark_step() # save on global rank zero only super().save_checkpoint(path, state, storage_options=storage_options, filter=filter) @classmethod @override def register_strategies(cls, strategy_registry: _StrategyRegistry) -> None: strategy_registry.register("xla", cls, description=cls.__name__)
XLAStrategy
python
tiangolo__fastapi
tests/test_dependency_class.py
{ "start": 367, "end": 470 }
class ____: async def __call__(self, value: str) -> str: return value
AsyncCallableDependency
python
facebook__pyre-check
client/language_server/protocol.py
{ "start": 14813, "end": 15070 }
class ____(json_mixins.CamlCaseAndExcludeJsonMixin): """Result for nuclide-vscode-lsp coverage feature.""" covered_percent: float uncovered_ranges: List[Diagnostic] default_message: str @dataclasses.dataclass(frozen=True)
TypeCoverageResponse
python
django__django
tests/utils_tests/test_autoreload.py
{ "start": 11412, "end": 11782 }
class ____(SimpleTestCase): def test_common_roots(self): paths = ( Path("/first/second"), Path("/first/second/third"), Path("/first/"), Path("/root/first/"), ) results = autoreload.common_roots(paths) self.assertCountEqual(results, [Path("/first/"), Path("/root/first/")])
TestCommonRoots
python
huggingface__transformers
tests/models/parakeet/test_feature_extraction_parakeet.py
{ "start": 3433, "end": 8522 }
class ____(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = ParakeetFeatureExtractor def setUp(self): self.feat_extract_tester = ParakeetFeatureExtractionTester(self) def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id")[:num_samples]["audio"] return [x["array"] for x in speech_samples] @require_torch def test_torch_integration(self): """ reproducer: https://gist.github.com/eustlb/c4a0999e54466b7e8d8b040d8e0900df """ # fmt: off EXPECTED_INPUT_FEATURES = torch.tensor( [ 0.60935932, 1.18187428, 1.29877627, 1.36461377, 1.09311509, 1.39821815, 1.63753450, 1.37100816, 1.26510608, 1.70332706, 1.69067430, 1.28770995, 1.52999651, 1.77962756, 1.71420062, 1.21944094, 1.30884087, 1.44343364, 1.17694926, 1.42690814, 1.78877723, 1.68655288, 1.27155364, 1.66103351, 1.75820673, 1.41575801, 1.40622294, 1.70603478, 1.63117850, 1.13353217, ] ) # fmt: on input_speech = self._load_datasamples(1) feature_extractor = ParakeetFeatureExtractor() inputs = feature_extractor(input_speech, return_tensors="pt") self.assertEqual(inputs.input_features.shape, (1, 586, 80)) torch.testing.assert_close(inputs.input_features[0, 100, :30], EXPECTED_INPUT_FEATURES, atol=1e-4, rtol=1e-4) self.assertEqual(inputs.attention_mask.shape, (1, 586)) # last frame should be masked self.assertEqual(inputs.attention_mask.sum(), 585) @require_torch def test_torch_integration_batch(self): """ reproducer: https://gist.github.com/eustlb/c4a0999e54466b7e8d8b040d8e0900df """ # fmt: off EXPECTED_INPUT_FEATURES = torch.tensor( [ [ 0.60935932, 1.18187428, 1.29877627, 1.36461377, 1.09311533, 1.39821827, 1.63753450, 1.37100816, 1.26510608, 1.70332706, 1.69067478, 1.28770995, 1.52999651, 1.77962780, 1.71420062, 1.21944094, 1.30884087, 1.44343400, 1.17694926, 1.42690814, 1.78877664, 1.68655288, 1.27155364, 1.66103351, 1.75820673, 1.41575801, 
1.40622294, 1.70603478, 1.63117862, 1.13353217], [ 0.58339858, 0.54317272, 0.46222782, 0.34154415, 0.17806509, 0.32182255, 0.28909618, 0.02141305, -0.09710173, -0.35818669, -0.48172510, -0.52942866, -0.58029658, -0.70519227, -0.67929971, -0.54698551, -0.28611183, -0.24780270, -0.31363955, -0.41913241, -0.32394424, -0.44897896, -0.68657434, -0.62047797, -0.46886450, -0.65987164, -1.02435589, -0.58527517, -0.56095684, -0.73582536], [-0.91937613, -0.97933632, -1.06843162, -1.02642107, -0.94232899, -0.83840621, -0.82306921, -0.45763230, -0.45182887, -0.75917768, -0.42541453, -0.28512970, -0.39637473, -0.66478080, -0.68004298, -0.49690303, -0.31799242, -0.12917191, 0.13149273, 0.10163058, -0.40041649, 0.05001565, 0.23906317, 0.28816083, 0.14308788, -0.29588422, -0.05428466, 0.14418560, 0.28865972, -0.12138986], [ 0.73217624, 0.84484011, 0.79323846, 0.66315967, 0.41556871, 0.88633078, 0.90718138, 0.91268104, 1.15920067, 1.26141894, 1.10222173, 0.92990804, 0.96352047, 0.88142169, 0.56635213, 0.71491158, 0.81301254, 0.67301887, 0.74780160, 0.64429688, 0.22885245, 0.47035533, 0.46498337, 0.17544533, 0.44458991, 0.79245001, 0.57207537, 0.85768145, 1.00491571, 0.93360955], [ 1.40496337, 1.32492661, 1.16519547, 0.98379827, 0.77614164, 0.95871657, 0.81910741, 1.23010278, 1.33011520, 1.16538525, 1.28319681, 1.45041633, 1.33421600, 0.91677380, 0.67107433, 0.52890682, 0.82009870, 1.15821445, 1.15343642, 1.10958862, 1.44962490, 1.44485891, 1.46043479, 1.90800595, 1.95863307, 1.63670933, 1.49021459, 1.18701911, 0.74906683, 0.84700620] ] ) # fmt: on input_speech = self._load_datasamples(5) feature_extractor = ParakeetFeatureExtractor() inputs = feature_extractor(input_speech, return_tensors="pt") self.assertEqual(inputs.input_features.shape, (5, 2941, 80)) torch.testing.assert_close(inputs.input_features[:, 100, :30], EXPECTED_INPUT_FEATURES, atol=1e-4, rtol=1e-4) self.assertEqual(inputs.attention_mask.shape, (5, 2941)) self.assertTrue(inputs.attention_mask.sum(dim=-1).tolist(), 
[585, 481, 1248, 990, 2940])
ParakeetFeatureExtractionTest
python
scrapy__scrapy
tests/test_engine_stop_download_bytes.py
{ "start": 618, "end": 3147 }
class ____(TestEngineBase): @deferred_f_from_coro_f async def test_crawler(self, mockserver: MockServer) -> None: for spider in ( MySpider, DictItemsSpider, AttrsItemsSpider, DataClassItemsSpider, ): run = BytesReceivedCrawlerRun(spider) with LogCapture() as log: await run.run(mockserver) log.check_present( ( "scrapy.core.downloader.handlers.http11", "DEBUG", f"Download stopped for <GET {mockserver.url('/redirected')}> " "from signal handler BytesReceivedCrawlerRun.bytes_received", ) ) log.check_present( ( "scrapy.core.downloader.handlers.http11", "DEBUG", f"Download stopped for <GET {mockserver.url('/static/')}> " "from signal handler BytesReceivedCrawlerRun.bytes_received", ) ) log.check_present( ( "scrapy.core.downloader.handlers.http11", "DEBUG", f"Download stopped for <GET {mockserver.url('/numbers')}> " "from signal handler BytesReceivedCrawlerRun.bytes_received", ) ) self._assert_visited_urls(run) self._assert_scheduled_requests(run, count=9) self._assert_downloaded_responses(run, count=9) self._assert_signals_caught(run) self._assert_headers_received(run) self._assert_bytes_received(run) @staticmethod def _assert_bytes_received(run: CrawlerRun) -> None: assert len(run.bytes) == 9 for request, data in run.bytes.items(): joined_data = b"".join(data) assert len(data) == 1 # signal was fired only once if run.getpath(request.url) == "/numbers": # Received bytes are not the complete response. The exact amount depends # on the buffer size, which can vary, so we only check that the amount # of received bytes is strictly less than the full response. numbers = [str(x).encode("utf8") for x in range(2**18)] assert len(joined_data) < len(b"".join(numbers))
TestBytesReceivedEngine
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_partition_backfill.py
{ "start": 68161, "end": 103121 }
class ____(ExecutingGraphQLContextTestMatrix): def test_launch_from_failure(self, graphql_context): repository_selector = infer_repository_selector(graphql_context) partition_set_selector = { "repositorySelector": repository_selector, "partitionSetName": "chained_failure_job_partition_set", } # trigger failure in the conditionally_fail solid output_file = os.path.join( get_system_temp_directory(), "chained_failure_pipeline_conditionally_fail" ) try: with open(output_file, "w", encoding="utf8"): result = execute_dagster_graphql_and_finish_runs( graphql_context, LAUNCH_PARTITION_BACKFILL_MUTATION, variables={ "backfillParams": { "selector": partition_set_selector, "partitionNames": ["2", "3"], } }, ) finally: os.remove(output_file) assert not result.errors assert result.data assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess" # re-execute from failure (without the failure file) result = execute_dagster_graphql_and_finish_runs( graphql_context, LAUNCH_PARTITION_BACKFILL_MUTATION, variables={ "backfillParams": { "selector": partition_set_selector, "partitionNames": ["2", "3"], "fromFailure": True, } }, ) assert not result.errors assert result.data assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess" backfill_id = result.data["launchPartitionBackfill"]["backfillId"] result = execute_dagster_graphql( graphql_context, PARTITION_PROGRESS_QUERY, variables={"backfillId": backfill_id}, ) assert not result.errors assert result.data assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill" assert result.data["partitionBackfillOrError"]["status"] == "REQUESTED" assert result.data["partitionBackfillOrError"]["numCancelable"] == 2 assert len(result.data["partitionBackfillOrError"]["partitionNames"]) == 2 assert result.data["partitionBackfillOrError"]["fromFailure"] def test_launch_backfill_with_all_partitions_flag(self, graphql_context): repository_selector = 
infer_repository_selector(graphql_context) partition_set_selector = { "repositorySelector": repository_selector, "partitionSetName": "chained_failure_job_partition_set", } result = execute_dagster_graphql_and_finish_runs( graphql_context, LAUNCH_PARTITION_BACKFILL_MUTATION, variables={ "backfillParams": { "selector": partition_set_selector, "allPartitions": True, } }, ) assert not result.errors assert result.data assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess" backfill_id = result.data["launchPartitionBackfill"]["backfillId"] result = execute_dagster_graphql( graphql_context, PARTITION_PROGRESS_QUERY, variables={"backfillId": backfill_id}, ) assert not result.errors assert result.data assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill" assert result.data["partitionBackfillOrError"]["status"] == "REQUESTED" assert result.data["partitionBackfillOrError"]["numCancelable"] == 10 assert len(result.data["partitionBackfillOrError"]["partitionNames"]) == 10 def test_reexecute_asset_backfill_from_failure(self, graphql_context): asset_keys = [ AssetKey("unpartitioned_upstream_of_partitioned"), AssetKey("upstream_daily_partitioned_asset"), AssetKey("downstream_weekly_partitioned_asset"), ] partitions = ["2023-01-09"] result = execute_dagster_graphql( graphql_context, LAUNCH_PARTITION_BACKFILL_MUTATION, variables={ "backfillParams": { "partitionNames": partitions, "assetSelection": [asset_key.to_graphql_input() for asset_key in asset_keys], } }, ) assert not result.errors assert result.data assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess" backfill_id = result.data["launchPartitionBackfill"]["backfillId"] _execute_asset_backfill_iteration_no_side_effects(graphql_context, backfill_id) _mock_asset_backfill_runs( graphql_context, AssetKey("unpartitioned_upstream_of_partitioned"), backfill_id, DagsterRunStatus.SUCCESS, None, ) 
_execute_asset_backfill_iteration_no_side_effects(graphql_context, backfill_id) _mock_asset_backfill_runs( graphql_context, AssetKey("upstream_daily_partitioned_asset"), backfill_id, DagsterRunStatus.FAILURE, "2023-01-09", ) _execute_asset_backfill_iteration_no_side_effects(graphql_context, backfill_id) # mark backfill as completed so we can retry it backfill = graphql_context.instance.get_backfill(backfill_id) graphql_context.instance.update_backfill(backfill.with_status(BulkActionStatus.COMPLETED)) result = execute_dagster_graphql( graphql_context, RETRY_BACKFILL_MUTATION, variables={ "reexecutionParams": {"parentRunId": backfill_id, "strategy": "FROM_FAILURE"}, }, ) assert not result.errors assert result.data assert result.data["reexecutePartitionBackfill"]["__typename"] == "LaunchBackfillSuccess" retry_backfill_id = result.data["reexecutePartitionBackfill"]["backfillId"] first_backfill = graphql_context.instance.get_backfill(backfill_id) retried_from_failure_backfill = graphql_context.instance.get_backfill(retry_backfill_id) assert ( first_backfill.asset_backfill_data.failed_and_downstream_subset == retried_from_failure_backfill.asset_backfill_data.target_subset ) assert retried_from_failure_backfill.tags.get(PARENT_BACKFILL_ID_TAG) == backfill_id assert retried_from_failure_backfill.tags.get(ROOT_BACKFILL_ID_TAG) == backfill_id result = execute_dagster_graphql( graphql_context, RETRY_BACKFILL_MUTATION, variables={ "reexecutionParams": {"parentRunId": backfill_id, "strategy": "ALL_STEPS"}, }, ) assert not result.errors assert result.data assert result.data["reexecutePartitionBackfill"]["__typename"] == "LaunchBackfillSuccess" retry_backfill_id = result.data["reexecutePartitionBackfill"]["backfillId"] first_backfill = graphql_context.instance.get_backfill(backfill_id) retried_backfill = graphql_context.instance.get_backfill(retry_backfill_id) assert ( first_backfill.asset_backfill_data.target_subset == retried_backfill.asset_backfill_data.target_subset ) assert 
retried_backfill.tags.get(PARENT_BACKFILL_ID_TAG) == backfill_id assert retried_backfill.tags.get(ROOT_BACKFILL_ID_TAG) == backfill_id def test_reexecute_successful_asset_backfill(self, graphql_context): asset_keys = [ AssetKey("unpartitioned_upstream_of_partitioned"), AssetKey("upstream_daily_partitioned_asset"), ] partitions = ["2023-01-09"] result = execute_dagster_graphql( graphql_context, LAUNCH_PARTITION_BACKFILL_MUTATION, variables={ "backfillParams": { "partitionNames": partitions, "assetSelection": [asset_key.to_graphql_input() for asset_key in asset_keys], } }, ) assert not result.errors assert result.data assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess" backfill_id = result.data["launchPartitionBackfill"]["backfillId"] _execute_asset_backfill_iteration_no_side_effects(graphql_context, backfill_id) _mock_asset_backfill_runs( graphql_context, AssetKey("unpartitioned_upstream_of_partitioned"), backfill_id, DagsterRunStatus.SUCCESS, None, ) _execute_asset_backfill_iteration_no_side_effects(graphql_context, backfill_id) _mock_asset_backfill_runs( graphql_context, AssetKey("upstream_daily_partitioned_asset"), backfill_id, DagsterRunStatus.SUCCESS, "2023-01-09", ) _execute_asset_backfill_iteration_no_side_effects(graphql_context, backfill_id) # mark backfill as complete so we can retry it backfill = graphql_context.instance.get_backfill(backfill_id) graphql_context.instance.update_backfill(backfill.with_status(BulkActionStatus.COMPLETED)) result = execute_dagster_graphql( graphql_context, RETRY_BACKFILL_MUTATION, variables={ "reexecutionParams": {"parentRunId": backfill_id, "strategy": "FROM_FAILURE"}, }, ) assert not result.errors assert result.data assert result.data["reexecutePartitionBackfill"]["__typename"] == "PythonError" assert ( "Cannot re-execute from failure an asset backfill that has no missing materializations" in result.data["reexecutePartitionBackfill"]["message"] ) result = execute_dagster_graphql( 
graphql_context, RETRY_BACKFILL_MUTATION, variables={ "reexecutionParams": {"parentRunId": backfill_id, "strategy": "ALL_STEPS"}, }, ) assert not result.errors assert result.data assert result.data["reexecutePartitionBackfill"]["__typename"] == "LaunchBackfillSuccess" retry_backfill_id = result.data["reexecutePartitionBackfill"]["backfillId"] first_backfill = graphql_context.instance.get_backfill(backfill_id) retried_backfill = graphql_context.instance.get_backfill(retry_backfill_id) assert ( first_backfill.asset_backfill_data.target_subset == retried_backfill.asset_backfill_data.target_subset ) assert retried_backfill.tags.get(PARENT_BACKFILL_ID_TAG) == backfill_id assert retried_backfill.tags.get(ROOT_BACKFILL_ID_TAG) == backfill_id def test_reexecute_asset_backfill_still_in_progress(self, graphql_context): asset_keys = [ AssetKey("unpartitioned_upstream_of_partitioned"), AssetKey("upstream_daily_partitioned_asset"), AssetKey("downstream_weekly_partitioned_asset"), ] partitions = ["2023-01-09"] result = execute_dagster_graphql( graphql_context, LAUNCH_PARTITION_BACKFILL_MUTATION, variables={ "backfillParams": { "partitionNames": partitions, "assetSelection": [asset_key.to_graphql_input() for asset_key in asset_keys], } }, ) assert not result.errors assert result.data assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess" backfill_id = result.data["launchPartitionBackfill"]["backfillId"] _execute_asset_backfill_iteration_no_side_effects(graphql_context, backfill_id) _mock_asset_backfill_runs( graphql_context, AssetKey("unpartitioned_upstream_of_partitioned"), backfill_id, DagsterRunStatus.SUCCESS, None, ) _execute_asset_backfill_iteration_no_side_effects(graphql_context, backfill_id) _mock_asset_backfill_runs( graphql_context, AssetKey("upstream_daily_partitioned_asset"), backfill_id, DagsterRunStatus.FAILURE, "2023-01-09", ) _execute_asset_backfill_iteration_no_side_effects(graphql_context, backfill_id) # try to retry the backfill 
while it is still in progress result = execute_dagster_graphql( graphql_context, RETRY_BACKFILL_MUTATION, variables={ "reexecutionParams": {"parentRunId": backfill_id, "strategy": "FROM_FAILURE"}, }, ) assert not result.errors assert result.data assert result.data["reexecutePartitionBackfill"]["__typename"] == "PythonError" assert "still in progress" in result.data["reexecutePartitionBackfill"]["message"] result = execute_dagster_graphql( graphql_context, RETRY_BACKFILL_MUTATION, variables={ "reexecutionParams": {"parentRunId": backfill_id, "strategy": "ALL_STEPS"}, }, ) assert not result.errors assert result.data assert result.data["reexecutePartitionBackfill"]["__typename"] == "PythonError" assert "still in progress" in result.data["reexecutePartitionBackfill"]["message"] # once the first backfill is in a completed state, we can retry it backfill = graphql_context.instance.get_backfill(backfill_id) graphql_context.instance.update_backfill(backfill.with_status(BulkActionStatus.FAILED)) result = execute_dagster_graphql( graphql_context, RETRY_BACKFILL_MUTATION, variables={ "reexecutionParams": {"parentRunId": backfill_id, "strategy": "FROM_FAILURE"}, }, ) assert not result.errors assert result.data assert result.data["reexecutePartitionBackfill"]["__typename"] == "LaunchBackfillSuccess" retry_backfill_id = result.data["reexecutePartitionBackfill"]["backfillId"] first_backfill = graphql_context.instance.get_backfill(backfill_id) retried_from_failure_backfill = graphql_context.instance.get_backfill(retry_backfill_id) assert ( first_backfill.asset_backfill_data.failed_and_downstream_subset == retried_from_failure_backfill.asset_backfill_data.target_subset ) assert retried_from_failure_backfill.tags.get(PARENT_BACKFILL_ID_TAG) == backfill_id assert retried_from_failure_backfill.tags.get(ROOT_BACKFILL_ID_TAG) == backfill_id result = execute_dagster_graphql( graphql_context, RETRY_BACKFILL_MUTATION, variables={ "reexecutionParams": {"parentRunId": backfill_id, 
"strategy": "ALL_STEPS"}, }, ) assert not result.errors assert result.data assert result.data["reexecutePartitionBackfill"]["__typename"] == "LaunchBackfillSuccess" retry_backfill_id = result.data["reexecutePartitionBackfill"]["backfillId"] first_backfill = graphql_context.instance.get_backfill(backfill_id) retried_backfill = graphql_context.instance.get_backfill(retry_backfill_id) assert ( first_backfill.asset_backfill_data.target_subset == retried_backfill.asset_backfill_data.target_subset ) assert retried_backfill.tags.get(PARENT_BACKFILL_ID_TAG) == backfill_id assert retried_backfill.tags.get(ROOT_BACKFILL_ID_TAG) == backfill_id def test_reexecute_asset_backfill_twice(self, graphql_context): asset_keys = [ AssetKey("unpartitioned_upstream_of_partitioned"), AssetKey("upstream_daily_partitioned_asset"), AssetKey("downstream_weekly_partitioned_asset"), ] partitions = ["2023-01-09"] result = execute_dagster_graphql( graphql_context, LAUNCH_PARTITION_BACKFILL_MUTATION, variables={ "backfillParams": { "partitionNames": partitions, "assetSelection": [asset_key.to_graphql_input() for asset_key in asset_keys], } }, ) assert not result.errors assert result.data assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess" backfill_id = result.data["launchPartitionBackfill"]["backfillId"] _execute_asset_backfill_iteration_no_side_effects(graphql_context, backfill_id) _mock_asset_backfill_runs( graphql_context, AssetKey("unpartitioned_upstream_of_partitioned"), backfill_id, DagsterRunStatus.SUCCESS, None, ) _execute_asset_backfill_iteration_no_side_effects(graphql_context, backfill_id) _mock_asset_backfill_runs( graphql_context, AssetKey("upstream_daily_partitioned_asset"), backfill_id, DagsterRunStatus.FAILURE, "2023-01-09", ) _execute_asset_backfill_iteration_no_side_effects(graphql_context, backfill_id) # mark backfill as completed so we can retry it backfill = graphql_context.instance.get_backfill(backfill_id) 
graphql_context.instance.update_backfill(backfill.with_status(BulkActionStatus.COMPLETED)) result = execute_dagster_graphql( graphql_context, RETRY_BACKFILL_MUTATION, variables={ "reexecutionParams": {"parentRunId": backfill_id, "strategy": "FROM_FAILURE"}, }, ) assert not result.errors assert result.data assert result.data["reexecutePartitionBackfill"]["__typename"] == "LaunchBackfillSuccess" retry_backfill_id = result.data["reexecutePartitionBackfill"]["backfillId"] first_backfill = graphql_context.instance.get_backfill(backfill_id) retried_backfill = graphql_context.instance.get_backfill(retry_backfill_id) assert ( first_backfill.asset_backfill_data.failed_and_downstream_subset == retried_backfill.asset_backfill_data.target_subset ) assert retried_backfill.tags.get(PARENT_BACKFILL_ID_TAG) == backfill_id assert retried_backfill.tags.get(ROOT_BACKFILL_ID_TAG) == backfill_id _execute_asset_backfill_iteration_no_side_effects( graphql_context, retried_backfill.backfill_id ) # mark some partitions failed so we can retry again _mock_asset_backfill_runs( graphql_context, AssetKey("upstream_daily_partitioned_asset"), retried_backfill.backfill_id, DagsterRunStatus.FAILURE, "2023-01-09", ) _execute_asset_backfill_iteration_no_side_effects( graphql_context, retried_backfill.backfill_id ) # refetch the backfill to get the updated statuses of all assets retried_backfill = graphql_context.instance.get_backfill(retry_backfill_id) graphql_context.instance.update_backfill( retried_backfill.with_status(BulkActionStatus.COMPLETED) ) result = execute_dagster_graphql( graphql_context, RETRY_BACKFILL_MUTATION, variables={ "reexecutionParams": {"parentRunId": retry_backfill_id, "strategy": "FROM_FAILURE"}, }, ) assert not result.errors assert result.data assert result.data["reexecutePartitionBackfill"]["__typename"] == "LaunchBackfillSuccess" seconrd_retry_backfill_id = result.data["reexecutePartitionBackfill"]["backfillId"] second_retried_backfill = 
graphql_context.instance.get_backfill(seconrd_retry_backfill_id) retried_backfill = graphql_context.instance.get_backfill(retry_backfill_id) assert ( retried_backfill.asset_backfill_data.failed_and_downstream_subset == second_retried_backfill.asset_backfill_data.target_subset ) assert second_retried_backfill.tags.get(PARENT_BACKFILL_ID_TAG) == retry_backfill_id assert second_retried_backfill.tags.get(ROOT_BACKFILL_ID_TAG) == backfill_id def test_retry_job_backfill(self, graphql_context): repository_selector = infer_repository_selector(graphql_context) partition_set_selector = { "repositorySelector": repository_selector, "partitionSetName": "chained_failure_job_partition_set", } # trigger failure in the conditionally_fail solid output_file = os.path.join( get_system_temp_directory(), "chained_failure_pipeline_conditionally_fail" ) try: with open(output_file, "w", encoding="utf8"): result = execute_dagster_graphql_and_finish_runs( graphql_context, LAUNCH_PARTITION_BACKFILL_MUTATION, variables={ "backfillParams": { "selector": partition_set_selector, "partitionNames": ["2", "3"], } }, ) finally: os.remove(output_file) assert not result.errors assert result.data assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess" backfill_id = result.data["launchPartitionBackfill"]["backfillId"] backfill = graphql_context.instance.get_backfill(backfill_id) graphql_context.instance.update_backfill(backfill.with_status(BulkActionStatus.COMPLETED)) # re-execute from failure (without the failure file) result = execute_dagster_graphql( graphql_context, RETRY_BACKFILL_MUTATION, variables={ "reexecutionParams": {"parentRunId": backfill_id, "strategy": "FROM_FAILURE"} }, ) assert not result.errors assert result.data assert result.data["reexecutePartitionBackfill"]["__typename"] == "LaunchBackfillSuccess" retried_from_failure_backfill_id = result.data["reexecutePartitionBackfill"]["backfillId"] retried_from_failure_backfill = 
graphql_context.instance.get_backfill( retried_from_failure_backfill_id ) assert retried_from_failure_backfill.tags.get(PARENT_BACKFILL_ID_TAG) == backfill_id assert retried_from_failure_backfill.tags.get(ROOT_BACKFILL_ID_TAG) == backfill_id result = execute_dagster_graphql( graphql_context, PARTITION_PROGRESS_QUERY, variables={"backfillId": retried_from_failure_backfill_id}, ) assert not result.errors assert result.data assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill" assert result.data["partitionBackfillOrError"]["status"] == "REQUESTED" assert result.data["partitionBackfillOrError"]["numCancelable"] == 2 assert len(result.data["partitionBackfillOrError"]["partitionNames"]) == 2 assert result.data["partitionBackfillOrError"]["fromFailure"] # re-execute (without the failure file) result = execute_dagster_graphql( graphql_context, RETRY_BACKFILL_MUTATION, variables={"reexecutionParams": {"parentRunId": backfill_id, "strategy": "ALL_STEPS"}}, ) assert not result.errors assert result.data assert result.data["reexecutePartitionBackfill"]["__typename"] == "LaunchBackfillSuccess" retried_backfill_id = result.data["reexecutePartitionBackfill"]["backfillId"] retried_backfill = graphql_context.instance.get_backfill(retried_backfill_id) assert retried_backfill.tags.get(PARENT_BACKFILL_ID_TAG) == backfill_id assert retried_backfill.tags.get(ROOT_BACKFILL_ID_TAG) == backfill_id result = execute_dagster_graphql( graphql_context, PARTITION_PROGRESS_QUERY, variables={"backfillId": retried_backfill_id}, ) assert not result.errors assert result.data assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill" assert result.data["partitionBackfillOrError"]["status"] == "REQUESTED" assert result.data["partitionBackfillOrError"]["numCancelable"] == 2 assert len(result.data["partitionBackfillOrError"]["partitionNames"]) == 2 assert not result.data["partitionBackfillOrError"]["fromFailure"] def 
test_retry_in_progress_job_backfill(self, graphql_context): repository_selector = infer_repository_selector(graphql_context) partition_set_selector = { "repositorySelector": repository_selector, "partitionSetName": "chained_failure_job_partition_set", } # trigger failure in the conditionally_fail solid output_file = os.path.join( get_system_temp_directory(), "chained_failure_pipeline_conditionally_fail" ) try: with open(output_file, "w", encoding="utf8"): result = execute_dagster_graphql_and_finish_runs( graphql_context, LAUNCH_PARTITION_BACKFILL_MUTATION, variables={ "backfillParams": { "selector": partition_set_selector, "partitionNames": ["2", "3"], } }, ) finally: os.remove(output_file) assert not result.errors assert result.data assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess" backfill_id = result.data["launchPartitionBackfill"]["backfillId"] result = execute_dagster_graphql( graphql_context, RETRY_BACKFILL_MUTATION, variables={ "reexecutionParams": {"parentRunId": backfill_id, "strategy": "FROM_FAILURE"} }, ) assert not result.errors assert result.data assert result.data["reexecutePartitionBackfill"]["__typename"] == "PythonError" assert "still in progress" in result.data["reexecutePartitionBackfill"]["message"] result = execute_dagster_graphql( graphql_context, RETRY_BACKFILL_MUTATION, variables={"reexecutionParams": {"parentRunId": backfill_id, "strategy": "ALL_STEPS"}}, ) assert not result.errors assert result.data assert result.data["reexecutePartitionBackfill"]["__typename"] == "PythonError" assert "still in progress" in result.data["reexecutePartitionBackfill"]["message"] # mark backfill as complete and confirm that we can retry it backfill = graphql_context.instance.get_backfill(backfill_id) graphql_context.instance.update_backfill(backfill.with_status(BulkActionStatus.COMPLETED)) # re-execute from failure (without the failure file) result = execute_dagster_graphql( graphql_context, RETRY_BACKFILL_MUTATION, 
variables={ "reexecutionParams": {"parentRunId": backfill_id, "strategy": "FROM_FAILURE"} }, ) assert not result.errors assert result.data assert result.data["reexecutePartitionBackfill"]["__typename"] == "LaunchBackfillSuccess" retried_from_failure_backfill_id = result.data["reexecutePartitionBackfill"]["backfillId"] retried_from_failure_backfill = graphql_context.instance.get_backfill( retried_from_failure_backfill_id ) assert retried_from_failure_backfill.tags.get(PARENT_BACKFILL_ID_TAG) == backfill_id assert retried_from_failure_backfill.tags.get(ROOT_BACKFILL_ID_TAG) == backfill_id # re-execute (without the failure file) result = execute_dagster_graphql( graphql_context, RETRY_BACKFILL_MUTATION, variables={"reexecutionParams": {"parentRunId": backfill_id, "strategy": "ALL_STEPS"}}, ) assert not result.errors assert result.data assert result.data["reexecutePartitionBackfill"]["__typename"] == "LaunchBackfillSuccess" retried_backfill_id = result.data["reexecutePartitionBackfill"]["backfillId"] retried_backfill = graphql_context.instance.get_backfill(retried_backfill_id) assert retried_backfill.tags.get(PARENT_BACKFILL_ID_TAG) == backfill_id assert retried_backfill.tags.get(ROOT_BACKFILL_ID_TAG) == backfill_id def test_retry_job_backfill_twice(self, graphql_context): repository_selector = infer_repository_selector(graphql_context) partition_set_selector = { "repositorySelector": repository_selector, "partitionSetName": "chained_failure_job_partition_set", } # trigger failure in the conditionally_fail solid output_file = os.path.join( get_system_temp_directory(), "chained_failure_pipeline_conditionally_fail" ) with open(output_file, "w", encoding="utf8"): result = execute_dagster_graphql_and_finish_runs( graphql_context, LAUNCH_PARTITION_BACKFILL_MUTATION, variables={ "backfillParams": { "selector": partition_set_selector, "partitionNames": ["2", "3"], } }, ) assert not result.errors assert result.data assert result.data["launchPartitionBackfill"]["__typename"] == 
"LaunchBackfillSuccess" backfill_id = result.data["launchPartitionBackfill"]["backfillId"] # mark backfill as complete and confirm that we can retry it backfill = graphql_context.instance.get_backfill(backfill_id) graphql_context.instance.update_backfill(backfill.with_status(BulkActionStatus.COMPLETED)) try: with open(output_file, "w", encoding="utf8"): # re-execute from failure (still with the failure file) result = execute_dagster_graphql( graphql_context, RETRY_BACKFILL_MUTATION, variables={ "reexecutionParams": { "parentRunId": backfill_id, "strategy": "FROM_FAILURE", } }, ) finally: os.remove(output_file) assert not result.errors assert result.data assert result.data["reexecutePartitionBackfill"]["__typename"] == "LaunchBackfillSuccess" retried_backfill_id = result.data["reexecutePartitionBackfill"]["backfillId"] retried_backfill = graphql_context.instance.get_backfill(retried_backfill_id) assert retried_backfill.tags.get(PARENT_BACKFILL_ID_TAG) == backfill_id assert retried_backfill.tags.get(ROOT_BACKFILL_ID_TAG) == backfill_id graphql_context.instance.update_backfill( retried_backfill.with_status(BulkActionStatus.COMPLETED) ) # re-execute from failure (without the failure file) result = execute_dagster_graphql( graphql_context, RETRY_BACKFILL_MUTATION, variables={ "reexecutionParams": { "parentRunId": retried_backfill.backfill_id, "strategy": "FROM_FAILURE", } }, ) assert not result.errors assert result.data assert result.data["reexecutePartitionBackfill"]["__typename"] == "LaunchBackfillSuccess" second_retried_backfill_id = result.data["reexecutePartitionBackfill"]["backfillId"] second_retried_backfill = graphql_context.instance.get_backfill(second_retried_backfill_id) assert second_retried_backfill.tags.get(PARENT_BACKFILL_ID_TAG) == retried_backfill_id assert second_retried_backfill.tags.get(ROOT_BACKFILL_ID_TAG) == backfill_id def test_retry_successful_job_backfill(self, graphql_context): repository_selector = infer_repository_selector(graphql_context) 
partition_set_selector = { "repositorySelector": repository_selector, "partitionSetName": "chained_failure_job_partition_set", } result = execute_dagster_graphql_and_finish_runs( graphql_context, LAUNCH_PARTITION_BACKFILL_MUTATION, variables={ "backfillParams": { "selector": partition_set_selector, "partitionNames": ["2", "3"], } }, ) assert not result.errors assert result.data assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess" backfill_id = result.data["launchPartitionBackfill"]["backfillId"] # mark backfill as complete backfill = graphql_context.instance.get_backfill(backfill_id) graphql_context.instance.update_backfill(backfill.with_status(BulkActionStatus.COMPLETED)) result = execute_dagster_graphql( graphql_context, RETRY_BACKFILL_MUTATION, variables={ "reexecutionParams": {"parentRunId": backfill_id, "strategy": "FROM_FAILURE"} }, ) # Unlike asset backfills, we don't currently have a way to see if a job backfill has runs that # failed without querying the DB. So we always allow retries assert not result.errors assert result.data assert result.data["reexecutePartitionBackfill"]["__typename"] == "LaunchBackfillSuccess" retried_backfill_id = result.data["reexecutePartitionBackfill"]["backfillId"] retried_backfill = graphql_context.instance.get_backfill(retried_backfill_id) assert retried_backfill.tags.get(PARENT_BACKFILL_ID_TAG) == backfill_id assert retried_backfill.tags.get(ROOT_BACKFILL_ID_TAG) == backfill_id
TestLaunchDaemonBackfillFromFailure
python
dagster-io__dagster
python_modules/libraries/dagster-shared/dagster_shared/yaml_utils/__init__.py
{ "start": 1735, "end": 1899 }
class ____(yaml.SafeLoader, _CanRemoveImplicitResolver): pass DagsterRunConfigYamlLoader.remove_implicit_resolver(YAML_TIMESTAMP_TAG)
DagsterRunConfigYamlLoader
python
ray-project__ray
python/ray/_private/thirdparty/pynvml/pynvml.py
{ "start": 107619, "end": 107844 }
class ____(Structure): _fields_ = [('version', c_uint), ('maxAttackerAdvantage', c_ulong), ] ConfComputeSetKeyRotationThresholdInfo_v1 = 0x1000010
c_nvmlConfComputeSetKeyRotationThresholdInfo_t
python
streamlit__streamlit
lib/streamlit/elements/image.py
{ "start": 1619, "end": 9611 }
class ____: @gather_metrics("image") def image( self, image: ImageOrImageList, # TODO: Narrow type of caption, dependent on type of image, # by way of overload caption: str | list[str] | None = None, width: Width = "content", use_column_width: UseColumnWith = None, clamp: bool = False, channels: Channels = "RGB", output_format: ImageFormatOrAuto = "auto", *, use_container_width: bool | None = None, ) -> DeltaGenerator: """Display an image or list of images. Parameters ---------- image : numpy.ndarray, BytesIO, str, Path, or list of these The image to display. This can be one of the following: - A URL (string) for a hosted image. - A path to a local image file. The path can be a ``str`` or ``Path`` object. Paths can be absolute or relative to the working directory (where you execute ``streamlit run``). - An SVG string like ``<svg xmlns=...</svg>``. - A byte array defining an image. This includes monochrome images of shape (w,h) or (w,h,1), color images of shape (w,h,3), or RGBA images of shape (w,h,4), where w and h are the image width and height, respectively. - A list of any of the above. Streamlit displays the list as a row of images that overflow to additional rows as needed. caption : str or list of str Image caption(s). If this is ``None`` (default), no caption is displayed. If ``image`` is a list of multiple images, ``caption`` must be a list of captions (one caption for each image) or ``None``. Captions can optionally contain GitHub-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. See the ``body`` parameter of |st.markdown|_ for additional, supported Markdown directives. .. |st.markdown| replace:: ``st.markdown`` .. _st.markdown: https://docs.streamlit.io/develop/api-reference/text/st.markdown width : "content", "stretch", or int The width of the image element. 
This can be one of the following: - ``"content"`` (default): The width of the element matches the width of its content, but doesn't exceed the width of the parent container. - ``"stretch"``: The width of the element matches the width of the parent container. - An integer specifying the width in pixels: The element has a fixed width. If the specified width is greater than the width of the parent container, the width of the element matches the width of the parent container. When using an SVG image without a default width, use ``"stretch"`` or an integer. use_column_width : "auto", "always", "never", or bool If "auto", set the image's width to its natural size, but do not exceed the width of the column. If "always" or True, set the image's width to the column width. If "never" or False, set the image's width to its natural size. Note: if set, `use_column_width` takes precedence over the `width` parameter. .. deprecated:: ``use_column_width`` is deprecated and will be removed in a future release. Please use the ``width`` parameter instead. clamp : bool Whether to clamp image pixel values to a valid range (0-255 per channel). This is only used for byte array images; the parameter is ignored for image URLs and files. If this is ``False`` (default) and an image has an out-of-range value, a ``RuntimeError`` will be raised. channels : "RGB" or "BGR" The color format when ``image`` is an ``nd.array``. This is ignored for other image types. If this is ``"RGB"`` (default), ``image[:, :, 0]`` is the red channel, ``image[:, :, 1]`` is the green channel, and ``image[:, :, 2]`` is the blue channel. For images coming from libraries like OpenCV, you should set this to ``"BGR"`` instead. output_format : "JPEG", "PNG", or "auto" The output format to use when transferring the image data. If this is ``"auto"`` (default), Streamlit identifies the compression type based on the type and format of the image. 
Photos should use the ``"JPEG"`` format for lossy compression while diagrams should use the ``"PNG"`` format for lossless compression. use_container_width : bool Whether to override ``width`` with the width of the parent container. If ``use_container_width`` is ``False`` (default), Streamlit sets the image's width according to ``width``. If ``use_container_width`` is ``True``, Streamlit sets the width of the image to match the width of the parent container. .. deprecated:: ``use_container_width`` is deprecated and will be removed in a future release. For ``use_container_width=True``, use ``width="stretch"``. For ``use_container_width=False``, use ``width="content"``. Example ------- >>> import streamlit as st >>> st.image("sunrise.jpg", caption="Sunrise by the mountains") .. output:: https://doc-image.streamlit.app/ height: 710px """ if use_column_width is not None: if use_container_width is not None: raise StreamlitAPIException( "`use_container_width` and `use_column_width` cannot be set at the same time.", "Please utilize `use_container_width` since `use_column_width` is deprecated.", ) show_deprecation_warning( "The `use_column_width` parameter has been deprecated and will be removed " "in a future release. Please utilize the `width` parameter instead." ) if use_column_width in {"auto", "never"} or use_column_width is False: width = "content" elif use_column_width == "always" or use_column_width is True: width = "stretch" if use_container_width is not None: show_deprecation_warning( make_deprecated_name_warning( "use_container_width", "width", "2025-12-31", "For `use_container_width=True`, use `width='stretch'`. " "For `use_container_width=False`, use `width='content'`.", include_st_prefix=False, ), show_in_browser=False, ) if use_container_width is True: width = "stretch" elif isinstance(width, int): # Preserve the existing behavior with respect to use_container_width=False # and width=int. 
pass else: width = "content" validate_width(width, allow_content=True) layout_config = LayoutConfig(width=width) image_list_proto = ImageListProto() marshall_images( self.dg._get_delta_path_str(), image, caption, layout_config, image_list_proto, clamp, channels, output_format, ) return self.dg._enqueue("imgs", image_list_proto, layout_config=layout_config) @property def dg(self) -> DeltaGenerator: """Get our DeltaGenerator.""" return cast("DeltaGenerator", self)
ImageMixin
python
joke2k__faker
faker/providers/address/ka_GE/__init__.py
{ "start": 45, "end": 28924 }
class ____(AddressProvider): city_formats = ["{{city_name}}"] street_name_formats = ["{{street_title}} {{street_suffix}}"] street_address_formats = ["{{street_name}} {{building_number}}"] address_formats = ["{{street_address}}, {{city}}"] building_number_formats = ["##"] street_suffixes = ["ქ."] # Source: Wikipedia's list of sovereign states # https://en.wikipedia.org/wiki/List_of_sovereign_states countries = ( "ავსტრალია", "ავსტრია", "ავღანეთი", "აზერბაიჯანი", "ალბანეთი", "ალჟირი", "ამერიკის სამოა", "ამერიკის ვირჯინიის კუნძულები", "ამერიკის შეერთებული შტატები", "ანგილია", "ანგოლა", "ანდორა", "ანტიგუა და ბარბუდა", "არაბთა გაერთიანებული საამიროები", "არგენტინა", "არუბა", "აღმოსავლეთი ტიმორი", "ახალი ზელანდია", "ახალი კალედონია", "ბანგლადეში", "ბარბადოსი", "ბასას-და-ინდია", "ბაჰამის კუნძულები", "ბაჰრეინი", "ბელარუსი", "ბელგია", "ბელიზი", "ბენინი", "ბერმუდა", "ბოლივია", "ბოსნია და ჰერცეგოვინა", "ბოტსვანა", "ბრაზილია", "ბრიტანეთის ვირჯინიის კუნძულები", "ბრიტანეთის ინდოეთის ოკეანის ტერიტორია", "ბრუნეი", "ბულგარეთი", "ბურკინა ფასო", "ბურუნდი", "ბუვე", "ბჰუტანი", "გაბონი", "გაიანა", "გამბია", "განა", "გერმანია", "გვადელუპა", "გვატემალა", "გვინეა", "გვინეა-ბისაუ", "გიბრალტარი", "გრენადა", "გრენლანდია", "გუამი", "დანია", "დიდი ბრიტანეთი", "დომინიკელთა რესპუბლიკა", "დომინიკა", "ეგვიპტე", "ევროპა (კუნძული)", "ეთიოპია", "ეკვადორი", "ეკვატორული გვინეა", "ერაყი", "ერიტრეა", "ესპანეთი", "ესტონეთი", "ეშმორის და კარტიეს კუნძულები", "უოლისი და ფუტუნა", "ვანუატუ", "ვატიკანი", "ვენესუელა", "ვიეტნამი", "ზამბია", "ზიმბაბვე", "თურქეთი", "თურქმენეთი", "იამაიკა", "იან მაიენი", "იაპონია", "იემენი", "ინდოეთი", "ინდონეზია", "იორდანია", "ირანი", "ირლანდია", "ისლანდია", "ისრაელი", "იტალია", "კაბო-ვერდე", "კაიმანის კუნძულები", "კამბოჯა", "კამერუნი", "კანადა", "კატარი", "კენია", "კვიპროსი", "კინგმენის რიფი", "კირიბატი", "ქოქოსის კუნძულები", "კოლუმბია", "კომორის კუნძულები", "კონგოს დემოკრატიული რესპუბლიკა", "კონგოს რესპუბლიკა", "კორეის რესპუბლიკა", "ჩრდილოეთი კორეა", "კოსტა-რიკა", "კოტ-დ’ივუარი", 
"კუბა", "კუკის კუნძულები", "ლაოსი", "ლატვია", "ლესოთო", "ლიბანი", "ლიბერია", "ლიბია", "ლიტვა", "ლიხტენშტაინი", "ლუქსემბურგი", "მადაგასკარი", "მავრიკი", "მავრიტანია", "მაიოტა", "მაკაო", "მაკედონია", "მალავი", "მალაიზია", "მალდივი", "მალი", "მალტა", "მაროკო", "მარშალის კუნძულები", "მარჯნის ზღვის კუნძულები", "მექსიკა", "მიანმარი", "მიკრონეზია", "მოზამბიკი", "მოლდოვა", "მონაკო", "მონსერატი", "მონღოლეთი", "ნამიბია", "ნაურუ", "ნეპალი", "ნიგერი", "ნიგერია", "ნიდერლანდი", "ნიდერლანდის ანტილები", "ნიკარაგუა", "ნიუე", "ნორვეგია", "ნორფოლკის კუნძული", "ომანი", "პაკისტანი", "პალაუ", "პალმირა (ატოლი)", "პანამა", "პაპუა-ახალი გვინეა", "პარაგვაი", "პერუ", "პიტკერნის კუნძულები", "პოლონეთი", "პორტუგალია", "პრინც-ედუარდის კუნძული", "პუერტო-რიკო", "ჟუან-დი-ნოვა", "რეიუნიონი", "რუანდა", "რუმინეთი", "რუსეთი", "საბერძნეთი", "სალვადორი", "სამოა", "სამხრეთ აფრიკის რესპუბლიკა", "სამხრეთი გეორგია და სამხრეთ სენდვიჩის კუნძულები", "სამხრეთი სუდანი", "სან-მარინო", "სან-ტომე და პრინსიპი", "საუდის არაბეთი", "საფრანგეთი", "საფრანგეთის გვიანა", "საფრანგეთის პოლინეზია", "საფრანგეთის სამხრეთული და ანტარქტიდული ტერიტორია", "საქართველო", "სეიშელის კუნძულები", "სენეგალი", "სენ-პიერი და მიკელონი", "სენტ-ვინსენტი და გრენადინები", "სენტ-კიტსი და ნევისი", "სენტ-ლუსია", "სერბეთი", "სეუტა", "სვაზილენდი", "სვალბარდი", "სიერა-ლეონე", "სინგაპური", "სირია", "სლოვაკეთი", "სლოვენია", "სოლომონის კუნძულები", "სომალი", "სომხეთი", "სუდანი", "სურინამი", "ტაივანი", "ტაილანდი", "ტანზანია", "ტაჯიკეთი", "ტერქსისა და კაიკოსის კუნძულები", "ტოგო", "ტოკელაუ", "ტონგა", "ტრინიდადი და ტობაგო", "ტუვალუ", "ტუნისი", "უგანდა", "უზბეკეთი", "უკრაინა", "უნგრეთი", "ურუგვაი", "ფარერის კუნძულები", "ფილიპინები", "ფინეთი", "ფიჯი", "ფოლკლენდის კუნძულები", "ქუვეითი", "ღაზის სექტორი", "ყაზახეთი", "ყირგიზეთი", "შვეიცარია", "შვედეთი", "შობის კუნძული", "შრი-ლანკა", "ჩადი", "ჩერნოგორია", "ჩეხეთი", "ჩეჩნეთის რესპუბლიკა იჩქერია", "ჩილე", "ჩინეთი", "ჩრდილოეთი მარიანას კუნძულები", "ცენტრალური აფრიკის რესპუბლიკა", "წმინდა ელენე, ამაღლება და 
ტრისტანი-და-კუნია", "წყნარი ოკეანის კუნძულები", "ხორვატია", "ჯერსი", "ჯიბუტი", "ჰაიტი", "ჰონდურასი", "ჰონკონგი", "ჰერდი და მაკდონალდის კუნძულები", ) # Source: Tbilisi city directory # http://directory.ge/map/index.php?lang=eng street_titles = ( "300 არაგველის", "8 მარტის", "აბაკელიას", "აბანოს", "აბასთუმანის", "აბაშელის", "აბაშის", "აბაშიძე გრიგოლის", "აბაშიძე დოდოს", "აბაშიძე ირაკლის", "აბაშიძე ჰეიდარის", "აბაშიძის", "აბდუშელიშვილი მალხაზის", "აბესაძე გიას", "აბზიანიძის", "აბო ტბილელის", "აბოვიანის", "აბუსერიძე-ტბელის", "აგარის", "აგლაძე რაფიელის", "ადიგენის", "ავთანდილის", "ავლაბრის", "ავლევის", "ათონელის", "აკეთის", "აკოფიანის", "აკურის", "ალადაშვილის", "ალაზნის", "ალგეთის", "ალექსიძე მერაბის", "ალვანის", "ალიხანიანის", "ალმასიანის", "ამაღლების", "ამბროლაურის", "ამირანაშვილი პეტრეს", "ამირეჯიბის", "ანაკლიის", "ანანურის", "ანდრონიკაშვილის", "ანდღულაძის", "ანტონ კატალიკოსის", "ანტონოვსკაიას", "ანჯაფარიძე ვერიკოს", "არაგვის", "არაგვისპირელი შიოს", "არალეთის", "არარატის", "არაყიშვილი დიმიტრის", "არბოს", "არბოშიკის", "არგვეთის", "არდაზიანის", "არდონის", "არეშიძის", "არველაძის", "ართვინის", "არმაზის", "არსენალის", "ასათიანი ლადოს", "ასკანის", "ასურეთის", "ასხინის", "ატენის", "აფანასიევის", "აფხაზეთის", "აწყურის", "აჭარის", "ახალარსენალის", "ახალდაბის", "ახალუბნის", "ახალქალაქის", "ახვლედიანი ელენეს", "ახვლედიანი გიორგის", "ახვლედიანის", "ახმეტელის", "ახმეტის", "ახოსპირელის", "ახტალის", "ახუთის", "ახუნდოვის", "აჯამეთის", "ბააზოვის", "ბაგინეთის", "ბადიაურის", "ბაზალეთის", "ბათუმის", "ბაკურიანის", "ბაკურციხის", "ბალადინის", "ბალანჩივაძე მელიტონის", "ბარათაშვილი ნოკოლოზის", "ბარათაშვილის", "ბარალეთის", "ბარამიძე ალექსანდრეს", "ბარისახოს", "ბარნოვის", "ბაქოს", "ბაქრაძე დავითის", "ბაქრაძე დიმიტრის", "ბაღდათის", "ბაღნარის", "ბახმაროს", "ბახტრიონის", "ბედიის", "ბევრეთის", "ბეთანიის", "ბეთლემის", "ბელიაშვილი აკაკის", "ბენაშვილის", "ბენდელიანი ჭიჭიკოს", "ბეჟანიშვილი ეკას", "ბერბუქის", "ბერიაშვილის", "ბერიკაშვილის", "ბერიტაშვილის", "ბერიძე ვუკოლის", "ბერძენიშვილის", "ბესიკის", 
"ბექა ოპიზარის", "ბეღლეთის", "ბზიფის", "ბიჭვინთის", "ბოგვის", "ბოდავის", "ბოდბის", "ბოლნისის", "ბორბალოს", "ბოროდინოს", "მ. ლებანიძის", "ბოტანიკურის", "ბოცვაძის", "ბოჭორიშვილის", "ბოჭორმის", "ბჟოლეთის", "ბროლოსანის", "ბროსეს", "ბუაჩიძე თენგიზის", "ბუდაპეშტის", "ბულაჩაურის", "ბურკიაშვილის", "ბურძგლას", "ბუღეულის", "ბუხაიძის", "გაბაშვილი ეკატერინეს", "გაგარინი იურის", "გალავნის", "გალაქტიონ ტაბიძის", "გალის", "გამრეკელის", "გამყრელიძის", "გამცემლიძე შოთას", "განთიადის", "გარე კახეთის", "გარეჯელი დავითის", "გარიყული მარიამის", "გაფრინდაულის", "გახოკიძე აკაკის", "გახოკიძის", "გეგუთის", "გედევანიშვილის", "გეზათის", "გელათის", "გერგეტის", "გვაზაურის", "გვეტაძე რაჟდენის", "გივიშვილის", "გიორგაძის", "გიორგი ბრწყინვალის", "გიორგი მერჩულეს", "გლინკას", "გოგაშენის", "გოგებაშვილის იაკობის", "გოგიბერიძის", "გოგოლაურის", "გოგოლის", "გოგჩის", "გოთუას", "გოკიელის", "გომარეთის", "გომბორის", "გომის", "გონაშვილი ჰამლეტის", "გორგასლის", "გორდის", "გორის", "გორკის", "გოცირიძის", "გოძიაშვილის", "გრანელი ტერენტის", "გრიბოედოვის", "გრიშაშვილის", "გროზნოს", "გრუზინსკი პეტრეს", "გუდამაყრის", "გუდარეხის", "გუდარის", "გუდაუთის", "გუდიაშვილი ლადოს", "გუთნის", "გულიას", "გულისაშვილის", "გულუა გიას", "გუმათის", "გუმათჰესის", "გუმბრის", "გუნიას", "გურგენიძის", "გურიელის", "გურიის", "გურჯაანის", "დაბახანას", "დადიანი შალვას", "დადიანი ცოტნეს", "დაისის", "ლ. ელიავას", "დარკვეთის", "დგებუაძის", "დედოფლისწყაროს", "დეკაბრისტების", "დელისის", "დეპოს", "დვალის", "დვირის", "დიდგორის", "დიდხევის", "დიდი ხეივნის", "დიდი ჯიხაიშის", "დ. 
ყიფიანის", "დიმიტრი თავდადებულის", "დირსიჭალას", "დიუმა ალექსანდრეს", "დმანისის", "დობროლიუბოვის", "დოდაშვილი სოლომონის", "დოესის", "დოლიძე გოგის", "დოლიძის", "დოქის", "დოღუმბარის", "დუტუ მეგრელის", "დუშეთის", "ედისის", "ევდოშვილის", "ეკალაძის", "ელდარის", "ენგურის", "ენგურჰესის", "ენისელის", "ენუქიძის", "ერევნის", "ერისთავი თორნიკეს", "ერისთავი კონსტანტინეს", "ერისთავ-ხოშტარიას", "ერწოს", "ესენინის", "სანდრო ეულის", "ეფრემ მცირის", "ექიმის", "ვაზიანის", "ვაზისუბნის", "ვაკელი იონას", "ვანის", "ვარდევანის", "ვარდისუბნის", "ვართაგავას", "რომის", "ვასაძის", "ვაშლოვანის", "ვახტანგ VI–ის", "ვეზიროვის", "ვეკუა ვოვას", "ვერცხლის", "ვერჰარნის", "ვეძათხევის", "ვეძინის", "ვირსალაძის", "ვორონინის", "საარბრჯუკენის", "ზაზიშვილი გიგოს", "ზალდასტანიშვილის", "ზანდუკელი მიხეილის", "ზარზმის", "ზაქარიაძე სერგოს", "ზედაზნის", "ზედამზის", "ზედაუბნის", "ზეინკლის", "ზეკარის", "ზემო ვაკის", "ზემო ვეძისის", "ზესტაფონის", "ზვარეთის", "ზიარის", "ზიგზაგის", "ზინდისის", "ზიჩი მიხაის", "ზოვრეთის", "ზუბალაშვილების", "ზუგდიდის", "ზურაბიშვილი ავლიპის", "თაბუკაშვილი რეზოს", "თავაძე ფერდინანდის", "თამარაშენის", "თამარაშვილი მიხეილის", "გ. 
სვანიძის", "თარხნიშვილის", "თაქთაქიშვილის", "თაყაიშვილი სესილიას", "თევდორე მღვდლის", "თეთნულდის", "თეთრიწყაროს", "თეკლათის", "თელავის", "ხახანაშვილის", "თელეთის", "თერგის", "თეძმის", "თვალჭრელიძის", "თიანეთის", "თმოგველის", "თმოგვის", "თოდრიას", "თოიძის", "თონეს", "თორაძის", "თოფურიას", "თრიალეთის", "თუმანიანის", "თხინვალის", "იალბუზის", "იამანიძე შოთას", "იაშვილი პაოლოს", "იბრაჰიმ ისპაჰანელის", "იდუმალას", "იეთიმ გურჯის", "იერუსალიმის", "ივერიის", "ივლეთის", "იზაშვილის", "ილორის", "ილურიძე კონსტანტინეს", "იმედაშვილი გაიოზის", "იმერეთის", "ინანიშვილი რამაზის", "ინაშვილის", "ინგოროყვა პავლეს", "ინტერნატის", "იორის", "იოსებიძის", "იოსელიანის", "იპოლიტე-ივანოვის", "ირბაქი ნიკიფორეს", "ირგვლივის", "ისაკიანის", "ისნის", "იფნის", "იყალთოს", "კავთისხევის", "კავსაძის", "კაიშაურის", "კაკაბაძე პოლიკარპეს", "კაკაბაძეების", "კაკლიანის", "კოტე ხიმშიაშვილის", "კალატოზის", "კალიუჟნის", "კალოუბნის", "კანდელაკის", "კანდელაკის", "კანკავას", "კაპანაძის", "კარალეთის", "კარგარეთელის", "კასპის", "კაჭრეთის", "კახიანის", "კედია სპირიდონის", "კეკელიძე კორნელის", "კელაპტრიშვილი ომარის", "კერესელიძე არჩილის", "კერესელიძის", "კეცხოველი ნიკოს", "კვალეთის", "კვალის", "კვანტალიანის", "კვერნაულის", "კვესეთის", "კიევის", "კიკეთის", "კიკვიძის", "კისისხევის", "კიშინიოვის", "კლდეკარის", "კლდიაშვილის", "კნოლევის", "კობახიძის", "კობერიძის", "კოდალოს", "კოდორის", "კოკინაკის", "კოლმეურნეობის ველის", "კოლხეთის", "კომუნის", "კონდოლის", "კონსტიტუციის", "კოფცოვის", "კოსტავას", "კოტეტიშვილი ვახტანგის", "კოშკოვანის", "კოხრეიძის", "კოჯრის", "ჯ. 
კახიძის", "კრწანისის", "კუმისის", "კუპრაძის", "კურნატოვსკის", "კურსების", "კურსკის", "კუფტინის", "ლაგოდეხის", "ლაზოს", "ლაითურის", "ლაილაშის", "ლალიონის", "ლამის", "ლამისყანის", "ლანჩხუთის", "ლარეხის", "ლარსის", "ლაღიძე მიტროფანეს", "ლაღიძე რევაზის", "ლებარდეს", "ლეკიშვილის", "ლენტეხის", "ლეონიძე გიორგის", "ლეჟავას", "ლერმონტოვის", "ლერწმის", "ლესელიძის", "ლესია უკრაინკას", "ლეჩხუმის", "ლიახვის", "ლიბანის", "ლიკანის", "ლისაშვილის", "ლიუბოვსკის", "ლიხაურის", "ლიხის", "ლომაურის", "ლომთათიძის", "ლომონოსოვის", "ლორთქიფანიძე გრიგოლის", "ლორთქიფანიძის", "ლოჭინის", "ლუბლიანას", "ლუსიანინის", "მაზნიაშვილის", "მათიაშვილის", "მაიაკოვსკის", "მამასახლისოვის", "მამკოდის", "მამკოდის", "მამრაძის", "მანაგაძე ალეხსანდეს", "მანავის", "მანგლისის", "მანიჯაშვილი კახას", "მანჯგალაძე ეროსის", "მარაბდის", "მარგიანი რევაზის", "მარელისის", "მარი ნიკოს", "მარიჯანის", "მარტვილის", "მარტყოფის", "მარუაშვილი გიორგის", "მარუხის გმირების", "მარჯანიშვილი კოტეს", "მარჯანიშვილი კოტეს", "მაღალაშვილის", "მაღაროს", "მაჩაბელი ივანეს", "მაჩხაანის", "მაცესტის", "მაჭრის", "მახათას", "მახინჯაურის", "მგალობლიშვილის", "მებაღიშვილის", "მეგობრობის", "მეგრელაძის", "მეველეს", "მელაანის", "მელიქიშვილის", "მესხეთის", "მესხიას", "მესხიშვილი ალექსის", "მესხიშვილის", "მეტეხის", "მეუნარგიას", "მექანიზაციის", "მეჯვრისხევის", "მთავარანგელოზის", "მთაწმინდის", "მთისძირის", "მიმინოშვილი რომანის", "მინდელაურის", "მინდელის", "მირზა მეფის", "მირზაანის", "მიროტაძის", "მიტინგის", "მიქატაძის", "მიქატაძის", "მიქელაძე ევგენის", "მიქელაძის", "მიშველაძე არჩილის", "მიჩურინის", "მიცკევიჩის", "მნათობის", "მოლითის", "მოლოკოვის", "მორეტის", "მოსაშვილის", "მოსე ხონელის", "მოსიძე ვახტანგის", "მოსტკოვის", "მოსულიშვილის", "მრევლიშვილის", "მტკვრის", "მუკუზანის", "მუსხელიშვილის", "მუხაძის", "მუხაძის", "მუხრანის", "მშველიძის", "მცხეთის", "ნაბახტაურის", "ნაგომარის", "ნადიკვარის", "ნადირაძე კოლაუს", "ნავთლუღის", "ნათაძის", "ნაკადულის", "ნიშნიანიძის", "ნანეიშვილი ვიქტორის", "ნანეიშვილი ვლადიმერის", "ნარგიზის", "ნასაკირალის", "ნასიძე სულხანის", 
"ნაქალაქევის", "ნაქერალას", "ნიაბის", "ნიაღვრის", "ნიზამის", "ნიკოლაძე ნიკოს", "ნინიძის", "ნიორაძის", "ნოვოროსისკის", "ნონეშვილი იოსების", "ნოსირის", "ნოსტეს", "ნუცუბიძის", "ობსერვატორიის", "ოდესის", "ონიაშვილის", "ონის", "ოჟიოს", "ორბეთის", "ორბელების", "ორთაჭალის", "ორპირის", "ორხევის", "ოსეთის", "ოსიაურის", "ოფრეთის", "ოქრომჭედლების", "ოქროყანის", "ოჩამჩირის", "ოცხელების", "ოძელაშვილის", "ოძისის", "პაიჭაძის", "პალიასტომის", "პანკისის", "პასტერის", "პატარიძის", "პატარძეულის", "პეტეფი შანდორის", "პეტრე იბერის", "პეტრიაშვილის", "პეტრიწის", "პიატიგორსკის", "პიონერის", "პისარევის", "პლატონის", "პუშკინი ალექსანდრეს", "ჟველაურის", "ჟინვალის", "ჟონეთის", "ჟორესის", "ჟღენტის", "რადიანი შალვას", "რაზიკაშვილის", "რაზმაძის", "რატევანის", "რატილის", "რაჭის", "რევოლუცის", "რთველაძის", "რიონის", "რიონჰესის", "რიწის", "რკინიგზის", "რკინის", "როდენის", "როსტოვის", "როსტომაშვილის", "რუისპირის", "რუსთაველის", "რჩეულიშვილის", "საადის", "სააკაძე პაატას", "სააკაძის", "საბადურის", "საბანისძის", "საბაშვილის", "საგარეჯოს", "საგურამოს", "სადმელის", "სავანელის", "სათემოს", "საიათნოვას", "საირმის", "სალამის", "სალხინოს", "სამამულო ომის გმირების", "სამგორის", "სამტრედიის", "სამურზაყანოს", "სამურის", "სამღებროს", "სამღერეთის", "სამშვილდეს", "სანავარდოს", "სანკტ-პეტერბურგის", "სარაჯიშვილი დავითის", "სარაჯიშვილი პეტრეს", "სართანიას", "სართიჭალის", "სარკინეთის", "საქანელას", "საქარის", "საყვირის", "საჩხერის", "საცხენისის", "საჭილაოს", "სახოკიას", "სევანის", "სენაკის", "სვანეთის", "გუდაურის", "სვირის", "სიონის", "სიღნაღის", "სიხარულიძის", "სკოლის", "სომხეთის", "სოხუმის", "სოღანლუღის", "სპანდარიანის", "სპარტაკის", "სტამბის", "სტანისლავსკის", "სტურუას", "სუვოროვის", "სულიაშვილის", "სულხანიშვილის", "სულხან-საბას", "სუმბატაშვილ-იუჟინსკის", "სუნდუკიანის", "სურამის", "სურგულაძის", "სხვიტორის", "სხირტლაძის", "სხულუხიას", "ტაბახმელას", "ტაბიძე ტიციანის", "ტანძიის", "ტარიელის", "ტატიშვილი ერეკლეს", "ტატიშვილის", "ტაშირის", "ტაშკენტის", "ტელეგრაფის", "ტეტელაშვილის", "ტეხურის", "ტვიშის", "ტიბაანის", 
"ტირიფონის", "ტიულენევის", "ტიხონოვის", "ტოლენჯის", "ტოლსტოის", "ტოლსტონოგოვის", "ტრანსპორტის", "ტრაქტორის", "ტრიკოტაჟის", "ტურგენევის", "ტუსკიას", "ტყავის", "ტყეკულტურის", "ტყვარჩელის", "ტყვიავის", "ტყიბულის", "ტყის", "უბილავას", "უზნაძე დიმიტრის", "უზნაძის", "უიარაღოს", "უკლება კირილეს", "უმიკაშვილის", "უნივერსიტეტის", "ურბნისის", "ურეკის", "ურიდიას", "ურიცკის", "უფლისციხის", "უშაკოვის", "უჩანეიშვილი ირაკლის", "უწერის", "უჯარმის", "ფაბრიკის", "ფალიაშვილის", "ფანასკერტელ-ციციშვილის", "ფანჯიკიძის", "ფარავნის", "ფასანაურის", "ფაღავა ირაკლის", "ფერისცვალების", "ფიზკულტურის", "ფილიას", "ფირდოუსის", "ფიროსმანის", "ფიფიას", "ფოთის", "ფოსტის", "ფოცხვერაშვილის", "ფოცხიაშვილი მორისის", "ფურცელაძის", "ფშავის", "ქავთარაძის", "ქარელის", "ქართველიშვილი ლევანის", "ქართლის", "ქებურიას", "ქედის", "ქერჩის", "ქვალონის", "ქვიშხეთის", "ქიაჩელის", "ქიზიყის", "ქინქლაძე ოთარის", "ქინძმარაულის", "ქიქოძე გერონტის", "ქობულაძის", "ქობულეთის", "ქსნის", "ქსოვრელის", "ქუთათელაძის", "ქუთათელაძე აპოლონის", "ქუთაისის", "ქუმსიაშვილის", "ქურდიანი არჩილის", "ქურდიანი ზაქარიას", "ქურხულის", "ქუჩიშვილის", "ღამბაშიძის", "ღრმაღელეს", "ღუდუშაური ოთარის", "ყავლაშვილი შოთას", "ყარყარაშვილის", "ყვარელის", "ყირიმის", "ყიფიანის", "ყიფშიძის", "ყუშიტაშვილის", "შავგულიძის", "შავთელის", "შავი ზღვის", "შავიშვილის", "შავნაბადას", "შავსოფელის", "შანიძე აკაკის", "შანშიაშვილის", "შარაშიძის", "შარდენის", "შარტავა ჟიულის", "შატბერაშვილის", "შატილის", "შაქრიანის", "შევჩენკო ტარასის", "შენგელაიას", "შერვაშიძის", "შილდის", "შინდისის", "შიო მღვიმელის", "შირაქის", "შოვის", "შორაპნის", "შროშის", "შუამთის", "შურდულის", "შხეფის", "ჩაიკოვსკის", "ჩაილურის", "ჩაისუბნის", "ჩანჩიბაძის", "ჩარგლის", "ჩარხის", "ჩაქვის", "ჩაჩავას", "ჩახრუხაძის", "ჩერნიშევსკის", "ჩერქეზიშვილის", "ჩეჩელაშვილის", "ჩეხოვის", "ჩიკვანიას", "ჩიტაიას", "ჩიტაძის", "ჩიქობავა არნოლდის", "ჩიქოვანის", "ჩკალოვის", "ჩოლოყაშვილი ქაიხოსროს", "ჩოჩუას", "ჩოხატაურის", "ჩოხელის", "ჩუბინაშვილი გიორგის", "ჩუბინიძის", "ჩხიკვაძის", "ცაბაძე გიორგის", "ცაგარელი არჩილის", 
"ცაგერის", "ცაიშის", "ცემის", "ციმაკურიძის", "ცინცაძე კალისტრატეს", "ცისარტკელას", "ცისკრის", "ციხისძირის", "ცოდნისკარის", "ცურტაველი იაკობის", "ცუცქირიძის", "ცხემის", "ცხვედაძის", "ცხრა აპრილის", "ცხრა ძმის", "ძეგამის", "ძევერის", "ძმობის", "ძოწენიძის", "წავკისის", "წალენჯიხის", "წალკის", "წაღვერის", "წერეთლის", "წერნაკის", "წერონისის", "წიკლაურის", "წინამძღვრიშვილის", "წინამძღვრიშვილის", "წინანაურის", "წინანდლის", "წინაუბნის", "წიწამურის", "წმ. ნიკოლოზის", "წნორისწყლის", "წრომის", "წულაძის", "წულუკიძის", "წურწუმიას", "წუწუნავას", "წუწხვატის", "წყალსადენის", "წყალტუბოს", "წყაროს", "ჭაბუკიანი ვახტანგის", "ჭავჭავაძე ზურაბის", "ჭავჭავაძე ალექსანდრეს", "ჭალადიდის", "ჭანტურია გიას", "ჭიათურის", "ჭიაურელი მიხეილის", "ჭიჭინაძე ზაქარიას", "ჭოველიძე თამარის", "ჭონქაძე დანიელის", "ჭოპორტის", "ჭოროხის", "ჭრებალოს", "ჭრელაშვილის", "ხაბეიშვილის", "ხაზინის", "ხანძთელი გრიგოლის", "ხარაბაძის", "ხარაგაულის", "ხარფუხის", "ხაჩატურიანის", "ხევის", "ხევისუბნის", "ხევსურეთის", "ხევძმარის", "ხეთაგუროვის", "ხერგიანის", "ხერთვისის", "ხერხეულიძეების", "ხეჩუაშვილის", "ხვამლის", "ხვანჭკარის", "ხვედელიანის", "ხვინგიას", "ხვიჩია იპოლიტეს", "ხიდის", "ხიდისთავის", "ხივინის", "ხიმშიაშვილის", "ხმელნიცკის", "ხოდაშენის", "ხომლელის", "ხონის", "ხორავა აკაკის", "ხორნაბუჯის", "ხოშარაულის", "ხრამჰესის", "ხრესილის", "ხუდადოვის", "ჯაბაურის", "ჯაბიძის", "ჯავახეთის", "ჯავახიშვილი ივანეს", "ჯავახიშვილი მიხეილის", "ჯავის", "ჯამბულის", "ჯანაშვილის", "ჯანაშიას", "ჯანჯღავას", "ჯვარედინის", "პოლიტკოვსკაიას", "ჯიქიას", "ჯორბენაძის", "ჯორჯაძის", "ჰოსპიტალის", ) # Source: List of cities and towns in Georgia (Wikipedia) # https://en.wikipedia.org/wiki/List_of_cities_and_towns_in_Georgia_(country) city_names = ( "აბაშა", "ამბროლაური", "ახალი ათონი", "ახალქალაქი", "ახალციხე", "ახმეტა", "ბათუმი", "ბაღდათი", "ბოლნისი", "ბორჯომი", "გაგრა", "გალი", "გარდაბანი", "გორი", "გუდაუთა", "გურჯაანი", "დედოფლისწყარო", "დმანისი", "დუშეთი", "ვალე", "ვანი", "ზესტაფონი", "ზუგდიდი", "თბილისი", "თეთრიწყარო", "თელავი", "თერჯოლა", "კასპი", 
"ლაგოდეხი", "ლანჩხუთი", "მარნეული", "მარტვილი", "მცხეთა", "ნინოწმინდა", "ოზურგეთი", "ონი", "ოჩამჩირე", "რუსთავი", "საგარეჯო", "სამტრედია", "საჩხერე", "სენაკი", "სიღნაღი", "სოხუმი", "ტყვარჩელი", "ტყიბული", "ფოთი", "ქარელი", "ქობულეთი", "ქუთაისი", "ყვარელი", "ცაგერი", "ცხინვალი", "წალენჯიხა", "წალკა", "წნორი", "წყალტუბო", "ჭიათურა", "ხაშური", "ხობი", "ხონი", "ჯვარი", ) def street_title(self) -> str: return self.random_element(self.street_titles) def city_name(self) -> str: return self.random_element(self.city_names)
Provider
python
huggingface__transformers
src/transformers/models/visual_bert/configuration_visual_bert.py
{ "start": 787, "end": 6767 }
class ____(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`VisualBertModel`]. It is used to instantiate an VisualBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the VisualBERT [uclanlp/visualbert-vqa-coco-pre](https://huggingface.co/uclanlp/visualbert-vqa-coco-pre) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the VisualBERT model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`VisualBertModel`]. Vocabulary size of the model. Defines the different tokens that can be represented by the `inputs_ids` passed to the forward method of [`VisualBertModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. visual_embedding_dim (`int`, *optional*, defaults to 512): Dimensionality of the visual embeddings to be passed to the model. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. 
hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`VisualBertModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. bypass_transformer (`bool`, *optional*, defaults to `False`): Whether or not the model should bypass the transformer for the visual embeddings. If set to `True`, the model directly concatenates the visual embeddings from [`VisualBertEmbeddings`] with text output from transformers, and then pass it to a self-attention layer. special_visual_initialize (`bool`, *optional*, defaults to `True`): Whether or not the visual token type and position type embedding weights should be initialized the same as the textual token type and positive type embeddings. When set to `True`, the weights of the textual token type and position type embeddings are copied to the respective visual embedding layers. 
Example: ```python >>> from transformers import VisualBertConfig, VisualBertModel >>> # Initializing a VisualBERT visualbert-vqa-coco-pre style configuration >>> configuration = VisualBertConfig.from_pretrained("uclanlp/visualbert-vqa-coco-pre") >>> # Initializing a model (with random weights) from the visualbert-vqa-coco-pre style configuration >>> model = VisualBertModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "visual_bert" def __init__( self, vocab_size=30522, hidden_size=768, visual_embedding_dim=512, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, bypass_transformer=False, special_visual_initialize=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.visual_embedding_dim = visual_embedding_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.type_vocab_size = type_vocab_size self.layer_norm_eps = layer_norm_eps self.bypass_transformer = bypass_transformer self.special_visual_initialize = special_visual_initialize __all__ = ["VisualBertConfig"]
VisualBertConfig
python
great-expectations__great_expectations
contrib/capitalone_dataprofiler_expectations/capitalone_dataprofiler_expectations/expectations/expect_column_values_to_be_probabilistically_greater_than_or_equal_to_threshold.py
{ "start": 1788, "end": 6939 }
class ____(ColumnMapExpectation): """Expect the column values to be probabilistically greater than or equal to the specified threshold. This function builds upon the custom column map expectations of Great Expectations. This function asks a yes/no question of each row in the user-specified column; namely, does the confidence threshold provided by the DataProfiler model exceed the user-specified threshold. Args: column (str): The column name that you want to check. threshold (float): The value, usually as a decimal (e.g. .32), you want to use to flag low confidence predictions df.expect_column_values_to_be_probabilistically_greater_than_or_equal_to_threshold( column, threshold=float(0<=1) ) """ examples = [ { "data": { "OPEID6": ["1002", "1052", "25034", "McRoomyRoom"], "INSTNM": [ "Alabama A & M University", "University of Alabama at Birmingham", "Amridge University", "McRoomyRoom", ], "ZIP": ["35762", "35294-0110", "36117-3553", "McRoomyRoom"], "ACCREDAGENCY": [ "Southern Association of Colleges and Schools Commission on Colleges", "Southern Association of Colleges and Schools Commission on Colleges", "Southern Association of Colleges and Schools Commission on Colleges", "McRoomyRoom", ], "INSTURL": [ "www.aamu.edu/", "https://www.uab.edu", "www.amridgeuniversity.edu", "McRoomyRoom", ], "NPCURL": [ "www.aamu.edu/admissions-aid/tuition-fees/net-price-calculator.html", "https://uab.studentaidcalculator.com/survey.aspx", "www2.amridgeuniversity.edu:9091/", "McRoomyRoom", ], "LATITUDE": ["34.783368", "33.505697", "32.362609", "McRoomyRoom"], "LONGITUDE": ["-86.568502", "-86.799345", "-86.17401", "McRoomyRoom"], "RELAFFIL": ["NULL", "NULL", "74", "McRoomyRoom"], "DEATH_YR2_RT": [ "PrivacySuppressed", "PrivacySuppressed", "PrivacySuppressed", "McRoomyRoom", ], "SEARCH_STRING": [ "Alabama A & M University AAMU", "University of Alabama at Birmingham ", "Amridge University Southern Christian University Regions University", "McRoomyRoom", ], }, "tests": [ { "title": 
"positive_test_with_column_one", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "OPEID6", "threshold": 0.00}, "out": { "success": True, "unexpected_index_list": [], "unexpected_list": [], }, }, { "title": "error_test_threshold", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "ZIP", "threshold": 0.61}, "out": { "success": False, }, }, ], } ] # This is the id string of the Metric used by this Expectation. # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above. map_metric = "column_values.prediction_confidence_greater_than_or_equal_to_threshold" # This is a list of parameter names that can affect whether the Expectation evaluates to True or False success_keys = ( "threshold", "mostly", ) # This dictionary contains default values for any parameters that should have default values default_kwarg_values = { "threshold": None, "result_format": "BASIC", "catch_exceptions": False, } # This object contains metadata for display in the public Gallery library_metadata = { "requirements": ["dataprofiler", "tensorflow", "scikit-learn", "numpy"], "maturity": "experimental", # "concept_only", "experimental", "beta", or "production" "tags": ["dataprofiler"], # Tags for this Expectation in the Gallery "contributors": [ # Github handles for all contributors to this Expectation. "@taylorfturner", # Don't forget to add your github handle here! ], } if __name__ == "__main__": diagnostics_report = ( ExpectColumnValuesToBeProbabilisticallyGreaterThanOrEqualToThreshold().run_diagnostics() ) print(diagnostics_report.generate_checklist())
ExpectColumnValuesToBeProbabilisticallyGreaterThanOrEqualToThreshold
python
django__django
tests/inspectdb/models.py
{ "start": 1280, "end": 1761 }
class ____(models.Model): field = models.IntegerField(db_column="field") # Underscores field_field_0 = models.IntegerField(db_column="Field_") field_field_1 = models.IntegerField(db_column="Field__") field_field_2 = models.IntegerField(db_column="__field") # Other chars prc_x = models.IntegerField(db_column="prc(%) x") non_ascii = models.IntegerField(db_column="tamaño") class Meta: db_table = "inspectdb_special.table name"
SpecialName
python
pytorch__pytorch
torch/distributed/elastic/rendezvous/dynamic_rendezvous.py
{ "start": 16034, "end": 16478 }
class ____(Enum): """Specifies the possible actions based on the state of the rendezvous.""" KEEP_ALIVE = 1 ADD_TO_PARTICIPANTS = 2 ADD_TO_WAIT_LIST = 3 ADD_TO_REDUNDANCY_LIST = 4 REMOVE_FROM_PARTICIPANTS = 5 REMOVE_FROM_WAIT_LIST = 6 REMOVE_FROM_REDUNDANCY_LIST = 7 MARK_RENDEZVOUS_COMPLETE = 8 MARK_RENDEZVOUS_CLOSED = 9 SYNC = 10 ERROR_CLOSED = 11 ERROR_TIMEOUT = 12 FINISH = 13
_Action
python
facelessuser__pymdown-extensions
pymdownx/arithmatex.py
{ "start": 9526, "end": 11657 }
class ____(BlockProcessor): """MathJax block processor to find $$MathJax$$ content.""" def __init__(self, pattern, config, md): """Initialize.""" # Generic setup self.generic = config.get('generic', False) wrap = config.get('tex_block_wrap', ['\\[', '\\]']) self.wrap = ( wrap[0].replace('{', '}}').replace('}', '}}') + '{}' + wrap[1].replace('{', '}}').replace('}', '}}') ) self.block_tag = config.get('block_tag', 'div') # Default setup self.preview = config.get('preview', False) self.match = None self.pattern = re.compile(pattern) BlockProcessor.__init__(self, md.parser) def test(self, parent, block): """Return 'True' for future Python Markdown block compatibility.""" self.match = self.pattern.match(block) if self.pattern is not None else None return self.match is not None def mathjax_output(self, parent, math): """Default MathJax output.""" grandparent = parent parent = etree.SubElement(grandparent, self.block_tag, {'class': 'arithmatex'}) if self.preview: preview = etree.SubElement(parent, 'div', {'class': 'MathJax_Preview'}) preview.text = md_util.AtomicString(math) el = etree.SubElement(parent, 'script', {'type': 'math/tex; mode=display'}) el.text = md_util.AtomicString(math) def generic_output(self, parent, math): """Generic output.""" el = etree.SubElement(parent, self.block_tag, {'class': 'arithmatex'}) el.text = md_util.AtomicString(self.wrap.format(math)) def run(self, parent, blocks): """Find and handle block content.""" blocks.pop(0) groups = self.match.groupdict() math = groups.get('math', '') if not math: math = groups.get('math2', '') if not math: math = groups.get('math3', '') if self.generic: self.generic_output(parent, math) else: self.mathjax_output(parent, math) return True
BlockArithmatexProcessor
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_bugbear/B024.py
{ "start": 2253, "end": 2341 }
class ____(ABC): # error (not an abstract attribute) foo = 2
abc_set_class_variable_2
python
getsentry__sentry
tests/sentry/backup/test_imports.py
{ "start": 49090, "end": 59142 }
class ____(ImportTestCase): """ Ensures that filtering operations include the correct models. """ def test_import_filter_users(self) -> None: self.create_exhaustive_user("user_1") self.create_exhaustive_user("user_2") with tempfile.TemporaryDirectory() as tmp_dir: tmp_path = self.export_to_tmp_file_and_clear_database(tmp_dir) with open(tmp_path, "rb") as tmp_file: import_in_user_scope(tmp_file, user_filter={"user_2"}, printer=NOOP_PRINTER) with assume_test_silo_mode(SiloMode.CONTROL): # Count users, but also count a random model naively derived from just `User` alone, # like `UserEmail`. Because `Email` and `UserEmail` have some automagic going on that # causes them to be created when a `User` is, we explicitly check to ensure that they # are behaving correctly as well. assert User.objects.count() == 1 assert UserEmail.objects.count() == 1 assert Email.objects.count() == 1 assert ( ControlImportChunk.objects.filter( model="sentry.user", min_ordinal=1, max_ordinal=1 ).count() == 1 ) assert ( ControlImportChunk.objects.filter( model="sentry.useremail", min_ordinal=1, max_ordinal=1 ).count() == 1 ) assert ( ControlImportChunk.objects.filter( model="sentry.email", min_ordinal=1, max_ordinal=1 ).count() == 1 ) assert not User.objects.filter(username="user_1").exists() assert User.objects.filter(username="user_2").exists() def test_import_filter_users_shared_email(self) -> None: self.create_exhaustive_user("user_1", email="a@example.com") self.create_exhaustive_user("user_2", email="b@example.com") self.create_exhaustive_user("user_3", email="a@example.com") self.create_exhaustive_user("user_4", email="b@example.com") with tempfile.TemporaryDirectory() as tmp_dir: tmp_path = self.export_to_tmp_file_and_clear_database(tmp_dir) with open(tmp_path, "rb") as tmp_file: import_in_user_scope( tmp_file, user_filter={"user_1", "user_2", "user_3"}, printer=NOOP_PRINTER ) with assume_test_silo_mode(SiloMode.CONTROL): assert User.objects.count() == 3 assert UserEmail.objects.count() 
== 3 assert Email.objects.count() == 2 # Lower due to shared emails assert ( ControlImportChunk.objects.filter( model="sentry.user", min_ordinal=1, max_ordinal=3 ).count() == 1 ) assert ( ControlImportChunk.objects.filter( model="sentry.useremail", min_ordinal=1, max_ordinal=3 ).count() == 1 ) assert ( ControlImportChunk.objects.filter( model="sentry.email", min_ordinal=1, max_ordinal=2 ).count() == 1 ) assert User.objects.filter(username="user_1").exists() assert User.objects.filter(username="user_2").exists() assert User.objects.filter(username="user_3").exists() assert not User.objects.filter(username="user_4").exists() def test_import_filter_users_empty(self) -> None: self.create_exhaustive_user("user_1") self.create_exhaustive_user("user_2") with tempfile.TemporaryDirectory() as tmp_dir: tmp_path = self.export_to_tmp_file_and_clear_database(tmp_dir) with open(tmp_path, "rb") as tmp_file: import_in_user_scope(tmp_file, user_filter=set(), printer=NOOP_PRINTER) with assume_test_silo_mode(SiloMode.CONTROL): assert User.objects.count() == 0 assert UserEmail.objects.count() == 0 assert Email.objects.count() == 0 def test_import_filter_orgs_single(self) -> None: a = self.create_exhaustive_user("user_a_only", email="shared@example.com") b = self.create_exhaustive_user("user_b_only", email="shared@example.com") c = self.create_exhaustive_user("user_c_only", email="shared@example.com") a_b = self.create_exhaustive_user("user_a_and_b") b_c = self.create_exhaustive_user("user_b_and_c") a_b_c = self.create_exhaustive_user("user_all", email="shared@example.com") self.create_exhaustive_organization("org-a", a, a_b, [a_b_c]) self.create_exhaustive_organization("org-b", b_c, a_b_c, [b, a_b]) self.create_exhaustive_organization("org-c", a_b_c, b_c, [c]) with tempfile.TemporaryDirectory() as tmp_dir: tmp_path = self.export_to_tmp_file_and_clear_database(tmp_dir) with open(tmp_path, "rb") as tmp_file: import_in_organization_scope(tmp_file, org_filter={"org-b"}, 
printer=NOOP_PRINTER) assert Organization.objects.count() == 1 assert ( RegionImportChunk.objects.filter( model="sentry.organization", min_ordinal=1, max_ordinal=1 ).count() == 1 ) assert not Organization.objects.filter(slug="org-a").exists() assert Organization.objects.filter(slug="org-b").exists() assert not Organization.objects.filter(slug="org-c").exists() with assume_test_silo_mode(SiloMode.CONTROL): assert OrgAuthToken.objects.count() == 1 assert User.objects.count() == 4 assert UserEmail.objects.count() == 4 assert Email.objects.count() == 3 # Lower due to `shared@example.com` assert not User.objects.filter(username="user_a_only").exists() assert User.objects.filter(username="user_b_only").exists() assert not User.objects.filter(username="user_c_only").exists() assert User.objects.filter(username="user_a_and_b").exists() assert User.objects.filter(username="user_b_and_c").exists() assert User.objects.filter(username="user_all").exists() def test_import_filter_orgs_multiple(self) -> None: a = self.create_exhaustive_user("user_a_only", email="shared@example.com") b = self.create_exhaustive_user("user_b_only", email="shared@example.com") c = self.create_exhaustive_user("user_c_only", email="shared@example.com") a_b = self.create_exhaustive_user("user_a_and_b") b_c = self.create_exhaustive_user("user_b_and_c") a_b_c = self.create_exhaustive_user("user_all", email="shared@example.com") self.create_exhaustive_organization("org-a", a, a_b, [a_b_c]) self.create_exhaustive_organization("org-b", b_c, a_b_c, [b, a_b]) self.create_exhaustive_organization("org-c", a_b_c, b_c, [c]) with tempfile.TemporaryDirectory() as tmp_dir: tmp_path = self.export_to_tmp_file_and_clear_database(tmp_dir) with open(tmp_path, "rb") as tmp_file: import_in_organization_scope( tmp_file, org_filter={"org-a", "org-c"}, printer=NOOP_PRINTER ) assert Organization.objects.count() == 2 assert ( RegionImportChunk.objects.filter( model="sentry.organization", min_ordinal=1, max_ordinal=2 ).count() == 
1 ) assert Organization.objects.filter(slug="org-a").exists() assert not Organization.objects.filter(slug="org-b").exists() assert Organization.objects.filter(slug="org-c").exists() with assume_test_silo_mode(SiloMode.CONTROL): assert OrgAuthToken.objects.count() == 2 assert ( ControlImportChunk.objects.filter( model="sentry.orgauthtoken", min_ordinal=1, max_ordinal=2 ).count() == 1 ) assert User.objects.count() == 5 assert UserEmail.objects.count() == 5 assert Email.objects.count() == 3 # Lower due to `shared@example.com` assert User.objects.filter(username="user_a_only").exists() assert not User.objects.filter(username="user_b_only").exists() assert User.objects.filter(username="user_c_only").exists() assert User.objects.filter(username="user_a_and_b").exists() assert User.objects.filter(username="user_b_and_c").exists() assert User.objects.filter(username="user_all").exists() def test_import_filter_orgs_empty(self) -> None: a = self.create_exhaustive_user("user_a_only") b = self.create_exhaustive_user("user_b_only") c = self.create_exhaustive_user("user_c_only") a_b = self.create_exhaustive_user("user_a_and_b") b_c = self.create_exhaustive_user("user_b_and_c") a_b_c = self.create_exhaustive_user("user_all") self.create_exhaustive_organization("org-a", a, a_b, [a_b_c]) self.create_exhaustive_organization("org-b", b_c, a_b_c, [b, a_b]) self.create_exhaustive_organization("org-c", a_b_c, b_c, [c]) with tempfile.TemporaryDirectory() as tmp_dir: tmp_path = self.export_to_tmp_file_and_clear_database(tmp_dir) with open(tmp_path, "rb") as tmp_file: import_in_organization_scope(tmp_file, org_filter=set(), printer=NOOP_PRINTER) assert Organization.objects.count() == 0 with assume_test_silo_mode(SiloMode.CONTROL): assert OrgAuthToken.objects.count() == 0 assert User.objects.count() == 0 assert UserEmail.objects.count() == 0 assert Email.objects.count() == 0 COLLISION_TESTED: set[NormalizedModelName] = set()
FilterTests
python
walkccc__LeetCode
solutions/2574. Left and Right Sum Differences/2574.py
{ "start": 0, "end": 441 }
class ____: def leftRigthDifference(self, nums: list[int]) -> list[int]: n = len(nums) leftSum = [0] * n rightSum = [0] * n prefix = 0 suffix = 0 for i in range(n): if i > 0: prefix += nums[i - 1] leftSum[i] = prefix for i in range(n - 1, -1, -1): if i + 1 < n: suffix += nums[i + 1] rightSum[i] = suffix return [abs(l - r) for l, r in zip(leftSum, rightSum)]
Solution
python
keon__algorithms
tests/test_graph.py
{ "start": 2554, "end": 3161 }
class ____(unittest.TestCase): def test_dijkstra(self): g = Dijkstra(9) g.graph = [[0, 4, 0, 0, 0, 0, 0, 8, 0], [4, 0, 8, 0, 0, 0, 0, 11, 0], [0, 8, 0, 7, 0, 4, 0, 0, 2], [0, 0, 7, 0, 9, 14, 0, 0, 0], [0, 0, 0, 9, 0, 10, 0, 0, 0], [0, 0, 4, 14, 10, 0, 2, 0, 0], [0, 0, 0, 0, 0, 2, 0, 1, 6], [8, 11, 0, 0, 0, 0, 1, 0, 7], [0, 0, 2, 0, 0, 0, 6, 7, 0]] self.assertEqual(g.dijkstra(0), [0, 4, 12, 19, 21, 11, 9, 8, 14])
TestDijkstra
python
getsentry__sentry
src/sentry/replays/usecases/ingest/__init__.py
{ "start": 2108, "end": 2241 }
class ____(msgspec.Struct, gc=False, tag_field="type", tag=6): pass # These are the schema definitions we care about.
PluginEvent
python
Farama-Foundation__Gymnasium
gymnasium/wrappers/vector/rendering.py
{ "start": 566, "end": 7216 }
class ____(VectorWrapper, gym.utils.RecordConstructorArgs): """Adds support for Human-based Rendering for Vector-based environments.""" ACCEPTED_RENDER_MODES = [ "rgb_array", "rgb_array_list", "depth_array", "depth_array_list", ] def __init__(self, env: VectorEnv, screen_size: tuple[int, int] | None = None): """Constructor for Human Rendering of Vector-based environments. Args: env: The vector environment screen_size: The rendering screen size otherwise the environment sub-env render size is used """ VectorWrapper.__init__(self, env) gym.utils.RecordConstructorArgs.__init__(self, screen_size=screen_size) self.screen_size = screen_size self.scaled_subenv_size, self.num_rows, self.num_cols = None, None, None self.window = None # Has to be initialized before asserts, as self.window is used in auto close self.clock = None assert ( self.env.render_mode in self.ACCEPTED_RENDER_MODES ), f"Expected env.render_mode to be one of {self.ACCEPTED_RENDER_MODES} but got '{env.render_mode}'" assert ( "render_fps" in self.env.metadata ), "The base environment must specify 'render_fps' to be used with the HumanRendering wrapper" if "human" not in self.metadata["render_modes"]: self.metadata = deepcopy(self.env.metadata) self.metadata["render_modes"].append("human") @property def render_mode(self) -> str: """Always returns ``'human'``.""" return "human" def step( self, actions: ActType ) -> tuple[ObsType, ArrayType, ArrayType, ArrayType, dict[str, Any]]: """Perform a step in the base environment and render a frame to the screen.""" result = super().step(actions) self._render_frame() return result def reset( self, *, seed: int | list[int] | None = None, options: dict[str, Any] | None = None, ) -> tuple[ObsType, dict[str, Any]]: """Reset the base environment and render a frame to the screen.""" result = super().reset(seed=seed, options=options) self._render_frame() return result def _render_frame(self): """Fetch the last frame from the base environment and render it to the screen.""" 
try: import pygame except ImportError: raise DependencyNotInstalled( "pygame is not installed, run `pip install gymnasium[classic-control]`" ) assert self.env.render_mode is not None if self.env.render_mode.endswith("_last"): subenv_renders = self.env.render() assert isinstance(subenv_renders, list) subenv_renders = subenv_renders[-1] else: subenv_renders = self.env.render() assert subenv_renders is not None assert len(subenv_renders) == self.num_envs assert all( isinstance(render, np.ndarray) for render in subenv_renders ), f"Expected `env.render()` to return a numpy array, actually returned {[type(render) for render in subenv_renders]}" subenv_renders = np.array(subenv_renders, dtype=np.uint8) subenv_renders = np.transpose(subenv_renders, axes=(0, 2, 1, 3)) # shape = (num envs, width, height, channels) if self.screen_size is None: self.screen_size = subenv_renders.shape[1:3] if self.scaled_subenv_size is None: subenv_size = subenv_renders.shape[1:3] width_ratio = subenv_size[0] / self.screen_size[0] height_ratio = subenv_size[1] / self.screen_size[1] num_rows, num_cols = 1, 1 while num_rows * num_cols < self.num_envs: row_ratio = num_rows * height_ratio col_ratio = num_cols * width_ratio if row_ratio == col_ratio: num_rows, num_cols = num_rows + 1, num_cols + 1 elif row_ratio > col_ratio: num_cols += 1 else: num_rows += 1 scaling_factor = min( self.screen_size[0] / (num_cols * subenv_size[0]), self.screen_size[1] / (num_rows * subenv_size[1]), ) assert ( num_cols * subenv_size[0] * scaling_factor == self.screen_size[0] ) or (num_rows * subenv_size[1] * scaling_factor == self.screen_size[1]) self.num_rows = num_rows self.num_cols = num_cols self.scaled_subenv_size = ( int(subenv_size[0] * scaling_factor), int(subenv_size[1] * scaling_factor), ) assert self.num_rows * self.num_cols >= self.num_envs assert self.scaled_subenv_size[0] * self.num_cols <= self.screen_size[0] assert self.scaled_subenv_size[1] * self.num_rows <= self.screen_size[1] # 
print(f'{self.num_envs=}, {self.num_rows=}, {self.num_cols=}, {self.screen_size=}, {self.scaled_subenv_size=}') try: import cv2 except ImportError as e: raise DependencyNotInstalled( 'opencv (cv2) is not installed, run `pip install "gymnasium[other]"`' ) from e merged_rgb_array = np.zeros(self.screen_size + (3,), dtype=np.uint8) cols, rows = np.meshgrid(np.arange(self.num_cols), np.arange(self.num_rows)) for i, col, row in zip(range(self.num_envs), cols.flatten(), rows.flatten()): scaled_render = cv2.resize(subenv_renders[i], self.scaled_subenv_size[::-1]) x = col * self.scaled_subenv_size[0] y = row * self.scaled_subenv_size[1] merged_rgb_array[ x : x + self.scaled_subenv_size[0], y : y + self.scaled_subenv_size[1], ] = scaled_render if self.window is None: pygame.init() pygame.display.init() self.window = pygame.display.set_mode(self.screen_size) if self.clock is None: self.clock = pygame.time.Clock() surf = pygame.surfarray.make_surface(merged_rgb_array) self.window.blit(surf, (0, 0)) pygame.event.pump() self.clock.tick(self.metadata["render_fps"]) pygame.display.flip() def close(self): """Close the rendering window.""" if self.window is not None: import pygame pygame.display.quit() pygame.quit() super().close()
HumanRendering
python
doocs__leetcode
solution/1100-1199/1144.Decrease Elements To Make Array Zigzag/Solution.py
{ "start": 0, "end": 422 }
class ____: def movesToMakeZigzag(self, nums: List[int]) -> int: ans = [0, 0] n = len(nums) for i in range(2): for j in range(i, n, 2): d = 0 if j: d = max(d, nums[j] - nums[j - 1] + 1) if j < n - 1: d = max(d, nums[j] - nums[j + 1] + 1) ans[i] += d return min(ans)
Solution
python
sympy__sympy
sympy/functions/combinatorial/numbers.py
{ "start": 56123, "end": 57712 }
class ____(DefinedFunction): r""" Partition numbers The Partition numbers are a sequence of integers `p_n` that represent the number of distinct ways of representing `n` as a sum of natural numbers (with order irrelevant). The generating function for `p_n` is given by: .. math:: \sum_{n=0}^\infty p_n x^n = \prod_{k=1}^\infty (1 - x^k)^{-1} Examples ======== >>> from sympy import partition, Symbol >>> [partition(n) for n in range(9)] [1, 1, 2, 3, 5, 7, 11, 15, 22] >>> n = Symbol('n', integer=True, negative=True) >>> partition(n) 0 See Also ======== bell, bernoulli, catalan, euler, fibonacci, harmonic, lucas, genocchi, tribonacci References ========== .. [1] https://en.wikipedia.org/wiki/Partition_(number_theory%29 .. [2] https://en.wikipedia.org/wiki/Pentagonal_number_theorem """ is_integer = True is_nonnegative = True @classmethod def eval(cls, n): if n.is_integer is False: raise TypeError("n should be an integer") if n.is_negative is True: return S.Zero if n.is_zero is True or n is S.One: return S.One if n.is_Integer is True: return S(_partition(as_int(n))) def _eval_is_positive(self): if self.args[0].is_nonnegative is True: return True def _eval_Mod(self, q): # Ramanujan's congruences n = self.args[0] for p, rem in [(5, 4), (7, 5), (11, 6)]: if q == p and n % q == rem: return S.Zero
partition
python
fluentpython__example-code-2e
10-dp-1class-func/untyped/strategy_param2.py
{ "start": 2616, "end": 2915 }
class ____(Promotion): """discount for orders with 10 or more distinct items""" def __call__(self, order): distinct_items = {item.product for item in order.cart} if len(distinct_items) >= 10: return order.total() * self.percent / 100 return 0
LargeOrderPromo
python
walkccc__LeetCode
solutions/2472. Maximum Number of Non-overlapping Palindrome Substrings/2472.py
{ "start": 0, "end": 1109 }
class ____: def maxPalindromes(self, s: str, k: int) -> int: n = len(s) # dp[i] := the maximum number of substrings in the first i chars of s dp = [0] * (n + 1) def isPalindrome(l: int, r: int) -> bool: """Returns True is s[i..j) is a palindrome.""" if l < 0: return False while l < r: if s[l] != s[r]: return False l += 1 r -= 1 return True # If a palindrome is a subof another palindrome, then considering # the longer palindrome won't increase the number of non-overlapping # palindromes. So, we only need to consider the shorter one. Also, # considering palindromes with both k length and k + 1 length ensures that # we look for both even and odd length palindromes. for i in range(k, n + 1): dp[i] = dp[i - 1] # Consider palindrome with length k. if isPalindrome(i - k, i - 1): dp[i] = max(dp[i], 1 + dp[i - k]) # Consider palindrome with length k + 1. if isPalindrome(i - k - 1, i - 1): dp[i] = max(dp[i], 1 + dp[i - k - 1]) return dp[n]
Solution
python
google__jax
jax/_src/core.py
{ "start": 48484, "end": 48965 }
class ____: __slots__ = ['prev', 'axis_names'] def __init__(self, axis_names: AxisName | None): self.axis_names = axis_names def __enter__(self): self.prev = trace_ctx.axis_env if self.axis_names is not None: trace_ctx.set_axis_env(self.prev.add_spmd_axis_names(self.axis_names)) def __exit__(self, exc_type, exc_value, traceback): trace_ctx.set_axis_env(self.prev) add_spmd_axis_names = AddSpmdAxisNamesContextManager
AddSpmdAxisNamesContextManager
python
tensorflow__tensorflow
tensorflow/python/ops/ragged/ragged_reduce_op_test.py
{ "start": 1521, "end": 27060 }
class ____(test_util.TensorFlowTestCase, parameterized.TestCase): @parameterized.parameters( #========================================================================= # Docstring examples. RaggedTensor for testing is: # [[3, 1, 4], # [1, 5, ], # [9, ], # [2, 6 ]] #========================================================================= # keepdims=True dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=0, keepdims=False, expected=[15, 12, 4] # = [3+1+9+2, 1+5+6, 4] ), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=-2, keepdims=False, expected=[15, 12, 4] # = [3+1+9+2, 1+5+6, 4] ), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=1, keepdims=False, expected=[8, 6, 9, 8] # = [3+1+4, 1+5, 9, 2+6] ), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=-1, keepdims=False, expected=[8, 6, 9, 8] # = [3+1+4, 1+5, 9, 2+6] ), dict( ragged_reduce_op=ragged_math_ops.reduce_prod, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=0, keepdims=False, expected=[54, 30, 4] # = [3*1*9*2, 1*5*6, 4] ), dict( ragged_reduce_op=ragged_math_ops.reduce_prod, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=1, keepdims=False, expected=[12, 5, 9, 12] # = [3*1*4, 1*5, 9, 2*6] ), dict( ragged_reduce_op=ragged_math_ops.reduce_min, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=0, keepdims=False, expected=[1, 1, 4] # = [min(3, 1, 9, 2), min(1, 5, 6), 4] ), dict( ragged_reduce_op=ragged_math_ops.reduce_min, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=1, keepdims=False, expected=[1, 1, 9, 2] # = [min(3, 1, 4), min(1, 5), 9, min(2, 6)] ), dict( ragged_reduce_op=ragged_math_ops.reduce_max, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=0, keepdims=False, expected=[9, 6, 4] # = [max(3, 1, 9, 2), max(1, 5, 6), 4] ), dict( ragged_reduce_op=ragged_math_ops.reduce_max, rt_input=[[3, 1, 4], [1, 5], [9], [2, 
6]], axis=1, keepdims=False, expected=[4, 5, 9, 6] # = [max(3, 1, 4), max(1, 5), 9, max(2, 6)] ), dict( ragged_reduce_op=ragged_math_ops.reduce_mean, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=0, keepdims=False, expected=[3.75, 4, 4] # = [mean(3, 1, 9, 2), mean(1, 5, 6), 4] ), dict( ragged_reduce_op=ragged_math_ops.reduce_variance, rt_input=[[3, 1, 4], [1, 1], [9], [2, 1]], axis=0, keepdims=False, expected=[9.6875, 0.0, 0.0]), dict( ragged_reduce_op=ragged_math_ops.reduce_std, rt_input=[[3, 1, 4], [3, 1], [2], [2, 1]], axis=0, keepdims=False, expected=[0.5, 0., 0.]), dict( ragged_reduce_op=ragged_math_ops.reduce_any, rt_input=[[True, True], [True, True, False, True], [False, True]], axis=0, keepdims=False, expected=[True, True, False, True]), dict( ragged_reduce_op=ragged_math_ops.reduce_any, rt_input=[[True, True], [True, True, False, True], [False, True]], axis=1, keepdims=False, expected=[True, True, True]), dict( ragged_reduce_op=ragged_math_ops.reduce_all, rt_input=[[True, True], [True, True, False, True], [False, True]], axis=0, keepdims=False, expected=[False, True, False, True]), dict( ragged_reduce_op=ragged_math_ops.reduce_all, rt_input=[[True, True], [True, True, False, True], [False, True]], axis=1, keepdims=False, expected=[True, False, False]), # keepdims=True dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=0, keepdims=True, expected=[[15, 12, 4]] # = [[3+1+9+2, 1+5+6, 4]] ), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=-2, keepdims=True, expected=[[15, 12, 4]] # = [[3+1+9+2, 1+5+6, 4]] ), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=1, keepdims=True, expected=[[8], [6], [9], [8]] # = [[3+1+4], [1+5], [9], [2+6]] ), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=-1, keepdims=True, expected=[[8], [6], [9], [8]] # = [[3+1+4], [1+5], [9], 
[2+6]] ), dict( ragged_reduce_op=ragged_math_ops.reduce_prod, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=0, keepdims=True, expected=[[54, 30, 4]] # = [[3*1*9*2, 1*5*6, 4]] ), dict( ragged_reduce_op=ragged_math_ops.reduce_prod, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=1, keepdims=True, expected=[[12], [5], [9], [12]] # = [[3*1*4], [1*5], [9], [2*6]] ), dict( ragged_reduce_op=ragged_math_ops.reduce_min, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=0, keepdims=True, expected=[[1, 1, 4]] # = [[min(3, 1, 9, 2), min(1, 5, 6), 4]] ), dict( ragged_reduce_op=ragged_math_ops.reduce_min, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=1, keepdims=True, expected=[[1], [1], [9], [2]] # = [[min(3, 1, 4)], [min(1, 5)], [9], [min(2, 6)]] ), dict( ragged_reduce_op=ragged_math_ops.reduce_max, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=0, keepdims=True, expected=[[9, 6, 4]] # = [[max(3, 1, 9, 2), max(1, 5, 6), 4]] ), dict( ragged_reduce_op=ragged_math_ops.reduce_max, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=1, keepdims=True, expected=[[4], [5], [9], [6]] # = [[max(3, 1, 4)], [max(1, 5)], [9], [max(2, 6)]] ), dict( ragged_reduce_op=ragged_math_ops.reduce_mean, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=0, keepdims=True, expected=[[3.75, 4, 4]] # = [[mean(3, 1, 9, 2), mean(1, 5, 6), 4]] ), dict( ragged_reduce_op=ragged_math_ops.reduce_variance, rt_input=[[3, 1, 4], [1, 1], [9], [2, 1]], axis=0, keepdims=True, expected=[[9.6875, 0., 0.]]), dict( ragged_reduce_op=ragged_math_ops.reduce_std, rt_input=[[3, 1, 4], [3, 1], [2], [2, 1]], axis=0, keepdims=True, expected=[[0.5, 0., 0.]]), dict( ragged_reduce_op=ragged_math_ops.reduce_any, rt_input=[[True, True], [True, True, False, True], [False, True]], axis=0, keepdims=True, expected=[[True, True, False, True]]), dict( ragged_reduce_op=ragged_math_ops.reduce_any, rt_input=[[True, True], [True, True, False, True], [False, True]], axis=1, keepdims=True, expected=[[True], [True], [True]]), dict( 
ragged_reduce_op=ragged_math_ops.reduce_all, rt_input=[[True, True], [True, True, False, True], [False, True]], axis=0, keepdims=True, expected=[[False, True, False, True]]), dict( ragged_reduce_op=ragged_math_ops.reduce_all, rt_input=[[True, True], [True, True, False, True], [False, True]], axis=1, keepdims=True, expected=[[True], [False], [False]]), #========================================================================= # Examples with the following RaggedTensor (ragged_rank=1): # [[0, 1, 2, 3], # [4 ], # [ ], # [5, 6 ], # [7 ], # [8, 9 ]] #========================================================================= # axis=None dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=None, keepdims=False, expected=0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9), dict( ragged_reduce_op=ragged_math_ops.reduce_prod, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=None, keepdims=False, expected=0 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9), dict( ragged_reduce_op=ragged_math_ops.reduce_min, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=None, keepdims=False, expected=min(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)), dict( ragged_reduce_op=ragged_math_ops.reduce_max, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=None, keepdims=False, expected=max(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)), dict( ragged_reduce_op=ragged_math_ops.reduce_mean, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=None, keepdims=False, expected=mean(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)), dict( ragged_reduce_op=ragged_math_ops.reduce_variance, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=None, keepdims=False, expected=variance(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)), dict( ragged_reduce_op=ragged_math_ops.reduce_std, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=None, keepdims=False, expected=std(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)), # axis=0 dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[0, 1, 2, 3], [4], [], 
[5, 6], [7], [8, 9]], axis=0, keepdims=False, expected=[0 + 4 + 5 + 7 + 8, 1 + 6 + 9, 2, 3]), dict( ragged_reduce_op=ragged_math_ops.reduce_prod, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=0, keepdims=False, expected=[0 * 4 * 5 * 7 * 8, 1 * 6 * 9, 2, 3]), dict( ragged_reduce_op=ragged_math_ops.reduce_min, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=0, keepdims=False, expected=[min(0, 4, 5, 7, 8), min(1, 6, 9), 2, 3]), dict( ragged_reduce_op=ragged_math_ops.reduce_max, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=0, keepdims=False, expected=[max(0, 4, 5, 7, 8), max(1, 6, 9), 2, 3]), dict( ragged_reduce_op=ragged_math_ops.reduce_mean, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=0, keepdims=False, expected=[mean(0, 4, 5, 7, 8), mean(1, 6, 9), 2, 3]), dict( ragged_reduce_op=ragged_math_ops.reduce_variance, rt_input=[[0, 1, 2, 3], [1], [], [2, 1], [3], [4, 1]], axis=0, keepdims=False, expected=[variance(0, 1, 2, 3, 4), variance(1, 1, 1), 0, 0]), dict( ragged_reduce_op=ragged_math_ops.reduce_std, rt_input=[[1, 1, 2, 3], [1], [], [1, 1], [1], [1, 1]], axis=0, keepdims=False, expected=[std(1, 1, 1, 1, 1), std(1, 1, 1), 0, 0]), # axis=1 # Note: we don't test mean here because it gives a NaN, and this will # cause assertEqual to fail (since NaN != NaN). See testMeanNan(). 
dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=1, keepdims=False, expected=[0 + 1 + 2 + 3, 4, 0, 5 + 6, 7, 8 + 9]), dict( ragged_reduce_op=ragged_math_ops.reduce_prod, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=1, keepdims=False, expected=[0 * 1 * 2 * 3, 4, 1, 5 * 6, 7, 8 * 9]), dict( ragged_reduce_op=ragged_math_ops.reduce_min, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=1, keepdims=False, expected=[min(0, 1, 2, 3), 4, _MAX_INT32, min(5, 6), 7, min(8, 9)]), dict( ragged_reduce_op=ragged_math_ops.reduce_max, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=1, keepdims=False, expected=[max(0, 1, 2, 3), 4, _MIN_INT32, max(5, 6), 7, max(8, 9)]), #========================================================================= # Examples with ragged_rank=2: # [[[1, 2], [ ], [3, 4, 5]], # [[6, 7], [ ], [8 ]], # [ ], # [[9 ] ]] #========================================================================= # keepdims=False dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=[], keepdims=False, expected=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]]), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=None, keepdims=False, expected=sum([1, 2, 3, 4, 5, 6, 7, 8, 9])), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=0, keepdims=False, expected=[[1 + 6 + 9, 2 + 7], [], [3 + 8, 4, 5]]), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=1, keepdims=False, expected=[[1 + 3, 2 + 4, 5], [6 + 8, 7], [], [9]]), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=2, keepdims=False, expected=[[1 + 2, 0, 3 + 4 + 5], [6 + 7, 0, 8], 
[], [9]]), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=[0, 1], keepdims=False, expected=[1 + 3 + 6 + 8 + 9, 2 + 4 + 7, 5]), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=[0, 2], keepdims=False, expected=[1 + 6 + 9 + 2 + 7, 0, 3 + 8 + 4 + 5]), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=[1, 2], keepdims=False, expected=[1 + 2 + 3 + 4 + 5, 6 + 7 + 8, 0, 9]), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=[0, 1, 2], keepdims=False, expected=sum([1, 2, 3, 4, 5, 6, 7, 8, 9])), # keepdims=True dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=[], keepdims=True, expected=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]]), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=None, keepdims=True, expected=[[[sum([1, 2, 3, 4, 5, 6, 7, 8, 9])]]]), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=0, keepdims=True, expected=[[[1 + 6 + 9, 2 + 7], [], [3 + 8, 4, 5]]]), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=1, keepdims=True, expected=[[[1 + 3, 2 + 4, 5]], [[6 + 8, 7]], [[]], [[9]]]), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=2, keepdims=True, expected=[[[1 + 2], [0], [3 + 4 + 5]], [[6 + 7], [0], [8]], [], [[9]]]), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=[0, 1], keepdims=True, expected=[[[1 + 3 + 6 + 8 + 9, 2 + 4 + 7, 
5]]]), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=[0, 2], keepdims=True, expected=[[[1 + 6 + 9 + 2 + 7], [0], [3 + 8 + 4 + 5]]]), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=[1, 2], keepdims=True, expected=[[[1 + 2 + 3 + 4 + 5]], [[6 + 7 + 8]], [[0]], [[9]]]), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=[0, 1, 2], keepdims=True, expected=[[[sum([1, 2, 3, 4, 5, 6, 7, 8, 9])]]]), #========================================================================= # Examples for ragged_reduce_mean ragged_rank=2: # [[[1, 2], [3, 4, 5]], # [[6, 7], [8 ]], # [[9 ] ]] #========================================================================= dict( ragged_reduce_op=ragged_math_ops.reduce_mean, rt_input=[[[1, 2], [3, 4, 5]], [[6, 7], [8]], [[9]]], axis=0, keepdims=False, expected=[[mean(1, 6, 9), mean(2, 7)], [mean(3, 8), 4, 5]]), dict( ragged_reduce_op=ragged_math_ops.reduce_mean, rt_input=[[[1, 2], [3, 4, 5]], [[6, 7], [8]], [[9]]], axis=1, keepdims=False, expected=[[mean(1, 3), mean(2, 4), 5], [mean(6, 8), 7], [9]]), dict( ragged_reduce_op=ragged_math_ops.reduce_mean, rt_input=[[[1, 2], [3, 4, 5]], [[6, 7], [8]], [[9]]], axis=2, keepdims=False, expected=[[mean(1, 2), mean(3, 4, 5)], [mean(6, 7), 8], [9]]), dict( ragged_reduce_op=ragged_math_ops.reduce_variance, rt_input=[[[6, 2], [3, 4, 5]], [[6, 7], [8]], [[9]]], axis=0, keepdims=False, expected=[[variance(6, 6, 9), variance(2, 7)], [variance(3, 8), 0., 0.]]), dict( ragged_reduce_op=ragged_math_ops.reduce_variance, rt_input=[[[6, 2], [3, 4, 5]], [[6, 7], [8]], [[9]]], axis=1, keepdims=False, expected=[[variance(6, 3), variance(2, 4), 0.], [variance(6, 8), 0.], [0.]]), dict( ragged_reduce_op=ragged_math_ops.reduce_variance, rt_input=[[[6, 2], [6, 9, 9]], [[6, 7], [8]], [[9]]], axis=2, keepdims=False, 
expected=[[variance(6, 2), variance(6, 9, 9)], [variance(6, 7), 0.], [0.]]), dict( ragged_reduce_op=ragged_math_ops.reduce_std, rt_input=[[[6, 2], [3, 4, 5]], [[6, 7], [8]], [[9]]], axis=0, keepdims=False, expected=[[std(6, 6, 9), std(2, 7)], [std(3, 8), 0., 0.]]), dict( ragged_reduce_op=ragged_math_ops.reduce_std, rt_input=[[[6, 2], [3, 4, 5]], [[6, 7], [8]], [[9]]], axis=1, keepdims=False, expected=[[std(6, 3), std(2, 4), 0.], [std(6, 8), 0.], [0.]]), dict( ragged_reduce_op=ragged_math_ops.reduce_std, rt_input=[[[6, 2], [6, 9, 9]], [[6, 7], [8]], [[9]]], axis=2, keepdims=False, expected=[[std(6, 2), std(6, 9, 9)], [std(6, 7), 0.], [0.]]), # Test case for GitHub issue 27497, multiple negative axes. dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=[-2, -1], keepdims=False, expected=[1 + 2 + 3 + 4 + 5, 6 + 7 + 8, 0, 9]), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=[-3, -2, -1], keepdims=False, expected=sum([1, 2, 3, 4, 5, 6, 7, 8, 9])), # Test case for GitHub issue 56222, small variance dict( ragged_reduce_op=ragged_math_ops.reduce_variance, rt_input=[[[0.214441], [0.214441], [0.214441], [0.214441], [0.214441], [0.214441], [0.214441]]], axis=[1], keepdims=False, expected=[[0.0]], ), dict( ragged_reduce_op=ragged_math_ops.reduce_std, rt_input=[[[0.214441], [0.214441], [0.214441], [0.214441], [0.214441], [0.214441], [0.214441]]], axis=[1], keepdims=False, expected=[[0.0]], ), ) def testReduce(self, ragged_reduce_op, rt_input, axis, keepdims, expected): rt_input = ragged_factory_ops.constant(rt_input) reduced = ragged_reduce_op(rt_input, axis, keepdims=keepdims) self.assertAllEqual(reduced, expected) def testReduceKeepsInnerDimensionShape(self): # Test for bug [b/139823356]. 
rt = ragged_factory_ops.constant([[[[1, 1]]]], ragged_rank=2) self.assertEqual(rt.shape.as_list(), [1, None, None, 2]) reduced = ragged_math_ops.reduce_sum(rt, axis=2) self.assertEqual(reduced.shape.as_list(), [1, None, 2]) def assertEqualWithNan(self, actual, expected): """Like assertEqual, but NaN==NaN.""" self.assertTrue( ((actual == expected) | (np.isnan(actual) & np.isnan(expected))).all()) def testMeanNan(self): rt_as_list = [[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]] expected = ( np.array([0 + 1 + 2 + 3, 4, 0, 5 + 6, 7, 8 + 9]) / np.array([4, 1, 0, 2, 1, 2])) rt_input = ragged_factory_ops.constant(rt_as_list) reduced = ragged_math_ops.reduce_mean(rt_input, axis=1) self.assertEqualWithNan(self.evaluate(reduced), expected) def testVarianceNan(self): rt_as_list = [[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]] expected = ([ variance(0, 1, 2, 3), variance(4), variance(), variance(5, 6), variance(7), variance(8, 9) ]) rt_input = ragged_factory_ops.constant(rt_as_list) reduced = ragged_math_ops.reduce_variance(rt_input, axis=1) self.assertEqualWithNan(self.evaluate(reduced), expected) def testStdNan(self): rt_as_list = [[0, 1, 1, 0], [4], [], [5, 6], [7], [8, 9]] expected = ([std(0, 1, 1, 0), std(4), std(), std(5, 6), std(7), std(8, 9)]) rt_input = ragged_factory_ops.constant(rt_as_list) reduced = ragged_math_ops.reduce_std(rt_input, axis=1) self.assertEqualWithNan(self.evaluate(reduced), expected) def testMeanWithTensorInputs(self): tensor = [[1.0, 2.0, 3.0], [10.0, 20.0, 30.0]] expected = [2.0, 20.0] reduced = ragged_math_ops.reduce_mean(tensor, axis=1) self.assertAllEqual(reduced, expected) def testVarianceWithTensorInputs(self): tensor = [[6.0, 9.0, 6.0], [60.0, 90.0, 60.0]] expected = [2., 200.] reduced = ragged_math_ops.reduce_variance(tensor, axis=1) self.assertAllEqual(reduced, expected) def testStdWithTensorInputs(self): tensor = [[1.0, 2.0, 2.0, 1.0], [10.0, 20.0, 20.0, 10.0]] expected = [0.5, 5.] 
reduced = ragged_math_ops.reduce_std(tensor, axis=1) self.assertAllEqual(reduced, expected) def testErrors(self): rt_input = ragged_factory_ops.constant([[1, 2, 3], [4, 5]]) axis = array_ops.placeholder_with_default(constant_op.constant([0]), None) if not context.executing_eagerly(): self.assertRaisesRegex(ValueError, r'axis must be known at graph construction time.', ragged_math_ops.reduce_sum, rt_input, axis) self.assertRaisesRegex(TypeError, r'axis must be an int; got str.*', ragged_math_ops.reduce_sum, rt_input, ['x']) if __name__ == '__main__': googletest.main()
RaggedReduceOpsTest
python
lepture__authlib
authlib/jose/rfc7516/models.py
{ "start": 3011, "end": 3769 }
class ____(dict): """Shared header object for JWE. Combines protected header and shared unprotected header together. """ def __init__(self, protected, unprotected): obj = {} if unprotected: obj.update(unprotected) if protected: obj.update(protected) super().__init__(obj) self.protected = protected if protected else {} self.unprotected = unprotected if unprotected else {} def update_protected(self, addition): self.update(addition) self.protected.update(addition) @classmethod def from_dict(cls, obj): if isinstance(obj, cls): return obj return cls(obj.get("protected"), obj.get("unprotected"))
JWESharedHeader
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pylint/eq_without_hash.py
{ "start": 1685, "end": 1913 }
class ____: def __init__(self): self.name = "python" def __eq__(self, other): return isinstance(other, Language) and other.name == self.name def __hash__(self): return hash(self.name)
Language
python
getsentry__sentry
src/sentry/migrations/0993_add_event_id_to_grouphash_metadata.py
{ "start": 155, "end": 1466 }
class ____(CheckedMigration): # This flag is used to mark that a migration shouldn't be automatically run in production. # This should only be used for operations where it's safe to run the migration after your # code has deployed. So this should not be used for most operations that alter the schema # of a table. # Here are some things that make sense to mark as post deployment: # - Large data migrations. Typically we want these to be run manually so that they can be # monitored and not block the deploy for a long period of time while they run. # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to # run this outside deployments so that we don't block them. Note that while adding an index # is a schema change, it's completely safe to run the operation after the code has deployed. # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment is_post_deployment = False dependencies = [ ("sentry", "0992_latestrepoerelease_indexes"), ] operations = [ migrations.AddField( model_name="grouphashmetadata", name="event_id", field=models.CharField(max_length=32, null=True), ), ]
Migration
python
tiangolo__fastapi
tests/test_default_response_class.py
{ "start": 197, "end": 358 }
class ____(JSONResponse): media_type = "application/x-orjson" def render(self, content: Any) -> bytes: return orjson.dumps(content)
ORJSONResponse
python
graphql-python__graphene
graphene/types/tests/test_interface.py
{ "start": 222, "end": 5077 }
class ____(UnmountedType): def get_type(self): return MyType def test_generate_interface(): class MyInterface(Interface): """Documentation""" assert MyInterface._meta.name == "MyInterface" assert MyInterface._meta.description == "Documentation" assert MyInterface._meta.fields == {} def test_generate_interface_with_meta(): class MyFirstInterface(Interface): pass class MyInterface(Interface): class Meta: name = "MyOtherInterface" description = "Documentation" interfaces = [MyFirstInterface] assert MyInterface._meta.name == "MyOtherInterface" assert MyInterface._meta.description == "Documentation" assert MyInterface._meta.interfaces == [MyFirstInterface] def test_generate_interface_with_fields(): class MyInterface(Interface): field = Field(MyType) assert "field" in MyInterface._meta.fields def test_ordered_fields_in_interface(): class MyInterface(Interface): b = Field(MyType) a = Field(MyType) field = MyScalar() asa = Field(MyType) assert list(MyInterface._meta.fields) == ["b", "a", "field", "asa"] def test_generate_interface_unmountedtype(): class MyInterface(Interface): field = MyScalar() assert "field" in MyInterface._meta.fields assert isinstance(MyInterface._meta.fields["field"], Field) def test_generate_interface_inherit_abstracttype(): class MyAbstractType: field1 = MyScalar() class MyInterface(Interface, MyAbstractType): field2 = MyScalar() assert list(MyInterface._meta.fields) == ["field1", "field2"] assert [type(x) for x in MyInterface._meta.fields.values()] == [Field, Field] def test_generate_interface_inherit_interface(): class MyBaseInterface(Interface): field1 = MyScalar() class MyInterface(MyBaseInterface): field2 = MyScalar() assert MyInterface._meta.name == "MyInterface" assert list(MyInterface._meta.fields) == ["field1", "field2"] assert [type(x) for x in MyInterface._meta.fields.values()] == [Field, Field] def test_generate_interface_inherit_abstracttype_reversed(): class MyAbstractType: field1 = MyScalar() class MyInterface(MyAbstractType, 
Interface): field2 = MyScalar() assert list(MyInterface._meta.fields) == ["field1", "field2"] assert [type(x) for x in MyInterface._meta.fields.values()] == [Field, Field] def test_resolve_type_default(): class MyInterface(Interface): field2 = String() class MyTestType(ObjectType): class Meta: interfaces = (MyInterface,) class Query(ObjectType): test = Field(MyInterface) def resolve_test(_, info): return MyTestType() schema = Schema(query=Query, types=[MyTestType]) result = schema.execute( """ query { test { __typename } } """ ) assert not result.errors assert result.data == {"test": {"__typename": "MyTestType"}} def test_resolve_type_custom(): class MyInterface(Interface): field2 = String() @classmethod def resolve_type(cls, instance, info): if instance["type"] == 1: return MyTestType1 return MyTestType2 class MyTestType1(ObjectType): class Meta: interfaces = (MyInterface,) class MyTestType2(ObjectType): class Meta: interfaces = (MyInterface,) class Query(ObjectType): test = Field(MyInterface) def resolve_test(_, info): return {"type": 1} schema = Schema(query=Query, types=[MyTestType1, MyTestType2]) result = schema.execute( """ query { test { __typename } } """ ) assert not result.errors assert result.data == {"test": {"__typename": "MyTestType1"}} def test_resolve_type_custom_interferes(): class MyInterface(Interface): field2 = String() type_ = String(name="type") def resolve_type_(_, info): return "foo" class MyTestType1(ObjectType): class Meta: interfaces = (MyInterface,) class MyTestType2(ObjectType): class Meta: interfaces = (MyInterface,) class Query(ObjectType): test = Field(MyInterface) def resolve_test(_, info): return MyTestType1() schema = Schema(query=Query, types=[MyTestType1, MyTestType2]) result = schema.execute( """ query { test { __typename type } } """ ) assert not result.errors assert result.data == {"test": {"__typename": "MyTestType1", "type": "foo"}}
MyScalar
python
tensorflow__tensorflow
tensorflow/python/distribute/coordinator/cluster_coordinator_test.py
{ "start": 17257, "end": 19252 }
class ____(test.TestCase, parameterized.TestCase): @classmethod def setUpClass(cls): super(CoordinatorContextTest, cls).setUpClass() cls.coordinator = make_coordinator(num_workers=5, num_ps=2) cls.strategy = cls.coordinator.strategy def testWorkerIndexDatasetFn(self): def dataset_fn(context): del context dataset = dataset_ops.DatasetV2.range(10) worker_index = coordinator_context.get_current_worker_index() dataset = dataset.shard( num_shards=self.strategy._extended._num_workers, index=worker_index, ) return dataset @def_function.function def per_worker_dataset_fn(): return self.strategy.distribute_datasets_from_function(dataset_fn) @def_function.function def train_fn(iterator): total = constant_op.constant(0, dtype=dtypes.int64) for batch in iterator: total += math_ops.reduce_sum(batch) return total per_worker_dataset = self.coordinator.create_per_worker_dataset( per_worker_dataset_fn) with self.strategy.scope(): iterator = iter(per_worker_dataset) ret_vals = [] # Use private APIs to schedule in tagged queues to ensure each worker # executes only one closure. for ix in range(5): closure = coordinator_lib.Closure( train_fn, self.coordinator._cluster.closure_queue._cancellation_mgr, args=(iterator,)) ret = closure.build_output_remote_value() # The queue doesn't keep track of tagged closures as inflight by # default, so hack around this for the test. self.coordinator._cluster.closure_queue._inflight_closure_count += 1 self.coordinator._cluster.closure_queue.put(closure, tag=ix) ret_vals.append(ret) self.coordinator.join() fetched_vals = [rv.fetch() for rv in ret_vals] expected_results = [5, 7, 9, 11, 13] self.assertAllClose(sorted(fetched_vals), expected_results)
CoordinatorContextTest
python
marshmallow-code__marshmallow
src/marshmallow/fields.py
{ "start": 35738, "end": 36829 }
class ____(Number[float]): """A double as an IEEE-754 double precision string. :param allow_nan: If `True`, `NaN`, `Infinity` and `-Infinity` are allowed, even though they are illegal according to the JSON specification. :param as_string: If `True`, format the value as a string. :param kwargs: The same keyword arguments that :class:`Number` receives. """ num_type = float #: Default error messages. default_error_messages = { "special": "Special numeric values (nan or infinity) are not permitted." } def __init__( self, *, allow_nan: bool = False, as_string: bool = False, **kwargs: Unpack[_BaseFieldKwargs], ): self.allow_nan = allow_nan super().__init__(as_string=as_string, **kwargs) def _validated(self, value: typing.Any) -> float: num = super()._validated(value) if self.allow_nan is False: if math.isnan(num) or num == float("inf") or num == float("-inf"): raise self.make_error("special") return num
Float
python
PrefectHQ__prefect
src/integrations/prefect-docker/tests/test_host.py
{ "start": 378, "end": 2807 }
class ____: @pytest.fixture def host_kwargs(self): _host_kwargs = dict( base_url="unix:///var/run/docker.sock", version="1.35", max_pool_size=8, credstore_env=None, client_kwargs={"tls": True}, ) return _host_kwargs @pytest.fixture def docker_host(self, host_kwargs): _docker_host = DockerHost(**host_kwargs) for key, val in host_kwargs.items(): assert getattr(_docker_host, key) == val return _docker_host @pytest.fixture def docker_host_from_env(self, host_kwargs): host_kwargs.pop("base_url") _docker_host = DockerHost(**host_kwargs) for key, val in host_kwargs.items(): assert getattr(_docker_host, key) == val return _docker_host def test_get_client(self, docker_host, mock_ctx_docker_client: MagicMock): with disable_run_logger(): docker_host.get_client() mock_ctx_docker_client.assert_called_once_with( base_url="unix:///var/run/docker.sock", version="1.35", max_pool_size=8, tls=True, ) def test_context_managed_get_client( self, docker_host, mock_ctx_docker_client: MagicMock ): with disable_run_logger(): with docker_host.get_client() as _: mock_ctx_docker_client.assert_called_once_with( base_url="unix:///var/run/docker.sock", version="1.35", max_pool_size=8, tls=True, ) def test_get_client_from_env( self, docker_host_from_env, mock_docker_client_from_env: MagicMock ): with disable_run_logger(): docker_host_from_env.get_client() mock_docker_client_from_env.assert_called_once_with( version="1.35", max_pool_size=8, tls=True, ) def test_context_managed_get_client_from_env( self, docker_host_from_env, mock_docker_client_from_env: MagicMock ): with disable_run_logger(): with docker_host_from_env.get_client() as _: mock_docker_client_from_env.assert_called_once_with( version="1.35", max_pool_size=8, tls=True, )
TestDockerHost
python
pytorch__pytorch
.github/scripts/test_gitutils.py
{ "start": 285, "end": 1002 }
class ____(TestCase): def test_iterator(self, input_: str = "abcdef") -> None: iter_ = PeekableIterator(input_) for idx, c in enumerate(iter_): self.assertEqual(c, input_[idx]) def test_is_iterable(self) -> None: from collections.abc import Iterator iter_ = PeekableIterator("") self.assertTrue(isinstance(iter_, Iterator)) def test_peek(self, input_: str = "abcdef") -> None: iter_ = PeekableIterator(input_) for idx, c in enumerate(iter_): if idx + 1 < len(input_): self.assertEqual(iter_.peek(), input_[idx + 1]) else: self.assertTrue(iter_.peek() is None)
TestPeekableIterator
python
getsentry__sentry
src/sentry/seer/anomaly_detection/types.py
{ "start": 1573, "end": 1697 }
class ____(TypedDict): organization_id: int project_id: NotRequired[int] alert: AlertInSeer
DeleteAlertDataRequest
python
sqlalchemy__sqlalchemy
test/perf/orm2010.py
{ "start": 430, "end": 670 }
class ____(Base): __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(100), nullable=False) type = Column(String(50), nullable=False) __mapper_args__ = {"polymorphic_on": type}
Employee
python
great-expectations__great_expectations
great_expectations/types/__init__.py
{ "start": 8315, "end": 9678 }
class ____(DictDot): def to_json_dict(self) -> Dict[str, JSONValues]: """Returns a JSON-serializable dict representation of the SerializableDictDot. Subclasses must implement this abstract method. Returns: A JSON-serializable dict representation of the SerializableDictDot """ # TODO: <Alex>2/4/2022</Alex> # A reference implementation can be provided, once circular import dependencies, caused by relative locations of # noqa: E501 # FIXME CoP # the "great_expectations/types/__init__.py" and "great_expectations/core/util.py" modules are resolved. # noqa: E501 # FIXME CoP raise NotImplementedError def safe_deep_copy(data, memo=None): """ This method makes a copy of a dictionary, applying deep copy to attribute values, except for non-pickleable objects. """ # noqa: E501 # FIXME CoP if isinstance(data, (pd.Series, pd.DataFrame)) or ( pyspark.pyspark and isinstance(data, pyspark.DataFrame) ): return data if isinstance(data, (list, tuple)): return [safe_deep_copy(data=element, memo=memo) for element in data] if isinstance(data, dict): return {key: safe_deep_copy(data=value, memo=memo) for key, value in data.items()} # noinspection PyArgumentList return copy.deepcopy(data, memo)
SerializableDictDot
python
pydantic__pydantic
tests/mypy/modules/plugin_success_baseConfig.py
{ "start": 2366, "end": 2507 }
class ____(BaseModel): x: int y: ClassVar[int] = 1 ClassVarModel(x=1) @dataclass(config=dict(validate_assignment=True))
ClassVarModel
python
Lightning-AI__lightning
tests/tests_pytorch/loggers/test_all.py
{ "start": 8216, "end": 12929 }
class ____(Logger): @property def name(self): return "" @property def version(self): return None def log_metrics(self, metrics, step=None) -> None: pass def log_hyperparams(self, params, *args, **kwargs) -> None: pass @mock.patch.dict(os.environ, {}) @pytest.mark.parametrize("logger_class", [*ALL_LOGGER_CLASSES_WO_NEPTUNE, CustomLoggerWithoutExperiment]) @RunIf(skip_windows=True) def test_logger_initialization(tmp_path, monkeypatch, logger_class): """Test that loggers get replaced by dummy loggers on global rank > 0 and that the experiment object is available at the right time in Trainer.""" _patch_comet_atexit(monkeypatch) try: _test_logger_initialization(tmp_path, logger_class) except (ImportError, ModuleNotFoundError): pytest.xfail(f"multi-process test requires {logger_class.__class__} dependencies to be installed.") def _test_logger_initialization(tmp_path, logger_class): logger_args = _get_logger_args(logger_class, tmp_path) logger = logger_class(**logger_args) callbacks = [LazyInitExperimentCheck()] if not isinstance(logger, CustomLoggerWithoutExperiment): callbacks.append(RankZeroLoggerCheck()) model = BoringModel() trainer = Trainer( logger=logger, default_root_dir=tmp_path, strategy="ddp_spawn", accelerator="cpu", devices=2, max_steps=1, callbacks=callbacks, ) trainer.fit(model) @mock.patch.dict(os.environ, {}) @mock.patch("lightning.pytorch.loggers.mlflow._get_resolve_tags", Mock()) def test_logger_with_prefix_all(mlflow_mock, wandb_mock, comet_mock, neptune_mock, monkeypatch, tmp_path): """Test that prefix is added at the beginning of the metric keys.""" prefix = "tmp" # Comet _patch_comet_atexit(monkeypatch) logger = _instantiate_logger(CometLogger, save_dir=tmp_path, prefix=prefix) logger.log_metrics({"test": 1.0}, step=0) logger.experiment.__internal_api__log_metrics__.assert_called_once_with( {"test": 1.0}, epoch=None, step=0, prefix=prefix, framework="pytorch-lightning" ) # MLflow Metric = mlflow_mock.entities.Metric logger = 
_instantiate_logger(MLFlowLogger, save_dir=tmp_path, prefix=prefix) logger.log_metrics({"test": 1.0}, step=0) logger.experiment.log_batch.assert_called_once_with( run_id=ANY, metrics=[Metric(key="tmp-test", value=1.0, timestamp=ANY, step=0)] ) # Neptune logger = _instantiate_logger(NeptuneLogger, api_key="test", project="project", save_dir=tmp_path, prefix=prefix) assert logger.experiment.__getitem__.call_count == 0 logger.log_metrics({"test": 1.0}, step=0) assert logger.experiment.__getitem__.call_count == 1 logger.experiment.__getitem__.assert_called_with("tmp/test") logger.experiment.__getitem__().append.assert_called_once_with(1.0, step=0) # TensorBoard if _TENSORBOARD_AVAILABLE: import torch.utils.tensorboard as tb else: import tensorboardX as tb monkeypatch.setattr(tb, "SummaryWriter", Mock()) logger = _instantiate_logger(TensorBoardLogger, save_dir=tmp_path, prefix=prefix) logger.log_metrics({"test": 1.0}, step=0) logger.experiment.add_scalar.assert_called_once_with("tmp-test", 1.0, 0) # WandB logger = _instantiate_logger(WandbLogger, save_dir=tmp_path, prefix=prefix) wandb_mock.run = None wandb_mock.init().step = 0 logger.log_metrics({"test": 1.0}, step=0) logger.experiment.log.assert_called_once_with({"tmp-test": 1.0, "trainer/global_step": 0}) @mock.patch("lightning.pytorch.loggers.mlflow._get_resolve_tags", Mock()) def test_logger_default_name(mlflow_mock, monkeypatch, tmp_path): """Test that the default logger name is lightning_logs.""" # CSV logger = CSVLogger(save_dir=tmp_path) assert logger.name == "lightning_logs" # TensorBoard if _TENSORBOARD_AVAILABLE: import torch.utils.tensorboard as tb else: import tensorboardX as tb monkeypatch.setattr(tb, "SummaryWriter", Mock()) logger = _instantiate_logger(TensorBoardLogger, save_dir=tmp_path) assert logger.name == "lightning_logs" # MLflow client = mlflow_mock.tracking.MlflowClient() client.get_experiment_by_name.return_value = None logger = _instantiate_logger(MLFlowLogger, save_dir=tmp_path) _ = 
logger.experiment logger._mlflow_client.create_experiment.assert_called_with(name="lightning_logs", artifact_location=ANY) # on MLFLowLogger `name` refers to the experiment id # assert logger.experiment.get_experiment(logger.name).name == "lightning_logs"
CustomLoggerWithoutExperiment
python
django__django
tests/i18n/test_compilation.py
{ "start": 10563, "end": 12531 }
class ____(MessageCompilationTests): @cached_property def msgfmt_version(self): # Note that msgfmt is installed via GNU gettext tools, hence the msgfmt # version should align to gettext. out, err, status = popen_wrapper( ["msgfmt", "--version"], stdout_encoding=DEFAULT_LOCALE_ENCODING, ) m = re.search(r"(\d+)\.(\d+)\.?(\d+)?", out) return tuple(int(d) for d in m.groups() if d is not None) def test_error_reported_by_msgfmt(self): # po file contains wrong po formatting. with self.assertRaises(CommandError): call_command("compilemessages", locale=["ja"], verbosity=0) # It should still fail a second time. with self.assertRaises(CommandError): call_command("compilemessages", locale=["ja"], verbosity=0) def test_msgfmt_error_including_non_ascii(self): # po file contains invalid msgstr content (triggers non-ascii error # content). Make sure the output of msgfmt is unaffected by the current # locale. env = os.environ.copy() env.update({"LC_ALL": "C"}) with mock.patch( "django.core.management.utils.run", lambda *args, **kwargs: run(*args, env=env, **kwargs), ): stderr = StringIO() with self.assertRaisesMessage( CommandError, "compilemessages generated one or more errors" ): call_command( "compilemessages", locale=["ko"], stdout=StringIO(), stderr=stderr ) if self.msgfmt_version < (0, 25): error_msg = "' cannot start a field name" else: error_msg = ( "a field name starts with a character that is not alphanumerical " "or underscore" ) self.assertIn(error_msg, stderr.getvalue())
CompilationErrorHandling
python
eventlet__eventlet
tests/mock.py
{ "start": 74003, "end": 75697 }
class ____: def __init__(self, spec, spec_set=False, parent=None, name=None, ids=None, instance=False): self.spec = spec self.ids = ids self.spec_set = spec_set self.parent = parent self.instance = instance self.name = name FunctionTypes = ( # python function type(create_autospec), # instance method type(ANY.__eq__), # unbound method type(_ANY.__eq__), ) FunctionAttributes = { 'func_closure', 'func_code', 'func_defaults', 'func_dict', 'func_doc', 'func_globals', 'func_name', } file_spec = None def mock_open(mock=None, read_data=''): """ A helper function to create a mock to replace the use of `open`. It works for `open` called directly or used as a context manager. The `mock` argument is the mock object to configure. If `None` (the default) then a `MagicMock` will be created for you, with the API limited to methods or attributes available on standard file handles. `read_data` is a string for the `read` method of the file handle to return. This is an empty string by default. """ global file_spec if file_spec is None: # set on first use if inPy3k: import _io file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) else: file_spec = file if mock is None: mock = MagicMock(name='open', spec=open) handle = MagicMock(spec=file_spec) handle.write.return_value = None handle.__enter__.return_value = handle handle.read.return_value = read_data mock.return_value = handle return mock
_SpecState
python
pytorch__pytorch
test/test_functionalization.py
{ "start": 2806, "end": 96834 }
class ____(TestCase): crossref = False def get_logs(self, func, *inpts, reapply_views=False, run_reinplace=False): inpts_clone = tree_map_only(torch.Tensor, torch.clone, inpts) traced_f = make_fx( _functionalize(func, reapply_views=reapply_views, crossref=self.crossref) )(*inpts) if run_reinplace: traced_f = reinplace(traced_f, *inpts_clone) return traced_f.code def assert_functionalization( self, func, *inpts, reapply_views=False, mutated_input_metadata=False ): clones1 = tree_map_only(torch.Tensor, torch.clone, inpts) clones2 = tree_map_only(torch.Tensor, torch.clone, inpts) clones3 = tree_map_only(torch.Tensor, torch.clone, inpts) # Compare outputs (and mutated inputs), with and without functionalization. out_ref = func(*inpts) out_functional = _functionalize( func, reapply_views=reapply_views, crossref=self.crossref )(*clones1) # The reinplacing pass is only valid to run with reapply_views=True. functional_func = make_fx( _functionalize(func, reapply_views=True, crossref=self.crossref) )(*clones2) reinplace_func = reinplace(functional_func, *clones2) # NOTE: for now, need to pass in fresh inputs here, because make_fx # will directly mutate the inputs that you trace with. # Once this is fixed we can clean this up. out_reinplace = reinplace_func(*clones3) # functionalize() deficiency: input metadata mutations aren't propagated properly, # so we just need to skip checks here for the tests that exercise that. 
if not mutated_input_metadata: flat_inpts = pytree.tree_leaves(inpts) flat_clones1 = pytree.tree_leaves(clones1) flat_clones3 = pytree.tree_leaves(clones3) for inpt, input_clone, input_clone3 in zip( flat_inpts, flat_clones1, flat_clones3 ): self.assertEqual( inpt, input_clone ) # input mutations should still occur self.assertEqual(inpt, input_clone3) # Handle tests with multi-tensor outputs if isinstance(out_ref, tuple): out_refs, out_functionals, out_reinplaces = ( list(out_ref), list(out_functional), list(out_reinplace), ) else: out_refs, out_functionals, out_reinplaces = ( [out_ref], [out_functional], [out_reinplace], ) for out_ref_, out_functional_, out_reinplace_ in zip( out_refs, out_functionals, out_reinplaces ): self.assertEqual(out_ref_, out_functional_) self.assertEqual(out_ref_, out_reinplace_) def test_save_for_backwards_segfault(self): inp = torch._to_functional_tensor( LoggingTensor(torch.randn(2, 2)) ).requires_grad_(True) inp.exp() def test_multiple_views_of_same_base(self): def f(x): y = x.view(-1) z = x.view(-1) x.add_(1) # y should have been updated. y2 = y + 1 # z should have been updated too. z2 = z + 1 return z2 self.assert_functionalization(f, torch.ones(4)) def test_freeze(self): def f(x): y = x.clone() z = y[0] torch._freeze_functional_tensor(y) x.add_(1) self.assertRaises(RuntimeError, lambda: y.add_(1)) self.assertRaises(RuntimeError, lambda: z.add_(1)) return z _functionalize(f, reapply_views=True, crossref=self.crossref)(torch.ones(3, 3)) def test_copy_stride_mismatch(self): def f(x): y = torch.empty_strided((2, 2), (5, 1)) y.copy_(x) return y r = _functionalize(f, reapply_views=True, crossref=self.crossref)( torch.ones(2, 2) ) self.assertEqual(r.stride(), (5, 1)) def test_set_(self): def f(x): y = torch.ones(2) y.set_(x.storage()) return y # We should probably get the crossref test to work, # but fixing it for Storage() objects is annoying. 
r = _functionalize(f, reapply_views=True, crossref=False)(torch.ones(2)) self.assertEqual(str(r.device), "cpu") def test_advanced_indexing(self): def f(): x = torch.zeros(3, 3) idx = torch.tensor([0]) val = torch.ones(3, 1) x[:, idx] = val return x self.assert_functionalization(f) def test_view_clone_view_inplace(self): def f(input): shape = [1, 1024, 128, 128] input_reshaped = input.view(shape) out = input_reshaped.clone() r = out.view(input.shape) r.relu_() return r def g(x): loss = f(x).sum() import torch.fx.traceback as fx_traceback from torch._functorch.aot_autograd import ( setup_stacktrace_preservation_hooks, ) setup_stacktrace_preservation_hooks([loss.grad_fn]) with fx_traceback.preserve_node_meta(): loss.backward() return x.grad with torch.autograd.detect_anomaly(check_nan=False): logs = self.get_logs(g, torch.ones(16, 64, 128, 128, requires_grad=True)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): view_copy = torch.ops.aten.view_copy.default(arg0_1, [1, 1024, 128, 128]); arg0_1 = None clone = torch.ops.aten.clone.default(view_copy); view_copy = None view_copy_1 = torch.ops.aten.view_copy.default(clone, [16, 64, 128, 128]) relu = torch.ops.aten.relu.default(view_copy_1); view_copy_1 = None view_copy_2 = torch.ops.aten.view_copy.default(relu, [1, 1024, 128, 128]); relu = None view_copy_3 = torch.ops.aten.view_copy.default(view_copy_2, [16, 64, 128, 128]); view_copy_2 = None view_copy_4 = torch.ops.aten.view_copy.default(clone, [16, 64, 128, 128]); clone = view_copy_4 = None sum_1 = torch.ops.aten.sum.default(view_copy_3) ones_like = torch.ops.aten.ones_like.default(sum_1, pin_memory = False, memory_format = torch.preserve_format); sum_1 = None expand_copy = torch.ops.aten.expand_copy.default(ones_like, [16, 64, 128, 128]); ones_like = None view_copy_5 = torch.ops.aten.view_copy.default(expand_copy, [1, 1024, 128, 128]); expand_copy = None new_empty_strided = torch.ops.aten.new_empty_strided.default(view_copy_5, [1, 1024, 128, 128], 
[16777216, 16384, 128, 1]) copy = torch.ops.aten.copy.default(new_empty_strided, view_copy_5); new_empty_strided = view_copy_5 = None view_copy_6 = torch.ops.aten.view_copy.default(copy, [16, 64, 128, 128]); view_copy_6 = None view_copy_7 = torch.ops.aten.view_copy.default(copy, [16, 64, 128, 128]) clone_1 = torch.ops.aten.clone.default(view_copy_7, memory_format = torch.contiguous_format) threshold_backward = torch.ops.aten.threshold_backward.default(clone_1, view_copy_3, 0); clone_1 = view_copy_3 = None copy_1 = torch.ops.aten.copy.default(view_copy_7, threshold_backward); view_copy_7 = threshold_backward = None view_copy_8 = torch.ops.aten.view_copy.default(copy_1, [1, 1024, 128, 128]); copy_1 = None view_copy_9 = torch.ops.aten.view_copy.default(view_copy_8, [16, 64, 128, 128]); view_copy_9 = None view_copy_10 = torch.ops.aten.view_copy.default(copy, [16, 64, 128, 128]); copy = None detach_copy = torch.ops.aten.detach_copy.default(view_copy_10); view_copy_10 = detach_copy = None view_copy_11 = torch.ops.aten.view_copy.default(view_copy_8, [16, 64, 128, 128]); view_copy_8 = None detach_copy_1 = torch.ops.aten.detach_copy.default(view_copy_11); view_copy_11 = None return detach_copy_1 """, ) # noqa: B950 def test_simple(self): def f(x): # simple test: 1 view op, 1 inplace op tmp = torch.ones(4, 2) y = x.view(4, 2) y.add_(tmp) z = x * x return y self.assert_functionalization(f, torch.ones(4, 2)) logs = self.get_logs(f, torch.ones(4, 2)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): ones = torch.ops.aten.ones.default([4, 2], device = device(type='cpu'), pin_memory = False) view_copy = torch.ops.aten.view_copy.default(arg0_1, [4, 2]) add = torch.ops.aten.add.Tensor(view_copy, ones); view_copy = ones = None view_copy_1 = torch.ops.aten.view_copy.default(add, [4, 2]); add = None view_copy_2 = torch.ops.aten.view_copy.default(view_copy_1, [4, 2]) mul = torch.ops.aten.mul.Tensor(view_copy_1, view_copy_1); mul = None copy_ = 
torch.ops.aten.copy_.default(arg0_1, view_copy_1); arg0_1 = view_copy_1 = copy_ = None return view_copy_2 """, ) reinplaced_logs = self.get_logs( f, torch.ones(4, 2), reapply_views=True, run_reinplace=True ) self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1): ones = torch.ops.aten.ones.default([4, 2], device = device(type='cpu'), pin_memory = False) view = torch.ops.aten.view.default(arg0_1, [4, 2]) add = torch.ops.aten.add.Tensor(view, ones); view = ones = None view_1 = torch.ops.aten.view.default(add, [4, 2]); add = None view_2 = torch.ops.aten.view.default(view_1, [4, 2]) mul = torch.ops.aten.mul.Tensor(view_1, view_1); mul = None copy_ = torch.ops.aten.copy_.default(arg0_1, view_1); arg0_1 = view_1 = copy_ = None return view_2 """, ) def test_simple_out(self): def f(x): tmp = torch.ones(4, 2) y = x.view(4, 2) # the out= tensor will get resized, since it has size=0 to start. z = torch.empty(()) torch.add(y, tmp, out=z) w = z * z return w self.assert_functionalization(f, torch.ones(4, 2)) logs = self.get_logs(f, torch.ones(4, 2)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): ones = torch.ops.aten.ones.default([4, 2], device = device(type='cpu'), pin_memory = False) view_copy = torch.ops.aten.view_copy.default(arg0_1, [4, 2]); arg0_1 = None empty = torch.ops.aten.empty.memory_format([], device = device(type='cpu'), pin_memory = False); empty = None add = torch.ops.aten.add.Tensor(view_copy, ones); view_copy = ones = None mul = torch.ops.aten.mul.Tensor(add, add); add = None return mul """, ) reinplaced_logs = self.get_logs( f, torch.ones(4, 2), reapply_views=True, run_reinplace=True ) self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1): ones = torch.ops.aten.ones.default([4, 2], device = device(type='cpu'), pin_memory = False) view = torch.ops.aten.view.default(arg0_1, [4, 2]); arg0_1 = None empty = torch.ops.aten.empty.memory_format([], device = device(type='cpu'), pin_memory = False); empty = None 
add = torch.ops.aten.add.Tensor(view, ones); view = ones = None mul = torch.ops.aten.mul.Tensor(add, add); add = None return mul """, ) def test_multi_out(self): def f(x): # aminmax.out returns a tuple of tensors. # functionalization should properly handle the tuple. out_min = torch.empty(4) out_max = torch.empty(4) torch.aminmax(x, dim=0, out=(out_max, out_min)) return out_max self.assert_functionalization(f, torch.arange(8, dtype=torch.float32)) logs = self.get_logs(f, torch.arange(8, dtype=torch.float32)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): empty = torch.ops.aten.empty.memory_format([4], device = device(type='cpu'), pin_memory = False); empty = None empty_1 = torch.ops.aten.empty.memory_format([4], device = device(type='cpu'), pin_memory = False); empty_1 = None aminmax = torch.ops.aten.aminmax.default(arg0_1, dim = 0); arg0_1 = None getitem = aminmax[0] getitem_1 = aminmax[1]; aminmax = getitem_1 = None return getitem """, ) reinplaced_logs = self.get_logs( f, torch.arange(8, dtype=torch.float32), reapply_views=True, run_reinplace=True, ) self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1): empty = torch.ops.aten.empty.memory_format([4], device = device(type='cpu'), pin_memory = False); empty = None empty_1 = torch.ops.aten.empty.memory_format([4], device = device(type='cpu'), pin_memory = False); empty_1 = None aminmax = torch.ops.aten.aminmax.default(arg0_1, dim = 0); arg0_1 = None getitem = aminmax[0] getitem_1 = aminmax[1]; aminmax = getitem_1 = None return getitem """, ) def test_tensor_ctr(self): def f(x): y = torch.tensor((1, 2, 3)) z = y.view(-1) z.add_(1) return y inpt = torch.arange(3, dtype=torch.float32) self.assert_functionalization(f, inpt) logs = self.get_logs(f, inpt) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): _tensor_constant0 = self._tensor_constant0 lift_fresh_copy = torch.ops.aten.lift_fresh_copy.default(_tensor_constant0); _tensor_constant0 = None view_copy = 
torch.ops.aten.view_copy.default(lift_fresh_copy, [-1]); lift_fresh_copy = None add = torch.ops.aten.add.Tensor(view_copy, 1); view_copy = None view_copy_1 = torch.ops.aten.view_copy.default(add, [3]); add = None view_copy_2 = torch.ops.aten.view_copy.default(view_copy_1, [-1]); view_copy_2 = None return view_copy_1 """, ) reinplaced_logs = self.get_logs(f, inpt, reapply_views=True, run_reinplace=True) self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1): _tensor_constant0 = self._tensor_constant0 lift_fresh_copy = torch.ops.aten.lift_fresh_copy.default(_tensor_constant0); _tensor_constant0 = None view = torch.ops.aten.view.default(lift_fresh_copy, [-1]); lift_fresh_copy = None add = torch.ops.aten.add_.Tensor(view, 1); add = None view_1 = torch.ops.aten.view.default(view, [3]); view = None view_2 = torch.ops.aten.view.default(view_1, [-1]); view_2 = None return view_1 """, ) def test_advanced_indexing_correct_strides(self): def f(a): # This test requires that *_scatter ops are able to return # non-contiguous tensors. b = a.clone()[:, 1] c = torch.ones_like(b, dtype=torch.bool) d = b.masked_fill_(c, 0) return d self.assert_functionalization(f, torch.ones(2, 2), reapply_views=True) def test_tensor_list_mixed_functional_nonfunctional(self): nonfunctional_tensor = torch.ones(2, dtype=torch.long) def f(x): # simple test: 1 view op, 1 inplace op functional_tensor = torch.ones(2, dtype=torch.long) out = x[functional_tensor, nonfunctional_tensor] return out out = f(torch.ones(2, 2)) out_functional = _functionalize(f, reapply_views=True, crossref=self.crossref)( torch.ones(2, 2) ) self.assertEqual(out, out_functional) def test_inplace_on_non_view(self): def f(x): # test for the case where we functionalize an inplace op on the other tensor - not a view. # This is worth checking because the tensor will have an empty ViewMeta stack, which needs to be special cased. 
tmp = torch.ones(4, 2) y = x.view(4, 2) x.add_(tmp) return y self.assert_functionalization(f, torch.ones(4, 2)) logs = self.get_logs(f, torch.ones(4, 2)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): ones = torch.ops.aten.ones.default([4, 2], device = device(type='cpu'), pin_memory = False) view_copy = torch.ops.aten.view_copy.default(arg0_1, [4, 2]); view_copy = None add = torch.ops.aten.add.Tensor(arg0_1, ones); ones = None copy_ = torch.ops.aten.copy_.default(arg0_1, add); arg0_1 = copy_ = None view_copy_1 = torch.ops.aten.view_copy.default(add, [4, 2]); add = None return view_copy_1 """, ) reinplaced_logs = self.get_logs( f, torch.ones(4, 2), reapply_views=True, run_reinplace=True ) self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1): ones = torch.ops.aten.ones.default([4, 2], device = device(type='cpu'), pin_memory = False) view = torch.ops.aten.view.default(arg0_1, [4, 2]); view = None add = torch.ops.aten.add.Tensor(arg0_1, ones); ones = None copy_ = torch.ops.aten.copy_.default(arg0_1, add); arg0_1 = copy_ = None view_1 = torch.ops.aten.view.default(add, [4, 2]); add = None return view_1 """, ) # Some ops that are mutable are neither inplace nor out= ops. # They also need special handling. 
def test_mutable_op_not_inplace_or_other(self): def f(x): return torch._fused_moving_avg_obs_fq_helper( x, x, x, x, x, x, x, 1.0, 0, 1, 0 ) logs = self.get_logs(f, torch.ones(1)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): _fused_moving_avg_obs_fq_helper_functional = torch.ops.aten._fused_moving_avg_obs_fq_helper_functional.default(arg0_1, arg0_1, arg0_1, arg0_1, arg0_1, arg0_1, arg0_1, 1.0, 0, 1, 0) getitem = _fused_moving_avg_obs_fq_helper_functional[0] getitem_1 = _fused_moving_avg_obs_fq_helper_functional[1] getitem_2 = _fused_moving_avg_obs_fq_helper_functional[2]; getitem_2 = None getitem_3 = _fused_moving_avg_obs_fq_helper_functional[3]; getitem_3 = None getitem_4 = _fused_moving_avg_obs_fq_helper_functional[4]; getitem_4 = None getitem_5 = _fused_moving_avg_obs_fq_helper_functional[5]; _fused_moving_avg_obs_fq_helper_functional = None copy_ = torch.ops.aten.copy_.default(arg0_1, getitem_5); arg0_1 = getitem_5 = copy_ = None return (getitem, getitem_1) """, # noqa: B950 ) def test_as_strided(self): def f(x): y = x.as_strided((2,), (2,), 1) y.add_(1) return x self.assert_functionalization(f, torch.ones(9)) logs = self.get_logs(f, torch.ones(9)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): as_strided_copy = torch.ops.aten.as_strided_copy.default(arg0_1, [2], [2], 1) add = torch.ops.aten.add.Tensor(as_strided_copy, 1); as_strided_copy = None as_strided_scatter = torch.ops.aten.as_strided_scatter.default(arg0_1, add, [2], [2], 1); add = None as_strided_copy_1 = torch.ops.aten.as_strided_copy.default(as_strided_scatter, [2], [2], 1); as_strided_copy_1 = None copy_ = torch.ops.aten.copy_.default(arg0_1, as_strided_scatter); arg0_1 = copy_ = None return as_strided_scatter """, ) # NB: even with reapply_views=True, we expect to see scatter op reinplaced_logs = self.get_logs( f, torch.ones(2, 2), reapply_views=True, run_reinplace=False ) self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1): as_strided = 
torch.ops.aten.as_strided.default(arg0_1, [2], [2], 1) add = torch.ops.aten.add.Tensor(as_strided, 1); as_strided = None as_strided_scatter = torch.ops.aten.as_strided_scatter.default(arg0_1, add, [2], [2], 1); add = None as_strided_1 = torch.ops.aten.as_strided.default(as_strided_scatter, [2], [2], 1); as_strided_1 = None copy_ = torch.ops.aten.copy_.default(arg0_1, as_strided_scatter); arg0_1 = copy_ = None return as_strided_scatter """, ) def test_tensor_list_composite(self): def f(x): # Test an op with TensorList input y = torch.block_diag(x, x) return y self.assert_functionalization(f, torch.ones(2, 2)) logs = self.get_logs(f, torch.ones(2, 2)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): block_diag = torch.ops.aten.block_diag.default([arg0_1, arg0_1]); arg0_1 = None return block_diag """, ) def test_cat(self): def f(x): out = torch.empty(0) torch.cat((x,), out=out) return out self.assert_functionalization(f, torch.ones(2, 2)) logs = self.get_logs(f, torch.ones(2, 2)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): empty = torch.ops.aten.empty.memory_format([0], device = device(type='cpu'), pin_memory = False); empty = None cat = torch.ops.aten.cat.default([arg0_1]); arg0_1 = None return cat """, ) reinplaced_logs = self.get_logs( f, torch.ones(2, 2), reapply_views=True, run_reinplace=True ) self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1): empty = torch.ops.aten.empty.memory_format([0], device = device(type='cpu'), pin_memory = False); empty = None cat = torch.ops.aten.cat.default([arg0_1]); arg0_1 = None return cat """, ) def test_diagonal(self): def f(x): # test: view ops that take a subset of the original tensor (select/diagonal) tmp = torch.ones(2) y = x.clone().diagonal() y.add_(tmp) z = x * x return z self.assert_functionalization(f, torch.ones(2, 2)) logs = self.get_logs(f, torch.ones(2, 2)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): ones = 
torch.ops.aten.ones.default([2], device = device(type='cpu'), pin_memory = False) clone = torch.ops.aten.clone.default(arg0_1) diagonal_copy = torch.ops.aten.diagonal_copy.default(clone) add = torch.ops.aten.add.Tensor(diagonal_copy, ones); diagonal_copy = ones = None diagonal_scatter = torch.ops.aten.diagonal_scatter.default(clone, add); clone = add = None diagonal_copy_1 = torch.ops.aten.diagonal_copy.default(diagonal_scatter); diagonal_scatter = diagonal_copy_1 = None mul = torch.ops.aten.mul.Tensor(arg0_1, arg0_1); arg0_1 = None return mul """, ) reinplaced_logs = self.get_logs( f, torch.ones(2, 2), reapply_views=True, run_reinplace=True ) self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1): ones = torch.ops.aten.ones.default([2], device = device(type='cpu'), pin_memory = False) clone = torch.ops.aten.clone.default(arg0_1) diagonal = torch.ops.aten.diagonal.default(clone) add = torch.ops.aten.add_.Tensor(diagonal, ones); diagonal = ones = add = None diagonal_1 = torch.ops.aten.diagonal.default(clone); clone = diagonal_1 = None mul = torch.ops.aten.mul.Tensor(arg0_1, arg0_1); arg0_1 = None return mul """, ) def test_diagonal_mutated_input(self): def f(x): # simple test: there are pending updates afterwards, which the test syncs manually tmp = torch.ones(2) y = x.diagonal() y.add_(tmp) return x x = torch.ones(2, 2) self.assert_functionalization(f, x) logs = self.get_logs(f, torch.ones(2, 2)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): ones = torch.ops.aten.ones.default([2], device = device(type='cpu'), pin_memory = False) diagonal_copy = torch.ops.aten.diagonal_copy.default(arg0_1) add = torch.ops.aten.add.Tensor(diagonal_copy, ones); diagonal_copy = ones = None diagonal_scatter = torch.ops.aten.diagonal_scatter.default(arg0_1, add); add = None diagonal_copy_1 = torch.ops.aten.diagonal_copy.default(diagonal_scatter); diagonal_copy_1 = None copy_ = torch.ops.aten.copy_.default(arg0_1, diagonal_scatter); arg0_1 = copy_ = 
None return diagonal_scatter """, ) # NB: even with reapply_views=True, we expect to see scatter op reinplaced_logs = self.get_logs( f, torch.ones(2, 2), reapply_views=True, run_reinplace=False ) self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1): ones = torch.ops.aten.ones.default([2], device = device(type='cpu'), pin_memory = False) diagonal = torch.ops.aten.diagonal.default(arg0_1) add = torch.ops.aten.add.Tensor(diagonal, ones); diagonal = ones = None diagonal_scatter = torch.ops.aten.diagonal_scatter.default(arg0_1, add); add = None diagonal_1 = torch.ops.aten.diagonal.default(diagonal_scatter); diagonal_1 = None copy_ = torch.ops.aten.copy_.default(arg0_1, diagonal_scatter); arg0_1 = copy_ = None return diagonal_scatter """, ) def test_channels_last_contiguous(self): def f(x): return x.contiguous(memory_format=torch.channels_last) tmp = torch.ones(2) y = x.diagonal() y.add_(tmp) return x x = torch.randn(4, 8, 8, 3).permute(0, 3, 1, 2) self.assert_functionalization(f, x) logs = self.get_logs(f, x).strip() # There should be no clone in the graph self.assertExpectedInline( logs, """\ def forward(self, arg0_1): return arg0_1""", ) def test_split(self): def f(x): # test: view ops that return multiple tensors (split) tmp = torch.ones(2) y1, y2 = x.split(2) y3 = y2.diagonal() y3.add_(tmp) z = x * x return y3 self.assert_functionalization(f, torch.ones(4, 2)) logs = self.get_logs(f, torch.ones(4, 2)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): ones = torch.ops.aten.ones.default([2], device = device(type='cpu'), pin_memory = False) split_copy = torch.ops.aten.split_copy.Tensor(arg0_1, 2) getitem = split_copy[0]; getitem = None getitem_1 = split_copy[1]; split_copy = None diagonal_copy = torch.ops.aten.diagonal_copy.default(getitem_1); getitem_1 = None add = torch.ops.aten.add.Tensor(diagonal_copy, ones); diagonal_copy = ones = None split_copy_1 = torch.ops.aten.split_copy.Tensor(arg0_1, 2) getitem_2 = split_copy_1[0]; 
getitem_2 = None getitem_3 = split_copy_1[1]; split_copy_1 = None diagonal_scatter = torch.ops.aten.diagonal_scatter.default(getitem_3, add); getitem_3 = add = None slice_scatter = torch.ops.aten.slice_scatter.default(arg0_1, diagonal_scatter, 0, 2, 4); diagonal_scatter = None split_copy_2 = torch.ops.aten.split_copy.Tensor(slice_scatter, 2) getitem_4 = split_copy_2[0]; getitem_4 = None getitem_5 = split_copy_2[1]; split_copy_2 = None diagonal_copy_1 = torch.ops.aten.diagonal_copy.default(getitem_5); getitem_5 = None mul = torch.ops.aten.mul.Tensor(slice_scatter, slice_scatter); mul = None copy_ = torch.ops.aten.copy_.default(arg0_1, slice_scatter); arg0_1 = slice_scatter = copy_ = None return diagonal_copy_1 """, ) # noqa: B950 # NB: even with reapply_views=True, we expect to see scatter op reinplaced_logs = self.get_logs( f, torch.ones(4, 2), reapply_views=True, run_reinplace=False ) self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1): ones = torch.ops.aten.ones.default([2], device = device(type='cpu'), pin_memory = False) split = torch.ops.aten.split.Tensor(arg0_1, 2) getitem = split[0]; getitem = None getitem_1 = split[1]; split = None diagonal = torch.ops.aten.diagonal.default(getitem_1); getitem_1 = None add = torch.ops.aten.add.Tensor(diagonal, ones); diagonal = ones = None split_1 = torch.ops.aten.split.Tensor(arg0_1, 2) getitem_2 = split_1[0]; getitem_2 = None getitem_3 = split_1[1]; split_1 = None diagonal_scatter = torch.ops.aten.diagonal_scatter.default(getitem_3, add); getitem_3 = add = None slice_scatter = torch.ops.aten.slice_scatter.default(arg0_1, diagonal_scatter, 0, 2, 4); diagonal_scatter = None split_2 = torch.ops.aten.split.Tensor(slice_scatter, 2) getitem_4 = split_2[0]; getitem_4 = None getitem_5 = split_2[1]; split_2 = None diagonal_1 = torch.ops.aten.diagonal.default(getitem_5); getitem_5 = None mul = torch.ops.aten.mul.Tensor(slice_scatter, slice_scatter); mul = None copy_ = torch.ops.aten.copy_.default(arg0_1, 
slice_scatter); arg0_1 = slice_scatter = copy_ = None return diagonal_1 """, ) # noqa: B950 def test_split_with_sizes(self): def f(x): # test: view ops that return multiple tensors (split_with_sizes) tmp = torch.ones(2) y1, y2 = x.split_with_sizes([2, 2]) y3 = y1.diagonal() y3.add_(tmp) z = x * x return y3 self.assert_functionalization(f, torch.ones(4, 2)) logs = self.get_logs(f, torch.ones(4, 2)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): ones = torch.ops.aten.ones.default([2], device = device(type='cpu'), pin_memory = False) split_with_sizes_copy = torch.ops.aten.split_with_sizes_copy.default(arg0_1, [2, 2]) getitem = split_with_sizes_copy[0] getitem_1 = split_with_sizes_copy[1]; split_with_sizes_copy = getitem_1 = None diagonal_copy = torch.ops.aten.diagonal_copy.default(getitem); getitem = None add = torch.ops.aten.add.Tensor(diagonal_copy, ones); diagonal_copy = ones = None split_with_sizes_copy_1 = torch.ops.aten.split_with_sizes_copy.default(arg0_1, [2, 2]) getitem_2 = split_with_sizes_copy_1[0] getitem_3 = split_with_sizes_copy_1[1]; split_with_sizes_copy_1 = getitem_3 = None diagonal_scatter = torch.ops.aten.diagonal_scatter.default(getitem_2, add); getitem_2 = add = None slice_scatter = torch.ops.aten.slice_scatter.default(arg0_1, diagonal_scatter, 0, 0, 2); diagonal_scatter = None split_with_sizes_copy_2 = torch.ops.aten.split_with_sizes_copy.default(slice_scatter, [2, 2]) getitem_4 = split_with_sizes_copy_2[0] getitem_5 = split_with_sizes_copy_2[1]; split_with_sizes_copy_2 = getitem_5 = None diagonal_copy_1 = torch.ops.aten.diagonal_copy.default(getitem_4); getitem_4 = None mul = torch.ops.aten.mul.Tensor(slice_scatter, slice_scatter); mul = None copy_ = torch.ops.aten.copy_.default(arg0_1, slice_scatter); arg0_1 = slice_scatter = copy_ = None return diagonal_copy_1 """, ) # noqa: B950 # NB: even with reapply_views=True, we expect to see scatter op reinplaced_logs = self.get_logs( f, torch.ones(4, 2), reapply_views=True, 
run_reinplace=False ) self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1): ones = torch.ops.aten.ones.default([2], device = device(type='cpu'), pin_memory = False) split_with_sizes = torch.ops.aten.split_with_sizes.default(arg0_1, [2, 2]) getitem = split_with_sizes[0] getitem_1 = split_with_sizes[1]; split_with_sizes = getitem_1 = None diagonal = torch.ops.aten.diagonal.default(getitem); getitem = None add = torch.ops.aten.add.Tensor(diagonal, ones); diagonal = ones = None split_with_sizes_1 = torch.ops.aten.split_with_sizes.default(arg0_1, [2, 2]) getitem_2 = split_with_sizes_1[0] getitem_3 = split_with_sizes_1[1]; split_with_sizes_1 = getitem_3 = None diagonal_scatter = torch.ops.aten.diagonal_scatter.default(getitem_2, add); getitem_2 = add = None slice_scatter = torch.ops.aten.slice_scatter.default(arg0_1, diagonal_scatter, 0, 0, 2); diagonal_scatter = None split_with_sizes_2 = torch.ops.aten.split_with_sizes.default(slice_scatter, [2, 2]) getitem_4 = split_with_sizes_2[0] getitem_5 = split_with_sizes_2[1]; split_with_sizes_2 = getitem_5 = None diagonal_1 = torch.ops.aten.diagonal.default(getitem_4); getitem_4 = None mul = torch.ops.aten.mul.Tensor(slice_scatter, slice_scatter); mul = None copy_ = torch.ops.aten.copy_.default(arg0_1, slice_scatter); arg0_1 = slice_scatter = copy_ = None return diagonal_1 """, ) # noqa: B950 def test_slice(self): def f(x): tmp = torch.ones(4) x.transpose_(1, 0) y = x[0:2] y.add_(tmp) return x self.assert_functionalization(f, torch.ones(4, 2), mutated_input_metadata=True) logs = self.get_logs(f, torch.ones(4, 2)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): ones = torch.ops.aten.ones.default([4], device = device(type='cpu'), pin_memory = False) transpose_copy = torch.ops.aten.transpose_copy.int(arg0_1, 1, 0) slice_copy = torch.ops.aten.slice_copy.Tensor(transpose_copy, 0, 0, 2); transpose_copy = None add = torch.ops.aten.add.Tensor(slice_copy, ones); slice_copy = ones = None 
transpose_copy_1 = torch.ops.aten.transpose_copy.int(arg0_1, 1, 0); arg0_1 = None slice_scatter = torch.ops.aten.slice_scatter.default(transpose_copy_1, add, 0, 0, 2); transpose_copy_1 = add = None transpose_copy_2 = torch.ops.aten.transpose_copy.int(slice_scatter, 1, 0); slice_scatter = None transpose_copy_3 = torch.ops.aten.transpose_copy.int(transpose_copy_2, 1, 0) slice_copy_1 = torch.ops.aten.slice_copy.Tensor(transpose_copy_3, 0, 0, 2); transpose_copy_3 = slice_copy_1 = None transpose_copy_4 = torch.ops.aten.transpose_copy.int(transpose_copy_2, 1, 0); transpose_copy_2 = None return transpose_copy_4 """, ) # noqa: B950 # NB: even with reapply_views=True, we expect to see scatter op reinplaced_logs = self.get_logs( f, torch.ones(4, 2), reapply_views=True, run_reinplace=False ) self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1): ones = torch.ops.aten.ones.default([4], device = device(type='cpu'), pin_memory = False) transpose = torch.ops.aten.transpose.int(arg0_1, 1, 0) slice_1 = torch.ops.aten.slice.Tensor(transpose, 0, 0, 2); transpose = None add = torch.ops.aten.add.Tensor(slice_1, ones); slice_1 = ones = None transpose_1 = torch.ops.aten.transpose.int(arg0_1, 1, 0); arg0_1 = None slice_scatter = torch.ops.aten.slice_scatter.default(transpose_1, add, 0, 0, 2); transpose_1 = add = None transpose_2 = torch.ops.aten.transpose.int(slice_scatter, 1, 0); slice_scatter = None transpose_3 = torch.ops.aten.transpose.int(transpose_2, 1, 0) slice_2 = torch.ops.aten.slice.Tensor(transpose_3, 0, 0, 2); transpose_3 = slice_2 = None transpose_4 = torch.ops.aten.transpose.int(transpose_2, 1, 0); transpose_2 = None return transpose_4 """, ) # noqa: B950 def test_view_inplace(self): def f(x): # test: view + inplace op (transpose_) tmp = torch.ones(4) x.transpose_(1, 0) y = x[0] y.add_(tmp) return x self.assert_functionalization(f, torch.ones(4, 2), mutated_input_metadata=True) logs = self.get_logs(f, torch.ones(4, 2)) self.assertExpectedInline( logs, 
"""\ def forward(self, arg0_1): ones = torch.ops.aten.ones.default([4], device = device(type='cpu'), pin_memory = False) transpose_copy = torch.ops.aten.transpose_copy.int(arg0_1, 1, 0) select_copy = torch.ops.aten.select_copy.int(transpose_copy, 0, 0); transpose_copy = None add = torch.ops.aten.add.Tensor(select_copy, ones); select_copy = ones = None transpose_copy_1 = torch.ops.aten.transpose_copy.int(arg0_1, 1, 0); arg0_1 = None select_scatter = torch.ops.aten.select_scatter.default(transpose_copy_1, add, 0, 0); transpose_copy_1 = add = None transpose_copy_2 = torch.ops.aten.transpose_copy.int(select_scatter, 1, 0); select_scatter = None transpose_copy_3 = torch.ops.aten.transpose_copy.int(transpose_copy_2, 1, 0) select_copy_1 = torch.ops.aten.select_copy.int(transpose_copy_3, 0, 0); transpose_copy_3 = select_copy_1 = None transpose_copy_4 = torch.ops.aten.transpose_copy.int(transpose_copy_2, 1, 0); transpose_copy_2 = None return transpose_copy_4 """, ) # noqa: B950 # NB: even with reapply_views=True, we expect to see scatter op reinplaced_logs = self.get_logs( f, torch.ones(4, 2), reapply_views=True, run_reinplace=False ) self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1): ones = torch.ops.aten.ones.default([4], device = device(type='cpu'), pin_memory = False) transpose = torch.ops.aten.transpose.int(arg0_1, 1, 0) select = torch.ops.aten.select.int(transpose, 0, 0); transpose = None add = torch.ops.aten.add.Tensor(select, ones); select = ones = None transpose_1 = torch.ops.aten.transpose.int(arg0_1, 1, 0); arg0_1 = None select_scatter = torch.ops.aten.select_scatter.default(transpose_1, add, 0, 0); transpose_1 = add = None transpose_2 = torch.ops.aten.transpose.int(select_scatter, 1, 0); select_scatter = None transpose_3 = torch.ops.aten.transpose.int(transpose_2, 1, 0) select_1 = torch.ops.aten.select.int(transpose_3, 0, 0); transpose_3 = select_1 = None transpose_4 = torch.ops.aten.transpose.int(transpose_2, 1, 0); transpose_2 = None 
return transpose_4 """, ) # noqa: B950 def test_unbind(self): def f(x): # test: view + inplace op (transpose_) tmp = torch.ones(4) x.transpose_(1, 0) y, _ = x.unbind(0) y.add_(tmp) return x self.assert_functionalization(f, torch.ones(4, 2), mutated_input_metadata=True) logs = self.get_logs(f, torch.ones(4, 2)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): ones = torch.ops.aten.ones.default([4], device = device(type='cpu'), pin_memory = False) transpose_copy = torch.ops.aten.transpose_copy.int(arg0_1, 1, 0) unbind_copy = torch.ops.aten.unbind_copy.int(transpose_copy); transpose_copy = None getitem = unbind_copy[0] getitem_1 = unbind_copy[1]; unbind_copy = getitem_1 = None add = torch.ops.aten.add.Tensor(getitem, ones); getitem = ones = None transpose_copy_1 = torch.ops.aten.transpose_copy.int(arg0_1, 1, 0); arg0_1 = None select_scatter = torch.ops.aten.select_scatter.default(transpose_copy_1, add, 0, 0); transpose_copy_1 = add = None transpose_copy_2 = torch.ops.aten.transpose_copy.int(select_scatter, 1, 0); select_scatter = None transpose_copy_3 = torch.ops.aten.transpose_copy.int(transpose_copy_2, 1, 0) unbind_copy_1 = torch.ops.aten.unbind_copy.int(transpose_copy_3); transpose_copy_3 = None getitem_2 = unbind_copy_1[0]; getitem_2 = None getitem_3 = unbind_copy_1[1]; unbind_copy_1 = getitem_3 = None transpose_copy_4 = torch.ops.aten.transpose_copy.int(transpose_copy_2, 1, 0); transpose_copy_2 = None return transpose_copy_4 """, ) # noqa: B950 # NB: even with reapply_views=True, we expect to see scatter op reinplaced_logs = self.get_logs( f, torch.ones(4, 2), reapply_views=True, run_reinplace=False ) self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1): ones = torch.ops.aten.ones.default([4], device = device(type='cpu'), pin_memory = False) transpose = torch.ops.aten.transpose.int(arg0_1, 1, 0) unbind = torch.ops.aten.unbind.int(transpose); transpose = None getitem = unbind[0] getitem_1 = unbind[1]; unbind = getitem_1 = None 
add = torch.ops.aten.add.Tensor(getitem, ones); getitem = ones = None transpose_1 = torch.ops.aten.transpose.int(arg0_1, 1, 0); arg0_1 = None select_scatter = torch.ops.aten.select_scatter.default(transpose_1, add, 0, 0); transpose_1 = add = None transpose_2 = torch.ops.aten.transpose.int(select_scatter, 1, 0); select_scatter = None transpose_3 = torch.ops.aten.transpose.int(transpose_2, 1, 0) unbind_1 = torch.ops.aten.unbind.int(transpose_3); transpose_3 = None getitem_2 = unbind_1[0]; getitem_2 = None getitem_3 = unbind_1[1]; unbind_1 = getitem_3 = None transpose_4 = torch.ops.aten.transpose.int(transpose_2, 1, 0); transpose_2 = None return transpose_4 """, ) # noqa: B950 def test_optional_tensor_list(self): def f(x): # test: an operator that takes in a List[Optional[Tensor]] argument # (index_put) y = x.view(8) indices = torch.arange(4) values = torch.arange(4, dtype=y.dtype) y.index_put_((indices,), values, accumulate=False) return y self.assert_functionalization(f, torch.ones(4, 2)) logs = self.get_logs(f, torch.ones(4, 2)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): view_copy = torch.ops.aten.view_copy.default(arg0_1, [8]) arange = torch.ops.aten.arange.default(4, device = device(type='cpu'), pin_memory = False) arange_1 = torch.ops.aten.arange.default(4, dtype = torch.float32, device = device(type='cpu'), pin_memory = False) index_put = torch.ops.aten.index_put.default(view_copy, [arange], arange_1); view_copy = arange = arange_1 = None view_copy_1 = torch.ops.aten.view_copy.default(index_put, [4, 2]); index_put = None view_copy_2 = torch.ops.aten.view_copy.default(view_copy_1, [8]) copy_ = torch.ops.aten.copy_.default(arg0_1, view_copy_1); arg0_1 = view_copy_1 = copy_ = None return view_copy_2 """, ) # noqa: B950 def test_scalars(self): def f(x): # test: the pass can handle scalar inputs properly tmp = torch.ones(4, 2) y = x.view(4, 2) y.add_(1) z = 2 * y z.div_(1) return z self.assert_functionalization(f, torch.ones(4, 2)) logs = 
self.get_logs(f, torch.ones(4, 2)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): ones = torch.ops.aten.ones.default([4, 2], device = device(type='cpu'), pin_memory = False); ones = None view_copy = torch.ops.aten.view_copy.default(arg0_1, [4, 2]) add = torch.ops.aten.add.Tensor(view_copy, 1); view_copy = None view_copy_1 = torch.ops.aten.view_copy.default(add, [4, 2]); add = None view_copy_2 = torch.ops.aten.view_copy.default(view_copy_1, [4, 2]) mul = torch.ops.aten.mul.Tensor(view_copy_2, 2); view_copy_2 = None div = torch.ops.aten.div.Tensor(mul, 1); mul = None copy_ = torch.ops.aten.copy_.default(arg0_1, view_copy_1); arg0_1 = view_copy_1 = copy_ = None return div """, ) @skipIfTorchDynamo("Test does not work with TorchDynamo") def test_metadata_change(self): def f(x): # ops like ge_() are allowed to change the dtype of the input. # functionalization should pick up on that. y = x.clone() out = y.ge_(0) return out self.assert_functionalization(f, torch.ones(4, 2)) logs = self.get_logs(f, torch.ones(4, 2)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): clone = torch.ops.aten.clone.default(arg0_1); arg0_1 = None ge = torch.ops.aten.ge.Scalar(clone, 0); clone = None _to_copy = torch.ops.aten._to_copy.default(ge, dtype = torch.float32, layout = torch.strided); ge = None return _to_copy """, ) reinplaced_logs = self.get_logs( f, torch.ones(2, 2), reapply_views=True, run_reinplace=True ) self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1): clone = torch.ops.aten.clone.default(arg0_1); arg0_1 = None ge = torch.ops.aten.ge.Scalar(clone, 0); clone = None _to_copy = torch.ops.aten._to_copy.default(ge, dtype = torch.float32, layout = torch.strided); ge = None return _to_copy """, ) # noqa: B950 @skipIfTorchDynamo("Test does not work with TorchDynamo") def test_metadata_change_out_op(self): def f(t, y): out_1 = torch.ones(1) return torch.add(t, y, out=out_1) inpt1, inpt2 = torch.tensor([1]), torch.tensor([1]) 
inpt1_func, inpt2_func = ( torch._to_functional_tensor(inpt1), torch._to_functional_tensor(inpt2), ) out_ref = f(inpt1, inpt2) torch._enable_functionalization(reapply_views=True) try: out_functional = f(inpt1_func, inpt2_func) finally: torch._disable_functionalization() self.assertEqual(out_ref, torch._from_functional_tensor(out_functional)) def test_only_one_view(self): def f(x): # This tests that we don't have any unnecessary views in the trace. # If the input wasn't mutated, we don't need to regenerate it, # so there should be a total of 1 op in the output trace. return x.view(4, 2) logs = self.get_logs(f, torch.ones(4, 2)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): view_copy = torch.ops.aten.view_copy.default(arg0_1, [4, 2]); arg0_1 = None return view_copy """, ) def test_everything(self): def f(x): # test: everything tmp = torch.ones(2, 2) x2 = x + x y = x2.view(8) z0 = y.reshape(2, 4) z1 = z0.transpose(1, 0) z1.unsqueeze_(0) z1.squeeze_() z2, z3 = z1.split(2) z2.add_(tmp) z4 = z0[0] + z2.reshape(4) return z2 self.assert_functionalization(f, torch.ones(4, 2)) logs = self.get_logs(f, torch.ones(4, 2)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): ones = torch.ops.aten.ones.default([2, 2], device = device(type='cpu'), pin_memory = False) add = torch.ops.aten.add.Tensor(arg0_1, arg0_1); arg0_1 = None view_copy = torch.ops.aten.view_copy.default(add, [8]) view_copy_1 = torch.ops.aten.view_copy.default(view_copy, [2, 4]); view_copy = None transpose_copy = torch.ops.aten.transpose_copy.int(view_copy_1, 1, 0) unsqueeze_copy = torch.ops.aten.unsqueeze_copy.default(transpose_copy, 0); transpose_copy = None squeeze_copy = torch.ops.aten.squeeze_copy.default(unsqueeze_copy); unsqueeze_copy = None split_copy = torch.ops.aten.split_copy.Tensor(squeeze_copy, 2); squeeze_copy = None getitem = split_copy[0] getitem_1 = split_copy[1]; split_copy = getitem_1 = None add_1 = torch.ops.aten.add.Tensor(getitem, ones); getitem = ones = None 
view_copy_2 = torch.ops.aten.view_copy.default(add, [8]); add = None view_copy_3 = torch.ops.aten.view_copy.default(view_copy_2, [2, 4]); view_copy_2 = None transpose_copy_1 = torch.ops.aten.transpose_copy.int(view_copy_3, 1, 0); view_copy_3 = None unsqueeze_copy_1 = torch.ops.aten.unsqueeze_copy.default(transpose_copy_1, 0); transpose_copy_1 = None squeeze_copy_1 = torch.ops.aten.squeeze_copy.default(unsqueeze_copy_1); unsqueeze_copy_1 = None slice_scatter = torch.ops.aten.slice_scatter.default(squeeze_copy_1, add_1, 0, 0, 2); squeeze_copy_1 = add_1 = None unsqueeze_copy_2 = torch.ops.aten.unsqueeze_copy.default(slice_scatter, 0); slice_scatter = None squeeze_copy_2 = torch.ops.aten.squeeze_copy.dim(unsqueeze_copy_2, 0); unsqueeze_copy_2 = None transpose_copy_2 = torch.ops.aten.transpose_copy.int(squeeze_copy_2, 1, 0); squeeze_copy_2 = None view_copy_4 = torch.ops.aten.view_copy.default(transpose_copy_2, [8]); transpose_copy_2 = None view_copy_5 = torch.ops.aten.view_copy.default(view_copy_4, [4, 2]); view_copy_4 = None view_copy_6 = torch.ops.aten.view_copy.default(view_copy_5, [8]) view_copy_7 = torch.ops.aten.view_copy.default(view_copy_6, [2, 4]); view_copy_6 = None transpose_copy_3 = torch.ops.aten.transpose_copy.int(view_copy_7, 1, 0); view_copy_7 = None unsqueeze_copy_3 = torch.ops.aten.unsqueeze_copy.default(transpose_copy_3, 0); transpose_copy_3 = None squeeze_copy_3 = torch.ops.aten.squeeze_copy.default(unsqueeze_copy_3); unsqueeze_copy_3 = None split_copy_1 = torch.ops.aten.split_copy.Tensor(squeeze_copy_3, 2); squeeze_copy_3 = None getitem_2 = split_copy_1[0] getitem_3 = split_copy_1[1]; split_copy_1 = getitem_3 = None select_copy = torch.ops.aten.select_copy.int(view_copy_1, 0, 0); view_copy_1 = select_copy = None view_copy_8 = torch.ops.aten.view_copy.default(getitem_2, [4]); view_copy_8 = None view_copy_9 = torch.ops.aten.view_copy.default(view_copy_5, [8]) view_copy_10 = torch.ops.aten.view_copy.default(view_copy_9, [2, 4]); view_copy_9 = None 
select_copy_1 = torch.ops.aten.select_copy.int(view_copy_10, 0, 0); view_copy_10 = None view_copy_11 = torch.ops.aten.view_copy.default(view_copy_5, [8]); view_copy_5 = None view_copy_12 = torch.ops.aten.view_copy.default(view_copy_11, [2, 4]); view_copy_11 = None transpose_copy_4 = torch.ops.aten.transpose_copy.int(view_copy_12, 1, 0); view_copy_12 = None unsqueeze_copy_4 = torch.ops.aten.unsqueeze_copy.default(transpose_copy_4, 0); transpose_copy_4 = None squeeze_copy_4 = torch.ops.aten.squeeze_copy.default(unsqueeze_copy_4); unsqueeze_copy_4 = None split_copy_2 = torch.ops.aten.split_copy.Tensor(squeeze_copy_4, 2); squeeze_copy_4 = None getitem_4 = split_copy_2[0] getitem_5 = split_copy_2[1]; split_copy_2 = getitem_5 = None view_copy_13 = torch.ops.aten.view_copy.default(getitem_4, [4]); getitem_4 = None add_2 = torch.ops.aten.add.Tensor(select_copy_1, view_copy_13); select_copy_1 = view_copy_13 = add_2 = None return getitem_2 """, ) # noqa: B950 reinplaced_logs = self.get_logs( f, torch.ones(4, 2), reapply_views=True, run_reinplace=True ) self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1): ones = torch.ops.aten.ones.default([2, 2], device = device(type='cpu'), pin_memory = False) add = torch.ops.aten.add.Tensor(arg0_1, arg0_1); arg0_1 = None view = torch.ops.aten.view.default(add, [8]) view_1 = torch.ops.aten.view.default(view, [2, 4]); view = None transpose = torch.ops.aten.transpose.int(view_1, 1, 0) unsqueeze = torch.ops.aten.unsqueeze.default(transpose, 0); transpose = None squeeze = torch.ops.aten.squeeze.default(unsqueeze); unsqueeze = None split = torch.ops.aten.split.Tensor(squeeze, 2); squeeze = None getitem = split[0] getitem_1 = split[1]; split = getitem_1 = None add_1 = torch.ops.aten.add_.Tensor(getitem, ones); getitem = ones = add_1 = None view_2 = torch.ops.aten.view.default(add, [8]); add = None view_3 = torch.ops.aten.view.default(view_2, [2, 4]); view_2 = None transpose_1 = torch.ops.aten.transpose.int(view_3, 1, 0); 
view_3 = None unsqueeze_1 = torch.ops.aten.unsqueeze.default(transpose_1, 0); transpose_1 = None squeeze_1 = torch.ops.aten.squeeze.default(unsqueeze_1); unsqueeze_1 = None unsqueeze_2 = torch.ops.aten.unsqueeze.default(squeeze_1, 0); squeeze_1 = None squeeze_2 = torch.ops.aten.squeeze.dim(unsqueeze_2, 0); unsqueeze_2 = None transpose_2 = torch.ops.aten.transpose.int(squeeze_2, 1, 0); squeeze_2 = None view_4 = torch.ops.aten.view.default(transpose_2, [8]); transpose_2 = None view_5 = torch.ops.aten.view.default(view_4, [4, 2]); view_4 = None view_6 = torch.ops.aten.view.default(view_5, [8]) view_7 = torch.ops.aten.view.default(view_6, [2, 4]); view_6 = None transpose_3 = torch.ops.aten.transpose.int(view_7, 1, 0); view_7 = None unsqueeze_3 = torch.ops.aten.unsqueeze.default(transpose_3, 0); transpose_3 = None squeeze_3 = torch.ops.aten.squeeze.default(unsqueeze_3); unsqueeze_3 = None split_1 = torch.ops.aten.split.Tensor(squeeze_3, 2); squeeze_3 = None getitem_2 = split_1[0] getitem_3 = split_1[1]; split_1 = getitem_3 = None select = torch.ops.aten.select.int(view_1, 0, 0); view_1 = select = None clone = torch.ops.aten.clone.default(getitem_2, memory_format = torch.contiguous_format) _unsafe_view = torch.ops.aten._unsafe_view.default(clone, [4]); clone = None view_8 = torch.ops.aten.view.default(view_5, [8]); view_5 = None view_9 = torch.ops.aten.view.default(view_8, [2, 4]); view_8 = None select_1 = torch.ops.aten.select.int(view_9, 0, 0); view_9 = None add_2 = torch.ops.aten.add.Tensor(select_1, _unsafe_view); select_1 = _unsafe_view = add_2 = None return getitem_2 """, ) def test_reapply_views_simple(self): def f(x): tmp = torch.ones(4, 2) y = x.view(4, 2) y.add_(tmp) z = x * x return y self.assert_functionalization(f, torch.ones(4, 2), reapply_views=True) logs = self.get_logs(f, torch.ones(4, 2), reapply_views=True) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): ones = torch.ops.aten.ones.default([4, 2], device = device(type='cpu'), pin_memory 
= False) view = torch.ops.aten.view.default(arg0_1, [4, 2]) add = torch.ops.aten.add.Tensor(view, ones); view = ones = None view_1 = torch.ops.aten.view.default(add, [4, 2]); add = None view_2 = torch.ops.aten.view.default(view_1, [4, 2]) mul = torch.ops.aten.mul.Tensor(view_1, view_1); mul = None copy_ = torch.ops.aten.copy_.default(arg0_1, view_1); arg0_1 = view_1 = copy_ = None return view_2 """,
        )

    def test_aliases_maintained_after_pass_when_reapplying_views(self):
        def f(x):
            tmp = torch.ones(4, 2)
            # y and z are two views of the same base x; mutating y should be
            # visible through z if aliasing is preserved.
            y = x.view(4, 2)
            z = x.view(4, 2)
            y.add_(tmp)
            return y, z

        # NOTE(review): torch._to_functional_tensor / _enable_functionalization
        # / _sync are private APIs — keep in sync with torch internals.
        input_functional = torch._to_functional_tensor(torch.ones(4, 2))
        torch._enable_functionalization(reapply_views=True)
        try:
            y, z = f(input_functional)
            # Sync both outputs so any pending view updates are applied
            # before we unwrap and inspect them.
            torch._sync(y)
            torch._sync(z)
        finally:
            torch._disable_functionalization()

        # y and z are aliases inside of the function, and that aliasing relationship should be maintained.
        _y = torch._from_functional_tensor(y)
        _z = torch._from_functional_tensor(z)
        self.assertTrue(are_aliased(_y, _z))

    # copy_() gets its own test, because it used to be special cased in functionalization.
    # However, now it works pretty similar to other functional ops
    def test_copy_(self):
        def f(x):
            # tmp_slice is the diagonal view of tmp; copy_ and add_ both
            # mutate tmp through that view.
            tmp = torch.zeros(2, 2)
            tmp_slice = tmp.diagonal()
            y = tmp_slice.copy_(x)
            z = y.add_(x)
            return z

        # Test 1: copy_() with same dtype and shape
        # to() is a composite op that noops when the dtype/shape match, so nothing gets logged.
# self.assert_functionalization(f, torch.ones(2)) logs = self.get_logs(f, torch.ones(2)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): zeros = torch.ops.aten.zeros.default([2, 2], device = device(type='cpu'), pin_memory = False) diagonal_copy = torch.ops.aten.diagonal_copy.default(zeros) copy = torch.ops.aten.copy.default(diagonal_copy, arg0_1); diagonal_copy = None diagonal_scatter = torch.ops.aten.diagonal_scatter.default(zeros, copy); zeros = copy = None diagonal_copy_1 = torch.ops.aten.diagonal_copy.default(diagonal_scatter) add = torch.ops.aten.add.Tensor(diagonal_copy_1, arg0_1); diagonal_copy_1 = arg0_1 = None diagonal_scatter_1 = torch.ops.aten.diagonal_scatter.default(diagonal_scatter, add); diagonal_scatter = add = None diagonal_copy_2 = torch.ops.aten.diagonal_copy.default(diagonal_scatter_1); diagonal_scatter_1 = None return diagonal_copy_2 """, ) reinplaced_logs = self.get_logs( f, torch.ones(2), reapply_views=True, run_reinplace=True ) self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1): zeros = torch.ops.aten.zeros.default([2, 2], device = device(type='cpu'), pin_memory = False) diagonal = torch.ops.aten.diagonal.default(zeros) copy = torch.ops.aten.copy_.default(diagonal, arg0_1); diagonal = copy = None diagonal_1 = torch.ops.aten.diagonal.default(zeros) add = torch.ops.aten.add_.Tensor(diagonal_1, arg0_1); diagonal_1 = arg0_1 = add = None diagonal_2 = torch.ops.aten.diagonal.default(zeros); zeros = None return diagonal_2 """, ) # Test 2: copy_() with same dtype, different shape self.assert_functionalization(f, torch.ones(1)) logs = self.get_logs(f, torch.ones(1)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): zeros = torch.ops.aten.zeros.default([2, 2], device = device(type='cpu'), pin_memory = False) diagonal_copy = torch.ops.aten.diagonal_copy.default(zeros) copy = torch.ops.aten.copy.default(diagonal_copy, arg0_1); diagonal_copy = None diagonal_scatter = 
torch.ops.aten.diagonal_scatter.default(zeros, copy); zeros = copy = None diagonal_copy_1 = torch.ops.aten.diagonal_copy.default(diagonal_scatter) add = torch.ops.aten.add.Tensor(diagonal_copy_1, arg0_1); diagonal_copy_1 = arg0_1 = None diagonal_scatter_1 = torch.ops.aten.diagonal_scatter.default(diagonal_scatter, add); diagonal_scatter = add = None diagonal_copy_2 = torch.ops.aten.diagonal_copy.default(diagonal_scatter_1); diagonal_scatter_1 = None return diagonal_copy_2 """, ) reinplaced_logs = self.get_logs( f, torch.ones(1), reapply_views=True, run_reinplace=True ) self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1): zeros = torch.ops.aten.zeros.default([2, 2], device = device(type='cpu'), pin_memory = False) diagonal = torch.ops.aten.diagonal.default(zeros) copy = torch.ops.aten.copy_.default(diagonal, arg0_1); diagonal = copy = None diagonal_1 = torch.ops.aten.diagonal.default(zeros) add = torch.ops.aten.add_.Tensor(diagonal_1, arg0_1); diagonal_1 = arg0_1 = add = None diagonal_2 = torch.ops.aten.diagonal.default(zeros); zeros = None return diagonal_2 """, ) # Test 3: copy_() with different dtype, same shape self.assert_functionalization(f, torch.ones(2, dtype=torch.long)) logs = self.get_logs(f, torch.ones(2, dtype=torch.long)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): zeros = torch.ops.aten.zeros.default([2, 2], device = device(type='cpu'), pin_memory = False) diagonal_copy = torch.ops.aten.diagonal_copy.default(zeros) copy = torch.ops.aten.copy.default(diagonal_copy, arg0_1); diagonal_copy = None diagonal_scatter = torch.ops.aten.diagonal_scatter.default(zeros, copy); zeros = copy = None diagonal_copy_1 = torch.ops.aten.diagonal_copy.default(diagonal_scatter) add = torch.ops.aten.add.Tensor(diagonal_copy_1, arg0_1); diagonal_copy_1 = arg0_1 = None diagonal_scatter_1 = torch.ops.aten.diagonal_scatter.default(diagonal_scatter, add); diagonal_scatter = add = None diagonal_copy_2 = 
torch.ops.aten.diagonal_copy.default(diagonal_scatter_1); diagonal_scatter_1 = None return diagonal_copy_2 """, ) # noqa: B950 reinplaced_logs = self.get_logs( f, torch.ones(2, dtype=torch.long), reapply_views=True, run_reinplace=True ) self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1): zeros = torch.ops.aten.zeros.default([2, 2], device = device(type='cpu'), pin_memory = False) diagonal = torch.ops.aten.diagonal.default(zeros) copy = torch.ops.aten.copy_.default(diagonal, arg0_1); diagonal = copy = None diagonal_1 = torch.ops.aten.diagonal.default(zeros) add = torch.ops.aten.add_.Tensor(diagonal_1, arg0_1); diagonal_1 = arg0_1 = add = None diagonal_2 = torch.ops.aten.diagonal.default(zeros); zeros = None return diagonal_2 """, ) # noqa: B950 # Test 4: copy_() with different dtype, different shape self.assert_functionalization(f, torch.ones(1, dtype=torch.long)) logs = self.get_logs(f, torch.ones(1, dtype=torch.long)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): zeros = torch.ops.aten.zeros.default([2, 2], device = device(type='cpu'), pin_memory = False) diagonal_copy = torch.ops.aten.diagonal_copy.default(zeros) copy = torch.ops.aten.copy.default(diagonal_copy, arg0_1); diagonal_copy = None diagonal_scatter = torch.ops.aten.diagonal_scatter.default(zeros, copy); zeros = copy = None diagonal_copy_1 = torch.ops.aten.diagonal_copy.default(diagonal_scatter) add = torch.ops.aten.add.Tensor(diagonal_copy_1, arg0_1); diagonal_copy_1 = arg0_1 = None diagonal_scatter_1 = torch.ops.aten.diagonal_scatter.default(diagonal_scatter, add); diagonal_scatter = add = None diagonal_copy_2 = torch.ops.aten.diagonal_copy.default(diagonal_scatter_1); diagonal_scatter_1 = None return diagonal_copy_2 """, ) # noqa: B950 reinplaced_logs = self.get_logs( f, torch.ones(1, dtype=torch.long), reapply_views=True, run_reinplace=True ) self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1): zeros = torch.ops.aten.zeros.default([2, 
2], device = device(type='cpu'), pin_memory = False) diagonal = torch.ops.aten.diagonal.default(zeros) copy = torch.ops.aten.copy_.default(diagonal, arg0_1); diagonal = copy = None diagonal_1 = torch.ops.aten.diagonal.default(zeros) add = torch.ops.aten.add_.Tensor(diagonal_1, arg0_1); diagonal_1 = arg0_1 = add = None diagonal_2 = torch.ops.aten.diagonal.default(zeros); zeros = None return diagonal_2 """, ) # noqa: B950 def test_expand_symint(self): # Once some existing SymInt bugs are ironed out, we should update # this test to plumb FakeSymbolicTensors through it def f(x): return x.expand(x.size(0), x.size(1)) self.assert_functionalization(f, torch.ones(2, 2)) logs = self.get_logs(f, torch.ones(2, 2)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): expand_copy = torch.ops.aten.expand_copy.default(arg0_1, [2, 2]); arg0_1 = None return expand_copy """, ) def test_fill_(self): def f(x): y = x + x z = y.diagonal() z.fill_(0) return y self.assert_functionalization(f, torch.ones(2, 2)) logs = self.get_logs(f, torch.ones(2, 2)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): add = torch.ops.aten.add.Tensor(arg0_1, arg0_1); arg0_1 = None diagonal_copy = torch.ops.aten.diagonal_copy.default(add) fill = torch.ops.aten.fill.Scalar(diagonal_copy, 0); diagonal_copy = None diagonal_scatter = torch.ops.aten.diagonal_scatter.default(add, fill); add = fill = None diagonal_copy_1 = torch.ops.aten.diagonal_copy.default(diagonal_scatter); diagonal_copy_1 = None return diagonal_scatter """, ) reinplaced_logs = self.get_logs( f, torch.ones(2, 2), reapply_views=True, run_reinplace=True ) self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1): add = torch.ops.aten.add.Tensor(arg0_1, arg0_1); arg0_1 = None diagonal = torch.ops.aten.diagonal.default(add) fill = torch.ops.aten.fill_.Scalar(diagonal, 0); diagonal = fill = None diagonal_1 = torch.ops.aten.diagonal.default(add); diagonal_1 = None return add """, ) def 
test_resize_smaller(self): def f(w): # Resizing to a smaller size doesn't affect storage x = w + 1 y = x.view(4, 4) y.resize_(3, 3) y2 = y.view(-1) y2.add_(1) z = y + 1 return z self.assert_functionalization(f, torch.ones(8, 2)) logs = self.get_logs(f, torch.ones(8, 2)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): add = torch.ops.aten.add.Tensor(arg0_1, 1); arg0_1 = None view_copy = torch.ops.aten.view_copy.default(add, [4, 4]) resize = torch.ops.aten.resize.default(view_copy, [3, 3]); resize = None as_strided_copy = torch.ops.aten.as_strided_copy.default(view_copy, [3, 3], [3, 1]); view_copy = None view_copy_1 = torch.ops.aten.view_copy.default(as_strided_copy, [-1]); as_strided_copy = None add_1 = torch.ops.aten.add.Tensor(view_copy_1, 1); view_copy_1 = None view_copy_2 = torch.ops.aten.view_copy.default(add, [4, 4]); add = None as_strided_copy_1 = torch.ops.aten.as_strided_copy.default(view_copy_2, [3, 3], [3, 1]); as_strided_copy_1 = None view_copy_3 = torch.ops.aten.view_copy.default(add_1, [3, 3]); add_1 = None as_strided_scatter = torch.ops.aten.as_strided_scatter.default(view_copy_2, view_copy_3, [3, 3], [3, 1]); view_copy_2 = view_copy_3 = None view_copy_4 = torch.ops.aten.view_copy.default(as_strided_scatter, [8, 2]); as_strided_scatter = None view_copy_5 = torch.ops.aten.view_copy.default(view_copy_4, [4, 4]) as_strided_copy_2 = torch.ops.aten.as_strided_copy.default(view_copy_5, [3, 3], [3, 1]); view_copy_5 = None view_copy_6 = torch.ops.aten.view_copy.default(as_strided_copy_2, [-1]); as_strided_copy_2 = view_copy_6 = None view_copy_7 = torch.ops.aten.view_copy.default(view_copy_4, [4, 4]); view_copy_4 = None as_strided_copy_3 = torch.ops.aten.as_strided_copy.default(view_copy_7, [3, 3], [3, 1]); view_copy_7 = None add_2 = torch.ops.aten.add.Tensor(as_strided_copy_3, 1); as_strided_copy_3 = None return add_2 """, # noqa: B950 ) reinplaced_logs = self.get_logs( f, torch.ones(8, 2), reapply_views=True, run_reinplace=True ) 
self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1): add = torch.ops.aten.add.Tensor(arg0_1, 1); arg0_1 = None view = torch.ops.aten.view.default(add, [4, 4]) resize = torch.ops.aten.resize.default(view, [3, 3]); resize = None as_strided = torch.ops.aten.as_strided.default(view, [3, 3], [3, 1]); view = None view_1 = torch.ops.aten.view.default(as_strided, [-1]); as_strided = None add_1 = torch.ops.aten.add_.Tensor(view_1, 1); add_1 = None view_2 = torch.ops.aten.view.default(add, [4, 4]); add = None as_strided_1 = torch.ops.aten.as_strided.default(view_2, [3, 3], [3, 1]); as_strided_1 = None view_3 = torch.ops.aten.view.default(view_1, [3, 3]); view_1 = view_3 = None view_4 = torch.ops.aten.view.default(view_2, [8, 2]); view_2 = None view_5 = torch.ops.aten.view.default(view_4, [4, 4]) as_strided_2 = torch.ops.aten.as_strided.default(view_5, [3, 3], [3, 1]); view_5 = None view_6 = torch.ops.aten.view.default(as_strided_2, [-1]); as_strided_2 = view_6 = None view_7 = torch.ops.aten.view.default(view_4, [4, 4]); view_4 = None as_strided_3 = torch.ops.aten.as_strided.default(view_7, [3, 3], [3, 1]); view_7 = None add_2 = torch.ops.aten.add_.Tensor(as_strided_3, 1); add_2 = None return as_strided_3 """, ) def test_resize_same_size_diff_rank(self): def f(x): y = x.clone() y.resize_(25, 5) return y self.assert_functionalization(f, torch.ones(5, 5, 5)) def test_resize_larger_valid(self): def f(x): y = x + 1 # resizing a tensor to a larger size is only currently allowed # if the tensor-to-resize is not a view / has no outstanding views. # See Note [resize_() in functionalization pass] y.resize_(5, 5) y2 = y.view(25) # Do a mutation to ensure that aliases of the output of resize_() # propagate mutations correctly. 
# I'm using fill_ specifically because I want to guarantee that # none of the output has uninitialized memory at the end # (since these tests compare the data output against a reference impl) y2.fill_(1) out = y + 1 return y, out self.assert_functionalization(f, torch.ones(8, 2)) logs = self.get_logs(f, torch.ones(8, 2)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): add = torch.ops.aten.add.Tensor(arg0_1, 1); arg0_1 = None resize = torch.ops.aten.resize.default(add, [5, 5]); add = None view_copy = torch.ops.aten.view_copy.default(resize, [25]); resize = None fill = torch.ops.aten.fill.Scalar(view_copy, 1); view_copy = None view_copy_1 = torch.ops.aten.view_copy.default(fill, [5, 5]); fill = None view_copy_2 = torch.ops.aten.view_copy.default(view_copy_1, [25]); view_copy_2 = None add_1 = torch.ops.aten.add.Tensor(view_copy_1, 1) return (view_copy_1, add_1) """, ) reinplaced_logs = self.get_logs( f, torch.ones(8, 2), reapply_views=True, run_reinplace=True ) self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1): add = torch.ops.aten.add.Tensor(arg0_1, 1); arg0_1 = None resize = torch.ops.aten.resize_.default(add, [5, 5]); resize = None view = torch.ops.aten.view.default(add, [25]); add = None fill = torch.ops.aten.fill_.Scalar(view, 1); fill = None view_1 = torch.ops.aten.view.default(view, [5, 5]); view = None view_2 = torch.ops.aten.view.default(view_1, [25]); view_2 = None add_1 = torch.ops.aten.add.Tensor(view_1, 1) return (view_1, add_1) """, ) def test_resize_larger_invalid(self): def f(x): y = x + 1 z = y.view(4, 4) # resizing a tensor to a larger size is only currently allowed # if the tensor-to-resize is not a view / has no outstanding views. # See Note [resize_() in functionalization pass] # This should fail z.resize_(5, 5) z2 = z.view(25) z2.fill_(1) out = z + 1 return y, out with self.assertRaisesRegex( RuntimeError, r"Attempted to resize a view tensor to a larger size. 
This is not allowed in the functionalization pass", ): self.assert_functionalization(f, torch.ones(8, 2)) def test_nested_functions_propagate_updates(self): def g(x): # Create a view of x y = x[0] y.add_(1) # The view, y, gets deallocated at the end of this function def f(x): # Calling g(x) should mutate x g(x) # We expect x to be synced here, even though the alias created in g() has been deallocated! y = x + x return y self.assert_functionalization(f, torch.ones(2, 2)) def test_mixed_wrappers_valid(self): def f(x, y): z = x + y z.add_(1) return z x1_not_functional = LoggingTensor(torch.ones(4)) x2_functional = torch._to_functional_tensor(LoggingTensor(torch.ones(4))) with capture_logs() as logs: y = f(x1_not_functional, x2_functional) # Make sure that functionalization ran the "+" kernel # with a functional + non-functional tensor, and wrapped the output appropriately. self.assertExpectedInline( "\n".join(logs), """\ $2: f32[4] = torch._ops.aten.add.Tensor($0, $1) $3: f32[4] = torch._ops.aten.add.Tensor($2, 1)""", ) def test_mixed_wrappers_invalid(self): x1_not_functional = torch.ones(4) x2_functional = torch._to_functional_tensor(torch.ones(4)) # When dealing with mixed functional + non functional tensors, # normal_tensor.add_(functional_tensor) is not valid # because normal_tensor would need to be "promoted" to a functional tensor. 
with self.assertRaises(RuntimeError): x1_not_functional.add_(x2_functional) def test_index_mutation_on_non_input(self): def f(x): tmp = torch.zeros(10) tmp[5].fill_(1) return tmp self.assert_functionalization(f, torch.ones(2)) logs = self.get_logs(f, torch.ones(2)) self.assertExpectedInline( logs, """\ def forward(self, arg0_1): zeros = torch.ops.aten.zeros.default([10], device = device(type='cpu'), pin_memory = False) select_copy = torch.ops.aten.select_copy.int(zeros, 0, 5) fill = torch.ops.aten.fill.Scalar(select_copy, 1); select_copy = None select_scatter = torch.ops.aten.select_scatter.default(zeros, fill, 0, 5); zeros = fill = None select_copy_1 = torch.ops.aten.select_copy.int(select_scatter, 0, 5); select_copy_1 = None return select_scatter """, ) # noqa: B950 reinplaced_logs = self.get_logs( f, torch.ones(2), reapply_views=True, run_reinplace=True ) self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1): zeros = torch.ops.aten.zeros.default([10], device = device(type='cpu'), pin_memory = False) select = torch.ops.aten.select.int(zeros, 0, 5) fill = torch.ops.aten.fill_.Scalar(select, 1); select = fill = None select_1 = torch.ops.aten.select.int(zeros, 0, 5); select_1 = None return zeros """, ) def test_instance_norm(self): size = 100 def f(x, running_mean, running_var): with enable_python_dispatcher(): return torch.instance_norm( x, None, None, running_mean, running_var, use_input_stats=True, momentum=0.1, eps=1e-5, cudnn_enabled=False, ) self.assert_functionalization( f, torch.randn(20, size, 35, 45), torch.zeros(size), torch.ones(size) ) # On Windows, for instance_norm, the alias_copy's are reordered to come right before they need to be used # whereas on other platforms, the alias_copy's are before the view_copy's. # e.g., the alias_copy after the getitem_4 assignment would be moved to be right before the copy assignment. 
if not IS_WINDOWS: logs = self.get_logs( f, torch.randn(20, size, 35, 45), torch.zeros(size), torch.ones(size) ) self.assertExpectedInline( logs, """\ def forward(self, arg0_1, arg1_1, arg2_1): repeat = torch.ops.aten.repeat.default(arg1_1, [20]) repeat_1 = torch.ops.aten.repeat.default(arg2_1, [20]) view_copy = torch.ops.aten.view_copy.default(arg0_1, [1, 2000, 35, 45]); arg0_1 = None empty = torch.ops.aten.empty.memory_format([0], dtype = torch.uint8, layout = torch.strided, device = device(type='cpu')); empty = None _native_batch_norm_legit_functional = torch.ops.aten._native_batch_norm_legit_functional.default(view_copy, None, None, repeat, repeat_1, True, 0.1, 1e-05); view_copy = repeat = repeat_1 = None getitem = _native_batch_norm_legit_functional[0] getitem_1 = _native_batch_norm_legit_functional[1]; getitem_1 = None getitem_2 = _native_batch_norm_legit_functional[2]; getitem_2 = None getitem_3 = _native_batch_norm_legit_functional[3] getitem_4 = _native_batch_norm_legit_functional[4]; _native_batch_norm_legit_functional = None alias_copy = torch.ops.aten.alias_copy.default(arg1_1) view_copy_1 = torch.ops.aten.view_copy.default(getitem_3, [20, 100]); view_copy_1 = None view_copy_2 = torch.ops.aten.view_copy.default(getitem_3, [20, 100]); getitem_3 = None mean = torch.ops.aten.mean.dim(view_copy_2, [0]); view_copy_2 = None copy = torch.ops.aten.copy.default(alias_copy, mean); alias_copy = mean = None alias_copy_1 = torch.ops.aten.alias_copy.default(copy); copy = None alias_copy_2 = torch.ops.aten.alias_copy.default(alias_copy_1); alias_copy_2 = None alias_copy_3 = torch.ops.aten.alias_copy.default(arg2_1) view_copy_3 = torch.ops.aten.view_copy.default(getitem_4, [20, 100]); view_copy_3 = None view_copy_4 = torch.ops.aten.view_copy.default(getitem_4, [20, 100]); getitem_4 = None mean_1 = torch.ops.aten.mean.dim(view_copy_4, [0]); view_copy_4 = None copy_1 = torch.ops.aten.copy.default(alias_copy_3, mean_1); alias_copy_3 = mean_1 = None alias_copy_4 = 
torch.ops.aten.alias_copy.default(copy_1); copy_1 = None alias_copy_5 = torch.ops.aten.alias_copy.default(alias_copy_4); alias_copy_5 = None view_copy_5 = torch.ops.aten.view_copy.default(getitem, [20, 100, 35, 45]); getitem = None copy_ = torch.ops.aten.copy_.default(arg1_1, alias_copy_1); arg1_1 = alias_copy_1 = copy_ = None copy__1 = torch.ops.aten.copy_.default(arg2_1, alias_copy_4); arg2_1 = alias_copy_4 = copy__1 = None return view_copy_5 """, # noqa: B950 ) reinplaced_logs = self.get_logs( f, torch.randn(20, size, 35, 45), torch.zeros(size), torch.ones(size), reapply_views=True, run_reinplace=True, ) self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1, arg1_1, arg2_1): repeat = torch.ops.aten.repeat.default(arg1_1, [20]) repeat_1 = torch.ops.aten.repeat.default(arg2_1, [20]) view = torch.ops.aten.view.default(arg0_1, [1, 2000, 35, 45]); arg0_1 = None empty = torch.ops.aten.empty.memory_format([0], dtype = torch.uint8, layout = torch.strided, device = device(type='cpu')); empty = None _native_batch_norm_legit_functional = torch.ops.aten._native_batch_norm_legit_functional.default(view, None, None, repeat, repeat_1, True, 0.1, 1e-05); view = repeat = repeat_1 = None getitem = _native_batch_norm_legit_functional[0] getitem_1 = _native_batch_norm_legit_functional[1]; getitem_1 = None getitem_2 = _native_batch_norm_legit_functional[2]; getitem_2 = None getitem_3 = _native_batch_norm_legit_functional[3] getitem_4 = _native_batch_norm_legit_functional[4]; _native_batch_norm_legit_functional = None alias = torch.ops.aten.alias.default(arg1_1) view_1 = torch.ops.aten.view.default(getitem_3, [20, 100]); view_1 = None view_2 = torch.ops.aten.view.default(getitem_3, [20, 100]); getitem_3 = None mean = torch.ops.aten.mean.dim(view_2, [0]); view_2 = None copy = torch.ops.aten.copy.default(alias, mean); alias = mean = None alias_1 = torch.ops.aten.alias.default(copy); copy = None alias_2 = torch.ops.aten.alias.default(alias_1); alias_2 = None alias_3 
= torch.ops.aten.alias.default(arg2_1) view_3 = torch.ops.aten.view.default(getitem_4, [20, 100]); view_3 = None view_4 = torch.ops.aten.view.default(getitem_4, [20, 100]); getitem_4 = None mean_1 = torch.ops.aten.mean.dim(view_4, [0]); view_4 = None copy_1 = torch.ops.aten.copy.default(alias_3, mean_1); alias_3 = mean_1 = None alias_4 = torch.ops.aten.alias.default(copy_1); copy_1 = None alias_5 = torch.ops.aten.alias.default(alias_4); alias_5 = None view_5 = torch.ops.aten.view.default(getitem, [20, 100, 35, 45]); getitem = None copy_ = torch.ops.aten.copy_.default(arg1_1, alias_1); arg1_1 = alias_1 = copy_ = None copy__1 = torch.ops.aten.copy_.default(arg2_1, alias_4); arg2_1 = alias_4 = copy__1 = None return view_5 """, # noqa: B950 ) def test_mutation_overlapping_mem(self): def fn(x): # x: (1, 5) t1 = torch.add(x, x) t2 = t1.unfold(1, 3, 2) t3 = t2.abs_() return t3 with self.assertRaisesRegex( RuntimeError, r"encountered a tensor being mutated that has internal overlap", ): x = torch.ones(1, 5) out = _functionalize(fn, reapply_views=True, crossref=False)(x) def test_batch_norm(self): def f(x, running_mean, running_var): with enable_python_dispatcher(): return torch.batch_norm( x, None, None, running_mean, running_var, True, 0.1, 1e-5, False ) self.assert_functionalization( f, torch.randn(20, 100, 35, 45), torch.zeros(100), torch.ones(100) ) logs = self.get_logs( f, torch.randn(20, 100, 35, 45), torch.zeros(100), torch.ones(100) ) self.assertExpectedInline( logs, """\ def forward(self, arg0_1, arg1_1, arg2_1): empty = torch.ops.aten.empty.memory_format([0], dtype = torch.uint8, layout = torch.strided, device = device(type='cpu')); empty = None _native_batch_norm_legit_functional = torch.ops.aten._native_batch_norm_legit_functional.default(arg0_1, None, None, arg1_1, arg2_1, True, 0.1, 1e-05); arg0_1 = None getitem = _native_batch_norm_legit_functional[0] getitem_1 = _native_batch_norm_legit_functional[1]; getitem_1 = None getitem_2 = 
_native_batch_norm_legit_functional[2]; getitem_2 = None getitem_3 = _native_batch_norm_legit_functional[3] getitem_4 = _native_batch_norm_legit_functional[4]; _native_batch_norm_legit_functional = None copy_ = torch.ops.aten.copy_.default(arg1_1, getitem_3); arg1_1 = getitem_3 = copy_ = None copy__1 = torch.ops.aten.copy_.default(arg2_1, getitem_4); arg2_1 = getitem_4 = copy__1 = None return getitem """, # noqa: B950 ) reinplaced_logs = self.get_logs( f, torch.randn(20, 100, 35, 45), torch.zeros(100), torch.ones(100), reapply_views=True, run_reinplace=True, ) self.assertExpectedInline( reinplaced_logs, """\ def forward(self, arg0_1, arg1_1, arg2_1): empty = torch.ops.aten.empty.memory_format([0], dtype = torch.uint8, layout = torch.strided, device = device(type='cpu')); empty = None _native_batch_norm_legit_functional = torch.ops.aten._native_batch_norm_legit_functional.default(arg0_1, None, None, arg1_1, arg2_1, True, 0.1, 1e-05); arg0_1 = None getitem = _native_batch_norm_legit_functional[0] getitem_1 = _native_batch_norm_legit_functional[1]; getitem_1 = None getitem_2 = _native_batch_norm_legit_functional[2]; getitem_2 = None getitem_3 = _native_batch_norm_legit_functional[3] getitem_4 = _native_batch_norm_legit_functional[4]; _native_batch_norm_legit_functional = None copy_ = torch.ops.aten.copy_.default(arg1_1, getitem_3); arg1_1 = getitem_3 = copy_ = None copy__1 = torch.ops.aten.copy_.default(arg2_1, getitem_4); arg2_1 = getitem_4 = copy__1 = None return getitem """, # noqa: B950 ) # This tests our python shims around C++ Functionalization: FunctionalTensor and FunctionalTensorMode def test_python_functionalization(self): def f(x): x_view = x.view(-1) x.mul_(2) return x_view + 1 def f_functionalized(x): # Note [Disabling Functionalize TLS Above Python Functionalization] # This UX is pretty annoying (although python functionalization's main customer is AOTAutograd, # and is not really advertised as a user API). 
# We need to explicitly disable functionalization when using python FunctionalTensor and FunctionalTensorMode. # Why? FunctionalTensor is a wrapper tensor that holds an inner FunctionalTensorWrapper. # Since the inner tensor has `DispatchKey.Functionalize` in its keyset, then by default, # our FunctionalTensor will inherit the same keyset. # We don't have an easy way of directly mutating a tensor's keyset from python, # so globally disabling functionalization here is easier. maybe_disable = torch._C._ExcludeDispatchKeyGuard( torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize) ) with maybe_disable, FunctionalTensorMode(): x_wrapped = FunctionalTensor.to_functional(x) out_wrapped = f(x_wrapped) out_unwrapped = out_wrapped.elem torch._sync(out_unwrapped) return torch._from_functional_tensor(out_unwrapped) # Make a non-leaf x = torch.randn(2, requires_grad=True) + 1 fx_g = make_fx(f_functionalized)(x) # NB: view_1 below is expected (though unused) due to view replay. AOTAutograd runs a # DCE pass that will remove nodes like this later on. 
self.assertExpectedInline( fx_g.code.strip(), """\ def forward(self, x_1): view = torch.ops.aten.view.default(x_1, [-1]); view = None mul = torch.ops.aten.mul.Tensor(x_1, 2); x_1 = None view_1 = torch.ops.aten.view.default(mul, [-1]); view_1 = None view_2 = torch.ops.aten.view.default(mul, [-1]); mul = None add = torch.ops.aten.add.Tensor(view_2, 1); view_2 = None return add""", ) def test_python_functionalization_zero_tensor(self): def f(x): y = torch.ops.aten._efficientzerotensor([4]) out = x + y out.mul_(2) return out x = torch.randn(4) out_ref = f(x) out_test = dispatch_functionalize(f)(x) out_test_cpp = _functionalize( f, reapply_views=True, crossref=False, skip_input_mutations=True )(x) self.assertEqual(out_ref, out_test) self.assertEqual(out_ref, out_test_cpp) fx_g = make_fx(dispatch_functionalize(f))(x) fx_g_cpp = make_fx( _functionalize( f, reapply_views=True, crossref=False, skip_input_mutations=True ) )(x) self.assertEqual(fx_g_cpp.code.strip(), fx_g.code.strip()) def test_python_functionalization_is_conj(self): def f(x): out = x.conj() return out, out.is_conj() x = torch.randn(4, dtype=torch.complex64) out_ref = f(x) out_test = dispatch_functionalize(f)(x) out_test_cpp = _functionalize(f, reapply_views=True, crossref=False)(x) self.assertEqual(out_ref[0], out_test[0]) self.assertEqual(out_ref[1], out_test[1]) self.assertEqual(out_ref[0], out_test_cpp[0]) self.assertEqual(out_ref[1], out_test_cpp[1]) def test_python_functionalization_is_neg(self): def f(x): out = x.neg() return out, out.is_neg() x = torch.randn(4, dtype=torch.complex64) out_ref = f(x) out_test = dispatch_functionalize(f)(x) out_test_cpp = _functionalize(f, reapply_views=True, crossref=False)(x) self.assertEqual(out_ref[0], out_test[0]) self.assertEqual(out_ref[1], out_test[1]) self.assertEqual(out_ref[0], out_test_cpp[0]) self.assertEqual(out_ref[1], out_test_cpp[1]) def test_python_functionalization_conj(self): def f(x): y = x.clone().conj() y.mul_(2) return 
torch.view_as_real(y.resolve_conj()) x = torch.randn(4, dtype=torch.complex64) out_ref = f(x) out_test = dispatch_functionalize(f)(x) out_test_cpp = _functionalize( f, reapply_views=True, crossref=False, skip_input_mutations=True )(x) self.assertEqual(out_ref, out_test) self.assertEqual(out_test, out_test_cpp) fx_g = make_fx(dispatch_functionalize(f))(x) fx_g_cpp = make_fx( _functionalize( f, reapply_views=True, crossref=False, skip_input_mutations=True ) )(x) self.assertExpectedInline( fx_g.code.strip(), """\ def forward(self, arg0_1): clone = torch.ops.aten.clone.default(arg0_1); arg0_1 = None _conj = torch.ops.aten._conj.default(clone); clone = None clone_1 = torch.ops.aten.clone.default(_conj) mul = torch.ops.aten.mul.Tensor(clone_1, 2); clone_1 = None clone_2 = torch.ops.aten.clone.default(_conj); _conj = None copy = torch.ops.aten.copy.default(clone_2, mul); clone_2 = mul = None _conj_1 = torch.ops.aten._conj.default(copy); copy = None _conj_2 = torch.ops.aten._conj.default(_conj_1); _conj_1 = None clone_3 = torch.ops.aten.clone.default(_conj_2); _conj_2 = None view_as_real = torch.ops.aten.view_as_real.default(clone_3); clone_3 = None return view_as_real""", ) self.assertEqual(fx_g_cpp.code.strip(), fx_g.code.strip()) def test_python_functionalization_neg(self): def f(x): y = x._neg_view() z = y.resolve_neg() return z + 1 x = torch.randn(4) out_ref = f(x) out_test = dispatch_functionalize(f)(x) out_test_cpp = _functionalize( f, reapply_views=True, crossref=False, skip_input_mutations=True )(x) self.assertEqual(out_ref, out_test) self.assertEqual(out_ref, out_test_cpp) fx_g = make_fx(dispatch_functionalize(f))(x) fx_g_cpp = make_fx( _functionalize( f, reapply_views=True, crossref=False, skip_input_mutations=True ) )(x) self.assertExpectedInline( fx_g.code.strip(), """\ def forward(self, arg0_1): _neg_view = torch.ops.aten._neg_view.default(arg0_1); arg0_1 = None clone = torch.ops.aten.clone.default(_neg_view); _neg_view = None add = 
torch.ops.aten.add.Tensor(clone, 1); clone = None return add""", ) self.assertEqual(fx_g_cpp.code.strip(), fx_g.code.strip()) def test_python_functionalization_lift_fresh_storage(self): unlifted = torch.tensor([0.0]) maybe_disable = torch._C._ExcludeDispatchKeyGuard( torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize) ) with maybe_disable, FunctionalTensorMode(): lifted = torch.ops.aten.lift_fresh.default(unlifted) self.assertNotEqual(unlifted.untyped_storage(), lifted.untyped_storage()) def test_python_functionalization_lift_fresh(self): def f(x): tmp = torch.tensor([0.0]) return tmp + x x = torch.randn(4) out_ref = f(x) out_test = dispatch_functionalize(f)(x) out_test_cpp = _functionalize( f, reapply_views=True, crossref=False, skip_input_mutations=True )(x) self.assertEqual(out_ref, out_test) self.assertEqual(out_ref, out_test_cpp) fx_g = make_fx(dispatch_functionalize(f))(x) fx_g_cpp = make_fx( _functionalize( f, reapply_views=True, crossref=False, skip_input_mutations=True ) )(x) self.assertExpectedInline( fx_g.code.strip(), """\ def forward(self, arg0_1): _tensor_constant0 = self._tensor_constant0 lift_fresh_copy = torch.ops.aten.lift_fresh_copy.default(_tensor_constant0); _tensor_constant0 = None add = torch.ops.aten.add.Tensor(lift_fresh_copy, arg0_1); lift_fresh_copy = arg0_1 = None return add""", ) self.assertEqual(fx_g_cpp.code.strip(), fx_g.code.strip()) @xfail_inherited_tests( [ "test_as_strided", "test_copy_", "test_diagonal", "test_diagonal_mutated_input", "test_everything", "test_fill_", "test_slice", "test_split", "test_split_with_sizes", "test_unbind", "test_view_clone_view_inplace", "test_view_inplace", ] ) @unittest.skipIf( TEST_WITH_TORCHDYNAMO, "dynamo-ing code with proxy + fake doesn't work well" )
TestFunctionalization
python
getsentry__sentry
tests/sentry/db/postgres/schema/safe_migrations/integration/test_migrations.py
{ "start": 3326, "end": 3771 }
class ____(BaseSafeMigrationTest): app = "bad_flow_add_column_with_notnull_app" migrate_from = "0001_initial" migrate_to = "0002_add_field_notnull" def test(self) -> None: with pytest.raises( UnsafeOperationException, match="Adding TestTable.field as a not null column with no default is unsafe. Provide a default using db_default", ): self.run_migration()
AddColWithNotNullTest
python
pypa__warehouse
warehouse/utils/zipfiles.py
{ "start": 628, "end": 15509 }
class ____(Exception): """Internal exception used by this module""" def _seek_check(fp: typing.IO[bytes], amt: int, /) -> None: """Call seek and check that the seeked amount is correct. Returns True if the seeked amount is less than what is expected. """ if amt < 0: # pragma: no cover raise InvalidZipFileError("Negative offset") fp.seek(amt, os.SEEK_CUR) def _read_check(fp: typing.IO[bytes], amt: int, /) -> bytes: """Read and assert there was enough data available.""" if amt < 0: # pragma: no cover raise InvalidZipFileError("Negative offset") data = fp.read(amt) if len(data) != amt: raise InvalidZipFileError("Malformed zip file") return data def _contains_unprintable_chars(value: bytes) -> bool: return any(ch in UNPRINTABLE_CHARS for ch in value) def _handle_local_file_header( fp: typing.IO[bytes], zipfile_files_and_sizes: dict[str, int] ) -> bytes: """ Parses the body of a Local File header. Returns the contained filename field of the record. See section 4.3.7 of APPNOTE.TXT. """ data = _read_check(fp, 26) gpbf, compress_method, compressed_size, filename_size, extra_size = struct.unpack( "<xxHHxxxxxxxxLxxxxHH", data ) filename = _read_check(fp, filename_size) extra = _read_check(fp, extra_size) if _contains_unprintable_chars(filename): raise InvalidZipFileError("Invalid character in filename") # Search for the ZIP64 extension in extras. seen_extra_ids = set() while extra: if len(extra) < 4: raise InvalidZipFileError("Malformed zip file") extra_id, extra_data_size = struct.unpack("<HH", extra[:4]) if extra_data_size + 4 > len(extra): raise InvalidZipFileError("Malformed zip file") if extra_id in seen_extra_ids and extra_id in DISALLOW_DUPLICATE_EXTRA_IDS: raise InvalidZipFileError("Invalid duplicate extra in local file") seen_extra_ids.add(extra_id) if extra_id == 0x0001: # ZIP64 extras must be one of these lengths. 
if extra_data_size not in (0, 8, 16, 24, 28): raise InvalidZipFileError("Malformed zip file") # This is a ZIP64 archive, but the file size is # less than 0xFFFFFFFF, so we use the compressed # size from the record itself. if extra_data_size == 0: if compressed_size == 0xFFFFFFFF: raise InvalidZipFileError("Malformed zip file") # We only have uncompressed size, so we have to # double-check that we're NOT using compression # so we know that compressed and uncompressed # data sizes are the same. elif extra_data_size == 8: if compress_method != 0x0000: # "STORE" method raise InvalidZipFileError("Malformed zip file") # Use uncompressed size, the first field in the extra data. (compressed_size,) = struct.unpack("<Q", extra[4:12]) else: # We receive an explicit compressed ZIP64 size. # This is the second field in the extra data. (compressed_size,) = struct.unpack("<Q", extra[12:20]) elif extra_id == 0x7075: # Info ZIP Unicode Path Extra layout # 0x7075 2 bytes # TSize 2 bytes # Version 1 byte # NameCRC32 4 bytes # UnicodeName TSize - 5 unicode_name = extra[9 : 4 + extra_data_size] if _contains_unprintable_chars(unicode_name): raise InvalidZipFileError("Invalid character in filename") try: unicode_name.decode("utf-8") except UnicodeError: raise InvalidZipFileError("Filename not valid UTF-8") extra = extra[extra_data_size + 4 :] # If the local file is using streaming mode then # use the compression size from central directory. 
has_data_descriptor = gpbf & 0x08 if has_data_descriptor: raise InvalidZipFileError("ZIP contains a data descriptor") try: filename_as_str = filename.decode("utf-8") if zipfile_files_and_sizes[filename_as_str] != compressed_size: raise InvalidZipFileError("Mis-matched data size") except UnicodeError: raise InvalidZipFileError("Filename not unicode") except KeyError: raise InvalidZipFileError("Filename not in central directory") _seek_check(fp, compressed_size) return filename def _handle_central_directory_header(fp: typing.IO[bytes]) -> tuple[bytes, bytes]: """ Parses the body of a Central Directory (CD) header. Returns the contained filename field of the record. See section 4.3.12 of APPNOTE.TXT. """ data = _read_check(fp, 42) compressed_size, filename_size, extra_size, comment_size, offset = struct.unpack( "<xxxxxxxxxxxxxxxxLxxxxHHHxxxxxxxxL", data ) if comment_size != 0: raise InvalidZipFileError("Comment in central directory") filename = _read_check(fp, filename_size) extra = _read_check(fp, extra_size) if _contains_unprintable_chars(filename): raise InvalidZipFileError("Invalid character in filename") return filename, extra def _handle_eocd(fp: typing.IO[bytes]) -> tuple[int, int, int]: """ Parses the body of an End of Central Directory (EOCD) record. See section 4.3.16 of APPNOTE.TXT. """ data = _read_check(fp, 18) ( cd_records_on_disk, cd_records, cd_size, cd_offset, comment_size, ) = struct.unpack("<xxxxHHLLH", data) if cd_records_on_disk != cd_records: raise InvalidZipFileError("Malformed zip file") _seek_check(fp, comment_size) return cd_records, cd_size, cd_offset def _handle_eocd64(fp: typing.IO[bytes]) -> tuple[int, int, int]: """ Parses the body of an ZIP64 End of Central Directory (EOCD64) record. See section 4.3.14 of APPNOTE.TXT. 
""" data = _read_check(fp, 52) (eocd64_size, cd_records_on_disk, cd_records, cd_size, cd_offset) = struct.unpack( "<QxxxxxxxxxxxxQQQQ", data ) if cd_records_on_disk != cd_records: raise InvalidZipFileError("Malformed zip file") _seek_check(fp, eocd64_size - 44) return cd_records, cd_size, cd_offset def _handle_eocd64_locator(fp: typing.IO[bytes]) -> int: """ Parses the body of an ZIP64 End of Central Directory Locator record. See section 4.3.15 of APPNOTE.TXT. """ data = _read_check(fp, 16) (eocd64_offset,) = struct.unpack("<xxxxQxxxx", data) return eocd64_offset def validate_zipfile(zip_filepath: str) -> tuple[bool, str | None]: """ Validates that a ZIP file would parse the same through a ZIP implementation that checks the Central Directory and an implementation that streams Local File headers without checking the Central Directory (CD). This is done mostly by ensuring there are no duplicate or mismatched files between Local Files and CD. Implemented using the ZIP standard (APPNOTE.TXT): https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT """ # Process the zipfile through Python's # zipfile processor, the same used by # pip and other Python installers. try: zfp = zipfile.ZipFile(zip_filepath, mode="r") # Store compression sizes from the CD for use later. zipfile_files = {zfi.orig_filename: zfi.compress_size for zfi in zfp.filelist} except zipfile.BadZipfile as e: return False, e.args[0] with open(zip_filepath, mode="rb") as fp: # Track filenames that have been seen in # Local File and Central Directory headers # to avoid duplicates or missing entries. local_filenames = set() cd_filenames = set() # These variables enforce the requirements # of EOCD for ZIP64. ZIP64 has its own EOCD # record, but that record may be followed by # a EOCD64 Locator and/or a '0xFF'-filled # non-ZIP64 EOCD record. expected_eocd64_offset = None actual_eocd64_offset = None # Track the number of CD records # and their sizes. 
cd_records = 0 cd_offset = None cd_size = 0 # Values from EOCD or EOCD64. eocd_cd_records = None eocd_cd_offset = None eocd_cd_size = None while True: try: signature = _read_check(fp, 4) # Only accept EOCD after an EOCD64 if we've # seen the EOCD64 Locator first. if ( signature == RECORD_SIG_EOCD and expected_eocd64_offset is not None and actual_eocd64_offset is None ): return False, "Malformed zip file" # Only accept a single EOCD64 Locator after EOCD64. if signature == RECORD_SIG_EOCD64_LOCATOR and ( expected_eocd64_offset is None or actual_eocd64_offset is not None ): return False, "Malformed zip file" # If we've seen an EOCD64 record then we only # accept an EOCD64 Locator or an EOCD. if ( signature not in (RECORD_SIG_EOCD64_LOCATOR, RECORD_SIG_EOCD) and expected_eocd64_offset is not None ): return False, "Malformed zip file" # Central Directory File Header if signature == RECORD_SIG_CENTRAL_DIRECTORY: # Record the first CD record we find as # the start of the central directory. if cd_offset is None: cd_offset = fp.tell() - 4 cd_records += 1 filename, extra = _handle_central_directory_header(fp) cd_size += 46 + len(filename) + len(extra) if filename in cd_filenames: raise InvalidZipFileError( "Duplicate filename in central directory" ) if filename not in local_filenames: raise InvalidZipFileError("Missing filename in local headers") cd_filenames.add(filename) # Local File Header elif signature == RECORD_SIG_LOCAL_FILE: filename = _handle_local_file_header(fp, zipfile_files) if filename in local_filenames: raise InvalidZipFileError("Duplicate filename in local headers") local_filenames.add(filename) # End of Central Directory elif signature == RECORD_SIG_EOCD: # If the ZIP is empty then we expect # to see zero CD entries. if cd_offset is None: cd_offset = fp.tell() - 4 # If this archive is ZIP64 we use the values # from the EOCD64 values, otherwise use EOCD values. 
if actual_eocd64_offset is not None and eocd_cd_offset is not None: _handle_eocd(fp) else: eocd_cd_records, eocd_cd_size, eocd_cd_offset = _handle_eocd(fp) if eocd_cd_records != cd_records: raise InvalidZipFileError( "Mismatched central directory records" ) if cd_offset is None or eocd_cd_offset != cd_offset: raise InvalidZipFileError("Mismatched central directory offset") # This branch is tough to cover, as CPython's ZIP archive # implementation already doesn't like mismatches between size # and offset of the CD. if cd_size is None or eocd_cd_size != cd_size: # pragma: no cover raise InvalidZipFileError("Mismatched central directory size") break # This always means the end of a ZIP. # End of Central Directory (ZIP64) elif signature == RECORD_SIG_EOCD64: # We cross-check this value if # we see EOCD64 Locator later. # -4 because we just read signature bytes. expected_eocd64_offset = fp.tell() - 4 eocd_cd_records, eocd_cd_size, eocd_cd_offset = _handle_eocd64(fp) # End of Central Directory (ZIP64) Locator elif signature == RECORD_SIG_EOCD64_LOCATOR: actual_eocd64_offset = _handle_eocd64_locator(fp) # Cross-check the offset specified in the EOCD64 Locator # record with the one we ourselves recorded earlier. if ( expected_eocd64_offset is None or expected_eocd64_offset != actual_eocd64_offset ): return False, "Mis-matched EOCD64 record and locator offset" # Note that there are other record types, # but I didn't find any on PyPI, and they don't # seem relevant to Python packaging use-case # ie: encrypted ZIP files. So maybe we want # to reject these anyway? else: return False, "Unknown record signature" except InvalidZipFileError as e: return False, e.args[0] # Defensive, this shouldn't be possible in regular operation. if cd_filenames != local_filenames: # pragma: no cover return False, "Mis-matched local headers and central directory" # Detect whether there is trailing data # after the end of the zip file. 
# This can indicate ZIP files that are # concatenated together. cur = fp.tell() fp.seek(0, os.SEEK_END) if cur != fp.tell(): return False, "Trailing data" return True, None def main(argv) -> int: # pragma: no cover if len(argv) != 1: print("Usage: python -m warehouse.utils.zipfiles <ZIP path>") return 1 zip_filepath = argv[0] zip_filename = os.path.basename(zip_filepath) ok, error = validate_zipfile(zip_filepath) if ok: print(f"{zip_filename}: OK") else: print(f"{zip_filename}: {error}") return 0 if ok else 1 if __name__ == "__main__": # pragma: no cover sys.exit(main(sys.argv[1:]))
InvalidZipFileError
python
giampaolo__psutil
tests/test_testutils.py
{ "start": 14214, "end": 14406 }
class ____(PsutilTestCase): def test_is_namedtuple(self): assert is_namedtuple(collections.namedtuple('foo', 'a b c')(1, 2, 3)) assert not is_namedtuple(tuple())
TestOtherUtils
python
huggingface__transformers
src/transformers/models/conditional_detr/modeling_conditional_detr.py
{ "start": 46285, "end": 51190 }
class ____(ConditionalDetrPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`ConditionalDetrEncoderLayer`]. The encoder updates the flattened feature map through multiple self-attention layers. Small tweak for ConditionalDETR: - object_queries are added to the forward pass. Args: config: ConditionalDetrConfig """ def __init__(self, config: ConditionalDetrConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop self.layers = nn.ModuleList([ConditionalDetrEncoderLayer(config) for _ in range(config.encoder_layers)]) # in the original ConditionalDETR, no layernorm is used at the end of the encoder, as "normalize_before" is set to False by default # Initialize weights and apply final processing self.post_init() def forward( self, inputs_embeds=None, attention_mask=None, object_queries=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Flattened feature map (output of the backbone + projection layer) that is passed to the encoder. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`: - 1 for pixel features that are real (i.e. **not masked**), - 0 for pixel features that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Object queries that are added to the queries in each self-attention layer. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = inputs_embeds hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # expand attention_mask if attention_mask is not None: # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: # skip the layer to_drop = True if to_drop: layer_outputs = (None, None) else: # we add object_queries as extra input to the encoder_layer layer_outputs = encoder_layer( hidden_states, attention_mask, object_queries=object_queries, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions )
ConditionalDetrEncoder
python
rq__rq
tests/test_scheduler.py
{ "start": 19146, "end": 23642 }
class ____(RQTestCase): def test_enqueue_at(self): """queue.enqueue_at() puts job in the scheduled""" queue = Queue(connection=self.connection) registry = ScheduledJobRegistry(queue=queue) scheduler = RQScheduler([queue], connection=self.connection) scheduler.acquire_locks() # Jobs created using enqueue_at is put in the ScheduledJobRegistry job = queue.enqueue_at(datetime(2019, 1, 1, tzinfo=timezone.utc), say_hello) self.assertEqual(len(queue), 0) self.assertEqual(len(registry), 1) # enqueue_at set job status to "scheduled" self.assertEqual(job.get_status(), 'scheduled') # After enqueue_scheduled_jobs() is called, the registry is empty # and job is enqueued scheduler.enqueue_scheduled_jobs() self.assertEqual(len(queue), 1) self.assertEqual(len(registry), 0) def test_enqueue_at_at_front(self): """queue.enqueue_at() accepts at_front argument. When true, job will be put at position 0 of the queue when the time comes for the job to be scheduled""" queue = Queue(connection=self.connection) registry = ScheduledJobRegistry(queue=queue) scheduler = RQScheduler([queue], connection=self.connection) scheduler.acquire_locks() # Jobs created using enqueue_at is put in the ScheduledJobRegistry # job_first should be enqueued first job_first = queue.enqueue_at(datetime(2019, 1, 1, tzinfo=timezone.utc), say_hello) # job_second will be enqueued second, but "at_front" job_second = queue.enqueue_at(datetime(2019, 1, 2, tzinfo=timezone.utc), say_hello, at_front=True) self.assertEqual(len(queue), 0) self.assertEqual(len(registry), 2) # enqueue_at set job status to "scheduled" self.assertEqual(job_first.get_status(), 'scheduled') self.assertEqual(job_second.get_status(), 'scheduled') # After enqueue_scheduled_jobs() is called, the registry is empty # and job is enqueued scheduler.enqueue_scheduled_jobs() self.assertEqual(len(queue), 2) self.assertEqual(len(registry), 0) self.assertEqual(0, queue.get_job_position(job_second.id)) self.assertEqual(1, queue.get_job_position(job_first.id)) 
def test_enqueue_in(self): """queue.enqueue_in() schedules job correctly""" queue = Queue(connection=self.connection) registry = ScheduledJobRegistry(queue=queue) job = queue.enqueue_in(timedelta(seconds=30), say_hello) now = datetime.now(timezone.utc) scheduled_time = registry.get_scheduled_time(job) # Ensure that job is scheduled roughly 30 seconds from now self.assertTrue(now + timedelta(seconds=28) < scheduled_time < now + timedelta(seconds=32)) def test_enqueue_in_with_retry(self): """Ensure that the retry parameter is passed to the enqueue_at function from enqueue_in. """ queue = Queue(connection=self.connection) job = queue.enqueue_in(timedelta(seconds=30), say_hello, retry=Retry(3, [2])) self.assertEqual(job.retries_left, 3) self.assertEqual(job.retry_intervals, [2]) def test_custom_connection_pool(self): """Connection pool customizing. Ensure that we can properly set a custom connection pool class and pass extra arguments""" custom_conn = redis.Redis( connection_pool=redis.ConnectionPool( connection_class=CustomRedisConnection, db=4, custom_arg='foo', ) ) queue = Queue(connection=custom_conn) scheduler = RQScheduler([queue], connection=custom_conn) scheduler_connection = scheduler.connection.connection_pool.get_connection('info') self.assertEqual(scheduler_connection.__class__, CustomRedisConnection) self.assertEqual(scheduler_connection.get_custom_arg(), 'foo') def test_no_custom_connection_pool(self): """Connection pool customizing must not interfere if we're using a standard connection (non-pooled)""" standard_conn = redis.Redis(db=5) queue = Queue(connection=standard_conn) scheduler = RQScheduler([queue], connection=standard_conn) scheduler_connection = scheduler.connection.connection_pool.get_connection('info') self.assertEqual(scheduler_connection.__class__, redis.Connection)
TestQueue
python
numpy__numpy
numpy/distutils/tests/test_ccompiler_opt_conf.py
{ "start": 5862, "end": 6347 }
class ____(unittest.TestCase): def __init__(self, methodName="runTest"): unittest.TestCase.__init__(self, methodName) self._setup() def _setup(self): FakeCCompilerOpt.conf_nocache = True def test_features(self): for arch, compilers in arch_compilers.items(): for cc in compilers: FakeCCompilerOpt.fake_info = (arch, cc, "") _TestConfFeatures() if is_standalone: unittest.main()
TestConfFeatures
python
PrefectHQ__prefect
src/prefect/client/schemas/actions.py
{ "start": 10315, "end": 13663 }
class ____(ActionBaseModel): """Data used by the Prefect REST API to update a deployment.""" @model_validator(mode="before") @classmethod def remove_old_fields(cls, values: dict[str, Any]) -> dict[str, Any]: return remove_old_deployment_fields(values) version: Optional[str] = Field(default=None) version_info: Optional[objects.VersionInfo] = Field( default=None, description="Version information for the deployment." ) description: Optional[str] = Field(default=None) parameters: Optional[dict[str, Any]] = Field( default=None, description="Parameters for flow runs scheduled by the deployment.", ) paused: Optional[bool] = Field( default=None, description="Whether or not the deployment is paused." ) schedules: Optional[list[DeploymentScheduleUpdate]] = Field( default=None, description="A list of schedules for the deployment.", ) concurrency_limit: Optional[int] = Field( default=None, description="The concurrency limit for the deployment.", ) concurrency_options: Optional[objects.ConcurrencyOptions] = Field( default=None, description="The concurrency options for the deployment.", ) tags: list[str] = Field(default_factory=list) work_queue_name: Optional[str] = Field(default=None) work_pool_name: Optional[str] = Field( default=None, description="The name of the deployment's work pool.", examples=["my-work-pool"], ) path: Optional[str] = Field(default=None) job_variables: Optional[dict[str, Any]] = Field( default_factory=dict, description="Overrides to apply to flow run infrastructure at runtime.", ) entrypoint: Optional[str] = Field(default=None) storage_document_id: Optional[UUID] = Field(default=None) infrastructure_document_id: Optional[UUID] = Field(default=None) enforce_parameter_schema: Optional[bool] = Field( default=None, description=( "Whether or not the deployment should enforce the parameter schema." 
), ) parameter_openapi_schema: Optional[ParameterSchema] = Field( default_factory=lambda: {"type": "object", "properties": {}} ) pull_steps: Optional[list[dict[str, Any]]] = Field(default=None) def check_valid_configuration(self, base_job_template: dict[str, Any]) -> None: """Check that the combination of base_job_template defaults and job_variables conforms to the specified schema. """ variables_schema = deepcopy(base_job_template.get("variables")) if variables_schema is not None: # jsonschema considers required fields, even if that field has a default, # to still be required. To get around this we remove the fields from # required if there is a default present. required = variables_schema.get("required") properties = variables_schema.get("properties") if required is not None and properties is not None: for k, v in properties.items(): if "default" in v and k in required: required.remove(k) if variables_schema is not None: jsonschema.validate(self.job_variables, variables_schema)
DeploymentUpdate
python
PrefectHQ__prefect
src/prefect/server/schemas/actions.py
{ "start": 22216, "end": 24407 }
class ____(ActionBaseModel): """Data used by the Prefect REST API to create a flow run from a deployment.""" # FlowRunCreate states must be provided as StateCreate objects state: Optional[StateCreate] = Field( default=None, description="The state of the flow run to create" ) name: str = Field( default_factory=lambda: generate_slug(2), description=( "The name of the flow run. Defaults to a random slug if not specified." ), examples=["my-flow-run"], ) parameters: Dict[str, Any] = Field( default_factory=dict, json_schema_extra={"additionalProperties": True}, ) enforce_parameter_schema: Optional[bool] = Field( default=None, description="Whether or not to enforce the parameter schema on this run.", ) context: Dict[str, Any] = Field(default_factory=dict) infrastructure_document_id: Optional[UUID] = Field(None) empirical_policy: schemas.core.FlowRunPolicy = Field( default_factory=schemas.core.FlowRunPolicy, description="The empirical policy for the flow run.", ) tags: List[str] = Field( default_factory=list, description="A list of tags for the flow run.", examples=[["tag-1", "tag-2"]], ) idempotency_key: Optional[str] = Field( None, description=( "An optional idempotency key. If a flow run with the same idempotency key" " has already been created, the existing flow run will be returned." ), ) labels: Union[KeyValueLabels, None] = Field( None, description="A dictionary of key-value labels. Values can be strings, numbers, or booleans.", examples=[{"key": "value1", "key2": 42}], ) parent_task_run_id: Optional[UUID] = Field(None) work_queue_name: Optional[str] = Field(None) job_variables: Optional[Dict[str, Any]] = Field( default_factory=dict, json_schema_extra={"additionalProperties": True}, ) @field_validator("name", mode="before") @classmethod def set_name(cls, name: str) -> str: return get_or_create_run_name(name)
DeploymentFlowRunCreate
python
ethereum__web3.py
web3/contract/contract.py
{ "start": 17715, "end": 19533 }
class ____(BaseContractCaller): # mypy types w3: "Web3" def __init__( self, abi: ABI, w3: "Web3", address: ChecksumAddress, transaction: TxParams | None = None, block_identifier: BlockIdentifier = None, ccip_read_enabled: bool | None = None, decode_tuples: bool | None = False, contract_functions: ContractFunctions | None = None, ) -> None: super().__init__(abi, w3, address, decode_tuples=decode_tuples) if self.abi: if transaction is None: transaction = {} if contract_functions is None: contract_functions = ContractFunctions( abi, w3, address=address, decode_tuples=decode_tuples ) self._functions = contract_functions._functions for fn in contract_functions.__iter__(): caller_method = partial( self.call_function, fn, transaction=transaction, block_identifier=block_identifier, ccip_read_enabled=ccip_read_enabled, ) setattr(self, str(fn.abi_element_identifier), caller_method) def __call__( self, transaction: TxParams | None = None, block_identifier: BlockIdentifier = None, ccip_read_enabled: bool | None = None, ) -> "ContractCaller": if transaction is None: transaction = {} return type(self)( self.abi, self.w3, self.address, transaction=transaction, block_identifier=block_identifier, ccip_read_enabled=ccip_read_enabled, decode_tuples=self.decode_tuples, )
ContractCaller
python
mlflow__mlflow
mlflow/transformers/flavor_config.py
{ "start": 534, "end": 9115 }
class ____: TASK = "task" INSTANCE_TYPE = "instance_type" TORCH_DTYPE = "torch_dtype" FRAMEWORK = "framework" MODEL = "model" MODEL_TYPE = "pipeline_model_type" MODEL_BINARY = "model_binary" MODEL_NAME = "source_model_name" MODEL_REVISION = "source_model_revision" PEFT = "peft_adaptor" COMPONENTS = "components" COMPONENT_NAME = "{}_name" # e.g. tokenizer_name COMPONENT_REVISION = "{}_revision" COMPONENT_TYPE = "{}_type" TOKENIZER = "tokenizer" FEATURE_EXTRACTOR = "feature_extractor" IMAGE_PROCESSOR = "image_processor" PROCESSOR = "processor" PROCESSOR_TYPE = "processor_type" PROMPT_TEMPLATE = "prompt_template" def build_flavor_config( pipeline: transformers.Pipeline, processor=None, torch_dtype=None, save_pretrained=True ) -> dict[str, Any]: """ Generates the base flavor metadata needed for reconstructing a pipeline from saved components. This is important because the ``Pipeline`` class does not have a loader functionality. The serialization of a Pipeline saves the model, configurations, and metadata for ``FeatureExtractor``s, ``Processor``s, and ``Tokenizer``s exclusively. This function extracts key information from the submitted model object so that the precise instance types can be loaded correctly. Args: pipeline: Transformer pipeline to generate the flavor configuration for. processor: Optional processor instance to save alongside the pipeline. torch_dtype: Torch tensor data type. save_pretrained: Whether to save the pipeline and components weights to local disk. Returns: A dictionary containing the flavor configuration for the pipeline and its components, i.e. the configurations stored in "transformers" key in the MLModel YAML file. 
""" flavor_conf = _generate_base_config(pipeline, torch_dtype=torch_dtype) if is_peft_model(pipeline.model): flavor_conf[FlavorKey.PEFT] = _PEFT_ADAPTOR_DIR_NAME model = get_peft_base_model(pipeline.model) else: model = pipeline.model flavor_conf.update(_get_model_config(model, save_pretrained)) components = _get_components_from_pipeline(pipeline, processor) for key, instance in components.items(): # Some components don't have name_or_path, then we fallback to the one from the model. flavor_conf.update( _get_component_config(instance, key, save_pretrained, default_repo=model.name_or_path) ) # "components" field doesn't include processor components.pop(FlavorKey.PROCESSOR, None) flavor_conf[FlavorKey.COMPONENTS] = list(components.keys()) return flavor_conf def _generate_base_config(pipeline, torch_dtype=None): flavor_conf = { FlavorKey.TASK: pipeline.task, FlavorKey.INSTANCE_TYPE: _get_instance_type(pipeline), } if framework := getattr(pipeline, "framework", None): flavor_conf[FlavorKey.FRAMEWORK] = framework # User-provided torch_dtype takes precedence if torch_dtype := (torch_dtype or _extract_torch_dtype_if_set(pipeline)): flavor_conf[FlavorKey.TORCH_DTYPE] = str(torch_dtype) return flavor_conf def _get_model_config(model, save_pretrained=True): conf = { FlavorKey.MODEL_TYPE: _get_instance_type(model), FlavorKey.MODEL_NAME: model.name_or_path, } if save_pretrained: # log local path to model binary file from mlflow.transformers.model_io import _MODEL_BINARY_FILE_NAME conf[FlavorKey.MODEL_BINARY] = _MODEL_BINARY_FILE_NAME else: # log HuggingFace repo name and commit hash conf[FlavorKey.MODEL_REVISION] = get_latest_commit_for_repo(model.name_or_path) return conf def _get_component_config( component: Any, key: str, save_pretrained: bool = True, default_repo: str | None = None, commit_sha: str | None = None, ): conf = {FlavorKey.COMPONENT_TYPE.format(key): _get_instance_type(component)} # Log source repo name and commit sha for the component if not save_pretrained: 
repo = getattr(component, "name_or_path", default_repo) revision = commit_sha or get_latest_commit_for_repo(repo) conf[FlavorKey.COMPONENT_NAME.format(key)] = repo conf[FlavorKey.COMPONENT_REVISION.format(key)] = revision return conf def _get_components_from_pipeline(pipeline, processor=None): supported_component_names = [ FlavorKey.FEATURE_EXTRACTOR, FlavorKey.TOKENIZER, FlavorKey.IMAGE_PROCESSOR, ] components = {} for name in supported_component_names: if instance := getattr(pipeline, name, None): components[name] = instance if processor: components[FlavorKey.PROCESSOR] = processor return components def _get_instance_type(obj): """ Utility for extracting the saved object type or, if the `base` argument is set to `True`, the base ABC type of the model. """ return obj.__class__.__name__ def build_flavor_config_from_local_checkpoint( local_checkpoint_dir: str, task: str, processor=None, torch_dtype=None, ) -> dict[str, Any]: """ Generates the flavor metadata from a Hugging Face model repository ID e.g. "meta-llama/Meta-Llama-3.1-405B, instead of the pipeline instance in-memory. """ from transformers import AutoTokenizer, pipelines from transformers.utils import is_torch_available from mlflow.transformers.model_io import _MODEL_BINARY_FILE_NAME config_path = os.path.join(local_checkpoint_dir, "config.json") if not os.path.exists(config_path): raise MlflowException( f"The provided directory {local_checkpoint_dir} does not contain a config.json file." 
"Please ensure that the directory contains a valid transformers model checkpoint.", error_code=INVALID_PARAMETER_VALUE, ) with open(config_path) as f: config = json.load(f) task_metadata = pipelines.check_task(task) pipeline_class = task_metadata[1]["impl"].__name__ flavor_conf = { FlavorKey.TASK: task, FlavorKey.INSTANCE_TYPE: pipeline_class, FlavorKey.FRAMEWORK: "pt" if is_torch_available() else "tf", FlavorKey.TORCH_DTYPE: str(torch_dtype) if torch_dtype else None, FlavorKey.MODEL_TYPE: config["architectures"][0], FlavorKey.MODEL_NAME: local_checkpoint_dir, FlavorKey.MODEL_BINARY: _MODEL_BINARY_FILE_NAME, } components = {FlavorKey.TOKENIZER} try: tokenizer = AutoTokenizer.from_pretrained(local_checkpoint_dir) except OSError as e: raise MlflowException( f"Error loading tokenizer from {local_checkpoint_dir}. When logging a " "Transformers model from a local checkpoint, please make sure that the " "checkpoint directory contains a valid tokenizer configuration as well.", error_code=INVALID_PARAMETER_VALUE, ) from e tokenizer_conf = _get_component_config(tokenizer, FlavorKey.TOKENIZER) flavor_conf.update(tokenizer_conf) if processor: flavor_conf.update(_get_component_config(processor, FlavorKey.PROCESSOR)) flavor_conf[FlavorKey.COMPONENTS] = list(components) return flavor_conf def update_flavor_conf_to_persist_pretrained_model( original_flavor_conf: dict[str, Any], ) -> dict[str, Any]: """ Updates the flavor configuration that was saved with save_pretrained=False to the one that includes the local path to the model binary file. 
""" flavor_conf = original_flavor_conf.copy() # Replace model commit path with local path if FlavorKey.MODEL_BINARY in original_flavor_conf: raise MlflowException( "It appears that the pretrained model weight is already saved to the artifact path.", error_code=ALREADY_EXISTS, ) from mlflow.transformers.model_io import _MODEL_BINARY_FILE_NAME flavor_conf[FlavorKey.MODEL_BINARY] = _MODEL_BINARY_FILE_NAME flavor_conf.pop(FlavorKey.MODEL_REVISION, None) # Remove component repo name and commit hash components = original_flavor_conf.get(FlavorKey.COMPONENTS, []) if FlavorKey.PROCESSOR_TYPE in original_flavor_conf: components.append(FlavorKey.PROCESSOR) for component in components: flavor_conf.pop(FlavorKey.COMPONENT_NAME.format(component), None) flavor_conf.pop(FlavorKey.COMPONENT_REVISION.format(component), None) return flavor_conf
FlavorKey
python
ansible__ansible
test/units/_internal/templating/test_datatag.py
{ "start": 695, "end": 5022 }
class ____(_TestDatatagTarget): later = t.cast(t.Self, Later(locals(), _TestDatatagTarget)) lazy_serializable_types: t.Annotated[ list[type[c.Collection]], ParamDesc(["lazy_type"]) ] = list( t.cast(type[c.Collection], known_type) for known_type in AnsibleSerializable._known_type_map.values() if issubclass(known_type, _AnsibleLazyTemplateMixin) ) serializable_instances: t.Annotated[list[object], ParamDesc(["non_lazy_value"])] taggable_container_instances: t.Annotated[list[c.Collection], ParamDesc(["non_lazy_value"])] = _TestDatatagTarget.taggable_container_instances taggable_instances: t.Annotated[list[object], ParamDesc(["non_lazy_value"])] = t.cast(list[object], taggable_container_instances) @classmethod def post_init(cls, **kwargs): cls.serializable_types = t.cast(list[type[AnsibleSerializable]], cls.lazy_serializable_types) cls.serializable_instances = [(obj, ) for obj in cls.taggable_container_instances] @classmethod def container_test_parameters(cls, test_case: ContainerTestCase) -> tuple[t.Any, t.Optional[type], type]: """ Return container test parameters for the given test case. Called during each test run to create the test value on-demand. 
""" # pylint: disable=unidiomatic-typecheck candidates = [instance for instance in cls.taggable_container_instances if type(instance) is test_case.type_under_test.__mro__[2]] assert len(candidates) == 1 value = candidates[0] value = _AnsibleLazyTemplateMixin._try_create(value) return create_container_test_parameters(test_case, value) @classmethod def container_test_cases(cls) -> t.Annotated[list[tuple[t.Any, type]], ParamDesc(["non_lazy_value", "type_under_test"])]: # type: ignore[override] # for each lazy_serializable type, find exactly one matching taggable container instance # create the lazy type from the instance out_values = [] for type_under_test in cls.lazy_serializable_types: candidates = [instance for instance in cls.taggable_container_instances if type(instance) is type_under_test.__mro__[2]] # pylint: disable=unidiomatic-typecheck assert len(candidates) == 1 out_values.append((candidates[0], type_under_test)) return out_values @pytest.fixture(name="value_type", params=["as_value", "as_generator"]) def generator_or_no(self, request, type_under_test: type) -> type | None: return type_under_test if request.param == "as_generator" else None @pytest.fixture(name="value") def lazy_value(self, non_lazy_value, request: pytest.FixtureRequest, template_context): value_type = None # DTFIX-FUTURE: get from request when needed, can't easily add to the static fixture list- the generator cases are not being tested if value_type: if isinstance(non_lazy_value, c.Mapping): generator = ((k, v) for k, v in non_lazy_value.items()) else: generator = (item for item in non_lazy_value) value = generator else: value = _AnsibleLazyTemplateMixin._try_create(non_lazy_value) # any tests not marked `pytest.mark.allow_delazify` will supply a lazy with its _templar removed and assert that it's still empty afterward allow_delazify = any(request.node.iter_markers('allow_delazify')) if isinstance(value, _AnsibleLazyTemplateMixin) and not allow_delazify: assert value._templar # supply a 
non-functional, but non-None templar, forcing an error if lazy behavior is triggered during tagging value._templar = object() # type: ignore[assignment] yield value # yield to the test; we'll validate later # LazyAccessTuple can't template, so we can't induce this failure if isinstance(value, _AnsibleLazyTemplateMixin) and not allow_delazify and not isinstance(value, _AnsibleLazyAccessTuple): with pytest.raises(AttributeError): # verify using the templar fails by using a method which relies on it (to ensure our templar hack above worked) t.cast(AnsibleTaggedObject, value)._native_copy()
TestDatatagTemplar
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_axis26.py
{ "start": 315, "end": 1475 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("chart_axis26.xlsx") self.ignore_elements = {"xl/charts/chart1.xml": ["<a:defRPr"]} def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({"type": "line"}) chart.axis_ids = [108315392, 108329216] data = [ [1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15], ] chart.set_x_axis({"num_font": {"rotation": 45}}) worksheet.write_column("A1", data[0]) worksheet.write_column("B1", data[1]) worksheet.write_column("C1", data[2]) chart.add_series({"values": "=Sheet1!$A$1:$A$5"}) chart.add_series({"values": "=Sheet1!$B$1:$B$5"}) chart.add_series({"values": "=Sheet1!$C$1:$C$5"}) worksheet.insert_chart("E9", chart) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
lepture__authlib
authlib/jose/rfc7516/models.py
{ "start": 3769, "end": 4381 }
class ____(dict): """Header object for JWE. Combines protected header, shared unprotected header and specific recipient's unprotected header together. """ def __init__(self, protected, unprotected, header): obj = {} if unprotected: obj.update(unprotected) if header: obj.update(header) if protected: obj.update(protected) super().__init__(obj) self.protected = protected if protected else {} self.unprotected = unprotected if unprotected else {} self.header = header if header else {}
JWEHeader
python
palantir__python-language-server
test/plugins/test_symbols.py
{ "start": 396, "end": 2838 }
class ____: def __init__(self): x = 2 self.y = x def main(x): y = 2 * x return y """ def helper_check_symbols_all_scope(symbols): # All eight symbols (import sys, a, B, __init__, x, y, main, y) assert len(symbols) == 8 def sym(name): return [s for s in symbols if s['name'] == name][0] # Check we have some sane mappings to VSCode constants assert sym('a')['kind'] == SymbolKind.Variable assert sym('B')['kind'] == SymbolKind.Class assert sym('__init__')['kind'] == SymbolKind.Method assert sym('main')['kind'] == SymbolKind.Function # Not going to get too in-depth here else we're just testing Jedi assert sym('a')['location']['range']['start'] == {'line': 2, 'character': 0} def test_symbols(config, workspace): doc = Document(DOC_URI, workspace, DOC) config.update({'plugins': {'jedi_symbols': {'all_scopes': False}}}) symbols = pyls_document_symbols(config, doc) # All four symbols (import sys, a, B, main) # y is not in the root scope, it shouldn't be returned assert len(symbols) == 5 def sym(name): return [s for s in symbols if s['name'] == name][0] # Check we have some sane mappings to VSCode constants assert sym('a')['kind'] == SymbolKind.Variable assert sym('B')['kind'] == SymbolKind.Class assert sym('main')['kind'] == SymbolKind.Function # Not going to get too in-depth here else we're just testing Jedi assert sym('a')['location']['range']['start'] == {'line': 2, 'character': 0} # Ensure that the symbol range spans the whole definition assert sym('main')['location']['range']['start'] == {'line': 9, 'character': 0} assert sym('main')['location']['range']['end'] == {'line': 12, 'character': 0} def test_symbols_all_scopes(config, workspace): doc = Document(DOC_URI, workspace, DOC) symbols = pyls_document_symbols(config, doc) helper_check_symbols_all_scope(symbols) @pytest.mark.skipif(PY2 or not LINUX or not CI, reason="tested on linux and python 3 only") def test_symbols_all_scopes_with_jedi_environment(workspace): doc = Document(DOC_URI, workspace, DOC) # Update config 
extra environment env_path = '/tmp/pyenv/bin/python' settings = {'pyls': {'plugins': {'jedi': {'environment': env_path}}}} doc.update_config(settings) symbols = pyls_document_symbols(doc._config, doc) helper_check_symbols_all_scope(symbols)
B
python
qdrant__qdrant-client
qdrant_client/http/models/models.py
{ "start": 3966, "end": 4107 }
class ____(str, Enum): ONE_BIT = "one_bit" TWO_BITS = "two_bits" ONE_AND_HALF_BITS = "one_and_half_bits"
BinaryQuantizationEncoding
python
kamyu104__LeetCode-Solutions
Python/number-of-unique-categories.py
{ "start": 68, "end": 158 }
class ____: def haveSameCategory(self, a, b): pass # brute force
CategoryHandler
python
automl__auto-sklearn
test/test_pipeline/components/classification/test_lda.py
{ "start": 163, "end": 769 }
class ____(BaseClassificationComponentTest): __test__ = True res = dict() res["default_iris"] = 1.0 res["default_iris_iterative"] = -1 res["default_iris_proba"] = 0.5614481896257509 res["default_iris_sparse"] = -1 res["default_digits"] = 0.88585306618093507 res["default_digits_iterative"] = -1 res["default_digits_binary"] = 0.9811778992106861 res["default_digits_multilabel"] = 0.82204896441795205 res["default_digits_multilabel_proba"] = 0.9833070018235553 sk_mod = sklearn.discriminant_analysis.LinearDiscriminantAnalysis module = LDA
LDAComponentTest
python
huggingface__transformers
src/transformers/models/luke/modeling_luke.py
{ "start": 88467, "end": 96420 }
class ____(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.luke = LukeModel(config) self.dropout = nn.Dropout( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, LukeMultipleChoiceModelOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. 
[What are position IDs?](../glossary#position-ids) entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`): Indices of entity tokens in the entity vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*): Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`: - 1 for entity tokens that are **not masked**, - 0 for entity tokens that are **masked**. entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*): Segment token indices to indicate first and second portions of the entity token inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *portion A* entity token, - 1 corresponds to a *portion B* entity token. entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*): Indices of positions of each input entity in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) entity_ids = entity_ids.view(-1, entity_ids.size(-1)) if entity_ids is not None else None entity_attention_mask = ( entity_attention_mask.view(-1, entity_attention_mask.size(-1)) if entity_attention_mask is not None else None ) entity_token_type_ids = ( entity_token_type_ids.view(-1, entity_token_type_ids.size(-1)) if entity_token_type_ids is not None else None ) entity_position_ids = ( entity_position_ids.view(-1, entity_position_ids.size(-2), entity_position_ids.size(-1)) if entity_position_ids is not None else None ) outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) pooled_output = outputs.pooler_output pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: # move labels to correct device labels = labels.to(reshaped_logits.device) loss_fct = CrossEntropyLoss() loss = 
loss_fct(reshaped_logits, labels) if not return_dict: return tuple( v for v in [ loss, reshaped_logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions, ] if v is not None ) return LukeMultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) __all__ = [ "LukeForEntityClassification", "LukeForEntityPairClassification", "LukeForEntitySpanClassification", "LukeForMultipleChoice", "LukeForQuestionAnswering", "LukeForSequenceClassification", "LukeForTokenClassification", "LukeForMaskedLM", "LukeModel", "LukePreTrainedModel", ]
LukeForMultipleChoice
python
kamyu104__LeetCode-Solutions
Python/maximum-length-of-repeated-subarray.py
{ "start": 2273, "end": 3080 }
class ____(object): def findLength(self, A, B): """ :type A: List[int] :type B: List[int] :rtype: int """ if len(A) > len(B): return self.findLength(B, A) def check(length): lookup = set(A[i:i+length] \ for i in xrange(len(A)-length+1)) return any(B[j:j+length] in lookup \ for j in xrange(len(B)-length+1)) A = ''.join(map(chr, A)) B = ''.join(map(chr, B)) left, right = 0, min(len(A), len(B)) + 1 while left < right: mid = left + (right-left)/2 if not check(mid): # find the min idx such that check(idx) == false right = mid else: left = mid+1 return left-1
Solution3
python
huggingface__transformers
src/transformers/models/vivit/modeling_vivit.py
{ "start": 14870, "end": 15644 }
class ____(PreTrainedModel): config: VivitConfig base_model_prefix = "vivit" main_input_name = "pixel_values" input_modalities = "video" supports_gradient_checkpointing = True _no_split_modules = [] _supports_sdpa = True _supports_flash_attn = True _supports_flex_attn = True _supports_attention_backend = True _can_record_outputs = { "hidden_states": VivitLayer, "attentions": VivitSelfAttention, } @torch.no_grad() def _init_weights(self, module): """Initialize the weights""" super()._init_weights(module) if isinstance(module, VivitEmbeddings): init.zeros_(module.cls_token) init.zeros_(module.position_embeddings) @auto_docstring
VivitPreTrainedModel
python
astropy__astropy
astropy/modeling/tests/test_quantities_evaluation.py
{ "start": 2993, "end": 12460 }
class ____: def setup_method(self, method): self.model = MyTestModel() def test_evaluate(self): # We should be able to evaluate with anything assert_quantity_allclose(self.model(3, 5), 15) assert_quantity_allclose(self.model(4 * u.m, 5), 20 * u.m) assert_quantity_allclose(self.model(3 * u.deg, 5), 15 * u.deg) def test_input_units(self): self.model._input_units = {"x": u.deg} assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg) assert_quantity_allclose(self.model(4 * u.rad, 2), 8 * u.rad) assert_quantity_allclose(self.model(4 * u.rad, 2 * u.s), 8 * u.rad * u.s) with pytest.raises(UnitsError, match=MESSAGE.format("MyTestModel", "s", "deg")): self.model(4 * u.s, 3) with pytest.raises(UnitsError, match=MESSAGE.format("MyTestModel", "", "deg")): self.model(3, 3) def test_input_units_allow_dimensionless(self): self.model._input_units = {"x": u.deg} self.model._input_units_allow_dimensionless = True assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg) assert_quantity_allclose(self.model(4 * u.rad, 2), 8 * u.rad) with pytest.raises(UnitsError, match=MESSAGE.format("MyTestModel", "s", "deg")): self.model(4 * u.s, 3) assert_quantity_allclose(self.model(3, 3), 9) def test_input_units_strict(self): self.model._input_units = {"x": u.deg} self.model._input_units_strict = True assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg) result = self.model(np.pi * u.rad, 2) assert_quantity_allclose(result, 360 * u.deg) assert result.unit is u.deg def test_input_units_equivalencies(self): self.model._input_units = {"x": u.micron} with pytest.raises( UnitsError, match=MESSAGE.format("MyTestModel", "PHz", "micron") ): self.model(3 * u.PHz, 3) self.model.input_units_equivalencies = {"x": u.spectral()} assert_quantity_allclose( self.model(3 * u.PHz, 3), 3 * (3 * u.PHz).to(u.micron, equivalencies=u.spectral()), ) def test_return_units(self): self.model._input_units = {"z": u.deg} self.model._return_units = {"z": u.rad} result = self.model(3 * u.deg, 4) 
assert_quantity_allclose(result, 12 * u.deg) assert result.unit is u.rad def test_return_units_scalar(self): # Check that return_units also works when giving a single unit since # there is only one output, so is unambiguous. self.model._input_units = {"x": u.deg} self.model._return_units = u.rad result = self.model(3 * u.deg, 4) assert_quantity_allclose(result, 12 * u.deg) assert result.unit is u.rad def test_and_input_units(): """ Test units to first model in chain. """ s1 = Shift(10 * u.deg) s2 = Shift(10 * u.deg) cs = s1 & s2 out = cs(10 * u.arcsecond, 20 * u.arcsecond) assert_quantity_allclose(out[0], 10 * u.deg + 10 * u.arcsec) assert_quantity_allclose(out[1], 10 * u.deg + 20 * u.arcsec) def test_plus_input_units(): """ Test units to first model in chain. """ s1 = Shift(10 * u.deg) s2 = Shift(10 * u.deg) cs = s1 + s2 out = cs(10 * u.arcsecond) assert_quantity_allclose(out, 20 * u.deg + 20 * u.arcsec) def test_compound_input_units(): """ Test units to first model in chain. """ s1 = Shift(10 * u.deg) s2 = Shift(10 * u.deg) cs = s1 | s2 out = cs(10 * u.arcsecond) assert_quantity_allclose(out, 20 * u.deg + 10 * u.arcsec) def test_compound_input_units_fail(): """ Test incompatible units to first model in chain. """ s1 = Shift(10 * u.deg) s2 = Shift(10 * u.deg) cs = s1 | s2 with pytest.raises(UnitsError, match=MESSAGE.format("Shift", "pix", "deg")): cs(10 * u.pix) def test_compound_incompatible_units_fail(): """ Test incompatible model units in chain. """ s1 = Shift(10 * u.pix) s2 = Shift(10 * u.deg) cs = s1 | s2 with pytest.raises(UnitsError, match=MESSAGE.format("Shift", "pix", "deg")): cs(10 * u.pix) def test_compound_pipe_equiv_call(): """ Check that equivalencies work when passed to evaluate, for a chained model (which has one input). 
""" s1 = Shift(10 * u.deg) s2 = Shift(10 * u.deg) cs = s1 | s2 out = cs(10 * u.pix, equivalencies={"x": u.pixel_scale(0.5 * u.deg / u.pix)}) assert_quantity_allclose(out, 25 * u.deg) def test_compound_and_equiv_call(): """ Check that equivalencies work when passed to evaluate, for a composite model with two inputs. """ s1 = Shift(10 * u.deg) s2 = Shift(10 * u.deg) cs = s1 & s2 out = cs( 10 * u.pix, 10 * u.pix, equivalencies={ "x0": u.pixel_scale(0.5 * u.deg / u.pix), "x1": u.pixel_scale(0.5 * u.deg / u.pix), }, ) assert_quantity_allclose(out[0], 15 * u.deg) assert_quantity_allclose(out[1], 15 * u.deg) def test_compound_input_units_equivalencies(): """ Test setting input_units_equivalencies on one of the models. """ s1 = Shift(10 * u.deg) s1.input_units_equivalencies = {"x": u.pixel_scale(0.5 * u.deg / u.pix)} s2 = Shift(10 * u.deg) sp = Shift(10 * u.pix) cs = s1 | s2 assert cs.input_units_equivalencies == {"x": u.pixel_scale(0.5 * u.deg / u.pix)} out = cs(10 * u.pix) assert_quantity_allclose(out, 25 * u.deg) cs = sp | s1 assert cs.input_units_equivalencies is None out = cs(10 * u.pix) assert_quantity_allclose(out, 20 * u.deg) cs = s1 & s2 assert cs.input_units_equivalencies == {"x0": u.pixel_scale(0.5 * u.deg / u.pix)} cs = cs.rename("TestModel") out = cs(20 * u.pix, 10 * u.deg) assert_quantity_allclose(out, 20 * u.deg) with pytest.raises(UnitsError, match=MESSAGE.format("Shift", "pix", "deg")): out = cs(20 * u.pix, 10 * u.pix) def test_compound_input_units_strict(): """ Test setting input_units_strict on one of the models. 
""" class ScaleDegrees(Scale): input_units = {"x": u.deg} s1 = ScaleDegrees(2) s2 = Scale(2) cs = s1 | s2 out = cs(10 * u.arcsec) assert_quantity_allclose(out, 40 * u.arcsec) assert out.unit is u.deg # important since this tests input_units_strict cs = s2 | s1 out = cs(10 * u.arcsec) assert_quantity_allclose(out, 40 * u.arcsec) assert out.unit is u.deg # important since this tests input_units_strict cs = s1 & s2 out = cs(10 * u.arcsec, 10 * u.arcsec) assert_quantity_allclose(out, 20 * u.arcsec) assert out[0].unit is u.deg assert out[1].unit is u.arcsec def test_compound_input_units_allow_dimensionless(): """ Test setting input_units_allow_dimensionless on one of the models. """ class ScaleDegrees(Scale): input_units = {"x": u.deg} s1 = ScaleDegrees(2) s1._input_units_allow_dimensionless = True s2 = Scale(2) cs = s1 | s2 cs = cs.rename("TestModel") out = cs(10) assert_quantity_allclose(out, 40 * u.one) out = cs(10 * u.arcsec) assert_quantity_allclose(out, 40 * u.arcsec) with pytest.raises(UnitsError, match=MESSAGE.format("ScaleDegrees", "m", "deg")): out = cs(10 * u.m) s1._input_units_allow_dimensionless = False cs = s1 | s2 cs = cs.rename("TestModel") with pytest.raises(UnitsError, match=MESSAGE.format("ScaleDegrees", "", "deg")): out = cs(10) s1._input_units_allow_dimensionless = True cs = s2 | s1 cs = cs.rename("TestModel") out = cs(10) assert_quantity_allclose(out, 40 * u.one) out = cs(10 * u.arcsec) assert_quantity_allclose(out, 40 * u.arcsec) with pytest.raises(UnitsError, match=MESSAGE.format("ScaleDegrees", "m", "deg")): out = cs(10 * u.m) s1._input_units_allow_dimensionless = False cs = s2 | s1 with pytest.raises(UnitsError, match=MESSAGE.format("ScaleDegrees", "", "deg")): out = cs(10) s1._input_units_allow_dimensionless = True s1 = ScaleDegrees(2) s1._input_units_allow_dimensionless = True s2 = ScaleDegrees(2) s2._input_units_allow_dimensionless = False cs = s1 & s2 cs = cs.rename("TestModel") out = cs(10, 10 * u.arcsec) assert_quantity_allclose(out[0], 
20 * u.one) assert_quantity_allclose(out[1], 20 * u.arcsec) with pytest.raises(UnitsError, match=MESSAGE.format("ScaleDegrees", "", "deg")): out = cs(10, 10) def test_compound_return_units(): """ Test that return_units on the first model in the chain is respected for the input to the second. """ class PassModel(Model): n_inputs = 2 n_outputs = 2 def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @property def input_units(self): """Input units.""" return {"x0": u.deg, "x1": u.deg} @property def return_units(self): """Output units.""" return {"x0": u.deg, "x1": u.deg} def evaluate(self, x, y): return x.value, y.value cs = Pix2Sky_TAN() | PassModel() assert_quantity_allclose(cs(0 * u.deg, 0 * u.deg), (0, 90) * u.deg)
TestInputUnits
python
openai__openai-python
src/openai/types/beta/chatkit/chat_session_chatkit_configuration.py
{ "start": 368, "end": 689 }
class ____(BaseModel): automatic_thread_titling: ChatSessionAutomaticThreadTitling """Automatic thread titling preferences.""" file_upload: ChatSessionFileUpload """Upload settings for the session.""" history: ChatSessionHistory """History retention configuration."""
ChatSessionChatKitConfiguration
python
huggingface__transformers
tests/models/janus/test_modeling_janus.py
{ "start": 13840, "end": 15462 }
class ____(ModelTesterMixin, unittest.TestCase): all_model_classes = (JanusVQVAE,) if is_torch_available() else () has_attentions = False test_resize_embeddings = False def setUp(self): self.model_tester = JanusVQModelTester(self) self.config_tester = ConfigTester( self, config_class=JanusVQVAEConfig, has_text_modality=False, common_properties=["embed_dim", "num_embeddings"], ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip("Janus VQ module cannot offload due to using `self.weight` directly") def test_cpu_offload(self): pass @unittest.skip("Janus VQ module cannot offload due to using `self.weight` directly") def test_disk_offload_bin(self): pass @unittest.skip("Janus VQ module cannot offload due to using `self.weight` directly") def test_disk_offload_safetensors(self): pass @unittest.skip("Janus VQ module has no hidden states") def test_hidden_states_output(self): pass @unittest.skip("Janus VQ module has no hidden states") def test_model_outputs_equivalence(self): pass @unittest.skip("Janus VQ module has no get/set embeddings method") def test_model_get_set_embeddings(self): pass @unittest.skip("Janus VQ module has no hidden states") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip("Janus VQ module has no gradient checkpointing layers") def test_gradient_checkpointing_enable_disable(self): pass
JanusVQModelTest
python
ray-project__ray
rllib/offline/tests/test_offline_evaluation_runner_group.py
{ "start": 239, "end": 6786 }
class ____(unittest.TestCase): def setUp(self) -> None: data_path = "offline/tests/data/cartpole/cartpole-v1_large" self.base_path = Path(__file__).parents[2] self.data_path = "local://" + self.base_path.joinpath(data_path).as_posix() # Assign the observation and action spaces. env = gym.make("CartPole-v1") self.observation_space = env.observation_space self.action_space = env.action_space # Create a simple config. self.config = ( BCConfig() .environment( observation_space=self.observation_space, action_space=self.action_space, ) .api_stack( enable_env_runner_and_connector_v2=True, enable_rl_module_and_learner=True, ) .offline_data( input_=[self.data_path], dataset_num_iters_per_learner=1, ) .learners( num_learners=0, ) .training( train_batch_size_per_learner=256, ) .evaluation( num_offline_eval_runners=2, offline_evaluation_type="eval_loss", offline_eval_batch_size_per_runner=256, ) ) # Start ray. ray.init(ignore_reinit_error=True) def tearDown(self) -> None: ray.shutdown() def test_offline_evaluation_runner_group_setup(self): # Build the algorithm. algo = self.config.build() # The module state is needed for the `OfflinePreLearner`. module_state = algo.learner_group._learner.module.get_state() # Setup the runner group. offline_runner_group = OfflineEvaluationRunnerGroup( config=self.config, local_runner=False, module_state=module_state, ) # Ensure we have indeed 2 `OfflineEvalautionRunner`s. self.assertEqual( offline_runner_group.num_runners, self.config.num_offline_eval_runners ) # Make sure we have no local runner. self.assertEqual( offline_runner_group.num_runners, offline_runner_group.num_remote_runners ) self.assertIsNone(offline_runner_group.local_runner) # Make sure that an `OfflineData` instance is created. from ray.rllib.offline.offline_data import OfflineData self.assertIsInstance(offline_runner_group._offline_data, OfflineData) # Ensure that there are as many iterators as there are workers. 
self.assertEqual( len(offline_runner_group._offline_data_iterators), offline_runner_group.num_runners, ) # Ensure that all iterators are indeed `DataIterator` instances. from ray.data.iterator import DataIterator for iter in offline_runner_group._offline_data_iterators: self.assertIsInstance(iter, DataIterator) # Clean up. algo.cleanup() def test_offline_evaluation_runner_group_run(self): algo = self.config.build() # The module state is needed for the `OfflinePreLearner`. module_state = algo.learner_group._learner.module.get_state() # Setup the runner group. offline_runner_group = OfflineEvaluationRunnerGroup( config=self.config, local_runner=False, module_state=module_state, ) # Run the runner group and receive metrics. metrics = offline_runner_group.foreach_runner( "run", local_runner=False, ) from ray.rllib.utils.metrics.stats import Stats # Ensure that `metrics`` is a list of 2 metric dictionaries. self.assertIsInstance(metrics, list) self.assertEqual(len(metrics), offline_runner_group.num_runners) # Ensure that the `eval_total_loss_key` is part of the runner metrics. from ray.rllib.core import ALL_MODULES, DEFAULT_MODULE_ID from ray.rllib.offline.offline_evaluation_runner import TOTAL_EVAL_LOSS_KEY from ray.rllib.utils.metrics import ( NUM_ENV_STEPS_SAMPLED, NUM_ENV_STEPS_SAMPLED_LIFETIME, NUM_MODULE_STEPS_SAMPLED, NUM_MODULE_STEPS_SAMPLED_LIFETIME, ) for metric_dict in metrics: # Ensure the most generic metrics are contained in the `ResultDict`. self.assertIn(TOTAL_EVAL_LOSS_KEY, metric_dict[DEFAULT_MODULE_ID]) self.assertIn(NUM_ENV_STEPS_SAMPLED, metric_dict[ALL_MODULES]) self.assertIn(NUM_ENV_STEPS_SAMPLED_LIFETIME, metric_dict[ALL_MODULES]) self.assertIn(NUM_MODULE_STEPS_SAMPLED, metric_dict[ALL_MODULES]) self.assertIn(NUM_MODULE_STEPS_SAMPLED_LIFETIME, metric_dict[ALL_MODULES]) # Ensure all entries are `Stats` instances. for metric in metric_dict[DEFAULT_MODULE_ID].values(): self.assertIsInstance(metric, Stats) # Clean up. 
algo.cleanup() def test_offline_evaluation_runner_group_with_local_runner(self): algo = self.config.build() # The module state is needed for the `OfflinePreLearner`. module_state = algo.learner_group._learner.module.get_state() self.config.evaluation(num_offline_eval_runners=0) # Setup the runner group. offline_runner_group = OfflineEvaluationRunnerGroup( config=self.config, local_runner=True, module_state=module_state, ) # Ensure that we have a local runner. self.assertTrue( offline_runner_group.num_runners == offline_runner_group.num_remote_runners ) self.assertIsNotNone(offline_runner_group.local_runner) # Make sure that the local runner has also a data split stream iterator. self.assertIsNotNone(offline_runner_group.local_runner._dataset_iterator) from ray.data.iterator import DataIterator self.assertIsInstance( offline_runner_group.local_runner._dataset_iterator, DataIterator ) # Ensure that we can run the group together with a local runner. metrics = offline_runner_group.foreach_runner( "run", local_runner=True, ) self.assertEqual(len(metrics), 1) # Clean up. algo.cleanup() if __name__ == "__main__": import sys import pytest sys.exit(pytest.main(["-v", __file__]))
TestOfflineData
python
kamyu104__LeetCode-Solutions
Python/falling-squares.py
{ "start": 2813, "end": 6041 }
class ____(object): def __init__(self, nums, query_fn=min, update_fn=lambda x, y: y, default_val=float("inf")): """ initialize your data structure here. :type nums: List[int] """ N = len(nums) self.__original_length = N self.__tree_length = 2**(N.bit_length() + (N&(N-1) != 0))-1 self.__query_fn = query_fn self.__update_fn = update_fn self.__default_val = default_val self.__tree = [default_val for _ in range(self.__tree_length)] self.__lazy = [None for _ in range(self.__tree_length)] self.__constructTree(nums, 0, self.__original_length-1, 0) def update(self, i, j, val): self.__updateTree(val, i, j, 0, self.__original_length-1, 0) def query(self, i, j): return self.__queryRange(i, j, 0, self.__original_length-1, 0) def __constructTree(self, nums, left, right, idx): if left > right: return if left == right: self.__tree[idx] = self.__update_fn(self.__tree[idx], nums[left]) return mid = left + (right-left)//2 self.__constructTree(nums, left, mid, idx*2 + 1) self.__constructTree(nums, mid+1, right, idx*2 + 2) self.__tree[idx] = self.__query_fn(self.__tree[idx*2 + 1], self.__tree[idx*2 + 2]) def __apply(self, left, right, idx, val): self.__tree[idx] = self.__update_fn(self.__tree[idx], val) if left != right: self.__lazy[idx*2 + 1] = self.__update_fn(self.__lazy[idx*2 + 1], val) self.__lazy[idx*2 + 2] = self.__update_fn(self.__lazy[idx*2 + 2], val) def __updateTree(self, val, range_left, range_right, left, right, idx): if left > right: return if self.__lazy[idx] is not None: self.__apply(left, right, idx, self.__lazy[idx]) self.__lazy[idx] = None if range_left > right or range_right < left: return if range_left <= left and right <= range_right: self.__apply(left, right, idx, val) return mid = left + (right-left)//2 self.__updateTree(val, range_left, range_right, left, mid, idx*2 + 1) self.__updateTree(val, range_left, range_right, mid+1, right, idx*2 + 2) self.__tree[idx] = self.__query_fn(self.__tree[idx*2 + 1], self.__tree[idx*2 + 2]) def __queryRange(self, range_left, 
range_right, left, right, idx): if left > right: return self.__default_val if self.__lazy[idx] is not None: self.__apply(left, right, idx, self.__lazy[idx]) self.__lazy[idx] = None if right < range_left or left > range_right: return self.__default_val if range_left <= left and right <= range_right: return self.__tree[idx] mid = left + (right-left)//2 return self.__query_fn(self.__queryRange(range_left, range_right, left, mid, idx*2 + 1), self.__queryRange(range_left, range_right, mid + 1, right, idx*2 + 2)) # Time: O(nlogn) # Space: O(n) # Segment Tree solution.
SegmentTree2
python
has2k1__plotnine
plotnine/guides/guide_legend.py
{ "start": 10894, "end": 14212 }
class ____(GuideElements): """ Access & calculate theming for the legend """ @cached_property def text(self): size = self.theme.getp(("legend_text_legend", "size")) ha = self.theme.getp(("legend_text_legend", "ha"), "center") va = self.theme.getp(("legend_text_legend", "va"), "center") is_blank = self.theme.T.is_blank("legend_text_legend") # The original ha & va values are used by the HPacker/VPacker # to align the TextArea with the DrawingArea. # We set ha & va to values that combine best with the aligning # for the text area. align = va if self.text_position in {"left", "right"} else ha return NS( margin=self._text_margin, align=align, fontsize=size, ha="center", va="baseline", is_blank=is_blank, ) @cached_property def text_position(self) -> Side: if not (pos := self.theme.getp("legend_text_position")): pos = "right" return pos @cached_property def key_spacing_x(self) -> float: return self.theme.getp("legend_key_spacing_x", 0) @cached_property def key_spacing_y(self) -> float: return self.theme.getp("legend_key_spacing_y", 0) @cached_property def _key_dimensions(self) -> list[tuple[float, float]]: """ key width and key height for each legend entry Take a peak into data['size'] to make sure the legend key dimensions are big enough. 
""" # Note the different height sizes for the entries guide = cast("guide_legend", self.guide) min_size = ( self.theme.getp("legend_key_width"), self.theme.getp("legend_key_height"), ) # Find the size that fits each key in the legend, sizes: list[list[tuple[float, float]]] = [] for params in guide._layer_parameters: sizes.append([]) get_key_size = params.geom.legend_key_size for i in range(len(params.data)): key_data = params.data.iloc[i] sizes[-1].append( get_key_size(key_data, min_size, params.layer) ) # The maximum size across each layer arr = np.max(sizes, axis=0) return [(row[0], row[1]) for row in arr] @cached_property def key_widths(self) -> list[float]: """ Widths of the keys If legend is vertical, key widths must be equal, so we use the maximum. So a plot like (ggplot(diamonds, aes(x="cut", y="clarity")) + stat_sum(aes(group="cut")) + scale_size(range=(3, 25)) ) would have keys with variable heights, but fixed width. """ ws = [w for w, _ in self._key_dimensions] if self.is_vertical: return [max(ws)] * len(ws) return ws @cached_property def key_heights(self) -> list[float]: """ Heights of the keys If legend is horizontal, then key heights must be equal, so we use the maximum """ hs = [h for _, h in self._key_dimensions] if self.is_horizontal: return [max(hs)] * len(hs) return hs
GuideElementsLegend
python
keras-team__keras
keras/src/ops/numpy.py
{ "start": 241012, "end": 241993 }
class ____(Operation): def call(self, x): return backend.numpy.slogdet(x) def compute_output_spec(self, x): sign = KerasTensor((), dtype=x.dtype) logabsdet = KerasTensor(x.shape[:-2], dtype=x.dtype) return (sign, logabsdet) @keras_export(["keras.ops.slogdet", "keras.ops.numpy.slogdet"]) def slogdet(x): """Compute the sign and natural logarithm of the determinant of a matrix. Args: x: Input matrix. It must 2D and square. Returns: A tuple `(sign, logabsdet)`. `sign` is a number representing the sign of the determinant. For a real matrix, this is 1, 0, or -1. For a complex matrix, this is a complex number with absolute value 1 (i.e., it is on the unit circle), or else 0. `logabsdet` is the natural log of the absolute value of the determinant. """ if any_symbolic_tensors((x,)): return Slogdet().symbolic_call(x) return backend.numpy.slogdet(x)
Slogdet
python
hynek__structlog
tests/test_frames.py
{ "start": 4821, "end": 5619 }
class ____: def test_returns_str(self): """ Always returns a native string. """ assert isinstance(_format_stack(sys._getframe()), str) def test_formats(self): """ The passed stack is formatted. """ assert _format_stack(sys._getframe()).startswith( "Stack (most recent call last):\n" ) def test_no_trailing_nl(self, monkeypatch): """ Trailing newlines are snipped off but if the string does not contain one nothing is removed. """ from structlog._frames import traceback monkeypatch.setattr( traceback, "print_stack", lambda frame, file: file.write("foo") ) assert _format_stack(sys._getframe()).endswith("foo")
TestFormatStack
python
getsentry__sentry
tests/sentry/seer/similarity/test_config.py
{ "start": 2726, "end": 4272 }
class ____(TestCase): def test_returns_false_when_no_rollout(self): """Returns False when no new version is being rolled out""" with patch("sentry.seer.similarity.config.SEER_GROUPING_NEW_VERSION", None): result = should_send_new_model_embeddings(self.project, None) assert result is False def test_returns_false_when_feature_not_enabled(self): """Returns False when feature flag is not enabled for project""" result = should_send_new_model_embeddings(self.project, None) assert result is False def test_returns_true_when_no_metadata(self): """Returns True when grouphash has no metadata (never sent to Seer)""" with self.feature(SEER_GROUPING_NEW_MODEL_ROLLOUT_FEATURE): result = should_send_new_model_embeddings(self.project, None) assert result is True def test_returns_true_when_metadata_not_new_version(self): """Returns True when grouphash was sent to Seer but not with new version""" with self.feature(SEER_GROUPING_NEW_MODEL_ROLLOUT_FEATURE): result = should_send_new_model_embeddings(self.project, "v1") assert result is True def test_returns_false_when_already_sent_to_new_version(self): """Returns False when grouphash was already sent to new version""" with self.feature(SEER_GROUPING_NEW_MODEL_ROLLOUT_FEATURE): result = should_send_new_model_embeddings(self.project, "v2") assert result is False
ShouldSendNewModelEmbeddingsTest
python
astropy__astropy
astropy/io/votable/tests/test_vo.py
{ "start": 25927, "end": 27344 }
class ____(TestParse): def setup_class(self): with np.errstate(over="ignore"): # https://github.com/astropy/astropy/issues/13341 votable = parse(get_pkg_data_filename("data/regression.xml")) votable.get_first_table().format = "binary" self.xmlout = bio = io.BytesIO() # W39: Bit values can not be masked with pytest.warns(W39): votable.to_xml(bio) bio.seek(0) self.votable = parse(bio) self.table = self.votable.get_first_table() self.array = self.table.array self.mask = self.table.array.mask # Masked values in bit fields don't roundtrip through the binary # representation -- that's not a bug, just a limitation, so # override the mask array checks here. def test_bit_mask(self): assert not np.any(self.mask["bit"]) def test_bitarray_mask(self): assert not np.any(self.mask["bitarray"]) def test_bit_array2_mask(self): assert not np.any(self.mask["bitarray2"]) def test_null_integer_binary(self): # BINARY1 requires magic value to be specified self.array.mask["intNoNull"][0] = True bio = io.BytesIO() # W31: NaN's can not be represented in integer field with pytest.warns(W31): # https://github.com/astropy/astropy/issues/16090 self.votable.to_xml(bio)
TestThroughBinary
python
pytorch__pytorch
test/test_serialization.py
{ "start": 2587, "end": 3614 }
class ____: def __init__(self, data, has_fileno=True, has_readinto=False): if has_readinto: self.readinto = self.readinto_opt if has_fileno: # Python 2's StringIO.StringIO has no fileno attribute. # This is used to test that. self.fileno = self.fileno_opt self.calls = set() self.bytesio = io.BytesIO(data) def trace(fn, name): def result(*args, **kwargs): self.calls.add(name) return fn(*args, **kwargs) return result for attr in ['read', 'readline', 'seek', 'tell', 'write', 'flush']: traced_fn = trace(getattr(self.bytesio, attr), attr) setattr(self, attr, traced_fn) def fileno_opt(self): raise io.UnsupportedOperation('Not a real file') def readinto_opt(self, view): self.calls.add('readinto') return self.bytesio.readinto(view) def was_called(self, name): return name in self.calls
FilelikeMock
python
bokeh__bokeh
src/bokeh/models/widgets/groups.py
{ "start": 3364, "end": 3718 }
class ____(ToggleInputGroup): ''' A group of radio boxes. ''' # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) active = Nullable(Int, help=""" The index of the selected radio box, or ``None`` if nothing is selected. """)
RadioGroup
python
apache__airflow
providers/amazon/tests/unit/amazon/aws/triggers/test_dms.py
{ "start": 1622, "end": 2829 }
class ____(TestBaseDmsTrigger): EXPECTED_WAITER_NAME = "replication_complete" REPLICATION_CONFIG_ARN = "arn:aws:dms:region:account:config" def test_serialization(self): trigger = DmsReplicationCompleteTrigger(replication_config_arn=self.REPLICATION_CONFIG_ARN) classpath, kwargs = trigger.serialize() assert classpath == BASE_TRIGGER_CLASSPATH + "DmsReplicationCompleteTrigger" assert kwargs.get("replication_config_arn") == self.REPLICATION_CONFIG_ARN @pytest.mark.asyncio @mock.patch.object(DmsHook, "get_waiter") @mock.patch.object(DmsHook, "get_async_conn") async def test_complete(self, mock_async_conn, mock_get_waiter): mock_async_conn.__aenter__.return_value = mock.MagicMock() mock_get_waiter().wait = AsyncMock() trigger = DmsReplicationCompleteTrigger(replication_config_arn=self.REPLICATION_CONFIG_ARN) generator = trigger.run() response = await generator.asend(None) assert response == TriggerEvent( {"status": "success", "replication_config_arn": self.REPLICATION_CONFIG_ARN} ) mock_get_waiter().wait.assert_called_once()
TestDmsReplicationCompleteTrigger