Dataset columns:
language: stringclasses (1 value)
repo: stringclasses (346 values)
path: stringlengths (6 to 201)
class_span: dict
source: stringlengths (21 to 2.38M)
target: stringlengths (1 to 96)
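The rows that follow are (language, repo, path, class_span, source, target) records: source holds a class body with its name masked as ____, and target holds the original name. Below is a minimal sanity-check sketch, assuming the same data is also available as a JSON Lines file with these exact field names (the filename is hypothetical):

import ast
import json

def check_record(rec: dict) -> bool:
    # Substitute the masked name back into the snippet and confirm the result
    # still parses to a class definition with that name. Records whose span is
    # cut mid-statement (or ends on a dangling decorator) simply fail the check.
    code = rec["source"].replace("____", rec["target"], 1)
    try:
        tree = ast.parse(code)
    except SyntaxError:
        return False
    return any(
        isinstance(node, ast.ClassDef) and node.name == rec["target"]
        for node in ast.walk(tree)
    )

with open("class_name_records.jsonl") as fh:  # hypothetical local export
    records = [json.loads(line) for line in fh]
print(sum(check_record(r) for r in records), "of", len(records), "records parse cleanly")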
python
pypa__setuptools
setuptools/_vendor/importlib_metadata/_adapters.py
{ "start": 80, "end": 2317 }
class ____(email.message.Message): multiple_use_keys = set( map( FoldedCase, [ 'Classifier', 'Obsoletes-Dist', 'Platform', 'Project-URL', 'Provides-Dist', 'Provides-Extra', 'Requires-Dist', 'Requires-External', 'Supported-Platform', 'Dynamic', ], ) ) """ Keys that may be indicated multiple times per PEP 566. """ def __new__(cls, orig: email.message.Message): res = super().__new__(cls) vars(res).update(vars(orig)) return res def __init__(self, *args, **kwargs): self._headers = self._repair_headers() # suppress spurious error from mypy def __iter__(self): return super().__iter__() def __getitem__(self, item): """ Override parent behavior to typical dict behavior. ``email.message.Message`` will emit None values for missing keys. Typical mappings, including this ``Message``, will raise a key error for missing keys. Ref python/importlib_metadata#371. """ res = super().__getitem__(item) if res is None: raise KeyError(item) return res def _repair_headers(self): def redent(value): "Correct for RFC822 indentation" if not value or '\n' not in value: return value return textwrap.dedent(' ' * 8 + value) headers = [(key, redent(value)) for key, value in vars(self)['_headers']] if self._payload: headers.append(('Description', self.get_payload())) return headers @property def json(self): """ Convert PackageMetadata to a JSON-compatible format per PEP 0566. """ def transform(key): value = self.get_all(key) if key in self.multiple_use_keys else self[key] if key == 'Keywords': value = re.split(r'\s+', value) tk = key.lower().replace('-', '_') return tk, value return dict(map(transform, map(FoldedCase, self)))
Message
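A small usage sketch of the dict-like lookup behaviour described in the adapter's __getitem__ docstring; it assumes the class is importable from the un-vendored upstream distribution as importlib_metadata._adapters.Message (the file above is setuptools' vendored copy):

import email.message
from importlib_metadata._adapters import Message

raw = email.message.Message()
raw["Name"] = "example-dist"
raw["Classifier"] = "Programming Language :: Python :: 3"

meta = Message(raw)
print(meta["Name"])        # 'example-dist'
print(raw["Missing-Key"])  # plain email.message.Message returns None for a missing key
try:
    meta["Missing-Key"]    # the adapter raises instead (python/importlib_metadata#371)
except KeyError as exc:
    print("KeyError:", exc)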
python
airbytehq__airbyte
airbyte-integrations/connectors/destination-deepset/destination_deepset/api.py
{ "start": 598, "end": 777 }
class ____(APIError):
    """Raised when the server is unable to successfully upload the file."""

    def __str__(self) -> str:
        return "File upload failed."
FileUploadError
python
wandb__wandb
wandb/vendor/pygments/lexers/jvm.py
{ "start": 51009, "end": 54630 }
class ____(RegexLexer): """ For `Golo <http://golo-lang.org/>`_ source code. .. versionadded:: 2.0 """ name = 'Golo' filenames = ['*.golo'] aliases = ['golo'] tokens = { 'root': [ (r'[^\S\n]+', Text), (r'#.*$', Comment), (r'(\^|\.\.\.|:|\?:|->|==|!=|=|\+|\*|%|/|<=|<|>=|>|=|\.)', Operator), (r'(?<=[^-])(-)(?=[^-])', Operator), (r'(?<=[^`])(is|isnt|and|or|not|oftype|in|orIfNull)\b', Operator.Word), (r'[]{}|(),[]', Punctuation), (r'(module|import)(\s+)', bygroups(Keyword.Namespace, Text), 'modname'), (r'\b([a-zA-Z_][\w$.]*)(::)', bygroups(Name.Namespace, Punctuation)), (r'\b([a-zA-Z_][\w$]*(?:\.[a-zA-Z_][\w$]*)+)\b', Name.Namespace), (r'(let|var)(\s+)', bygroups(Keyword.Declaration, Text), 'varname'), (r'(struct)(\s+)', bygroups(Keyword.Declaration, Text), 'structname'), (r'(function)(\s+)', bygroups(Keyword.Declaration, Text), 'funcname'), (r'(null|true|false)\b', Keyword.Constant), (r'(augment|pimp' r'|if|else|case|match|return' r'|case|when|then|otherwise' r'|while|for|foreach' r'|try|catch|finally|throw' r'|local' r'|continue|break)\b', Keyword), (r'(map|array|list|set|vector|tuple)(\[)', bygroups(Name.Builtin, Punctuation)), (r'(print|println|readln|raise|fun' r'|asInterfaceInstance)\b', Name.Builtin), (r'(`?[a-zA-Z_][\w$]*)(\()', bygroups(Name.Function, Punctuation)), (r'-?[\d_]*\.[\d_]*([eE][+-]?\d[\d_]*)?F?', Number.Float), (r'0[0-7]+j?', Number.Oct), (r'0[xX][a-fA-F0-9]+', Number.Hex), (r'-?\d[\d_]*L', Number.Integer.Long), (r'-?\d[\d_]*', Number.Integer), ('`?[a-zA-Z_][\w$]*', Name), (r'@[a-zA-Z_][\w$.]*', Name.Decorator), (r'"""', String, combined('stringescape', 'triplestring')), (r'"', String, combined('stringescape', 'doublestring')), (r"'", String, combined('stringescape', 'singlestring')), (r'----((.|\n)*?)----', String.Doc) ], 'funcname': [ (r'`?[a-zA-Z_][\w$]*', Name.Function, '#pop'), ], 'modname': [ (r'[a-zA-Z_][\w$.]*\*?', Name.Namespace, '#pop') ], 'structname': [ (r'`?[\w.]+\*?', Name.Class, '#pop') ], 'varname': [ (r'`?[a-zA-Z_][\w$]*', Name.Variable, '#pop'), ], 'string': [ (r'[^\\\'"\n]+', String), (r'[\'"\\]', String) ], 'stringescape': [ (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|' r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape) ], 'triplestring': [ (r'"""', String, '#pop'), include('string'), (r'\n', String), ], 'doublestring': [ (r'"', String.Double, '#pop'), include('string'), ], 'singlestring': [ (r"'", String, '#pop'), include('string'), ], 'operators': [ (r'[#=,./%+\-?]', Operator), (r'(eq|gt|lt|gte|lte|neq|matches)\b', Operator), (r'(==|<=|<|>=|>|!=)', Operator), ], }
GoloLexer
python
apache__airflow
airflow-ctl/src/airflowctl/api/datamodels/generated.py
{ "start": 2087, "end": 2881 }
class ____(BaseModel):
    """
    Serializer for individual bulk action responses.

    Represents the outcome of a single bulk operation (create, update, or delete).
    The response includes a list of successful keys and any errors encountered during the operation.
    This structure helps users understand which key actions succeeded and which failed.
    """

    success: Annotated[
        list[str] | None,
        Field(description="A list of unique id/key representing successful operations.", title="Success"),
    ] = []
    errors: Annotated[
        list[dict[str, Any]] | None,
        Field(
            description="A list of errors encountered during the operation, each containing details about the issue.",
            title="Errors",
        ),
    ] = []
BulkActionResponse
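A minimal construction sketch for the generated model, assuming pydantic v2 and that the airflowctl package (or the generated module with its imports) is importable; the payload contents are illustrative:

from airflowctl.api.datamodels.generated import BulkActionResponse

resp = BulkActionResponse(
    success=["conn_a", "conn_b"],
    errors=[{"error": "connection already exists", "status_code": 409}],
)
print(resp.model_dump())
# {'success': ['conn_a', 'conn_b'],
#  'errors': [{'error': 'connection already exists', 'status_code': 409}]}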
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/linalg/determinant_op_test.py
{ "start": 8095, "end": 9849 }
class ____(test.Benchmark): shapes = [ (4, 4), (10, 10), (16, 16), (101, 101), (256, 256), (1000, 1000), (1024, 1024), (2048, 2048), (513, 4, 4), (513, 16, 16), (513, 256, 256), ] def _GenerateMatrix(self, shape): batch_shape = shape[:-2] shape = shape[-2:] assert shape[0] == shape[1] n = shape[0] matrix = np.ones(shape).astype(np.float32) / ( 2.0 * n) + np.diag(np.ones(n).astype(np.float32)) return variables.Variable(np.tile(matrix, batch_shape + (1, 1))) def benchmarkMatrixDeterminantOp(self): for shape in self.shapes: with ops.Graph().as_default(), session.Session( config=benchmark.benchmark_config()) as sess, ops.device("/cpu:0"): matrix = self._GenerateMatrix(shape) d = linalg_ops.matrix_determinant(matrix) self.evaluate(variables.global_variables_initializer()) self.run_op_benchmark( sess, control_flow_ops.group( d,), min_iters=25, name="matrix_determinant_cpu_{shape}".format(shape=shape)) if test.is_gpu_available(True): with ops.Graph().as_default(), session.Session( config=benchmark.benchmark_config()) as sess, ops.device("/gpu:0"): matrix = self._GenerateMatrix(shape) d = linalg_ops.matrix_determinant(matrix) self.evaluate(variables.global_variables_initializer()) self.run_op_benchmark( sess, control_flow_ops.group( d,), min_iters=25, name="matrix_determinant_gpu_{shape}".format(shape=shape)) if __name__ == "__main__": test.main()
MatrixDeterminantBenchmark
python
walkccc__LeetCode
solutions/1343. Number of Sub-arrays of Size K and Average Greater than or Equal to Threshold/1343.py
{ "start": 0, "end": 313 }
class ____:
    def numOfSubarrays(self, arr: list[int], k: int, threshold: int) -> int:
        ans = 0
        windowSum = 0

        for i in range(len(arr)):
            windowSum += arr[i]
            if i >= k:
                windowSum -= arr[i - k]
            if i >= k - 1 and windowSum // k >= threshold:
                ans += 1

        return ans
Solution
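A quick check of the sliding-window solution against the problem's sample inputs; note that windowSum // k >= threshold is equivalent to windowSum >= k * threshold for integer inputs with k > 0:

sol = Solution()
print(sol.numOfSubarrays([2, 2, 2, 2, 5, 5, 5, 8], k=3, threshold=4))              # 3
print(sol.numOfSubarrays([11, 13, 17, 23, 29, 31, 7, 5, 2, 3], k=3, threshold=5))  # 6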
python
huggingface__transformers
src/transformers/models/swiftformer/modeling_swiftformer.py
{ "start": 13779, "end": 15073 }
class ____(PreTrainedModel): config: SwiftFormerConfig base_model_prefix = "swiftformer" main_input_name = "pixel_values" input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = ["SwiftFormerEncoderBlock"] @torch.no_grad() def _init_weights(self, module: nn.Module) -> None: """Initialize the weights""" if isinstance(module, (nn.Conv2d, nn.Linear)): init.trunc_normal_(module.weight, std=0.02) if module.bias is not None: init.constant_(module.bias, 0) elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d)): init.constant_(module.bias, 0) init.constant_(module.weight, 1.0) elif isinstance(module, (SwiftFormerConvEncoder, SwiftFormerLocalRepresentation)): init.ones_(module.layer_scale) elif isinstance(module, SwiftFormerEncoderBlock): if self.config.use_layer_scale: init.constant_(module.layer_scale_1, self.config.layer_scale_init_value) init.constant_(module.layer_scale_2, self.config.layer_scale_init_value) elif isinstance(module, SwiftFormerEfficientAdditiveAttention): init.normal_(module.w_g) @auto_docstring
SwiftFormerPreTrainedModel
python
chroma-core__chroma
chromadb/execution/expression/operator.py
{ "start": 9608, "end": 9786 }
class ____(Where):
    """Equality comparison"""

    key: str
    value: Any

    def to_dict(self) -> Dict[str, Any]:
        return {self.key: {"$eq": self.value}}


@dataclass
Eq
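A small sketch of the filter document this operator produces; it assumes Eq itself carries a @dataclass decorator just above the extracted span, matching the trailing @dataclass that decorates the next class:

from chromadb.execution.expression.operator import Eq

where = Eq(key="genre", value="science fiction")
print(where.to_dict())  # {'genre': {'$eq': 'science fiction'}}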
python
pydantic__pydantic
tests/mypy/outputs/mypy-plugin-strict_ini/plugin_fail.py
{ "start": 5207, "end": 5422 }
class ____(BaseModel):
    x: str = Field(..., alias='y')
    z: int


AliasModel(y=1, z=2)
# MYPY: error: Argument "y" to "AliasModel" has incompatible type "int"; expected "str"  [arg-type]

x_alias = 'y'
AliasModel
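The MYPY comment flags the int argument because field x is populated through its alias y and must be a str. A runtime sketch of the valid call, assuming pydantic v2 defaults:

m = AliasModel(y='hello', z=2)  # populate x via its alias
print(m.x, m.z)                 # hello 2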
python
ray-project__ray
rllib/policy/eager_tf_policy_v2.py
{ "start": 1809, "end": 36066 }
class ____(Policy): """A TF-eager / TF2 based tensorflow policy. This class is intended to be used and extended by sub-classing. """ def __init__( self, observation_space: gym.spaces.Space, action_space: gym.spaces.Space, config: AlgorithmConfigDict, **kwargs, ): self.framework = config.get("framework", "tf2") # Log device. logger.info( "Creating TF-eager policy running on {}.".format( "GPU" if get_gpu_devices() else "CPU" ) ) Policy.__init__(self, observation_space, action_space, config) self._is_training = False # Global timestep should be a tensor. self.global_timestep = tf.Variable(0, trainable=False, dtype=tf.int64) self.explore = tf.Variable( self.config["explore"], trainable=False, dtype=tf.bool ) # Log device and worker index. num_gpus = self._get_num_gpus_for_policy() if num_gpus > 0: gpu_ids = get_gpu_devices() logger.info(f"Found {len(gpu_ids)} visible cuda devices.") self._is_training = False self._loss_initialized = False # Backward compatibility workaround so Policy will call self.loss() directly. # TODO(jungong): clean up after all policies are migrated to new sub-class # implementation. self._loss = None self.batch_divisibility_req = self.get_batch_divisibility_req() self._max_seq_len = self.config["model"]["max_seq_len"] self.validate_spaces(observation_space, action_space, self.config) # If using default make_model(), dist_class will get updated when # the model is created next. self.dist_class = self._init_dist_class() self.model = self.make_model() self._init_view_requirements() self.exploration = self._create_exploration() self._state_inputs = self.model.get_initial_state() self._is_recurrent = len(self._state_inputs) > 0 # Got to reset global_timestep again after fake run-throughs. self.global_timestep.assign(0) # Lock used for locking some methods on the object-level. # This prevents possible race conditions when calling the model # first, then its value function (e.g. in a loss function), in # between of which another model call is made (e.g. to compute an # action). self._lock = threading.RLock() # Only for `config.eager_tracing=True`: A counter to keep track of # how many times an eager-traced method (e.g. # `self._compute_actions_helper`) has been re-traced by tensorflow. # We will raise an error if more than n re-tracings have been # detected, since this would considerably slow down execution. # The variable below should only get incremented during the # tf.function trace operations, never when calling the already # traced function after that. self._re_trace_counter = 0 @staticmethod def enable_eager_execution_if_necessary(): # If this class runs as a @ray.remote actor, eager mode may not # have been activated yet. if tf1 and not tf1.executing_eagerly(): tf1.enable_eager_execution() @OverrideToImplementCustomLogic def validate_spaces( self, obs_space: gym.spaces.Space, action_space: gym.spaces.Space, config: AlgorithmConfigDict, ): return {} @OverrideToImplementCustomLogic @override(Policy) def loss( self, model: Union[ModelV2, "tf.keras.Model"], dist_class: Type[TFActionDistribution], train_batch: SampleBatch, ) -> Union[TensorType, List[TensorType]]: """Compute loss for this policy using model, dist_class and a train_batch. Args: model: The Model to calculate the loss for. dist_class: The action distr. class. train_batch: The training data. Returns: A single loss tensor or a list of loss tensors. """ raise NotImplementedError @OverrideToImplementCustomLogic def stats_fn(self, train_batch: SampleBatch) -> Dict[str, TensorType]: """Stats function. 
Returns a dict of statistics. Args: train_batch: The SampleBatch (already) used for training. Returns: The stats dict. """ return {} @OverrideToImplementCustomLogic def grad_stats_fn( self, train_batch: SampleBatch, grads: ModelGradients ) -> Dict[str, TensorType]: """Gradient stats function. Returns a dict of statistics. Args: train_batch: The SampleBatch (already) used for training. Returns: The stats dict. """ return {} @OverrideToImplementCustomLogic def make_model(self) -> ModelV2: """Build underlying model for this Policy. Returns: The Model for the Policy to use. """ # Default ModelV2 model. _, logit_dim = ModelCatalog.get_action_dist( self.action_space, self.config["model"] ) return ModelCatalog.get_model_v2( self.observation_space, self.action_space, logit_dim, self.config["model"], framework=self.framework, ) @OverrideToImplementCustomLogic def compute_gradients_fn( self, policy: Policy, optimizer: LocalOptimizer, loss: TensorType ) -> ModelGradients: """Gradients computing function (from loss tensor, using local optimizer). Args: policy: The Policy object that generated the loss tensor and that holds the given local optimizer. optimizer: The tf (local) optimizer object to calculate the gradients with. loss: The loss tensor for which gradients should be calculated. Returns: ModelGradients: List of the possibly clipped gradients- and variable tuples. """ return None @OverrideToImplementCustomLogic def apply_gradients_fn( self, optimizer: "tf.keras.optimizers.Optimizer", grads: ModelGradients, ) -> "tf.Operation": """Gradients computing function (from loss tensor, using local optimizer). Args: optimizer: The tf (local) optimizer object to calculate the gradients with. grads: The gradient tensor to be applied. Returns: "tf.Operation": TF operation that applies supplied gradients. """ return None @OverrideToImplementCustomLogic def action_sampler_fn( self, model: ModelV2, *, obs_batch: TensorType, state_batches: TensorType, **kwargs, ) -> Tuple[TensorType, TensorType, TensorType, List[TensorType]]: """Custom function for sampling new actions given policy. Args: model: Underlying model. obs_batch: Observation tensor batch. state_batches: Action sampling state batch. Returns: Sampled action Log-likelihood Action distribution inputs Updated state """ return None, None, None, None @OverrideToImplementCustomLogic def action_distribution_fn( self, model: ModelV2, *, obs_batch: TensorType, state_batches: TensorType, **kwargs, ) -> Tuple[TensorType, type, List[TensorType]]: """Action distribution function for this Policy. Args: model: Underlying model. obs_batch: Observation tensor batch. state_batches: Action sampling state batch. Returns: Distribution input. ActionDistribution class. State outs. """ return None, None, None @OverrideToImplementCustomLogic def get_batch_divisibility_req(self) -> int: """Get batch divisibility request. Returns: Size N. A sample batch must be of size K*N. """ # By default, any sized batch is ok, so simply return 1. return 1 @OverrideToImplementCustomLogic_CallToSuperRecommended def extra_action_out_fn(self) -> Dict[str, TensorType]: """Extra values to fetch and return from compute_actions(). Returns: Dict[str, TensorType]: An extra fetch-dict to be passed to and returned from the compute_actions() call. """ return {} @OverrideToImplementCustomLogic_CallToSuperRecommended def extra_learn_fetches_fn(self) -> Dict[str, TensorType]: """Extra stats to be reported after gradient computation. Returns: Dict[str, TensorType]: An extra fetch-dict. 
""" return {} @override(Policy) @OverrideToImplementCustomLogic_CallToSuperRecommended def postprocess_trajectory( self, sample_batch: SampleBatch, other_agent_batches: Optional[SampleBatch] = None, episode=None, ): """Post process trajectory in the format of a SampleBatch. Args: sample_batch: sample_batch: batch of experiences for the policy, which will contain at most one episode trajectory. other_agent_batches: In a multi-agent env, this contains a mapping of agent ids to (policy, agent_batch) tuples containing the policy and experiences of the other agents. episode: An optional multi-agent episode object to provide access to all of the internal episode state, which may be useful for model-based or multi-agent algorithms. Returns: The postprocessed sample batch. """ assert tf.executing_eagerly() return Policy.postprocess_trajectory(self, sample_batch) @OverrideToImplementCustomLogic def optimizer( self, ) -> Union["tf.keras.optimizers.Optimizer", List["tf.keras.optimizers.Optimizer"]]: """TF optimizer to use for policy optimization. Returns: A local optimizer or a list of local optimizers to use for this Policy's Model. """ return tf.keras.optimizers.Adam(self.config["lr"]) def _init_dist_class(self): if is_overridden(self.action_sampler_fn) or is_overridden( self.action_distribution_fn ): if not is_overridden(self.make_model): raise ValueError( "`make_model` is required if `action_sampler_fn` OR " "`action_distribution_fn` is given" ) return None else: dist_class, _ = ModelCatalog.get_action_dist( self.action_space, self.config["model"] ) return dist_class def _init_view_requirements(self): # Auto-update model's inference view requirements, if recurrent. self._update_model_view_requirements_from_init_state() # Combine view_requirements for Model and Policy. self.view_requirements.update(self.model.view_requirements) # Disable env-info placeholder. if SampleBatch.INFOS in self.view_requirements: self.view_requirements[SampleBatch.INFOS].used_for_training = False def maybe_initialize_optimizer_and_loss(self): optimizers = force_list(self.optimizer()) if self.exploration: # Policies with RLModules don't have an exploration object. optimizers = self.exploration.get_exploration_optimizer(optimizers) # The list of local (tf) optimizers (one per loss term). self._optimizers: List[LocalOptimizer] = optimizers # Backward compatibility: A user's policy may only support a single # loss term and optimizer (no lists). self._optimizer: LocalOptimizer = optimizers[0] if optimizers else None self._initialize_loss_from_dummy_batch( auto_remove_unneeded_view_reqs=True, ) self._loss_initialized = True @override(Policy) def compute_actions_from_input_dict( self, input_dict: Dict[str, TensorType], explore: bool = None, timestep: Optional[int] = None, episodes=None, **kwargs, ) -> Tuple[TensorType, List[TensorType], Dict[str, TensorType]]: self._is_training = False explore = explore if explore is not None else self.explore timestep = timestep if timestep is not None else self.global_timestep if isinstance(timestep, tf.Tensor): timestep = int(timestep.numpy()) # Pass lazy (eager) tensor dict to Model as `input_dict`. input_dict = self._lazy_tensor_dict(input_dict) input_dict.set_training(False) # Pack internal state inputs into (separate) list. state_batches = [ input_dict[k] for k in input_dict.keys() if "state_in" in k[:8] ] self._state_in = state_batches self._is_recurrent = len(tree.flatten(self._state_in)) > 0 # Call the exploration before_compute_actions hook. 
if self.exploration: # Policies with RLModules don't have an exploration object. self.exploration.before_compute_actions( timestep=timestep, explore=explore, tf_sess=self.get_session() ) ret = self._compute_actions_helper( input_dict, state_batches, # TODO: Passing episodes into a traced method does not work. None if self.config["eager_tracing"] else episodes, explore, timestep, ) # Update our global timestep by the batch size. self.global_timestep.assign_add(tree.flatten(ret[0])[0].shape.as_list()[0]) return convert_to_numpy(ret) # TODO(jungong) : deprecate this API and make compute_actions_from_input_dict the # only canonical entry point for inference. @override(Policy) def compute_actions( self, obs_batch, state_batches=None, prev_action_batch=None, prev_reward_batch=None, info_batch=None, episodes=None, explore=None, timestep=None, **kwargs, ): # Create input dict to simply pass the entire call to # self.compute_actions_from_input_dict(). input_dict = SampleBatch( { SampleBatch.CUR_OBS: obs_batch, }, _is_training=tf.constant(False), ) if state_batches is not None: for s in enumerate(state_batches): input_dict["state_in_{i}"] = s if prev_action_batch is not None: input_dict[SampleBatch.PREV_ACTIONS] = prev_action_batch if prev_reward_batch is not None: input_dict[SampleBatch.PREV_REWARDS] = prev_reward_batch if info_batch is not None: input_dict[SampleBatch.INFOS] = info_batch return self.compute_actions_from_input_dict( input_dict=input_dict, explore=explore, timestep=timestep, episodes=episodes, **kwargs, ) @with_lock @override(Policy) def compute_log_likelihoods( self, actions: Union[List[TensorType], TensorType], obs_batch: Union[List[TensorType], TensorType], state_batches: Optional[List[TensorType]] = None, prev_action_batch: Optional[Union[List[TensorType], TensorType]] = None, prev_reward_batch: Optional[Union[List[TensorType], TensorType]] = None, actions_normalized: bool = True, in_training: bool = True, ) -> TensorType: if is_overridden(self.action_sampler_fn) and not is_overridden( self.action_distribution_fn ): raise ValueError( "Cannot compute log-prob/likelihood w/o an " "`action_distribution_fn` and a provided " "`action_sampler_fn`!" ) seq_lens = tf.ones(len(obs_batch), dtype=tf.int32) input_batch = SampleBatch( { SampleBatch.CUR_OBS: tf.convert_to_tensor(obs_batch), SampleBatch.ACTIONS: actions, }, _is_training=False, ) if prev_action_batch is not None: input_batch[SampleBatch.PREV_ACTIONS] = tf.convert_to_tensor( prev_action_batch ) if prev_reward_batch is not None: input_batch[SampleBatch.PREV_REWARDS] = tf.convert_to_tensor( prev_reward_batch ) # Exploration hook before each forward pass. if self.exploration: # Policies with RLModules don't have an exploration object. self.exploration.before_compute_actions(explore=False) # Action dist class and inputs are generated via custom function. if is_overridden(self.action_distribution_fn): dist_inputs, self.dist_class, _ = self.action_distribution_fn( self, self.model, input_batch, explore=False, is_training=False ) action_dist = self.dist_class(dist_inputs, self.model) # Default log-likelihood calculation. else: dist_inputs, _ = self.model(input_batch, state_batches, seq_lens) action_dist = self.dist_class(dist_inputs, self.model) # Normalize actions if necessary. 
if not actions_normalized and self.config["normalize_actions"]: actions = normalize_action(actions, self.action_space_struct) log_likelihoods = action_dist.logp(actions) return log_likelihoods @with_lock @override(Policy) def learn_on_batch(self, postprocessed_batch): # Callback handling. learn_stats = {} self.callbacks.on_learn_on_batch( policy=self, train_batch=postprocessed_batch, result=learn_stats ) pad_batch_to_sequences_of_same_size( postprocessed_batch, max_seq_len=self._max_seq_len, shuffle=False, batch_divisibility_req=self.batch_divisibility_req, view_requirements=self.view_requirements, ) self._is_training = True postprocessed_batch = self._lazy_tensor_dict(postprocessed_batch) postprocessed_batch.set_training(True) stats = self._learn_on_batch_helper(postprocessed_batch) self.num_grad_updates += 1 stats.update( { "custom_metrics": learn_stats, NUM_AGENT_STEPS_TRAINED: postprocessed_batch.count, NUM_GRAD_UPDATES_LIFETIME: self.num_grad_updates, # -1, b/c we have to measure this diff before we do the update above. DIFF_NUM_GRAD_UPDATES_VS_SAMPLER_POLICY: ( self.num_grad_updates - 1 - (postprocessed_batch.num_grad_updates or 0) ), } ) return convert_to_numpy(stats) @override(Policy) def compute_gradients( self, postprocessed_batch: SampleBatch ) -> Tuple[ModelGradients, Dict[str, TensorType]]: pad_batch_to_sequences_of_same_size( postprocessed_batch, shuffle=False, max_seq_len=self._max_seq_len, batch_divisibility_req=self.batch_divisibility_req, view_requirements=self.view_requirements, ) self._is_training = True self._lazy_tensor_dict(postprocessed_batch) postprocessed_batch.set_training(True) grads_and_vars, grads, stats = self._compute_gradients_helper( postprocessed_batch ) return convert_to_numpy((grads, stats)) @override(Policy) def apply_gradients(self, gradients: ModelGradients) -> None: self._apply_gradients_helper( list( zip( [ (tf.convert_to_tensor(g) if g is not None else None) for g in gradients ], self.model.trainable_variables(), ) ) ) @override(Policy) def get_weights(self, as_dict=False): variables = self.variables() if as_dict: return {v.name: v.numpy() for v in variables} return [v.numpy() for v in variables] @override(Policy) def set_weights(self, weights): variables = self.variables() assert len(weights) == len(variables), (len(weights), len(variables)) for v, w in zip(variables, weights): v.assign(w) @override(Policy) def get_exploration_state(self): return convert_to_numpy(self.exploration.get_state()) @override(Policy) def is_recurrent(self): return self._is_recurrent @override(Policy) def num_state_tensors(self): return len(self._state_inputs) @override(Policy) def get_initial_state(self): if hasattr(self, "model"): return self.model.get_initial_state() return [] @override(Policy) @OverrideToImplementCustomLogic_CallToSuperRecommended def get_state(self) -> PolicyState: # Legacy Policy state (w/o keras model and w/o PolicySpec). state = super().get_state() state["global_timestep"] = state["global_timestep"].numpy() # In the new Learner API stack, the optimizers live in the learner. state["_optimizer_variables"] = [] if self._optimizer and len(self._optimizer.variables()) > 0: state["_optimizer_variables"] = self._optimizer.variables() # Add exploration state. if self.exploration: # This is not compatible with RLModules, which have a method # `forward_exploration` to specify custom exploration behavior. 
state["_exploration_state"] = self.exploration.get_state() return state @override(Policy) @OverrideToImplementCustomLogic_CallToSuperRecommended def set_state(self, state: PolicyState) -> None: # Set optimizer vars. optimizer_vars = state.get("_optimizer_variables", None) if optimizer_vars and self._optimizer.variables(): if not type(self).__name__.endswith("_traced") and log_once( "set_state_optimizer_vars_tf_eager_policy_v2" ): logger.warning( "Cannot restore an optimizer's state for tf eager! Keras " "is not able to save the v1.x optimizers (from " "tf.compat.v1.train) since they aren't compatible with " "checkpoints." ) for opt_var, value in zip(self._optimizer.variables(), optimizer_vars): opt_var.assign(value) # Set exploration's state. if hasattr(self, "exploration") and "_exploration_state" in state: self.exploration.set_state(state=state["_exploration_state"]) # Restore glbal timestep (tf vars). self.global_timestep.assign(state["global_timestep"]) # Then the Policy's (NN) weights and connectors. super().set_state(state) @override(Policy) def export_model(self, export_dir, onnx: Optional[int] = None) -> None: if onnx: try: import tf2onnx except ImportError as e: raise RuntimeError( "Converting a TensorFlow model to ONNX requires " "`tf2onnx` to be installed. Install with " "`pip install tf2onnx`." ) from e model_proto, external_tensor_storage = tf2onnx.convert.from_keras( self.model.base_model, output_path=os.path.join(export_dir, "model.onnx"), ) # Save the tf.keras.Model (architecture and weights, so it can be retrieved # w/o access to the original (custom) Model or Policy code). elif ( hasattr(self, "model") and hasattr(self.model, "base_model") and isinstance(self.model.base_model, tf.keras.Model) ): try: self.model.base_model.save(export_dir, save_format="tf") except Exception: logger.warning(ERR_MSG_TF_POLICY_CANNOT_SAVE_KERAS_MODEL) else: logger.warning(ERR_MSG_TF_POLICY_CANNOT_SAVE_KERAS_MODEL) def variables(self): """Return the list of all savable variables for this policy.""" if isinstance(self.model, tf.keras.Model): return self.model.variables else: return self.model.variables() def loss_initialized(self): return self._loss_initialized @with_lock def _compute_actions_helper( self, input_dict, state_batches, episodes, explore, timestep, _ray_trace_ctx=None, ): # Increase the tracing counter to make sure we don't re-trace too # often. If eager_tracing=True, this counter should only get # incremented during the @tf.function trace operations, never when # calling the already traced function after that. self._re_trace_counter += 1 # Calculate RNN sequence lengths. if SampleBatch.SEQ_LENS in input_dict: seq_lens = input_dict[SampleBatch.SEQ_LENS] else: batch_size = tree.flatten(input_dict[SampleBatch.OBS])[0].shape[0] seq_lens = tf.ones(batch_size, dtype=tf.int32) if state_batches else None # Add default and custom fetches. extra_fetches = {} with tf.variable_creator_scope(_disallow_var_creation): if is_overridden(self.action_sampler_fn): actions, logp, dist_inputs, state_out = self.action_sampler_fn( self.model, obs_batch=input_dict[SampleBatch.OBS], state_batches=state_batches, seq_lens=seq_lens, explore=explore, timestep=timestep, episodes=episodes, ) else: # Try `action_distribution_fn`. 
if is_overridden(self.action_distribution_fn): ( dist_inputs, self.dist_class, state_out, ) = self.action_distribution_fn( self.model, obs_batch=input_dict[SampleBatch.OBS], state_batches=state_batches, seq_lens=seq_lens, explore=explore, timestep=timestep, is_training=False, ) elif isinstance(self.model, tf.keras.Model): if state_batches and "state_in_0" not in input_dict: for i, s in enumerate(state_batches): input_dict[f"state_in_{i}"] = s self._lazy_tensor_dict(input_dict) dist_inputs, state_out, extra_fetches = self.model(input_dict) else: dist_inputs, state_out = self.model( input_dict, state_batches, seq_lens ) action_dist = self.dist_class(dist_inputs, self.model) # Get the exploration action from the forward results. actions, logp = self.exploration.get_exploration_action( action_distribution=action_dist, timestep=timestep, explore=explore, ) # Action-logp and action-prob. if logp is not None: extra_fetches[SampleBatch.ACTION_PROB] = tf.exp(logp) extra_fetches[SampleBatch.ACTION_LOGP] = logp # Action-dist inputs. if dist_inputs is not None: extra_fetches[SampleBatch.ACTION_DIST_INPUTS] = dist_inputs # Custom extra fetches. extra_fetches.update(self.extra_action_out_fn()) return actions, state_out, extra_fetches # TODO: Figure out, why _ray_trace_ctx=None helps to prevent a crash in # AlphaStar w/ framework=tf2; eager_tracing=True on the policy learner actors. # It seems there may be a clash between the traced-by-tf function and the # traced-by-ray functions (for making the policy class a ray actor). def _learn_on_batch_helper(self, samples, _ray_trace_ctx=None): # Increase the tracing counter to make sure we don't re-trace too # often. If eager_tracing=True, this counter should only get # incremented during the @tf.function trace operations, never when # calling the already traced function after that. self._re_trace_counter += 1 with tf.variable_creator_scope(_disallow_var_creation): grads_and_vars, _, stats = self._compute_gradients_helper(samples) self._apply_gradients_helper(grads_and_vars) return stats def _get_is_training_placeholder(self): return tf.convert_to_tensor(self._is_training) @with_lock def _compute_gradients_helper(self, samples): """Computes and returns grads as eager tensors.""" # Increase the tracing counter to make sure we don't re-trace too # often. If eager_tracing=True, this counter should only get # incremented during the @tf.function trace operations, never when # calling the already traced function after that. self._re_trace_counter += 1 # Gather all variables for which to calculate losses. if isinstance(self.model, tf.keras.Model): variables = self.model.trainable_variables else: variables = self.model.trainable_variables() # Calculate the loss(es) inside a tf GradientTape. with tf.GradientTape( persistent=is_overridden(self.compute_gradients_fn) ) as tape: losses = self.loss(self.model, self.dist_class, samples) losses = force_list(losses) # User provided a custom compute_gradients_fn. if is_overridden(self.compute_gradients_fn): # Wrap our tape inside a wrapper, such that the resulting # object looks like a "classic" tf.optimizer. This way, custom # compute_gradients_fn will work on both tf static graph # and tf-eager. optimizer = _OptimizerWrapper(tape) # More than one loss terms/optimizers. if self.config["_tf_policy_handles_more_than_one_loss"]: grads_and_vars = self.compute_gradients_fn( [optimizer] * len(losses), losses ) # Only one loss and one optimizer. 
else: grads_and_vars = [self.compute_gradients_fn(optimizer, losses[0])] # Default: Compute gradients using the above tape. else: grads_and_vars = [ list(zip(tape.gradient(loss, variables), variables)) for loss in losses ] if log_once("grad_vars"): for g_and_v in grads_and_vars: for g, v in g_and_v: if g is not None: logger.info(f"Optimizing variable {v.name}") # `grads_and_vars` is returned a list (len=num optimizers/losses) # of lists of (grad, var) tuples. if self.config["_tf_policy_handles_more_than_one_loss"]: grads = [[g for g, _ in g_and_v] for g_and_v in grads_and_vars] # `grads_and_vars` is returned as a list of (grad, var) tuples. else: grads_and_vars = grads_and_vars[0] grads = [g for g, _ in grads_and_vars] stats = self._stats(samples, grads) return grads_and_vars, grads, stats def _apply_gradients_helper(self, grads_and_vars): # Increase the tracing counter to make sure we don't re-trace too # often. If eager_tracing=True, this counter should only get # incremented during the @tf.function trace operations, never when # calling the already traced function after that. self._re_trace_counter += 1 if is_overridden(self.apply_gradients_fn): if self.config["_tf_policy_handles_more_than_one_loss"]: self.apply_gradients_fn(self._optimizers, grads_and_vars) else: self.apply_gradients_fn(self._optimizer, grads_and_vars) else: if self.config["_tf_policy_handles_more_than_one_loss"]: for i, o in enumerate(self._optimizers): o.apply_gradients( [(g, v) for g, v in grads_and_vars[i] if g is not None] ) else: self._optimizer.apply_gradients( [(g, v) for g, v in grads_and_vars if g is not None] ) def _stats(self, samples, grads): fetches = {} if is_overridden(self.stats_fn): fetches[LEARNER_STATS_KEY] = dict(self.stats_fn(samples)) else: fetches[LEARNER_STATS_KEY] = {} fetches.update(dict(self.extra_learn_fetches_fn())) fetches.update(dict(self.grad_stats_fn(samples, grads))) return fetches def _lazy_tensor_dict(self, postprocessed_batch: SampleBatch): # TODO: (sven): Keep for a while to ensure backward compatibility. if not isinstance(postprocessed_batch, SampleBatch): postprocessed_batch = SampleBatch(postprocessed_batch) postprocessed_batch.set_get_interceptor(_convert_to_tf) return postprocessed_batch @classmethod def with_tracing(cls): return _traced_eager_policy(cls)
EagerTFPolicyV2
python
dask__dask
dask/dataframe/backends.py
{ "start": 17026, "end": 26381 }
class ____(SimpleSizeof, dict): def __sizeof__(self) -> int: """ The result of the shuffle split are typically small dictionaries (#keys << 100; typically <= 32) The splits are often non-uniformly distributed. Some of the splits may even be empty. Sampling the dictionary for size estimation can cause severe errors. See also https://github.com/dask/distributed/issues/4962 """ total_size = super().__sizeof__() for k, df in self.items(): total_size += sizeof(k) total_size += sizeof(df) return total_size @group_split_dispatch.register((pd.DataFrame, pd.Series, pd.Index)) def group_split_pandas(df, c, k, ignore_index=False): if is_series_like(c): c = c.values indexer, locations = pd._libs.algos.groupsort_indexer( c.astype(np.intp, copy=False), k ) df2 = df.take(indexer) locations = locations.cumsum() parts = [ df2.iloc[a:b].reset_index(drop=True) if ignore_index else df2.iloc[a:b] for a, b in zip(locations[:-1], locations[1:]) ] return ShuffleGroupResult(zip(range(k), parts)) def _union_categoricals_wrapper( dfs: list[pd.CategoricalIndex] | list[pd.Series], **kwargs ) -> pd.Categorical: """ A wrapper around pandas' union_categoricals that handles some dtype issues. union_categoricals requires that the dtype of each array's categories match. So you can't union ``Categorical(['a', 'b'])`` and ``Categorical([1, 2])`` since the dtype (str vs. int) doesn't match. *Somewhere* in Dask, we're possibly creating an empty ``Categorical`` with a dtype of ``object``. In pandas 2.x, we could union that with string categories since they both used object dtype. But pandas 3.x uses string dtype for categories. This wrapper handles that by creating a new ``Categorical`` with the correct dtype. """ categories_dtypes = {cat.dtype.categories.dtype.name for cat in dfs} if "object" in categories_dtypes and "str" in categories_dtypes: dfs = [ ( type(cat)(pd.Categorical(pd.Index([], dtype="str")), name=cat.name) if cat.dtype.categories.dtype.name == "object" and len(cat) == 0 else cat ) for cat in dfs ] return union_categoricals(dfs, **kwargs) @concat_dispatch.register((pd.DataFrame, pd.Series, pd.Index)) def concat_pandas( dfs, axis=0, join="outer", uniform=False, filter_warning=True, ignore_index=False, **kwargs, ): ignore_order = kwargs.pop("ignore_order", False) if axis == 1: return pd.concat(dfs, axis=axis, join=join, **kwargs) # Support concatenating indices along axis 0 if isinstance(dfs[0], pd.Index): if isinstance(dfs[0], pd.CategoricalIndex): for i in range(1, len(dfs)): if not isinstance(dfs[i], pd.CategoricalIndex): dfs[i] = dfs[i].astype("category") return pd.CategoricalIndex( _union_categoricals_wrapper(dfs, ignore_order=ignore_order), name=dfs[0].name, ) elif isinstance(dfs[0], pd.MultiIndex): first, rest = dfs[0], dfs[1:] if all( (isinstance(o, pd.MultiIndex) and o.nlevels >= first.nlevels) for o in rest ): arrays = [ concat([i._get_level_values(n) for i in dfs]) for n in range(first.nlevels) ] return pd.MultiIndex.from_arrays(arrays, names=first.names) to_concat = (first.values,) + tuple(k._values for k in rest) new_tuples = np.concatenate(to_concat) try: return pd.MultiIndex.from_tuples(new_tuples, names=first.names) except Exception: return pd.Index(new_tuples) return dfs[0].append(dfs[1:]) # Handle categorical index separately dfs0_index = dfs[0].index has_categoricalindex = isinstance(dfs0_index, pd.CategoricalIndex) or ( isinstance(dfs0_index, pd.MultiIndex) and any(isinstance(i, pd.CategoricalIndex) for i in dfs0_index.levels) ) if has_categoricalindex: dfs2 = [df.reset_index(drop=True) for df in 
dfs] ind = concat([df.index for df in dfs]) else: dfs2 = dfs ind = None # Concatenate the partitions together, handling categories as needed if ( isinstance(dfs2[0], pd.DataFrame) if uniform else any(isinstance(df, pd.DataFrame) for df in dfs2) ): if uniform or PANDAS_GE_220: dfs3 = dfs2 cat_mask = dfs2[0].dtypes == "category" else: # When concatenating mixed dataframes and series on axis 1, Pandas <2.2 # converts series to dataframes with a single column named 0, then # concatenates. dfs3 = [ ( df if isinstance(df, pd.DataFrame) else df.to_frame().rename(columns={df.name: 0}) ) for df in dfs2 ] # pandas may raise a RuntimeWarning for comparing ints and strs with warnings.catch_warnings(): warnings.simplefilter("ignore", RuntimeWarning) if filter_warning: warnings.simplefilter("ignore", FutureWarning) cat_mask = pd.concat( [(df.dtypes == "category").to_frame().T for df in dfs3], join=join, **kwargs, ).any() if isinstance(cat_mask, pd.Series) and cat_mask.any(): not_cat = cat_mask[~cat_mask].index # this should be aligned, so no need to filter warning out = pd.concat( [df[df.columns.intersection(not_cat)] for df in dfs3], join=join, **kwargs, ) temp_ind = out.index for col in cat_mask.index.difference(not_cat): # Find an example of categoricals in this column for df in dfs3: sample = df.get(col) if sample is not None: break # Extract partitions, subbing in missing if needed parts = [] for df in dfs3: if col in df.columns: parts.append(df[col]) else: codes = np.full(len(df), -1, dtype="i8") data = pd.Categorical.from_codes( codes, sample.cat.categories, sample.cat.ordered ) parts.append(data) out[col] = _union_categoricals_wrapper(parts, ignore_order=ignore_order) # Pandas resets index type on assignment if frame is empty # https://github.com/pandas-dev/pandas/issues/17101 if not len(temp_ind): out.index = temp_ind out = out.reindex(columns=cat_mask.index) else: # pandas may raise a RuntimeWarning for comparing ints and strs with warnings.catch_warnings(): warnings.simplefilter("ignore", RuntimeWarning) if filter_warning: warnings.simplefilter("ignore", FutureWarning) out = pd.concat(dfs3, join=join, sort=False) else: if isinstance(dfs2[0].dtype, pd.CategoricalDtype): if ind is None: ind = concat([df.index for df in dfs2]) return pd.Series( union_categoricals(dfs2, ignore_order=ignore_order), index=ind, name=dfs2[0].name, ) with warnings.catch_warnings(): if filter_warning: warnings.simplefilter("ignore", FutureWarning) out = pd.concat(dfs2, join=join, **kwargs) # Re-add the index if needed if ind is not None: out.index = ind return out @categorical_dtype_dispatch.register((pd.DataFrame, pd.Series, pd.Index)) def categorical_dtype_pandas(categories=None, ordered=False): return pd.api.types.CategoricalDtype(categories=categories, ordered=ordered) @tolist_dispatch.register((np.ndarray, pd.Series, pd.Index, pd.Categorical)) def tolist_numpy_or_pandas(obj): return obj.tolist() @is_categorical_dtype_dispatch.register( (pd.Series, pd.Index, pd.api.extensions.ExtensionDtype, np.dtype) ) def is_categorical_dtype_pandas(obj): if hasattr(obj, "dtype"): dtype = obj.dtype else: dtype = obj return isinstance(dtype, pd.CategoricalDtype) @grouper_dispatch.register((pd.DataFrame, pd.Series)) def get_grouper_pandas(obj): return pd.core.groupby.Grouper @percentile_lookup.register((pd.Series, pd.Index)) def percentile(a, q, interpolation="linear"): if isinstance(a.dtype, pd.ArrowDtype): a = a.to_numpy() return _percentile(a, q, interpolation) @to_pandas_dispatch.register((pd.DataFrame, pd.Series, pd.Index)) 
def to_pandas_dispatch_from_pandas(data, **kwargs): return data
ShuffleGroupResult
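The __sizeof__ override sums the size of every key and value instead of letting a sampler estimate a possibly very skewed dict. A hypothetical, dask-free analogue of the same idea, using sys.getsizeof in place of dask's sizeof dispatcher:

import sys

class DeepSizeDict(dict):
    def __sizeof__(self) -> int:
        # Add every key's and value's size to the container's own size,
        # rather than estimating from a sample of items.
        total = super().__sizeof__()
        for k, v in self.items():
            total += sys.getsizeof(k)
            total += sys.getsizeof(v)
        return total

splits = DeepSizeDict({i: list(range(i * 10)) for i in range(8)})
print(sys.getsizeof(splits))  # reflects the per-item sizes, not a sampled estimate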
python
HypothesisWorks__hypothesis
hypothesis-python/tests/cover/test_lookup.py
{ "start": 29680, "end": 30292 }
class ____:
    def __init__(self, value=-1) -> None:
        pass


@pytest.mark.parametrize(
    "typ,repr_",
    [
        (int, "integers()"),
        (list[str], "lists(text())"),
        (_List[str], "lists(text())"),
        ("not a type", "from_type('not a type')"),
        (random.Random, "randoms()"),
        (_EmptyClass, "from_type(tests.cover.test_lookup._EmptyClass)"),
        (
            st.SearchStrategy[str],
            "from_type(hypothesis.strategies.SearchStrategy[str])",
        ),
    ],
)
def test_repr_passthrough(typ, repr_):
    assert repr(st.from_type(typ)) == repr_
_EmptyClass
python
pydantic__pydantic
tests/test_validate_call.py
{ "start": 37300, "end": 37805 }
class ____[T]: def g(self): @validate_call(validate_return=True) def inner(a: T) -> T: ... def h[S](self): @validate_call(validate_return=True) def inner(a: T) -> S: ... """ ) A = module.A a = A[int]() with pytest.raises(NameError): a.g() with pytest.raises(NameError): a.h() with pytest.raises(NameError): create_module( """ from __future__ import annotations from pydantic import validate_call
A
python
django__django
tests/custom_lookups/tests.py
{ "start": 1826, "end": 2069 }
class ____(models.Transform):
    lookup_name = "lastdigit"

    def as_sql(self, compiler, connection):
        lhs, lhs_params = compiler.compile(self.lhs)
        return "SUBSTR(CAST(%s AS CHAR(2)), 2, 1)" % lhs, lhs_params
LastDigitTransform
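A registration/usage sketch for the transform above; Author with an IntegerField named age is a hypothetical model, and the CAST AS CHAR(2) in as_sql means it only really works for two-digit values:

models.IntegerField.register_lookup(LastDigitTransform)
# Ages ending in 7 -> WHERE SUBSTR(CAST("age" AS CHAR(2)), 2, 1) = '7'
Author.objects.filter(age__lastdigit=7)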
python
pydantic__pydantic
tests/mypy/modules/plugin_fail.py
{ "start": 3666, "end": 3854 }
class ____(BaseModel, alias_generator=lambda x: x + '_'):
    x: int


KwargsAliasGeneratorModel(x=1)
KwargsAliasGeneratorModel(x_=1)
KwargsAliasGeneratorModel(z=1)
KwargsAliasGeneratorModel
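The alias_generator appends an underscore, so field x must be supplied as x_. A runtime behaviour sketch under pydantic v2 defaults (populate_by_name off, extra inputs ignored):

KwargsAliasGeneratorModel(x_=1)  # ok: matches the generated alias
KwargsAliasGeneratorModel(x=1)   # ValidationError: x_ is required ('x' does not match the alias)
KwargsAliasGeneratorModel(z=1)   # ValidationError: x_ is required ('z' is not a field)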
python
pytorch__pytorch
torch/_inductor/codegen/debug_utils.py
{ "start": 1175, "end": 1873 }
class ____(Enum):
    # OFF: No intermediate tensor value debug info will be printed or saved.
    OFF = "0"
    # LEVEL 1: Save all intermediate tensor values to individual `.pt` files. No debug printing will be displayed.
    SAVE_ONLY = "1"
    # LEVEL 2: Print all intermediate tensor values by default to the console. No debug saving will be performed.
    PRINT_ONLY = "2"
    # LEVEL 3: Print all kernel names to the console only. No debug saving/printing for input tensor value info will be performed.
    # This mode can be helpful when you just want to pinpoint which kernel is running into a CUDA IMA issue, etc.
    PRINT_KERNEL_NAMES_ONLY = "3"
IntermediateValueDebuggingLevel
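The string values suggest the level comes from a string-valued setting; value-based lookup on the enum then maps it to a member (how inductor actually reads that setting is not shown here):

level = IntermediateValueDebuggingLevel("2")
assert level is IntermediateValueDebuggingLevel.PRINT_ONLY
print(level.name, level.value)  # PRINT_ONLY 2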
python
pyqtgraph__pyqtgraph
pyqtgraph/flowchart/library/Display.py
{ "start": 6183, "end": 10274 }
class ____(CtrlNode): """Generates a scatter plot from a record array or nested dicts""" nodeName = 'ScatterPlot' uiTemplate = [ ('x', 'combo', {'values': [], 'index': 0}), ('y', 'combo', {'values': [], 'index': 0}), ('sizeEnabled', 'check', {'value': False}), ('size', 'combo', {'values': [], 'index': 0}), ('absoluteSize', 'check', {'value': False}), ('colorEnabled', 'check', {'value': False}), ('color', 'colormap', {}), ('borderEnabled', 'check', {'value': False}), ('border', 'colormap', {}), ] def __init__(self, name): CtrlNode.__init__(self, name, terminals={ 'input': {'io': 'in'}, 'plot': {'io': 'out'} }) self.item = ScatterPlotItem() self.keys = [] #self.ui = QtWidgets.QWidget() #self.layout = QtWidgets.QGridLayout() #self.ui.setLayout(self.layout) #self.xCombo = QtWidgets.QComboBox() #self.yCombo = QtWidgets.QComboBox() def process(self, input, display=True): #print "scatterplot process" if not display: return {'plot': None} self.updateKeys(input[0]) x = str(self.ctrls['x'].currentText()) y = str(self.ctrls['y'].currentText()) size = str(self.ctrls['size'].currentText()) pen = QtGui.QPen(QtGui.QColor(0,0,0,0)) points = [] for i in input: pt = {'pos': (i[x], i[y])} if self.ctrls['sizeEnabled'].isChecked(): pt['size'] = i[size] if self.ctrls['borderEnabled'].isChecked(): pt['pen'] = QtGui.QPen(self.ctrls['border'].getColor(i)) else: pt['pen'] = pen if self.ctrls['colorEnabled'].isChecked(): pt['brush'] = QtGui.QBrush(self.ctrls['color'].getColor(i)) points.append(pt) self.item.setPxMode(not self.ctrls['absoluteSize'].isChecked()) self.item.setPoints(points) return {'plot': self.item} def updateKeys(self, data): if isinstance(data, dict): keys = list(data.keys()) elif isinstance(data, list) or isinstance(data, tuple): keys = data elif isinstance(data, np.ndarray) or isinstance(data, np.void): keys = data.dtype.names else: print("Unknown data type:", type(data), data) return for c in self.ctrls.values(): c.blockSignals(True) for c in [self.ctrls['x'], self.ctrls['y'], self.ctrls['size']]: cur = str(c.currentText()) c.clear() for k in keys: c.addItem(k) if k == cur: c.setCurrentIndex(c.count()-1) for c in [self.ctrls['color'], self.ctrls['border']]: c.setArgList(keys) for c in self.ctrls.values(): c.blockSignals(False) self.keys = keys def saveState(self): state = CtrlNode.saveState(self) return {'keys': self.keys, 'ctrls': state} def restoreState(self, state): self.updateKeys(state['keys']) CtrlNode.restoreState(self, state['ctrls']) #class ImageItem(Node): #"""Creates an ImageItem for display in a canvas from a file handle.""" #nodeName = 'Image' #def __init__(self, name): #Node.__init__(self, name, terminals={ #'file': {'io': 'in'}, #'image': {'io': 'out'} #}) #self.imageItem = graphicsItems.ImageItem() #self.handle = None #def process(self, file, display=True): #if not display: #return {'image': None} #if file != self.handle: #self.handle = file #data = file.read() #self.imageItem.updateImage(data) #pos = file.
ScatterPlot
python
pallets__flask
src/flask/blueprints.py
{ "start": 456, "end": 4541 }
class ____(SansioBlueprint): def __init__( self, name: str, import_name: str, static_folder: str | os.PathLike[str] | None = None, static_url_path: str | None = None, template_folder: str | os.PathLike[str] | None = None, url_prefix: str | None = None, subdomain: str | None = None, url_defaults: dict[str, t.Any] | None = None, root_path: str | None = None, cli_group: str | None = _sentinel, # type: ignore ) -> None: super().__init__( name, import_name, static_folder, static_url_path, template_folder, url_prefix, subdomain, url_defaults, root_path, cli_group, ) #: The Click command group for registering CLI commands for this #: object. The commands are available from the ``flask`` command #: once the application has been discovered and blueprints have #: been registered. self.cli = AppGroup() # Set the name of the Click group in case someone wants to add # the app's commands to another CLI tool. self.cli.name = self.name def get_send_file_max_age(self, filename: str | None) -> int | None: """Used by :func:`send_file` to determine the ``max_age`` cache value for a given file path if it wasn't passed. By default, this returns :data:`SEND_FILE_MAX_AGE_DEFAULT` from the configuration of :data:`~flask.current_app`. This defaults to ``None``, which tells the browser to use conditional requests instead of a timed cache, which is usually preferable. Note this is a duplicate of the same method in the Flask class. .. versionchanged:: 2.0 The default configuration is ``None`` instead of 12 hours. .. versionadded:: 0.9 """ value = current_app.config["SEND_FILE_MAX_AGE_DEFAULT"] if value is None: return None if isinstance(value, timedelta): return int(value.total_seconds()) return value # type: ignore[no-any-return] def send_static_file(self, filename: str) -> Response: """The view function used to serve files from :attr:`static_folder`. A route is automatically registered for this view at :attr:`static_url_path` if :attr:`static_folder` is set. Note this is a duplicate of the same method in the Flask class. .. versionadded:: 0.5 """ if not self.has_static_folder: raise RuntimeError("'static_folder' must be set to serve static_files.") # send_file only knows to call get_send_file_max_age on the app, # call it here so it works for blueprints too. max_age = self.get_send_file_max_age(filename) return send_from_directory( t.cast(str, self.static_folder), filename, max_age=max_age ) def open_resource( self, resource: str, mode: str = "rb", encoding: str | None = "utf-8" ) -> t.IO[t.AnyStr]: """Open a resource file relative to :attr:`root_path` for reading. The blueprint-relative equivalent of the app's :meth:`~.Flask.open_resource` method. :param resource: Path to the resource relative to :attr:`root_path`. :param mode: Open the file in this mode. Only reading is supported, valid values are ``"r"`` (or ``"rt"``) and ``"rb"``. :param encoding: Open the file with this encoding when opening in text mode. This is ignored when opening in binary mode. .. versionchanged:: 3.1 Added the ``encoding`` parameter. """ if mode not in {"r", "rt", "rb"}: raise ValueError("Resources can only be opened for reading.") path = os.path.join(self.root_path, resource) if mode == "rb": return open(path, mode) # pyright: ignore return open(path, mode, encoding=encoding)
Blueprint
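A minimal registration sketch using the standard Flask API (not specific to this module); the blueprint name, prefix, and route are illustrative:

from flask import Flask, Blueprint

bp = Blueprint("admin", __name__, url_prefix="/admin")

@bp.route("/ping")
def ping():
    return "pong"

app = Flask(__name__)
app.register_blueprint(bp)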
python
django__django
tests/delete/models.py
{ "start": 4810, "end": 4926 }
class ____(models.Model):
    m = models.ForeignKey(M, models.CASCADE)
    r = models.ForeignKey(R, models.CASCADE)
MR
python
xlwings__xlwings
xlwings/constants.py
{ "start": 78052, "end": 78156 }
class ____:
    xlVerbOpen = 2      # from enum XlOLEVerb
    xlVerbPrimary = 1   # from enum XlOLEVerb
OLEVerb
python
huggingface__transformers
src/transformers/models/exaone4/modeling_exaone4.py
{ "start": 13761, "end": 15551 }
class ____(GradientCheckpointingLayer): def __init__(self, config: Exaone4Config, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = Exaone4Attention(config=config, layer_idx=layer_idx) self.mlp = Exaone4MLP(config) self.post_attention_layernorm = Exaone4RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_feedforward_layernorm = Exaone4RMSNorm(config.hidden_size, eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, **kwargs: Unpack[TransformersKwargs], ) -> torch.Tensor: residual = hidden_states hidden_states, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.mlp(hidden_states) hidden_states = self.post_feedforward_layernorm(hidden_states) hidden_states = residual + hidden_states return hidden_states @auto_docstring
Exaone4DecoderLayer
python
tensorflow__tensorflow
tensorflow/dtensor/python/tests/spmd_test.py
{ "start": 186767, "end": 190432 }
class ____(test_util.DTensorBaseTest): def setUp(self): super(DTensorRelayoutTest, self).setUp() self.skipForDeviceType(['TPU'], 'all tests require 8 TPU cores.', unless_device_count_equals_to=8) global_ids = test_util.create_device_ids_array((2, 4)) local_ids = np.ravel(global_ids).tolist() mesh_dict = { device: Mesh([_MESH_DIM_X, _MESH_DIM_Y], global_ids, local_ids, test_util.create_device_list((2, 4), device), use_xla_spmd=test_util.get_use_xla_spmd(device)) for device in ('CPU', 'GPU', 'TPU') } self.mesh = self.configTestMesh(mesh_dict) context.ensure_initialized() def testRelayoutEagerAllConcat(self): op = gen_nn_ops.relu a = constant_op.constant([[[1.], [-2.], [3.], [-4.]], [[5.], [-6.], [-7.], [8.]]]) assert a.shape == [2, 4, 1] expected_result = op(a) init_layout = Layout([_MESH_DIM_X, _MESH_DIM_Y, layout_lib.UNSHARDED], self.mesh) a = api.relayout(a, init_layout) dtensor_output = op(a) final_layout = Layout( [_MESH_DIM_X, layout_lib.UNSHARDED, layout_lib.UNSHARDED], self.mesh) # eager relayout dtensor_result = api.relayout(dtensor_output, final_layout) self.assertDTensorEqual(expected_result, final_layout, dtensor_result) def testRelayoutEagerSlice(self): op = gen_nn_ops.relu a = constant_op.constant([[[1.], [-2.], [3.], [-4.]], [[5.], [-6.], [-7.], [8.]]]) assert a.shape == [2, 4, 1] expected_result = op(a) init_layout = Layout( [layout_lib.UNSHARDED, layout_lib.UNSHARDED, layout_lib.UNSHARDED], self.mesh) a = api.relayout(a, init_layout) dtensor_output = op(a) final_layout = Layout([_MESH_DIM_X, _MESH_DIM_Y, layout_lib.UNSHARDED], self.mesh) # eager relayout dtensor_result = api.relayout(dtensor_output, final_layout) self.assertDTensorEqual(expected_result, final_layout, dtensor_result) def testRelayoutGraphAllConcat(self): op = gen_nn_ops.relu a = constant_op.constant([[[1.], [-2.], [3.], [-4.]], [[5.], [-6.], [-7.], [8.]]]) assert a.shape == [2, 4, 1] expected_result = op(a) init_layout = Layout([_MESH_DIM_X, _MESH_DIM_Y, layout_lib.UNSHARDED], self.mesh) a = api.relayout(a, init_layout) final_layout = Layout( [_MESH_DIM_X, layout_lib.UNSHARDED, layout_lib.UNSHARDED], self.mesh) @polymorphic_function.function def wrap_fn(x): dtensor_output = op(x) return api.relayout(dtensor_output, final_layout) dtensor_result = wrap_fn(a) self.assertDTensorEqual(expected_result, final_layout, dtensor_result) def testRelayoutGraphSlice(self): op = gen_nn_ops.relu a = constant_op.constant([[[1.], [-2.], [3.], [-4.]], [[5.], [-6.], [-7.], [8.]]]) assert a.shape == [2, 4, 1] expected_result = op(a) init_layout = Layout( [_MESH_DIM_X, layout_lib.UNSHARDED, layout_lib.UNSHARDED], self.mesh) a = api.relayout(a, init_layout) final_layout = Layout([_MESH_DIM_X, _MESH_DIM_Y, layout_lib.UNSHARDED], self.mesh) @polymorphic_function.function def wrap_fn(x): dtensor_output = op(x) return api.relayout(dtensor_output, final_layout) dtensor_result = wrap_fn(a) self.assertDTensorEqual(expected_result, final_layout, dtensor_result) if __name__ == '__main__': tf_test.main()
DTensorRelayoutTest
python
matplotlib__matplotlib
lib/matplotlib/projections/polar.py
{ "start": 27914, "end": 54915 }
class ____(Axes): """ A polar graph projection, where the input dimensions are *theta*, *r*. Theta starts pointing east and goes anti-clockwise. """ name = 'polar' def __init__(self, *args, theta_offset=0, theta_direction=1, rlabel_position=22.5, **kwargs): # docstring inherited self._default_theta_offset = theta_offset self._default_theta_direction = theta_direction self._default_rlabel_position = np.deg2rad(rlabel_position) super().__init__(*args, **kwargs) self.use_sticky_edges = True self.set_aspect('equal', adjustable='box', anchor='C') self.clear() def clear(self): # docstring inherited super().clear() self.title.set_y(1.05) start = self.spines.get('start', None) if start: start.set_visible(False) end = self.spines.get('end', None) if end: end.set_visible(False) self.set_xlim(0.0, 2 * np.pi) self.grid(mpl.rcParams['polaraxes.grid']) inner = self.spines.get('inner', None) if inner: inner.set_visible(False) self.set_rorigin(None) self.set_theta_offset(self._default_theta_offset) self.set_theta_direction(self._default_theta_direction) def _init_axis(self): # This is moved out of __init__ because non-separable axes don't use it self.xaxis = ThetaAxis(self, clear=False) self.yaxis = RadialAxis(self, clear=False) self.spines['polar'].register_axis(self.yaxis) inner_spine = self.spines.get('inner', None) if inner_spine is not None: # Subclasses may not have inner spine. inner_spine.register_axis(self.yaxis) def _set_lim_and_transforms(self): # A view limit where the minimum radius can be locked if the user # specifies an alternate origin. self._originViewLim = mtransforms.LockableBbox(self.viewLim) # Handle angular offset and direction. self._direction = mtransforms.Affine2D() \ .scale(self._default_theta_direction, 1.0) self._theta_offset = mtransforms.Affine2D() \ .translate(self._default_theta_offset, 0.0) self.transShift = self._direction + self._theta_offset # A view limit shifted to the correct location after accounting for # orientation and offset. self._realViewLim = mtransforms.TransformedBbox(self.viewLim, self.transShift) # Transforms the x and y axis separately by a scale factor # It is assumed that this part will have non-linear components self.transScale = mtransforms.TransformWrapper( mtransforms.IdentityTransform()) # Scale view limit into a bbox around the selected wedge. This may be # smaller than the usual unit axes rectangle if not plotting the full # circle. self.axesLim = _WedgeBbox((0.5, 0.5), self._realViewLim, self._originViewLim) # Scale the wedge to fill the axes. self.transWedge = mtransforms.BboxTransformFrom(self.axesLim) # Scale the axes to fill the figure. self.transAxes = mtransforms.BboxTransformTo(self.bbox) # A (possibly non-linear) projection on the (already scaled) # data. This one is aware of rmin self.transProjection = self.PolarTransform( self, scale_transform=self.transScale ) # Add dependency on rorigin. self.transProjection.set_children(self._originViewLim) # An affine transformation on the data, generally to limit the # range of the axes self.transProjectionAffine = self.PolarAffine(self.transScale, self._originViewLim) # The complete data transformation stack -- from data all the # way to display coordinates # # 1. Remove any radial axis scaling (e.g. log scaling) # 2. Shift data in the theta direction # 3. Project the data from polar to cartesian values # (with the origin in the same place) # 4. Scale and translate the cartesian values to Axes coordinates # (here the origin is moved to the lower left of the Axes) # 5. 
Move and scale to fill the Axes # 6. Convert from Axes coordinates to Figure coordinates self.transData = ( self.transScale + self.transShift + self.transProjection + ( self.transProjectionAffine + self.transWedge + self.transAxes ) ) # This is the transform for theta-axis ticks. It is # equivalent to transData, except it always puts r == 0.0 and r == 1.0 # at the edge of the axis circles. self._xaxis_transform = ( mtransforms.blended_transform_factory( mtransforms.IdentityTransform(), mtransforms.BboxTransformTo(self.viewLim)) + self.transData) # The theta labels are flipped along the radius, so that text 1 is on # the outside by default. This should work the same as before. flipr_transform = mtransforms.Affine2D() \ .translate(0.0, -0.5) \ .scale(1.0, -1.0) \ .translate(0.0, 0.5) self._xaxis_text_transform = flipr_transform + self._xaxis_transform # This is the transform for r-axis ticks. It scales the theta # axis so the gridlines from 0.0 to 1.0, now go from thetamin to # thetamax. self._yaxis_transform = ( mtransforms.blended_transform_factory( mtransforms.BboxTransformTo(self.viewLim), mtransforms.IdentityTransform()) + self.transData) # The r-axis labels are put at an angle and padded in the r-direction self._r_label_position = mtransforms.Affine2D() \ .translate(self._default_rlabel_position, 0.0) self._yaxis_text_transform = mtransforms.TransformWrapper( self._r_label_position + self.transData) def get_xaxis_transform(self, which='grid'): _api.check_in_list(['tick1', 'tick2', 'grid'], which=which) return self._xaxis_transform def get_xaxis_text1_transform(self, pad): return self._xaxis_text_transform, 'center', 'center' def get_xaxis_text2_transform(self, pad): return self._xaxis_text_transform, 'center', 'center' def get_yaxis_transform(self, which='grid'): if which in ('tick1', 'tick2'): return self._yaxis_text_transform elif which == 'grid': return self._yaxis_transform else: _api.check_in_list(['tick1', 'tick2', 'grid'], which=which) def get_yaxis_text1_transform(self, pad): thetamin, thetamax = self._realViewLim.intervalx if _is_full_circle_rad(thetamin, thetamax): return self._yaxis_text_transform, 'bottom', 'left' elif self.get_theta_direction() > 0: halign = 'left' pad_shift = _ThetaShift(self, pad, 'min') else: halign = 'right' pad_shift = _ThetaShift(self, pad, 'max') return self._yaxis_text_transform + pad_shift, 'center', halign def get_yaxis_text2_transform(self, pad): if self.get_theta_direction() > 0: halign = 'right' pad_shift = _ThetaShift(self, pad, 'max') else: halign = 'left' pad_shift = _ThetaShift(self, pad, 'min') return self._yaxis_text_transform + pad_shift, 'center', halign def draw(self, renderer): self._unstale_viewLim() thetamin, thetamax = np.rad2deg(self._realViewLim.intervalx) if thetamin > thetamax: thetamin, thetamax = thetamax, thetamin rscale_tr = self.yaxis.get_transform() rmin, rmax = ((rscale_tr.transform(self._realViewLim.intervaly) - rscale_tr.transform(self.get_rorigin())) * self.get_rsign()) if isinstance(self.patch, mpatches.Wedge): # Backwards-compatibility: Any subclassed Axes might override the # patch to not be the Wedge that PolarAxes uses. 
center = self.transWedge.transform((0.5, 0.5)) self.patch.set_center(center) self.patch.set_theta1(thetamin) self.patch.set_theta2(thetamax) edge, _ = self.transWedge.transform((1, 0)) radius = edge - center[0] width = min(radius * (rmax - rmin) / rmax, radius) self.patch.set_radius(radius) self.patch.set_width(width) inner_width = radius - width inner = self.spines.get('inner', None) if inner: inner.set_visible(inner_width != 0.0) visible = not _is_full_circle_deg(thetamin, thetamax) # For backwards compatibility, any subclassed Axes might override the # spines to not include start/end that PolarAxes uses. start = self.spines.get('start', None) end = self.spines.get('end', None) if start: start.set_visible(visible) if end: end.set_visible(visible) if visible: yaxis_text_transform = self._yaxis_transform else: yaxis_text_transform = self._r_label_position + self.transData if self._yaxis_text_transform != yaxis_text_transform: self._yaxis_text_transform.set(yaxis_text_transform) self.yaxis.reset_ticks() self.yaxis.set_clip_path(self.patch) super().draw(renderer) def _gen_axes_patch(self): return mpatches.Wedge((0.5, 0.5), 0.5, 0.0, 360.0) def _gen_axes_spines(self): spines = { 'polar': Spine.arc_spine(self, 'top', (0.5, 0.5), 0.5, 0, 360), 'start': Spine.linear_spine(self, 'left'), 'end': Spine.linear_spine(self, 'right'), 'inner': Spine.arc_spine(self, 'bottom', (0.5, 0.5), 0.0, 0, 360), } spines['polar'].set_transform(self.transWedge + self.transAxes) spines['inner'].set_transform(self.transWedge + self.transAxes) spines['start'].set_transform(self._yaxis_transform) spines['end'].set_transform(self._yaxis_transform) return spines def set_thetamax(self, thetamax): """Set the maximum theta limit in degrees.""" self.viewLim.x1 = np.deg2rad(thetamax) def get_thetamax(self): """Return the maximum theta limit in degrees.""" return np.rad2deg(self.viewLim.xmax) def set_thetamin(self, thetamin): """Set the minimum theta limit in degrees.""" self.viewLim.x0 = np.deg2rad(thetamin) def get_thetamin(self): """Get the minimum theta limit in degrees.""" return np.rad2deg(self.viewLim.xmin) def set_thetalim(self, *args, **kwargs): r""" Set the minimum and maximum theta values. Can take the following signatures: - ``set_thetalim(minval, maxval)``: Set the limits in radians. - ``set_thetalim(thetamin=minval, thetamax=maxval)``: Set the limits in degrees. where minval and maxval are the minimum and maximum limits. Values are wrapped in to the range :math:`[0, 2\pi]` (in radians), so for example it is possible to do ``set_thetalim(-np.pi / 2, np.pi / 2)`` to have an axis symmetric around 0. A ValueError is raised if the absolute angle difference is larger than a full circle. """ orig_lim = self.get_xlim() # in radians if 'thetamin' in kwargs: kwargs['xmin'] = np.deg2rad(kwargs.pop('thetamin')) if 'thetamax' in kwargs: kwargs['xmax'] = np.deg2rad(kwargs.pop('thetamax')) new_min, new_max = self.set_xlim(*args, **kwargs) # Parsing all permutations of *args, **kwargs is tricky; it is simpler # to let set_xlim() do it and then validate the limits. if abs(new_max - new_min) > 2 * np.pi: self.set_xlim(orig_lim) # un-accept the change raise ValueError("The angle range must be less than a full circle") return tuple(np.rad2deg((new_min, new_max))) def set_theta_offset(self, offset): """ Set the offset for the location of 0 in radians. """ mtx = self._theta_offset.get_matrix() mtx[0, 2] = offset self._theta_offset.invalidate() def get_theta_offset(self): """ Get the offset for the location of 0 in radians. 
""" return self._theta_offset.get_matrix()[0, 2] def set_theta_zero_location(self, loc, offset=0.0): """ Set the location of theta's zero. This simply calls `set_theta_offset` with the correct value in radians. Parameters ---------- loc : str May be one of "N", "NW", "W", "SW", "S", "SE", "E", or "NE". offset : float, default: 0 An offset in degrees to apply from the specified *loc*. **Note:** this offset is *always* applied counter-clockwise regardless of the direction setting. """ mapping = { 'N': np.pi * 0.5, 'NW': np.pi * 0.75, 'W': np.pi, 'SW': np.pi * 1.25, 'S': np.pi * 1.5, 'SE': np.pi * 1.75, 'E': 0, 'NE': np.pi * 0.25} return self.set_theta_offset(mapping[loc] + np.deg2rad(offset)) def set_theta_direction(self, direction): """ Set the direction in which theta increases. clockwise, -1: Theta increases in the clockwise direction counterclockwise, anticlockwise, 1: Theta increases in the counterclockwise direction """ mtx = self._direction.get_matrix() if direction in ('clockwise', -1): mtx[0, 0] = -1 elif direction in ('counterclockwise', 'anticlockwise', 1): mtx[0, 0] = 1 else: _api.check_in_list( [-1, 1, 'clockwise', 'counterclockwise', 'anticlockwise'], direction=direction) self._direction.invalidate() def get_theta_direction(self): """ Get the direction in which theta increases. -1: Theta increases in the clockwise direction 1: Theta increases in the counterclockwise direction """ return self._direction.get_matrix()[0, 0] def set_rmax(self, rmax): """ Set the outer radial limit. Parameters ---------- rmax : float """ self.viewLim.y1 = rmax def get_rmax(self): """ Returns ------- float Outer radial limit. """ return self.viewLim.ymax def set_rmin(self, rmin): """ Set the inner radial limit. Parameters ---------- rmin : float """ self.viewLim.y0 = rmin def get_rmin(self): """ Returns ------- float The inner radial limit. """ return self.viewLim.ymin def set_rorigin(self, rorigin): """ Update the radial origin. Parameters ---------- rorigin : float """ self._originViewLim.locked_y0 = rorigin def get_rorigin(self): """ Returns ------- float """ return self._originViewLim.y0 def get_rsign(self): return np.sign(self._originViewLim.y1 - self._originViewLim.y0) def set_rlim(self, bottom=None, top=None, *, emit=True, auto=False, **kwargs): """ Set the radial axis view limits. This function behaves like `.Axes.set_ylim`, but additionally supports *rmin* and *rmax* as aliases for *bottom* and *top*. See Also -------- .Axes.set_ylim """ if 'rmin' in kwargs: if bottom is None: bottom = kwargs.pop('rmin') else: raise ValueError('Cannot supply both positional "bottom"' 'argument and kwarg "rmin"') if 'rmax' in kwargs: if top is None: top = kwargs.pop('rmax') else: raise ValueError('Cannot supply both positional "top"' 'argument and kwarg "rmax"') return self.set_ylim(bottom=bottom, top=top, emit=emit, auto=auto, **kwargs) def get_rlabel_position(self): """ Returns ------- float The theta position of the radius labels in degrees. """ return np.rad2deg(self._r_label_position.get_matrix()[0, 2]) def set_rlabel_position(self, value): """ Update the theta position of the radius labels. Parameters ---------- value : number The angular position of the radius labels in degrees. 
""" self._r_label_position.clear().translate(np.deg2rad(value), 0.0) def set_rscale(self, *args, **kwargs): return Axes.set_yscale(self, *args, **kwargs) def set_rticks(self, *args, **kwargs): return Axes.set_yticks(self, *args, **kwargs) def set_thetagrids(self, angles, labels=None, fmt=None, **kwargs): """ Set the theta gridlines in a polar plot. Parameters ---------- angles : tuple with floats, degrees The angles of the theta gridlines. labels : tuple with strings or None The labels to use at each theta gridline. The `.projections.polar.ThetaFormatter` will be used if None. fmt : str or None Format string used in `matplotlib.ticker.FormatStrFormatter`. For example '%f'. Note that the angle that is used is in radians. Returns ------- lines : list of `.lines.Line2D` The theta gridlines. labels : list of `.text.Text` The tick labels. Other Parameters ---------------- **kwargs *kwargs* are optional `.Text` properties for the labels. .. warning:: This only sets the properties of the current ticks. Ticks are not guaranteed to be persistent. Various operations can create, delete and modify the Tick instances. There is an imminent risk that these settings can get lost if you work on the figure further (including also panning/zooming on a displayed figure). Use `.set_tick_params` instead if possible. See Also -------- .PolarAxes.set_rgrids .Axis.get_gridlines .Axis.get_ticklabels """ # Make sure we take into account unitized data angles = self.convert_yunits(angles) angles = np.deg2rad(angles) self.set_xticks(angles) if labels is not None: self.set_xticklabels(labels) elif fmt is not None: self.xaxis.set_major_formatter(mticker.FormatStrFormatter(fmt)) for t in self.xaxis.get_ticklabels(): t._internal_update(kwargs) return self.xaxis.get_ticklines(), self.xaxis.get_ticklabels() def set_rgrids(self, radii, labels=None, angle=None, fmt=None, **kwargs): """ Set the radial gridlines on a polar plot. Parameters ---------- radii : tuple with floats The radii for the radial gridlines labels : tuple with strings or None The labels to use at each radial gridline. The `matplotlib.ticker.ScalarFormatter` will be used if None. angle : float The angular position of the radius labels in degrees. fmt : str or None Format string used in `matplotlib.ticker.FormatStrFormatter`. For example '%f'. Returns ------- lines : list of `.lines.Line2D` The radial gridlines. labels : list of `.text.Text` The tick labels. Other Parameters ---------------- **kwargs *kwargs* are optional `.Text` properties for the labels. .. warning:: This only sets the properties of the current ticks. Ticks are not guaranteed to be persistent. Various operations can create, delete and modify the Tick instances. There is an imminent risk that these settings can get lost if you work on the figure further (including also panning/zooming on a displayed figure). Use `.set_tick_params` instead if possible. 
See Also -------- .PolarAxes.set_thetagrids .Axis.get_gridlines .Axis.get_ticklabels """ # Make sure we take into account unitized data radii = self.convert_xunits(radii) radii = np.asarray(radii) self.set_yticks(radii) if labels is not None: self.set_yticklabels(labels) elif fmt is not None: self.yaxis.set_major_formatter(mticker.FormatStrFormatter(fmt)) if angle is None: angle = self.get_rlabel_position() self.set_rlabel_position(angle) for t in self.yaxis.get_ticklabels(): t._internal_update(kwargs) return self.yaxis.get_gridlines(), self.yaxis.get_ticklabels() def format_coord(self, theta, r): # docstring inherited screen_xy = self.transData.transform((theta, r)) screen_xys = screen_xy + np.stack( np.meshgrid([-1, 0, 1], [-1, 0, 1])).reshape((2, -1)).T ts, rs = self.transData.inverted().transform(screen_xys).T delta_t = abs((ts - theta + np.pi) % (2 * np.pi) - np.pi).max() delta_t_halfturns = delta_t / np.pi delta_t_degrees = delta_t_halfturns * 180 delta_r = abs(rs - r).max() if theta < 0: theta += 2 * np.pi theta_halfturns = theta / np.pi theta_degrees = theta_halfturns * 180 # See ScalarFormatter.format_data_short. For r, use #g-formatting # (as for linear axes), but for theta, use f-formatting as scientific # notation doesn't make sense and the trailing dot is ugly. def format_sig(value, delta, opt, fmt): # For "f", only count digits after decimal point. prec = (max(0, -math.floor(math.log10(delta))) if fmt == "f" else cbook._g_sig_digits(value, delta)) return f"{value:-{opt}.{prec}{fmt}}" # In case fmt_xdata was not specified, resort to default if self.fmt_ydata is None: r_label = format_sig(r, delta_r, "#", "g") else: r_label = self.format_ydata(r) if self.fmt_xdata is None: return ('\N{GREEK SMALL LETTER THETA}={}\N{GREEK SMALL LETTER PI} ' '({}\N{DEGREE SIGN}), r={}').format( format_sig(theta_halfturns, delta_t_halfturns, "", "f"), format_sig(theta_degrees, delta_t_degrees, "", "f"), r_label ) else: return '\N{GREEK SMALL LETTER THETA}={}, r={}'.format( self.format_xdata(theta), r_label ) def get_data_ratio(self): """ Return the aspect ratio of the data itself. For a polar plot, this should always be 1.0 """ return 1.0 # # # Interactive panning def can_zoom(self): """ Return whether this Axes supports the zoom box button functionality. A polar Axes does not support zoom boxes. """ return False def can_pan(self): """ Return whether this Axes supports the pan/zoom button functionality. For a polar Axes, this is slightly misleading. Both panning and zooming are performed by the same button. Panning is performed in azimuth while zooming is done along the radial. 
""" return True def start_pan(self, x, y, button): angle = np.deg2rad(self.get_rlabel_position()) mode = '' if button == 1: epsilon = np.pi / 45.0 t, r = self.transData.inverted().transform((x, y)) if angle - epsilon <= t <= angle + epsilon: mode = 'drag_r_labels' elif button == 3: mode = 'zoom' self._pan_start = types.SimpleNamespace( rmax=self.get_rmax(), trans=self.transData.frozen(), trans_inverse=self.transData.inverted().frozen(), r_label_angle=self.get_rlabel_position(), x=x, y=y, mode=mode) def end_pan(self): del self._pan_start def drag_pan(self, button, key, x, y): p = self._pan_start if p.mode == 'drag_r_labels': (startt, startr), (t, r) = p.trans_inverse.transform( [(p.x, p.y), (x, y)]) # Deal with theta dt = np.rad2deg(startt - t) self.set_rlabel_position(p.r_label_angle - dt) trans, vert1, horiz1 = self.get_yaxis_text1_transform(0.0) trans, vert2, horiz2 = self.get_yaxis_text2_transform(0.0) for t in self.yaxis.majorTicks + self.yaxis.minorTicks: t.label1.set_va(vert1) t.label1.set_ha(horiz1) t.label2.set_va(vert2) t.label2.set_ha(horiz2) elif p.mode == 'zoom': (startt, startr), (t, r) = p.trans_inverse.transform( [(p.x, p.y), (x, y)]) # Deal with r scale = r / startr self.set_rmax(p.rmax / scale) # To keep things all self-contained, we can put aliases to the Polar classes # defined above. This isn't strictly necessary, but it makes some of the # code more readable, and provides a backwards compatible Polar API. In # particular, this is used by the :doc:`/gallery/specialty_plots/radar_chart` # example to override PolarTransform on a PolarAxes subclass, so make sure that # that example is unaffected before changing this. PolarAxes.PolarTransform = PolarTransform PolarAxes.PolarAffine = PolarAffine PolarAxes.InvertedPolarTransform = InvertedPolarTransform PolarAxes.ThetaFormatter = ThetaFormatter PolarAxes.RadialLocator = RadialLocator PolarAxes.ThetaLocator = ThetaLocator
PolarAxes
python
realpython__materials
python-serialize/tabular-data/csv-demo/models.py
{ "start": 223, "end": 1048 }
class ____(NamedTuple):
    id: int
    name: str
    email: str
    language: Language
    registered_at: datetime

    @classmethod
    def fake(cls):
        language = random.choice(list(Language))
        generator = Faker(language)
        return cls(
            generator.pyint(),
            generator.name(),
            generator.email(),
            language,
            generator.date_time_this_year(),
        )

    @classmethod
    def from_dict(cls, row_dict):
        transforms = {
            "id": int,
            "name": str.title,
            "language": Language,
            "registered_at": datetime.fromisoformat,
        }
        return cls(
            **{
                key: transforms.get(key, lambda x: x)(value)
                for key, value in row_dict.items()
            }
        )
User
python
bokeh__bokeh
src/bokeh/colors/groups.py
{ "start": 2823, "end": 3718 }
class ____(ColorGroup):
    ''' CSS "Brown" Color Group as defined by https://www.w3schools.com/colors/colors_groups.asp

    .. bokeh-color:: cornsilk
    .. bokeh-color:: blanchedalmond
    .. bokeh-color:: bisque
    .. bokeh-color:: navajowhite
    .. bokeh-color:: wheat
    .. bokeh-color:: burlywood
    .. bokeh-color:: tan
    .. bokeh-color:: rosybrown
    .. bokeh-color:: sandybrown
    .. bokeh-color:: goldenrod
    .. bokeh-color:: darkgoldenrod
    .. bokeh-color:: peru
    .. bokeh-color:: chocolate
    .. bokeh-color:: saddlebrown
    .. bokeh-color:: sienna
    .. bokeh-color:: brown
    .. bokeh-color:: maroon
    '''
    _colors = ('Cornsilk', 'BlanchedAlmond', 'Bisque', 'NavajoWhite', 'Wheat', 'BurlyWood',
               'Tan', 'RosyBrown', 'SandyBrown', 'Goldenrod', 'DarkGoldenrod', 'Peru',
               'Chocolate', 'SaddleBrown', 'Sienna', 'Brown', 'Maroon')
brown
python
apache__airflow
airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_backfills.py
{ "start": 2947, "end": 3704 }
class ____:
    @provide_session
    def _create_dag_models(self, *, count=1, dag_id_prefix="TEST_DAG", is_paused=False, session=None):
        bundle_name = "dags-folder"
        orm_dag_bundle = DagBundleModel(name=bundle_name)
        session.add(orm_dag_bundle)
        session.flush()
        dags = []
        for num in range(1, count + 1):
            dag_model = DagModel(
                dag_id=f"{dag_id_prefix}_{num}",
                bundle_name=bundle_name,
                fileloc=f"/tmp/dag_{num}.py",
                is_stale=False,
                timetable_summary="0 0 * * *",
                is_paused=is_paused,
            )
            session.add(dag_model)
            dags.append(dag_model)
        return dags
TestBackfillEndpoint
python
allegroai__clearml
clearml/utilities/requests_toolbelt/_compat.py
{ "start": 2143, "end": 9956 }
class ____(MutableMapping): """ :param headers: An iterable of field-value pairs. Must not contain multiple field names when compared case-insensitively. :param kwargs: Additional field-value pairs to pass in to ``dict.update``. A ``dict`` like container for storing HTTP Headers. Field names are stored and compared case-insensitively in compliance with RFC 7230. Iteration provides the first case-sensitive key seen for each case-insensitive pair. Using ``__setitem__`` syntax overwrites fields that compare equal case-insensitively in order to maintain ``dict``'s api. For fields that compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add`` in a loop. If multiple fields that are equal case-insensitively are passed to the constructor or ``.update``, the behavior is undefined and some will be lost. >>> headers = HTTPHeaderDict() >>> headers.add('Set-Cookie', 'foo=bar') >>> headers.add('set-cookie', 'baz=quxx') >>> headers['content-length'] = '7' >>> headers['SET-cookie'] 'foo=bar, baz=quxx' >>> headers['Content-Length'] '7' """ def __init__(self, headers=None, **kwargs): super(HTTPHeaderDict, self).__init__() self._container = {} if headers is not None: if isinstance(headers, HTTPHeaderDict): self._copy_from(headers) else: self.extend(headers) if kwargs: self.extend(kwargs) def __setitem__(self, key, val): self._container[key.lower()] = (key, val) return self._container[key.lower()] def __getitem__(self, key): val = self._container[key.lower()] return ', '.join(val[1:]) def __delitem__(self, key): del self._container[key.lower()] def __contains__(self, key): return key.lower() in self._container def __eq__(self, other): if not isinstance(other, Mapping) and not hasattr(other, 'keys'): return False if not isinstance(other, type(self)): other = type(self)(other) return ({k.lower(): v for k, v in self.itermerged()} == {k.lower(): v for k, v in other.itermerged()}) def __ne__(self, other): return not self.__eq__(other) if not PY3: # Python 2 iterkeys = MutableMapping.iterkeys itervalues = MutableMapping.itervalues __marker = object() def __len__(self): return len(self._container) def __iter__(self): # Only provide the originally cased names for vals in self._container.values(): yield vals[0] def pop(self, key, default=__marker): """D.pop(k[,d]) -> v, remove specified key and return its value. If key is not found, d is returned if given, otherwise KeyError is raised. """ # Using the MutableMapping function directly fails due to the private # marker. # Using ordinary dict.pop would expose the internal structures. # So let's reinvent the wheel. try: value = self[key] except KeyError: if default is self.__marker: raise return default else: del self[key] return value def discard(self, key): try: del self[key] except KeyError: pass def add(self, key, val): """Adds a (name, value) pair, doesn't overwrite the value if it already exists. >>> headers = HTTPHeaderDict(foo='bar') >>> headers.add('Foo', 'baz') >>> headers['foo'] 'bar, baz' """ key_lower = key.lower() new_vals = key, val # Keep the common case aka no item present as fast as possible vals = self._container.setdefault(key_lower, new_vals) if new_vals is not vals: # new_vals was not inserted, as there was a previous one if isinstance(vals, list): # If already several items got inserted, we have a list vals.append(val) else: # vals should be a tuple then, i.e. 
only one item so far # Need to convert the tuple to list for further extension self._container[key_lower] = [vals[0], vals[1], val] def extend(self, *args, **kwargs): """Generic import function for any type of header-like object. Adapted version of MutableMapping.update in order to insert items with self.add instead of self.__setitem__ """ if len(args) > 1: raise TypeError("extend() takes at most 1 positional " "arguments ({} given)".format(len(args))) other = args[0] if len(args) >= 1 else () if isinstance(other, HTTPHeaderDict): for key, val in other.iteritems(): self.add(key, val) elif isinstance(other, Mapping): for key in other: self.add(key, other[key]) elif hasattr(other, "keys"): for key in other.keys(): self.add(key, other[key]) else: for key, value in other: self.add(key, value) for key, value in kwargs.items(): self.add(key, value) def getlist(self, key): """Returns a list of all the values for the named field. Returns an empty list if the key doesn't exist.""" try: vals = self._container[key.lower()] except KeyError: return [] else: if isinstance(vals, tuple): return [vals[1]] else: return vals[1:] # Backwards compatibility for httplib getheaders = getlist getallmatchingheaders = getlist iget = getlist def __repr__(self): return "%s(%s)" % (type(self).__name__, dict(self.itermerged())) def _copy_from(self, other): for key in other: val = other.getlist(key) if isinstance(val, list): # Don't need to convert tuples val = list(val) self._container[key.lower()] = [key] + val def copy(self): clone = type(self)() clone._copy_from(self) return clone def iteritems(self): """Iterate over all header lines, including duplicate ones.""" for key in self: vals = self._container[key.lower()] for val in vals[1:]: yield vals[0], val def itermerged(self): """Iterate over all headers, merging duplicate ones together.""" for key in self: val = self._container[key.lower()] yield val[0], ', '.join(val[1:]) def items(self): return list(self.iteritems()) @classmethod def from_httplib(cls, message): # Python 2 """Read headers from a Python 2 httplib message object.""" # python2.7 does not expose a proper API for exporting multiheaders # efficiently. This function re-reads raw lines from the message # object and extracts the multiheaders properly. headers = [] for line in message.headers: if line.startswith((' ', '\t')): key, value = headers[-1] headers[-1] = (key, value + '\r\n' + line.rstrip()) continue key, value = line.split(':', 1) headers.append((key, value.strip())) return cls(headers) __all__ = ( 'basestring', 'connection', 'fields', 'filepost', 'poolmanager', 'timeout', 'HTTPHeaderDict', 'queue', 'urlencode', 'gaecontrib', 'urljoin', 'PyOpenSSLContext', )
HTTPHeaderDict
python
graphql-python__graphene
graphene/types/inputfield.py
{ "start": 130, "end": 2752 }
class ____(MountedType):
    """
    Makes a field available on an ObjectType in the GraphQL schema. Any type can be mounted as
    an Input Field except Interface and Union:

    - Object Type
    - Scalar Type
    - Enum

    Input object types also can't have arguments on their input fields, unlike regular
    ``graphene.Field``.

    All class attributes of ``graphene.InputObjectType`` are implicitly mounted as InputField
    using the below arguments.

    .. code:: python

        from graphene import InputObjectType, String, InputField

        class Person(InputObjectType):
            # implicitly mounted as Input Field
            first_name = String(required=True)
            # explicitly mounted as Input Field
            last_name = InputField(String, description="Surname")

    args:
        type (class for a graphene.UnmountedType): Must be a class (not an instance) of an
            unmounted graphene type (ex. scalar or object) which is used for the type of this
            field in the GraphQL schema.
        name (optional, str): Name of the GraphQL input field (must be unique in a type).
            Defaults to attribute name.
        default_value (optional, Any): Default value to use as input if none set in user
            operation (query, mutation, etc.).
        deprecation_reason (optional, str): Setting this value indicates that the field is
            deprecated and may provide instruction or reason on how clients should proceed.
        description (optional, str): Description of the GraphQL field in the schema.
        required (optional, bool): Indicates this input field as not null in the graphql schema.
            Raises a validation error if argument not provided. Same behavior as graphene.NonNull.
            Default False.
        **extra_args (optional, Dict): Not used.
    """

    def __init__(
        self,
        type_,
        name=None,
        default_value=Undefined,
        deprecation_reason=None,
        description=None,
        required=False,
        _creation_counter=None,
        **extra_args,
    ):
        super(InputField, self).__init__(_creation_counter=_creation_counter)
        self.name = name
        if required:
            assert (
                deprecation_reason is None
            ), f"InputField {name} is required, cannot deprecate it."
            type_ = NonNull(type_)
        self._type = type_
        self.deprecation_reason = deprecation_reason
        self.default_value = default_value
        self.description = description

    @property
    def type(self):
        return get_type(self._type)
InputField
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/paramSpec19.py
{ "start": 1892, "end": 2063 }
class ____(Protocol[P]):
    def __call__(self, /, *args: P.args, **kwargs: P.kwargs) -> None: ...


list_of_handler_protocols: list[HandlerProtocol[...]] = []
HandlerProtocol
python
dateutil__dateutil
tests/test_tz.py
{ "start": 28168, "end": 29998 }
class ____(unittest.TestCase):
    def testEquality(self):
        tz1 = tz.tzlocal()
        tz2 = tz.tzlocal()

        # Explicitly calling == and != here to ensure the operators work
        self.assertTrue(tz1 == tz2)
        self.assertFalse(tz1 != tz2)

    def testInequalityFixedOffset(self):
        tzl = tz.tzlocal()
        tzos = tz.tzoffset('LST', tzl._std_offset.total_seconds())
        tzod = tz.tzoffset('LDT', tzl._std_offset.total_seconds())

        self.assertFalse(tzl == tzos)
        self.assertFalse(tzl == tzod)
        self.assertTrue(tzl != tzos)
        self.assertTrue(tzl != tzod)

    def testInequalityInvalid(self):
        tzl = tz.tzlocal()

        self.assertTrue(tzl != 1)
        self.assertFalse(tzl == 1)

        # TODO: Use some sort of universal local mocking so that it's clear
        # that we're expecting tzlocal to *not* be Pacific/Kiritimati
        LINT = tz.gettz('Pacific/Kiritimati')
        self.assertTrue(tzl != LINT)
        self.assertFalse(tzl == LINT)

    def testInequalityUnsupported(self):
        tzl = tz.tzlocal()

        self.assertTrue(tzl == ComparesEqual)
        self.assertFalse(tzl != ComparesEqual)

    def testRepr(self):
        tzl = tz.tzlocal()

        self.assertEqual(repr(tzl), 'tzlocal()')


@pytest.mark.parametrize('args,kwargs', [
    (('EST', -18000), {}),
    (('EST', timedelta(hours=-5)), {}),
    (('EST',), {'offset': -18000}),
    (('EST',), {'offset': timedelta(hours=-5)}),
    (tuple(), {'name': 'EST', 'offset': -18000})
])
def test_tzoffset_is(args, kwargs):
    tz_ref = tz.tzoffset('EST', -18000)
    assert tz.tzoffset(*args, **kwargs) is tz_ref


def test_tzoffset_is_not():
    assert tz.tzoffset('EDT', -14400) is not tz.tzoffset('EST', -18000)


@pytest.mark.tzlocal
@unittest.skipIf(IS_WIN, "requires Unix")
TzLocalTest
python
coleifer__peewee
tests/base_models.py
{ "start": 2267, "end": 2430 }
class ____(TestModel):
    name = CharField()
    dflt1 = IntegerField(default=1)
    dflt2 = IntegerField(default=lambda: 2)
    dfltn = IntegerField(null=True)
DfltM
python
Pylons__pyramid
src/pyramid/interfaces.py
{ "start": 627, "end": 955 }
class ____(Interface):
    """
    An event type that is emitted after :app:`Pyramid` attempted to find a
    route but before it calls any traversal or view code. See the
    documentation attached to :class:`pyramid.events.Routefound`
    for more information.
    """

    request = Attribute('The request object')
IBeforeTraversal
python
conda__conda
conda/core/solve.py
{ "start": 1658, "end": 57248 }
class ____: """ A high-level API to conda's solving logic. Three public methods are provided to access a solution in various forms. * :meth:`solve_final_state` * :meth:`solve_for_diff` * :meth:`solve_for_transaction` """ _index: ReducedIndex | None _r: Resolve | None def __init__( self, prefix: str, channels: Iterable[Channel], subdirs: Iterable[str] = (), specs_to_add: Iterable[MatchSpec] = (), specs_to_remove: Iterable[MatchSpec] = (), repodata_fn: str = REPODATA_FN, command=NULL, ): """ Args: prefix (str): The conda prefix / environment location for which the :class:`Solver` is being instantiated. channels (Sequence[:class:`Channel`]): A prioritized list of channels to use for the solution. subdirs (Sequence[str]): A prioritized list of subdirs to use for the solution. specs_to_add (set[:class:`MatchSpec`]): The set of package specs to add to the prefix. specs_to_remove (set[:class:`MatchSpec`]): The set of package specs to remove from the prefix. """ self.prefix = prefix self._channels = channels or context.channels self.channels = IndexedSet(Channel(c) for c in self._channels) self.subdirs = tuple(s for s in subdirs or context.subdirs) self.specs_to_add = frozenset(MatchSpec.merge(s for s in specs_to_add)) self.specs_to_add_names = frozenset(_.name for _ in self.specs_to_add) self.specs_to_remove = frozenset(MatchSpec.merge(s for s in specs_to_remove)) self.neutered_specs = () self._command = command if unknown_subdirs := set(self.subdirs) - context.known_subdirs: raise ValueError(f"Unknown subdir(s):{dashlist(sorted(unknown_subdirs))}") self._repodata_fn = repodata_fn self._index = None self._r = None self._prepared = False self._pool_cache = {} def solve_for_transaction( self, update_modifier=NULL, deps_modifier=NULL, prune=NULL, ignore_pinned=NULL, force_remove=NULL, force_reinstall=NULL, should_retry_solve=False, ): """Gives an UnlinkLinkTransaction instance that can be used to execute the solution on an environment. Args: deps_modifier (DepsModifier): See :meth:`solve_final_state`. prune (bool): See :meth:`solve_final_state`. ignore_pinned (bool): See :meth:`solve_final_state`. force_remove (bool): See :meth:`solve_final_state`. force_reinstall (bool): See :meth:`solve_for_diff`. should_retry_solve (bool): See :meth:`solve_final_state`. Returns: UnlinkLinkTransaction: """ if self.prefix == context.root_prefix and context.enable_private_envs: # This path has the ability to generate a multi-prefix transaction. The basic logic # is in the commented out get_install_transaction() function below. Exercised at # the integration level in the PrivateEnvIntegrationTests in test_create.py. raise NotImplementedError() # run pre-solve processes here before solving for a solution context.plugin_manager.invoke_pre_solves( self.specs_to_add, self.specs_to_remove, ) unlink_precs, link_precs = self.solve_for_diff( update_modifier, deps_modifier, prune, ignore_pinned, force_remove, force_reinstall, should_retry_solve, ) # TODO: Only explicitly requested remove and update specs are being included in # History right now. Do we need to include other categories from the solve? 
# run post-solve processes here before performing the transaction context.plugin_manager.invoke_post_solves( self._repodata_fn, unlink_precs, link_precs, ) self._notify_conda_outdated(link_precs) return UnlinkLinkTransaction( PrefixSetup( self.prefix, unlink_precs, link_precs, self.specs_to_remove, self.specs_to_add, self.neutered_specs, ) ) def solve_for_diff( self, update_modifier=NULL, deps_modifier=NULL, prune=NULL, ignore_pinned=NULL, force_remove=NULL, force_reinstall=NULL, should_retry_solve=False, ) -> tuple[tuple[PackageRecord, ...], tuple[PackageRecord, ...]]: """Gives the package references to remove from an environment, followed by the package references to add to an environment. Args: deps_modifier (DepsModifier): See :meth:`solve_final_state`. prune (bool): See :meth:`solve_final_state`. ignore_pinned (bool): See :meth:`solve_final_state`. force_remove (bool): See :meth:`solve_final_state`. force_reinstall (bool): For requested specs_to_add that are already satisfied in the environment, instructs the solver to remove the package and spec from the environment, and then add it back--possibly with the exact package instance modified, depending on the spec exactness. should_retry_solve (bool): See :meth:`solve_final_state`. Returns: tuple[PackageRef], tuple[PackageRef]: A two-tuple of PackageRef sequences. The first is the group of packages to remove from the environment, in sorted dependency order from leaves to roots. The second is the group of packages to add to the environment, in sorted dependency order from roots to leaves. """ final_precs = self.solve_final_state( update_modifier, deps_modifier, prune, ignore_pinned, force_remove, should_retry_solve, ) unlink_precs, link_precs = diff_for_unlink_link_precs( self.prefix, final_precs, self.specs_to_add, force_reinstall ) # assert that all unlink_precs are manageable unmanageable = groupby(lambda prec: prec.is_unmanageable, unlink_precs).get( True ) if unmanageable: raise RuntimeError( f"Cannot unlink unmanageable packages:{dashlist(prec.record_id() for prec in unmanageable)}" ) return unlink_precs, link_precs def solve_final_state( self, update_modifier=NULL, deps_modifier=NULL, prune=NULL, ignore_pinned=NULL, force_remove=NULL, should_retry_solve=False, ): """Gives the final, solved state of the environment. Args: update_modifier (UpdateModifier): An optional flag directing how updates are handled regarding packages already existing in the environment. deps_modifier (DepsModifier): An optional flag indicating special solver handling for dependencies. The default solver behavior is to be as conservative as possible with dependency updates (in the case the dependency already exists in the environment), while still ensuring all dependencies are satisfied. Options include * NO_DEPS * ONLY_DEPS * UPDATE_DEPS * UPDATE_DEPS_ONLY_DEPS * FREEZE_INSTALLED prune (bool): If ``True``, the solution will not contain packages that were previously brought into the environment as dependencies but are no longer required as dependencies and are not user-requested. ignore_pinned (bool): If ``True``, the solution will ignore pinned package configuration for the prefix. force_remove (bool): Forces removal of a package without removing packages that depend on it. should_retry_solve (bool): Indicates whether this solve will be retried. 
This allows us to control whether to call find_conflicts (slow) in ssc.r.solve Returns: tuple[PackageRef]: In sorted dependency order from roots to leaves, the package references for the solved state of the environment. """ if prune and update_modifier == UpdateModifier.FREEZE_INSTALLED: update_modifier = NULL if update_modifier is NULL: update_modifier = context.update_modifier else: update_modifier = UpdateModifier(str(update_modifier).lower()) if deps_modifier is NULL: deps_modifier = context.deps_modifier else: deps_modifier = DepsModifier(str(deps_modifier).lower()) ignore_pinned = ( context.ignore_pinned if ignore_pinned is NULL else ignore_pinned ) force_remove = context.force_remove if force_remove is NULL else force_remove log.debug( "solving prefix %s\n specs_to_remove: %s\n specs_to_add: %s\n prune: %s", self.prefix, self.specs_to_remove, self.specs_to_add, prune, ) retrying = hasattr(self, "ssc") if not retrying: ssc = SolverStateContainer( self.prefix, update_modifier, deps_modifier, prune, ignore_pinned, force_remove, should_retry_solve, ) self.ssc = ssc else: ssc = self.ssc ssc.update_modifier = update_modifier ssc.deps_modifier = deps_modifier ssc.should_retry_solve = should_retry_solve # force_remove is a special case where we return early if self.specs_to_remove and force_remove: if self.specs_to_add: raise NotImplementedError() solution = tuple( prec for prec in ssc.solution_precs if not any(spec.match(prec) for spec in self.specs_to_remove) ) return IndexedSet(PrefixGraph(solution).graph) # Check if specs are satisfied by current environment. If they are, exit early. if ( update_modifier == UpdateModifier.SPECS_SATISFIED_SKIP_SOLVE and not self.specs_to_remove and not prune ): for spec in self.specs_to_add: if not next(ssc.prefix_data.query(spec), None): break else: # All specs match a package in the current environment. # Return early, with a solution that should just be PrefixData().iter_records() return IndexedSet(PrefixGraph(ssc.solution_precs).graph) if not ssc.r: with get_spinner(f"Collecting package metadata ({self._repodata_fn})"): ssc = self._collect_all_metadata(ssc) if should_retry_solve and update_modifier == UpdateModifier.FREEZE_INSTALLED: fail_message = ( "unsuccessful initial attempt using frozen solve. 
Retrying" " with flexible solve.\n" ) elif self._repodata_fn != REPODATA_FN: fail_message = ( f"unsuccessful attempt using repodata from {self._repodata_fn}, retrying" " with next repodata source.\n" ) else: fail_message = "failed\n" with get_spinner("Solving environment", fail_message=fail_message): ssc = self._remove_specs(ssc) ssc = self._add_specs(ssc) solution_precs = copy.copy(ssc.solution_precs) pre_packages = self.get_request_package_in_solution( ssc.solution_precs, ssc.specs_map ) ssc = self._find_inconsistent_packages(ssc) # this will prune precs that are deps of precs that get removed due to conflicts ssc = self._run_sat(ssc) post_packages = self.get_request_package_in_solution( ssc.solution_precs, ssc.specs_map ) if ssc.update_modifier == UpdateModifier.UPDATE_SPECS: constrained = self.get_constrained_packages( pre_packages, post_packages, ssc.index.keys() ) if len(constrained) > 0: for spec in constrained: self.determine_constricting_specs(spec, ssc.solution_precs) # if there were any conflicts, we need to add their orphaned deps back in if ssc.add_back_map: orphan_precs = ( set(solution_precs) - set(ssc.solution_precs) - set(ssc.add_back_map) ) solution_prec_names = [_.name for _ in ssc.solution_precs] ssc.solution_precs.extend( [ _ for _ in orphan_precs if _.name not in ssc.specs_map and _.name not in solution_prec_names ] ) ssc = self._post_sat_handling(ssc) time_recorder.log_totals() ssc.solution_precs = IndexedSet(PrefixGraph(ssc.solution_precs).graph) log.debug( "solved prefix %s\n solved_linked_dists:\n %s\n", self.prefix, "\n ".join(prec.dist_str() for prec in ssc.solution_precs), ) return ssc.solution_precs def determine_constricting_specs(self, spec, solution_precs): highest_version = [ VersionOrder(sp.version) for sp in solution_precs if sp.name == spec.name ][0] constricting = [] for prec in solution_precs: if any(j for j in prec.depends if spec.name in j): for dep in prec.depends: m_dep = MatchSpec(dep) if ( m_dep.name == spec.name and m_dep.version is not None and (m_dep.version.exact_value or "<" in m_dep.version.spec) ): if "," in m_dep.version.spec: constricting.extend( [ (prec.name, MatchSpec(f"{m_dep.name} {v}")) for v in m_dep.version.tup if "<" in v.spec ] ) else: constricting.append((prec.name, m_dep)) hard_constricting = [ i for i in constricting if i[1].version.matcher_vo <= highest_version ] if len(hard_constricting) == 0: return None print(f"\n\nUpdating {spec.name} is constricted by \n") for const in hard_constricting: print(f"{const[0]} -> requires {const[1]}") print( "\nIf you are sure you want an update of your package either try " "`conda update --all` or install a specific version of the " "package you want using `conda install <pkg>=<version>`\n" ) return hard_constricting def get_request_package_in_solution(self, solution_precs, specs_map): requested_packages = {} for pkg in self.specs_to_add: update_pkg_request = pkg.name requested_packages[update_pkg_request] = [ (i.name, str(i.version)) for i in solution_precs if i.name == update_pkg_request and i.version is not None ] requested_packages[update_pkg_request].extend( [ (v.name, str(v.version)) for k, v in specs_map.items() if k == update_pkg_request and v.version is not None ] ) return requested_packages def get_constrained_packages(self, pre_packages, post_packages, index_keys): update_constrained = set() def empty_package_list(pkg): for k, v in pkg.items(): if len(v) == 0: return True return False if empty_package_list(pre_packages) or empty_package_list(post_packages): return 
update_constrained for pkg in self.specs_to_add: if pkg.name.startswith("__"): # ignore virtual packages continue current_version = max(i[1] for i in pre_packages[pkg.name]) if current_version == max( i.version for i in index_keys if i.name == pkg.name ): continue else: if post_packages == pre_packages: update_constrained = update_constrained | {pkg} return update_constrained @time_recorder(module_name=__name__) def _collect_all_metadata(self, ssc): if ssc.prune: # When pruning DO NOT consider history of already installed packages when solving. prepared_specs = {*self.specs_to_remove, *self.specs_to_add} else: # add in historically-requested specs ssc.specs_map.update(ssc.specs_from_history_map) # these are things that we want to keep even if they're not explicitly specified. This # is to compensate for older installers not recording these appropriately for them # to be preserved. for pkg_name in ( "anaconda", "conda", "conda-build", "python.app", "console_shortcut", "powershell_shortcut", ): if pkg_name not in ssc.specs_map and ssc.prefix_data.get( pkg_name, None ): ssc.specs_map[pkg_name] = MatchSpec(pkg_name) # Add virtual packages so they are taken into account by the solver virtual_pkg_index = Index().system_packages virtual_pkgs = [p.name for p in virtual_pkg_index.keys()] for virtual_pkgs_name in virtual_pkgs: if virtual_pkgs_name not in ssc.specs_map: ssc.specs_map[virtual_pkgs_name] = MatchSpec(virtual_pkgs_name) for prec in ssc.prefix_data.iter_records(): # first check: add everything if we have no history to work with. # This happens with "update --all", for example. # # second check: add in aggressively updated packages # # third check: add in foreign stuff (e.g. from pip) into the specs # map. We add it so that it can be left alone more. This is a # declaration that it is manually installed, much like the # history map. It may still be replaced if it is in conflict, # but it is not just an indirect dep that can be pruned. if ( not ssc.specs_from_history_map or MatchSpec(prec.name) in context.aggressive_update_packages or prec.subdir == "pypi" ): ssc.specs_map.update({prec.name: MatchSpec(prec.name)}) prepared_specs = { *self.specs_to_remove, *self.specs_to_add, *ssc.specs_from_history_map.values(), } index, r = self._prepare(prepared_specs) ssc.set_repository_metadata(index, r) return ssc def _remove_specs(self, ssc): if self.specs_to_remove: # In a previous implementation, we invoked SAT here via `r.remove()` to help with # spec removal, and then later invoking SAT again via `r.solve()`. Rather than invoking # SAT for spec removal determination, we can use the PrefixGraph and simple tree # traversal if we're careful about how we handle features. We still invoke sat via # `r.solve()` later. _track_fts_specs = ( spec for spec in self.specs_to_remove if "track_features" in spec ) feature_names = set( chain.from_iterable( spec.get_raw_value("track_features") for spec in _track_fts_specs ) ) graph = PrefixGraph(ssc.solution_precs, ssc.specs_map.values()) all_removed_records = [] no_removed_records_specs = [] for spec in self.specs_to_remove: # If the spec was a track_features spec, then we need to also remove every # package with a feature that matches the track_feature. The # `graph.remove_spec()` method handles that for us. 
log.log(TRACE, "using PrefixGraph to remove records for %s", spec) removed_records = graph.remove_spec(spec) if removed_records: all_removed_records.extend(removed_records) else: no_removed_records_specs.append(spec) # ensure that each spec in specs_to_remove is actually associated with removed records unmatched_specs_to_remove = tuple( spec for spec in no_removed_records_specs if not any(spec.match(rec) for rec in all_removed_records) ) if unmatched_specs_to_remove: raise PackagesNotFoundError( tuple(sorted(str(s) for s in unmatched_specs_to_remove)) ) for rec in all_removed_records: # We keep specs (minus the feature part) for the non provides_features packages # if they're in the history specs. Otherwise, we pop them from the specs_map. rec_has_a_feature = set(rec.features or ()) & feature_names if rec_has_a_feature and rec.name in ssc.specs_from_history_map: spec = ssc.specs_map.get(rec.name, MatchSpec(rec.name)) spec._match_components = frozendict( { key: value for key, value in spec._match_components.items() if key != "features" } ) ssc.specs_map[spec.name] = spec else: ssc.specs_map.pop(rec.name, None) ssc.solution_precs = tuple(graph.graph) return ssc @time_recorder(module_name=__name__) def _find_inconsistent_packages(self, ssc): # We handle as best as possible environments in inconsistent states. To do this, # we remove now from consideration the set of packages causing inconsistencies, # and then we add them back in following the main SAT call. _, inconsistent_precs = ssc.r.bad_installed(ssc.solution_precs, ()) if inconsistent_precs: # It is possible that the package metadata is incorrect, for example when # un-patched metadata from the Miniconda or Anaconda installer is present, see: # https://github.com/conda/conda/issues/8076 # Update the metadata with information from the index and see if that makes the # environment consistent. ssc.solution_precs = tuple(ssc.index.get(k, k) for k in ssc.solution_precs) _, inconsistent_precs = ssc.r.bad_installed(ssc.solution_precs, ()) if log.isEnabledFor(DEBUG): log.debug( "inconsistent precs: %s", dashlist(inconsistent_precs) if inconsistent_precs else "None", ) if inconsistent_precs: print( dedent( """ The environment is inconsistent, please check the package plan carefully The following packages are causing the inconsistency:""" ), file=sys.stderr, ) print(dashlist(inconsistent_precs), file=sys.stderr) for prec in inconsistent_precs: # pop and save matching spec in specs_map spec = ssc.specs_map.pop(prec.name, None) ssc.add_back_map[prec.name] = (prec, spec) # let the package float. This is essential to keep the package's dependencies # in the solution ssc.specs_map[prec.name] = MatchSpec(prec.name, target=prec.dist_str()) # inconsistent environments should maintain the python version # unless explicitly requested by the user. 
This along with the logic in # _add_specs maintains the major.minor version if prec.name == "python" and spec: ssc.specs_map["python"] = spec ssc.solution_precs = tuple( prec for prec in ssc.solution_precs if prec not in inconsistent_precs ) return ssc def _package_has_updates(self, ssc, spec, installed_pool): installed_prec = installed_pool.get(spec.name) has_update = False if installed_prec: installed_prec = installed_prec[0] for prec in ssc.r.groups.get(spec.name, []): if prec.version > installed_prec.version: has_update = True break elif ( prec.version == installed_prec.version and prec.build_number > installed_prec.build_number ): has_update = True break # let conda determine the latest version by just adding a name spec return ( MatchSpec(spec.name, version=prec.version, build_number=prec.build_number) if has_update else spec ) def _should_freeze( self, ssc, target_prec, conflict_specs, explicit_pool, installed_pool ): # never, ever freeze anything if we have no history. if not ssc.specs_from_history_map: return False # never freeze if not in FREEZE_INSTALLED mode if ssc.update_modifier != UpdateModifier.FREEZE_INSTALLED: return False # if all package specs have overlapping package choices (satisfiable in at least one way) pkg_name = target_prec.name no_conflict = pkg_name not in conflict_specs and ( pkg_name not in explicit_pool or target_prec in explicit_pool[pkg_name] ) return no_conflict def _add_specs(self, ssc): # For the remaining specs in specs_map, add target to each spec. `target` is a reference # to the package currently existing in the environment. Setting target instructs the # solver to not disturb that package if it's not necessary. # If the spec.name is being modified by inclusion in specs_to_add, we don't set `target`, # since we *want* the solver to modify/update that package. # # TLDR: when working with MatchSpec objects, # - to minimize the version change, set MatchSpec(name=name, target=prec.dist_str()) # - to freeze the package, set all the components of MatchSpec individually installed_pool = groupby(lambda x: x.name, ssc.prefix_data.iter_records()) # the only things we should consider freezing are things that don't conflict with the new # specs being added. explicit_pool = ssc.r._get_package_pool(self.specs_to_add) if ssc.prune: # Ignore installed specs on prune. installed_specs = () else: installed_specs = [ record.to_match_spec() for record in ssc.prefix_data.iter_records() ] conflict_specs = ( ssc.r.get_conflicting_specs(installed_specs, self.specs_to_add) or tuple() ) conflict_specs = {spec.name for spec in conflict_specs} for pkg_name, spec in ssc.specs_map.items(): matches_for_spec = tuple( prec for prec in ssc.solution_precs if spec.match(prec) ) if matches_for_spec: if len(matches_for_spec) != 1: raise CondaError( dals( """ Conda encountered an error with your environment. Please report an issue at https://github.com/conda/conda/issues. In your report, please include the output of 'conda info' and 'conda list' for the active environment, along with the command you invoked that resulted in this error. 
pkg_name: %s spec: %s matches_for_spec: %s """ ) % ( pkg_name, spec, dashlist((str(s) for s in matches_for_spec), indent=4), ) ) target_prec = matches_for_spec[0] if target_prec.is_unmanageable: ssc.specs_map[pkg_name] = target_prec.to_match_spec() elif MatchSpec(pkg_name) in context.aggressive_update_packages: ssc.specs_map[pkg_name] = MatchSpec(pkg_name) elif self._should_freeze( ssc, target_prec, conflict_specs, explicit_pool, installed_pool ): ssc.specs_map[pkg_name] = target_prec.to_match_spec() elif pkg_name in ssc.specs_from_history_map: ssc.specs_map[pkg_name] = MatchSpec( ssc.specs_from_history_map[pkg_name], target=target_prec.dist_str(), ) else: ssc.specs_map[pkg_name] = MatchSpec( pkg_name, target=target_prec.dist_str() ) pin_overrides = set() for s in ssc.pinned_specs: if s.name in explicit_pool: if s.name not in self.specs_to_add_names and not ssc.ignore_pinned: ssc.specs_map[s.name] = MatchSpec(s, optional=False) elif explicit_pool[s.name] & ssc.r._get_package_pool([s]).get( s.name, set() ): ssc.specs_map[s.name] = MatchSpec(s, optional=False) pin_overrides.add(s.name) else: log.warning( "pinned spec %s conflicts with explicit specs. " "Overriding pinned spec.", s, ) # we want to freeze any packages in the env that are not conflicts, so that the # solve goes faster. This is kind of like an iterative solve, except rather # than just providing a starting place, we are preventing some solutions. # A true iterative solve would probably be better in terms of reaching the # optimal output all the time. It would probably also get rid of the need # to retry with an unfrozen (UPDATE_SPECS) solve. if ssc.update_modifier == UpdateModifier.FREEZE_INSTALLED: precs = [ _ for _ in ssc.prefix_data.iter_records() if _.name not in ssc.specs_map ] for prec in precs: if prec.name not in conflict_specs: ssc.specs_map[prec.name] = prec.to_match_spec() else: ssc.specs_map[prec.name] = MatchSpec( prec.name, target=prec.to_match_spec(), optional=True ) log.debug("specs_map with targets: %s", ssc.specs_map) # If we're in UPDATE_ALL mode, we need to drop all the constraints attached to specs, # so they can all float and the solver can find the most up-to-date solution. In the case # of UPDATE_ALL, `specs_map` wasn't initialized with packages from the current environment, # but *only* historically-requested specs. This lets UPDATE_ALL drop dependencies if # they're no longer needed, and their presence would otherwise prevent the updated solution # the user most likely wants. if ssc.update_modifier == UpdateModifier.UPDATE_ALL: # history is preferable because it has explicitly installed stuff in it. # that simplifies our solution. if ssc.specs_from_history_map: ssc.specs_map = dict( (spec, MatchSpec(spec)) if MatchSpec(spec).name not in (_.name for _ in ssc.pinned_specs) else (MatchSpec(spec).name, ssc.specs_map[MatchSpec(spec).name]) for spec in ssc.specs_from_history_map ) for prec in ssc.prefix_data.iter_records(): # treat pip-installed stuff as explicitly installed, too. if prec.subdir == "pypi": ssc.specs_map.update({prec.name: MatchSpec(prec.name)}) else: ssc.specs_map = { prec.name: ( MatchSpec(prec.name) if prec.name not in (_.name for _ in ssc.pinned_specs) else ssc.specs_map[prec.name] ) for prec in ssc.prefix_data.iter_records() } # ensure that our self.specs_to_add are not being held back by packages in the env. # This factors in pins and also ignores specs from the history. 
It is unfreezing only # for the indirect specs that otherwise conflict with update of the immediate request elif ssc.update_modifier == UpdateModifier.UPDATE_SPECS: skip = lambda x: ( ( x.name not in pin_overrides and any(x.name == _.name for _ in ssc.pinned_specs) and not ssc.ignore_pinned ) or x.name in ssc.specs_from_history_map ) specs_to_add = tuple( self._package_has_updates(ssc, _, installed_pool) for _ in self.specs_to_add if not skip(_) ) # the index is sorted, so the first record here gives us what we want. conflicts = ssc.r.get_conflicting_specs( tuple(MatchSpec(_) for _ in ssc.specs_map.values()), specs_to_add ) for conflict in conflicts or (): # neuter the spec due to a conflict if ( conflict.name in ssc.specs_map and ( # add optional because any pinned specs will include it MatchSpec(conflict, optional=True) not in ssc.pinned_specs or ssc.ignore_pinned ) and conflict.name not in ssc.specs_from_history_map ): ssc.specs_map[conflict.name] = MatchSpec(conflict.name) # As a business rule, we never want to update python beyond the current minor version, # unless that's requested explicitly by the user (which we actively discourage). py_in_prefix = any(_.name == "python" for _ in ssc.solution_precs) py_requested_explicitly = any(s.name == "python" for s in self.specs_to_add) if py_in_prefix and not py_requested_explicitly: python_prefix_rec = ssc.prefix_data.get("python") freeze_installed = ssc.update_modifier == UpdateModifier.FREEZE_INSTALLED if "python" not in conflict_specs and freeze_installed: ssc.specs_map["python"] = python_prefix_rec.to_match_spec() else: # will our prefix record conflict with any explicit spec? If so, don't add # anything here - let python float when it hasn't been explicitly specified python_spec = ssc.specs_map.get("python", MatchSpec("python")) if not python_spec.get("version"): pinned_version = ( get_major_minor_version(python_prefix_rec.version) + ".*" ) python_spec = MatchSpec(python_spec, version=pinned_version) spec_set = (python_spec,) + tuple(self.specs_to_add) if ssc.r.get_conflicting_specs(spec_set, self.specs_to_add): if self._command != "install" or ( self._repodata_fn == REPODATA_FN and (not ssc.should_retry_solve or not freeze_installed) ): # raises a hopefully helpful error message ssc.r.find_conflicts(spec_set) else: raise UnsatisfiableError({}) ssc.specs_map["python"] = python_spec # For the aggressive_update_packages configuration parameter, we strip any target # that's been set. if not context.offline: for spec in context.aggressive_update_packages: if spec.name in ssc.specs_map: ssc.specs_map[spec.name] = spec # add in explicitly requested specs from specs_to_add # this overrides any name-matching spec already in the spec map ssc.specs_map.update( (s.name, s) for s in self.specs_to_add if s.name not in pin_overrides ) # As a business rule, we never want to downgrade conda below the current version, # unless that's requested explicitly by the user (which we actively discourage). 
if "conda" in ssc.specs_map and paths_equal(self.prefix, context.conda_prefix): conda_prefix_rec = ssc.prefix_data.get("conda") if conda_prefix_rec: version_req = f">={conda_prefix_rec.version}" conda_requested_explicitly = any( s.name == "conda" for s in self.specs_to_add ) conda_spec = ssc.specs_map["conda"] conda_in_specs_to_add_version = ssc.specs_map.get("conda", {}).get( "version" ) if not conda_in_specs_to_add_version: conda_spec = MatchSpec(conda_spec, version=version_req) if context.auto_update_conda and not conda_requested_explicitly: conda_spec = MatchSpec("conda", version=version_req, target=None) ssc.specs_map["conda"] = conda_spec return ssc @time_recorder(module_name=__name__) def _run_sat(self, ssc): final_environment_specs = IndexedSet( ( *ssc.specs_map.values(), *ssc.track_features_specs, # pinned specs removed here - added to specs_map in _add_specs instead ) ) absent_specs = [s for s in ssc.specs_map.values() if not ssc.r.find_matches(s)] if absent_specs: raise PackagesNotFoundError(absent_specs) # We've previously checked `solution` for consistency (which at that point was the # pre-solve state of the environment). Now we check our compiled set of # `final_environment_specs` for the possibility of a solution. If there are conflicts, # we can often avoid them by neutering specs that have a target (e.g. removing version # constraint) and also making them optional. The result here will be less cases of # `UnsatisfiableError` handed to users, at the cost of more packages being modified # or removed from the environment. # # get_conflicting_specs() returns a "minimal unsatisfiable subset" which # may not be the only unsatisfiable subset. We may have to call get_conflicting_specs() # several times, each time making modifications to loosen constraints. conflicting_specs = set( ssc.r.get_conflicting_specs( tuple(final_environment_specs), self.specs_to_add ) or [] ) while conflicting_specs: specs_modified = False if log.isEnabledFor(DEBUG): log.debug( "conflicting specs: %s", dashlist(s.target or s for s in conflicting_specs), ) # Are all conflicting specs in specs_map? If not, that means they're in # track_features_specs or pinned_specs, which we should raise an error on. specs_map_set = set(ssc.specs_map.values()) grouped_specs = groupby(lambda s: s in specs_map_set, conflicting_specs) # force optional to true. 
This is what it is originally in # pinned_specs, but we override that in _add_specs to make it # non-optional when there's a name match in the explicit package # pool conflicting_pinned_specs = groupby( lambda s: MatchSpec(s, optional=True) in ssc.pinned_specs, conflicting_specs, ) if conflicting_pinned_specs.get(True): in_specs_map = grouped_specs.get(True, ()) pinned_conflicts = conflicting_pinned_specs.get(True, ()) in_specs_map_or_specs_to_add = ( set(in_specs_map) | set(self.specs_to_add) ) - set(pinned_conflicts) raise SpecsConfigurationConflictError( sorted(s.__str__() for s in in_specs_map_or_specs_to_add), sorted(s.__str__() for s in {s for s in pinned_conflicts}), self.prefix, ) for spec in conflicting_specs: if spec.target and not spec.optional: specs_modified = True final_environment_specs.remove(spec) if spec.get("version"): neutered_spec = MatchSpec(spec.name, version=spec.version) else: neutered_spec = MatchSpec(spec.name) final_environment_specs.add(neutered_spec) ssc.specs_map[spec.name] = neutered_spec if specs_modified: conflicting_specs = set( ssc.r.get_conflicting_specs( tuple(final_environment_specs), self.specs_to_add ) ) else: # Let r.solve() use r.find_conflicts() to report conflict chains. break # Finally! We get to call SAT. if log.isEnabledFor(DEBUG): log.debug( "final specs to add: %s", dashlist(sorted(str(s) for s in final_environment_specs)), ) # this will raise for unsatisfiable stuff. We can if not conflicting_specs or context.unsatisfiable_hints: ssc.solution_precs = ssc.r.solve( tuple(final_environment_specs), specs_to_add=self.specs_to_add, history_specs=ssc.specs_from_history_map, should_retry_solve=ssc.should_retry_solve, ) else: # shortcut to raise an unsat error without needing another solve step when # unsatisfiable_hints is off raise UnsatisfiableError({}) self.neutered_specs = tuple( v for k, v in ssc.specs_map.items() if k in ssc.specs_from_history_map and v.strictness < ssc.specs_from_history_map[k].strictness ) # add back inconsistent packages to solution if ssc.add_back_map: for name, (prec, spec) in ssc.add_back_map.items(): # spec here will only be set if the conflicting prec was in the original specs_map # if it isn't there, then we restore the conflict. If it is there, though, # we keep the new, consistent solution if not spec: # filter out solution precs and reinsert the conflict. Any resolution # of the conflict should be explicit (i.e. it must be in ssc.specs_map) ssc.solution_precs = [ _ for _ in ssc.solution_precs if _.name != name ] ssc.solution_precs.append(prec) final_environment_specs.add(spec) ssc.final_environment_specs = final_environment_specs return ssc def _post_sat_handling(self, ssc): # Special case handling for various DepsModifier flags. final_environment_specs = ssc.final_environment_specs if ssc.deps_modifier == DepsModifier.NO_DEPS: # In the NO_DEPS case, we need to start with the original list of packages in the # environment, and then only modify packages that match specs_to_add or # specs_to_remove. # # Help information notes that use of NO_DEPS is expected to lead to broken # environments. 
_no_deps_solution = IndexedSet(ssc.prefix_data.iter_records()) only_remove_these = { prec for spec in self.specs_to_remove for prec in _no_deps_solution if spec.match(prec) } _no_deps_solution -= only_remove_these only_add_these = { prec for spec in self.specs_to_add for prec in ssc.solution_precs if spec.match(prec) } remove_before_adding_back = {prec.name for prec in only_add_these} _no_deps_solution = IndexedSet( prec for prec in _no_deps_solution if prec.name not in remove_before_adding_back ) _no_deps_solution |= only_add_these ssc.solution_precs = _no_deps_solution # TODO: check if solution is satisfiable, and emit warning if it's not elif ( ssc.deps_modifier == DepsModifier.ONLY_DEPS and ssc.update_modifier != UpdateModifier.UPDATE_DEPS ): # Using a special instance of PrefixGraph to remove youngest child nodes that match # the original specs_to_add. It's important to remove only the *youngest* child nodes, # because a typical use might be `conda install --only-deps python=2 flask`, and in # that case we'd want to keep python. # # What are we supposed to do if flask was already in the environment? # We can't be removing stuff here that's already in the environment. # # What should be recorded for the user-requested specs in this case? Probably all # direct dependencies of flask. graph = PrefixGraph(ssc.solution_precs, self.specs_to_add) removed_nodes = graph.remove_youngest_descendant_nodes_with_specs() self.specs_to_add = set(self.specs_to_add) for prec in removed_nodes: for dep in prec.depends: dep = MatchSpec(dep) if dep.name not in ssc.specs_map: self.specs_to_add.add(dep) # unfreeze self.specs_to_add = frozenset(self.specs_to_add) # Add back packages that are already in the prefix. specs_to_remove_names = {spec.name for spec in self.specs_to_remove} add_back = tuple( ssc.prefix_data.get(node.name, None) for node in removed_nodes if node.name not in specs_to_remove_names ) ssc.solution_precs = tuple( PrefixGraph((*graph.graph, *filter(None, add_back))).graph ) # TODO: check if solution is satisfiable, and emit warning if it's not elif ssc.update_modifier == UpdateModifier.UPDATE_DEPS: # Here we have to SAT solve again :( It's only now that we know the dependency # chain of specs_to_add. # # UPDATE_DEPS is effectively making each spec in the dependency chain a user-requested # spec. We don't modify pinned_specs, track_features_specs, or specs_to_add. For # all other specs, we drop all information but name, drop target, and add them to # the specs_to_add that gets recorded in the history file. # # It's like UPDATE_ALL, but only for certain dependency chains. graph = PrefixGraph(ssc.solution_precs) update_names = set() for spec in self.specs_to_add: node = graph.get_node_by_name(spec.name) update_names.update( ancest_rec.name for ancest_rec in graph.all_ancestors(node) ) specs_map = {name: MatchSpec(name) for name in update_names} # Remove pinned_specs and any python spec (due to major-minor pinning business rule). # Add in the original specs_to_add on top. for spec in ssc.pinned_specs: specs_map.pop(spec.name, None) if "python" in specs_map: python_rec = ssc.prefix_data.get("python") py_ver = ".".join(python_rec.version.split(".")[:2]) + ".*" specs_map["python"] = MatchSpec(name="python", version=py_ver) specs_map.update({spec.name: spec for spec in self.specs_to_add}) new_specs_to_add = tuple(specs_map.values()) # It feels wrong/unsafe to modify this instance, but I guess let's go with it for now. 
self.specs_to_add = new_specs_to_add ssc.solution_precs = self.solve_final_state( update_modifier=UpdateModifier.UPDATE_SPECS, deps_modifier=ssc.deps_modifier, prune=ssc.prune, ignore_pinned=ssc.ignore_pinned, force_remove=ssc.force_remove, ) ssc.prune = False if ssc.prune: graph = PrefixGraph(ssc.solution_precs, final_environment_specs) graph.prune() ssc.solution_precs = tuple(graph.graph) return ssc def _notify_conda_outdated(self, link_precs): if not context.notify_outdated_conda or context.quiet: return current_conda_prefix_rec = PrefixData(context.conda_prefix).get("conda", None) if current_conda_prefix_rec: channel_name = current_conda_prefix_rec.channel.canonical_name if channel_name == UNKNOWN_CHANNEL: channel_name = "defaults" # only look for a newer conda in the channel conda is currently installed from conda_newer_spec = MatchSpec(f"{channel_name}::conda>{CONDA_VERSION}") if paths_equal(self.prefix, context.conda_prefix): if any(conda_newer_spec.match(prec) for prec in link_precs): return conda_newer_precs = sorted( SubdirData.query_all( conda_newer_spec, self.channels, self.subdirs, repodata_fn=self._repodata_fn, ), key=lambda x: VersionOrder(x.version), # VersionOrder is fine here rather than r.version_key because all precs # should come from the same channel ) if conda_newer_precs: latest_version = conda_newer_precs[-1].version # If conda comes from defaults, ensure we're giving instructions to users # that should resolve release timing issues between defaults and conda-forge. print( dedent( f""" ==> WARNING: A newer version of conda exists. <== current version: {CONDA_VERSION} latest version: {latest_version} Please update conda by running $ conda update -n base -c {channel_name} conda Or to minimize the number of packages updated during conda update use conda install conda={latest_version} """ ), file=sys.stderr, ) def _prepare(self, prepared_specs) -> tuple[ReducedIndex, Resolve]: # All of this _prepare() method is hidden away down here. Someday we may want to further # abstract away the use of `index` or the Resolve object. if self._prepared and prepared_specs == self._prepared_specs: return self._index, self._r if hasattr(self, "_index") and self._index: # added in install_actions for conda-build back-compat self._prepared_specs = prepared_specs self._r = Resolve(self._index, channels=self.channels) else: # add in required channels that aren't explicitly given in the channels list # For correctness, we should probably add to additional_channels any channel that # is given by PrefixData(self.prefix).all_subdir_urls(). However that causes # usability problems with bad / expired tokens. additional_channels = set() for spec in self.specs_to_add: # TODO: correct handling for subdir isn't yet done channel = spec.get_exact_value("channel") if channel: additional_channels.add(Channel(channel)) self.channels.update(additional_channels) self._prepared_specs = prepared_specs self._index = reduced_index = ReducedIndex( prepared_specs, channels=self.channels, prepend=False, subdirs=self.subdirs, use_local=False, use_cache=False, prefix=self.prefix, repodata_fn=self._repodata_fn, use_system=True, ) self._r = Resolve(reduced_index, channels=self.channels) self._prepared = True return self._index, self._r
Solver
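The record above captures the internals of conda's classic Solver (_add_specs, _run_sat, _post_sat_handling). As a rough orientation only, a minimal sketch of how such a solver is typically driven follows; the import paths, constructor arguments, environment prefix and spec are assumptions inferred from the attributes the snippet references (self.prefix, self.channels, self.specs_to_add, solve_final_state), not part of the record itself.

# Hedged sketch; assumes conda's classic solver API and a real environment at the given prefix.
from conda.base.context import context
from conda.core.solve import Solver            # assumed import path
from conda.models.match_spec import MatchSpec

solver = Solver(
    prefix="/opt/conda/envs/demo",             # placeholder environment
    channels=context.channels,
    subdirs=context.subdirs,
    specs_to_add=[MatchSpec("numpy>=1.24")],
)
final_precs = solver.solve_final_state()       # exercises the _add_specs/_run_sat logic shown above
print(len(final_precs), "package records in the solved state")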
python
boto__boto3
tests/functional/test_crt.py
{ "start": 2017, "end": 5306 }
class ____: @requires_crt() @MockOptimizedInstance() def test_create_transfer_manager_on_optimized_instance(self): client = create_mock_client() config = TransferConfig() transfer_manager = create_transfer_manager(client, config) assert isinstance(transfer_manager, CRTTransferManager) @requires_crt() def test_create_transfer_manager_with_crt_preferred(self): client = create_mock_client() config = TransferConfig( preferred_transfer_client='crt', ) transfer_manager = create_transfer_manager(client, config) assert isinstance(transfer_manager, CRTTransferManager) @mock.patch("boto3.s3.transfer.HAS_CRT", False) def test_create_transfer_manager_with_crt_preferred_no_crt(self): client = create_mock_client() config = TransferConfig( preferred_transfer_client='crt', ) with pytest.raises(MissingDependencyException) as exc: create_transfer_manager(client, config) assert "missing minimum CRT" in str(exc.value) @requires_crt() @mock.patch("awscrt.__version__", "0.19.0") def test_create_transfer_manager_with_crt_preferred_bad_version(self): client = create_mock_client() config = TransferConfig( preferred_transfer_client='crt', ) with pytest.raises(MissingDependencyException) as exc: create_transfer_manager(client, config) assert "missing minimum CRT" in str(exc.value) assert "with version: 0.19.0" in str(exc.value) @requires_crt() def test_minimum_crt_version(self): assert has_minimum_crt_version((0, 16, 12)) is True @pytest.mark.parametrize( "bad_version", ( None, "0.1.0-dev", "0.20", object(), ), ) @requires_crt() def test_minimum_crt_version_bad_crt_version(self, bad_version): with mock.patch("awscrt.__version__") as vers: vers.return_value = bad_version assert has_minimum_crt_version((0, 16, 12)) is False @requires_crt() def test_crt_transfer_manager_raises_with_invalid_crt_config(self): client = create_mock_client() config = TransferConfig( preferred_transfer_client='crt', # `max_bandwidth` is not an allowed CRT config option. max_bandwidth=1024, ) with pytest.raises(InvalidCrtTransferConfigError) as exc: create_transfer_manager(client, config) assert "transfer config options are invalid" in str(exc.value) assert "max_bandwidth" in str(exc.value) @requires_crt() @MockOptimizedInstance() def test_auto_transfer_manager_succeeds_with_invalid_crt_config(self): client = create_mock_client() config = TransferConfig( preferred_transfer_client='auto', # `max_bandwidth` is not an allowed CRT config option. # But config should only be validated when # `preferred_transfer_client` == `crt`. max_bandwidth=1024, ) transfer_manager = create_transfer_manager(client, config) assert isinstance(transfer_manager, CRTTransferManager)
TestS3TransferWithCRT
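The test class above checks how boto3 selects between the classic and CRT transfer managers. Below is a small, hedged sketch of the user-facing setting it exercises; the bucket and file names are placeholders, and the CRT path additionally assumes the optional awscrt dependency is installed.

import boto3
from boto3.s3.transfer import TransferConfig

s3 = boto3.client("s3")
# "auto" defers the choice to boto3, as covered by the tests above
config = TransferConfig(preferred_transfer_client="crt")
s3.upload_file("data.bin", "my-bucket", "data.bin", Config=config)   # placeholder bucket/object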
python
sympy__sympy
sympy/vector/vector.py
{ "start": 14856, "end": 15412 }
class ____(BasisDependentMul, Vector): """ Class to denote products of scalars and BaseVectors. """ def __new__(cls, *args, **options): obj = BasisDependentMul.__new__(cls, *args, **options) return obj @property def base_vector(self): """ The BaseVector involved in the product. """ return self._base_instance @property def measure_number(self): """ The scalar expression involved in the definition of this VectorMul. """ return self._measure_number
VectorMul
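For context, a VectorMul such as the class above is what sympy.vector produces when a scalar multiplies a BaseVector; this short illustration uses the public CoordSys3D API together with the two properties defined in the snippet.

from sympy import symbols
from sympy.vector import CoordSys3D

N = CoordSys3D("N")
a = symbols("a")
v = 3 * a * N.i             # a VectorMul instance
print(v.base_vector)        # N.i
print(v.measure_number)     # 3*a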
python
bokeh__bokeh
src/bokeh/models/graphs.py
{ "start": 3666, "end": 3953 }
class ____(GraphCoordinates): ''' Node coordinate expression obtained from ``LayoutProvider`` ''' # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) @abstract
EdgeCoordinates
python
scipy__scipy
scipy/optimize/tests/test_linprog.py
{ "start": 87308, "end": 90533 }
class ____: method = "interior-point" # the following tests don't need to be performed separately for # sparse presolve, sparse after presolve, and dense def test_solver_select(self): # check that default solver is selected as expected if has_cholmod: options = {'sparse': True, 'cholesky': True} elif has_umfpack: options = {'sparse': True, 'cholesky': False} else: options = {'sparse': True, 'cholesky': False, 'sym_pos': False} A, b, c = lpgen_2d(20, 20) res1 = linprog(c, A_ub=A, b_ub=b, method=self.method, options=options) res2 = linprog(c, A_ub=A, b_ub=b, method=self.method) # default solver assert_allclose(res1.fun, res2.fun, err_msg="linprog default solver unexpected result", rtol=2e-15, atol=1e-15) def test_unbounded_below_no_presolve_original(self): # formerly caused segfault in TravisCI w/ "cholesky":True c = [-1] bounds = [(None, 1)] res = linprog(c=c, bounds=bounds, method=self.method, options={"presolve": False, "cholesky": True}) _assert_success(res, desired_fun=-1) def test_cholesky(self): # use cholesky factorization and triangular solves A, b, c = lpgen_2d(20, 20) res = linprog(c, A_ub=A, b_ub=b, method=self.method, options={"cholesky": True}) # only for dense _assert_success(res, desired_fun=-63.47967608020187) # method='highs' solution def test_alternate_initial_point(self): # use "improved" initial point A, b, c = lpgen_2d(20, 20) with warnings.catch_warnings(): warnings.filterwarnings( "ignore", "scipy.linalg.solve\nIll...", RuntimeWarning) warnings.filterwarnings( "ignore", "Solving system with option...", OptimizeWarning) warnings.filterwarnings( "ignore", "Ill-conditioned matrix...", LinAlgWarning) warnings.filterwarnings( "ignore", "An ill-conditioned...", LinAlgWarning) res = linprog(c, A_ub=A, b_ub=b, method=self.method, options={"ip": True, "disp": True}) # ip code is independent of sparse/dense _assert_success(res, desired_fun=-63.47967608020187) # method='highs' solution def test_bug_8664(self): # interior-point has trouble with this when presolve is off c = [4] A_ub = [[2], [5]] b_ub = [4, 4] A_eq = [[0], [-8], [9]] b_eq = [3, 2, 10] with warnings.catch_warnings(): warnings.simplefilter("ignore", RuntimeWarning) warnings.filterwarnings( "ignore", "Solving system with option...", OptimizeWarning) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options={"presolve": False}) assert_(not res.success, "Incorrectly reported success") ######################################## # Revised Simplex Option-Specific Tests# ########################################
TestLinprogIPSpecific
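The test class above drives scipy.optimize.linprog with interior-point-specific options. Here is a minimal call in the same spirit with made-up problem data; note the tests pin the legacy method="interior-point" (with options such as {"cholesky": True}), whereas method="highs" is the default solver in recent SciPy releases.

from scipy.optimize import linprog

c = [-1.0, -2.0]
A_ub = [[1.0, 1.0], [3.0, 1.0]]
b_ub = [4.0, 6.0]
res = linprog(c, A_ub=A_ub, b_ub=b_ub, method="highs")
print(res.status, res.fun, res.x)    # expect status 0 with fun == -8.0 at x == [0, 4]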
python
encode__django-rest-framework
tests/test_response.py
{ "start": 8436, "end": 8858 }
class ____(TestCase): """ Tests that covers #122. """ def test_only_html_renderer(self): """ Test if no infinite recursion occurs. """ self.client.get('/html') def test_html_renderer_is_first(self): """ Test if no infinite recursion occurs. """ self.client.get('/html1') @override_settings(ROOT_URLCONF='tests.test_response')
Issue122Tests
python
huggingface__transformers
src/transformers/pipelines/text_generation.py
{ "start": 541, "end": 24901 }
class ____(Pipeline): """ Language generation pipeline using any `ModelWithLMHead` or `ModelForCausalLM`. This pipeline predicts the words that will follow a specified text prompt. When the underlying model is a conversational model, it can also accept one or more chats, in which case the pipeline will operate in chat mode and will continue the chat(s) by adding its response(s). Each chat takes the form of a list of dicts, where each dict contains "role" and "content" keys. Unless the model you're using explicitly sets these generation parameters in its configuration files (`generation_config.json`), the following default values will be used: - max_new_tokens: 256 - do_sample: True - temperature: 0.7 Examples: ```python >>> from transformers import pipeline >>> generator = pipeline(model="openai-community/gpt2") >>> generator("I can't believe you did such a ", do_sample=False) [{'generated_text': "I can't believe you did such a icky thing to me. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I"}] >>> # These parameters will return suggestions, and only the newly created text making it easier for prompting suggestions. >>> outputs = generator("My tart needs some", num_return_sequences=4, return_full_text=False) ``` ```python >>> from transformers import pipeline >>> generator = pipeline(model="HuggingFaceH4/zephyr-7b-beta") >>> # Zephyr-beta is a conversational model, so let's pass it a chat instead of a single string >>> generator([{"role": "user", "content": "What is the capital of France? Answer in one word."}], do_sample=False, max_new_tokens=2) [{'generated_text': [{'role': 'user', 'content': 'What is the capital of France? Answer in one word.'}, {'role': 'assistant', 'content': 'Paris'}]}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial). You can pass text generation parameters to this pipeline to control stopping criteria, decoding strategy, and more. Learn more about text generation parameters in [Text generation strategies](../generation_strategies) and [Text generation](text_generation). This language generation pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"text-generation"`. The models that this pipeline can use are models that have been trained with an autoregressive language modeling objective. See the list of available [text completion models](https://huggingface.co/models?filter=text-generation) and the list of [conversational models](https://huggingface.co/models?other=conversational) on [huggingface.co/models]. """ # Prefix text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia # in https://github.com/rusiaaman/XLNet-gen#methodology # and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e XL_PREFIX = """ In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. 
Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> """ _pipeline_calls_generate = True _load_processor = False _load_image_processor = False _load_feature_extractor = False _load_tokenizer = True # Make sure the docstring is updated when the default generation config is changed _default_generation_config = GenerationConfig( max_new_tokens=256, do_sample=True, # free-form text generation often uses sampling temperature=0.7, ) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.check_model_type(MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. prefix = None if self.prefix is not None: prefix = self.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. prefix = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params) self._preprocess_params = {**self._preprocess_params, **preprocess_params} self._forward_params = {**self._forward_params, **forward_params} def _sanitize_parameters( self, return_full_text=None, return_tensors=None, return_text=None, return_type=None, clean_up_tokenization_spaces=None, prefix=None, handle_long_generation=None, stop_sequence=None, truncation=None, max_length=None, continue_final_message=None, skip_special_tokens=None, tokenizer_encode_kwargs=None, tools=None, documents=None, **generate_kwargs, ): # preprocess kwargs preprocess_params = {} add_special_tokens = False if "add_special_tokens" in generate_kwargs: add_special_tokens = preprocess_params["add_special_tokens"] = generate_kwargs.pop("add_special_tokens") if "padding" in generate_kwargs: preprocess_params["padding"] = generate_kwargs.pop("padding") if truncation is not None: preprocess_params["truncation"] = truncation if max_length is not None: preprocess_params["max_length"] = max_length generate_kwargs["max_length"] = max_length if tools is not None: preprocess_params["tools"] = tools if documents is not None: preprocess_params["documents"] = documents if prefix is not None: preprocess_params["prefix"] = prefix if prefix: prefix_inputs = self.tokenizer( prefix, padding=False, add_special_tokens=add_special_tokens, return_tensors="pt" ) generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1] if handle_long_generation is not None: if handle_long_generation != "hole": raise ValueError( f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected" " [None, 'hole']" ) preprocess_params["handle_long_generation"] = handle_long_generation if continue_final_message is not None: preprocess_params["continue_final_message"] = continue_final_message if tokenizer_encode_kwargs is not None: preprocess_params["tokenizer_encode_kwargs"] = tokenizer_encode_kwargs preprocess_params.update(generate_kwargs) # forward kwargs if stop_sequence is not None: stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False) generate_kwargs["eos_token_id"] = stop_sequence_ids forward_params = generate_kwargs if self.assistant_model is not None: 
forward_params["assistant_model"] = self.assistant_model if self.assistant_tokenizer is not None: forward_params["tokenizer"] = self.tokenizer forward_params["assistant_tokenizer"] = self.assistant_tokenizer # postprocess kwargs postprocess_params = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError("`return_text` is mutually exclusive with `return_full_text`") if return_tensors is not None: raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`") return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError("`return_text` is mutually exclusive with `return_tensors`") return_type = ReturnType.TENSORS if return_type is not None: postprocess_params["return_type"] = return_type if clean_up_tokenization_spaces is not None: postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces if continue_final_message is not None: postprocess_params["continue_final_message"] = continue_final_message if skip_special_tokens is not None: postprocess_params["skip_special_tokens"] = skip_special_tokens return preprocess_params, forward_params, postprocess_params # overriding _parse_and_tokenize to allow for unusual language-modeling tokenizer arguments def _parse_and_tokenize(self, *args, **kwargs): """ Parse arguments and tokenize """ # Parse arguments if self.model.__class__.__name__ == "TransfoXLLMHeadModel": kwargs.update({"add_space_before_punct_symbol": True}) return super()._parse_and_tokenize(*args, **kwargs) @overload def __call__(self, text_inputs: str, **kwargs: Any) -> list[dict[str, str]]: ... @overload def __call__(self, text_inputs: list[str], **kwargs: Any) -> list[list[dict[str, str]]]: ... @overload def __call__(self, text_inputs: ChatType, **kwargs: Any) -> list[dict[str, ChatType]]: ... @overload def __call__(self, text_inputs: list[ChatType], **kwargs: Any) -> list[list[dict[str, ChatType]]]: ... def __call__(self, text_inputs, **kwargs): """ Complete the prompt(s) given as inputs. Args: text_inputs (`str`, `list[str]`, `ChatType`, or `list[ChatType]`): One or several prompts (or one list of prompts) to complete. If strings or a list of string are passed, this pipeline will continue each prompt. Alternatively, a "chat", in the form of a list of dicts with "role" and "content" keys, can be passed, or a list of such chats. When chats are passed, the model's chat template will be used to format them before passing them to the model. return_tensors (`bool`, *optional*, defaults to `False`): Returns the tensors of predictions (as token indices) in the outputs. If set to `True`, the decoded text is not returned. return_text (`bool`, *optional*): Returns the decoded texts in the outputs. return_full_text (`bool`, *optional*, defaults to `True`): If set to `False` only added text is returned, otherwise the full text is returned. Cannot be specified at the same time as `return_text`. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`): Whether or not to clean up the potential extra spaces in the text output. continue_final_message( `bool`, *optional*): This indicates that you want the model to continue the last message in the input chat rather than starting a new one, allowing you to "prefill" its response. 
By default this is `True` when the final message in the input chat has the `assistant` role and `False` otherwise, but you can manually override that behaviour by setting this flag. prefix (`str`, *optional*): Prefix added to prompt. handle_long_generation (`str`, *optional*): By default, this pipelines does not handle long generation (ones that exceed in one form or the other the model maximum length). There is no perfect way to address this (more info :https://github.com/huggingface/transformers/issues/14033#issuecomment-948385227). This provides common strategies to work around that problem depending on your use case. - `None` : default strategy where nothing in particular happens - `"hole"`: Truncates left of input, and leaves a gap wide enough to let generation happen (might truncate a lot of the prompt and not suitable when generation exceed the model capacity) tokenizer_encode_kwargs (`dict`, *optional*): Additional keyword arguments to pass along to the encoding step of the tokenizer. If the text input is a chat, it is passed to `apply_chat_template`. Otherwise, it is passed to `__call__`. generate_kwargs (`dict`, *optional*): Additional keyword arguments to pass along to the generate method of the model (see the generate method [here](./text_generation)). Return: A list or a list of lists of `dict`: Returns one of the following dictionaries (cannot return a combination of both `generated_text` and `generated_token_ids`): - **generated_text** (`str`, present when `return_text=True`) -- The generated text. - **generated_token_ids** (`torch.Tensor`, present when `return_tensors=True`) -- The token ids of the generated text. """ return super().__call__(text_inputs, **kwargs) def preprocess( self, prompt_text, prefix="", handle_long_generation=None, add_special_tokens=None, truncation=None, padding=None, max_length=None, continue_final_message=None, tokenizer_encode_kwargs=None, tools=None, documents=None, **generate_kwargs, ): # Only set non-None tokenizer kwargs, so as to rely on the tokenizer's defaults tokenizer_kwargs = { "add_special_tokens": add_special_tokens, "truncation": truncation, "padding": padding, "max_length": max_length, # NOTE: `max_length` is also a `generate` arg. 
Use `tokenizer_encode_kwargs` to avoid a name clash } tokenizer_kwargs = {key: value for key, value in tokenizer_kwargs.items() if value is not None} tokenizer_kwargs.update(tokenizer_encode_kwargs or {}) if isinstance(prompt_text, Chat): tokenizer_kwargs.pop("add_special_tokens", None) # ignore add_special_tokens on chats # If the user passes a chat that ends in an assistant message, we treat it as a prefill by default # because very few models support multiple separate, consecutive assistant messages if continue_final_message is None: continue_final_message = prompt_text.messages[-1]["role"] == "assistant" inputs = self.tokenizer.apply_chat_template( prompt_text.messages, add_generation_prompt=not continue_final_message, continue_final_message=continue_final_message, return_dict=True, return_tensors="pt", tools=tools, documents=documents, **tokenizer_kwargs, ) else: inputs = self.tokenizer(prefix + prompt_text, return_tensors="pt", **tokenizer_kwargs) inputs["prompt_text"] = prompt_text if handle_long_generation == "hole": cur_len = inputs["input_ids"].shape[-1] if "max_new_tokens" in generate_kwargs: new_tokens = generate_kwargs["max_new_tokens"] else: new_tokens = generate_kwargs.get("max_length", self.generation_config.max_length) - cur_len if new_tokens < 0: raise ValueError("We cannot infer how many new tokens are expected") if cur_len + new_tokens > self.tokenizer.model_max_length: keep_length = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( "We cannot use `hole` to handle this generation the number of desired tokens exceeds the" " models max length" ) inputs["input_ids"] = inputs["input_ids"][:, -keep_length:] if "attention_mask" in inputs: inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:] return inputs def _forward(self, model_inputs, **generate_kwargs): input_ids = model_inputs["input_ids"] attention_mask = model_inputs.get("attention_mask", None) # Allow empty prompts if input_ids.shape[1] == 0: input_ids = None attention_mask = None in_b = 1 else: in_b = input_ids.shape[0] prompt_text = model_inputs.pop("prompt_text") # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
prefix_length = generate_kwargs.pop("prefix_length", 0) if prefix_length > 0: has_max_new_tokens = "max_new_tokens" in generate_kwargs or ( "generation_config" in generate_kwargs and generate_kwargs["generation_config"].max_new_tokens is not None ) if not has_max_new_tokens: generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.generation_config.max_length generate_kwargs["max_length"] += prefix_length has_min_new_tokens = "min_new_tokens" in generate_kwargs or ( "generation_config" in generate_kwargs and generate_kwargs["generation_config"].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # User-defined `generation_config` passed to the pipeline call take precedence if "generation_config" not in generate_kwargs: generate_kwargs["generation_config"] = self.generation_config output = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs) if isinstance(output, ModelOutput): generated_sequence = output.sequences other_outputs = {k: v for k, v in output.items() if k not in {"sequences", "past_key_values"}} out_b = generated_sequence.shape[0] for key, value in other_outputs.items(): if isinstance(value, torch.Tensor) and value.shape[0] == out_b: other_outputs[key] = value.reshape(in_b, out_b // in_b, *value.shape[1:]) if isinstance(value, tuple) and len(value[0]) == out_b: value = torch.stack(value).swapaxes(0, 1) other_outputs[key] = value else: generated_sequence = output other_outputs = {} out_b = generated_sequence.shape[0] generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:]) model_outputs = { "generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text, } if other_outputs: model_outputs.update({"additional_outputs": other_outputs}) return model_outputs def postprocess( self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True, continue_final_message=None, skip_special_tokens=None, ): generated_sequence = model_outputs["generated_sequence"][0] input_ids = model_outputs["input_ids"] prompt_text = model_outputs["prompt_text"] generated_sequence = generated_sequence.numpy().tolist() records = [] other_outputs = model_outputs.get("additional_outputs", {}) split_keys = {} if other_outputs: for k, v in other_outputs.items(): if isinstance(v, torch.Tensor) and v.shape[0] == len(generated_sequence): split_keys[k] = v.numpy().tolist() skip_special_tokens = skip_special_tokens if skip_special_tokens is not None else True for idx, sequence in enumerate(generated_sequence): if return_type == ReturnType.TENSORS: record = {"generated_token_ids": sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text text = self.tokenizer.decode( sequence, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: prompt_length = 0 else: prompt_length = len( self.tokenizer.decode( input_ids[0], skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, ) ) all_text = text[prompt_length:] if return_type == ReturnType.FULL_TEXT: if isinstance(prompt_text, str): all_text = prompt_text + all_text elif isinstance(prompt_text, Chat): if continue_final_message is None: # If the user passes a chat ending in an assistant message, we treat it as a prefill by # default 
because very few models support multiple separate, consecutive assistant messages continue_final_message = prompt_text.messages[-1]["role"] == "assistant" if continue_final_message: # With assistant prefill, concat onto the end of the last message all_text = list(prompt_text.messages)[:-1] + [ { "role": prompt_text.messages[-1]["role"], "content": prompt_text.messages[-1]["content"] + all_text, } ] else: # When we're not starting from a prefill, the output is a new assistant message if self.tokenizer.response_schema: assistant_message = self.tokenizer.parse_response(all_text) else: # If there's no schema, then we have to assume it's all content assistant_message = {"role": "assistant", "content": all_text} all_text = list(prompt_text.messages) + [assistant_message] record = {"generated_text": all_text} for key, values in split_keys.items(): record[key] = values[idx] records.append(record) return records
TextGenerationPipeline
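The pipeline's own docstring above already demonstrates string and chat usage; as a compact restatement of the chat mode it documents (model name taken from that docstring, any conversational checkpoint should work):

from transformers import pipeline

generator = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-beta")
chat = [{"role": "user", "content": "Name the largest planet in one word."}]
out = generator(chat, do_sample=False, max_new_tokens=4)
print(out[0]["generated_text"][-1]["content"])   # the last entry is the assistant reply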
python
pytorch__pytorch
test/jit/test_exception.py
{ "start": 170, "end": 5970 }
class ____(TestCase): def test_pyop_exception_message(self): class Foo(torch.jit.ScriptModule): def __init__(self) -> None: super().__init__() self.conv = nn.Conv2d(1, 10, kernel_size=5) @torch.jit.script_method def forward(self, x): return self.conv(x) foo = Foo() # testing that the correct error message propagates with self.assertRaisesRegex( RuntimeError, r"Expected 3D \(unbatched\) or 4D \(batched\) input to conv2d" ): foo(torch.ones([123])) # wrong size def test_builtin_error_messsage(self): with self.assertRaisesRegex(RuntimeError, "Arguments for call are not valid"): @torch.jit.script def close_match(x): return x.masked_fill(True) with self.assertRaisesRegex( RuntimeError, "This op may not exist or may not be currently supported in TorchScript", ): @torch.jit.script def unknown_op(x): torch.set_anomaly_enabled(True) return x def test_exceptions(self): cu = torch.jit.CompilationUnit( """ def foo(cond): if bool(cond): raise ValueError(3) return 1 """ ) cu.foo(torch.tensor(0)) with self.assertRaisesRegex(torch.jit.Error, "3"): cu.foo(torch.tensor(1)) def foo(cond): a = 3 if bool(cond): raise ArbitraryError(a, "hi") # noqa: F821 if 1 == 2: raise ArbitraryError # noqa: F821 return a with self.assertRaisesRegex(RuntimeError, "undefined value ArbitraryError"): torch.jit.script(foo) def exception_as_value(): a = Exception() print(a) with self.assertRaisesRegex(RuntimeError, "cannot be used as a value"): torch.jit.script(exception_as_value) @torch.jit.script def foo_no_decl_always_throws(): raise RuntimeError("Hi") # function that has no declared type but always throws set to None output_type = next(foo_no_decl_always_throws.graph.outputs()).type() self.assertTrue(str(output_type) == "NoneType") @torch.jit.script def foo_decl_always_throws(): # type: () -> Tensor raise Exception("Hi") # noqa: TRY002 output_type = next(foo_decl_always_throws.graph.outputs()).type() self.assertTrue(str(output_type) == "Tensor") def foo(): raise 3 + 4 with self.assertRaisesRegex(RuntimeError, "must derive from BaseException"): torch.jit.script(foo) # a escapes scope @torch.jit.script def foo(): if 1 == 1: a = 1 else: if 1 == 1: raise Exception("Hi") # noqa: TRY002 else: raise Exception("Hi") # noqa: TRY002 return a self.assertEqual(foo(), 1) @torch.jit.script def tuple_fn(): raise RuntimeError("hello", "goodbye") with self.assertRaisesRegex(torch.jit.Error, "hello, goodbye"): tuple_fn() @torch.jit.script def no_message(): raise RuntimeError with self.assertRaisesRegex(torch.jit.Error, "RuntimeError"): no_message() def test_assertions(self): cu = torch.jit.CompilationUnit( """ def foo(cond): assert bool(cond), "hi" return 0 """ ) cu.foo(torch.tensor(1)) with self.assertRaisesRegex(torch.jit.Error, "AssertionError: hi"): cu.foo(torch.tensor(0)) @torch.jit.script def foo(cond): assert bool(cond), "hi" foo(torch.tensor(1)) # we don't currently validate the name of the exception with self.assertRaisesRegex(torch.jit.Error, "AssertionError: hi"): foo(torch.tensor(0)) def test_python_op_exception(self): @torch.jit.ignore def python_op(x): raise Exception("bad!") # noqa: TRY002 @torch.jit.script def fn(x): return python_op(x) with self.assertRaisesRegex( RuntimeError, "operation failed in the TorchScript interpreter" ): fn(torch.tensor(4)) def test_dict_expansion_raises_error(self): def fn(self): d = {"foo": 1, "bar": 2, "baz": 3} return {**d} with self.assertRaisesRegex( torch.jit.frontend.NotSupportedError, "Dict expansion " ): torch.jit.script(fn) def test_custom_python_exception(self): class 
MyValueError(ValueError): pass @torch.jit.script def fn(): raise MyValueError("test custom exception") with self.assertRaisesRegex( torch.jit.Error, "jit.test_exception.MyValueError: test custom exception" ): fn() def test_custom_python_exception_defined_elsewhere(self): from jit.myexception import MyKeyError @torch.jit.script def fn(): raise MyKeyError("This is a user defined key error") with self.assertRaisesRegex( torch.jit.Error, "jit.myexception.MyKeyError: This is a user defined key error", ): fn() if __name__ == "__main__": raise RuntimeError( "This test is not currently used and should be " "enabled in discover_tests.py if required." )
TestException
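The tests above assert that exceptions raised inside TorchScript surface as torch.jit.Error. A self-contained sketch of that behaviour outside the test harness follows; the function is hypothetical and only illustrates the raise-inside-script pattern the tests cover.

import torch

@torch.jit.script
def checked_sqrt(x: torch.Tensor) -> torch.Tensor:
    if bool((x < 0).any()):
        raise ValueError("negative input")
    return x.sqrt()

try:
    checked_sqrt(torch.tensor([-1.0]))
except torch.jit.Error as err:        # raises from scripted code surface as torch.jit.Error
    print("caught:", err)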
python
facebook__pyre-check
client/backend_arguments.py
{ "start": 3152, "end": 5198 }
class ____: source_root: Path artifact_root: Path checked_directory: Path targets: Sequence[str] = dataclasses.field(default_factory=list) targets_fallback_sources: Optional[Sequence[search_path.Element]] = None mode: Optional[str] = None isolation_prefix: Optional[str] = None bxl_builder: Optional[str] = None kill_buck_after_build: bool = False number_of_threads: Optional[int] = None def serialize(self) -> Dict[str, object]: mode = self.mode isolation_prefix = self.isolation_prefix bxl_builder = self.bxl_builder targets_fallback_sources = self.targets_fallback_sources return { "kind": "buck", "targets": self.targets, **( {} if targets_fallback_sources is None else { "targets_fallback_sources": [ element.command_line_argument() for element in targets_fallback_sources ], } ), **({} if mode is None else {"mode": mode}), **( {} if isolation_prefix is None else {"isolation_prefix": isolation_prefix} ), **({} if bxl_builder is None else {"bxl_builder": bxl_builder}), "source_root": str(self.source_root), "artifact_root": str(self.artifact_root), "kill_buck_after_build": self.kill_buck_after_build, **( {} if self.number_of_threads is None else {"number_of_threads": self.number_of_threads} ), } def get_checked_directory_allowlist(self) -> Set[str]: return {str(self.checked_directory)} def cleanup(self) -> None: shutil.rmtree(str(self.artifact_root), ignore_errors=True) SourcePath = Union[SimpleSourcePath, WithUnwatchedDependencySourcePath, BuckSourcePath] @dataclasses.dataclass(frozen=True)
BuckSourcePath
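A hedged construction-and-serialization sketch for the frozen dataclass above; the paths and Buck target are placeholders, and the snippet assumes the surrounding client/backend_arguments.py module context for the class definition and its imports.

from pathlib import Path

buck_source = BuckSourcePath(
    source_root=Path("/repo"),
    artifact_root=Path("/tmp/pyre_link_trees"),
    checked_directory=Path("/repo/project"),
    targets=["//project:lib"],
    mode="@mode/dev",
)
assert buck_source.serialize()["kind"] == "buck"
assert buck_source.get_checked_directory_allowlist() == {"/repo/project"}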
python
pytorch__pytorch
test/dynamo/test_subclasses.py
{ "start": 49580, "end": 68745 }
class ____(torch.nn.Module): def forward(self, L_x_: "f32[3, 4]"): l_x_ = L_x_ wrap_body_0 = self.wrap_body_0 wrap = torch.ops.higher_order.wrap(wrap_body_0, l_x_); wrap_body_0 = l_x_ = None getitem: "f32[3, 4]" = wrap[0]; wrap = None return (getitem,) class wrap_body_0(torch.nn.Module): def forward(self, l_x_: "f32[3, 4]"): add_: "f32[3, 4]" = l_x_.add_(1.0); l_x_ = None return (add_,) """, ) def test_has_torch_function(self): class MyTensor: @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): if kwargs is None: kwargs = {} if func is torch.max: return torch.tensor(123) return func(*args, **kwargs) class LocalSubclass(torch.Tensor): @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): if kwargs is None: kwargs = {} return func(*args, **kwargs) def fn(x): return torch.overrides.has_torch_function_unary( x ), torch.overrides.has_torch_function_variadic(x) for test_class in [MyTensor, LocalSubclass]: x = test_class() ref0 = fn(x) ref1 = fn(4) opt_fn = torch.compile(fn, backend="eager") res0 = opt_fn(x) res1 = opt_fn(4) self.assertEqual(ref0, res0) self.assertEqual(ref1, res1) def test_wrapper_subclass_guards_on_inner_tensor(self): # Holds an inner tensor, that has a distinct shape from the outer wrapper tensor. # Also adds additional guards on the inner tensor's sizes. # When the first input to an op has x.shape[0] > 5, we insert an extra add node. class DoubleSizeMaybeAddGeThreeTensor(torch.Tensor): @staticmethod def __new__(cls, inner): # Double the outer-most dimension outer_shape = (inner.shape[0] * 2,) + inner.shape[1:] return torch.Tensor._make_wrapper_subclass( # TODO: right now, _make_wrapper_subclass's dynamic shape interaction is not great. # Calling the overload that has kwargs causes us to go down the first overload path, # which will **always** specialize sizes. # We should probably eventually fix this so that the first overload can just handle dynamic shapes. 
cls, outer_shape, inner.stride(), None, None, inner.dtype, inner.layout, inner.device, False, inner.requires_grad, ) def __init__(self, inner): self.inner_elem = inner def __tensor_flatten__(self): return ["inner_elem"], None @staticmethod def __tensor_unflatten__(inner_tensors, _, outer_size, outer_stride): return DoubleSizeMaybeAddGeThreeTensor(inner_tensors["inner_elem"]) def __repr__(self): return f"DoubleSizeMayberAddGeThreeTensor({repr(self.inner_elem)})" @classmethod def __torch_dispatch__(cls, func, types, args=(), kwargs=None): if kwargs is None: kwargs = {} args_inner = torch.utils._pytree.tree_map_only( DoubleSizeMaybeAddGeThreeTensor, lambda x: x.inner_elem, args ) out_inner = func(*args_inner, **kwargs) # Add guards on the inner tensor's sizes if args_inner[0].shape[0] > 3: out_inner += 2 return DoubleSizeMaybeAddGeThreeTensor(out_inner) curr_var_to_val = None curr_var_to_sources = None guards = None def backend(gm, args): context = torch._guards.TracingContext.get() # Grab info on sources and guards from the shapeenv nonlocal curr_var_to_val nonlocal curr_var_to_sources nonlocal guards guards = [str(g.expr) for g in context.fake_mode.shape_env.guards] curr_var_to_val = { str(k): v for k, v in context.fake_mode.shape_env.var_to_val.items() } curr_var_to_sources = { str(k): v[0].name() for k, v in context.fake_mode.shape_env.var_to_sources.items() } return gm @torch.compile(backend=backend) def fn(x): if x.shape[0] < 13: return torch.mul(x, x) else: return torch.div(x, x) inp = torch.ones(4, 4) x = DoubleSizeMaybeAddGeThreeTensor(inp) torch._dynamo.mark_dynamic(x, 0) res = fn(x) # noqa: F841 # During fakeifying, we end up allocating a separate symint # for the outer and inner tensor (in this test, s0 is unused). expected_var_to_val = { "s50": 4, "s77": 8, } expected_var_to_sources = { "s50": "L['x'].inner_elem.size()[0]", "s77": "L['x'].size()[0]", } self.assertEqual(curr_var_to_val, expected_var_to_val) self.assertEqual(curr_var_to_sources, expected_var_to_sources) self.assertExpectedInline( "\n".join(guards), """\ Eq(2*s50, s77) 2*s50 < 13 s50 > 3""", ) def test_wrapper_subclass_with_same_sized_inner_tensor(self): # shouldn't recompile for different sizes when dynamic=True sub1 = ScaledTensor(torch.randn(2, 4), torch.randn(6)) sub2 = ScaledTensor(torch.randn(3, 5), torch.randn(7)) self.assertFalse(_recompiles_for_inputs(func, (sub1,), (sub2,), dynamic=True)) # should recompile for different data size when dynamic=False sub1 = ScaledTensor(torch.randn(2, 4), torch.randn(6)) sub2 = ScaledTensor(torch.randn(3, 5), torch.randn(6)) self.assertTrue(_recompiles_for_inputs(func, (sub1,), (sub2,), dynamic=False)) # avoid recompile using manual mark_dynamic() for different data size sub1 = ScaledTensor(torch.randn(2, 4), torch.randn(6)) # NB: mark_dynamic() on outer tensor should translate to inner tensors of the same size torch._dynamo.mark_dynamic(sub1, 0) torch._dynamo.mark_dynamic(sub1, 1) sub2 = ScaledTensor(torch.randn(3, 5), torch.randn(6)) self.assertFalse(_recompiles_for_inputs(func, (sub1,), (sub2,), dynamic=False)) def test_wrapper_subclass_with_differently_sized_inner_tensor(self): # should recompile for different scale size when dynamic=False sub1 = ScaledTensor(torch.randn(2, 4), torch.randn(3)) sub2 = ScaledTensor(torch.randn(2, 4), torch.randn(5)) self.assertTrue(_recompiles_for_inputs(func, (sub1,), (sub2,), dynamic=False)) # still recompiles using manual mark_dynamic() on outer for different scale size sub1 = ScaledTensor(torch.randn(2, 4), torch.randn(3)) # NB: 
mark_dynamic() on outer tensor doesn't translate to inner tensors of different size torch._dynamo.mark_dynamic(sub1, 0) torch._dynamo.mark_dynamic(sub1, 1) sub2 = ScaledTensor(torch.randn(2, 4), torch.randn(5)) self.assertTrue(_recompiles_for_inputs(func, (sub1,), (sub2,), dynamic=False)) def test_recompiles_with_optional_inner_tensor(self): def f(x): return x + 1 # sub1 does not have the optional tensor specified while sub2 does sub1 = OptionalScaledTensor(torch.randn(2, 4), None) sub2 = OptionalScaledTensor(torch.randn(2, 4), torch.randn(2, 4)) # sanity check; don't recompile for same input self.assertFalse(_recompiles_for_inputs(f, (sub1,), (sub1,), dynamic=True)) self.assertFalse(_recompiles_for_inputs(f, (sub2,), (sub2,), dynamic=True)) # these should recompile; optional tensor changes between specified and unspecified self.assertTrue(_recompiles_for_inputs(f, (sub1,), (sub2,), dynamic=True)) self.assertTrue(_recompiles_for_inputs(f, (sub2,), (sub1,), dynamic=True)) f_compiled = torch.compile(f, backend="aot_eager") self.assertEqual(f(sub1)._data, f_compiled(sub1)._data) self.assertEqual(f(sub2)._data, f_compiled(sub2)._data) def test_torch_dispatch_subclass_guard_recompile(self): x = torch.ones(2, 2) x_two = TwoTensor(x.clone(), x.clone()) def fn(w): return torch.add(w, 1.0) fn_opt = torch.compile(backend="eager")(fn) ref = fn(x_two) res = fn_opt(x_two) self.assertEqual(ref, res) # ensure no recompilation on same input type with unittest.mock.patch("torch._dynamo.config.error_on_recompile", True): fn_opt(TwoTensor(x + 1, x + 2)) # recompile! ref = fn(x) res = fn_opt(x) self.assertEqual(ref, res) def test_tensor_subclass_ctx_guards(self): x = CtxSubclassTensor(torch.ones(2), 3) x2 = CtxSubclassTensor(torch.ones(2), 3) x3 = CtxSubclassTensor(torch.ones(2), 4) _check_recompiles(self, lambda x: x * x, (x,), (x2,), False) _check_recompiles(self, lambda x: x * x, (x,), (x3,), True) def test_tensor_subclass_ctx_recursive_guards(self): x0 = torch.ones(2, 2) x1 = CtxSubclassTensor(x0.clone(), 2) x2 = CtxSubclassTensor(x0.clone(), 3) tt0 = TwoTensor(x0.clone(), x1) tt1 = TwoTensor(x0.clone(), x2) _check_recompiles(self, lambda x: x * x, (tt0,), (tt1,), True) def test_tensor_subclass_ctx_custom_guards_override(self): class CtxSubclassTensorCustomGuardFn(CtxSubclassTensor): @classmethod def __metadata_guard__(cls, orig_data, other): return orig_data[0] <= other[0] x = CtxSubclassTensorCustomGuardFn(torch.ones(2), 2) x2 = CtxSubclassTensorCustomGuardFn(torch.ones(2), 3) x3 = CtxSubclassTensorCustomGuardFn(torch.ones(2), 1) _check_recompiles(self, lambda x: x * x, (x,), (x2,), False) _check_recompiles(self, lambda x: x * x, (x,), (x3,), True) def test_tensor_subclass_ctx_custom_guards_error_arg_num(self): import torch._dynamo.exc class CtxSubclassTensorCustomGuardFn(CtxSubclassTensor): @classmethod def __metadata_guard__(cls, y): # Shouldn't reach here return False x = CtxSubclassTensorCustomGuardFn(torch.ones(2), 3) self.assertRaisesRegex( torch._dynamo.exc.InternalTorchDynamoError, "Tensor subclass method __metadata_guard__ must take exactly two subclass metadata arguments", lambda: torch.compile(lambda x: x * x)(x), ) def test_tensor_subclass_ctx_custom_guards_error_not_classmethod(self): import torch._dynamo.exc class CtxSubclassTensorCustomGuardFn(CtxSubclassTensor): def __metadata_guard__(self, x, y): return False x = CtxSubclassTensorCustomGuardFn(torch.ones(2), 3) self.assertRaisesRegex( torch._dynamo.exc.InternalTorchDynamoError, "Tensor subclass method __metadata_guard__ must be a 
classmethod", lambda: torch.compile(lambda x: x * x)(x), ) def test_subclass_constructor_proxying(self): import dataclasses from collections import namedtuple from typing import Any @dataclasses.dataclass(frozen=True) class SubclassTensorArgs: original_shape: torch.Size device: torch.device inner_meta: Any SubclassTensorArgs2 = namedtuple( "SubclassTensorArgs2", [ "original_shape", "device", "inner_meta", ], ) class SubclassTensor(torch.Tensor): @staticmethod def __new__(cls, a, meta): shape = a.shape kwargs = {} kwargs["strides"] = a.stride() kwargs["storage_offset"] = a.storage_offset() kwargs["device"] = a.device kwargs["layout"] = a.layout kwargs["requires_grad"] = a.requires_grad kwargs["dtype"] = a.dtype out = torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs) return out def __init__(self, a, meta): self.a = a self.meta = meta def __repr__(self): a_repr = repr(self.a) return f"SubclassTensor({a_repr})" def __tensor_flatten__(self): return ["a"], self.meta @staticmethod def __tensor_unflatten__(inner_tensors, meta, _, __): a = inner_tensors["a"] return SubclassTensor(a, meta) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if kwargs is None: kwargs = {} args_a = pytree.tree_map( lambda x: x.a if isinstance(x, SubclassTensor) else x, args ) kwargs_a = pytree.tree_map( lambda x: x.a if isinstance(x, SubclassTensor) else x, kwargs ) out_a = func(*args_a, **kwargs_a) out = pytree.tree_map( lambda x: ( SubclassTensor(x, SubclassTensorArgs2(x.shape, x.device, None)) if isinstance(x, torch.Tensor) else x ), out_a, ) return return_and_correct_aliasing(func, args, kwargs, out) @torch.compile(fullgraph=True) def f1(x): meta = SubclassTensorArgs( x.shape, x.device, SubclassTensorArgs(x.shape, x.device, None) ) out = SubclassTensor(x, meta) return out * out x = torch.randn(3, 3) f1(x) @torch.compile(fullgraph=True) def f1(x): meta = SubclassTensorArgs2( x.shape, x.device, SubclassTensorArgs2(x.shape, x.device, None) ) out = SubclassTensor(x, meta) return out * out x = torch.randn(3, 3) f1(x) def test_torch_function_subclass_survives_into_aot_autograd(self): # If you have a tensor subclass that relies on dispatch into the same op # without unwrapping and calling torch._C.DisableTorchFunctionSubclass(), # the torch function-ness will survive into AOTAutograd. Today, NestedTensor # actually relies on this behavior! Because that torch function logic # runs during AOTAutograd, this test tests that there is no logic below # that relies torch function that gets unexpectedly disabled after we # redispatch from the subclass's torch function. 
class SubTensor(torch.Tensor): @staticmethod def __new__(cls, t): return torch.Tensor._make_wrapper_subclass( cls, t.shape, t.stride(), t.storage_offset(), torch.contiguous_format, t.dtype, torch.strided, t.device, False, t.requires_grad, "sizes", False, False, None, ) def __init__(self, t): super().__init__() self._t = t def __tensor_flatten__(self): return ["_t"], {} @staticmethod def __tensor_unflatten__(inner_tensors, ctx, outer_size, outer_stride): t = inner_tensors["_t"] return SubTensor(t) def __repr__(self): return f"SubTensor({self._t})" @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): if kwargs is None: kwargs = {} with torch._C.DisableTorchFunctionSubclass(): return func(*args, **kwargs) @classmethod def __torch_dispatch__(cls, func, types, args=(), kwargs=None): kwargs = {} if kwargs is None else kwargs new_args = pytree.tree_map_only(SubTensor, lambda s: s._t, args) output = func(*new_args, **kwargs) output = pytree.tree_map_only( torch.Tensor, lambda t: SubTensor(t), output ) return output @torch.compile(dynamic=True) def f(x): return x.unflatten(-1, [2, 5]) s = SubTensor(torch.randn(3, 10)) f(s) # Guard validation upsets the guard # https://github.com/pytorch/pytorch/issues/129936 @unittest.expectedFailure def test_recompile_with_symbool_inputs(self): def f(pred: bool): if pred: return torch.ones([3, 4]) else: return torch.ones([4, 3]) def test_recompilation( f, x, sizes, exp_graphs, exp_frame_count, exp_shape_env_guards ): torch._dynamo.reset() shape_env = ShapeEnv() backend = torch._dynamo.testing.EagerAndRecordGraphs() cnt = torch._dynamo.testing.CompileCounterWithBackend(backend) f_cond = torch.compile(f, backend=cnt, fullgraph=True) with torch._subclasses.fake_tensor.FakeTensorMode( shape_env=shape_env ) as fake_mode: fake_inp = fake_mode.from_tensor( x, symbolic_context=StatelessSymbolicContext( dynamic_sizes=[DimDynamic.DYNAMIC for i in range(x.dim())] ), ) for i, size in enumerate(sizes): pred = fake_inp.size(0) == size f_cond(pred) actual = normalize_gm( backend.graphs[exp_frame_count[i] - 1].print_readable( print_output=False ) ) actual_guard_str = [str(guard.expr) for guard in shape_env.guards] self.assertExpectedInline(actual, exp_graphs[i]) self.assertEqual(cnt.frame_count, exp_frame_count[i]) self.assertEqual(actual_guard_str, exp_shape_env_guards[i]) true_graph = """\
GraphModule
python
pyqtgraph__pyqtgraph
pyqtgraph/widgets/DataFilterWidget.py
{ "start": 184, "end": 1176 }
class ____(ptree.ParameterTree):
    """
    This class allows the user to filter multi-column data sets by specifying
    multiple criteria

    Wraps methods from DataFilterParameter: setFields, generateMask,
    filterData, and describe.
    """
    sigFilterChanged = QtCore.Signal(object)

    def __init__(self):
        ptree.ParameterTree.__init__(self, showHeader=False)
        self.params = DataFilterParameter()
        self.setParameters(self.params)
        self.params.sigFilterChanged.connect(self.sigFilterChanged)

        self.setFields = self.params.setFields
        self.generateMask = self.params.generateMask
        self.filterData = self.params.filterData
        self.describe = self.params.describe

    def parameters(self):
        return self.params

    def addFilter(self, name):
        """Add a new filter and return the created parameter item."""
        return self.params.addNew(name)
DataFilterWidget
python
numpy__numpy
numpy/ma/tests/test_subclassing.py
{ "start": 3174, "end": 4548 }
class ____(SubArray):
    def __str__(self):
        return f'myprefix {self.view(SubArray)} mypostfix'

    def __repr__(self):
        # Return a repr that does not start with 'name('
        return f'<{self.__class__.__name__} {self}>'

    def _validate_input(self, value):
        if not isinstance(value, ComplicatedSubArray):
            raise ValueError("Can only set to MySubArray values")
        return value

    def __setitem__(self, item, value):
        # validation ensures direct assignment with ndarray or
        # masked_print_option will fail
        super().__setitem__(item, self._validate_input(value))

    def __getitem__(self, item):
        # ensure getter returns our own class also for scalars
        value = super().__getitem__(item)
        if not isinstance(value, np.ndarray):  # scalar
            value = value.__array__().view(ComplicatedSubArray)
        return value

    @property
    def flat(self):
        return CSAIterator(self)

    @flat.setter
    def flat(self, value):
        y = self.ravel()
        y[:] = value

    def __array_wrap__(self, obj, context=None, return_scalar=False):
        obj = super().__array_wrap__(obj, context, return_scalar)
        if context is not None and context[0] is np.multiply:
            obj.info['multiplied'] = obj.info.get('multiplied', 0) + 1
        return obj
ComplicatedSubArray
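The ComplicatedSubArray fixture above builds on NumPy's standard view-casting machinery. As a minimal sketch of that underlying pattern (the canonical __array_finalize__ example from the NumPy subclassing guide, not part of the test suite above; the `info` attribute is illustrative):

import numpy as np

class InfoArray(np.ndarray):
    # hypothetical minimal subclass: carries an extra 'info' attribute
    def __new__(cls, input_array, info=None):
        obj = np.asarray(input_array).view(cls)  # view-cast into the subclass
        obj.info = info
        return obj

    def __array_finalize__(self, obj):
        # called for explicit construction, view casting and new-from-template
        if obj is None:
            return
        self.info = getattr(obj, "info", None)

a = InfoArray([1.0, 2.0, 3.0], info="metres")
assert (a * 2).info == "metres"  # attribute survives ufunc output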
python
django__django
django/template/backends/jinja2.py
{ "start": 1803, "end": 2730 }
class ____:
    def __init__(self, template, backend):
        self.template = template
        self.backend = backend
        self.origin = Origin(
            name=template.filename,
            template_name=template.name,
        )

    def render(self, context=None, request=None):
        if context is None:
            context = {}
        if request is not None:
            context["request"] = request
            context["csrf_input"] = csrf_input_lazy(request)
            context["csrf_token"] = csrf_token_lazy(request)
            for context_processor in self.backend.template_context_processors:
                context.update(context_processor(request))
        try:
            return self.template.render(context)
        except jinja2.TemplateSyntaxError as exc:
            new = TemplateSyntaxError(exc.args)
            new.template_debug = get_exception_info(exc)
            raise new from exc
Template
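The Template wrapper above is what Django's Jinja2 backend returns once that backend is enabled. A settings sketch for enabling it (the project module path and directory name are illustrative, not taken from the row above):

# settings.py (sketch)
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.jinja2.Jinja2",
        "DIRS": [BASE_DIR / "jinja2"],  # project-level template directory (illustrative)
        "APP_DIRS": True,               # also look in each app's jinja2/ directory
        "OPTIONS": {
            # dotted path to a callable returning a configured jinja2.Environment
            "environment": "myproject.jinja2.environment",
        },
    },
]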
python
sphinx-doc__sphinx
sphinx/environment/__init__.py
{ "start": 3076, "end": 35485 }
class ____: """The environment in which the ReST files are translated. Stores an inventory of cross-file targets and provides doctree transformations to resolve links to them. """ # --------- ENVIRONMENT INITIALIZATION ------------------------------------- srcdir = _StrPathProperty() doctreedir = _StrPathProperty() # builder is created after the environment. _builder_cls: type[Builder] def __init__(self, app: Sphinx) -> None: self._app: Sphinx = app self.doctreedir = app.doctreedir self.srcdir = app.srcdir self.config: Config = None # type: ignore[assignment] self.config_status: int = CONFIG_UNSET self.config_status_extra: str = '' self.events: EventManager = app.events self.project: Project = app.project self.version: Mapping[str, int] = _get_env_version(app.extensions) # the method of doctree versioning; see set_versioning_method self.versioning_condition: Literal[False] | Callable[[Node], bool] | None = None self.versioning_compare: bool | None = None # the docutils settings for building self.settings: dict[str, Any] = default_settings.copy() self.settings['env'] = self # All "docnames" here are /-separated and relative and exclude # the source suffix. # docname -> time of reading (in integer microseconds) # contains all read docnames self.all_docs: dict[str, int] = {} # docname -> set of dependent file # names, relative to documentation root self.dependencies: dict[str, set[_StrPath]] = defaultdict(set) # docname -> set of included file # docnames included from other documents self.included: dict[str, set[str]] = defaultdict(set) # docnames to re-read unconditionally on next build self.reread_always: set[str] = set() self._pickled_doctree_cache: dict[str, bytes] = {} """In-memory cache for reading pickled doctrees from disk. docname -> pickled doctree This cache is used in the ``get_doctree`` method to avoid reading the doctree from disk multiple times. """ self._write_doc_doctree_cache: dict[str, nodes.document] = {} """In-memory cache for unpickling doctrees from disk. docname -> doctree Items are added in ``Builder.write_doctree``, during the read phase, then used only in the ``get_and_resolve_doctree`` method. 
""" # File metadata # docname -> dict of metadata items self.metadata: dict[str, dict[str, Any]] = defaultdict(dict) # TOC inventory # docname -> title node self.titles: dict[str, nodes.title] = {} # docname -> title node; only different if # set differently with title directive self.longtitles: dict[str, nodes.title] = {} # docname -> table of contents nodetree self.tocs: dict[str, nodes.bullet_list] = {} # docname -> number of real entries self.toc_num_entries: dict[str, int] = {} # used to determine when to show the TOC # in a sidebar (don't show if it's only one item) # docname -> dict of sectionid -> number self.toc_secnumbers: dict[str, dict[str, tuple[int, ...]]] = {} # docname -> dict of figtype -> dict of figureid -> number self.toc_fignumbers: dict[str, dict[str, dict[str, tuple[int, ...]]]] = {} # docname -> list of toctree includefiles self.toctree_includes: dict[str, list[str]] = {} # docname -> set of files (containing its TOCs) to rebuild too self.files_to_rebuild: dict[str, set[str]] = {} # docnames that have :glob: toctrees self.glob_toctrees: set[str] = set() # docnames that have :numbered: toctrees self.numbered_toctrees: set[str] = set() # domain-specific inventories, here to be pickled # domainname -> domain-specific dict self.domaindata: dict[str, dict[str, Any]] = {} # these map absolute path -> (docnames, unique filename) self.images: FilenameUniqDict = FilenameUniqDict() # filename -> (set of docnames, destination) self.dlfiles: DownloadFiles = DownloadFiles() # the original URI for images self.original_image_uri: dict[_StrPath, str] = {} # temporary data storage while reading a document self.current_document: _CurrentDocument = _CurrentDocument() # context for cross-references (e.g. current module or class) # this is similar to ``self.current_document``, # but will for example be copied to attributes of "any" cross references self.ref_context: dict[str, Any] = {} # search index data # docname -> title self._search_index_titles: dict[str, str | None] = {} # docname -> filename self._search_index_filenames: dict[str, str] = {} # stemmed words -> set(docname) self._search_index_mapping: dict[str, set[str]] = {} # stemmed words in titles -> set(docname) self._search_index_title_mapping: dict[str, set[str]] = {} # docname -> all titles in document self._search_index_all_titles: dict[str, list[tuple[str, str | None]]] = {} # docname -> list(index entry) self._search_index_index_entries: dict[str, list[tuple[str, str, str]]] = {} # objtype -> index self._search_index_objtypes: dict[tuple[str, str], int] = {} # objtype index -> (domain, type, objname (localized)) self._search_index_objnames: dict[int, tuple[str, str, str]] = {} # all the registered domains, set by the application self.domains: _DomainsContainer = _DomainsContainer._from_environment( self, registry=app.registry ) # set up environment self.setup(app) def __getstate__(self) -> dict[str, Any]: """Obtains serializable data for pickling.""" __dict__ = self.__dict__.copy() # clear unpickleable attributes __dict__.update(_app=None, domains=None, events=None) # clear in-memory doctree caches, to reduce memory consumption and # ensure that, upon restoring the state, the most recent pickled files # on the disk are used instead of those from a possibly outdated state __dict__.update(_pickled_doctree_cache={}, _write_doc_doctree_cache={}) return __dict__ def __setstate__(self, state: dict[str, Any]) -> None: self.__dict__.update(state) def setup(self, app: Sphinx) -> None: """Set up BuildEnvironment object.""" if 
self.version and self.version != _get_env_version(app.extensions): raise BuildEnvironmentError(__('build environment version not current')) if self.srcdir and self.srcdir != app.srcdir: raise BuildEnvironmentError(__('source directory has changed')) if self.project: app.project.restore(self.project) self._app = app self.doctreedir = app.doctreedir self.events = app.events self.srcdir = app.srcdir self.project = app.project self.version = _get_env_version(app.extensions) # initialise domains if self.domains is None: # if we are unpickling an environment, we need to recreate the domains self.domains = _DomainsContainer._from_environment( self, registry=app.registry ) # setup domains (must do after all initialization) self.domains._setup() # Initialise config. # The old config is self.config, restored from the pickled environment. # The new config is app.config, always recreated from ``conf.py`` self.config_status, self.config_status_extra = self._config_status( old_config=self.config, new_config=app.config, verbosity=app.config.verbosity, ) self.config = app.config # initialize settings self._update_settings(app.config) @property def app(self) -> Sphinx: _deprecation_warning(__name__, 'BuildEnvironment.app', remove=(11, 0)) return self._app @app.setter def app(self, app: Sphinx) -> None: _deprecation_warning(__name__, 'BuildEnvironment.app', remove=(11, 0)) self._app = app @app.deleter def app(self) -> None: _deprecation_warning(__name__, 'BuildEnvironment.app', remove=(11, 0)) del self._app @property def _registry(self) -> SphinxComponentRegistry: return self._app.registry @property def _tags(self) -> Tags: return self._app.tags @staticmethod def _config_status( *, old_config: Config | None, new_config: Config, verbosity: int ) -> tuple[int, str]: """Report the differences between two Config objects. Returns a triple of: 1. The new configuration 2. A status code indicating how the configuration has changed. 3. A status message indicating what has changed. 
""" if old_config is None: return CONFIG_NEW, '' if old_config.extensions != new_config.extensions: old_extensions = set(old_config.extensions) new_extensions = set(new_config.extensions) extensions = old_extensions ^ new_extensions if len(extensions) == 1: extension = extensions.pop() else: extension = f'{len(extensions)}' return CONFIG_EXTENSIONS_CHANGED, f' ({extension!r})' # Log any changes in configuration keys if changed_keys := _differing_config_keys(old_config, new_config): changed_num = len(changed_keys) if changed_num == 1: logger.info( __('The configuration has changed (1 option: %r)'), next(iter(changed_keys)), ) elif changed_num <= 5 or verbosity >= 1: logger.info( __('The configuration has changed (%d options: %s)'), changed_num, ', '.join(map(repr, sorted(changed_keys))), ) else: logger.info( __('The configuration has changed (%d options: %s, ...)'), changed_num, ', '.join(map(repr, sorted(changed_keys)[:5])), ) # check if a config value was changed that affects how doctrees are read for item in new_config.filter(frozenset({'env'})): if old_config[item.name] != item.value: return CONFIG_CHANGED, f' ({item.name!r})' return CONFIG_OK, '' def _update_settings(self, config: Config) -> None: """Update settings by new config.""" self.settings['input_encoding'] = config.source_encoding self.settings['trim_footnote_reference_space'] = ( config.trim_footnote_reference_space ) self.settings['language_code'] = config.language # Allow to disable by 3rd party extension (workaround) self.settings.setdefault('smart_quotes', True) def set_versioning_method( self, method: str | Callable[[Node], bool], compare: bool ) -> None: """Set the doctree versioning method for this environment. Versioning methods are a builder property; only builders with the same versioning method can share the same doctree directory. Therefore, we raise an exception if the user tries to use an environment with an incompatible versioning method. """ condition: Literal[False] | Callable[[Node], bool] if callable(method): condition = method else: if method not in versioning_conditions: raise ValueError('invalid versioning method: %r' % method) condition = versioning_conditions[method] if self.versioning_condition not in {None, condition}: msg = __( 'This environment is incompatible with the ' 'selected builder, please choose another ' 'doctree directory.' ) raise SphinxError(msg) self.versioning_condition = condition self.versioning_compare = compare def clear_doc(self, docname: str) -> None: """Remove all traces of a source file in the inventory.""" if docname in self.all_docs: self.all_docs.pop(docname, None) self.included.pop(docname, None) self.reread_always.discard(docname) self.domains._clear_doc(docname) def merge_info_from( self, docnames: Iterable[str], other: BuildEnvironment, app: Sphinx ) -> None: """Merge global information gathered about *docnames* while reading them from the *other* environment. This possibly comes from a parallel build process. """ docnames = frozenset(docnames) for docname in docnames: self.all_docs[docname] = other.all_docs[docname] self.included[docname] = other.included[docname] if docname in other.reread_always: self.reread_always.add(docname) self.domains._merge_domain_data(docnames, other.domaindata) self.events.emit('env-merge-info', self, docnames, other) def path2doc(self, filename: str | os.PathLike[str]) -> str | None: """Return the docname for the filename if the file is document. *filename* should be absolute or relative to the source directory. 
""" return self.project.path2doc(filename) def doc2path(self, docname: str, base: bool = True) -> _StrPath: """Return the filename for the document name. If *base* is True, return absolute path under self.srcdir. If *base* is False, return relative path to self.srcdir. """ return self.project.doc2path(docname, absolute=base) def relfn2path( self, filename: str | Path, docname: str | None = None ) -> tuple[str, str]: """Return paths to a file referenced from a document, relative to documentation root and absolute. In the input "filename", absolute filenames are taken as relative to the source dir, while relative filenames are relative to the dir of the containing document. """ file_name = Path(filename) if file_name.parts[:1] in {('/',), ('\\',)}: abs_fn = self.srcdir.joinpath(*file_name.parts[1:]).resolve() else: if not docname: if self.docname: docname = self.docname else: msg = 'docname' raise KeyError(msg) doc_dir = self.doc2path(docname, base=False).parent abs_fn = self.srcdir.joinpath(doc_dir, file_name).resolve() rel_fn = _relative_path(abs_fn, self.srcdir) return rel_fn.as_posix(), os.fspath(abs_fn) @property def found_docs(self) -> set[str]: """Contains all existing docnames.""" return self.project.docnames def find_files(self, config: Config, builder: Builder) -> None: """Find all source files in the source dir and put them in self.found_docs. """ try: exclude_paths = ( self.config.exclude_patterns + self.config.templates_path + builder.get_asset_paths() ) self.project.discover(exclude_paths, self.config.include_patterns) # Current implementation is applying translated messages in the reading # phase.Therefore, in order to apply the updated message catalog, it is # necessary to re-process from the reading phase. Here, if dependency # is set for the doc source and the mo file, it is processed again from # the reading phase when mo is updated. In the future, we would like to # move i18n process into the writing phase, and remove these lines. if builder.use_message_catalog: # add catalog mo file dependency repo = CatalogRepository( self.srcdir, self.config.locale_dirs, self.config.language, self.config.source_encoding, ) mo_paths = {c.domain: c.mo_path for c in repo.catalogs} for docname in self.found_docs: domain = docname_to_domain(docname, self.config.gettext_compact) if domain in mo_paths: self.note_dependency(mo_paths[domain], docname=docname) except OSError as exc: raise DocumentError( __('Failed to scan documents in %s: %r') % (self.srcdir, exc) ) from exc def get_outdated_files( self, config_changed: bool ) -> tuple[set[str], set[str], set[str]]: """Return (added, changed, removed) sets.""" # clear all files no longer present removed = self.all_docs.keys() - self.found_docs added: set[str] = set() changed: set[str] = set() if config_changed: # config values affect e.g. 
substitutions added = self.found_docs return added, changed, removed for docname in self.found_docs: if docname not in self.all_docs: logger.debug('[build target] added %r', docname) added.add(docname) continue # if the document has changed, rebuild if _has_doc_changed( docname, filename=self.doc2path(docname), reread_always=self.reread_always, doctreedir=self.doctreedir, all_docs=self.all_docs, dependencies=self.dependencies, ): changed.add(docname) continue return added, changed, removed def check_dependents(self, app: Sphinx, already: set[str]) -> Iterator[str]: to_rewrite: list[str] = [] for docnames in self.events.emit('env-get-updated', self): to_rewrite.extend(docnames) for docname in set(to_rewrite): if docname not in already: yield docname # --------- SINGLE FILE READING -------------------------------------------- def prepare_settings(self, docname: str) -> None: """Prepare to set up environment for reading.""" self.current_document = _CurrentDocument( docname=docname, # defaults to the global default, but can be re-set in a document default_role=self.config.default_role, default_domain=self.domains.get(self.config.primary_domain), ) # utilities to use while reading a document @property def temp_data(self) -> _CurrentDocument: """Returns the temporary data storage for the current document. Kept for backwards compatibility. """ return self.current_document @property def docname(self) -> str: """Returns the docname of the document currently being parsed.""" return self.current_document.docname @property def parser(self) -> Parser: """Returns the parser being used for to parse the current document.""" if (parser := self.current_document._parser) is not None: return parser msg = 'parser' raise KeyError(msg) def new_serialno(self, category: str = '') -> int: """Return a serial number, e.g. for index entry targets. The number is guaranteed to be unique in the current document. """ return self.current_document.new_serial_number(category) def note_dependency( self, filename: str | os.PathLike[str], *, docname: str | None = None ) -> None: """Add *filename* as a dependency of the current document. This means that the document will be rebuilt if this file changes. *filename* should be absolute or relative to the source directory. """ if docname is None: docname = self.docname # this will do the right thing when *filename* is absolute too filename = self.srcdir / filename self.dependencies.setdefault(docname, set()).add(filename) def note_included(self, filename: str | os.PathLike[str]) -> None: """Add *filename* as a included from other document. This means the document is not orphaned. *filename* should be absolute or relative to the source directory. """ doc = self.path2doc(filename) if doc: self.included.setdefault(self.docname, set()).add(doc) def note_reread(self) -> None: """Add the current document to the list of documents that will automatically be re-read at the next build. """ self.reread_always.add(self.docname) def get_domain(self, domainname: str) -> Domain: """Return the domain instance with the specified name. Raises an ExtensionError if the domain is not registered. 
""" try: return self.domains[domainname] except KeyError as exc: msg = __('Domain %r is not registered') % domainname raise ExtensionError(msg) from exc # --------- RESOLVING REFERENCES AND TOCTREES ------------------------------ def get_doctree(self, docname: str) -> nodes.document: """Read the doctree for a file from the pickle and return it.""" try: serialised = self._pickled_doctree_cache[docname] except KeyError: filename = self.doctreedir / f'{docname}.doctree' with open(filename, 'rb') as f: serialised = self._pickled_doctree_cache[docname] = f.read() doctree = pickle.loads(serialised) doctree.settings.env = self doctree.reporter = LoggingReporter(str(self.doc2path(docname))) return doctree @functools.cached_property def master_doctree(self) -> nodes.document: return self.get_doctree(self.config.root_doc) def get_and_resolve_doctree( self, docname: str, builder: Builder, *, tags: Tags = ..., # type: ignore[assignment] doctree: nodes.document | None = None, prune_toctrees: bool = True, includehidden: bool = False, ) -> nodes.document: """Read the doctree from the pickle, resolve cross-references and toctrees and return it. """ if tags is ...: warnings.warn( "'tags' will become a required keyword argument " 'for global_toctree_for_doc() in Sphinx 11.0.', RemovedInSphinx11Warning, stacklevel=2, ) tags = builder.tags if doctree is None: try: doctree = self._write_doc_doctree_cache.pop(docname) doctree.settings.env = self doctree.reporter = LoggingReporter(str(self.doc2path(docname))) except KeyError: doctree = self.get_doctree(docname) # resolve all pending cross-references self.apply_post_transforms(doctree, docname) # now, resolve all toctree nodes for toctreenode in doctree.findall(addnodes.toctree): result = toctree_adapters._resolve_toctree( self, docname, builder, toctreenode, prune=prune_toctrees, includehidden=includehidden, tags=tags, ) if result is None: toctreenode.parent.replace(toctreenode, []) else: toctreenode.replace_self(result) return doctree def resolve_toctree( self, docname: str, builder: Builder, toctree: addnodes.toctree, prune: bool = True, maxdepth: int = 0, titles_only: bool = False, collapse: bool = False, includehidden: bool = False, ) -> Node | None: """Resolve a *toctree* node into individual bullet lists with titles as items, returning None (if no containing titles are found) or a new node. If *prune* is True, the tree is pruned to *maxdepth*, or if that is 0, to the value of the *maxdepth* option on the *toctree* node. If *titles_only* is True, only toplevel document titles will be in the resulting tree. If *collapse* is True, all branches not containing docname will be collapsed. 
""" return toctree_adapters._resolve_toctree( self, docname, builder, toctree, prune=prune, maxdepth=maxdepth, titles_only=titles_only, collapse=collapse, includehidden=includehidden, tags=self._tags, ) def resolve_references( self, doctree: nodes.document, fromdocname: str, builder: Builder ) -> None: self.apply_post_transforms(doctree, fromdocname) def apply_post_transforms(self, doctree: nodes.document, docname: str) -> None: """Apply all post-transforms.""" backup = self.current_document new = deepcopy(backup) new.docname = docname try: # set env.current_document.docname during applying post-transforms self.current_document = new transformer = SphinxTransformer(doctree) transformer.set_environment(self) transformer.add_transforms(self._registry.get_post_transforms()) transformer.apply_transforms() finally: self.current_document = backup # allow custom references to be resolved self.events.emit('doctree-resolved', doctree, docname) def collect_relations(self) -> dict[str, list[str | None]]: traversed: set[str] = set() relations = {} docnames = _traverse_toctree( traversed, None, self.config.root_doc, self.toctree_includes ) prev_doc = None parent, docname = next(docnames) for next_parent, next_doc in docnames: relations[docname] = [parent, prev_doc, next_doc] prev_doc = docname docname = next_doc parent = next_parent relations[docname] = [parent, prev_doc, None] return relations def check_consistency(self) -> None: """Do consistency checks.""" included = set().union(*self.included.values()) for docname in sorted(self.all_docs): if docname not in self.files_to_rebuild: if docname == self.config.root_doc: # the master file is not included anywhere ;) continue if docname in included: # the document is included from other documents continue if 'orphan' in self.metadata[docname]: continue logger.warning( __("document isn't included in any toctree"), location=docname, type='toc', subtype='not_included', ) # Call _check_toc_parents here rather than in _get_toctree_ancestors() # because that method is called multiple times per document and would # lead to duplicate warnings. 
_check_toc_parents(self.toctree_includes) # call check-consistency for all extensions self.domains._check_consistency() self.events.emit('env-check-consistency', self) def _get_env_version(extensions: Mapping[str, Extension]) -> Mapping[str, int]: env_version = { ext.name: ext_env_version for ext in extensions.values() if (ext_env_version := ext.metadata.get('env_version')) } env_version['sphinx'] = ENV_VERSION return env_version def _differing_config_keys(old: Config, new: Config) -> frozenset[str]: """Return a set of keys that differ between two config objects.""" old_vals = {c.name: c.value for c in old} new_vals = {c.name: c.value for c in new} not_in_both = old_vals.keys() ^ new_vals.keys() different_values = { key for key in old_vals.keys() & new_vals.keys() if stable_str(old_vals[key]) != stable_str(new_vals[key]) } return frozenset(not_in_both | different_values) def _has_doc_changed( docname: str, *, filename: Path, reread_always: Set[str], doctreedir: Path, all_docs: Mapping[str, int], dependencies: Mapping[str, Set[Path]], ) -> bool: # check the "reread always" list if docname in reread_always: logger.debug('[build target] changed %r: re-read forced', docname) return True # if the doctree file is not there, rebuild doctree_path = doctreedir / f'{docname}.doctree' if not doctree_path.is_file(): logger.debug('[build target] changed %r: doctree file does not exist', docname) return True # check the mtime of the document mtime = all_docs[docname] new_mtime = _last_modified_time(filename) if new_mtime > mtime: logger.debug( '[build target] changed: %r is outdated (%s -> %s)', docname, _format_rfc3339_microseconds(mtime), _format_rfc3339_microseconds(new_mtime), ) return True # finally, check the mtime of dependencies if docname not in dependencies: return False for dep_path in dependencies[docname]: try: dep_path_is_file = dep_path.is_file() except OSError: return True # give it another chance if not dep_path_is_file: logger.debug( '[build target] changed: %r is missing dependency %r', docname, dep_path, ) return True try: dep_mtime = _last_modified_time(dep_path) except OSError: return True # give it another chance if dep_mtime > mtime: logger.debug( '[build target] changed: %r is outdated due to dependency %r (%s -> %s)', docname, dep_path, _format_rfc3339_microseconds(mtime), _format_rfc3339_microseconds(dep_mtime), ) return True return False def _traverse_toctree( traversed: set[str], parent: str | None, docname: str, toctree_includes: dict[str, list[str]], ) -> Iterator[tuple[str | None, str]]: if parent == docname: logger.warning( __('self referenced toctree found. Ignored.'), location=docname, type='toc', subtype='circular', ) return # traverse toctree by pre-order yield parent, docname traversed.add(docname) for child in toctree_includes.get(docname, ()): for sub_parent, sub_docname in _traverse_toctree( traversed, docname, child, toctree_includes ): if sub_docname not in traversed: yield sub_parent, sub_docname traversed.add(sub_docname) def _check_toc_parents(toctree_includes: dict[str, list[str]]) -> None: toc_parents: dict[str, list[str]] = {} for parent, children in toctree_includes.items(): for child in children: toc_parents.setdefault(child, []).append(parent) for doc, parents in sorted(toc_parents.items()): if len(parents) > 1: logger.info( __( 'document is referenced in multiple toctrees: %s, selecting: %s <- %s' ), parents, max(parents), doc, location=doc, type='toc', subtype='multiple_toc_parents', )
BuildEnvironment
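As a usage sketch for the environment API documented above (note_dependency and friends): a custom directive can record an extra file dependency so Sphinx re-reads the document when that file changes. The directive name and the VERSION file path are illustrative assumptions.

from pathlib import Path
from docutils import nodes
from sphinx.util.docutils import SphinxDirective

class VersionStamp(SphinxDirective):
    # hypothetical directive that renders the contents of a VERSION file
    def run(self):
        self.env.note_dependency("VERSION")  # rebuild this doc when VERSION changes
        text = Path(self.env.srcdir, "VERSION").read_text().strip()
        return [nodes.literal(text=text)]

def setup(app):
    app.add_directive("versionstamp", VersionStamp)
    return {"parallel_read_safe": True}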
python
ray-project__ray
python/ray/serve/exceptions.py
{ "start": 202, "end": 279 }
class ____(Exception):
    pass


@PublicAPI(stability="alpha")
RayServeException
python
bokeh__bokeh
src/bokeh/models/annotations/html/labels.py
{ "start": 5818, "end": 8783 }
class ____(HTMLAnnotation, DataAnnotation):
    ''' Render multiple text labels as annotations.

    ``LabelSet`` will render multiple text labels at given ``x`` and ``y``
    coordinates, which can be in either screen (pixel) space, or data (axis
    range) space. In this case (as opposed to the single ``Label`` model),
    ``x`` and ``y`` can also be the name of a column from a
    :class:`~bokeh.models.sources.ColumnDataSource`, in which case the labels
    will be "vectorized" using coordinate values from the specified columns.

    The label can also be configured with a screen space offset from ``x`` and
    ``y``, by using the ``x_offset`` and ``y_offset`` properties. These offsets
    may be vectorized by giving the name of a data source column.

    Additionally, the label can be rotated with the ``angle`` property (which
    may also be a column name.)

    There are also standard text, fill, and line properties to control the
    appearance of the text, its background, as well as the rectangular bounding
    box border.

    The data source is provided by setting the ``source`` property.

    '''

    # explicit __init__ to support Init signatures
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)

    x = NumberSpec(default=field("x"), help="""
    The x-coordinates to locate the text anchors.
    """)

    x_units = Enum(CoordinateUnits, default='data', help="""
    The unit type for the ``xs`` attribute. Interpreted as |data units| by default.
    """)

    y = NumberSpec(default=field("y"), help="""
    The y-coordinates to locate the text anchors.
    """)

    y_units = Enum(CoordinateUnits, default='data', help="""
    The unit type for the ``ys`` attribute. Interpreted as |data units| by default.
    """)

    text = NullStringSpec(default=field("text"), help="""
    The text values to render.
    """)

    angle = AngleSpec(default=0, help="""
    The angles to rotate the text, as measured from the horizontal.
    """)

    x_offset = NumberSpec(default=0, help="""
    Offset values to apply to the x-coordinates.

    This is useful, for instance, if it is desired to "float" text a fixed
    distance in |screen units| from a given data position.
    """)

    y_offset = NumberSpec(default=0, help="""
    Offset values to apply to the y-coordinates.

    This is useful, for instance, if it is desired to "float" text a fixed
    distance in |screen units| from a given data position.
    """)

    text_props = Include(TextProps, help="""
    The {prop} values for the text.
    """)

    background_props = Include(FillProps, prefix="background", help="""
    The {prop} values for the text bounding box.
    """)

    background_fill_color = Override(default=None)

    border_props = Include(LineProps, prefix="border", help="""
    The {prop} values for the text bounding box.
    """)

    border_line_color = Override(default=None)
HTMLLabelSet
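A usage sketch for the vectorized label semantics described in the docstring above, using the standard canvas LabelSet from bokeh.models (the HTML variant in this row exposes the same x/y/text/offset properties); the data values are illustrative:

from bokeh.models import ColumnDataSource, LabelSet
from bokeh.plotting import figure, show

source = ColumnDataSource(data=dict(x=[1, 2, 3], y=[2, 5, 3], names=["A", "B", "C"]))

p = figure(x_range=(0, 4), y_range=(0, 6))
p.scatter(x="x", y="y", size=8, source=source)

# one label per row of the data source, offset a few pixels from each point
labels = LabelSet(x="x", y="y", text="names", source=source,
                  x_offset=5, y_offset=5)
p.add_layout(labels)
show(p)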
python
getsentry__sentry
src/sentry/api/endpoints/organization_plugins_index.py
{ "start": 684, "end": 2198 }
class ____(OrganizationEndpoint):
    owner = ApiOwner.INTEGRATIONS
    publish_status = {
        "GET": ApiPublishStatus.PRIVATE,
    }

    def get(self, request: Request, organization: Organization) -> Response:
        all_plugins = {p.slug: p for p in plugins.all()}

        if "plugins" in request.GET:
            if request.GET.get("plugins") == "_all":
                return Response(
                    serialize([p for p in plugins.all()], request.user, PluginSerializer())
                )
            desired_plugins = set(request.GET.getlist("plugins"))
        else:
            desired_plugins = set(all_plugins.keys())

        # Ignore plugins that are not available to this Sentry install.
        desired_plugins = desired_plugins & set(all_plugins.keys())

        # Each tuple represents an enabled Plugin (of only the ones we care
        # about) and its corresponding Project.
        enabled_plugins = ProjectOption.objects.filter(
            key__in=["%s:enabled" % slug for slug in desired_plugins],
            project__organization=organization,
        ).select_related("project")

        resources = []
        for project_option in enabled_plugins:
            resources.append(
                serialize(
                    all_plugins[project_option.key.split(":")[0]],
                    request.user,
                    OrganizationPluginSerializer(project_option.project),
                )
            )

        return Response(resources)
OrganizationPluginsEndpoint
python
google__flatbuffers
python/flatbuffers/builder.py
{ "start": 1837, "end": 1962 }
class ____(RuntimeError):
    """Error caused by not calling `Finish` before calling `Output`."""
    pass
BuilderNotFinishedError
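A sketch of when the error above is raised: Builder.Output() refuses to return the buffer until Finish() has marked it complete. The buffer contents here are illustrative (Finish is normally given a table offset; a string offset is used only to keep the sketch short).

import flatbuffers
from flatbuffers.builder import BuilderNotFinishedError

builder = flatbuffers.Builder(1024)
name = builder.CreateString("example")

try:
    builder.Output()          # buffer not finished yet
except BuilderNotFinishedError:
    pass

builder.Finish(name)          # mark the buffer as finished
buf = builder.Output()        # now returns the serialized bytes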
python
great-expectations__great_expectations
contrib/great_expectations_geospatial_expectations/great_expectations_geospatial_expectations/expectations/expect_column_values_geometry_to_intersect_shape.py
{ "start": 3296, "end": 10544 }
class ____(ColumnMapExpectation): """Expect that column values as geometries intersect a given reference shape. expect_column_values_geometry_to_intersect_shape is a \ [Column Map Expectation](https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_column_map_expectations). Args: column (str): \ The column name. \ Column values must be provided in WKT or WKB format, which are commom formats for GIS Database formats. \ WKT can be accessed thhrough the ST_AsText() or ST_AsBinary() functions in queries for PostGIS and MSSQL. Keyword Args: shape (str or list of str): \ The reference geometry shape_format (str): \ Geometry format for 'shape' string(s). Can be provided as 'Well Known Text' (WKT), 'Well Known Binary' (WKB), or as GeoJSON. \ Must be one of: [wkt, wkb, geojson]. Default: wkt column_shape_format (str): \ Geometry format for 'column'. Column values must be provided in WKT or WKB format, which are commom formats for GIS Database formats. WKT can be accessed thhrough the ST_AsText() or ST_AsBinary() functions in queries for PostGIS and MSSQL. Returns: An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result) Notes: * Convention is (X Y Z) for points, which would map to (Longitude Latitude Elevation) for geospatial cases. * Any convention can be followed as long as the test and reference shapes are consistent. * The reference shape allows for an array, but will union (merge) all the shapes into 1 and check the contains condition. """ # These examples will be shown in the public gallery. # They will also be executed as unit tests for your Expectation. examples = [ { "data": { "points_only": [ "POINT(1 1)", "POINT(2 2)", "POINT(6 4)", "POINT(3 9)", "POINT(8 9.999)", ], "points_and_lines": [ "POINT(1 1)", "POINT(2 2)", "POINT(6 4)", "POINT(3 9)", "LINESTRING(5 5, 8 10)", ], "points_and_lines_negative": [ "POINT(1 1)", "POINT(2 2)", "POINT(6 4)", "LINESTRING(5 5, 8 10)", "LINESTRING(11 5, 10 20)", ], }, "tests": [ { "title": "positive_test_with_points", "exact_match_out": False, "include_in_gallery": True, "in": { "column": "points_only", "shape": "POLYGON ((0 0, 0 10, 10 10, 10 0, 0 0))", "shape_format": "wkt", }, "out": { "success": True, }, }, { "title": "positive_test_with_points_and_lines", "exact_match_out": False, "include_in_gallery": True, "in": { "column": "points_and_lines", "shape": "POLYGON ((0 0, 0 10, 10 10, 10 0, 0 0))", "shape_format": "wkt", }, "out": { "success": True, }, }, { "title": "positive_test_with_points_wkb_reference_shape", "exact_match_out": False, "include_in_gallery": True, "in": { "column": "points_only", "shape": "010300000001000000050000000000000000000000000000000000000000000000000000000000000000002440000000000000244000000000000024400000000000002440000000000000000000000000000000000000000000000000", "shape_format": "wkb", }, "out": { "success": True, }, }, { "title": "positive_test_with_points_geojson_reference_shape", "exact_match_out": False, "include_in_gallery": True, "in": { "column": "points_only", "shape": '{"type":"Polygon","coordinates":[[[0.0,0.0],[0.0,10.0],[10.0,10.0],[10.0,0.0],[0.0,0.0]]]}', "shape_format": "geojson", }, "out": { "success": True, }, }, { "title": "negative_test_with_points", "exact_match_out": False, "include_in_gallery": True, "in": { "column": "points_only", "shape": "POLYGON ((0 0, 0 7.5, 7.5 7.5, 7.5 0, 0 0))", "shape_format": "wkt", }, "out": {"success": False, "unexpected_index_list": [3, 4]}, }, { "title": 
"negative_test_with_points_and_lines", "exact_match_out": False, "include_in_gallery": True, "in": { "column": "points_and_lines_negative", "shape": "POLYGON ((0 0, 0 10, 10 10, 10 0, 0 0))", "shape_format": "wkt", }, "out": {"success": False, "unexpected_index_list": [4]}, }, ], } ] # This is the id string of the Metric used by this Expectation. # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above. map_metric = "column_values.geometry.intersects_shape" # This is a list of parameter names that can affect whether the Expectation evaluates to True or False success_keys = ("mostly", "shape", "shape_format", "column_shape_format") # This dictionary contains default values for any parameters that should have default values default_kwarg_values = { "mostly": 1, "shape_format": "wkt", "column_shape_format": "wkt", } # This object contains metadata for display in the public Gallery library_metadata = { "tags": [ "geospatial", "hackathon-22", ], # Tags for this Expectation in the Gallery "contributors": [ # Github handles for all contributors to this Expectation. "@pjdobson", # Don't forget to add your github handle here! ], "requirements": ["pygeos"], } if __name__ == "__main__": ExpectColumnValuesGeometryToIntersectShape().print_diagnostic_checklist()
ExpectColumnValuesGeometryToIntersectShape
python
pytorch__pytorch
test/distributed/fsdp/test_fsdp_dtensor_state_dict.py
{ "start": 2064, "end": 13054 }
class ____(DTensorTestBase): def _create_model(self, is_even_sharded_model, device_mesh=None): dummy_model = ( TestDummyModel() if is_even_sharded_model else TestDummyModelUneven() ) model = FSDP(dummy_model.to(device_type), device_mesh=device_mesh) optim = torch.optim.Adam(model.parameters(), lr=0.1) model(model.get_input()).sum().backward() optim.step() return model, optim @with_comms @skip_if_lt_x_gpu(2) @parametrize("is_even_sharded_model", [True, False]) def test_fsdp_init_with_device_mesh(self, is_even_sharded_model): device_mesh = init_device_mesh(device_type.type, (self.world_size,)) model, optim = self._create_model(is_even_sharded_model, device_mesh) FSDP.set_state_dict_type( model, StateDictType.SHARDED_STATE_DICT, ) state_dict = model.state_dict() optim_state_dict = FSDP.optim_state_dict(model, optim) for v in state_dict.values(): self.assertEqual(type(v), DTensor) self.assertEqual(len(v.placements), 1) self.assertEqual(v.placements[0], (Shard(dim=0))) self.assertEqual(v.device_mesh, device_mesh) for state in optim_state_dict["state"].values(): for k, v in state.items(): if k != "step": self.assertEqual(type(v), DTensor) self.assertEqual(len(v.placements), 1) self.assertEqual(v.placements[0], (Shard(dim=0))) self.assertEqual(v.device_mesh, device_mesh) state_dict_type = FSDP.get_state_dict_type(model) # If device_mesh is used when initializing FSDP, the field _use_dtensor will # automatically be set to True if StateDictType is set to SHARDED_STATE_DICT. self.assertEqual(state_dict_type.state_dict_config._use_dtensor, True) self.assertEqual(state_dict_type.optim_state_dict_config._use_dtensor, True) @with_comms @skip_if_lt_x_gpu(2) @parametrize("offload_to_cpu", [True, False]) @parametrize("is_even_sharded_model", [True, False]) def test_dtensor_sharded_tensor_state_dict_identical( self, offload_to_cpu, is_even_sharded_model ): device_mesh = init_device_mesh(device_type.type, (self.world_size,)) model, optim = self._create_model(is_even_sharded_model, device_mesh) FSDP.set_state_dict_type( model, StateDictType.SHARDED_STATE_DICT, state_dict_config=ShardedStateDictConfig(offload_to_cpu=offload_to_cpu), optim_state_dict_config=ShardedOptimStateDictConfig( offload_to_cpu=offload_to_cpu ), ) dtensor_sd = model.state_dict() dtensor_osd = FSDP.optim_state_dict(model, optim) ref_model, ref_optim = self._create_model(is_even_sharded_model) FSDP.set_state_dict_type( ref_model, StateDictType.SHARDED_STATE_DICT, state_dict_config=ShardedStateDictConfig(offload_to_cpu=offload_to_cpu), optim_state_dict_config=ShardedOptimStateDictConfig( offload_to_cpu=offload_to_cpu ), ) sharded_tensor_sd = ref_model.state_dict() sharded_tensor_osd = FSDP.optim_state_dict(ref_model, ref_optim) # Check dtensor and sharded_tensor model state dict values are identical for dtensor_sd_item, sharded_tensor_sd_item in zip( dtensor_sd.items(), sharded_tensor_sd.items() ): k1, v1 = dtensor_sd_item k2, v2 = sharded_tensor_sd_item self.assertEqual(k1, k2) # if the ShardedTensor is an empty shard, # then the local tensor of DTensor should be local_tensor=tensor([]) if len(v2.local_shards()) == 0: self.assertEqual(v1.to_local().numel(), 0) else: self.assertEqual(type(v1), DTensor) self.assertEqual(type(v2), ShardedTensor) # check whether local_tensor are the same self.assertEqual(v1.to_local(), v2.local_tensor()) # check whether device are the same self.assertEqual(v1.to_local().device, v2.local_tensor().device) # Check dtensor and sharde_tensor optim state dict values are identical for dtensor_osd_state, 
sharded_tensor_osd_state in zip( dtensor_osd["state"].items(), sharded_tensor_osd["state"].items() ): # check FQN are the same self.assertEqual(dtensor_osd_state[0], sharded_tensor_osd_state[0]) for dtensor_hyper_param, sharded_tensor_hyper_param in zip( dtensor_osd_state[1].items(), sharded_tensor_osd_state[1].items(), ): k1, v1 = dtensor_hyper_param k2, v2 = sharded_tensor_hyper_param self.assertEqual(k1, k2) if k1 != "step": # if the ShardedTensor is an empty shard, # then the local tensor of DTensor should be local_tensor=tensor([]) if len(v2.local_shards()) == 0: self.assertEqual(v1.to_local().numel(), 0) else: self.assertEqual(type(v1), DTensor) self.assertEqual(type(v2), ShardedTensor) # check whether local_tensor are the same self.assertEqual(v1.to_local(), v2.local_tensor()) # check whether device are the same self.assertEqual(v1.to_local().device, v2.local_tensor().device) else: self.assertEqual(v1, v2) @with_comms @skip_if_lt_x_gpu(2) @parametrize("offload_to_cpu", [True, False]) @parametrize("is_even_sharded_model", [True, False]) def test_dtensor_sharded_optim_load_state_dict( self, offload_to_cpu, is_even_sharded_model ): device_mesh = init_device_mesh(device_type.type, (self.world_size,)) model, optim = self._create_model(is_even_sharded_model, device_mesh) FSDP.set_state_dict_type( model, StateDictType.SHARDED_STATE_DICT, optim_state_dict_config=ShardedOptimStateDictConfig( offload_to_cpu=offload_to_cpu ), ) checkpoint = io.BytesIO() torch.save(FSDP.optim_state_dict(model, optim), checkpoint) # Deepcopy to save current optim_state_dict to compare with the optim_state_dict loaded back below. ref_optim_state_dict = deepcopy(FSDP.optim_state_dict(model, optim)) # Update the parameters so FSDP.optim_state_dict() will be different from ref_optim_state_dict. model(model.get_input()).sum().backward() optim.step() # Load ref_optim_state_dict back. checkpoint.seek(0) load_ref_optim_state_dict = torch.load(checkpoint) optim.load_state_dict( FSDP.optim_state_dict_to_load(model, optim, load_ref_optim_state_dict) ) new_optim_state_dict = FSDP.optim_state_dict(model, optim) # Check whether new_optim_state_dict is the same as ref_optim_state_dict. for new_optim_state_dict_item, ref_optim_state_dict_item in zip( new_optim_state_dict["state"].items(), ref_optim_state_dict["state"].items(), ): # check FQN are the same self.assertEqual(new_optim_state_dict_item[0], ref_optim_state_dict_item[0]) for new_optim_hyper_param, ref_optim_hyper_param in zip( new_optim_state_dict_item[1].items(), ref_optim_state_dict_item[1].items(), ): k1, v1 = new_optim_hyper_param k2, v2 = ref_optim_hyper_param # check whether keys are the same self.assertEqual(k1, k2) # check whether values are the same self.assertEqual(v1, v2) if k1 != "step": self.assertEqual(type(v1), DTensor) self.assertEqual(type(v2), DTensor) @with_comms() @skip_if_lt_x_gpu(2) @parametrize("offload_to_cpu", [True, False]) @parametrize("is_even_sharded_model", [True, False]) def test_dtensor_sharded_model_load_state_dict( self, offload_to_cpu, is_even_sharded_model ): device_mesh = init_device_mesh(device_type.type, (self.world_size,)) model, optim = self._create_model(is_even_sharded_model, device_mesh) FSDP.set_state_dict_type( model, StateDictType.SHARDED_STATE_DICT, state_dict_config=ShardedStateDictConfig(offload_to_cpu=offload_to_cpu), ) checkpoint = io.BytesIO() torch.save(model.state_dict(), checkpoint) # Deepcopy to save current state_dict to compare with the state_dict loaded back below. 
ref_state_dict = deepcopy(model.state_dict()) # Update the parameters so model.state_dict() will be different from ref_dtensor_sd. model(model.get_input()).sum().backward() optim.step() # Load ref_state_dict back. checkpoint.seek(0) load_ref_state_dict = torch.load(checkpoint) model.load_state_dict(load_ref_state_dict) new_state_dict = model.state_dict() # Check whether new_state_dict is the same as ref_state_dict. for (k1, v1), (k2, v2) in zip(ref_state_dict.items(), new_state_dict.items()): # check whether fqn are the same self.assertEqual(k1, k2) self.assertEqual(type(v1), DTensor) self.assertEqual(type(v2), DTensor) # check whether DTensor are the same self.assertEqual(v1, v2) @with_comms @skip_if_lt_x_gpu(4) def test_raises_warning_or_errors(self): device_mesh = init_device_mesh(device_type.type, (self.world_size,)) model, optim = self._create_model( is_even_sharded_model=True, device_mesh=device_mesh ) # initialize optim model(model.get_input()).sum().backward() optim.step() with self.assertRaisesRegex( RuntimeError, "DeviceMesh is not compatible with LOCAL_STATE_DICT." ): with FSDP.state_dict_type(model, StateDictType.LOCAL_STATE_DICT): model.state_dict() with self.assertRaisesRegex( RuntimeError, "DeviceMesh is not compatible with LOCAL_STATE_DICT." ): with FSDP.state_dict_type(model, StateDictType.LOCAL_STATE_DICT): FSDP.optim_state_dict(model, optim) devices = ("cuda", "hpu", "xpu") instantiate_device_type_tests( TestFSDPWithDeviceMeshAndDTensor, globals(), only_for=devices, allow_xpu=True ) if __name__ == "__main__": run_tests()
TestFSDPWithDeviceMeshAndDTensor
python
huggingface__transformers
src/transformers/models/video_llama_3/modular_video_llama_3.py
{ "start": 66426, "end": 74592 }
class ____(Qwen2VLVideoProcessor): use_token_compression = True image_mean = IMAGENET_STANDARD_MEAN image_std = IMAGENET_STANDARD_STD temporal_patch_size = 1 max_frames = 180 return_metadata = True valid_kwargs = VideoLlama3VideoProcessorInitKwargs model_input_names = ["pixel_values_videos", "video_grid_thw", "video_merge_sizes", "video_compression_mask"] def _get_compression_mask( self, pixel_values_videos: torch.FloatTensor, video_grid_thw: torch.LongTensor, video_merge_sizes: torch.LongTensor, threshold: Optional[float] = 0.1, min_tokens: Optional[int] = 1, ) -> torch.BoolTensor: """ Get the compression mask for video tokens based on pixel differences. Args: pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): The tensors corresponding to the input videos. video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): The temporal, height and width of feature shape of each video in LLM. video_merge_sizes (`torch.Tensor` of shape `(num_videos,)`): The spatial downsampling ratio of each video feature. threshold (`float`, *optional*, defaults to 0.1): The threshold to determine whether a token should be kept based on pixel differences. min_tokens (`int`, *optional*, defaults to 1): The minimum number of tokens to keep for each frame. """ videos = pixel_values_videos.split(video_grid_thw.prod(dim=1).tolist(), dim=0) compression_masks = [] for images, grid_size, merge_size in zip(videos, video_grid_thw, video_merge_sizes): t, h, w = grid_size if t == 1: num_tokens = images.size(0) // (merge_size**2) compression_masks.append(torch.ones((num_tokens,), dtype=torch.bool, device=images.device)) else: # NOTE: video token compressor images = images.view(t, (h // merge_size) * (w // merge_size), -1) pixel_diff = images[1:] - images[:-1] pixel_diff = torch.abs(pixel_diff).mean(dim=-1) * 255 pixel_diff = torch.cat([torch.full_like(pixel_diff[0:1], threshold + 1), pixel_diff], dim=0) mask = pixel_diff > threshold padding_ids = torch.nonzero(mask.sum(dim=1) < min_tokens)[:, 0] mask[padding_ids, :min_tokens] = 1 compression_masks.append(mask.flatten()) return torch.cat(compression_masks) def _preprocess( self, videos: list["torch.Tensor"], do_convert_rgb: bool, do_resize: bool, size: SizeDict, interpolation: Optional["F.InterpolationMode"], do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], min_pixels: Optional[int] = None, max_pixels: Optional[int] = None, patch_size: Optional[int] = None, temporal_patch_size: Optional[int] = None, merge_size: Optional[int] = None, use_token_compression: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, device: Optional["torch.Tensor"] = None, **kwargs, ): # Group videos by size for batched resizing grouped_videos, grouped_videos_index = group_videos_by_shape(videos) resized_videos_grouped = {} for shape, stacked_videos in grouped_videos.items(): height, width = get_image_size(stacked_videos[0], channel_dim=ChannelDimension.FIRST) resized_height, resized_width = height, width if do_resize: resized_height, resized_width = smart_resize( height, width, factor=patch_size * merge_size, min_pixels=min_pixels, max_pixels=max_pixels // shape[0], ) stacked_videos = self.resize( image=stacked_videos, size=SizeDict(height=resized_height, width=resized_width), interpolation=interpolation, ) resized_videos_grouped[shape] = stacked_videos resized_videos = 
reorder_videos(resized_videos_grouped, grouped_videos_index) # Group videos by size for further processing # Needed in case do_resize is False, or resize returns videos with different sizes grouped_videos, grouped_videos_index = group_videos_by_shape(resized_videos) processed_videos_grouped = {} processed_grids = {} for shape, stacked_videos in grouped_videos.items(): resized_height, resized_width = get_image_size(stacked_videos[0], channel_dim=ChannelDimension.FIRST) # Fused rescale and normalize stacked_videos = self.rescale_and_normalize( stacked_videos, do_rescale, rescale_factor, do_normalize, image_mean, image_std ) patches = stacked_videos # Check that videos have `num_frames` divisible by `temporal_patch_size` if patches.shape[1] % temporal_patch_size != 0: repeats = patches[:, -1:].repeat(1, self.temporal_patch_size - 1, 1, 1, 1) patches = torch.cat([patches, repeats], dim=1) batch_size, grid_t, channel = patches.shape[:3] grid_t = grid_t // temporal_patch_size grid_h, grid_w = resized_height // patch_size, resized_width // patch_size patches = patches.view( batch_size, grid_t, temporal_patch_size, channel, grid_h // merge_size, merge_size, patch_size, grid_w // merge_size, merge_size, patch_size, ) patches = patches.permute(0, 1, 4, 7, 5, 8, 3, 2, 6, 9) flatten_patches = patches.reshape( batch_size, grid_t * grid_h * grid_w, channel * temporal_patch_size * patch_size * patch_size, ) processed_videos_grouped[shape] = flatten_patches processed_grids[shape] = [[grid_t, grid_h, grid_w]] * batch_size processed_videos = reorder_videos(processed_videos_grouped, grouped_videos_index) processed_grids = reorder_videos(processed_grids, grouped_videos_index) pixel_values_videos = torch.cat(processed_videos, dim=0) video_grid_thw = torch.tensor(processed_grids) video_merge_sizes = torch.tensor([merge_size] * video_grid_thw.size(0)).to(video_grid_thw) if use_token_compression: video_compression_mask = self._get_compression_mask( pixel_values_videos=pixel_values_videos, video_grid_thw=video_grid_thw, video_merge_sizes=video_merge_sizes, ) else: num_video_tokens = video_grid_thw.prod(-1).sum() // (merge_size**2) video_compression_mask = torch.ones( (num_video_tokens,), dtype=torch.bool, device=pixel_values_videos.device ) return BatchFeature( data={ "pixel_values_videos": pixel_values_videos, "video_grid_thw": video_grid_thw, "video_merge_sizes": video_merge_sizes, "video_compression_mask": video_compression_mask, }, tensor_type=return_tensors, ) __all__ = [ "VideoLlama3VisionConfig", "VideoLlama3Config", "VideoLlama3VisionModel", "VideoLlama3PreTrainedModel", "VideoLlama3Model", "VideoLlama3ForConditionalGeneration", "VideoLlama3Processor", "VideoLlama3ImageProcessor", "VideoLlama3ImageProcessorFast", "VideoLlama3VideoProcessor", ]
VideoLlama3VideoProcessor
python
pallets__werkzeug
src/werkzeug/wsgi.py
{ "start": 10226, "end": 11762 }
class ____:
    """This class can be used to convert a :class:`file`-like object into
    an iterable. It yields `buffer_size` blocks until the file is fully read.

    You should not use this class directly but rather use the
    :func:`wrap_file` function that uses the WSGI server's file wrapper
    support if it's available.

    .. versionadded:: 0.5

    If you're using this object together with a :class:`Response` you have
    to use the `direct_passthrough` mode.

    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
    :param buffer_size: number of bytes for one iteration.
    """

    def __init__(self, file: t.IO[bytes], buffer_size: int = 8192) -> None:
        self.file = file
        self.buffer_size = buffer_size

    def close(self) -> None:
        if hasattr(self.file, "close"):
            self.file.close()

    def seekable(self) -> bool:
        if hasattr(self.file, "seekable"):
            return self.file.seekable()
        if hasattr(self.file, "seek"):
            return True
        return False

    def seek(self, *args: t.Any) -> None:
        if hasattr(self.file, "seek"):
            self.file.seek(*args)

    def tell(self) -> int | None:
        if hasattr(self.file, "tell"):
            return self.file.tell()
        return None

    def __iter__(self) -> FileWrapper:
        return self

    def __next__(self) -> bytes:
        data = self.file.read(self.buffer_size)
        if data:
            return data
        raise StopIteration()
FileWrapper
python
pytorch__pytorch
test/dynamo/test_streams.py
{ "start": 15635, "end": 16236 }
class ____(torch.nn.Module): def forward(self, primals_1: "f32[2, 2]", primals_2: "f32[2, 2]"): # Annotation: {'stream': 1} mul: "f32[2, 2]" = torch.ops.aten.mul.Tensor(primals_1, 2); primals_1 = None add: "f32[2, 2]" = torch.ops.aten.add.Tensor(mul, primals_2) # Annotation: {'stream': 0} add_1: "f32[2, 2]" = torch.ops.aten.add.Tensor(mul, primals_2); mul = primals_2 = None return (add, add_1) """, ) actual[1].sum().backward() self.assertExpectedInline( print_graph(bw_graphs[0]), """\
GraphModule
python
tensorflow__tensorflow
tensorflow/python/keras/layers/convolutional.py
{ "start": 114570, "end": 119165 }
class ____(Layer): """Zero-padding layer for 2D input (e.g. picture). This layer can add rows and columns of zeros at the top, bottom, left and right side of an image tensor. Examples: >>> input_shape = (1, 1, 2, 2) >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) >>> print(x) [[[[0 1] [2 3]]]] >>> y = tf.keras.layers.ZeroPadding2D(padding=1)(x) >>> print(y) tf.Tensor( [[[[0 0] [0 0] [0 0] [0 0]] [[0 0] [0 1] [2 3] [0 0]] [[0 0] [0 0] [0 0] [0 0]]]], shape=(1, 3, 4, 2), dtype=int64) Args: padding: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints. - If int: the same symmetric padding is applied to height and width. - If tuple of 2 ints: interpreted as two different symmetric padding values for height and width: `(symmetric_height_pad, symmetric_width_pad)`. - If tuple of 2 tuples of 2 ints: interpreted as `((top_pad, bottom_pad), (left_pad, right_pad))` data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch_size, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch_size, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". Input shape: 4D tensor with shape: - If `data_format` is `"channels_last"`: `(batch_size, rows, cols, channels)` - If `data_format` is `"channels_first"`: `(batch_size, channels, rows, cols)` Output shape: 4D tensor with shape: - If `data_format` is `"channels_last"`: `(batch_size, padded_rows, padded_cols, channels)` - If `data_format` is `"channels_first"`: `(batch_size, channels, padded_rows, padded_cols)` """ def __init__(self, padding=(1, 1), data_format=None, **kwargs): super(ZeroPadding2D, self).__init__(**kwargs) self.data_format = conv_utils.normalize_data_format(data_format) if isinstance(padding, int): self.padding = ((padding, padding), (padding, padding)) elif hasattr(padding, '__len__'): if len(padding) != 2: raise ValueError('`padding` should have two elements. ' 'Found: ' + str(padding)) height_padding = conv_utils.normalize_tuple(padding[0], 2, '1st entry of padding') width_padding = conv_utils.normalize_tuple(padding[1], 2, '2nd entry of padding') self.padding = (height_padding, width_padding) else: raise ValueError('`padding` should be either an int, ' 'a tuple of 2 ints ' '(symmetric_height_pad, symmetric_width_pad), ' 'or a tuple of 2 tuples of 2 ints ' '((top_pad, bottom_pad), (left_pad, right_pad)). 
' 'Found: ' + str(padding)) self.input_spec = InputSpec(ndim=4) def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if self.data_format == 'channels_first': if input_shape[2] is not None: rows = input_shape[2] + self.padding[0][0] + self.padding[0][1] else: rows = None if input_shape[3] is not None: cols = input_shape[3] + self.padding[1][0] + self.padding[1][1] else: cols = None return tensor_shape.TensorShape( [input_shape[0], input_shape[1], rows, cols]) elif self.data_format == 'channels_last': if input_shape[1] is not None: rows = input_shape[1] + self.padding[0][0] + self.padding[0][1] else: rows = None if input_shape[2] is not None: cols = input_shape[2] + self.padding[1][0] + self.padding[1][1] else: cols = None return tensor_shape.TensorShape( [input_shape[0], rows, cols, input_shape[3]]) def call(self, inputs): return backend.spatial_2d_padding( inputs, padding=self.padding, data_format=self.data_format) def get_config(self): config = {'padding': self.padding, 'data_format': self.data_format} base_config = super(ZeroPadding2D, self).get_config() return dict(list(base_config.items()) + list(config.items()))
ZeroPadding2D
python
numba__numba
numba/cuda/tests/cudapy/test_boolean.py
{ "start": 194, "end": 547 }
class ____(CUDATestCase): def test_boolean(self): func = cuda.jit('void(float64[:], bool_)')(boolean_func) A = np.array([0], dtype='float64') func[1, 1](A, True) self.assertTrue(A[0] == 123) func[1, 1](A, False) self.assertTrue(A[0] == 321) if __name__ == '__main__': unittest.main()
TestCudaBoolean
python
walkccc__LeetCode
solutions/2656. Maximum Sum With Exactly K Elements/2656.py
{ "start": 0, "end": 117 }
class ____: def maximizeSum(self, nums: list[int], k: int) -> int: return max(nums) * k + k * (k - 1) // 2
Solution
python
tensorflow__tensorflow
tensorflow/python/util/fast_module_type_test.py
{ "start": 1391, "end": 3180 }
class ____(test.TestCase): def testAttributeAccessBeforeSuperInitDoesNotCrash(self): # Tests that the attribute access before super().__init__() does not crash. module = EarlyAttrAccessModule("early_attr") self.assertEqual(1, module.some_attr) def testMissingModuleNameCallDoesNotCrash(self): with self.assertRaises(TypeError): ChildFastModule() def testBaseGetattribute(self): # Tests that the default attribute lookup works. module = ChildFastModule("test") module.foo = 1 self.assertEqual(1, module.foo) def testGetattributeCallback(self): # Tests that functionality of __getattribute__ can be set as a callback. module = ChildFastModule("test") FastModuleType.set_getattribute_callback(module, ChildFastModule._getattribute1) self.assertEqual(2, module.foo) def testGetattrCallback(self): # Tests that functionality of __getattr__ can be set as a callback. module = ChildFastModule("test") FastModuleType.set_getattribute_callback(module, ChildFastModule._getattribute2) FastModuleType.set_getattr_callback(module, ChildFastModule._getattr) self.assertEqual(3, module.foo) def testFastdictApis(self): module = ChildFastModule("test") # At first "bar" does not exist in the module's attributes self.assertFalse(module._fastdict_key_in("bar")) with self.assertRaisesRegex(KeyError, "module has no attribute 'bar'"): module._fastdict_get("bar") module._fastdict_insert("bar", 1) # After _fastdict_insert() the attribute is added. self.assertTrue(module._fastdict_key_in("bar")) self.assertEqual(1, module.bar) if __name__ == "__main__": test.main()
FastModuleTypeTest
python
getsentry__sentry
src/sentry/replays/endpoints/project_replay_viewed_by.py
{ "start": 1387, "end": 6994 }
class ____(ProjectEndpoint): owner = ApiOwner.REPLAY publish_status = {"GET": ApiPublishStatus.PUBLIC, "POST": ApiPublishStatus.PRIVATE} permission_classes = (ProjectEventPermission,) @extend_schema( operation_id="List Users Who Have Viewed a Replay", parameters=[ GlobalParams.ORG_ID_OR_SLUG, GlobalParams.PROJECT_ID_OR_SLUG, ReplayParams.REPLAY_ID, ], responses={ 200: inline_sentry_response_serializer("GetReplayViewedBy", ReplayViewedByResponse), 400: RESPONSE_BAD_REQUEST, 403: RESPONSE_FORBIDDEN, 404: RESPONSE_NOT_FOUND, }, examples=ReplayExamples.GET_REPLAY_VIEWED_BY, ) def get(self, request: Request, project: Project, replay_id: str) -> Response: """Return a list of users who have viewed a replay.""" if not features.has( "organizations:session-replay", project.organization, actor=request.user ): return Response(status=404) try: uuid.UUID(replay_id) except ValueError: return Response(status=404) # query for user ids who viewed the replay filter_params = self.get_filter_params(request, project, date_filter_optional=False) # If no rows were found then the replay does not exist and a 404 is returned. viewed_by_ids_response: list[dict[str, Any]] = query_replay_viewed_by_ids( project_id=project.id, replay_id=replay_id, start=filter_params["start"], end=filter_params["end"], request_user_id=request.user.id, organization=project.organization, ) if not viewed_by_ids_response: return Response(status=404) viewed_by_ids = viewed_by_ids_response[0]["viewed_by_ids"] if viewed_by_ids == []: return Response({"data": {"viewed_by": []}}, status=200) serialized_users = user_service.serialize_many( filter=dict(user_ids=viewed_by_ids, organization_id=project.organization.id), as_user=serialize_generic_user(request.user), ) serialized_users = [_normalize_user(user) for user in serialized_users] return Response({"data": {"viewed_by": serialized_users}}, status=200) def post(self, request: Request, project: Project, replay_id: str) -> Response: """Create a replay-viewed event.""" if not request.user.is_authenticated: return Response(status=400) if not features.has( "organizations:session-replay", project.organization, actor=request.user ): return Response(status=404) try: replay_id = str(uuid.UUID(replay_id)) except ValueError: return Response(status=404) user_orgs = user_service.get_organizations(user_id=request.user.id) if project.organization.id not in [org.id for org in user_orgs]: # If the user is not in the same organization as the replay, we don't need to do anything. return Response(status=204) # make a query to avoid overwriting the `finished_at` column filter_params = self.get_filter_params(request, project, date_filter_optional=False) finished_at_response = execute_query( query=make_full_aggregation_query( fields=["finished_at"], replay_ids=[replay_id], project_ids=[project.id], period_start=filter_params["start"], period_end=filter_params["end"], request_user_id=request.user.id, ), tenant_id={"organization_id": project.organization.id} if project.organization else {}, referrer="replays.endpoints.viewed_by_post", )["data"] if not finished_at_response: return Response(status=404) finished_at = finished_at_response[0]["finished_at"] finished_at_ts = datetime.fromisoformat(finished_at).timestamp() message = viewed_event( project.id, replay_id, request.user.id, finished_at_ts, ) publish_replay_event(message, is_async=False) return Response(status=204) def _normalize_user(user: dict[str, Any]) -> dict[str, Any]: """Return a normalized user dictionary. 
The viewed-by resource is expected to return a subset of the user_service's response output. """ return { "avatar": { "avatarType": user["avatar"]["avatarType"], "avatarUuid": user["avatar"]["avatarUuid"], "avatarUrl": user["avatar"]["avatarUrl"], }, "avatarUrl": user["avatarUrl"], "dateJoined": user["dateJoined"], "email": user["email"], "emails": [ { "id": email["id"], "email": email["email"], "is_verified": email["is_verified"], } for email in user["emails"] ], "experiments": user["experiments"], "has2fa": user["has2fa"], "hasPasswordAuth": user["hasPasswordAuth"], "id": user["id"], "isActive": user["isActive"], "isManaged": user["isManaged"], "isStaff": user["isStaff"], "isSuperuser": user["isSuperuser"], "lastActive": user["lastActive"], "lastLogin": user["lastLogin"], "name": user["name"], "type": "user", "username": user["username"], }
ProjectReplayViewedByEndpoint
python
PyCQA__pylint
tests/functional/i/inner_classes.py
{ "start": 137, "end": 361 }
class ____: """docstring""" def __init__(self): self.__setattr__('a', 'b') def one_public(self): """docstring""" pass def another_public(self): """docstring""" pass
Aaa
python
numba__numba
numba/core/typing/builtins.py
{ "start": 4360, "end": 5570 }
class ____(AbstractTemplate): """ Given a heterogeneous pair, return the second element. """ key = "pair_second" def generic(self, args, kws): assert not kws [pair] = args if isinstance(pair, types.Pair): return signature(pair.second_type, pair) def choose_result_bitwidth(*inputs): return max(types.intp.bitwidth, *(tp.bitwidth for tp in inputs)) def choose_result_int(*inputs): """ Choose the integer result type for an operation on integer inputs, according to the integer typing NBEP. """ bitwidth = choose_result_bitwidth(*inputs) signed = any(tp.signed for tp in inputs) return types.Integer.from_bitwidth(bitwidth, signed) # The "machine" integer types to take into consideration for operator typing # (according to the integer typing NBEP) machine_ints = ( sorted(set((types.intp, types.int64))) + sorted(set((types.uintp, types.uint64))) ) # Explicit integer rules for binary operators; smaller ints will be # automatically upcast. integer_binop_cases = tuple( signature(choose_result_int(op1, op2), op1, op2) for op1, op2 in itertools.product(machine_ints, machine_ints) )
PairSecond
python
google__jax
tests/lax_scipy_special_functions_test.py
{ "start": 5839, "end": 16656 }
class ____(jtu.JaxTestCase): def _GetArgsMaker(self, rng, shapes, dtypes): return lambda: [rng(shape, dtype) for shape, dtype in zip(shapes, dtypes)] @parameterized.named_parameters(itertools.chain.from_iterable( map(_pretty_special_fun_name, jtu.sample_product_testcases( [dict(op=rec.name, rng_factory=rec.rng_factory, test_autodiff=rec.test_autodiff, nondiff_argnums=rec.nondiff_argnums)], shapes=itertools.combinations_with_replacement(all_shapes, rec.nargs), dtypes=(itertools.combinations_with_replacement(rec.dtypes, rec.nargs) if isinstance(rec.dtypes, list) else itertools.product(*rec.dtypes)), )) for rec in JAX_SPECIAL_FUNCTION_RECORDS )) @jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion. @jax.numpy_dtype_promotion('standard') # This test explicitly exercises dtype promotion def testScipySpecialFun(self, op, rng_factory, shapes, dtypes, test_autodiff, nondiff_argnums): scipy_op = getattr(osp_special, op) lax_op = getattr(lsp_special, op) rng = rng_factory(self.rng()) args_maker = self._GetArgsMaker(rng, shapes, dtypes) args = args_maker() self.assertAllClose(scipy_op(*args), lax_op(*args), atol=1e-3, rtol=1e-3, check_dtypes=False) self._CompileAndCheck(lax_op, args_maker, rtol=1e-4) if test_autodiff: def partial_lax_op(*vals): list_args = list(vals) for i in nondiff_argnums: list_args.insert(i, args[i]) return lax_op(*list_args) assert list(nondiff_argnums) == sorted(set(nondiff_argnums)) diff_args = [x for i, x in enumerate(args) if i not in nondiff_argnums] jtu.check_grads(partial_lax_op, diff_args, order=1, atol=.1 if jtu.test_device_matches(["tpu"]) else 1e-3, rtol=.1, eps=1e-3) @jtu.sample_product( n=[0, 1, 2, 3, 10, 50] ) def testScipySpecialFunBernoulli(self, n): dtype = jnp.zeros(0).dtype # default float dtype. scipy_op = lambda: osp_special.bernoulli(n).astype(dtype) lax_op = functools.partial(lsp_special.bernoulli, n) args_maker = lambda: [] self._CheckAgainstNumpy(scipy_op, lax_op, args_maker, atol=0, rtol=1E-5) self._CompileAndCheck(lax_op, args_maker, atol=0, rtol=1E-5) def testGammaSign(self): dtype = jnp.zeros(0).dtype # default float dtype. typ = dtype.type testcases = [ (np.arange(-10, 0).astype(dtype), np.array([np.nan] * 10, dtype=dtype)), (np.nextafter(np.arange(-5, 0).astype(dtype), typ(-np.inf)), np.array([1, -1, 1, -1, 1], dtype=dtype)), (np.nextafter(np.arange(-5, 0).astype(dtype), typ(np.inf)), np.array([-1, 1, -1, 1, -1], dtype=dtype)), (np.arange(0, 10).astype(dtype), np.ones((10,), dtype)), (np.nextafter(np.arange(0, 10).astype(dtype), typ(np.inf)), np.ones((10,), dtype)), (np.nextafter(np.arange(1, 10).astype(dtype), typ(-np.inf)), np.ones((9,), dtype)), (np.array([-np.inf, -0.0, 0.0, np.inf, np.nan]), np.array([np.nan, -1.0, 1.0, 1.0, np.nan])) ] for inp, out in testcases: self.assertArraysEqual(out, lsp_special.gammasgn(inp)) self.assertArraysEqual(out, jnp.sign(lsp_special.gamma(inp))) if jtu.parse_version(scipy.__version__) >= (1, 15): self.assertArraysEqual(out, osp_special.gammasgn(inp)) self.assertAllClose(osp_special.gammasgn(inp), lsp_special.gammasgn(inp)) def testNdtriExtremeValues(self): # Testing at the extreme values (bounds (0. and 1.) and outside the bounds). dtype = jnp.zeros(0).dtype # default float dtype. 
args_maker = lambda: [np.arange(-10, 10).astype(dtype)] rtol = 1E-3 if jtu.test_device_matches(["tpu"]) else 1e-5 self._CheckAgainstNumpy(osp_special.ndtri, lsp_special.ndtri, args_maker, rtol=rtol) self._CompileAndCheck(lsp_special.ndtri, args_maker, rtol=rtol) @parameterized.parameters([True, False]) def testNdtriDebugInfs(self, with_jit): # ref: https://github.com/jax-ml/jax/issues/29328 f = jax.jit(lsp_special.ndtri) if with_jit else lsp_special.ndtri with jax.debug_infs(True): f(0.5) # Doesn't crash with self.assertRaisesRegex(FloatingPointError, "invalid value \\(inf\\)"): f(1.0) with self.assertRaisesRegex(FloatingPointError, "invalid value \\(inf\\)"): f(0.0) def testRelEntrExtremeValues(self): # Testing at the extreme values (bounds (0. and 1.) and outside the bounds). dtype = jnp.zeros(0).dtype # default float dtype. args_maker = lambda: [np.array([-2, -2, -2, -1, -1, -1, 0, 0, 0]).astype(dtype), np.array([-1, 0, 1, -1, 0, 1, -1, 0, 1]).astype(dtype)] rtol = 1E-3 if jtu.test_device_matches(["tpu"]) else 1e-5 self._CheckAgainstNumpy(osp_special.rel_entr, lsp_special.rel_entr, args_maker, rtol=rtol) self._CompileAndCheck(lsp_special.rel_entr, args_maker, rtol=rtol) def testBetaParameterDeprecation(self): with self.assertNoWarnings(): lsp_special.beta(1, 1) lsp_special.beta(1, b=1) lsp_special.beta(a=1, b=1) with self.assertRaises(TypeError): lsp_special.beta(x=1, y=1) def testExpnTracerLeaks(self): # Regression test for https://github.com/jax-ml/jax/issues/26972 with jax.checking_leaks(): lsp_special.expi(jnp.ones(())) def testExpiDisableJit(self): # Regression test for https://github.com/jax-ml/jax/issues/27019 x = jnp.array([-0.5]) with jax.disable_jit(True): result_nojit = lsp_special.expi(x) with jax.disable_jit(False): result_jit = lsp_special.expi(x) self.assertAllClose(result_jit, result_nojit) def testGammaIncBoundaryValues(self): dtype = dtypes.default_float_dtype() nan = float('nan') inf = float('inf') if jtu.parse_version(scipy.__version__) >= (1, 16): a_samples = [0, 0, 0, 1, nan, 1, nan, 0, 1, 1, nan] x_samples = [0, 1, 2, 0, 1, nan, nan, inf, inf, -1, inf] else: # disable samples that contradict with scipy/scipy#22441 a_samples = [0, 0, 0, 1, nan, 1, nan, 0, 1, 1] x_samples = [0, 1, 2, 0, 1, nan, nan, inf, inf, -1] args_maker = lambda: (np.array(a_samples, dtype=dtype), np.array(x_samples, dtype=dtype)) rtol = 1E-3 if jtu.test_device_matches(["tpu"]) else 1e-5 self._CheckAgainstNumpy(lsp_special.gammainc, osp_special.gammainc, args_maker, rtol=rtol) self._CompileAndCheck(lsp_special.gammainc, args_maker, rtol=rtol) def testGammaIncCBoundaryValues(self): dtype = dtypes.default_float_dtype() nan = float('nan') inf = float('inf') if jtu.parse_version(scipy.__version__) >= (1, 16): a_samples = [0, 0, 0, 1, nan, 1, nan, 0, 1, 1, nan] x_samples = [0, 1, 2, 0, 1, nan, nan, inf, inf, -1, inf] else: # disable samples that contradict with scipy/scipy#22441 a_samples = [0, 0, 0, 1, nan, 1, nan, 0, 1, 1] x_samples = [0, 1, 2, 0, 1, nan, nan, inf, inf, -1] args_maker = lambda: (np.array(a_samples, dtype=dtype), np.array(x_samples, dtype=dtype)) rtol = 1E-3 if jtu.test_device_matches(["tpu"]) else 1e-5 self._CheckAgainstNumpy(lsp_special.gammaincc, osp_special.gammaincc, args_maker, rtol=rtol) self._CompileAndCheck(lsp_special.gammaincc, args_maker, rtol=rtol) def testBetaIncBoundaryValues(self): dtype = dtypes.default_float_dtype() fi = jax.numpy.finfo(dtype) nan = float('nan') inf = float('inf') tiny = fi.tiny eps = fi.eps if jtu.parse_version(scipy.__version__) >= (1, 16): # 
TODO(pearu): enable tiny samples when a fix to scipy/scipy#22682 # will be available a_samples = [nan, -0.5, inf, 0, eps, 1, tiny][:-1] b_samples = [nan, -0.5, inf, 0, eps, 1, tiny][:-1] elif jtu.parse_version(scipy.__version__) >= (1, 12): # disabled samples that contradict with scipy/scipy#22425 a_samples = [nan, -0.5, 0.5] b_samples = [nan, -0.5, 0.5] else: a_samples = [-0.5, 0.5] b_samples = [-0.5, 0.5] x_samples = [nan, -0.5, 0, 0.5, 1, 1.5] a_samples = np.array(a_samples, dtype=dtype) b_samples = np.array(b_samples, dtype=dtype) x_samples = np.array(x_samples, dtype=dtype) args_maker = lambda: np.meshgrid(a_samples, b_samples, x_samples) rtol = 1E-3 if jtu.test_device_matches(["tpu"]) else 5e-5 self._CheckAgainstNumpy(osp_special.betainc, lsp_special.betainc, args_maker, rtol=rtol) self._CompileAndCheck(lsp_special.betainc, args_maker, rtol=rtol) def testHyp2f1SpecialCases(self): dtype = dtypes.default_float_dtype() a_samples = np.array([0, 1, 1, 1, 1, 5, 5, 0.245, 0.45, 0.45, 2, 0.4, 0.32, 4, 4], dtype=dtype) b_samples = np.array([1, 0, 1, 1, 1, 1, 1, 3, 0.7, 0.7, 1, 0.7, 0.76, 2, 3], dtype=dtype) c_samples = np.array([1, 1, 0, 1, -1, 3, 3, 3, 0.45, 0.45, 5, 0.3, 0.11, 7, 7], dtype=dtype) x_samples = np.array([1, 1, 1, 0, 1, 0.5, 1, 0.35, 0.35, 1.5, 1, 0.4, 0.95, 0.95, 0.95], dtype=dtype) args_maker = lambda: (a_samples, b_samples, c_samples, x_samples) rtol = 1E-3 if jtu.test_device_matches(["tpu"]) else 5e-5 self._CheckAgainstNumpy(osp_special.hyp2f1, lsp_special.hyp2f1, args_maker, rtol=rtol) self._CompileAndCheck(lsp_special.hyp2f1, args_maker, rtol=rtol) def testSiciEdgeCases(self): dtype = jnp.zeros(0).dtype x_samples = np.array([0.0, np.inf, -np.inf], dtype=dtype) scipy_op = lambda x: osp_special.sici(x) lax_op = lambda x: lsp_special.sici(x) si_scipy, ci_scipy = scipy_op(x_samples) si_jax, ci_jax = lax_op(x_samples) expected_si = np.array([0.0, np.pi/2, -np.pi/2], dtype=dtype) expected_ci = np.array([-np.inf, 0.0, np.nan], dtype=dtype) self.assertAllClose(si_jax, si_scipy, atol=1e-6, rtol=1e-6) self.assertAllClose(ci_jax, ci_scipy, atol=1e-6, rtol=1e-6) self.assertAllClose(si_jax, expected_si, atol=1e-6, rtol=1e-6) self.assertAllClose(ci_jax, expected_ci, atol=1e-6, rtol=1e-6) @jtu.sample_product( scale=[1, 10, 1e9], shape=[(5,), (10,)] ) def testSiciValueRanges(self, scale, shape): rng = jtu.rand_default(self.rng(), scale=scale) args_maker = lambda: [rng(shape, jnp.float32)] rtol = 5e-3 if jtu.test_device_matches(["tpu"]) else 1e-6 self._CheckAgainstNumpy( osp_special.sici, lsp_special.sici, args_maker, rtol=rtol) def testSiciRaiseOnComplexInput(self): samples = jnp.arange(5, dtype=complex) with self.assertRaisesRegex(ValueError, "Argument `x` to sici must be real-valued."): lsp_special.sici(samples) if __name__ == "__main__": absltest.main(testLoader=jtu.JaxTestLoader())
LaxScipySpecialFunctionsTest
python
has2k1__plotnine
plotnine/facets/layout.py
{ "start": 563, "end": 8967 }
class ____: """ Layout of entire plot """ # facet facet: facet # coordinate system coord: coord # A dataframe with the layout information of the plot layout: pd.DataFrame # List of x scales panel_scales_x: Scales # List of y scales panel_scales_y: Scales # Range & breaks information for each panel panel_params: list[panel_view] axs: list[Axes] # MPL axes def setup(self, layers: Layers, plot: ggplot): """ Create a layout for the panels The layout is a dataframe that stores all the structural information about the panels that will make up the plot. The actual layout depends on the type of facet. This method ensures that each layer has a copy of the data it needs in `layer.data`. That data is also has column `PANEL` that indicates the panel onto which each data row/item will be plotted. """ data = [l.data for l in layers] # setup facets self.facet = plot.facet self.facet.setup_params(data) data = self.facet.setup_data(data) # setup coords self.coord = plot.coordinates self.coord.setup_params(data) data = self.coord.setup_data(data) # Generate panel layout data = self.facet.setup_data(data) self.layout = self.facet.compute_layout(data) self.layout = self.coord.setup_layout(self.layout) self.check_layout() # Map the data to the panels for layer, ldata in zip(layers, data): layer.data = self.facet.map(ldata, self.layout) def train_position(self, layers: Layers, scales: Scales): """ Create all the required x & y panel_scales And set the ranges for each scale according to the data Notes ----- The number of x or y scales depends on the facetting, particularly the scales parameter. e.g if `scales="free"`{.py} then each panel will have separate x and y scales, and if `scales="fixed"`{.py} then all panels will share an x scale and a y scale. """ layout = self.layout if not hasattr(self, "panel_scales_x") and scales.x: result = self.facet.init_scales(layout, scales.x, None) self.panel_scales_x = result.x if not hasattr(self, "panel_scales_y") and scales.y: result = self.facet.init_scales(layout, None, scales.y) self.panel_scales_y = result.y self.facet.train_position_scales(self, layers) def map_position(self, layers: Layers): """ Map x & y (position) aesthetics onto the scales. e.g If the x scale is scale_x_log10, after this function all x, xmax, xmin, ... columns in data will be mapped onto log10 scale (log10 transformed). 
The real mapping is handled by the scale.map """ _layout = self.layout for layer in layers: data = layer.data match_id = match(data["PANEL"], _layout["PANEL"]) if self.panel_scales_x: x_vars = list( set(self.panel_scales_x[0].aesthetics) & set(data.columns) ) SCALE_X = _layout["SCALE_X"].iloc[match_id].tolist() self.panel_scales_x.map(data, x_vars, SCALE_X) if self.panel_scales_y: y_vars = list( set(self.panel_scales_y[0].aesthetics) & set(data.columns) ) SCALE_Y = _layout["SCALE_Y"].iloc[match_id].tolist() self.panel_scales_y.map(data, y_vars, SCALE_Y) def get_scales(self, i: int) -> pos_scales: """ Return x & y scales for panel i Parameters ---------- i : int Panel id Returns ------- scales : types.SimpleNamespace Class attributes *x* for the x scale and *y* for the y scale of the panel """ # wrapping with np.asarray prevents an exception # on some datasets bool_idx = np.asarray(self.layout["PANEL"]) == i idx = self.layout["SCALE_X"].loc[bool_idx].iloc[0] xsc = self.panel_scales_x[idx - 1] idx = self.layout["SCALE_Y"].loc[bool_idx].iloc[0] ysc = self.panel_scales_y[idx - 1] return pos_scales(x=xsc, y=ysc) def reset_position_scales(self): """ Reset x and y scales """ if not self.facet.shrink: return with suppress(AttributeError): self.panel_scales_x.reset() with suppress(AttributeError): self.panel_scales_y.reset() def setup_panel_params(self, coord: coord): """ Calculate the x & y range & breaks information for each panel Parameters ---------- coord : coord Coordinate """ if not self.panel_scales_x: raise PlotnineError("Missing an x scale") if not self.panel_scales_y: raise PlotnineError("Missing a y scale") self.panel_params = [] cols = ["SCALE_X", "SCALE_Y"] for i, j in self.layout[cols].itertuples(index=False): i, j = i - 1, j - 1 params = coord.setup_panel_params( self.panel_scales_x[i], self.panel_scales_y[j] ) self.panel_params.append(params) def finish_data(self, layers: Layers): """ Modify data before it is drawn out by the geom Parameters ---------- layers : list List of layers """ for layer in layers: layer.data = self.facet.finish_data(layer.data, self) def check_layout(self): required = {"PANEL", "SCALE_X", "SCALE_Y"} common = self.layout.columns.intersection(list(required)) if len(required) != len(common): raise PlotnineError( "Facet layout has bad format. It must contain " f"the columns '{required}'" ) def xlabel(self, labels: labels_view) -> str: """ Determine x-axis label Parameters ---------- labels : labels_view Labels as specified by the user through the `labs` or `xlab` calls. Returns ------- out : str x-axis label """ if self.panel_scales_x[0].name is not None: return self.panel_scales_x[0].name elif labels.x is not None: return labels.x return "" def ylabel(self, labels: labels_view) -> str: """ Determine y-axis label Parameters ---------- labels : labels_view Labels as specified by the user through the `labs` or `ylab` calls. Returns ------- out : str y-axis label """ if self.panel_scales_y[0].name is not None: return self.panel_scales_y[0].name elif labels.y is not None: return labels.y return "" def set_xy_labels(self, labels: labels_view) -> labels_view: """ Determine x & y axis labels Parameters ---------- labels : labels_view Labels as specified by the user through the `labs` or `ylab` calls. 
Returns ------- out : labels_view Modified labels """ labels.x = self.xlabel(labels) labels.y = self.ylabel(labels) return labels def get_details(self) -> list[layout_details]: columns = [ "PANEL", "ROW", "COL", "SCALE_X", "SCALE_Y", "AXIS_X", "AXIS_Y", ] vcols = self.layout.columns.difference(columns) lst = [] nrow = self.layout["ROW"].max() ncol = self.layout["COL"].max() for pidx, row in self.layout.iterrows(): ld = layout_details( panel_index=pidx, # type: ignore nrow=nrow, ncol=ncol, variables={name: row[name] for name in vcols}, **{str.lower(k): row[k] for k in columns}, ) lst.append(ld) return lst
Layout
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/classVar7.py
{ "start": 106, "end": 555 }
class ____: a: ClassVar b: ClassVar = 2 c: ClassVar d: ClassVar d = 3 @classmethod def m1(cls) -> None: cls.c = "" reveal_type(A.a, expected_text="Unknown") A.a = 3 A.a = "" reveal_type(A.b, expected_text="int") A.b = 2 # This should generate an error A.b = "" reveal_type(A.c, expected_text="Unknown") A.c = 2 A.c = "" reveal_type(A.d, expected_text="int") A.d = 2 # This should generate an error A.d = ""
A
python
spack__spack
lib/spack/spack/fetch_strategy.py
{ "start": 2614, "end": 5917 }
class ____: """Superclass of all fetch strategies.""" #: The URL attribute must be specified either at the package class #: level, or as a keyword argument to ``version()``. It is used to #: distinguish fetchers for different versions in the package DSL. url_attr: Optional[str] = None #: Optional attributes can be used to distinguish fetchers when : #: classes have multiple ``url_attrs`` at the top-level. # optional attributes in version() args. optional_attrs: List[str] = [] def __init__(self, **kwargs): # The stage is initialized late, so that fetch strategies can be # constructed at package construction time. This is where things # will be fetched. self.stage = None # Enable or disable caching for this strategy based on # 'no_cache' option from version directive. self.cache_enabled = not kwargs.pop("no_cache", False) self.package = None def set_package(self, package): self.package = package # Subclasses need to implement these methods def fetch(self): """Fetch source code archive or repo. Returns: bool: True on success, False on failure. """ def check(self): """Checksum the archive fetched by this FetchStrategy.""" def expand(self): """Expand the downloaded archive into the stage source path.""" def reset(self): """Revert to freshly downloaded state. For archive files, this may just re-expand the archive. """ def archive(self, destination): """Create an archive of the downloaded data for a mirror. For downloaded files, this should preserve the checksum of the original file. For repositories, it should just create an expandable tarball out of the downloaded repository. """ @property def cachable(self): """Whether fetcher is capable of caching the resource it retrieves. This generally is determined by whether the resource is identifiably associated with a specific package version. Returns: bool: True if can cache, False otherwise. """ def source_id(self): """A unique ID for the source. It is intended that a human could easily generate this themselves using the information available to them in the Spack package. The returned value is added to the content which determines the full hash for a package using :class:`str`. """ raise NotImplementedError def mirror_id(self): """This is a unique ID for a source that is intended to help identify reuse of resources across packages. It is unique like source-id, but it does not include the package name and is not necessarily easy for a human to create themselves. """ raise NotImplementedError def __str__(self): # Should be human readable URL. return "FetchStrategy.__str___" @classmethod def matches(cls, args): """Predicate that matches fetch strategies to arguments of the version directive. Args: args: arguments of the version directive """ return cls.url_attr in args @fetcher
FetchStrategy
python
apache__airflow
task-sdk/tests/task_sdk/dags/super_basic_run.py
{ "start": 920, "end": 1282 }
class ____(BaseOperator): def execute(self, context): task_id = context["task_instance"].task_id print(f"Hello World {task_id}!") assert context["task_instance"].try_number == 1 assert context["dag"].dag_id == "super_basic_run" @dag() def super_basic_run(): CustomOperator(task_id="hello") super_basic_run()
CustomOperator
python
pypa__pip
src/pip/_vendor/pygments/lexers/python.py
{ "start": 28897, "end": 30360 }
class ____(RegexLexer): name = 'Python console session' aliases = ['pycon', 'python-console'] mimetypes = ['text/x-python-doctest'] """Auxiliary lexer for `PythonConsoleLexer`. Code tokens are output as ``Token.Other.Code``, traceback tokens as ``Token.Other.Traceback``. """ tokens = { 'root': [ (r'(>>> )(.*\n)', bygroups(Generic.Prompt, Other.Code), 'continuations'), # This happens, e.g., when tracebacks are embedded in documentation; # trailing whitespaces are often stripped in such contexts. (r'(>>>)(\n)', bygroups(Generic.Prompt, Whitespace)), (r'(\^C)?Traceback \(most recent call last\):\n', Other.Traceback, 'traceback'), # SyntaxError starts with this (r' File "[^"]+", line \d+', Other.Traceback, 'traceback'), (r'.*\n', Generic.Output), ], 'continuations': [ (r'(\.\.\. )(.*\n)', bygroups(Generic.Prompt, Other.Code)), # See above. (r'(\.\.\.)(\n)', bygroups(Generic.Prompt, Whitespace)), default('#pop'), ], 'traceback': [ # As soon as we see a traceback, consume everything until the next # >>> prompt. (r'(?=>>>( |$))', Text, '#pop'), (r'(KeyboardInterrupt)(\n)', bygroups(Name.Class, Whitespace)), (r'.*\n', Other.Traceback), ], }
_PythonConsoleLexerBase
python
hynek__structlog
src/structlog/twisted.py
{ "start": 6558, "end": 8500 }
class ____: """ Wrap a log *observer* and render non-`JSONRenderer` entries to JSON. Args: observer (ILogObserver): Twisted log observer to wrap. For example :class:`PlainFileObserver` or Twisted's stock `FileLogObserver <https://docs.twisted.org/en/stable/api/ twisted.python.log.FileLogObserver.html>`_ .. versionadded:: 0.2.0 """ def __init__(self, observer: Any) -> None: self._observer = observer def __call__(self, eventDict: EventDict) -> str: if "_structlog" not in eventDict: eventDict["message"] = ( json.dumps( { "event": textFromEventDict( eventDict # type: ignore[arg-type] ), "system": eventDict.get("system"), } ), ) eventDict["_structlog"] = True return self._observer(eventDict) def plainJSONStdOutLogger() -> JSONLogObserverWrapper: """ Return a logger that writes only the message to stdout. Transforms non-`JSONRenderer` messages to JSON. Ideal for JSONifying log entries from Twisted plugins and libraries that are outside of your control:: $ twistd -n --logger structlog.twisted.plainJSONStdOutLogger web {"event": "Log opened.", "system": "-"} {"event": "twistd 13.1.0 (python 2.7.3) starting up.", "system": "-"} {"event": "reactor class: twisted...EPollReactor.", "system": "-"} {"event": "Site starting on 8080", "system": "-"} {"event": "Starting factory <twisted.web.server.Site ...>", ...} ... Composes `PlainFileLogObserver` and `JSONLogObserverWrapper` to a usable logger. .. versionadded:: 0.2.0 """ return JSONLogObserverWrapper(PlainFileLogObserver(sys.stdout))
JSONLogObserverWrapper
python
huggingface__transformers
tests/models/gemma3n/test_processing_gemma3n.py
{ "start": 1136, "end": 2126 }
class ____(ProcessorTesterMixin, unittest.TestCase): processor_class = Gemma3nProcessor model_id = "hf-internal-testing/namespace-google-repo_name-gemma-3n-E4B-it" def prepare_image_inputs(self, batch_size: int | None = None, nested: bool = False): return super().prepare_image_inputs(batch_size=batch_size, nested=True) @classmethod def _setup_test_attributes(cls, processor): cls.image_token = processor.boi_token def test_audio_feature_extractor(self): processor = self.get_processor() feature_extractor = self.get_component("feature_extractor") raw_speech = floats_list((3, 1000)) input_feat_extract = feature_extractor(raw_speech, return_tensors="pt") input_processor = processor(text="Transcribe:", audio=raw_speech, return_tensors="pt") for key in input_feat_extract: self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
Gemma3nProcessorTest
python
langchain-ai__langchain
libs/core/tests/unit_tests/example_selectors/test_similarity.py
{ "start": 395, "end": 8308 }
class ____(VectorStore): def __init__(self, init_arg: str | None = None): self.texts: list[str] = [] self.metadatas: list[dict] = [] self._embeddings: Embeddings | None = None self.init_arg = init_arg @property def embeddings(self) -> Embeddings | None: return self._embeddings @override def add_texts( self, texts: Iterable[str], metadatas: list[dict] | None = None, **kwargs: Any, ) -> list[str]: self.texts.extend(texts) if metadatas: self.metadatas.extend(metadatas) return ["dummy_id"] @override def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> list[Document]: return [ Document( page_content=query, metadata={"query": query, "k": k, "other": "other"} ) ] * k @override def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> list[Document]: return [ Document( page_content=query, metadata={"query": query, "k": k, "fetch_k": fetch_k, "other": "other"}, ) ] * k @classmethod def from_texts( cls, texts: list[str], embedding: Embeddings, metadatas: list[dict] | None = None, **kwargs: Any, ) -> "DummyVectorStore": store = DummyVectorStore(**kwargs) store.add_texts(texts, metadatas) store._embeddings = embedding return store def test_add_example() -> None: vector_store = DummyVectorStore() selector = SemanticSimilarityExampleSelector( vectorstore=vector_store, input_keys=["foo", "foo3"] ) selector.add_example({"foo": "bar", "foo2": "bar2", "foo3": "bar3"}) assert vector_store.texts == ["bar bar3"] assert vector_store.metadatas == [{"foo": "bar", "foo2": "bar2", "foo3": "bar3"}] async def test_aadd_example() -> None: vector_store = DummyVectorStore() selector = SemanticSimilarityExampleSelector( vectorstore=vector_store, input_keys=["foo", "foo3"] ) await selector.aadd_example({"foo": "bar", "foo2": "bar2", "foo3": "bar3"}) assert vector_store.texts == ["bar bar3"] assert vector_store.metadatas == [{"foo": "bar", "foo2": "bar2", "foo3": "bar3"}] def test_select_examples() -> None: vector_store = DummyVectorStore() selector = SemanticSimilarityExampleSelector( vectorstore=vector_store, input_keys=["foo2"], example_keys=["query", "k"], k=2 ) examples = selector.select_examples({"foo": "bar", "foo2": "bar2"}) assert examples == [{"query": "bar2", "k": 2}] * 2 async def test_aselect_examples() -> None: vector_store = DummyVectorStore() selector = SemanticSimilarityExampleSelector( vectorstore=vector_store, input_keys=["foo2"], example_keys=["query", "k"], k=2 ) examples = await selector.aselect_examples({"foo": "bar", "foo2": "bar2"}) assert examples == [{"query": "bar2", "k": 2}] * 2 def test_from_examples() -> None: examples = [{"foo": "bar"}] embeddings = FakeEmbeddings(size=1) selector = SemanticSimilarityExampleSelector.from_examples( examples=examples, embeddings=embeddings, vectorstore_cls=DummyVectorStore, k=2, input_keys=["foo"], example_keys=["some_example_key"], vectorstore_kwargs={"vs_foo": "vs_bar"}, init_arg="some_init_arg", ) assert selector.input_keys == ["foo"] assert selector.example_keys == ["some_example_key"] assert selector.k == 2 assert selector.vectorstore_kwargs == {"vs_foo": "vs_bar"} assert isinstance(selector.vectorstore, DummyVectorStore) vector_store = selector.vectorstore assert vector_store.embeddings is embeddings assert vector_store.init_arg == "some_init_arg" assert vector_store.texts == ["bar"] assert vector_store.metadatas == [{"foo": "bar"}] async def test_afrom_examples() -> None: examples = [{"foo": "bar"}] embeddings = FakeEmbeddings(size=1) selector = await 
SemanticSimilarityExampleSelector.afrom_examples( examples=examples, embeddings=embeddings, vectorstore_cls=DummyVectorStore, k=2, input_keys=["foo"], example_keys=["some_example_key"], vectorstore_kwargs={"vs_foo": "vs_bar"}, init_arg="some_init_arg", ) assert selector.input_keys == ["foo"] assert selector.example_keys == ["some_example_key"] assert selector.k == 2 assert selector.vectorstore_kwargs == {"vs_foo": "vs_bar"} assert isinstance(selector.vectorstore, DummyVectorStore) vector_store = selector.vectorstore assert vector_store.embeddings is embeddings assert vector_store.init_arg == "some_init_arg" assert vector_store.texts == ["bar"] assert vector_store.metadatas == [{"foo": "bar"}] def test_mmr_select_examples() -> None: vector_store = DummyVectorStore() selector = MaxMarginalRelevanceExampleSelector( vectorstore=vector_store, input_keys=["foo2"], example_keys=["query", "k", "fetch_k"], k=2, fetch_k=5, ) examples = selector.select_examples({"foo": "bar", "foo2": "bar2"}) assert examples == [{"query": "bar2", "k": 2, "fetch_k": 5}] * 2 async def test_mmr_aselect_examples() -> None: vector_store = DummyVectorStore() selector = MaxMarginalRelevanceExampleSelector( vectorstore=vector_store, input_keys=["foo2"], example_keys=["query", "k", "fetch_k"], k=2, fetch_k=5, ) examples = await selector.aselect_examples({"foo": "bar", "foo2": "bar2"}) assert examples == [{"query": "bar2", "k": 2, "fetch_k": 5}] * 2 def test_mmr_from_examples() -> None: examples = [{"foo": "bar"}] embeddings = FakeEmbeddings(size=1) selector = MaxMarginalRelevanceExampleSelector.from_examples( examples=examples, embeddings=embeddings, vectorstore_cls=DummyVectorStore, k=2, fetch_k=5, input_keys=["foo"], example_keys=["some_example_key"], vectorstore_kwargs={"vs_foo": "vs_bar"}, init_arg="some_init_arg", ) assert selector.input_keys == ["foo"] assert selector.example_keys == ["some_example_key"] assert selector.k == 2 assert selector.fetch_k == 5 assert selector.vectorstore_kwargs == {"vs_foo": "vs_bar"} assert isinstance(selector.vectorstore, DummyVectorStore) vector_store = selector.vectorstore assert vector_store.embeddings is embeddings assert vector_store.init_arg == "some_init_arg" assert vector_store.texts == ["bar"] assert vector_store.metadatas == [{"foo": "bar"}] async def test_mmr_afrom_examples() -> None: examples = [{"foo": "bar"}] embeddings = FakeEmbeddings(size=1) selector = await MaxMarginalRelevanceExampleSelector.afrom_examples( examples=examples, embeddings=embeddings, vectorstore_cls=DummyVectorStore, k=2, fetch_k=5, input_keys=["foo"], example_keys=["some_example_key"], vectorstore_kwargs={"vs_foo": "vs_bar"}, init_arg="some_init_arg", ) assert selector.input_keys == ["foo"] assert selector.example_keys == ["some_example_key"] assert selector.k == 2 assert selector.fetch_k == 5 assert selector.vectorstore_kwargs == {"vs_foo": "vs_bar"} assert isinstance(selector.vectorstore, DummyVectorStore) vector_store = selector.vectorstore assert vector_store.embeddings is embeddings assert vector_store.init_arg == "some_init_arg" assert vector_store.texts == ["bar"] assert vector_store.metadatas == [{"foo": "bar"}]
DummyVectorStore
python
walkccc__LeetCode
solutions/565. Array Nesting/565.py
{ "start": 0, "end": 340 }
class ____: def arrayNesting(self, nums: list[int]) -> int: ans = 0 for num in nums: if num == -1: continue index = num count = 0 while nums[index] != -1: cache = index index = nums[index] nums[cache] = -1 count += 1 ans = max(ans, count) return ans
Solution
python
huggingface__transformers
src/transformers/models/funnel/modeling_funnel.py
{ "start": 32537, "end": 33326 }
class ____(ModelOutput): r""" loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Total loss of the ELECTRA-style objective. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Prediction scores of the head (scores for each token before SoftMax). """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None @auto_docstring( custom_intro=""" The base Funnel Transformer Model transformer outputting raw hidden-states without upsampling head (also called decoder) or any task-specific head on top. """ )
FunnelForPreTrainingOutput
python
apache__airflow
providers/google/tests/unit/google/cloud/links/test_dataplex.py
{ "start": 9769, "end": 10875 }
class ____: @pytest.mark.db_test def test_get_link(self, create_task_instance_of_operator, session, mock_supervisor_comms): expected_url = EXPECTED_DATAPLEX_CATALOG_ENTRY_TYPE_LINK link = DataplexCatalogEntryTypeLink() ti = create_task_instance_of_operator( DataplexCatalogGetEntryTypeOperator, dag_id="test_link_dag", task_id="test_link_task", location=TEST_LOCATION, entry_type_id=TEST_ENTRY_TYPE_ID, project_id=TEST_PROJECT_ID, ) session.add(ti) session.commit() if AIRFLOW_V_3_0_PLUS and mock_supervisor_comms: mock_supervisor_comms.send.return_value = XComResult( key="key", value={ "entry_type_id": ti.task.entry_type_id, "location": ti.task.location, "project_id": ti.task.project_id, }, ) actual_url = link.get_link(operator=ti.task, ti_key=ti.key) assert actual_url == expected_url
TestDataplexCatalogEntryTypeLink
python
streamlit__streamlit
lib/streamlit/elements/lib/policies.py
{ "start": 3108, "end": 6877 }
class ____(StreamlitAPIWarning): def __init__(self) -> None: super().__init__( """ Your script uses a widget command in a cached function (function decorated with `@st.cache_data` or `@st.cache_resource`). This code will only be called when we detect a cache "miss", which can lead to unexpected results. To fix this, move all widget commands outside the cached function. """ ) def check_cache_replay_rules() -> None: """Check if a widget is allowed to be used in the current context. More specifically, this checks if the current context is inside a cached function that disallows widget usage. If so, it raises a warning. If there are other similar checks in the future, we could extend this function to check for those as well. And rename it to check_widget_usage_rules. """ if in_cached_function.get(): from streamlit import exception # We use an exception here to show a proper stack trace # that indicates to the user where the issue is. exception(CachedWidgetWarning()) def check_fragment_path_policy(dg: DeltaGenerator) -> None: """Ensures that the current widget is not written outside of the fragment's delta path. Should be called by ever element that acts as a widget. We don't allow writing widgets from within a widget to the outside path because it can lead to unexpected behavior. For elements, this is okay because they do not trigger a re-run. """ ctx = get_script_run_ctx() # Check is only relevant for fragments if ctx is None or ctx.current_fragment_id is None: return current_fragment_delta_path = ctx.current_fragment_delta_path current_cursor = dg._active_dg._cursor if current_cursor is None: return current_cursor_delta_path = current_cursor.delta_path # the elements delta path cannot be smaller than the fragment's delta path if it is # inside of the fragment if len(current_cursor_delta_path) < len(current_fragment_delta_path): raise StreamlitFragmentWidgetsNotAllowedOutsideError() # all path indices of the fragment-path must occur in the inner-elements delta path, # otherwise it is outside of the fragment container for index, path_index in enumerate(current_fragment_delta_path): if current_cursor_delta_path[index] != path_index: raise StreamlitFragmentWidgetsNotAllowedOutsideError() def check_widget_policies( dg: DeltaGenerator, key: str | None, on_change: WidgetCallback | None = None, *, default_value: Sequence[Any] | Any | None = None, writes_allowed: bool = True, enable_check_callback_rules: bool = True, ) -> None: """Check all widget policies for the given DeltaGenerator.""" check_fragment_path_policy(dg) check_cache_replay_rules() if enable_check_callback_rules: check_callback_rules(dg, on_change) check_session_state_rules( default_value=default_value, key=key, writes_allowed=writes_allowed ) def maybe_raise_label_warnings(label: str | None, label_visibility: str | None) -> None: if not label: _LOGGER.warning( "`label` got an empty value. This is discouraged for accessibility " "reasons and may be disallowed in the future by raising an exception. " "Please provide a non-empty label and hide it with label_visibility " "if needed.", stack_info=True, ) if label_visibility not in ("visible", "hidden", "collapsed"): raise errors.StreamlitAPIException( f"Unsupported label_visibility option '{label_visibility}'. " f"Valid values are 'visible', 'hidden' or 'collapsed'." )
CachedWidgetWarning
python
google__jax
jax/experimental/mosaic/gpu/core.py
{ "start": 8492, "end": 8571 }
class ____: num_barriers: int = 1 @dataclasses.dataclass(frozen=True)
TMABarrier
python
google__pytype
pytype/abstract/abstract_utils.py
{ "start": 3595, "end": 3806 }
class ____(AsInstance): """Specially mark return values, to handle Never properly.""" # For lazy evaluation of ParameterizedClass.formal_type_parameters @dataclasses.dataclass(eq=True, frozen=True)
AsReturnValue
python
dagster-io__dagster
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py
{ "start": 152088, "end": 153687 }
class ____(GeneratedAirbyteSource): class AuthenticateViaRetentlyOAuth: @public def __init__( self, client_id: str, client_secret: str, refresh_token: str, auth_type: Optional[str] = None, ): self.auth_type = check.opt_str_param(auth_type, "auth_type") self.client_id = check.str_param(client_id, "client_id") self.client_secret = check.str_param(client_secret, "client_secret") self.refresh_token = check.str_param(refresh_token, "refresh_token") class AuthenticateWithAPIToken: @public def __init__(self, api_key: str, auth_type: Optional[str] = None): self.auth_type = check.opt_str_param(auth_type, "auth_type") self.api_key = check.str_param(api_key, "api_key") @public def __init__( self, name: str, credentials: Union[ "RetentlySource.AuthenticateViaRetentlyOAuth", "RetentlySource.AuthenticateWithAPIToken" ], ): """Airbyte Source for Retently. Args: name (str): The name of the destination. credentials (Union[RetentlySource.AuthenticateViaRetentlyOAuth, RetentlySource.AuthenticateWithAPIToken]): Choose how to authenticate to Retently """ self.credentials = check.inst_param( credentials, "credentials", (RetentlySource.AuthenticateViaRetentlyOAuth, RetentlySource.AuthenticateWithAPIToken), ) super().__init__("Retently", name)
RetentlySource
python
prabhupant__python-ds
data_structures/bst/convert_bst_to_right_node_tree.py
{ "start": 0, "end": 430 }
class ____(): def __init__(self, val): self.val = val self.left = None self.right = None def increasing_bst(root): def inorder(node): if node: yield from inorder(node.left) yield node.val yield from inorder(node.right) ans = curr = Node(None) for v in inorder(root): curr.right = Node(v) curr = curr.right return ans.right
Node
python
huggingface__transformers
src/transformers/models/siglip/configuration_siglip.py
{ "start": 783, "end": 5416 }
class ____(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`SiglipTextModel`]. It is used to instantiate a Siglip text encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the text encoder of the Siglip [google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32000): Vocabulary size of the Siglip text model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`SiglipModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. max_position_embeddings (`int`, *optional*, defaults to 64): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. pad_token_id (`int`, *optional*, defaults to 1): The id of the padding token in the vocabulary. bos_token_id (`int`, *optional*, defaults to 49406): The id of the beginning-of-sequence token in the vocabulary. eos_token_id (`int`, *optional*, defaults to 49407): The id of the end-of-sequence token in the vocabulary. projection_size (`int`, *optional*, defaults to `hidden_size`): The size of the projection head. 
Example: ```python >>> from transformers import SiglipTextConfig, SiglipTextModel >>> # Initializing a SiglipTextConfig with google/siglip-base-patch16-224 style configuration >>> configuration = SiglipTextConfig() >>> # Initializing a SiglipTextModel (with random weights) from the google/siglip-base-patch16-224 style configuration >>> model = SiglipTextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "siglip_text_model" base_config_key = "text_config" def __init__( self, vocab_size=32000, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, max_position_embeddings=64, hidden_act="gelu_pytorch_tanh", layer_norm_eps=1e-6, attention_dropout=0.0, # This differs from `CLIPTokenizer`'s default and from openai/siglip # See https://github.com/huggingface/transformers/pull/24773#issuecomment-1632287538 pad_token_id=1, bos_token_id=49406, eos_token_id=49407, projection_size=None, **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.max_position_embeddings = max_position_embeddings self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.attention_dropout = attention_dropout self.projection_size = projection_size if projection_size is not None else hidden_size
SiglipTextConfig
python
weaviate__weaviate-python-client
weaviate/collections/queries/fetch_objects/generate/executor.py
{ "start": 869, "end": 10725 }
class ____( Generic[ConnectionType, Properties, References], _BaseExecutor[ConnectionType] ): @overload def fetch_objects( self, *, single_prompt: Union[str, _SinglePrompt, None] = None, grouped_task: Union[str, _GroupedTask, None] = None, grouped_properties: Optional[List[str]] = None, generative_provider: Optional[_GenerativeConfigRuntime] = None, limit: Optional[int] = None, offset: Optional[int] = None, after: Optional[UUID] = None, filters: Optional[_Filters] = None, sort: Optional[Sorting] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Union[PROPERTIES, bool, None] = None, return_references: Literal[None] = None, ) -> executor.Result[GenerativeReturn[Properties, References]]: ... @overload def fetch_objects( self, *, single_prompt: Union[str, _SinglePrompt, None] = None, grouped_task: Union[str, _GroupedTask, None] = None, grouped_properties: Optional[List[str]] = None, generative_provider: Optional[_GenerativeConfigRuntime] = None, limit: Optional[int] = None, offset: Optional[int] = None, after: Optional[UUID] = None, filters: Optional[_Filters] = None, sort: Optional[Sorting] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Union[PROPERTIES, bool, None] = None, return_references: REFERENCES, ) -> executor.Result[GenerativeReturn[Properties, CrossReferences]]: ... @overload def fetch_objects( self, *, single_prompt: Union[str, _SinglePrompt, None] = None, grouped_task: Union[str, _GroupedTask, None] = None, grouped_properties: Optional[List[str]] = None, generative_provider: Optional[_GenerativeConfigRuntime] = None, limit: Optional[int] = None, offset: Optional[int] = None, after: Optional[UUID] = None, filters: Optional[_Filters] = None, sort: Optional[Sorting] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Union[PROPERTIES, bool, None] = None, return_references: Type[TReferences], ) -> executor.Result[GenerativeReturn[Properties, TReferences]]: ... @overload def fetch_objects( self, *, single_prompt: Union[str, _SinglePrompt, None] = None, grouped_task: Union[str, _GroupedTask, None] = None, grouped_properties: Optional[List[str]] = None, generative_provider: Optional[_GenerativeConfigRuntime] = None, limit: Optional[int] = None, offset: Optional[int] = None, after: Optional[UUID] = None, filters: Optional[_Filters] = None, sort: Optional[Sorting] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Type[TProperties], return_references: Literal[None] = None, ) -> executor.Result[GenerativeReturn[TProperties, References]]: ... @overload def fetch_objects( self, *, single_prompt: Union[str, _SinglePrompt, None] = None, grouped_task: Union[str, _GroupedTask, None] = None, grouped_properties: Optional[List[str]] = None, generative_provider: Optional[_GenerativeConfigRuntime] = None, limit: Optional[int] = None, offset: Optional[int] = None, after: Optional[UUID] = None, filters: Optional[_Filters] = None, sort: Optional[Sorting] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Type[TProperties], return_references: REFERENCES, ) -> executor.Result[GenerativeReturn[TProperties, CrossReferences]]: ... 
@overload def fetch_objects( self, *, single_prompt: Union[str, _SinglePrompt, None] = None, grouped_task: Union[str, _GroupedTask, None] = None, grouped_properties: Optional[List[str]] = None, generative_provider: Optional[_GenerativeConfigRuntime] = None, limit: Optional[int] = None, offset: Optional[int] = None, after: Optional[UUID] = None, filters: Optional[_Filters] = None, sort: Optional[Sorting] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Type[TProperties], return_references: Type[TReferences], ) -> executor.Result[GenerativeReturn[TProperties, TReferences]]: ... @overload def fetch_objects( self, *, single_prompt: Union[str, _SinglePrompt, None] = None, grouped_task: Union[str, _GroupedTask, None] = None, grouped_properties: Optional[List[str]] = None, generative_provider: Optional[_GenerativeConfigRuntime] = None, limit: Optional[int] = None, offset: Optional[int] = None, after: Optional[UUID] = None, filters: Optional[_Filters] = None, sort: Optional[Sorting] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Optional[ReturnProperties[TProperties]] = None, return_references: Optional[ReturnReferences[TReferences]] = None, ) -> executor.Result[ GenerativeReturnType[Properties, References, TProperties, TReferences] ]: ... def fetch_objects( self, *, single_prompt: Union[str, _SinglePrompt, None] = None, grouped_task: Union[str, _GroupedTask, None] = None, grouped_properties: Optional[List[str]] = None, generative_provider: Optional[_GenerativeConfigRuntime] = None, limit: Optional[int] = None, offset: Optional[int] = None, after: Optional[UUID] = None, filters: Optional[_Filters] = None, sort: Optional[Sorting] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Optional[ReturnProperties[TProperties]] = None, return_references: Optional[ReturnReferences[TReferences]] = None, ) -> executor.Result[GenerativeReturnType[Properties, References, TProperties, TReferences]]: """Perform retrieval-augmented generation (RaG) on the results of a simple get query of objects in this collection. Args: single_prompt: The prompt to use for RaG on each object individually. grouped_task: The prompt to use for RaG on the entire result set. grouped_properties: The properties to use in the RaG on the entire result set. limit: The maximum number of results to return. If not specified, the default limit specified by Weaviate is returned. offset: The offset to start from. If not specified, the retrieval begins from the first object in Weaviate. after: The UUID of the object to start from. If not specified, the retrieval begins from the first object in Weaviate. filters: The filters to apply to the retrieval. sort: The sorting to apply to the retrieval. include_vector: Whether to include the vector in the results. If not specified, this is set to False. return_metadata: The metadata to return for each object, defaults to `None`. return_properties: The properties to return for each object. return_references: The references to return for each object. NOTE: - If `return_properties` is not provided then all properties are returned except for blob properties. - If `return_metadata` is not provided then no metadata is provided. Use MetadataQuery.full() to retrieve all metadata. - If `return_references` is not provided then no references are provided. 
Returns: A `_GenerativeNearMediaReturn` object that includes the searched objects with per-object generated results and group generated results. Raises: weaviate.exceptions.WeaviateGRPCQueryError: If the network connection to Weaviate fails. """ def resp( res: search_get_pb2.SearchReply, ) -> GenerativeReturnType[Properties, References, TProperties, TReferences]: return cast( Any, self._result_to_generative_query_return( res, _QueryOptions.from_input( return_metadata, return_properties, include_vector, self._references, return_references, ), ), ) request = self._query.get( limit=limit, offset=offset, after=after, filters=filters, sort=sort, return_metadata=self._parse_return_metadata(return_metadata, include_vector), return_properties=self._parse_return_properties(return_properties), return_references=self._parse_return_references(return_references), generative=_Generative( single=single_prompt, grouped=grouped_task, grouped_properties=grouped_properties, generative_provider=generative_provider, ), ) return executor.execute( response_callback=resp, method=self._connection.grpc_search, request=request, )
_FetchObjectsGenerateExecutor
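A minimal usage sketch for the generative fetch_objects executor above, as it is normally reached through the v4 client's `collection.generate` namespace. The running instance, the "Articles" collection, and the server-side generative module are assumptions, not part of the record, and the attribute holding the grouped result may differ across client versions.
```python
import weaviate

# Assumes a local Weaviate instance with a generative module enabled.
client = weaviate.connect_to_local()
articles = client.collections.get("Articles")  # hypothetical collection name

# Grouped RaG over a plain object fetch, mirroring the grouped_task/limit
# parameters accepted by fetch_objects above.
response = articles.generate.fetch_objects(
    grouped_task="Summarize these articles in one sentence.",
    limit=5,
)
print(response.generated)  # group-level generated text (attribute name may vary by version)
client.close()
```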
python
google__pytype
pytype/pyc/opcodes.py
{ "start": 1174, "end": 4561 }
class ____: """An opcode without arguments.""" __slots__ = ( "line", "endline", "col", "endcol", "index", "prev", "next", "target", "end_async_for_target", "block_target", "code", "annotation", "folded", "metadata", "push_exc_block", "pop_exc_block", ) _FLAGS = 0 def __init__(self, index, line, endline=None, col=None, endcol=None): self.index = index self.line = line self.endline = endline self.col = col self.endcol = endcol self.prev = None self.next = None self.target = None # The END_ASYNC_FOR instruction of which we want to make pytype jump to for # this instruction. self.end_async_for_target = None self.block_target = None self.code = None # If we have a CodeType or OrderedCode parent self.annotation = None self.folded = None # elided by constant folding self.metadata = OpcodeMetadata() # Filled in by the director self.push_exc_block = False self.pop_exc_block = False def at_line(self, line): """Return a new opcode similar to this one but with a different line.""" # Ignore the optional slots (prev, next, block_target). op = Opcode(self.index, line) op.target = self.target op.code = self.code return op def basic_str(self): """Helper function for the various __str__ formats.""" folded = "<<<<" if self.folded else "" return "%d: %d: %s %s" % ( self.line, self.index, self.__class__.__name__, folded, ) def __str__(self): if self.annotation: return f"{self.basic_str()} # type: {self.annotation}" else: return self.basic_str() def __repr__(self): return self.__class__.__name__ @property def name(self): return self.__class__.__name__ @classmethod def for_python_version( cls, version: tuple[int, int] # pylint: disable=unused-argument ): return cls @classmethod def has_const(cls): return bool(cls._FLAGS & HAS_CONST) @classmethod def has_name(cls): return bool(cls._FLAGS & HAS_NAME) @classmethod def has_jrel(cls): return bool(cls._FLAGS & HAS_JREL) @classmethod def has_jabs(cls): return bool(cls._FLAGS & HAS_JABS) @classmethod def has_known_jump(cls): return bool(cls._FLAGS & (HAS_JREL | HAS_JABS)) @classmethod def has_junknown(cls): return bool(cls._FLAGS & HAS_JUNKNOWN) @classmethod def has_jump(cls): return bool(cls._FLAGS & (HAS_JREL | HAS_JABS | HAS_JUNKNOWN)) @classmethod def has_local(cls): return bool(cls._FLAGS & HAS_LOCAL) @classmethod def has_free(cls): return bool(cls._FLAGS & HAS_FREE) @classmethod def has_nargs(cls): return bool(cls._FLAGS & HAS_NARGS) @classmethod def has_argument(cls): return bool(cls._FLAGS & HAS_ARGUMENT) @classmethod def no_next(cls): return bool(cls._FLAGS & NO_NEXT) @classmethod def carry_on_to_next(cls): return not cls._FLAGS & NO_NEXT @classmethod def store_jump(cls): return bool(cls._FLAGS & STORE_JUMP) @classmethod def does_jump(cls): return cls.has_jump() and not cls.store_jump() @classmethod def pushes_block(cls): return bool(cls._FLAGS & PUSHES_BLOCK) @classmethod def pops_block(cls): return bool(cls._FLAGS & POPS_BLOCK)
Opcode
python
ray-project__ray
python/ray/_private/thirdparty/pynvml/pynvml.py
{ "start": 59778, "end": 60035 }
class ____(Structure): _fields_ = [ ('pid', c_uint), ('usedGpuMemory', c_ulonglong), ('gpuInstanceId', c_uint), ('computeInstanceId', c_uint), ('usedGpuCcProtectedMemory', c_ulonglong), ]
c_nvmlProcessDetail_v1_t
python
langchain-ai__langchain
libs/langchain/langchain_classic/smith/evaluation/runner_utils.py
{ "start": 37540, "end": 37772 }
class ____(TypedDict, total=False): """A dictionary of the results for a single example row.""" feedback: list[EvaluationResult] | None execution_time: float | None run_id: str | None @dataclasses.dataclass
_RowResult
python
great-expectations__great_expectations
tests/integration/fluent/test_integration_datasource.py
{ "start": 3433, "end": 20168 }
class ____: def test_success_with_partitioners(self, empty_data_context): context = empty_data_context datasource = sqlite_datasource(context, "yellow_tripdata.db") passenger_count_value = 5 asset = datasource.add_query_asset( name="query_asset", query=f" SELECT * from yellow_tripdata_sample_2019_02 WHERE passenger_count = {passenger_count_value}", # noqa: E501 # FIXME CoP ) validator = context.get_validator( batch_request=asset.build_batch_request( options={"year": 2019}, partitioner=ColumnPartitionerMonthly(column_name="pickup_datetime"), ) ) result = validator.expect_column_distinct_values_to_equal_set( column="passenger_count", value_set=[passenger_count_value], result_format={"result_format": "BOOLEAN_ONLY"}, ) assert result.success def test_partitioner_filtering(self, empty_data_context): context = empty_data_context datasource = sqlite_datasource(context, "../../test_cases_for_sql_data_connector.db") asset = datasource.add_query_asset( name="trip_asset_partition_by_event_type", query="SELECT * FROM table_partitioned_by_date_column__A", ) batch_request = asset.build_batch_request( options={"event_type": "start"}, partitioner=PartitionerColumnValue(column_name="event_type"), ) validator = context.get_validator(batch_request=batch_request) # All rows returned by head have the start event_type. result = validator.execution_engine.batch_manager.active_batch.head(n_rows=50) unique_event_types = set(result.data["event_type"].unique()) print(f"{unique_event_types=}") assert unique_event_types == {"start"} @pytest.mark.sqlite @pytest.mark.parametrize( [ "database", "table_name", "partitioner_class", "partitioner_kwargs", "all_batches_cnt", "specified_batch_request", "specified_batch_cnt", "last_specified_batch_metadata", ], [ pytest.param( "yellow_tripdata_sample_2020_all_months_combined.db", "yellow_tripdata_sample_2020", ColumnPartitionerYearly, {"column_name": "pickup_datetime"}, 1, {"year": 2020}, 1, {"year": 2020}, id="year", ), pytest.param( "yellow_tripdata_sample_2020_all_months_combined.db", "yellow_tripdata_sample_2020", ColumnPartitionerMonthly, {"column_name": "pickup_datetime"}, 12, {"year": 2020, "month": 6}, 1, {"year": 2020, "month": 6}, id="year_and_month", ), pytest.param( "yellow_tripdata.db", "yellow_tripdata_sample_2019_02", ColumnPartitionerDaily, {"column_name": "pickup_datetime"}, 28, {"year": 2019, "month": 2, "day": 10}, 1, {"year": 2019, "month": 2, "day": 10}, id="year_and_month_and_day", ), pytest.param( "yellow_tripdata.db", "yellow_tripdata_sample_2019_02", PartitionerDatetimePart, { "column_name": "pickup_datetime", "datetime_parts": ["year", "month", "day"], }, 28, {"year": 2019, "month": 2}, 28, {"year": 2019, "month": 2, "day": 28}, id="datetime_part", ), pytest.param( "yellow_tripdata.db", "yellow_tripdata_sample_2019_02", PartitionerColumnValue, {"column_name": "passenger_count"}, 7, {"passenger_count": 3}, 1, {"passenger_count": 3}, id="column_value", ), pytest.param( "yellow_tripdata.db", "yellow_tripdata_sample_2019_02", PartitionerColumnValue, {"column_name": "pickup_datetime"}, 9977, {"pickup_datetime": "2019-02-07 15:48:06"}, 1, {"pickup_datetime": "2019-02-07 15:48:06"}, id="column_value_datetime", ), pytest.param( "yellow_tripdata.db", "yellow_tripdata_sample_2019_02", PartitionerDividedInteger, {"column_name": "passenger_count", "divisor": 3}, 3, {"quotient": 2}, 1, {"quotient": 2}, id="divisor", ), pytest.param( "yellow_tripdata.db", "yellow_tripdata_sample_2019_02", PartitionerModInteger, {"column_name": "passenger_count", "mod": 3}, 3, 
{"remainder": 2}, 1, {"remainder": 2}, id="mod_integer", ), pytest.param( "yellow_tripdata.db", "yellow_tripdata_sample_2019_02", PartitionerConvertedDatetime, {"column_name": "pickup_datetime", "date_format_string": "%Y-%m-%d"}, 28, {"datetime": "2019-02-23"}, 1, {"datetime": "2019-02-23"}, id="converted_datetime", ), pytest.param( "yellow_tripdata.db", "yellow_tripdata_sample_2019_02", PartitionerMultiColumnValue, {"column_names": ["passenger_count", "payment_type"]}, 23, {"passenger_count": 1, "payment_type": 1}, 1, {"passenger_count": 1, "payment_type": 1}, id="multi_column_values", ), ], ) def test_partitioner( empty_data_context, database, table_name, partitioner_class, partitioner_kwargs, all_batches_cnt, specified_batch_request, specified_batch_cnt, last_specified_batch_metadata, ): context = empty_data_context datasource = sqlite_datasource(context, database) asset = datasource.add_table_asset( name="table_asset", table_name=table_name, ) partitioner = partitioner_class(**partitioner_kwargs) # Test getting all batches all_batches = asset.get_batch_identifiers_list( asset.build_batch_request(partitioner=partitioner) ) assert len(all_batches) == all_batches_cnt # Test getting specified batches specified_batches = asset.get_batch_identifiers_list( asset.build_batch_request(specified_batch_request, partitioner=partitioner) ) assert len(specified_batches) == specified_batch_cnt assert specified_batches[-1] == last_specified_batch_metadata @pytest.mark.sqlite def test_partitioner_build_batch_request_allows_selecting_by_date_and_datetime_as_string( empty_data_context, ): context = empty_data_context datasource = sqlite_datasource(context, "yellow_tripdata.db") asset = datasource.add_query_asset( "query_asset", "SELECT date(pickup_datetime) as pickup_date, passenger_count FROM yellow_tripdata_sample_2019_02", # noqa: E501 # FIXME CoP ) partitioner = PartitionerColumnValue(column_name="pickup_date") # Test getting all batches all_batches = asset.get_batch_identifiers_list( asset.build_batch_request(partitioner=partitioner) ) assert len(all_batches) == 28 with mock.patch( "great_expectations.datasource.fluent.sql_datasource._partitioner_and_sql_asset_to_batch_identifier_data" ) as mock_batch_identifiers: mock_batch_identifiers.return_value = [ {"pickup_date": datetime.date(2019, 2, 1)}, {"pickup_date": datetime.date(2019, 2, 2)}, ] specified_batches = asset.get_batch_identifiers_list( asset.build_batch_request( options={"pickup_date": "2019-02-01"}, partitioner=partitioner ) ) assert len(specified_batches) == 1 with mock.patch( "great_expectations.datasource.fluent.sql_datasource._partitioner_and_sql_asset_to_batch_identifier_data" ) as mock_batch_identifiers: mock_batch_identifiers.return_value = [ {"pickup_date": datetime.datetime(2019, 2, 1)}, # noqa: DTZ001 # FIXME CoP {"pickup_date": datetime.datetime(2019, 2, 2)}, # noqa: DTZ001 # FIXME CoP ] specified_batches = asset.get_batch_identifiers_list( asset.build_batch_request( options={"pickup_date": "2019-02-01 00:00:00"}, partitioner=partitioner ) ) assert len(specified_batches) == 1 @pytest.mark.parametrize( ["month", "expected"], [ (1, 364), (2, 342), ], ) @pytest.mark.sqlite def test_success_with_partitioners_from_batch_definitions( empty_data_context, month: int, expected: int, ): """Integration test to ensure partitions from batch configs are used. The test is parameterized just to ensure that the partitioner is actually doing something. 
""" context = empty_data_context datasource = sqlite_datasource(context, "yellow_tripdata_sample_2020_all_months_combined.db") passenger_count_value = 5 asset = datasource.add_query_asset( name="query_asset", query=f"SELECT * from yellow_tripdata_sample_2020 WHERE passenger_count = {passenger_count_value}", # noqa: E501 # FIXME CoP ) batch_definition = asset.add_batch_definition( name="whatevs", partitioner=ColumnPartitionerMonthly(column_name="pickup_datetime"), ) validator = Validator( batch_definition=batch_definition, batch_parameters={"year": 2020, "month": month}, ) result = validator.validate_expectation(gxe.ExpectTableRowCountToEqual(value=expected)) assert result.success @pytest.mark.parametrize( ["add_asset_method", "add_asset_kwarg"], [ pytest.param( "add_table_asset", {"table_name": "yellow_tripdata_sample_2019_02"}, id="table_asset", ), pytest.param( "add_query_asset", {"query": "select * from yellow_tripdata_sample_2019_02"}, id="query_asset", ), ], ) @pytest.mark.sqlite def test_asset_specified_metadata(empty_data_context, add_asset_method, add_asset_kwarg): context = empty_data_context datasource = sqlite_datasource(context, "yellow_tripdata.db") asset_specified_metadata = {"pipeline_name": "my_pipeline"} asset = getattr(datasource, add_asset_method)( name="asset", batch_metadata=asset_specified_metadata, **add_asset_kwarg, ) partitioner = ColumnPartitionerMonthly(column_name="pickup_datetime") # Test getting all batches batch = asset.get_batch(asset.build_batch_request(partitioner=partitioner)) # Update the batch_metadata from the request with the metadata inherited from the asset assert batch.metadata == {**asset_specified_metadata, "year": 2019, "month": 2} # This is marked by the various backend used in testing in the datasource_test_data fixture. def test_batch_request_error_messages( datasource_test_data: tuple[AbstractDataContext, Datasource, DataAsset, BatchRequest], ) -> None: _, _, _, batch_request = datasource_test_data # DataAsset.build_batch_request() infers datasource_name and data_asset_name # which have already been confirmed as functional via test_connection() methods. 
with pytest.raises(TypeError): batch_request.datasource_name = "untested_datasource_name" with pytest.raises(TypeError): batch_request.data_asset_name = "untested_data_asset_name" # options can be added/updated if they take the correct form batch_request.options["new_option"] = 42 assert "new_option" in batch_request.options with pytest.raises(pydantic.ValidationError): batch_request.options = {10: "value for non-string key"} # type: ignore[dict-item] # FIXME CoP with pytest.raises(pydantic.ValidationError): batch_request.options = "not a dictionary" # type: ignore[assignment] # FIXME CoP # batch_slice can be updated if it takes the correct form batch_request.batch_slice = "[5:10]" assert batch_request.batch_slice == slice(5, 10, None) # batch_slice can be updated via update method batch_request.update_batch_slice("[2:10:2]") assert batch_request.batch_slice == slice(2, 10, 2) with pytest.raises(ValueError): batch_request.batch_slice = "nonsense slice" with pytest.raises(ValueError): batch_request.batch_slice = True @pytest.mark.cloud def test_pandas_data_adding_dataframe_in_cloud_context( unset_gx_env_variables: None, cloud_api_fake: RequestsMock, empty_cloud_context_fluent: CloudDataContext, ): df = pd.DataFrame({"column_name": [1, 2, 3, 4, 5]}) context = empty_cloud_context_fluent dataframe_asset: PandasDataFrameAsset = context.data_sources.add_or_update_pandas( name="fluent_pandas_datasource" ).add_dataframe_asset(name="my_df_asset") batch_def = dataframe_asset.add_batch_definition_whole_dataframe(name="bd") batch = batch_def.get_batch(batch_parameters={"dataframe": df}) assert isinstance(batch.data, PandasBatchData) assert batch.data.dataframe.equals(df) @pytest.mark.filesystem def test_pandas_data_adding_dataframe_in_file_reloaded_context( empty_file_context: FileDataContext, ): df = pd.DataFrame({"column_name": [1, 2, 3, 4, 5]}) context = empty_file_context datasource = context.data_sources.add_or_update_pandas(name="fluent_pandas_datasource") dataframe_asset: PandasDataFrameAsset = datasource.add_dataframe_asset(name="my_df_asset") batch_def = dataframe_asset.add_batch_definition_whole_dataframe(name="bd") batch = batch_def.get_batch(batch_parameters={"dataframe": df}) assert isinstance(batch.data, PandasBatchData) assert batch.data.dataframe.equals(df) # Reload the asset and see that we can re-add the df to the batch definition context = gx.get_context(context_root_dir=context.root_directory, cloud_mode=False) dataframe_asset = context.data_sources.get(name="fluent_pandas_datasource").get_asset( name="my_df_asset" ) reloaded_batch_def = dataframe_asset.get_batch_definition(name="bd") batch = reloaded_batch_def.get_batch(batch_parameters={"dataframe": df}) assert isinstance(batch.data, PandasBatchData) assert batch.data.dataframe.equals(df) @pytest.mark.spark def test_spark_data_adding_dataframe_in_cloud_context( spark_session, spark_df_from_pandas_df, cloud_api_fake: RequestsMock, empty_cloud_context_fluent: CloudDataContext, ): df = pd.DataFrame({"column_name": [1, 2, 3, 4, 5]}) spark_df = spark_df_from_pandas_df(spark_session, df) context = empty_cloud_context_fluent dataframe_asset: SparkDataFrameAsset = context.data_sources.add_or_update_spark( name="fluent_spark_datasource" ).add_dataframe_asset(name="my_df_asset") batch_def = dataframe_asset.add_batch_definition_whole_dataframe(name="bd") batch = batch_def.get_batch(batch_parameters={"dataframe": spark_df}) assert isinstance(batch.data, SparkDFBatchData) assert batch.data.dataframe.toPandas().equals(df) @pytest.mark.spark 
def test_spark_data_adding_dataframe_in_file_reloaded_context( spark_session, spark_df_from_pandas_df, empty_file_context: FileDataContext, ): df = pd.DataFrame({"column_name": [1, 2, 3, 4, 5]}) spark_df = spark_df_from_pandas_df(spark_session, df) context = empty_file_context dataframe_asset: SparkDataFrameAsset = context.data_sources.add_or_update_spark( name="fluent_spark_datasource" ).add_dataframe_asset(name="my_df_asset") batch_def = dataframe_asset.add_batch_definition_whole_dataframe(name="bd") batch = batch_def.get_batch(batch_parameters={"dataframe": spark_df}) assert isinstance(batch.data, SparkDFBatchData) assert batch.data.dataframe.toPandas().equals(df) context = gx.get_context(context_root_dir=context.root_directory, cloud_mode=False) retrieved_bd = ( context.data_sources.get(name="fluent_spark_datasource") .get_asset(name="my_df_asset") .get_batch_definition(name="bd") ) new_batch = retrieved_bd.get_batch(batch_parameters={"dataframe": spark_df}) assert isinstance(new_batch.data, SparkDFBatchData) assert new_batch.data.dataframe.toPandas().equals(df) @dataclass
TestQueryAssets
python
Textualize__textual
tests/test_masked_input.py
{ "start": 270, "end": 7266 }
class ____(App[None]): def __init__(self, template: str, placeholder: str = ""): super().__init__() self.messages: list[InputEvent] = [] self.template = template self.placeholder = placeholder def compose(self) -> ComposeResult: yield MaskedInput( template=self.template, placeholder=self.placeholder, select_on_focus=False ) @on(MaskedInput.Changed) @on(MaskedInput.Submitted) def on_changed_or_submitted(self, event: InputEvent) -> None: self.messages.append(event) async def test_missing_required(): app = InputApp(">9999-99-99") async with app.run_test() as pilot: input = app.query_one(MaskedInput) input.value = "2024-12" assert not input.is_valid await pilot.pause() assert len(app.messages) == 1 assert app.messages[0].validation_result == ValidationResult.failure( failures=[ Failure( value="2024-12", validator=input._template, description="Value does not match template!", ) ], ) async def test_valid_required(): app = InputApp(">9999-99-99") async with app.run_test() as pilot: input = app.query_one(MaskedInput) input.value = "2024-12-31" assert input.is_valid await pilot.pause() assert len(app.messages) == 1 assert app.messages[0].validation_result == ValidationResult.success() async def test_missing_optional(): app = InputApp(">9999-99-00") async with app.run_test() as pilot: input = app.query_one(MaskedInput) input.value = "2024-12" assert input.is_valid await pilot.pause() assert len(app.messages) == 1 assert app.messages[0].validation_result == ValidationResult.success() async def test_editing(): serial = "ABCDE-FGHIJ-KLMNO-PQRST" app = InputApp(">NNNNN-NNNNN-NNNNN-NNNNN;_") async with app.run_test() as pilot: input = app.query_one(MaskedInput) await pilot.press("A", "B", "C", "D") assert input.cursor_position == 4 assert input.value == "ABCD" await pilot.press("E") assert input.cursor_position == 6 assert input.value == "ABCDE-" await pilot.press("backspace") assert input.cursor_position == 4 assert input.value == "ABCD" input.value = serial input.action_end() assert input.is_valid app.set_focus(None) input.focus() await pilot.pause() assert input.cursor_position == len(serial) await pilot.press("U") assert input.cursor_position == len(serial) async def test_key_movement_actions(): serial = "ABCDE-FGHIJ-KLMNO-PQRST" app = InputApp(">NNNNN-NNNNN-NNNNN-NNNNN;_") async with app.run_test(): input = app.query_one(MaskedInput) input.value = serial input.action_home() assert input.is_valid input.action_cursor_right_word() assert input.cursor_position == 6 input.action_cursor_right() input.action_cursor_right_word() assert input.cursor_position == 12 input.action_cursor_left() input.action_cursor_left() assert input.cursor_position == 9 input.action_cursor_left_word() assert input.cursor_position == 6 async def test_key_modification_actions(): serial = "ABCDE-FGHIJ-KLMNO-PQRST" app = InputApp(">NNNNN-NNNNN-NNNNN-NNNNN;_") async with app.run_test() as pilot: input = app.query_one(MaskedInput) input.value = serial assert input.is_valid input.cursor_position = 0 input.action_delete_right() assert input.value == " BCDE-FGHIJ-KLMNO-PQRST" input.cursor_position = 3 input.action_delete_left() assert input.value == " B DE-FGHIJ-KLMNO-PQRST" input.cursor_position = 6 input.action_delete_left() assert input.value == " B D -FGHIJ-KLMNO-PQRST" input.cursor_position = 9 input.action_delete_left_word() assert input.value == " B D - IJ-KLMNO-PQRST" input.action_delete_left_word() assert input.value == " - IJ-KLMNO-PQRST" input.cursor_position = 15 input.action_delete_right_word() assert input.value == " - 
IJ-KLM -PQRST" input.action_delete_right_word() assert input.value == " - IJ-KLM" input.cursor_position = 10 input.action_delete_right_all() assert input.value == " - I" await pilot.press("J") assert input.value == " - IJ-" input.action_cursor_left() input.action_delete_left_all() assert input.value == " - J-" input.clear() assert input.value == "" async def test_cursor_word_right_after_last_separator(): app = InputApp(">NNN-NNN-NNN-NNNNN;_") async with app.run_test(): input = app.query_one(MaskedInput) input.value = "123-456-789-012" input.cursor_position = 13 input.action_cursor_right_word() assert input.cursor_position == 15 async def test_case_conversion_meta_characters(): app = InputApp("NN<-N!N>N") async with app.run_test() as pilot: input = app.query_one(MaskedInput) await pilot.press("a", "B", "C", "D", "e") assert input.value == "aB-cDE" assert input.is_valid async def test_case_conversion_override(): app = InputApp(">-<NN") async with app.run_test() as pilot: input = app.query_one(MaskedInput) await pilot.press("a", "B") assert input.value == "-ab" assert input.is_valid async def test_case_conversion_cancel(): app = InputApp("-!N-") async with app.run_test() as pilot: input = app.query_one(MaskedInput) await pilot.press("a") assert input.value == "-a-" assert input.is_valid async def test_only_separators__raises_ValueError(): app = InputApp("---") with pytest.raises(ValueError): async with app.run_test() as pilot: await pilot.press("a") async def test_custom_separator_escaping(): app = InputApp("N\\aN\\N\\cN") async with app.run_test() as pilot: input = app.query_one(MaskedInput) await pilot.press("D", "e", "F") assert input.value == "DaeNcF" assert input.is_valid async def test_digits_not_required(): app = InputApp("00;_") async with app.run_test() as pilot: input = app.query_one(MaskedInput) await pilot.press("a", "1") assert input.value == "1" assert input.is_valid async def test_digits_required(): app = InputApp("99;_") async with app.run_test() as pilot: input = app.query_one(MaskedInput) await pilot.press("a", "1") assert input.value == "1" assert not input.is_valid
InputApp
python
walkccc__LeetCode
solutions/655. Print Binary Tree/655.py
{ "start": 0, "end": 643 }
class ____: def printTree(self, root: TreeNode | None) -> list[list[str]]: def maxHeight(root: TreeNode | None) -> int: if not root: return 0 return 1 + max(maxHeight(root.left), maxHeight(root.right)) def dfs(root: TreeNode | None, row: int, left: int, right: int) -> None: if not root: return mid = (left + right) // 2 ans[row][mid] = str(root.val) dfs(root.left, row + 1, left, mid - 1) dfs(root.right, row + 1, mid + 1, right) m = maxHeight(root) n = pow(2, m) - 1 ans = [[''] * n for _ in range(m)] dfs(root, 0, 0, len(ans[0]) - 1) return ans
Solution
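A small self-contained check of the printTree solution above. `TreeNode` is not included in the extracted span, so a minimal stand-in is defined here, and the masked class name is assumed to resolve to `Solution` as the target indicates.
```python
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

# Tree:   1
#        /
#       2
root = TreeNode(1, TreeNode(2))
print(Solution().printTree(root))
# Height is 2, so the grid is 2 x (2**2 - 1) = 2 x 3:
# [['', '1', ''], ['2', '', '']]
```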
python
davidhalter__jedi
jedi/inference/compiled/access.py
{ "start": 4652, "end": 18914 }
class ____: def __init__(self, inference_state, obj): self._inference_state = inference_state self._obj = obj def __repr__(self): return '%s(%s)' % (self.__class__.__name__, self.get_repr()) def _create_access(self, obj): return create_access(self._inference_state, obj) def _create_access_path(self, obj) -> AccessPath: return create_access_path(self._inference_state, obj) def py__bool__(self): return bool(self._obj) def py__file__(self) -> Optional[Path]: try: return Path(self._obj.__file__) except AttributeError: return None def py__doc__(self): return inspect.getdoc(self._obj) or '' def py__name__(self): if not _is_class_instance(self._obj) or \ inspect.ismethoddescriptor(self._obj): # slots cls = self._obj else: try: cls = self._obj.__class__ except AttributeError: # happens with numpy.core.umath._UFUNC_API (you get it # automatically by doing `import numpy`. return None try: return cls.__name__ except AttributeError: return None def py__mro__accesses(self): return tuple(self._create_access_path(cls) for cls in self._obj.__mro__[1:]) def py__getitem__all_values(self): if isinstance(self._obj, dict): return [self._create_access_path(v) for v in self._obj.values()] if isinstance(self._obj, (list, tuple)): return [self._create_access_path(v) for v in self._obj] if self.is_instance(): cls = DirectObjectAccess(self._inference_state, self._obj.__class__) return cls.py__getitem__all_values() try: getitem = self._obj.__getitem__ except AttributeError: pass else: annotation = DirectObjectAccess(self._inference_state, getitem).get_return_annotation() if annotation is not None: return [annotation] return None def py__simple_getitem__(self, index, *, safe=True): if safe and type(self._obj) not in ALLOWED_GETITEM_TYPES: # Get rid of side effects, we won't call custom `__getitem__`s. return None return self._create_access_path(self._obj[index]) def py__iter__list(self): try: iter_method = self._obj.__iter__ except AttributeError: return None else: p = DirectObjectAccess(self._inference_state, iter_method).get_return_annotation() if p is not None: return [p] if type(self._obj) not in ALLOWED_GETITEM_TYPES: # Get rid of side effects, we won't call custom `__getitem__`s. return [] lst = [] for i, part in enumerate(self._obj): if i > 20: # Should not go crazy with large iterators break lst.append(self._create_access_path(part)) return lst def py__class__(self): return self._create_access_path(self._obj.__class__) def py__bases__(self): return [self._create_access_path(base) for base in self._obj.__bases__] def py__path__(self): paths = getattr(self._obj, '__path__', None) # Avoid some weird hacks that would just fail, because they cannot be # used by pickle. if not isinstance(paths, list) \ or not all(isinstance(p, str) for p in paths): return None return paths @shorten_repr def get_repr(self): if inspect.ismodule(self._obj): return repr(self._obj) # Try to avoid execution of the property. if safe_getattr(self._obj, '__module__', default='') == 'builtins': return repr(self._obj) type_ = type(self._obj) if type_ == type: return type.__repr__(self._obj) if safe_getattr(type_, '__module__', default='') == 'builtins': # Allow direct execution of repr for builtins. 
return repr(self._obj) return object.__repr__(self._obj) def is_class(self): return inspect.isclass(self._obj) def is_function(self): return inspect.isfunction(self._obj) or inspect.ismethod(self._obj) def is_module(self): return inspect.ismodule(self._obj) def is_instance(self): return _is_class_instance(self._obj) def ismethoddescriptor(self): return inspect.ismethoddescriptor(self._obj) def get_qualified_names(self): def try_to_get_name(obj): return getattr(obj, '__qualname__', getattr(obj, '__name__', None)) if self.is_module(): return () name = try_to_get_name(self._obj) if name is None: name = try_to_get_name(type(self._obj)) if name is None: return () return tuple(name.split('.')) def dir(self): return dir(self._obj) def has_iter(self): try: iter(self._obj) return True except TypeError: return False def is_allowed_getattr(self, name, safe=True) -> Tuple[bool, bool, Optional[AccessPath]]: # TODO this API is ugly. try: attr, is_get_descriptor = getattr_static(self._obj, name) except AttributeError: if not safe: # Unsafe is mostly used to check for __getattr__/__getattribute__. # getattr_static works for properties, but the underscore methods # are just ignored (because it's safer and avoids more code # execution). See also GH #1378. # Avoid warnings, see comment in the next function. with warnings.catch_warnings(record=True): warnings.simplefilter("always") try: return hasattr(self._obj, name), False, None except Exception: # Obviously has an attribute (probably a property) that # gets executed, so just avoid all exceptions here. pass return False, False, None else: if is_get_descriptor and type(attr) not in ALLOWED_DESCRIPTOR_ACCESS: if isinstance(attr, property): if hasattr(attr.fget, '__annotations__'): a = DirectObjectAccess(self._inference_state, attr.fget) return True, True, a.get_return_annotation() # In case of descriptors that have get methods we cannot return # it's value, because that would mean code execution. return True, True, None return True, False, None def getattr_paths(self, name, default=_sentinel): try: # Make sure no warnings are printed here, this is autocompletion, # warnings should not be shown. See also GH #1383. with warnings.catch_warnings(record=True): warnings.simplefilter("always") return_obj = getattr(self._obj, name) except Exception as e: if default is _sentinel: if isinstance(e, AttributeError): # Happens e.g. in properties of # PyQt4.QtGui.QStyleOptionComboBox.currentText # -> just set it to None raise # Just in case anything happens, return an AttributeError. It # should not crash. raise AttributeError return_obj = default access = self._create_access(return_obj) if inspect.ismodule(return_obj): return [access] try: module = return_obj.__module__ except AttributeError: pass else: if module is not None and isinstance(module, str): try: __import__(module) # For some modules like _sqlite3, the __module__ for classes is # different, in this case it's sqlite3. So we have to try to # load that "original" module, because it's not loaded yet. If # we don't do that, we don't really have a "parent" module and # we would fall back to builtins. 
except ImportError: pass module = inspect.getmodule(return_obj) if module is None: module = inspect.getmodule(type(return_obj)) if module is None: module = builtins return [self._create_access(module), access] def get_safe_value(self): if type(self._obj) in (bool, bytes, float, int, str, slice) or self._obj is None: return self._obj raise ValueError("Object is type %s and not simple" % type(self._obj)) def get_api_type(self): return get_api_type(self._obj) def get_array_type(self): if isinstance(self._obj, dict): return 'dict' return None def get_key_paths(self): def iter_partial_keys(): # We could use list(keys()), but that might take a lot more memory. for (i, k) in enumerate(self._obj.keys()): # Limit key listing at some point. This is artificial, but this # way we don't get stalled because of slow completions if i > 50: break yield k return [self._create_access_path(k) for k in iter_partial_keys()] def get_access_path_tuples(self): accesses = [create_access(self._inference_state, o) for o in self._get_objects_path()] return [(access.py__name__(), access) for access in accesses] def _get_objects_path(self): def get(): obj = self._obj yield obj try: obj = obj.__objclass__ except AttributeError: pass else: yield obj try: # Returns a dotted string path. imp_plz = obj.__module__ except AttributeError: # Unfortunately in some cases like `int` there's no __module__ if not inspect.ismodule(obj): yield builtins else: if imp_plz is None: # Happens for example in `(_ for _ in []).send.__module__`. yield builtins else: try: yield sys.modules[imp_plz] except KeyError: # __module__ can be something arbitrary that doesn't exist. yield builtins return list(reversed(list(get()))) def execute_operation(self, other_access_handle, operator): other_access = other_access_handle.access op = _OPERATORS[operator] return self._create_access_path(op(self._obj, other_access._obj)) def get_annotation_name_and_args(self): """ Returns Tuple[Optional[str], Tuple[AccessPath, ...]] """ name = None args = () if safe_getattr(self._obj, '__module__', default='') == 'typing': m = re.match(r'typing.(\w+)\[', repr(self._obj)) if m is not None: name = m.group(1) import typing if sys.version_info >= (3, 8): args = typing.get_args(self._obj) else: args = safe_getattr(self._obj, '__args__', default=None) return name, tuple(self._create_access_path(arg) for arg in args) def needs_type_completions(self): return inspect.isclass(self._obj) and self._obj != type def _annotation_to_str(self, annotation): return inspect.formatannotation(annotation) def get_signature_params(self): return [ SignatureParam( name=p.name, has_default=p.default is not p.empty, default=self._create_access_path(p.default), default_string=repr(p.default), has_annotation=p.annotation is not p.empty, annotation=self._create_access_path(p.annotation), annotation_string=self._annotation_to_str(p.annotation), kind_name=str(p.kind) ) for p in self._get_signature().parameters.values() ] def _get_signature(self): obj = self._obj try: return inspect.signature(obj) except (RuntimeError, TypeError): # Reading the code of the function in Python 3.6 implies there are # at least these errors that might occur if something is wrong with # the signature. In that case we just want a simple escape for now. 
raise ValueError def get_return_annotation(self) -> Optional[AccessPath]: try: o = self._obj.__annotations__.get('return') except AttributeError: return None if o is None: return None try: o = typing.get_type_hints(self._obj).get('return') except Exception: pass return self._create_access_path(o) def negate(self): return self._create_access_path(-self._obj) def get_dir_infos(self): """ Used to return a couple of infos that are needed when accessing the sub objects of an objects """ tuples = dict( (name, self.is_allowed_getattr(name)) for name in self.dir() ) return self.needs_type_completions(), tuples def _is_class_instance(obj): """Like inspect.* methods.""" try: cls = obj.__class__ except AttributeError: return False else: # The isinstance check for cls is just there so issubclass doesn't # raise an exception. return cls != type and isinstance(cls, type) and not issubclass(cls, NOT_CLASS_TYPES)
DirectObjectAccess
python
Pylons__pyramid
src/pyramid/httpexceptions.py
{ "start": 17631, "end": 18007 }
class ____(_HTTPMove): """ subclass of :class:`~_HTTPMove` This indicates that the requested resource resides temporarily under a different URI. code: 302, title: Found """ code = 302 title = 'Found' explanation = 'The resource was found at' # This one is safe after a POST (the redirected location will be # retrieved with GET):
HTTPFound
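A short sketch of how `HTTPFound` is typically used inside a Pyramid view to issue a 302 redirect; the `"home"` route name is hypothetical.
```python
from pyramid.httpexceptions import HTTPFound

def checkout_view(request):
    # 302 Found: safe after a POST, since the redirected location
    # will be retrieved with GET (see the class comment above).
    return HTTPFound(location=request.route_url("home"))
```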
python
scipy__scipy
scipy/stats/tests/test_distributions.py
{ "start": 189089, "end": 191318 }
class ____: def test_pdf_unity_area(self): from scipy.integrate import simpson # PDF should integrate to one p = stats.genexpon.pdf(np.arange(0, 10, 0.01), 0.5, 0.5, 2.0) assert_almost_equal(simpson(p, dx=0.01), 1, 1) def test_cdf_bounds(self): # CDF should always be positive cdf = stats.genexpon.cdf(np.arange(0, 10, 0.01), 0.5, 0.5, 2.0) assert np.all((0 <= cdf) & (cdf <= 1)) # The values of p in the following data were computed with mpmath. # E.g. the script # from mpmath import mp # mp.dps = 80 # x = mp.mpf('15.0') # a = mp.mpf('1.0') # b = mp.mpf('2.0') # c = mp.mpf('1.5') # print(float(mp.exp((-a-b)*x + (b/c)*-mp.expm1(-c*x)))) # prints # 1.0859444834514553e-19 @pytest.mark.parametrize('x, p, a, b, c', [(15, 1.0859444834514553e-19, 1, 2, 1.5), (0.25, 0.7609068232534623, 0.5, 2, 3), (0.25, 0.09026661397565876, 9.5, 2, 0.5), (0.01, 0.9753038265071597, 2.5, 0.25, 0.5), (3.25, 0.0001962824553094492, 2.5, 0.25, 0.5), (0.125, 0.9508674287164001, 0.25, 5, 0.5)]) def test_sf_isf(self, x, p, a, b, c): sf = stats.genexpon.sf(x, a, b, c) assert_allclose(sf, p, rtol=2e-14) isf = stats.genexpon.isf(p, a, b, c) assert_allclose(isf, x, rtol=2e-14) # The values of p in the following data were computed with mpmath. @pytest.mark.parametrize('x, p, a, b, c', [(0.25, 0.2390931767465377, 0.5, 2, 3), (0.25, 0.9097333860243412, 9.5, 2, 0.5), (0.01, 0.0246961734928403, 2.5, 0.25, 0.5), (3.25, 0.9998037175446906, 2.5, 0.25, 0.5), (0.125, 0.04913257128359998, 0.25, 5, 0.5)]) def test_cdf_ppf(self, x, p, a, b, c): cdf = stats.genexpon.cdf(x, a, b, c) assert_allclose(cdf, p, rtol=2e-14) ppf = stats.genexpon.ppf(p, a, b, c) assert_allclose(ppf, x, rtol=2e-14)
TestGenExpon
python
pytorch__pytorch
torch/utils/mkldnn.py
{ "start": 6428, "end": 8244 }
class ____(torch.jit.ScriptModule): def __init__(self, dense_module, dtype) -> None: super().__init__() self.register_buffer('weight', dense_module.weight.to_mkldnn(dtype)) @torch.jit.script_method def __getstate__(self): return (self.weight.to_dense(), self.training) @torch.jit.script_method def __setstate__(self, state): self.weight = state[0].to_mkldnn() self.training = state[1] @torch.jit.script_method def forward(self, x): x_mkldnn = x if x.is_mkldnn else x.to_mkldnn() y_mkldnn = torch.prelu(x_mkldnn, self.weight) y = y_mkldnn if x.is_mkldnn else y_mkldnn.to_dense() return y def to_mkldnn(module, dtype=torch.float): if dtype not in (torch.float, torch.bfloat16, torch.half): raise AssertionError("MKLDNN only support float, bfloat16, and half path now") def m_fn(m, d): if isinstance(m, torch.nn.Linear): return MkldnnLinear(m, d) elif isinstance(m, torch.nn.Conv1d): return MkldnnConv1d(m, d) elif isinstance(m, torch.nn.Conv2d): return MkldnnConv2d(m, d) elif isinstance(m, torch.nn.Conv3d): return MkldnnConv3d(m, d) elif isinstance(m, (torch.nn.BatchNorm2d, torch.nn.BatchNorm3d)): # For batchnorm bf16 path, OneDNN requires weight and bias need fp32 dtype. # so it doesn't need dtype argument. return MkldnnBatchNorm(m) elif isinstance(m, torch.nn.PReLU): return MkldnnPrelu(m, d) else: return m def m_fn_rec(m, d): new_m = m_fn(m, d) for name, sub_m in m.named_children(): setattr(new_m, name, m_fn_rec(sub_m, d)) return new_m return m_fn_rec(module, dtype)
MkldnnPrelu
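The `to_mkldnn` helper in the record above rewrites supported modules (Linear, Conv1d/2d/3d, BatchNorm, PReLU) into their MKL-DNN wrappers for CPU inference. A rough sketch, assuming a PyTorch build with oneDNN/MKL-DNN support, which is not guaranteed on every install:
```python
import torch
from torch.utils.mkldnn import to_mkldnn

# Inference-only: the MKL-DNN wrappers are not meant for training.
model = torch.nn.Sequential(
    torch.nn.Linear(8, 4),
    torch.nn.PReLU(),
).eval()

mkldnn_model = to_mkldnn(model)

with torch.no_grad():
    out = mkldnn_model(torch.randn(1, 8))
print(out.shape)  # torch.Size([1, 4])
```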
python
rapidsai__cudf
python/cudf/cudf/core/dtypes.py
{ "start": 30682, "end": 30786 }
class ____(DecimalDtype): name = "decimal128" MAX_PRECISION = 38 ITEMSIZE = 16
Decimal128Dtype
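A minimal construction sketch for the dtype above, assuming the (`precision`, `scale`) constructor inherited from `DecimalDtype`; running it requires a CUDA-capable GPU with a RAPIDS/cuDF installation.
```python
from decimal import Decimal

import cudf  # requires a CUDA GPU and a RAPIDS/cuDF installation

dtype = cudf.Decimal128Dtype(precision=38, scale=2)  # 38 matches MAX_PRECISION above
s = cudf.Series([Decimal("1.25"), Decimal("2.50")], dtype=dtype)
print(s.dtype)
```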
python
huggingface__transformers
src/transformers/models/led/modeling_led.py
{ "start": 39739, "end": 42424 }
class ____(GradientCheckpointingLayer): def __init__(self, config: LEDConfig, layer_id: int): super().__init__() self.embed_dim = config.d_model self.self_attn = LEDEncoderAttention(config, layer_id) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, is_index_masked=None, is_index_global_attn=None, is_global_attn=None, output_attentions=False, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape *(batch, seq_len, embed_dim)* attention_mask (`torch.FloatTensor`): attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. """ residual = hidden_states attn_outputs = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, output_attentions=output_attentions, ) hidden_states = attn_outputs[0] hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if hidden_states.dtype == torch.float16 and ( torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() ): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) return (hidden_states,) + attn_outputs[1:]
LEDEncoderLayer
python
doocs__leetcode
lcof/面试题68 - I. 二叉搜索树的最近公共祖先/Solution.py
{ "start": 164, "end": 524 }
class ____: def lowestCommonAncestor( self, root: TreeNode, p: TreeNode, q: TreeNode ) -> TreeNode: while 1: if root.val < p.val and root.val < q.val: root = root.right elif root.val > p.val and root.val > q.val: root = root.left else: return root
Solution
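A quick check of the BST lowest-common-ancestor record above; `TreeNode` is not part of the extracted span, so a minimal stand-in is assumed, and the masked class name is taken to be `Solution` per the target.
```python
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

# BST:     6
#        /   \
#       2     8
root = TreeNode(6)
root.left, root.right = TreeNode(2), TreeNode(8)

lca = Solution().lowestCommonAncestor(root, root.left, root.right)
print(lca.val)  # 6 -- the values 2 and 8 sit on opposite sides of the root
```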
python
walkccc__LeetCode
solutions/3350. Adjacent Increasing Subarrays Detection II/3350.py
{ "start": 0, "end": 455 }
class ____: # Similar to 3349. Adjacent Increasing Subarrays Detection I def maxIncreasingSubarrays(self, nums: list[int]) -> int: ans = 0 increasing = 1 prevIncreasing = 0 for a, b in itertools.pairwise(nums): if b > a: increasing += 1 else: prevIncreasing = increasing increasing = 1 ans = max(ans, increasing // 2) ans = max(ans, min(prevIncreasing, increasing)) return ans
Solution
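The record above uses `itertools.pairwise` but the extracted span omits the import; a self-contained check with the import added, using the input and output from the problem's first example.
```python
import itertools  # maxIncreasingSubarrays above relies on itertools.pairwise

nums = [2, 5, 7, 8, 9, 2, 3, 4, 3, 1]
print(Solution().maxIncreasingSubarrays(nums))  # 3
# [7, 8, 9] and [2, 3, 4] are adjacent strictly increasing subarrays of length 3.
```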
python
huggingface__transformers
src/transformers/models/granitemoehybrid/modular_granitemoehybrid.py
{ "start": 3810, "end": 3957 }
class ____(BambaRMSNormGated): def __init__(self, hidden_size, eps=1e-6): super().__init__(hidden_size, eps)
GraniteMoeHybridRMSNormGated
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/index1.py
{ "start": 1524, "end": 1741 }
class ____(Generic[T]): def __getitem__(self, args: int) -> Self: ... def get(self, index: int) -> Self: reveal_type(self[index], expected_text="Self@ClassF[T@ClassF]") return self[index]
ClassF
python
hynek__structlog
tests/processors/test_processors.py
{ "start": 20947, "end": 21821 }
class ____: def test_rename_once(self): """ Renaming event to something else works. """ assert {"msg": "hi", "foo": "bar"} == EventRenamer("msg")( None, None, {"event": "hi", "foo": "bar"} ) def test_rename_twice(self): """ Renaming both from and to `event` works. """ assert { "msg": "hi", "event": "fabulous", "foo": "bar", } == EventRenamer("msg", "_event")( None, None, {"event": "hi", "foo": "bar", "_event": "fabulous"} ) def test_replace_by_key_is_optional(self): """ The key that is renamed to `event` doesn't have to exist. """ assert {"msg": "hi", "foo": "bar"} == EventRenamer("msg", "missing")( None, None, {"event": "hi", "foo": "bar"} )
TestRenameKey
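The tests above exercise `EventRenamer` by calling it directly as a processor; a tiny sketch does the same outside the test suite. The `structlog.processors` import path is assumed here rather than taken from the record.
```python
from structlog.processors import EventRenamer

# Rename the "event" key to "msg", as in the first test case above.
renamer = EventRenamer("msg")
print(renamer(None, None, {"event": "hi", "foo": "bar"}))
# {'msg': 'hi', 'foo': 'bar'}
```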
python
pytorch__pytorch
test/dynamo/cpython/3_13/test_heapq.py
{ "start": 13363, "end": 17323 }
class ____: def test_non_sequence(self): for f in (self.module.heapify, self.module.heappop): self.assertRaises((TypeError, AttributeError), f, 10) for f in (self.module.heappush, self.module.heapreplace, self.module.nlargest, self.module.nsmallest): self.assertRaises((TypeError, AttributeError), f, 10, 10) def test_len_only(self): for f in (self.module.heapify, self.module.heappop): self.assertRaises((TypeError, AttributeError), f, LenOnly()) for f in (self.module.heappush, self.module.heapreplace): self.assertRaises((TypeError, AttributeError), f, LenOnly(), 10) for f in (self.module.nlargest, self.module.nsmallest): self.assertRaises(TypeError, f, 2, LenOnly()) def test_cmp_err(self): seq = [CmpErr(), CmpErr(), CmpErr()] for f in (self.module.heapify, self.module.heappop): self.assertRaises(ZeroDivisionError, f, seq) for f in (self.module.heappush, self.module.heapreplace): self.assertRaises(ZeroDivisionError, f, seq, 10) for f in (self.module.nlargest, self.module.nsmallest): self.assertRaises(ZeroDivisionError, f, 2, seq) def test_arg_parsing(self): for f in (self.module.heapify, self.module.heappop, self.module.heappush, self.module.heapreplace, self.module.nlargest, self.module.nsmallest): self.assertRaises((TypeError, AttributeError), f, 10) def test_iterable_args(self): for f in (self.module.nlargest, self.module.nsmallest): for s in ("123", "", range(1000), (1, 1.2), range(2000,2200,5)): for g in (G, I, Ig, L, R): self.assertEqual(list(f(2, g(s))), list(f(2,s))) self.assertEqual(list(f(2, S(s))), []) self.assertRaises(TypeError, f, 2, X(s)) self.assertRaises(TypeError, f, 2, N(s)) self.assertRaises(ZeroDivisionError, f, 2, E(s)) # Issue #17278: the heap may change size while it's being walked. def test_heappush_mutating_heap(self): heap = [] heap.extend(SideEffectLT(i, heap) for i in range(200)) # Python version raises IndexError, C version RuntimeError with self.assertRaises((IndexError, RuntimeError)): self.module.heappush(heap, SideEffectLT(5, heap)) def test_heappop_mutating_heap(self): heap = [] heap.extend(SideEffectLT(i, heap) for i in range(200)) # Python version raises IndexError, C version RuntimeError with self.assertRaises((IndexError, RuntimeError)): self.module.heappop(heap) def test_comparison_operator_modifiying_heap(self): # See bpo-39421: Strong references need to be taken # when comparing objects as they can alter the heap with torch._dynamo.error_on_graph_break(False): class EvilClass(int): def __lt__(self, o): heap.clear() return NotImplemented heap = [] self.module.heappush(heap, EvilClass(0)) self.assertRaises(IndexError, self.module.heappushpop, heap, 1) def test_comparison_operator_modifiying_heap_two_heaps(self): with torch._dynamo.error_on_graph_break(False): class h(int): def __lt__(self, o): list2.clear() return NotImplemented class g(int): def __lt__(self, o): list1.clear() return NotImplemented list1, list2 = [], [] self.module.heappush(list1, h(0)) self.module.heappush(list2, g(0)) self.assertRaises((IndexError, RuntimeError), self.module.heappush, list1, g(1)) self.assertRaises((IndexError, RuntimeError), self.module.heappush, list2, h(1))
_TestErrorHandling
python
scipy__scipy
scipy/io/tests/test_idl.py
{ "start": 15000, "end": 18972 }
class ____: # Test that structures are correctly read in def test_scalars(self): s = readsav(path.join(DATA_PATH, 'struct_pointers.sav'), verbose=False) assert_identical(s.pointers.g, np.array(np.float32(4.), dtype=np.object_)) assert_identical(s.pointers.h, np.array(np.float32(4.), dtype=np.object_)) assert_(id(s.pointers.g[0]) == id(s.pointers.h[0])) def test_pointers_replicated(self): s = readsav(path.join(DATA_PATH, 'struct_pointers_replicated.sav'), verbose=False) assert_identical(s.pointers_rep.g, np.repeat(np.float32(4.), 5).astype(np.object_)) assert_identical(s.pointers_rep.h, np.repeat(np.float32(4.), 5).astype(np.object_)) assert_(np.all(vect_id(s.pointers_rep.g) == vect_id(s.pointers_rep.h))) def test_pointers_replicated_3d(self): s = readsav(path.join(DATA_PATH, 'struct_pointers_replicated_3d.sav'), verbose=False) s_expect = np.repeat(np.float32(4.), 24).reshape(4, 3, 2).astype(np.object_) assert_identical(s.pointers_rep.g, s_expect) assert_identical(s.pointers_rep.h, s_expect) assert_(np.all(vect_id(s.pointers_rep.g) == vect_id(s.pointers_rep.h))) def test_arrays(self): s = readsav(path.join(DATA_PATH, 'struct_pointer_arrays.sav'), verbose=False) assert_array_identical(s.arrays.g[0], np.repeat(np.float32(4.), 2).astype(np.object_)) assert_array_identical(s.arrays.h[0], np.repeat(np.float32(4.), 3).astype(np.object_)) assert_(np.all(vect_id(s.arrays.g[0]) == id(s.arrays.g[0][0]))) assert_(np.all(vect_id(s.arrays.h[0]) == id(s.arrays.h[0][0]))) assert_(id(s.arrays.g[0][0]) == id(s.arrays.h[0][0])) def test_arrays_replicated(self): s = readsav(path.join(DATA_PATH, 'struct_pointer_arrays_replicated.sav'), verbose=False) # Check column types assert_(s.arrays_rep.g.dtype.type is np.object_) assert_(s.arrays_rep.h.dtype.type is np.object_) # Check column shapes assert_equal(s.arrays_rep.g.shape, (5, )) assert_equal(s.arrays_rep.h.shape, (5, )) # Check values for i in range(5): assert_array_identical(s.arrays_rep.g[i], np.repeat(np.float32(4.), 2).astype(np.object_)) assert_array_identical(s.arrays_rep.h[i], np.repeat(np.float32(4.), 3).astype(np.object_)) assert_(np.all(vect_id(s.arrays_rep.g[i]) == id(s.arrays_rep.g[0][0]))) assert_(np.all(vect_id(s.arrays_rep.h[i]) == id(s.arrays_rep.h[0][0]))) def test_arrays_replicated_3d(self): pth = path.join(DATA_PATH, 'struct_pointer_arrays_replicated_3d.sav') s = readsav(pth, verbose=False) # Check column types assert_(s.arrays_rep.g.dtype.type is np.object_) assert_(s.arrays_rep.h.dtype.type is np.object_) # Check column shapes assert_equal(s.arrays_rep.g.shape, (4, 3, 2)) assert_equal(s.arrays_rep.h.shape, (4, 3, 2)) # Check values for i in range(4): for j in range(3): for k in range(2): assert_array_identical(s.arrays_rep.g[i, j, k], np.repeat(np.float32(4.), 2).astype(np.object_)) assert_array_identical(s.arrays_rep.h[i, j, k], np.repeat(np.float32(4.), 3).astype(np.object_)) g0 = vect_id(s.arrays_rep.g[i, j, k]) g1 = id(s.arrays_rep.g[0, 0, 0][0]) assert np.all(g0 == g1) h0 = vect_id(s.arrays_rep.h[i, j, k]) h1 = id(s.arrays_rep.h[0, 0, 0][0]) assert np.all(h0 == h1)
TestPointerStructures