language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
eriklindernoren__ML-From-Scratch
mlfromscratch/deep_learning/layers.py
{ "start": 12047, "end": 14614 }
class ____(Layer): """Batch normalization. """ def __init__(self, momentum=0.99): self.momentum = momentum self.trainable = True self.eps = 0.01 self.running_mean = None self.running_var = None def initialize(self, optimizer): # Initialize the parameters self.gamma = np.ones(self.input_shape) self.beta = np.zeros(self.input_shape) # parameter optimizers self.gamma_opt = copy.copy(optimizer) self.beta_opt = copy.copy(optimizer) def parameters(self): return np.prod(self.gamma.shape) + np.prod(self.beta.shape) def forward_pass(self, X, training=True): # Initialize running mean and variance if first run if self.running_mean is None: self.running_mean = np.mean(X, axis=0) self.running_var = np.var(X, axis=0) if training and self.trainable: mean = np.mean(X, axis=0) var = np.var(X, axis=0) self.running_mean = self.momentum * self.running_mean + (1 - self.momentum) * mean self.running_var = self.momentum * self.running_var + (1 - self.momentum) * var else: mean = self.running_mean var = self.running_var # Statistics saved for backward pass self.X_centered = X - mean self.stddev_inv = 1 / np.sqrt(var + self.eps) X_norm = self.X_centered * self.stddev_inv output = self.gamma * X_norm + self.beta return output def backward_pass(self, accum_grad): # Save parameters used during the forward pass gamma = self.gamma # If the layer is trainable the parameters are updated if self.trainable: X_norm = self.X_centered * self.stddev_inv grad_gamma = np.sum(accum_grad * X_norm, axis=0) grad_beta = np.sum(accum_grad, axis=0) self.gamma = self.gamma_opt.update(self.gamma, grad_gamma) self.beta = self.beta_opt.update(self.beta, grad_beta) batch_size = accum_grad.shape[0] # The gradient of the loss with respect to the layer inputs (use weights and statistics from forward pass) accum_grad = (1 / batch_size) * gamma * self.stddev_inv * ( batch_size * accum_grad - np.sum(accum_grad, axis=0) - self.X_centered * self.stddev_inv**2 * np.sum(accum_grad * self.X_centered, axis=0) ) return 
accum_grad def output_shape(self): return self.input_shape
BatchNormalization
python
huggingface__transformers
src/transformers/models/donut/modeling_donut_swin.py
{ "start": 2593, "end": 3861 }
class ____(ModelOutput): r""" pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed): Average pooling of the last layer hidden-state. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ last_hidden_state: Optional[torch.FloatTensor] = None pooler_output: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None @dataclass @auto_docstring( custom_intro=""" DonutSwin outputs for image classification. """ ) # Copied from transformers.models.swin.modeling_swin.SwinImageClassifierOutput with Swin->DonutSwin
DonutSwinModelOutput
python
django__django
tests/postgres_tests/test_hstore.py
{ "start": 15305, "end": 17510 }
class ____(PostgreSQLSimpleTestCase): def test_simple_valid(self): validator = KeysValidator(keys=["a", "b"]) validator({"a": "foo", "b": "bar", "c": "baz"}) def test_missing_keys(self): validator = KeysValidator(keys=["a", "b"]) with self.assertRaises(exceptions.ValidationError) as cm: validator({"a": "foo", "c": "baz"}) self.assertEqual(cm.exception.messages[0], "Some keys were missing: b") self.assertEqual(cm.exception.code, "missing_keys") def test_strict_valid(self): validator = KeysValidator(keys=["a", "b"], strict=True) validator({"a": "foo", "b": "bar"}) def test_extra_keys(self): validator = KeysValidator(keys=["a", "b"], strict=True) with self.assertRaises(exceptions.ValidationError) as cm: validator({"a": "foo", "b": "bar", "c": "baz"}) self.assertEqual(cm.exception.messages[0], "Some unknown keys were provided: c") self.assertEqual(cm.exception.code, "extra_keys") def test_custom_messages(self): messages = { "missing_keys": "Foobar", } validator = KeysValidator(keys=["a", "b"], strict=True, messages=messages) with self.assertRaises(exceptions.ValidationError) as cm: validator({"a": "foo", "c": "baz"}) self.assertEqual(cm.exception.messages[0], "Foobar") self.assertEqual(cm.exception.code, "missing_keys") with self.assertRaises(exceptions.ValidationError) as cm: validator({"a": "foo", "b": "bar", "c": "baz"}) self.assertEqual(cm.exception.messages[0], "Some unknown keys were provided: c") self.assertEqual(cm.exception.code, "extra_keys") def test_deconstruct(self): messages = { "missing_keys": "Foobar", } validator = KeysValidator(keys=["a", "b"], strict=True, messages=messages) path, args, kwargs = validator.deconstruct() self.assertEqual(path, "django.contrib.postgres.validators.KeysValidator") self.assertEqual(args, ()) self.assertEqual( kwargs, {"keys": ["a", "b"], "strict": True, "messages": messages} )
TestValidator
python
huggingface__transformers
src/transformers/pipelines/any_to_any.py
{ "start": 1363, "end": 1445 }
class ____(enum.Enum): TENSORS = 0 NEW_TEXT = 1 FULL_TEXT = 2
ReturnType
python
sphinx-doc__sphinx
tests/roots/test-ext-autodoc/target/descriptor.py
{ "start": 0, "end": 304 }
class ____: """Descriptor class docstring.""" def __init__(self, doc): self.__doc__ = doc def __get__(self, obj, type=None): if obj is None: return self return 42 def meth(self): """Function.""" return 'The Answer'
CustomDataDescriptor
python
apache__thrift
lib/py/src/Thrift.py
{ "start": 4876, "end": 5555 }
class ____(dict): """A dictionary that is "frozen" like a frozenset""" def __init__(self, *args, **kwargs): super(TFrozenDict, self).__init__(*args, **kwargs) # Sort the items so they will be in a consistent order. # XOR in the hash of the class so we don't collide with # the hash of a list of tuples. self.__hashval = hash(TFrozenDict) ^ hash(tuple(sorted(self.items()))) def __setitem__(self, *args): raise TypeError("Can't modify frozen TFreezableDict") def __delitem__(self, *args): raise TypeError("Can't modify frozen TFreezableDict") def __hash__(self): return self.__hashval
TFrozenDict
python
numba__numba
numba/parfors/parfor.py
{ "start": 90878, "end": 97010 }
class ____: """ Find reduce() calls and convert them to parfors. """ def __init__(self, pass_states): self.pass_states = pass_states self.rewritten = [] def run(self, blocks): pass_states = self.pass_states topo_order = find_topo_order(blocks) for label in topo_order: block = blocks[label] new_body = [] equiv_set = pass_states.array_analysis.get_equiv_set(label) for instr in block.body: parfor = None if isinstance(instr, ir.Assign): loc = instr.loc lhs = instr.target expr = instr.value callname = guard(find_callname, pass_states.func_ir, expr) if (callname == ('reduce', 'builtins') or callname == ('reduce', '_functools')): # reduce function with generic function parfor = guard(self._reduce_to_parfor, equiv_set, lhs, expr.args, loc) if parfor: self.rewritten.append(dict( new=parfor, old=instr, reason='reduce', )) instr = parfor new_body.append(instr) block.body = new_body return def _reduce_to_parfor(self, equiv_set, lhs, args, loc): """ Convert a reduce call to a parfor. The call arguments should be (call_name, array, init_value). 
""" pass_states = self.pass_states scope = lhs.scope call_name = args[0] in_arr = args[1] arr_def = get_definition(pass_states.func_ir, in_arr.name) mask_var = None mask_indices = None # Search for array[boolean_mask] mask_query_result = guard(_find_mask, pass_states.typemap, pass_states.func_ir, arr_def) if mask_query_result: in_arr, mask_var, mask_typ, mask_indices = mask_query_result init_val = args[2] size_vars = equiv_set.get_shape(in_arr if mask_indices is None else mask_var) if size_vars is None: return None index_vars, loopnests = _mk_parfor_loops(pass_states.typemap, size_vars, scope, loc) mask_index = index_vars if mask_indices: # the following is never tested raise AssertionError("unreachable") index_vars = tuple(x if x else index_vars[0] for x in mask_indices) acc_var = lhs # init block has to init the reduction variable init_block = ir.Block(scope, loc) init_block.body.append(ir.Assign(init_val, acc_var, loc)) # produce loop body body_label = next_label() index_var, loop_body = self._mk_reduction_body(call_name, scope, loc, index_vars, in_arr, acc_var) if mask_indices: # the following is never tested raise AssertionError("unreachable") index_var = mask_index[0] if mask_var is not None: true_label = min(loop_body.keys()) false_label = max(loop_body.keys()) body_block = ir.Block(scope, loc) loop_body[body_label] = body_block mask = ir.Var(scope, mk_unique_var("$mask_val"), loc) pass_states.typemap[mask.name] = mask_typ mask_val = ir.Expr.getitem(mask_var, index_var, loc) body_block.body.extend([ ir.Assign(mask_val, mask, loc), ir.Branch(mask, true_label, false_label, loc) ]) parfor = Parfor(loopnests, init_block, loop_body, loc, index_var, equiv_set, ('{} function'.format(call_name), 'reduction'), pass_states.flags) if config.DEBUG_ARRAY_OPT >= 1: print("parfor from reduction") parfor.dump() return parfor def _mk_reduction_body(self, call_name, scope, loc, index_vars, in_arr, acc_var): """ Produce the body blocks for a reduction function indicated by 
call_name. """ from numba.core.inline_closurecall import check_reduce_func pass_states = self.pass_states reduce_func = get_definition(pass_states.func_ir, call_name) fcode = check_reduce_func(pass_states.func_ir, reduce_func) arr_typ = pass_states.typemap[in_arr.name] in_typ = arr_typ.dtype body_block = ir.Block(scope, loc) index_var, index_var_type = _make_index_var( pass_states.typemap, scope, index_vars, body_block) tmp_var = ir.Var(scope, mk_unique_var("$val"), loc) pass_states.typemap[tmp_var.name] = in_typ getitem_call = ir.Expr.getitem(in_arr, index_var, loc) pass_states.calltypes[getitem_call] = signature( in_typ, arr_typ, index_var_type) body_block.append(ir.Assign(getitem_call, tmp_var, loc)) reduce_f_ir = compile_to_numba_ir(fcode, pass_states.func_ir.func_id.func.__globals__, pass_states.typingctx, pass_states.targetctx, (in_typ, in_typ), pass_states.typemap, pass_states.calltypes) loop_body = reduce_f_ir.blocks end_label = next_label() end_block = ir.Block(scope, loc) loop_body[end_label] = end_block first_reduce_label = min(reduce_f_ir.blocks.keys()) first_reduce_block = reduce_f_ir.blocks[first_reduce_label] body_block.body.extend(first_reduce_block.body) first_reduce_block.body = body_block.body replace_arg_nodes(first_reduce_block, [acc_var, tmp_var]) replace_returns(loop_body, acc_var, end_label) return index_var, loop_body
ConvertReducePass
python
django-guardian__django-guardian
guardian/testapp/tests/test_direct_rel.py
{ "start": 566, "end": 4733 }
class ____(TestCase): def setUp(self): self.joe = User.objects.create_user("joe", "joe@example.com", "foobar") self.project = Project.objects.create(name="Foobar") def get_perm(self, codename): filters = {"content_type__app_label": "testapp", "codename": codename} return Permission.objects.get(**filters) def test_after_perm_is_created_without_shortcut(self): perm = self.get_perm("add_project") # we should not use assign here - if generic user obj perms model is # used then everything could go fine if using assign shortcut and we # would not be able to see any problem ProjectUserObjectPermission.objects.create( user=self.joe, permission=perm, content_object=self.project, ) self.assertTrue(self.joe.has_perm("add_project", self.project)) def test_assign_perm(self): assign_perm("add_project", self.joe, self.project) filters = { "content_object": self.project, "permission__codename": "add_project", "user": self.joe, } result = ProjectUserObjectPermission.objects.filter(**filters).count() self.assertEqual(result, 1) def test_remove_perm(self): assign_perm("add_project", self.joe, self.project) filters = { "content_object": self.project, "permission__codename": "add_project", "user": self.joe, } result = ProjectUserObjectPermission.objects.filter(**filters).count() self.assertEqual(result, 1) remove_perm("add_project", self.joe, self.project) result = ProjectUserObjectPermission.objects.filter(**filters).count() self.assertEqual(result, 0) def test_get_users_with_perms(self): User.objects.create_user("john", "john@foobar.com", "john") jane = User.objects.create_user("jane", "jane@foobar.com", "jane") assign_perm("add_project", self.joe, self.project) assign_perm("change_project", self.joe, self.project) assign_perm("change_project", jane, self.project) self.assertEqual( get_users_with_perms(self.project, attach_perms=True), { self.joe: ["add_project", "change_project"], jane: ["change_project"], }, ) def test_get_users_with_perms_plus_groups(self): 
User.objects.create_user("john", "john@foobar.com", "john") jane = User.objects.create_user("jane", "jane@foobar.com", "jane") group = Group.objects.create(name="devs") self.joe.groups.add(group) assign_perm("add_project", self.joe, self.project) assign_perm("change_project", group, self.project) assign_perm("change_project", jane, self.project) self.assertEqual( get_users_with_perms(self.project, attach_perms=True), { self.joe: ["add_project", "change_project"], jane: ["change_project"], }, ) def test_get_objects_for_user(self): foo = Project.objects.create(name="foo") bar = Project.objects.create(name="bar") assign_perm("add_project", self.joe, foo) assign_perm("add_project", self.joe, bar) assign_perm("change_project", self.joe, bar) result = get_objects_for_user(self.joe, "testapp.add_project") self.assertEqual(sorted(p.pk for p in result), sorted([foo.pk, bar.pk])) def test_get_all_permissions(self): foo = Project.objects.create(name="foo") assign_perm("add_project", self.joe, foo) assign_perm("change_project", self.joe, foo) result = self.joe.get_all_permissions(foo) self.assertEqual(result, {"add_project", "change_project"}) def test_get_all_permissions_no_object(self): foo = Project.objects.create(name="foo") assign_perm("add_project", self.joe, foo) assign_perm("change_project", self.joe, foo) result = self.joe.get_all_permissions() self.assertEqual(result, set()) @skipUnlessTestApp
TestDirectUserPermissions
python
huggingface__transformers
src/transformers/data/data_collator.py
{ "start": 27499, "end": 46685 }
class ____(DataCollatorMixin): """ Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they are not all of the same length. Args: tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]): The tokenizer used for encoding the data. mlm (`bool`, *optional*, defaults to `True`): Whether or not to use masked language modeling. If set to `False`, the labels are the same as the inputs with the padding tokens ignored (by setting them to -100). Otherwise, the labels are -100 for non-masked tokens and the value to predict for the masked token. whole_word_mask (`bool`, *optional*, defaults to `False`): Whether or not to mask whole words instead of individual tokens. mlm_probability (`float`, *optional*, defaults to 0.15): The probability with which to (randomly) mask tokens in the input, when `mlm` is set to `True`. mask_replace_prob (`float`, *optional*, defaults to 0.8): The probability with which masked tokens are replaced by the tokenizer's mask token (e.g., `[MASK]`). Defaults to 0.8, meaning 80% of the masked tokens will be replaced with `[MASK]`. Only works when `mlm` is set to `True`. random_replace_prob (`float`, *optional*, defaults to 0.1): The probability with which masked tokens are replaced by random tokens from the tokenizer's vocabulary. Defaults to 0.1, meaning 10% of the masked tokens will be replaced with random tokens. The remaining masked tokens (1 - mask_replace_prob - random_replace_prob) are left unchanged. Only works when `mlm` is set to `True`. pad_to_multiple_of (`int`, *optional*): If set, will pad the sequence to a multiple of the provided value. return_tensors (`str`): The type of Tensor to return. Allowable values are "np", or "pt". seed (`int`, *optional*): The seed to use for the random number generator for masking. If not provided, the global RNG will be used. 
<Tip> For best performance, this data collator should be used with a dataset having items that are dictionaries or BatchEncoding, with the `"special_tokens_mask"` key, as returned by a [`PreTrainedTokenizer`] or a [`PreTrainedTokenizerFast`] with the argument `return_special_tokens_mask=True`. <Example Options and Expectations> 1. Default Behavior: - `mask_replace_prob=0.8`, `random_replace_prob=0.1`. - Expect 80% of masked tokens replaced with `[MASK]`, 10% replaced with random tokens, and 10% left unchanged. 2. All masked tokens replaced by `[MASK]`: - `mask_replace_prob=1.0`, `random_replace_prob=0.0`. - Expect all masked tokens to be replaced with `[MASK]`. No tokens are left unchanged or replaced with random tokens. 3. No `[MASK]` replacement, only random tokens: - `mask_replace_prob=0.0`, `random_replace_prob=1.0`. - Expect all masked tokens to be replaced with random tokens. No `[MASK]` replacements or unchanged tokens. 4. Balanced replacement: - `mask_replace_prob=0.5`, `random_replace_prob=0.4`. - Expect 50% of masked tokens replaced with `[MASK]`, 40% replaced with random tokens, and 10% left unchanged. Note: The sum of `mask_replace_prob` and `random_replace_prob` must not exceed 1. If their sum is less than 1, the remaining proportion will consist of masked tokens left unchanged. </Tip> """ tokenizer: PreTrainedTokenizerBase mlm: bool = True whole_word_mask: bool = False mlm_probability: float | None = 0.15 mask_replace_prob: float = 0.8 random_replace_prob: float = 0.1 pad_to_multiple_of: int | None = None return_tensors: str = "pt" seed: int | None = None def __post_init__(self): if self.mlm: if self.tokenizer.mask_token is None: raise ValueError( "This tokenizer does not have a mask token which is necessary for masked language modeling. " "You should pass `mlm=False` to train on causal language modeling instead." 
) if self.mlm_probability is None or self.mlm_probability < 0 or self.mlm_probability > 1: raise ValueError("mlm_probability should be between 0 and 1.") self.mlm_probability = float(self.mlm_probability) elif self.whole_word_mask: raise ValueError( "Whole word masking can only be used with mlm=True." "If you want to use whole word masking, please set mlm=True." ) if self.mask_replace_prob + self.random_replace_prob > 1: raise ValueError("The sum of mask_replace_prob and random_replace_prob should not exceed 1") if self.mask_replace_prob < 0 or self.mask_replace_prob > 1: raise ValueError("mask_replace_prob should be between 0 and 1.") if self.random_replace_prob < 0 or self.random_replace_prob > 1: raise ValueError("random_replace_prob should be between 0 and 1.") self.mask_replace_prob = float(self.mask_replace_prob) self.random_replace_prob = float(self.random_replace_prob) if self.whole_word_mask: if not self.tokenizer.is_fast: warnings.warn( "Whole word masking depends on offset mapping which is only natively available with fast tokenizers.", UserWarning, ) if self.mask_replace_prob < 1: warnings.warn( "Random token replacement is not supported with whole word masking. " "Setting mask_replace_prob to 1.", ) self.mask_replace_prob = 1 self.random_replace_prob = 0 self.generator = None def get_generator(self, seed): if self.return_tensors == "pt": import torch return torch.Generator().manual_seed(seed) else: return np.random.default_rng(seed) def create_rng(self): if mp.current_process().name == "MainProcess": # If we are in the main process, we create a generator object with the seed self.generator = self.get_generator(self.seed) else: # If we are in a worker process (i.e using multiprocessing), we need to set a unique seed for each # worker's generator, generated as the main seed + the worker's ID. 
# (https://pytorch.org/docs/stable/data.html#randomness-in-multi-process-data-loading) # Only PyTorch DataLoader allows us to access the worker ID, and so we check for this. import torch worker_info = torch.utils.data.get_worker_info() if worker_info is None: error_string = ( "Worker process information is not available for seeding the generator. This may be because", "you are using multiprocessing without using a PyTorch DataLoader. The `seed` parameter can", "only be used when using multiprocessing with a PyTorch DataLoader. Please either use a", "single process or use a PyTorch DataLoader with multiple workers.", ) raise ValueError(error_string) self.generator = self.get_generator(self.seed + worker_info.id) def torch_call(self, examples: list[list[int] | Any | dict[str, Any]]) -> dict[str, Any]: # Handle dict or lists with proper padding and conversion to tensor. if self.seed and self.generator is None: # If we have a seed, we need to create a generator object. Subsequent calls to this function will use the same generator. # If no seed supplied, we will use the global RNG self.create_rng() if isinstance(examples[0], Mapping): batch = pad_without_fast_tokenizer_warning( self.tokenizer, examples, return_tensors="pt", pad_to_multiple_of=self.pad_to_multiple_of ) else: batch = { "input_ids": _torch_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of) } # If special token mask has been preprocessed, pop it from the dict. 
special_tokens_mask = batch.pop("special_tokens_mask", None) offset_mapping = batch.pop("offset_mapping", None) if self.mlm: batch["input_ids"], batch["labels"] = self.torch_mask_tokens( batch["input_ids"], special_tokens_mask=special_tokens_mask, offset_mapping=offset_mapping ) else: labels = batch["input_ids"].clone() if self.tokenizer.pad_token_id is not None: labels[labels == self.tokenizer.pad_token_id] = -100 batch["labels"] = labels return batch def torch_mask_tokens( self, inputs: Any, special_tokens_mask: Any | None = None, offset_mapping: Any | None = None ) -> tuple[Any, Any]: """ Prepare masked tokens inputs/labels for masked language modeling. """ import torch labels = inputs.clone() # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`) probability_matrix = torch.full(labels.shape, self.mlm_probability) if special_tokens_mask is None: special_tokens_mask = [ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist() ] if self.whole_word_mask: word_ids, no_mask_mask = self._calc_word_ids_and_prob_mask( to_numpy(offset_mapping), to_numpy(special_tokens_mask) ) no_mask_mask = torch.tensor(no_mask_mask, dtype=torch.bool) else: no_mask_mask = ( special_tokens_mask.bool() if isinstance(special_tokens_mask, torch.Tensor) else torch.tensor(special_tokens_mask, dtype=torch.bool) ) probability_matrix.masked_fill_(no_mask_mask, value=0.0) masked_indices = torch.bernoulli(probability_matrix, generator=self.generator).bool() if self.whole_word_mask: masked_indices = torch.BoolTensor(self._whole_word_mask(word_ids, masked_indices)) labels[~masked_indices] = -100 # We only compute loss on masked tokens # mask_replace_prob% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced = ( torch.bernoulli(torch.full(labels.shape, self.mask_replace_prob), generator=self.generator).bool() & masked_indices ) inputs[indices_replaced] = 
self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token) if self.mask_replace_prob == 1 or self.random_replace_prob == 0: return inputs, labels remaining_prob = 1 - self.mask_replace_prob # scaling the random_replace_prob to the remaining probability for example if # mask_replace_prob = 0.8 and random_replace_prob = 0.1, # then random_replace_prob_scaled = 0.1 / 0.2 = 0.5 random_replace_prob_scaled = self.random_replace_prob / remaining_prob # random_replace_prob% of the time, we replace masked input tokens with random word indices_random = ( torch.bernoulli(torch.full(labels.shape, random_replace_prob_scaled), generator=self.generator).bool() & masked_indices & ~indices_replaced ) random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long, generator=self.generator) inputs[indices_random] = random_words[indices_random] # The rest of the time ((1-random_replace_prob-mask_replace_prob)% of the time) we keep the masked input tokens unchanged return inputs, labels def numpy_call(self, examples: list[list[int] | Any | dict[str, Any]]) -> dict[str, Any]: # Handle dict or lists with proper padding and conversion to tensor. if self.seed and self.generator is None: # If we have a seed, we need to create a generator object. Subsequent calls to this function will use the same generator. # If no seed supplied, we will use the global RNG self.create_rng() if isinstance(examples[0], Mapping): batch = pad_without_fast_tokenizer_warning( self.tokenizer, examples, return_tensors="np", pad_to_multiple_of=self.pad_to_multiple_of ) else: batch = { "input_ids": _numpy_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of) } # If special token mask has been preprocessed, pop it from the dict. 
special_tokens_mask = batch.pop("special_tokens_mask", None) offset_mapping = batch.pop("offset_mapping", None) if self.mlm: batch["input_ids"], batch["labels"] = self.numpy_mask_tokens( batch["input_ids"], special_tokens_mask=special_tokens_mask, offset_mapping=offset_mapping ) else: labels = np.copy(batch["input_ids"]) if self.tokenizer.pad_token_id is not None: labels[labels == self.tokenizer.pad_token_id] = -100 batch["labels"] = labels return batch def numpy_mask_tokens( self, inputs: Any, special_tokens_mask: Any | None = None, offset_mapping: Any | None = None, ) -> tuple[Any, Any]: """ Prepare masked tokens inputs/labels for masked language modeling. """ labels = np.copy(inputs) # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`) probability_matrix = np.full(labels.shape, self.mlm_probability) if special_tokens_mask is None: special_tokens_mask = [ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist() ] if self.whole_word_mask: word_ids, no_mask_mask = self._calc_word_ids_and_prob_mask( to_numpy(offset_mapping), to_numpy(special_tokens_mask) ) else: no_mask_mask = ( special_tokens_mask.astype(bool) if isinstance(special_tokens_mask, np.ndarray) else np.array(special_tokens_mask, dtype=bool) ) probability_matrix[no_mask_mask] = 0 # Numpy doesn't have bernoulli, so we use a binomial with 1 trial if self.generator: masked_indices = self.generator.binomial(1, probability_matrix, size=probability_matrix.shape).astype(bool) else: masked_indices = np.random.binomial(1, probability_matrix, size=probability_matrix.shape).astype(bool) if self.whole_word_mask: masked_indices = self._whole_word_mask(word_ids, masked_indices) labels[~masked_indices] = -100 # We only compute loss on masked tokens # mask_replace_prob% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) if self.generator: indices_replaced = ( self.generator.binomial(1, 
self.mask_replace_prob, size=labels.shape).astype(bool) & masked_indices ) else: indices_replaced = ( np.random.binomial(1, self.mask_replace_prob, size=labels.shape).astype(bool) & masked_indices ) inputs[indices_replaced] = self.tokenizer.mask_token_id if self.mask_replace_prob == 1 or self.random_replace_prob == 0: return inputs, labels remaining_prob = 1 - self.mask_replace_prob # scaling the random_replace_prob to the remaining probability for example if # mask_replace_prob = 0.8 and random_replace_prob = 0.1, # then random_replace_prob_scaled = 0.1 / 0.2 = 0.5 random_replace_prob_scaled = self.random_replace_prob / remaining_prob if self.generator: indices_random = ( self.generator.binomial(1, random_replace_prob_scaled, size=labels.shape).astype(bool) & masked_indices & ~indices_replaced ) random_words = self.generator.integers( low=0, high=len(self.tokenizer), size=np.count_nonzero(indices_random), dtype=np.int64 ) else: indices_random = ( np.random.binomial(1, random_replace_prob_scaled, size=labels.shape).astype(bool) & masked_indices & ~indices_replaced ) random_words = np.random.randint( low=0, high=len(self.tokenizer), size=np.count_nonzero(indices_random), dtype=np.int64 ) inputs[indices_random] = random_words # The rest of the time (10% of the time) we keep the masked input tokens unchanged return inputs, labels @staticmethod def _calc_word_ids_and_prob_mask( offsets: np.ndarray[np.ndarray[tuple[int, int]]], special_tokens_mask: np.ndarray[np.ndarray[int]] ) -> tuple[np.ndarray[np.ndarray[int]], np.ndarray[np.ndarray[int]]]: """ Map tokens to word ids and create mask of tokens to not mask. Tokens that are part of the same word will have the same word id and we will only set a mask probability for the first token of each word. 
""" token_starts = offsets[:, :, 0] token_ends = offsets[:, :, 1] prev_token_ends = np.roll(token_ends, 1, axis=1) prev_token_ends[:, 0] = -1 # First token has no previous token prev_token_special = np.roll(special_tokens_mask, 1, axis=1) prev_token_special[:, 0] = 0 # Not special token AND (gap from previous or previous token was special) special_tokens_mask = special_tokens_mask.astype(bool) is_new_word = (~special_tokens_mask) & ((token_starts != prev_token_ends) | (prev_token_special == 1)) word_ids = np.cumsum(is_new_word, axis=1) word_ids[special_tokens_mask] = -1 prob_mask = ~is_new_word return word_ids, prob_mask @staticmethod def _whole_word_mask(word_ids: np.ndarray[np.ndarray[int]], mask: Any) -> Any: """ Mask whole words based on word ids and mask. """ mask = to_numpy(mask) valid_ids = word_ids != -1 # Create 3D mask where [batch, token_i, token_j] is True if token_i and token_j are the same word same_word = (word_ids[:, :, None] == word_ids[:, None, :]) & valid_ids[:, :, None] & valid_ids[:, None, :] # For each token, set True if any token in the same word is masked return np.any(same_word & mask[:, None, :], axis=2) @dataclass
DataCollatorForLanguageModeling
python
mamba-org__mamba
docs/source/tools/mermaid_inheritance.py
{ "start": 4464, "end": 4613 }
class ____(inheritance_diagram): """ A docutils node to use as a placeholder for the inheritance diagram. """ pass
mermaid_inheritance
python
mlflow__mlflow
mlflow/utils/databricks_utils.py
{ "start": 33582, "end": 44616 }
class ____: WORKSPACE_HOST_ENV_VAR = "_DATABRICKS_WORKSPACE_HOST" WORKSPACE_ID_ENV_VAR = "_DATABRICKS_WORKSPACE_ID" def __init__(self, host: str, workspace_id: str | None = None): self.host = host self.workspace_id = workspace_id @classmethod def from_environment(cls) -> DatabricksWorkspaceInfoType | None: if DatabricksWorkspaceInfo.WORKSPACE_HOST_ENV_VAR in os.environ: return DatabricksWorkspaceInfo( host=os.environ[DatabricksWorkspaceInfo.WORKSPACE_HOST_ENV_VAR], workspace_id=os.environ.get(DatabricksWorkspaceInfo.WORKSPACE_ID_ENV_VAR), ) else: return None def to_environment(self): env = { DatabricksWorkspaceInfo.WORKSPACE_HOST_ENV_VAR: self.host, } if self.workspace_id is not None: env[DatabricksWorkspaceInfo.WORKSPACE_ID_ENV_VAR] = self.workspace_id return env def get_databricks_workspace_info_from_uri(tracking_uri: str) -> DatabricksWorkspaceInfo | None: if not is_databricks_uri(tracking_uri): return None if is_databricks_default_tracking_uri(tracking_uri) and ( is_in_databricks_notebook() or is_in_databricks_job() ): workspace_host, workspace_id = get_workspace_info_from_dbutils() else: workspace_host, workspace_id = get_workspace_info_from_databricks_secrets(tracking_uri) if not workspace_id: _logger.info( "No workspace ID specified; if your Databricks workspaces share the same" " host URL, you may want to specify the workspace ID (along with the host" " information in the secret manager) for run lineage tracking. For more" " details on how to specify this information in the secret manager," " please refer to the Databricks MLflow documentation." 
) if workspace_host: return DatabricksWorkspaceInfo(host=workspace_host, workspace_id=workspace_id) else: return None def check_databricks_secret_scope_access(scope_name): if dbutils := _get_dbutils(): try: dbutils.secrets.list(scope_name) except Exception as e: _logger.warning( f"Unable to access Databricks secret scope '{scope_name}' for OpenAI credentials " "that will be used to deploy the model to Databricks Model Serving. " "Please verify that the current Databricks user has 'READ' permission for " "this scope. For more information, see " "https://mlflow.org/docs/latest/python_api/openai/index.html#credential-management-for-openai-on-databricks. " # noqa: E501 f"Error: {e}" ) def get_sgc_job_run_id() -> str | None: """ Retrieves the Serverless GPU Compute (SGC) job run ID from Databricks task values. This function is used to enable automatic run resumption for SGC jobs by fetching the job run ID from the Databricks task context. The job run ID is set by the Databricks platform when running SGC jobs. Returns: str or None: The SGC job run ID if available, otherwise None. Returns None in non-Databricks environments or when the task value is not set. 
""" try: dbutils = _get_dbutils() except _NoDbutilsError: return None try: job_run_id = dbutils.widgets.get("SERVERLESS_GPU_COMPUTE_ASSOCIATED_JOB_RUN_ID") _logger.debug(f"SGC job run ID: {job_run_id}") return job_run_id except Exception as e: _logger.debug(f"Failed to retrieve SGC job run ID from task values: {e}", exc_info=True) return None def _construct_databricks_run_url( host: str, experiment_id: str, run_id: str, workspace_id: str | None = None, artifact_path: str | None = None, ) -> str: run_url = host if workspace_id and workspace_id != "0": run_url += "?o=" + str(workspace_id) run_url += f"#mlflow/experiments/{experiment_id}/runs/{run_id}" if artifact_path is not None: run_url += f"/artifactPath/{artifact_path.lstrip('/')}" return run_url def _construct_databricks_model_version_url( host: str, name: str, version: str, workspace_id: str | None = None ) -> str: model_version_url = host if workspace_id and workspace_id != "0": model_version_url += "?o=" + str(workspace_id) model_version_url += f"#mlflow/models/{name}/versions/{version}" return model_version_url def _construct_databricks_logged_model_url( workspace_url: str, experiment_id: str, model_id: str, workspace_id: str | None = None ) -> str: """ Get a Databricks URL for a given registered model version in Unity Catalog. Args: workspace_url: The URL of the workspace the registered model is in. experiment_id: The ID of the experiment the model is logged to. model_id: The ID of the logged model to create the URL for. workspace_id: The ID of the workspace to include as a query parameter (if provided). Returns: The Databricks URL for a registered model in Unity Catalog. 
""" query = f"?o={workspace_id}" if (workspace_id and workspace_id != "0") else "" return f"{workspace_url}/ml/experiments/{experiment_id}/models/{model_id}{query}" def _construct_databricks_uc_registered_model_url( workspace_url: str, registered_model_name: str, version: str, workspace_id: str | None = None ) -> str: """ Get a Databricks URL for a given registered model version in Unity Catalog. Args: workspace_url: The URL of the workspace the registered model is in. registered_model_name: The full name of the registered model containing the version. version: The version of the registered model to create the URL for. workspace_id: The ID of the workspace to include as a query parameter (if provided). Returns: The Databricks URL for a registered model in Unity Catalog. """ path = registered_model_name.replace(".", "/") query = f"?o={workspace_id}" if (workspace_id and workspace_id != "0") else "" return f"{workspace_url}/explore/data/models/{path}/version/{version}{query}" def _print_databricks_deployment_job_url( model_name: str, job_id: str, workspace_url: str | None = None, workspace_id: str | None = None, ) -> str: if not workspace_url: workspace_url = get_workspace_url() if not workspace_id: workspace_id = get_workspace_id() # If there is no workspace_url, we cannot print the job URL if not workspace_url: return None query = f"?o={workspace_id}" if (workspace_id and workspace_id != "0") else "" job_url = f"{workspace_url}/jobs/{job_id}{query}" eprint(f"🔗 Linked deployment job to '{model_name}': {job_url}") return job_url def _get_databricks_creds_config(tracking_uri): # Note: # `_get_databricks_creds_config` reads credential token values or password and # returns a `DatabricksConfig` object # Databricks-SDK API doesn't support reading credential token values, # so that in this function we still have to use # configuration providers defined in legacy Databricks CLI python library to # read token values. 
profile, key_prefix = get_db_info_from_uri(tracking_uri) config = None if profile and key_prefix: # legacy way to read credentials by setting `tracking_uri` to 'databricks://scope:prefix' providers = [TrackingURIConfigProvider(tracking_uri)] elif profile: # If `tracking_uri` is 'databricks://<profile>' # MLflow should only read credentials from this profile providers = [ProfileConfigProvider(profile)] else: providers = [ # `EnvironmentVariableConfigProvider` should be prioritized at the highest level, # to align with Databricks-SDK behavior. EnvironmentVariableConfigProvider(), _dynamic_token_config_provider, ProfileConfigProvider(None), SparkTaskContextConfigProvider(), DatabricksModelServingConfigProvider(), ] for provider in providers: if provider: _config = provider.get_config() if _config is not None and _config.is_valid: config = _config break if not config or not config.host: _fail_malformed_databricks_auth(tracking_uri) return config def get_databricks_env_vars(tracking_uri): if not mlflow.utils.uri.is_databricks_uri(tracking_uri): return {} config = _get_databricks_creds_config(tracking_uri) if config.auth_type == "databricks-cli": raise MlflowException( "You configured authentication type to 'databricks-cli', in this case, MLflow cannot " "read credential values, so that MLflow cannot construct the databricks environment " "variables for child process authentication." 
) # We set these via environment variables so that only the current profile is exposed, rather # than all profiles in ~/.databrickscfg; maybe better would be to mount the necessary # part of ~/.databrickscfg into the container env_vars = {} env_vars[MLFLOW_TRACKING_URI.name] = "databricks" env_vars["DATABRICKS_HOST"] = config.host if config.username: env_vars["DATABRICKS_USERNAME"] = config.username if config.password: env_vars["DATABRICKS_PASSWORD"] = config.password if config.token: env_vars["DATABRICKS_TOKEN"] = config.token if config.insecure: env_vars["DATABRICKS_INSECURE"] = str(config.insecure) if config.client_id: env_vars["DATABRICKS_CLIENT_ID"] = config.client_id if config.client_secret: env_vars["DATABRICKS_CLIENT_SECRET"] = config.client_secret workspace_info = get_databricks_workspace_info_from_uri(tracking_uri) if workspace_info is not None: env_vars.update(workspace_info.to_environment()) return env_vars def _get_databricks_serverless_env_vars() -> dict[str, str]: """ Returns the environment variables required to to initialize WorkspaceClient in a subprocess with serverless compute. Note: Databricks authentication related environment variables such as DATABRICKS_HOST are set in the are set in the _capture_imported_modules function. """ envs = {} if "SPARK_REMOTE" in os.environ: envs["SPARK_LOCAL_REMOTE"] = os.environ["SPARK_REMOTE"] else: _logger.warning( "Missing required environment variable `SPARK_LOCAL_REMOTE` or `SPARK_REMOTE`. " "These are necessary to initialize the WorkspaceClient with serverless compute in " "a subprocess in Databricks for UC function execution. Setting the value to 'true'." ) envs["SPARK_LOCAL_REMOTE"] = "true" return envs
DatabricksWorkspaceInfo
python
pandas-dev__pandas
pandas/tests/extension/base/io.py
{ "start": 156, "end": 1474 }
class ____: @pytest.mark.parametrize("engine", ["c", "python"]) def test_EA_types(self, engine, data, request): if isinstance(data.dtype, pd.CategoricalDtype): # in parsers.pyx _convert_with_dtype there is special-casing for # Categorical that preempts _from_sequence_of_strings pass elif isinstance(data.dtype, pd.core.dtypes.dtypes.NumpyEADtype): # These get unwrapped internally so are treated as numpy dtypes # in the parsers.pyx code pass elif ( type(data)._from_sequence_of_strings.__func__ is ExtensionArray._from_sequence_of_strings.__func__ ): # i.e. the EA hasn't overridden _from_sequence_of_strings mark = pytest.mark.xfail( reason="_from_sequence_of_strings not implemented", raises=NotImplementedError, ) request.node.add_marker(mark) df = pd.DataFrame({"with_dtype": pd.Series(data, dtype=str(data.dtype))}) csv_output = df.to_csv(index=False, na_rep=np.nan) result = pd.read_csv( StringIO(csv_output), dtype={"with_dtype": str(data.dtype)}, engine=engine ) expected = df tm.assert_frame_equal(result, expected)
BaseParsingTests
python
encode__django-rest-framework
tests/test_response.py
{ "start": 2597, "end": 3718 }
class ____(generics.ListCreateAPIView): renderer_classes = (BrowsableAPIRenderer,) permission_classes = [] serializer_class = BasicModelSerializer queryset = BasicModel.objects.all() new_model_viewset_router = routers.DefaultRouter() new_model_viewset_router.register(r'', HTMLNewModelViewSet) urlpatterns = [ path('setbyview', MockViewSettingContentType.as_view(renderer_classes=[RendererA, RendererB, RendererC])), re_path(r'^.*\.(?P<format>.+)$', MockView.as_view(renderer_classes=[RendererA, RendererB, RendererC])), path('', MockView.as_view(renderer_classes=[RendererA, RendererB, RendererC])), path('html', HTMLView.as_view()), path('json', JSONView.as_view()), path('html1', HTMLView1.as_view()), path('html_new_model', HTMLNewModelView.as_view()), path('html_new_model_viewset', include(new_model_viewset_router.urls)), path('restframework', include('rest_framework.urls', namespace='rest_framework')) ] # TODO: Clean tests below - remove duplicates with above, better unit testing, ... @override_settings(ROOT_URLCONF='tests.test_response')
HTMLNewModelView
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 344247, "end": 344835 }
class ____(sgqlc.types.relay.Connection): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("edges", "nodes", "page_info", "total_count") edges = sgqlc.types.Field(sgqlc.types.list_of("UserEdge"), graphql_name="edges") nodes = sgqlc.types.Field(sgqlc.types.list_of("User"), graphql_name="nodes") page_info = sgqlc.types.Field( sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo" ) total_count = sgqlc.types.Field( sgqlc.types.non_null(Int), graphql_name="totalCount" )
FollowerConnection
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/dataclass_taint.py
{ "start": 3467, "end": 3796 }
class ____: foo: str bar: str def __init__(self, foo: str, bar: str) -> None: self.foo = bar self.bar = foo def test_dataclass_parameter_path(dc: DataClass): _test_sink(dc.bad) def test_dataclass_positional_parameter(x: int, y: str) -> None: _test_sink(DataClass(x, y))
DataClassSwapArguments
python
Farama-Foundation__Gymnasium
gymnasium/vector/vector_env.py
{ "start": 20843, "end": 22857 }
class ____(VectorWrapper): """Wraps the vectorized environment to allow a modular transformation of the observation. Equivalent to :class:`gymnasium.ObservationWrapper` for vectorized environments. """ def __init__(self, env: VectorEnv): """Vector observation wrapper that batch transforms observations. Args: env: Vector environment. """ super().__init__(env) if "autoreset_mode" not in env.metadata: warn( f"Vector environment ({env}) is missing `autoreset_mode` metadata key." ) else: assert ( env.metadata["autoreset_mode"] == AutoresetMode.NEXT_STEP or env.metadata["autoreset_mode"] == AutoresetMode.DISABLED ) def reset( self, *, seed: int | list[int] | None = None, options: dict[str, Any] | None = None, ) -> tuple[ObsType, dict[str, Any]]: """Modifies the observation returned from the environment ``reset`` using the :meth:`observation`.""" observations, infos = self.env.reset(seed=seed, options=options) return self.observations(observations), infos def step( self, actions: ActType ) -> tuple[ObsType, ArrayType, ArrayType, ArrayType, dict[str, Any]]: """Modifies the observation returned from the environment ``step`` using the :meth:`observation`.""" observations, rewards, terminations, truncations, infos = self.env.step(actions) return ( self.observations(observations), rewards, terminations, truncations, infos, ) def observations(self, observations: ObsType) -> ObsType: """Defines the vector observation transformation. Args: observations: A vector observation from the environment Returns: the transformed observation """ raise NotImplementedError
VectorObservationWrapper
python
walkccc__LeetCode
solutions/1583. Count Unhappy Friends/1583.py
{ "start": 0, "end": 598 }
class ____: def unhappyFriends( self, n: int, preferences: list[list[int]], pairs: list[list[int]], ) -> int: ans = 0 matches = [0] * n prefer = [{} for _ in range(n)] for x, y in pairs: matches[x] = y matches[y] = x for i in range(n): for j in range(n - 1): prefer[i][preferences[i][j]] = j for x in range(n): for u in prefer[x].keys(): y = matches[x] v = matches[u] if prefer[x][u] < prefer[x][y] and prefer[u][x] < prefer[u][v]: ans += 1 break return ans
Solution
python
spyder-ide__spyder
spyder/utils/syntaxhighlighters.py
{ "start": 66578, "end": 67666 }
class ____(CppSH): """NSIS Syntax Highlighter""" # Syntax highlighting rules: PROG = re.compile(make_nsis_patterns(), re.S) #============================================================================== # gettext highlighter #============================================================================== def make_gettext_patterns(): "Strongly inspired from idlelib.ColorDelegator.make_pat" kwstr = 'msgid msgstr' kw = r"\b" + any("keyword", kwstr.split()) + r"\b" fuzzy = any("builtin", [r"#,[^\n]*"]) links = any("normal", [r"#:[^\n]*"]) comment = any("comment", [r"#[^\n]*"]) number = any("number", [r"\b[+-]?[0-9]+[lL]?\b", r"\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\b", r"\b[+-]?[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?\b"]) sqstring = r"(\b[rRuU])?'[^'\\\n]*(\\.[^'\\\n]*)*'?" dqstring = r'(\b[rRuU])?"[^"\\\n]*(\\.[^"\\\n]*)*"?' string = any("string", [sqstring, dqstring]) return "|".join([kw, string, number, fuzzy, links, comment, any("SYNC", [r"\n"])])
NsisSH
python
vyperlang__vyper
vyper/exceptions.py
{ "start": 12582, "end": 12710 }
class ____(VyperInternalException): """An issue was not caught during type checking that should have been."""
TypeCheckFailure
python
Pylons__pyramid
tests/test_url.py
{ "start": 53657, "end": 54031 }
class ____: def __init__(self, result): self.result = result def generate(self, path, request, **kw): self.args = path, request, kw return self.result def makeabs(*elements): if WIN: # pragma: no cover return r'c:\\' + os.path.sep.join(elements) else: return os.path.sep + os.path.sep.join(elements)
DummyStaticURLInfo
python
scrapy__scrapy
tests/test_spidermiddleware_referer.py
{ "start": 22277, "end": 22508 }
class ____( MixinStrictOriginWhenCrossOrigin, TestRefererMiddleware ): settings = { "REFERRER_POLICY": "scrapy.spidermiddlewares.referer.StrictOriginWhenCrossOriginPolicy" }
TestSettingsStrictOriginWhenCrossOrigin
python
django__django
tests/sessions_tests/tests.py
{ "start": 49444, "end": 49896 }
class ____(SimpleTestCase): def test_clearsessions_unsupported(self): msg = ( "Session engine 'sessions_tests.no_clear_expired' doesn't " "support clearing expired sessions." ) with self.settings(SESSION_ENGINE="sessions_tests.no_clear_expired"): with self.assertRaisesMessage(management.CommandError, msg): management.call_command("clearsessions")
ClearSessionsCommandTests
python
sphinx-doc__sphinx
sphinx/domains/cpp/_ast.py
{ "start": 100738, "end": 102996 }
class ____(ASTDeclarator): def __init__(self, next: ASTDeclarator) -> None: assert next self.next = next def __eq__(self, other: object) -> bool: if not isinstance(other, ASTDeclaratorParamPack): return NotImplemented return self.next == other.next def __hash__(self) -> int: return hash(self.next) @property def name(self) -> ASTNestedName: return self.next.name @name.setter def name(self, name: ASTNestedName) -> None: self.next.name = name @property def function_params(self) -> list[ASTFunctionParameter]: return self.next.function_params @property def trailingReturn(self) -> ASTType: return self.next.trailingReturn @property def isPack(self) -> bool: return True def require_space_after_declSpecs(self) -> bool: return False def _stringify(self, transform: StringifyTransform) -> str: res = transform(self.next) if self.next.name: res = ' ' + res return '...' + res def get_modifiers_id(self, version: int) -> str: return self.next.get_modifiers_id(version) def get_param_id(self, version: int) -> str: # only the parameters (if any) return self.next.get_param_id(version) def get_ptr_suffix_id(self, version: int) -> str: if version == 1: return 'Dp' + self.next.get_ptr_suffix_id(version) else: return self.next.get_ptr_suffix_id(version) + 'Dp' def get_type_id(self, version: int, returnTypeId: str) -> str: assert version >= 2 # ReturnType... next, so we are part of the return type of 'next return self.next.get_type_id(version, returnTypeId='Dp' + returnTypeId) def is_function_type(self) -> bool: return self.next.is_function_type() def describe_signature( self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol ) -> None: verify_description_mode(mode) signode += addnodes.desc_sig_punctuation('...', '...') if self.next.name: signode += addnodes.desc_sig_space() self.next.describe_signature(signode, mode, env, symbol)
ASTDeclaratorParamPack
python
joke2k__faker
tests/providers/test_python.py
{ "start": 11129, "end": 12034 }
class ____(unittest.TestCase): def setUp(self): self.fake = Faker() Faker.seed(0) def test_pydict_with_default_nb_elements(self): result = self.fake.pydict() self.assertEqual(len(result), 10) def test_pydict_with_valid_number_of_nb_elements(self): result = self.fake.pydict(nb_elements=5) self.assertEqual(len(result), 5) def test_pydict_with_invalid_number_of_nb_elements(self): nb_elements = 10000 words_list_count = len(self.fake.get_words_list()) warning_msg = ( f"Number of nb_elements is greater than the number of words in the list." f" {words_list_count} words will be used." ) with pytest.warns(RuntimeWarning, match=warning_msg): result = self.fake.pydict(nb_elements=nb_elements) self.assertEqual(len(result), words_list_count)
TestPyDict
python
openai__gym
gym/envs/box2d/lunar_lander.py
{ "start": 1812, "end": 29272 }
class ____(gym.Env, EzPickle): """ ### Description This environment is a classic rocket trajectory optimization problem. According to Pontryagin's maximum principle, it is optimal to fire the engine at full throttle or turn it off. This is the reason why this environment has discrete actions: engine on or off. There are two environment versions: discrete or continuous. The landing pad is always at coordinates (0,0). The coordinates are the first two numbers in the state vector. Landing outside of the landing pad is possible. Fuel is infinite, so an agent can learn to fly and then land on its first attempt. To see a heuristic landing, run: ``` python gym/envs/box2d/lunar_lander.py ``` <!-- To play yourself, run: --> <!-- python examples/agents/keyboard_agent.py LunarLander-v2 --> ### Action Space There are four discrete actions available: do nothing, fire left orientation engine, fire main engine, fire right orientation engine. ### Observation Space The state is an 8-dimensional vector: the coordinates of the lander in `x` & `y`, its linear velocities in `x` & `y`, its angle, its angular velocity, and two booleans that represent whether each leg is in contact with the ground or not. ### Rewards After every step a reward is granted. The total reward of an episode is the sum of the rewards for all the steps within that episode. For each step, the reward: - is increased/decreased the closer/further the lander is to the landing pad. - is increased/decreased the slower/faster the lander is moving. - is decreased the more the lander is tilted (angle not horizontal). - is increased by 10 points for each leg that is in contact with the ground. - is decreased by 0.03 points each frame a side engine is firing. - is decreased by 0.3 points each frame the main engine is firing. The episode receive an additional reward of -100 or +100 points for crashing or landing safely respectively. An episode is considered a solution if it scores at least 200 points. 
### Starting State The lander starts at the top center of the viewport with a random initial force applied to its center of mass. ### Episode Termination The episode finishes if: 1) the lander crashes (the lander body gets in contact with the moon); 2) the lander gets outside of the viewport (`x` coordinate is greater than 1); 3) the lander is not awake. From the [Box2D docs](https://box2d.org/documentation/md__d_1__git_hub_box2d_docs_dynamics.html#autotoc_md61), a body which is not awake is a body which doesn't move and doesn't collide with any other body: > When Box2D determines that a body (or group of bodies) has come to rest, > the body enters a sleep state which has very little CPU overhead. If a > body is awake and collides with a sleeping body, then the sleeping body > wakes up. Bodies will also wake up if a joint or contact attached to > them is destroyed. ### Arguments To use to the _continuous_ environment, you need to specify the `continuous=True` argument like below: ```python import gym env = gym.make( "LunarLander-v2", continuous: bool = False, gravity: float = -10.0, enable_wind: bool = False, wind_power: float = 15.0, turbulence_power: float = 1.5, ) ``` If `continuous=True` is passed, continuous actions (corresponding to the throttle of the engines) will be used and the action space will be `Box(-1, +1, (2,), dtype=np.float32)`. The first coordinate of an action determines the throttle of the main engine, while the second coordinate specifies the throttle of the lateral boosters. Given an action `np.array([main, lateral])`, the main engine will be turned off completely if `main < 0` and the throttle scales affinely from 50% to 100% for `0 <= main <= 1` (in particular, the main engine doesn't work with less than 50% power). Similarly, if `-0.5 < lateral < 0.5`, the lateral boosters will not fire at all. If `lateral < -0.5`, the left booster will fire, and if `lateral > 0.5`, the right booster will fire. 
Again, the throttle scales affinely from 50% to 100% between -1 and -0.5 (and 0.5 and 1, respectively). `gravity` dictates the gravitational constant, this is bounded to be within 0 and -12. If `enable_wind=True` is passed, there will be wind effects applied to the lander. The wind is generated using the function `tanh(sin(2 k (t+C)) + sin(pi k (t+C)))`. `k` is set to 0.01. `C` is sampled randomly between -9999 and 9999. `wind_power` dictates the maximum magnitude of linear wind applied to the craft. The recommended value for `wind_power` is between 0.0 and 20.0. `turbulence_power` dictates the maximum magnitude of rotational wind applied to the craft. The recommended value for `turbulence_power` is between 0.0 and 2.0. ### Version History - v2: Count energy spent and in v0.24, added turbulance with wind power and turbulence_power parameters - v1: Legs contact with ground added in state vector; contact with ground give +10 reward points, and -10 if then lose contact; reward renormalized to 200; harder initial random push. 
- v0: Initial version <!-- ### References --> ### Credits Created by Oleg Klimov """ metadata = { "render_modes": ["human", "rgb_array"], "render_fps": FPS, } def __init__( self, render_mode: Optional[str] = None, continuous: bool = False, gravity: float = -10.0, enable_wind: bool = False, wind_power: float = 15.0, turbulence_power: float = 1.5, ): EzPickle.__init__( self, render_mode, continuous, gravity, enable_wind, wind_power, turbulence_power, ) assert ( -12.0 < gravity and gravity < 0.0 ), f"gravity (current value: {gravity}) must be between -12 and 0" self.gravity = gravity if 0.0 > wind_power or wind_power > 20.0: warnings.warn( colorize( f"WARN: wind_power value is recommended to be between 0.0 and 20.0, (current value: {wind_power})", "yellow", ), ) self.wind_power = wind_power if 0.0 > turbulence_power or turbulence_power > 2.0: warnings.warn( colorize( f"WARN: turbulence_power value is recommended to be between 0.0 and 2.0, (current value: {turbulence_power})", "yellow", ), ) self.turbulence_power = turbulence_power self.enable_wind = enable_wind self.wind_idx = np.random.randint(-9999, 9999) self.torque_idx = np.random.randint(-9999, 9999) self.screen: pygame.Surface = None self.clock = None self.isopen = True self.world = Box2D.b2World(gravity=(0, gravity)) self.moon = None self.lander: Optional[Box2D.b2Body] = None self.particles = [] self.prev_reward = None self.continuous = continuous low = np.array( [ # these are bounds for position # realistically the environment should have ended # long before we reach more than 50% outside -1.5, -1.5, # velocity bounds is 5x rated speed -5.0, -5.0, -math.pi, -5.0, -0.0, -0.0, ] ).astype(np.float32) high = np.array( [ # these are bounds for position # realistically the environment should have ended # long before we reach more than 50% outside 1.5, 1.5, # velocity bounds is 5x rated speed 5.0, 5.0, math.pi, 5.0, 1.0, 1.0, ] ).astype(np.float32) # useful range is -1 .. 
+1, but spikes can be higher self.observation_space = spaces.Box(low, high) if self.continuous: # Action is two floats [main engine, left-right engines]. # Main engine: -1..0 off, 0..+1 throttle from 50% to 100% power. Engine can't work with less than 50% power. # Left-right: -1.0..-0.5 fire left engine, +0.5..+1.0 fire right engine, -0.5..0.5 off self.action_space = spaces.Box(-1, +1, (2,), dtype=np.float32) else: # Nop, fire left engine, main engine, right engine self.action_space = spaces.Discrete(4) self.render_mode = render_mode def _destroy(self): if not self.moon: return self.world.contactListener = None self._clean_particles(True) self.world.DestroyBody(self.moon) self.moon = None self.world.DestroyBody(self.lander) self.lander = None self.world.DestroyBody(self.legs[0]) self.world.DestroyBody(self.legs[1]) def reset( self, *, seed: Optional[int] = None, options: Optional[dict] = None, ): super().reset(seed=seed) self._destroy() self.world.contactListener_keepref = ContactDetector(self) self.world.contactListener = self.world.contactListener_keepref self.game_over = False self.prev_shaping = None W = VIEWPORT_W / SCALE H = VIEWPORT_H / SCALE # terrain CHUNKS = 11 height = self.np_random.uniform(0, H / 2, size=(CHUNKS + 1,)) chunk_x = [W / (CHUNKS - 1) * i for i in range(CHUNKS)] self.helipad_x1 = chunk_x[CHUNKS // 2 - 1] self.helipad_x2 = chunk_x[CHUNKS // 2 + 1] self.helipad_y = H / 4 height[CHUNKS // 2 - 2] = self.helipad_y height[CHUNKS // 2 - 1] = self.helipad_y height[CHUNKS // 2 + 0] = self.helipad_y height[CHUNKS // 2 + 1] = self.helipad_y height[CHUNKS // 2 + 2] = self.helipad_y smooth_y = [ 0.33 * (height[i - 1] + height[i + 0] + height[i + 1]) for i in range(CHUNKS) ] self.moon = self.world.CreateStaticBody( shapes=edgeShape(vertices=[(0, 0), (W, 0)]) ) self.sky_polys = [] for i in range(CHUNKS - 1): p1 = (chunk_x[i], smooth_y[i]) p2 = (chunk_x[i + 1], smooth_y[i + 1]) self.moon.CreateEdgeFixture(vertices=[p1, p2], density=0, friction=0.1) 
self.sky_polys.append([p1, p2, (p2[0], H), (p1[0], H)]) self.moon.color1 = (0.0, 0.0, 0.0) self.moon.color2 = (0.0, 0.0, 0.0) initial_y = VIEWPORT_H / SCALE self.lander: Box2D.b2Body = self.world.CreateDynamicBody( position=(VIEWPORT_W / SCALE / 2, initial_y), angle=0.0, fixtures=fixtureDef( shape=polygonShape( vertices=[(x / SCALE, y / SCALE) for x, y in LANDER_POLY] ), density=5.0, friction=0.1, categoryBits=0x0010, maskBits=0x001, # collide only with ground restitution=0.0, ), # 0.99 bouncy ) self.lander.color1 = (128, 102, 230) self.lander.color2 = (77, 77, 128) self.lander.ApplyForceToCenter( ( self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM), self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM), ), True, ) self.legs = [] for i in [-1, +1]: leg = self.world.CreateDynamicBody( position=(VIEWPORT_W / SCALE / 2 - i * LEG_AWAY / SCALE, initial_y), angle=(i * 0.05), fixtures=fixtureDef( shape=polygonShape(box=(LEG_W / SCALE, LEG_H / SCALE)), density=1.0, restitution=0.0, categoryBits=0x0020, maskBits=0x001, ), ) leg.ground_contact = False leg.color1 = (128, 102, 230) leg.color2 = (77, 77, 128) rjd = revoluteJointDef( bodyA=self.lander, bodyB=leg, localAnchorA=(0, 0), localAnchorB=(i * LEG_AWAY / SCALE, LEG_DOWN / SCALE), enableMotor=True, enableLimit=True, maxMotorTorque=LEG_SPRING_TORQUE, motorSpeed=+0.3 * i, # low enough not to jump back into the sky ) if i == -1: rjd.lowerAngle = ( +0.9 - 0.5 ) # The most esoteric numbers here, angled legs have freedom to travel within rjd.upperAngle = +0.9 else: rjd.lowerAngle = -0.9 rjd.upperAngle = -0.9 + 0.5 leg.joint = self.world.CreateJoint(rjd) self.legs.append(leg) self.drawlist = [self.lander] + self.legs if self.render_mode == "human": self.render() return self.step(np.array([0, 0]) if self.continuous else 0)[0], {} def _create_particle(self, mass, x, y, ttl): p = self.world.CreateDynamicBody( position=(x, y), angle=0.0, fixtures=fixtureDef( shape=circleShape(radius=2 / SCALE, pos=(0, 0)), density=mass, 
friction=0.1, categoryBits=0x0100, maskBits=0x001, # collide only with ground restitution=0.3, ), ) p.ttl = ttl self.particles.append(p) self._clean_particles(False) return p def _clean_particles(self, all): while self.particles and (all or self.particles[0].ttl < 0): self.world.DestroyBody(self.particles.pop(0)) def step(self, action): assert self.lander is not None # Update wind assert self.lander is not None, "You forgot to call reset()" if self.enable_wind and not ( self.legs[0].ground_contact or self.legs[1].ground_contact ): # the function used for wind is tanh(sin(2 k x) + sin(pi k x)), # which is proven to never be periodic, k = 0.01 wind_mag = ( math.tanh( math.sin(0.02 * self.wind_idx) + (math.sin(math.pi * 0.01 * self.wind_idx)) ) * self.wind_power ) self.wind_idx += 1 self.lander.ApplyForceToCenter( (wind_mag, 0.0), True, ) # the function used for torque is tanh(sin(2 k x) + sin(pi k x)), # which is proven to never be periodic, k = 0.01 torque_mag = math.tanh( math.sin(0.02 * self.torque_idx) + (math.sin(math.pi * 0.01 * self.torque_idx)) ) * (self.turbulence_power) self.torque_idx += 1 self.lander.ApplyTorque( (torque_mag), True, ) if self.continuous: action = np.clip(action, -1, +1).astype(np.float32) else: assert self.action_space.contains( action ), f"{action!r} ({type(action)}) invalid " # Engines tip = (math.sin(self.lander.angle), math.cos(self.lander.angle)) side = (-tip[1], tip[0]) dispersion = [self.np_random.uniform(-1.0, +1.0) / SCALE for _ in range(2)] m_power = 0.0 if (self.continuous and action[0] > 0.0) or ( not self.continuous and action == 2 ): # Main engine if self.continuous: m_power = (np.clip(action[0], 0.0, 1.0) + 1.0) * 0.5 # 0.5..1.0 assert m_power >= 0.5 and m_power <= 1.0 else: m_power = 1.0 # 4 is move a bit downwards, +-2 for randomness ox = tip[0] * (4 / SCALE + 2 * dispersion[0]) + side[0] * dispersion[1] oy = -tip[1] * (4 / SCALE + 2 * dispersion[0]) - side[1] * dispersion[1] impulse_pos = (self.lander.position[0] + ox, 
self.lander.position[1] + oy) p = self._create_particle( 3.5, # 3.5 is here to make particle speed adequate impulse_pos[0], impulse_pos[1], m_power, ) # particles are just a decoration p.ApplyLinearImpulse( (ox * MAIN_ENGINE_POWER * m_power, oy * MAIN_ENGINE_POWER * m_power), impulse_pos, True, ) self.lander.ApplyLinearImpulse( (-ox * MAIN_ENGINE_POWER * m_power, -oy * MAIN_ENGINE_POWER * m_power), impulse_pos, True, ) s_power = 0.0 if (self.continuous and np.abs(action[1]) > 0.5) or ( not self.continuous and action in [1, 3] ): # Orientation engines if self.continuous: direction = np.sign(action[1]) s_power = np.clip(np.abs(action[1]), 0.5, 1.0) assert s_power >= 0.5 and s_power <= 1.0 else: direction = action - 2 s_power = 1.0 ox = tip[0] * dispersion[0] + side[0] * ( 3 * dispersion[1] + direction * SIDE_ENGINE_AWAY / SCALE ) oy = -tip[1] * dispersion[0] - side[1] * ( 3 * dispersion[1] + direction * SIDE_ENGINE_AWAY / SCALE ) impulse_pos = ( self.lander.position[0] + ox - tip[0] * 17 / SCALE, self.lander.position[1] + oy + tip[1] * SIDE_ENGINE_HEIGHT / SCALE, ) p = self._create_particle(0.7, impulse_pos[0], impulse_pos[1], s_power) p.ApplyLinearImpulse( (ox * SIDE_ENGINE_POWER * s_power, oy * SIDE_ENGINE_POWER * s_power), impulse_pos, True, ) self.lander.ApplyLinearImpulse( (-ox * SIDE_ENGINE_POWER * s_power, -oy * SIDE_ENGINE_POWER * s_power), impulse_pos, True, ) self.world.Step(1.0 / FPS, 6 * 30, 2 * 30) pos = self.lander.position vel = self.lander.linearVelocity state = [ (pos.x - VIEWPORT_W / SCALE / 2) / (VIEWPORT_W / SCALE / 2), (pos.y - (self.helipad_y + LEG_DOWN / SCALE)) / (VIEWPORT_H / SCALE / 2), vel.x * (VIEWPORT_W / SCALE / 2) / FPS, vel.y * (VIEWPORT_H / SCALE / 2) / FPS, self.lander.angle, 20.0 * self.lander.angularVelocity / FPS, 1.0 if self.legs[0].ground_contact else 0.0, 1.0 if self.legs[1].ground_contact else 0.0, ] assert len(state) == 8 reward = 0 shaping = ( -100 * np.sqrt(state[0] * state[0] + state[1] * state[1]) - 100 * np.sqrt(state[2] 
* state[2] + state[3] * state[3]) - 100 * abs(state[4]) + 10 * state[6] + 10 * state[7] ) # And ten points for legs contact, the idea is if you # lose contact again after landing, you get negative reward if self.prev_shaping is not None: reward = shaping - self.prev_shaping self.prev_shaping = shaping reward -= ( m_power * 0.30 ) # less fuel spent is better, about -30 for heuristic landing reward -= s_power * 0.03 terminated = False if self.game_over or abs(state[0]) >= 1.0: terminated = True reward = -100 if not self.lander.awake: terminated = True reward = +100 if self.render_mode == "human": self.render() return np.array(state, dtype=np.float32), reward, terminated, False, {} def render(self): if self.render_mode is None: gym.logger.warn( "You are calling render method without specifying any render mode. " "You can specify the render_mode at initialization, " f'e.g. gym("{self.spec.id}", render_mode="rgb_array")' ) return try: import pygame from pygame import gfxdraw except ImportError: raise DependencyNotInstalled( "pygame is not installed, run `pip install gym[box2d]`" ) if self.screen is None and self.render_mode == "human": pygame.init() pygame.display.init() self.screen = pygame.display.set_mode((VIEWPORT_W, VIEWPORT_H)) if self.clock is None: self.clock = pygame.time.Clock() self.surf = pygame.Surface((VIEWPORT_W, VIEWPORT_H)) pygame.transform.scale(self.surf, (SCALE, SCALE)) pygame.draw.rect(self.surf, (255, 255, 255), self.surf.get_rect()) for obj in self.particles: obj.ttl -= 0.15 obj.color1 = ( int(max(0.2, 0.15 + obj.ttl) * 255), int(max(0.2, 0.5 * obj.ttl) * 255), int(max(0.2, 0.5 * obj.ttl) * 255), ) obj.color2 = ( int(max(0.2, 0.15 + obj.ttl) * 255), int(max(0.2, 0.5 * obj.ttl) * 255), int(max(0.2, 0.5 * obj.ttl) * 255), ) self._clean_particles(False) for p in self.sky_polys: scaled_poly = [] for coord in p: scaled_poly.append((coord[0] * SCALE, coord[1] * SCALE)) pygame.draw.polygon(self.surf, (0, 0, 0), scaled_poly) gfxdraw.aapolygon(self.surf, 
scaled_poly, (0, 0, 0)) for obj in self.particles + self.drawlist: for f in obj.fixtures: trans = f.body.transform if type(f.shape) is circleShape: pygame.draw.circle( self.surf, color=obj.color1, center=trans * f.shape.pos * SCALE, radius=f.shape.radius * SCALE, ) pygame.draw.circle( self.surf, color=obj.color2, center=trans * f.shape.pos * SCALE, radius=f.shape.radius * SCALE, ) else: path = [trans * v * SCALE for v in f.shape.vertices] pygame.draw.polygon(self.surf, color=obj.color1, points=path) gfxdraw.aapolygon(self.surf, path, obj.color1) pygame.draw.aalines( self.surf, color=obj.color2, points=path, closed=True ) for x in [self.helipad_x1, self.helipad_x2]: x = x * SCALE flagy1 = self.helipad_y * SCALE flagy2 = flagy1 + 50 pygame.draw.line( self.surf, color=(255, 255, 255), start_pos=(x, flagy1), end_pos=(x, flagy2), width=1, ) pygame.draw.polygon( self.surf, color=(204, 204, 0), points=[ (x, flagy2), (x, flagy2 - 10), (x + 25, flagy2 - 5), ], ) gfxdraw.aapolygon( self.surf, [(x, flagy2), (x, flagy2 - 10), (x + 25, flagy2 - 5)], (204, 204, 0), ) self.surf = pygame.transform.flip(self.surf, False, True) if self.render_mode == "human": assert self.screen is not None self.screen.blit(self.surf, (0, 0)) pygame.event.pump() self.clock.tick(self.metadata["render_fps"]) pygame.display.flip() elif self.render_mode == "rgb_array": return np.transpose( np.array(pygame.surfarray.pixels3d(self.surf)), axes=(1, 0, 2) ) def close(self): if self.screen is not None: import pygame pygame.display.quit() pygame.quit() self.isopen = False def heuristic(env, s): """ The heuristic for 1. Testing 2. Demonstration rollout. Args: env: The environment s (list): The state. 
Attributes: s[0] is the horizontal coordinate s[1] is the vertical coordinate s[2] is the horizontal speed s[3] is the vertical speed s[4] is the angle s[5] is the angular speed s[6] 1 if first leg has contact, else 0 s[7] 1 if second leg has contact, else 0 Returns: a: The heuristic to be fed into the step function defined above to determine the next step and reward. """ angle_targ = s[0] * 0.5 + s[2] * 1.0 # angle should point towards center if angle_targ > 0.4: angle_targ = 0.4 # more than 0.4 radians (22 degrees) is bad if angle_targ < -0.4: angle_targ = -0.4 hover_targ = 0.55 * np.abs( s[0] ) # target y should be proportional to horizontal offset angle_todo = (angle_targ - s[4]) * 0.5 - (s[5]) * 1.0 hover_todo = (hover_targ - s[1]) * 0.5 - (s[3]) * 0.5 if s[6] or s[7]: # legs have contact angle_todo = 0 hover_todo = ( -(s[3]) * 0.5 ) # override to reduce fall speed, that's all we need after contact if env.continuous: a = np.array([hover_todo * 20 - 1, -angle_todo * 20]) a = np.clip(a, -1, +1) else: a = 0 if hover_todo > np.abs(angle_todo) and hover_todo > 0.05: a = 2 elif angle_todo < -0.05: a = 3 elif angle_todo > +0.05: a = 1 return a def demo_heuristic_lander(env, seed=None, render=False): total_reward = 0 steps = 0 s, info = env.reset(seed=seed) while True: a = heuristic(env, s) s, r, terminated, truncated, info = step_api_compatibility(env.step(a), True) total_reward += r if render: still_open = env.render() if still_open is False: break if steps % 20 == 0 or terminated or truncated: print("observations:", " ".join([f"{x:+0.2f}" for x in s])) print(f"step {steps} total_reward {total_reward:+0.2f}") steps += 1 if terminated or truncated: break if render: env.close() return total_reward
LunarLander
python
langchain-ai__langchain
libs/langchain_v1/langchain/agents/structured_output.py
{ "start": 5691, "end": 7927 }
class ____(Generic[SchemaT]): """Use a tool calling strategy for model responses.""" schema: type[SchemaT] """Schema for the tool calls.""" schema_specs: list[_SchemaSpec[SchemaT]] """Schema specs for the tool calls.""" tool_message_content: str | None """The content of the tool message to be returned when the model calls an artificial structured output tool.""" handle_errors: ( bool | str | type[Exception] | tuple[type[Exception], ...] | Callable[[Exception], str] ) """Error handling strategy for structured output via `ToolStrategy`. - `True`: Catch all errors with default error template - `str`: Catch all errors with this custom message - `type[Exception]`: Only catch this exception type with default message - `tuple[type[Exception], ...]`: Only catch these exception types with default message - `Callable[[Exception], str]`: Custom function that returns error message - `False`: No retry, let exceptions propagate """ def __init__( self, schema: type[SchemaT], *, tool_message_content: str | None = None, handle_errors: bool | str | type[Exception] | tuple[type[Exception], ...] | Callable[[Exception], str] = True, ) -> None: """Initialize `ToolStrategy`. Initialize `ToolStrategy` with schemas, tool message content, and error handling strategy. """ self.schema = schema self.tool_message_content = tool_message_content self.handle_errors = handle_errors def _iter_variants(schema: Any) -> Iterable[Any]: """Yield leaf variants from Union and JSON Schema oneOf.""" if get_origin(schema) in (UnionType, Union): for arg in get_args(schema): yield from _iter_variants(arg) return if isinstance(schema, dict) and "oneOf" in schema: for sub in schema.get("oneOf", []): yield from _iter_variants(sub) return yield schema self.schema_specs = [_SchemaSpec(s) for s in _iter_variants(schema)] @dataclass(init=False)
ToolStrategy
python
keras-team__keras
keras/src/callbacks/backup_and_restore_test.py
{ "start": 1190, "end": 8018 }
class ____(testing.TestCase): def make_model(self): model = Sequential( [ layers.Input((3,)), CanaryLayer(), layers.Dense(1), ] ) model.compile( loss="mse", optimizer="sgd", metrics=["mse"], ) return model # Check invalid save_freq, both string and non integer def test_save_freq_unknown_error(self): with self.assertRaisesRegex(ValueError, expected_regex="Invalid value"): callbacks.BackupAndRestore( backup_dir="backup_dir", save_freq="batch" ) with self.assertRaisesRegex(ValueError, expected_regex="Invalid value"): callbacks.BackupAndRestore(backup_dir="backup_dir", save_freq=0.15) # Checking if after interruption, correct model params and # weights are loaded in step-wise backup @pytest.mark.requires_trainable_backend def test_best_case_step(self): temp_dir = self.get_temp_dir() backup_dir = file_utils.join(temp_dir, "subdir") self.assertFalse(file_utils.exists(backup_dir)) model = self.make_model() cbk = callbacks.BackupAndRestore(backup_dir, save_freq=1) x_train = np.random.random((10, 3)) y_train = np.random.random((10, 1)) try: model.fit( x_train, y_train, batch_size=4, callbacks=[ cbk, InterruptingCallback(steps_int=2, epoch_int=None), ], epochs=2, verbose=0, ) except RuntimeError: self.assertTrue(file_utils.exists(backup_dir)) self.assertEqual(cbk._current_epoch, 0) self.assertEqual(cbk._last_batch_seen, 1) self.assertEqual(int(model.layers[0].counter.value), 2) hist = model.fit( x_train, y_train, batch_size=4, callbacks=[cbk], epochs=5 ) self.assertEqual(cbk._current_epoch, 5) self.assertEqual(hist.epoch[-1], 4) self.assertEqual(int(model.layers[0].counter.value), 17) # Checking if after interruption, correct model params and # weights are loaded in epoch-wise backup @pytest.mark.requires_trainable_backend def test_best_case_epoch(self): temp_dir = self.get_temp_dir() backup_dir = file_utils.join(temp_dir, "subdir") self.assertFalse(file_utils.exists(backup_dir)) model = self.make_model() self.assertEqual(int(model.layers[0].counter.value), 0) cbk = 
callbacks.BackupAndRestore( backup_dir=backup_dir, save_freq="epoch" ) x_train = np.random.random((10, 3)) y_train = np.random.random((10, 1)) try: model.fit( x_train, y_train, batch_size=4, callbacks=[ cbk, InterruptingCallback(steps_int=None, epoch_int=2), ], epochs=6, verbose=0, ) except RuntimeError: self.assertEqual(cbk._current_epoch, 2) self.assertTrue(file_utils.exists(backup_dir)) self.assertEqual(int(model.layers[0].counter.value), 6) hist = model.fit( x_train, y_train, batch_size=4, callbacks=[cbk], epochs=5 ) self.assertEqual(cbk._current_epoch, 5) self.assertEqual(hist.epoch[-1], 4) self.assertEqual(int(model.layers[0].counter.value), 5 * 3) # Checking if after interruption and weights corruption, previous model # params and weights are loaded @pytest.mark.requires_trainable_backend def test_backup_corrupted(self): temp_dir = self.get_temp_dir() backup_dir = file_utils.join(temp_dir, "subdir") self.assertFalse(file_utils.exists(backup_dir)) model = self.make_model() self.assertEqual(int(model.layers[0].counter.value), 0) cbk = callbacks.BackupAndRestore( backup_dir=backup_dir, save_freq="epoch", double_checkpoint=True ) x_train = np.random.random((10, 3)) y_train = np.random.random((10, 1)) try: model.fit( x_train, y_train, batch_size=4, callbacks=[ cbk, InterruptingCallback(steps_int=None, epoch_int=2), ], epochs=6, verbose=0, ) except RuntimeError: self.assertEqual(cbk._current_epoch, 2) self.assertTrue(file_utils.exists(backup_dir)) self.assertTrue(file_utils.exists(cbk._weights_path)) self.assertTrue(file_utils.exists(cbk._training_metadata_path)) self.assertTrue(file_utils.exists(cbk._prev_weights_path)) self.assertTrue(file_utils.exists(cbk._prev_training_metadata_path)) self.assertEqual(int(model.layers[0].counter.value), 6) # Corruption weights with file_utils.File(cbk._weights_path, "w") as f: f.write("0") hist = model.fit( x_train, y_train, batch_size=4, callbacks=[cbk], epochs=5 ) self.assertEqual(cbk._current_epoch, 5) 
self.assertEqual(hist.epoch[-1], 4) self.assertEqual(int(model.layers[0].counter.value), 5 * 3) # Checking if after interruption, when model is deleted @pytest.mark.requires_trainable_backend def test_model_deleted_case_epoch(self): temp_dir = self.get_temp_dir() backup_dir = file_utils.join(temp_dir, "subdir") self.assertFalse(file_utils.exists(backup_dir)) model = self.make_model() cbk = callbacks.BackupAndRestore(backup_dir, save_freq="epoch") x_train = np.random.random((10, 3)) y_train = np.random.random((10, 1)) model.fit( x_train, y_train, batch_size=4, callbacks=[cbk], epochs=2, verbose=0, ) self.assertFalse(file_utils.exists(backup_dir)) def test_backup_dir_empty_error(self): with self.assertRaisesRegex( ValueError, expected_regex="Empty `backup_dir` argument passed" ): callbacks.BackupAndRestore(backup_dir="", save_freq="epoch") def test_backup_dir_none_error(self): with self.assertRaisesRegex( ValueError, expected_regex="Empty `backup_dir` argument passed" ): callbacks.BackupAndRestore(backup_dir=None, save_freq="epoch")
BackupAndRestoreCallbackTest
python
OmkarPathak__pygorithm
tests/test_sorting.py
{ "start": 3226, "end": 3408 }
class ____(unittest.TestCase, TestSortingAlgorithm): inplace = False alph_support = True @staticmethod def sort(arr): return quick_sort.sort(arr)
TestQuickSort
python
tensorflow__tensorflow
tensorflow/python/framework/extension_type.py
{ "start": 43615, "end": 47654 }
class ____(ExtensionTypeSpec): """TypeSpec for AnonymousExtensionType.""" def __init__(self, **fields): for name in fields: if extension_type_field.ExtensionTypeField.is_reserved_name(name) or ( name.startswith('__') and name.endswith('__') ): raise ValueError( f'Reserved field name {name} was encountered ' 'when trying to instantiate an AnonymousExtensionTypeSpec.' ) fields = [ (k, _convert_anonymous_fields(v, for_spec=True)) for (k, v) in fields.items() ] self.__dict__.update(fields) super().__init__() value_type = AnonymousExtensionType # TypeSpec API. def _serialize(self): # TypeSpec API. return tuple( (name, _change_nested_mappings_to(value, dict)) for (name, value) in self.__dict__.items() if not extension_type_field.ExtensionTypeField.is_reserved_name(name) ) def __setattr__(self, name, value): if name in type_spec.CACHED_FIXED_PROPERTIES: super().__setattr__(name, value) else: raise AttributeError( f'Cannot set attribute `{name}`. ' 'AnonymousExtensionTypeSpec instances are immutable.' ) def __delattr__(self, name): raise AttributeError( f'Cannot delete attribute `{name}`. ' 'AnonymousExtensionTypeSpec instances are immutable.' ) def _convert_anonymous_fields(value, for_spec=False): """Type-checks and converts `value` for inclusion in an AnonymousExtensionType.""" if isinstance( value, ( int, float, bool, str, bytes, type(None), dtypes.DType, tensor_shape.TensorShape, ), ): return value if isinstance(value, tuple): return tuple(_convert_anonymous_fields(v, for_spec) for v in value) if isinstance(value, typing.Mapping): return immutable_dict.ImmutableDict( [ ( _convert_anonymous_fields(k, for_spec), _convert_anonymous_fields(v, for_spec), ) for (k, v) in value.items() ] ) if ( isinstance(value, (tensor.Tensor, composite_tensor.CompositeTensor)) and not for_spec ): return value if isinstance(value, type_spec.TypeSpec) and for_spec: return value raise ValueError( 'Cannot convert anonymous fields from ' f'an unsupported `value` argument: {value!r}.' 
) # ============================================================================== # reinterpret # ============================================================================== def reinterpret(value, new_type): """Converts a given `ExtensionType` to a new type with compatible fields. In particular, this can be used to convert a concrete subclass of `ExtensionType` to an `AnonymousExtensionType`, or vice versa. When converting to a non-anonymous ExtensionType, field values are type-checked to ensure they are consistent with `new_type`'s type annotations, and validated with `new_type.__validate__`. Args: value: An instance of a subclass of `tf.ExtensionType` new_type: A subclass of `tf.ExtensionType` Returns: An instance of `new_type`, whose fields are copied from `value`. """ if not isinstance(value, ExtensionType): raise ValueError( 'reinterpret expects `value` to be a tf.ExtensionType instance; ' f'got {value!r}' ) if not (isinstance(new_type, type) and issubclass(new_type, ExtensionType)): raise ValueError( 'reinterpret expects `new_type` to be a subclass of tf.ExtensionType; ' f'got {new_type!r}' ) fields = [ item for item in value.__dict__.items() if not extension_type_field.ExtensionTypeField.is_reserved_name(item[0]) ] new_value = _create_object_from_type_and_dict(new_type, fields) new_value._tf_extension_type_convert_fields() # pylint: disable=protected-access new_value.__validate__() return new_value
AnonymousExtensionTypeSpec
python
numpy__numpy
numpy/distutils/system_info.py
{ "start": 81268, "end": 86227 }
class ____(blas_info): section = 'openblas' dir_env_var = 'OPENBLAS' _lib_names = ['openblas'] _require_symbols = [] notfounderror = BlasNotFoundError @property def symbol_prefix(self): try: return self.cp.get(self.section, 'symbol_prefix') except NoOptionError: return '' @property def symbol_suffix(self): try: return self.cp.get(self.section, 'symbol_suffix') except NoOptionError: return '' def _calc_info(self): c = customized_ccompiler() lib_dirs = self.get_lib_dirs() # Prefer to use libraries over openblas_libs opt = self.get_option_single('openblas_libs', 'libraries') openblas_libs = self.get_libs(opt, self._lib_names) info = self.check_libs(lib_dirs, openblas_libs, []) if c.compiler_type == "msvc" and info is None: from numpy.distutils.fcompiler import new_fcompiler f = new_fcompiler(c_compiler=c) if f and f.compiler_type == 'gnu95': # Try gfortran-compatible library files info = self.check_msvc_gfortran_libs(lib_dirs, openblas_libs) # Skip lapack check, we'd need build_ext to do it skip_symbol_check = True elif info: skip_symbol_check = False info['language'] = 'c' if info is None: return None # Add extra info for OpenBLAS extra_info = self.calc_extra_info() dict_append(info, **extra_info) if not (skip_symbol_check or self.check_symbols(info)): return None info['define_macros'] = [('HAVE_CBLAS', None)] if self.symbol_prefix: info['define_macros'] += [('BLAS_SYMBOL_PREFIX', self.symbol_prefix)] if self.symbol_suffix: info['define_macros'] += [('BLAS_SYMBOL_SUFFIX', self.symbol_suffix)] return info def calc_info(self): info = self._calc_info() if info is not None: self.set_info(**info) def check_msvc_gfortran_libs(self, library_dirs, libraries): # First, find the full path to each library directory library_paths = [] for library in libraries: for library_dir in library_dirs: # MinGW static ext will be .a fullpath = os.path.join(library_dir, library + '.a') if os.path.isfile(fullpath): library_paths.append(fullpath) break else: return None # Generate 
numpy.distutils virtual static library file basename = self.__class__.__name__ tmpdir = os.path.join(os.getcwd(), 'build', basename) if not os.path.isdir(tmpdir): os.makedirs(tmpdir) info = {'library_dirs': [tmpdir], 'libraries': [basename], 'language': 'f77'} fake_lib_file = os.path.join(tmpdir, basename + '.fobjects') fake_clib_file = os.path.join(tmpdir, basename + '.cobjects') with open(fake_lib_file, 'w') as f: f.write("\n".join(library_paths)) with open(fake_clib_file, 'w') as f: pass return info def check_symbols(self, info): res = False c = customized_ccompiler() tmpdir = tempfile.mkdtemp() prototypes = "\n".join("void %s%s%s();" % (self.symbol_prefix, symbol_name, self.symbol_suffix) for symbol_name in self._require_symbols) calls = "\n".join("%s%s%s();" % (self.symbol_prefix, symbol_name, self.symbol_suffix) for symbol_name in self._require_symbols) s = textwrap.dedent("""\ %(prototypes)s int main(int argc, const char *argv[]) { %(calls)s return 0; }""") % dict(prototypes=prototypes, calls=calls) src = os.path.join(tmpdir, 'source.c') out = os.path.join(tmpdir, 'a.out') # Add the additional "extra" arguments try: extra_args = info['extra_link_args'] except Exception: extra_args = [] try: with open(src, 'w') as f: f.write(s) obj = c.compile([src], output_dir=tmpdir) try: c.link_executable(obj, out, libraries=info['libraries'], library_dirs=info['library_dirs'], extra_postargs=extra_args) res = True except distutils.ccompiler.LinkError: res = False finally: shutil.rmtree(tmpdir) return res
openblas_info
python
getsentry__sentry
src/social_auth/exceptions.py
{ "start": 742, "end": 948 }
class ____(SocialAuthBaseException): """Auth process exception.""" def __init__(self, backend, *args, **kwargs): self.backend = backend super().__init__(*args, **kwargs)
AuthException
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/dml.py
{ "start": 45973, "end": 54787 }
class ____(ValuesBase, HasSyntaxExtensions[Literal["post_values"]]): """Represent an INSERT construct. The :class:`_expression.Insert` object is created using the :func:`_expression.insert()` function. Available extension points: * ``post_values``: applies additional logic after the ``VALUES`` clause. """ __visit_name__ = "insert" _supports_multi_parameters = True select = None include_insert_from_select_defaults = False _sort_by_parameter_order: bool = False is_insert = True table: TableClause _traverse_internals = ( [ ("table", InternalTraversal.dp_clauseelement), ("_inline", InternalTraversal.dp_boolean), ("_select_names", InternalTraversal.dp_string_list), ("_values", InternalTraversal.dp_dml_values), ("_multi_values", InternalTraversal.dp_dml_multi_values), ("select", InternalTraversal.dp_clauseelement), ("_post_values_clause", InternalTraversal.dp_clauseelement), ("_returning", InternalTraversal.dp_clauseelement_tuple), ("_hints", InternalTraversal.dp_table_hint_list), ("_return_defaults", InternalTraversal.dp_boolean), ( "_return_defaults_columns", InternalTraversal.dp_clauseelement_tuple, ), ("_sort_by_parameter_order", InternalTraversal.dp_boolean), ] + HasPrefixes._has_prefixes_traverse_internals + DialectKWArgs._dialect_kwargs_traverse_internals + ExecutableStatement._executable_traverse_internals + HasCTE._has_ctes_traverse_internals ) _position_map = util.immutabledict( { "post_values": "_post_values_clause", } ) _post_values_clause: Optional[ClauseElement] = None """extension point for a ClauseElement that will be compiled directly after the VALUES portion of the :class:`.Insert` statement """ def __init__(self, table: _DMLTableArgument): super().__init__(table) def _apply_syntax_extension_to_self( self, extension: SyntaxExtension ) -> None: extension.apply_to_insert(self) @_generative def inline(self) -> Self: """Make this :class:`_expression.Insert` construct "inline" . 
When set, no attempt will be made to retrieve the SQL-generated default values to be provided within the statement; in particular, this allows SQL expressions to be rendered 'inline' within the statement without the need to pre-execute them beforehand; for backends that support "returning", this turns off the "implicit returning" feature for the statement. .. versionchanged:: 1.4 the :paramref:`_expression.Insert.inline` parameter is now superseded by the :meth:`_expression.Insert.inline` method. """ self._inline = True return self @_generative def from_select( self, names: Sequence[_DMLColumnArgument], select: Selectable, include_defaults: bool = True, ) -> Self: """Return a new :class:`_expression.Insert` construct which represents an ``INSERT...FROM SELECT`` statement. e.g.:: sel = select(table1.c.a, table1.c.b).where(table1.c.c > 5) ins = table2.insert().from_select(["a", "b"], sel) :param names: a sequence of string column names or :class:`_schema.Column` objects representing the target columns. :param select: a :func:`_expression.select` construct, :class:`_expression.FromClause` or other construct which resolves into a :class:`_expression.FromClause`, such as an ORM :class:`_query.Query` object, etc. The order of columns returned from this FROM clause should correspond to the order of columns sent as the ``names`` parameter; while this is not checked before passing along to the database, the database would normally raise an exception if these column lists don't correspond. :param include_defaults: if True, non-server default values and SQL expressions as specified on :class:`_schema.Column` objects (as documented in :ref:`metadata_defaults_toplevel`) not otherwise specified in the list of names will be rendered into the INSERT and SELECT statements, so that these values are also included in the data to be inserted. .. note:: A Python-side default that uses a Python callable function will only be invoked **once** for the whole statement, and **not per row**. 
""" if self._values: raise exc.InvalidRequestError( "This construct already inserts value expressions" ) self._select_names = [ coercions.expect(roles.DMLColumnRole, name, as_key=True) for name in names ] self._inline = True self.include_insert_from_select_defaults = include_defaults self.select = coercions.expect(roles.DMLSelectRole, select) return self if TYPE_CHECKING: # START OVERLOADED FUNCTIONS self.returning ReturningInsert 1-8 ", *, sort_by_parameter_order: bool = False" # noqa: E501 # code within this block is **programmatically, # statically generated** by tools/generate_tuple_map_overloads.py @overload def returning( self, __ent0: _TCCA[_T0], /, *, sort_by_parameter_order: bool = False, ) -> ReturningInsert[_T0]: ... @overload def returning( self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1], /, *, sort_by_parameter_order: bool = False, ) -> ReturningInsert[_T0, _T1]: ... @overload def returning( self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1], __ent2: _TCCA[_T2], /, *, sort_by_parameter_order: bool = False, ) -> ReturningInsert[_T0, _T1, _T2]: ... @overload def returning( self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1], __ent2: _TCCA[_T2], __ent3: _TCCA[_T3], /, *, sort_by_parameter_order: bool = False, ) -> ReturningInsert[_T0, _T1, _T2, _T3]: ... @overload def returning( self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1], __ent2: _TCCA[_T2], __ent3: _TCCA[_T3], __ent4: _TCCA[_T4], /, *, sort_by_parameter_order: bool = False, ) -> ReturningInsert[_T0, _T1, _T2, _T3, _T4]: ... @overload def returning( self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1], __ent2: _TCCA[_T2], __ent3: _TCCA[_T3], __ent4: _TCCA[_T4], __ent5: _TCCA[_T5], /, *, sort_by_parameter_order: bool = False, ) -> ReturningInsert[_T0, _T1, _T2, _T3, _T4, _T5]: ... 
@overload def returning( self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1], __ent2: _TCCA[_T2], __ent3: _TCCA[_T3], __ent4: _TCCA[_T4], __ent5: _TCCA[_T5], __ent6: _TCCA[_T6], /, *, sort_by_parameter_order: bool = False, ) -> ReturningInsert[_T0, _T1, _T2, _T3, _T4, _T5, _T6]: ... @overload def returning( self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1], __ent2: _TCCA[_T2], __ent3: _TCCA[_T3], __ent4: _TCCA[_T4], __ent5: _TCCA[_T5], __ent6: _TCCA[_T6], __ent7: _TCCA[_T7], /, *entities: _ColumnsClauseArgument[Any], sort_by_parameter_order: bool = False, ) -> ReturningInsert[ _T0, _T1, _T2, _T3, _T4, _T5, _T6, _T7, Unpack[TupleAny] ]: ... # END OVERLOADED FUNCTIONS self.returning @overload def returning( self, *cols: _ColumnsClauseArgument[Any], sort_by_parameter_order: bool = False, **__kw: Any, ) -> ReturningInsert[Any]: ... def returning( self, *cols: _ColumnsClauseArgument[Any], sort_by_parameter_order: bool = False, **__kw: Any, ) -> ReturningInsert[Any]: ...
Insert
python
pytorch__pytorch
torchgen/model.py
{ "start": 6834, "end": 10257 }
class ____(Enum): FAKE = auto() PROXY = auto() FUNCTIONAL = auto() def codegen_per_backend_entries() -> str: r: list[str] = [] for fk in FUNCTIONALITY_KEYS: r.extend(f" {fk}{bc} = auto()" for bc in BACKEND_COMPONENTS) return "\n".join(r) for fk in FUNCTIONALITY_KEYS: for bc in BACKEND_COMPONENTS: if not hasattr(DispatchKey, fk + bc): r = codegen_per_backend_entries() print(r) raise RuntimeError( f"Missing {fk}{bc} from DispatchKey enum. Here is the autogenerated list we expect to have:\n\n{r}" ) STRUCTURED_DISPATCH_KEYS = { DispatchKey.MPS, DispatchKey.CUDA, DispatchKey.CPU, DispatchKey.XPU, DispatchKey.MTIA, } UFUNC_DISPATCH_KEYS = {DispatchKey.CUDA, DispatchKey.CPU} # Set of supported dispatch keys dispatch_keys = [ DispatchKey.CPU, DispatchKey.SparseCPU, DispatchKey.SparseCsrCPU, DispatchKey.MkldnnCPU, DispatchKey.CUDA, DispatchKey.MPS, DispatchKey.XPU, DispatchKey.SparseXPU, DispatchKey.SparseCsrXPU, DispatchKey.SparseCUDA, DispatchKey.SparseCsrCUDA, DispatchKey.SparseMPS, DispatchKey.SparseCsrMPS, DispatchKey.QuantizedCPU, DispatchKey.QuantizedCUDA, DispatchKey.CompositeImplicitAutograd, DispatchKey.CompositeImplicitAutogradNestedTensor, DispatchKey.CompositeExplicitAutograd, DispatchKey.CompositeExplicitAutogradNonFunctional, DispatchKey.NestedTensorCPU, DispatchKey.NestedTensorCUDA, DispatchKey.NestedTensorXPU, DispatchKey.NestedTensorHPU, # Meta is a magic key: it is automatically generated for structured # kernels DispatchKey.Meta, DispatchKey.SparseMeta, DispatchKey.SparseCsrMeta, DispatchKey.QuantizedMeta, DispatchKey.NestedTensorMeta, DispatchKey.ZeroTensor, DispatchKey.MTIA, ] # Dispatch keys that "support all backends". These codegen slightly differently # then backend specific keys. 
def is_generic_dispatch_key(dk: DispatchKey) -> bool: return dk in { DispatchKey.CompositeExplicitAutograd, DispatchKey.CompositeExplicitAutogradNonFunctional, DispatchKey.CompositeImplicitAutograd, DispatchKey.CompositeImplicitAutogradNestedTensor, } # CUDA specific dispatch keys def is_cuda_dispatch_key(dk: DispatchKey) -> bool: return dk in { DispatchKey.CUDA, DispatchKey.QuantizedCUDA, DispatchKey.SparseCUDA, DispatchKey.SparseCsrCUDA, DispatchKey.NestedTensorCUDA, DispatchKey.AutogradCUDA, } # XPU specific dispatcy keys def is_xpu_dispatch_key(dk: DispatchKey) -> bool: return dk in { DispatchKey.XPU, DispatchKey.QuantizedXPU, DispatchKey.SparseXPU, DispatchKey.SparseCsrXPU, DispatchKey.NestedTensorXPU, DispatchKey.AutogradXPU, } # Structured kernel generation is only supported for certain key types; # otherwise use old-style def is_structured_dispatch_key(dk: DispatchKey) -> bool: return dk in STRUCTURED_DISPATCH_KEYS def is_ufunc_dispatch_key(dk: DispatchKey) -> bool: # For now, ufunc dispatch keys coincide with structured keys return dk in UFUNC_DISPATCH_KEYS dispatch_device_map = {is_cuda_dispatch_key: "cuda", is_xpu_dispatch_key: "xpu"} # This is oddly named ScalarType and not DType for symmetry with C++
_TorchDispatchModeKey
python
joke2k__faker
faker/providers/color/bn_BD/__init__.py
{ "start": 98, "end": 6145 }
class ____(ColorProvider): """Implement color provider for ``bn_BD`` locale.""" all_colors = OrderedDict( ( ("এলিস নীল", "#F0F8FF"), ("এন্টিক সাদা", "#FAEBD7"), ("জল রং", "#00FFFF"), ("হালকা নীল সবুজ", "#7FFFD4"), ("উজ্জ্বল নীল", "#F0FFFF"), ("ফ্যাকাশে বেলে হলুদ বাদামী", "#F5F5DC"), ("বিস্কুট রং", "#FFE4C4"), ("কালো", "#000000"), ("বালু রং", "#FFEBCD"), ("নীল", "#0000FF"), ("নীলাভ রক্তবর্ণ", "#8A2BE2"), ("বাদামী", "#A52A2A"), ("কাঠ রং", "#DEB887"), ("সামরিক নীল", "#5F9EA0"), ("উজ্জ্বল হলুদাভ সবুজ", "#7FFF00"), ("চকলেট রং", "#D2691E"), ("প্রবাল রং", "#FF7F50"), ("ঝুমকা ফুলের নীল", "#6495ED"), ("সিল্ক রং", "#FFF8DC"), ("অগ্নি রং", "#DC143C"), ("সায়ান", "#00FFFF"), ("কালচে নীল", "#00008B"), ("কালচে সায়ান", "#008B8B"), ("কালচে ধাতব সোনালি", "#B8860B"), ("কালচে ধূসর", "#A9A9A9"), ("কালচে সবুজ", "#006400"), ("কালচে খাকী", "#BDB76B"), ("কালচে হালকা বেগুনী লাল", "#8B008B"), ("কালচে জলপাই সবুজ", "#556B2F"), ("কালচে কমলা", "#FF8C00"), ("কালচে অর্কিড রং", "#9932CC"), ("কালচে লাল", "#8B0000"), ("কালচে স্যামন রং", "#E9967A"), ("কালচে সামুদ্রিক সবুজ", "#8FBC8F"), ("কালচে পাথুরে নীল", "#483D8B"), ("কালচে পাথুরে ধূসর", "#2F4F4F"), ("কালচে ফিরোজা", "#00CED1"), ("কালচে বেগুনী", "#9400D3"), ("গাঢ় গোলাপি", "#FF1493"), ("গাঢ় আকাশী নীল", "#00BFFF"), ("আবছা ধূসর", "#696969"), ("ডজার নীল", "#1E90FF"), ("পোড়া ইট রং", "#B22222"), ("ফুলেল সাদা", "#FFFAF0"), ("বন্য সবুজ", "#228B22"), ("উজ্জ্বল গোলাপি বেগুনী", "#FF00FF"), ("মেটে রং", "#DCDCDC"), ("টাইটান সাদা", "#F8F8FF"), ("সোনালি", "#FFD700"), ("ধাতব সোনালি", "#DAA520"), ("ধূসর", "#808080"), ("সবুজ", "#008000"), ("সবুজাভ হলুদ", "#ADFF2F"), ("মধু রং", "#F0FFF0"), ("উষ্ণ গোলাপি", "#FF69B4"), ("ভারতীয় লাল", "#CD5C5C"), ("বেগুনী নীল", "#4B0082"), ("আইভরি", "#FFFFF0"), ("খাকী", "#F0E68C"), ("ল্যাভেণ্ডার রং", "#E6E6FA"), ("ল্যাভেন্ডার লাল", "#FFF0F5"), ("তৃণ সবুজ", "#7CFC00"), ("হালকা সিল্ক রং", "#FFFACD"), ("হালকা নীল", "#ADD8E6"), ("হালকা প্রবাল রং", "#F08080"), ("হালকা সায়ান", "#E0FFFF"), ("হালকা ধাতব সোনালি হলুদ", "#FAFAD2"), ("হালকা 
ধূসর", "#D3D3D3"), ("হালকা সবুজ", "#90EE90"), ("হালকা গোলাপি", "#FFB6C1"), ("হালকা স্যামন রং", "#FFA07A"), ("হালকা সামুদ্রিক সবুজ", "#20B2AA"), ("হালকা আকাশী নীল", "#87CEFA"), ("হালকা পাথুরে ধূসর", "#778899"), ("হালকা ধাতব নীল", "#B0C4DE"), ("হালকা হলুদ", "#FFFFE0"), ("লাইম রং", "#00FF00"), ("লাইম সবুজ", "#32CD32"), ("পাট রং", "#FAF0E6"), ("হালকা বেগুনী লাল", "#FF00FF"), ("মেরুন", "#800000"), ("মাঝারী নীল সবুজ", "#66CDAA"), ("মাঝারী নীল", "#0000CD"), ("মাঝারী অর্কিড রং", "#BA55D3"), ("মাঝারী বেগুনী", "#9370DB"), ("মাঝারী সামুদ্রিক সবুজ", "#3CB371"), ("মাঝারী পাথুরে নীল", "#7B68EE"), ("মাঝারী বাসন্তী সবুজ", "#00FA9A"), ("মাঝারী ফিরোজা", "#48D1CC"), ("মাঝারী বেগুনী লাল", "#C71585"), ("মিডনাইট নীল", "#191970"), ("হালকা পীত পুদিনা রং", "#F5FFFA"), ("ধোঁয়াটে গোলাপ রং", "#FFE4E1"), ("মোকাসিন", "#FFE4B5"), ("নাভাজো সাদা", "#FFDEAD"), ("নেভি ব্লু", "#000080"), ("ওল্ড লেইস রং", "#FDF5E6"), ("জলপাই রং", "#808000"), ("ম্যাটমাটে জলপাই রং", "#6B8E23"), ("কমলা", "#FFA500"), ("কমলা লাল", "#FF4500"), ("অর্কিড রং", "#DA70D6"), ("ফ্যাকাশে ধাতব সোনালি", "#EEE8AA"), ("ফ্যাকাশে সবুজ", "#98FB98"), ("ফ্যাকাশে ফিরোজা", "#AFEEEE"), ("ফ্যাকাশে বেগুনী লাল", "#DB7093"), ("পাপায়াহুপ", "#FFEFD5"), ("পীচ রং", "#FFDAB9"), ("পেরু রং", "#CD853F"), ("গোলাপি", "#FFC0CB"), ("জাম রং", "#DDA0DD"), ("গুঁড়া নীল", "#B0E0E6"), ("বেগুনী", "#800080"), ("লাল", "#FF0000"), ("গোলাপী লাল", "#BC8F8F"), ("রয়্যাল ব্লু", "#4169E1"), ("স্যাডল ব্রাউন", "#8B4513"), ("স্যামন রং", "#FA8072"), ("বেলে বাদামী", "#F4A460"), ("সামুদ্রিক সবুজ", "#2E8B57"), ("ঝিনুক রং", "#FFF5EE"), ("মেটে রং", "#A0522D"), ("রূপালী", "#C0C0C0"), ("আকাশী নীল", "#87CEEB"), ("পাথুরে নীল", "#6A5ACD"), ("পাথুরে ধূসর", "#708090"), ("তুষার শুভ্র রং", "#FFFAFA"), ("বাসন্তী সবুজ", "#00FF7F"), ("ধাতব নীল", "#4682B4"), ("তামাটে রং", "#D2B48C"), ("পেষ্ট রং", "#008080"), ("থিসল রং", "#D8BFD8"), ("টমেটো রং", "#FF6347"), ("ফিরোজা", "#40E0D0"), ("রক্তবেগুনী", "#EE82EE"), ("গম রং", "#F5DEB3"), ("সাদা", "#FFFFFF"), ("ধোঁয়াটে সাদা", "#F5F5F5"), ("হলুদ", 
"#FFFF00"), ("হলুদাভ সবুজ", "#9ACD32"), ) ) safe_colors = ( "কালো", "মেরুন", "সবুজ", "নেভি", "জলপাই রং", "বেগুনী", "পেষ্ট রং", "লাইম রং", "নীল", "রূপালী", "ধূসর", "হলুদ", "উজ্জ্বল গোলাপি বেগুনী", "জল রং", "সাদা", )
Provider
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_axis40.py
{ "start": 315, "end": 1415 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("chart_axis40.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({"type": "column"}) chart.axis_ids = [108329216, 108635264] data = [ [1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15], ] worksheet.write_column("A1", data[0]) worksheet.write_column("B1", data[1]) worksheet.write_column("C1", data[2]) chart.add_series({"values": "=Sheet1!$A$1:$A$5"}) chart.add_series({"values": "=Sheet1!$B$1:$B$5"}) chart.add_series({"values": "=Sheet1!$C$1:$C$5"}) chart.set_x_axis({"interval_unit": 3, "interval_tick": 2}) worksheet.insert_chart("E9", chart) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
ray-project__ray
doc/source/ray-core/doc_code/monte_carlo_pi.py
{ "start": 161, "end": 2396 }
class ____: def __init__(self, total_num_samples: int): self.total_num_samples = total_num_samples self.num_samples_completed_per_task = {} def report_progress(self, task_id: int, num_samples_completed: int) -> None: self.num_samples_completed_per_task[task_id] = num_samples_completed def get_progress(self) -> float: return ( sum(self.num_samples_completed_per_task.values()) / self.total_num_samples ) # __defining_actor_end__ # fmt: on # fmt: off # __defining_task_start__ @ray.remote def sampling_task(num_samples: int, task_id: int, progress_actor: ray.actor.ActorHandle) -> int: num_inside = 0 for i in range(num_samples): x, y = random.uniform(-1, 1), random.uniform(-1, 1) if math.hypot(x, y) <= 1: num_inside += 1 # Report progress every 1 million samples. if (i + 1) % 1_000_000 == 0: # This is async. progress_actor.report_progress.remote(task_id, i + 1) # Report the final progress. progress_actor.report_progress.remote(task_id, num_samples) return num_inside # __defining_task_end__ # fmt: on # __creating_actor_start__ # Change this to match your cluster scale. NUM_SAMPLING_TASKS = 10 NUM_SAMPLES_PER_TASK = 10_000_000 TOTAL_NUM_SAMPLES = NUM_SAMPLING_TASKS * NUM_SAMPLES_PER_TASK # Create the progress actor. progress_actor = ProgressActor.remote(TOTAL_NUM_SAMPLES) # __creating_actor_end__ # __executing_task_start__ # Create and execute all sampling tasks in parallel. results = [ sampling_task.remote(NUM_SAMPLES_PER_TASK, i, progress_actor) for i in range(NUM_SAMPLING_TASKS) ] # __executing_task_end__ # __calling_actor_start__ # Query progress periodically. while True: progress = ray.get(progress_actor.get_progress.remote()) print(f"Progress: {int(progress * 100)}%") if progress == 1: break time.sleep(1) # __calling_actor_end__ # __calculating_pi_start__ # Get all the sampling tasks results. 
total_num_inside = sum(ray.get(results)) pi = (total_num_inside * 4) / TOTAL_NUM_SAMPLES print(f"Estimated value of π is: {pi}") # __calculating_pi_end__ assert str(pi).startswith("3.14")
ProgressActor
python
huggingface__transformers
src/transformers/models/depth_pro/image_processing_depth_pro.py
{ "start": 1652, "end": 18709 }
class ____(BaseImageProcessor): r""" Constructs a DepthPro image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `(size["height"], size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`dict`, *optional*, defaults to `{"height": 1536, "width": 1536}`): Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. 
""" model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Optional[dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, **kwargs, ): super().__init__(**kwargs) size = size if size is not None else {"height": 1536, "width": 1536} size = get_size_dict(size) self.do_resize = do_resize self.do_rescale = do_rescale self.do_normalize = do_normalize self.size = size self.resample = resample self.rescale_factor = rescale_factor self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD def resize( self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: `np.ndarray`: The resized images. """ requires_backends(self, "torch") size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}") output_size = (size["height"], size["width"]) # we use torch interpolation instead of image.resize because DepthProImageProcessor # rescales, then normalizes, which may cause some values to become negative, before resizing the image. # image.resize expects all values to be in range [0, 1] or [0, 255] and throws an exception otherwise, # however pytorch interpolation works with negative values. 
# relevant issue here: https://github.com/huggingface/transformers/issues/34920 # input should be (B, C, H, W) image_tensor = torch.from_numpy(image).unsqueeze(0) resized_image = torch.nn.functional.interpolate( input=image_tensor, size=output_size, mode=pil_torch_interpolation_mapping[resample].value, ) resized_image = resized_image.squeeze(0).numpy() return resized_image def _validate_input_arguments( self, do_resize: bool, size: dict[str, int], resample: PILImageResampling, do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Union[float, list[float]], image_std: Union[float, list[float]], data_format: Union[str, ChannelDimension], ): if do_resize and None in (size, resample): raise ValueError("Size and resample must be specified if do_resize is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize and None in (image_mean, image_std): raise ValueError("Image mean and standard deviation must be specified if do_normalize is True.") @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, resample: Optional[PILImageResampling] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. 
do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after resizing. resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`): `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use if `do_normalize` is set to `True`. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. 
If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize do_rescale = do_rescale if do_rescale is not None else self.do_rescale do_normalize = do_normalize if do_normalize is not None else self.do_normalize resample = resample if resample is not None else self.resample rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size images = make_flat_list_of_images(images) if not valid_images(images): raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor") self._validate_input_arguments( do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, ) # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. 
input_data_format = infer_channel_dimension_format(images[0]) all_images = [] for image in images: if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize( image=image, mean=image_mean, std=image_std, input_data_format=input_data_format ) # depth-pro rescales and normalizes the image before resizing it # uses torch interpolation which requires ChannelDimension.FIRST if do_resize: image = to_channel_dimension_format(image, ChannelDimension.FIRST, input_channel_dim=input_data_format) image = self.resize(image=image, size=size, resample=resample) image = to_channel_dimension_format(image, data_format, input_channel_dim=ChannelDimension.FIRST) else: image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) all_images.append(image) data = {"pixel_values": all_images} return BatchFeature(data=data, tensor_type=return_tensors) def post_process_depth_estimation( self, outputs: "DepthProDepthEstimatorOutput", target_sizes: Optional[Union[TensorType, list[tuple[int, int]], None]] = None, ) -> list[dict[str, TensorType]]: """ Post-processes the raw depth predictions from the model to generate final depth predictions which is caliberated using the field of view if provided and resized to specified target sizes if provided. Args: outputs ([`DepthProDepthEstimatorOutput`]): Raw outputs of the model. target_sizes (`Optional[Union[TensorType, list[tuple[int, int]], None]]`, *optional*, defaults to `None`): Target sizes to resize the depth predictions. Can be a tensor of shape `(batch_size, 2)` or a list of tuples `(height, width)` for each image in the batch. If `None`, no resizing is performed. Returns: `list[dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth predictions, and field of view (degrees) and focal length (pixels) if `field_of_view` is given in `outputs`. 
Raises: `ValueError`: If the lengths of `predicted_depths`, `fovs`, or `target_sizes` are mismatched. """ requires_backends(self, "torch") predicted_depth = outputs.predicted_depth fov = outputs.field_of_view batch_size = len(predicted_depth) if target_sizes is not None and batch_size != len(target_sizes): raise ValueError( "Make sure that you pass in as many fov values as the batch dimension of the predicted depth" ) results = [] fov = [None] * batch_size if fov is None else fov target_sizes = [None] * batch_size if target_sizes is None else target_sizes for depth, fov_value, target_size in zip(predicted_depth, fov, target_sizes): focal_length = None if target_size is not None: # scale image w.r.t fov if fov_value is not None: width = target_size[1] focal_length = 0.5 * width / torch.tan(0.5 * torch.deg2rad(fov_value)) depth = depth * width / focal_length # interpolate depth = torch.nn.functional.interpolate( # input should be (B, C, H, W) input=depth.unsqueeze(0).unsqueeze(1), size=target_size, mode=pil_torch_interpolation_mapping[self.resample].value, ).squeeze() # inverse the depth depth = 1.0 / torch.clamp(depth, min=1e-4, max=1e4) results.append( { "predicted_depth": depth, "field_of_view": fov_value, "focal_length": focal_length, } ) return results __all__ = ["DepthProImageProcessor"]
DepthProImageProcessor
python
donnemartin__interactive-coding-challenges
recursion_dynamic/longest_common_subsequence/test_longest_common_subseq.py
{ "start": 18, "end": 639 }
class ____(unittest.TestCase): def test_longest_common_subseq(self): str_comp = StringCompare() self.assertRaises(TypeError, str_comp.longest_common_subseq, None, None) self.assertEqual(str_comp.longest_common_subseq('', ''), '') str0 = 'ABCDEFGHIJ' str1 = 'FOOBCDBCDE' expected = 'BCDE' self.assertEqual(str_comp.longest_common_subseq(str0, str1), expected) print('Success: test_longest_common_subseq') def main(): test = TestLongestCommonSubseq() test.test_longest_common_subseq() if __name__ == '__main__': main()
TestLongestCommonSubseq
python
kamyu104__LeetCode-Solutions
Python/maximum-frequency-after-subarray-operation.py
{ "start": 67, "end": 426 }
class ____(object): def maxFrequency(self, nums, k): """ :type nums: List[int] :type k: int :rtype: int """ result = 0 cnt = collections.defaultdict(int) for x in nums: cnt[x] = max(cnt[x], cnt[k])+1 result = max(result+int(x == k), cnt[x]) return result
Solution
python
ansible__ansible
hacking/azp/incidental.py
{ "start": 10862, "end": 11380 }
class ____: def __init__(self, result_path): with open(os.path.join(result_path, 'run.json')) as run_file: run = json.load(run_file) self.result_sha = run["resources"]["repositories"]["self"]["version"] self.result = run['result'] self.github_base_url = 'https://github.com/ansible/ansible/blob/%s/' % self.result_sha # locate available results self.paths = sorted(glob.glob(os.path.join(result_path, '*', 'coverage-analyze-targets.json')))
CoverageData
python
pandas-dev__pandas
pandas/core/tools/datetimes.py
{ "start": 2538, "end": 2665 }
class ____(TypedDict, total=True): year: DatetimeDictArg month: DatetimeDictArg day: DatetimeDictArg
YearMonthDayDict
python
sympy__sympy
sympy/logic/boolalg.py
{ "start": 8690, "end": 12392 }
class ____(BooleanAtom, metaclass=Singleton): """ SymPy version of ``True``, a singleton that can be accessed via ``S.true``. This is the SymPy version of ``True``, for use in the logic module. The primary advantage of using ``true`` instead of ``True`` is that shorthand Boolean operations like ``~`` and ``>>`` will work as expected on this class, whereas with True they act bitwise on 1. Functions in the logic module will return this class when they evaluate to true. Notes ===== There is liable to be some confusion as to when ``True`` should be used and when ``S.true`` should be used in various contexts throughout SymPy. An important thing to remember is that ``sympify(True)`` returns ``S.true``. This means that for the most part, you can just use ``True`` and it will automatically be converted to ``S.true`` when necessary, similar to how you can generally use 1 instead of ``S.One``. The rule of thumb is: "If the boolean in question can be replaced by an arbitrary symbolic ``Boolean``, like ``Or(x, y)`` or ``x > 1``, use ``S.true``. Otherwise, use ``True``" In other words, use ``S.true`` only on those contexts where the boolean is being used as a symbolic representation of truth. For example, if the object ends up in the ``.args`` of any expression, then it must necessarily be ``S.true`` instead of ``True``, as elements of ``.args`` must be ``Basic``. On the other hand, ``==`` is not a symbolic operation in SymPy, since it always returns ``True`` or ``False``, and does so in terms of structural equality rather than mathematical, so it should return ``True``. The assumptions system should use ``True`` and ``False``. Aside from not satisfying the above rule of thumb, the assumptions system uses a three-valued logic (``True``, ``False``, ``None``), whereas ``S.true`` and ``S.false`` represent a two-valued logic. When in doubt, use ``True``. "``S.true == True is True``." 
While "``S.true is True``" is ``False``, "``S.true == True``" is ``True``, so if there is any doubt over whether a function or expression will return ``S.true`` or ``True``, just use ``==`` instead of ``is`` to do the comparison, and it will work in either case. Finally, for boolean flags, it's better to just use ``if x`` instead of ``if x is True``. To quote PEP 8: Do not compare boolean values to ``True`` or ``False`` using ``==``. * Yes: ``if greeting:`` * No: ``if greeting == True:`` * Worse: ``if greeting is True:`` Examples ======== >>> from sympy import sympify, true, false, Or >>> sympify(True) True >>> _ is True, _ is true (False, True) >>> Or(true, false) True >>> _ is true True Python operators give a boolean result for true but a bitwise result for True >>> ~true, ~True # doctest: +SKIP (False, -2) >>> true >> true, True >> True (True, 0) See Also ======== sympy.logic.boolalg.BooleanFalse """ def __bool__(self): return True def __hash__(self): return hash(True) def __eq__(self, other): if other is True: return True if other is False: return False return super().__eq__(other) @property def negated(self): return false def as_set(self): """ Rewrite logic operators and relationals in terms of real sets. Examples ======== >>> from sympy import true >>> true.as_set() UniversalSet """ return S.UniversalSet
BooleanTrue
python
pytorch__pytorch
torch/_functorch/_aot_autograd/aot_autograd_result.py
{ "start": 8252, "end": 9107 }
class ____(GenericCompiledBackward[CompiledFxGraph], FxGraphCacheLoadable): """ Cacheable entry for a forward function """ def _is_backward(self) -> bool: return True def post_compile( self, result: CompiledFxGraph, fx_config: _CompileFxKwargs ) -> CompiledFxGraph: compiled_bw = super().post_compile(result, fx_config) # See note [Wrapping bw_compiler in disable] # This is done by _wrapped_bw_compiler in torch/_dynamo/backends/common.py # But since on cache hit we do not call the bw_compiler, we need to reapply the disable return torch._dynamo.disable( # type: ignore[return-value] compiled_bw, reason="do not trace generated backwards pass" ) # Generic bundled forward/backward classes that work with any OutputCode type @dataclass
CompiledBackward
python
getsentry__sentry
src/sentry/incidents/endpoints/serializers/alert_rule.py
{ "start": 1697, "end": 2910 }
class ____(TypedDict, total=False): environment: str | None projects: list[str] | None queryType: int | None resolveThreshold: float | None dataset: str | None thresholdType: int | None eventTypes: list[str] | None owner: str | None originalAlertRuleId: str | None comparisonDelta: float | None weeklyAvg: float | None totalThisWeek: int | None snooze: bool | None latestIncident: datetime | None errors: list[str] | None sensitivity: str | None seasonality: str | None extrapolationMode: str | None @extend_schema_serializer( exclude_fields=[ "status", "resolution", "thresholdPeriod", "weeklyAvg", "totalThisWeek", "latestIncident", "description", # TODO: remove this once the feature has been released to add to the public docs, being sure to denote it will only display in Slack notifications "sensitivity", # For anomaly detection, which is behind a feature flag "seasonality", # For anomaly detection, which is behind a feature flag "detectionType", # For anomaly detection, which is behind a feature flag ] )
AlertRuleSerializerResponseOptional
python
django__django
tests/migrations/test_migrations_squashed_extra/0003_third.py
{ "start": 35, "end": 125 }
class ____(migrations.Migration): dependencies = [("migrations", "0002_second")]
Migration
python
openai__openai-python
src/openai/resources/fine_tuning/alpha/graders.py
{ "start": 840, "end": 5125 }
class ____(SyncAPIResource): @cached_property def with_raw_response(self) -> GradersWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers """ return GradersWithRawResponse(self) @cached_property def with_streaming_response(self) -> GradersWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. For more information, see https://www.github.com/openai/openai-python#with_streaming_response """ return GradersWithStreamingResponse(self) def run( self, *, grader: grader_run_params.Grader, model_sample: str, item: object | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> GraderRunResponse: """ Run a grader. Args: grader: The grader used for the fine-tuning job. model_sample: The model sample to be evaluated. This value will be used to populate the `sample` namespace. See [the guide](https://platform.openai.com/docs/guides/graders) for more details. The `output_json` variable will be populated if the model sample is a valid JSON string. item: The dataset item provided to the grader. This will be used to populate the `item` namespace. See [the guide](https://platform.openai.com/docs/guides/graders) for more details. 
extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ return self._post( "/fine_tuning/alpha/graders/run", body=maybe_transform( { "grader": grader, "model_sample": model_sample, "item": item, }, grader_run_params.GraderRunParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=GraderRunResponse, ) def validate( self, *, grader: grader_validate_params.Grader, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> GraderValidateResponse: """ Validate a grader. Args: grader: The grader used for the fine-tuning job. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ return self._post( "/fine_tuning/alpha/graders/validate", body=maybe_transform({"grader": grader}, grader_validate_params.GraderValidateParams), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=GraderValidateResponse, )
Graders
python
getsentry__sentry
tests/sentry/relocation/tasks/test_process.py
{ "start": 21970, "end": 37969 }
class ____(RelocationTaskTestCase): def setUp(self) -> None: super().setUp() self.relocation.step = Relocation.Step.UPLOADING.value self.relocation.latest_task = OrderedTask.UPLOADING_COMPLETE.name self.relocation.save() def test_success_admin_assisted_relocation( self, preprocessing_transfer_mock: Mock, fake_message_builder: Mock, fake_kms_client: Mock ): self.mock_message_builder(fake_message_builder) self.mock_kms_client(fake_kms_client) preprocessing_scan(self.uuid) assert fake_kms_client.return_value.asymmetric_decrypt.call_count == 1 assert fake_kms_client.return_value.get_public_key.call_count == 0 assert fake_message_builder.call_count == 1 assert fake_message_builder.call_args.kwargs["type"] == "relocation.started" fake_message_builder.return_value.send_async.assert_called_once_with( to=[self.owner.email, self.superuser.email] ) assert preprocessing_transfer_mock.call_count == 1 relocation: Relocation = Relocation.objects.get(uuid=self.uuid) assert relocation.want_usernames == [ "admin@example.com", "member@example.com", ] assert relocation.latest_notified == Relocation.EmailKind.STARTED.value def test_success_self_service_relocation( self, preprocessing_transfer_mock: Mock, fake_message_builder: Mock, fake_kms_client: Mock ): self.mock_kms_client(fake_kms_client) self.relocation.creator_id = self.relocation.owner_id self.relocation.save() preprocessing_scan(self.uuid) assert fake_kms_client.return_value.asymmetric_decrypt.call_count == 1 assert fake_kms_client.return_value.get_public_key.call_count == 0 assert fake_message_builder.call_count == 1 assert fake_message_builder.call_args.kwargs["type"] == "relocation.started" fake_message_builder.return_value.send_async.assert_called_once_with(to=[self.owner.email]) assert preprocessing_transfer_mock.call_count == 1 relocation: Relocation = Relocation.objects.get(uuid=self.uuid) assert relocation.want_usernames == [ "admin@example.com", "member@example.com", ] assert relocation.latest_notified == 
Relocation.EmailKind.STARTED.value def test_pause( self, preprocessing_transfer_mock: Mock, fake_message_builder: Mock, fake_kms_client: Mock ): self.mock_message_builder(fake_message_builder) self.mock_kms_client(fake_kms_client) self.relocation.scheduled_pause_at_step = Relocation.Step.PREPROCESSING.value self.relocation.save() preprocessing_scan(self.uuid) assert fake_kms_client.return_value.asymmetric_decrypt.call_count == 0 assert fake_kms_client.return_value.get_public_key.call_count == 0 assert fake_message_builder.call_count == 0 assert preprocessing_transfer_mock.call_count == 0 relocation: Relocation = Relocation.objects.get(uuid=self.uuid) assert relocation.status == Relocation.Status.PAUSE.value assert relocation.step == Relocation.Step.PREPROCESSING.value assert relocation.scheduled_pause_at_step is None assert relocation.latest_task == OrderedTask.PREPROCESSING_SCAN.name def test_retry_if_attempts_left( self, preprocessing_transfer_mock: Mock, fake_message_builder: Mock, fake_kms_client: Mock ): RelocationFile.objects.filter(relocation=self.relocation).delete() self.mock_message_builder(fake_message_builder) self.mock_kms_client(fake_kms_client) # An exception being raised will trigger a retry task. 
with pytest.raises(Exception): preprocessing_scan(self.uuid) assert fake_kms_client.return_value.asymmetric_decrypt.call_count == 0 assert fake_kms_client.return_value.get_public_key.call_count == 0 assert fake_message_builder.call_count == 0 assert preprocessing_transfer_mock.call_count == 0 relocation = Relocation.objects.get(uuid=self.uuid) assert relocation.status == Relocation.Status.IN_PROGRESS.value assert relocation.latest_notified != Relocation.EmailKind.FAILED.value assert not relocation.failure_reason def test_fail_if_no_attempts_left( self, preprocessing_transfer_mock: Mock, fake_message_builder: Mock, fake_kms_client: Mock ): self.relocation.latest_task = OrderedTask.PREPROCESSING_SCAN.name self.relocation.latest_task_attempts = MAX_FAST_TASK_RETRIES self.relocation.save() RelocationFile.objects.filter(relocation=self.relocation).delete() self.mock_message_builder(fake_message_builder) self.mock_kms_client(fake_kms_client) with pytest.raises(Exception): preprocessing_scan(self.uuid) assert fake_kms_client.return_value.asymmetric_decrypt.call_count == 0 assert fake_kms_client.return_value.get_public_key.call_count == 0 assert fake_message_builder.call_count == 1 assert fake_message_builder.call_args.kwargs["type"] == "relocation.failed" fake_message_builder.return_value.send_async.assert_called_once_with( to=[self.owner.email, self.superuser.email] ) assert preprocessing_transfer_mock.call_count == 0 relocation = Relocation.objects.get(uuid=self.uuid) assert relocation.status == Relocation.Status.FAILURE.value assert relocation.latest_notified == Relocation.EmailKind.FAILED.value assert relocation.failure_reason == ERR_PREPROCESSING_INTERNAL def test_fail_invalid_tarball( self, preprocessing_transfer_mock: Mock, fake_message_builder: Mock, fake_kms_client: Mock ): file = RelocationFile.objects.get(relocation=self.relocation).file corrupted_tarball_bytes = bytearray(file.getfile().read())[9:] file.putfile(BytesIO(bytes(corrupted_tarball_bytes))) 
self.mock_message_builder(fake_message_builder) self.mock_kms_client(fake_kms_client) preprocessing_scan(self.uuid) assert fake_message_builder.call_count == 1 assert fake_message_builder.call_args.kwargs["type"] == "relocation.failed" fake_message_builder.return_value.send_async.assert_called_once_with( to=[self.owner.email, self.superuser.email] ) assert preprocessing_transfer_mock.call_count == 0 relocation = Relocation.objects.get(uuid=self.uuid) assert relocation.status == Relocation.Status.FAILURE.value assert relocation.latest_notified == Relocation.EmailKind.FAILED.value assert relocation.failure_reason == ERR_PREPROCESSING_INVALID_TARBALL def test_fail_decryption_failure( self, preprocessing_transfer_mock: Mock, fake_message_builder: Mock, fake_kms_client: Mock ): # Add invalid 2-octet UTF-8 sequence to the returned plaintext. self.mock_message_builder(fake_message_builder) self.mock_kms_client(fake_kms_client) fake_kms_client.return_value.asymmetric_decrypt.return_value.plaintext += b"\xc3\x28" # We retry on decryption failures, just to account for flakiness on the KMS server's side. # Try this as the last attempt to see the actual error. 
self.relocation.latest_task = OrderedTask.PREPROCESSING_SCAN.name self.relocation.latest_task_attempts = MAX_FAST_TASK_RETRIES self.relocation.save() with pytest.raises(Exception): preprocessing_scan(self.uuid) assert fake_message_builder.call_count == 1 assert fake_message_builder.call_args.kwargs["type"] == "relocation.failed" fake_message_builder.return_value.send_async.assert_called_once_with( to=[self.owner.email, self.superuser.email] ) assert fake_kms_client.return_value.asymmetric_decrypt.call_count == 1 assert preprocessing_transfer_mock.call_count == 0 relocation = Relocation.objects.get(uuid=self.uuid) assert relocation.status == Relocation.Status.FAILURE.value assert relocation.latest_notified == Relocation.EmailKind.FAILED.value assert relocation.failure_reason == ERR_PREPROCESSING_DECRYPTION def test_fail_invalid_json( self, preprocessing_transfer_mock: Mock, fake_message_builder: Mock, fake_kms_client: Mock ): file = RelocationFile.objects.get(relocation=self.relocation).file self.swap_relocation_file_with_data_from_fixture(file, "invalid-user.json") self.mock_message_builder(fake_message_builder) self.mock_kms_client(fake_kms_client) preprocessing_scan(self.uuid) assert fake_message_builder.call_count == 1 assert fake_message_builder.call_args.kwargs["type"] == "relocation.failed" fake_message_builder.return_value.send_async.assert_called_once_with( to=[self.owner.email, self.superuser.email] ) assert preprocessing_transfer_mock.call_count == 0 relocation = Relocation.objects.get(uuid=self.uuid) assert relocation.status == Relocation.Status.FAILURE.value assert relocation.latest_notified == Relocation.EmailKind.FAILED.value assert relocation.failure_reason == ERR_PREPROCESSING_INVALID_JSON def test_fail_no_users( self, preprocessing_transfer_mock: Mock, fake_message_builder: Mock, fake_kms_client: Mock ): file = RelocationFile.objects.get(relocation=self.relocation).file self.swap_relocation_file_with_data_from_fixture(file, "single-option.json") 
self.mock_message_builder(fake_message_builder) self.mock_kms_client(fake_kms_client) preprocessing_scan(self.uuid) assert fake_message_builder.call_count == 1 assert fake_message_builder.call_args.kwargs["type"] == "relocation.failed" fake_message_builder.return_value.send_async.assert_called_once_with( to=[self.owner.email, self.superuser.email] ) assert preprocessing_transfer_mock.call_count == 0 relocation = Relocation.objects.get(uuid=self.uuid) assert relocation.status == Relocation.Status.FAILURE.value assert relocation.latest_notified == Relocation.EmailKind.FAILED.value assert relocation.failure_reason == ERR_PREPROCESSING_NO_USERS @patch("sentry.relocation.tasks.process.MAX_USERS_PER_RELOCATION", 1) def test_fail_too_many_users( self, preprocessing_transfer_mock: Mock, fake_message_builder: Mock, fake_kms_client: Mock ): self.mock_message_builder(fake_message_builder) self.mock_kms_client(fake_kms_client) preprocessing_scan(self.uuid) assert fake_message_builder.call_count == 1 assert fake_message_builder.call_args.kwargs["type"] == "relocation.failed" fake_message_builder.return_value.send_async.assert_called_once_with( to=[self.owner.email, self.superuser.email] ) assert preprocessing_transfer_mock.call_count == 0 relocation = Relocation.objects.get(uuid=self.uuid) assert relocation.status == Relocation.Status.FAILURE.value assert relocation.latest_notified == Relocation.EmailKind.FAILED.value assert relocation.failure_reason == ERR_PREPROCESSING_TOO_MANY_USERS.substitute(count=2) def test_fail_no_orgs( self, preprocessing_transfer_mock: Mock, fake_message_builder: Mock, fake_kms_client: Mock ): file = RelocationFile.objects.get(relocation=self.relocation).file self.swap_relocation_file_with_data_from_fixture(file, "user-with-minimum-privileges.json") self.mock_message_builder(fake_message_builder) self.mock_kms_client(fake_kms_client) preprocessing_scan(self.uuid) assert fake_message_builder.call_count == 1 assert 
fake_message_builder.call_args.kwargs["type"] == "relocation.failed" fake_message_builder.return_value.send_async.assert_called_once_with( to=[self.owner.email, self.superuser.email] ) assert preprocessing_transfer_mock.call_count == 0 relocation = Relocation.objects.get(uuid=self.uuid) assert relocation.status == Relocation.Status.FAILURE.value assert relocation.latest_notified == Relocation.EmailKind.FAILED.value assert relocation.failure_reason == ERR_PREPROCESSING_MISSING_ORGS.substitute( orgs="testing" ) @patch("sentry.relocation.tasks.process.MAX_ORGS_PER_RELOCATION", 0) def test_fail_too_many_orgs( self, preprocessing_transfer_mock: Mock, fake_message_builder: Mock, fake_kms_client: Mock ): self.mock_message_builder(fake_message_builder) self.mock_kms_client(fake_kms_client) preprocessing_scan(self.uuid) assert fake_message_builder.call_count == 1 assert fake_message_builder.call_args.kwargs["type"] == "relocation.failed" fake_message_builder.return_value.send_async.assert_called_once_with( to=[self.owner.email, self.superuser.email] ) assert preprocessing_transfer_mock.call_count == 0 relocation = Relocation.objects.get(uuid=self.uuid) assert relocation.status == Relocation.Status.FAILURE.value assert relocation.latest_notified == Relocation.EmailKind.FAILED.value assert relocation.failure_reason == ERR_PREPROCESSING_TOO_MANY_ORGS.substitute(count=1) def test_fail_missing_orgs( self, preprocessing_transfer_mock: Mock, fake_message_builder: Mock, fake_kms_client: Mock ): orgs = ["does-not-exist"] relocation = Relocation.objects.get(uuid=self.uuid) relocation.want_org_slugs = orgs relocation.save() self.mock_message_builder(fake_message_builder) self.mock_kms_client(fake_kms_client) preprocessing_scan(self.uuid) assert fake_message_builder.call_count == 1 assert fake_message_builder.call_args.kwargs["type"] == "relocation.failed" fake_message_builder.return_value.send_async.assert_called_once_with( to=[self.owner.email, self.superuser.email] ) assert 
preprocessing_transfer_mock.call_count == 0 relocation = Relocation.objects.get(uuid=self.uuid) assert relocation.status == Relocation.Status.FAILURE.value assert relocation.latest_notified == Relocation.EmailKind.FAILED.value assert relocation.failure_reason == ERR_PREPROCESSING_MISSING_ORGS.substitute( orgs=",".join(orgs) ) def test_fail_invalid_org_slug( self, preprocessing_transfer_mock: Mock, fake_message_builder: Mock, fake_kms_client: Mock ): orgs = ["$$##"] relocation = Relocation.objects.get(uuid=self.uuid) relocation.want_org_slugs = orgs relocation.save() self.mock_message_builder(fake_message_builder) self.mock_kms_client(fake_kms_client) preprocessing_scan(self.uuid) assert fake_message_builder.call_count == 1 assert fake_message_builder.call_args.kwargs["type"] == "relocation.failed" fake_message_builder.return_value.send_async.assert_called_once_with( to=[self.owner.email, self.superuser.email] ) assert preprocessing_transfer_mock.call_count == 0 relocation = Relocation.objects.get(uuid=self.uuid) assert relocation.status == Relocation.Status.FAILURE.value assert relocation.latest_notified == Relocation.EmailKind.FAILED.value assert relocation.failure_reason == ERR_PREPROCESSING_INVALID_ORG_SLUG.substitute( slug="$$##" ) @patch("sentry.relocation.utils.MessageBuilder") @patch("sentry.relocation.tasks.process.preprocessing_baseline_config.apply_async")
PreprocessingScanTest
python
walkccc__LeetCode
solutions/3249. Count the Number of Good Nodes/3249.py
{ "start": 0, "end": 724 }
class ____: def countGoodNodes(self, edges: list[list[int]]) -> int: n = len(edges) + 1 graph = [[] for _ in range(n)] for u, v in edges: graph[u].append(v) graph[v].append(u) ans = 0 def dfs(u: int, prev: int) -> int: """Returns the size of the subtree rooted at u.""" nonlocal ans size = 1 childrenSizes = [] for v in graph[u]: if v == prev: continue child_size = dfs(v, u) size += child_size childrenSizes.append(child_size) if not childrenSizes or all(s == childrenSizes[0] for s in childrenSizes): ans += 1 return size dfs(0, -1) return ans
Solution
python
faif__python-patterns
patterns/fundamental/delegation_pattern.py
{ "start": 1188, "end": 1436 }
class ____: def __init__(self) -> None: self.p1 = 123 def do_something(self, something: str, kw=None) -> str: return f"Doing {something}{kw or ''}" if __name__ == "__main__": import doctest doctest.testmod()
Delegate
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py
{ "start": 9476, "end": 9660 }
class ____(graphene.ObjectType): class Meta: name = "LogRetrievalShellCommand" stdout = graphene.String() stderr = graphene.String()
GrapheneLogRetrievalShellCommand
python
getsentry__sentry
src/sentry/hybridcloud/models/outbox.py
{ "start": 17862, "end": 18758 }
class ____(OutboxBase): sharding_columns = ("region_name", "shard_scope", "shard_identifier") coalesced_columns = ( "region_name", "shard_scope", "shard_identifier", "category", "object_identifier", ) region_name = models.CharField(max_length=REGION_NAME_LENGTH) def send_signal(self) -> None: process_control_outbox.send( sender=OutboxCategory(self.category), payload=self.payload, region_name=self.region_name, object_identifier=self.object_identifier, shard_identifier=self.shard_identifier, shard_scope=self.shard_scope, date_added=self.date_added, scheduled_for=self.scheduled_for, ) class Meta: abstract = True __repr__ = sane_repr("payload", *coalesced_columns) @control_silo_model
ControlOutboxBase
python
django-haystack__django-haystack
haystack/fields.py
{ "start": 713, "end": 7152 }
class ____: """The base implementation of a search field.""" field_type = None def __init__( self, model_attr=None, use_template=False, template_name=None, document=False, indexed=True, stored=True, faceted=False, default=NOT_PROVIDED, null=False, index_fieldname=None, facet_class=None, boost=1.0, weight=None, analyzer=None, ): # Track what the index thinks this field is called. self.instance_name = None self.model_attr = model_attr self.use_template = use_template self.template_name = template_name self.document = document self.indexed = indexed self.stored = stored self.faceted = faceted self._default = default self.null = null self.index_fieldname = index_fieldname self.boost = weight or boost self.analyzer = analyzer self.is_multivalued = False # We supply the facet_class for making it easy to create a faceted # field based off of this field. self.facet_class = facet_class if self.facet_class is None: self.facet_class = FacetCharField self.set_instance_name(None) def set_instance_name(self, instance_name): self.instance_name = instance_name if self.index_fieldname is None: self.index_fieldname = self.instance_name def has_default(self): """Returns a boolean of whether this field has a default value.""" return self._default is not NOT_PROVIDED @property def default(self): """Returns the default value for the field.""" if callable(self._default): return self._default() return self._default def prepare(self, obj): """ Takes data from the provided object and prepares it for storage in the index. """ # Give priority to a template. 
if self.use_template: return self.prepare_template(obj) elif self.model_attr is not None: attrs = self.split_model_attr_lookups() current_objects = [obj] values = self.resolve_attributes_lookup(current_objects, attrs) if len(values) == 1: return values[0] elif len(values) > 1: return values if self.has_default(): return self.default else: return None def resolve_attributes_lookup(self, current_objects, attributes): """ Recursive method that looks, for one or more objects, for an attribute that can be multiple objects (relations) deep. """ values = [] for current_object in current_objects: if not hasattr(current_object, attributes[0]): raise SearchFieldError( "The model '%r' does not have a model_attr '%s'." % (repr(current_object), attributes[0]) ) if len(attributes) > 1: current_objects_in_attr = self.get_iterable_objects( getattr(current_object, attributes[0]) ) values.extend( self.resolve_attributes_lookup( current_objects_in_attr, attributes[1:] ) ) continue current_object = getattr(current_object, attributes[0]) if current_object is None: if self.has_default(): current_object = self._default elif self.null: current_object = None else: raise SearchFieldError( "The model '%s' combined with model_attr '%s' returned None, but doesn't allow " "a default or null value." % (repr(current_object), self.model_attr) ) if callable(current_object): values.append(current_object()) else: values.append(current_object) return values def split_model_attr_lookups(self): """Returns list of nested attributes for looking through the relation.""" return self.model_attr.split("__") @classmethod def get_iterable_objects(cls, current_objects): """ Returns iterable of objects that contain data. For example, resolves Django ManyToMany relationship so the attributes of the related models can then be accessed. 
""" if current_objects is None: return [] if hasattr(current_objects, "all"): # i.e, Django ManyToMany relationships if ismethod(current_objects.all): return current_objects.all() return [] elif not hasattr(current_objects, "__iter__"): current_objects = [current_objects] return current_objects def prepare_template(self, obj): """ Flattens an object for indexing. This loads a template (``search/indexes/{app_label}/{model_name}_{field_name}.txt``) and returns the result of rendering that template. ``object`` will be in its context. """ if self.instance_name is None and self.template_name is None: raise SearchFieldError( "This field requires either its instance_name variable to be populated or an explicit template_name in order to load the correct template." ) if self.template_name is not None: template_names = self.template_name if not isinstance(template_names, (list, tuple)): template_names = [template_names] else: app_label, model_name = get_model_ct_tuple(obj) template_names = [ "search/indexes/%s/%s_%s.txt" % (app_label, model_name, self.instance_name) ] t = loader.select_template(template_names) return t.render({"object": obj}) def convert(self, value): """ Handles conversion between the data found and the type of the field. Extending classes should override this method and provide correct data coercion. """ return value
SearchField
python
sqlalchemy__sqlalchemy
test/orm/declarative/test_typed_mapping.py
{ "start": 62169, "end": 95386 }
class ____(fixtures.TestBase, testing.AssertsCompiledSQL): __dialect__ = "default" @testing.combinations( (str, types.String), (Decimal, types.Numeric), (float, types.Float), (datetime.datetime, types.DateTime), (uuid.UUID, types.Uuid), argnames="pytype_arg,sqltype", ) def test_datatype_lookups(self, decl_base, pytype_arg, sqltype): # anno only: global pytype pytype = pytype_arg class MyClass(decl_base): __tablename__ = "mytable" id: Mapped[int] = mapped_column(primary_key=True) data: Mapped[pytype] assert isinstance(MyClass.__table__.c.data.type, sqltype) @testing.combinations( (BIGINT(),), (BIGINT,), (Integer().with_variant(BIGINT, "default")), (Integer().with_variant(BIGINT(), "default")), (BIGINT().with_variant(String(), "some_other_dialect")), ) def test_type_map_varieties(self, typ): Base = declarative_base(type_annotation_map={int: typ}) class MyClass(Base): __tablename__ = "mytable" id: Mapped[int] = mapped_column(primary_key=True) x: Mapped[int] y: Mapped[int] = mapped_column() z: Mapped[int] = mapped_column(typ) self.assert_compile( CreateTable(MyClass.__table__), "CREATE TABLE mytable (id BIGINT NOT NULL, " "x BIGINT NOT NULL, y BIGINT NOT NULL, z BIGINT NOT NULL, " "PRIMARY KEY (id))", ) def test_dont_ignore_unresolvable(self, decl_base): """test #8888""" with expect_raises_message( sa_exc.ArgumentError, r"Could not resolve all types within mapped annotation: " r"\".*Mapped\[.*fake.*\]\". Ensure all types are written " r"correctly and are imported within the module in use.", ): class A(decl_base): __tablename__ = "a" id: Mapped[int] = mapped_column(primary_key=True) data: Mapped["fake"] # noqa def test_type_dont_mis_resolve_on_superclass(self): """test for #8859. For subclasses of a type that's in the map, don't resolve this by default, even though we do a search through __mro__. 
""" # anno only: global int_sub class int_sub(int): pass Base = declarative_base( type_annotation_map={ int: Integer, } ) with expect_raises_message( orm_exc.MappedAnnotationError, "Could not locate SQLAlchemy Core type", ): class MyClass(Base): __tablename__ = "mytable" id: Mapped[int] = mapped_column(primary_key=True) data: Mapped[int_sub] @testing.variation("dict_key", ["typing", "plain"]) def test_type_dont_mis_resolve_on_non_generic(self, dict_key): """test for #8859. For a specific generic type with arguments, don't do any MRO lookup. """ Base = declarative_base( type_annotation_map={ dict: String, } ) with expect_raises_message( sa_exc.ArgumentError, "Could not locate SQLAlchemy Core type" ): class MyClass(Base): __tablename__ = "mytable" id: Mapped[int] = mapped_column(primary_key=True) if dict_key.plain: data: Mapped[dict[str, str]] elif dict_key.typing: data: Mapped[Dict[str, str]] def test_type_secondary_resolution(self): class MyString(String): def _resolve_for_python_type( self, python_type, matched_type, matched_on_flattened ): return String(length=42) Base = declarative_base(type_annotation_map={str: MyString}) class MyClass(Base): __tablename__ = "mytable" id: Mapped[int] = mapped_column(primary_key=True) data: Mapped[str] is_true(isinstance(MyClass.__table__.c.data.type, String)) eq_(MyClass.__table__.c.data.type.length, 42) def test_construct_lhs_type_missing(self, decl_base): # anno only: global MyClass class MyClass: pass with expect_raises_message( orm_exc.MappedAnnotationError, "Could not locate SQLAlchemy Core type when resolving for Python " r"type indicated by '.*class .*MyClass.*' inside the " r"Mapped\[\] annotation for the 'data' attribute; the type " "object is not resolvable by the registry", ): class User(decl_base): __tablename__ = "users" id: Mapped[int] = mapped_column(primary_key=True) data: Mapped[MyClass] = mapped_column() @testing.variation( "argtype", [ "type", "column", "mapped_column", "column_class", "ref_to_type", 
"ref_to_column", ], ) def test_construct_lhs_sqlalchemy_type(self, decl_base, argtype): """test for #12329. of note here are all the different messages we have for when the wrong thing is put into Mapped[], and in fact in #12329 we added another one. This is a lot of different messages, but at the same time they occur at different places in the interpretation of types. If we were to centralize all these messages, we'd still likely end up doing distinct messages for each scenario, so instead we added a new ArgumentError subclass MappedAnnotationError that provides some commonality to all of these cases. """ expect_future_annotations = "annotations" in globals() if argtype.type: with expect_raises_message( orm_exc.MappedAnnotationError, # properties.py -> _init_column_for_annotation, type is # a SQL type "The type provided inside the 'data' attribute Mapped " "annotation is the SQLAlchemy type .*BigInteger.*. Expected " "a Python type instead", ): class User(decl_base): __tablename__ = "users" id: Mapped[int] = mapped_column(primary_key=True) data: Mapped[BigInteger] = mapped_column() elif argtype.column: with expect_raises_message( orm_exc.MappedAnnotationError, # util.py -> _extract_mapped_subtype ( re.escape( "Could not interpret annotation " "Mapped[Column('q', BigInteger)]." ) if expect_future_annotations # properties.py -> _init_column_for_annotation, object is # not a SQL type or a python type, it's just some object else re.escape( "The object provided inside the 'data' attribute " "Mapped annotation is not a Python type, it's the " "object Column('q', BigInteger(), table=None). " "Expected a Python type." 
) ), ): class User(decl_base): __tablename__ = "users" id: Mapped[int] = mapped_column(primary_key=True) data: Mapped[Column("q", BigInteger)] = ( # noqa: F821 mapped_column() ) elif argtype.mapped_column: with expect_raises_message( orm_exc.MappedAnnotationError, # properties.py -> _init_column_for_annotation, object is # not a SQL type or a python type, it's just some object # interestingly, this raises at the same point for both # future annotations mode and legacy annotations mode r"The object provided inside the 'data' attribute " "Mapped annotation is not a Python type, it's the object " r"\<sqlalchemy.orm.properties.MappedColumn.*\>. " "Expected a Python type.", ): class User(decl_base): __tablename__ = "users" id: Mapped[int] = mapped_column(primary_key=True) big_integer: Mapped[int] = mapped_column() data: Mapped[big_integer] = mapped_column() elif argtype.column_class: with expect_raises_message( orm_exc.MappedAnnotationError, # properties.py -> _init_column_for_annotation, type is not # a SQL type "Could not locate SQLAlchemy Core type when resolving for " "Python type indicated by " r"'.*class .*.Column.*' inside the " r"Mapped\[\] annotation for the 'data' attribute; the " "type object is not resolvable by the registry", ): class User(decl_base): __tablename__ = "users" id: Mapped[int] = mapped_column(primary_key=True) data: Mapped[Column] = mapped_column() elif argtype.ref_to_type: mytype = BigInteger with expect_raises_message( orm_exc.MappedAnnotationError, ( # decl_base.py -> _exract_mappable_attributes re.escape( "Could not resolve all types within mapped " 'annotation: "Mapped[mytype]"' ) if expect_future_annotations # properties.py -> _init_column_for_annotation, type is # a SQL type else re.escape( "The type provided inside the 'data' attribute Mapped " "annotation is the SQLAlchemy type " "<class 'sqlalchemy.sql.sqltypes.BigInteger'>. 
" "Expected a Python type instead" ) ), ): class User(decl_base): __tablename__ = "users" id: Mapped[int] = mapped_column(primary_key=True) data: Mapped[mytype] = mapped_column() elif argtype.ref_to_column: mycol = Column("q", BigInteger) with expect_raises_message( orm_exc.MappedAnnotationError, # decl_base.py -> _exract_mappable_attributes ( re.escape( "Could not resolve all types within mapped " 'annotation: "Mapped[mycol]"' ) if expect_future_annotations else # properties.py -> _init_column_for_annotation, object is # not a SQL type or a python type, it's just some object re.escape( "The object provided inside the 'data' attribute " "Mapped " "annotation is not a Python type, it's the object " "Column('q', BigInteger(), table=None). " "Expected a Python type." ) ), ): class User(decl_base): __tablename__ = "users" id: Mapped[int] = mapped_column(primary_key=True) data: Mapped[mycol] = mapped_column() else: argtype.fail() def test_plain_typealias_as_typemap_keys( self, decl_base: Type[DeclarativeBase] ): # anno only: global _StrTypeAlias, _UnionTypeAlias class _SomeDict1(TypedDict): type: Literal["1"] class _SomeDict2(TypedDict): type: Literal["2"] _StrTypeAlias = str _UnionTypeAlias = Union[_SomeDict1, _SomeDict2] decl_base.registry.update_type_annotation_map( {_UnionTypeAlias: JSON, _StrTypeAlias: String(30)} ) class Test(decl_base): __tablename__ = "test" id: Mapped[int] = mapped_column(primary_key=True) data: Mapped[_StrTypeAlias] structure: Mapped[_UnionTypeAlias] eq_(Test.__table__.c.data.type.length, 30) is_(Test.__table__.c.structure.type._type_affinity, JSON) @testing.variation( "option", [ "plain", "union", "union_604", "null", "union_null", "union_null_604", "optional", "optional_union", "optional_union_604", "union_newtype", "union_null_newtype", "union_695", "union_null_695", ], ) @testing.variation("in_map", ["yes", "no", "value"]) @testing.requires.python312 def test_pep695_behavior(self, decl_base, in_map, option): """Issue #11955; later issue 
#12829""" # anno only: global tat if option.plain: tat = TypeAliasType("tat", str) elif option.union: tat = TypeAliasType("tat", Union[str, int]) elif option.union_604: tat = TypeAliasType("tat", str | int) elif option.null: tat = TypeAliasType("tat", None) elif option.union_null: tat = TypeAliasType("tat", Union[str, int, None]) elif option.union_null_604: tat = TypeAliasType("tat", str | int | None) elif option.optional: tat = TypeAliasType("tat", Optional[str]) elif option.optional_union: tat = TypeAliasType("tat", Optional[Union[str, int]]) elif option.optional_union_604: tat = TypeAliasType("tat", Optional[str | int]) elif option.union_newtype: # this seems to be illegal for typing but "works" tat = NewType("tat", Union[str, int]) elif option.union_null_newtype: # this seems to be illegal for typing but "works" tat = NewType("tat", Union[str, int, None]) elif option.union_695: tat = TypeAliasType("tat", str | int) elif option.union_null_695: tat = TypeAliasType("tat", str | int | None) else: option.fail() is_newtype = "newtype" in option.name if in_map.yes: decl_base.registry.update_type_annotation_map({tat: String(99)}) elif in_map.value and not is_newtype: decl_base.registry.update_type_annotation_map( {tat.__value__: String(99)} ) def declare(): class Test(decl_base): __tablename__ = "test" id: Mapped[int] = mapped_column(primary_key=True) data: Mapped[tat] return Test.__table__.c.data if in_map.yes or (in_map.value and not is_newtype): col = declare() # String(99) inside the type_map is_true(isinstance(col.type, String)) eq_(col.type.length, 99) nullable = "null" in option.name or "optional" in option.name eq_(col.nullable, nullable) elif option.plain or option.optional: col = declare() # plain string from default lookup is_true(isinstance(col.type, String)) eq_(col.type.length, None) nullable = "null" in option.name or "optional" in option.name eq_(col.nullable, nullable) else: with expect_raises_message( orm_exc.MappedAnnotationError, r"Could not locate 
SQLAlchemy Core type when resolving " r"for Python type " r"indicated by '.*tat' inside the Mapped\[\] " r"annotation for the 'data' attribute;", ): declare() @testing.variation("in_map", ["yes", "no", "value"]) @testing.variation("lookup", ["A", "B", "value"]) def test_recursive_pep695_cases( self, decl_base, in_map: Variation, lookup: Variation ): # anno only: global A, B A = TypingTypeAliasType("A", Union[int, float]) B = TypingTypeAliasType("B", A) if in_map.yes: decl_base.registry.update_type_annotation_map({A: Numeric(10, 5)}) elif in_map.value: decl_base.registry.update_type_annotation_map( {A.__value__: Numeric(10, 5)} ) def declare(): class MyClass(decl_base): __tablename__ = "my_table" id: Mapped[int] = mapped_column(primary_key=True) if lookup.A: data: Mapped[A] elif lookup.B: data: Mapped[B] elif lookup.value: data: Mapped[Union[int, float]] else: lookup.fail() return MyClass if ( (in_map.value and lookup.B) or in_map.no or (in_map.yes and lookup.value) ): with expect_raises_message( orm_exc.MappedAnnotationError, "Could not locate SQLAlchemy Core type when resolving " "for Python type indicated by", ): declare() else: MyClass = declare() eq_(MyClass.data.expression.type.precision, 10) @testing.variation( "type_", [ "str_extension", "str_typing", "generic_extension", "generic_typing", "generic_typed_extension", "generic_typed_typing", ], ) @testing.requires.python312 def test_pep695_typealias_as_typemap_keys( self, decl_base: Type[DeclarativeBase], type_, pep_695_types ): """test #10807, #12829""" decl_base.registry.update_type_annotation_map( { _UnionPep695: JSON, _StrPep695: String(30), _TypingStrPep695: String(30), _GenericPep695: String(30), _TypingGenericPep695: String(30), _GenericPep695Typed: String(30), _TypingGenericPep695Typed: String(30), } ) class Test(decl_base): __tablename__ = "test" id: Mapped[int] = mapped_column(primary_key=True) if type_.str_extension: data: Mapped[_StrPep695] elif type_.str_typing: data: Mapped[_TypingStrPep695] elif 
type_.generic_extension: data: Mapped[_GenericPep695] elif type_.generic_typing: data: Mapped[_TypingGenericPep695] elif type_.generic_typed_extension: data: Mapped[_GenericPep695Typed] elif type_.generic_typed_typing: data: Mapped[_TypingGenericPep695Typed] else: type_.fail() structure: Mapped[_UnionPep695] eq_(Test.__table__.c.data.type._type_affinity, String) eq_(Test.__table__.c.data.type.length, 30) is_(Test.__table__.c.structure.type._type_affinity, JSON) def test_pep484_newtypes_as_typemap_keys( self, decl_base: Type[DeclarativeBase] ): # anno only: global str50, str30, str3050 str50 = NewType("str50", str) str30 = NewType("str30", str) str3050 = NewType("str30", str50) decl_base.registry.update_type_annotation_map( {str50: String(50), str30: String(30), str3050: String(150)} ) class MyClass(decl_base): __tablename__ = "my_table" id: Mapped[str50] = mapped_column(primary_key=True) data_one: Mapped[str30] data_two: Mapped[str50] data_three: Mapped[Optional[str30]] data_four: Mapped[str3050] eq_(MyClass.__table__.c.data_one.type.length, 30) is_false(MyClass.__table__.c.data_one.nullable) eq_(MyClass.__table__.c.data_two.type.length, 50) is_false(MyClass.__table__.c.data_two.nullable) eq_(MyClass.__table__.c.data_three.type.length, 30) is_true(MyClass.__table__.c.data_three.nullable) eq_(MyClass.__table__.c.data_four.type.length, 150) is_false(MyClass.__table__.c.data_four.nullable) def test_newtype_missing_from_map(self, decl_base): # anno only: global str50 str50 = NewType("str50", str) with expect_raises_message( orm_exc.MappedAnnotationError, "Could not locate SQLAlchemy Core type when resolving for Python " r"type indicated by '.*.str50' inside the Mapped\[\] annotation " "for the 'data_one' attribute; the type object is not " "resolvable by the registry", ): class MyClass(decl_base): __tablename__ = "my_table" id: Mapped[int] = mapped_column(primary_key=True) data_one: Mapped[str50] @testing.variation( "union", ["union", "pep604", "union_null", 
"pep604_null"], ) def test_unions(self, union): # anno only: global UnionType our_type = Numeric(10, 2) if union.union: UnionType = Union[float, Decimal] elif union.union_null: UnionType = Union[float, Decimal, None] elif union.pep604: UnionType = float | Decimal elif union.pep604_null: UnionType = float | Decimal | None else: union.fail() class Base(DeclarativeBase): type_annotation_map = {UnionType: our_type} class User(Base): __tablename__ = "users" id: Mapped[int] = mapped_column(primary_key=True) data: Mapped[Union[float, Decimal]] reverse_data: Mapped[Union[Decimal, float]] optional_data: Mapped[Optional[Union[float, Decimal]]] = ( mapped_column() ) # use Optional directly reverse_optional_data: Mapped[Optional[Union[Decimal, float]]] = ( mapped_column() ) # use Union with None, same as Optional but presents differently # (Optional object with __origin__ Union vs. Union) reverse_u_optional_data: Mapped[Union[Decimal, float, None]] = ( mapped_column() ) refer_union: Mapped[UnionType] refer_union_optional: Mapped[Optional[UnionType]] # py38, 37 does not automatically flatten unions, add extra tests # for this. 
maintain these in order to catch future regressions # in the behavior of ``Union`` unflat_union_optional_data: Mapped[ Union[Union[Decimal, float, None], None] ] = mapped_column() float_data: Mapped[float] = mapped_column() decimal_data: Mapped[Decimal] = mapped_column() pep604_data: Mapped[float | Decimal] = mapped_column() pep604_reverse: Mapped[Decimal | float] = mapped_column() pep604_optional: Mapped[Decimal | float | None] = mapped_column() pep604_data_fwd: Mapped["float | Decimal"] = mapped_column() pep604_reverse_fwd: Mapped["Decimal | float"] = mapped_column() pep604_optional_fwd: Mapped["Decimal | float | None"] = ( mapped_column() ) info = [ ("data", False), ("reverse_data", False), ("optional_data", True), ("reverse_optional_data", True), ("reverse_u_optional_data", True), ("refer_union", "null" in union.name), ("refer_union_optional", True), ("unflat_union_optional_data", True), ("pep604_data", False), ("pep604_reverse", False), ("pep604_optional", True), ("pep604_data_fwd", False), ("pep604_reverse_fwd", False), ("pep604_optional_fwd", True), ] for name, nullable in info: col = User.__table__.c[name] is_(col.type, our_type, name) is_(col.nullable, nullable, name) is_true(isinstance(User.__table__.c.float_data.type, Float)) ne_(User.__table__.c.float_data.type, our_type) is_true(isinstance(User.__table__.c.decimal_data.type, Numeric)) ne_(User.__table__.c.decimal_data.type, our_type) @testing.variation( "union", [ "union", "pep604", ("pep695", requires.python312), ], ) def test_optional_in_annotation_map(self, union): """See issue #11370""" global _Json, _JsonPep604, _JsonPep695 _JsonPrimitive = Union[str, int, float, bool, None] _JsonObject = Dict[str, "_Json"] _JsonArray = List["_Json"] _Json = Union[_JsonObject, _JsonArray, _JsonPrimitive] _JsonPrimitivePep604 = str | int | float | bool | None _JsonObjectPep604 = dict[str, "_JsonPep604"] _JsonArrayPep604 = list["_JsonPep604"] _JsonPep604 = ( _JsonObjectPep604 | _JsonArrayPep604 | 
_JsonPrimitivePep604 ) _JsonPep695 = TypeAliasType("_JsonPep695", _JsonPep604) class Base(DeclarativeBase): if union.union: type_annotation_map = {_Json: JSON} elif union.pep604: type_annotation_map = {_JsonPep604: JSON} elif union.pep695: type_annotation_map = {_JsonPep695: JSON} # noqa: F821 else: union.fail() class A(Base): __tablename__ = "a" id: Mapped[int] = mapped_column(primary_key=True) if union.union: json1: Mapped[_Json] json2: Mapped[_Json] = mapped_column(nullable=False) elif union.pep604: json1: Mapped[_JsonPep604] json2: Mapped[_JsonPep604] = mapped_column(nullable=False) elif union.pep695: json1: Mapped[_JsonPep695] # noqa: F821 json2: Mapped[_JsonPep695] = mapped_column( # noqa: F821 nullable=False ) else: union.fail() is_(A.__table__.c.json1.type._type_affinity, JSON) is_(A.__table__.c.json2.type._type_affinity, JSON) is_true(A.__table__.c.json1.nullable) is_false(A.__table__.c.json2.nullable) @testing.variation( "option", [ "not_optional", "optional", "optional_fwd_ref", "union_none", "pep604", "pep604_fwd_ref", ], ) @testing.variation("brackets", ["oneset", "twosets"]) @testing.combinations( "include_mc_type", "derive_from_anno", argnames="include_mc_type" ) def test_optional_styles_nested_brackets( self, option, brackets, include_mc_type ): """composed types test, includes tests that were added later for #12207""" class Base(DeclarativeBase): type_annotation_map = { Dict[str, Decimal]: JSON, dict[str, Decimal]: JSON, Union[List[int], List[str]]: JSON, list[int] | list[str]: JSON, } if include_mc_type == "include_mc_type": mc = mapped_column(JSON) mc2 = mapped_column(JSON) else: mc = mapped_column() mc2 = mapped_column() class A(Base): __tablename__ = "a" id: Mapped[int] = mapped_column(primary_key=True) data: Mapped[str] = mapped_column() if brackets.oneset: if option.not_optional: json: Mapped[Dict[str, Decimal]] = mapped_column() # type: ignore # noqa: E501 json2: Mapped[dict[str, Decimal]] = mapped_column() # type: ignore # noqa: E501 elif 
option.optional: json: Mapped[Optional[Dict[str, Decimal]]] = mc json2: Mapped[Optional[dict[str, Decimal]]] = mc2 elif option.optional_fwd_ref: json: Mapped["Optional[Dict[str, Decimal]]"] = mc json2: Mapped["Optional[dict[str, Decimal]]"] = mc2 elif option.union_none: json: Mapped[Union[Dict[str, Decimal], None]] = mc json2: Mapped[Union[None, Dict[str, Decimal]]] = mc2 elif option.pep604: json: Mapped[dict[str, Decimal] | None] = mc json2: Mapped[None | dict[str, Decimal]] = mc2 elif option.pep604_fwd_ref: json: Mapped["dict[str, Decimal] | None"] = mc json2: Mapped["None | dict[str, Decimal]"] = mc2 elif brackets.twosets: if option.not_optional: json: Mapped[Union[List[int], List[str]]] = mapped_column() # type: ignore # noqa: E501 elif option.optional: json: Mapped[Optional[Union[List[int], List[str]]]] = mc json2: Mapped[Optional[Union[list[int], list[str]]]] = mc2 elif option.optional_fwd_ref: json: Mapped["Optional[Union[List[int], List[str]]]"] = mc json2: Mapped["Optional[Union[list[int], list[str]]]"] = ( mc2 ) elif option.union_none: json: Mapped[Union[List[int], List[str], None]] = mc json2: Mapped[Union[None, list[int], list[str]]] = mc2 elif option.pep604: json: Mapped[list[int] | list[str] | None] = mc json2: Mapped[None | list[int] | list[str]] = mc2 elif option.pep604_fwd_ref: json: Mapped["list[int] | list[str] | None"] = mc json2: Mapped["None | list[int] | list[str]"] = mc2 else: brackets.fail() is_(A.__table__.c.json.type._type_affinity, JSON) if hasattr(A, "json2"): is_(A.__table__.c.json2.type._type_affinity, JSON) if option.not_optional: is_false(A.__table__.c.json2.nullable) else: is_true(A.__table__.c.json2.nullable) if option.not_optional: is_false(A.__table__.c.json.nullable) else: is_true(A.__table__.c.json.nullable) @testing.variation("optional", [True, False]) @testing.variation("provide_type", [True, False]) @testing.variation("add_to_type_map", [True, False]) def test_recursive_type( self, decl_base, optional, provide_type, 
add_to_type_map ): """test #9553""" global T T = Dict[str, Optional["T"]] if not provide_type and not add_to_type_map: with expect_raises_message( sa_exc.ArgumentError, r"Could not locate SQLAlchemy.*" r".*ForwardRef\('T'\).*", ): class TypeTest(decl_base): __tablename__ = "my_table" id: Mapped[int] = mapped_column(primary_key=True) if optional: type_test: Mapped[Optional[T]] = mapped_column() else: type_test: Mapped[T] = mapped_column() return else: if add_to_type_map: decl_base.registry.update_type_annotation_map({T: JSON()}) class TypeTest(decl_base): __tablename__ = "my_table" id: Mapped[int] = mapped_column(primary_key=True) if add_to_type_map: if optional: type_test: Mapped[Optional[T]] = mapped_column() else: type_test: Mapped[T] = mapped_column() else: if optional: type_test: Mapped[Optional[T]] = mapped_column(JSON()) else: type_test: Mapped[T] = mapped_column(JSON()) if optional: is_(TypeTest.__table__.c.type_test.nullable, True) else: is_(TypeTest.__table__.c.type_test.nullable, False) self.assert_compile( select(TypeTest), "SELECT my_table.id, my_table.type_test FROM my_table", )
TypeResolutionTests
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/ruff/RUF049.py
{ "start": 390, "end": 455 }
class ____(ReprEnum): ... @dataclass( frozen=True ) # Foobar
E
python
anthropics__anthropic-sdk-python
src/anthropic/types/beta/file_upload_params.py
{ "start": 364, "end": 631 }
class ____(TypedDict, total=False): file: Required[FileTypes] """The file to upload""" betas: Annotated[List[AnthropicBetaParam], PropertyInfo(alias="anthropic-beta")] """Optional header to specify the beta version(s) you want to use."""
FileUploadParams
python
tiangolo__fastapi
docs_src/sql_databases/tutorial002.py
{ "start": 448, "end": 499 }
class ____(HeroBase): secret_name: str
HeroCreate
python
huggingface__transformers
tests/generation/test_candidate_generator.py
{ "start": 4760, "end": 9891 }
class ____(unittest.TestCase): def setUp(self): # Clear the cache before each test AssistantVocabTranslatorCache._cache.clear() # Create mock tokenizers with different vocabularies self.target_tokenizer = MockTokenizer({"hello": 0, "world": 1}) self.assistant_tokenizer = MockTokenizer({"hello": 0, "world": 1, "foo": 2}) self.other_target_tokenizer = MockTokenizer({"foo": 2, "bar": 3}) self.other_assistant_tokenizer = MockTokenizer({"baz": 4, "qux": 5}) self.assistant_model = MagicMock(device=torch_device) self.target_vocab_size = 6 def test_same_instance_for_same_tokenizers(self): """Test that the same translator is returned for the same tokenizers.""" translator1 = AssistantVocabTranslatorCache.get_translator( self.target_tokenizer, self.assistant_tokenizer, target_vocab_size=self.target_vocab_size, assistant_model=self.assistant_model, assistant_prune_lm_head=False, ) translator2 = AssistantVocabTranslatorCache.get_translator( self.target_tokenizer, self.assistant_tokenizer, target_vocab_size=self.target_vocab_size, assistant_model=self.assistant_model, assistant_prune_lm_head=False, ) self.assertIs(translator1, translator2, "Translators should be cached and identical") def test_different_instances_for_different_tokenizers(self): """Test that different tokenizers produce different translators.""" translator1 = AssistantVocabTranslatorCache.get_translator( self.target_tokenizer, self.assistant_tokenizer, target_vocab_size=self.target_vocab_size, assistant_model=self.assistant_model, assistant_prune_lm_head=False, ) translator2 = AssistantVocabTranslatorCache.get_translator( self.other_target_tokenizer, self.other_assistant_tokenizer, target_vocab_size=self.target_vocab_size, assistant_model=self.assistant_model, assistant_prune_lm_head=False, ) self.assertIsNot(translator1, translator2, "Translators should differ for different tokenizers") def test_cache_with_weakref_key(self): """Ensure that the cache uses weak references as keys.""" initial_cache_size = 
len(AssistantVocabTranslatorCache._cache) target_tokenizer = MockTokenizer({"hello": 0}) assistant_tokenizer = MockTokenizer({"hello": 0}) # Store translator in a local variable to avoid it being kept alive translator = AssistantVocabTranslatorCache.get_translator( target_tokenizer, assistant_tokenizer, target_vocab_size=self.target_vocab_size, assistant_model=self.assistant_model, assistant_prune_lm_head=False, ) self.assertEqual(len(AssistantVocabTranslatorCache._cache), initial_cache_size + 1) # Delete all strong references del target_tokenizer del assistant_tokenizer del translator # Force garbage collection gc.collect() # Call cleanup to remove dead entries AssistantVocabTranslatorCache.cleanup() # The cache size remains increased due to strong references self.assertEqual(len(AssistantVocabTranslatorCache._cache), initial_cache_size + 1) def test_weakref_cache_cleanup(self): """Test that the cache cleans up translators when tokenizers are garbage collected.""" def create_translator(): target_tokenizer = MockTokenizer({"hello": 0}) assistant_tokenizer = MockTokenizer({"hello": 0}) translator = AssistantVocabTranslatorCache.get_translator( target_tokenizer, assistant_tokenizer, target_vocab_size=self.target_vocab_size, assistant_model=self.assistant_model, assistant_prune_lm_head=False, ) # Create weak references before returning refs = (weakref.ref(translator), weakref.ref(target_tokenizer), weakref.ref(assistant_tokenizer)) # Remove strong references inside the function del target_tokenizer del assistant_tokenizer del translator return refs translator_ref, target_ref, assistant_ref = create_translator() # Force garbage collection gc.collect() # Call cleanup to remove dead entries AssistantVocabTranslatorCache.cleanup() # The tokenizers and translator are not garbage collected due to strong references self.assertIsNotNone(target_ref(), "Target tokenizer should still be alive due to strong references") self.assertIsNotNone(assistant_ref(), "Assistant tokenizer 
should still be alive due to strong references") self.assertIsNotNone(translator_ref(), "Translator should still be alive due to strong references") @require_torch
TestAssistantVocabTranslatorCache
python
run-llama__llama_index
llama-index-integrations/readers/llama-index-readers-github/llama_index/readers/github/issues/github_client.py
{ "start": 247, "end": 693 }
class ____(Protocol): def get_all_endpoints(self) -> Dict[str, str]: ... async def request( self, endpoint: str, method: str, headers: Dict[str, Any] = {}, params: Dict[str, Any] = {}, **kwargs: Any, ) -> Any: ... async def get_issues( self, owner: str, repo: str, state: str = "open", page: int = 1, ) -> Dict: ...
BaseGitHubIssuesClient
python
wireservice__csvkit
tests/test_utilities/test_csvclean.py
{ "start": 210, "end": 9245 }
class ____(CSVKitTestCase, EmptyFileTests): Utility = CSVClean default_args = ['--length-mismatch'] def assertCleaned(self, args, output_rows, error_rows=[]): output_file = io.StringIO() error_file = io.StringIO() utility = CSVClean(args, output_file, error_file) if error_rows: with self.assertRaises(SystemExit) as e: utility.run() self.assertEqual(e.exception.code, 1) else: utility.run() output_file.seek(0) error_file.seek(0) if output_rows: reader = agate.csv.reader(output_file) for row in output_rows: self.assertEqual(next(reader), row) self.assertRaises(StopIteration, next, reader) if error_rows: reader = agate.csv.reader(error_file) for row in error_rows: self.assertEqual(next(reader), row) self.assertRaises(StopIteration, next, reader) output_file.close() error_file.close() def test_launch_new_instance(self): with patch.object(sys, 'argv', [self.Utility.__name__.lower()] + self.default_args + ['examples/dummy.csv']): launch_new_instance() def test_options(self): for args, message in ( ( [], 'No checks or fixes were enabled. See available options with: csvclean --help', ), ( ['--join-short-rows', '--fill-short-rows'], 'The --join-short-rows and --fill-short-rows options are mutually exclusive.', ), ): with self.subTest(args=args): self.assertError(launch_new_instance, args, message) def test_skip_lines(self): self.assertCleaned( ['--length-mismatch', '--omit-error-rows', '--skip-lines', '3', 'examples/bad_skip_lines.csv'], [ ['column_a', 'column_b', 'column_c'], ['0', 'mixed types.... uh oh', '17'], ], [ ['line_number', 'msg', 'column_a', 'column_b', 'column_c'], ['1', 'Expected 3 columns, found 4 columns', '1', '27', '', "I'm too long!"], ['2', 'Expected 3 columns, found 2 columns', '', "I'm too short!"], ], ) def test_simple(self): self.assertCleaned(['--length-mismatch', '--omit-error-rows', 'examples/bad.csv'], [ ['column_a', 'column_b', 'column_c'], ['0', 'mixed types.... 
uh oh', '17'], ], [ ['line_number', 'msg', 'column_a', 'column_b', 'column_c'], ['1', 'Expected 3 columns, found 4 columns', '1', '27', '', "I'm too long!"], ['2', 'Expected 3 columns, found 2 columns', '', "I'm too short!"], ]) def test_no_header_row(self): self.assertCleaned(['--length-mismatch', 'examples/no_header_row.csv'], [ ['1', '2', '3'], ]) def test_header_normalize_space(self): self.assertCleaned(['--header-normalize-space', 'examples/test_header_newline.csv'], [ ['start end', 'b', 'c'], ['d', 'e', 'f'], ]) def test_join_short_rows(self): self.assertCleaned(['--omit-error-rows', '--join-short-rows', 'examples/test_join_short_rows.csv'], [ ['a', 'b', 'c'], ['1', 'cat\ndog', 'c'], ['3', 'b', 'c'], ]) def test_join_short_rows_separator(self): self.assertCleaned( ['--omit-error-rows', '--join-short-rows', '--separator', 'XYZ', 'examples/test_join_short_rows.csv'], [ ['a', 'b', 'c'], ['1', 'catXYZdog', 'c'], ['3', 'b', 'c'], ], ) def test_fill_short_rows(self): self.assertCleaned(['--fill-short-rows', 'examples/test_join_short_rows.csv'], [ ['a', 'b', 'c'], ['1', 'cat', ''], ['dog', 'c', ''], ['3', 'b', 'c'], ]) def test_fill_short_rows_separator(self): self.assertCleaned(['--fill-short-rows', '--fillvalue', 'XYZ', 'examples/test_join_short_rows.csv'], [ ['a', 'b', 'c'], ['1', 'cat', 'XYZ'], ['dog', 'c', 'XYZ'], ['3', 'b', 'c'], ]) def test_empty_columns(self): self.assertCleaned(['--empty-columns', 'examples/test_empty_columns.csv'], [ ['a', 'b', 'c', '', ''], ['a', '', '', '', ''], ['', '', 'c', ''], ['', '', '', '', ''], ], [ ['line_number', 'msg', 'a', 'b', 'c', '', ''], ['1', "Empty columns named 'b', '', ''! Try: csvcut -C 2,4,5", '', '', '', '', ''], ]) def test_empty_columns_short_row(self): self.assertCleaned(['--empty-columns', 'examples/test_empty_columns_short_row.csv'], [ ['a', 'b', 'c'], ['', ''], ], [ ['line_number', 'msg', 'a', 'b', 'c'], ['1', "Empty columns named 'a', 'b', 'c'! 
Try: csvcut -C 1,2,3", '', '', ''], ]) def test_empty_columns_long_row(self): self.assertCleaned(['--empty-columns', 'examples/test_empty_columns_long_row.csv'], [ ['a', 'b', 'c'], ['', '', '', ''], ], [ ['line_number', 'msg', 'a', 'b', 'c'], ['1', "Empty columns named 'a', 'b', 'c'! Try: csvcut -C 1,2,3", '', '', ''], ]) def test_empty_columns_zero(self): self.assertCleaned(['--empty-columns', '--zero', 'examples/test_empty_columns.csv'], [ ['a', 'b', 'c', '', ''], ['a', '', '', '', ''], ['', '', 'c', ''], ['', '', '', '', ''], ], [ ['line_number', 'msg', 'a', 'b', 'c', '', ''], ['1', "Empty columns named 'b', '', ''! Try: csvcut -C 1,3,4", '', '', '', '', ''], ]) def test_enable_all_checks(self): self.assertCleaned(['-a', 'examples/test_empty_columns.csv'], [ ['a', 'b', 'c', '', ''], ['a', '', '', '', ''], ['', '', 'c', ''], ['', '', '', '', ''], ], [ ['line_number', 'msg', 'a', 'b', 'c', '', ''], ['2', 'Expected 5 columns, found 4 columns', '', '', 'c', ''], ['1', "Empty columns named 'b', '', ''! Try: csvcut -C 2,4,5", '', '', '', '', ''], ]) def test_label(self): self.assertCleaned(['-a', '--label', 'xyz', 'examples/test_empty_columns.csv'], [ ['a', 'b', 'c', '', ''], ['a', '', '', '', ''], ['', '', 'c', ''], ['', '', '', '', ''], ], [ ['label', 'line_number', 'msg', 'a', 'b', 'c', '', ''], ['xyz', '2', 'Expected 5 columns, found 4 columns', '', '', 'c', ''], ['xyz', '1', "Empty columns named 'b', '', ''! Try: csvcut -C 2,4,5", '', '', '', '', ''], ]) def test_label_default(self): self.assertCleaned(['-a', '--label', '-', 'examples/test_empty_columns.csv'], [ ['a', 'b', 'c', '', ''], ['a', '', '', '', ''], ['', '', 'c', ''], ['', '', '', '', ''], ], [ ['label', 'line_number', 'msg', 'a', 'b', 'c', '', ''], ['examples/test_empty_columns.csv', '2', 'Expected 5 columns, found 4 columns', '', '', 'c', ''], ['examples/test_empty_columns.csv', '1', "Empty columns named 'b', '', ''! 
Try: csvcut -C 2,4,5", '', '', '', '', ''], # noqa: E501 ]) def test_label_default_stdin(self): input_file = io.BytesIO(b'a,b,c\n,\n') with stdin_as_string(input_file): self.assertCleaned(['-a', '--label', '-'], [ ['a', 'b', 'c'], ['', ''], ], [ ['label', 'line_number', 'msg', 'a', 'b', 'c'], ['stdin', '1', 'Expected 3 columns, found 2 columns', '', ''], ['stdin', '1', "Empty columns named 'a', 'b', 'c'! Try: csvcut -C 1,2,3", '', '', ''], ]) def test_removes_optional_quote_characters(self): self.assertCleaned(['--length-mismatch', 'examples/optional_quote_characters.csv'], [ ['a', 'b', 'c'], ['1', '2', '3'], ]) def test_changes_line_endings(self): self.assertCleaned(['--length-mismatch', 'examples/mac_newlines.csv'], [ ['a', 'b', 'c'], ['1', '2', '3'], ['Once upon\na time', '5', '6'], ]) def test_changes_character_encoding(self): self.assertCleaned(['--length-mismatch', '-e', 'latin1', 'examples/test_latin1.csv'], [ ['a', 'b', 'c'], ['1', '2', '3'], ['4', '5', u'©'], ]) def test_removes_bom(self): self.assertCleaned(['--length-mismatch', 'examples/test_utf8_bom.csv'], [ ['foo', 'bar', 'baz'], ['1', '2', '3'], ['4', '5', 'ʤ'], ])
TestCSVClean
python
pytorch__pytorch
test/cpp_extensions/open_registration_extension/torch_openreg/tests/test_memory.py
{ "start": 13289, "end": 15694 }
class ____(TestCase): """Test basic multi-device allocation functionality.""" def setUp(self): self.device_count = torch.openreg.device_count() self.assertEqual(self.device_count, 2, "This test requires 2 OpenReg devices") gc.collect() def tearDown(self): """Restore device 0 to avoid affecting subsequent tests.""" torch.openreg.set_device(0) gc.collect() def test_allocation_on_device_1(self): torch.openreg.set_device(1) x = torch.empty(100, device="openreg:1") self.assertEqual(x.device.type, "openreg") self.assertEqual(x.device.index, 1) def test_simultaneous_device_allocations(self): """Test allocations on both devices simultaneously.""" x = torch.empty(100, device="openreg:0") y = torch.empty(200, device="openreg:1") self.assertEqual(x.device.index, 0) self.assertEqual(y.device.index, 1) self.assertNotEqual(x.data_ptr(), y.data_ptr()) def test_memory_isolation_between_devices(self): """Test that memory allocations are isolated between devices.""" tensors_dev0 = [torch.empty(1000, device="openreg:0") for _ in range(10)] tensors_dev1 = [torch.empty(1000, device="openreg:1") for _ in range(10)] # Verify all device 0 tensors are on device 0 for t in tensors_dev0: self.assertEqual(t.device.index, 0) # Verify all device 1 tensors are on device 1 for t in tensors_dev1: self.assertEqual(t.device.index, 1) # Pointers should be different ptrs_dev0 = {t.data_ptr() for t in tensors_dev0} ptrs_dev1 = {t.data_ptr() for t in tensors_dev1} self.assertEqual( len(ptrs_dev0 & ptrs_dev1), 0, "Devices should not share pointers" ) def test_alternating_device_allocations(self): """Test alternating allocations between devices.""" tensors = [] for i in range(20): device_idx = i % 2 t = torch.empty(100 + i, device=f"openreg:{device_idx}") self.assertEqual(t.device.index, device_idx) tensors.append(t) # Verify all tensors retained correct device assignment for i, t in enumerate(tensors): expected_device = i % 2 self.assertEqual(t.device.index, expected_device)
TestMultiDeviceAllocation
python
getsentry__sentry
src/sentry_plugins/jira/plugin.py
{ "start": 1257, "end": 27086 }
class ____(CorePluginMixin, IssuePlugin2): description = "Integrate JIRA issues by linking a project." slug = "jira" title = "JIRA" conf_title = title conf_key = slug required_field = "username" feature_descriptions = [ FeatureDescription( """ Create and link Sentry issue groups directly to a Jira ticket in any of your projects, providing a quick way to jump from a Sentry bug to tracked ticket. """, IntegrationFeatures.ISSUE_BASIC, ) ] def get_group_urls(self): _patterns = super().get_group_urls() _patterns.append( re_path( r"^autocomplete", IssueGroupActionEndpoint.as_view(view_method_name="view_autocomplete", plugin=self), name=f"sentry-api-0-plugins-{self.slug}-autocomplete", ) ) return _patterns def is_configured(self, project) -> bool: if not self.get_option("default_project", project): return False return True def get_group_description(self, group, event): # mostly the same as parent class, but change ``` to {code} output = [absolute_uri(group.get_absolute_url(params={"referrer": "jira_plugin"}))] body = self.get_group_body(group, event) if body: output.extend(["", "{code}", body, "{code}"]) return "\n".join(output) def build_dynamic_field(self, group, field_meta): """ Builds a field based on JIRA's meta field information """ schema = field_meta["schema"] # set up some defaults for form fields fieldtype = "text" fkwargs = {"label": field_meta["name"], "required": field_meta["required"]} # override defaults based on field configuration if ( schema["type"] in ["securitylevel", "priority"] or schema.get("custom") == JIRA_CUSTOM_FIELD_TYPES["select"] ): fieldtype = "select" fkwargs["choices"] = self.make_choices(field_meta.get("allowedValues")) elif field_meta.get("autoCompleteUrl") and ( schema.get("items") == "user" or schema["type"] == "user" ): fieldtype = "select" sentry_url = f"/api/0/issues/{group.id}/plugins/{self.slug}/autocomplete" fkwargs["url"] = "{}?jira_url={}".format( sentry_url, quote_plus(field_meta["autoCompleteUrl"]), ) 
fkwargs["has_autocomplete"] = True fkwargs["placeholder"] = "Start typing to search for a user" elif schema["type"] in ["timetracking"]: # TODO: Implement timetracking (currently unsupported alltogether) return None elif schema.get("items") in ["worklog", "attachment"]: # TODO: Implement worklogs and attachments someday return None elif schema["type"] == "array" and schema["items"] != "string": fieldtype = "select" fkwargs.update( { "multiple": True, "choices": self.make_choices(field_meta.get("allowedValues")), "default": [], } ) # break this out, since multiple field types could additionally # be configured to use a custom property instead of a default. if schema.get("custom"): if schema["custom"] == JIRA_CUSTOM_FIELD_TYPES["textarea"]: fieldtype = "textarea" fkwargs["type"] = fieldtype return fkwargs def get_issue_type_meta(self, issue_type, meta): issue_types = meta["issuetypes"] issue_type_meta = None if issue_type: matching_type = [t for t in issue_types if t["id"] == issue_type] issue_type_meta = matching_type[0] if len(matching_type) > 0 else None # still no issue type? just use the first one. if not issue_type_meta: issue_type_meta = issue_types[0] return issue_type_meta def get_new_issue_fields(self, request: Request | None, group, event, **kwargs): fields = super()._get_new_issue_fields_impl(group, event) jira_project_key = self.get_option("default_project", group.project) client = self.get_jira_client(group.project) try: meta = client.get_create_meta_for_project(jira_project_key) except ApiUnauthorized: raise PluginError( "JIRA returned: Unauthorized. " "Please check your username, password, " "instance and project in your configuration settings." ) except ApiError as e: raise PluginError( f"JIRA responded with an error. We received a status code of {e.code}" ) if not meta: raise PluginError( "Error in JIRA configuration, no projects " "found for user %s." 
% client.username ) # check if the issuetype was passed as a GET parameter issue_type = None if request is not None: if request.method == "POST": issue_type = request.data.get("issuetype") else: issue_type = request.GET.get("issuetype") if issue_type is None: issue_type = self.get_option("default_issue_type", group.project) issue_type_meta = self.get_issue_type_meta(issue_type, meta) issue_type_choices = self.make_choices(meta["issuetypes"]) # make sure default issue type is actually # one that is allowed for project if issue_type: if not any(c for c in issue_type_choices if c[0] == issue_type): issue_type = issue_type_meta["id"] fields = [ { "name": "project", "label": "Jira Project", "choices": ((meta["id"], jira_project_key),), "default": meta["id"], "type": "select", "readonly": True, }, *fields, { "name": "issuetype", "label": "Issue Type", "default": issue_type or issue_type_meta["id"], "type": "select", "choices": issue_type_choices, }, ] # title is renamed to summary before sending to JIRA standard_fields = [f["name"] for f in fields] + ["summary"] ignored_fields = (self.get_option("ignored_fields", group.project) or "").split(",") # apply ordering to fields based on some known built-in JIRA fields. # otherwise weird ordering occurs. anti_gravity = {"priority": -150, "fixVersions": -125, "components": -100, "security": -50} dynamic_fields = list(issue_type_meta.get("fields").keys()) dynamic_fields.sort(key=lambda f: anti_gravity.get(f) or 0) # Build up some dynamic fields based on what is required. for field in dynamic_fields: if field in standard_fields or field in [x.strip() for x in ignored_fields]: # don't overwrite the fixed fields for the form. continue mb_field = self.build_dynamic_field(group, issue_type_meta["fields"][field]) if mb_field: mb_field["name"] = field fields.append(mb_field) for field in fields: if field["name"] == "priority": # whenever priorities are available, put the available ones in the list. 
# allowedValues for some reason doesn't pass enough info. field["choices"] = self.make_choices(client.get_priorities()) field["default"] = self.get_option("default_priority", group.project) or "" elif field["name"] == "fixVersions": field["choices"] = self.make_choices(client.get_versions(jira_project_key)) return fields def get_link_existing_issue_fields(self, request: Request, group, event, **kwargs): return [ { "name": "issue_id", "label": "Issue", "default": "", "type": "select", "has_autocomplete": True, }, { "name": "comment", "label": "Comment", "default": absolute_uri(group.get_absolute_url(params={"referrer": "jira_plugin"})), "type": "textarea", "help": ("Leave blank if you don't want to " "add a comment to the JIRA issue."), "required": False, }, ] def link_issue(self, request: Request, group, form_data, **kwargs): client = self.get_jira_client(group.project) try: issue = client.get_issue(form_data["issue_id"]) except Exception as e: self.raise_error(e) comment = form_data.get("comment") if comment: try: client.create_comment(issue["key"], comment) except Exception as e: self.raise_error(e) return {"title": issue["fields"]["summary"]} def get_issue_label(self, group, issue_id: str) -> str: return issue_id def get_issue_url(self, group, issue_id: str) -> str: instance = self.get_option("instance_url", group.project) return f"{instance}/browse/{issue_id}" def _get_formatted_user(self, user): display = "{} {}({})".format( user.get("displayName", user["name"]), "- %s " % user.get("emailAddress") if user.get("emailAddress") else "", user["name"], ) return {"id": user["name"], "text": display} def view_autocomplete(self, request: Request, group, **kwargs): query = request.GET.get("autocomplete_query", "") field = request.GET.get("autocomplete_field", "") project = self.get_option("default_project", group.project) if field == "issue_id": client = self.get_jira_client(group.project) try: response = client.search_issues(project, query) except ApiError as e: 
return Response( { "error_type": "validation", "errors": [{"__all__": self.message_from_error(e)}], }, status=400, ) else: issues = [ {"text": "({}) {}".format(i["key"], i["fields"]["summary"]), "id": i["key"]} for i in response.get("issues", []) ] return Response({field: issues}) jira_url = request.GET.get("jira_url") if not jira_url: return Response( { "error_type": "validation", "errors": [{"jira_url": "missing required parameter"}], }, status=400, ) jira_url = unquote_plus(jira_url) parsed = urlsplit(jira_url) instance_url = self.get_option("instance_url", group.project) if parsed.netloc != urlsplit(instance_url).netloc: return Response( { "error_type": "validation", "errors": [{"jira_url": "domain must match"}], }, status=400, ) parsed_mut = list(parsed) jira_query = parse_qs(parsed_mut[3]) jira_client = self.get_jira_client(group.project) is_user_api = re.search("/rest/api/(latest|[0-9])/user/", jira_url) is_user_picker = "/rest/api/1.0/users/picker" in jira_url if is_user_api: # its the JSON version of the autocompleter is_xml = False jira_query["username"] = [query] # some reason JIRA complains if this key is in the URL. jira_query.pop("issueKey", None) jira_query["project"] = [project] elif is_user_picker: is_xml = False # for whatever reason, the create meta api returns an # invalid path, so let's just use the correct, documented one here: # https://docs.atlassian.com/jira/REST/cloud/#api/2/user # also, only pass path so saved instance url will be used parsed_mut[0] = "" parsed_mut[1] = "" parsed_mut[2] = "/rest/api/2/user/picker" jira_query["query"] = [query] else: # its the stupid XML version of the API. is_xml = True jira_query["query"] = [query] if jira_query.get("fieldName"): # for some reason its a list. 
jira_query["fieldName"] = [jira_query["fieldName"][0]] parsed_mut[3] = urlencode(jira_query, doseq=True) final_url = urlunsplit(parsed_mut) autocomplete_response = jira_client.get_cached(final_url) if is_user_picker: autocomplete_response = autocomplete_response["users"] users = [] if is_xml: for userxml in autocomplete_response.xml.findAll("users"): users.append({"id": userxml.find("name").text, "text": userxml.find("html").text}) else: for user in autocomplete_response: if user.get("name"): users.append(self._get_formatted_user(user)) # if JIRA user doesn't have proper permission for user api, # try the assignee api instead if not users and is_user_api: try: autocomplete_response = jira_client.search_users_for_project( jira_query.get("project"), jira_query.get("username") ) except (ApiUnauthorized, ApiError) as e: return Response( { "error_type": "validation", "errors": [{"__all__": self.message_from_error(e)}], }, status=400, ) for user in autocomplete_response: if user.get("name"): users.append(self._get_formatted_user(user)) return Response({field: users}) def message_from_error(self, exc: Exception) -> str: if isinstance(exc, ApiUnauthorized): return "Unauthorized: either your username and password were invalid or you do not have access" return super().message_from_error(exc) def error_message_from_json(self, data): message = "" if data.get("errorMessages"): message = " ".join(data["errorMessages"]) if data.get("errors"): if message: message += " " message += " ".join(f"{k}: {v}" for k, v in data.get("errors").items()) return message def create_issue(self, request: Request | None, group, form_data): cleaned_data = {} # protect against mis-configured plugin submitting a form without an # issuetype assigned. 
if not form_data.get("issuetype"): raise PluginError("Issue Type is required.") jira_project_key = self.get_option("default_project", group.project) client = self.get_jira_client(group.project) meta = client.get_create_meta_for_project(jira_project_key) if not meta: raise PluginError("Something went wrong. Check your plugin configuration.") issue_type_meta = self.get_issue_type_meta(form_data["issuetype"], meta) fs = issue_type_meta["fields"] for field in fs.keys(): f = fs[field] if field == "description": cleaned_data[field] = form_data[field] continue elif field == "summary": cleaned_data["summary"] = form_data["title"] continue if field in form_data.keys(): v = form_data.get(field) if v: schema = f["schema"] if schema.get("type") == "string" and not schema.get("custom"): cleaned_data[field] = v continue if schema["type"] == "user" or schema.get("items") == "user": v = {"name": v} elif schema.get("custom") == JIRA_CUSTOM_FIELD_TYPES.get("multiuserpicker"): # custom multi-picker v = [{"name": v}] elif schema["type"] == "array" and schema.get("items") != "string": v = [{"id": vx} for vx in v] elif schema["type"] == "array" and schema.get("items") == "string": v = [v] elif schema.get("custom") == JIRA_CUSTOM_FIELD_TYPES.get("textarea"): v = v elif ( schema["type"] == "number" or schema.get("custom") == JIRA_CUSTOM_FIELD_TYPES["tempo_account"] ): try: if "." in v: v = float(v) else: v = int(v) except ValueError: pass elif ( schema.get("type") != "string" or (schema.get("items") and schema.get("items") != "string") or schema.get("custom") == JIRA_CUSTOM_FIELD_TYPES.get("select") ): v = {"id": v} cleaned_data[field] = v if not (isinstance(cleaned_data["issuetype"], dict) and "id" in cleaned_data["issuetype"]): # something fishy is going on with this field, working on some JIRA # instances, and some not. 
# testing against 5.1.5 and 5.1.4 does not convert (perhaps is no longer included # in the projectmeta API call, and would normally be converted in the # above clean method.) cleaned_data["issuetype"] = {"id": cleaned_data["issuetype"]} try: response = client.create_issue(cleaned_data) except Exception as e: self.raise_error(e) return response.get("key") def get_jira_client(self, project): instance = self.get_option("instance_url", project) username = self.get_option("username", project) pw = self.get_option("password", project) return JiraClient(instance, username, pw) def make_choices(self, x): return [(y["id"], y["name"] if "name" in y else y["value"]) for y in x] if x else [] def validate_config_field(self, project, name, value, actor=None): value = super().validate_config_field(project, name, value, actor) # Don't make people update password every time if name == "password": value = value or self.get_option("password", project) return value def validate_config(self, project, config, actor=None): """ ``` if config['foo'] and not config['bar']: raise PluginError('You cannot configure foo with bar') return config ``` """ client = JiraClient(config["instance_url"], config["username"], config["password"]) try: client.get_projects_list() except ApiError as e: self.raise_error(e) return config def get_configure_plugin_fields(self, project, **kwargs): instance = self.get_option("instance_url", project) username = self.get_option("username", project) pw = self.get_option("password", project) jira_project = self.get_option("default_project", project) default_priority = self.get_option("default_priority", project) default_issue_type = self.get_option("default_issue_type", project) project_choices = [] priority_choices = [] issue_type_choices = [] if instance and username and pw: client = JiraClient(instance, username, pw) try: projects = client.get_projects_list() except ApiError: projects = None else: if projects: project_choices = [ (p.get("key"), "{} 
({})".format(p.get("name"), p.get("key"))) for p in projects ] jira_project = jira_project or projects[0]["key"] if jira_project: try: priorities = client.get_priorities() except ApiError: priorities = None else: if priorities: priority_choices = [ (p.get("id"), "%s" % (p.get("name"))) for p in priorities ] default_priority = default_priority or priorities[0]["id"] try: meta = client.get_create_meta_for_project(jira_project) except ApiError: meta = None else: if meta: issue_type_choices = self.make_choices(meta["issuetypes"]) if issue_type_choices: default_issue_type = default_issue_type or issue_type_choices[0][0] secret_field = get_secret_field_config(pw, "") secret_field.update({"name": "password", "label": "Password/API Token"}) return [ { "name": "instance_url", "label": "JIRA Instance URL", "default": instance, "type": "text", "placeholder": 'e.g. "https://jira.atlassian.com"', "help": "It must be visible to the Sentry server", }, { "name": "username", "label": "Username/Email", "default": username, "type": "text", "help": "Ensure the JIRA user has admin permissions on the project", }, secret_field, { "name": "default_project", "label": "Linked Project", "type": "select", "choices": project_choices, "default": jira_project, "required": False, }, { "name": "ignored_fields", "label": "Ignored Fields", "type": "textarea", "required": False, "placeholder": 'e.g. 
"components, security, customfield_10006"', "default": self.get_option("ignored_fields", project), "help": "Comma-separated list of properties that you don't want to show in the form", }, { "name": "default_priority", "label": "Default Priority", "type": "select", "choices": priority_choices, "required": False, "default": default_priority, }, { "name": "default_issue_type", "label": "Default Issue Type", "type": "select", "choices": issue_type_choices, "required": False, "default": default_issue_type, }, { "name": "auto_create", "label": "Automatically create JIRA Tickets", "default": self.get_option("auto_create", project) or False, "type": "bool", "required": False, "help": "Automatically create a JIRA ticket for EVERY new issue", }, ] def should_create(self, group, event, is_new): if not is_new: return False if not self.get_option("auto_create", group.project): return False # XXX(dcramer): Sentry doesn't expect GroupMeta referenced here so we # need to populate the cache GroupMeta.objects.populate_cache([group]) if GroupMeta.objects.get_value(group, "%s:tid" % self.get_conf_key(), None): return False return True def post_process(self, *, group, event, is_new, **kwargs): if not self.should_create(group, event, is_new): return fields = self.get_new_issue_fields(None, group, event, **kwargs) post_data = {} included_fields = {"priority", "issuetype", "title", "description", "project"} for field in fields: name = field["name"] if name in included_fields: post_data[name] = field.get("default") if not ( post_data.get("priority") and post_data.get("issuetype") and post_data.get("project") ): return interface = event.interfaces.get("exception") if interface: post_data["description"] += "\n{code}%s{code}" % interface.get_stacktrace( event, system_frames=False, max_frames=settings.SENTRY_MAX_STACKTRACE_FRAMES ) try: issue_id = self.create_issue(request=None, group=group, form_data=post_data) except PluginError as e: logger.info("post_process.fail", extra={"error": str(e)}) 
else: prefix = self.get_conf_key() GroupMeta.objects.set_value(group, "%s:tid" % prefix, issue_id)
JiraPlugin
python
PrefectHQ__prefect
src/prefect/runner/storage.py
{ "start": 3458, "end": 19668 }
class ____: """ Pulls the contents of a git repository to the local filesystem. Parameters: url: The URL of the git repository to pull from credentials: A dictionary of credentials to use when pulling from the repository. If a username is provided, an access token must also be provided. name: The name of the repository. If not provided, the name will be inferred from the repository URL. branch: The branch to pull from. Defaults to "main". pull_interval: The interval in seconds at which to pull contents from remote storage to local storage. If None, remote storage will perform a one-time sync. directories: The directories to pull from the Git repository (uses git sparse-checkout) Examples: Pull the contents of a private git repository to the local filesystem: ```python from prefect.runner.storage import GitRepository storage = GitRepository( url="https://github.com/org/repo.git", credentials={"username": "oauth2", "access_token": "my-access-token"}, ) await storage.pull_code() ``` """ def __init__( self, url: str, credentials: Union[GitCredentials, Block, dict[str, Any], None] = None, name: str | None = None, branch: str | None = None, commit_sha: str | None = None, include_submodules: bool = False, pull_interval: int | None = 60, directories: list[str] | None = None, ): if credentials is None: credentials = {} if ( isinstance(credentials, dict) and credentials.get("username") and not (credentials.get("access_token") or credentials.get("password")) ): raise ValueError( "If a username is provided, an access token or password must also be" " provided." ) if branch and commit_sha: raise ValueError( "Cannot provide both a branch and a commit SHA. Please provide only one." 
) self._url = url self._branch = branch self._commit_sha = commit_sha self._credentials = credentials self._include_submodules = include_submodules repo_name = urlparse(url).path.split("/")[-1].replace(".git", "") safe_branch = branch.replace("/", "-") if branch else None default_name = f"{repo_name}-{safe_branch}" if safe_branch else repo_name self._name = name or default_name self._logger = get_logger(f"runner.storage.git-repository.{self._name}") self._storage_base_path = Path.cwd() self._pull_interval = pull_interval self._directories = directories @property def destination(self) -> Path: return self._storage_base_path / self._name def set_base_path(self, path: Path) -> None: self._storage_base_path = path @property def pull_interval(self) -> Optional[int]: return self._pull_interval @property def _repository_url_with_credentials(self) -> str: """Get the repository URL with credentials embedded.""" if not self._credentials: return self._url # If block implements the protocol, let it format the complete URL if isinstance(self._credentials, _GitCredentialsFormatter): return self._credentials.format_git_credentials(self._url) # Otherwise, use legacy formatting for plain dict credentials credentials = ( self._credentials.model_dump() if isinstance(self._credentials, Block) else deepcopy(self._credentials) ) for k, v in credentials.items(): if isinstance(v, Secret): credentials[k] = v.get() elif isinstance(v, SecretStr): credentials[k] = v.get_secret_value() # Get credential string for plain dict credentials block = self._credentials if isinstance(self._credentials, Block) else None credential_string = _format_token_from_credentials( urlparse(self._url).netloc, credentials, block ) # Insert credentials into URL components = urlparse(self._url) if components.scheme != "https": return self._url return urlunparse( components._replace(netloc=f"{credential_string}@{components.netloc}") ) @property def _git_config(self) -> list[str]: """Build a git configuration to use 
when running git commands.""" config: dict[str, str] = {} # Submodules can be private. The url in .gitmodules # will not include the credentials, we need to # propagate them down here if they exist. if self._include_submodules and self._credentials: # Get base URL (without path) with credentials base_url_parsed = urlparse(self._url)._replace(path="") base_url_without_auth = urlunparse(base_url_parsed) # Create a temporary URL with just the base to get credentials formatting if isinstance(self._credentials, _GitCredentialsFormatter): base_url_with_auth = self._credentials.format_git_credentials( base_url_without_auth ) else: # Use legacy credential insertion credentials_dict = ( self._credentials.model_dump() if isinstance(self._credentials, Block) else deepcopy(self._credentials) ) for k, v in credentials_dict.items(): if isinstance(v, Secret): credentials_dict[k] = v.get() elif isinstance(v, SecretStr): credentials_dict[k] = v.get_secret_value() block = ( self._credentials if isinstance(self._credentials, Block) else None ) credential_string = _format_token_from_credentials( base_url_parsed.netloc, credentials_dict, block ) base_url_with_auth = urlunparse( base_url_parsed._replace( netloc=f"{credential_string}@{base_url_parsed.netloc}" ) ) config[f"url.{base_url_with_auth}.insteadOf"] = base_url_without_auth return ["-c", " ".join(f"{k}={v}" for k, v in config.items())] if config else [] async def is_sparsely_checked_out(self) -> bool: """ Check if existing repo is sparsely checked out """ try: result = await run_process( ["git", "config", "--get", "core.sparseCheckout"], cwd=self.destination ) return result.stdout.decode().strip().lower() == "true" except Exception: return False async def is_shallow_clone(self) -> bool: """ Check if the repository is a shallow clone """ try: result = await run_process( ["git", "rev-parse", "--is-shallow-repository"], cwd=self.destination, ) return result.stdout.decode().strip().lower() == "true" except Exception: return False 
async def is_current_commit(self) -> bool: """ Check if the current commit is the same as the commit SHA """ if not self._commit_sha: raise ValueError("No commit SHA provided") try: result = await run_process( ["git", "rev-parse", self._commit_sha], cwd=self.destination, ) return result.stdout.decode().strip() == self._commit_sha except Exception: return False async def pull_code(self) -> None: """ Pulls the contents of the configured repository to the local filesystem. """ self._logger.debug( "Pulling contents from repository '%s' to '%s'...", self._name, self.destination, ) git_dir = self.destination / ".git" if git_dir.exists(): # Check if the existing repository matches the configured repository result = await run_process( ["git", "config", "--get", "remote.origin.url"], cwd=str(self.destination), ) existing_repo_url = None existing_repo_url = _strip_auth_from_url(result.stdout.decode().strip()) if existing_repo_url != self._url: raise ValueError( f"The existing repository at {str(self.destination)} " f"does not match the configured repository {self._url}" ) # Sparsely checkout the repository if directories are specified and the repo is not in sparse-checkout mode already if self._directories and not await self.is_sparsely_checked_out(): await run_process( ["git", "sparse-checkout", "set", *self._directories], cwd=self.destination, ) self._logger.debug("Pulling latest changes from origin/%s", self._branch) # Update the existing repository cmd = ["git"] # Add the git configuration, must be given after `git` and before the command cmd += self._git_config # If the commit is already checked out, skip the pull if self._commit_sha and await self.is_current_commit(): return # If checking out a specific commit, fetch the latest changes and unshallow the repository if necessary elif self._commit_sha: if await self.is_shallow_clone(): cmd += ["fetch", "origin", "--unshallow"] else: cmd += ["fetch", "origin", self._commit_sha] try: await run_process(cmd, 
cwd=self.destination) self._logger.debug("Successfully fetched latest changes") except subprocess.CalledProcessError as exc: self._logger.error( f"Failed to fetch latest changes with exit code {exc}" ) shutil.rmtree(self.destination) await self._clone_repo() await run_process( ["git", "checkout", self._commit_sha], cwd=self.destination, ) self._logger.debug( f"Successfully checked out commit {self._commit_sha}" ) # Otherwise, pull the latest changes from the branch else: cmd += ["pull", "origin"] if self._branch: cmd += [self._branch] if self._include_submodules: cmd += ["--recurse-submodules"] cmd += ["--depth", "1"] try: await run_process(cmd, cwd=self.destination) self._logger.debug("Successfully pulled latest changes") except subprocess.CalledProcessError as exc: self._logger.error( f"Failed to pull latest changes with exit code {exc}" ) shutil.rmtree(self.destination) await self._clone_repo() else: await self._clone_repo() async def _clone_repo(self): """ Clones the repository into the local destination. 
""" self._logger.debug("Cloning repository %s", self._url) repository_url = self._repository_url_with_credentials cmd = ["git"] # Add the git configuration, must be given after `git` and before the command cmd += self._git_config # Add the clone command and its parameters cmd += ["clone", repository_url] if self._include_submodules: cmd += ["--recurse-submodules"] # This will only checkout the top-level directory if self._directories: cmd += ["--sparse"] if self._commit_sha: cmd += ["--filter=blob:none", "--no-checkout"] else: if self._branch: cmd += ["--branch", self._branch] # Limit git history cmd += ["--depth", "1"] # Set path to clone to cmd += [str(self.destination)] try: await run_process(cmd) except subprocess.CalledProcessError as exc: # Hide the command used to avoid leaking the access token parsed_url = urlparse(self._url) exc_chain = ( None if self._credentials or parsed_url.password or parsed_url.username else exc ) raise RuntimeError( f"Failed to clone repository {_strip_auth_from_url(self._url)!r} with exit code" f" {exc.returncode}." 
) from exc_chain if self._commit_sha: # Fetch the commit await run_process( ["git", "fetch", "origin", self._commit_sha], cwd=self.destination, ) # Checkout the specific commit await run_process( ["git", "checkout", self._commit_sha], cwd=self.destination, ) self._logger.debug(f"Successfully checked out commit {self._commit_sha}") # Once repository is cloned and the repo is in sparse-checkout mode then grow the working directory if self._directories: self._logger.debug("Will add %s", self._directories) await run_process( ["git", "sparse-checkout", "set", *self._directories], cwd=self.destination, ) def __eq__(self, __value: Any) -> bool: if isinstance(__value, GitRepository): return ( self._url == __value._url and self._branch == __value._branch and self._name == __value._name ) return False def __repr__(self) -> str: return ( f"GitRepository(name={self._name!r} repository={self._url!r}," f" branch={self._branch!r})" ) def to_pull_step(self) -> dict[str, Any]: pull_step: dict[str, Any] = { "prefect.deployments.steps.git_clone": { "repository": self._url, "branch": self._branch, } } if self._include_submodules: pull_step["prefect.deployments.steps.git_clone"]["include_submodules"] = ( self._include_submodules ) if self._commit_sha: pull_step["prefect.deployments.steps.git_clone"]["commit_sha"] = ( self._commit_sha ) if isinstance(self._credentials, Block): pull_step["prefect.deployments.steps.git_clone"]["credentials"] = ( f"{{{{ {self._credentials.get_block_placeholder()} }}}}" ) elif isinstance(self._credentials, dict): # pyright: ignore[reportUnnecessaryIsInstance] if isinstance( access_token := self._credentials.get("access_token"), Secret ): pull_step["prefect.deployments.steps.git_clone"]["credentials"] = { **self._credentials, "access_token": ( f"{{{{ {access_token.get_block_placeholder()} }}}}" ), } elif self._credentials.get("access_token") is not None: raise ValueError( "Please save your access token as a Secret block before converting" " this storage 
object to a pull step." ) return pull_step
GitRepository
python
imageio__imageio
tests/test_format.py
{ "start": 420, "end": 12462 }
class ____(Format): """TEST DOCS""" _closed: List[int] = [] def _can_read(self, request): return request.filename.lower().endswith(self.extensions + (".haha",)) def _can_write(self, request): return request.filename.lower().endswith(self.extensions + (".haha",)) class Reader(Format.Reader): _failmode = False _stream_mode = False def _open(self): self._read_frames = 0 def _close(self): self.format._closed.append(id(self)) def _get_length(self): if self._stream_mode: return np.inf return 3 def _get_data(self, index): if self._failmode == 2: raise IndexError() elif self._failmode: return "not an array", {} elif self._stream_mode and self._read_frames >= 5: raise IndexError() # Mark end of stream else: self._read_frames += 1 return np.ones((10, 10)) * index, self._get_meta_data(index) def _get_meta_data(self, index): if self._failmode: return "not a dict" return {"index": index} class Writer(Format.Writer): def _open(self): self._written_data = [] self._written_meta = [] self._meta = None def _close(self): self.format._closed.append(id(self)) def _append_data(self, im, meta): self._written_data.append(im) self._written_meta.append(meta) def _set_meta_data(self, meta): self._meta = meta @deprecated_test def test_format(test_images, tmp_path): """Test the working of the Format class""" filename1 = test_images / "chelsea.png" filename2 = tmp_path / "chelsea.out" # Test basic format creation F = Format("testname", "test description", "foo bar spam") assert F.name == "TESTNAME" assert F.description == "test description" assert F.name in repr(F) assert F.name in F.doc assert str(F) == F.doc assert set(F.extensions) == {".foo", ".bar", ".spam"} # Test setting extensions F1 = Format("test", "", "foo bar spam") F2 = Format("test", "", "foo, bar,spam") F3 = Format("test", "", ["foo", "bar", "spam"]) F4 = Format("test", "", ".foo .bar .spam") for F in (F1, F2, F3, F4): assert set(F.extensions) == {".foo", ".bar", ".spam"} # Fail raises(ValueError, Format, "test", "", 3) # not 
valid ext raises(ValueError, Format, "test", "", "", 3) # not valid mode raises(ValueError, Format, "test", "", "", "x") # not valid mode # Test subclassing F = MyFormat("test", "", modes="i") assert "TEST DOCS" in F.doc # Get and check reader and write classes R = F.get_reader(Request(filename1, "ri")) W = F.get_writer(Request(filename2, "wi")) assert isinstance(R, MyFormat.Reader) assert isinstance(W, MyFormat.Writer) assert R.format is F assert W.format is F assert Path(R.request.filename) == filename1 assert Path(W.request.filename) == filename2 # Fail raises(RuntimeError, F.get_reader, Request(filename1, "rI")) raises(RuntimeError, F.get_writer, Request(filename2, "wI")) # Use as context manager with R: pass with W: pass # Objects are now closed, cannot be used assert R.closed assert W.closed # raises(RuntimeError, R.__enter__) raises(RuntimeError, W.__enter__) # raises(RuntimeError, R.get_data, 0) raises(RuntimeError, W.append_data, np.zeros((10, 10))) # Test __del__ R = F.get_reader(Request(filename1, "ri")) W = F.get_writer(Request(filename2, "wi")) ids = id(R), id(W) F._closed[:] = [] del R del W gc.collect() # Invoke __del__ assert set(ids) == set(F._closed) @deprecated_test def test_reader_and_writer(test_images, tmp_path): # Prepare filename1 = test_images / "chelsea.png" filename2 = tmp_path / "chelsea.out" F = MyFormat("test", "", modes="i") # Test using reader n = 3 R = F.get_reader(Request(filename1, "ri")) assert len(R) == n ims = [im for im in R] assert len(ims) == n for i in range(3): assert ims[i][0, 0] == i assert ims[i].meta["index"] == i for i in range(3): assert R.get_meta_data(i)["index"] == i # Read next assert R.get_data(0)[0, 0] == 0 assert R.get_next_data()[0, 0] == 1 assert R.get_next_data()[0, 0] == 2 # Fail R._failmode = 1 raises(ValueError, R.get_data, 0) raises(ValueError, R.get_meta_data, 0) R._failmode = 2 with raises(IndexError): [im for im in R] # Test streaming reader R = F.get_reader(Request(filename1, "ri")) R._stream_mode = 
True assert R.get_length() == np.inf ims = [im for im in R] assert len(ims) == 5 # Test using writer im1 = np.zeros((10, 10)) im2 = imageio.core.Image(im1, {"foo": 1}) W = F.get_writer(Request(filename2, "wi")) W.append_data(im1) W.append_data(im2) W.append_data(im1, {"bar": 1}) W.append_data(im2, {"bar": 1}) # Test that no data is copies (but may be different views) assert len(W._written_data) == 4 for im in W._written_data: assert (im == im1).all() im1[2, 2] == 99 for im in W._written_data: assert (im == im1).all() # Test meta assert W._written_meta[0] == {} assert W._written_meta[1] == {"foo": 1} assert W._written_meta[2] == {"bar": 1} assert W._written_meta[3] == {"foo": 1, "bar": 1} # W.set_meta_data({"spam": 1}) assert W._meta == {"spam": 1} # Fail raises(ValueError, W.append_data, "not an array") raises(ValueError, W.append_data, im, "not a dict") raises(ValueError, W.set_meta_data, "not a dict") @deprecated_test def test_default_can_read_and_can_write(tmp_path): F = imageio.plugins.example.DummyFormat("test", "", "foo bar", "v") # Prepare files filename1 = str(tmp_path / "test") open(filename1 + ".foo", "wb") open(filename1 + ".bar", "wb") open(filename1 + ".spam", "wb") # Test _can_read() assert F.can_read(Request(filename1 + ".foo", "rv")) assert F.can_read(Request(filename1 + ".bar", "r?")) assert not F.can_read(Request(filename1 + ".spam", "r?")) # Test _can_write() assert F.can_write(Request(filename1 + ".foo", "wv")) assert F.can_write(Request(filename1 + ".bar", "w?")) assert not F.can_write(Request(filename1 + ".spam", "w?")) # Format manager @deprecated_test def test_format_manager(test_images, tmp_path): """Test working of the format manager""" formats = imageio.formats # Test basics of FormatManager assert isinstance(formats, FormatManager) assert len(formats) > 0 assert "FormatManager" in repr(formats) # Get docs smalldocs = str(formats) # fulldocs = formats.create_docs_for_all_formats() # Check each format ... 
for format in formats: # That each format is indeed a Format assert isinstance(format, Format) # That they are mentioned assert format.name in smalldocs # assert format.name in fulldocs fname = test_images / "chelsea.png" fname2 = tmp_path / "chelsea.noext" shutil.copy(fname, fname2) # Check getting F1 = formats["PNG"] F2 = formats[".png"] F3 = formats[fname2.as_posix()] # will look in file itself assert type(F1) is type(F2) assert type(F1) is type(F3) # Check getting F1 = formats["DICOM"] F2 = formats[".dcm"] F3 = formats["dcm"] # If omitting dot, format is smart enough to try with assert type(F1) is type(F2) assert type(F1) is type(F3) # Fail raises(ValueError, formats.__getitem__, 678) # must be str raises(IndexError, formats.__getitem__, ".nonexistentformat") # Adding a format myformat = Format("test", "test description", "testext1 testext2") formats.add_format(myformat) assert type(myformat) in [type(f) for f in formats] assert type(formats["testext1"]) is type(myformat) assert type(formats["testext2"]) is type(myformat) # Fail raises(ValueError, formats.add_format, 678) # must be Format raises(ValueError, formats.add_format, myformat) # cannot add twice # Adding a format with the same name myformat2 = Format("test", "other description", "foo bar") raises(ValueError, formats.add_format, myformat2) # same name formats.add_format(myformat2, True) # overwrite assert formats["test"].name is not myformat.name assert type(formats["test"]) is type(myformat2) # Test show (we assume it shows correctly) formats.show() # # Potential # bytes = b'x' * 300 # F = formats.search_read_format(Request(bytes, 'r?', dummy_potential=1)) # assert F is formats['DUMMY'] @deprecated_test def test_sorting_errors(): with raises(TypeError): imageio.formats.sort(3) with raises(ValueError): imageio.formats.sort("foo,bar") with raises(ValueError): imageio.formats.sort("foo.png") @deprecated_test def test_default_order(): assert imageio.formats[".tiff"].name == "TIFF" assert 
imageio.formats[".png"].name == "PNG-PIL" assert imageio.formats[".pfm"].name == "PFM-FI" @deprecated_test def test_preferring_fi(): # Prefer FI all the way imageio.formats.sort("-FI") assert imageio.formats[".tiff"].name == "TIFF-FI" assert imageio.formats[".png"].name == "PNG-FI" assert imageio.formats[".pfm"].name == "PFM-FI" # This would be better imageio.formats.sort("TIFF", "-FI") assert imageio.formats[".tiff"].name == "TIFF" assert imageio.formats[".png"].name == "PNG-FI" assert imageio.formats[".pfm"].name == "PFM-FI" @deprecated_test def test_preferring_arbitrary(): # Normally, these exotic formats are somewhere in the back imageio.formats.sort() names = [f.name for f in imageio.formats] assert "DICOM" not in names[:10] assert "FFMPEG" not in names[:10] assert "NPZ" not in names[:10] # But we can move them forward imageio.formats.sort("DICOM", "FFMPEG", "NPZ") names = [f.name for f in imageio.formats] assert names[0] == "DICOM" assert names[1] == "FFMPEG" assert names[2] == "NPZ" # And back to normal .. 
imageio.formats.sort() names = [f.name for f in imageio.formats] assert "DICOM" not in names[:10] assert "FFMPEG" not in names[:10] assert "NPZ" not in names[:10] @deprecated_test def test_bad_formats(tmp_path): # test looking up a read format from file bogus_file = tmp_path / "bogus.fil" bogus_file.write_text("abcdefg") with pytest.raises(IndexError): iio.formats[str(bogus_file)] # test empty format with pytest.raises(ValueError): iio.formats[""] @deprecated_test def test_write_format_search_fail(tmp_path): req = iio.core.Request(tmp_path / "foo.bogus", "w") assert iio.formats.search_write_format(req) is None @deprecated_test def test_format_by_filename(): iio.formats["test.jpg"] @pytest.fixture() def missing_ffmpeg(): old_ffmpeg = sys.modules.get("imageio_ffmpeg") old_plugin = sys.modules.get("imageio.plugins.ffmpeg") sys.modules["imageio_ffmpeg"] = None sys.modules.pop("imageio.plugins.ffmpeg") yield sys.modules["imageio_ffmpeg"] = old_ffmpeg sys.modules["imageio.plugins.ffmpeg"] = old_plugin def test_missing_format(missing_ffmpeg): # regression test for # https://github.com/imageio/imageio/issues/887 for format in imageio.formats: assert format.name != "FFMPEG" def test_touch_warnings(test_images, tmp_path): with pytest.deprecated_call(): imageio.formats.search_read_format(Request(test_images / "chelsea.png", "r")) with pytest.deprecated_call(): imageio.formats.search_write_format(Request(tmp_path / "chelsea.png", "w"))
MyFormat
python
ray-project__ray
python/ray/autoscaler/v2/instance_manager/instance_storage.py
{ "start": 257, "end": 5679 }
class ____: """Instance storage stores the states of instances in the storage.""" def __init__( self, cluster_id: str, storage: Storage, ) -> None: self._storage = storage self._cluster_id = cluster_id self._table_name = f"instance_table@{cluster_id}" def batch_upsert_instances( self, updates: List[Instance], expected_storage_version: Optional[int] = None, ) -> StoreStatus: """Upsert instances into the storage. If the instance already exists, it will be updated. Otherwise, it will be inserted. If the expected_storage_version is specified, the update will fail if the current storage version does not match the expected version. Note the version of the upserted instances will be set to the current storage version. Args: updates: A list of instances to be upserted. expected_storage_version: The expected storage version. Returns: StoreStatus: A tuple of (success, storage_version). """ mutations = {} version = self._storage.get_version() # handle version mismatch if expected_storage_version and expected_storage_version != version: return StoreStatus(False, version) for instance in updates: instance = copy.deepcopy(instance) # the instance version is set to 0, it will be # populated by the storage entry's verion on read instance.version = 0 mutations[instance.instance_id] = instance.SerializeToString() result, version = self._storage.batch_update( self._table_name, mutations, {}, expected_storage_version ) return StoreStatus(result, version) def upsert_instance( self, instance: Instance, expected_instance_version: Optional[int] = None, expected_storage_verison: Optional[int] = None, ) -> StoreStatus: """Upsert an instance in the storage. If the expected_instance_version is specified, the update will fail if the current instance version does not match the expected version. Similarly, if the expected_storage_version is specified, the update will fail if the current storage version does not match the expected version. 
Note the version of the upserted instances will be set to the current storage version. Args: instance: The instance to be updated. expected_instance_version: The expected instance version. expected_storage_version: The expected storage version. Returns: StoreStatus: A tuple of (success, storage_version). """ instance = copy.deepcopy(instance) # the instance version is set to 0, it will be # populated by the storage entry's verion on read instance.version = 0 result, version = self._storage.update( self._table_name, key=instance.instance_id, value=instance.SerializeToString(), expected_entry_version=expected_instance_version, expected_storage_version=expected_storage_verison, insert_only=False, ) return StoreStatus(result, version) def get_instances( self, instance_ids: List[str] = None, status_filter: Set[int] = None, ) -> Tuple[Dict[str, Instance], int]: """Get instances from the storage. Args: instance_ids: A list of instance ids to be retrieved. If empty, all instances will be retrieved. status_filter: Only instances with the specified status will be returned. Returns: Tuple[Dict[str, Instance], int]: A tuple of (instances, version). The instances is a dictionary of (instance_id, instance) pairs. """ instance_ids = instance_ids or [] status_filter = status_filter or set() pairs, version = self._storage.get(self._table_name, instance_ids) instances = {} for instance_id, (instance_data, entry_version) in pairs.items(): instance = Instance() instance.ParseFromString(instance_data) instance.version = entry_version if status_filter and instance.status not in status_filter: continue instances[instance_id] = instance return instances, version def batch_delete_instances( self, instance_ids: List[str], expected_storage_version: Optional[int] = None ) -> StoreStatus: """Delete instances from the storage. If the expected_version is specified, the update will fail if the current storage version does not match the expected version. 
Args: to_delete: A list of instances to be deleted. expected_version: The expected storage version. Returns: StoreStatus: A tuple of (success, storage_version). """ version = self._storage.get_version() if expected_storage_version and expected_storage_version != version: return StoreStatus(False, version) result = self._storage.batch_update( self._table_name, {}, instance_ids, expected_storage_version ) return result
InstanceStorage
python
kamyu104__LeetCode-Solutions
Python/swap-for-longest-repeated-character-substring.py
{ "start": 50, "end": 725 }
class ____(object): def maxRepOpt1(self, text): """ :type text: str :rtype: int """ K = 1 result = 0 total_count, count = collections.Counter(), collections.Counter() left, max_count = 0, 0 for i in xrange(len(text)): total_count[text[i]] += 1 count[text[i]] += 1 max_count = max(max_count, count[text[i]]) if i-left+1 - max_count > K: count[text[left]] -= 1 left += 1 result = max(result, min(i-left+1, total_count[text[i]])) return result # Time: O(n) # Space: O(n) import itertools
Solution
python
pytest-dev__pytest
src/_pytest/timing.py
{ "start": 1611, "end": 1970 }
class ____: """A span of time as measured by `Instant.elapsed()`.""" start: Instant stop: Instant @property def seconds(self) -> float: """Elapsed time of the duration in seconds, measured using a performance counter for precise timing.""" return self.stop.perf_count - self.start.perf_count @dataclasses.dataclass
Duration
python
kamyu104__LeetCode-Solutions
Python/subsequence-sum-after-capping-elements.py
{ "start": 795, "end": 1490 }
class ____(object): def subsequenceSumAfterCapping(self, nums, k): """ :type nums: List[int] :type k: int :rtype: List[bool] """ result = [False]*len(nums) nums.sort() dp = [False]*(k+1) dp[0] = True i = 0 for x in xrange(1, len(nums)+1): while i < len(nums) and nums[i] < x: for j in reversed(xrange(nums[i], k+1)): dp[j] = dp[j] or dp[j-nums[i]] i += 1 for j in xrange(max(k%x, k-(len(nums)-i)*x), k+1, x): if dp[j]: result[x-1] = True break return result
Solution2
python
keon__algorithms
algorithms/tree/segment_tree/segment_tree.py
{ "start": 180, "end": 1369 }
class ____: def __init__(self,arr,function): self.segment = [0 for x in range(3*len(arr)+3)] self.arr = arr self.fn = function self.make_tree(0,0,len(arr)-1) def make_tree(self,i,l,r): if l==r: self.segment[i] = self.arr[l] elif l<r: self.make_tree(2*i+1,l,int((l+r)/2)) self.make_tree(2*i+2,int((l+r)/2)+1,r) self.segment[i] = self.fn(self.segment[2*i+1],self.segment[2*i+2]) def __query(self,i,L,R,l,r): if l>R or r<L or L>R or l>r: return None if L>=l and R<=r: return self.segment[i] val1 = self.__query(2*i+1,L,int((L+R)/2),l,r) val2 = self.__query(2*i+2,int((L+R+2)/2),R,l,r) print(L,R," returned ",val1,val2) if val1 != None: if val2 != None: return self.fn(val1,val2) return val1 return val2 def query(self,L,R): return self.__query(0,0,len(self.arr)-1,L,R) ''' Example - mytree = SegmentTree([2,4,5,3,4],max) mytree.query(2,4) mytree.query(0,3) ... mytree = SegmentTree([4,5,2,3,4,43,3],sum) mytree.query(1,8) ... '''
SegmentTree
python
getsentry__sentry
fixtures/safe_migrations_apps/good_flow_delete_field_pending_with_fk_constraint_app/migrations/0002_remove_constraints_and_pending.py
{ "start": 252, "end": 1040 }
class ____(CheckedMigration): atomic = False dependencies = [ ("good_flow_delete_field_pending_with_fk_constraint_app", "0001_initial"), ] operations = [ migrations.AlterField( model_name="TestTable", name="fk_table", field=sentry.db.models.fields.foreignkey.FlexibleForeignKey( on_delete=django.db.models.deletion.CASCADE, to="good_flow_delete_field_pending_with_fk_constraint_app.fktable", db_index=False, db_constraint=False, null=True, ), ), SafeRemoveField( model_name="testtable", name="fk_table", deletion_action=DeletionAction.MOVE_TO_PENDING, ), ]
Migration
python
SmileyChris__easy-thumbnails
easy_thumbnails/fields.py
{ "start": 100, "end": 673 }
class ____(FileField): """ A file field which provides easier access for retrieving (and generating) thumbnails. To use a different file storage for thumbnails, provide the ``thumbnail_storage`` keyword argument. """ attr_class = files.ThumbnailerFieldFile def __init__(self, *args, **kwargs): # Arguments not explicitly defined so that the normal ImageField # positional arguments can be used. self.thumbnail_storage = kwargs.pop('thumbnail_storage', None) super().__init__(*args, **kwargs)
ThumbnailerField
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py
{ "start": 7819, "end": 8106 }
class ____(graphene.ObjectType): class Meta: interfaces = (GrapheneMessageEvent, GrapheneStepEvent) name = "ExecutionStepInputEvent" input_name = graphene.NonNull(graphene.String) type_check = graphene.NonNull(GrapheneTypeCheck)
GrapheneExecutionStepInputEvent
python
mwaskom__seaborn
tests/test_categorical.py
{ "start": 40413, "end": 52328 }
class ____(SharedAxesLevelTests, SharedPatchArtistTests): func = staticmethod(boxenplot) @pytest.fixture def common_kws(self): return {"saturation": 1} def get_last_color(self, ax): fcs = ax.collections[-2].get_facecolors() return to_rgba(fcs[len(fcs) // 2]) def get_box_width(self, path, orient="x"): verts = path.vertices.T idx = ["y", "x"].index(orient) return np.ptp(verts[idx]) def check_boxen(self, patches, data, orient, pos, width=0.8): pos_idx, val_idx = self.orient_indices(orient) verts = np.stack([v.vertices for v in patches.get_paths()], 1).T assert verts[pos_idx].min().round(4) >= np.round(pos - width / 2, 4) assert verts[pos_idx].max().round(4) <= np.round(pos + width / 2, 4) assert np.isin( np.percentile(data, [25, 75]).round(4), verts[val_idx].round(4).flat ).all() assert_array_equal(verts[val_idx, 1:, 0], verts[val_idx, :-1, 2]) @pytest.mark.parametrize("orient,col", [("x", "y"), ("y", "z")]) def test_single_var(self, long_df, orient, col): var = {"x": "y", "y": "x"}[orient] ax = boxenplot(long_df, **{var: col}) patches = ax.collections[0] self.check_boxen(patches, long_df[col], orient, 0) @pytest.mark.parametrize("orient,col", [(None, "x"), ("x", "y"), ("y", "z")]) def test_vector_data(self, long_df, orient, col): orient = "x" if orient is None else orient ax = boxenplot(long_df[col], orient=orient) patches = ax.collections[0] self.check_boxen(patches, long_df[col], orient, 0) @pytest.mark.parametrize("orient", ["h", "v"]) def test_wide_data(self, wide_df, orient): orient = {"h": "y", "v": "x"}[orient] ax = boxenplot(wide_df, orient=orient) collections = ax.findobj(mpl.collections.PatchCollection) for i, patches in enumerate(collections): col = wide_df.columns[i] self.check_boxen(patches, wide_df[col], orient, i) @pytest.mark.parametrize("orient", ["x", "y"]) def test_grouped(self, long_df, orient): value = {"x": "y", "y": "x"}[orient] ax = boxenplot(long_df, **{orient: "a", value: "z"}) levels = categorical_order(long_df["a"]) collections = 
ax.findobj(mpl.collections.PatchCollection) for i, level in enumerate(levels): data = long_df.loc[long_df["a"] == level, "z"] self.check_boxen(collections[i], data, orient, i) @pytest.mark.parametrize("orient", ["x", "y"]) def test_hue_grouped(self, long_df, orient): value = {"x": "y", "y": "x"}[orient] ax = boxenplot(long_df, hue="c", **{orient: "a", value: "z"}) collections = iter(ax.findobj(mpl.collections.PatchCollection)) for i, level in enumerate(categorical_order(long_df["a"])): for j, hue_level in enumerate(categorical_order(long_df["c"])): rows = (long_df["a"] == level) & (long_df["c"] == hue_level) data = long_df.loc[rows, "z"] pos = i + [-.2, +.2][j] width = 0.4 self.check_boxen(next(collections), data, orient, pos, width) def test_dodge_native_scale(self, long_df): centers = categorical_order(long_df["s"]) hue_levels = categorical_order(long_df["c"]) spacing = min(np.diff(centers)) width = 0.8 * spacing / len(hue_levels) offset = width / len(hue_levels) ax = boxenplot(long_df, x="s", y="z", hue="c", native_scale=True) collections = iter(ax.findobj(mpl.collections.PatchCollection)) for center in centers: for i, hue_level in enumerate(hue_levels): rows = (long_df["s"] == center) & (long_df["c"] == hue_level) data = long_df.loc[rows, "z"] pos = center + [-offset, +offset][i] self.check_boxen(next(collections), data, "x", pos, width) def test_color(self, long_df): color = "#123456" ax = boxenplot(long_df, x="a", y="y", color=color, saturation=1) collections = ax.findobj(mpl.collections.PatchCollection) for patches in collections: fcs = patches.get_facecolors() assert same_color(fcs[len(fcs) // 2], color) def test_hue_colors(self, long_df): ax = boxenplot(long_df, x="a", y="y", hue="b", saturation=1) n_levels = long_df["b"].nunique() collections = ax.findobj(mpl.collections.PatchCollection) for i, patches in enumerate(collections): fcs = patches.get_facecolors() assert same_color(fcs[len(fcs) // 2], f"C{i % n_levels}") def test_linecolor(self, long_df): 
color = "#669913" ax = boxenplot(long_df, x="a", y="y", linecolor=color) for patches in ax.findobj(mpl.collections.PatchCollection): assert same_color(patches.get_edgecolor(), color) def test_linewidth(self, long_df): width = 5 ax = boxenplot(long_df, x="a", y="y", linewidth=width) for patches in ax.findobj(mpl.collections.PatchCollection): assert patches.get_linewidth() == width def test_saturation(self, long_df): color = "#8912b0" ax = boxenplot(long_df["x"], color=color, saturation=.5) fcs = ax.collections[0].get_facecolors() assert np.allclose(fcs[len(fcs) // 2, :3], desaturate(color, 0.5)) def test_gap(self, long_df): ax1, ax2 = mpl.figure.Figure().subplots(2) boxenplot(long_df, x="a", y="y", hue="s", ax=ax1) boxenplot(long_df, x="a", y="y", hue="s", gap=.2, ax=ax2) c1 = ax1.findobj(mpl.collections.PatchCollection) c2 = ax2.findobj(mpl.collections.PatchCollection) for p1, p2 in zip(c1, c2): w1 = np.ptp(p1.get_paths()[0].vertices[:, 0]) w2 = np.ptp(p2.get_paths()[0].vertices[:, 0]) assert (w2 / w1) == pytest.approx(0.8) def test_fill(self, long_df): ax = boxenplot(long_df, x="a", y="y", hue="s", fill=False) for c in ax.findobj(mpl.collections.PatchCollection): assert not c.get_facecolors().size def test_k_depth_int(self, rng): x = rng.normal(0, 1, 10_000) ax = boxenplot(x, k_depth=(k := 8)) assert len(ax.collections[0].get_paths()) == (k * 2 - 1) def test_k_depth_full(self, rng): x = rng.normal(0, 1, 10_000) ax = boxenplot(x=x, k_depth="full") paths = ax.collections[0].get_paths() assert len(paths) == 2 * int(np.log2(x.size)) + 1 verts = np.concatenate([p.vertices for p in paths]).T assert verts[0].min() == x.min() assert verts[0].max() == x.max() assert not ax.collections[1].get_offsets().size def test_trust_alpha(self, rng): x = rng.normal(0, 1, 10_000) ax = boxenplot(x, k_depth="trustworthy", trust_alpha=.1) boxenplot(x, k_depth="trustworthy", trust_alpha=.001, ax=ax) cs = ax.findobj(mpl.collections.PatchCollection) assert len(cs[0].get_paths()) > 
len(cs[1].get_paths()) def test_outlier_prop(self, rng): x = rng.normal(0, 1, 10_000) ax = boxenplot(x, k_depth="proportion", outlier_prop=.001) boxenplot(x, k_depth="proportion", outlier_prop=.1, ax=ax) cs = ax.findobj(mpl.collections.PatchCollection) assert len(cs[0].get_paths()) > len(cs[1].get_paths()) def test_exponential_width_method(self, rng): x = rng.normal(0, 1, 10_000) ax = boxenplot(x=x, width_method="exponential") c = ax.findobj(mpl.collections.PatchCollection)[0] ws = [self.get_box_width(p) for p in c.get_paths()] assert (ws[1] / ws[0]) == pytest.approx(ws[2] / ws[1]) def test_linear_width_method(self, rng): x = rng.normal(0, 1, 10_000) ax = boxenplot(x=x, width_method="linear") c = ax.findobj(mpl.collections.PatchCollection)[0] ws = [self.get_box_width(p) for p in c.get_paths()] assert (ws[1] - ws[0]) == pytest.approx(ws[2] - ws[1]) def test_area_width_method(self, rng): x = rng.uniform(0, 1, 10_000) ax = boxenplot(x=x, width_method="area", k_depth=2) ps = ax.findobj(mpl.collections.PatchCollection)[0].get_paths() ws = [self.get_box_width(p) for p in ps] assert np.greater(ws, 0.7).all() def test_box_kws(self, long_df): ax = boxenplot(long_df, x="a", y="y", box_kws={"linewidth": (lw := 7.1)}) for c in ax.findobj(mpl.collections.PatchCollection): assert c.get_linewidths() == lw def test_line_kws(self, long_df): ax = boxenplot(long_df, x="a", y="y", line_kws={"linewidth": (lw := 6.2)}) for line in ax.lines: assert line.get_linewidth() == lw def test_flier_kws(self, long_df): ax = boxenplot(long_df, x="a", y="y", flier_kws={"marker": (marker := "X")}) expected = mpl.markers.MarkerStyle(marker).get_path().vertices for c in ax.findobj(mpl.collections.PathCollection): assert_array_equal(c.get_paths()[0].vertices, expected) def test_k_depth_checks(self, long_df): with pytest.raises(ValueError, match="The value for `k_depth`"): boxenplot(x=long_df["y"], k_depth="auto") with pytest.raises(TypeError, match="The `k_depth` parameter"): boxenplot(x=long_df["y"], 
k_depth=(1, 2)) def test_width_method_check(self, long_df): with pytest.raises(ValueError, match="The value for `width_method`"): boxenplot(x=long_df["y"], width_method="uniform") def test_scale_deprecation(self, long_df): with pytest.warns(FutureWarning, match="The `scale` parameter has been"): boxenplot(x=long_df["y"], scale="linear") with pytest.warns(FutureWarning, match=".+result for 'area' will appear"): boxenplot(x=long_df["y"], scale="area") @pytest.mark.parametrize( "kwargs", [ dict(data="wide"), dict(data="wide", orient="h"), dict(data="flat"), dict(data="long", x="a", y="y"), dict(data=None, x="a", y="y"), dict(data="long", x="a", y="y", hue="a"), dict(data=None, x="a", y="y", hue="a"), dict(data="long", x="a", y="y", hue="b"), dict(data=None, x="s", y="y", hue="a"), dict(data="long", x="a", y="y", hue="s", showfliers=False), dict(data="null", x="a", y="y", hue="a", saturation=.5), dict(data="long", x="s", y="y", hue="a", native_scale=True), dict(data="long", x="d", y="y", hue="a", native_scale=True), dict(data="null", x="a", y="y", hue="b", fill=False, gap=.2), dict(data="null", x="a", y="y", linecolor="r", linewidth=5), dict(data="long", x="a", y="y", k_depth="trustworthy", trust_alpha=.1), dict(data="long", x="a", y="y", k_depth="proportion", outlier_prop=.1), dict(data="long", x="a", y="z", width_method="area"), dict(data="long", x="a", y="z", box_kws={"alpha": .2}, alpha=.4) ] ) def test_vs_catplot(self, long_df, wide_df, null_df, flat_series, kwargs): if kwargs["data"] == "long": kwargs["data"] = long_df elif kwargs["data"] == "wide": kwargs["data"] = wide_df elif kwargs["data"] == "flat": kwargs["data"] = flat_series elif kwargs["data"] == "null": kwargs["data"] = null_df elif kwargs["data"] is None: for var in ["x", "y", "hue"]: if var in kwargs: kwargs[var] = long_df[kwargs[var]] ax = boxenplot(**kwargs) g = catplot(**kwargs, kind="boxen") assert_plots_equal(ax, g.ax)
TestBoxenPlot
python
pdm-project__pdm
src/pdm/core.py
{ "start": 1320, "end": 1877 }
class ____: """State of the core object.""" config_settings: dict[str, Any] | None = None """The config settings map shared by all packages""" exclude_newer: datetime | None = None """The exclude newer than datetime for the lockfile""" build_isolation: bool = True """Whether to make an isolated environment and install requirements for build""" enable_cache: bool = True """Whether to enable the cache""" overrides: list[str] = dc.field(default_factory=list) """The requirement overrides for the resolver"""
State
python
ray-project__ray
doc/source/ray-core/doc_code/getting_started.py
{ "start": 454, "end": 1446 }
class ____: def __init__(self): self.i = 0 def get(self): return self.i def incr(self, value): self.i += value # Create a Counter actor. c = Counter.remote() # Submit calls to the actor. These calls run asynchronously but in # submission order on the remote actor process. for _ in range(10): c.incr.remote(1) # Retrieve final actor state. print(ray.get(c.get.remote())) # -> 10 # __calling_actor_end__ # fmt: on # fmt: off # __passing_object_start__ import numpy as np # Define a task that sums the values in a matrix. @ray.remote def sum_matrix(matrix): return np.sum(matrix) # Call the task with a literal argument value. print(ray.get(sum_matrix.remote(np.ones((100, 100))))) # -> 10000.0 # Put a large array into the object store. matrix_ref = ray.put(np.ones((1000, 1000))) # Call the task with the object reference as an argument. print(ray.get(sum_matrix.remote(matrix_ref))) # -> 1000000.0 # __passing_object_end__ # fmt: on
Counter
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/named_tuples.py
{ "start": 277, "end": 948 }
class ____(NamedTuple): benign: int bad: str def tainted_tuple() -> MyNamedTuple: return MyNamedTuple(bad=_test_source(), benign=1) def issue_with_bad(): a = tainted_tuple() _test_sink(a.bad) def no_issue_with_benign(): a = tainted_tuple() _test_sink(a.benign) OldSchoolNamedTuple = collections.namedtuple("OldSchoolNamedTuple", "benign bad") def tainted_old_tuple(): return OldSchoolNamedTuple(bad=_test_source(), benign=1) def issue_with_old_school_named_tuples(): a = tainted_old_tuple() _test_sink(a.bad) def no_issue_with_old_school_named_tuples(): a = tainted_old_tuple() _test_sink(a.benign)
MyNamedTuple
python
redis__redis-py
redis/event.py
{ "start": 8452, "end": 9740 }
class ____(EventListenerInterface): """ Listener that registers a re-authentication callback for pooled connections. Required by :class:`StreamingCredentialProvider`. """ def __init__(self): self._event = None def listen(self, event: AfterPooledConnectionsInstantiationEvent): if isinstance(event.credential_provider, StreamingCredentialProvider): self._event = event if event.client_type == ClientType.SYNC: event.credential_provider.on_next(self._re_auth) event.credential_provider.on_error(self._raise_on_error) else: event.credential_provider.on_next(self._re_auth_async) event.credential_provider.on_error(self._raise_on_error_async) def _re_auth(self, token): for pool in self._event.connection_pools: pool.re_auth_callback(token) async def _re_auth_async(self, token): for pool in self._event.connection_pools: await pool.re_auth_callback(token) def _raise_on_error(self, error: Exception): raise EventException(error, self._event) async def _raise_on_error_async(self, error: Exception): raise EventException(error, self._event)
RegisterReAuthForPooledConnections
python
django__django
tests/auth_tests/models/with_foreign_key.py
{ "start": 515, "end": 923 }
class ____(AbstractBaseUser): username = models.ForeignKey(Email, models.CASCADE, related_name="primary") email = models.ForeignKey( Email, models.CASCADE, to_field="email", related_name="secondary" ) group = models.ForeignKey(Group, models.CASCADE) custom_objects = CustomUserWithFKManager() USERNAME_FIELD = "username" REQUIRED_FIELDS = ["email", "group"]
CustomUserWithFK
python
django-haystack__django-haystack
test_haystack/test_indexes.py
{ "start": 22730, "end": 23070 }
class ____(indexes.ModelSearchIndex, indexes.Indexable): foo = indexes.IntegerField(model_attr="foo") class Meta: model = MockModel fields = ["author", "foo"] def get_index_fieldname(self, f): if f.name == "author": return "author_bar" return f.name
FieldsWithOverrideModelSearchIndex
python
matplotlib__matplotlib
lib/matplotlib/backends/backend_pdf.py
{ "start": 101448, "end": 105409 }
class ____: """ A multi-page PDF file. Examples -------- >>> import matplotlib.pyplot as plt >>> # Initialize: >>> with PdfPages('foo.pdf') as pdf: ... # As many times as you like, create a figure fig and save it: ... fig = plt.figure() ... pdf.savefig(fig) ... # When no figure is specified the current figure is saved ... pdf.savefig() Notes ----- In reality `PdfPages` is a thin wrapper around `PdfFile`, in order to avoid confusion when using `~.pyplot.savefig` and forgetting the format argument. """ @_api.delete_parameter("3.10", "keep_empty", addendum="This parameter does nothing.") def __init__(self, filename, keep_empty=None, metadata=None): """ Create a new PdfPages object. Parameters ---------- filename : str or path-like or file-like Plots using `PdfPages.savefig` will be written to a file at this location. The file is opened when a figure is saved for the first time (overwriting any older file with the same name). metadata : dict, optional Information dictionary object (see PDF reference section 10.2.1 'Document Information Dictionary'), e.g.: ``{'Creator': 'My software', 'Author': 'Me', 'Title': 'Awesome'}``. The standard keys are 'Title', 'Author', 'Subject', 'Keywords', 'Creator', 'Producer', 'CreationDate', 'ModDate', and 'Trapped'. Values have been predefined for 'Creator', 'Producer' and 'CreationDate'. They can be removed by setting them to `None`. """ self._filename = filename self._metadata = metadata self._file = None def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.close() def _ensure_file(self): if self._file is None: self._file = PdfFile(self._filename, metadata=self._metadata) # init. return self._file def close(self): """ Finalize this object, making the underlying file a complete PDF file. 
""" if self._file is not None: self._file.finalize() self._file.close() self._file = None def infodict(self): """ Return a modifiable information dictionary object (see PDF reference section 10.2.1 'Document Information Dictionary'). """ return self._ensure_file().infoDict def savefig(self, figure=None, **kwargs): """ Save a `.Figure` to this file as a new page. Any other keyword arguments are passed to `~.Figure.savefig`. Parameters ---------- figure : `.Figure` or int, default: the active figure The figure, or index of the figure, that is saved to the file. """ if not isinstance(figure, Figure): if figure is None: manager = Gcf.get_active() else: manager = Gcf.get_fig_manager(figure) if manager is None: raise ValueError(f"No figure {figure}") figure = manager.canvas.figure # Force use of pdf backend, as PdfPages is tightly coupled with it. figure.savefig(self, format="pdf", backend="pdf", **kwargs) def get_pagecount(self): """Return the current number of pages in the multipage pdf file.""" return len(self._ensure_file().pageList) def attach_note(self, text, positionRect=[-100, -100, 0, 0]): """ Add a new text note to the page to be saved next. The optional positionRect specifies the position of the new note on the page. It is outside the page per default to make sure it is invisible on printouts. """ self._ensure_file().newTextnote(text, positionRect)
PdfPages
python
pyca__cryptography
tests/x509/test_x509.py
{ "start": 231009, "end": 231968 }
class ____: def test_unsupported_subject_public_key_info(self, backend): cert = _load_cert( os.path.join( "x509", "custom", "unsupported_subject_public_key_info.pem" ), x509.load_pem_x509_certificate, ) with pytest.raises(UnsupportedAlgorithm): cert.public_key() def test_bad_time_in_validity(self, backend): with pytest.raises(ValueError, match="Validity::not_after"): _load_cert( os.path.join("x509", "badasn1time.pem"), x509.load_pem_x509_certificate, ) def test_invalid_empty_eku(self, backend): cert = _load_cert( os.path.join("x509", "custom", "empty-eku.pem"), x509.load_pem_x509_certificate, ) with pytest.raises(ValueError, match="InvalidSize"): cert.extensions.get_extension_for_class(ExtendedKeyUsage)
TestOtherCertificate
python
coleifer__peewee
playhouse/reflection.py
{ "start": 16450, "end": 31148 }
class ____(object): pk_classes = [AutoField, IntegerField] def __init__(self, metadata, schema=None): self.metadata = metadata self.schema = schema def __repr__(self): return '<Introspector: %s>' % self.metadata.database @classmethod def from_database(cls, database, schema=None): if isinstance(database, Proxy): if database.obj is None: raise ValueError('Cannot introspect an uninitialized Proxy.') database = database.obj # Reference the proxied db obj. if CockroachDatabase and isinstance(database, CockroachDatabase): metadata = CockroachDBMetadata(database) elif isinstance(database, PostgresqlDatabase): metadata = PostgresqlMetadata(database) elif isinstance(database, MySQLDatabase): metadata = MySQLMetadata(database) elif isinstance(database, SqliteDatabase): metadata = SqliteMetadata(database) else: raise ValueError('Introspection not supported for %r' % database) return cls(metadata, schema=schema) def get_database_class(self): return type(self.metadata.database) def get_database_name(self): return self.metadata.database.database def get_database_kwargs(self): return self.metadata.database.connect_params def get_additional_imports(self): if self.metadata.requires_extension: return '\n' + self.metadata.extension_import return '' def make_model_name(self, table, snake_case=True): if snake_case: table = make_snake_case(table) model = re.sub(r'[^\w]+', '', table) model_name = ''.join(sub.title() for sub in model.split('_')) if not model_name[0].isalpha(): model_name = 'T' + model_name return model_name def make_column_name(self, column, is_foreign_key=False, snake_case=True): column = column.strip() if snake_case: column = make_snake_case(column) column = column.lower() if is_foreign_key: # Strip "_id" from foreign keys, unless the foreign-key happens to # be named "_id", in which case the name is retained. column = re.sub('_id$', '', column) or column # Remove characters that are invalid for Python identifiers. 
column = re.sub(r'[^\w]+', '_', column) if column in RESERVED_WORDS: column += '_' if len(column) and column[0].isdigit(): column = '_' + column return column def introspect(self, table_names=None, literal_column_names=False, include_views=False, snake_case=True): # Retrieve all the tables in the database. tables = self.metadata.database.get_tables(schema=self.schema) if include_views: views = self.metadata.database.get_views(schema=self.schema) tables.extend([view.name for view in views]) if table_names is not None: tables = [table for table in tables if table in table_names] table_set = set(tables) # Store a mapping of table name -> dictionary of columns. columns = {} # Store a mapping of table name -> set of primary key columns. primary_keys = {} # Store a mapping of table -> foreign keys. foreign_keys = {} # Store a mapping of table name -> model name. model_names = {} # Store a mapping of table name -> indexes. indexes = {} # Gather the columns for each table. for table in tables: table_indexes = self.metadata.get_indexes(table, self.schema) table_columns = self.metadata.get_columns(table, self.schema) try: foreign_keys[table] = self.metadata.get_foreign_keys( table, self.schema) except ValueError as exc: foreign_keys[table] = [] else: # If there is a possibility we could exclude a dependent table, # ensure that we introspect it so FKs will work. if table_names is not None: for foreign_key in foreign_keys[table]: if foreign_key.dest_table not in table_set: tables.append(foreign_key.dest_table) table_set.add(foreign_key.dest_table) model_names[table] = self.make_model_name(table, snake_case) # Collect sets of all the column names as well as all the # foreign-key column names. 
lower_col_names = set(column_name.lower() for column_name in table_columns) fks = set(fk_col.column for fk_col in foreign_keys[table]) for col_name, column in table_columns.items(): if literal_column_names: new_name = re.sub(r'[^\w]+', '_', col_name) else: new_name = self.make_column_name(col_name, col_name in fks, snake_case) # If we have two columns, "parent" and "parent_id", ensure # that when we don't introduce naming conflicts. lower_name = col_name.lower() if lower_name.endswith('_id') and new_name in lower_col_names: new_name = col_name.lower() column.name = new_name for index in table_indexes: if len(index.columns) == 1: column = index.columns[0] if column in table_columns: table_columns[column].unique = index.unique table_columns[column].index = True primary_keys[table] = self.metadata.get_primary_keys( table, self.schema) columns[table] = table_columns indexes[table] = table_indexes # Gather all instances where we might have a `related_name` conflict, # either due to multiple FKs on a table pointing to the same table, # or a related_name that would conflict with an existing field. related_names = {} sort_fn = lambda foreign_key: foreign_key.column for table in tables: models_referenced = set() for foreign_key in sorted(foreign_keys[table], key=sort_fn): try: column = columns[table][foreign_key.column] except KeyError: continue dest_table = foreign_key.dest_table if dest_table in models_referenced: related_names[column] = '%s_%s_set' % ( dest_table, column.name) else: models_referenced.add(dest_table) # On the second pass convert all foreign keys. 
for table in tables: for foreign_key in foreign_keys[table]: src = columns[foreign_key.table][foreign_key.column] try: dest = columns[foreign_key.dest_table][ foreign_key.dest_column] except KeyError: dest = None src.set_foreign_key( foreign_key=foreign_key, model_names=model_names, dest=dest, related_name=related_names.get(src)) return DatabaseMetadata( columns, primary_keys, foreign_keys, model_names, indexes) def generate_models(self, skip_invalid=False, table_names=None, literal_column_names=False, bare_fields=False, include_views=False): database = self.introspect(table_names, literal_column_names, include_views) models = {} class BaseModel(Model): class Meta: database = self.metadata.database schema = self.schema pending = set() def _create_model(table, models): pending.add(table) for foreign_key in database.foreign_keys[table]: dest = foreign_key.dest_table if dest not in models and dest != table: if dest in pending: warnings.warn('Possible reference cycle found between ' '%s and %s' % (table, dest)) else: _create_model(dest, models) primary_keys = [] columns = database.columns[table] for column_name, column in columns.items(): if column.primary_key: primary_keys.append(column.name) multi_column_indexes = database.multi_column_indexes(table) column_indexes = database.column_indexes(table) class Meta: indexes = multi_column_indexes table_name = table # Fix models with multi-column primary keys. 
composite_key = False if len(primary_keys) == 0: if 'id' not in columns: Meta.primary_key = False else: primary_keys = columns.keys() if len(primary_keys) > 1: Meta.primary_key = CompositeKey(*[ field.name for col, field in columns.items() if col in primary_keys]) composite_key = True attrs = {'Meta': Meta} for column_name, column in columns.items(): FieldClass = column.field_class if FieldClass is not ForeignKeyField and bare_fields: FieldClass = BareField elif FieldClass is UnknownField: FieldClass = BareField params = { 'column_name': column_name, 'null': column.nullable} if column.primary_key and composite_key: if FieldClass is AutoField: FieldClass = IntegerField params['primary_key'] = False elif column.primary_key and FieldClass is not AutoField: params['primary_key'] = True if column.is_foreign_key(): if column.is_self_referential_fk(): params['model'] = 'self' else: dest_table = column.foreign_key.dest_table if dest_table in models: params['model'] = models[dest_table] else: FieldClass = DeferredForeignKey params['rel_model_name'] = dest_table if column.to_field: params['field'] = column.to_field # Generate a unique related name. params['backref'] = '%s_%s_rel' % (table, column_name) if column.default is not None: constraint = SQL('DEFAULT %s' % column.default) params['constraints'] = [constraint] if not column.is_primary_key(): if column_name in column_indexes: if column_indexes[column_name]: params['unique'] = True elif not column.is_foreign_key(): params['index'] = True else: params['index'] = False attrs[column.name] = FieldClass(**params) try: models[table] = type(str(table), (BaseModel,), attrs) except ValueError: if not skip_invalid: raise finally: if table in pending: pending.remove(table) # Actually generate Model classes. 
for table, model in sorted(database.model_names.items()): if table not in models: _create_model(table, models) return models def introspect(database, schema=None): introspector = Introspector.from_database(database, schema=schema) return introspector.introspect() def generate_models(database, schema=None, **options): introspector = Introspector.from_database(database, schema=schema) return introspector.generate_models(**options) def print_model(model, indexes=True, inline_indexes=False): print(model._meta.name) for field in model._meta.sorted_fields: parts = [' %s %s' % (field.name, field.field_type)] if field.primary_key: parts.append(' PK') elif inline_indexes: if field.unique: parts.append(' UNIQUE') elif field.index: parts.append(' INDEX') if isinstance(field, ForeignKeyField): parts.append(' FK: %s.%s' % (field.rel_model.__name__, field.rel_field.name)) print(''.join(parts)) if indexes: index_list = model._meta.fields_to_index() if not index_list: return print('\nindex(es)') for index in index_list: parts = [' '] ctx = model._meta.database.get_sql_context() with ctx.scope_values(param='%s', quote='""'): ctx.sql(CommaNodeList(index._expressions)) if index._where: ctx.literal(' WHERE ') ctx.sql(index._where) sql, params = ctx.query() clean = sql % tuple(map(_query_val_transform, params)) parts.append(clean.replace('"', '')) if index._unique: parts.append(' UNIQUE') print(''.join(parts)) def get_table_sql(model): sql, params = model._schema._create_table().query() if model._meta.database.param != '%s': sql = sql.replace(model._meta.database.param, '%s') # Format and indent the table declaration, simplest possible approach. match_obj = re.match(r'^(.+?\()(.+)(\).*)', sql) create, columns, extra = match_obj.groups() indented = ',\n'.join(' %s' % column for column in columns.split(', ')) clean = '\n'.join((create, indented, extra)).strip() return clean % tuple(map(_query_val_transform, params)) def print_table_sql(model): print(get_table_sql(model))
Introspector
python
xlwings__xlwings
xlwings/conversion/framework.py
{ "start": 1092, "end": 2519 }
class ____(list): def prepend_stage(self, stage, only_if=True): if only_if: self.insert(0, stage) return self def append_stage(self, stage, only_if=True): if only_if: self.append(stage) return self def insert_stage( self, stage, index=None, after=None, before=None, replace=None, only_if=True ): if only_if: if sum(x is not None for x in (index, after, before, replace)) != 1: raise ValueError( "Must specify exactly one of arguments: " "index, after, before, replace" ) if index is not None: indices = (index,) elif after is not None: indices = tuple( i + 1 for i, x in enumerate(self) if isinstance(x, after) ) elif before is not None: indices = tuple(i for i, x in enumerate(self) if isinstance(x, before)) elif replace is not None: for i, x in enumerate(self): if isinstance(x, replace): self[i] = stage return self for i in reversed(indices): self.insert(i, stage) return self def __call__(self, *args, **kwargs): for stage in self: stage(*args, **kwargs) accessors = {}
Pipeline
python
huggingface__transformers
src/transformers/models/deepseek_v3/modeling_deepseek_v3.py
{ "start": 2414, "end": 5426 }
class ____(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, config: DeepseekV3Config, device=None): super().__init__() self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_type = self.config.rope_parameters["rope_type"] rope_init_fn: Callable = self.compute_default_rope_parameters if self.rope_type != "default": rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = inv_freq @staticmethod def compute_default_rope_parameters( config: Optional[DeepseekV3Config] = None, device: Optional["torch.device"] = None, seq_len: Optional[int] = None, ) -> tuple["torch.Tensor", float]: """ Computes the inverse frequencies according to the original RoPE implementation Args: config ([`~transformers.PreTrainedConfig`]): The model configuration. device (`torch.device`): The device to use for initialization of the inverse frequencies. seq_len (`int`, *optional*): The current sequence length. Unused for this type of RoPE. Returns: Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE). """ base = config.rope_parameters["rope_theta"] dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads attention_factor = 1.0 # Unused in this type of RoPE # Compute the inverse frequencies inv_freq = 1.0 / ( base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim) ) return inv_freq, attention_factor @torch.no_grad() @dynamic_rope_update # power user: used with advanced RoPE types (e.g. 
dynamic rope) def forward(self, x, position_ids): inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
DeepseekV3RotaryEmbedding
python
apache__airflow
airflow-core/src/airflow/dag_processing/dagbag.py
{ "start": 2941, "end": 7174 }
class ____(NamedTuple): """ Information about single file. :param file: Loaded file. :param duration: Time spent on process file. :param dag_num: Total number of DAGs loaded in this file. :param task_num: Total number of Tasks loaded in this file. :param dags: DAGs names loaded in this file. :param warning_num: Total number of warnings captured from processing this file. """ file: str duration: timedelta dag_num: int task_num: int dags: str warning_num: int @contextlib.contextmanager def timeout(seconds=1, error_message="Timeout"): import logging log = logging.getLogger(__name__) error_message = error_message + ", PID: " + str(os.getpid()) def handle_timeout(signum, frame): """Log information and raises AirflowTaskTimeout.""" log.error("Process timed out, PID: %s", str(os.getpid())) from airflow.sdk.exceptions import AirflowTaskTimeout raise AirflowTaskTimeout(error_message) try: try: signal.signal(signal.SIGALRM, handle_timeout) signal.setitimer(signal.ITIMER_REAL, seconds) except ValueError: log.warning("timeout can't be used in the current context", exc_info=True) yield finally: with contextlib.suppress(ValueError): signal.setitimer(signal.ITIMER_REAL, 0) def _executor_exists(executor_name: str, team_name: str | None) -> bool: """Check if executor exists, with global fallback for teams.""" try: # First pass check for team-specific executor or a global executor (i.e. team_name=None) ExecutorLoader.lookup_executor_name_by_str(executor_name, team_name=team_name) return True except UnknownExecutorException: if team_name: # If we had a team_name but didn't find an executor, check if there is a global executor that # satisfies the request. 
try: ExecutorLoader.lookup_executor_name_by_str(executor_name, team_name=None) return True except UnknownExecutorException: pass return False def _validate_executor_fields(dag: DAG, bundle_name: str | None = None) -> None: """Validate that executors specified in tasks are available and owned by the same team as the dag bundle.""" import logging log = logging.getLogger(__name__) dag_team_name = None # Check if multi team is available by reading the multi_team configuration (which is boolean) if conf.getboolean("core", "multi_team"): # Get team name from bundle configuration if available if bundle_name: from airflow.dag_processing.bundles.manager import DagBundlesManager bundle_manager = DagBundlesManager() bundle_config = bundle_manager._bundle_config[bundle_name] dag_team_name = bundle_config.team_name if dag_team_name: log.debug( "Found team '%s' for DAG '%s' via bundle '%s'", dag_team_name, dag.dag_id, bundle_name ) for task in dag.tasks: if not task.executor: continue if not _executor_exists(task.executor, dag_team_name): if dag_team_name: raise UnknownExecutorException( f"Task '{task.task_id}' specifies executor '{task.executor}', which is not available " f"for team '{dag_team_name}' (the team associated with DAG '{dag.dag_id}') or as a global executor. " f"Make sure '{task.executor}' is configured for team '{dag_team_name}' or globally in your " "[core] executors configuration, or update the task's executor to use one of the " f"configured executors for team '{dag_team_name}' or available global executors." ) raise UnknownExecutorException( f"Task '{task.task_id}' specifies executor '{task.executor}', which is not available. " "Make sure it is listed in your [core] executors configuration, or update the task's " "executor to use one of the configured executors." )
FileLoadStat
python
ansible__ansible
lib/ansible/_internal/_ssh/_ssh_agent.py
{ "start": 10326, "end": 10680 }
class ____(PrivateKeyMsg): type: KeyAlgo n: mpint e: mpint d: mpint iqmp: mpint p: mpint q: mpint comments: unicode_string = dataclasses.field(default=unicode_string(''), compare=False) constraints: constraints = dataclasses.field(default=constraints(b'')) @dataclasses.dataclass(order=True, slots=True)
RSAPrivateKeyMsg
python
huggingface__transformers
src/transformers/models/videomae/image_processing_videomae.py
{ "start": 1899, "end": 16530 }
class ____(BaseImageProcessor): r""" Constructs a VideoMAE image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`): Size of the output image after resizing. The shortest edge of the image will be resized to `size["shortest_edge"]` while maintaining the aspect ratio of the original image. Can be overridden by `size` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the `preprocess` method. do_center_crop (`bool`, *optional*, defaults to `True`): Whether to center crop the image to the specified `crop_size`. Can be overridden by the `do_center_crop` parameter in the `preprocess` method. crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`): Size of the image after applying the center crop. Can be overridden by the `crop_size` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. 
Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. """ model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Optional[dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Optional[dict[str, int]] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"shortest_edge": 224} size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224} crop_size = get_size_dict(crop_size, param_name="crop_size") self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD def resize( self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Size of the output image. 
If `size` is of the form `{"height": h, "width": w}`, the output image will have the size `(h, w)`. If `size` is of the form `{"shortest_edge": s}`, the output image will have its shortest edge of length `s` while keeping the aspect ratio of the original image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use when resiizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ size = get_size_dict(size, default_to_square=False) if "shortest_edge" in size: output_size = get_resize_output_image_size( image, size["shortest_edge"], default_to_square=False, input_data_format=input_data_format ) elif "height" in size and "width" in size: output_size = (size["height"], size["width"]) else: raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. 
Got {size.keys()}") return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def _preprocess_image( self, image: ImageInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, resample: Optional[PILImageResampling] = None, do_center_crop: Optional[bool] = None, crop_size: Optional[dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """Preprocesses a single image.""" validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample, ) # All transformations expect numpy arrays. image = to_numpy_array(image) if do_rescale and is_scaled_image(image): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." 
) if input_data_format is None: input_data_format = infer_channel_dimension_format(image) if do_resize: image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) if do_center_crop: image = self.center_crop(image, size=crop_size, input_data_format=input_data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image @filter_out_non_signature_kwargs() def preprocess( self, videos: ImageInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, resample: Optional[PILImageResampling] = None, do_center_crop: Optional[bool] = None, crop_size: Optional[dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after applying resize. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. 
This can be one of the enum `PILImageResampling`, Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_centre_crop`): Whether to centre crop the image. crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the image after applying the centre crop. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the inferred channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. 
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name="crop_size") if not valid_images(videos): raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor") videos = make_batched(videos) videos = [ [ self._preprocess_image( image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, input_data_format=input_data_format, ) for img in video ] for video in videos ] data = {"pixel_values": videos} return BatchFeature(data=data, tensor_type=return_tensors) __all__ = ["VideoMAEImageProcessor"]
VideoMAEImageProcessor
python
gwtw__py-sorting
test/bubble_sort_optimised_test.py
{ "start": 416, "end": 802 }
class ____(unittest.TestCase, BaseCustomComparisonSortTest, BasePositiveIntegerSortTest, BaseNegativeIntegerSortTest, BaseStringSortTest): def setUp(self): self.sort = bubble_sort_optimised.sort if __name__ == '__main__': unittest.main()
BubbleSortOptimisedTest
python
airbytehq__airbyte
airbyte-integrations/connectors/source-iterable/source_iterable/streams.py
{ "start": 17789, "end": 17888 }
class ____(IterableExportEventsStreamAdjustableRange): data_field = "inAppSendSkip"
InAppSendSkip
python
huggingface__transformers
tests/quantization/fp_quant_integration/test_fp_quant.py
{ "start": 6693, "end": 6895 }
class ____(FPQuantBaseTest): @classmethod def getQuantizationConfig(cls): return FPQuantConfig(forward_dtype="nvfp4", pseudoquantization=False, hadamard_group_size=128)
FPQuantNVFP4GS128Test
python
getsentry__sentry
src/sentry/silo/client.py
{ "start": 3096, "end": 8345 }
class ____(BaseApiClient): integration_type = "silo_client" access_modes = [SiloMode.CONTROL] metrics_prefix = "silo_client.region" logger = logging.getLogger("sentry.silo.client.region") silo_client_name = "region" def __init__(self, region: Region, retry: bool = False) -> None: super().__init__() if SiloMode.get_current_mode() not in self.access_modes: access_mode_str = ", ".join(str(m) for m in self.access_modes) raise SiloClientError( f"Cannot invoke {self.__class__.__name__} from {SiloMode.get_current_mode()}. " f"Only available in: {access_mode_str}" ) if not isinstance(region, Region): raise SiloClientError(f"Invalid region provided. Received {type(region)} type instead.") # Ensure the region is registered self.region = get_region_by_name(region.name) self.base_url = self.region.address self.retry = retry def proxy_request(self, incoming_request: HttpRequest) -> HttpResponse: """ Directly proxy the provided request to the appropriate silo with minimal header changes. """ full_url = self.build_url(incoming_request.get_full_path()) prepared_request = Request( method=incoming_request.method, url=full_url, headers=clean_proxy_headers(incoming_request.headers), data=incoming_request.body, ).prepare() assert incoming_request.method is not None raw_response = super()._request( incoming_request.method, incoming_request.get_full_path(), prepared_request=prepared_request, raw_response=True, ) self.logger.info( "proxy_request", extra={"method": incoming_request.method, "path": incoming_request.path}, ) http_response = HttpResponse( content=raw_response.content, status=raw_response.status_code, reason=raw_response.reason, content_type=raw_response.headers.get("Content-Type"), # XXX: Can be added in Django 3.2 # headers=raw_response.headers ) valid_headers = clean_outbound_headers(raw_response.headers) for header, value in valid_headers.items(): http_response[header] = value http_response[PROXY_DIRECT_LOCATION_HEADER] = full_url return http_response def request( self, 
method: str, path: str, headers: Mapping[str, Any] | None = None, data: Any | None = None, params: Mapping[str, Any] | None = None, json: bool = True, raw_response: bool = False, prefix_hash: str | None = None, ) -> Any: """ Sends a request to the region silo. If prefix_hash is provided, the request will be retries up to REQUEST_ATTEMPTS_LIMIT times. """ if prefix_hash is not None: hash = sha256(f"{prefix_hash}{self.region.name}{method}{path}".encode()).hexdigest() self.check_request_attempts(hash=hash, method=method, path=path) return self._request( method=method, path=path, headers=clean_proxy_headers(headers), data=data, params=params, json=json, allow_text=True, raw_response=raw_response, ) def build_session(self) -> SafeSession: """ Generates a safe Requests session for the API client to use. This injects a custom is_ipaddress_permitted function to allow only connections to Region Silo IP addresses. """ if not self.retry: return build_session( is_ipaddress_permitted=validate_region_ip_address, ) return build_session( is_ipaddress_permitted=validate_region_ip_address, max_retries=Retry( total=options.get("hybridcloud.regionsiloclient.retries"), backoff_factor=0.1, status_forcelist=[503], allowed_methods=["PATCH", "HEAD", "PUT", "GET", "DELETE", "POST"], ), ) def _get_hash_cache_key(self, hash: str) -> str: return f"region_silo_client:request_attempts:{hash}" def check_request_attempts(self, hash: str, method: str, path: str) -> None: cache_key = self._get_hash_cache_key(hash=hash) request_attempts: int | None = cache.get(cache_key) if not isinstance(request_attempts, int): request_attempts = 0 self.logger.info( "silo_client.check_request_attempts", extra={ "path": path, "method": method, "request_hash": hash, "request_attempts": request_attempts, "configured_attempt_limit": REQUEST_ATTEMPTS_LIMIT, }, ) request_attempts += 1 cache.set(cache_key, request_attempts, timeout=CACHE_TIMEOUT) if request_attempts > REQUEST_ATTEMPTS_LIMIT: raise SiloClientError(f"Request 
attempts limit reached for: {method} {path}")
RegionSiloClient
python
airbytehq__airbyte
airbyte-integrations/connectors/destination-weaviate/destination_weaviate/config.py
{ "start": 1951, "end": 3705 }
class ____(BaseModel): host: str = Field( ..., title="Public Endpoint", order=1, description="The public endpoint of the Weaviate cluster.", examples=["https://my-cluster.weaviate.network"], ) auth: Union[TokenAuth, UsernamePasswordAuth, NoAuth] = Field( ..., title="Authentication", description="Authentication method", discriminator="mode", type="object", order=2 ) batch_size: int = Field(title="Batch Size", description="The number of records to send to Weaviate in each batch", default=128) text_field: str = Field(title="Text Field", description="The field in the object that contains the embedded text", default="text") tenant_id: str = Field(title="Tenant ID", description="The tenant ID to use for multi tenancy", airbyte_secret=True, default="") default_vectorizer: str = Field( title="Default Vectorizer", description="The vectorizer to use if new classes need to be created", default="none", enum=[ "none", "text2vec-cohere", "text2vec-huggingface", "text2vec-openai", "text2vec-palm", "text2vec-contextionary", "text2vec-transformers", "text2vec-gpt4all", ], ) additional_headers: List[Header] = Field( title="Additional headers", description="Additional HTTP headers to send with every request.", default=[], examples=[{"header_key": "X-OpenAI-Api-Key", "value": "my-openai-api-key"}], ) class Config: title = "Indexing" schema_extra = { "group": "indexing", "description": "Indexing configuration", }
WeaviateIndexingConfigModel
python
airbytehq__airbyte
airbyte-ci/connectors/metadata_service/lib/metadata_service/models/generated/ConnectorMetadataDefinitionV0.py
{ "start": 7057, "end": 7186 }
class ____(BaseModel): all: Optional[Any] = None cloud: Optional[Any] = None oss: Optional[Any] = None
ConnectorMetrics
python
doocs__leetcode
solution/1200-1299/1292.Maximum Side Length of a Square with Sum Less than or Equal to Threshold/Solution.py
{ "start": 0, "end": 829 }
class ____: def maxSideLength(self, mat: List[List[int]], threshold: int) -> int: def check(k: int) -> bool: for i in range(m - k + 1): for j in range(n - k + 1): v = s[i + k][j + k] - s[i][j + k] - s[i + k][j] + s[i][j] if v <= threshold: return True return False m, n = len(mat), len(mat[0]) s = [[0] * (n + 1) for _ in range(m + 1)] for i, row in enumerate(mat, 1): for j, x in enumerate(row, 1): s[i][j] = s[i - 1][j] + s[i][j - 1] - s[i - 1][j - 1] + x l, r = 0, min(m, n) while l < r: mid = (l + r + 1) >> 1 if check(mid): l = mid else: r = mid - 1 return l
Solution
python
doocs__leetcode
solution/1200-1299/1214.Two Sum BSTs/Solution.py
{ "start": 192, "end": 875 }
class ____: def twoSumBSTs( self, root1: Optional[TreeNode], root2: Optional[TreeNode], target: int ) -> bool: def dfs(root: Optional[TreeNode], i: int): if root is None: return dfs(root.left, i) nums[i].append(root.val) dfs(root.right, i) nums = [[], []] dfs(root1, 0) dfs(root2, 1) i, j = 0, len(nums[1]) - 1 while i < len(nums[0]) and ~j: x = nums[0][i] + nums[1][j] if x == target: return True if x < target: i += 1 else: j -= 1 return False
Solution
python
walkccc__LeetCode
solutions/199. Binary Tree Right Side View/199-2.py
{ "start": 0, "end": 339 }
class ____: def rightSideView(self, root: TreeNode | None) -> list[int]: ans = [] def dfs(root: TreeNode | None, depth: int) -> None: if not root: return if depth == len(ans): ans.append(root.val) dfs(root.right, depth + 1) dfs(root.left, depth + 1) dfs(root, 0) return ans
Solution
python
django__django
tests/gis_tests/geogapp/models.py
{ "start": 210, "end": 329 }
class ____(NamedModel): point = models.PointField(geography=True) class Meta: app_label = "geogapp"
City
python
has2k1__plotnine
plotnine/geoms/geom.py
{ "start": 951, "end": 17687 }
class ____(ABC, metaclass=Register): """Base class of all Geoms""" DEFAULT_AES: dict[str, Any] = {} """Default aesthetics for the geom""" REQUIRED_AES: set[str] = set() """Required aesthetics for the geom""" NON_MISSING_AES: set[str] = set() """Required aesthetics for the geom""" DEFAULT_PARAMS: dict[str, Any] = {} """Required parameters for the geom""" data: DataLike """Geom/layer specific dataframe""" mapping: aes """Mappings i.e. `aes(x="col1", fill="col2")`{.py}""" aes_params: dict[str, Any] = {} # setting of aesthetic params: dict[str, Any] # parameter settings # Plot namespace, it gets its value when the plot is being # built. environment: Environment # The geom responsible for the legend if draw_legend is # not implemented legend_geom: str = "point" # Documentation for the aesthetics. It is added under the # documentation for mapping parameter. Use {aesthetics} # placeholder to insert a table for all the aesthetics and # their default values. _aesthetics_doc: str = "{aesthetics_table}" def __init__( self, mapping: aes | None = None, data: DataLike | None = None, **kwargs: Any, ): kwargs = rename_aesthetics(kwargs) kwargs = data_mapping_as_kwargs((data, mapping), kwargs) self._kwargs = kwargs # Will be used to create stat & layer # separate aesthetics and parameters self.aes_params = { ae: kwargs[ae] for ae in self.aesthetics() & set(kwargs) } self.params = self.DEFAULT_PARAMS | { k: v for k, v in kwargs.items() if k in self.DEFAULT_PARAMS } self.mapping = kwargs["mapping"] self.data = kwargs["data"] self._stat = stat.from_geom(self) self._position = position.from_geom(self) self._verify_arguments(kwargs) # geom, stat, layer @staticmethod def from_stat(stat: stat) -> geom: """ Return an instantiated geom object geoms should not override this method. Parameters ---------- stat : `stat` Returns ------- : A geom object Raises ------ PlotnineError If unable to create a `geom`. 
""" name = stat.params["geom"] if isinstance(name, geom): return name if isinstance(name, type) and issubclass(name, geom): klass = name elif isinstance(name, str): if not name.startswith("geom_"): name = f"geom_{name}" klass = Registry[name] else: raise PlotnineError(f"Unknown geom of type {type(name)}") return klass(stat=stat, **stat._kwargs) @classmethod def aesthetics(cls: type[geom]) -> set[str]: """ Return all the aesthetics for this geom geoms should not override this method. """ main = cls.DEFAULT_AES.keys() | cls.REQUIRED_AES other = {"group"} # Need to recognize both spellings if "color" in main: other.add("colour") if "outlier_color" in main: other.add("outlier_colour") return main | other def __deepcopy__(self, memo: dict[Any, Any]) -> geom: """ Deep copy without copying the self.data dataframe geoms should not override this method. """ cls = self.__class__ result = cls.__new__(cls) memo[id(self)] = result old = self.__dict__ new = result.__dict__ # don't make a deepcopy of data, or environment shallow = {"data", "_kwargs", "environment"} for key, item in old.items(): if key in shallow: new[key] = item memo[id(new[key])] = new[key] else: new[key] = deepcopy(item, memo) return result def setup_params(self, data: pd.DataFrame): """ Override this method to verify and/or adjust parameters Parameters ---------- data : Data """ def setup_aes_params(self, data: pd.DataFrame): """ Override this method to verify and/or adjust aesthetic parameters Parameters ---------- data : Data """ def setup_data(self, data: pd.DataFrame) -> pd.DataFrame: """ Modify the data before drawing takes place This function is called *before* position adjustments are done. It is used by geoms to create the final aesthetics used for drawing. The base class method does nothing, geoms can override this method for two reasons: 1. 
The `stat` does not create all the aesthetics (usually position aesthetics) required for drawing the `geom`, but those aesthetics can be computed from the available data. For example [](`~plotnine.geoms.geom_boxplot`) and [](`~plotnine.geoms.geom_violin`). 2. The `geom` inherits from another `geom` (superclass) which does the drawing and the superclass requires certain aesthetics to be present in the data. For example [](`~plotnine.geoms.geom_tile`) and [](`~plotnine.geoms.geom_area`). Parameters ---------- data : Data used for drawing the geom. Returns ------- : Data used for drawing the geom. """ return data def use_defaults( self, data: pd.DataFrame, aes_modifiers: dict[str, Any] ) -> pd.DataFrame: """ Combine data with defaults and set aesthetics from parameters geoms should not override this method. Parameters ---------- data : Data used for drawing the geom. aes_modifiers : Aesthetics to evaluate Returns ------- : Data used for drawing the geom. """ from plotnine.mapping import _atomic as atomic from plotnine.mapping._atomic import ae_value missing_aes = ( self.DEFAULT_AES.keys() - self.aes_params.keys() - set(data.columns.to_list()) ) # Not in data and not set, use default for ae in missing_aes: data[ae] = self.DEFAULT_AES[ae] # Evaluate/Modify the mapped aesthetics evaled = evaluate(aes_modifiers, data, self.environment) for ae in evaled.columns.intersection(data.columns): data[ae] = evaled[ae] num_panels = len(data["PANEL"].unique()) if "PANEL" in data else 1 across_panels = num_panels > 1 and not self.params["inherit_aes"] # Aesthetics set as parameters in the geom/stat for ae, value in self.aes_params.items(): if isinstance(value, (str, int, float, np.integer, np.floating)): data[ae] = value elif isinstance(value, ae_value): data[ae] = value * len(data) elif across_panels: value = list(chain(*repeat(value, num_panels))) data[ae] = value else: # Try to make sense of aesthetics whose values can be tuples # or sequences of sorts. 
ae_value_cls: type[ae_value] | None = getattr(atomic, ae, None) if ae_value_cls: with suppress(ValueError): data[ae] = ae_value_cls(value) * len(data) continue # This should catch the aesthetic assignments to # non-numeric or non-string values or sequence of values. # e.g. x=datetime, x=Sequence[datetime], # x=Sequence[float], shape=Sequence[str] try: data[ae] = value except ValueError as e: msg = f"'{ae}={value}' does not look like a valid value" raise PlotnineError(msg) from e return data def draw_layer(self, data: pd.DataFrame, layout: Layout, coord: coord): """ Draw layer across all panels geoms should not override this method. Parameters ---------- data : DataFrame specific for this layer layout : Layout object created when the plot is getting built coord : Type of coordinate axes params : Combined *geom* and *stat* parameters. Also includes the stacking order of the layer in the plot (*zorder*) """ for pid, pdata in data.groupby("PANEL", observed=True): if len(pdata) == 0: continue ploc = pdata["PANEL"].iloc[0] - 1 panel_params = layout.panel_params[ploc] ax = layout.axs[ploc] self.draw_panel(pdata, panel_params, coord, ax) def draw_panel( self, data: pd.DataFrame, panel_params: panel_view, coord: coord, ax: Axes, ): """ Plot all groups For efficiency, geoms that do not need to partition different groups before plotting should override this method and avoid the groupby. Parameters ---------- data : Data to be plotted by this geom. This is the dataframe created in the plot_build pipeline. panel_params : The scale information as may be required by the axes. At this point, that information is about ranges, ticks and labels. Attributes are of interest to the geom are: ```python "panel_params.x.range" # tuple "panel_params.y.range" # tuple ``` coord : Coordinate (e.g. coord_cartesian) system of the geom. ax : Axes on which to plot. params : Combined parameters for the geom and stat. Also includes the `zorder`. 
""" for _, gdata in data.groupby("group"): gdata.reset_index(inplace=True, drop=True) self.draw_group(gdata, panel_params, coord, ax, self.params) @staticmethod def draw_group( data: pd.DataFrame, panel_params: panel_view, coord: coord, ax: Axes, params: dict[str, Any], ): """ Plot data belonging to a group. Parameters ---------- data : Data to be plotted by this geom. This is the dataframe created in the plot_build pipeline. panel_params : The scale information as may be required by the axes. At this point, that information is about ranges, ticks and labels. Keys of interest to the geom are: ```python "x_range" # tuple "y_range" # tuple ``` coord : coord Coordinate (e.g. coord_cartesian) system of the geom. ax : axes Axes on which to plot. params : dict Combined parameters for the geom and stat. Also includes the `zorder`. """ msg = "The geom should implement this method." raise NotImplementedError(msg) @staticmethod def draw_unit( data: pd.DataFrame, panel_params: panel_view, coord: coord, ax: Axes, params: dict[str, Any], ): """ Plot data belonging to a unit. A matplotlib plot function may require that an aethestic have a single unique value. e.g. `linestyle="dashed"`{.py} and not `linestyle=["dashed", "dotted", ...]`{.py}. A single call to such a function can only plot lines with the same linestyle. However, if the plot we want has more than one line with different linestyles, we need to group the lines with the same linestyle and plot them as one unit. In this case, draw_group calls this function to do the plotting. For an example see [](`~plotnine.geoms.geom_point`). Parameters ---------- data : Data to be plotted by this geom. This is the dataframe created in the plot_build pipeline. panel_params : The scale information as may be required by the axes. At this point, that information is about ranges, ticks and labels. Keys of interest to the geom are: ```python "x_range" # tuple "y_range" # tuple ``` In rare cases a geom may need access to the x or y scales. 
Those are available at: ```python "scales" # SimpleNamespace ``` coord : Coordinate (e.g. coord_cartesian) system of the geom. ax : Axes on which to plot. params : Combined parameters for the geom and stat. Also includes the `zorder`. """ msg = "The geom should implement this method." raise NotImplementedError(msg) def __radd__(self, other: ggplot) -> ggplot: """ Add layer representing geom object on the right Parameters ---------- plot : ggplot object Returns ------- : ggplot object with added layer. """ other += self.to_layer() # Add layer return other def to_layer(self) -> layer: """ Make a layer that represents this geom Returns ------- : Layer """ return layer.from_geom(self) def _verify_arguments(self, kwargs: dict[str, Any]): """ Verify arguments passed to the geom """ geom_stat_args = kwargs.keys() | self._stat._kwargs.keys() unknown = ( geom_stat_args - self.aesthetics() - self.DEFAULT_PARAMS.keys() # geom aesthetics - self._stat.aesthetics() # geom parameters - self._stat.DEFAULT_PARAMS.keys() # stat aesthetics - { # stat parameters "data", "mapping", "show_legend", # layer parameters "inherit_aes", "raster", } ) # layer parameters if unknown: msg = ( "Parameters {}, are not understood by " "either the geom, stat or layer." ) raise PlotnineError(msg.format(unknown)) def handle_na(self, data: pd.DataFrame) -> pd.DataFrame: """ Remove rows with NaN values geoms that infer extra information from missing values should override this method. For example [](`~plotnine.geoms.geom_path`). Parameters ---------- data : Data Returns ------- : Data without the NaNs. Notes ----- Shows a warning if the any rows are removed and the `na_rm` parameter is False. It only takes into account the columns of the required aesthetics. 
""" return remove_missing( data, self.params["na_rm"], list(self.REQUIRED_AES | self.NON_MISSING_AES), self.__class__.__name__, ) @staticmethod def draw_legend( data: pd.Series[Any], da: DrawingArea, lyr: layer ) -> DrawingArea: """ Draw a rectangle in the box Parameters ---------- data : A row of the data plotted to this layer da : Canvas on which to draw lyr : Layer that the geom belongs to. Returns ------- : The DrawingArea after a layer has been drawn onto it. """ msg = "The geom should implement this method." raise NotImplementedError(msg) @staticmethod def legend_key_size( data: pd.Series[Any], min_size: tuple[int, int], lyr: layer ) -> tuple[int, int]: """ Calculate the size of key that would fit the layer contents Parameters ---------- data : A row of the data plotted to this layer min_size : Initial size which should be expanded to fit the contents. lyr : Layer """ return min_size
geom
python
getsentry__sentry
tests/sentry/rules/history/backends/test_postgres.py
{ "start": 672, "end": 1640 }
class ____(BasePostgresRuleHistoryBackendTest):
    """Tests for the Postgres rule-history backend's `record` method."""

    def test(self) -> None:
        # Each record() call should append exactly one RuleFireHistory row
        # for the given (rule, group) pair; counts accumulate per pair.
        rule = Rule.objects.create(project=self.event.project)
        self.backend.record(rule, self.group)
        assert RuleFireHistory.objects.filter(rule=rule, group=self.group).count() == 1
        self.backend.record(rule, self.group)
        assert RuleFireHistory.objects.filter(rule=rule, group=self.group).count() == 2

        # Recording against a second group must not touch the first
        # group's count, and the per-rule total covers both groups.
        group_2 = self.create_group()
        self.backend.record(rule, group_2)
        assert RuleFireHistory.objects.filter(rule=rule, group=self.group).count() == 2
        assert RuleFireHistory.objects.filter(rule=rule, group=group_2).count() == 1
        assert RuleFireHistory.objects.filter(rule=rule).count() == 3

    def test_returns_new_instance(self) -> None:
        # record() should hand back the created history row, not None.
        rule = Rule.objects.create(project=self.event.project)
        new_instance = self.backend.record(rule, self.group)
        assert new_instance is not None


@freeze_time()
RecordTest
python
huggingface__transformers
src/transformers/models/mobilevit/modeling_mobilevit.py
{ "start": 18821, "end": 21770 }
class ____(nn.Module):
    """Five-stage MobileViT backbone: two MobileNet stages followed by
    three MobileViT (transformer) stages, built from `config`.
    """

    def __init__(self, config: MobileViTConfig) -> None:
        super().__init__()
        self.config = config

        self.layer = nn.ModuleList()
        self.gradient_checkpointing = False

        # segmentation architectures like DeepLab and PSPNet modify the strides
        # of the classification backbones
        dilate_layer_4 = dilate_layer_5 = False
        if config.output_stride == 8:
            dilate_layer_4 = True
            dilate_layer_5 = True
        elif config.output_stride == 16:
            dilate_layer_5 = True

        # Dilation doubles at each dilated stage instead of striding, so the
        # spatial resolution stops shrinking past the requested output_stride.
        dilation = 1

        layer_1 = MobileViTMobileNetLayer(
            config,
            in_channels=config.neck_hidden_sizes[0],
            out_channels=config.neck_hidden_sizes[1],
            stride=1,
            num_stages=1,
        )
        self.layer.append(layer_1)

        layer_2 = MobileViTMobileNetLayer(
            config,
            in_channels=config.neck_hidden_sizes[1],
            out_channels=config.neck_hidden_sizes[2],
            stride=2,
            num_stages=3,
        )
        self.layer.append(layer_2)

        layer_3 = MobileViTLayer(
            config,
            in_channels=config.neck_hidden_sizes[2],
            out_channels=config.neck_hidden_sizes[3],
            stride=2,
            hidden_size=config.hidden_sizes[0],
            num_stages=2,
        )
        self.layer.append(layer_3)

        if dilate_layer_4:
            dilation *= 2

        layer_4 = MobileViTLayer(
            config,
            in_channels=config.neck_hidden_sizes[3],
            out_channels=config.neck_hidden_sizes[4],
            stride=2,
            hidden_size=config.hidden_sizes[1],
            num_stages=4,
            dilation=dilation,
        )
        self.layer.append(layer_4)

        if dilate_layer_5:
            dilation *= 2

        layer_5 = MobileViTLayer(
            config,
            in_channels=config.neck_hidden_sizes[4],
            out_channels=config.neck_hidden_sizes[5],
            stride=2,
            hidden_size=config.hidden_sizes[2],
            num_stages=3,
            dilation=dilation,
        )
        self.layer.append(layer_5)

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ) -> Union[tuple, BaseModelOutputWithNoAttention]:
        """Run the input through all five stages sequentially.

        When `output_hidden_states` is True, the output of every stage is
        collected (the raw input itself is not included).
        """
        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)


@auto_docstring
MobileViTEncoder
python
tornadoweb__tornado
tornado/test/twisted_test.py
{ "start": 1446, "end": 2104 }
class ____(AsyncTestCase):
    """Verify that Twisted Deferreds produced by @inlineCallbacks can be
    yielded directly from Tornado gen_test coroutines, for both success
    and failure paths.
    """

    @gen_test
    def test_success(self):
        # A Deferred resolving to a value should yield that value.
        @inlineCallbacks
        def fn():
            if False:
                # inlineCallbacks doesn't work with regular functions;
                # must have a yield even if it's unreachable.
                yield
            return 42

        res = yield fn()
        self.assertEqual(res, 42)

    @gen_test
    def test_failure(self):
        # A Deferred that errbacks should surface the original exception
        # when yielded.
        @inlineCallbacks
        def fn():
            if False:
                yield
            1 / 0

        with self.assertRaises(ZeroDivisionError):
            yield fn()


if __name__ == "__main__":
    unittest.main()
ConvertDeferredTest