language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
huggingface__transformers
src/transformers/models/modernbert/modular_modernbert.py
{ "start": 23553, "end": 28276 }
class ____(Gemma3RotaryEmbedding): def __init__(self, config: ModernBertConfig, device=None): super().__init__(config, device) @staticmethod def compute_default_rope_parameters( config: Optional[ModernBertConfig] = None, device: Optional["torch.device"] = None, seq_len: Optional[int] = None, layer_type: Optional[str] = None, ) -> tuple["torch.Tensor", float]: return super().compute_default_rope_parameters(config, device, seq_len, layer_type) def eager_attention_forward( module: "ModernBertAttention", qkv: torch.Tensor, attention_mask: torch.Tensor, sliding_window_mask: torch.Tensor, position_ids: Optional[torch.LongTensor], local_attention: tuple[int, int], bs: int, dim: int, position_embeddings: torch.Tensor, output_attentions: Optional[bool] = False, **_kwargs, ) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]: # qkv: [batch_size, seqlen, 3, nheads, headdim] cos, sin = position_embeddings query, key, value = qkv.transpose(3, 1).unbind(dim=2) # query, key, value: [batch_size, heads, seq_len, head_dim] query, key = apply_rotary_pos_emb(query, key, cos, sin) scale = module.head_dim**-0.5 attn_weights = torch.matmul(query, key.transpose(2, 3)) * scale if local_attention != (-1, -1): attention_mask = sliding_window_mask attn_weights = attn_weights + attention_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=module.attention_dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bs, -1, dim) if output_attentions: return (attn_output, attn_weights) return (attn_output,) def flash_attention_forward( module: "ModernBertAttention", qkv: torch.Tensor, rotary_emb: ModernBertUnpaddedRotaryEmbedding, cu_seqlens: torch.Tensor, max_seqlen: int, local_attention: tuple[int, int], bs: int, dim: int, target_dtype: 
torch.dtype = torch.bfloat16, **_kwargs, ) -> tuple[torch.Tensor]: # (total_seqlen, 3, nheads, headdim) qkv = rotary_emb(qkv, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen) convert_dtype = qkv.dtype not in (torch.float16, torch.bfloat16) if convert_dtype: # FA2 implementation only supports fp16 and bf16. If FA2 is supported, # bfloat16 must be supported as of FA2 2.5.7. (Turing GPUs not supported) orig_dtype = qkv.dtype qkv = qkv.to(target_dtype) attn = flash_attn_varlen_qkvpacked_func( qkv, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, dropout_p=module.attention_dropout if module.training else 0.0, deterministic=module.deterministic_flash_attn, window_size=local_attention, ) attn = attn.to(orig_dtype) # type: ignore else: attn = flash_attn_varlen_qkvpacked_func( qkv, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, dropout_p=module.attention_dropout if module.training else 0.0, deterministic=module.deterministic_flash_attn, window_size=local_attention, ) return (attn.view(bs, dim),) def sdpa_attention_forward( module: "ModernBertAttention", qkv: torch.Tensor, attention_mask: torch.Tensor, sliding_window_mask: torch.Tensor, position_ids: Optional[torch.LongTensor], local_attention: tuple[int, int], bs: int, dim: int, position_embeddings: torch.Tensor, **_kwargs, ) -> tuple[torch.Tensor]: # qkv: [batch_size, seqlen, 3, nheads, headdim] cos, sin = position_embeddings query, key, value = qkv.transpose(3, 1).unbind(dim=2) # query, key, value: [batch_size, heads, seq_len, head_dim] query, key = apply_rotary_pos_emb(query, key, cos, sin) if local_attention != (-1, -1): attention_mask = sliding_window_mask attn_output = ( F.scaled_dot_product_attention( query, key, value, dropout_p=module.attention_dropout if module.training else 0.0, attn_mask=attention_mask, ) .transpose(1, 2) .contiguous() ) attn_output = attn_output.view(bs, -1, dim) return (attn_output,) MODERNBERT_ATTENTION_FUNCTION = { "flash_attention_2": flash_attention_forward, "eager": eager_attention_forward, 
"sdpa": sdpa_attention_forward, }
ModernBertRotaryEmbedding
python
ansible__ansible
test/lib/ansible_test/_internal/containers.py
{ "start": 19251, "end": 23127 }
class ____: """Context object for tracking information relating to access of support containers.""" def __init__(self, containers: ContainerDatabase, process: t.Optional[SshProcess]) -> None: self.containers = containers self.process = process def close(self) -> None: """Close the process maintaining the port forwards.""" if not self.process: return # forwarding not in use self.process.terminate() display.info('Waiting for the session SSH port forwarding process to terminate.', verbosity=1) self.process.wait() @contextlib.contextmanager def support_container_context( args: EnvironmentConfig, ssh: t.Optional[SshConnectionDetail], ) -> c.Iterator[t.Optional[ContainerDatabase]]: """Create a context manager for integration tests that use support containers.""" if not isinstance(args, (IntegrationConfig, UnitsConfig, SanityConfig, ShellConfig)): yield None # containers are only needed for commands that have targets (hosts or pythons) return containers = get_container_database(args) if not containers.data: yield ContainerDatabase({}) # no containers are being used, return an empty database return context = create_support_container_context(args, ssh, containers) try: yield context.containers finally: context.close() def create_support_container_context( args: EnvironmentConfig, ssh: t.Optional[SshConnectionDetail], containers: ContainerDatabase, ) -> SupportContainerContext: """Context manager that provides SSH port forwards. 
Returns updated container metadata.""" host_type = HostType.control revised = ContainerDatabase(containers.data.copy()) source = revised.data.pop(HostType.origin, None) container_map: dict[tuple[str, int], tuple[str, str, int]] = {} if host_type not in revised.data: if not source: raise Exception('Missing origin container details.') for context_name, context in source.items(): for container_name, container in context.items(): if '-controller-' in container_name: continue # hack to avoid exposing the controller container to the controller for port, access_port in container.port_map(): container_map[(container.host_ip, access_port)] = (context_name, container_name, port) if not container_map: return SupportContainerContext(revised, None) if not ssh: raise Exception('The %s host was not pre-configured for container access and SSH forwarding is not available.' % host_type) forwards = list(container_map.keys()) process = create_ssh_port_forwards(args, ssh, forwards) result = SupportContainerContext(revised, process) try: port_forwards = process.collect_port_forwards() contexts: dict[str, dict[str, ContainerAccess]] = {} for forward, forwarded_port in port_forwards.items(): access_host, access_port = forward context_name, container_name, container_port = container_map[(access_host, access_port)] container = source[context_name][container_name] context = contexts.setdefault(context_name, {}) forwarded_container = context.setdefault(container_name, ContainerAccess('127.0.0.1', container.names, None, {})) forwarded_container.forwards[container_port] = forwarded_port display.info('Container "%s" port %d available at %s:%d is forwarded over SSH as port %d.' % ( container_name, container_port, access_host, access_port, forwarded_port, ), verbosity=1) revised.data[host_type] = contexts return result except Exception: result.close() raise
SupportContainerContext
python
python-excel__xlwt
xlwt/BIFFRecords.py
{ "start": 13161, "end": 13304 }
class ____(BiffRecord): """ """ _REC_ID = 0x01B7 def __init__(self): self._rec_data = pack('<H', 0x00)
RefreshAllRecord
python
doocs__leetcode
solution/0300-0399/0308.Range Sum Query 2D - Mutable/Solution.py
{ "start": 449, "end": 1310 }
class ____: def __init__(self, matrix: List[List[int]]): self.trees = [] n = len(matrix[0]) for row in matrix: tree = BinaryIndexedTree(n) for j, v in enumerate(row): tree.update(j + 1, v) self.trees.append(tree) def update(self, row: int, col: int, val: int) -> None: tree = self.trees[row] prev = tree.query(col + 1) - tree.query(col) tree.update(col + 1, val - prev) def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int: return sum( tree.query(col2 + 1) - tree.query(col1) for tree in self.trees[row1 : row2 + 1] ) # Your NumMatrix object will be instantiated and called as such: # obj = NumMatrix(matrix) # obj.update(row,col,val) # param_2 = obj.sumRegion(row1,col1,row2,col2)
NumMatrix
python
automl__auto-sklearn
autosklearn/pipeline/components/base.py
{ "start": 5466, "end": 5996 }
class ____(AutoSklearnComponent): def fit(self, X, y, sample_weight=None): self.iterative_fit(X, y, n_iter=2, refit=True) iteration = 2 while not self.configuration_fully_fitted(): n_iter = int(2**iteration / 2) self.iterative_fit(X, y, n_iter=n_iter, refit=False) iteration += 1 return self @staticmethod def get_max_iter(): raise NotImplementedError() def get_current_iter(self): raise NotImplementedError()
IterativeComponent
python
sympy__sympy
sympy/physics/biomechanics/curve.py
{ "start": 32778, "end": 46209 }
class ____(CharacteristicCurveFunction): r"""Active muscle fiber force-length curve based on De Groote et al., 2016 [1]_. Explanation =========== The function is defined by the equation: $fl_{\text{act}}^M = c_0 \exp\left(-\frac{1}{2}\left(\frac{\tilde{l}^M - c_1}{c_2 + c_3 \tilde{l}^M}\right)^2\right) + c_4 \exp\left(-\frac{1}{2}\left(\frac{\tilde{l}^M - c_5}{c_6 + c_7 \tilde{l}^M}\right)^2\right) + c_8 \exp\left(-\frac{1}{2}\left(\frac{\tilde{l}^M - c_9}{c_{10} + c_{11} \tilde{l}^M}\right)^2\right)$ with constant values of $c0 = 0.814$, $c1 = 1.06$, $c2 = 0.162$, $c3 = 0.0633$, $c4 = 0.433$, $c5 = 0.717$, $c6 = -0.0299$, $c7 = 0.2$, $c8 = 0.1$, $c9 = 1.0$, $c10 = 0.354$, and $c11 = 0.0$. While it is possible to change the constant values, these were carefully selected in the original publication to give the characteristic curve specific and required properties. For example, the function produces a active fiber force of 1 at a normalized fiber length of 1, and an active fiber force of 0 at normalized fiber lengths of 0 and 2. Examples ======== The preferred way to instantiate :class:`FiberForceLengthActiveDeGroote2016` is using the :meth:`~.with_defaults` constructor because this will automatically populate the constants within the characteristic curve equation with the floating point values from the original publication. This constructor takes a single argument corresponding to normalized muscle fiber length. We'll create a :class:`~.Symbol` called ``l_M_tilde`` to represent this. >>> from sympy import Symbol >>> from sympy.physics.biomechanics import FiberForceLengthActiveDeGroote2016 >>> l_M_tilde = Symbol('l_M_tilde') >>> fl_M = FiberForceLengthActiveDeGroote2016.with_defaults(l_M_tilde) >>> fl_M FiberForceLengthActiveDeGroote2016(l_M_tilde, 0.814, 1.06, 0.162, 0.0633, 0.433, 0.717, -0.0299, 0.2, 0.1, 1.0, 0.354, 0.0) It's also possible to populate the two constants with your own values too. 
>>> from sympy import symbols >>> c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11 = symbols('c0:12') >>> fl_M = FiberForceLengthActiveDeGroote2016(l_M_tilde, c0, c1, c2, c3, ... c4, c5, c6, c7, c8, c9, c10, c11) >>> fl_M FiberForceLengthActiveDeGroote2016(l_M_tilde, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11) You don't just have to use symbols as the arguments, it's also possible to use expressions. Let's create a new pair of symbols, ``l_M`` and ``l_M_opt``, representing muscle fiber length and optimal muscle fiber length respectively. We can then represent ``l_M_tilde`` as an expression, the ratio of these. >>> l_M, l_M_opt = symbols('l_M l_M_opt') >>> l_M_tilde = l_M/l_M_opt >>> fl_M = FiberForceLengthActiveDeGroote2016.with_defaults(l_M_tilde) >>> fl_M FiberForceLengthActiveDeGroote2016(l_M/l_M_opt, 0.814, 1.06, 0.162, 0.0633, 0.433, 0.717, -0.0299, 0.2, 0.1, 1.0, 0.354, 0.0) To inspect the actual symbolic expression that this function represents, we can call the :meth:`~.doit` method on an instance. We'll use the keyword argument ``evaluate=False`` as this will keep the expression in its canonical form and won't simplify any constants. >>> fl_M.doit(evaluate=False) 0.814*exp(-(l_M/l_M_opt - 1.06)**2/(2*(0.0633*l_M/l_M_opt + 0.162)**2)) + 0.433*exp(-(l_M/l_M_opt - 0.717)**2/(2*(0.2*l_M/l_M_opt - 0.0299)**2)) + 0.1*exp(-3.98991349867535*(l_M/l_M_opt - 1.0)**2) The function can also be differentiated. We'll differentiate with respect to l_M using the ``diff`` method on an instance with the single positional argument ``l_M``. 
>>> fl_M.diff(l_M) ((-0.79798269973507*l_M/l_M_opt + 0.79798269973507)*exp(-3.98991349867535*(l_M/l_M_opt - 1.0)**2) + (0.433*(-l_M/l_M_opt + 0.717)/(0.2*l_M/l_M_opt - 0.0299)**2 + 0.0866*(l_M/l_M_opt - 0.717)**2/(0.2*l_M/l_M_opt - 0.0299)**3)*exp(-(l_M/l_M_opt - 0.717)**2/(2*(0.2*l_M/l_M_opt - 0.0299)**2)) + (0.814*(-l_M/l_M_opt + 1.06)/(0.0633*l_M/l_M_opt + 0.162)**2 + 0.0515262*(l_M/l_M_opt - 1.06)**2/(0.0633*l_M/l_M_opt + 0.162)**3)*exp(-(l_M/l_M_opt - 1.06)**2/(2*(0.0633*l_M/l_M_opt + 0.162)**2)))/l_M_opt References ========== .. [1] De Groote, F., Kinney, A. L., Rao, A. V., & Fregly, B. J., Evaluation of direct collocation optimal control problem formulations for solving the muscle redundancy problem, Annals of biomedical engineering, 44(10), (2016) pp. 2922-2936 """ @classmethod def with_defaults(cls, l_M_tilde): r"""Recommended constructor that will use the published constants. Explanation =========== Returns a new instance of the inverse muscle fiber act force-length function using the four constant values specified in the original publication. These have the values: $c0 = 0.814$ $c1 = 1.06$ $c2 = 0.162$ $c3 = 0.0633$ $c4 = 0.433$ $c5 = 0.717$ $c6 = -0.0299$ $c7 = 0.2$ $c8 = 0.1$ $c9 = 1.0$ $c10 = 0.354$ $c11 = 0.0$ Parameters ========== fl_M_act : Any (sympifiable) Normalized passive muscle fiber force as a function of muscle fiber length. """ c0 = Float('0.814') c1 = Float('1.06') c2 = Float('0.162') c3 = Float('0.0633') c4 = Float('0.433') c5 = Float('0.717') c6 = Float('-0.0299') c7 = Float('0.2') c8 = Float('0.1') c9 = Float('1.0') c10 = Float('0.354') c11 = Float('0.0') return cls(l_M_tilde, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11) @classmethod def eval(cls, l_M_tilde, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11): """Evaluation of basic inputs. Parameters ========== l_M_tilde : Any (sympifiable) Normalized muscle fiber length. c0 : Any (sympifiable) The first constant in the characteristic equation. The published value is ``0.814``. 
c1 : Any (sympifiable) The second constant in the characteristic equation. The published value is ``1.06``. c2 : Any (sympifiable) The third constant in the characteristic equation. The published value is ``0.162``. c3 : Any (sympifiable) The fourth constant in the characteristic equation. The published value is ``0.0633``. c4 : Any (sympifiable) The fifth constant in the characteristic equation. The published value is ``0.433``. c5 : Any (sympifiable) The sixth constant in the characteristic equation. The published value is ``0.717``. c6 : Any (sympifiable) The seventh constant in the characteristic equation. The published value is ``-0.0299``. c7 : Any (sympifiable) The eighth constant in the characteristic equation. The published value is ``0.2``. c8 : Any (sympifiable) The ninth constant in the characteristic equation. The published value is ``0.1``. c9 : Any (sympifiable) The tenth constant in the characteristic equation. The published value is ``1.0``. c10 : Any (sympifiable) The eleventh constant in the characteristic equation. The published value is ``0.354``. c11 : Any (sympifiable) The tweflth constant in the characteristic equation. The published value is ``0.0``. """ pass def _eval_evalf(self, prec): """Evaluate the expression numerically using ``evalf``.""" return self.doit(deep=False, evaluate=False)._eval_evalf(prec) def doit(self, deep=True, evaluate=True, **hints): """Evaluate the expression defining the function. Parameters ========== deep : bool Whether ``doit`` should be recursively called. Default is ``True``. evaluate : bool. Whether the SymPy expression should be evaluated as it is constructed. If ``False``, then no constant folding will be conducted which will leave the expression in a more numerically- stable for values of ``l_M_tilde`` that correspond to a sensible operating range for a musculotendon. Default is ``True``. **kwargs : dict[str, Any] Additional keyword argument pairs to be recursively passed to ``doit``. 
""" l_M_tilde, *constants = self.args if deep: hints['evaluate'] = evaluate l_M_tilde = l_M_tilde.doit(deep=deep, **hints) constants = [c.doit(deep=deep, **hints) for c in constants] c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11 = constants if evaluate: return ( c0*exp(-(((l_M_tilde - c1)/(c2 + c3*l_M_tilde))**2)/2) + c4*exp(-(((l_M_tilde - c5)/(c6 + c7*l_M_tilde))**2)/2) + c8*exp(-(((l_M_tilde - c9)/(c10 + c11*l_M_tilde))**2)/2) ) return ( c0*exp(-((UnevaluatedExpr(l_M_tilde - c1)/(c2 + c3*l_M_tilde))**2)/2) + c4*exp(-((UnevaluatedExpr(l_M_tilde - c5)/(c6 + c7*l_M_tilde))**2)/2) + c8*exp(-((UnevaluatedExpr(l_M_tilde - c9)/(c10 + c11*l_M_tilde))**2)/2) ) def fdiff(self, argindex=1): """Derivative of the function with respect to a single argument. Parameters ========== argindex : int The index of the function's arguments with respect to which the derivative should be taken. Argument indexes start at ``1``. Default is ``1``. """ l_M_tilde, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11 = self.args if argindex == 1: return ( c0*( c3*(l_M_tilde - c1)**2/(c2 + c3*l_M_tilde)**3 + (c1 - l_M_tilde)/((c2 + c3*l_M_tilde)**2) )*exp(-(l_M_tilde - c1)**2/(2*(c2 + c3*l_M_tilde)**2)) + c4*( c7*(l_M_tilde - c5)**2/(c6 + c7*l_M_tilde)**3 + (c5 - l_M_tilde)/((c6 + c7*l_M_tilde)**2) )*exp(-(l_M_tilde - c5)**2/(2*(c6 + c7*l_M_tilde)**2)) + c8*( c11*(l_M_tilde - c9)**2/(c10 + c11*l_M_tilde)**3 + (c9 - l_M_tilde)/((c10 + c11*l_M_tilde)**2) )*exp(-(l_M_tilde - c9)**2/(2*(c10 + c11*l_M_tilde)**2)) ) elif argindex == 2: return exp(-(l_M_tilde - c1)**2/(2*(c2 + c3*l_M_tilde)**2)) elif argindex == 3: return ( c0*(l_M_tilde - c1)/(c2 + c3*l_M_tilde)**2 *exp(-(l_M_tilde - c1)**2 /(2*(c2 + c3*l_M_tilde)**2)) ) elif argindex == 4: return ( c0*(l_M_tilde - c1)**2/(c2 + c3*l_M_tilde)**3 *exp(-(l_M_tilde - c1)**2/(2*(c2 + c3*l_M_tilde)**2)) ) elif argindex == 5: return ( c0*l_M_tilde*(l_M_tilde - c1)**2/(c2 + c3*l_M_tilde)**3 *exp(-(l_M_tilde - c1)**2/(2*(c2 + c3*l_M_tilde)**2)) ) elif 
argindex == 6: return exp(-(l_M_tilde - c5)**2/(2*(c6 + c7*l_M_tilde)**2)) elif argindex == 7: return ( c4*(l_M_tilde - c5)/(c6 + c7*l_M_tilde)**2 *exp(-(l_M_tilde - c5)**2 /(2*(c6 + c7*l_M_tilde)**2)) ) elif argindex == 8: return ( c4*(l_M_tilde - c5)**2/(c6 + c7*l_M_tilde)**3 *exp(-(l_M_tilde - c5)**2/(2*(c6 + c7*l_M_tilde)**2)) ) elif argindex == 9: return ( c4*l_M_tilde*(l_M_tilde - c5)**2/(c6 + c7*l_M_tilde)**3 *exp(-(l_M_tilde - c5)**2/(2*(c6 + c7*l_M_tilde)**2)) ) elif argindex == 10: return exp(-(l_M_tilde - c9)**2/(2*(c10 + c11*l_M_tilde)**2)) elif argindex == 11: return ( c8*(l_M_tilde - c9)/(c10 + c11*l_M_tilde)**2 *exp(-(l_M_tilde - c9)**2 /(2*(c10 + c11*l_M_tilde)**2)) ) elif argindex == 12: return ( c8*(l_M_tilde - c9)**2/(c10 + c11*l_M_tilde)**3 *exp(-(l_M_tilde - c9)**2/(2*(c10 + c11*l_M_tilde)**2)) ) elif argindex == 13: return ( c8*l_M_tilde*(l_M_tilde - c9)**2/(c10 + c11*l_M_tilde)**3 *exp(-(l_M_tilde - c9)**2/(2*(c10 + c11*l_M_tilde)**2)) ) raise ArgumentIndexError(self, argindex) def _latex(self, printer): """Print a LaTeX representation of the function defining the curve. Parameters ========== printer : Printer The printer to be used to print the LaTeX string representation. """ l_M_tilde = self.args[0] _l_M_tilde = printer._print(l_M_tilde) return r'\operatorname{fl}^M_{act} \left( %s \right)' % _l_M_tilde
FiberForceLengthActiveDeGroote2016
python
keras-team__keras
keras/src/backend/torch/optimizers/torch_sgd.py
{ "start": 121, "end": 1175 }
class ____(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.SGD): def _parallel_update_step( self, grads, variables, learning_rate, ): keras_variables = variables variables = [v.value for v in variables] if self.momentum != 0: bufs = [ self.momentums[self._get_variable_index(variable)].value for variable in keras_variables ] for i in range(len(bufs)): if bufs[i] is None: bufs[i] = torch.clone(grads[i]).detach() torch._foreach_mul_(bufs, self.momentum) torch._foreach_add_(bufs, grads, alpha=-learning_rate) if self.nesterov: torch._foreach_add_(variables, grads, alpha=-learning_rate) torch._foreach_add_(variables, bufs, alpha=self.momentum) else: torch._foreach_add_(variables, bufs) else: torch._foreach_add_(variables, grads, alpha=-learning_rate)
SGD
python
astropy__astropy
astropy/table/tests/test_table.py
{ "start": 18370, "end": 19865 }
class ____(SetupData): def test_override_name(self, table_types): self._setup(table_types) t = table_types.Table() # Check that we can override the name of the input column in the Table t.add_column(self.a, name="b") t.add_column(self.b, name="a") assert t.colnames == ["b", "a"] # Check that we did not change the name of the input column assert self.a.info.name == "a" assert self.b.info.name == "b" # Now test with an input column from another table t2 = table_types.Table() t2.add_column(t["a"], name="c") assert t2.colnames == ["c"] # Check that we did not change the name of the input column assert t.colnames == ["b", "a"] # Check that we can give a name if none was present col = table_types.Column([1, 2, 3]) t.add_column(col, name="c") assert t.colnames == ["b", "a", "c"] def test_default_name(self, table_types): t = table_types.Table() col = table_types.Column([1, 2, 3]) t.add_column(col) assert t.colnames == ["col0"] def test_setting_column_name_to_with_invalid_type(self, table_types): t = table_types.Table() t["a"] = [1, 2] with pytest.raises( TypeError, match=r"Expected a str value, got None with type NoneType" ): t["a"].name = None assert t["a"].name == "a" @pytest.mark.usefixtures("table_types")
TestAddName
python
xlwings__xlwings
xlwings/constants.py
{ "start": 115157, "end": 115277 }
class ____: xlCodePage = 2 # from enum XlSortMethodOld xlSyllabary = 1 # from enum XlSortMethodOld
SortMethodOld
python
pydata__xarray
asv_bench/benchmarks/dataset_io.py
{ "start": 4781, "end": 6415 }
class ____(IOSingleNetCDF): def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. _skip_slow() requires_dask() self.make_ds() self.filepath = "test_single_file.nc4.nc" self.format = "NETCDF4" self.ds.to_netcdf(self.filepath, format=self.format) def time_load_dataset_netcdf4_with_block_chunks(self): xr.open_dataset( self.filepath, engine="netcdf4", chunks=self.block_chunks ).load() def time_load_dataset_netcdf4_with_block_chunks_oindexing(self): ds = xr.open_dataset(self.filepath, engine="netcdf4", chunks=self.block_chunks) ds = ds.isel(**self.oinds).load() def time_load_dataset_netcdf4_with_block_chunks_vindexing(self): ds = xr.open_dataset(self.filepath, engine="netcdf4", chunks=self.block_chunks) ds = ds.isel(**self.vinds).load() def time_load_dataset_netcdf4_with_block_chunks_multiprocessing(self): with dask.config.set(scheduler="multiprocessing"): xr.open_dataset( self.filepath, engine="netcdf4", chunks=self.block_chunks ).load() def time_load_dataset_netcdf4_with_time_chunks(self): xr.open_dataset(self.filepath, engine="netcdf4", chunks=self.time_chunks).load() def time_load_dataset_netcdf4_with_time_chunks_multiprocessing(self): with dask.config.set(scheduler="multiprocessing"): xr.open_dataset( self.filepath, engine="netcdf4", chunks=self.time_chunks ).load()
IOReadSingleNetCDF4Dask
python
xlwings__xlwings
tests/common.py
{ "start": 356, "end": 1698 }
class ____(unittest.TestCase): def __init__(self, methodName): super(TestBase, self).__init__(methodName) # Patch the test method being run to skip the test if it # throws NotImplementedError. This allows us to not consider # such tests failures, though they will still show up (as # skipped tests). test_method = getattr(self, methodName) def wrapped_method(self, *args, **kwargs): try: return test_method(*args, **kwargs) except NotImplementedError: self.skipTest("Test body threw NotImplementedError.") setattr(self, methodName, types.MethodType(wrapped_method, self)) @classmethod def setUpClass(cls): cls.app1 = xw.App(visible=False, spec=SPEC) cls.app2 = xw.App(visible=False, spec=SPEC) def setUp(self): self.wb1 = self.app1.books.add() self.wb2 = self.app2.books.add() for wb in [self.wb1, self.wb2]: if len(wb.sheets) == 1: wb.sheets.add(after=1) wb.sheets.add(after=2) wb.sheets[0].select() def tearDown(self): for app in [self.app1, self.app2]: app.books[-1].close() @classmethod def tearDownClass(cls): cls.app1.kill() cls.app2.kill()
TestBase
python
huggingface__transformers
tests/models/llava_next_video/test_modeling_llava_next_video.py
{ "start": 6618, "end": 12612 }
class ____(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): """ Model tester for `LlavaNextVideoForConditionalGeneration`. """ all_model_classes = ( ( LlavaNextVideoModel, LlavaNextVideoForConditionalGeneration, ) if is_torch_available() else () ) _is_composite = True def setUp(self): self.model_tester = LlavaNextVideoVisionText2TextModelTester(self) common_properties = ["image_token_index", "video_token_index", "vision_feature_layer", "image_seq_length"] self.config_tester = ConfigTester( self, config_class=LlavaNextVideoConfig, has_text_modality=False, common_properties=common_properties ) def test_config(self): self.config_tester.run_common_tests() def test_mismatching_num_image_tokens(self): """ Tests that VLMs through an error with explicit message saying what is wrong when number of images don't match number of image tokens in the text. Also we need to test multi-image cases when one prompr has multiple image tokens. """ config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config).to(torch_device) model.eval() curr_input_dict = copy.deepcopy(input_dict) # in=place modifications further _ = model(**curr_input_dict) # successful forward with no modifications # remove one image but leave the image token in text curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-1:, ...] curr_input_dict["image_sizes"] = curr_input_dict["image_sizes"][-1:, ...] 
with self.assertRaises(ValueError): _ = model(**curr_input_dict) # simulate multi-image case by concatenating inputs where each has exactly one image/image-token input_ids = curr_input_dict["input_ids"][:1] pixel_values = curr_input_dict["pixel_values"][:1] image_sizes = curr_input_dict["image_sizes"][:1] input_ids = torch.cat([input_ids, input_ids], dim=0) # one image and two image tokens raise an error with self.assertRaises(ValueError): _ = model(input_ids=input_ids, pixel_values=pixel_values, image_sizes=image_sizes) # two images and two image tokens don't raise an error pixel_values = torch.cat([pixel_values, pixel_values], dim=0) image_sizes = torch.cat([image_sizes, image_sizes], dim=0) _ = model(input_ids=input_ids, pixel_values=pixel_values, image_sizes=image_sizes) def test_odd_sized_image(self): # prepare model configuration config = self.model_tester.get_config() # prepare input num_image_tokens = 24 pixel_values = floats_tensor([1, 5, 3, config.vision_config.image_size, config.vision_config.image_size]) input_ids = ids_tensor([1, 64], config.text_config.vocab_size - 2) + 2 input_ids[:, :num_image_tokens] = config.image_token_index attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device) inputs_dict = { "pixel_values": pixel_values, "image_sizes": torch.tensor([[13, 16]]), # odd-sized image "input_ids": input_ids, "attention_mask": attention_mask, } # forward with odd-sized image input for model_class in self.all_model_classes: model = model_class(config).to(torch_device) model(**inputs_dict) @parameterized.expand( [ (-1,), ([-1],), ([-1, -2],), ], ) def test_vision_feature_layers(self, vision_feature_layer): """ Test that we can use either one vision feature layer, or a list of vision feature layers. 
""" config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.vision_feature_layer = vision_feature_layer num_feature_layers = 1 if isinstance(vision_feature_layer, int) else len(vision_feature_layer) hidden_size = config.vision_config.hidden_size expected_features = hidden_size * num_feature_layers for model_class in self.all_model_classes: model = model_class(config).to(torch_device) # We should have the right number of input features, # and should be able to run a forward pass without exploding base_model = getattr(model, "model", model) assert base_model.multi_modal_projector.linear_1.in_features == expected_features model(**input_dict) @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip("FlashAttention only support fp16 and bf16 data type") def test_flash_attn_2_fp32_ln(self): pass @unittest.skip( "VLMs need lots of steps to prepare images/mask correctly to get pad-free inputs. Can be tested as part of LLM test" ) def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self): pass @require_torch
LlavaNextVideoForConditionalGenerationModelTest
python
tensorflow__tensorflow
tensorflow/compiler/tests/spacetobatch_op_test.py
{ "start": 7347, "end": 12316 }
class ____(xla_test.XLATestCase): """Tests input-output pairs for the SpaceToBatchND and BatchToSpaceND ops.""" def _testPad(self, inputs, block_shape, paddings, outputs): block_shape = np.array(block_shape) paddings = np.array(paddings).reshape((len(block_shape), 2)) with self.session() as sess, self.test_scope(): for dtype in self.float_types: # TODO(b/68813416): Skip bfloat16's as the input type for direct is # float32 and results in a mismatch, while making testDirect provide the # correctly typed input results in 'no fill-function for data-type' # error. if dtype == dtypes.bfloat16.as_numpy_dtype: continue if dtype == np.float16: actual_inputs = np.array(inputs).astype(dtype) actual_paddings = np.array(paddings).astype(dtype) expected_outputs = np.array(outputs).astype(dtype) else: actual_inputs = inputs actual_paddings = paddings expected_outputs = outputs placeholder = array_ops.placeholder(dtype) # outputs = space_to_batch(inputs) x_tf = array_ops.space_to_batch_nd(placeholder, block_shape, actual_paddings) self.assertAllEqual( sess.run(x_tf, {placeholder: actual_inputs}), expected_outputs) # inputs = batch_to_space(outputs) placeholder = array_ops.placeholder(dtype) x_tf = array_ops.batch_to_space_nd(placeholder, block_shape, actual_paddings) self.assertAllEqual( sess.run(x_tf, {placeholder: expected_outputs}), actual_inputs) def _testDirect(self, input_shape, block_shape, paddings): inputs = np.arange(np.prod(input_shape), dtype=np.float32) inputs = inputs.reshape(input_shape) self._testPad(inputs, block_shape, paddings, space_to_batch_direct(inputs, block_shape, paddings)) def testZeroBlockDimsZeroRemainingDims(self): self._testPad( inputs=[1, 2], block_shape=[], paddings=[], outputs=[1, 2],) def testZeroBlockDimsOneRemainingDim(self): self._testPad( inputs=[[1, 2], [3, 4]], block_shape=[], paddings=[], outputs=[[1, 2], [3, 4]]) # Same thing, but with a no-op block dim. 
self._testPad( inputs=[[1, 2], [3, 4]], block_shape=[1], paddings=[[0, 0]], outputs=[[1, 2], [3, 4]]) def testZeroBlockDimsTwoRemainingDims(self): self._testPad( inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], block_shape=[], paddings=[], outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # Same thing, but with a no-op block dim. self._testPad( inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], block_shape=[1], paddings=[[0, 0]], outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # Same thing, but with two no-op block dims. self._testPad( inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], block_shape=[1, 1], paddings=[[0, 0], [0, 0]], outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) def testOneBlockDimZeroRemainingDims(self): self._testPad( inputs=[[1, 2, 3], [4, 5, 6]], block_shape=[2], paddings=[1, 0], outputs=[[0, 2], [0, 5], [1, 3], [4, 6]]) def testOneBlockDimOneRemainingDim(self): self._testPad( inputs=[[[1, 11], [2, 21], [3, 31]], [[4, 41], [5, 51], [6, 61]]], block_shape=[2], paddings=[1, 0], outputs=[[[0, 0], [2, 21]], [[0, 0], [5, 51]], [[1, 11], [3, 31]], [[4, 41], [6, 61]]]) def testDirect0(self): # Test with zero-size remaining dimension. self._testDirect( input_shape=[3, 1, 2, 0], block_shape=[3], paddings=[[0, 2]]) def testDirect1(self): # Test with zero-size blocked dimension. self._testDirect( input_shape=[3, 0, 2, 5], block_shape=[3], paddings=[[0, 0]]) def testDirect2(self): # Test with padding up from zero size. 
self._testDirect( input_shape=[3, 0, 2, 5], block_shape=[3], paddings=[[1, 2]]) def testDirect3(self): self._testDirect( input_shape=[3, 3, 4, 5, 2], block_shape=[3, 4, 2], paddings=[[1, 2], [0, 0], [3, 0]]) def testDirect4(self): self._testDirect( input_shape=[3, 3, 4, 5, 2], block_shape=[3, 4, 2, 2], paddings=[[1, 2], [0, 0], [3, 0], [0, 0]]) def testDirect5(self): self._testDirect( input_shape=[3, 2, 2, 3, 4, 5, 2, 5], block_shape=[1, 1, 3, 4, 2, 2], paddings=[[0, 0], [0, 0], [1, 2], [0, 0], [3, 0], [0, 0]]) def testDirect6(self): self._testDirect( input_shape=[3, 2, 2, 3, 4, 5, 2, 5], block_shape=[1, 1, 3, 4, 2, 2, 1], paddings=[[0, 0], [0, 0], [1, 2], [0, 0], [3, 0], [0, 0], [0, 0]]) if __name__ == "__main__": test.main()
SpaceToBatchNDTest
python
airbytehq__airbyte
airbyte-integrations/connectors/source-shopify/unit_tests/integration/test_bulk_stream.py
{ "start": 3186, "end": 9363 }
class ____(TestCase): def setUp(self) -> None: self._http_mocker = HttpMocker() self._http_mocker.__enter__() set_up_shop(self._http_mocker, _SHOP_NAME) grant_all_scopes(self._http_mocker, _SHOP_NAME) def tearDown(self) -> None: self._http_mocker.__exit__(None, None, None) def test_when_read_then_extract_records(self) -> None: self._http_mocker.post( create_job_creation_request(_SHOP_NAME, _JOB_START_DATE, _JOB_END_DATE), JobCreationResponseBuilder().with_bulk_operation_id(_BULK_OPERATION_ID).build(), ) self._http_mocker.post( create_job_status_request(_SHOP_NAME, _BULK_OPERATION_ID), JobStatusResponseBuilder().with_completed_status(_BULK_OPERATION_ID, _JOB_RESULT_URL).build(), ) self._http_mocker.get( HttpRequest(_JOB_RESULT_URL), MetafieldOrdersJobResponseBuilder().with_record().with_record().build(), ) output = self._read(_get_config(_JOB_START_DATE)) assert output.errors == [] assert len(output.records) == 2 def test_given_errors_on_job_creation_when_read_then_do_not_retry(self) -> None: """ The purpose of this test is to document the current behavior as I'm not sure we have an example of such errors on the job creation """ job_creation_request = create_job_creation_request(_SHOP_NAME, _JOB_START_DATE, _JOB_END_DATE) self._http_mocker.post(job_creation_request, _AN_ERROR_RESPONSE) self._read(_get_config(_JOB_START_DATE)) self._http_mocker.assert_number_of_calls(job_creation_request, 1) def test_given_response_is_not_json_on_job_creation_when_read_then_retry(self) -> None: job_creation_request = create_job_creation_request(_SHOP_NAME, _JOB_START_DATE, _JOB_END_DATE) self._http_mocker.post( job_creation_request, [ HttpResponse("This is not json"), JobCreationResponseBuilder() .with_bulk_operation_id(_BULK_OPERATION_ID) .build(), # This will never get called (see assertion below) ], ) self._http_mocker.post( create_job_status_request(_SHOP_NAME, _BULK_OPERATION_ID), JobStatusResponseBuilder().with_completed_status(_BULK_OPERATION_ID, _JOB_RESULT_URL).build(), ) 
self._http_mocker.get( HttpRequest(_JOB_RESULT_URL), MetafieldOrdersJobResponseBuilder().with_record().with_record().build(), ) output = self._read(_get_config(_JOB_START_DATE)) assert output.errors == [] assert len(output.records) == 2 def test_given_connection_error_on_job_creation_when_read_then_retry_job_creation(self) -> None: inner_mocker = self._http_mocker.__getattribute__("_mocker") inner_mocker.register_uri( # TODO the testing library should have the ability to generate ConnectionError. As this might not be trivial, we will wait for another case before implementing "POST", _URL_GRAPHQL, [ {"exc": ConnectionError("ConnectionError")}, {"text": JobCreationResponseBuilder().with_bulk_operation_id(_BULK_OPERATION_ID).build().body, "status_code": 200}, ], additional_matcher=lambda request: request.text == create_job_creation_body(_JOB_START_DATE, _JOB_END_DATE), ) self._http_mocker.post( create_job_status_request(_SHOP_NAME, _BULK_OPERATION_ID), JobStatusResponseBuilder().with_completed_status(_BULK_OPERATION_ID, _JOB_RESULT_URL).build(), ) self._http_mocker.get( HttpRequest(_JOB_RESULT_URL), MetafieldOrdersJobResponseBuilder().with_record().with_record().build(), ) output = self._read(_get_config(_JOB_START_DATE)) assert output.errors == [] def test_given_retryable_error_on_first_get_job_status_when_read_then_retry(self) -> None: self._http_mocker.post( create_job_creation_request(_SHOP_NAME, _JOB_START_DATE, _JOB_END_DATE), JobCreationResponseBuilder().with_bulk_operation_id(_BULK_OPERATION_ID).build(), ) self._http_mocker.post( create_job_status_request(_SHOP_NAME, _BULK_OPERATION_ID), [ _AN_ERROR_RESPONSE, JobStatusResponseBuilder().with_completed_status(_BULK_OPERATION_ID, _JOB_RESULT_URL).build(), ], ) self._http_mocker.get( HttpRequest(_JOB_RESULT_URL), MetafieldOrdersJobResponseBuilder().with_record().with_record().build(), ) output = self._read(_get_config(_JOB_START_DATE)) assert output.errors == [] assert len(output.records) == 2 def 
test_given_retryable_error_on_get_job_status_when_read_then_retry(self) -> None: self._http_mocker.post( create_job_creation_request(_SHOP_NAME, _JOB_START_DATE, _JOB_END_DATE), JobCreationResponseBuilder().with_bulk_operation_id(_BULK_OPERATION_ID).build(), ) self._http_mocker.post( create_job_status_request(_SHOP_NAME, _BULK_OPERATION_ID), [ JobStatusResponseBuilder().with_running_status(_BULK_OPERATION_ID).build(), HttpResponse(json.dumps({"errors": ["an error"]})), JobStatusResponseBuilder().with_completed_status(_BULK_OPERATION_ID, _JOB_RESULT_URL).build(), ], ) self._http_mocker.get( HttpRequest(_JOB_RESULT_URL), MetafieldOrdersJobResponseBuilder().with_record().with_record().build(), ) output = self._read(_get_config(_JOB_START_DATE)) assert output.errors == [] assert len(output.records) == 2 def _read(self, config): catalog = CatalogBuilder().with_stream(_BULK_STREAM, SyncMode.full_refresh).build() output = read(SourceShopify(), config, catalog) return output @freeze_time(_INCREMENTAL_JOB_END_DATE)
GraphQlBulkStreamTest
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 67747, "end": 68224 }
class ____(sgqlc.types.Enum): """The review status of a pull request. Enumeration Choices: * `APPROVED`: The pull request has received an approving review. * `CHANGES_REQUESTED`: Changes have been requested on the pull request. * `REVIEW_REQUIRED`: A review is required before the pull request can be merged. """ __schema__ = github_schema __choices__ = ("APPROVED", "CHANGES_REQUESTED", "REVIEW_REQUIRED")
PullRequestReviewDecision
python
PyCQA__pylint
tests/functional/n/no/no_member_augassign.py
{ "start": 231, "end": 302 }
class ____: value: int obj_a = A() obj_a.value += 1 # [no-member]
A
python
dask__dask
dask/_expr.py
{ "start": 26020, "end": 28706 }
class ____(Expr): """A singleton Expr class This is used to treat the subclassed expression as a singleton. Singletons are deduplicated by expr._name which is typically based on the dask.tokenize output. This is a crucial performance optimization for expressions that walk through an optimizer and are recreated repeatedly but isn't safe for objects that cannot be reliably or quickly tokenized. """ _instances: weakref.WeakValueDictionary[str, SingletonExpr] def __new__(cls, *args, _determ_token=None, **kwargs): if not hasattr(cls, "_instances"): cls._instances = weakref.WeakValueDictionary() inst = super().__new__(cls, *args, _determ_token=_determ_token, **kwargs) _name = inst._name if _name in cls._instances and cls.__init__ == object.__init__: return cls._instances[_name] cls._instances[_name] = inst return inst def collect_dependents(expr) -> defaultdict: dependents = defaultdict(list) stack = [expr] seen = set() while stack: node = stack.pop() if node._name in seen: continue seen.add(node._name) for dep in node.dependencies(): stack.append(dep) dependents[dep._name].append(weakref.ref(node)) return dependents def optimize(expr: Expr, fuse: bool = True) -> Expr: """High level query optimization This leverages three optimization passes: 1. Class based simplification using the ``_simplify`` function and methods 2. 
Blockwise fusion Parameters ---------- expr: Input expression to optimize fuse: whether or not to turn on blockwise fusion See Also -------- simplify optimize_blockwise_fusion """ stage: OptimizerStage = "fused" if fuse else "simplified-physical" return optimize_until(expr, stage) def optimize_until(expr: Expr, stage: OptimizerStage) -> Expr: result = expr if stage == "logical": return result # Simplify expr = result.simplify() if stage == "simplified-logical": return expr # Manipulate Expression to make it more efficient expr = expr.rewrite(kind="tune", rewritten={}) if stage == "tuned-logical": return expr # Lower expr = expr.lower_completely() if stage == "physical": return expr # Simplify again expr = expr.simplify() if stage == "simplified-physical": return expr # Final graph-specific optimizations expr = expr.fuse() if stage == "fused": return expr raise ValueError(f"Stage {stage!r} not supported.")
SingletonExpr
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/testing/assertsql.py
{ "start": 1843, "end": 8252 }
class ____(SQLMatchRule): def __init__( self, statement, params=None, dialect="default", enable_returning=True ): self.statement = statement self.params = params self.dialect = dialect self.enable_returning = enable_returning def _compare_sql(self, execute_observed, received_statement): stmt = re.sub(r"[\n\t]", "", self.statement) return received_statement == stmt def _compile_dialect(self, execute_observed): if self.dialect == "default": dialect = DefaultDialect() # this is currently what tests are expecting # dialect.supports_default_values = True dialect.supports_default_metavalue = True if self.enable_returning: dialect.insert_returning = dialect.update_returning = ( dialect.delete_returning ) = True dialect.use_insertmanyvalues = True dialect.supports_multivalues_insert = True dialect.update_returning_multifrom = True dialect.delete_returning_multifrom = True # dialect.favor_returning_over_lastrowid = True # dialect.insert_null_pk_still_autoincrements = True # this is calculated but we need it to be True for this # to look like all the current RETURNING dialects assert dialect.insert_executemany_returning return dialect else: return url.URL.create(self.dialect).get_dialect()() def _received_statement(self, execute_observed): """reconstruct the statement and params in terms of a target dialect, which for CompiledSQL is just DefaultDialect.""" context = execute_observed.context compare_dialect = self._compile_dialect(execute_observed) # received_statement runs a full compile(). 
we should not need to # consider extracted_parameters; if we do this indicates some state # is being sent from a previous cached query, which some misbehaviors # in the ORM can cause, see #6881 cache_key = None # execute_observed.context.compiled.cache_key extracted_parameters = ( None # execute_observed.context.extracted_parameters ) if "schema_translate_map" in context.execution_options: map_ = context.execution_options["schema_translate_map"] else: map_ = None if isinstance(execute_observed.clauseelement, BaseDDLElement): compiled = execute_observed.clauseelement.compile( dialect=compare_dialect, schema_translate_map=map_, ) else: compiled = execute_observed.clauseelement.compile( cache_key=cache_key, dialect=compare_dialect, column_keys=context.compiled.column_keys, for_executemany=context.compiled.for_executemany, schema_translate_map=map_, ) _received_statement = re.sub(r"[\n\t]", "", str(compiled)) parameters = execute_observed.parameters if not parameters: _received_parameters = [ compiled.construct_params( extracted_parameters=extracted_parameters ) ] else: _received_parameters = [ compiled.construct_params( m, extracted_parameters=extracted_parameters ) for m in parameters ] return _received_statement, _received_parameters def process_statement(self, execute_observed): context = execute_observed.context _received_statement, _received_parameters = self._received_statement( execute_observed ) params = self._all_params(context) equivalent = self._compare_sql(execute_observed, _received_statement) if equivalent: if params is not None: all_params = list(params) all_received = list(_received_parameters) while all_params and all_received: param = dict(all_params.pop(0)) for idx, received in enumerate(list(all_received)): # do a positive compare only for param_key in param: # a key in param did not match current # 'received' if ( param_key not in received or received[param_key] != param[param_key] ): break else: # all keys in param matched 'received'; # onto next 
param del all_received[idx] break else: # param did not match any entry # in all_received equivalent = False break if all_params or all_received: equivalent = False if equivalent: self.is_consumed = True self.errormessage = None else: self.errormessage = self._failure_message( execute_observed, params ) % { "received_statement": _received_statement, "received_parameters": _received_parameters, } def _all_params(self, context): if self.params: if callable(self.params): params = self.params(context) else: params = self.params if not isinstance(params, list): params = [params] return params else: return None def _failure_message(self, execute_observed, expected_params): return ( "Testing for compiled statement\n%r partial params %s, " "received\n%%(received_statement)r with params " "%%(received_parameters)r" % ( self.statement.replace("%", "%%"), repr(expected_params).replace("%", "%%"), ) )
CompiledSQL
python
django__django
tests/file_storage/models.py
{ "start": 774, "end": 894 }
class ____(FileSystemStorage): def __call__(self): # no-op implementation. return self
CallableStorage
python
mkdocstrings__mkdocstrings
src/mkdocstrings/_internal/plugin.py
{ "start": 1642, "end": 2970 }
class ____(Config): """The configuration options of `mkdocstrings`, written in `mkdocs.yml`.""" handlers = opt.Type(dict, default={}) """ Global configuration of handlers. You can set global configuration per handler, applied everywhere, but overridable in each "autodoc" instruction. Example: ```yaml plugins: - mkdocstrings: handlers: python: options: option1: true option2: "value" rust: options: option9: 2 ``` """ default_handler = opt.Type(str, default="python") """The default handler to use. The value is the name of the handler module. Default is "python".""" custom_templates = opt.Optional(opt.Dir(exists=True)) """Location of custom templates to use when rendering API objects. Value should be the path of a directory relative to the MkDocs configuration file. """ enable_inventory = opt.Optional(opt.Type(bool)) """Whether to enable object inventory creation.""" enabled = opt.Type(bool, default=True) """Whether to enable the plugin. Default is true. If false, *mkdocstrings* will not collect or render anything.""" locale = opt.Optional(opt.Type(str)) """The locale to use for translations."""
PluginConfig
python
getsentry__sentry
fixtures/safe_migrations_apps/bad_flow_add_column_with_notnull_app/migrations/0002_add_field_notnull.py
{ "start": 153, "end": 451 }
class ____(CheckedMigration): dependencies = [ ("bad_flow_add_column_with_notnull_app", "0001_initial"), ] operations = [ migrations.AddField( model_name="testtable", name="field", field=models.IntegerField(), ), ]
Migration
python
python-openxml__python-docx
tests/oxml/test_xmlchemy.py
{ "start": 25866, "end": 26092 }
class ____(BaseOxmlElement): """ Oom standing for 'OneOrMore', ``<w:oomChild>`` element, representing a child element that can appear multiple times in sequence, but must appear at least once. """
CT_OomChild
python
getsentry__sentry
src/sentry/auth_v2/utils/session.py
{ "start": 2089, "end": 3495 }
class ____: """ Manages authentication session state and flags. This class sets session flags that control the authentication flow on the frontend, based on the current user's state. """ def __init__(self, request: Request): self.request = request def initialize_auth_flags(self) -> None: """ Initialize authentication flow flags in the session. This method sets flags that control what authentication steps the user needs to complete on the frontend. Flags are only set if they don't already exist in the session. """ user = self.request.user if not user or user.is_anonymous: return session = self.request.session # Keep the keys sorted in order of importance!! if session.get("todo_email_verification") is None: session["todo_email_verification"] = not user.has_verified_primary_email() if session.get("todo_2fa_verification") is None: session["todo_2fa_verification"] = user.has_2fa() if session.get("todo_password_reset") is None: session["todo_password_reset"] = ( user.is_password_expired or not user.has_usable_password() ) if session.get("todo_2fa_setup") is None: session["todo_2fa_setup"] = user.has_org_requiring_2fa() and not user.has_2fa()
SessionBuilder
python
getsentry__sentry
src/sentry/identity/vercel/provider.py
{ "start": 205, "end": 1081 }
class ____(OAuth2Provider): key = "vercel" name = "Vercel" # https://vercel.com/docs/integrations/reference#using-the-vercel-api/exchange-code-for-access-token oauth_access_token_url = "https://api.vercel.com/v2/oauth/access_token" def get_oauth_client_id(self) -> str | int: return options.get("vercel.client-id") def get_oauth_client_secret(self) -> str: return options.get("vercel.client-secret") def get_refresh_token_url(self) -> str: return self.oauth_access_token_url def get_pipeline_views(self) -> list[PipelineView[IdentityPipeline]]: return [ OAuth2CallbackView( access_token_url=self.oauth_access_token_url, client_id=self.get_oauth_client_id(), client_secret=self.get_oauth_client_secret(), ), ]
VercelIdentityProvider
python
huggingface__transformers
src/transformers/models/deberta_v2/modeling_deberta_v2.py
{ "start": 34979, "end": 36008 }
class ____(nn.Module): """https://github.com/microsoft/DeBERTa/blob/master/DeBERTa/deberta/bert.py#L270""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, elementwise_affine=True) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # note that the input embeddings must be passed as an argument def forward(self, hidden_states, word_embeddings): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) hidden_states = torch.matmul(hidden_states, word_embeddings.weight.t()) + self.bias return hidden_states
DebertaV2LMPredictionHead
python
getsentry__sentry
src/sentry/workflow_engine/migrations/0071_migrate_remaining_metric_alerts.py
{ "start": 31501, "end": 32827 }
class ____(CheckedMigration): # This flag is used to mark that a migration shouldn't be automatically run in production. # This should only be used for operations where it's safe to run the migration after your # code has deployed. So this should not be used for most operations that alter the schema # of a table. # Here are some things that make sense to mark as post deployment: # - Large data migrations. Typically we want these to be run manually so that they can be # monitored and not block the deploy for a long period of time while they run. # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to # run this outside deployments so that we don't block them. Note that while adding an index # is a schema change, it's completely safe to run the operation after the code has deployed. # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment is_post_deployment = True dependencies = [ ("workflow_engine", "0070_migrate_remaining_anomaly_detection_alerts"), ] operations = [ migrations.RunPython( migrate_metric_alerts, migrations.RunPython.noop, hints={"tables": ["sentry_alertrule"]}, ) ]
Migration
python
openai__openai-python
src/openai/types/images_response.py
{ "start": 926, "end": 1905 }
class ____(BaseModel): created: int """The Unix timestamp (in seconds) of when the image was created.""" background: Optional[Literal["transparent", "opaque"]] = None """The background parameter used for the image generation. Either `transparent` or `opaque`. """ data: Optional[List[Image]] = None """The list of generated images.""" output_format: Optional[Literal["png", "webp", "jpeg"]] = None """The output format of the image generation. Either `png`, `webp`, or `jpeg`.""" quality: Optional[Literal["low", "medium", "high"]] = None """The quality of the image generated. Either `low`, `medium`, or `high`.""" size: Optional[Literal["1024x1024", "1024x1536", "1536x1024"]] = None """The size of the image generated. Either `1024x1024`, `1024x1536`, or `1536x1024`. """ usage: Optional[Usage] = None """For `gpt-image-1` only, the token usage information for the image generation."""
ImagesResponse
python
readthedocs__readthedocs.org
readthedocs/core/views/__init__.py
{ "start": 2641, "end": 3070 }
class ____(PrivateViewMixin, TemplateView): form_class = SupportForm template_name = "support/index.html" def get_context_data(self, **kwargs): """Pass along endpoint for support form.""" context = super().get_context_data(**kwargs) context["SUPPORT_FORM_ENDPOINT"] = settings.SUPPORT_FORM_ENDPOINT context["form"] = self.form_class(self.request.user) return context
SupportView
python
scipy__scipy
scipy/optimize/tests/test_minimize_constrained.py
{ "start": 25187, "end": 28130 }
class ____: @pytest.mark.parametrize('bounds, x_opt', [(Bounds(-np.inf, np.inf), Rosenbrock().x_opt), (Bounds(-np.inf, -0.8), [-0.8, -0.8]), (Bounds(3.0, np.inf), [3.0, 9.0]), (Bounds([3.0, 1.0], [4.0, 5.0]), [3., 5.]), ]) def test_rosen_brock_with_bounds(self, bounds, x_opt): prob = Rosenbrock() with warnings.catch_warnings(): msg = "Initial guess is not within the specified bounds" warnings.filterwarnings("ignore", msg, UserWarning) result = minimize(prob.fun, [-10, -10], method='Nelder-Mead', bounds=bounds) assert np.less_equal(bounds.lb, result.x).all() assert np.less_equal(result.x, bounds.ub).all() assert np.allclose(prob.fun(result.x), result.fun) assert np.allclose(result.x, x_opt, atol=1.e-3) def test_equal_all_bounds(self): prob = Rosenbrock() bounds = Bounds([4.0, 5.0], [4.0, 5.0]) with warnings.catch_warnings(): warnings.filterwarnings( "ignore", "Initial guess is not within the specified bounds", UserWarning) result = minimize(prob.fun, [-10, 8], method='Nelder-Mead', bounds=bounds) assert np.allclose(result.x, [4.0, 5.0]) def test_equal_one_bounds(self): prob = Rosenbrock() bounds = Bounds([4.0, 5.0], [4.0, 20.0]) with warnings.catch_warnings(): warnings.filterwarnings( "ignore", "Initial guess is not within the specified bounds", UserWarning) result = minimize(prob.fun, [-10, 8], method='Nelder-Mead', bounds=bounds) assert np.allclose(result.x, [4.0, 16.0]) def test_invalid_bounds(self): prob = Rosenbrock() message = 'An upper bound is less than the corresponding lower bound.' 
with pytest.raises(ValueError, match=message): bounds = Bounds([-np.inf, 1.0], [4.0, -5.0]) minimize(prob.fun, [-10, 3], method='Nelder-Mead', bounds=bounds) @pytest.mark.xfail(reason="Failing on Azure Linux and macOS builds, " "see gh-13846") def test_outside_bounds_warning(self): prob = Rosenbrock() message = "Initial guess is not within the specified bounds" with pytest.warns(UserWarning, match=message): bounds = Bounds([-np.inf, 1.0], [4.0, 5.0]) minimize(prob.fun, [-10, 8], method='Nelder-Mead', bounds=bounds)
TestBoundedNelderMead
python
celery__celery
t/unit/tasks/test_stamping.py
{ "start": 6728, "end": 7924 }
class ____(StampingVisitor): def on_signature(self, actual_sig: Signature, **headers) -> dict: return {"on_signature": "StringStampingVisitor: on_signature-item1"} def on_group_start(self, actual_sig: Signature, **headers) -> dict: return {"on_group_start": "StringStampingVisitor: on_group_start-item1"} def on_chain_start(self, actual_sig: Signature, **headers) -> dict: return {"on_chain_start": "StringStampingVisitor: on_chain_start-item1"} def on_chord_header_start(self, actual_sig: Signature, **header) -> dict: s = super().on_chord_header_start(actual_sig, **header) s.update({"on_chord_header_start": "StringStampingVisitor: on_chord_header_start-item1"}) return s def on_chord_body(self, actual_sig: Signature, **header) -> dict: return {"on_chord_body": "StringStampingVisitor: on_chord_body-item1"} def on_callback(self, actual_sig: Signature, **header) -> dict: return {"on_callback": "StringStampingVisitor: on_callback-item1"} def on_errback(self, actual_sig: Signature, **header) -> dict: return {"on_errback": "StringStampingVisitor: on_errback-item1"}
StringStampingVisitor
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/protocol24.py
{ "start": 598, "end": 752 }
class ____: var1: ClassVar[int] var2: ClassVar[int] d: ProtoD = E # This should generate an error because var2 is the wrong type. e: ProtoD = F
F
python
bokeh__bokeh
src/bokeh/core/property/struct.py
{ "start": 3768, "end": 4538 }
class ____(SimpleNamespace): """ Allow access unnamed struct with attributes and keys. .. note:: This feature is experimental and may change in the short term. """ def __getitem__(self, key: str) -> Any: return getattr(self, key) def __setitem__(self, key: str, val: Any) -> None: setattr(self, key, val) def __delitem__(self, key: str) -> None: delattr(self, key) #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
struct
python
kamyu104__LeetCode-Solutions
Python/categorize-box-according-to-criteria.py
{ "start": 52, "end": 626 }
class ____(object): def categorizeBox(self, length, width, height, mass): """ :type length: int :type width: int :type height: int :type mass: int :rtype: str """ bulky = any(x >= 10**4 for x in (length, width, height)) or length*width*height >= 10**9 heavy = mass >= 100 if bulky and heavy: return "Both" if bulky: return "Bulky" if heavy: return "Heavy" return "Neither" # Time: O(1) # Space: O(1) # math, implementation
Solution
python
kamyu104__LeetCode-Solutions
Python/bag-of-tokens.py
{ "start": 33, "end": 665 }
class ____(object): def bagOfTokensScore(self, tokens, P): """ :type tokens: List[int] :type P: int :rtype: int """ tokens.sort() result, points = 0, 0 left, right = 0, len(tokens)-1 while left <= right: if P >= tokens[left]: P -= tokens[left] left += 1 points += 1 result = max(result, points) elif points > 0: points -= 1 P += tokens[right] right -= 1 else: break return result
Solution
python
PyCQA__pylint
pylint/reporters/ureports/nodes.py
{ "start": 4589, "end": 5315 }
class ____(BaseLayout): """Some tabular data. attributes : * BaseLayout attributes * cols : the number of columns of the table (REQUIRED) * rheaders : the first row's elements are table's header * cheaders : the first col's elements are table's header * title : the table's optional title """ def __init__( self, cols: int, title: str | None = None, rheaders: int = 0, cheaders: int = 0, children: Iterable[Text | str] = (), ) -> None: super().__init__(children=children) assert isinstance(cols, int) self.cols = cols self.title = title self.rheaders = rheaders self.cheaders = cheaders
Table
python
keon__algorithms
tests/test_queues.py
{ "start": 151, "end": 1781 }
class ____(unittest.TestCase): """ Test suite for the Queue data structures. """ def test_ArrayQueue(self): queue = ArrayQueue() queue.enqueue(1) queue.enqueue(2) queue.enqueue(3) # test __iter__() it = iter(queue) self.assertEqual(1, next(it)) self.assertEqual(2, next(it)) self.assertEqual(3, next(it)) self.assertRaises(StopIteration, next, it) # test __len__() self.assertEqual(3, len(queue)) # test is_empty() self.assertFalse(queue.is_empty()) # test peek() self.assertEqual(1, queue.peek()) # test dequeue() self.assertEqual(1, queue.dequeue()) self.assertEqual(2, queue.dequeue()) self.assertEqual(3, queue.dequeue()) self.assertTrue(queue.is_empty()) def test_LinkedListQueue(self): queue = LinkedListQueue() queue.enqueue(1) queue.enqueue(2) queue.enqueue(3) # test __iter__() it = iter(queue) self.assertEqual(1, next(it)) self.assertEqual(2, next(it)) self.assertEqual(3, next(it)) self.assertRaises(StopIteration, next, it) # test __len__() self.assertEqual(3, len(queue)) # test is_empty() self.assertFalse(queue.is_empty()) # test peek() self.assertEqual(1, queue.peek()) # test dequeue() self.assertEqual(1, queue.dequeue()) self.assertEqual(2, queue.dequeue()) self.assertEqual(3, queue.dequeue()) self.assertTrue(queue.is_empty())
TestQueue
python
getsentry__sentry
src/sentry/releases/endpoints/project_release_details.py
{ "start": 1030, "end": 7532 }
class ____(ProjectEndpoint, ReleaseAnalyticsMixin): publish_status = { "DELETE": ApiPublishStatus.UNKNOWN, "GET": ApiPublishStatus.UNKNOWN, "PUT": ApiPublishStatus.UNKNOWN, } permission_classes = (ProjectReleasePermission,) def get(self, request: Request, project, version) -> Response: """ Retrieve a Project's Release ```````````````````````````` Return details on an individual release. :pparam string organization_id_or_slug: the id or slug of the organization the release belongs to. :pparam string project_id_or_slug: the id or slug of the project to retrieve the release of. :pparam string version: the version identifier of the release. :auth: required """ with_health = request.GET.get("health") == "1" summary_stats_period = request.GET.get("summaryStatsPeriod") or "14d" health_stats_period = request.GET.get("healthStatsPeriod") or ("24h" if with_health else "") if summary_stats_period not in STATS_PERIODS: raise ParseError(detail=get_stats_period_detail("summaryStatsPeriod", STATS_PERIODS)) if health_stats_period and health_stats_period not in STATS_PERIODS: raise ParseError(detail=get_stats_period_detail("healthStatsPeriod", STATS_PERIODS)) try: release = Release.objects.get( organization_id=project.organization_id, projects=project, version=version ) except Release.DoesNotExist: raise ResourceDoesNotExist if with_health: release._for_project_id = project.id return Response( serialize( release, request.user, project=project, with_health_data=with_health, summary_stats_period=summary_stats_period, health_stats_period=health_stats_period, ) ) def put(self, request: Request, project, version) -> Response: """ Update a Project's Release `````````````````````````` Update a release. This can change some metadata associated with the release (the ref, url, and dates). :pparam string organization_id_or_slug: the id or slug of the organization the release belongs to. :pparam string project_id_or_slug: the id or slug of the project to change the release of. 
:pparam string version: the version identifier of the release. :param string ref: an optional commit reference. This is useful if a tagged version has been provided. :param url url: a URL that points to the release. This can be the path to an online interface to the sourcecode for instance. :param datetime dateReleased: an optional date that indicates when the release went live. If not provided the current time is assumed. :auth: required """ bind_organization_context(project.organization) scope = sentry_sdk.get_isolation_scope() scope.set_tag("version", version) try: release = Release.objects.get( organization_id=project.organization_id, projects=project, version=version ) except Release.DoesNotExist: scope.set_tag("failure_reason", "Release.DoesNotExist") raise ResourceDoesNotExist serializer = ReleaseSerializer(data=request.data, partial=True) if not serializer.is_valid(): scope.set_tag("failure_reason", "serializer_error") return Response(serializer.errors, status=400) result = serializer.validated_data was_released = bool(release.date_released) kwargs = {} if result.get("dateReleased"): kwargs["date_released"] = result["dateReleased"] if result.get("ref"): kwargs["ref"] = result["ref"] if result.get("url"): kwargs["url"] = result["url"] if result.get("status"): kwargs["status"] = result["status"] if kwargs: release.update(**kwargs) commit_list = result.get("commits") if commit_list: hook = ReleaseHook(project) # TODO(dcramer): handle errors with release payloads hook.set_commits(release.version, commit_list) self.track_set_commits_local( request, organization_id=project.organization_id, project_ids=[project.id] ) if not was_released and release.date_released: Activity.objects.create( type=ActivityType.RELEASE.value, project=project, ident=Activity.get_version_ident(release.version), data={"version": release.version}, datetime=release.date_released, ) no_snuba_for_release_creation = options.get("releases.no_snuba_for_release_creation") return Response( 
serialize( release, request.user, no_snuba_for_release_creation=no_snuba_for_release_creation ) ) def delete(self, request: Request, project, version) -> Response: """ Delete a Project's Release `````````````````````````` Permanently remove a release and all of its files. :pparam string organization_id_or_slug: the id or slug of the organization the release belongs to. :pparam string project_id_or_slug: the id or slug of the project to delete the release of. :pparam string version: the version identifier of the release. :auth: required """ try: release = Release.objects.get( organization_id=project.organization_id, projects=project, version=version ) except Release.DoesNotExist: raise ResourceDoesNotExist try: release.safe_delete() except UnsafeReleaseDeletion as e: return Response({"detail": str(e)}, status=400) return Response(status=204)
ProjectReleaseDetailsEndpoint
python
jina-ai__jina
tests/integration/network_failures/test_network_failures.py
{ "start": 612, "end": 29531 }
class ____(Executor): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._id = str(uuid.uuid4()) @requests(on='/foo') def foo(self, docs, *args, **kwargs): docs[0].text = self._id def _create_worker_runtime(port, name='', executor=None): args = _generate_pod_args() args.port = [port] args.uses = 'DummyExec' args.name = name if executor: args.uses = executor with AsyncNewLoopRuntime(args, req_handler_cls=WorkerRequestHandler) as runtime: runtime.run_forever() def _create_worker(port): # create a single worker runtime p = multiprocessing.Process(target=_create_worker_runtime, args=(port,)) p.start() time.sleep(0.1) return p def _create_gateway(port, graph, pod_addr, protocol, retries=-1, log_config='default'): # create a single worker runtime # create a single gateway runtime p = multiprocessing.Process( target=_create_gateway_runtime, args=(graph, pod_addr, port, protocol, retries, log_config), ) p.start() time.sleep(0.1) return p def _create_head(port, connection_list_dict, polling, retries=-1): p = multiprocessing.Process( target=_create_head_runtime, args=(port, connection_list_dict, 'head', polling, None, None, retries), ) p.start() time.sleep(0.1) return p def _check_all_replicas_connected(num_replicas, gateway_port, protocol): """check if all replicas are connected""" exec_ids = set() exec_id_list = [] for i in range(num_replicas + 1): id_ = _send_request(gateway_port, protocol, request_size=2)[0].text exec_ids.add(id_) exec_id_list.append(id_) print(exec_id_list) assert len(exec_ids) == num_replicas def _send_request(gateway_port, protocol, request_size=1): """send request to gateway and see what happens""" c = Client(host='localhost', port=gateway_port, protocol=protocol) res = c.post('/foo', inputs=DocumentArray.empty(2), request_size=request_size) assert len(res) == 2 return res def _test_error(gateway_port, error_ports, protocol): if not isinstance(error_ports, list): error_ports = [error_ports] with pytest.raises(ConnectionError) 
as err_info: # assert correct error is thrown _send_request(gateway_port, protocol) # assert error message contains the port(s) of the broken executor(s) for port in error_ports: assert str(port) in err_info.value.args[0] @pytest.mark.parametrize('protocol', ['http']) @pytest.mark.parametrize('fail_endpoint_discovery', [True, False]) @pytest.mark.asyncio async def test_runtimes_reconnect(port_generator, protocol, fail_endpoint_discovery): # create gateway and workers manually, then terminate worker process to provoke an error worker_port = port_generator() gateway_port = port_generator() graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}' pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}' gateway_process = _create_gateway( gateway_port, graph_description, pod_addresses, protocol ) BaseServer.wait_for_ready_or_shutdown( timeout=5.0, ctrl_address=f'0.0.0.0:{gateway_port}', ready_or_shutdown_event=multiprocessing.Event(), ) try: if fail_endpoint_discovery: # send request while Executor is not UP, WILL FAIL p = multiprocessing.Process( target=_send_request, args=(gateway_port, protocol) ) p.start() p.join() assert p.exitcode != 0, f"The _send_request #0 Process exited with exitcode {p.exitcode}" # The request will fail and raise worker_process = _create_worker(worker_port) assert BaseServer.wait_for_ready_or_shutdown( timeout=5.0, ctrl_address=f'0.0.0.0:{worker_port}', ready_or_shutdown_event=multiprocessing.Event(), ), "The BaseServer wait_for_ready_or_shutdown for worker_port failed" time.sleep(3) p = multiprocessing.Process(target=_send_request, args=(gateway_port, protocol)) p.start() p.join() assert p.exitcode == 0, f"The _send_request #1 Process exited with exitcode {p.exitcode}" # The request will not fail and raise worker_process.terminate() # kill worker worker_process.join() assert not worker_process.is_alive() # send request while Executor is not UP, WILL FAIL p = multiprocessing.Process(target=_send_request, args=(gateway_port, 
protocol)) p.start() p.join() assert p.exitcode != 0, f"The _send_request #2 Process exited with exitcode {p.exitcode}" # The request will not fail and rais worker_process = _create_worker(worker_port) time.sleep(3) assert BaseServer.wait_for_ready_or_shutdown( timeout=5.0, ctrl_address=f'0.0.0.0:{worker_port}', ready_or_shutdown_event=multiprocessing.Event(), ), "The BaseServer wait_for_ready_or_shutdown for worker_port failed" p = multiprocessing.Process(target=_send_request, args=(gateway_port, protocol)) p.start() p.join() assert ( p.exitcode == 0 ), f"The _send_request #3 Process exited with exitcode {p.exitcode}" # The request will not fail and rais # if exitcode != 0 then test in other process did not pass and this should fail # ----------- 2. test that gateways remain alive ----------- # just do the same again, expecting the same failure worker_process.terminate() # kill worker worker_process.join() assert not worker_process.is_alive(), "Worker process is still alive" assert ( worker_process.exitcode == 0 ), f"The worker_process Process exited with exitcode {worker_process.exitcode}" # if exitcode != 0 then test in other process did not pass and this should fail except Exception as exc: print(f'===> Exception: {exc}') assert False finally: # clean up runtimes gateway_process.terminate() gateway_process.join() worker_process.terminate() worker_process.join() @pytest.mark.parametrize( 'fail_before_endpoint_discovery', [True, False] ) # if not before, then after @pytest.mark.parametrize('protocol', ['http', 'websocket']) @pytest.mark.asyncio async def test_runtimes_headless_topology( port_generator, protocol, fail_before_endpoint_discovery ): # create gateway and workers manually, then terminate worker process to provoke an error worker_port = port_generator() gateway_port = port_generator() graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}' pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}' worker_process = 
_create_worker(worker_port) gateway_process = _create_gateway( gateway_port, graph_description, pod_addresses, protocol ) time.sleep(1.0) BaseServer.wait_for_ready_or_shutdown( timeout=5.0, ctrl_address=f'0.0.0.0:{worker_port}', ready_or_shutdown_event=multiprocessing.Event(), ) BaseServer.wait_for_ready_or_shutdown( timeout=5.0, ctrl_address=f'0.0.0.0:{gateway_port}', ready_or_shutdown_event=multiprocessing.Event(), ) if ( fail_before_endpoint_discovery ): # kill worker before having sent the first request, so before endpoint discov. worker_process.terminate() worker_process.join() try: if fail_before_endpoint_discovery: # here worker is already dead before the first request, so endpoint discovery will fail # ----------- 1. test that useful errors are given when endpoint discovery fails ----------- # we have to do this in a new process because otherwise grpc will be sad and everything will crash :( p = multiprocessing.Process( target=_test_error, args=(gateway_port, worker_port, protocol) ) p.start() p.join() assert ( p.exitcode == 0 ) # if exitcode != 0 then test in other process did not pass and this should fail else: # just ping the Flow without having killed a worker before. This (also) performs endpoint discovery p = multiprocessing.Process( target=_send_request, args=(gateway_port, protocol) ) p.start() p.join() # only now do we kill the worker, after having performed successful endpoint discovery # so in this case, the actual request will fail, not the discovery, which is handled differently by Gateway worker_process.terminate() # kill worker worker_process.join() assert not worker_process.is_alive() # ----------- 2. 
test that gateways remain alive ----------- # just do the same again, expecting the same failure p = multiprocessing.Process( target=_test_error, args=(gateway_port, worker_port, protocol) ) p.start() p.join() assert ( p.exitcode == 0 ) # if exitcode != 0 then test in other process did not pass and this should fail except Exception: assert False finally: # clean up runtimes gateway_process.terminate() worker_process.terminate() gateway_process.join() worker_process.join() @pytest.mark.parametrize('protocol', ['http', 'websocket']) @pytest.mark.asyncio async def test_runtimes_resource_not_found(port_generator, protocol, monkeypatch): async def patch_endpoint_discovery(self, empty, context): import grpc context.set_code(grpc.StatusCode.NOT_FOUND) async def patch_process_data(self, requests_, context, **kwargs): import grpc context.set_code(grpc.StatusCode.NOT_FOUND) monkeypatch.setattr( WorkerRequestHandler, 'endpoint_discovery', patch_endpoint_discovery, ) monkeypatch.setattr( WorkerRequestHandler, 'process_data', patch_process_data, ) gateway_port = port_generator() worker_port = port_generator() graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}' pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}' worker_process = _create_worker(worker_port) gateway_process = _create_gateway( gateway_port, graph_description, pod_addresses, protocol ) time.sleep(1.0) BaseServer.wait_for_ready_or_shutdown( timeout=5.0, ctrl_address=f'0.0.0.0:{gateway_port}', ready_or_shutdown_event=multiprocessing.Event(), ) try: # ----------- 1. 
test that useful errors are given when endpoint discovery fails ----------- # we have to do this in a new process because otherwise grpc will be sad and everything will crash :( p = multiprocessing.Process( target=_test_error, args=(gateway_port, worker_port, protocol), ) p.start() p.join() assert ( p.exitcode == 0 ) # if exitcode != 0 then test in other process did not pass and this should fail except Exception: assert False finally: # clean up runtimes gateway_process.terminate() gateway_process.join() worker_process.terminate() worker_process.join() @pytest.mark.parametrize('protocol', ['http']) @pytest.mark.parametrize('fail_endpoint_discovery', [False]) @pytest.mark.asyncio async def test_runtimes_reconnect_replicas( port_generator, protocol, fail_endpoint_discovery ): # create gateway and workers manually, then terminate worker process to provoke an error worker_ports = [port_generator() for _ in range(3)] worker0_port, worker1_port, worker2_port = worker_ports gateway_port = port_generator() graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}' pod_addresses = f'{{"pod0": ["0.0.0.0:{worker0_port}", "0.0.0.0:{worker1_port}", "0.0.0.0:{worker2_port}"]}}' worker_processes = [] for p in worker_ports: worker_processes.append(_create_worker(p)) time.sleep(1.0) BaseServer.wait_for_ready_or_shutdown( timeout=5.0, ctrl_address=f'0.0.0.0:{p}', ready_or_shutdown_event=multiprocessing.Event(), ) gateway_process = _create_gateway( gateway_port, graph_description, pod_addresses, protocol ) BaseServer.wait_for_ready_or_shutdown( timeout=5.0, ctrl_address=f'0.0.0.0:{gateway_port}', ready_or_shutdown_event=multiprocessing.Event(), ) p_first_check = multiprocessing.Process( target=_check_all_replicas_connected, args=(3, gateway_port, protocol) ) p_first_check.start() p_first_check.join() assert ( p_first_check.exitcode == 0 ) # all replicas are connected. At the end, the Flow should return to this state. 
worker_processes[1].terminate() # kill 'middle' worker worker_processes[1].join() p_second_check = None try: if fail_endpoint_discovery: # send request while Executor is not UP, WILL FAIL p = multiprocessing.Process( target=_send_request, args=(gateway_port, protocol) ) p.start() p.join() p = multiprocessing.Process(target=_send_request, args=(gateway_port, protocol)) p.start() p.join() p = multiprocessing.Process(target=_send_request, args=(gateway_port, protocol)) p.start() p.join() worker_processes[1] = _create_worker(worker_ports[1]) assert BaseServer.wait_for_ready_or_shutdown( timeout=5.0, ctrl_address=f'0.0.0.0:{worker_ports[1]}', ready_or_shutdown_event=multiprocessing.Event(), ) time.sleep(1) p_second_check = multiprocessing.Process( target=_check_all_replicas_connected, args=(3, gateway_port, protocol) ) p_second_check.start() p_second_check.join() assert p_second_check.exitcode == 0 # all replicas are connected again. except Exception: assert False finally: # clean up runtimes gateway_process.terminate() gateway_process.join() p_first_check.terminate() p_first_check.join() for p in worker_processes: p.terminate() p.join() if p_second_check: p_second_check.terminate() p_second_check.join() @pytest.mark.parametrize('protocol', ['http', 'websocket']) @pytest.mark.parametrize('fail_before_endpoint_discovery', [True, False]) @pytest.mark.asyncio async def test_runtimes_replicas( port_generator, protocol, fail_before_endpoint_discovery ): # create gateway and workers manually, then terminate worker process to provoke an error worker_ports = [port_generator() for _ in range(3)] worker0_port, worker1_port, worker2_port = worker_ports gateway_port = port_generator() graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}' pod_addresses = f'{{"pod0": ["0.0.0.0:{worker0_port}", "0.0.0.0:{worker1_port}", "0.0.0.0:{worker2_port}"]}}' worker_processes = [] for p in worker_ports: worker_processes.append(_create_worker(p)) time.sleep(0.1) 
BaseServer.wait_for_ready_or_shutdown( timeout=5.0, ctrl_address=f'0.0.0.0:{p}', ready_or_shutdown_event=multiprocessing.Event(), ) gateway_process = _create_gateway( gateway_port, graph_description, pod_addresses, protocol ) BaseServer.wait_for_ready_or_shutdown( timeout=5.0, ctrl_address=f'0.0.0.0:{gateway_port}', ready_or_shutdown_event=multiprocessing.Event(), ) if ( not fail_before_endpoint_discovery ): # make successful request and trigger endpoint discovery # we have to do this in a new process because otherwise grpc will be sad and everything will crash :( p = multiprocessing.Process(target=_send_request, args=(gateway_port, protocol)) p.start() p.join() # different replica should be picked, no error should be raised assert ( p.exitcode == 0 ) # if exitcode != 0 then test in other process did not pass and this should fail worker_processes[0].terminate() # kill first worker worker_processes[0].join() try: for _ in range( len(worker_ports) ): # make sure all workers are targeted by round robin # ----------- 1. 
test that useful errors are given ----------- # we have to do this in a new process because otherwise grpc will be sad and everything will crash :( p = multiprocessing.Process( target=_send_request, args=(gateway_port, protocol) ) p.start() p.join() # different replica should be picked, no error should be raised assert ( p.exitcode == 0 ) # if exitcode != 0 then test in other process did not pass and this should fail except Exception: assert False finally: # clean up runtimes gateway_process.terminate() gateway_process.join() for p in worker_processes: p.terminate() p.join() @pytest.mark.parametrize( 'terminate_head', [True] ) # option with False times out because backoffs accumulate @pytest.mark.parametrize('protocol', ['http', 'websocket']) @pytest.mark.asyncio async def test_runtimes_headful_topology(port_generator, protocol, terminate_head): # create gateway and workers manually, then terminate worker process to provoke an error worker_port = port_generator() gateway_port = port_generator() head_port = port_generator() graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}' pod_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}' connection_list_dict = {'0': [f'127.0.0.1:{worker_port}']} head_process = _create_head(head_port, connection_list_dict, 'ANY') worker_process = _create_worker(worker_port) gateway_process = _create_gateway( gateway_port, graph_description, pod_addresses, protocol ) time.sleep(5.0) BaseServer.wait_for_ready_or_shutdown( timeout=5.0, ctrl_address=f'0.0.0.0:{head_port}', ready_or_shutdown_event=multiprocessing.Event(), ) BaseServer.wait_for_ready_or_shutdown( timeout=5.0, ctrl_address=f'0.0.0.0:{worker_port}', ready_or_shutdown_event=multiprocessing.Event(), ) BaseServer.wait_for_ready_or_shutdown( timeout=5.0, ctrl_address=f'0.0.0.0:{gateway_port}', ready_or_shutdown_event=multiprocessing.Event(), ) # terminate pod, either head or worker behind the head if terminate_head: head_process.terminate() head_process.join() 
error_port = head_port else: worker_process.terminate() # kill worker worker_process.join() error_port = worker_port error_port = ( head_port if protocol == 'websocket' else error_port ) # due to error msg length constraints ws will always report the head address try: # ----------- 1. test that useful errors are given ----------- # we have to do this in a new process because otherwise grpc will be sad and everything will crash :( p = multiprocessing.Process( target=_test_error, args=(gateway_port, error_port, protocol) ) p.start() p.join() assert ( p.exitcode == 0 ) # if exitcode != 0 then test in other process did not pass and this should fail # ----------- 2. test that gateways remain alive ----------- # just do the same again, expecting the same outcome p = multiprocessing.Process( target=_test_error, args=(gateway_port, error_port, protocol) ) p.start() p.join() assert ( p.exitcode == 0 ) # if exitcode != 0 then test in other process did not pass and this should fail except Exception: raise finally: # clean up runtimes gateway_process.terminate() worker_process.terminate() head_process.terminate() gateway_process.join() worker_process.join() head_process.join() def _send_gql_request(gateway_port): """send request to gateway and see what happens""" mutation = ( f'mutation {{' + '''docs(data: {text: "abcd"}) { id } } ''' ) c = Client(host='localhost', port=gateway_port, protocol='http') return c.mutate(mutation=mutation) def _test_gql_error(gateway_port, error_port): with pytest.raises(ConnectionError) as err_info: # assert correct error is thrown _send_gql_request(gateway_port) # assert error message contains useful info assert str(error_port) in err_info.value.args[0] def _create_gqlgateway_runtime(graph_description, pod_addresses, port): with AsyncNewLoopRuntime( set_gateway_parser().parse_args( [ '--graph-description', graph_description, '--deployments-addresses', pod_addresses, '--port', str(port), '--expose-graphql-endpoint', '--protocol', 'http', ] ), 
req_handler_cls=GatewayRequestHandler, ) as runtime: runtime.run_forever() def _create_gqlgateway(port, graph, pod_addr): p = multiprocessing.Process( target=_create_gqlgateway_runtime, args=(graph, pod_addr, port), ) p.start() return p @pytest.mark.asyncio async def test_runtimes_graphql(port_generator): # create gateway and workers manually, then terminate worker process to provoke an error worker_port = port_generator() gateway_port = port_generator() graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}' pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}' worker_process = _create_worker(worker_port) gateway_process = _create_gqlgateway(gateway_port, graph_description, pod_addresses) time.sleep(5.0) BaseServer.wait_for_ready_or_shutdown( timeout=5.0, ctrl_address=f'0.0.0.0:{worker_port}', ready_or_shutdown_event=multiprocessing.Event(), ) BaseServer.wait_for_ready_or_shutdown( timeout=5.0, ctrl_address=f'0.0.0.0:{gateway_port}', ready_or_shutdown_event=multiprocessing.Event(), ) worker_process.terminate() # kill worker worker_process.join() try: # ----------- 1. test that useful errors are given ----------- # we have to do this in a new process because otherwise grpc will be sad and everything will crash :( p = multiprocessing.Process( target=_test_gql_error, args=(gateway_port, worker_port) ) p.start() p.join() assert ( p.exitcode == 0 ) # if exitcode != 0 then test in other process did not pass and this should fail # ----------- 2. 
test that gateways remain alive ----------- # just do the same again, expecting the same outcome p = multiprocessing.Process( target=_test_gql_error, args=(gateway_port, worker_port) ) p.start() p.join() assert ( p.exitcode == 0 ) # if exitcode != 0 then test in other process did not pass and this should fail except Exception: raise finally: # clean up runtimes gateway_process.terminate() worker_process.terminate() gateway_process.join() worker_process.join() @pytest.mark.asyncio async def test_replica_retry(port_generator): # test that if one replica is down, the other replica(s) will be used # create gateway and workers manually, then terminate worker process to provoke an error worker_ports = [port_generator() for _ in range(3)] worker0_port, worker1_port, worker2_port = worker_ports gateway_port = port_generator() graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}' pod_addresses = f'{{"pod0": ["0.0.0.0:{worker0_port}", "0.0.0.0:{worker1_port}", "0.0.0.0:{worker2_port}"]}}' worker_processes = [] for p in worker_ports: worker_processes.append(_create_worker(p)) time.sleep(3.0) BaseServer.wait_for_ready_or_shutdown( timeout=5.0, ctrl_address=f'0.0.0.0:{p}', ready_or_shutdown_event=multiprocessing.Event(), ) gateway_process = _create_gateway( gateway_port, graph_description, pod_addresses, 'grpc' ) time.sleep(3.0) BaseServer.wait_for_ready_or_shutdown( timeout=5.0, ctrl_address=f'0.0.0.0:{gateway_port}', ready_or_shutdown_event=multiprocessing.Event(), ) try: # ----------- 1. ping Flow once to trigger endpoint discovery ----------- # we have to do this in a new process because otherwise grpc will be sad and everything will crash :( p = multiprocessing.Process(target=_send_request, args=(gateway_port, 'grpc')) p.start() p.join() assert p.exitcode == 0 # kill second worker, which would be responsible for the second call (round robin) worker_processes[1].terminate() worker_processes[1].join() # ----------- 2. 
test that redundant replicas take over ----------- # we have to do this in a new process because otherwise grpc will be sad and everything will crash :( p = multiprocessing.Process(target=_send_request, args=(gateway_port, 'grpc')) p.start() p.join() assert p.exitcode == 0 except Exception: assert False finally: # clean up runtimes gateway_process.terminate() gateway_process.join() for p in worker_processes: p.terminate() p.join() @pytest.mark.asyncio async def test_replica_retry_all_fail(port_generator): # test that if one replica is down, the other replica(s) will be used # create gateway and workers manually, then terminate worker process to provoke an error worker_ports = [port_generator() for _ in range(3)] worker0_port, worker1_port, worker2_port = worker_ports gateway_port = port_generator() graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}' pod_addresses = f'{{"pod0": ["0.0.0.0:{worker0_port}", "0.0.0.0:{worker1_port}", "0.0.0.0:{worker2_port}"]}}' worker_processes = [] for p in worker_ports: worker_processes.append(_create_worker(p)) time.sleep(3.0) BaseServer.wait_for_ready_or_shutdown( timeout=5.0, ctrl_address=f'0.0.0.0:{p}', ready_or_shutdown_event=multiprocessing.Event(), ) gateway_process = _create_gateway( gateway_port, graph_description, pod_addresses, 'grpc' ) time.sleep(3.0) BaseServer.wait_for_ready_or_shutdown( timeout=5.0, ctrl_address=f'0.0.0.0:{gateway_port}', ready_or_shutdown_event=multiprocessing.Event(), ) try: # ----------- 1. ping Flow once to trigger endpoint discovery ----------- # we have to do this in a new process because otherwise grpc will be sad and everything will crash :( p = multiprocessing.Process(target=_send_request, args=(gateway_port, 'grpc')) p.start() p.join() assert p.exitcode == 0 # kill all workers for p in worker_processes: p.terminate() p.join() # ----------- 2. 
test that call fails with informative error message ----------- # we have to do this in a new process because otherwise grpc will be sad and everything will crash :( p = multiprocessing.Process( target=_test_error, args=(gateway_port, worker_ports, 'grpc') ) p.start() p.join() assert p.exitcode == 0 except Exception: assert False finally: # clean up runtimes gateway_process.terminate() gateway_process.join() for p in worker_processes: p.terminate() p.join()
DummyExec
python
pytorch__pytorch
torch/_inductor/tiling_utils.py
{ "start": 6788, "end": 8768 }
class ____: """ Normalized reads and writes for nodes in the same FusedSchedulerNode. """ index_vars: OrderedSet[sympy.Symbol] reduce_vars: OrderedSet[sympy.Symbol] reads: dict[sympy.Expr, OrderedSet[str]] writes: dict[sympy.Expr, OrderedSet[str]] var_ranges: dict[sympy.Symbol, int] @overload def get_pw_red_splits( n: "SchedulerNode", pointwise_numel: sympy.Expr, red_numel: sympy.Expr, none_if_not_divisible: Literal[True], ) -> Optional[tuple[VarsAndRanges, VarsAndRanges]]: ... @overload def get_pw_red_splits( n: "SchedulerNode", pointwise_numel: sympy.Expr, red_numel: sympy.Expr, none_if_not_divisible: Literal[False] = False, ) -> tuple[VarsAndRanges, VarsAndRanges]: ... def get_pw_red_splits( n: "SchedulerNode", pointwise_numel: sympy.Expr, red_numel: sympy.Expr, none_if_not_divisible: bool = False, ) -> Optional[tuple[VarsAndRanges, VarsAndRanges]]: if n.is_reduction() or sympy_product(n._body.sizes[0]) == pointwise_numel: return ( (n._body.iter_vars, n._body.sizes[0]), (n._body.reduce_vars, n._body.sizes[1]), ) # type: ignore[return-value] assert sympy_product(n._body.sizes[0]) == pointwise_numel * red_numel # type: ignore[operator] i = len(n._body.sizes[0]) - 1 prod = 1 while i >= 0: prod *= n._body.sizes[0][i] if prod == red_numel: break i -= 1 if i >= 0: pw_splits = n._body.sizes[0][0:i] iter_vars = n._body.iter_vars[0:i] red_splits = n._body.sizes[0][i:] red_vars = n._body.iter_vars[i:] return (iter_vars, pw_splits), (red_vars, red_splits) # type: ignore[return-value] if none_if_not_divisible: return None else: return ( (n._body.iter_vars, n._body.sizes[0]), (n._body.reduce_vars, n._body.sizes[1]), ) # type: ignore[return-value]
FusedNormalizedReadsWrites
python
davidhalter__jedi
test/completion/descriptors.py
{ "start": 492, "end": 915 }
class ____(object): x = RevealAccess(10, 'var "x"') #? RevealAccess() x #? ['just_a_method'] x.just_a_method y = 5.0 def __init__(self): #? int() self.x #? [] self.just_a_method #? [] C.just_a_method m = C() #? int() m.x #? float() m.y #? int() C.x #? [] m.just_a_method #? [] C.just_a_method # ----------------- # properties # -----------------
C
python
Textualize__textual
docs/examples/how-to/layout02.py
{ "start": 354, "end": 491 }
class ____(Screen): def compose(self) -> ComposeResult: yield Header(id="Header") yield Footer(id="Footer")
TweetScreen
python
nedbat__coveragepy
coverage/regions.py
{ "start": 488, "end": 4500 }
class ____: """An ast visitor that will find and track regions of code. Functions and classes are tracked by name. Results are in the .regions attribute. """ def __init__(self) -> None: self.regions: list[CodeRegion] = [] self.context: list[Context] = [] def parse_source(self, source: str) -> None: """Parse `source` and walk the ast to populate the .regions attribute.""" self.handle_node(ast.parse(source)) def fq_node_name(self) -> str: """Get the current fully qualified name we're processing.""" return ".".join(c.name for c in self.context) def handle_node(self, node: ast.AST) -> None: """Recursively handle any node.""" if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)): self.handle_FunctionDef(node) elif isinstance(node, ast.ClassDef): self.handle_ClassDef(node) else: self.handle_node_body(node) def handle_node_body(self, node: ast.AST) -> None: """Recursively handle the nodes in this node's body, if any.""" for body_node in getattr(node, "body", ()): self.handle_node(body_node) def handle_FunctionDef(self, node: ast.FunctionDef | ast.AsyncFunctionDef) -> None: """Called for `def` or `async def`.""" lines = set(range(node.body[0].lineno, cast(int, node.body[-1].end_lineno) + 1)) if self.context and self.context[-1].kind == "class": # Function bodies are part of their enclosing class. self.context[-1].lines |= lines # Function bodies should be excluded from the nearest enclosing function. for ancestor in reversed(self.context): if ancestor.kind == "function": ancestor.lines -= lines break self.context.append(Context(node.name, "function", lines)) self.regions.append( CodeRegion( kind="function", name=self.fq_node_name(), start=node.lineno, lines=lines, ) ) self.handle_node_body(node) self.context.pop() def handle_ClassDef(self, node: ast.ClassDef) -> None: """Called for `class`.""" # The lines for a class are the lines in the methods of the class. # We start empty, and count on visit_FunctionDef to add the lines it # finds. 
lines: set[int] = set() self.context.append(Context(node.name, "class", lines)) self.regions.append( CodeRegion( kind="class", name=self.fq_node_name(), start=node.lineno, lines=lines, ) ) self.handle_node_body(node) self.context.pop() # Class bodies should be excluded from the enclosing classes. for ancestor in reversed(self.context): if ancestor.kind == "class": ancestor.lines -= lines def code_regions(source: str) -> list[CodeRegion]: """Find function and class regions in source code. Analyzes the code in `source`, and returns a list of :class:`CodeRegion` objects describing functions and classes as regions of the code:: [ CodeRegion(kind="function", name="func1", start=8, lines={10, 11, 12}), CodeRegion(kind="function", name="MyClass.method", start=30, lines={34, 35, 36}), CodeRegion(kind="class", name="MyClass", start=25, lines={34, 35, 36}), ] The line numbers will include comments and blank lines. Later processing will need to ignore those lines as needed. Nested functions and classes are excluded from their enclosing region. No line should be reported as being part of more than one function, or more than one class. Lines in methods are reported as being in a function and in a class. """ rf = RegionFinder() rf.parse_source(source) return rf.regions
RegionFinder
python
arrow-py__arrow
arrow/locales.py
{ "start": 112079, "end": 113539 }
class ____(Locale): names = ["ms", "ms-my", "ms-bn"] past = "{0} yang lalu" future = "dalam {0}" and_word = "dan" timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = { "now": "sekarang", "second": "saat", "seconds": "{0} saat", "minute": "minit", "minutes": "{0} minit", "hour": "jam", "hours": "{0} jam", "day": "hari", "days": "{0} hari", "week": "minggu", "weeks": "{0} minggu", "month": "bulan", "months": "{0} bulan", "year": "tahun", "years": "{0} tahun", } month_names = [ "", "Januari", "Februari", "Mac", "April", "Mei", "Jun", "Julai", "Ogos", "September", "Oktober", "November", "Disember", ] month_abbreviations = [ "", "Jan.", "Feb.", "Mac", "Apr.", "Mei", "Jun", "Julai", "Og.", "Sept.", "Okt.", "Nov.", "Dis.", ] day_names = [ "", "Isnin", "Selasa", "Rabu", "Khamis", "Jumaat", "Sabtu", "Ahad", ] day_abbreviations = [ "", "Isnin", "Selasa", "Rabu", "Khamis", "Jumaat", "Sabtu", "Ahad", ]
MalayLocale
python
getsentry__sentry
src/social_auth/backends/__init__.py
{ "start": 14703, "end": 19136 }
class ____(OAuthAuth): """Consumer based mechanism OAuth authentication, fill the needed parameters to communicate properly with authentication service. AUTHORIZATION_URL Authorization service url REQUEST_TOKEN_URL Request token URL ACCESS_TOKEN_URL Access token URL """ AUTHORIZATION_URL = "" REQUEST_TOKEN_URL = "" ACCESS_TOKEN_URL = "" def auth_url(self): """Return redirect url""" token = self.unauthorized_token() name = self.AUTH_BACKEND.name + "unauthorized_token_name" if not isinstance(self.request.session.get(name), list): self.request.session[name] = [] self.request.session[name].append(token.to_string()) self.request.session.modified = True return self.oauth_authorization_request(token) def auth_complete(self, *args, **kwargs): """Return user, might be logged in""" # Multiple unauthorized tokens are supported (see #521) name = self.AUTH_BACKEND.name + "unauthorized_token_name" token = None unauthed_tokens = self.request.session.get(name) or [] if not unauthed_tokens: raise AuthTokenError(self, "Missing unauthorized token") for unauthed_token in unauthed_tokens: token = unauthed_token if not isinstance(unauthed_token, dict): token = parse_qs(unauthed_token) if token.get("oauth_token") == self.data.get("oauth_token"): unauthed_tokens = list(set(unauthed_tokens) - {unauthed_token}) self.request.session[name] = unauthed_tokens self.request.session.modified = True break else: raise AuthTokenError(self, "Incorrect tokens") try: access_token = self.access_token(token) except HTTPError as e: if e.code == 400: raise AuthCanceled(self) else: raise return self.do_auth(access_token, *args, **kwargs) def do_auth(self, access_token, *args, **kwargs): """Finish the auth process once the access_token was retrieved""" data = self.user_data(access_token) if data is not None: data["access_token"] = access_token.to_string() kwargs.update({"auth": self, "response": data, self.AUTH_BACKEND.name: True}) return authenticate(*args, **kwargs) def unauthorized_token(self): """Return 
request for unauthorized token (first stage)""" params = self.request_token_extra_arguments() params.update(self.get_scope_argument()) key, secret = self.get_key_and_secret() response = self.request( url=self.REQUEST_TOKEN_URL, params=params, auth=OAuth1(key, secret, callback_uri=self.redirect_uri), ) return response.content def oauth_authorization_request(self, token): """Generate OAuth request to authorize token.""" if not isinstance(token, dict): token = parse_qs(token) params = self.auth_extra_arguments() or {} params.update(self.get_scope_argument()) params["oauth_token"] = token.get("oauth_token") params["redirect_uri"] = self.redirect_uri return self.AUTHORIZATION_URL + "?" + urlencode(params) def oauth_auth(self, token=None, oauth_verifier=None): key, secret = self.get_key_and_secret() oauth_verifier = oauth_verifier or self.data.get("oauth_verifier") token = token or {} return OAuth1( key, secret, resource_owner_key=token.get("oauth_token"), resource_owner_secret=token.get("oauth_token_secret"), callback_uri=self.redirect_uri, verifier=oauth_verifier, ) def oauth_request(self, token, url, extra_params=None, method="GET"): """Generate OAuth request, setups callback url""" return self.request(url, auth=self.oauth_auth(token)) def fetch_response(self, request): """Executes request and fetches service response""" response = dsa_urlopen(request.to_url()) return "\n".join(response.readlines()) def access_token(self, token): """Return request for access token value""" return self.request(self.ACCESS_TOKEN_URL, auth=self.oauth_auth(token))
BaseOAuth1
python
ApeWorX__ape
src/ape/exceptions.py
{ "start": 2118, "end": 2360 }
class ____(ApeException): """ Raised when issues occur with local contract. **NOTE**: This error has nothing to do with on-chain contract logic errors; it is more about ABI-related issues and alike. """
ContractDataError
python
huggingface__transformers
src/transformers/models/glm46v/modular_glm46v.py
{ "start": 1179, "end": 4839 }
class ____(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`Glm4vModel`]. It is used to instantiate a GLM-4.6V model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of GLM-4.1V-9B-Thinking [zai-org/GLM-4.1V-9B-Thinking](https://huggingface.co/zai-org/GLM-4.1V-9B-Thinking). Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Glm4vTextConfig`): The config object or dictionary of the text backbone. vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Glm4vVisionConfig`): The config object or dictionary of the vision backbone. image_token_id (`int`, *optional*, defaults to 151343): The image token index to encode the image prompt. video_token_id (`int`, *optional*, defaults to 151344): The video token index to encode the image prompt. image_start_token_id (`int`, *optional*, defaults to 151339): The image start token index to encode the start of image. image_end_token_id (`int`, *optional*, defaults to 151340): The image end token index to encode the end of image. video_start_token_id (`int`, *optional*, defaults to 151361): The video start token index to encode the start of video. video_end_token_id (`int`, *optional*, defaults to 151362): The video end token index to encode the end of video. 
```python >>> from transformers import Glm46VForConditionalGeneration, Glm46VConfig >>> # Initializing a GLM-4.6V style configuration >>> configuration = Glm46VConfig() >>> # Initializing a model from the GLM-4.6V style configuration >>> model = Glm4vForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "glm46v" sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig} keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, text_config=None, vision_config=None, image_token_id=151343, video_token_id=151344, image_start_token_id=151339, image_end_token_id=151340, video_start_token_id=151361, video_end_token_id=151362, **kwargs, ): if isinstance(vision_config, dict): vision_config["model_type"] = vision_config.get("model_type", "glm4v_vision") self.vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config) elif vision_config is None: self.vision_config = CONFIG_MAPPING["glm4v_vision"]() if isinstance(text_config, dict): text_config["model_type"] = text_config.get("model_type", "glm4v_text") self.text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config) elif text_config is None: self.text_config = CONFIG_MAPPING["glm4v_text"]() self.image_token_id = image_token_id self.video_token_id = video_token_id self.video_start_token_id = video_start_token_id self.video_end_token_id = video_end_token_id self.image_start_token_id = image_start_token_id self.image_end_token_id = image_end_token_id super().__init__(**kwargs)
Glm46VConfig
python
getsentry__sentry
src/sentry/api/serializers/models/rule.py
{ "start": 1731, "end": 1806 }
class ____(TypedDict): id: int name: str email: str
RuleCreatedBy
python
catalyst-team__catalyst
catalyst/contrib/data/sampler_inbatch.py
{ "start": 7999, "end": 14153 }
class ____(IInbatchTripletSampler): """ This sampler selects hardest triplets based on distance to mean vectors: anchor is a mean vector of features of i-th class in the batch, the hardest positive sample is the most distant from anchor sample of anchor's class, the hardest negative sample is the closest mean vector of another classes. The batch must contain k samples for p classes in it (k > 1, p > 1). """ def _check_input_labels(self, labels: List[int]) -> None: """ Check if the labels list is valid: contains k occurrences for each of p classes. Args: labels: labels in the batch Raises: ValueError: if batch is invalid (contains different samples for classes, contains only one class or only one sample for each class) """ labels_counter = Counter(labels) k = labels_counter[labels[0]] if not all(n == k for n in labels_counter.values()): raise ValueError("Expected equal number of samples for each class") if len(labels_counter) <= 1: raise ValueError("Expected at least 2 classes in the batch") if k == 1: raise ValueError("Expected more than one sample for each class") @staticmethod def _get_labels_mask(labels: List[int]) -> Tensor: """ Generate matrix of bool of shape (n_unique_labels, batch_size), where n_unique_labels is a number of unique labels in the batch; matrix[i, j] is True if j-th element of the batch relates to i-th class and False otherwise. Args: labels: labels of the batch, shape (batch_size) Returns: matrix of indices of classes in batch """ unique_labels = sorted(np.unique(labels)) labels_number = len(unique_labels) labels_mask = torch.zeros(size=(labels_number, len(labels))) for label_idx, label in enumerate(unique_labels): label_indices = find_value_ids(labels, label) labels_mask[label_idx][label_indices] = 1 return labels_mask.type(torch.bool) @staticmethod def _count_intra_class_distances(embeddings: Tensor, mean_vectors: Tensor) -> Tensor: """ Count matrix of distances from mean vector of each class to it's samples embeddings. 
Args: embeddings: tensor of shape (p, k, embed_dim) where p is a number of classes in the batch, k is a number of samples for each class mean_vectors: tensor of shape (p, embed_dim) -- mean vectors of each class in the batch Returns: tensor of shape (p, k) -- matrix of distances from mean vectors to related samples in the batch """ p, k, embed_dim = embeddings.shape # Create (p, k, embed_dim) tensor of mean vectors for each class mean_vectors = mean_vectors.unsqueeze(1).repeat((1, k, 1)) # Count euclidean distance between embeddings and mean vectors distances = torch.pow(embeddings - mean_vectors, 2).sum(2) return distances @staticmethod def _count_inter_class_distances(mean_vectors: Tensor) -> Tensor: """ Count matrix of distances from mean vectors of classes to each other Args: mean_vectors: tensor of shape (p, embed_dim) -- mean vectors of classes Returns: tensor of shape (p, p) -- matrix of distances between mean vectors """ distance = torch.cdist(x1=mean_vectors, x2=mean_vectors, p=2) return distance @staticmethod def _fill_diagonal(matrix: Tensor, value: float) -> Tensor: """ Set diagonal elements with the value. Args: matrix: tensor of shape (p, p) value: value that diagonal should be filled with Returns: modified matrix with inf on diagonal """ p, _ = matrix.shape indices = torch.diag(torch.ones(p)).type(torch.bool) matrix[indices] = value return matrix def sample(self, features: Tensor, labels: TLabels) -> TTriplets: """ This method samples the hardest triplets in the batch. 
Args: features: tensor of shape (batch_size; embed_dim) that contains k samples for each of p classes labels: labels of the batch, list or tensor of size (batch_size) Returns: p triplets of (mean_vector, positive, negative_mean_vector) """ # Convert labels to list labels = convert_labels2list(labels) self._check_input_labels(labels) # Get matrix of indices of labels in batch labels_mask = self._get_labels_mask(labels) p = labels_mask.shape[0] embed_dim = features.shape[-1] # Reshape embeddings to groups of (p, k, embed_dim) ones, # each i-th group contains embeddings of i-th class. features = features.repeat((p, 1, 1)) features = features[labels_mask].view((p, -1, embed_dim)) # Count mean vectors for each class in batch mean_vectors = features.mean(1) d_intra = self._count_intra_class_distances(features, mean_vectors) # Count the distances to the sample farthest from mean vector # for each class. pos_indices = d_intra.max(1).indices # Count matrix of distances from mean vectors to each other d_inter = self._count_inter_class_distances(mean_vectors) # For each class mean vector get the closest mean vector d_inter = self._fill_diagonal(d_inter, float("inf")) neg_indices = d_inter.min(1).indices positives = torch.stack( [features[idx][pos_idx] for idx, pos_idx in enumerate(pos_indices)] ) return mean_vectors, positives, mean_vectors[neg_indices] __all__ = [ "IInbatchTripletSampler", "InBatchTripletsSampler", "AllTripletsSampler", "HardTripletsSampler", "HardClusterSampler", ]
HardClusterSampler
python
huggingface__transformers
src/transformers/models/perceiver/modeling_perceiver.py
{ "start": 15308, "end": 19778 }
class ____(nn.Module): """The Perceiver Encoder: a scalable, fully attentional encoder.""" def __init__(self, config, kv_dim=None): super().__init__() self.config = config # Check that we can use multihead-attention with these shapes. if config.d_latents % config.num_self_attention_heads != 0: raise ValueError( f"num_z_channels ({config.d_latents}) must be divisible by" f" num_self_attend_heads ({config.num_self_attention_heads})." ) if config.d_latents % config.num_cross_attention_heads != 0: raise ValueError( f"num_z_channels ({config.d_latents}) must be divisible by" f" num_cross_attend_heads ({config.num_cross_attention_heads})." ) # Construct the cross attention layer. self.cross_attention = PerceiverLayer( config, is_cross_attention=True, qk_channels=config.qk_channels, v_channels=config.v_channels, num_heads=config.num_cross_attention_heads, q_dim=config.d_latents, kv_dim=kv_dim, widening_factor=config.cross_attention_widening_factor, use_query_residual=config.use_query_residual, ) # Construct a single block of self-attention layers. # We get deeper architectures by applying this block more than once. 
self_attention_layers = [] for _ in range(config.num_self_attends_per_block): layer = PerceiverLayer( config, is_cross_attention=False, qk_channels=config.qk_channels, v_channels=config.v_channels, num_heads=config.num_self_attention_heads, q_dim=config.d_latents, kv_dim=config.d_latents, widening_factor=config.self_attention_widening_factor, ) self_attention_layers.append(layer) self.self_attends = nn.ModuleList(self_attention_layers) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, inputs: Optional[torch.FloatTensor] = None, inputs_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[tuple, BaseModelOutputWithCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions else None # Apply the cross-attention between the latents (hidden_states) and inputs: layer_outputs = self.cross_attention( hidden_states, attention_mask=attention_mask, inputs=inputs, inputs_mask=inputs_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_cross_attentions = all_cross_attentions + (layer_outputs[1],) # Apply the block of self-attention layers more than once: for _ in range(self.config.num_blocks): for i, layer_module in enumerate(self.self_attends): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, all_hidden_states, all_self_attentions, 
all_cross_attentions] if v is not None ) return BaseModelOutputWithCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) @auto_docstring
PerceiverEncoder
python
spack__spack
lib/spack/spack/vendor/jinja2/bccache.py
{ "start": 9802, "end": 12748 }
class ____(BytecodeCache): """This class implements a bytecode cache that uses a memcache cache for storing the information. It does not enforce a specific memcache library (tummy's memcache or cmemcache) but will accept any class that provides the minimal interface required. Libraries compatible with this class: - `cachelib <https://github.com/pallets/cachelib>`_ - `python-memcached <https://pypi.org/project/python-memcached/>`_ (Unfortunately the django cache interface is not compatible because it does not support storing binary data, only text. You can however pass the underlying cache client to the bytecode cache which is available as `django.core.cache.cache._client`.) The minimal interface for the client passed to the constructor is this: .. class:: MinimalClientInterface .. method:: set(key, value[, timeout]) Stores the bytecode in the cache. `value` is a string and `timeout` the timeout of the key. If timeout is not provided a default timeout or no timeout should be assumed, if it's provided it's an integer with the number of seconds the cache item should exist. .. method:: get(key) Returns the value for the cache key. If the item does not exist in the cache the return value must be `None`. The other arguments to the constructor are the prefix for all keys that is added before the actual cache key and the timeout for the bytecode in the cache system. We recommend a high (or no) timeout. This bytecode cache does not support clearing of used items in the cache. The clear method is a no-operation function. .. versionadded:: 2.7 Added support for ignoring memcache errors through the `ignore_memcache_errors` parameter. 
""" def __init__( self, client: "_MemcachedClient", prefix: str = "spack.vendor.jinja2/bytecode/", timeout: t.Optional[int] = None, ignore_memcache_errors: bool = True, ): self.client = client self.prefix = prefix self.timeout = timeout self.ignore_memcache_errors = ignore_memcache_errors def load_bytecode(self, bucket: Bucket) -> None: try: code = self.client.get(self.prefix + bucket.key) except Exception: if not self.ignore_memcache_errors: raise else: bucket.bytecode_from_string(code) def dump_bytecode(self, bucket: Bucket) -> None: key = self.prefix + bucket.key value = bucket.bytecode_to_string() try: if self.timeout is not None: self.client.set(key, value, self.timeout) else: self.client.set(key, value) except Exception: if not self.ignore_memcache_errors: raise
MemcachedBytecodeCache
python
vyperlang__vyper
vyper/venom/basicblock.py
{ "start": 3617, "end": 3979 }
class ____(IROperand): """ IRLiteral represents a literal in IR """ value: int def __init__(self, value: int) -> None: assert isinstance(value, int), value super().__init__(value) def __repr__(self) -> str: if abs(self.value) < 1024: return str(self.value) return f"0x{self.value:x}"
IRLiteral
python
aio-libs__aiohttp
aiohttp/client_exceptions.py
{ "start": 1356, "end": 1439 }
class ____(Exception): """Base class for client connection errors."""
ClientError
python
coleifer__peewee
tests/base.py
{ "start": 6574, "end": 7008 }
class ____(BaseTestCase): database = db def setUp(self): if not self.database.is_closed(): self.database.close() self.database.connect() super(DatabaseTestCase, self).setUp() def tearDown(self): super(DatabaseTestCase, self).tearDown() self.database.close() def execute(self, sql, params=None): return self.database.execute_sql(sql, params)
DatabaseTestCase
python
PrefectHQ__prefect
src/integrations/prefect-dbt/prefect_dbt/cloud/exceptions.py
{ "start": 188, "end": 308 }
class ____(DbtCloudException): """Raised when unable to list dbt Cloud run artifacts"""
DbtCloudListRunArtifactsFailed
python
pola-rs__polars
py-polars/src/polars/string_cache.py
{ "start": 558, "end": 5483 }
class ____(contextlib.ContextDecorator): """ Context manager for enabling and disabling the global string cache. :class:`Categorical` columns created under the same global string cache have the same underlying physical value when string values are equal. This allows the columns to be concatenated or used in a join operation, for example. Notes ----- Enabling the global string cache introduces some overhead. The amount of overhead depends on the number of categories in your data. It is advised to enable the global string cache only when strictly necessary. If `StringCache` calls are nested, the global string cache will only be disabled and cleared when the outermost context exits. Examples -------- Construct two Series using the same global string cache. >>> with pl.StringCache(): ... s1 = pl.Series("color", ["red", "green", "red"], dtype=pl.Categorical) ... s2 = pl.Series("color", ["blue", "red", "green"], dtype=pl.Categorical) As both Series are constructed under the same global string cache, they can be concatenated. >>> pl.concat([s1, s2]) shape: (6,) Series: 'color' [cat] [ "red" "green" "red" "blue" "red" "green" ] The class can also be used as a function decorator, in which case the string cache is enabled during function execution, and disabled afterwards. >>> @pl.StringCache() ... def construct_categoricals() -> pl.Series: ... s1 = pl.Series("color", ["red", "green", "red"], dtype=pl.Categorical) ... s2 = pl.Series("color", ["blue", "red", "green"], dtype=pl.Categorical) ... return pl.concat([s1, s2]) """ def __enter__(self) -> Self: self._string_cache = PyStringCacheHolder() return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> None: del self._string_cache def enable_string_cache() -> None: """ Enable the global string cache. :class:`Categorical` columns created under the same global string cache have the same underlying physical value when string values are equal. 
This allows the columns to be concatenated or used in a join operation, for example. See Also -------- StringCache : Context manager for enabling and disabling the string cache. disable_string_cache : Function to disable the string cache. Notes ----- Enabling the global string cache introduces some overhead. The amount of overhead depends on the number of categories in your data. It is advised to enable the global string cache only when strictly necessary. Consider using the :class:`StringCache` context manager for a more reliable way of enabling and disabling the string cache. Examples -------- Construct two Series using the same global string cache. >>> pl.enable_string_cache() >>> s1 = pl.Series("color", ["red", "green", "red"], dtype=pl.Categorical) >>> s2 = pl.Series("color", ["blue", "red", "green"], dtype=pl.Categorical) >>> pl.disable_string_cache() As both Series are constructed under the same global string cache, they can be concatenated. >>> pl.concat([s1, s2]) shape: (6,) Series: 'color' [cat] [ "red" "green" "red" "blue" "red" "green" ] """ plr.enable_string_cache() def disable_string_cache() -> None: """ Disable and clear the global string cache. See Also -------- enable_string_cache : Function to enable the string cache. StringCache : Context manager for enabling and disabling the string cache. Notes ----- Consider using the :class:`StringCache` context manager for a more reliable way of enabling and disabling the string cache. When used in conjunction with the :class:`StringCache` context manager, the string cache will not be disabled until the context manager exits. Examples -------- Construct two Series using the same global string cache. >>> pl.enable_string_cache() >>> s1 = pl.Series("color", ["red", "green", "red"], dtype=pl.Categorical) >>> s2 = pl.Series("color", ["blue", "red", "green"], dtype=pl.Categorical) >>> pl.disable_string_cache() As both Series are constructed under the same global string cache, they can be concatenated. 
>>> pl.concat([s1, s2]) shape: (6,) Series: 'color' [cat] [ "red" "green" "red" "blue" "red" "green" ] """ def using_string_cache() -> bool: """Check whether the global string cache is enabled.""" return plr.using_string_cache()
StringCache
python
pydata__xarray
xarray/core/_typed_ops.py
{ "start": 49534, "end": 54365 }
class ____: __slots__ = () def _binary_op( self, other: T_Xarray, f: Callable, reflexive: bool = False ) -> T_Xarray: raise NotImplementedError def __add__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.add) def __sub__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.sub) def __mul__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.mul) def __pow__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.pow) def __truediv__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.truediv) def __floordiv__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.floordiv) def __mod__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.mod) def __and__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.and_) def __xor__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.xor) def __or__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.or_) def __lshift__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.lshift) def __rshift__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.rshift) def __lt__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.lt) def __le__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.le) def __gt__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.gt) def __ge__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.ge) def __eq__(self, other: T_Xarray) -> T_Xarray: # type:ignore[override] return self._binary_op(other, nputils.array_eq) def __ne__(self, other: T_Xarray) -> T_Xarray: # type:ignore[override] return self._binary_op(other, nputils.array_ne) # When __eq__ is defined but __hash__ is not, then an object is unhashable, # and it should be declared as follows: 
__hash__: None # type:ignore[assignment] def __radd__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.add, reflexive=True) def __rsub__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.sub, reflexive=True) def __rmul__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.mul, reflexive=True) def __rpow__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.pow, reflexive=True) def __rtruediv__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.truediv, reflexive=True) def __rfloordiv__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.floordiv, reflexive=True) def __rmod__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.mod, reflexive=True) def __rand__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.and_, reflexive=True) def __rxor__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.xor, reflexive=True) def __ror__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.or_, reflexive=True) __add__.__doc__ = operator.add.__doc__ __sub__.__doc__ = operator.sub.__doc__ __mul__.__doc__ = operator.mul.__doc__ __pow__.__doc__ = operator.pow.__doc__ __truediv__.__doc__ = operator.truediv.__doc__ __floordiv__.__doc__ = operator.floordiv.__doc__ __mod__.__doc__ = operator.mod.__doc__ __and__.__doc__ = operator.and_.__doc__ __xor__.__doc__ = operator.xor.__doc__ __or__.__doc__ = operator.or_.__doc__ __lshift__.__doc__ = operator.lshift.__doc__ __rshift__.__doc__ = operator.rshift.__doc__ __lt__.__doc__ = operator.lt.__doc__ __le__.__doc__ = operator.le.__doc__ __gt__.__doc__ = operator.gt.__doc__ __ge__.__doc__ = operator.ge.__doc__ __eq__.__doc__ = nputils.array_eq.__doc__ __ne__.__doc__ = nputils.array_ne.__doc__ __radd__.__doc__ = operator.add.__doc__ __rsub__.__doc__ = operator.sub.__doc__ __rmul__.__doc__ = operator.mul.__doc__ 
__rpow__.__doc__ = operator.pow.__doc__ __rtruediv__.__doc__ = operator.truediv.__doc__ __rfloordiv__.__doc__ = operator.floordiv.__doc__ __rmod__.__doc__ = operator.mod.__doc__ __rand__.__doc__ = operator.and_.__doc__ __rxor__.__doc__ = operator.xor.__doc__ __ror__.__doc__ = operator.or_.__doc__
DataArrayGroupByOpsMixin
python
django__django
tests/force_insert_update/models.py
{ "start": 241, "end": 318 }
class ____(Counter): tag = models.CharField(max_length=10)
InheritedCounter
python
ray-project__ray
rllib/offline/tests/test_feature_importance.py
{ "start": 210, "end": 1325 }
class ____(unittest.TestCase): def setUp(self): ray.init() def tearDown(self): ray.shutdown() def test_feat_importance_cartpole(self): config = ( MARWILConfig() .api_stack( enable_rl_module_and_learner=False, enable_env_runner_and_connector_v2=False, ) .environment("CartPole-v1") .framework("torch") ) algo = config.build() policy = algo.env_runner.get_policy() sample_batch = synchronous_parallel_sample(worker_set=algo.env_runner_group) for repeat in [1, 10]: evaluator = FeatureImportance(policy=policy, repeat=repeat) estimate = evaluator.estimate(sample_batch) # Check if the estimate is positive. assert all(val > 0 for val in estimate.values()) def test_feat_importance_estimate_on_dataset(self): # TODO (Kourosh): add a test for this pass if __name__ == "__main__": import sys import pytest sys.exit(pytest.main(["-v", __file__]))
TestFeatureImportance
python
huggingface__transformers
src/transformers/pipelines/object_detection.py
{ "start": 645, "end": 8346 }
class ____(Pipeline): """ Object detection pipeline using any `AutoModelForObjectDetection`. This pipeline predicts bounding boxes of objects and their classes. Example: ```python >>> from transformers import pipeline >>> detector = pipeline(model="facebook/detr-resnet-50") >>> detector("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png") [{'score': 0.997, 'label': 'bird', 'box': {'xmin': 69, 'ymin': 171, 'xmax': 396, 'ymax': 507}}, {'score': 0.999, 'label': 'bird', 'box': {'xmin': 398, 'ymin': 105, 'xmax': 767, 'ymax': 507}}] >>> # x, y are expressed relative to the top left hand corner. ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This object detection pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"object-detection"`. See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=object-detection). """ _load_processor = False _load_image_processor = True _load_feature_extractor = False _load_tokenizer = None def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) requires_backends(self, "vision") mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES.copy() mapping.update(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES) self.check_model_type(mapping) def _sanitize_parameters(self, **kwargs): preprocess_params = {} if "timeout" in kwargs: preprocess_params["timeout"] = kwargs["timeout"] postprocess_kwargs = {} if "threshold" in kwargs: postprocess_kwargs["threshold"] = kwargs["threshold"] return preprocess_params, {}, postprocess_kwargs @overload def __call__(self, image: Union[str, "Image.Image"], *args: Any, **kwargs: Any) -> list[dict[str, Any]]: ... @overload def __call__( self, image: list[str] | list["Image.Image"], *args: Any, **kwargs: Any ) -> list[list[dict[str, Any]]]: ... 
def __call__(self, *args, **kwargs) -> list[dict[str, Any]] | list[list[dict[str, Any]]]: """ Detect objects (bounding boxes & classes) in the image(s) passed as inputs. Args: inputs (`str`, `list[str]`, `PIL.Image` or `list[PIL.Image]`): The pipeline handles three types of images: - A string containing an HTTP(S) link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the same format: all as HTTP(S) links, all as local paths, or all as PIL images. threshold (`float`, *optional*, defaults to 0.5): The probability necessary to make a prediction. timeout (`float`, *optional*, defaults to None): The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever. Return: A list of dictionaries or a list of list of dictionaries containing the result. If the input is a single image, will return a list of dictionaries, if the input is a list of several images, will return a list of list of dictionaries corresponding to each image. The dictionaries contain the following keys: - **label** (`str`) -- The class label identified by the model. - **score** (`float`) -- The score attributed by the model for that label. - **box** (`list[dict[str, int]]`) -- The bounding box of detected object in image's original size. 
""" # After deprecation of this is completed, remove the default `None` value for `images` if "images" in kwargs and "inputs" not in kwargs: kwargs["inputs"] = kwargs.pop("images") return super().__call__(*args, **kwargs) def preprocess(self, image, timeout=None): image = load_image(image, timeout=timeout) target_size = torch.IntTensor([[image.height, image.width]]) inputs = self.image_processor(images=[image], return_tensors="pt") inputs = inputs.to(self.dtype) if self.tokenizer is not None: inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt") inputs["target_size"] = target_size return inputs def _forward(self, model_inputs): target_size = model_inputs.pop("target_size") outputs = self.model(**model_inputs) model_outputs = outputs.__class__({"target_size": target_size, **outputs}) if self.tokenizer is not None: model_outputs["bbox"] = model_inputs["bbox"] return model_outputs def postprocess(self, model_outputs, threshold=0.5): target_size = model_outputs["target_size"] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. 
height, width = target_size[0].tolist() def unnormalize(bbox): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1000), (height * bbox[1] / 1000), (width * bbox[2] / 1000), (height * bbox[3] / 1000), ] ) ) scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1) labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()] boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)] keys = ["score", "label", "box"] annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size) raw_annotation = raw_annotations[0] scores = raw_annotation["scores"] labels = raw_annotation["labels"] boxes = raw_annotation["boxes"] raw_annotation["scores"] = scores.tolist() raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels] raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] keys = ["score", "label", "box"] annotation = [ dict(zip(keys, vals)) for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"]) ] return annotation def _get_bounding_box(self, box: "torch.Tensor") -> dict[str, int]: """ Turns list [xmin, xmax, ymin, ymax] into dict { "xmin": xmin, ... } Args: box (`torch.Tensor`): Tensor containing the coordinates in corners format. Returns: bbox (`dict[str, int]`): Dict containing the coordinates in corners format. """ xmin, ymin, xmax, ymax = box.int().tolist() bbox = { "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
ObjectDetectionPipeline
python
wandb__wandb
wandb/vendor/graphql-core-1.1/wandb_graphql/validation/rules/lone_anonymous_operation.py
{ "start": 97, "end": 897 }
class ____(ValidationRule): __slots__ = 'operation_count', def __init__(self, context): self.operation_count = 0 super(LoneAnonymousOperation, self).__init__(context) def enter_Document(self, node, key, parent, path, ancestors): self.operation_count = \ sum(1 for definition in node.definitions if isinstance(definition, ast.OperationDefinition)) def enter_OperationDefinition(self, node, key, parent, path, ancestors): if not node.name and self.operation_count > 1: self.context.report_error(GraphQLError(self.anonymous_operation_not_alone_message(), [node])) @staticmethod def anonymous_operation_not_alone_message(): return 'This anonymous operation must be the only defined operation.'
LoneAnonymousOperation
python
huggingface__transformers
tests/models/mpt/test_modeling_mpt.py
{ "start": 13651, "end": 17326 }
class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( MptModel, MptForCausalLM, MptForSequenceClassification, MptForTokenClassification, MptForQuestionAnswering, ) if is_torch_available() else () ) test_missing_keys = False pipeline_model_mapping = ( { "feature-extraction": MptModel, "question-answering": MptForQuestionAnswering, "text-classification": MptForSequenceClassification, "text-generation": MptForCausalLM, "token-classification": MptForTokenClassification, "zero-shot": MptForSequenceClassification, } if is_torch_available() else {} ) def setUp(self): self.model_tester = MptModelTester(self) self.config_tester = MptConfigTester(self, config_class=MptConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_mpt_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpt_model(*config_and_inputs) def test_mpt_model_alibi_tensor(self): # test creation of alibi tensor when num heads is not a power of two config_and_inputs = self.model_tester.prepare_config_and_inputs() config_and_inputs[0].n_heads = 6 self.model_tester.create_and_check_mpt_model(*config_and_inputs) def test_mpt_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpt_model_past(*config_and_inputs) def test_mpt_model_att_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpt_model_attention_mask_past(*config_and_inputs) def test_mpt_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpt_model_past_large_inputs(*config_and_inputs) def test_mpt_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) def 
test_mpt_sequence_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_sequence_classification_model(*config_and_inputs) def test_mpt_token_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_token_classification_model(*config_and_inputs) def test_mpt_gradient_checkpointing(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True) def test_mpt_weight_initialization(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpt_weight_initialization(*config_and_inputs) @unittest.skip(reason="For backward compatibility the lm_head is not in the model's state dict on the Hub.") def test_model_weights_reload_no_missing_tied_weights(self): pass @slow def test_model_from_pretrained(self): model_name = "mosaicml/mpt-7b" model = MptModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow @require_torch_accelerator @require_bitsandbytes
MptModelTest
python
jamielennox__requests-mock
tests/test_matcher.py
{ "start": 684, "end": 13704 }
class ____(base.TestCase): def match(self, target, url, matcher_method='GET', request_method='GET', complete_qs=False, headers=None, request_data=None, request_headers={}, additional_matcher=None, real_http=False, case_sensitive=False): matcher = adapter._Matcher(matcher_method, target, [], complete_qs=complete_qs, additional_matcher=additional_matcher, request_headers=request_headers, real_http=real_http, case_sensitive=case_sensitive) request = adapter._RequestObjectProxy._create(request_method, url, headers, data=request_data) return matcher._match(request) def assertMatch(self, target=ANY, url='http://example.com/requests-mock', matcher_method='GET', request_method='GET', **kwargs): self.assertEqual(True, self.match(target, url, matcher_method=matcher_method, request_method=request_method, **kwargs), 'Matcher %s %s failed to match %s %s' % (matcher_method, target, request_method, url)) def assertMatchBoth(self, target=ANY, url='http://example.com/requests-mock', matcher_method='GET', request_method='GET', **kwargs): self.assertMatch(target, url, matcher_method=matcher_method, request_method=request_method, **kwargs) self.assertMatch(url, target, matcher_method=request_method, request_method=matcher_method, **kwargs) def assertNoMatch(self, target=ANY, url='http://example.com/requests-mock', matcher_method='GET', request_method='GET', **kwargs): self.assertEqual(False, self.match(target, url, matcher_method=matcher_method, request_method=request_method, **kwargs), 'Matcher %s %s unexpectedly matched %s %s' % (matcher_method, target, request_method, url)) def assertNoMatchBoth(self, target=ANY, url='http://example.com/requests-mock', matcher_method='GET', request_method='GET', **kwargs): self.assertNoMatch(target, url, matcher_method=matcher_method, request_method=request_method, **kwargs) self.assertNoMatch(url, target, matcher_method=request_method, request_method=matcher_method, **kwargs) def assertMatchMethodBoth(self, matcher_method, request_method, 
**kwargs): url = 'http://www.test.com' self.assertMatchBoth(url, url, request_method=request_method, matcher_method=matcher_method, **kwargs) def assertNoMatchMethodBoth(self, matcher_method, request_method, **kwargs): url = 'http://www.test.com' self.assertNoMatchBoth(url, url, request_method=request_method, matcher_method=matcher_method, **kwargs) def test_url_matching(self): self.assertMatchBoth('http://www.test.com', 'http://www.test.com') self.assertMatchBoth('http://www.test.com', 'http://www.test.com/') self.assertMatchBoth('http://www.test.com/abc', 'http://www.test.com/abc') self.assertMatchBoth('http://www.test.com:5000/abc', 'http://www.test.com:5000/abc') self.assertNoMatchBoth('https://www.test.com', 'http://www.test.com') self.assertNoMatchBoth('http://www.test.com/abc', 'http://www.test.com') self.assertNoMatchBoth('http://test.com', 'http://www.test.com') self.assertNoMatchBoth('http://test.com', 'http://www.test.com') self.assertNoMatchBoth('http://test.com/abc', 'http://www.test.com/abc/') self.assertNoMatchBoth('http://test.com/abc/', 'http://www.test.com/abc') self.assertNoMatchBoth('http://test.com:5000/abc/', 'http://www.test.com/abc') self.assertNoMatchBoth('http://test.com/abc/', 'http://www.test.com:5000/abc') def test_quotation(self): self.assertMatchBoth('http://www.test.com/a string%url', 'http://www.test.com/a string%url') self.assertMatchBoth('http://www.test.com/ABC 123', 'http://www.test.com/ABC%20123') self.assertMatchBoth('http://www.test.com/user@example.com', 'http://www.test.com/user@example.com') def test_subset_match(self): self.assertMatch('/path', 'http://www.test.com/path') self.assertMatch('/path', 'http://www.test.com/path') self.assertMatch('//www.test.com/path', 'http://www.test.com/path') self.assertMatch('//www.test.com/path', 'https://www.test.com/path') def test_query_string(self): self.assertMatch('/path?a=1&b=2', 'http://www.test.com/path?a=1&b=2') self.assertMatch('/path?a=1', 'http://www.test.com/path?a=1&b=2', 
complete_qs=False) self.assertNoMatch('/path?a=1', 'http://www.test.com/path?a=1&b=2', complete_qs=True) self.assertNoMatch('/path?a=1&b=2', 'http://www.test.com/path?a=1') def test_query_empty_string(self): self.assertMatch('/path?a', 'http://www.test.com/path?a') self.assertMatch('/path?bob&paul', 'http://www.test.com/path?paul&bob') self.assertNoMatch('/path?bob', 'http://www.test.com/path?paul') self.assertNoMatch('/path?pual&bob', 'http://www.test.com/path?bob') def test_method_match(self): self.assertNoMatchMethodBoth('GET', 'POST') self.assertMatchMethodBoth('GET', 'get') self.assertMatchMethodBoth('GeT', 'geT') def test_match_ANY_url(self): self.assertMatch(ANY, 'http://anything') self.assertMatch(ANY, 'http://somethingelse') self.assertNoMatch(ANY, 'http://somethingelse', request_method='POST') def test_match_ANY_method(self): for m in ('GET', 'POST', 'HEAD', 'OPTION'): self.assertMatch('http://www.test.com', 'http://www.test.com', matcher_method=ANY, request_method=m) self.assertNoMatch('http://www.test.com', 'http://another', matcher_method=ANY) def test_match_with_regex(self): r1 = re.compile('test.com/a') r2 = re.compile('/b/c') self.assertMatch(r1, 'http://mock.test.com/a/b') self.assertMatch(r1, 'http://test.com/a/') self.assertMatch(r1, 'mock://test.com/a/b') self.assertNoMatch(r1, 'mock://test.com/') self.assertMatch(r2, 'http://anything/a/b/c/d') self.assertMatch(r2, 'mock://anything/a/b/c/d') def test_match_with_headers(self): self.assertMatch('/path', 'http://www.test.com/path', headers={'A': 'abc', 'b': 'def'}, request_headers={'a': 'abc'}) self.assertMatch('/path', 'http://www.test.com/path', headers={'A': 'abc', 'b': 'def'}) self.assertNoMatch('/path', 'http://www.test.com/path', headers={'A': 'abc', 'b': 'def'}, request_headers={'b': 'abc'}) self.assertNoMatch('/path', 'http://www.test.com/path', headers={'A': 'abc', 'b': 'def'}, request_headers={'c': 'ghi'}) # headers should be key insensitive and value sensitive, we have no # choice here 
because they go into an insensitive dict. self.assertMatch('/path', 'http://www.test.com/path', headers={'aBc': 'abc', 'DEF': 'def'}, request_headers={'abC': 'abc'}) self.assertNoMatch('/path', 'http://www.test.com/path', headers={'abc': 'aBC', 'DEF': 'def'}, request_headers={'abc': 'Abc'}) def test_case_sensitive_ignored_for_netloc_and_protocol(self): for case_sensitive in (True, False): self.assertMatch('http://AbC.CoM', 'http://aBc.CoM', case_sensitive=case_sensitive) self.assertMatch('htTP://abc.com', 'hTTp://abc.com', case_sensitive=case_sensitive) self.assertMatch('htTP://aBC.cOm', 'hTTp://AbC.Com', case_sensitive=case_sensitive) def assertSensitiveMatch(self, target, url, **kwargs): self.assertMatch(target, url, case_sensitive=False, **kwargs) self.assertNoMatch(target, url, case_sensitive=True, **kwargs) def test_case_sensitive_paths(self): self.assertSensitiveMatch('http://abc.com/pAtH', 'http://abc.com/path') self.assertSensitiveMatch('/pAtH', 'http://abc.com/path') def test_case_sensitive_query(self): self.assertSensitiveMatch('http://abc.com/path?abCD=efGH', 'http://abc.com/path?abCd=eFGH') self.assertSensitiveMatch('http://abc.com/path?abcd=efGH', 'http://abc.com/path?abcd=eFGH') def test_additional_matcher(self): def test_match_body(request): return 'hello' in request.text self.assertMatch(request_method='POST', matcher_method='POST', request_data='hello world', additional_matcher=test_match_body) self.assertNoMatch(request_method='POST', matcher_method='POST', request_data='goodbye world', additional_matcher=test_match_body) def test_reset_reverts_count(self): url = 'mock://test/site/' matcher = adapter._Matcher('GET', url, [_MatcherResponse()], complete_qs=False, additional_matcher=None, request_headers={}, real_http=False, case_sensitive=False) request = adapter._RequestObjectProxy._create('GET', url) call_count = 3 for _ in range(call_count): matcher(request) self.assertEqual(matcher.call_count, call_count) matcher.reset() 
self.assertEqual(matcher.call_count, 0)
TestMatcher
python
PrefectHQ__prefect
src/prefect/server/utilities/leasing.py
{ "start": 513, "end": 2468 }
class ____(Protocol[T]): async def create_lease( self, resource_ids: list[UUID], ttl: timedelta, metadata: T | None = None ) -> ResourceLease[T]: """ Create a new resource lease. Args: resource_ids: The IDs of the resources that the lease is associated with. ttl: How long the lease should initially be held for. metadata: Additional metadata associated with the lease. Returns: A ResourceLease object representing the lease. """ ... async def read_lease(self, lease_id: UUID) -> ResourceLease[T] | None: """ Read a resource lease. Args: lease_id: The ID of the lease to read. Returns: A ResourceLease object representing the lease, or None if not found. """ ... async def renew_lease(self, lease_id: UUID, ttl: timedelta) -> bool | None: """ Renew a resource lease. Args: lease_id: The ID of the lease to renew. ttl: The new amount of time the lease should be held for. Returns: True if the lease was successfully renewed, False if the lease does not exist or has already expired. None may be returned by legacy implementations for backwards compatibility (treated as success). """ ... async def revoke_lease(self, lease_id: UUID) -> None: """ Release a resource lease by removing it from list of active leases. Args: lease_id: The ID of the lease to release. """ ... async def read_expired_lease_ids(self, limit: int = 100) -> list[UUID]: """ Read the IDs of expired leases. Args: limit: The maximum number of expired leases to read. Returns: A list of UUIDs representing the expired leases. """ ...
LeaseStorage
python
apache__airflow
airflow-core/src/airflow/ti_deps/deps/trigger_rule_dep.py
{ "start": 1817, "end": 3550 }
class ____(NamedTuple): """ States of the upstream tis for a specific ti. This is used to determine whether the specific ti can run in this iteration. """ success: int skipped: int failed: int upstream_failed: int removed: int done: int success_setup: int skipped_setup: int @classmethod def calculate(cls, finished_upstreams: Iterator[TaskInstance]) -> _UpstreamTIStates: """ Calculate states for a task instance. ``counter`` is inclusive of ``setup_counter`` -- e.g. if there are 2 skipped upstreams, one of which is a setup, then counter will show 2 skipped and setup counter will show 1. :param finished_upstreams: all the finished upstreams of the dag_run """ counter: Counter[str] = Counter() setup_counter: Counter[str] = Counter() for ti in finished_upstreams: if TYPE_CHECKING: assert ti.task assert ti.state curr_state = {ti.state: 1} counter.update(curr_state) if ti.task.is_setup: setup_counter.update(curr_state) return _UpstreamTIStates( success=counter.get(TaskInstanceState.SUCCESS, 0), skipped=counter.get(TaskInstanceState.SKIPPED, 0), failed=counter.get(TaskInstanceState.FAILED, 0), upstream_failed=counter.get(TaskInstanceState.UPSTREAM_FAILED, 0), removed=counter.get(TaskInstanceState.REMOVED, 0), done=sum(counter.values()), success_setup=setup_counter.get(TaskInstanceState.SUCCESS, 0), skipped_setup=setup_counter.get(TaskInstanceState.SKIPPED, 0), )
_UpstreamTIStates
python
wandb__wandb
wandb/automations/actions.py
{ "start": 2514, "end": 2793 }
class ____(NotificationActionFields, frozen=False): action_type: Literal[ActionType.NOTIFICATION] = ActionType.NOTIFICATION integration: _SlackIntegrationStub title: Optional[str] message: Optional[str] severity: Optional[AlertSeverity]
SavedNotificationAction
python
pandas-dev__pandas
pandas/tests/io/parser/conftest.py
{ "start": 2372, "end": 9144 }
class ____(BaseParser): engine = "pyarrow" float_precision_choices = [None] @pytest.fixture def csv_dir_path(datapath): """ The directory path to the data files needed for parser tests. """ return datapath("io", "parser", "data") @pytest.fixture def csv1(datapath): """ The path to the data file "test1.csv" needed for parser tests. """ return os.path.join(datapath("io", "data", "csv"), "test1.csv") _cParserHighMemory = CParserHighMemory _cParserLowMemory = CParserLowMemory _pythonParser = PythonParser _pyarrowParser = PyArrowParser _py_parsers_only = [_pythonParser] _c_parsers_only = [_cParserHighMemory, _cParserLowMemory] _pyarrow_parsers_only = [ pytest.param( _pyarrowParser, marks=[ pytest.mark.single_cpu, pytest.mark.skipif(not HAS_PYARROW, reason="pyarrow is not installed"), ], ) ] _all_parsers = [*_c_parsers_only, *_py_parsers_only, *_pyarrow_parsers_only] _py_parser_ids = ["python"] _c_parser_ids = ["c_high", "c_low"] _pyarrow_parsers_ids = ["pyarrow"] _all_parser_ids = [*_c_parser_ids, *_py_parser_ids, *_pyarrow_parsers_ids] @pytest.fixture(params=_all_parsers, ids=_all_parser_ids) def all_parsers(request): """ Fixture all of the CSV parsers. """ parser = request.param() if parser.engine == "pyarrow": pytest.importorskip("pyarrow", VERSIONS["pyarrow"]) # Try finding a way to disable threads all together # for more stable CI runs import pyarrow pyarrow.set_cpu_count(1) return parser @pytest.fixture(params=_c_parsers_only, ids=_c_parser_ids) def c_parser_only(request): """ Fixture all of the CSV parsers using the C engine. """ return request.param() @pytest.fixture(params=_py_parsers_only, ids=_py_parser_ids) def python_parser_only(request): """ Fixture all of the CSV parsers using the Python engine. """ return request.param() @pytest.fixture(params=_pyarrow_parsers_only, ids=_pyarrow_parsers_ids) def pyarrow_parser_only(request): """ Fixture all of the CSV parsers using the Pyarrow engine. 
""" return request.param() def _get_all_parser_float_precision_combinations(): """ Return all allowable parser and float precision combinations and corresponding ids. """ params = [] ids = [] for parser, parser_id in zip(_all_parsers, _all_parser_ids): if hasattr(parser, "values"): # Wrapped in pytest.param, get the actual parser back parser = parser.values[0] for precision in parser.float_precision_choices: # Re-wrap in pytest.param for pyarrow mark = ( [ pytest.mark.single_cpu, pytest.mark.skipif( not HAS_PYARROW, reason="pyarrow is not installed" ), ] if parser.engine == "pyarrow" else () ) param = pytest.param((parser(), precision), marks=mark) params.append(param) ids.append(f"{parser_id}-{precision}") return {"params": params, "ids": ids} @pytest.fixture( params=_get_all_parser_float_precision_combinations()["params"], ids=_get_all_parser_float_precision_combinations()["ids"], ) def all_parsers_all_precisions(request): """ Fixture for all allowable combinations of parser and float precision """ return request.param _utf_values = [8, 16, 32] _encoding_seps = ["", "-", "_"] _encoding_prefixes = ["utf", "UTF"] _encoding_fmts = [ f"{prefix}{sep}{{0}}" for sep in _encoding_seps for prefix in _encoding_prefixes ] @pytest.fixture(params=_utf_values) def utf_value(request): """ Fixture for all possible integer values for a UTF encoding. """ return request.param @pytest.fixture(params=_encoding_fmts) def encoding_fmt(request): """ Fixture for all possible string formats of a UTF encoding. 
""" return request.param @pytest.fixture( params=[ ("-1,0", -1.0), ("-1,2e0", -1.2), ("-1e0", -1.0), ("+1e0", 1.0), ("+1e+0", 1.0), ("+1e-1", 0.1), ("+,1e1", 1.0), ("+1,e0", 1.0), ("-,1e1", -1.0), ("-1,e0", -1.0), ("0,1", 0.1), ("1,", 1.0), (",1", 0.1), ("-,1", -0.1), ("1_,", 1.0), ("1_234,56", 1234.56), ("1_234,56e0", 1234.56), # negative cases; must not parse as float ("_", "_"), ("-_", "-_"), ("-_1", "-_1"), ("-_1e0", "-_1e0"), ("_1", "_1"), ("_1,", "_1,"), ("_1,_", "_1,_"), ("_1e0", "_1e0"), ("1,2e_1", "1,2e_1"), ("1,2e1_0", "1,2e1_0"), ("1,_2", "1,_2"), (",1__2", ",1__2"), (",1e", ",1e"), ("-,1e", "-,1e"), ("1_000,000_000", "1_000,000_000"), ("1,e1_2", "1,e1_2"), ("e11,2", "e11,2"), ("1e11,2", "1e11,2"), ("1,2,2", "1,2,2"), ("1,2_1", "1,2_1"), ("1,2e-10e1", "1,2e-10e1"), ("--1,2", "--1,2"), ("1a_2,1", "1a_2,1"), ("1,2E-1", 0.12), ("1,2E1", 12.0), ] ) def numeric_decimal(request): """ Fixture for all numeric formats which should get recognized. The first entry represents the value to read while the second represents the expected result. """ return request.param @pytest.fixture def pyarrow_xfail(request): """ Fixture that xfails a test if the engine is pyarrow. Use if failure is do to unsupported keywords or inconsistent results. """ if "all_parsers" in request.fixturenames: parser = request.getfixturevalue("all_parsers") elif "all_parsers_all_precisions" in request.fixturenames: # Return value is tuple of (engine, precision) parser = request.getfixturevalue("all_parsers_all_precisions")[0] else: return if parser.engine == "pyarrow": mark = pytest.mark.xfail(reason="pyarrow doesn't support this.") request.applymarker(mark) @pytest.fixture def pyarrow_skip(request): """ Fixture that skips a test if the engine is pyarrow. 
Use if failure is do a parsing failure from pyarrow.csv.read_csv """ if "all_parsers" in request.fixturenames: parser = request.getfixturevalue("all_parsers") elif "all_parsers_all_precisions" in request.fixturenames: # Return value is tuple of (engine, precision) parser = request.getfixturevalue("all_parsers_all_precisions")[0] else: return if parser.engine == "pyarrow": pytest.skip(reason="https://github.com/apache/arrow/issues/38676")
PyArrowParser
python
huggingface__transformers
tests/models/decision_transformer/test_modeling_decision_transformer.py
{ "start": 6362, "end": 9272 }
class ____(unittest.TestCase): @slow def test_autoregressive_prediction(self): """ An integration test that performs autoregressive prediction of state, action and return from a sequence of state, actions and returns. Test is performed over two timesteps. """ NUM_STEPS = 2 # number of steps of autoregressive prediction we will perform TARGET_RETURN = 10 # defined by the RL environment, may be normalized model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert") model = model.to(torch_device) config = model.config torch.manual_seed(0) state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32) # env.reset() expected_outputs = torch.tensor( [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device ) returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1) states = state actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32) rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32) timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1) for step in range(NUM_STEPS): actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1) rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1) attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device) with torch.no_grad(): _, action_pred, _ = model( states=states, actions=actions, rewards=rewards, returns_to_go=returns_to_go, timesteps=timesteps, attention_mask=attention_mask, return_dict=False, ) self.assertEqual(action_pred.shape, actions.shape) torch.testing.assert_close(action_pred[0, -1], expected_outputs[step], rtol=1e-4, atol=1e-4) state, reward, _, _ = ( # env.step(action) torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32), 1.0, False, {}, ) actions[-1] = action_pred[0, -1] states = 
torch.cat([states, state], dim=1) pred_return = returns_to_go[0, -1] - reward returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1) timesteps = torch.cat( [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1 )
DecisionTransformerModelIntegrationTest
python
run-llama__llama_index
llama-index-core/llama_index/core/utils.py
{ "start": 3677, "end": 5977 }
class ____(Protocol): def encode(self, text: str, *args: Any, **kwargs: Any) -> List[Any]: ... def set_global_tokenizer(tokenizer: Union[Tokenizer, Callable[[str], list]]) -> None: import llama_index.core if isinstance(tokenizer, Tokenizer): llama_index.core.global_tokenizer = tokenizer.encode else: llama_index.core.global_tokenizer = tokenizer def get_tokenizer(model_name: str = "gpt-3.5-turbo") -> Callable[[str], List]: import llama_index.core if llama_index.core.global_tokenizer is None: tiktoken_import_err = ( "`tiktoken` package not found, please run `pip install tiktoken`" ) try: import tiktoken except ImportError: raise ImportError(tiktoken_import_err) # set tokenizer cache temporarily should_revert = False if "TIKTOKEN_CACHE_DIR" not in os.environ: should_revert = True os.environ["TIKTOKEN_CACHE_DIR"] = os.path.join( os.path.dirname(os.path.abspath(__file__)), "_static/tiktoken_cache", ) enc = tiktoken.encoding_for_model(model_name) tokenizer = partial(enc.encode, allowed_special="all") set_global_tokenizer(tokenizer) if should_revert: del os.environ["TIKTOKEN_CACHE_DIR"] assert llama_index.core.global_tokenizer is not None return llama_index.core.global_tokenizer def get_new_id(d: Set) -> str: """Get a new ID.""" while True: new_id = str(uuid.uuid4()) if new_id not in d: break return new_id def get_new_int_id(d: Set) -> int: """Get a new integer ID.""" while True: new_id = random.randint(0, sys.maxsize) if new_id not in d: break return new_id @contextmanager def temp_set_attrs(obj: Any, **kwargs: Any) -> Generator: """ Temporary setter. Utility class for setting a temporary value for an attribute on a class. Taken from: https://tinyurl.com/2p89xymh """ prev_values = {k: getattr(obj, k) for k in kwargs} for k, v in kwargs.items(): setattr(obj, k, v) try: yield finally: for k, v in prev_values.items(): setattr(obj, k, v) @dataclass
Tokenizer
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/roles.py
{ "start": 6258, "end": 6314 }
class ____(ReturnsRowsRole): __slots__ = ()
HasCTERole
python
sympy__sympy
sympy/physics/optics/gaussopt.py
{ "start": 1322, "end": 4657 }
class ____(MutableDenseMatrix): """ Base class for a Ray Transfer Matrix. It should be used if there is not already a more specific subclass mentioned in See Also. Parameters ========== parameters : A, B, C and D or 2x2 matrix (Matrix(2, 2, [A, B, C, D])) Examples ======== >>> from sympy.physics.optics import RayTransferMatrix, ThinLens >>> from sympy import Symbol, Matrix >>> mat = RayTransferMatrix(1, 2, 3, 4) >>> mat Matrix([ [1, 2], [3, 4]]) >>> RayTransferMatrix(Matrix([[1, 2], [3, 4]])) Matrix([ [1, 2], [3, 4]]) >>> mat.A 1 >>> f = Symbol('f') >>> lens = ThinLens(f) >>> lens Matrix([ [ 1, 0], [-1/f, 1]]) >>> lens.C -1/f See Also ======== GeometricRay, BeamParameter, FreeSpace, FlatRefraction, CurvedRefraction, FlatMirror, CurvedMirror, ThinLens References ========== .. [1] https://en.wikipedia.org/wiki/Ray_transfer_matrix_analysis """ def __new__(cls, *args): if len(args) == 4: temp = ((args[0], args[1]), (args[2], args[3])) elif len(args) == 1 \ and isinstance(args[0], Matrix) \ and args[0].shape == (2, 2): temp = args[0] else: raise ValueError(filldedent(''' Expecting 2x2 Matrix or the 4 elements of the Matrix but got %s''' % str(args))) return Matrix.__new__(cls, temp) def __mul__(self, other): if isinstance(other, RayTransferMatrix): return RayTransferMatrix(Matrix(self)*Matrix(other)) elif isinstance(other, GeometricRay): return GeometricRay(Matrix(self)*Matrix(other)) elif isinstance(other, BeamParameter): temp = Matrix(self)*Matrix(((other.q,), (1,))) q = (temp[0]/temp[1]).expand(complex=True) return BeamParameter(other.wavelen, together(re(q)), z_r=together(im(q))) else: return Matrix.__mul__(self, other) @property def A(self): """ The A parameter of the Matrix. Examples ======== >>> from sympy.physics.optics import RayTransferMatrix >>> mat = RayTransferMatrix(1, 2, 3, 4) >>> mat.A 1 """ return self[0, 0] @property def B(self): """ The B parameter of the Matrix. 
Examples ======== >>> from sympy.physics.optics import RayTransferMatrix >>> mat = RayTransferMatrix(1, 2, 3, 4) >>> mat.B 2 """ return self[0, 1] @property def C(self): """ The C parameter of the Matrix. Examples ======== >>> from sympy.physics.optics import RayTransferMatrix >>> mat = RayTransferMatrix(1, 2, 3, 4) >>> mat.C 3 """ return self[1, 0] @property def D(self): """ The D parameter of the Matrix. Examples ======== >>> from sympy.physics.optics import RayTransferMatrix >>> mat = RayTransferMatrix(1, 2, 3, 4) >>> mat.D 4 """ return self[1, 1]
RayTransferMatrix
python
Unity-Technologies__ml-agents
ml-agents/mlagents/trainers/torch_entities/attention.py
{ "start": 925, "end": 4023 }
class ____(torch.nn.Module): NEG_INF = -1e6 def __init__(self, embedding_size: int, num_heads: int): """ Multi Head Attention module. We do not use the regular Torch implementation since Sentis does not support some operators it uses. Takes as input to the forward method 3 tensors: - query: of dimensions (batch_size, number_of_queries, embedding_size) - key: of dimensions (batch_size, number_of_keys, embedding_size) - value: of dimensions (batch_size, number_of_keys, embedding_size) The forward method will return 2 tensors: - The output: (batch_size, number_of_queries, embedding_size) - The attention matrix: (batch_size, num_heads, number_of_queries, number_of_keys) :param embedding_size: The size of the embeddings that will be generated (should be dividable by the num_heads) :param total_max_elements: The maximum total number of entities that can be passed to the module :param num_heads: The number of heads of the attention module """ super().__init__() self.n_heads = num_heads self.head_size: int = embedding_size // self.n_heads self.embedding_size: int = self.head_size * self.n_heads def forward( self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, n_q: int, n_k: int, key_mask: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: b = -1 # the batch size query = query.reshape( b, n_q, self.n_heads, self.head_size ) # (b, n_q, h, emb / h) key = key.reshape(b, n_k, self.n_heads, self.head_size) # (b, n_k, h, emb / h) value = value.reshape( b, n_k, self.n_heads, self.head_size ) # (b, n_k, h, emb / h) query = query.permute([0, 2, 1, 3]) # (b, h, n_q, emb / h) # The next few lines are equivalent to : key.permute([0, 2, 3, 1]) # This is a hack, ONNX will compress two permute operations and # Sentis will not like seeing `permute([0,2,3,1])` key = key.permute([0, 2, 1, 3]) # (b, h, emb / h, n_k) key -= 1 key += 1 key = key.permute([0, 1, 3, 2]) # (b, h, emb / h, n_k) qk = torch.matmul(query, key) # (b, h, n_q, n_k) if key_mask is None: qk 
= qk / (self.embedding_size**0.5) else: key_mask = key_mask.reshape(b, 1, 1, n_k) qk = (1 - key_mask) * qk / ( self.embedding_size**0.5 ) + key_mask * self.NEG_INF att = torch.softmax(qk, dim=3) # (b, h, n_q, n_k) value = value.permute([0, 2, 1, 3]) # (b, h, n_k, emb / h) value_attention = torch.matmul(att, value) # (b, h, n_q, emb / h) value_attention = value_attention.permute([0, 2, 1, 3]) # (b, n_q, h, emb / h) value_attention = value_attention.reshape( b, n_q, self.embedding_size ) # (b, n_q, emb) return value_attention, att
MultiHeadAttention
python
openai__openai-python
src/openai/types/beta/chatkit/chat_session_automatic_thread_titling.py
{ "start": 172, "end": 297 }
class ____(BaseModel): enabled: bool """Whether automatic thread titling is enabled."""
ChatSessionAutomaticThreadTitling
python
ray-project__ray
python/ray/dashboard/modules/job/job_head.py
{ "start": 6548, "end": 28729 }
class ____(SubprocessModule): """Runs on the head node of a Ray cluster and handles Ray Jobs APIs. NOTE(architkulkarni): Please keep this class in sync with the OpenAPI spec at `doc/source/cluster/running-applications/job-submission/openapi.yml`. We currently do not automatically check that the OpenAPI spec is in sync with the implementation. If any changes are made to the paths in the @route decorators or in the Responses returned by the methods (or any nested fields in the Responses), you will need to find the corresponding field of the OpenAPI yaml file and update it manually. Also, bump the version number in the yaml file and in this class's `get_version`. """ # Time that we sleep while tailing logs while waiting for # the supervisor actor to start. We don't know which node # to read the logs from until then. WAIT_FOR_SUPERVISOR_ACTOR_INTERVAL_S = 1 def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._job_info_client = None # To make sure that the internal KV is initialized by getting the lazy property assert self.gcs_client is not None assert ray.experimental.internal_kv._internal_kv_initialized() # It contains all `JobAgentSubmissionClient` that # `JobHead` has ever used, and will not be deleted # from it unless `JobAgentSubmissionClient` is no # longer available (the corresponding agent process is dead) # {node_id: JobAgentSubmissionClient} self._agents: Dict[NodeID, JobAgentSubmissionClient] = dict() async def get_target_agent( self, timeout_s: float = WAIT_AVAILABLE_AGENT_TIMEOUT ) -> JobAgentSubmissionClient: """ Get a `JobAgentSubmissionClient`, which is a client for interacting with jobs via an agent process. Args: timeout_s: The timeout for the operation. Returns: A `JobAgentSubmissionClient` for interacting with jobs via an agent process. Raises: TimeoutError: If the operation times out. 
""" return await self._get_head_node_agent(timeout_s) async def _get_head_node_agent_once(self) -> JobAgentSubmissionClient: head_node_id_hex = await get_head_node_id(self.gcs_client) if not head_node_id_hex: raise Exception("Head node id has not yet been persisted in GCS") head_node_id = NodeID.from_hex(head_node_id_hex) if head_node_id not in self._agents: ip, http_port, _ = await self._fetch_agent_info(head_node_id) agent_http_address = f"http://{build_address(ip, http_port)}" self._agents[head_node_id] = JobAgentSubmissionClient(agent_http_address) return self._agents[head_node_id] async def _get_head_node_agent(self, timeout_s: float) -> JobAgentSubmissionClient: """Retrieves HTTP client for `JobAgent` running on the Head node. If the head node does not have an agent, it will retry every `TRY_TO_GET_AGENT_INFO_INTERVAL_SECONDS` seconds indefinitely. Args: timeout_s: The timeout for the operation. Returns: A `JobAgentSubmissionClient` for interacting with jobs via the head node's agent process. Raises: TimeoutError: If the operation times out. """ timeout_point = time.time() + timeout_s exception = None while time.time() < timeout_point: try: return await self._get_head_node_agent_once() except Exception as e: exception = e logger.exception( f"Failed to get head node agent, retrying in {TRY_TO_GET_AGENT_INFO_INTERVAL_SECONDS} seconds..." ) await asyncio.sleep(TRY_TO_GET_AGENT_INFO_INTERVAL_SECONDS) raise TimeoutError( f"Failed to get head node agent within {timeout_s} seconds. The last exception is {exception}" ) async def _fetch_agent_info(self, target_node_id: NodeID) -> Tuple[str, int, int]: """ Fetches agent info by the Node ID. May raise exception if there's network error or the agent info is not found. 
Returns: (ip, http_port, grpc_port) """ key = f"{DASHBOARD_AGENT_ADDR_NODE_ID_PREFIX}{target_node_id.hex()}" value = await self.gcs_client.async_internal_kv_get( key, namespace=KV_NAMESPACE_DASHBOARD, timeout=GCS_RPC_TIMEOUT_SECONDS, ) if not value: raise KeyError( f"Agent info not found in internal KV for node {target_node_id}. " "It's possible that the agent didn't launch successfully due to " "port conflicts or other issues. Please check `dashboard_agent.log` " "for more details." ) return json.loads(value.decode()) @routes.get("/api/version") async def get_version(self, req: Request) -> Response: # NOTE(edoakes): CURRENT_VERSION should be bumped and checked on the # client when we have backwards-incompatible changes. resp = VersionResponse( version=CURRENT_VERSION, ray_version=ray.__version__, ray_commit=ray.__commit__, session_name=self.session_name, ) return Response( text=json.dumps(dataclasses.asdict(resp)), content_type="application/json", status=aiohttp.web.HTTPOk.status_code, ) @routes.get("/api/packages/{protocol}/{package_name}") async def get_package(self, req: Request) -> Response: package_uri = http_uri_components_to_uri( protocol=req.match_info["protocol"], package_name=req.match_info["package_name"], ) logger.debug(f"Adding temporary reference to package {package_uri}.") try: pin_runtime_env_uri(package_uri) except Exception: return Response( text=traceback.format_exc(), status=aiohttp.web.HTTPInternalServerError.status_code, ) if not package_exists(package_uri): return Response( text=f"Package {package_uri} does not exist", status=aiohttp.web.HTTPNotFound.status_code, ) return Response() @routes.put("/api/packages/{protocol}/{package_name}") async def upload_package(self, req: Request): package_uri = http_uri_components_to_uri( protocol=req.match_info["protocol"], package_name=req.match_info["package_name"], ) logger.info(f"Uploading package {package_uri} to the GCS.") try: data = await req.read() await get_or_create_event_loop().run_in_executor( 
None, upload_package_to_gcs, package_uri, data, ) except Exception: return Response( text=traceback.format_exc(), status=aiohttp.web.HTTPInternalServerError.status_code, ) return Response(status=aiohttp.web.HTTPOk.status_code) @routes.post("/api/jobs/") async def submit_job(self, req: Request) -> Response: result = await parse_and_validate_request(req, JobSubmitRequest) # Request parsing failed, returned with Response object. if isinstance(result, Response): return result else: submit_request: JobSubmitRequest = result try: job_agent_client = await self.get_target_agent() resp = await job_agent_client.submit_job_internal(submit_request) except asyncio.TimeoutError: return Response( text="No available agent to submit job, please try again later.", status=aiohttp.web.HTTPInternalServerError.status_code, ) except (TypeError, ValueError): return Response( text=traceback.format_exc(), status=aiohttp.web.HTTPBadRequest.status_code, ) except Exception: return Response( text=traceback.format_exc(), status=aiohttp.web.HTTPInternalServerError.status_code, ) return Response( text=json.dumps(dataclasses.asdict(resp)), content_type="application/json", status=aiohttp.web.HTTPOk.status_code, ) @routes.post("/api/jobs/{job_or_submission_id}/stop") async def stop_job(self, req: Request) -> Response: job_or_submission_id = req.match_info["job_or_submission_id"] job = await find_job_by_ids( self.gcs_client, self._job_info_client, job_or_submission_id, ) if not job: return Response( text=f"Job {job_or_submission_id} does not exist", status=aiohttp.web.HTTPNotFound.status_code, ) if job.type is not JobType.SUBMISSION: return Response( text="Can only stop submission type jobs", status=aiohttp.web.HTTPBadRequest.status_code, ) try: job_agent_client = await self.get_target_agent() resp = await job_agent_client.stop_job_internal(job.submission_id) except Exception: return Response( text=traceback.format_exc(), status=aiohttp.web.HTTPInternalServerError.status_code, ) return Response( 
text=json.dumps(dataclasses.asdict(resp)), content_type="application/json" ) @routes.delete("/api/jobs/{job_or_submission_id}") async def delete_job(self, req: Request) -> Response: job_or_submission_id = req.match_info["job_or_submission_id"] job = await find_job_by_ids( self.gcs_client, self._job_info_client, job_or_submission_id, ) if not job: return Response( text=f"Job {job_or_submission_id} does not exist", status=aiohttp.web.HTTPNotFound.status_code, ) if job.type is not JobType.SUBMISSION: return Response( text="Can only delete submission type jobs", status=aiohttp.web.HTTPBadRequest.status_code, ) try: job_agent_client = await self.get_target_agent() resp = await job_agent_client.delete_job_internal(job.submission_id) except Exception: return Response( text=traceback.format_exc(), status=aiohttp.web.HTTPInternalServerError.status_code, ) return Response( text=json.dumps(dataclasses.asdict(resp)), content_type="application/json" ) @routes.get("/api/jobs/{job_or_submission_id}") async def get_job_info(self, req: Request) -> Response: job_or_submission_id = req.match_info["job_or_submission_id"] job = await find_job_by_ids( self.gcs_client, self._job_info_client, job_or_submission_id, ) if not job: return Response( text=f"Job {job_or_submission_id} does not exist", status=aiohttp.web.HTTPNotFound.status_code, ) return Response( text=json.dumps(job.dict()), content_type="application/json", ) # TODO(rickyx): This endpoint's logic is also mirrored in state API's endpoint. # We should eventually unify the backend logic (and keep the logic in sync before # that). 
@routes.get("/api/jobs/") async def list_jobs(self, req: Request) -> Response: (driver_jobs, submission_job_drivers), submission_jobs = await asyncio.gather( get_driver_jobs(self.gcs_client), self._job_info_client.get_all_jobs() ) submission_jobs = [ JobDetails( **dataclasses.asdict(job), submission_id=submission_id, job_id=submission_job_drivers.get(submission_id).id if submission_id in submission_job_drivers else None, driver_info=submission_job_drivers.get(submission_id), type=JobType.SUBMISSION, ) for submission_id, job in submission_jobs.items() ] return Response( text=json.dumps( [ *[submission_job.dict() for submission_job in submission_jobs], *[job_info.dict() for job_info in driver_jobs.values()], ] ), content_type="application/json", ) @routes.get("/api/jobs/{job_or_submission_id}/logs") async def get_job_logs(self, req: Request) -> Response: job_or_submission_id = req.match_info["job_or_submission_id"] job = await find_job_by_ids( self.gcs_client, self._job_info_client, job_or_submission_id, ) if not job: return Response( text=f"Job {job_or_submission_id} does not exist", status=aiohttp.web.HTTPNotFound.status_code, ) if job.type is not JobType.SUBMISSION: return Response( text="Can only get logs of submission type jobs", status=aiohttp.web.HTTPBadRequest.status_code, ) try: job_agent_client = self.get_job_driver_agent_client(job) payload = ( await job_agent_client.get_job_logs_internal(job.submission_id) if job_agent_client else JobLogsResponse("") ) return Response( text=json.dumps(dataclasses.asdict(payload)), content_type="application/json", ) except Exception: return Response( text=traceback.format_exc(), status=aiohttp.web.HTTPInternalServerError.status_code, ) @routes.get( "/api/jobs/{job_or_submission_id}/logs/tail", resp_type=ResponseType.WEBSOCKET ) async def tail_job_logs(self, req: Request) -> StreamResponse: job_or_submission_id = req.match_info["job_or_submission_id"] job = await find_job_by_ids( self.gcs_client, self._job_info_client, 
job_or_submission_id, ) if not job: return Response( text=f"Job {job_or_submission_id} does not exist", status=aiohttp.web.HTTPNotFound.status_code, ) if job.type is not JobType.SUBMISSION: return Response( text="Can only get logs of submission type jobs", status=aiohttp.web.HTTPBadRequest.status_code, ) ws = aiohttp.web.WebSocketResponse() await ws.prepare(req) driver_agent_http_address = None while driver_agent_http_address is None: job = await find_job_by_ids( self.gcs_client, self._job_info_client, job_or_submission_id, ) driver_agent_http_address = job.driver_agent_http_address status = job.status if status.is_terminal() and driver_agent_http_address is None: # Job exited before supervisor actor started. return ws await asyncio.sleep(self.WAIT_FOR_SUPERVISOR_ACTOR_INTERVAL_S) job_agent_client = self.get_job_driver_agent_client(job) async for lines in job_agent_client.tail_job_logs(job.submission_id): await ws.send_str(lines) return ws def get_job_driver_agent_client( self, job: JobDetails ) -> Optional[JobAgentSubmissionClient]: if job.driver_agent_http_address is None: return None driver_node_id = job.driver_node_id if driver_node_id not in self._agents: self._agents[driver_node_id] = JobAgentSubmissionClient( job.driver_agent_http_address ) return self._agents[driver_node_id] @routes.get("/api/component_activities") async def get_component_activities( self, req: aiohttp.web.Request ) -> aiohttp.web.Response: timeout = req.query.get("timeout", None) if timeout and timeout.isdigit(): timeout = int(timeout) else: timeout = 30 # Get activity information for driver driver_activity_info = await self._get_job_activity_info(timeout=timeout) resp = {"driver": dict(driver_activity_info)} if RAY_CLUSTER_ACTIVITY_HOOK in os.environ: try: cluster_activity_callable = load_class( os.environ[RAY_CLUSTER_ACTIVITY_HOOK] ) external_activity_output = cluster_activity_callable() assert isinstance(external_activity_output, dict), ( f"Output of hook 
{os.environ[RAY_CLUSTER_ACTIVITY_HOOK]} " "should be Dict[str, RayActivityResponse]. Got " f"output: {external_activity_output}" ) for component_type in external_activity_output: try: component_activity_output = external_activity_output[ component_type ] # Parse and validate output to type RayActivityResponse component_activity_output = RayActivityResponse( **dict(component_activity_output) ) resp[component_type] = dict(component_activity_output) except Exception as e: logger.exception( f"Failed to get activity status of {component_type} " f"from user hook {os.environ[RAY_CLUSTER_ACTIVITY_HOOK]}." ) resp[component_type] = { "is_active": RayActivityStatus.ERROR, "reason": repr(e), "timestamp": datetime.now().timestamp(), } except Exception as e: logger.exception( "Failed to get activity status from user " f"hook {os.environ[RAY_CLUSTER_ACTIVITY_HOOK]}." ) resp["external_component"] = { "is_active": RayActivityStatus.ERROR, "reason": repr(e), "timestamp": datetime.now().timestamp(), } return aiohttp.web.Response( text=json.dumps(resp), content_type="application/json", status=aiohttp.web.HTTPOk.status_code, ) async def _get_job_activity_info(self, timeout: int) -> RayActivityResponse: # Returns if there is Ray activity from drivers (job). # Drivers in namespaces that start with _ray_internal_ are not # considered activity. 
# This includes the _ray_internal_dashboard job that gets automatically # created with every cluster try: reply = await self.gcs_client.async_get_all_job_info( skip_submission_job_info_field=True, skip_is_running_tasks_field=True, timeout=timeout, ) num_active_drivers = 0 latest_job_end_time = 0 for job_table_entry in reply.values(): is_dead = bool(job_table_entry.is_dead) in_internal_namespace = job_table_entry.config.ray_namespace.startswith( "_ray_internal_" ) latest_job_end_time = ( max(latest_job_end_time, job_table_entry.end_time) if job_table_entry.end_time else latest_job_end_time ) if not is_dead and not in_internal_namespace: num_active_drivers += 1 current_timestamp = datetime.now().timestamp() # Latest job end time must be before or equal to the current timestamp. # Job end times may be provided in epoch milliseconds. Check if this # is true, and convert to seconds if latest_job_end_time > current_timestamp: latest_job_end_time = latest_job_end_time / 1000 assert current_timestamp >= latest_job_end_time, ( f"Most recent job end time {latest_job_end_time} must be " f"before or equal to the current timestamp {current_timestamp}" ) is_active = ( RayActivityStatus.ACTIVE if num_active_drivers > 0 else RayActivityStatus.INACTIVE ) return RayActivityResponse( is_active=is_active, reason=f"Number of active drivers: {num_active_drivers}" if num_active_drivers else None, timestamp=current_timestamp, # If latest_job_end_time == 0, no jobs have finished yet so don't # populate last_activity_at last_activity_at=latest_job_end_time if latest_job_end_time else None, ) except Exception as e: logger.exception("Failed to get activity status of Ray drivers.") return RayActivityResponse( is_active=RayActivityStatus.ERROR, reason=repr(e), timestamp=datetime.now().timestamp(), ) async def run(self): await super().run() if not self._job_info_client: self._job_info_client = JobInfoStorageClient(self.gcs_client)
JobHead
python
django__django
tests/model_fields/models.py
{ "start": 14996, "end": 15060 }
class ____(models.Model): field = models.UUIDField()
UUIDModel
python
PrefectHQ__prefect
src/prefect/client/schemas/actions.py
{ "start": 2616, "end": 5274 }
class ____(ActionBaseModel): schedule: SCHEDULE_TYPES = Field( default=..., description="The schedule for the deployment." ) active: bool = Field( default=True, description="Whether or not the schedule is active." ) max_scheduled_runs: Optional[PositiveInteger] = Field( default=None, description="The maximum number of scheduled runs for the schedule.", ) parameters: dict[str, Any] = Field( default_factory=dict, description="Parameter overrides for the schedule.", ) slug: Optional[str] = Field( default=None, description="A unique identifier for the schedule.", ) @field_validator("active", mode="wrap") @classmethod def validate_active(cls, v: Any, handler: Callable[[Any], Any]) -> bool: try: return handler(v) except Exception: raise ValueError( f"active must be able to be parsed as a boolean, got {v!r} of type {type(v)}" ) @field_validator("max_scheduled_runs") @classmethod def validate_max_scheduled_runs(cls, v: Optional[int]) -> Optional[int]: return validate_schedule_max_scheduled_runs( v, PREFECT_DEPLOYMENT_SCHEDULE_MAX_SCHEDULED_RUNS.value() ) @classmethod def from_schedule(cls, schedule: Schedule) -> "DeploymentScheduleCreate": if schedule.interval is not None: return cls( schedule=IntervalSchedule( interval=schedule.interval, timezone=schedule.timezone, anchor_date=schedule.anchor_date, ), parameters=schedule.parameters, active=schedule.active, slug=schedule.slug, ) elif schedule.cron is not None: return cls( schedule=CronSchedule( cron=schedule.cron, timezone=schedule.timezone, day_or=schedule.day_or, ), parameters=schedule.parameters, active=schedule.active, slug=schedule.slug, ) elif schedule.rrule is not None: return cls( schedule=RRuleSchedule( rrule=schedule.rrule, timezone=schedule.timezone, ), parameters=schedule.parameters, active=schedule.active, slug=schedule.slug, ) else: return cls( schedule=NoSchedule(), )
DeploymentScheduleCreate
python
ray-project__ray
rllib/algorithms/dreamerv3/tests/test_dreamerv3.py
{ "start": 1000, "end": 12144 }
class ____(unittest.TestCase): @classmethod def setUpClass(cls): ray.init() @classmethod def tearDownClass(cls): ray.shutdown() def test_dreamerv3_compilation(self): """Test whether DreamerV3 can be built with all frameworks.""" # Build a DreamerV3Config object. config = ( dreamerv3.DreamerV3Config() .env_runners(num_env_runners=0) .training( # Keep things simple. Especially the long dream rollouts seem # to take an enormous amount of time (initially). batch_size_B=4, horizon_H=5, batch_length_T=16, model_size="nano", # Use a tiny model for testing symlog_obs=True, use_float16=False, ) .learners( num_learners=2, num_cpus_per_learner=1, num_gpus_per_learner=0, ) ) num_iterations = 3 for env in [ # "DMC/cartpole/swingup", # causes strange MuJoCo error(s) on CI "FrozenLake-v1", "CartPole-v1", "ale_py:ALE/MsPacman-v5", "Pendulum-v1", ]: print("Env={}".format(env)) # Add one-hot observations for FrozenLake env. if env == "FrozenLake-v1": config.env_runners( env_to_module_connector=( lambda env, spaces, device: FlattenObservations() ) ) else: config.env_runners(env_to_module_connector=None) # Add Atari preprocessing. if env == "ale_py:ALE/MsPacman-v5": def env_creator(cfg): return wrap_atari_for_new_api_stack( gym.make(env, **cfg, render_mode="rgb_array"), # No frame-stacking. DreamerV3 processes color images with a # GRU, so partial observability is ok. framestack=None, grayscale=False, ) tune.register_env("env", env_creator) env = "env" elif env.startswith("DMC"): parts = env.split("/") assert len(parts) == 3, ( "ERROR: DMC env must be formatted as 'DMC/[task]/[domain]', e.g. " f"'DMC/cartpole/swingup'! You provided '{env}'." 
) def env_creator(cfg): return ActionClip( DMCEnv( parts[1], parts[2], from_pixels=True, channels_first=False, ) ) tune.register_env("env", env_creator) env = "env" config.environment(env) algo = config.build_algo() obs_space = algo.env_runner._env_to_module.observation_space act_space = algo.env_runner.env.single_action_space rl_module = algo.env_runner.module for i in range(num_iterations): results = algo.train() print(results) # Test dream trajectory w/ recreated observations. sample = algo.replay_buffer.sample() start_states = rl_module.dreamer_model.get_initial_state() start_states = tree.map_structure( # Repeat only the batch dimension (B times). lambda s: s.unsqueeze(0).repeat(1, *([1] * len(s.shape))), start_states, ) dream = rl_module.dreamer_model.dream_trajectory_with_burn_in( start_states=start_states, timesteps_burn_in=5, timesteps_H=45, observations=torch.from_numpy(sample["obs"][:1]), # B=1 actions=torch.from_numpy( one_hot( sample["actions"], depth=act_space.n, ) if isinstance(act_space, gym.spaces.Discrete) else sample["actions"] )[ :1 ], # B=1 ) check( dream["actions_dreamed_t0_to_H_BxT"].shape, (46, 1) + ( (act_space.n,) if isinstance(act_space, gym.spaces.Discrete) else tuple(act_space.shape) ), ) check(dream["continues_dreamed_t0_to_H_BxT"].shape, (46, 1)) check( dream["observations_dreamed_t0_to_H_BxT"].shape, [46, 1] + list(obs_space.shape), ) algo.stop() def test_dreamerv3_dreamer_model_sizes(self): """Tests, whether the different model sizes match the ones reported in [1].""" # For Atari, these are the exact numbers from the repo ([3]). # However, for CartPole + size "S" and "M", the author's original code will not # match for the world model count. This is due to the fact that the author uses # encoder/decoder nets with 5x1024 nodes (which corresponds to XL) regardless of # the `model_size` settings (iff >="S"). 
expected_num_params_world_model = { # XS encoder # kernel=[4, 256], (no bias), layernorm=[256],[256] # XS reward_predictor # kernel=[1280, 256], (no bias), layernorm[256],[256] # kernel=[256, 255] bias=[255] # 1280=1024 (z-state) + 256 (h-state) # XS continue_predictor # kernel=[1280, 256], (no bias), layernorm=[256],[256] # kernel=[256, 1] bias=[1] # XS sequence_model # [ # pre-MLP: kernel=[1026, 256], (no bias), layernorm=[256],[256], silu # custom GRU: kernel=[512, 768], (no bias), layernorm=[768],[768] # ] # XS decoder # kernel=[1280, 256], (no bias), layernorm=[256],[256] # kernel=[256, 4] bias=[4] # XS posterior_mlp # kernel=[512, 256], (no bias), layernorm=[256],[256] # XS posterior_representation_layer # kernel=[256, 1024], bias=[1024] "XS_cartpole": 2435076, "S_cartpole": 7493380, "M_cartpole": 16206084, "L_cartpole": 37802244, "XL_cartpole": 108353796, # XS encoder (atari) # cnn kernel=[4, 4, 3, 24], (no bias), layernorm=[24],[24], # cnn kernel=[4, 4, 24, 48], (no bias), layernorm=[48],[48], # cnn kernel=[4, 4, 48, 96], (no bias), layernorm=[96],[96], # cnn kernel=[4, 4, 96, 192], (no bias), layernorm=[192],[192], # XS decoder (atari) # init dense kernel[1280, 3072] bias=[3072] -> reshape into image # [4, 4, 96, 192], [96], [96] # [4, 4, 48, 96], [48], [48], # [4, 4, 24, 48], [24], [24], # [4, 4, 3, 24], [3] <- no layernorm at end "XS_atari": 7538979, "S_atari": 15687811, "M_atari": 32461635, "L_atari": 68278275, "XL_atari": 181558659, } # All values confirmed against [3] (100% match). expected_num_params_actor = { # hidden=[1280, 256] # hidden_norm=[256], [256] # pi (2 actions)=[256, 2], [2] "XS_cartpole": 328706, "S_cartpole": 1051650, "M_cartpole": 2135042, "L_cartpole": 4136450, "XL_cartpole": 9449474, "XS_atari": 329734, "S_atari": 1053702, "M_atari": 2137606, "L_atari": 4139526, "XL_atari": 9453574, } # All values confirmed against [3] (100% match). 
expected_num_params_critic = { # hidden=[1280, 256] # hidden_norm=[256], [256] # vf (buckets)=[256, 255], [255] "XS_cartpole": 393727, "S_cartpole": 1181439, "M_cartpole": 2297215, "L_cartpole": 4331007, "XL_cartpole": 9708799, "XS_atari": 393727, "S_atari": 1181439, "M_atari": 2297215, "L_atari": 4331007, "XL_atari": 9708799, } config = dreamerv3.DreamerV3Config().training( batch_length_T=16, horizon_H=5, symlog_obs=True, ) # Check all model_sizes described in the paper ([1]) on matching the number # of parameters to RLlib's implementation. for model_size in ["XS", "S", "M", "L", "XL"]: config.model_size = model_size # Atari and CartPole spaces. for obs_space, num_actions, env_name in [ (gym.spaces.Box(-1.0, 0.0, (4,), np.float32), 2, "cartpole"), (gym.spaces.Box(-1.0, 0.0, (64, 64, 3), np.float32), 6, "atari"), ]: print(f"Testing model_size={model_size} on env-type: {env_name} ..") config.environment( observation_space=obs_space, action_space=gym.spaces.Discrete(num_actions), ) # Create our RLModule to compute actions with. policy_dict, _ = config.get_multi_agent_setup() module_spec = config.get_multi_rl_module_spec(policy_dict=policy_dict) rl_module = module_spec.build()[DEFAULT_MODULE_ID] # Count the generated RLModule's parameters and compare to the # paper's reported numbers ([1] and [3]). 
num_params_world_model = sum( np.prod(v.shape) for v in rl_module.world_model.parameters() if v.requires_grad ) self.assertEqual( num_params_world_model, expected_num_params_world_model[f"{model_size}_{env_name}"], ) num_params_actor = sum( np.prod(v.shape) for v in rl_module.actor.parameters() if v.requires_grad ) self.assertEqual( num_params_actor, expected_num_params_actor[f"{model_size}_{env_name}"], ) num_params_critic = sum( np.prod(v.shape) for v in rl_module.critic.parameters() if v.requires_grad ) self.assertEqual( num_params_critic, expected_num_params_critic[f"{model_size}_{env_name}"], ) print("\tok") if __name__ == "__main__": import sys import pytest sys.exit(pytest.main(["-v", __file__]))
TestDreamerV3
python
huggingface__transformers
tests/models/nougat/test_image_processing_nougat.py
{ "start": 3972, "end": 15188 }
class ____(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = NougatImageProcessor if is_vision_available() else None fast_image_processing_class = NougatImageProcessorFast if is_torchvision_available() else None def setUp(self): super().setUp() self.image_processor_tester = NougatImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() @cached_property def image_processor(self): return self.image_processing_class(**self.image_processor_dict) @unittest.skip(reason="FIXME: @yoni.") def test_slow_fast_equivalence_batched(self): pass def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) def test_image_processor_from_dict_with_kwargs(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class(**self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 20, "width": 20}) kwargs = dict(self.image_processor_dict) kwargs.pop("size", None) image_processor = self.image_processing_class(**kwargs, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_expected_output(self): dummy_image = self.image_processor_tester.prepare_dummy_image() for image_processing_class in self.image_processor_list: image_processor = image_processing_class(**self.image_processor_dict) inputs = image_processor(dummy_image, return_tensors="pt") torch.testing.assert_close(inputs["pixel_values"].mean(), torch.tensor(0.4906), rtol=1e-3, atol=1e-3) def test_crop_margin_all_white(self): image = np.uint8(np.ones((3, 100, 100)) * 
255) for image_processing_class in self.image_processor_list: if image_processing_class == NougatImageProcessorFast: image = torch.from_numpy(image) image_processor = image_processing_class(**self.image_processor_dict) cropped_image = image_processor.crop_margin(image) self.assertTrue(torch.equal(image, cropped_image)) else: image_processor = image_processing_class(**self.image_processor_dict) cropped_image = image_processor.crop_margin(image) self.assertTrue(np.array_equal(image, cropped_image)) def test_crop_margin_centered_black_square(self): image = np.ones((3, 100, 100), dtype=np.uint8) * 255 image[:, 45:55, 45:55] = 0 expected_cropped = image[:, 45:55, 45:55] for image_processing_class in self.image_processor_list: if image_processing_class == NougatImageProcessorFast: image = torch.from_numpy(image) expected_cropped = torch.from_numpy(expected_cropped) image_processor = image_processing_class(**self.image_processor_dict) cropped_image = image_processor.crop_margin(image) self.assertTrue(torch.equal(expected_cropped, cropped_image)) else: image_processor = image_processing_class(**self.image_processor_dict) cropped_image = image_processor.crop_margin(image) self.assertTrue(np.array_equal(expected_cropped, cropped_image)) def test_align_long_axis_no_rotation(self): image = np.uint8(np.ones((3, 100, 200)) * 255) for image_processing_class in self.image_processor_list: if image_processing_class == NougatImageProcessorFast: image = torch.from_numpy(image) size = SizeDict(height=200, width=300) image_processor = image_processing_class(**self.image_processor_dict) aligned_image = image_processor.align_long_axis(image, size) self.assertEqual(image.shape, aligned_image.shape) else: size = {"height": 200, "width": 300} image_processor = image_processing_class(**self.image_processor_dict) aligned_image = image_processor.align_long_axis(image, size) self.assertEqual(image.shape, aligned_image.shape) def test_align_long_axis_with_rotation(self): image = 
np.uint8(np.ones((3, 200, 100)) * 255) for image_processing_class in self.image_processor_list: image_processor = image_processing_class(**self.image_processor_dict) if image_processing_class == NougatImageProcessorFast: image = torch.from_numpy(image) size = SizeDict(height=300, width=200) image_processor = image_processing_class(**self.image_processor_dict) aligned_image = image_processor.align_long_axis(image, size) self.assertEqual(torch.Size([3, 200, 100]), aligned_image.shape) else: size = {"height": 300, "width": 200} image_processor = image_processing_class(**self.image_processor_dict) aligned_image = image_processor.align_long_axis(image, size) self.assertEqual((3, 200, 100), aligned_image.shape) def test_align_long_axis_data_format(self): image = np.uint8(np.ones((3, 100, 200)) * 255) for image_processing_class in self.image_processor_list: if image_processing_class == NougatImageProcessorFast: image = torch.from_numpy(image) image_processor = image_processing_class(**self.image_processor_dict) size = SizeDict(height=200, width=300) aligned_image = image_processor.align_long_axis(image, size) self.assertEqual(torch.Size([3, 100, 200]), aligned_image.shape) else: size = {"height": 200, "width": 300} data_format = "channels_first" image_processor = image_processing_class(**self.image_processor_dict) aligned_image = image_processor.align_long_axis(image, size, data_format) self.assertEqual((3, 100, 200), aligned_image.shape) def prepare_dummy_np_image(self): revision = "ec57bf8c8b1653a209c13f6e9ee66b12df0fc2db" filepath = hf_hub_download( repo_id="hf-internal-testing/fixtures_docvqa", filename="nougat_pdf.png", repo_type="dataset", revision=revision, ) image = Image.open(filepath).convert("RGB") return np.array(image).transpose(2, 0, 1) def test_crop_margin_equality_cv2_python(self): image = self.prepare_dummy_np_image() for image_processing_class in self.image_processor_list: if image_processing_class == NougatImageProcessorFast: image = 
torch.from_numpy(image) image_processor = image_processing_class(**self.image_processor_dict) image_cropped_python = image_processor.crop_margin(image) self.assertEqual(image_cropped_python.shape, torch.Size([3, 850, 685])) self.assertAlmostEqual(image_cropped_python.float().mean().item(), 237.43881150708458, delta=0.001) else: image_processor = image_processing_class(**self.image_processor_dict) image_cropped_python = image_processor.crop_margin(image) self.assertEqual(image_cropped_python.shape, (3, 850, 685)) self.assertAlmostEqual(image_cropped_python.mean(), 237.43881150708458, delta=0.001) def test_call_numpy_4_channels(self): for image_processing_class in self.image_processor_list: if image_processing_class == NougatImageProcessor: # Test that can process images which have an arbitrary number of channels # Initialize image_processing image_processor = image_processing_class(**self.image_processor_dict) # create random numpy tensors self.image_processor_tester.num_channels = 4 image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) # Test not batched input encoded_images = image_processor( image_inputs[0], return_tensors="pt", input_data_format="channels_last", image_mean=(0.0, 0.0, 0.0, 0.0), image_std=(1.0, 1.0, 1.0, 1.0), ).pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape( [image_inputs[0]] ) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) # Test batched encoded_images = image_processor( image_inputs, return_tensors="pt", input_data_format="channels_last", image_mean=(0.0, 0.0, 0.0, 0.0), image_std=(1.0, 1.0, 1.0, 1.0), ).pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape) ) def test_slow_fast_equivalence(self): if not self.test_slow_image_processor or 
not self.test_fast_image_processor: self.skipTest(reason="Skipping slow/fast equivalence test") if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") dummy_image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg")) image_processor_slow = self.image_processing_class(**self.image_processor_dict) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) encoding_slow = image_processor_slow(dummy_image, return_tensors="pt") encoding_fast = image_processor_fast(dummy_image, return_tensors="pt") # Adding a larger than usual tolerance because the slow processor uses reducing_gap=2.0 during resizing. torch.testing.assert_close(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=2e-1, rtol=0) self.assertLessEqual( torch.mean(torch.abs(encoding_slow.pixel_values - encoding_fast.pixel_values)).item(), 2e-2 )
NougatImageProcessingTest
python
apache__airflow
task-sdk/src/airflow/sdk/api/datamodels/_generated.py
{ "start": 7456, "end": 7872 }
class ____(BaseModel): """ Schema for updating TaskInstance to up_for_retry. """ model_config = ConfigDict( extra="forbid", ) state: Annotated[Literal["up_for_retry"] | None, Field(title="State")] = "up_for_retry" end_date: Annotated[AwareDatetime, Field(title="End Date")] rendered_map_index: Annotated[str | None, Field(title="Rendered Map Index")] = None
TIRetryStatePayload
python
pytorch__pytorch
torch/_inductor/autoheuristic/autoheuristic_utils.py
{ "start": 607, "end": 1308 }
class ____: """ AHOperation can be used to augment the data collected by AutoHeuristic. One might for example store features like m, k, n, but also want to use features like m*n, or k*n, to learn a heuristic. Instead of storing features that can be created from the collected data, one can use AHOperation to create new features from the collected data. """ def __init__( self, name: str, func: Callable[[Any], Value], is_categorical: bool = False ) -> None: self.name = name self.func = func self.is_categorical = is_categorical def apply_operation(self, data: Any) -> None: data[self.name] = self.func(data)
AHOperation
python
numpy__numpy
numpy/_core/tests/test_unicode.py
{ "start": 12371, "end": 12521 }
class ____(ByteorderValues): """Check the byteorder in unicode (size 2, UCS2 values)""" ulen = 2 ucs_value = ucs2_value
TestByteorder_2_UCS2
python
pyinstaller__pyinstaller
tests/unit/test_hookutils.py
{ "start": 1819, "end": 2642 }
class ____(object): # Verify that removing a suffix from an empty string is OK. def test_empty_string(self): assert '' == hookutils.remove_suffix('', 'suffix') # An empty suffix should pass the string through unmodified. def test_emptystr_unmodif(self): assert 'test' == hookutils.remove_suffix('test', '') # If the string is the suffix, it should be empty at exit. def test_string_suffix(self): assert '' == hookutils.remove_suffix('test', 'test') # Just the suffix should be removed. def test_just_suffix(self): assert 'test' == hookutils.remove_suffix('testing', 'ing') # A matching string not as suffix should produce no modifications def test_no_modific(self): assert 'testa' == hookutils.remove_suffix('testa', 'test')
TestRemoveSuffix
python
pandas-dev__pandas
pandas/errors/__init__.py
{ "start": 24254, "end": 25417 }
class ____(Exception): """ Exception is raised when trying to index and there is a mismatch in dimensions. Raised by properties like :attr:`.pandas.DataFrame.iloc` when an indexer is out of bounds or :attr:`.pandas.DataFrame.loc` when its index is unalignable to the frame index. See Also -------- DataFrame.iloc : Purely integer-location based indexing for \ selection by position. DataFrame.loc : Access a group of rows and columns by label(s) \ or a boolean array. Examples -------- >>> df = pd.DataFrame({"A": [1, 1, 1]}) >>> df.loc[..., ..., "A"] # doctest: +SKIP ... # IndexingError: indexer may only contain one '...' entry >>> df = pd.DataFrame({"A": [1, 1, 1]}) >>> df.loc[1, ..., ...] # doctest: +SKIP ... # IndexingError: Too many indexers >>> df[pd.Series([True], dtype=bool)] # doctest: +SKIP ... # IndexingError: Unalignable boolean Series provided as indexer... >>> s = pd.Series(range(2), index=pd.MultiIndex.from_product([["a", "b"], ["c"]])) >>> s.loc["a", "c", "d"] # doctest: +SKIP ... # IndexingError: Too many indexers """
IndexingError
python
apache__airflow
task-sdk/src/airflow/sdk/definitions/asset/decorators.py
{ "start": 5036, "end": 5389 }
class ____(Asset): """ Asset representation from decorating a function with ``@asset``. :meta private: """ _function: Callable _source: asset def __attrs_post_init__(self) -> None: with self._source.create_dag(default_dag_id=self.name): _instantiate_task(self) @attrs.define(kw_only=True)
AssetDefinition
python
getsentry__sentry
src/sentry/replays/usecases/query/conditions/error_ids.py
{ "start": 1153, "end": 2416 }
class ____(ComputedBase): """Sum of error ids array condition visitor.""" @staticmethod def visit_eq(value: UUID) -> Condition: return contains(ErrorIdsArray.visit_eq(value)) @staticmethod def visit_neq(value: UUID) -> Condition: return does_not_contain(ErrorIdsArray.visit_eq(value)) @staticmethod def visit_in(value: list[UUID]) -> Condition: return contains(ErrorIdsArray.visit_in(value)) @staticmethod def visit_not_in(value: list[UUID]) -> Condition: return does_not_contain(ErrorIdsArray.visit_in(value)) def has_error_id(error_id: UUID) -> Function: return Function( "has", parameters=[ # Because this is an exact match operation we use the bloom filter index. Column("_error_ids_hashed"), Function("cityHash64", parameters=[to_uuid(error_id)]), ], ) def has_any_error_id(error_ids: list[UUID]) -> Function: return Function( "hasAny", parameters=[ # Because this is an exact match operation we use the bloom filter index. Column("_error_ids_hashed"), [Function("cityHash64", parameters=[to_uuid(eid)]) for eid in error_ids], ], )
SumOfErrorIdsArray
python
getsentry__sentry
src/sentry/workflow_engine/migrations/0078_workflow_fire_history_date_index.py
{ "start": 155, "end": 1580 }
class ____(CheckedMigration): # This flag is used to mark that a migration shouldn't be automatically run in production. # This should only be used for operations where it's safe to run the migration after your # code has deployed. So this should not be used for most operations that alter the schema # of a table. # Here are some things that make sense to mark as post deployment: # - Large data migrations. Typically we want these to be run manually so that they can be # monitored and not block the deploy for a long period of time while they run. # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to # run this outside deployments so that we don't block them. Note that while adding an index # is a schema change, it's completely safe to run the operation after the code has deployed. # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment is_post_deployment = False dependencies = [ ("sentry", "0952_fix_span_item_event_type_alerts"), ("workflow_engine", "0077_add_wfh_single_write_col"), ] operations = [ migrations.AddIndex( model_name="workflowfirehistory", index=models.Index( fields=["workflow", "date_added"], name="workflow_en_workflo_270fe2_idx" ), ), ]
Migration
python
miyuchina__mistletoe
mistletoe/contrib/jira_renderer.py
{ "start": 1265, "end": 8435 }
class ____(BaseRenderer): """ JIRA renderer class. See mistletoe.base_renderer module for more info. """ def __init__(self, *extras): """ Args: extras (list): allows subclasses to add even more custom tokens. """ self.listTokens = [] self.lastChildOfQuotes = [] super().__init__(*chain([block_token.HtmlBlock, span_token.HtmlSpan], extras)) def render_strong(self, token): template = '*{}*' return template.format(self.render_inner(token)) def render_emphasis(self, token): template = '_{}_' return template.format(self.render_inner(token)) def render_inline_code(self, token): template = '{{{{{}}}}}' return template.format(self.render_inner(token)) def render_strikethrough(self, token): template = '-{}-' return template.format(self.render_inner(token)) def render_image(self, token): template = '!{src}!' self.render_inner(token) return template.format(src=token.src) def render_link(self, token): template = '[{inner}|{target}{title}]' inner = self.render_inner(token) target = escape_url(token.target) if token.title: title = '|{}'.format(token.title) else: title = '' return template.format(inner=inner, target=target, title=title) def render_auto_link(self, token): template = '[{target}]' target = escape_url(token.target) return template.format(target=target) def render_escape_sequence(self, token): return self.render_inner(token) def render_raw_text(self, token, escape=True): if escape: def repl(match): return '\\' + match.group(0) # The following regex tries to find special chars that are one of the following: # 1. the whole string (typically in an EscapeSequence) # 2. just after a non-whitespace # 3. just before a non-whitespace re_esc_chars = r'[{}\[\]\-*_+^~]' re_find = r'(^{esc_chars}$)|((?<=\S)({esc_chars}))|(({esc_chars})(?=\S))'.format(esc_chars=re_esc_chars) return re.sub(re_find, repl, token.content) else: return token.content @staticmethod def render_html_span(token): return token.content def render_heading(self, token): template = 'h{level}. 
{inner}' inner = self.render_inner(token) return template.format(level=token.level, inner=inner) + self._block_eol(token) def render_quote(self, token): self.lastChildOfQuotes.append(token.children[-1]) inner = self.render_inner(token) del (self.lastChildOfQuotes[-1]) if len(token.children) == 1 and isinstance(token.children[0], block_token.Paragraph): template = 'bq. {inner}' + self._block_eol(token)[0:-1] else: template = '{{quote}}\n{inner}{{quote}}' + self._block_eol(token) return template.format(inner=inner) def render_paragraph(self, token): return '{}'.format(self.render_inner(token)) + self._block_eol(token) def render_block_code(self, token): template = '{{code{attr}}}\n{inner}{{code}}' + self._block_eol(token) if token.language: attr = ':{}'.format(token.language) else: attr = '' inner = self.render_raw_text(token.children[0], False) return template.format(attr=attr, inner=inner) def render_list(self, token): inner = self.render_inner(token) return inner + self._block_eol(token)[0:-1] def render_list_item(self, token): template = '{prefix} {inner}' prefix = ''.join(self.listTokens) result = template.format(prefix=prefix, inner=self.render_inner(token)) return result def render_inner(self, token): if isinstance(token, block_token.List): if token.start: self.listTokens.append('#') else: self.listTokens.append('*') rendered = [self.render(child) for child in token.children] if isinstance(token, block_token.List): del (self.listTokens[-1]) return ''.join(rendered) def render_table(self, token): # This is actually gross and I wonder if there's a better way to do it. # # The primary difficulty seems to be passing down alignment options to # reach individual cells. 
template = '{inner}\n' if hasattr(token, 'header'): head_template = '{inner}' header = token.header head_inner = self.render_table_row(header, True) head_rendered = head_template.format(inner=head_inner) else: head_rendered = '' body_template = '{inner}' body_inner = self.render_inner(token) body_rendered = body_template.format(inner=body_inner) return template.format(inner=head_rendered + body_rendered) def render_table_row(self, token, is_header=False): if is_header: template = '{inner}||\n' else: template = '{inner}|\n' inner = ''.join([self.render_table_cell(child, is_header) for child in token.children]) return template.format(inner=inner) def render_table_cell(self, token, in_header=False): if in_header: template = '||{inner}' else: template = '|{inner}' inner = self.render_inner(token) if inner == '': inner = ' ' return template.format(inner=inner) @staticmethod def render_thematic_break(token): return '----\n' @staticmethod def render_line_break(token): # Note: In Jira, outputting just '\n' instead of '\\\n' should be usually sufficient as well. # It is not clear when it wouldn't be sufficient though, so we use the longer variant for sure. return ' ' if token.soft else '\\\\\n' @staticmethod def render_html_block(token): return token.content def render_document(self, token): self.footnotes.update(token.footnotes) return self.render_inner(token) def _block_eol(self, token): """ Jira syntax is very limited when it comes to lists: whenever we put an empty line anywhere in a list, it gets terminated and there seems to be no workaround for this. Also to have blocks like paragraphs really vertically separated, we need to put an empty line between them. This function handles these two cases. """ return ( "\n" if len(self.listTokens) > 0 or (len(self.lastChildOfQuotes) > 0 and token is self.lastChildOfQuotes[-1]) else "\n\n" ) def escape_url(raw): """ Escape urls to prevent code injection craziness. (Hopefully.) 
""" from urllib.parse import quote return quote(raw, safe='/#:()*?=%@+,&;') JIRARenderer = JiraRenderer """ Deprecated name of the `JiraRenderer` class. """
JiraRenderer
python
scrapy__scrapy
scrapy/downloadermiddlewares/httpcompression.py
{ "start": 1529, "end": 8344 }
class ____: """This middleware allows compressed (gzip, deflate) traffic to be sent/received from websites""" def __init__( self, stats: StatsCollector | None = None, *, crawler: Crawler | None = None, ): if not crawler: self.stats = stats self._max_size = 1073741824 self._warn_size = 33554432 return self.stats = crawler.stats self._max_size = crawler.settings.getint("DOWNLOAD_MAXSIZE") self._warn_size = crawler.settings.getint("DOWNLOAD_WARNSIZE") crawler.signals.connect(self.open_spider, signals.spider_opened) @classmethod def from_crawler(cls, crawler: Crawler) -> Self: if not crawler.settings.getbool("COMPRESSION_ENABLED"): raise NotConfigured return cls(crawler=crawler) def open_spider(self, spider: Spider) -> None: if hasattr(spider, "download_maxsize"): warnings.warn( "The 'download_maxsize' spider attribute is deprecated. " "Use Spider.custom_settings or Spider.update_settings() instead. " "The corresponding setting name is 'DOWNLOAD_MAXSIZE'.", category=ScrapyDeprecationWarning, stacklevel=2, ) self._max_size = spider.download_maxsize if hasattr(spider, "download_warnsize"): warnings.warn( "The 'download_warnsize' spider attribute is deprecated. " "Use Spider.custom_settings or Spider.update_settings() instead. 
" "The corresponding setting name is 'DOWNLOAD_WARNSIZE'.", category=ScrapyDeprecationWarning, stacklevel=2, ) self._warn_size = spider.download_warnsize @_warn_spider_arg def process_request( self, request: Request, spider: Spider | None = None ) -> Request | Response | None: request.headers.setdefault("Accept-Encoding", b", ".join(ACCEPTED_ENCODINGS)) return None @_warn_spider_arg def process_response( self, request: Request, response: Response, spider: Spider | None = None ) -> Request | Response: if request.method == "HEAD": return response if isinstance(response, Response): content_encoding = response.headers.getlist("Content-Encoding") if content_encoding: max_size = request.meta.get("download_maxsize", self._max_size) warn_size = request.meta.get("download_warnsize", self._warn_size) try: decoded_body, content_encoding = self._handle_encoding( response.body, content_encoding, max_size ) except _DecompressionMaxSizeExceeded as e: raise IgnoreRequest( f"Ignored response {response} because its body " f"({len(response.body)} B compressed, " f"{e.decompressed_size} B decompressed so far) exceeded " f"DOWNLOAD_MAXSIZE ({max_size} B) during decompression." ) from e if len(response.body) < warn_size <= len(decoded_body): logger.warning( f"{response} body size after decompression " f"({len(decoded_body)} B) is larger than the " f"download warning size ({warn_size} B)." 
) if content_encoding: self._warn_unknown_encoding(response, content_encoding) response.headers["Content-Encoding"] = content_encoding if self.stats: self.stats.inc_value( "httpcompression/response_bytes", len(decoded_body), ) self.stats.inc_value("httpcompression/response_count") respcls = responsetypes.from_args( headers=response.headers, url=response.url, body=decoded_body ) kwargs: dict[str, Any] = {"body": decoded_body} if issubclass(respcls, TextResponse): # force recalculating the encoding until we make sure the # responsetypes guessing is reliable kwargs["encoding"] = None response = response.replace(cls=respcls, **kwargs) if not content_encoding: del response.headers["Content-Encoding"] return response def _handle_encoding( self, body: bytes, content_encoding: list[bytes], max_size: int ) -> tuple[bytes, list[bytes]]: to_decode, to_keep = self._split_encodings(content_encoding) for encoding in to_decode: body = self._decode(body, encoding, max_size) return body, to_keep @staticmethod def _split_encodings( content_encoding: list[bytes], ) -> tuple[list[bytes], list[bytes]]: supported_encodings = {*ACCEPTED_ENCODINGS, b"x-gzip"} to_keep: list[bytes] = [ encoding.strip().lower() for encoding in chain.from_iterable( encodings.split(b",") for encodings in content_encoding ) ] to_decode: list[bytes] = [] while to_keep: encoding = to_keep.pop() if encoding not in supported_encodings: to_keep.append(encoding) return to_decode, to_keep to_decode.append(encoding) return to_decode, to_keep @staticmethod def _decode(body: bytes, encoding: bytes, max_size: int) -> bytes: if encoding in {b"gzip", b"x-gzip"}: return gunzip(body, max_size=max_size) if encoding == b"deflate": return _inflate(body, max_size=max_size) if encoding == b"br": return _unbrotli(body, max_size=max_size) if encoding == b"zstd": return _unzstd(body, max_size=max_size) # shouldn't be reached return body # pragma: no cover def _warn_unknown_encoding( self, response: Response, encodings: list[bytes] ) 
-> None: encodings_str = b",".join(encodings).decode() msg = ( f"{self.__class__.__name__} cannot decode the response for {response.url} " f"from unsupported encoding(s) '{encodings_str}'." ) if b"br" in encodings: msg += " You need to install brotli or brotlicffi >= 1.2.0 to decode 'br'." if b"zstd" in encodings: msg += " You need to install zstandard to decode 'zstd'." logger.warning(msg)
HttpCompressionMiddleware
python
airbytehq__airbyte
airbyte-integrations/connectors/destination-deepset/destination_deepset/models.py
{ "start": 641, "end": 1319 }
class ____(BaseModel): """Configuration for the deepset cloud destination.""" model_config = ConfigDict(extra="allow") api_key: str = Field(title="API Key", description="Your deepset cloud API key", min_length=8) base_url: str = Field( default="https://api.cloud.deepset.ai", title="Base URL", description="Base url of your deepset cloud instance. Configure this if using an on-prem instance.", ) workspace: str = Field(title="Workspace", description="Name of workspace to which to sync the data.") retries: int = Field(5, title="Retries", description="Number of times to retry an action before giving up.")
DeepsetCloudConfig
python
readthedocs__readthedocs.org
readthedocs/rtd_tests/tests/test_privacy_urls.py
{ "start": 8353, "end": 9237 }
class ____(ProjectMixin): request_data = { "/projects/": {}, "/projects/search/autocomplete/": {"data": {"term": "pip"}}, "/projects/autocomplete/version/pip/": {"data": {"term": "pip"}}, "/projects/pip/autocomplete/file/": {"data": {"term": "pip"}}, } response_data = { # Public "/projects/": {"status_code": 301}, "/projects/pip/downloads/": {"status_code": 302}, "/projects/pip/downloads/pdf/latest/": {"status_code": 200}, "/projects/pip/badge/": {"status_code": 200}, "/projects/invalid_slug/": {"status_code": 302}, "/projects/pip/search/": {"status_code": 302}, "/dashboard/pip/advanced/": {"status_code": 301}, } def test_public_urls(self): from readthedocs.projects.urls.public import urlpatterns self._test_url(urlpatterns)
PublicProjectMixin
python
walkccc__LeetCode
solutions/1700. Number of Students Unable to Eat Lunch/1700.py
{ "start": 0, "end": 290 }
class ____: def countStudents(self, students: list[int], sandwiches: list[int]) -> int: count = collections.Counter(students) for i, sandwich in enumerate(sandwiches): if count[sandwich] == 0: return len(sandwiches) - i count[sandwich] -= 1 return 0
Solution
python
sympy__sympy
sympy/physics/quantum/sho1d.py
{ "start": 1169, "end": 5382 }
class ____(SHOOp): """The Raising Operator or a^dagger. When a^dagger acts on a state it raises the state up by one. Taking the adjoint of a^dagger returns 'a', the Lowering Operator. a^dagger can be rewritten in terms of position and momentum. We can represent a^dagger as a matrix, which will be its default basis. Parameters ========== args : tuple The list of numbers or parameters that uniquely specify the operator. Examples ======== Create a Raising Operator and rewrite it in terms of position and momentum, and show that taking its adjoint returns 'a': >>> from sympy.physics.quantum.sho1d import RaisingOp >>> from sympy.physics.quantum import Dagger >>> ad = RaisingOp('a') >>> ad.rewrite('xp').doit() sqrt(2)*(m*omega*X - I*Px)/(2*sqrt(hbar)*sqrt(m*omega)) >>> Dagger(ad) a Taking the commutator of a^dagger with other Operators: >>> from sympy.physics.quantum import Commutator >>> from sympy.physics.quantum.sho1d import RaisingOp, LoweringOp >>> from sympy.physics.quantum.sho1d import NumberOp >>> ad = RaisingOp('a') >>> a = LoweringOp('a') >>> N = NumberOp('N') >>> Commutator(ad, a).doit() -1 >>> Commutator(ad, N).doit() -RaisingOp(a) Apply a^dagger to a state: >>> from sympy.physics.quantum import qapply >>> from sympy.physics.quantum.sho1d import RaisingOp, SHOKet >>> ad = RaisingOp('a') >>> k = SHOKet('k') >>> qapply(ad*k) sqrt(k + 1)*|k + 1> Matrix Representation >>> from sympy.physics.quantum.sho1d import RaisingOp >>> from sympy.physics.quantum.represent import represent >>> ad = RaisingOp('a') >>> represent(ad, basis=N, ndim=4, format='sympy') Matrix([ [0, 0, 0, 0], [1, 0, 0, 0], [0, sqrt(2), 0, 0], [0, 0, sqrt(3), 0]]) """ def _eval_rewrite_as_xp(self, *args, **kwargs): return (S.One/sqrt(Integer(2)*hbar*m*omega))*( S.NegativeOne*I*Px + m*omega*X) def _eval_adjoint(self): return LoweringOp(*self.args) def _eval_commutator_LoweringOp(self, other): return S.NegativeOne def _eval_commutator_NumberOp(self, other): return S.NegativeOne*self def 
_apply_operator_SHOKet(self, ket, **options): temp = ket.n + S.One return sqrt(temp)*SHOKet(temp) def _represent_default_basis(self, **options): return self._represent_NumberOp(None, **options) def _represent_XOp(self, basis, **options): # This logic is good but the underlying position # representation logic is broken. # temp = self.rewrite('xp').doit() # result = represent(temp, basis=X) # return result raise NotImplementedError('Position representation is not implemented') def _represent_NumberOp(self, basis, **options): ndim_info = options.get('ndim', 4) format = options.get('format','sympy') matrix = matrix_zeros(ndim_info, ndim_info, **options) for i in range(ndim_info - 1): value = sqrt(i + 1) if format == 'scipy.sparse': value = float(value) matrix[i + 1, i] = value if format == 'scipy.sparse': matrix = matrix.tocsr() return matrix #-------------------------------------------------------------------------- # Printing Methods #-------------------------------------------------------------------------- def _print_contents(self, printer, *args): arg0 = printer._print(self.args[0], *args) return '%s(%s)' % (self.__class__.__name__, arg0) def _print_contents_pretty(self, printer, *args): from sympy.printing.pretty.stringpict import prettyForm pform = printer._print(self.args[0], *args) pform = pform**prettyForm('\N{DAGGER}') return pform def _print_contents_latex(self, printer, *args): arg = printer._print(self.args[0]) return '%s^{\\dagger}' % arg
RaisingOp
python
Lightning-AI__lightning
tests/tests_pytorch/checkpointing/test_model_checkpoint.py
{ "start": 34780, "end": 34943 }
class ____(BoringModel): def on_after_backward(self): if self.current_epoch == 1: raise RuntimeError("Trouble!")
TroubledModelOnAfterBackward
python
astropy__astropy
astropy/table/ndarray_mixin.py
{ "start": 139, "end": 599 }
class ____(ParentDtypeInfo): _represent_as_dict_primary_data = "data" def _represent_as_dict(self): """Represent Column as a dict that can be serialized.""" col = self._parent out = {"data": col.view(np.ndarray)} return out def _construct_from_dict(self, map): """Construct Column from ``map``.""" data = map.pop("data") out = self._parent_cls(data, **map) return out
NdarrayMixinInfo
python
huggingface__transformers
tests/models/timm_backbone/test_modeling_timm_backbone.py
{ "start": 2816, "end": 10408 }
class ____(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TimmBackbone,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {} test_resize_embeddings = False has_attentions = False def setUp(self): # self.config_class = PreTrainedConfig self.config_class = TimmBackboneConfig self.model_tester = TimmBackboneModelTester(self) self.config_tester = ConfigTester( self, config_class=self.config_class, has_text_modality=False, common_properties=["num_channels"] ) def test_config(self): self.config_tester.run_common_tests() # `TimmBackbone` has no `_init_weights`. Timm's way of weight init. seems to give larger magnitude in the intermediate values during `forward`. def test_batching_equivalence(self, atol=1e-4, rtol=1e-4): super().test_batching_equivalence(atol=atol, rtol=rtol) def test_timm_transformer_backbone_equivalence(self): timm_checkpoint = "resnet18" transformers_checkpoint = "microsoft/resnet-18" timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True) transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint) self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features)) self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names)) self.assertEqual(timm_model.channels, transformers_model.channels) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). 
self.assertEqual(timm_model.out_indices, [-1]) self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1]) timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3]) transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3]) self.assertEqual(timm_model.out_indices, transformers_model.out_indices) self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features)) self.assertEqual(timm_model.channels, transformers_model.channels) @unittest.skip(reason="TimmBackbone doesn't support feed forward chunking") def test_feed_forward_chunking(self): pass @unittest.skip(reason="TimmBackbone doesn't have num_hidden_layers attribute") def test_hidden_states_output(self): pass @unittest.skip(reason="TimmBackbone initialization is managed on the timm side") def test_can_init_all_missing_weights(self): pass @unittest.skip(reason="TimmBackbone models doesn't have inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="TimmBackbone models doesn't have inputs_embeds") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="TimmBackbone model cannot be created without specifying a backbone checkpoint") def test_from_pretrained_no_checkpoint(self): pass @unittest.skip(reason="Only checkpoints on timm can be loaded into TimmBackbone") def test_save_load(self): pass @unittest.skip(reason="TimmBackbone uses its own `from_pretrained` without device_map support") def test_can_load_with_device_context_manager(self): pass @unittest.skip(reason="TimmBackbone uses its own `from_pretrained` without device_map support") def test_can_load_with_global_device_set(self): pass @unittest.skip(reason="TimmBackbone uses its own `from_pretrained` without device_map support") def test_cannot_load_with_meta_device_context_manager(self): pass @unittest.skip(reason="Only checkpoints on timm can be loaded into TimmBackbone") def 
test_load_save_without_tied_weights(self): pass @unittest.skip(reason="Only checkpoints on timm can be loaded into TimmBackbone") def test_model_weights_reload_no_missing_tied_weights(self): pass @unittest.skip(reason="TimmBackbone doesn't have hidden size info in its configuration.") def test_channels(self): pass @unittest.skip(reason="Safetensors is not supported by timm.") def test_can_use_safetensors(self): pass @unittest.skip(reason="Need to use a timm backbone and there is no tiny model available.") def test_model_is_small(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = self.has_attentions # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0][-1] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: attentions = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) if self.has_attentions: self.assertIsNotNone(attentions.grad) # TimmBackbone config doesn't have out_features attribute def test_create_from_modified_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for 
model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() result = model(**inputs_dict) self.assertEqual(len(result.feature_maps), len(config.out_indices)) self.assertEqual(len(model.channels), len(config.out_indices)) # Check output of last stage is taken if out_features=None, out_indices=None modified_config = copy.deepcopy(config) modified_config.out_indices = None model = model_class(modified_config) model.to(torch_device) model.eval() result = model(**inputs_dict) self.assertEqual(len(result.feature_maps), 1) self.assertEqual(len(model.channels), 1) # Check backbone can be initialized with fresh weights modified_config = copy.deepcopy(config) modified_config.use_pretrained_backbone = False model = model_class(modified_config) model.to(torch_device) model.eval() result = model(**inputs_dict)
TimmBackboneModelTest
python
dagster-io__dagster
python_modules/dagster/dagster/_core/storage/base_storage.py
{ "start": 349, "end": 1337 }
class ____(ABC, MayHaveInstanceWeakref[T_DagsterInstance]): """Abstract base class for Dagster persistent storage, for reading and writing data for runs, events, and schedule/sensor state. Users should not directly instantiate concrete subclasses of this class; they are instantiated by internal machinery when ``dagster-webserver`` and ``dagster-daemon`` load, based on the values in the ``dagster.yaml`` file in ``$DAGSTER_HOME``. Configuration of concrete subclasses of this class should be done by setting values in that file. """ @property @abstractmethod def event_log_storage(self) -> EventLogStorage[T_DagsterInstance]: raise NotImplementedError() @property @abstractmethod def run_storage(self) -> RunStorage[T_DagsterInstance]: raise NotImplementedError() @property @abstractmethod def schedule_storage(self) -> ScheduleStorage[T_DagsterInstance]: raise NotImplementedError()
DagsterStorage
python
viewflow__viewflow
tests/contrib/test_contrib_auth.py
{ "start": 263, "end": 3218 }
class ____(TestCase): def setUp(self): self.admin = User.objects.create_superuser("admin", "admin@admin.com", "admin") self.user = User.objects.create_user("user", "user@user.com", "user") self.validator = html5lib.HTMLParser(strict=True) @tag("integration") def test_change_user_avatar(self): img = BytesIO(b"my_binary_data") img.name = "avatar.png" request = RequestFactory().post("/", {"avatar": img}) request.user = self.admin view = auth.ProfileView() view.setup(request) response = view.post(request) self.assertEqual(response.status_code, 200) self.assertIn("/media/avatars/", auth.get_user_avatar_url(self.admin)) @tag("integration") def test_get_default_user_avatar_url(self): """If no user avatar exists, use the default media.""" self.assertEqual( auth.get_user_avatar_url(self.user), "/static/viewflow/img/user.png" ) def test_profile_page(self): self.assertTrue(self.client.login(username="admin", password="admin")) response = self.client.get("/accounts/profile/") self.assertEqual(response.status_code, 200) self.validator.parse(response.content) def test_login_page(self): response = self.client.get("/accounts/login/") self.assertEqual(response.status_code, 200) self.validator.parse(response.content) def _test_logout_page(self): response = self.client.get("/accounts/logout/") self.assertEqual(response.status_code, 200) self.validator.parse(response.content) def test_password_change(self): self.assertTrue(self.client.login(username="admin", password="admin")) response = self.client.get("/accounts/password_change/") self.assertEqual(response.status_code, 200) self.validator.parse(response.content) def test_password_change_done(self): self.assertTrue(self.client.login(username="admin", password="admin")) response = self.client.get("/accounts/password_change/done/") self.assertEqual(response.status_code, 200) self.validator.parse(response.content) def test_password_reset_request(self): response = self.client.get("/accounts/password_reset/") 
self.assertEqual(response.status_code, 200) self.validator.parse(response.content) def test_password_reset_request_done(self): response = self.client.get("/accounts/password_reset/done/") self.assertEqual(response.status_code, 200) self.validator.parse(response.content) def _test_password_reset(self): pass # TODO def test_password_reset_complete(self): response = self.client.get("/accounts/reset/done/") self.assertEqual(response.status_code, 200) self.validator.parse(response.content) urlpatterns = [ path("accounts/", auth.AuthViewset().urls), ]
Test
python
doocs__leetcode
solution/2100-2199/2131.Longest Palindrome by Concatenating Two Letter Words/Solution.py
{ "start": 0, "end": 364 }
class ____: def longestPalindrome(self, words: List[str]) -> int: cnt = Counter(words) ans = x = 0 for k, v in cnt.items(): if k[0] == k[1]: x += v & 1 ans += v // 2 * 2 * 2 else: ans += min(v, cnt[k[::-1]]) * 2 ans += 2 if x else 0 return ans
Solution
python
huggingface__transformers
examples/modular-transformers/modeling_dummy_bert.py
{ "start": 1597, "end": 5644 }
class ____(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )
        self.register_buffer(
            "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values_length: int = 0,
    ) -> torch.Tensor:
        # Derive (batch, seq) from whichever of input_ids / inputs_embeds was given.
        input_shape = input_ids.size() if input_ids is not None else inputs_embeds.size()[:-1]
        batch_size, seq_length = input_shape

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

        # Fall back to the all-zero registered buffer when token_type_ids are omitted;
        # keeping it as a buffer lets users trace the model without passing
        # token_type_ids (solves issue #5664).
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                # NOTE: We assume either pos ids to have bsz == 1 (broadcastable) or bsz == effective bsz (input_shape[0])
                buffered = self.token_type_ids.expand(position_ids.shape[0], -1)
                gathered = torch.gather(buffered, dim=1, index=position_ids)
                token_type_ids = gathered.expand(batch_size, seq_length)
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        # Sum word + token_type, then position embeddings (same addition order as
        # the reference implementation), normalize, and apply dropout.
        embeddings = inputs_embeds + self.token_type_embeddings(token_type_ids)
        embeddings = embeddings + self.position_embeddings(position_ids)
        embeddings = self.LayerNorm(embeddings)
        return self.dropout(embeddings)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: Optional[float] = None,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    """Plain (eager) scaled dot-product attention; returns (output, weights)."""
    if scaling is None:
        scaling = query.size(-1) ** -0.5

    # Raw attention scores: QK^T scaled by 1/sqrt(head_dim).
    attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
    if attention_mask is not None and attention_mask.ndim == 4:
        # Trim the mask to the key length before adding it to the scores.
        attn_weights = attn_weights + attention_mask[:, :, :, : key.shape[-2]]

    attn_weights = nn.functional.softmax(attn_weights, dim=-1)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()
    return attn_output, attn_weights
DummyBertEmbeddings
python
networkx__networkx
networkx/algorithms/centrality/tests/test_closeness_centrality.py
{ "start": 6058, "end": 8728 }
class ____:
    @staticmethod
    def pick_add_edge(G):
        # Choose an arbitrary pair (u, v) that is NOT currently an edge.
        u = nx.utils.arbitrary_element(G)
        candidates = set(G) - (set(G.neighbors(u)) | {u})
        return (u, nx.utils.arbitrary_element(candidates))

    @staticmethod
    def pick_remove_edge(G):
        # Choose an arbitrary existing edge (u, v).
        u = nx.utils.arbitrary_element(G)
        return (u, nx.utils.arbitrary_element(list(G.neighbors(u))))

    def test_directed_raises(self):
        dir_G = nx.gn_graph(n=5)
        prev_cc = None
        edge = self.pick_add_edge(dir_G)
        with pytest.raises(nx.NetworkXNotImplemented):
            nx.incremental_closeness_centrality(dir_G, edge, prev_cc, insertion=True)

    def test_wrong_size_prev_cc_raises(self, undirected_G):
        G, prev_cc = undirected_G
        edge = self.pick_add_edge(G)
        # Drop one entry so prev_cc no longer covers every node.
        prev_cc.pop(0)
        with pytest.raises(nx.NetworkXError):
            nx.incremental_closeness_centrality(G, edge, prev_cc, insertion=True)

    def test_wrong_nodes_prev_cc_raises(self, undirected_G):
        G, prev_cc = undirected_G
        edge = self.pick_add_edge(G)
        # Same size as before, but keyed by a node that is not in G.
        num_nodes = len(prev_cc)
        prev_cc.pop(0)
        prev_cc[num_nodes] = 0.5
        with pytest.raises(nx.NetworkXError):
            nx.incremental_closeness_centrality(G, edge, prev_cc, insertion=True)

    def test_zero_centrality(self):
        # Removing an edge from a path graph disconnects it, producing a
        # zero-centrality node; incremental must match full recomputation.
        G = nx.path_graph(3)
        prev_cc = nx.closeness_centrality(G)
        edge = self.pick_remove_edge(G)
        test_cc = nx.incremental_closeness_centrality(G, edge, prev_cc, insertion=False)
        G.remove_edges_from([edge])
        real_cc = nx.closeness_centrality(G)
        shared = set(test_cc.items()) & set(real_cc.items())
        assert len(shared) == len(real_cc)
        assert 0 in test_cc.values()

    def test_incremental(self, undirected_G):
        # Alternate removals (even steps) and insertions (odd steps); the
        # incremental result must equal a full recomputation every time.
        G, _ = undirected_G
        prev_cc = None
        for step in range(5):
            insert = step % 2 == 1
            edge = self.pick_add_edge(G) if insert else self.pick_remove_edge(G)
            test_cc = nx.incremental_closeness_centrality(G, edge, prev_cc, insert)
            if insert:
                G.add_edges_from([edge])
            else:
                G.remove_edges_from([edge])
            real_cc = nx.closeness_centrality(G)
            assert set(test_cc.items()) == set(real_cc.items())
            prev_cc = test_cc
TestIncrementalClosenessCentrality
python
cython__cython
Cython/Compiler/StringEncoding.py
{ "start": 66, "end": 677 }
class ____:
    """Incrementally assemble a unicode string from fragments and code points."""

    def __init__(self):
        # Accumulated string fragments; joined lazily in getstring().
        self.chars = []

    def append(self, characters):
        assert isinstance(characters, str), f"Expected str, got {type(characters)}"
        self.chars.append(characters)

    def append_charval(self, char_number):
        # Append the character for the given Unicode code point.
        self.chars.append(chr(char_number))

    def append_uescape(self, char_number, escape_string):
        # The original escape text is irrelevant here; only the code point matters.
        self.append_charval(char_number)

    def getstring(self):
        return EncodedString(''.join(self.chars))

    def getstrings(self):
        # (bytes_value, unicode_value) pair; this builder has no byte form.
        return (None, self.getstring())
UnicodeLiteralBuilder