Dataset schema (one row per masked Python class):

    Field        Type            Values / lengths
    language     stringclasses   1 value
    repo         stringclasses   346 values
    path         stringlengths   6 to 201 characters
    class_span   dict
    source       stringlengths   21 to 2.38M characters
    target       stringlengths   1 to 96 characters

Each row holds a Python source excerpt in which one class name is masked as
____ (source), the repository and file the excerpt comes from (repo, path),
the start/end offsets of that class within the file (class_span), and the
masked class name as the label (target).
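A hedged loading sketch, assuming the rows ship as a Hugging Face `datasets` dataset; the repo id below is a placeholder, not the real identifier:

    from datasets import load_dataset

    ds = load_dataset("org/masked-class-name-corpus", split="train")  # placeholder id

    row = ds[0]
    span = row["class_span"]  # e.g. {"start": 68, "end": 496}
    print(row["repo"], row["path"], span)
    print(row["source"][:120], "...")   # class body with the name masked as ____
    print("label:", row["target"])      # the masked class name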
language: python
repo: pola-rs__polars
path: py-polars/src/polars/datatype_expr/list.py
class_span: { "start": 68, "end": 496 }
source:

    class ____:
        """Namespace for list datatype expressions."""

        _accessor = "list"

        def __init__(self, expr: pl.DataTypeExpr) -> None:
            self._pydatatype_expr = expr._pydatatype_expr

        def inner_dtype(self) -> pl.DataTypeExpr:
            """Get the inner DataType of list."""
            return pl.DataTypeExpr._from_pydatatype_expr(
                self._pydatatype_expr.list_inner_dtype()
            )

target: DataTypeExprListNameSpace
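A hedged usage sketch. It assumes a recent Polars where `pl.dtype_of` returns a `pl.DataTypeExpr` whose `.list` accessor is the namespace above, and that a `DataTypeExpr` is accepted by `cast`; treat both entry points as assumptions rather than confirmed API:

    import polars as pl

    lf = pl.LazyFrame({"values": [[1, 2], [3]]})

    # Assumed: pl.dtype_of(...) yields a DataTypeExpr, so .list.inner_dtype()
    # resolves to Int64 here without materializing the frame.
    inner = pl.dtype_of("values").list.inner_dtype()
    print(lf.select(pl.lit(0).cast(inner)).collect_schema())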
language: python
repo: huggingface__transformers
path: src/transformers/models/ernie4_5/modeling_ernie4_5.py
class_span: { "start": 5006, "end": 9361 }
source:

    class ____(nn.Module):
        def __init__(self, config: Ernie4_5Config):
            super().__init__()
            self.config = config
            self.hidden_size = config.hidden_size
            self.intermediate_size = config.intermediate_size
            self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.use_bias)
            self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.use_bias)
            self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias)
            self.act_fn = ACT2FN[config.hidden_act]

        def forward(self, x):
            down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
            return down_proj


    def rotate_half(x):
        """Rotates half the hidden dims of the input."""
        x1 = x[..., 0::2]
        x2 = x[..., 1::2]
        return torch.stack((-x2, x1), dim=-1).flatten(-2)


    def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
        """
        This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep).
        The hidden states go from (batch, num_key_value_heads, seqlen, head_dim)
        to (batch, num_attention_heads, seqlen, head_dim)
        """
        batch, num_key_value_heads, slen, head_dim = hidden_states.shape
        if n_rep == 1:
            return hidden_states
        hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
        return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


    def eager_attention_forward(
        module: nn.Module,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        attention_mask: Optional[torch.Tensor],
        scaling: float,
        dropout: float = 0.0,
        **kwargs: Unpack[TransformersKwargs],
    ):
        key_states = repeat_kv(key, module.num_key_value_groups)
        value_states = repeat_kv(value, module.num_key_value_groups)

        attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
        if attention_mask is not None:
            causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
            attn_weights = attn_weights + causal_mask

        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
        attn_output = torch.matmul(attn_weights, value_states)
        attn_output = attn_output.transpose(1, 2).contiguous()

        return attn_output, attn_weights


    def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
        """Applies Rotary Position Embedding to the query and key tensors.

        Args:
            q (`torch.Tensor`): The query tensor.
            k (`torch.Tensor`): The key tensor.
            cos (`torch.Tensor`): The cosine part of the rotary embedding.
            sin (`torch.Tensor`): The sine part of the rotary embedding.
            position_ids (`torch.Tensor`, *optional*):
                Deprecated and unused.
            unsqueeze_dim (`int`, *optional*, defaults to 1):
                The 'unsqueeze_dim' argument specifies the dimension along which to
                unsqueeze cos[position_ids] and sin[position_ids] so that they can be
                properly broadcasted to the dimensions of q and k. For example, note
                that cos[position_ids] and sin[position_ids] have the shape
                [batch_size, seq_len, head_dim]. Then, if q and k have the shape
                [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1
                makes cos[position_ids] and sin[position_ids] broadcastable to the
                shapes of q and k. Similarly, if q and k have the shape
                [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
        Returns:
            `tuple(torch.Tensor)` comprising of the query and key tensors rotated
            using the Rotary Position Embedding.
        """
        # glm rope style (with full dim) and full precision
        original_dtype = q.dtype
        cos = cos.unsqueeze(unsqueeze_dim)
        sin = sin.unsqueeze(unsqueeze_dim)

        # Interleave them instead of usual shape
        cos = cos[..., : cos.shape[-1] // 2].repeat_interleave(2, dim=-1)
        sin = sin[..., : sin.shape[-1] // 2].repeat_interleave(2, dim=-1)

        q_embed = (q.float() * cos) + (rotate_half(q).float() * sin)
        k_embed = (k.float() * cos) + (rotate_half(k).float() * sin)
        return q_embed.to(original_dtype), k_embed.to(original_dtype)

target: Ernie4_5MLP
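The interleaved RoPE above rotates even/odd channel pairs. A minimal sanity-check sketch assuming only `torch`: `rotate_half` is copied from the row, and at a zero rotation angle the embedding must be the identity:

    import torch

    def rotate_half(x):
        # Interleaved variant from the row above: pair (x0, x1) -> (-x1, x0).
        x1 = x[..., 0::2]
        x2 = x[..., 1::2]
        return torch.stack((-x2, x1), dim=-1).flatten(-2)

    head_dim = 4
    q = torch.arange(head_dim, dtype=torch.float32).reshape(1, 1, 1, head_dim)
    # Zero angle: cos = 1, sin = 0, so the rotation must leave q unchanged.
    cos = torch.ones(1, 1, head_dim)
    sin = torch.zeros(1, 1, head_dim)
    cos = cos[..., : head_dim // 2].repeat_interleave(2, dim=-1).unsqueeze(1)
    sin = sin[..., : head_dim // 2].repeat_interleave(2, dim=-1).unsqueeze(1)
    q_embed = q * cos + rotate_half(q) * sin
    assert torch.allclose(q_embed, q)  # identity at angle 0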
language: python
repo: tensorflow__tensorflow
path: tensorflow/python/ops/distributions/student_t.py
class_span: { "start": 13274, "end": 14185 }
source:

    class ____(StudentT):
        """StudentT with `df = floor(abs(df))` and `scale = softplus(scale)`."""

        @deprecation.deprecated(
            "2019-01-01",
            "Use `tfd.StudentT(tf.floor(tf.abs(df)), loc, "
            "tf.nn.softplus(scale)) instead.",
            warn_once=True)
        def __init__(self,
                     df,
                     loc,
                     scale,
                     validate_args=False,
                     allow_nan_stats=True,
                     name="StudentTWithAbsDfSoftplusScale"):
            parameters = dict(locals())
            with ops.name_scope(name, values=[df, scale]) as name:
                super(StudentTWithAbsDfSoftplusScale, self).__init__(
                    df=math_ops.floor(math_ops.abs(df)),
                    loc=loc,
                    scale=nn.softplus(scale, name="softplus_scale"),
                    validate_args=validate_args,
                    allow_nan_stats=allow_nan_stats,
                    name=name)
            self._parameters = parameters

target: StudentTWithAbsDfSoftplusScale
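The deprecation message names the replacement. A sketch of that migration, assuming TensorFlow 2.x with TensorFlow Probability installed (the `tfp` import is not part of the row):

    import tensorflow as tf
    import tensorflow_probability as tfp

    tfd = tfp.distributions

    df, loc, scale = -2.7, 0.0, 0.5
    dist = tfd.StudentT(
        df=tf.floor(tf.abs(df)),      # df = floor(abs(-2.7)) = 2.0
        loc=loc,
        scale=tf.nn.softplus(scale),  # softplus keeps the scale strictly positive
    )
    print(dist.mean())  # 0.0: the mean is defined since df > 1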
language: python
repo: run-llama__llama_index
path: llama-index-packs/llama-index-packs-llama-dataset-metadata/llama_index/packs/llama_dataset_metadata/base.py
class_span: { "start": 6239, "end": 8241 }
source:

    class ____(BaseLlamaPack):
        """
        A llamapack for creating and saving the necessary metadata files for
        submitting a llamadataset: card.json and README.md.
        """

        def run(
            self,
            index: BaseIndex,
            benchmark_df: pd.DataFrame,
            rag_dataset: "LabelledRagDataset",
            name: str,
            description: str,
            baseline_name: str,
            source_urls: Optional[List[str]] = None,
            code_url: Optional[str] = None,
        ):
            """
            Main usage for a llamapack. This will build the card.json and README.md
            and save them to local disk.

            Args:
                index (BaseIndex): the index from which query_engine is derived and
                    used in the rag evaluation.
                benchmark_df (pd.DataFrame): the benchmark dataframe after using
                    RagEvaluatorPack
                rag_dataset (LabelledRagDataset): the LabelledRagDataset used for
                    evaluations
                name (str): The name of the new dataset e.g., "Paul Graham Essay Dataset"
                baseline_name (str): The name of the baseline e.g., "llamaindex"
                description (str): The description of the new dataset.
                source_urls (Optional[List[str]], optional): _description_. Defaults to None.
                code_url (Optional[str], optional): _description_. Defaults to None.
            """
            readme_obj = Readme(name=name)
            card_obj = DatasetCard.from_rag_evaluation(
                index=index,
                benchmark_df=benchmark_df,
                rag_dataset=rag_dataset,
                name=name,
                description=description,
                baseline_name=baseline_name,
                source_urls=source_urls,
                code_url=code_url,
            )

            # save card.json
            with open("card.json", "w") as f:
                json.dump(card_obj.dict(by_alias=True), f)

            # save README.md
            with open("README.md", "w") as f:
                f.write(readme_obj.create_readme())

target: LlamaDatasetMetadataPack
language: python
repo: tiangolo__fastapi
path: docs_src/cookie_param_models/tutorial001_an_py310.py
class_span: { "start": 116, "end": 343 }
source:

    class ____(BaseModel):
        session_id: str
        fatebook_tracker: str | None = None
        googall_tracker: str | None = None


    @app.get("/items/")
    async def read_items(cookies: Annotated[Cookies, Cookie()]):
        return cookies

target: Cookies
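A hedged test sketch for the endpoint above. The import path is assumed from the row's path field; `TestClient` and its httpx cookie jar are standard FastAPI testing tools:

    from fastapi.testclient import TestClient

    from docs_src.cookie_param_models.tutorial001_an_py310 import app  # path from the row

    client = TestClient(app)
    client.cookies.set("session_id", "abc123")  # required cookie; trackers left unset

    response = client.get("/items/")
    assert response.status_code == 200
    assert response.json() == {
        "session_id": "abc123",
        "fatebook_tracker": None,
        "googall_tracker": None,
    }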
language: python
repo: django__django
path: django/views/generic/__init__.py
class_span: { "start": 802, "end": 886 }
source:

    class ____(Exception):
        """A problem in a generic view."""

        pass

target: GenericViewError
language: python
repo: kamyu104__LeetCode-Solutions
path: Python/k-inverse-pairs-array.py
class_span: { "start": 797, "end": 1460 }
source:

    class ____(object):
        def kInversePairs(self, n, k):
            """
            :type n: int
            :type k: int
            :rtype: int
            """
            MOD = 10**9 + 7
            dp = [0] * (k + 1)
            dp[0] = 1
            for i in xrange(n):
                new_dp = [0] * len(dp)
                for j in xrange(len(dp)):
                    new_dp[j] = dp[j]
                    if j - 1 >= 0:
                        new_dp[j] = (new_dp[j] + new_dp[j - 1]) % MOD
                    if j - (i + 1) >= 0:
                        new_dp[j] = (new_dp[j] - dp[j - (i + 1)]) % MOD
                dp = new_dp
            return dp[-1]

    # Time:  O(n * k)
    # Space: O(k)
    # knapsack dp, combinatorics, sliding window, two pointers

target: Solution2
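The DP builds `new_dp[j]` as a sliding-window sum over the previous row, since the (i+1)-th element can contribute 0..i new inversions. A Python 3 sketch that cross-checks small cases against brute force (`xrange` becomes `range`; the DP body is otherwise taken from the row):

    from itertools import permutations

    MOD = 10**9 + 7

    def k_inverse_pairs(n, k):
        # Python 3 port of the row's O(n*k)-time, O(k)-space sliding-window DP.
        dp = [0] * (k + 1)
        dp[0] = 1
        for i in range(n):
            new_dp = [0] * len(dp)
            for j in range(len(dp)):
                new_dp[j] = dp[j]
                if j - 1 >= 0:
                    new_dp[j] = (new_dp[j] + new_dp[j - 1]) % MOD
                if j - (i + 1) >= 0:
                    new_dp[j] = (new_dp[j] - dp[j - (i + 1)]) % MOD
            dp = new_dp
        return dp[-1]

    def brute(n, k):
        # Count permutations of 0..n-1 with exactly k inversions.
        return sum(
            1
            for p in permutations(range(n))
            if sum(p[a] > p[b] for a in range(n) for b in range(a + 1, n)) == k
        )

    for n in range(1, 6):
        for k in range(6):
            assert k_inverse_pairs(n, k) == brute(n, k)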
language: python
repo: tensorflow__tensorflow
path: tensorflow/python/kernel_tests/nn_ops/conv1d_transpose_test.py
class_span: { "start": 1139, "end": 9188 }
source:

    class ____(test.TestCase):

      def testConv1DTransposeSingleStride(self):
        with self.cached_session():
          strides = [1, 1, 1]

          # Input, output: [batch, width, depth]
          x_shape = [2, 6, 3]
          y_shape = [2, 6, 2]

          # Filter: [kernel_width, output_depth, input_depth]
          f_shape = [3, 2, 3]

          x = constant_op.constant(
              1.0, shape=x_shape, name="x", dtype=dtypes.float32)
          f = constant_op.constant(
              1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
          output = nn_ops.conv1d_transpose(
              x, f, y_shape, strides=strides, padding="SAME")
          value = self.evaluate(output)

          for n in range(y_shape[0]):
            for w in range(y_shape[1]):
              for c in range(y_shape[2]):
                target = 2 * 3.0
                w_in = w > 0 and w < y_shape[1] - 1
                if w_in:
                  target += 3.0
                self.assertAllClose(target, value[n, w, c])

      def testConv1DTransposeSame(self):
        with self.cached_session():
          strides = [1, 2, 1]

          # Input, output: [batch, width, depth]
          x_shape = [2, 4, 3]
          y_shape = [2, 8, 2]

          # Filter: [kernel_width, output_depth, input_depth]
          f_shape = [3, 2, 3]

          x = constant_op.constant(
              1.0, shape=x_shape, name="x", dtype=dtypes.float32)
          f = constant_op.constant(
              1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
          output = nn_ops.conv1d_transpose(
              x, f, y_shape, strides=strides, padding="SAME")
          value = self.evaluate(output)

          for n in range(x_shape[0]):
            for k in range(f_shape[1]):
              for w in range(y_shape[1]):
                target = 3.0
                # We add a case for locations divisible by the stride.
                w_in = w % strides[1] == 0 and w > 0 and w < y_shape[1] - 1
                if w_in:
                  target += 3.0
                self.assertAllClose(target, value[n, w, k])

      def testConv1DTransposeValid(self):
        with self.cached_session():
          strides = [1, 2, 1]

          # Input, output: [batch, width, depth]
          x_shape = [2, 4, 3]
          y_shape = [2, 9, 2]

          # Filter: [kernel_width, output_depth, input_depth]
          f_shape = [3, 2, 3]

          x = constant_op.constant(
              1.0, shape=x_shape, name="x", dtype=dtypes.float32)
          f = constant_op.constant(
              1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
          output = nn_ops.conv1d_transpose(
              x, f, y_shape, strides=strides, padding="VALID")
          value = self.evaluate(output)

          cache_values = np.zeros(y_shape, dtype=np.float32)

          # The amount of padding added
          pad = 1

          for n in range(x_shape[0]):
            for k in range(f_shape[1]):
              for w in range(pad, y_shape[1] - pad):
                target = 3.0
                # We add a case for locations divisible by the stride.
                w_in = w % strides[1] == 0 and w > pad and w < y_shape[1] - 1 - pad
                if w_in:
                  target += 3.0
                cache_values[n, w, k] = target

              # copy values in the border
              cache_values[n, 0, k] = cache_values[n, 1, k]
              cache_values[n, -1, k] = cache_values[n, -2, k]
              cache_values[n, :, k] = cache_values[n, :, k]

          self.assertAllClose(cache_values, value)

      @test_util.run_deprecated_v1
      def testGradient(self):
        self.skipTest("b/262851489: Fix nightly build for GPU.")
        x_shape = [2, 4, 3]
        f_shape = [3, 2, 3]
        y_shape = [2, 8, 2]
        strides = [1, 2, 1]
        np.random.seed(1)  # Make it reproducible.
        x_val = np.random.random_sample(x_shape).astype(np.float64)
        f_val = np.random.random_sample(f_shape).astype(np.float64)
        with self.cached_session():
          x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
          f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
          output = nn_ops.conv1d_transpose(
              x, f, y_shape, strides=strides, padding="SAME")
          err = gradient_checker.compute_gradient_error(
              [x, f], [x_shape, f_shape], output, y_shape)
        print("conv1d_transpose gradient err = %g " % err)
        err_tolerance = 0.0005
        self.assertLess(err, err_tolerance)

      def testConv1DTransposeSingleStrideNCW(self):
        # `NCW` data format is only supported for CUDA device.
        if test.is_gpu_available(cuda_only=True):
          with self.session():
            strides = [1, 1, 1]

            # Input, output: [batch, depth, width]
            x_shape = [2, 3, 4]
            y_shape = [2, 2, 4]

            # Filter: [kernel_width, output_depth, input_depth]
            f_shape = [3, 2, 3]

            x = constant_op.constant(
                1.0, shape=x_shape, name="x", dtype=dtypes.float32)
            f = constant_op.constant(
                1.0, shape=f_shape, name="filter", dtype=dtypes.float32)

            output = nn_ops.conv1d_transpose(
                x, f, y_shape, strides=strides, padding="SAME", data_format="NCW")

            value = self.evaluate(output)
            for n in range(x_shape[0]):
              for k in range(f_shape[1]):
                for w in range(y_shape[2]):
                  target = 2 * 3.0
                  w_in = w > 0 and w < y_shape[2] - 1
                  if w_in:
                    target += 3.0
                  self.assertAllClose(target, value[n, k, w])

      def testConv1DTransposeSameNCW(self):
        # `NCW` data format is only supported for CUDA device.
        if test.is_gpu_available(cuda_only=True):
          with self.session():
            strides = [1, 1, 2]

            # Input, output: [batch, depth, width]
            x_shape = [2, 3, 4]
            y_shape = [2, 2, 8]

            # Filter: [kernel_width, output_depth, input_depth]
            f_shape = [3, 2, 3]

            x = constant_op.constant(
                1.0, shape=x_shape, name="x", dtype=dtypes.float32)
            f = constant_op.constant(
                1.0, shape=f_shape, name="filter", dtype=dtypes.float32)

            output = nn_ops.conv1d_transpose(
                x, f, y_shape, strides=strides, padding="SAME", data_format="NCW")

            value = self.evaluate(output)
            for n in range(x_shape[0]):
              for k in range(f_shape[1]):
                for w in range(y_shape[2]):
                  target = 3.0
                  # We add a case for locations divisible by the stride.
                  w_in = w % strides[2] == 0 and w > 0 and w < y_shape[2] - 1
                  if w_in:
                    target += 3.0
                  self.assertAllClose(target, value[n, k, w])

      def testConv1DTransposeValidNCW(self):
        # `NCW` data format is only supported for CUDA device.
        if test.is_gpu_available(cuda_only=True):
          with self.session():
            strides = [1, 1, 2]

            # Input, output: [batch, depth, width]
            x_shape = [2, 3, 4]
            y_shape = [2, 2, 9]

            # Filter: [kernel_width, output_depth, input_depth]
            f_shape = [3, 2, 3]
            x = constant_op.constant(
                1.0, shape=x_shape, name="x", dtype=dtypes.float32)
            f = constant_op.constant(
                1.0, shape=f_shape, name="filter", dtype=dtypes.float32)

            output = nn_ops.conv1d_transpose(
                x, f, y_shape, strides=strides, padding="VALID", data_format="NCW")

            value = self.evaluate(output)
            cache_values = np.zeros(y_shape, dtype=np.float32)
            # The amount of padding added
            pad = 1
            for n in range(x_shape[0]):
              for k in range(f_shape[1]):
                for w in range(pad, y_shape[2] - pad):
                  target = 3.0
                  # We add a case for locations divisible by the stride.
                  w_in = w % strides[2] == 0 and w > pad and \
                      w < y_shape[2] - 1 - pad
                  if w_in:
                    target += 3.0
                  cache_values[n, k, w] = target

                # copy values in the border
                cache_values[n, k, 0] = cache_values[n, k, 1]
                cache_values[n, k, -1] = cache_values[n, k, -2]
                cache_values[n, k, :] = cache_values[n, k, :]

            self.assertAllClose(cache_values, value)


    if __name__ == "__main__":
      test.main()

target: Conv1DTransposeTest
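A minimal standalone sketch of the op under test, using the public `tf.nn.conv1d_transpose` wrapper rather than the test's internal `nn_ops` import; shapes mirror the first test case, where all-ones input and filter make interior positions sum 3 taps x 3 channels = 9 and borders 2 x 3 = 6:

    import tensorflow as tf

    # Input: [batch, width, depth]; filter: [kernel_width, output_depth, input_depth].
    x = tf.ones([2, 6, 3])
    f = tf.ones([3, 2, 3])
    y = tf.nn.conv1d_transpose(x, f, output_shape=[2, 6, 2], strides=1, padding="SAME")
    print(y[0, :, 0].numpy())  # -> [6. 9. 9. 9. 9. 6.], matching the test's targets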
language: python
repo: getsentry__sentry
path: src/sentry/utils/retries.py
class_span: { "start": 743, "end": 3436 }
source:

    class ____(ABC):
        @abstractmethod
        def __call__(self, function: Callable[[], T]) -> T:
            raise NotImplementedError

        @classmethod
        def wrap(cls, *args, **kwargs):
            """
            A decorator that may be used to wrap a function to be retried using this
            policy.
            """
            retrier = cls(*args, **kwargs)

            def decorator(fn):
                @functools.wraps(fn)
                def execute_with_retry(*args, **kwargs):
                    return retrier(functools.partial(fn, *args, **kwargs))

                return execute_with_retry

            return decorator


    def exponential_delay(duration: float) -> Callable[[int], float]:
        """
        Returns a simple exponential delay function that starts with the given
        duration.
        """

        def delay(attempt: int) -> float:
            return float(2 ** (attempt - 1)) * duration

        return delay


    def sigmoid_delay(offset: int = -5, midpoint: int = 0, step: int = 1) -> Callable[[int], float]:
        """
        Returns an S-Curve function.

        A sigmoid is the intersection of these two behaviors:

            `while(true): retry()  # immediate retry`

        and

            `while(true): sleep(1); retry()  # static-wait then retry`

        The intersection of these two worlds is an exponential function which
        gradually ramps the program up to (or down to) a stable state (the
        s-curve). The sharpness of the curve is controlled with step. A step of 0
        flattens the curve. A step of infinity turns the curve into a step change
        (a vertical line).

        The sigmoid is more difficult to intuit than a simple exponential delay
        but it allows you to cap the maximum amount of time you're willing to
        wait between retries. The cap is _always_ 1 second regardless of the
        value of the other arguments. If you want to wait longer than one second
        multiply the result of the function by something!

        Consider this program:

            [sigmoid_delay()(i) for i in range(10)]

        is equivalent to:

            [0.006, 0.017, 0.0474, 0.119, 0.268, 0.5, 0.731, 0.880, 0.952, 0.982]

        You get the same results with:

            [sigmoid_delay()(i) for i in range(5, 15)]

        except the window has changed:

            [0.5, 0.731, 0.880, 0.952, 0.982, ...]

        Now you see further along the curve. This explains the utility of the
        `offset` parameter. The offset allows you to slide along the window. A
        smaller offset gives you faster retries. A larger offset gives you slower
        retries. An offset pushed too far past the midpoint reduces this function
        to a static wait.
        """

        def delay(attempt: int) -> float:
            return 1 / (1 + math.exp(-step * ((attempt + offset) - midpoint)))

        return delay

target: RetryPolicy
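A quick sketch that reproduces the docstring's numbers with the stdlib; `sigmoid_delay` is copied from the row:

    import math

    def sigmoid_delay(offset=-5, midpoint=0, step=1):
        def delay(attempt):
            return 1 / (1 + math.exp(-step * ((attempt + offset) - midpoint)))
        return delay

    # Default offset=-5 slides the window so early attempts retry almost immediately.
    print([round(sigmoid_delay()(i), 3) for i in range(10)])
    # -> [0.007, 0.018, 0.047, 0.119, 0.269, 0.5, 0.731, 0.881, 0.953, 0.982]

    # A larger offset slides the window toward the 1-second cap (slower retries).
    print([round(sigmoid_delay(offset=0)(i), 3) for i in range(10)])
    # -> [0.5, 0.731, 0.881, 0.953, 0.982, 0.993, 0.998, 0.999, 1.0, 1.0]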
language: python
repo: readthedocs__readthedocs.org
path: readthedocs/organizations/views/base.py
class_span: { "start": 4269, "end": 5364 }
source:

    class ____(SuccessMessageMixin, CheckOrganizationsEnabled):
        """Mixin for an organization view that doesn't have nested components."""

        model = Organization
        form_class = OrganizationForm
        admin_only = True

        # Only relevant when mixed into
        lookup_field = "slug"
        lookup_url_field = "slug"

        def get_queryset(self):
            if self.admin_only:
                return Organization.objects.for_admin_user(user=self.request.user)
            return Organization.objects.for_user(user=self.request.user)

        def get_form(self, data=None, files=None, **kwargs):
            kwargs["user"] = self.request.user
            cls = self.get_form_class()
            return cls(data, files, **kwargs)

        def get_context_data(self, **kwargs):
            """Add onboarding context."""
            context = super().get_context_data(**kwargs)
            if not self.get_queryset().exists():
                context["onboarding"] = True
            return context

        def get_success_url(self):
            return reverse_lazy(
                "organization_edit",
                args=[self.object.slug],
            )

target: OrganizationView
language: python
repo: apache__airflow
path: providers/amazon/tests/unit/amazon/aws/operators/test_eks.py
class_span: { "start": 29991, "end": 37751 }
source:

    class ____:
        @mock.patch("airflow.providers.cncf.kubernetes.operators.pod.KubernetesPodOperator.execute")
        @mock.patch("airflow.providers.amazon.aws.hooks.eks.EksHook.generate_config_file")
        @mock.patch("airflow.providers.amazon.aws.hooks.eks.EksHook._secure_credential_context")
        @mock.patch("airflow.providers.amazon.aws.hooks.eks.EksHook.get_session")
        @mock.patch("airflow.providers.amazon.aws.hooks.eks.EksHook.__init__", return_value=None)
        def test_existing_nodegroup(
            self,
            mock_eks_hook,
            mock_get_session,
            mock_secure_credential_context,
            mock_generate_config_file,
            mock_k8s_pod_operator_execute,
        ):
            ti_context = mock.MagicMock(name="ti_context")

            # Mock the credential chain
            mock_session = mock.MagicMock()
            mock_credentials = mock.MagicMock()
            mock_frozen_credentials = mock.MagicMock()
            mock_frozen_credentials.access_key = "test_access_key"
            mock_frozen_credentials.secret_key = "test_secret_key"
            mock_frozen_credentials.token = "test_token"

            mock_get_session.return_value = mock_session
            mock_session.get_credentials.return_value = mock_credentials
            mock_credentials.get_frozen_credentials.return_value = mock_frozen_credentials

            # Mock the credential context manager
            mock_credentials_file = "/tmp/test_creds.aws_creds"
            mock_secure_credential_context.return_value.__enter__.return_value = mock_credentials_file

            # Mock the config file context manager
            mock_config_file = "/tmp/test_kubeconfig"
            mock_generate_config_file.return_value.__enter__.return_value = mock_config_file

            op = EksPodOperator(
                task_id="run_pod",
                pod_name="run_pod",
                cluster_name=CLUSTER_NAME,
                image="amazon/aws-cli:latest",
                cmds=["sh", "-c", "ls"],
                labels={"demo": "hello_world"},
                get_logs=True,
                # Delete the pod when it reaches its final state, or the execution is interrupted.
                on_finish_action="delete_pod",
            )
            op_return_value = op.execute(ti_context)

            # Verify all the expected calls were made
            mock_k8s_pod_operator_execute.assert_called_once_with(ti_context)
            mock_eks_hook.assert_called_once_with(aws_conn_id="aws_default", region_name=None)
            mock_get_session.assert_called_once()
            mock_session.get_credentials.assert_called_once()
            mock_credentials.get_frozen_credentials.assert_called_once()
            mock_secure_credential_context.assert_called_once_with(
                "test_access_key", "test_secret_key", "test_token"
            )
            mock_generate_config_file.assert_called_once_with(
                eks_cluster_name=CLUSTER_NAME, pod_namespace="default", credentials_file=mock_credentials_file
            )
            assert mock_k8s_pod_operator_execute.return_value == op_return_value
            assert op.config_file == mock_config_file

        @pytest.mark.parametrize(
            ("compatible_kpo", "kwargs", "expected_attributes"),
            [
                (
                    True,
                    {"on_finish_action": "delete_succeeded_pod"},
                    {"on_finish_action": OnFinishAction.DELETE_SUCCEEDED_POD},
                ),
                (
                    # test default
                    True,
                    {},
                    {"on_finish_action": OnFinishAction.DELETE_POD},
                ),
                (
                    # test default
                    False,
                    {},
                    {},
                ),
            ],
        )
        def test_on_finish_action_handler(self, compatible_kpo, kwargs, expected_attributes):
            kpo_init_args_mock = mock.MagicMock(**{"parameters": ["on_finish_action"] if compatible_kpo else []})

            with mock.patch("inspect.signature", return_value=kpo_init_args_mock):
                op = EksPodOperator(
                    task_id="run_pod",
                    pod_name="run_pod",
                    cluster_name=CLUSTER_NAME,
                    image="amazon/aws-cli:latest",
                    cmds=["sh", "-c", "ls"],
                    labels={"demo": "hello_world"},
                    get_logs=True,
                    **kwargs,
                )
                for expected_attr in expected_attributes:
                    assert op.__getattribute__(expected_attr) == expected_attributes[expected_attr]

        def test_template_fields(self):
            op = EksPodOperator(
                task_id="run_pod",
                pod_name="run_pod",
                cluster_name=CLUSTER_NAME,
                image="amazon/aws-cli:latest",
                cmds=["sh", "-c", "ls"],
                labels={"demo": "hello_world"},
                get_logs=True,
                on_finish_action="delete_pod",
            )
            validate_template_fields(op)

        @mock.patch("airflow.providers.cncf.kubernetes.operators.pod.KubernetesPodOperator.trigger_reentry")
        @mock.patch("airflow.providers.amazon.aws.hooks.eks.EksHook.generate_config_file")
        @mock.patch("airflow.providers.amazon.aws.hooks.eks.EksHook._secure_credential_context")
        @mock.patch("airflow.providers.amazon.aws.hooks.eks.EksHook.get_session")
        @mock.patch("airflow.providers.amazon.aws.hooks.eks.EksHook.__init__", return_value=None)
        def test_trigger_reentry(
            self,
            mock_eks_hook,
            mock_get_session,
            mock_secure_credential_context,
            mock_generate_config_file,
            mock_k8s_pod_operator_trigger_reentry,
        ):
            ti_context = mock.MagicMock(name="ti_context")
            event = {"eks_cluster_name": "eks_cluster_name", "namespace": "namespace"}

            # Mock the credential chain
            mock_session = mock.MagicMock()
            mock_credentials = mock.MagicMock()
            mock_frozen_credentials = mock.MagicMock()
            mock_frozen_credentials.access_key = "test_access_key"
            mock_frozen_credentials.secret_key = "test_secret_key"
            mock_frozen_credentials.token = "test_token"

            mock_get_session.return_value = mock_session
            mock_session.get_credentials.return_value = mock_credentials
            mock_credentials.get_frozen_credentials.return_value = mock_frozen_credentials

            # Mock the credential context manager
            mock_credentials_file = "/tmp/test_creds.aws_creds"
            mock_secure_credential_context.return_value.__enter__.return_value = mock_credentials_file

            # Mock the config file context manager
            mock_config_file = "/tmp/test_kubeconfig"
            mock_generate_config_file.return_value.__enter__.return_value = mock_config_file

            op = EksPodOperator(
                task_id="run_pod",
                pod_name="run_pod",
                cluster_name=CLUSTER_NAME,
                image="amazon/aws-cli:latest",
                cmds=["sh", "-c", "ls"],
                labels={"demo": "hello_world"},
                get_logs=True,
                # Delete the pod when it reaches its final state, or the execution is interrupted.
                on_finish_action="delete_pod",
            )
            op.trigger_reentry(ti_context, event)

            # Verify all the expected calls were made
            mock_k8s_pod_operator_trigger_reentry.assert_called_once_with(ti_context, event)
            mock_get_session.assert_called_once()
            mock_session.get_credentials.assert_called_once()
            mock_credentials.get_frozen_credentials.assert_called_once()
            mock_secure_credential_context.assert_called_once_with(
                "test_access_key", "test_secret_key", "test_token"
            )
            mock_generate_config_file.assert_called_once_with(
                eks_cluster_name="eks_cluster_name",
                pod_namespace="namespace",
                credentials_file=mock_credentials_file,
            )
            assert op.config_file == mock_config_file

target: TestEksPodOperator
language: python
repo: ethereum__web3.py
path: web3/providers/persistent/request_processor.py
class_span: { "start": 678, "end": 1081 }
source:

    class ____(asyncio.Queue[T]):
        """
        A queue that relies on a task to be running to process items in the queue.
        """

        async def get(self) -> T:
            item = await super().get()
            if isinstance(item, Exception):
                # if the item is an exception, raise it so the task can handle this
                # case more gracefully
                raise item
            return item

target: TaskReliantQueue
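A minimal sketch of the exception-propagation behavior, stdlib only; the class body is copied from the row, with a local `T` TypeVar added so it runs standalone (requires Python 3.9+ for `asyncio.Queue[T]`):

    import asyncio
    from typing import TypeVar

    T = TypeVar("T")

    class TaskReliantQueue(asyncio.Queue[T]):
        async def get(self) -> T:
            item = await super().get()
            if isinstance(item, Exception):
                # Surface errors enqueued by a producer task at the consumer's await.
                raise item
            return item

    async def main() -> None:
        q: TaskReliantQueue[int] = TaskReliantQueue()
        q.put_nowait(1)
        q.put_nowait(ValueError("producer failed"))  # type: ignore[arg-type]
        print(await q.get())  # -> 1
        try:
            await q.get()
        except ValueError as exc:
            print(f"re-raised: {exc}")

    asyncio.run(main())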
language: python
repo: getsentry__sentry
path: tests/sentry/event_manager/test_event_manager.py
class_span: { "start": 163816, "end": 168012 }
source:

    class ____(TransactionTestCase):
        def test_simple(self) -> None:
            perf_data = load_data("transaction-n-plus-one", timestamp=before_now(minutes=10))
            perf_data["event_id"] = str(uuid.uuid4())
            event = _get_event_instance(perf_data, project_id=self.project.id)
            group_hash = "some_group"
            group, created, _ = save_grouphash_and_group(self.project, event, group_hash)
            assert created
            group_2, created, _ = save_grouphash_and_group(self.project, event, group_hash)
            assert group.id == group_2.id
            assert not created
            assert Group.objects.filter(grouphash__hash=group_hash).count() == 1
            group_3, created, _ = save_grouphash_and_group(self.project, event, "new_hash")
            assert created
            assert group_2.id != group_3.id
            assert Group.objects.filter(grouphash__hash=group_hash).count() == 1


    example_transaction_event = {
        "type": "transaction",
        "timestamp": datetime.now().isoformat(),
        "start_timestamp": (datetime.now() - timedelta(seconds=1)).isoformat(),
        "spans": [],
        "contexts": {
            "trace": {
                "parent_span_id": "8988cec7cc0779c1",
                "type": "trace",
                "op": "foobar",
                "trace_id": "a7d67cf796774551a95be6543cacd459",
                "span_id": "babaae0d4b7512d9",
                "status": "ok",
            }
        },
    }


    example_error_event = {
        "event_id": "80e3496eff734ab0ac993167aaa0d1cd",
        "release": "5.222.5",
        "type": "error",
        "level": "fatal",
        "platform": "cocoa",
        "tags": {"level": "fatal"},
        "environment": "test-app",
        "sdk": {
            "name": "sentry.cocoa",
            "version": "8.2.0",
            "integrations": [
                "Crash",
                "PerformanceTracking",
                "MetricKit",
                "WatchdogTerminationTracking",
                "ViewHierarchy",
                "NetworkTracking",
                "ANRTracking",
                "AutoBreadcrumbTracking",
                "FramesTracking",
                "AppStartTracking",
                "Screenshot",
                "FileIOTracking",
                "UIEventTracking",
                "AutoSessionTracking",
                "CoreDataTracking",
                "PreWarmedAppStartTracing",
            ],
        },
        "user": {
            "id": "803F5C87-0F8B-41C7-8499-27BD71A92738",
            "ip_address": "192.168.0.1",
            "geo": {"country_code": "US", "region": "United States"},
        },
        "logger": "my.logger.name",
    }


    @pytest.mark.parametrize(
        "event_data,expected_type",
        [
            pytest.param(
                example_transaction_event,
                "transactions",
                id="transactions",
            ),
            pytest.param(
                example_error_event,
                "errors",
                id="errors",
            ),
        ],
    )
    @django_db_all
    def test_cogs_event_manager(
        default_project: int, event_data: Mapping[str, Any], expected_type: str
    ) -> None:
        storage: MemoryMessageStorage[KafkaPayload] = MemoryMessageStorage()
        broker = LocalBroker(storage)
        topic = Topic("shared-resources-usage")
        broker.create_topic(topic, 1)
        producer = broker.get_producer()

        with (
            override_options(
                {"shared_resources_accounting_enabled": [settings.COGS_EVENT_STORE_LABEL]}
            ),
            usage_accountant_backend(producer),
        ):
            raw_event_params = make_event(**event_data)
            manager = EventManager(raw_event_params)
            manager.normalize()
            normalized_data = dict(manager.get_data())
            _ = manager.save(default_project)

        expected_len = len(json.dumps(normalized_data))

        msg1 = broker.consume(Partition(topic, 0), 0)
        assert msg1 is not None
        payload = msg1.payload
        assert payload is not None
        formatted = json.loads(payload.value.decode("utf-8"))
        assert formatted["shared_resource_id"] == settings.COGS_EVENT_STORE_LABEL
        assert formatted["app_feature"] == expected_type
        assert formatted["usage_unit"] == "bytes"
        # We cannot assert for exact length because manager save method adds some
        # extra fields. So we assert that the length is at least greater than the
        # expected length.
        assert formatted["amount"] >= expected_len

target: TestSaveGroupHashAndGroup
language: python
repo: doocs__leetcode
path: solution/0100-0199/0111.Minimum Depth of Binary Tree/Solution.py
class_span: { "start": 192, "end": 545 }
source:

    class ____:
        def minDepth(self, root: Optional[TreeNode]) -> int:
            if root is None:
                return 0
            if root.left is None:
                return 1 + self.minDepth(root.right)
            if root.right is None:
                return 1 + self.minDepth(root.left)
            return 1 + min(self.minDepth(root.left), self.minDepth(root.right))

target: Solution
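A self-contained check of the recursion; the `TreeNode` definition is the usual LeetCode boilerplate, not part of the row:

    from typing import Optional

    class TreeNode:
        def __init__(self, val=0, left=None, right=None):
            self.val = val
            self.left = left
            self.right = right

    class Solution:
        def minDepth(self, root: Optional[TreeNode]) -> int:
            if root is None:
                return 0
            if root.left is None:
                return 1 + self.minDepth(root.right)
            if root.right is None:
                return 1 + self.minDepth(root.left)
            return 1 + min(self.minDepth(root.left), self.minDepth(root.right))

    # Tree [3, 9, 20, null, null, 15, 7]: the shortest root-to-leaf path is 3 -> 9.
    root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
    assert Solution().minDepth(root) == 2
    # Skewed tree [2, null, 3]: the one-child branches keep the depth correct (2, not 1).
    assert Solution().minDepth(TreeNode(2, None, TreeNode(3))) == 2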
language: python
repo: pyparsing__pyparsing
path: pyparsing/core.py
class_span: { "start": 195863, "end": 196536 }
source:

    class ____(ParseElementEnhance):
        """Matches if expression matches at the beginning of the parse string::

            AtStringStart(Word(nums)).parse_string("123")  # prints ["123"]
            AtStringStart(Word(nums)).parse_string(" 123")  # raises ParseException
        """

        def __init__(self, expr: Union[ParserElement, str]) -> None:
            super().__init__(expr)
            self.callPreparse = False

        def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
            if loc != 0:
                raise ParseException(instring, loc, "not found at string start")
            return super().parseImpl(instring, loc, do_actions)

target: AtStringStart
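The docstring's examples, made runnable with the public pyparsing 3.x API:

    from pyparsing import AtStringStart, ParseException, Word, nums

    expr = AtStringStart(Word(nums))
    print(expr.parse_string("123"))  # -> ['123']

    try:
        expr.parse_string(" 123")  # leading space: the match is not at position 0
    except ParseException as err:
        print(f"failed as documented: {err}")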
language: python
repo: astropy__astropy
path: astropy/io/fits/tests/test_hdulist.py
class_span: { "start": 601, "end": 46659 }
source:
class ____(FitsTestCase): def test_update_name(self): with fits.open(self.data("o4sp040b0_raw.fits")) as hdul: hdul[4].name = "Jim" hdul[4].ver = 9 assert hdul[("JIM", 9)].header["extname"] == "JIM" def test_hdu_file_bytes(self): with fits.open(self.data("checksum.fits")) as hdul: res = hdul[0].filebytes() assert res == 11520 res = hdul[1].filebytes() assert res == 8640 def test_hdulist_file_info(self): def test_fileinfo(**kwargs): assert res["datSpan"] == kwargs.get("datSpan", 2880) assert res["resized"] == kwargs.get("resized", False) assert res["filename"] == self.data("checksum.fits") assert res["datLoc"] == kwargs.get("datLoc", 8640) assert res["hdrLoc"] == kwargs.get("hdrLoc", 0) assert res["filemode"] == "readonly" with fits.open(self.data("checksum.fits")) as hdul: res = hdul.fileinfo(0) res = hdul.fileinfo(1) test_fileinfo(datLoc=17280, hdrLoc=11520) hdu = fits.ImageHDU(data=hdul[0].data) hdul.insert(1, hdu) res = hdul.fileinfo(0) test_fileinfo(resized=True) res = hdul.fileinfo(1) test_fileinfo(datSpan=None, resized=True, datLoc=None, hdrLoc=None) res = hdul.fileinfo(2) test_fileinfo(resized=1, datLoc=17280, hdrLoc=11520) def test_create_from_multiple_primary(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/145 Ensure that a validation error occurs when saving an HDUList containing multiple PrimaryHDUs. """ hdul = fits.HDUList([fits.PrimaryHDU(), fits.PrimaryHDU()]) pytest.raises( VerifyError, hdul.writeto, self.temp("temp.fits"), output_verify="exception" ) def test_append_primary_to_empty_list(self): # Tests appending a Simple PrimaryHDU to an empty HDUList. hdul = fits.HDUList() hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul.append(hdu) info = [(0, "PRIMARY", 1, "PrimaryHDU", 5, (100,), "int32", "")] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-append.fits")) assert fits.info(self.temp("test-append.fits"), output=False) == info def test_append_extension_to_empty_list(self): """Tests appending a Simple ImageHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.ImageHDU(np.arange(100, dtype=np.int32)) hdul.append(hdu) info = [(0, "PRIMARY", 1, "PrimaryHDU", 4, (100,), "int32", "")] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-append.fits")) assert fits.info(self.temp("test-append.fits"), output=False) == info def test_append_table_extension_to_empty_list(self): """Tests appending a Simple Table ExtensionHDU to a empty HDUList.""" hdul = fits.HDUList() with fits.open(self.data("tb.fits")) as hdul1: hdul.append(hdul1[1]) info = [ (0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""), (1, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""), ] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-append.fits")) assert fits.info(self.temp("test-append.fits"), output=False) == info def test_append_groupshdu_to_empty_list(self): """Tests appending a Simple GroupsHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.GroupsHDU() hdul.append(hdu) info = [(0, "PRIMARY", 1, "GroupsHDU", 8, (), "", "1 Groups 0 Parameters")] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-append.fits")) assert fits.info(self.temp("test-append.fits"), output=False) == info def test_append_primary_to_non_empty_list(self): """Tests appending a Simple PrimaryHDU to a non-empty HDUList.""" with fits.open(self.data("arange.fits")) as hdul: hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul.append(hdu) info = [ (0, "PRIMARY", 1, "PrimaryHDU", 7, (11, 10, 7), "int32", ""), (1, 
"", 1, "ImageHDU", 6, (100,), "int32", ""), ] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-append.fits")) assert fits.info(self.temp("test-append.fits"), output=False) == info def test_append_extension_to_non_empty_list(self): """Tests appending a Simple ExtensionHDU to a non-empty HDUList.""" with fits.open(self.data("tb.fits")) as hdul: hdul.append(hdul[1]) info = [ (0, "PRIMARY", 1, "PrimaryHDU", 11, (), "", ""), (1, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""), (2, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""), ] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-append.fits")) assert fits.info(self.temp("test-append.fits"), output=False) == info def test_append_groupshdu_to_non_empty_list(self): """Tests appending a Simple GroupsHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul.append(hdu) hdu = fits.GroupsHDU() with pytest.raises(ValueError): hdul.append(hdu) @pytest.mark.parametrize( "image", ["scale.fits", "o4sp040b0_raw.fits", "fixed-1890.fits"] ) @pytest.mark.parametrize("do_not_scale", [True, False]) def test_append_scaled_image_with_do_not_scale_image_data( self, image, do_not_scale ): """Tests appending a scaled ImageHDU to a HDUList.""" with fits.open( self.data(image), do_not_scale_image_data=do_not_scale ) as source: # create the file dest = fits.HDUList() dest.append(source[0]) # append a second hdu dest.append(source[0]) assert dest[-1].header.get("BZERO") == source[0].header.get("BZERO") assert dest[-1].header.get("BSCALE") == source[0].header.get("BSCALE") dest.writeto(self.temp("test-append.fits")) with fits.open( self.temp("test-append.fits"), do_not_scale_image_data=do_not_scale ) as tmphdu: assert tmphdu[-1].header.get("BZERO") == source[0].header.get("BZERO") assert tmphdu[-1].header.get("BSCALE") == source[0].header.get("BSCALE") def test_insert_primary_to_empty_list(self): """Tests inserting a Simple PrimaryHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul.insert(0, hdu) info = [(0, "PRIMARY", 1, "PrimaryHDU", 5, (100,), "int32", "")] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-insert.fits")) assert fits.info(self.temp("test-insert.fits"), output=False) == info def test_insert_extension_to_empty_list(self): """Tests inserting a Simple ImageHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.ImageHDU(np.arange(100, dtype=np.int32)) hdul.insert(0, hdu) info = [(0, "PRIMARY", 1, "PrimaryHDU", 4, (100,), "int32", "")] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-insert.fits")) assert fits.info(self.temp("test-insert.fits"), output=False) == info def test_insert_table_extension_to_empty_list(self): """Tests inserting a Simple Table ExtensionHDU to a empty HDUList.""" hdul = fits.HDUList() with fits.open(self.data("tb.fits")) as hdul1: hdul.insert(0, hdul1[1]) info = [ (0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""), (1, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""), ] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-insert.fits")) assert fits.info(self.temp("test-insert.fits"), output=False) == info def test_insert_groupshdu_to_empty_list(self): """Tests inserting a Simple GroupsHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.GroupsHDU() hdul.insert(0, hdu) info = [(0, "PRIMARY", 1, "GroupsHDU", 8, (), "", "1 Groups 0 Parameters")] assert hdul.info(output=False) == info 
hdul.writeto(self.temp("test-insert.fits")) assert fits.info(self.temp("test-insert.fits"), output=False) == info def test_insert_primary_to_non_empty_list(self): """Tests inserting a Simple PrimaryHDU to a non-empty HDUList.""" with fits.open(self.data("arange.fits")) as hdul: hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul.insert(1, hdu) info = [ (0, "PRIMARY", 1, "PrimaryHDU", 7, (11, 10, 7), "int32", ""), (1, "", 1, "ImageHDU", 6, (100,), "int32", ""), ] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-insert.fits")) assert fits.info(self.temp("test-insert.fits"), output=False) == info def test_insert_extension_to_non_empty_list(self): """Tests inserting a Simple ExtensionHDU to a non-empty HDUList.""" with fits.open(self.data("tb.fits")) as hdul: hdul.insert(1, hdul[1]) info = [ (0, "PRIMARY", 1, "PrimaryHDU", 11, (), "", ""), (1, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""), (2, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""), ] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-insert.fits")) assert fits.info(self.temp("test-insert.fits"), output=False) == info def test_insert_groupshdu_to_non_empty_list(self): """Tests inserting a Simple GroupsHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul.insert(0, hdu) hdu = fits.GroupsHDU() with pytest.raises(ValueError): hdul.insert(1, hdu) info = [ (0, "PRIMARY", 1, "GroupsHDU", 8, (), "", "1 Groups 0 Parameters"), (1, "", 1, "ImageHDU", 6, (100,), "int32", ""), ] hdul.insert(0, hdu) assert hdul.info(output=False) == info hdul.writeto(self.temp("test-insert.fits")) assert fits.info(self.temp("test-insert.fits"), output=False) == info def test_insert_groupshdu_to_begin_of_hdulist_with_groupshdu(self): """ Tests inserting a Simple GroupsHDU to the beginning of an HDUList that that already contains a GroupsHDU. """ hdul = fits.HDUList() hdu = fits.GroupsHDU() hdul.insert(0, hdu) with pytest.raises(ValueError): hdul.insert(0, hdu) def test_insert_extension_to_primary_in_non_empty_list(self): # Tests inserting a Simple ExtensionHDU to a non-empty HDUList. with fits.open(self.data("tb.fits")) as hdul: hdul.insert(0, hdul[1]) info = [ (0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""), (1, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""), (2, "", 1, "ImageHDU", 12, (), "", ""), (3, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""), ] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-insert.fits")) assert fits.info(self.temp("test-insert.fits"), output=False) == info def test_insert_image_extension_to_primary_in_non_empty_list(self): """ Tests inserting a Simple Image ExtensionHDU to a non-empty HDUList as the primary HDU. 
""" with fits.open(self.data("tb.fits")) as hdul: hdu = fits.ImageHDU(np.arange(100, dtype=np.int32)) hdul.insert(0, hdu) info = [ (0, "PRIMARY", 1, "PrimaryHDU", 5, (100,), "int32", ""), (1, "", 1, "ImageHDU", 12, (), "", ""), (2, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""), ] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-insert.fits")) assert fits.info(self.temp("test-insert.fits"), output=False) == info def test_filename(self, home_is_data): """Tests the HDUList filename method.""" with fits.open(self.data("tb.fits")) as hdul: name = hdul.filename() assert name == os.path.expanduser(self.data("tb.fits")) def test_file_like(self): """ Tests the use of a file like object with no tell or seek methods in HDUList.writeto(), HDULIST.flush() or astropy.io.fits.writeto() """ hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul = fits.HDUList() hdul.append(hdu) tmpfile = open(self.temp("tmpfile.fits"), "wb") hdul.writeto(tmpfile) tmpfile.close() info = [(0, "PRIMARY", 1, "PrimaryHDU", 5, (100,), "int32", "")] assert fits.info(self.temp("tmpfile.fits"), output=False) == info def test_file_like_2(self): hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) tmpfile = open(self.temp("tmpfile.fits"), "wb") hdul = fits.open(tmpfile, mode="ostream") hdul.append(hdu) hdul.flush() tmpfile.close() hdul.close() info = [(0, "PRIMARY", 1, "PrimaryHDU", 5, (100,), "int32", "")] assert fits.info(self.temp("tmpfile.fits"), output=False) == info def test_file_like_3(self): tmpfile = open(self.temp("tmpfile.fits"), "wb") fits.writeto(tmpfile, np.arange(100, dtype=np.int32)) tmpfile.close() info = [(0, "PRIMARY", 1, "PrimaryHDU", 5, (100,), "int32", "")] assert fits.info(self.temp("tmpfile.fits"), output=False) == info def test_shallow_copy(self): """ Tests that `HDUList.__copy__()` and `HDUList.copy()` return a shallow copy (regression test for #7211). """ n = np.arange(10.0) primary_hdu = fits.PrimaryHDU(n) hdu = fits.ImageHDU(n) hdul = fits.HDUList([primary_hdu, hdu]) for hdulcopy in (hdul.copy(), copy.copy(hdul)): assert isinstance(hdulcopy, fits.HDUList) assert hdulcopy is not hdul assert hdulcopy[0] is hdul[0] assert hdulcopy[1] is hdul[1] def test_deep_copy(self): """ Tests that `HDUList.__deepcopy__()` returns a deep copy. """ n = np.arange(10.0) primary_hdu = fits.PrimaryHDU(n) hdu = fits.ImageHDU(n) hdul = fits.HDUList([primary_hdu, hdu]) hdulcopy = copy.deepcopy(hdul) assert isinstance(hdulcopy, fits.HDUList) assert hdulcopy is not hdul for index in range(len(hdul)): assert hdulcopy[index] is not hdul[index] assert hdulcopy[index].header == hdul[index].header np.testing.assert_array_equal(hdulcopy[index].data, hdul[index].data) def test_new_hdu_extname(self): """ Tests that new extension HDUs that are added to an HDUList can be properly indexed by their EXTNAME/EXTVER (regression test for ticket:48). """ with fits.open(self.data("test0.fits")) as f: hdul = fits.HDUList() hdul.append(f[0].copy()) hdu = fits.ImageHDU(header=f[1].header) hdul.append(hdu) assert hdul[1].header["EXTNAME"] == "SCI" assert hdul[1].header["EXTVER"] == 1 assert hdul.index_of(("SCI", 1)) == 1 assert hdul.index_of(hdu) == len(hdul) - 1 def test_update_filelike(self): """Test opening a file-like object in update mode and resizing the HDU. 
""" sf = io.BytesIO() arr = np.zeros((100, 100)) hdu = fits.PrimaryHDU(data=arr) hdu.writeto(sf) sf.seek(0) arr = np.zeros((200, 200)) hdul = fits.open(sf, mode="update") hdul[0].data = arr hdul.flush() sf.seek(0) hdul = fits.open(sf) assert len(hdul) == 1 assert (hdul[0].data == arr).all() def test_flush_readonly(self): """Test flushing changes to a file opened in a read only mode.""" oldmtime = os.stat(self.data("test0.fits")).st_mtime with fits.open(self.data("test0.fits")) as hdul: hdul[0].header["FOO"] = "BAR" with pytest.warns(AstropyUserWarning, match="mode is not supported") as w: hdul.flush() assert len(w) == 1 assert oldmtime == os.stat(self.data("test0.fits")).st_mtime def test_fix_extend_keyword(self): hdul = fits.HDUList() hdul.append(fits.PrimaryHDU()) hdul.append(fits.ImageHDU()) del hdul[0].header["EXTEND"] hdul.verify("silentfix") assert "EXTEND" in hdul[0].header assert hdul[0].header["EXTEND"] is True def test_fix_malformed_naxisj(self): """ Tests that malformed NAXISj values are fixed sensibly. """ hdu = fits.open(self.data("arange.fits")) # Malform NAXISj header data hdu[0].header["NAXIS1"] = 11.0 hdu[0].header["NAXIS2"] = "10.0" hdu[0].header["NAXIS3"] = "7" # Axes cache needs to be malformed as well hdu[0]._axes = [11.0, "10.0", "7"] # Perform verification including the fix hdu.verify("silentfix") # Check that malformed data was converted assert hdu[0].header["NAXIS1"] == 11 assert hdu[0].header["NAXIS2"] == 10 assert hdu[0].header["NAXIS3"] == 7 hdu.close() def test_fix_wellformed_naxisj(self): """ Tests that wellformed NAXISj values are not modified. """ hdu = fits.open(self.data("arange.fits")) # Fake new NAXISj header data hdu[0].header["NAXIS1"] = 768 hdu[0].header["NAXIS2"] = 64 hdu[0].header["NAXIS3"] = 8 # Axes cache needs to be faked as well hdu[0]._axes = [768, 64, 8] # Perform verification including the fix hdu.verify("silentfix") # Check that malformed data was converted assert hdu[0].header["NAXIS1"] == 768 assert hdu[0].header["NAXIS2"] == 64 assert hdu[0].header["NAXIS3"] == 8 hdu.close() def test_new_hdulist_extend_keyword(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/114 Tests that adding a PrimaryHDU to a new HDUList object updates the EXTEND keyword on that HDU. """ h0 = fits.Header() hdu = fits.PrimaryHDU(header=h0) sci = fits.ImageHDU(data=np.array([10])) hdul = fits.HDUList([hdu, sci]) assert "EXTEND" in hdu.header assert hdu.header["EXTEND"] is True hdul.writeto(self.temp("temp.fits")) hdr = fits.getheader(self.temp("temp.fits")) assert "EXTEND" in hdr assert hdr["EXTEND"] is True def test_replace_memmaped_array(self, home_is_temp): # Copy the original before we modify it with fits.open(self.data("test0.fits")) as hdul: hdul.writeto(self.temp("temp.fits")) hdul = fits.open(self.temp("temp.fits"), mode="update", memmap=True) old_data = hdul[1].data.copy() hdul[1].data = hdul[1].data + 1 hdul.close() with fits.open(self.temp("temp.fits"), memmap=True) as hdul: assert ((old_data + 1) == hdul[1].data).all() def test_open_file_with_bad_file_padding(self): """ Test warning when opening files with extra padding at the end. See https://github.com/astropy/astropy/issues/4351 """ # write some arbitrary data to a FITS file fits.writeto(self.temp("temp.fits"), np.arange(100)) # append some arbitrary number of zeros to the end with open(self.temp("temp.fits"), "ab") as fobj: fobj.write(b"\x00" * 1234) with pytest.warns( AstropyUserWarning, match="Unexpected extra padding at the end of the file." 
) as w: with fits.open(self.temp("temp.fits")) as fobj: fobj.info() assert len(w) == 1 @pytest.mark.filterwarnings("ignore:Unexpected extra padding") def test_open_file_with_end_padding(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/106 Open files with end padding bytes. """ with fits.open(self.data("test0.fits"), do_not_scale_image_data=True) as hdul: info = hdul.info(output=False) hdul.writeto(self.temp("temp.fits")) with open(self.temp("temp.fits"), "ab") as f: f.seek(0, os.SEEK_END) f.write(b"\0" * 2880) assert info == fits.info( self.temp("temp.fits"), output=False, do_not_scale_image_data=True ) def test_open_file_with_bad_header_padding(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/136 Open files with nulls for header block padding instead of spaces. """ a = np.arange(100).reshape(10, 10) hdu = fits.PrimaryHDU(data=a) hdu.writeto(self.temp("temp.fits")) # Figure out where the header padding begins and fill it with nulls end_card_pos = str(hdu.header).index("END" + " " * 77) padding_start = end_card_pos + 80 padding_len = 2880 - padding_start with open(self.temp("temp.fits"), "r+b") as f: f.seek(padding_start) f.write(b"\0" * padding_len) with pytest.warns( AstropyUserWarning, match="contains null bytes instead of spaces" ) as w: with fits.open(self.temp("temp.fits")) as hdul: assert (hdul[0].data == a).all() assert len(w) == 1 assert len(hdul) == 1 assert str(hdul[0].header) == str(hdu.header) def test_update_with_truncated_header(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/148 Test that saving an update where the header is shorter than the original header doesn't leave a stump from the old header in the file. """ data = np.arange(100) hdu = fits.PrimaryHDU(data=data) idx = 1 while len(hdu.header) < 34: hdu.header[f"TEST{idx}"] = idx idx += 1 hdu.writeto(self.temp("temp.fits"), checksum=True) with fits.open(self.temp("temp.fits"), mode="update") as hdul: # Modify the header, forcing it to be rewritten hdul[0].header["TEST1"] = 2 with fits.open(self.temp("temp.fits")) as hdul: assert (hdul[0].data == data).all() def test_update_resized_header(self, home_is_temp): """ Test saving updates to a file where the header is one block smaller than before, and in the case where the header is one block larger than before. 
""" data = np.arange(100) hdu = fits.PrimaryHDU(data=data) idx = 1 while len(str(hdu.header)) <= 2880: hdu.header[f"TEST{idx}"] = idx idx += 1 orig_header = hdu.header.copy() hdu.writeto(self.temp("temp.fits")) with fits.open(self.temp("temp.fits"), mode="update") as hdul: while len(str(hdul[0].header)) > 2880: del hdul[0].header[-1] with fits.open(self.temp("temp.fits")) as hdul: assert hdul[0].header == orig_header[:-1] assert (hdul[0].data == data).all() if ( sys.platform.startswith("win") and sys.version_info < (3, 14) and not NUMPY_LT_2_0 ): ctx = pytest.warns( UserWarning, match="Memory map object was closed but appears to still be referenced", ) else: ctx = nullcontext() with ctx, fits.open(self.temp("temp.fits"), mode="update") as hdul: idx = 101 while len(str(hdul[0].header)) <= 2880 * 2: hdul[0].header[f"TEST{idx}"] = idx idx += 1 # Touch something in the data too so that it has to be rewritten hdul[0].data[0] = 27 with fits.open(self.temp("temp.fits")) as hdul: assert hdul[0].header[:-37] == orig_header[:-1] assert hdul[0].data[0] == 27 assert (hdul[0].data[1:] == data[1:]).all() def test_update_resized_header2(self, home_is_temp): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/150 This is similar to test_update_resized_header, but specifically tests a case of multiple consecutive flush() calls on the same HDUList object, where each flush() requires a resize. """ data1 = np.arange(100) data2 = np.arange(100) + 100 phdu = fits.PrimaryHDU(data=data1) hdu = fits.ImageHDU(data=data2) phdu.writeto(self.temp("temp.fits")) with fits.open(self.temp("temp.fits"), mode="append") as hdul: hdul.append(hdu) with fits.open(self.temp("temp.fits"), mode="update") as hdul: idx = 1 while len(str(hdul[0].header)) <= 2880 * 2: hdul[0].header[f"TEST{idx}"] = idx idx += 1 hdul.flush() hdul.append(hdu) with fits.open(self.temp("temp.fits")) as hdul: assert (hdul[0].data == data1).all() assert hdul[1].header == hdu.header assert (hdul[1].data == data2).all() assert (hdul[2].data == data2).all() def test_hdul_fromstring(self): """ Test creating the HDUList structure in memory from a string containing an entire FITS file. This is similar to test_hdu_fromstring but for an entire multi-extension FITS file at once. 
""" # Tests HDUList.fromstring for all of Astropy's built in test files def test_fromstring(filename): with fits.open(filename) as hdul: orig_info = hdul.info(output=False) with open(filename, "rb") as f: dat = f.read() hdul2 = fits.HDUList.fromstring(dat) assert orig_info == hdul2.info(output=False) for idx in range(len(hdul)): assert hdul[idx].header == hdul2[idx].header if hdul[idx].data is None or hdul2[idx].data is None: assert hdul[idx].data == hdul2[idx].data elif hdul[idx].data.dtype.fields and hdul2[idx].data.dtype.fields: # Compare tables for n in hdul[idx].data.names: c1 = hdul[idx].data[n] c2 = hdul2[idx].data[n] assert (c1 == c2).all() elif any(dim == 0 for dim in hdul[idx].data.shape) or any( dim == 0 for dim in hdul2[idx].data.shape ): # For some reason some combinations of Python and Numpy # on Windows result in MemoryErrors when trying to work # on memmap arrays with more than one dimension but # some dimensions of size zero, so include a special # case for that return hdul[idx].data.shape == hdul2[idx].data.shape else: np.testing.assert_array_equal(hdul[idx].data, hdul2[idx].data) for filename in get_pkg_data_filenames("data", pattern="*.fits"): if sys.platform == "win32" and filename.endswith("zerowidth.fits"): # Running this test on this file causes a crash in some # versions of Numpy on Windows. See ticket: # https://aeon.stsci.edu/ssb/trac/pyfits/ticket/174 continue elif filename.endswith(("variable_length_table.fits", "theap-gap.fits")): # Comparing variable length arrays is non-trivial and thus # skipped at this point. # TODO: That's probably possible, so one could make it work. continue test_fromstring(filename) # Test that creating an HDUList from something silly raises a TypeError pytest.raises(TypeError, fits.HDUList.fromstring, ["a", "b", "c"]) @pytest.mark.filterwarnings("ignore:Saving a backup") def test_save_backup(self, home_is_temp): """Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/121 Save backup of file before flushing changes. """ testfile = self.copy_file("scale.fits") with fits.open(testfile, mode="update", save_backup=True) as hdul: # Make some changes to the original file to force its header # and data to be rewritten hdul[0].header["TEST"] = "TEST" # This emits warning that needs to be ignored at the # pytest.mark.filterwarnings level. hdul[0].data[0] = 0 assert os.path.exists(os.path.expanduser(self.temp("scale.fits.bak"))) with fits.open(self.data("scale.fits"), do_not_scale_image_data=True) as hdul1: with fits.open( self.temp("scale.fits.bak"), do_not_scale_image_data=True ) as hdul2: assert hdul1[0].header == hdul2[0].header assert (hdul1[0].data == hdul2[0].data).all() with fits.open(testfile, mode="update", save_backup=True) as hdul: # One more time to see if multiple backups are made hdul[0].header["TEST2"] = "TEST" hdul[0].data[0] = 1 assert os.path.exists(os.path.expanduser(self.temp("scale.fits.bak"))) assert os.path.exists(os.path.expanduser(self.temp("scale.fits.bak.1"))) def test_replace_mmap_data(self): """Regression test for https://github.com/spacetelescope/PyFITS/issues/25 Replacing the mmap'd data of one file with mmap'd data from a different file should work. 
""" arr_a = np.arange(10) arr_b = arr_a * 2 def test(mmap_a, mmap_b): hdu_a = fits.PrimaryHDU(data=arr_a) hdu_a.writeto(self.temp("test_a.fits"), overwrite=True) hdu_b = fits.PrimaryHDU(data=arr_b) hdu_b.writeto(self.temp("test_b.fits"), overwrite=True) with fits.open( self.temp("test_a.fits"), mode="update", memmap=mmap_a ) as hdul_a: with fits.open(self.temp("test_b.fits"), memmap=mmap_b) as hdul_b: hdul_a[0].data = hdul_b[0].data with fits.open(self.temp("test_a.fits")) as hdul_a: assert np.all(hdul_a[0].data == arr_b) test(True, True) # Repeat the same test but this time don't mmap A test(False, True) # Finally, without mmaping B test(True, False) def test_replace_mmap_data_2(self): """Regression test for https://github.com/spacetelescope/PyFITS/issues/25 Replacing the mmap'd data of one file with mmap'd data from a different file should work. Like test_replace_mmap_data but with table data instead of image data. """ arr_a = np.arange(10) arr_b = arr_a * 2 def test(mmap_a, mmap_b): col_a = fits.Column(name="a", format="J", array=arr_a) col_b = fits.Column(name="b", format="J", array=arr_b) hdu_a = fits.BinTableHDU.from_columns([col_a]) hdu_a.writeto(self.temp("test_a.fits"), overwrite=True) hdu_b = fits.BinTableHDU.from_columns([col_b]) hdu_b.writeto(self.temp("test_b.fits"), overwrite=True) with fits.open( self.temp("test_a.fits"), mode="update", memmap=mmap_a ) as hdul_a: with fits.open(self.temp("test_b.fits"), memmap=mmap_b) as hdul_b: hdul_a[1].data = hdul_b[1].data with fits.open(self.temp("test_a.fits")) as hdul_a: assert "b" in hdul_a[1].columns.names assert "a" not in hdul_a[1].columns.names assert np.all(hdul_a[1].data["b"] == arr_b) test(True, True) # Repeat the same test but this time don't mmap A test(False, True) # Finally, without mmaping B test(True, False) def test_extname_in_hdulist(self): """ Tests to make sure that the 'in' operator works. Regression test for https://github.com/astropy/astropy/issues/3060 """ with fits.open(self.data("o4sp040b0_raw.fits")) as hdulist: hdulist.append(fits.ImageHDU(name="a")) assert "a" in hdulist assert "A" in hdulist assert ("a", 1) in hdulist assert ("A", 1) in hdulist assert "b" not in hdulist assert ("a", 2) not in hdulist assert ("b", 1) not in hdulist assert ("b", 2) not in hdulist assert hdulist[0] in hdulist assert fits.ImageHDU() not in hdulist def test_overwrite(self, home_is_temp): hdulist = fits.HDUList([fits.PrimaryHDU()]) hdulist.writeto(self.temp("test_overwrite.fits")) with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH): hdulist.writeto(self.temp("test_overwrite.fits"), overwrite=False) hdulist.writeto(self.temp("test_overwrite.fits"), overwrite=True) def test_invalid_hdu_key_in_contains(self): """ Make sure invalid keys in the 'in' operator return False. Regression test for https://github.com/astropy/astropy/issues/5583 """ hdulist = fits.HDUList(fits.PrimaryHDU()) hdulist.append(fits.ImageHDU()) hdulist.append(fits.ImageHDU()) # A more or less random assortment of things which are not valid keys. 
bad_keys = [None, 3.5, {}]
        for key in bad_keys:
            assert key not in hdulist

    def test_iteration_of_lazy_loaded_hdulist(self):
        """
        Regression test for https://github.com/astropy/astropy/issues/5585
        """
        hdulist = fits.HDUList(fits.PrimaryHDU())
        hdulist.append(fits.ImageHDU(name="SCI"))
        hdulist.append(fits.ImageHDU(name="SCI"))
        hdulist.append(fits.ImageHDU(name="nada"))
        hdulist.append(fits.ImageHDU(name="SCI"))

        filename = self.temp("many_extension.fits")
        hdulist.writeto(filename)
        f = fits.open(filename)

        # Check that all extensions are read if f is not sliced
        all_exts = list(f)
        assert len(all_exts) == 5

        # Reload the file to ensure we are still lazy loading
        f.close()
        f = fits.open(filename)

        # Try a simple slice with no conditional on the ext. This is essentially
        # the reported failure.
        all_exts_but_zero = list(f[1:])
        assert len(all_exts_but_zero) == 4

        # Reload the file to ensure we are still lazy loading
        f.close()
        f = fits.open(filename)

        # Check whether behavior is proper if the upper end of the slice is not
        # omitted.
        read_exts = [ext for ext in f[1:4] if ext.header["EXTNAME"] == "SCI"]
        assert len(read_exts) == 2

        f.close()

    def test_read_non_standard_hdu(self):
        filename = self.temp("bad-fits.fits")
        hdu = fits.PrimaryHDU()
        hdu.header["FOO"] = "BAR"
        buf = io.BytesIO()
        hdu.writeto(buf)
        buf.seek(0)
        hdustr = buf.read()
        # Flip the SIMPLE card's value; the byte pattern must match the card
        # exactly, including the fixed-format padding, or the replace is a no-op.
        hdustr = hdustr.replace(
            b"SIMPLE  =                    T", b"SIMPLE  =                    F"
        )
        with open(filename, mode="wb") as f:
            f.write(hdustr)

        with fits.open(filename) as hdul:
            assert isinstance(hdul[0], _NonstandardHDU)
            assert hdul[0].header["FOO"] == "BAR"

    def test_proper_error_raised_on_non_fits_file(self):
        filename = self.temp("not-fits.fits")
        with open(filename, mode="w", encoding="utf-8") as f:
            f.write("Not a FITS file")

        match = (
            "No SIMPLE card found, this file does not appear to be a valid FITS file"
        )

        # This should raise an OSError because there is no end card.
        with pytest.raises(OSError, match=match):
            fits.open(filename)

        with pytest.raises(OSError, match=match):
            fits.open(filename, mode="append")

        with pytest.raises(OSError, match=match):
            fits.open(filename, mode="update")

    def test_proper_error_raised_on_invalid_fits_file(self):
        filename = self.temp("bad-fits.fits")
        hdu = fits.PrimaryHDU()
        hdu.header["FOO"] = "BAR"
        buf = io.BytesIO()
        hdu.writeto(buf)
        # write 80 additional bytes so the block will have the correct size
        buf.write(b" " * 80)
        # now remove the SIMPLE card by skipping past the first 80-byte card
        buf.seek(80)
        with open(filename, mode="wb") as f:
            f.write(buf.read())

        match = (
            "No SIMPLE card found, this file does not appear to be a valid FITS file"
        )

        # This should raise an OSError because there is no end card.
with pytest.raises(OSError, match=match):
            fits.open(filename)

        with pytest.raises(OSError, match=match):
            fits.open(filename, mode="append")

        with pytest.raises(OSError, match=match):
            fits.open(filename, mode="update")

        with fits.open(filename, ignore_missing_simple=True) as hdul:
            assert isinstance(hdul[0], _ValidHDU)
            assert hdul[0].header["FOO"] == "BAR"

    def test_warning_raised_on_non_standard_simple_card(self):
        filename = self.temp("bad-fits.fits")
        hdu = fits.PrimaryHDU()
        hdu.header["FOO"] = "BAR"
        buf = io.BytesIO()
        hdu.writeto(buf)

        # change the simple card format
        buf.seek(0)
        buf.write(b"SIMPLE = T ")
        buf.seek(0)

        with open(filename, mode="wb") as f:
            f.write(buf.read())

        match = "Found a SIMPLE card but its format doesn't respect the FITS Standard"

        with pytest.warns(VerifyWarning, match=match):
            with fits.open(filename):
                pass

        with pytest.warns(VerifyWarning, match=match):
            with fits.open(filename, mode="append"):
                pass

        with pytest.warns(VerifyWarning, match=match):
            with fits.open(filename, mode="update"):
                pass

        with fits.open(filename, ignore_missing_simple=True) as hdul:
            assert isinstance(hdul[0], _ValidHDU)
            assert hdul[0].header["FOO"] == "BAR"

        # change the simple card format
        buf.seek(0)
        buf.write(b"SIMPLE = T / This is a FITS file")
        buf.seek(0)

        with open(filename, mode="wb") as f:
            f.write(buf.read())

        with pytest.warns(VerifyWarning, match=match):
            with fits.open(filename):
                pass

    def test_proper_error_raised_on_non_fits_file_with_unicode(self):
        """
        Regression test for https://github.com/astropy/astropy/issues/5594

        The failure shows up when (in python 3+) you try to open a file
        with unicode content that is not actually a FITS file. See:
        https://github.com/astropy/astropy/issues/5594#issuecomment-266583218
        """
        filename = self.temp("not-fits-with-unicode.fits")
        with open(filename, mode="w", encoding="utf-8") as f:
            f.write("Ce\xe7i ne marche pas")

        # This should raise an OSError because there is no end card.
        with pytest.raises(
            OSError,
            match=(
                "No SIMPLE card found, this file "
                "does not appear to be a valid FITS file"
            ),
        ):
            fits.open(filename)

    def test_no_resource_warning_raised_on_non_fits_file(self):
        """
        Regression test for https://github.com/astropy/astropy/issues/6168

        The ResourceWarning shows up when (in python 3+) you try to
        open a non-FITS file when using a filename.
        """

        # To avoid creating the file multiple times the tests are
        # all included in one test file. See the discussion to the
        # PR at https://github.com/astropy/astropy/issues/6168
        #
        filename = self.temp("not-fits.fits")
        with open(filename, mode="w") as f:
            f.write("# header line\n")
            f.write("0.1 0.2\n")

        # Opening the file should raise an OSError however the file
        # is opened (there are two distinct code paths, depending on
        # whether ignore_missing_end is True or False).
        #
        # Explicit tests are added to make sure the file handle is not
        # closed when passed in to fits.open. In this case the ResourceWarning
        # was not raised.
# Make sure that files opened by the user are not closed
        with open(filename, mode="rb") as f:
            with pytest.raises(OSError):
                fits.open(f, ignore_missing_end=False)

            assert not f.closed

        with open(filename, mode="rb") as f:
            with pytest.raises(OSError):
                fits.open(f, ignore_missing_end=True)

            assert not f.closed

        with pytest.raises(OSError):
            fits.open(filename, ignore_missing_end=False)

        with pytest.raises(OSError):
            fits.open(filename, ignore_missing_end=True)

    def test_warning_raised_extra_bytes_after_last_hdu(self):
        filename = "test_extra_bytes.fits"
        fits.writeto(self.temp(filename), np.arange(100))
        # write some extra bytes to the end of the file
        with open(self.temp(filename), "ab") as f:
            f.write(b"extra bytes")

        # this should not raise a DeprecationWarning about the indent
        # function (#18607)
        match = "There may be extra bytes after the last HDU"
        with (
            pytest.warns(VerifyWarning, match=match),
            fits.open(self.temp(filename)) as hdul,
        ):
            assert len(hdul) == 1

    def test_pop_with_lazy_load(self):
        filename = self.data("checksum.fits")

        with fits.open(filename) as hdul:
            # Try popping the hdulist before doing anything else. This makes sure
            # that https://github.com/astropy/astropy/issues/7185 is fixed.
            hdu = hdul.pop()
            assert len(hdul) == 1

        # Read the file again and try popping from the beginning
        with fits.open(filename) as hdul2:
            hdu2 = hdul2.pop(0)
            assert len(hdul2) == 1

        # Just a sanity check
        with fits.open(filename) as hdul3:
            assert len(hdul3) == 2
            assert hdul3[0].header == hdu2.header
            assert hdul3[1].header == hdu.header

    def test_pop_extname(self):
        with fits.open(self.data("o4sp040b0_raw.fits")) as hdul:
            assert len(hdul) == 7
            hdu1 = hdul[1]
            hdu4 = hdul[4]
            hdu_popped = hdul.pop(("SCI", 2))
            assert len(hdul) == 6
            assert hdu_popped is hdu4
            hdu_popped = hdul.pop("SCI")
            assert len(hdul) == 5
            assert hdu_popped is hdu1

    # Skip due to https://github.com/astropy/astropy/issues/8916
    @pytest.mark.skipif(
        sys.platform.startswith("win32"), reason="Cannot test on Windows"
    )
    def test_write_hdulist_to_stream(self):
        """
        Unit test for https://github.com/astropy/astropy/issues/7435
        to ensure that an HDUList can be written to a stream.
        """
        data = np.array([[1, 2, 3], [4, 5, 6]])
        hdu = fits.PrimaryHDU(data)
        hdulist = fits.HDUList([hdu])

        with open(self.temp("test.fits"), "wb") as fout:
            with subprocess.Popen(["cat"], stdin=subprocess.PIPE, stdout=fout) as p:
                hdulist.writeto(p.stdin)

    def test_output_verify(self):
        hdul = fits.HDUList([fits.PrimaryHDU()])
        hdul[0].header["FOOBAR"] = 42
        hdul.writeto(self.temp("test.fits"))

        with open(self.temp("test.fits"), "rb") as f:
            data = f.read()
        # create an invalid card by shifting the equal sign out of its
        # fixed-format position; the pattern must include the keyword's
        # space padding to actually match the bytes in the file
        data = data.replace(b"FOOBAR  =", b"FOOBAR = ")
        with open(self.temp("test2.fits"), "wb") as f:
            f.write(data)

        with pytest.raises(VerifyError):
            with fits.open(self.temp("test2.fits"), mode="update") as hdul:
                hdul[0].header["MORE"] = "here"

        with pytest.warns(VerifyWarning) as ww:
            with fits.open(
                self.temp("test2.fits"), mode="update", output_verify="fix+warn"
            ) as hdul:
                hdul[0].header["MORE"] = "here"
        assert len(ww) == 6
        msg = "Card 'FOOBAR ' is not FITS standard (equal sign not at column 8)"
        assert msg in str(ww[3].message)
TestHDUListFunctions
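Since the record above centers on `HDUList.fromstring`, a minimal round-trip sketch may help; it uses only public astropy API and is not part of the quoted test suite:

import io

import numpy as np
from astropy.io import fits

# Serialize an HDUList to bytes, then rebuild it from that byte string.
buf = io.BytesIO()
fits.HDUList([fits.PrimaryHDU(data=np.arange(10))]).writeto(buf)

hdul = fits.HDUList.fromstring(buf.getvalue())
assert (hdul[0].data == np.arange(10)).all()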
python
cython__cython
Cython/Compiler/PyrexTypes.py
{ "start": 69200, "end": 71822 }
class ____(BaseType): "A C const or volatile type" subtypes = ['cv_base_type'] is_cv_qualified = 1 def __init__(self, base_type, is_const=0, is_volatile=0): self.cv_base_type = base_type self.is_const = is_const self.is_volatile = is_volatile if base_type.has_attributes and base_type.scope is not None: from .Symtab import CConstOrVolatileScope self.scope = CConstOrVolatileScope(base_type.scope, is_const, is_volatile) def cv_string(self): cvstring = "" if self.is_const: cvstring = "const " + cvstring if self.is_volatile: cvstring = "volatile " + cvstring return cvstring def __repr__(self): return "<CConstOrVolatileType %s%r>" % (self.cv_string(), self.cv_base_type) def __str__(self): return self.declaration_code("", for_display=1) def declaration_code(self, entity_code, for_display = 0, dll_linkage = None, pyrex = 0): cv = self.cv_string() if for_display or pyrex: return cv + self.cv_base_type.declaration_code(entity_code, for_display, dll_linkage, pyrex) else: return self.cv_base_type.declaration_code(cv + entity_code, for_display, dll_linkage, pyrex) def specialize(self, values): base_type = self.cv_base_type.specialize(values) if base_type == self.cv_base_type: return self return CConstOrVolatileType(base_type, self.is_const, self.is_volatile) def deduce_template_params(self, actual): return self.cv_base_type.deduce_template_params(actual) def can_coerce_to_pyobject(self, env): return self.cv_base_type.can_coerce_to_pyobject(env) def can_coerce_from_pyobject(self, env): return self.cv_base_type.can_coerce_from_pyobject(env) def create_to_py_utility_code(self, env): if self.cv_base_type.create_to_py_utility_code(env): self.to_py_function = self.cv_base_type.to_py_function return True def same_as_resolved_type(self, other_type): if other_type.is_cv_qualified: return self.cv_base_type.same_as_resolved_type(other_type.cv_base_type) # Accept cv LHS <- non-cv RHS. return self.cv_base_type.same_as_resolved_type(other_type) def __getattr__(self, name): return getattr(self.cv_base_type, name) def c_const_type(base_type): """Creates a C 'const ...' type but does not test for 'error_type'. """ return CConstOrVolatileType(base_type, is_const=True)
CConstOrVolatileType
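A small usage sketch of the qualifier wrapper above; `c_int_type` is assumed to be the module-level int type in `PyrexTypes`, and the exact spacing of the emitted declaration depends on the base type's own `declaration_code`:

from Cython.Compiler.PyrexTypes import c_const_type, c_int_type

t = c_const_type(c_int_type)
# The qualifier string is prepended to the entity name, so the emitted C
# declaration reads roughly "int const x" (equivalent to "const int x" in C).
print(t.declaration_code("x"))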
python
PrefectHQ__prefect
tests/server/orchestration/api/test_variables.py
{ "start": 11054, "end": 13246 }
class ____: async def test_no_results( self, client: AsyncClient, ): res = await client.post( "/variables/count", ) assert res.status_code == 200 assert res.json() == 0 async def test_no_filter( self, client: AsyncClient, variables, ): res = await client.post( "/variables/count", ) assert res.status_code == 200 assert res.json() == 4 async def test_filter_name( self, client: AsyncClient, variables, ): # any filter res = await client.post( "/variables/count", json=dict( variables=VariableFilter( name=VariableFilterName(any_=["variable1"]) ).model_dump(mode="json") ), ) assert res.status_code == 200 assert res.json() == 1 # like filter res = await client.post( "/variables/count", json=dict( variables=VariableFilter( name=VariableFilterName(like_="variable1%") ).model_dump(mode="json") ), ) assert res.status_code == 200 assert res.json() == 2 async def test_filter_id( self, client: AsyncClient, variables, ): variable = variables[0] # any filter res = await client.post( "/variables/count", json=dict( variables=VariableFilter( id=VariableFilterId(any_=[variable.id]) ).model_dump(mode="json") ), ) assert res.json() == 1 async def test_filter_tags( self, client: AsyncClient, variables, ): # any filter res = await client.post( "/variables/count", json=dict( variables=VariableFilter( tags=VariableFilterTags(all_=["tag1"]) ).model_dump(mode="json") ), ) assert res.status_code == 200 assert res.json() == 2
TestCountVariables
python
walkccc__LeetCode
solutions/2069. Walking Robot Simulation II/2069.py
{ "start": 0, "end": 683 }
class ____: def __init__(self, width: int, height: int): self.isOrigin = True self.i = 0 self.pos = ([((0, 0), 'South')] + [((i, 0), 'East') for i in range(1, width)] + [((width - 1, j), 'North') for j in range(1, height)] + [((i, height - 1), 'West') for i in range(width - 2, -1, -1)] + [((0, j), 'South') for j in range(height - 2, 0, -1)]) def step(self, num: int) -> None: self.isOrigin = False self.i = (self.i + num) % len(self.pos) def getPos(self) -> list[int]: return self.pos[self.i][0] def getDir(self) -> str: return 'East' if self.isOrigin else self.pos[self.i][1]
Robot
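Because the full perimeter walk is precomputed, each query is O(1). A walkthrough with the numbers from the LeetCode 2069 example, treating the masked class as `Robot` (the record's label):

robot = Robot(6, 3)     # 14 perimeter cells, start at (0, 0) facing East
robot.step(2)
robot.step(2)
print(robot.getPos())   # (4, 0)
print(robot.getDir())   # East
robot.step(2)
robot.step(1)
robot.step(4)
print(robot.getPos())   # (1, 2)
print(robot.getDir())   # West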
python
pyparsing__pyparsing
tests/test_simple_unit.py
{ "start": 11298, "end": 13175 }
class ____(PyparsingExpressionTestCase): EQ = pp.Suppress("=") tests = [ PyparsingTest( desc="Define multiple results names in groups", expr=pp.Group( pp.Word(pp.alphas)("key") + EQ + pp.pyparsing_common.number("value") )[...], text="range=5280 long=-138.52 lat=46.91", expected_list=[["range", 5280], ["long", -138.52], ["lat", 46.91]], ), PyparsingTest( desc=( "Define multiple results names in groups" " - use Dict to define results names using parsed keys" ), expr=pp.Dict( pp.Group(pp.Word(pp.alphas) + EQ + pp.pyparsing_common.number)[...] ), text="range=5280 long=-138.52 lat=46.91", expected_list=[["range", 5280], ["long", -138.52], ["lat", 46.91]], expected_dict={"lat": 46.91, "long": -138.52, "range": 5280}, ), PyparsingTest( desc="Define multiple value types", expr=pp.Dict( pp.Group( pp.Word(pp.alphas) + EQ + ( pp.pyparsing_common.number | pp.one_of("True False") | pp.QuotedString("'") ) )[...] ), text="long=-122.47 lat=37.82 public=True name='Golden Gate Bridge'", expected_list=[ ["long", -122.47], ["lat", 37.82], ["public", "True"], ["name", "Golden Gate Bridge"], ], expected_dict={ "long": -122.47, "lat": 37.82, "public": "True", "name": "Golden Gate Bridge", }, ), ]
TestGroups
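Outside the test harness, the `Dict`-over-`Group` pattern from the second case behaves like this; a short sketch against the public pyparsing 3 API:

import pyparsing as pp

EQ = pp.Suppress("=")
expr = pp.Dict(pp.Group(pp.Word(pp.alphas) + EQ + pp.pyparsing_common.number)[...])

# Parsed keys become results names, so values are addressable by key.
result = expr.parse_string("range=5280 long=-138.52 lat=46.91")
print(result["range"], result["lat"])  # 5280 46.91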
python
dagster-io__dagster
examples/docs_snippets/docs_snippets/guides/components/shell-script-component/pythonic/2-shell-command-empty-no-model-init.py
{ "start": 23, "end": 428 }
class ____(dg.Component, dg.Resolvable): """COMPONENT SUMMARY HERE. COMPONENT DESCRIPTION HERE. """ def __init__( self, # added arguments here will define yaml schema via Resolvable ): pass def build_defs(self, context: dg.ComponentLoadContext) -> dg.Definitions: # Add definition construction logic here. return dg.Definitions()
ShellCommand
python
huggingface__transformers
tests/models/dac/test_modeling_dac.py
{ "start": 36992, "end": 44211 }
class ____(unittest.TestCase):
    @parameterized.expand([(model_name,) for model_name in EXPECTED_PREPROC_SHAPE.keys()])
    @require_deterministic_for_xpu
    def test_integration(self, model_name):
        # load model and processor
        model_id = f"descript/{model_name}"
        model = DacModel.from_pretrained(model_id, force_download=True).to(torch_device).eval()
        processor = AutoProcessor.from_pretrained(model_id)

        # load audio sample
        librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate))
        audio_sample = librispeech_dummy[0]["audio"]["array"]

        # check on processor audio shape (assert the comparison; a bare
        # torch.equal call would silently discard the boolean result)
        inputs = processor(
            raw_audio=audio_sample,
            sampling_rate=processor.sampling_rate,
            return_tensors="pt",
        ).to(torch_device)
        self.assertTrue(torch.equal(torch.tensor(inputs["input_values"].shape), EXPECTED_PREPROC_SHAPE[model_name]))

        with torch.no_grad():
            # compare encoder loss
            encoder_outputs = model.encode(inputs["input_values"])
            torch.testing.assert_close(
                encoder_outputs[0].squeeze().item(), EXPECTED_ENC_LOSS[model_name], rtol=1e-3, atol=1e-3
            )

            # compare quantizer outputs
            quantizer_outputs = model.quantizer(encoder_outputs[1])
            torch.testing.assert_close(
                quantizer_outputs[1][..., : EXPECTED_QUANT_CODES[model_name].shape[-1]],
                EXPECTED_QUANT_CODES[model_name],
                rtol=1e-6,
                atol=1e-6,
            )
            torch.testing.assert_close(
                quantizer_outputs[4].squeeze().item(), EXPECTED_QUANT_CODEBOOK_LOSS[model_name], rtol=1e-4, atol=1e-4
            )

            # compare decoder outputs
            decoded_outputs = model.decode(encoder_outputs[1])
            torch.testing.assert_close(
                decoded_outputs["audio_values"][..., : EXPECTED_DEC_OUTPUTS[model_name].shape[-1]],
                EXPECTED_DEC_OUTPUTS[model_name],
                rtol=1e-3,
                atol=1e-3,
            )

            # compare codec error / lossiness
            codec_err = compute_rmse(decoded_outputs["audio_values"], inputs["input_values"])
            torch.testing.assert_close(codec_err, EXPECTED_CODEC_ERROR[model_name], rtol=1e-5, atol=1e-5)

            # make sure forward and decode gives same result
            enc_dec = model(inputs["input_values"])[1]
            torch.testing.assert_close(decoded_outputs["audio_values"], enc_dec, rtol=1e-6, atol=1e-6)

    @parameterized.expand([(model_name,) for model_name in EXPECTED_PREPROC_SHAPE_BATCH.keys()])
    def test_integration_batch(self, model_name):
        # load model and processor
        model_id = f"descript/{model_name}"
        model = DacModel.from_pretrained(model_id).to(torch_device)
        processor = AutoProcessor.from_pretrained(model_id)

        # load audio samples
        librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate))
        audio_samples = [np.array([audio_sample["array"]])[0] for audio_sample in librispeech_dummy[-2:]["audio"]]

        # check on processor audio shape (assert the comparison here as well)
        inputs = processor(
            raw_audio=audio_samples,
            sampling_rate=processor.sampling_rate,
            truncation=False,
            return_tensors="pt",
        ).to(torch_device)
        self.assertTrue(torch.equal(torch.tensor(inputs["input_values"].shape), EXPECTED_PREPROC_SHAPE_BATCH[model_name]))

        with torch.no_grad():
            # compare encoder loss
            encoder_outputs = model.encode(inputs["input_values"])
            torch.testing.assert_close(
                encoder_outputs[0].mean().item(), EXPECTED_ENC_LOSS_BATCH[model_name], rtol=1e-3, atol=1e-3
            )

            # compare quantizer outputs
            quantizer_outputs = model.quantizer(encoder_outputs[1])
            torch.testing.assert_close(
                quantizer_outputs[1][..., : EXPECTED_QUANT_CODES_BATCH[model_name].shape[-1]],
                EXPECTED_QUANT_CODES_BATCH[model_name],
                rtol=1e-6,
atol=1e-6, ) torch.testing.assert_close( quantizer_outputs[4].mean().item(), EXPECTED_QUANT_CODEBOOK_LOSS_BATCH[model_name], rtol=1e-4, atol=1e-4, ) # compare decoder outputs decoded_outputs = model.decode(encoder_outputs[1]) torch.testing.assert_close( EXPECTED_DEC_OUTPUTS_BATCH[model_name], decoded_outputs["audio_values"][..., : EXPECTED_DEC_OUTPUTS_BATCH[model_name].shape[-1]], rtol=1e-3, atol=1e-3, ) # compare codec error / lossiness codec_err = compute_rmse(decoded_outputs["audio_values"], inputs["input_values"]) torch.testing.assert_close(codec_err, EXPECTED_CODEC_ERROR_BATCH[model_name], rtol=1e-6, atol=1e-6) # make sure forward and decode gives same result enc_dec = model(inputs["input_values"])[1] torch.testing.assert_close(decoded_outputs["audio_values"], enc_dec, rtol=1e-6, atol=1e-6) @parameterized.expand([(model_name,) for model_name in EXPECTED_PREPROC_SHAPE_BATCH.keys()]) def test_quantizer_from_latents_integration(self, model_name): model_id = f"descript/{model_name}" model = DacModel.from_pretrained(model_id).to(torch_device) processor = AutoProcessor.from_pretrained(model_id) # load audio sample librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate)) audio_sample = librispeech_dummy[0]["audio"]["array"] # check on processor audio shape inputs = processor( raw_audio=audio_sample, sampling_rate=processor.sampling_rate, return_tensors="pt", ).to(torch_device) input_values = inputs["input_values"] with torch.no_grad(): encoder_outputs = model.encode(input_values) latents = encoder_outputs.projected_latents # reconstruction using from_latents quantizer_representation, quantized_latents = model.quantizer.from_latents(latents=latents) reconstructed = model.decode(quantized_representation=quantizer_representation).audio_values # forward pass original_reconstructed = model(input_values).audio_values # ensure forward and decode are the same self.assertTrue( torch.allclose(reconstructed, original_reconstructed, atol=1e-6), msg="Reconstructed codes from latents should match original quantized codes", )
DacIntegrationTest
python
walkccc__LeetCode
solutions/2533. Number of Good Binary Strings/2533.py
{ "start": 0, "end": 750 }
class ____: def goodBinaryStrings( self, minLength: int, maxLength: int, oneGroup: int, zeroGroup: int, ) -> int: MOD = 1_000_000_007 # dp[i] := the number of good binary strings with length i dp = [1] + [0] * maxLength for i in range(maxLength + 1): # There are good binary strings with length i, so we can append # consecutive 0s or 1s after it. if dp[i] > 0: appendZeros = i + zeroGroup if appendZeros <= maxLength: dp[appendZeros] += dp[i] dp[appendZeros] %= MOD appendOnes = i + oneGroup if appendOnes <= maxLength: dp[appendOnes] += dp[i] dp[appendOnes] %= MOD return sum(dp[minLength:]) % MOD
Solution
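Tracing the recurrence on the classic example makes it concrete: with minLength=2, maxLength=3, oneGroup=1, zeroGroup=2, lengths grow by blocks of one '1' or two '0's, giving dp = [1, 1, 2, 3] and an answer of 5 ("11", "00", "111", "100", "001"). Treating the masked class as `Solution` (the record's label):

print(Solution().goodBinaryStrings(2, 3, 1, 2))  # 5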
python
zarr-developers__zarr-python
src/zarr/codecs/numcodecs/_codecs.py
{ "start": 9356, "end": 9793 }
class ____(_NumcodecsArrayArrayCodec, codec_name="quantize"): def __init__(self, **codec_config: JSON) -> None: super().__init__(**codec_config) def evolve_from_array_spec(self, array_spec: ArraySpec) -> Quantize: if self.codec_config.get("dtype") is None: dtype = array_spec.dtype.to_native_dtype() return Quantize(**{**self.codec_config, "dtype": str(dtype)}) return self
Quantize
python
django__django
django/db/transaction.py
{ "start": 3959, "end": 12506 }
class ____(ContextDecorator): """ Guarantee the atomic execution of a given block. An instance can be used either as a decorator or as a context manager. When it's used as a decorator, __call__ wraps the execution of the decorated function in the instance itself, used as a context manager. When it's used as a context manager, __enter__ creates a transaction or a savepoint, depending on whether a transaction is already in progress, and __exit__ commits the transaction or releases the savepoint on normal exit, and rolls back the transaction or to the savepoint on exceptions. It's possible to disable the creation of savepoints if the goal is to ensure that some code runs within a transaction without creating overhead. A stack of savepoint identifiers is maintained as an attribute of the connection. None denotes the absence of a savepoint. This allows reentrancy even if the same AtomicWrapper is reused. For example, it's possible to define `oa = atomic('other')` and use `@oa` or `with oa:` multiple times. Since database connections are thread-local, this is thread-safe. An atomic block can be tagged as durable. In this case, a RuntimeError is raised if it's nested within another atomic block. This guarantees that database changes in a durable block are committed to the database when the block exits without error. This is a private API. """ def __init__(self, using, savepoint, durable): self.using = using self.savepoint = savepoint self.durable = durable self._from_testcase = False def __enter__(self): connection = get_connection(self.using) if ( self.durable and connection.atomic_blocks and not connection.atomic_blocks[-1]._from_testcase ): raise RuntimeError( "A durable atomic block cannot be nested within another " "atomic block." ) if not connection.in_atomic_block: # Reset state when entering an outermost atomic block. connection.commit_on_exit = True connection.needs_rollback = False if not connection.get_autocommit(): # Pretend we're already in an atomic block to bypass the code # that disables autocommit to enter a transaction, and make a # note to deal with this case in __exit__. connection.in_atomic_block = True connection.commit_on_exit = False if connection.in_atomic_block: # We're already in a transaction; create a savepoint, unless we # were told not to or we're already waiting for a rollback. The # second condition avoids creating useless savepoints and prevents # overwriting needs_rollback until the rollback is performed. if self.savepoint and not connection.needs_rollback: sid = connection.savepoint() connection.savepoint_ids.append(sid) else: connection.savepoint_ids.append(None) else: connection.set_autocommit( False, force_begin_transaction_with_broken_autocommit=True ) connection.in_atomic_block = True if connection.in_atomic_block: connection.atomic_blocks.append(self) def __exit__(self, exc_type, exc_value, traceback): connection = get_connection(self.using) if connection.in_atomic_block: connection.atomic_blocks.pop() if connection.savepoint_ids: sid = connection.savepoint_ids.pop() else: # Prematurely unset this flag to allow using commit or rollback. connection.in_atomic_block = False try: if connection.closed_in_transaction: # The database will perform a rollback by itself. # Wait until we exit the outermost block. 
pass elif exc_type is None and not connection.needs_rollback: if connection.in_atomic_block: # Release savepoint if there is one if sid is not None: try: connection.savepoint_commit(sid) except DatabaseError: try: connection.savepoint_rollback(sid) # The savepoint won't be reused. Release it to # minimize overhead for the database server. connection.savepoint_commit(sid) except Error: # If rolling back to a savepoint fails, mark # for rollback at a higher level and avoid # shadowing the original exception. connection.needs_rollback = True raise else: # Commit transaction try: connection.commit() except DatabaseError: try: connection.rollback() except Error: # An error during rollback means that something # went wrong with the connection. Drop it. connection.close() raise else: # This flag will be set to True again if there isn't a # savepoint allowing to perform the rollback at this level. connection.needs_rollback = False if connection.in_atomic_block: # Roll back to savepoint if there is one, mark for rollback # otherwise. if sid is None: connection.needs_rollback = True else: try: connection.savepoint_rollback(sid) # The savepoint won't be reused. Release it to # minimize overhead for the database server. connection.savepoint_commit(sid) except Error: # If rolling back to a savepoint fails, mark for # rollback at a higher level and avoid shadowing # the original exception. connection.needs_rollback = True else: # Roll back transaction try: connection.rollback() except Error: # An error during rollback means that something # went wrong with the connection. Drop it. connection.close() finally: # Outermost block exit when autocommit was enabled. if not connection.in_atomic_block: if connection.closed_in_transaction: connection.connection = None else: connection.set_autocommit(True) # Outermost block exit when autocommit was disabled. elif not connection.savepoint_ids and not connection.commit_on_exit: if connection.closed_in_transaction: connection.connection = None else: connection.in_atomic_block = False def atomic(using=None, savepoint=True, durable=False): # Bare decorator: @atomic -- although the first argument is called # `using`, it's actually the function being decorated. if callable(using): return Atomic(DEFAULT_DB_ALIAS, savepoint, durable)(using) # Decorator: @atomic(...) or context manager: with atomic(...): ... else: return Atomic(using, savepoint, durable) def _non_atomic_requests(view, using): try: view._non_atomic_requests.add(using) except AttributeError: view._non_atomic_requests = {using} return view def non_atomic_requests(using=None): if callable(using): return _non_atomic_requests(using, DEFAULT_DB_ALIAS) else: if using is None: using = DEFAULT_DB_ALIAS return lambda view: _non_atomic_requests(view, using)
Atomic
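The class above backs Django's public `transaction.atomic` API; a brief usage sketch of both entry points, where the nested block maps to a savepoint (the model helpers `withdraw`/`deposit` are hypothetical):

from django.db import transaction

@transaction.atomic
def transfer(source, dest, amount):
    source.withdraw(amount)   # hypothetical model method
    dest.deposit(amount)      # hypothetical model method

def transfer_with_savepoint(source, dest, amount):
    with transaction.atomic():            # outer block: BEGIN ... COMMIT
        source.withdraw(amount)
        try:
            with transaction.atomic():    # nested block: SAVEPOINT ... RELEASE
                dest.deposit(amount)
        except Exception:
            pass  # a failure here rolls back to the savepoint only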
python
matplotlib__matplotlib
lib/matplotlib/bezier.py
{ "start": 418, "end": 5251 }
class ____(ValueError):
    pass


# some functions


def get_intersection(cx1, cy1, cos_t1, sin_t1, cx2, cy2, cos_t2, sin_t2):
    """
    Return the intersection between the line through (*cx1*, *cy1*) at angle
    *t1* and the line through (*cx2*, *cy2*) at angle *t2*.
    """
    # line1 => sin_t1 * (x - cx1) - cos_t1 * (y - cy1) = 0.
    # line1 => sin_t1 * x + cos_t1 * y = sin_t1*cx1 - cos_t1*cy1

    line1_rhs = sin_t1 * cx1 - cos_t1 * cy1
    line2_rhs = sin_t2 * cx2 - cos_t2 * cy2

    # rhs matrix
    a, b = sin_t1, -cos_t1
    c, d = sin_t2, -cos_t2

    ad_bc = a * d - b * c
    if abs(ad_bc) < 1e-12:
        raise ValueError("Given lines do not intersect. Please verify that "
                         "the angles are not equal or differ by 180 degrees.")

    # rhs_inverse
    a_, b_ = d, -b
    c_, d_ = -c, a
    a_, b_, c_, d_ = (k / ad_bc for k in [a_, b_, c_, d_])

    x = a_ * line1_rhs + b_ * line2_rhs
    y = c_ * line1_rhs + d_ * line2_rhs

    return x, y


def get_normal_points(cx, cy, cos_t, sin_t, length):
    """
    For a line passing through (*cx*, *cy*) and having an angle *t*, return
    locations of the two points located along its perpendicular line at the
    distance of *length*.
    """
    if length == 0.:
        return cx, cy, cx, cy

    cos_t1, sin_t1 = sin_t, -cos_t
    cos_t2, sin_t2 = -sin_t, cos_t

    x1, y1 = length * cos_t1 + cx, length * sin_t1 + cy
    x2, y2 = length * cos_t2 + cx, length * sin_t2 + cy

    return x1, y1, x2, y2


# BEZIER routines

# subdividing bezier curve
# http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/Bezier/bezier-sub.html


def _de_casteljau1(beta, t):
    next_beta = beta[:-1] * (1 - t) + beta[1:] * t
    return next_beta


def split_de_casteljau(beta, t):
    """
    Split a Bézier segment defined by its control points *beta* into two
    separate segments divided at *t* and return their control points.
    """
    beta = np.asarray(beta)
    beta_list = [beta]
    while True:
        beta = _de_casteljau1(beta, t)
        beta_list.append(beta)
        if len(beta) == 1:
            break
    left_beta = [beta[0] for beta in beta_list]
    right_beta = [beta[-1] for beta in reversed(beta_list)]

    return left_beta, right_beta


def find_bezier_t_intersecting_with_closedpath(
        bezier_point_at_t, inside_closedpath, t0=0., t1=1., tolerance=0.01):
    """
    Find the intersection of the Bézier curve with a closed path.

    The intersection point *t* is approximated by two parameters *t0*, *t1*
    such that *t0* <= *t* <= *t1*.

    Search starts from *t0* and *t1* and uses a simple bisection algorithm,
    so one of the two end points must be inside the path while the other
    is not. The search stops when the distance of the points parametrized by
    *t0* and *t1* gets smaller than the given *tolerance*.

    Parameters
    ----------
    bezier_point_at_t : callable
        A function returning x, y coordinates of the Bézier at parameter *t*.
        It must have the signature::

            bezier_point_at_t(t: float) -> tuple[float, float]

    inside_closedpath : callable
        A function returning True if a given point (x, y) is inside the
        closed path. It must have the signature::

            inside_closedpath(point: tuple[float, float]) -> bool

    t0, t1 : float
        Start parameters for the search.

    tolerance : float
        Maximal allowed distance between the final points.

    Returns
    -------
    t0, t1 : float
        The Bézier path parameters.
""" start = bezier_point_at_t(t0) end = bezier_point_at_t(t1) start_inside = inside_closedpath(start) end_inside = inside_closedpath(end) if start_inside == end_inside and start != end: raise NonIntersectingPathException( "Both points are on the same side of the closed path") while True: # return if the distance is smaller than the tolerance if np.hypot(start[0] - end[0], start[1] - end[1]) < tolerance: return t0, t1 # calculate the middle point middle_t = 0.5 * (t0 + t1) middle = bezier_point_at_t(middle_t) middle_inside = inside_closedpath(middle) if start_inside ^ middle_inside: t1 = middle_t if end == middle: # Edge case where infinite loop is possible # Caused by large numbers relative to tolerance return t0, t1 end = middle else: t0 = middle_t if start == middle: # Edge case where infinite loop is possible # Caused by large numbers relative to tolerance return t0, t1 start = middle start_inside = middle_inside
NonIntersectingPathException
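A quick numeric check of `split_de_casteljau` (assuming the helpers above are in scope): splitting the quadratic Bézier P0=(0,0), P1=(1,2), P2=(2,0) at t=0.5 yields the curve midpoint (1,1) as the shared endpoint of both halves:

import numpy as np

beta = np.array([[0.0, 0.0], [1.0, 2.0], [2.0, 0.0]])
left, right = split_de_casteljau(beta, 0.5)
# left  -> [(0, 0), (0.5, 1), (1, 1)]
# right -> [(1, 1), (1.5, 1), (2, 0)]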
python
streamlit__streamlit
lib/tests/streamlit/elements/doc_string_test.py
{ "start": 1296, "end": 1730 }
class ____: def __init__(self, available, ExceptionType=AttributeError): self.available = available self.ExceptionType = ExceptionType def __getattribute__(self, name): if name == "say_hello" and not self.available: raise self.ExceptionType(f"{name} is not accessible when x is even") return object.__getattribute__(self, name) def say_hello(self): pass
ConditionalHello
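The `__getattribute__` hook above gates attribute access at lookup time, which is exactly what docstring rendering has to survive; a quick sketch, treating the masked class as `ConditionalHello`:

ok = ConditionalHello(available=True)
ok.say_hello()   # resolves normally

broken = ConditionalHello(available=False)
try:
    broken.say_hello()
except AttributeError as exc:
    print(exc)   # "say_hello is not accessible when x is even"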
python
kamyu104__LeetCode-Solutions
Python/valid-palindrome-iii.py
{ "start": 31, "end": 637 }
class ____(object): def isValidPalindrome(self, s, k): """ :type s: str :type k: int :rtype: bool """ if s == s[::-1]: # optional, to optimize special case return True dp = [[1] * len(s) for _ in xrange(2)] for i in reversed(xrange(len(s))): for j in xrange(i+1, len(s)): if s[i] == s[j]: dp[i%2][j] = 2 + dp[(i+1)%2][j-1] if i+1 <= j-1 else 2 else: dp[i%2][j] = max(dp[(i+1)%2][j], dp[i%2][j-1]) return len(s) <= k + dp[0][-1]
Solution
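The table stores the longest palindromic subsequence (LPS), and `s` is k-palindromic iff `len(s) - LPS <= k`. A worked check, treating the masked class as `Solution` (the snippet is Python 2, hence `xrange`):

# "abcdeca" has LPS "acdca" (length 5), so 7 - 5 = 2 deletions suffice.
print(Solution().isValidPalindrome("abcdeca", 2))   # True
print(Solution().isValidPalindrome("abbababa", 1))  # True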
python
PrefectHQ__prefect
tests/server/models/test_flows.py
{ "start": 84, "end": 894 }
class ____: async def test_create_flow_succeeds(self, session): flow = await models.flows.create_flow( session=session, flow=schemas.core.Flow(name="my-flow") ) assert flow.name == "my-flow" assert flow.id async def test_create_flow_does_not_upsert(self, session): # create a flow flow_1 = await models.flows.create_flow( session=session, flow=schemas.core.Flow(name="my-flow", tags=["green"]) ) # try to create the same flow with tags flow_2 = await models.flows.create_flow( session=session, flow=schemas.core.Flow(name="my-flow", tags=["blue"]) ) assert flow_1.tags == flow_2.tags assert flow_1.name == flow_2.name assert flow_1.id == flow_2.id
TestCreateFlow
python
numpy__numpy
numpy/distutils/system_info.py
{ "start": 54930, "end": 55052 }
class ____(atlas_info): _lib_names = ['satlas'] _lib_atlas = _lib_names _lib_lapack = _lib_names
atlas_3_10_info
python
huggingface__transformers
src/transformers/models/phi3/modular_phi3.py
{ "start": 10807, "end": 11049 }
class ____(MistralForTokenClassification): pass __all__ = [ "Phi3PreTrainedModel", "Phi3Model", # noqa: F822 "Phi3ForCausalLM", "Phi3ForSequenceClassification", "Phi3ForTokenClassification", ]
Phi3ForTokenClassification
python
huggingface__transformers
src/transformers/models/xmod/modeling_xmod.py
{ "start": 44111, "end": 44980 }
class ____(nn.Module):
    """Roberta Head for masked language modeling."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Tie the decoder's bias to the standalone parameter; without this the
        # `self.bias` declared above would be unused by the forward pass.
        self.decoder.bias = self.bias

    def forward(self, features, **kwargs):
        x = self.dense(features)
        x = gelu(x)
        x = self.layer_norm(x)

        # project back to size of vocabulary with bias
        x = self.decoder(x)

        return x


@auto_docstring(
    custom_intro="""
    X-MOD Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """
)
XmodLMHead
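A shape-level sanity check of the head; the config stub below is a hypothetical stand-in exposing only the three attributes the module reads, and assumes the class is importable from transformers' X-MOD module:

import torch
from transformers.models.xmod.modeling_xmod import XmodLMHead

class Cfg:  # hypothetical minimal config stub
    hidden_size = 16
    vocab_size = 100
    layer_norm_eps = 1e-5

head = XmodLMHead(Cfg())
logits = head(torch.randn(2, 5, 16))  # (batch, seq, hidden)
print(logits.shape)                   # torch.Size([2, 5, 100])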
python
numba__llvmlite
llvmlite/binding/executionengine.py
{ "start": 7717, "end": 9690 }
class ____(ffi.ObjectRef): """ Internal: an ObjectCache instance for use within an ExecutionEngine. """ def __init__(self, obj): ptr = ffi.lib.LLVMPY_CreateObjectCache(_notify_c_hook, _getbuffer_c_hook, obj) ffi.ObjectRef.__init__(self, ptr) def _dispose(self): self._capi.LLVMPY_DisposeObjectCache(self) # ============================================================================ # FFI ffi.lib.LLVMPY_CreateMCJITCompiler.argtypes = [ ffi.LLVMModuleRef, ffi.LLVMTargetMachineRef, c_bool, POINTER(c_char_p), ] ffi.lib.LLVMPY_CreateMCJITCompiler.restype = ffi.LLVMExecutionEngineRef ffi.lib.LLVMPY_RemoveModule.argtypes = [ ffi.LLVMExecutionEngineRef, ffi.LLVMModuleRef, POINTER(c_char_p), ] ffi.lib.LLVMPY_RemoveModule.restype = c_bool ffi.lib.LLVMPY_AddModule.argtypes = [ ffi.LLVMExecutionEngineRef, ffi.LLVMModuleRef ] ffi.lib.LLVMPY_AddGlobalMapping.argtypes = [ffi.LLVMExecutionEngineRef, ffi.LLVMValueRef, c_void_p] ffi.lib.LLVMPY_FinalizeObject.argtypes = [ffi.LLVMExecutionEngineRef] ffi.lib.LLVMPY_GetExecutionEngineTargetData.argtypes = [ ffi.LLVMExecutionEngineRef ] ffi.lib.LLVMPY_GetExecutionEngineTargetData.restype = ffi.LLVMTargetDataRef ffi.lib.LLVMPY_TryAllocateExecutableMemory.argtypes = [] ffi.lib.LLVMPY_TryAllocateExecutableMemory.restype = c_int ffi.lib.LLVMPY_GetFunctionAddress.argtypes = [ ffi.LLVMExecutionEngineRef, c_char_p ] ffi.lib.LLVMPY_GetFunctionAddress.restype = c_uint64 ffi.lib.LLVMPY_GetGlobalValueAddress.argtypes = [ ffi.LLVMExecutionEngineRef, c_char_p ] ffi.lib.LLVMPY_GetGlobalValueAddress.restype = c_uint64 ffi.lib.LLVMPY_MCJITAddObjectFile.argtypes = [ ffi.LLVMExecutionEngineRef, ffi.LLVMObjectFileRef ]
_ObjectCacheRef
python
davidhalter__jedi
jedi/inference/filters.py
{ "start": 4428, "end": 6019 }
class ____(_AbstractUsedNamesFilter): def __init__(self, parent_context, node_context=None, until_position=None, origin_scope=None): """ node_context is an option to specify a second value for use cases like the class mro where the parent class of a new name would be the value, but for some type inference it's important to have a local value of the other classes. """ super().__init__(parent_context, node_context) self._origin_scope = origin_scope self._until_position = until_position def _filter(self, names): names = super()._filter(names) names = [n for n in names if self._is_name_reachable(n)] return list(self._check_flows(names)) def _is_name_reachable(self, name): parent = name.parent if parent.type == 'trailer': return False base_node = parent if parent.type in ('classdef', 'funcdef') else name return get_cached_parent_scope(self._parso_cache_node, base_node) == self._parser_scope def _check_flows(self, names): for name in sorted(names, key=lambda name: name.start_pos, reverse=True): check = flow_analysis.reachability_check( context=self._node_context, value_scope=self._parser_scope, node=name, origin_scope=self._origin_scope ) if check is not flow_analysis.UNREACHABLE: yield name if check is flow_analysis.REACHABLE: break
ParserTreeFilter
python
python-poetry__poetry
src/poetry/utils/cache.py
{ "start": 1246, "end": 1635 }
class ____(Generic[T]): """ Stores data and metadata for cache items. """ data: T expires: int | None = None @property def expired(self) -> bool: """ Return true if the cache item has exceeded its expiration period. """ return self.expires is not None and time.time() >= self.expires @dataclasses.dataclass(frozen=True)
CacheItem
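Because `expires` is an absolute epoch timestamp, expiry reduces to a time comparison. A small sketch, treating the class above as the dataclass `CacheItem` (its `@dataclasses.dataclass` decorator presumably sits just above the excerpted span):

import time

fresh = CacheItem(data=b"payload", expires=int(time.time()) + 60)
stale = CacheItem(data=b"payload", expires=int(time.time()) - 1)
eternal = CacheItem(data=b"payload")  # no expiry

print(fresh.expired, stale.expired, eternal.expired)  # False True False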
python
huggingface__transformers
src/transformers/models/sam/processing_sam.py
{ "start": 1070, "end": 1381 }
class ____(ImagesKwargs, total=False): segmentation_maps: Optional[ImageInput] input_points: Optional[NestedList] input_labels: Optional[NestedList] input_boxes: Optional[NestedList] point_pad_value: Optional[int] mask_size: dict[str, int] mask_pad_size: dict[str, int]
SamImagesKwargs
python
astropy__astropy
astropy/convolution/kernels.py
{ "start": 10019, "end": 11936 }
class ____(Kernel2D): """ 2D Tophat filter kernel. The Tophat filter is an isotropic smoothing filter. It can produce artifacts when applied repeatedly on the same data. The generated kernel is normalized so that it integrates to 1. Parameters ---------- radius : int Radius of the filter kernel. mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Gaussian2DKernel, Box2DKernel, RickerWavelet2DKernel, Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Tophat2DKernel tophat_2D_kernel = Tophat2DKernel(40) plt.imshow(tophat_2D_kernel, interpolation='none', origin='lower') plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ def __init__(self, radius, **kwargs): self._model = models.Disk2D(1.0 / (np.pi * radius**2), 0, 0, radius) self._default_size = _round_up_to_odd_integer(2 * radius) super().__init__(**kwargs) self.normalize()
Tophat2DKernel
python
streamlit__streamlit
lib/streamlit/runtime/forward_msg_queue.py
{ "start": 808, "end": 10620 }
class ____:
    """Accumulates a session's outgoing ForwardMsgs.

    Each AppSession adds messages to its queue, and the Server periodically
    flushes all session queues and delivers their messages to the appropriate
    clients.

    ForwardMsgQueue is not thread-safe - a queue should only be used from
    a single thread.
    """

    _before_enqueue_msg: Callable[[ForwardMsg], None] | None = None

    @staticmethod
    def on_before_enqueue_msg(
        before_enqueue_msg: Callable[[ForwardMsg], None] | None,
    ) -> None:
        """Set a callback to be called before a message is enqueued.
        Used in static streamlit app generation.
        """
        ForwardMsgQueue._before_enqueue_msg = before_enqueue_msg

    def __init__(self) -> None:
        self._queue: list[ForwardMsg] = []

        # A mapping of (delta_path -> _queue.indexof(msg)) for each
        # Delta message in the queue. We use this for coalescing
        # redundant outgoing Deltas (where a newer Delta supersedes
        # an older Delta, with the same delta_path, that's still in the
        # queue).
        self._delta_index_map: dict[tuple[int, ...], int] = {}

    def get_debug(self) -> dict[str, Any]:
        from google.protobuf.json_format import MessageToDict

        return {
            "queue": [MessageToDict(m) for m in self._queue],
            "ids": list(self._delta_index_map.keys()),
        }

    def is_empty(self) -> bool:
        return len(self._queue) == 0

    def enqueue(self, msg: ForwardMsg) -> None:
        """Add a message to the queue, possibly composing it with another message."""
        if ForwardMsgQueue._before_enqueue_msg:
            ForwardMsgQueue._before_enqueue_msg(msg)

        if not _is_composable_message(msg):
            self._queue.append(msg)
            return

        # If there's a Delta message with the same delta_path already in
        # the queue - meaning that it refers to the same location in
        # the app - we attempt to combine this new Delta into the old
        # one. This is an optimization that prevents redundant Deltas
        # from being sent to the frontend.
        # One common case where this happens is with `st.write` since
        # it uses a trick with `st.empty` to handle lists of args.
        # Note: it's not guaranteed that the optimization is always applied
        # since the queue can be flushed to the browser at any time.
        # For example:
        # queue 1:
        #  > empty [0, 0] <- skipped
        #  > markdown [0, 0]
        #  > empty [1, 0] <- send to frontend
        #
        # queue 2:
        #  > markdown [1, 0]
        #  > ...
        delta_key = tuple(msg.metadata.delta_path)
        if delta_key in self._delta_index_map:
            index = self._delta_index_map[delta_key]
            old_msg = self._queue[index]
            composed_msg = _maybe_compose_delta_msgs(old_msg, msg)
            if composed_msg is not None:
                self._queue[index] = composed_msg
                return

        # No composition occurred. Append this message to the queue, and
        # store its index for potential future composition.
        self._delta_index_map[delta_key] = len(self._queue)
        self._queue.append(msg)

    def clear(
        self,
        retain_lifecycle_msgs: bool = False,
        fragment_ids_this_run: list[str] | None = None,
    ) -> None:
        """Clear the queue, potentially retaining lifecycle messages.

        The retain_lifecycle_msgs argument exists because in some cases (in
        particular when a currently running script is interrupted by a new
        BackMsg), we don't want to remove certain messages from the queue as
        doing so may cause the client to not hear about important script
        lifecycle events (such as the script being stopped early in order to
        be rerun).

        If fragment_ids_this_run is provided, delta messages not belonging to
        any fragment or belonging to a fragment not in fragment_ids_this_run
        will be preserved to prevent clearing messages unrelated to the
        running fragments.
""" if not retain_lifecycle_msgs: self._queue = [] else: self._queue = [ _update_script_finished_message(msg, fragment_ids_this_run is not None) for msg in self._queue if msg.WhichOneof("type") in { "new_session", "script_finished", "session_status_changed", "parent_message", "page_info_changed", } or ( # preserve all messages if this is a fragment rerun and... fragment_ids_this_run is not None and ( # the message is not a delta message # (not associated with a fragment) or... msg.delta is None or ( # it is a delta but not associated with any of the passed # fragments msg.delta is not None and ( msg.delta.fragment_id is None or msg.delta.fragment_id not in fragment_ids_this_run ) ) ) ) ] self._delta_index_map = {} def flush(self) -> list[ForwardMsg]: """Clear the queue and return a list of the messages it contained before being cleared. """ queue = self._queue self.clear() return queue def __len__(self) -> int: return len(self._queue) def _is_composable_message(msg: ForwardMsg) -> bool: """True if the ForwardMsg is potentially composable with other ForwardMsgs.""" if msg.HasField("ref_hash"): # reference messages (cached in frontend) are always composable. # Only new_element deltas can be reference messages. return True if not msg.HasField("delta"): # Non-delta messages are never composable. return False # We never compose add_rows messages in Python, because the add_rows # operation can raise errors, and we don't have a good way of handling # those errors in the message queue. delta_type = msg.delta.WhichOneof("type") return delta_type not in {"add_rows", "arrow_add_rows"} def _maybe_compose_delta_msgs( old_msg: ForwardMsg, new_msg: ForwardMsg ) -> ForwardMsg | None: """Optimization logic that composes new_msg onto old_msg if possible. If the combination takes place, the function returns a new ForwardMsg that should replace old_msg in the queue. This basically means that the old_msg is not send to the browser since its considered unnecessary. If the new_msg is incompatible with old_msg, the function returns None. In this case, the new_msg should just be appended to the queue as normal. """ if old_msg.HasField("delta") and old_msg.delta.WhichOneof("type") == "add_block": # We never replace add_block deltas, because blocks can have # other dependent deltas later in the queue. For example: # # > placeholder = st.empty() # > placeholder.columns(1) # > placeholder.empty() # # The call to "placeholder.columns(1)" creates two blocks, a parent # container with delta_path (0, 0), and a column child with # delta_path (0, 0, 0). If the final "placeholder.empty()" Delta # is composed with the parent container Delta, the frontend will # throw an error when it tries to add that column child to what is # now just an element, and not a block. return None if new_msg.HasField("ref_hash"): # ref_hash messages are always composable. # Only new_element deltas can be reference messages. return new_msg new_delta_type = new_msg.delta.WhichOneof("type") if new_delta_type in {"new_element", "add_block"}: return new_msg return None def _update_script_finished_message( msg: ForwardMsg, is_fragment_run: bool ) -> ForwardMsg: """ When we are here, the message queue is cleared from non-lifecycle messages before they were flushed to the browser. If there were no non-lifecycle messages in the queue, changing the type here should not matter for the frontend anyways, so we optimistically change the `script_finished` message to `FINISHED_EARLY_FOR_RERUN`. 
This indicates to the frontend that the previous run was interrupted by a
    new script start. Otherwise, a `FINISHED_SUCCESSFULLY` message might
    trigger a reset of widget states on the frontend.
    """
    if msg.WhichOneof("type") == "script_finished" and (
        # If this is not a fragment run (= full app run), it's okay to change the
        # script_finished type to FINISHED_EARLY_FOR_RERUN because another full app run
        # is about to start.
        # If this is a fragment run, it is allowed to change the state of
        # all script_finished states except for FINISHED_SUCCESSFULLY, which we use to
        # indicate that a full app run has finished successfully (in other words, a
        # fragment should not modify the finished status of a full app run, because
        # the fragment finished state is different and the frontend might not trigger
        # cleanups etc. correctly).
        is_fragment_run is False
        or msg.script_finished
        != ForwardMsg.ScriptFinishedStatus.FINISHED_SUCCESSFULLY
    ):
        msg.script_finished = ForwardMsg.ScriptFinishedStatus.FINISHED_EARLY_FOR_RERUN

    return msg
ForwardMsgQueue
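The coalescing path is easiest to see with two element Deltas aimed at the same `delta_path`. This sketch assumes streamlit's generated protobufs are importable and uses a markdown element purely as an example payload, treating the masked class as `ForwardMsgQueue`:

from streamlit.proto.ForwardMsg_pb2 import ForwardMsg

def delta_msg(text, path=(0, 0)):
    msg = ForwardMsg()
    msg.metadata.delta_path.extend(path)
    msg.delta.new_element.markdown.body = text
    return msg

q = ForwardMsgQueue()
q.enqueue(delta_msg("first draft"))
q.enqueue(delta_msg("final text"))  # supersedes the first message in place
assert len(q) == 1
flushed = q.flush()                 # [the "final text" message]; queue is empty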
python
run-llama__llama_index
llama-index-integrations/tools/llama-index-tools-jira-issue/tests/test_tools_jira_issue.py
{ "start": 378, "end": 11674 }
class ____: """Test suite for JiraIssueToolSpec.""" @pytest.fixture def mock_jira(self): """Create a mock JIRA client.""" with patch("llama_index.tools.jira_issue.base.JIRA") as mock: yield mock @pytest.fixture def jira_tool_spec(self, mock_jira): """Create a JiraIssueToolSpec instance with mocked JIRA client.""" mock_jira.return_value = Mock() return JiraIssueToolSpec( email="test@example.com", api_key="test-api-key", server_url="https://test.atlassian.net", ) def test_init_with_missing_credentials(self): """Test that initialization fails with missing credentials.""" with pytest.raises(Exception, match="Please provide Jira credentials"): JiraIssueToolSpec(email="", api_key="", server_url="") def test_search_issues_success(self, jira_tool_spec): """Test successful issue search.""" # Mock issue objects mock_issue1 = Mock() mock_issue1.key = "PROJ-123" mock_issue1.fields.summary = "Test Issue 1" mock_issue1.fields.status.name = "In Progress" mock_issue1.fields.assignee = Mock(displayName="John Doe") mock_issue2 = Mock() mock_issue2.key = "PROJ-124" mock_issue2.fields.summary = "Test Issue 2" mock_issue2.fields.status.name = "To Do" mock_issue2.fields.assignee = None jira_tool_spec.jira.search_issues.return_value = [mock_issue1, mock_issue2] result = jira_tool_spec.search_issues("project = PROJ") assert result["error"] is False assert result["message"] == "Issues found" assert len(result["issues"]) == 2 assert result["issues"][0]["key"] == "PROJ-123" assert result["issues"][0]["assignee"] == "John Doe" assert result["issues"][1]["assignee"] is None def test_search_issues_no_results(self, jira_tool_spec): """Test issue search with no results.""" jira_tool_spec.jira.search_issues.return_value = [] result = jira_tool_spec.search_issues("project = NONEXISTENT") assert result["error"] is True assert result["message"] == "No issues found." def test_search_issues_failure(self, jira_tool_spec): """Test failed issue search.""" jira_tool_spec.jira.search_issues.side_effect = Exception("Invalid JQL") result = jira_tool_spec.search_issues("invalid jql") assert result["error"] is True assert "Failed to search issues: Invalid JQL" in result["message"] def test_create_issue_success(self, jira_tool_spec): """Test successful issue creation.""" mock_issue = Mock(key="KAN-123") jira_tool_spec.jira.create_issue.return_value = mock_issue result = jira_tool_spec.create_issue( project_key="KAN", summary="New Test Issue", description="Test description", issue_type="Task", ) assert result["error"] is False assert result["message"] == "Issue KAN-123 created successfully." assert result["issue_key"] == "KAN-123" # Verify the create_issue was called with correct parameters jira_tool_spec.jira.create_issue.assert_called_once_with( project="KAN", summary="New Test Issue", description="Test description", issuetype={"name": "Task"}, ) def test_create_issue_failure(self, jira_tool_spec): """Test failed issue creation.""" jira_tool_spec.jira.create_issue.side_effect = Exception("Project not found") result = jira_tool_spec.create_issue(project_key="INVALID") assert result["error"] is True assert "Failed to create new issue: Project not found" in result["message"] def test_add_comment_to_issue_success(self, jira_tool_spec): """Test successful comment addition.""" mock_issue = Mock() jira_tool_spec.jira.issue.return_value = mock_issue result = jira_tool_spec.add_comment_to_issue("KAN-123", "Test comment") assert result["error"] is False assert result["message"] == "Comment added to issue KAN-123." 
jira_tool_spec.jira.add_comment.assert_called_once_with( mock_issue, "Test comment" ) def test_add_comment_to_issue_failure(self, jira_tool_spec): """Test failed comment addition.""" jira_tool_spec.jira.issue.side_effect = Exception("Issue not found") result = jira_tool_spec.add_comment_to_issue("INVALID-123", "Test comment") assert result["error"] is True assert "Failed to add comment to issue INVALID-123" in result["message"] def test_update_issue_summary_success(self, jira_tool_spec): """Test successful summary update.""" mock_issue = Mock() jira_tool_spec.jira.issue.return_value = mock_issue result = jira_tool_spec.update_issue_summary( "KAN-123", "Updated Summary", notify=True ) assert result["error"] is False assert result["message"] == "Issue KAN-123 summary updated." mock_issue.update.assert_called_once_with( summary="Updated Summary", notify=True ) def test_update_issue_summary_failure(self, jira_tool_spec): """Test failed summary update.""" jira_tool_spec.jira.issue.side_effect = Exception("Permission denied") result = jira_tool_spec.update_issue_summary("KAN-123", "Updated Summary") assert result["error"] is True assert "Failed to update issue KAN-123: Permission denied" in result["message"] def test_update_issue_assignee_success(self, jira_tool_spec): """Test successful assignee update.""" mock_user = Mock() mock_user.displayName = "John Doe" mock_user.accountId = "12345" jira_tool_spec.jira.search_users.return_value = [mock_user] mock_issue = Mock() jira_tool_spec.jira.issue.return_value = mock_issue result = jira_tool_spec.update_issue_assignee("KAN-123", "John Doe") assert result["error"] is False assert result["message"] == "Issue KAN-123 successfully assigned to John Doe" mock_issue.update.assert_called_once_with(assignee={"accountId": "12345"}) def test_update_issue_assignee_user_not_found(self, jira_tool_spec): """Test assignee update when user is not found.""" jira_tool_spec.jira.search_users.return_value = [] result = jira_tool_spec.update_issue_assignee("KAN-123", "Unknown User") assert result["error"] is True assert "User with full name 'Unknown User' not found" in result["message"] def test_update_issue_assignee_failure(self, jira_tool_spec): """Test failed assignee update.""" jira_tool_spec.jira.search_users.side_effect = Exception("API Error") result = jira_tool_spec.update_issue_assignee("KAN-123", "John Doe") assert result["error"] is True assert ( "An error occurred while updating the assignee: API Error" in result["message"] ) def test_update_issue_status_success(self, jira_tool_spec): """Test successful status update.""" mock_issue = Mock() jira_tool_spec.jira.issue.return_value = mock_issue jira_tool_spec.jira.transitions.return_value = [ {"id": "1", "name": "To Do"}, {"id": "2", "name": "In Progress"}, {"id": "3", "name": "Done"}, ] result = jira_tool_spec.update_issue_status("KAN-123", "Done") assert result["error"] is False assert result["message"] == "Issue KAN-123 status updated to Done." 
jira_tool_spec.jira.transition_issue.assert_called_once_with(mock_issue, "3") def test_update_issue_status_invalid_transition(self, jira_tool_spec): """Test status update with invalid transition.""" mock_issue = Mock() jira_tool_spec.jira.issue.return_value = mock_issue jira_tool_spec.jira.transitions.return_value = [ {"id": "1", "name": "To Do"}, {"id": "2", "name": "In Progress"}, ] result = jira_tool_spec.update_issue_status("KAN-123", "Done") assert result["error"] is True assert "Status 'Done' not available for issue KAN-123" in result["message"] assert "Available transitions: ['To Do', 'In Progress']" in result["message"] def test_update_issue_status_failure(self, jira_tool_spec): """Test failed status update.""" jira_tool_spec.jira.issue.side_effect = Exception("Issue not found") result = jira_tool_spec.update_issue_status("INVALID-123", "Done") assert result["error"] is True assert "Failed to update status for issue INVALID-123" in result["message"] def test_update_issue_due_date_success(self, jira_tool_spec): """Test successful due date update.""" mock_issue = Mock() jira_tool_spec.jira.issue.return_value = mock_issue result = jira_tool_spec.update_issue_due_date("KAN-123", "2024-12-31") assert result["error"] is False assert result["message"] == "Issue KAN-123 due date updated." mock_issue.update.assert_called_once_with(duedate="2024-12-31") def test_update_issue_due_date_clear(self, jira_tool_spec): """Test clearing due date.""" mock_issue = Mock() jira_tool_spec.jira.issue.return_value = mock_issue result = jira_tool_spec.update_issue_due_date("KAN-123", None) assert result["error"] is False assert result["message"] == "Issue KAN-123 due date cleared." mock_issue.update.assert_called_once_with(duedate=None) def test_update_issue_due_date_invalid_format(self, jira_tool_spec): """Test due date update with invalid date format.""" result = jira_tool_spec.update_issue_due_date("KAN-123", "31-12-2024") assert result["error"] is True assert result["message"] == "Invalid date format. Use YYYY-MM-DD." def test_update_issue_due_date_failure(self, jira_tool_spec): """Test failed due date update.""" jira_tool_spec.jira.issue.side_effect = Exception("Permission denied") result = jira_tool_spec.update_issue_due_date("KAN-123", "2024-12-31") assert result["error"] is True assert "Failed to update due date for issue KAN-123" in result["message"] def test_delete_issue_success(self, jira_tool_spec): """Test successful issue deletion.""" mock_issue = Mock() jira_tool_spec.jira.issue.return_value = mock_issue result = jira_tool_spec.delete_issue("KAN-123") assert result["error"] is False assert result["message"] == "Issue KAN-123 deleted successfully." mock_issue.delete.assert_called_once() def test_delete_issue_failure(self, jira_tool_spec): """Test failed issue deletion.""" jira_tool_spec.jira.issue.side_effect = Exception("Issue not found") result = jira_tool_spec.delete_issue("INVALID-123") assert result["error"] is True assert "Failed to delete issue INVALID-123" in result["message"]
TestJiraIssueToolSpec
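The tests above drive a `jira_tool_spec` pytest fixture that is defined elsewhere in the file. A purely hypothetical sketch of a compatible fixture follows; the `JiraToolSpec` class name, the `__new__` shortcut past real authentication, and the mocked `jira` attribute are all assumptions inferred from the assertions, not the project's actual fixture:

    import pytest
    from unittest.mock import Mock

    @pytest.fixture
    def jira_tool_spec():
        # Hypothetical: bypass the real constructor (which would authenticate)
        # and attach a Mock client, which is all the tests interact with.
        spec = JiraToolSpec.__new__(JiraToolSpec)
        spec.jira = Mock()
        return spec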
python
facebookresearch__faiss
tests/test_standalone_codec.py
{ "start": 3497, "end": 5631 }
class ____(unittest.TestCase): """ comparative accuracy of a few types of indexes """ def compare_accuracy(self, lowac, highac, max_errs=(1e10, 1e10)): d = 96 nb = 1000 nq = 0 nt = 2000 xt, x, _ = get_dataset_2(d, nt, nb, nq) errs = [] for factory_string in lowac, highac: codec = faiss.index_factory(d, factory_string) print('sa codec: code size %d' % codec.sa_code_size()) codec.train(xt) codes = codec.sa_encode(x) x2 = codec.sa_decode(codes) err = ((x - x2) ** 2).sum() errs.append(err) self.assertGreater(errs[0], errs[1]) self.assertGreater(max_errs[0], errs[0]) self.assertGreater(max_errs[1], errs[1]) # just a small IndexLattice I/O test if 'Lattice' in highac: codec2 = faiss.deserialize_index( faiss.serialize_index(codec)) codes = codec2.sa_encode(x) x3 = codec2.sa_decode(codes) self.assertTrue(np.all(x2 == x3)) def test_SQ(self): self.compare_accuracy('SQ4', 'SQ8') def test_SQ2(self): self.compare_accuracy('SQ6', 'SQ8') def test_SQ3(self): self.compare_accuracy('SQ8', 'SQfp16') def test_SQ4(self): self.compare_accuracy('SQ8', 'SQbf16') def test_PQ(self): self.compare_accuracy('PQ6x8np', 'PQ8x8np') def test_PQ2(self): self.compare_accuracy('PQ8x6np', 'PQ8x8np') def test_IVFvsPQ(self): self.compare_accuracy('PQ8np', 'IVF256,PQ8np') def test_Lattice(self): # measured low/high: 20946.244, 5277.483 self.compare_accuracy('ZnLattice3x10_4', 'ZnLattice3x20_4', (22000, 5400)) def test_Lattice2(self): # here the difference is actually tiny # measured errs: [16403.072, 15967.735] self.compare_accuracy('ZnLattice3x12_1', 'ZnLattice3x12_7', (18000, 16000)) swig_ptr = faiss.swig_ptr
TestAccuracy
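Outside the comparative harness, the standalone-codec API the test exercises reduces to a short round trip; a minimal sketch with arbitrary data shapes:

    import faiss
    import numpy as np

    d = 96
    xt = np.random.rand(2000, d).astype("float32")  # training vectors
    x = np.random.rand(1000, d).astype("float32")   # vectors to encode

    codec = faiss.index_factory(d, "SQ8")
    codec.train(xt)
    codes = codec.sa_encode(x)   # uint8 codes, codec.sa_code_size() bytes per vector
    x2 = codec.sa_decode(codes)  # lossy reconstruction
    err = ((x - x2) ** 2).sum()  # the reconstruction error the test compares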
python
allegroai__clearml
clearml/utilities/proxy_object.py
{ "start": 179, "end": 2048 }
class ____(dict): """Dictionary wrapper that updates an arguments instance on any item set in the dictionary""" def __init__( self, update_obj: Any, update_func: Callable, *args: Any, **kwargs: Any, ) -> None: super(ProxyDictPostWrite, self).__init__(*args, **kwargs) self._update_obj = update_obj self._update_func = None for k, i in self.items(): if isinstance(i, dict): super(ProxyDictPostWrite, self).update({k: ProxyDictPostWrite(update_obj, self._set_callback, i)}) self._update_func = update_func def __setitem__(self, key: Any, value: Any) -> None: super(ProxyDictPostWrite, self).__setitem__(key, value) self._set_callback() def __reduce__(self) -> tuple: return dict, (), None, None, iter(self._to_dict().items()) def _set_callback(self, *_: Any) -> None: if self._update_func: self._update_func(self._update_obj, self) def _to_dict(self) -> dict: a_dict = {} for k, i in self.items(): if isinstance(i, ProxyDictPostWrite): a_dict[k] = i._to_dict() else: a_dict[k] = i return a_dict def to_dict(self) -> dict: return self._to_dict() def update(self, E: Optional[Union[dict, Mapping]] = None, **F: Any) -> None: res = self._do_update(E, **F) self._set_callback() return res def _do_update(self, E: Optional[Union[dict, "ProxyDictPostWrite"]] = None, **F: Any) -> None: res = super(ProxyDictPostWrite, self).update( ProxyDictPostWrite(self._update_obj, self._set_callback, E) if E is not None else ProxyDictPostWrite(self._update_obj, self._set_callback, **F) ) return res
ProxyDictPostWrite
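A minimal usage sketch of the post-write callback behavior shown above; the `on_change` callback and `task` object are illustrative placeholders:

    def on_change(update_obj, proxied):
        print("dict changed on", update_obj, "->", proxied.to_dict())

    task = object()  # placeholder for the object being tracked
    d = ProxyDictPostWrite(task, on_change, {"lr": 0.1, "opt": {"name": "sgd"}})
    d["lr"] = 0.01             # __setitem__ fires on_change(task, d)
    d["opt"]["name"] = "adam"  # nested dicts are wrapped, so this fires it too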
python
realpython__materials
python-tuple/data_class.py
{ "start": 71, "end": 391 }
class ____: name: str age: int position: str = "Python Developer" with open("employees.csv", mode="r") as csv_file: reader = csv.reader(csv_file) next(reader) # Skip headers employees = [] for name, age, position in reader: employees.append(Employee(name, int(age), position))
Employee
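A quick construction sketch; this assumes the class is decorated with `@dataclass`, which the bare field annotations and the `Employee(name, int(age), position)` call imply:

    from dataclasses import dataclass

    @dataclass
    class Employee:
        name: str
        age: int
        position: str = "Python Developer"

    dev = Employee("Jane Doe", 30)  # position falls back to the default
    print(dev)  # Employee(name='Jane Doe', age=30, position='Python Developer')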
python
redis__redis-py
redis/auth/err.py
{ "start": 522, "end": 694 }
class ____(Exception): """ Represents an exception during token renewal process. """ def __init__(self, *args): super().__init__(*args)
TokenRenewalErr
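A typical raise/handle sketch for this exception type:

    try:
        raise TokenRenewalErr("identity provider unreachable")
    except TokenRenewalErr as e:
        print(f"token renewal failed: {e}")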
python
doocs__leetcode
solution/0200-0299/0207.Course Schedule/Solution.py
{ "start": 0, "end": 520 }
class ____: def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool: g = [[] for _ in range(numCourses)] indeg = [0] * numCourses for a, b in prerequisites: g[b].append(a) indeg[a] += 1 q = [i for i, x in enumerate(indeg) if x == 0] for i in q: numCourses -= 1 for j in g[i]: indeg[j] -= 1 if indeg[j] == 0: q.append(j) return numCourses == 0
Solution
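The class implements Kahn's topological sort over the prerequisite graph; the schedule is feasible exactly when every course can be dequeued. Usage sketch:

    s = Solution()
    print(s.canFinish(2, [[1, 0]]))          # True: take course 0, then 1
    print(s.canFinish(2, [[1, 0], [0, 1]]))  # False: 0 and 1 form a cycle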
python
pola-rs__polars
py-polars/src/polars/datatypes/classes.py
{ "start": 38765, "end": 42026 }
class ____(DataType): """ Base class for extension data types. .. warning:: This functionality is considered **unstable**. It may be changed at any point without it being considered a breaking change. See Also -------- Extension polars.register_extension_type """ def __init__( self, name: str, storage: PolarsDataType, metadata: str | None = None ) -> None: self._name = name self._storage = storage self._metadata = metadata @classmethod def ext_from_params( cls, name: str, storage: PolarsDataType, metadata: str | None ) -> Any: """Creates an Extension type instance from its parameters.""" slf = cls.__new__(cls) slf._name = name slf._storage = storage slf._metadata = metadata return slf def ext_name(self) -> str: """Returns the name of this extension type.""" return self._name def ext_storage(self) -> PolarsDataType: """Returns the storage type for this extension type.""" return self._storage def ext_metadata(self) -> str | None: """Returns the metadata for this extension type.""" return self._metadata def _string_repr(self) -> str: """ Return a short string representation of the extension type. This should be lowercase and if feasible show parameters in brackets, for example i64, str, datetime[ns], etc. This is used when displaying dataframes in a human-readable format, so brevity is important. This function starts with an underscore for historical reasons; it is intended to be overridden by subclasses. """ s = self.ext_name().lower() if len(s) <= 12: return s else: return s[:10] + ".." def __repr__(self) -> str: md = self.ext_metadata() if md is not None: return f"{self.__class__.__name__}({self.ext_name()!r}, {self.ext_storage()!r}, {md!r})" else: return f"{self.__class__.__name__}({self.ext_name()!r}, {self.ext_storage()!r})" # It's not recommended to override the below methods. def __hash__(self) -> int: return hash((self.ext_name(), self.ext_storage(), self.ext_metadata())) @overload # type: ignore[override] def __eq__(self, other: pl.DataTypeExpr) -> pl.Expr: ... @overload def __eq__(self, other: PolarsDataType) -> bool: ... def __eq__(self, other: pl.DataTypeExpr | PolarsDataType) -> pl.Expr | bool: if isinstance(other, pl.DataTypeExpr): return self.to_dtype_expr() == other else: return ( isinstance(other, BaseExtension) and self.ext_name() == other.ext_name() and self.ext_storage() == other.ext_storage() and self.ext_metadata() == other.ext_metadata() ) def __getstate__(self) -> tuple[str, PolarsDataType, str | None]: return self.ext_name(), self.ext_storage(), self.ext_metadata() def __setstate__(self, state: tuple[str, PolarsDataType, str | None]) -> None: self.__dict__ = type(self).ext_from_params(*state).__dict__
BaseExtension
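A construction sketch with illustrative names; the storage type is an ordinary Polars dtype:

    import polars as pl

    ext = BaseExtension("point2d", pl.List(pl.Float64), metadata="v1")
    ext.ext_name()      # 'point2d'
    ext.ext_storage()   # List(Float64)
    ext.ext_metadata()  # 'v1'
    # Equality compares name, storage, and metadata:
    ext == BaseExtension.ext_from_params("point2d", pl.List(pl.Float64), "v1")  # True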
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/sparse_ops/sparse_xent_op_d9m_test.py
{ "start": 1487, "end": 3004 }
class ____(test.TestCase): """Test d9m-unimplemented exceptions from SparseSoftmaxXentWithLogitsOp. Test that tf.errors.UnimplementedError is thrown, as appropriate, by the GPU code-paths through SparseSoftmaxXentWithLogitsOp when deterministic ops are enabled. This test assumes that sparse_xent_op_test.py runs equivalent test cases when deterministic ops are not enabled and will therefore detect erroneous exception throwing in those cases. """ @test_util.run_gpu_only @test_util.run_in_graph_and_eager_modes def testExceptionThrowing(self): with self.session(), test_util.force_gpu(): for features_dtype in [dtypes.float16, dtypes.float32]: for labels_dtype in [dtypes.int32, dtypes.int64]: features = constant_op.constant([[0.3, 0.5], [0.2, 0.6]], dtype=features_dtype) labels = constant_op.constant([1, 0], dtype=labels_dtype) with self.assertRaisesRegex( errors_impl.UnimplementedError, "The GPU implementation of SparseSoftmaxCrossEntropyWithLogits " + "that would have been executed is not deterministic. Note that " + "the Python API uses an alternative, deterministic, " + "GPU-accelerated path when determinsim is enabled."): result = gen_nn_ops.sparse_softmax_cross_entropy_with_logits( features=features, labels=labels) self.evaluate(result)
SparseXentOpDeterminismExceptionsTest
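The exception path above is only reachable once deterministic ops are switched on; in current TensorFlow (API name per TF >= 2.9) that toggle is:

    import tensorflow as tf

    tf.config.experimental.enable_op_determinism()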
python
pytorch__pytorch
test/inductor/test_foreach.py
{ "start": 5799, "end": 34387 }
class ____(TestCase): check_model_gpu = check_model_gpu check_model_cpu = check_model check_kernel_count = True def setUp(self): super().setUp() torch._inductor.metrics.reset() def tearDown(self): super().tearDown() torch._inductor.metrics.reset() def _test_single_list(self, op): if op in un_ops_under_test: def fn(a0, a1): return op([a0, a1]) elif op in bin_ops_under_test: def fn(a0, a1, b0, b1): return op([a0, a1], [b0, b1]) else: def fn(a0, a1, b0, b1, c0, c1): return op([a0, a1], [b0, b1], [c0, c1]) self.check_model_gpu( fn, gen_args(op), ) def _test_single_scalar(self, op): def fn(a0, a1): return op([a0, a1], 3.3) self.check_model_gpu( fn, ( torch.rand(10, 10, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), ), ) def _test_single_scalar_tensor(self, op): def fn(a0, a1): return op([a0, a1], torch.tensor(3.3, device=GPU_TYPE)) self.check_model_gpu( fn, ( torch.rand(10, 10, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), ), ) # called in test_gpu_cpp_wrapper.py @requires_gpu def test_foreach_cpp_wrapper_cuda(self): self._test_single_list(op=torch._foreach_add) # called in test_gpu_cpp_wrapper.py test_foreach_cpp_wrapper_xpu = test_foreach_cpp_wrapper_cuda @requires_gpu @all_ops def test_single_list(self, op): self._test_single_list(op) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1) @requires_gpu @scalar_bin_ops def test_single_scalar(self, op): self._test_single_scalar(op) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1) @requires_gpu @scalar_tensor_bin_ops def test_single_scalar_tensor(self, op): self._test_single_scalar_tensor(op) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1) @requires_gpu @all_ops def test_scheduler_fusion_list(self, op): if op in un_ops_under_test: def fn(a0, a1): c = op([a0, a1]) return torch._foreach_sqrt(c) elif op in bin_ops_under_test: def fn(a0, a1, b0, b1): c = op([a0, a1], [b0, b1]) return c, torch._foreach_add([a0, a1], c) else: def fn(a0, a1, b0, b1, c0, c1): c = op([a0, a1], [b0, b1], [c0, c1]) return c, torch._foreach_add([a0, a1], c) self.check_model_gpu( fn, gen_args(op), ) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1) @requires_gpu @scalar_bin_ops def test_scheduler_fusion_scalar(self, op): def fn(a0, a1): c = op([a0, a1], 3.4) return c, torch._foreach_add([a0, a1], c) self.check_model_gpu( fn, ( torch.rand(10, 10, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), ), ) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1) @requires_gpu @scalar_bin_ops def test_broadcasting(self, op): def fn(a0, a1, b0, b1): return op([a0, a1], [b0, b1]) fn_opt = torch.compile(fn) inputs = ( torch.rand(10, 1, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), torch.rand(1, 10, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), ) actual = fn_opt(*inputs) expected = fn(*inputs) self.assertEqual(actual, expected) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1) @requires_gpu @all_ops def test_singleton_lists(self, op): if op in un_ops_under_test: def fn(a0): return op([a0]) args = (torch.rand(10, 10, device=GPU_TYPE),) elif op in bin_ops_under_test: def fn(a0, b0): return op([a0], [b0]) args = ( torch.rand(10, 10, device=GPU_TYPE), torch.rand(10, 10, device=GPU_TYPE), ) else: def fn(a0, b0, c0): return op([a0], [b0], [c0]) args = ( torch.rand(10, 10, device=GPU_TYPE), torch.rand(10, 10, device=GPU_TYPE), torch.rand(10, 10, device=GPU_TYPE), ) self.check_model_gpu( fn, args, ) 
self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1) @requires_gpu @bin_ops def test_type_promotion(self, op): def fn(a0, a1, b0, b1): return op([a0, a1], [b0, b1]) fn_opt = torch.compile(fn) max32 = torch.iinfo(torch.int32).max max64 = torch.iinfo(torch.int64).max inputs = ( torch.randint(max32, (10, 10), device=GPU_TYPE, dtype=torch.int32), torch.randint(max32, (20, 20), device=GPU_TYPE, dtype=torch.int32), torch.randint(max32, (10, 10), device=GPU_TYPE, dtype=torch.int32), torch.randint(max64, (20, 20), device=GPU_TYPE, dtype=torch.int64), ) actual = fn_opt(*inputs) expected = fn(*inputs) self.assertEqual(actual, expected) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1) @requires_gpu @scalar_bin_ops def test_kernel_split_arg_limit_list(self, op): # NB: foeach_copy won't pass this test because it will dce one set of buffers def fn(a, b): return op(a, b) fn_opt = torch.compile(fn) max_args = 370 max_list_len = (max_args // 3) + 1 inputs = ( [torch.rand(10, 10, device=GPU_TYPE) for _ in range(max_list_len)], [torch.rand(10, 10, device=GPU_TYPE) for _ in range(max_list_len)], ) actual = fn_opt(*inputs) expected = fn(*inputs) self.assertEqual(actual, expected) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 2) @requires_gpu @scalar_bin_ops @unittest.skip( "Triton recursion depth exceeded: https://github.com/triton-lang/triton/issues/1763" ) def test_kernel_split_arg_limit_scalar(self, op): def fn(a): return op(a, 3.3) fn_opt = torch.compile(fn) max_args = 370 max_list_len = (max_args // 2) + 1 inputs = ([torch.rand(10, 10, device=GPU_TYPE) for _ in range(max_list_len)],) actual = fn_opt(*inputs) expected = fn(*inputs) self.assertEqual(actual, expected) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 2) @requires_gpu @bin_ops def test_fusion_duplicate_buffer_list(self, op): def fn(a0, a1, b0, b1): c = op([a0, a1], [b0, b1]) return op([a0, b0], [c[0], c[0]]) self.check_model_gpu( fn, ( torch.rand(10, 10, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), torch.rand(10, 10, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), ), reference_in_float=False, check_lowp=False, ) kernel_count = 1 if "foreach_map" in op.__name__: kernel_count = 2 self.assertEqual(torch._inductor.metrics.generated_kernel_count, kernel_count) @requires_gpu @all_ops def test_non_foreach_consumer_list(self, op): if op in un_ops_under_test: def fn(a0, a1): c = op([a0, a1]) return torch.mul(c[0], a0) elif op in bin_ops_under_test: def fn(a0, a1, b0, b1): c = op([a0, a1], [b0, b1]) return torch.mul(c[0], a0) else: def fn(a0, a1, b0, b1, c0, c1): c = op([a0, a1], [b0, b1], [c0, c1]) return torch.mul(c[0], a0) self.check_model_gpu( fn, gen_args(op), ) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1) @requires_gpu @scalar_bin_ops def test_non_foreach_consumer_scalar(self, op): def fn(a0, a1): c = op([a0, a1], 4.7) return torch.mul(c[0], a0) self.check_model_gpu( fn, ( torch.rand(10, 10, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), ), ) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1) @requires_gpu @all_ops def test_non_foreach_producer_list(self, op): if op in un_ops_under_test: def fn(a0, a1): c0 = torch.add(a0, a0) c1 = torch.add(a1, a1) return op([c0, c1]) elif op in bin_ops_under_test: def fn(a0, a1, b0, b1): c0 = torch.add(a0, b0) c1 = torch.add(a1, b1) return op([a0, a1], [c0, c1]) else: def fn(a0, a1, b0, b1, c0, c1): c0 = torch.add(a0, b0) c1 = torch.add(a1, b1) return op([a0, a1], [b0, b1], [c0, 
c1]) self.check_model_gpu( fn, gen_args(op), reference_in_float=False, check_lowp=False ) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1) @requires_gpu @scalar_bin_ops def test_non_foreach_producer_scalar(self, op): def fn(a0, a1, b0, b1): c0 = torch.mul(a0, b0) c1 = torch.mul(a1, b1) return op([c0, c1], 5.6) self.check_model_gpu( fn, ( torch.rand(10, 10, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), torch.rand(10, 10, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), ), ) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1) @requires_gpu @all_ops def test_non_foreach_consumer_producer_list(self, op): if op in un_ops_under_test: def fn(a0, a1): c0 = torch.add(a0, a0) c1 = torch.mul(a1, a1) d = op([c0, c1]) e0 = torch.mul(d[0], a0) e1 = torch.mul(d[1], a1) return [e0, e1] elif op in bin_ops_under_test: def fn(a0, a1, b0, b1): c0 = torch.add(a0, b0) c1 = torch.add(a1, b1) d = op([a0, a1], [c0, c1]) e0 = torch.mul(d[0], a0) e1 = torch.mul(d[1], a1) return [e0, e1] else: def fn(a0, a1, b0, b1, c0, c1): c0 = torch.add(a0, b0) c1 = torch.add(a1, b1) d = op([a0, a1], [b0, b1], [c0, c1]) e0 = torch.mul(d[0], a0) e1 = torch.mul(d[1], a1) return [e0, e1] self.check_model_gpu( fn, gen_args(op), reference_in_float=False, check_lowp=False, ) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1) @requires_gpu @scalar_bin_ops def test_non_foreach_consumer_producer_scalar(self, op): def fn(a0, a1, b0, b1): c0 = torch.add(a0, b0) c1 = torch.add(a1, b1) d = op([c0, c1], 5.8) e0 = torch.mul(d[0], a0) e1 = torch.mul(d[1], a1) return [e0, e1] self.check_model_gpu( fn, ( torch.rand(10, 10, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), torch.rand(10, 10, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), ), reference_in_float=False, check_lowp=False, ) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1) @requires_gpu @bin_ops @torch._dynamo.config.patch("automatic_dynamic_shapes", False) @torch._dynamo.config.patch("assume_static_by_default", False) @torch._inductor.config.patch("combo_kernel_foreach_dynamic_shapes", False) def test_dynamic_shapes_fallback(self, op): def fn(a0, a1, b0, b1): return op([a0, a1], [b0, b1]) inputs = ( torch.rand(10, 10, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), torch.rand(10, 10, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), ) self.check_model_gpu(fn, inputs) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 2) @requires_gpu @torch._dynamo.config.patch("automatic_dynamic_shapes", False) @torch._dynamo.config.patch("assume_static_by_default", False) @torch._inductor.config.patch("combo_kernel_foreach_dynamic_shapes", True) def test_enable_dynamic_shapes_python_wrapper(self, op=torch._foreach_add): def fn(a0, a1, b0, b1): return op([a0, a1], [b0, b1]) inputs = ( torch.rand(10, 10, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), torch.rand(10, 10, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), ) self.check_model_gpu(fn, inputs) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1) @requires_gpu @torch._dynamo.config.patch("automatic_dynamic_shapes", False) @torch._dynamo.config.patch("assume_static_by_default", False) @torch._inductor.config.patch("combo_kernel_foreach_dynamic_shapes", True) @torch._inductor.config.patch("cpp_wrapper", True) def test_enable_dynamic_shapes_cpp_wrapper_cuda(self, op=torch._foreach_add): def fn(a0, a1, b0, b1): return op([a0, a1], [b0, b1]) inputs = ( torch.rand(10, 10, device=GPU_TYPE), 
torch.rand(20, 20, device=GPU_TYPE), torch.rand(10, 10, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), ) self.check_model_gpu(fn, inputs) # called in test_gpu_cpp_wrapper.py test_enable_dynamic_shapes_cpp_wrapper_xpu = ( test_enable_dynamic_shapes_cpp_wrapper_cuda ) @unittest.skipIf(IS_FBCODE, "cpp compile not supported in fbcode") @bin_ops def test_cpu_cpp_fallback(self, op): def fn(a0, a1, b0, b1): return op([a0, a1], [b0, b1]) inputs = ( torch.rand(10, 10, device="cpu"), torch.rand(20, 20, device="cpu"), torch.rand(10, 10, device="cpu"), torch.rand(20, 20, device="cpu"), ) self.check_model_cpu(fn, inputs) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 2) @requires_gpu @decomp_ops def test_decomp(self, op): def fn(a0, a1, b0, b1, c0, c1): return op([a0, a1], [b0, b1], [c0, c1], value=0.5) self.check_model_gpu( fn, ( torch.rand(10, 10, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), torch.rand(10, 10, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), torch.rand(10, 10, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), ), ) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1) @requires_gpu def test_fuse_concat(self): def fn(x1, x2, x3, w1, w2, w3): x = torch.stack([x1, x2, x3]) w = torch.stack([w1, w2, w3]) y = torch.bmm(x, w) return y x1 = torch.randn(5, 4).to(GPU_TYPE) x2 = x1 + 1 x3 = x1 + 2 w1 = torch.randn(4, 3).to(GPU_TYPE) w2 = w1 + 1 w3 = w1 + 2 args = (x1, x2, x3, w1, w2, w3) self.check_model_gpu(fn, args) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 2) @requires_gpu def test_zero_elems(self): def fn(a0, a1, b0, b1): return torch._foreach_add([a0, a1], [b0, b1]) self.check_model_gpu( fn, ( torch.rand(0, device=GPU_TYPE), torch.rand(10, 10, device=GPU_TYPE), torch.rand(0, device=GPU_TYPE), torch.rand(10, 10, device=GPU_TYPE), ), ) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1) @requires_gpu @bin_ops def test_2d_blocking(self, op): def fn(a0, a1, b0, b1): return op([a0, a1], [b0, b1]) self.check_model_gpu( fn, ( torch.rand(10, 40, device=GPU_TYPE), torch.rand(10, 30, device=GPU_TYPE), torch.rand(40, 10, device=GPU_TYPE).t(), torch.rand(30, 10, device=GPU_TYPE).t(), ), ) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1) @requires_gpu @bin_ops def test_2d_blocking_partitioning(self, op): def fn(a0, a1, b0, b1): return op([a0, a1], [b0, b1]) self.check_model_gpu( fn, ( torch.rand(30, 20, device=GPU_TYPE), torch.rand(40, 30, device=GPU_TYPE), torch.rand(30, 20, device=GPU_TYPE), torch.rand(30, 40, device=GPU_TYPE).t(), ), ) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 2) @requires_gpu @bin_ops def test_2d_blocking_partitioning_elems(self, op): """2D blocking should be grouped by number of yelems""" def fn(a0, a1, a2, b0, b1, b2): return op([a0, a1, a2], [b0, b1, b2]) self.check_model_gpu( fn, ( torch.rand(10, 20, device=GPU_TYPE), torch.rand(30, 20, device=GPU_TYPE), torch.rand(10, 30, device=GPU_TYPE), torch.rand(20, 10, device=GPU_TYPE).t(), torch.rand(20, 30, device=GPU_TYPE).t(), torch.rand(30, 10, device=GPU_TYPE).t(), ), ) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 2) @requires_gpu @bin_ops @torch._inductor.config.patch("combo_kernel_allow_mixed_sizes", 2) def test_2d_blocking_partitioning_mixed_sizes(self, op): """2D blocking with mixed sizes should group together""" def fn(a0, a1, a2, b0, b1, b2): return op([a0, a1, a2], [b0, b1, b2]) self.check_model_gpu( fn, ( torch.rand(10, 20, device=GPU_TYPE), torch.rand(30, 20, 
device=GPU_TYPE), torch.rand(10, 30, device=GPU_TYPE), torch.rand(20, 10, device=GPU_TYPE).t(), torch.rand(20, 30, device=GPU_TYPE).t(), torch.rand(30, 10, device=GPU_TYPE).t(), ), ) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1) @requires_gpu @inplace_bin_ops def test_reinplacing(self, op): def fn(a0, a1, b0, b1): op([a0, a1], [b0, b1]) return [a0, a1] inputs = ( torch.rand(10, 10, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), torch.rand(10, 10, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), ) self.check_model_gpu(fn, inputs, check_lowp=False) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1) @requires_gpu @inplace_bin_ops def test_reinplacing_mut_before(self, op): def fn(a0, a1, b0, b1): a0.add_(torch.ones(10, 10, device=GPU_TYPE)) op([a0, a1], [b0, b1]) return [a0, a1] inputs = ( torch.rand(10, 10, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), torch.rand(10, 10, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), ) self.check_model_gpu(fn, inputs, check_lowp=False) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1) @requires_gpu @inplace_bin_ops def test_reinplacing_mut_after(self, op): def fn(a0, a1, b0, b1): op([a0, a1], [b0, b1]) a0.add_(torch.ones(10, 10, device=GPU_TYPE)) return [a0, a1] inputs = ( torch.rand(10, 10, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), torch.rand(10, 10, device=GPU_TYPE), torch.rand(20, 20, device=GPU_TYPE), ) self.check_model_gpu(fn, inputs, check_lowp=False) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1) @requires_gpu def test_multi_device(self): def test_foreach_add(a0, a1, b0, b1): return torch._foreach_add([a0, a1], [b0, b1]) inps = [ torch.ones(10, 10, device=GPU_TYPE), torch.ones(20, 20, device="cpu"), torch.zeros(10, 10, device=GPU_TYPE), torch.zeros(20, 20, device="cpu"), ] out_eager = test_foreach_add(*inps) out_compiled = torch.compile(test_foreach_add)(*inps) self.assertEqual(out_eager, out_compiled) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 2) @requires_gpu def test_aliasing(self): def test_foreach_add(a0, a1, a2, b0, b1, b2): return torch._foreach_add_([a0, a1, a2], [b0, b1, b2]) input = torch.ones(10, 10, device=GPU_TYPE) input2 = torch.ones(10, 10, device=GPU_TYPE) inps = [ input, input.view(10, 10), input.view(10, 10), input2, input2.view(10, 10), input2.view(10, 10), ] out_eager = test_foreach_add(*inps) out_compiled = torch.compile(test_foreach_add)(*inps) self.assertEqual(out_eager, out_compiled) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 4) @requires_gpu @torch._inductor.config.patch("combo_kernel_allow_mixed_sizes", 1) def test_2d_block_no_mixed_sizes_no_mask(self): """2D blocking with no mixed sizes constant mask""" def fn(a0, a1, a2, b0, b1, b2): return torch._foreach_add([a0, a1, a2], [b0, b1, b2]) self.check_model_gpu( fn, ( torch.rand(1024, 2048, device=GPU_TYPE), torch.rand(2048, 2048, device=GPU_TYPE), torch.rand(1024, 2048, device=GPU_TYPE), torch.rand(2048, 1024, device=GPU_TYPE).t(), torch.rand(2048, 2048, device=GPU_TYPE).t(), torch.rand(2048, 1024, device=GPU_TYPE).t(), ), ) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 2) @requires_gpu @torch._inductor.config.patch("combo_kernel_allow_mixed_sizes", 2) def test_2d_block_mixed_sizes_with_mask(self): """2D blocking with mixed sizes should have mask""" def fn(a0, a1, a2, b0, b1, b2): return torch._foreach_add([a0, a1, a2], [b0, b1, b2]) self.check_model_gpu( fn, ( torch.rand(1024, 2048, 
device=GPU_TYPE), torch.rand(2048, 2048, device=GPU_TYPE), torch.rand(1024, 2048, device=GPU_TYPE), torch.rand(2048, 1024, device=GPU_TYPE).t(), torch.rand(2048, 2048, device=GPU_TYPE).t(), torch.rand(2048, 1024, device=GPU_TYPE).t(), ), ) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1) @requires_gpu @foreach_map_bin_ops def test_foreach_map_backward_binary(self, op): from torch._dynamo.polyfills import foreach_map_fn def fn(xs, ys): outs = op(xs, ys) return outs[0].sum() + outs[1].sum() + outs[2].sum() def ref_fn(xs, ys): outs = foreach_map_fn(torch.add, xs, ys) return outs[0].sum() + outs[1].sum() + outs[2].sum() ref_inps = ( [ torch.rand(10, 20, device=GPU_TYPE, requires_grad=True), torch.rand(10, 30, device=GPU_TYPE, requires_grad=True), torch.rand(30, 30, device=GPU_TYPE, requires_grad=True), ], [ torch.rand(10, 20, device=GPU_TYPE, requires_grad=True), torch.rand(10, 30, device=GPU_TYPE, requires_grad=True), torch.rand(30, 30, device=GPU_TYPE, requires_grad=True), ], ) inps = ( [x.clone().detach().requires_grad_(True) for x in ref_inps[0]], [y.clone().detach().requires_grad_(True) for y in ref_inps[1]], ) out_ref = ref_fn(*ref_inps) out_ref.backward() # unpacking result, (fw_code, bw_code) _, (_, _) = run_fw_bw_and_get_code(lambda: torch.compile(fn)(*inps)) for ref, act in zip(tree_flatten(ref_inps)[0], tree_flatten(inps)[0]): torch.allclose(ref.grad, act.grad) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 5) @requires_gpu def test_foreach_map_input_mutation(self): def fn(xs, ys): outs = foreach_map_add_inplace(xs, ys) return outs[0].sum() + outs[1].sum() + outs[2].sum() ref_inps = ( [ torch.rand(10, 20, device=GPU_TYPE, requires_grad=True), torch.rand(10, 30, device=GPU_TYPE, requires_grad=True), torch.rand(30, 30, device=GPU_TYPE, requires_grad=True), ], [ torch.rand(10, 20, device=GPU_TYPE, requires_grad=True), torch.rand(10, 30, device=GPU_TYPE, requires_grad=True), torch.rand(30, 30, device=GPU_TYPE, requires_grad=True), ], ) # Set requires_grad to be False to avoid mutating a leaf variable inps = ( [x.clone().detach().requires_grad_(False) for x in ref_inps[0]], [y.clone().detach().requires_grad_(False) for y in ref_inps[1]], ) # TODO: after decomposing auto_functionalized, we're getting # a functional subgraph with an inlined epilogue. 
with self.assertRaisesRegex( torch._inductor.exc.InductorError, "Buffer mutation detected during lowering of aten.copy_.default", ): with mock.patch( "torch._dynamo.variables.higher_order_ops.BaseHOPVariable.supports_input_mutation", True, ): _ = run_fw_bw_and_get_code(lambda: torch.compile(fn)(*inps)) @requires_gpu @foreach_map_un_ops def test_foreach_map_backward_unary(self, op): from torch._dynamo.polyfills import foreach_map_fn def fn(xs): outs = op(xs) return outs[0].sum() + outs[1].sum() + outs[2].sum() def ref_fn(xs): outs = foreach_map_fn(op.original_op, xs) return outs[0].sum() + outs[1].sum() + outs[2].sum() ref_inp = [ torch.rand(10, 20, device=GPU_TYPE, requires_grad=True), torch.rand(10, 30, device=GPU_TYPE, requires_grad=True), torch.rand(30, 30, device=GPU_TYPE, requires_grad=True), ] inp = [x.clone().detach().requires_grad_(True) for x in ref_inp] out_ref = ref_fn(ref_inp) out_ref.backward() # unpacking result, (fw_code, bw_code) _, (_, _) = run_fw_bw_and_get_code(lambda: torch.compile(fn)(inp)) for ref, act in zip(ref_inp, inp): torch.allclose(ref.grad, act.grad) self.assertEqual(torch._inductor.metrics.generated_kernel_count, 5) if __name__ == "__main__": from torch._inductor.test_case import run_tests if HAS_CPU or HAS_GPU: run_tests(needs="filelock")
ForeachTests
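The ops under test are the `torch._foreach_*` horizontally fused primitives, which take lists of tensors and return (or, for the trailing-underscore variants, mutate) lists; a minimal eager sketch:

    import torch

    xs = [torch.ones(2, 2), torch.ones(3, 3)]
    ys = [torch.full((2, 2), 2.0), torch.full((3, 3), 3.0)]
    out = torch._foreach_add(xs, ys)  # [2x2 filled with 3.0, 3x3 filled with 4.0]
    torch._foreach_add_(xs, ys)       # in-place variant mutates xs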
python
pytorch__pytorch
test/package/test_save_load.py
{ "start": 544, "end": 10078 }
class ____(PackageTestCase): """Core save_* and loading API tests.""" def test_saving_source(self): buffer = BytesIO() with PackageExporter(buffer) as he: he.save_source_file("foo", str(packaging_directory / "module_a.py")) he.save_source_file("foodir", str(packaging_directory / "package_a")) buffer.seek(0) hi = PackageImporter(buffer) foo = hi.import_module("foo") s = hi.import_module("foodir.subpackage") self.assertEqual(foo.result, "module_a") self.assertEqual(s.result, "package_a.subpackage") def test_saving_string(self): buffer = BytesIO() with PackageExporter(buffer) as he: src = dedent( """\ import math the_math = math """ ) he.save_source_string("my_mod", src) buffer.seek(0) hi = PackageImporter(buffer) m = hi.import_module("math") import math self.assertIs(m, math) my_mod = hi.import_module("my_mod") self.assertIs(my_mod.math, math) def test_save_module(self): buffer = BytesIO() with PackageExporter(buffer) as he: import module_a import package_a he.save_module(module_a.__name__) he.save_module(package_a.__name__) buffer.seek(0) hi = PackageImporter(buffer) module_a_i = hi.import_module("module_a") self.assertEqual(module_a_i.result, "module_a") self.assertIsNot(module_a, module_a_i) package_a_i = hi.import_module("package_a") self.assertEqual(package_a_i.result, "package_a") self.assertIsNot(package_a_i, package_a) def test_dunder_imports(self): buffer = BytesIO() with PackageExporter(buffer) as he: import package_b obj = package_b.PackageBObject he.intern("**") he.save_pickle("res", "obj.pkl", obj) buffer.seek(0) hi = PackageImporter(buffer) hi.load_pickle("res", "obj.pkl") package_b = hi.import_module("package_b") self.assertEqual(package_b.result, "package_b") math = hi.import_module("math") self.assertEqual(math.__name__, "math") xml_sub_sub_package = hi.import_module("xml.sax.xmlreader") self.assertEqual(xml_sub_sub_package.__name__, "xml.sax.xmlreader") subpackage_1 = hi.import_module("package_b.subpackage_1") self.assertEqual(subpackage_1.result, "subpackage_1") subpackage_2 = hi.import_module("package_b.subpackage_2") self.assertEqual(subpackage_2.result, "subpackage_2") subsubpackage_0 = hi.import_module("package_b.subpackage_0.subsubpackage_0") self.assertEqual(subsubpackage_0.result, "subsubpackage_0") def test_bad_dunder_imports(self): """Test to ensure bad __imports__ don't cause PackageExporter to fail.""" buffer = BytesIO() with PackageExporter(buffer) as e: e.save_source_string( "m", '__import__(these, unresolvable, "things", won, crash, me)', # codespell:ignore ) def test_save_module_binary(self): f = BytesIO() with PackageExporter(f) as he: import module_a import package_a he.save_module(module_a.__name__) he.save_module(package_a.__name__) f.seek(0) hi = PackageImporter(f) module_a_i = hi.import_module("module_a") self.assertEqual(module_a_i.result, "module_a") self.assertIsNot(module_a, module_a_i) package_a_i = hi.import_module("package_a") self.assertEqual(package_a_i.result, "package_a") self.assertIsNot(package_a_i, package_a) def test_pickle(self): import package_a.subpackage obj = package_a.subpackage.PackageASubpackageObject() obj2 = package_a.PackageAObject(obj) buffer = BytesIO() with PackageExporter(buffer) as he: he.intern("**") he.save_pickle("obj", "obj.pkl", obj2) buffer.seek(0) hi = PackageImporter(buffer) # check we got dependencies sp = hi.import_module("package_a.subpackage") # check we didn't get other stuff with self.assertRaises(ImportError): hi.import_module("module_a") obj_loaded = hi.load_pickle("obj", "obj.pkl") self.assertIsNot(obj2, 
obj_loaded) self.assertIsInstance(obj_loaded.obj, sp.PackageASubpackageObject) self.assertIsNot( package_a.subpackage.PackageASubpackageObject, sp.PackageASubpackageObject ) def test_pickle_long_name_with_protocol_4(self): import package_a.long_name container = [] # Indirectly grab the function to avoid pasting a 256 character # function into the test package_a.long_name.add_function(container) buffer = BytesIO() with PackageExporter(buffer) as exporter: exporter.intern("**") exporter.save_pickle( "container", "container.pkl", container, pickle_protocol=4 ) buffer.seek(0) importer = PackageImporter(buffer) unpickled_container = importer.load_pickle("container", "container.pkl") self.assertIsNot(container, unpickled_container) self.assertEqual(len(unpickled_container), 1) self.assertEqual(container[0](), unpickled_container[0]()) def test_exporting_mismatched_code(self): """ If an object with the same qualified name is loaded from different packages, the user should get an error if they try to re-save the object with the wrong package's source code. """ import package_a.subpackage obj = package_a.subpackage.PackageASubpackageObject() obj2 = package_a.PackageAObject(obj) b1 = BytesIO() with PackageExporter(b1) as pe: pe.intern("**") pe.save_pickle("obj", "obj.pkl", obj2) b1.seek(0) importer1 = PackageImporter(b1) loaded1 = importer1.load_pickle("obj", "obj.pkl") b1.seek(0) importer2 = PackageImporter(b1) loaded2 = importer2.load_pickle("obj", "obj.pkl") def make_exporter(): pe = PackageExporter(BytesIO(), importer=[importer1, sys_importer]) # Ensure that the importer finds the 'PackageAObject' defined in 'importer1' first. return pe # This succeeds because OrderedImporter.get_name() properly # falls back to sys_importer which can find the original PackageAObject pe = make_exporter() pe.save_pickle("obj", "obj.pkl", obj2) # This should also fail. The 'PackageAObject' type defined from 'importer1' # is not necessarily the same as the one defined from 'importer2' pe = make_exporter() with self.assertRaises(pickle.PicklingError): pe.save_pickle("obj", "obj.pkl", loaded2) # This should succeed. The 'PackageAObject' type defined from # 'importer1' is a match for the one used by loaded1. pe = make_exporter() pe.save_pickle("obj", "obj.pkl", loaded1) def test_save_imported_module(self): """Saving a module that came from another PackageImporter should work.""" import package_a.subpackage obj = package_a.subpackage.PackageASubpackageObject() obj2 = package_a.PackageAObject(obj) buffer = BytesIO() with PackageExporter(buffer) as exporter: exporter.intern("**") exporter.save_pickle("model", "model.pkl", obj2) buffer.seek(0) importer = PackageImporter(buffer) imported_obj2 = importer.load_pickle("model", "model.pkl") imported_obj2_module = imported_obj2.__class__.__module__ # Should export without error. buffer2 = BytesIO() with PackageExporter(buffer2, importer=(importer, sys_importer)) as exporter: exporter.intern("**") exporter.save_module(imported_obj2_module) def test_save_imported_module_using_package_importer(self): """Exercise a corner case: re-packaging a module that uses `torch_package_importer`""" import package_a.use_torch_package_importer # noqa: F401 buffer = BytesIO() with PackageExporter(buffer) as exporter: exporter.intern("**") exporter.save_module("package_a.use_torch_package_importer") buffer.seek(0) importer = PackageImporter(buffer) # Should export without error. 
buffer2 = BytesIO() with PackageExporter(buffer2, importer=(importer, sys_importer)) as exporter: exporter.intern("**") exporter.save_module("package_a.use_torch_package_importer") @skipIf(version_info >= (3, 13), "https://github.com/pytorch/pytorch/issues/142170") def test_save_load_fp8(self): tensor = torch.rand(20, 20).to(torch.float8_e4m3fn) buffer = BytesIO() with PackageExporter(buffer) as exporter: exporter.save_pickle("fp8_model", "model.pkl", tensor) buffer.seek(0) importer = PackageImporter(buffer) loaded_tensor = importer.load_pickle("fp8_model", "model.pkl") self.assertTrue(torch.equal(tensor, loaded_tensor)) if __name__ == "__main__": run_tests()
TestSaveLoad
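Distilled to its core, the round trip this suite exercises is:

    from io import BytesIO
    from torch.package import PackageExporter, PackageImporter

    buf = BytesIO()
    with PackageExporter(buf) as pe:
        pe.save_source_string("my_mod", "answer = 42\n")
    buf.seek(0)
    assert PackageImporter(buf).import_module("my_mod").answer == 42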
python
faif__python-patterns
patterns/behavioral/publish_subscribe.py
{ "start": 969, "end": 2382 }
class ____: def __init__(self, name: str, msg_center: Provider) -> None: self.name = name self.provider = msg_center def subscribe(self, msg: str) -> None: self.provider.subscribe(msg, self) def unsubscribe(self, msg: str) -> None: self.provider.unsubscribe(msg, self) def run(self, msg: str) -> None: print(f"{self.name} got {msg}") def main(): """ >>> message_center = Provider() >>> fftv = Publisher(message_center) >>> jim = Subscriber("jim", message_center) >>> jim.subscribe("cartoon") >>> jack = Subscriber("jack", message_center) >>> jack.subscribe("music") >>> gee = Subscriber("gee", message_center) >>> gee.subscribe("movie") >>> vani = Subscriber("vani", message_center) >>> vani.subscribe("movie") >>> vani.unsubscribe("movie") # Note that no one subscribed to `ads` # and that vani changed their mind >>> fftv.publish("cartoon") >>> fftv.publish("music") >>> fftv.publish("ads") >>> fftv.publish("movie") >>> fftv.publish("cartoon") >>> fftv.publish("cartoon") >>> fftv.publish("movie") >>> fftv.publish("blank") >>> message_center.update() jim got cartoon jack got music gee got movie jim got cartoon jim got cartoon gee got movie """ if __name__ == "__main__": import doctest doctest.testmod()
Subscriber
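The sample's span covers only `Subscriber`; `Provider` and `Publisher` live in the same module. One compatible sketch of those two classes, reconstructed from how the doctest drives them (messages queue on publish and are delivered on `update()`), is given below; treat it as an approximation rather than the module's exact source:

    class Provider:
        def __init__(self) -> None:
            self.msg_queue = []
            self.subscribers = {}

        def notify(self, msg: str) -> None:
            self.msg_queue.append(msg)

        def subscribe(self, msg: str, subscriber) -> None:
            self.subscribers.setdefault(msg, []).append(subscriber)

        def unsubscribe(self, msg: str, subscriber) -> None:
            self.subscribers[msg].remove(subscriber)

        def update(self) -> None:
            # Deliver every queued message to its topic's subscribers.
            for msg in self.msg_queue:
                for sub in self.subscribers.get(msg, []):
                    sub.run(msg)
            self.msg_queue = []


    class Publisher:
        def __init__(self, msg_center: Provider) -> None:
            self.provider = msg_center

        def publish(self, msg: str) -> None:
            self.provider.notify(msg)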
python
getsentry__sentry
tests/sentry/integrations/gitlab/tasks/test_pr_comment.py
{ "start": 1021, "end": 5046 }
class ____(GitLabTestCase): def setUp(self) -> None: super().setUp() self.installation = get_installation_of_type( GitlabIntegration, integration=self.integration, org_id=self.organization.id ) self.pr_comment_workflow = self.installation.get_pr_comment_workflow() self.another_integration = self.create_integration( organization=self.organization, external_id="1", provider="github" ) self.another_org_user = self.create_user("foo@localhost") self.another_organization = self.create_organization( name="Foobar", owner=self.another_org_user ) self.another_team = self.create_team(organization=self.organization, name="Mariachi Band") self.another_org_project = self.create_project( organization=self.another_organization, teams=[self.another_team], name="Bengal" ) self.another_org_integration = self.create_integration( organization=self.another_organization, external_id="1", provider="gitlab" ) self.user_to_commit_author_map = { self.user: self.create_commit_author(project=self.project, user=self.user), self.another_org_user: self.create_commit_author( project=self.another_org_project, user=self.another_org_user ), } self.repo = self.create_gitlab_repo(name="Get Sentry / Example Repo", external_id=123) self.pr_key = 1 self.commit_sha = 1 self.fingerprint = 1 def add_commit_to_repo(self, repo, user, project): if user not in self.user_to_commit_author_map: self.user_to_commit_author_map[user] = self.create_commit_author( project=repo.project, user=user ) commit = self.create_commit( project=project, repo=repo, author=self.user_to_commit_author_map[user], key=str(self.commit_sha), message=str(self.commit_sha), ) self.commit_sha += 1 return commit def add_pr_to_commit(self, commit: Commit, date_added=None): if date_added is None: date_added = before_now(minutes=1) pr = PullRequest.objects.create( organization_id=commit.organization_id, repository_id=commit.repository_id, key=str(self.pr_key), author=commit.author, message="foo", title="bar", merge_commit_sha=commit.key, date_added=date_added, ) self.pr_key += 1 self.add_branch_commit_to_pr(commit, pr) return pr def add_branch_commit_to_pr(self, commit: Commit, pr: PullRequest): pr_commit = PullRequestCommit.objects.create(pull_request=pr, commit=commit) return pr_commit def add_groupowner_to_commit(self, commit: Commit, project, user): event = self.store_event( data={ "message": f"issue {self.fingerprint}", "culprit": f"issue{self.fingerprint}", "fingerprint": [f"issue{self.fingerprint}"], }, project_id=project.id, ) assert event.group is not None self.fingerprint += 1 groupowner = GroupOwner.objects.create( group=event.group, user_id=user.id, project=project, organization_id=commit.organization_id, type=GroupOwnerType.SUSPECT_COMMIT.value, context={"commitId": commit.id}, ) return groupowner def create_pr_issues(self, repo=None): if repo is None: repo = self.repo commit_1 = self.add_commit_to_repo(repo, self.user, self.project) pr = self.add_pr_to_commit(commit_1) self.add_groupowner_to_commit(commit_1, self.project, self.user) self.add_groupowner_to_commit(commit_1, self.another_org_project, self.another_org_user) return pr
GitlabCommentTestCase
python
pandas-dev__pandas
pandas/tests/io/formats/test_to_latex.py
{ "start": 20775, "end": 24863 }
class ____: @pytest.fixture def df_with_symbols(self): """Dataframe with special characters for testing chars escaping.""" a = "a" b = "b" return DataFrame({"co$e^x$": {a: "a", b: "b"}, "co^l1": {a: "a", b: "b"}}) def test_to_latex_escape_false(self, df_with_symbols): result = df_with_symbols.to_latex(escape=False) expected = _dedent( r""" \begin{tabular}{lll} \toprule & co$e^x$ & co^l1 \\ \midrule a & a & a \\ b & b & b \\ \bottomrule \end{tabular} """ ) assert result == expected def test_to_latex_escape_default(self, df_with_symbols): # gh50871: in v2.0 escape is False by default (styler.format.escape=None) default = df_with_symbols.to_latex() specified_true = df_with_symbols.to_latex(escape=True) assert default != specified_true def test_to_latex_special_escape(self): df = DataFrame([r"a\b\c", r"^a^b^c", r"~a~b~c"]) result = df.to_latex(escape=True) expected = _dedent( r""" \begin{tabular}{ll} \toprule & 0 \\ \midrule 0 & a\textbackslash b\textbackslash c \\ 1 & \textasciicircum a\textasciicircum b\textasciicircum c \\ 2 & \textasciitilde a\textasciitilde b\textasciitilde c \\ \bottomrule \end{tabular} """ ) assert result == expected def test_to_latex_escape_special_chars(self): special_characters = ["&", "%", "$", "#", "_", "{", "}", "~", "^", "\\"] df = DataFrame(data=special_characters) result = df.to_latex(escape=True) expected = _dedent( r""" \begin{tabular}{ll} \toprule & 0 \\ \midrule 0 & \& \\ 1 & \% \\ 2 & \$ \\ 3 & \# \\ 4 & \_ \\ 5 & \{ \\ 6 & \} \\ 7 & \textasciitilde \\ 8 & \textasciicircum \\ 9 & \textbackslash \\ \bottomrule \end{tabular} """ ) assert result == expected def test_to_latex_escape_special_chars_in_index_names(self): # https://github.com/pandas-dev/pandas/issues/61309 # https://github.com/pandas-dev/pandas/issues/57362 index = "&%$#_{}}~^\\" df = DataFrame({index: [1, 2, 3]}).set_index(index) result = df.to_latex(escape=True) expected = _dedent( r""" \begin{tabular}{l} \toprule \&\%\$\#\_\{\}\}\textasciitilde \textasciicircum \textbackslash \\ \midrule 1 \\ 2 \\ 3 \\ \bottomrule \end{tabular} """ ) assert result == expected def test_to_latex_escape_special_chars_in_column_name(self): df = DataFrame({"A": [1, 2, 3], "B": ["a", "b", "c"]}) df.columns.name = "_^~" result = df.to_latex(escape=True) expected = _dedent( r""" \begin{tabular}{lrl} \toprule \_\textasciicircum \textasciitilde & A & B \\ \midrule 0 & 1 & a \\ 1 & 2 & b \\ 2 & 3 & c \\ \bottomrule \end{tabular} """ ) assert result == expected def test_to_latex_specified_header_special_chars_without_escape(self): # GH 7124 df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) result = df.to_latex(header=["$A$", "$B$"], escape=False) expected = _dedent( r""" \begin{tabular}{lrl} \toprule & $A$ & $B$ \\ \midrule 0 & 1 & b1 \\ 1 & 2 & b2 \\ \bottomrule \end{tabular} """ ) assert result == expected
TestToLatexEscape
python
doocs__leetcode
solution/3700-3799/3748.Count Stable Subarrays/Solution.py
{ "start": 0, "end": 805 }
class ____: def countStableSubarrays( self, nums: List[int], queries: List[List[int]] ) -> List[int]: s = [0] l, n = 0, len(nums) seg = [] for r, x in enumerate(nums): if r == n - 1 or x > nums[r + 1]: seg.append(l) k = r - l + 1 s.append(s[-1] + (1 + k) * k // 2) l = r + 1 ans = [] for l, r in queries: i = bisect_right(seg, l) j = bisect_right(seg, r) - 1 if i > j: k = r - l + 1 ans.append((1 + k) * k // 2) else: a = seg[i] - l b = r - seg[j] + 1 ans.append((1 + a) * a // 2 + s[j] - s[i] + (1 + b) * b // 2) return ans
Solution
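A tiny worked example (the class body assumes `from typing import List` and `from bisect import bisect_right` at module level). Here "stable" subarrays are the non-decreasing ones, which is what the `x > nums[r + 1]` segment break encodes: with nums = [1, 2, 1] the maximal non-decreasing segments start at indices 0 and 2, so the query [0, 2] counts [1], [2], [1], and [1, 2]:

    print(Solution().countStableSubarrays([1, 2, 1], [[0, 2]]))  # [4]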
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/selectable.py
{ "start": 151103, "end": 158795 }
class ____( HasCompileState, GenerativeSelect, TypedReturnsRows[Unpack[_Ts]] ): """Forms the basis of ``UNION``, ``UNION ALL``, and other SELECT-based set operations. .. seealso:: :func:`_expression.union` :func:`_expression.union_all` :func:`_expression.intersect` :func:`_expression.intersect_all` :func:`_expression.except` :func:`_expression.except_all` """ __visit_name__ = "compound_select" _traverse_internals: _TraverseInternalsType = ( [ ("selects", InternalTraversal.dp_clauseelement_list), ("_limit_clause", InternalTraversal.dp_clauseelement), ("_offset_clause", InternalTraversal.dp_clauseelement), ("_fetch_clause", InternalTraversal.dp_clauseelement), ("_fetch_clause_options", InternalTraversal.dp_plain_dict), ("_order_by_clauses", InternalTraversal.dp_clauseelement_list), ("_group_by_clauses", InternalTraversal.dp_clauseelement_list), ("_for_update_arg", InternalTraversal.dp_clauseelement), ("keyword", InternalTraversal.dp_string), ] + SupportsCloneAnnotations._clone_annotations_traverse_internals + HasCTE._has_ctes_traverse_internals + DialectKWArgs._dialect_kwargs_traverse_internals + ExecutableStatement._executable_traverse_internals ) selects: List[SelectBase] _is_from_container = True _auto_correlate = False def __init__( self, keyword: _CompoundSelectKeyword, *selects: _SelectStatementForCompoundArgument[Unpack[_Ts]], ): self.keyword = keyword self.selects = [ coercions.expect( roles.CompoundElementRole, s, apply_propagate_attrs=self ).self_group(against=self) for s in selects ] GenerativeSelect.__init__(self) @classmethod def _create_union( cls, *selects: _SelectStatementForCompoundArgument[Unpack[_Ts]] ) -> CompoundSelect[Unpack[_Ts]]: return CompoundSelect(_CompoundSelectKeyword.UNION, *selects) @classmethod def _create_union_all( cls, *selects: _SelectStatementForCompoundArgument[Unpack[_Ts]] ) -> CompoundSelect[Unpack[_Ts]]: return CompoundSelect(_CompoundSelectKeyword.UNION_ALL, *selects) @classmethod def _create_except( cls, *selects: _SelectStatementForCompoundArgument[Unpack[_Ts]] ) -> CompoundSelect[Unpack[_Ts]]: return CompoundSelect(_CompoundSelectKeyword.EXCEPT, *selects) @classmethod def _create_except_all( cls, *selects: _SelectStatementForCompoundArgument[Unpack[_Ts]] ) -> CompoundSelect[Unpack[_Ts]]: return CompoundSelect(_CompoundSelectKeyword.EXCEPT_ALL, *selects) @classmethod def _create_intersect( cls, *selects: _SelectStatementForCompoundArgument[Unpack[_Ts]] ) -> CompoundSelect[Unpack[_Ts]]: return CompoundSelect(_CompoundSelectKeyword.INTERSECT, *selects) @classmethod def _create_intersect_all( cls, *selects: _SelectStatementForCompoundArgument[Unpack[_Ts]] ) -> CompoundSelect[Unpack[_Ts]]: return CompoundSelect(_CompoundSelectKeyword.INTERSECT_ALL, *selects) def _scalar_type(self) -> TypeEngine[Any]: return self.selects[0]._scalar_type() def self_group( self, against: Optional[OperatorType] = None ) -> GroupedElement: return SelectStatementGrouping(self) def is_derived_from(self, fromclause: Optional[FromClause]) -> bool: for s in self.selects: if s.is_derived_from(fromclause): return True return False def set_label_style(self, style: SelectLabelStyle) -> Self: if self._label_style is not style: self = self._generate() select_0 = self.selects[0].set_label_style(style) self.selects = [select_0] + self.selects[1:] return self def _ensure_disambiguated_names(self) -> Self: new_select = self.selects[0]._ensure_disambiguated_names() if new_select is not self.selects[0]: self = self._generate() self.selects = [new_select] + self.selects[1:] return self def 
_generate_fromclause_column_proxies( self, subquery: FromClause, columns: ColumnCollection[str, KeyedColumnElement[Any]], primary_key: ColumnSet, foreign_keys: Set[KeyedColumnElement[Any]], *, proxy_compound_columns: Optional[ Iterable[Sequence[ColumnElement[Any]]] ] = None, ) -> None: # this is a slightly hacky thing - the union exports a # column that resembles just that of the *first* selectable. # to get at a "composite" column, particularly foreign keys, # you have to dig through the proxies collection which we # generate below. select_0 = self.selects[0] if self._label_style is not LABEL_STYLE_DEFAULT: select_0 = select_0.set_label_style(self._label_style) # hand-construct the "_proxies" collection to include all # derived columns place a 'weight' annotation corresponding # to how low in the list of select()s the column occurs, so # that the corresponding_column() operation can resolve # conflicts extra_col_iterator = zip( *[ [ c._annotate(dd) for c in stmt._all_selected_columns if is_column_element(c) ] for dd, stmt in [ ({"weight": i + 1}, stmt) for i, stmt in enumerate(self.selects) ] ] ) # the incoming proxy_compound_columns can be present also if this is # a compound embedded in a compound. it's probably more appropriate # that we generate new weights local to this nested compound, though # i haven't tried to think what it means for compound nested in # compound select_0._generate_fromclause_column_proxies( subquery, columns, proxy_compound_columns=extra_col_iterator, primary_key=primary_key, foreign_keys=foreign_keys, ) def _refresh_for_new_column(self, column: ColumnElement[Any]) -> None: super()._refresh_for_new_column(column) for select in self.selects: select._refresh_for_new_column(column) @util.ro_non_memoized_property def _all_selected_columns(self) -> _SelectIterable: return self.selects[0]._all_selected_columns @util.ro_non_memoized_property def selected_columns( self, ) -> ColumnCollection[str, ColumnElement[Any]]: """A :class:`_expression.ColumnCollection` representing the columns that this SELECT statement or similar construct returns in its result set, not including :class:`_sql.TextClause` constructs. For a :class:`_expression.CompoundSelect`, the :attr:`_expression.CompoundSelect.selected_columns` attribute returns the selected columns of the first SELECT statement contained within the series of statements within the set operation. .. seealso:: :attr:`_sql.Select.selected_columns` .. versionadded:: 1.4 """ return self.selects[0].selected_columns # backwards compat for elem in _CompoundSelectKeyword: setattr(CompoundSelect, elem.name, elem) @CompileState.plugin_for("default", "select")
CompoundSelect
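In user code this class is reached through the module-level constructors rather than instantiated directly; a small sketch:

    from sqlalchemy import column, select, table, union

    t = table("t", column("id"))
    u = table("u", column("id"))

    stmt = union(select(t.c.id), select(u.c.id)).order_by("id")
    # Renders roughly: SELECT t.id FROM t UNION SELECT u.id FROM u ORDER BY id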
python
spack__spack
lib/spack/spack/solver/requirements.py
{ "start": 796, "end": 1020 }
class ____(enum.Enum): """Origin of a requirement""" REQUIRE_YAML = enum.auto() PREFER_YAML = enum.auto() CONFLICT_YAML = enum.auto() DIRECTIVE = enum.auto() INPUT_SPECS = enum.auto()
RequirementOrigin
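Standard `enum` access applies; a one-liner sketch:

    origin = RequirementOrigin.REQUIRE_YAML
    origin.name                                   # 'REQUIRE_YAML'
    origin is RequirementOrigin["REQUIRE_YAML"]   # True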
python
networkx__networkx
networkx/readwrite/tests/test_p2g.py
{ "start": 129, "end": 1319 }
class ____: @classmethod def setup_class(cls): cls.G = nx.Graph(name="test") e = [("a", "b"), ("b", "c"), ("c", "d"), ("d", "e"), ("e", "f"), ("a", "f")] cls.G.add_edges_from(e) cls.G.add_node("g") cls.DG = nx.DiGraph(cls.G) def test_read_p2g(self): s = b"""\ name 3 4 a 1 2 b c 0 2 """ bytesIO = io.BytesIO(s) DG = read_p2g(bytesIO) assert DG.name == "name" assert sorted(DG) == ["a", "b", "c"] assert edges_equal( DG.edges(), [("a", "c"), ("a", "b"), ("c", "a"), ("c", "c")], directed=True ) def test_write_p2g(self): s = b"""foo 3 2 1 1 2 2 3 """ fh = io.BytesIO() G = nx.DiGraph() G.name = "foo" G.add_edges_from([(1, 2), (2, 3)]) write_p2g(G, fh) fh.seek(0) r = fh.read() assert r == s def test_write_read_p2g(self): fh = io.BytesIO() G = nx.DiGraph() G.name = "foo" G.add_edges_from([("a", "b"), ("b", "c")]) write_p2g(G, fh) fh.seek(0) H = read_p2g(fh) assert edges_equal(G.edges(), H.edges(), directed=True)
TestP2G
python
huggingface__transformers
tests/models/blip_2/test_modeling_blip_2.py
{ "start": 49172, "end": 52347 }
class ____(ModelTesterMixin, unittest.TestCase): all_model_classes = (Blip2VisionModelWithProjection,) if is_torch_available() else () test_resize_embeddings = False def setUp(self): self.model_tester = Blip2VisionModelWithProjectionTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Training is not yet supported") def test_training(self): pass @unittest.skip(reason="Training is not yet supported") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="Training is not yet supported") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="Training is not yet supported") def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Blip2VisionModelWithProjection does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Blip2VisionModelWithProjection does not support input and output embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) @slow @require_torch_accelerator def test_model_from_pretrained(self): model_name = "Salesforce/blip2-itm-vit-g" model = Blip2VisionModelWithProjection.from_pretrained(model_name) self.assertIsNotNone(model) self.assertTrue(hasattr(model, "vision_projection")) _, pixel_values = self.model_tester.prepare_config_and_inputs() model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(pixel_values=pixel_values) self.assertEqual( outputs.image_embeds.shape, ( self.model_tester.vision_model_tester.batch_size, model.config.num_query_tokens, model.config.image_text_hidden_size, ), )
Blip2VisionModelWithProjectionTest
python
GoogleCloudPlatform__python-docs-samples
appengine/standard/migration/ndb/overview/main.py
{ "start": 808, "end": 2510 }
class ____(ndb.Model): """Models an individual Guestbook entry with content and date.""" content = ndb.StringProperty() date = ndb.DateTimeProperty(auto_now_add=True) with client.context(): @classmethod def query_book(cls, ancestor_key): return cls.query(ancestor=ancestor_key).order(-cls.date) @app.route("/", methods=["GET"]) def display_guestbook(): guestbook_name = request.args.get("guestbook_name", "") print("GET guestbook name is {}".format(guestbook_name)) with client.context(): ancestor_key = ndb.Key("Book", guestbook_name or "*notitle*") greetings = Greeting.query_book(ancestor_key).fetch(20) greeting_blockquotes = [greeting.content for greeting in greetings] return render_template( "index.html", greeting_blockquotes=greeting_blockquotes, guestbook_name=guestbook_name, ) @app.route("/sign", methods=["POST"]) def update_guestbook(): # We set the parent key on each 'Greeting' to ensure each guestbook's # greetings are in the same entity group. guestbook_name = request.form.get("guestbook_name", "") print("Guestbook name from the form: {}".format(guestbook_name)) with client.context(): print("Guestbook name from the URL: {}".format(guestbook_name)) greeting = Greeting( parent=ndb.Key("Book", guestbook_name or "*notitle*"), content=request.form.get("content", None), ) greeting.put() return redirect("/?" + urlencode({"guestbook_name": guestbook_name})) if __name__ == "__main__": # This is used when running locally. app.run(host="127.0.0.1", port=8080, debug=True)
Greeting
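A minimal usage sketch of the ancestor-query pattern above; it assumes the Greeting model is in scope and a configured google.cloud.ndb client (credentials or the Datastore emulator), and the "my-book" key name is illustrative.

from google.cloud import ndb

client = ndb.Client()  # needs application credentials or the Datastore emulator
with client.context():
    ancestor_key = ndb.Key("Book", "my-book")  # illustrative guestbook key
    latest = Greeting.query_book(ancestor_key).fetch(5)
    for greeting in latest:
        print(greeting.content, greeting.date)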
python
milvus-io__pymilvus
tests/test_grpc_handler.py
{ "start": 422, "end": 5327 }
class ____: @pytest.mark.parametrize("has", [True, False]) def test_has_collection_no_error(self, channel, client_thread, has): handler = GrpcHandler(channel=channel) has_collection_future = client_thread.submit(handler.has_collection, "fake") (invocation_metadata, request, rpc) = channel.take_unary_unary( descriptor.methods_by_name["DescribeCollection"] ) rpc.send_initial_metadata(()) reason = "" if has else "can't find collection" code = 0 if has else 100 expected_result = milvus_pb2.DescribeCollectionResponse( status=common_pb2.Status(code=code, reason=reason), ) rpc.terminate(expected_result, (), grpc.StatusCode.OK, "") got_result = has_collection_future.result() assert got_result is has def test_has_collection_error(self, channel, client_thread): handler = GrpcHandler(channel=channel) has_collection_future = client_thread.submit(handler.has_collection, "fake") (invocation_metadata, request, rpc) = channel.take_unary_unary( descriptor.methods_by_name["DescribeCollection"] ) rpc.send_initial_metadata(()) expected_result = milvus_pb2.DescribeCollectionResponse( status=common_pb2.Status(code=1, reason="other reason"), ) rpc.terminate(expected_result, (), grpc.StatusCode.OK, "") with pytest.raises(MilvusException): has_collection_future.result() def test_has_collection_Unavailable_exception(self, channel, client_thread): handler = GrpcHandler(channel=channel) channel.close() # Retry is unable to test has_collection_future = client_thread.submit(handler.has_collection, "fake", timeout=0) (invocation_metadata, request, rpc) = channel.take_unary_unary( descriptor.methods_by_name["DescribeCollection"] ) rpc.send_initial_metadata(()) expected_result = milvus_pb2.DescribeCollectionResponse() rpc.terminate(expected_result, (), grpc.StatusCode.UNAVAILABLE, "server Unavailable") with pytest.raises(MilvusException): has_collection_future.result() def test_get_server_version_error(self, channel, client_thread): handler = GrpcHandler(channel=channel) get_version_future = client_thread.submit(handler.get_server_version) (invocation_metadata, request, rpc) = channel.take_unary_unary( descriptor.methods_by_name["GetVersion"] ) rpc.send_initial_metadata(()) expected_result = milvus_pb2.GetVersionResponse( status=common_pb2.Status(code=1, reason="unexpected error"), ) rpc.terminate(expected_result, (), grpc.StatusCode.OK, "") with pytest.raises(MilvusException): get_version_future.result() def test_get_server_version(self, channel, client_thread): version = "2.2.0" handler = GrpcHandler(channel=channel) get_version_future = client_thread.submit(handler.get_server_version) (invocation_metadata, request, rpc) = channel.take_unary_unary( descriptor.methods_by_name["GetVersion"] ) rpc.send_initial_metadata(()) expected_result = milvus_pb2.GetVersionResponse( status=common_pb2.Status(code=0), version=version, ) rpc.terminate(expected_result, (), grpc.StatusCode.OK, "") got_result = get_version_future.result() assert got_result == version @pytest.mark.parametrize("_async", [True]) def test_flush_all(self, channel, client_thread, _async): handler = GrpcHandler(channel=channel) flush_all_future = client_thread.submit(handler.flush_all, _async=_async, timeout=10) (invocation_metadata, request, rpc) = channel.take_unary_unary( descriptor.methods_by_name["FlushAll"] ) rpc.send_initial_metadata(()) expected_result = milvus_pb2.FlushAllResponse( status=common_pb2.Status(code=0), flush_all_ts=100, ) rpc.terminate(expected_result, (), grpc.StatusCode.OK, "") assert flush_all_future is not None def 
test_get_flush_all_state(self, channel, client_thread): handler = GrpcHandler(channel=channel) flushed = client_thread.submit(handler.get_flush_all_state, flush_all_ts=100) (invocation_metadata, request, rpc) = channel.take_unary_unary( descriptor.methods_by_name["GetFlushAllState"] ) rpc.send_initial_metadata(()) expected_result = milvus_pb2.GetFlushStateResponse( status=common_pb2.Status(code=0), flushed=True, ) rpc.terminate(expected_result, (), grpc.StatusCode.OK, "") assert flushed.result() is True
TestGrpcHandler
python
astropy__astropy
astropy/coordinates/polarization.py
{ "start": 2055, "end": 4550 }
class ____(MixinInfo): # The attributes containing actual information. _represent_as_dict_attrs = {"value"} # Since there is only one attribute, use a column with the name to represent it # (rather than as name.value) _represent_as_dict_primary_data = "value" # Attributes that should be presented as positional arguments to # the class initializer (which takes "stokes" as an argument, not "value"). _construct_from_dict_args = ("value",) @property def unit(self): return None @property def dtype(self): return self._parent._data.dtype @staticmethod def default_format(val): return f"{val.symbol}" def new_like(self, cols, length, metadata_conflicts="warn", name=None): """ Return a new StokesCoord instance which is consistent with the input ``cols`` and has ``length`` rows. This is intended for creating an empty column object whose elements can be set in-place for table operations like join or vstack. Parameters ---------- cols : list List of input columns length : int Length of the output column object metadata_conflicts : str ('warn'|'error'|'silent') How to handle metadata conflicts name : str Output column name Returns ------- col : `~astropy.coordinates.StokesCoord` (or subclass) Empty instance of this class consistent with ``cols`` """ # Get merged info attributes like shape, dtype, format, description, etc. attrs = self.merge_cols_attributes( cols, metadata_conflicts, name, ("meta", "format", "description") ) # Make an empty StokesCoord. shape = (length,) + attrs.pop("shape") data = np.zeros(shape=shape, dtype=attrs.pop("dtype")) # Get arguments needed to reconstruct class out = self._construct_from_dict({"value": data}) # Set remaining info attributes for attr, value in attrs.items(): setattr(out.info, attr, value) return out def get_sortable_arrays(self): """ Return a list of arrays which can be lexically sorted to represent the order of the parent column. For StokesCoord this is just the underlying values. Returns ------- arrays : list of ndarray """ return [self._parent._data]
StokesCoordInfo
python
huggingface__transformers
src/transformers/models/sam2_video/modeling_sam2_video.py
{ "start": 15574, "end": 18274 }
class ____(nn.Module): """ This is a more standard version of the position embedding, very similar to the one used by the Attention is all you need paper, generalized to work on images. """ def __init__( self, num_pos_feats: int = 64, temperature: int = 10000, normalize: bool = False, scale: Optional[float] = None ): super().__init__() if scale is not None and normalize is False: raise ValueError("normalize should be True if scale is passed") self.num_pos_feats = num_pos_feats self.temperature = temperature self.normalize = normalize self.scale = 2 * math.pi if scale is None else scale @compile_compatible_method_lru_cache(maxsize=1) def forward( self, shape: torch.Size, device: Union[torch.device, str], dtype: torch.dtype, mask: Optional[Tensor] = None, ) -> Tensor: if mask is None: mask = torch.zeros((shape[0], shape[2], shape[3]), device=device, dtype=torch.bool) not_mask = (~mask).to(dtype) y_embed = not_mask.cumsum(1) x_embed = not_mask.cumsum(2) if self.normalize: eps = 1e-6 y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale dim_t = torch.arange(self.num_pos_feats, dtype=torch.int64, device=device).to(dtype) dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.num_pos_feats) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) return pos def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights
Sam2VideoPositionEmbeddingSine
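For intuition, here is a self-contained sketch of the same sine position-embedding construction outside the module. The function name is illustrative; it mirrors the mask=None path above, where the cumulative sums reduce to plain 1..h and 1..w coordinate grids.

import torch

def sine_position_embedding(h, w, num_pos_feats=64, temperature=10000.0):
    # With no mask, cumsum over ones reduces to plain 1..h / 1..w grids.
    y_embed = torch.arange(1, h + 1, dtype=torch.float32).view(h, 1).expand(h, w)
    x_embed = torch.arange(1, w + 1, dtype=torch.float32).view(1, w).expand(h, w)
    dim_t = torch.arange(num_pos_feats, dtype=torch.float32)
    dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats)
    pos_x = x_embed[..., None] / dim_t
    pos_y = y_embed[..., None] / dim_t
    # Interleave sin on even channels and cos on odd channels, per axis.
    pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=-1).flatten(-2)
    pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), dim=-1).flatten(-2)
    return torch.cat((pos_y, pos_x), dim=-1).permute(2, 0, 1)  # (2 * num_pos_feats, h, w)

print(sine_position_embedding(8, 8).shape)  # torch.Size([128, 8, 8])

The module version adds a batch dimension and an lru_cache so the embedding is computed once per feature-map shape.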
python
jazzband__django-simple-history
simple_history/tests/tests/test_models.py
{ "start": 66636, "end": 67974 }
class ____(TestCase): def setUp(self): pre_create_historical_record.connect( add_static_history_ip_address, sender=HistoricalPollWithHistoricalIPAddress, dispatch_uid="add_static_history_ip_address", ) def tearDown(self): pre_create_historical_record.disconnect( add_static_history_ip_address, sender=HistoricalPollWithHistoricalIPAddress, dispatch_uid="add_static_history_ip_address", ) def test_extra_ip_address_field_populated_on_save(self): poll = PollWithHistoricalIPAddress.objects.create( question="Will it blend?", pub_date=today ) poll_history = poll.history.first() self.assertEqual("192.168.0.1", poll_history.ip_address) def test_extra_ip_address_field_not_present_on_poll(self): poll = PollWithHistoricalIPAddress.objects.create( question="Will it blend?", pub_date=today ) with self.assertRaises(AttributeError): poll.ip_address def add_dynamic_history_ip_address(sender, **kwargs): history_instance = kwargs["history_instance"] history_instance.ip_address = HistoricalRecords.context.request.META["REMOTE_ADDR"] @override_settings(**middleware_override_settings)
ExtraFieldsStaticIPAddressTestCase
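The static receiver connected in setUp is not shown in this span; given the asserted address and the dynamic variant at the bottom of the snippet, it presumably looks like this sketch:

def add_static_history_ip_address(sender, **kwargs):
    # The signal hands over the historical instance before it is saved,
    # so extra fields can be populated here.
    history_instance = kwargs["history_instance"]
    history_instance.ip_address = "192.168.0.1"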
python
ray-project__ray
python/ray/tune/experiment/trial.py
{ "start": 6992, "end": 38610 }
class ____: """A trial object holds the state for one model training run. Trials are themselves managed by the TrialRunner class, which implements the event loop for submitting trial runs to a Ray cluster. Trials start in the PENDING state, and transition to RUNNING once started. On error, it transitions to ERROR, otherwise TERMINATED on success. There are resources allocated to each trial. These should be specified using ``PlacementGroupFactory``. Attributes: trainable_name: Name of the trainable object to be executed. config: Provided configuration dictionary with evaluated params. trial_id: Unique identifier for the trial. path: Path where results for this trial are stored. Can be on the local node or on cloud storage. local_path: Path on the local disk where results are stored. remote_path: Path on cloud storage where results are stored, or None if not set. relative_logdir: Directory of the trial relative to its experiment directory. evaluated_params: Evaluated parameters by search algorithm, experiment_tag: Identifying trial name to show in the console status: One of PENDING, RUNNING, PAUSED, TERMINATED, ERROR/ error_file: Path to the errors that this trial has raised. """ _nonjson_fields = [ "results", "extra_arg", "placement_group_factory", "_resources", "_default_placement_group_factory", ] PENDING = "PENDING" RUNNING = "RUNNING" PAUSED = "PAUSED" TERMINATED = "TERMINATED" ERROR = "ERROR" def __init__( self, trainable_name: str, *, config: Optional[Dict] = None, trial_id: Optional[str] = None, storage: Optional[StorageContext] = None, evaluated_params: Optional[Dict] = None, experiment_tag: str = "", placement_group_factory: Optional[PlacementGroupFactory] = None, stopping_criterion: Optional[Dict[str, float]] = None, checkpoint_config: Optional[CheckpointConfig] = None, export_formats: Optional[List[str]] = None, restore_path: Optional[str] = None, trial_name_creator: Optional[Callable[["Trial"], str]] = None, trial_dirname_creator: Optional[Callable[["Trial"], str]] = None, log_to_file: Union[Optional[str], Tuple[Optional[str], Optional[str]]] = None, max_failures: int = 0, stub: bool = False, _setup_default_resource: bool = True, ): """Initialize a new trial. The args here take the same meaning as the command line flags defined in ray.tune.experiment.config_parser. Args: _setup_default_resource: Whether to set up default resources. When initializing trials from checkpoints, this field is set to false, so that setting up default resources can be delayed till after ``trial.config`` is loaded from checkpoints. """ # If this is set, trainables are not validated or looked up. # This can be used e.g. to initialize Trial objects from checkpoints # without loading the trainable first. self.stub = stub if not self.stub: validate_trainable(trainable_name) # Trial config self.trainable_name = trainable_name self.trial_id = Trial.generate_id() if trial_id is None else trial_id self.temporary_state = _TemporaryTrialState() self.run_metadata = _TrainingRunMetadata() # Create a copy, since `init_local_path` updates the context with the # generated trial dirname. self.storage = copy.copy(storage) self.config = config or {} # Save a copy of the original unresolved config so that we can swap # out and update any reference config values after restoration. self.__unresolved_config = self.config # Parameters that Tune varies across searches. 
self.evaluated_params = evaluated_params or {} self.experiment_tag = experiment_tag self.stopping_criterion = stopping_criterion or {} self._setup_default_resource = _setup_default_resource if placement_group_factory and not isinstance( placement_group_factory, PlacementGroupFactory ): placement_group_factory = resource_dict_to_pg_factory( placement_group_factory ) self._default_placement_group_factory = placement_group_factory # Will be created in create_placement_group_factory(). self.placement_group_factory = None self.log_to_file = log_to_file # Make sure `stdout_file, stderr_file = Trial.log_to_file` works if ( not self.log_to_file or not isinstance(self.log_to_file, Sequence) or not len(self.log_to_file) == 2 ): self.log_to_file = (None, None) self.max_failures = max_failures # Local trial state that is updated during the run self._default_result_or_future: Union[ray.ObjectRef, dict, None] = None self.export_formats = export_formats self.status = Trial.PENDING self.relative_logdir = None self.trial_name_creator = trial_name_creator self.trial_dirname_creator = trial_dirname_creator self.custom_trial_name = None self.custom_dirname = None # Checkpoint config checkpoint_config = checkpoint_config or CheckpointConfig() self.run_metadata.checkpoint_manager = _CheckpointManager( checkpoint_config=checkpoint_config ) # Restoration fields self.restore_path = restore_path self._restore_checkpoint_result: Optional[_TrainingResult] = None if restore_path: # tune.run(restore) passes in a path without metrics. self._restore_checkpoint_result = _TrainingResult( checkpoint=Checkpoint.from_directory(restore_path), metrics={} ) if trial_name_creator: self.custom_trial_name = trial_name_creator(self) if trial_dirname_creator: self.custom_dirname = trial_dirname_creator(self) if os.path.sep in self.custom_dirname: raise ValueError( f"Trial dirname must not contain '/'. Got {self.custom_dirname}" ) self._state_json = None def create_placement_group_factory(self): """Compute placement group factory if needed. Note: this must be called after all the placeholders in self.config are resolved. """ trainable_cls = self.get_trainable_cls() if not trainable_cls or not self._setup_default_resource: # Create placement group factory using default resources. self.placement_group_factory = ( self._default_placement_group_factory or resource_dict_to_pg_factory() ) return default_resources = trainable_cls.default_resource_request(self.config) # If Trainable returns resources, do not allow manual override via # `resources_per_trial` by the user. if default_resources and self._default_placement_group_factory: raise TuneError( "Resources for {} have been automatically set to {} " "by its `default_resource_request()` method. Please " "clear the `resources_per_trial` option.".format( trainable_cls, default_resources ) ) if default_resources and not isinstance( default_resources, PlacementGroupFactory ): default_resources = resource_dict_to_pg_factory(default_resources) self.placement_group_factory = ( # default_resource_request default_resources # resources_per_trial or self._default_placement_group_factory # cpu=1 or resource_dict_to_pg_factory() ) def _get_default_result_or_future(self) -> Optional[dict]: """Calls ray.get on self._default_result_or_future and assigns back. Returns None in case of exceptions. Will also set the trial location if runner is set. 
""" if self._default_result_or_future and isinstance( self._default_result_or_future, ray.ObjectRef ): try: self._default_result_or_future = ray.get(self._default_result_or_future) except RayActorError: # error during initialization self._default_result_or_future = None if self._default_result_or_future and self.temporary_state.ray_actor: self.set_location( _Location( self._default_result_or_future.get(NODE_IP), self._default_result_or_future.get(PID), ) ) return self._default_result_or_future def resolve_config_placeholders(self, placeholder_resolvers: Dict[Tuple, Any]): from ray.tune.impl.placeholder import resolve_placeholders # Make a copy of the unresolved config before resolve it. self.config = copy.deepcopy(self.__unresolved_config) resolve_placeholders(self.config, placeholder_resolvers) @property def last_result(self) -> dict: # The logic in here is as follows: # 1. If the trial has reported at least once, last_result would have # been set and therefore would not be empty. We can just return it. # 2. If the trial has not reported at least once but we have the # future for the default results dict, (obtained through # Trainable.get_auto_filled_metrics), we get that future # and return it. # 3. In the worst case where we have nothing, we just set the # trial_id and return that. result = self.run_metadata.last_result if not {k for k in result if k != TRIAL_ID}: self._get_default_result_or_future() result = self._default_result_or_future or result result.setdefault(TRIAL_ID, self.trial_id) return result @property def metric_analysis(self): return self.run_metadata.metric_analysis @property def metric_n_steps(self): return self.run_metadata.metric_n_steps def get_ray_actor_ip(self) -> Optional[str]: if self.temporary_state.location.hostname: return self.temporary_state.location.hostname if not self.temporary_state.ray_actor: return None hostname, pid = ray.get( self.temporary_state.ray_actor.get_current_ip_pid.remote() ) self.temporary_state.location = _Location(hostname, pid) return self.temporary_state.location.hostname @property @Deprecated("Replaced by `local_experiment_path`") def local_dir(self): return self.local_experiment_path @property def experiment_dir_name(self): return self.storage.experiment_dir_name @property def remote_experiment_path(self) -> str: return self.storage.experiment_fs_path @property def local_experiment_path(self) -> str: return self.storage.experiment_driver_staging_path @property @Deprecated("Replaced by `local_path`") def logdir(self) -> Optional[str]: # TODO(justinvyu): [Deprecated] Remove in 2.11. raise DeprecationWarning("Use `local_path` instead of `logdir`.") @property def local_path(self) -> Optional[str]: return self.storage.trial_driver_staging_path @property def path(self) -> Optional[str]: return self.storage.trial_fs_path @property def has_reported_at_least_once(self) -> bool: return bool(self.run_metadata.last_result) @property def node_ip(self): return self.temporary_state.location.hostname @property def checkpoint_at_end(self): config = self.run_metadata.checkpoint_manager.checkpoint_config return config.checkpoint_at_end @property def checkpoint_freq(self): config = self.run_metadata.checkpoint_manager.checkpoint_config return config.checkpoint_frequency @property def latest_checkpoint_result(self) -> Optional[_TrainingResult]: # NOTE: Fallback to the checkpoint passed in from `tune.run(restore)` # if the trial hasn't saved any checkpoints itself yet. 
return ( self.run_metadata.checkpoint_manager.latest_checkpoint_result or self._restore_checkpoint_result ) @property def checkpoint(self) -> Optional[Checkpoint]: """Returns the most recent checkpoint if one has been saved.""" return ( self.latest_checkpoint_result.checkpoint if self.latest_checkpoint_result else None ) @classmethod def generate_id(cls): return str(uuid.uuid4().hex)[:8] def reset(self) -> "Trial": # If there is `default_resource_request` associated with the trainable, # clear `resources` and `placement_group_factory`. # This is mainly relevant for RLlib tuning jobs, where we save users # of the trouble to specify the resources themselves by having some # default resources for popular RLlib algorithms. trainable_cls = self.get_trainable_cls() clear_resources = trainable_cls and trainable_cls.default_resource_request( self.config ) placement_group_factory = ( self.placement_group_factory if not clear_resources else None ) checkpoint_config = self.run_metadata.checkpoint_manager.checkpoint_config return Trial( self.trainable_name, config=self.config, trial_id=None, evaluated_params=self.evaluated_params, experiment_tag=self.experiment_tag, placement_group_factory=placement_group_factory, stopping_criterion=self.stopping_criterion, checkpoint_config=checkpoint_config, export_formats=self.export_formats, restore_path=self.restore_path, trial_name_creator=self.trial_name_creator, trial_dirname_creator=self.trial_dirname_creator, log_to_file=self.log_to_file, max_failures=self.max_failures, storage=self.storage, ) @Deprecated("Replaced by `init_local_path()`") def init_logdir(self): # TODO(justinvyu): [Deprecated] Remove in 2.11. raise DeprecationWarning("Use `init_local_path` instead of `init_logdir`.") def init_local_path(self): """Init logdir.""" if not self.relative_logdir: self.relative_logdir = _create_unique_logdir_name( str(self.local_experiment_path), self._generate_dirname() ) # Populate the storage context with the trial dir name we just generated. self.storage.trial_dir_name = self.relative_logdir assert self.local_path logdir_path = Path(self.local_path) max_path_length = _get_max_path_length() if len(str(logdir_path)) >= max_path_length: logger.warning( f"The path to the trial log directory is too long " f"(max length: {max_path_length}. " f"Consider using `trial_dirname_creator` to shorten the path. " f"Path: {logdir_path}" ) logdir_path.mkdir(parents=True, exist_ok=True) self.invalidate_json_state() def update_resources(self, resources: Union[dict, PlacementGroupFactory]): """EXPERIMENTAL: Updates the resource requirements. Should only be called when the trial is not running. Raises: ValueError: if trial status is running. 
""" if self.status is Trial.RUNNING: raise ValueError("Cannot update resources while Trial is running.") placement_group_factory = resources if isinstance(resources, dict): placement_group_factory = resource_dict_to_pg_factory(resources) self.placement_group_factory = placement_group_factory self.invalidate_json_state() def set_ray_actor(self, ray_actor): self.temporary_state.ray_actor = ray_actor if ray_actor: # Do not block here, the result will be gotten when last_result # property is accessed self._default_result_or_future = ray_actor.get_auto_filled_metrics.remote( debug_metrics_only=True ) def set_location(self, location): """Sets the location of the trial.""" self.temporary_state.location = location def set_status(self, status): """Sets the status of the trial.""" self.status = status if status == Trial.RUNNING: if self.run_metadata.start_time is None: self.run_metadata.start_time = time.time() self.invalidate_json_state() def set_config(self, config): self.config = config self.invalidate_json_state() def set_experiment_tag(self, experiment_tag): self.experiment_tag = experiment_tag self.invalidate_json_state() def set_storage(self, new_storage: StorageContext): """Updates the storage context of the trial. If the `storage_path` or `experiment_dir_name` has changed, then this setter also updates the paths of all checkpoints tracked by the checkpoint manager. This enables restoration from a checkpoint if the user moves the directory. """ original_storage = self.storage checkpoint_manager = self.run_metadata.checkpoint_manager for checkpoint_result in checkpoint_manager.best_checkpoint_results: checkpoint_result.checkpoint = Checkpoint( path=checkpoint_result.checkpoint.path.replace( original_storage.trial_fs_path, new_storage.trial_fs_path, 1 ), filesystem=new_storage.storage_filesystem, ) latest_checkpoint_result = checkpoint_manager.latest_checkpoint_result if latest_checkpoint_result: latest_checkpoint_result.checkpoint = Checkpoint( path=latest_checkpoint_result.checkpoint.path.replace( original_storage.trial_fs_path, new_storage.trial_fs_path, 1 ), filesystem=new_storage.storage_filesystem, ) self.storage = new_storage self.invalidate_json_state() @property def num_failures(self): return self.run_metadata.num_failures @property def num_failures_after_restore(self): return self.run_metadata.num_failures_after_restore @property def error_file(self): if not self.local_path or not self.run_metadata.error_filename: return None return Path(self.local_path, self.run_metadata.error_filename).as_posix() @property def pickled_error_file(self): if not self.local_path or not self.run_metadata.pickled_error_filename: return None return Path( self.local_path, self.run_metadata.pickled_error_filename ).as_posix() def get_pickled_error(self) -> Optional[Exception]: """Returns the pickled error object if it exists in storage. This is a pickled version of the latest error that the trial encountered. """ error_filename = self.run_metadata.pickled_error_filename if error_filename is None: return None fs = self.storage.storage_filesystem pickled_error_fs_path = Path( self.storage.trial_fs_path, error_filename ).as_posix() if _exists_at_fs_path(fs=fs, fs_path=pickled_error_fs_path): with fs.open_input_stream(pickled_error_fs_path) as f: return cloudpickle.loads(f.readall()) return None def get_error(self) -> Optional[TuneError]: """Returns the error text file trace as a TuneError object if it exists in storage. 
This is a text trace of the latest error that the trial encountered, which is used in the case that the error is not picklable. """ error_filename = self.run_metadata.error_filename if error_filename is None: return None fs = self.storage.storage_filesystem txt_error_fs_path = Path(self.storage.trial_fs_path, error_filename).as_posix() if _exists_at_fs_path(fs=fs, fs_path=txt_error_fs_path): with fs.open_input_stream(txt_error_fs_path) as f: return f.readall().decode() return None def _handle_restore_error(self, exc: Exception): # For Restoration errors, we only increment the restore failure count # if the number of failures exceeds the restore retry limit. if self.temporary_state.num_restore_failures >= int( os.environ.get("TUNE_RESTORE_RETRY_NUM", 0) ): self.run_metadata.num_failures += 1 else: self.temporary_state.num_restore_failures += 1 def _handle_ray_actor_error(self, exc: RayActorError): count_preemption_errors = bool( int(os.environ.get(RAY_TRAIN_COUNT_PREEMPTION_AS_FAILURE, "0")) ) if not exc.preempted or count_preemption_errors: # Only count non-preempted actor errors as failures. self.run_metadata.num_failures += 1 def _handle_ray_task_error(self, exc: RayTaskError): cause = exc.as_instanceof_cause() if isinstance(cause, RayActorError): # Handle the RayActorError directly (ex: Ray Train worker actor errors) return self._handle_ray_actor_error(cause) # Increment failures for all user errors (which get raised as RayTaskError) self.run_metadata.num_failures += 1 def handle_error( self, exc: Optional[Union[TuneError, RayTaskError, RayActorError]] = None ): if self.is_restoring: self._handle_restore_error(exc) elif isinstance(exc, RayActorError): self._handle_ray_actor_error(exc) elif isinstance(exc, RayTaskError): self._handle_ray_task_error(exc) else: self.run_metadata.num_failures += 1 if self.local_path: self.run_metadata.error_filename = EXPR_ERROR_FILE if isinstance(exc, (RayTaskError, RayActorError)): # Piping through the actual error to result grid. self.run_metadata.pickled_error_filename = EXPR_ERROR_PICKLE_FILE with open(self.pickled_error_file, "wb") as f: cloudpickle.dump(exc, f) with open(self.error_file, "a+") as f: f.write( "Failure # {} (occurred at {})\n".format( self.run_metadata.num_failures, date_str() ) ) f.write(str(exc) + "\n") self.run_metadata.invalidate_cache() def should_stop(self, result): """Whether the given result meets this trial's stopping criteria.""" if result.get(DONE): return True for criterion, stop_value in self.stopping_criterion.items(): if isinstance(criterion, dict): raise ValueError( "Stopping criteria is now flattened by default. " "Use forward slashes to nest values `key1/key2/key3`." ) elif criterion not in result: if log_once("tune_trial_stop_criterion_not_found"): logger.warning( f"Stopping criterion '{criterion}' not found in result dict! " f"Available keys are {list(result.keys())}. If '{criterion}' is" " never reported, the run will continue until training is " "finished." ) elif result[criterion] >= stop_value: return True return False def should_checkpoint(self): """Whether this trial is due for checkpointing.""" result = self.last_result or {} if result.get(DONE) and self.checkpoint_at_end: return True return ( self.checkpoint_freq and result.get(TRAINING_ITERATION, 0) % self.checkpoint_freq == 0 ) def has_checkpoint(self) -> bool: return self.checkpoint is not None def on_checkpoint(self, checkpoint_result: _TrainingResult): """Hook for handling checkpoints taken by the Trainable. Args: checkpoint: Checkpoint taken. 
""" self.run_metadata.checkpoint_manager.register_checkpoint(checkpoint_result) # Update the checkpoint index to keep the checkpoint index in sync. # This index will get restored when the trial is restored and will # be passed to the Trainable as the starting checkpoint index. self.storage._update_checkpoint_index(checkpoint_result.metrics) self.invalidate_json_state() self.run_metadata.invalidate_cache() def on_restore(self): """Handles restoration completion.""" assert self.is_restoring self.run_metadata.last_result = self.temporary_state.restoring_from.metrics self.run_metadata.last_result.setdefault("config", self.config) self.temporary_state.restoring_from = None self.temporary_state.num_restore_failures = 0 def should_recover(self): """Returns whether the trial qualifies for retrying. `num_failures` should represent the number of times the trial has failed *up to the moment this method is called.* If we've failed 5 times and `max_failures=5`, then we should recover, since we only pass the limit on the 6th failure. Note this may return true even when there is no checkpoint, either because `self.checkpoint_freq` is `0` or because the trial failed before a checkpoint has been made. """ return ( self.run_metadata.num_failures <= self.max_failures or self.max_failures < 0 ) def update_last_result(self, result): if self.experiment_tag: result.update(experiment_tag=self.experiment_tag) self.set_location(_Location(result.get(NODE_IP), result.get(PID))) self.run_metadata.last_result = result self.run_metadata.last_result_time = time.time() metric_result = self.last_result.copy() for remove_metric in DEBUG_METRICS: metric_result.pop(remove_metric, None) for metric, value in flatten_dict(metric_result).items(): if isinstance(value, Number): self.run_metadata.update_metric( metric, value, step=result.get("training_iteration") ) def get_trainable_cls(self): if self.stub: return None return get_trainable_cls(self.trainable_name) def is_finished(self): return self.status in [Trial.ERROR, Trial.TERMINATED] @property def is_restoring(self): return self.temporary_state.restoring_from is not None @property def is_saving(self): return self.temporary_state.saving_to is not None def __repr__(self): return self._trainable_name(include_trial_id=True) def __str__(self): return self._trainable_name(include_trial_id=True) def _trainable_name(self, include_trial_id=False): """Combines ``env`` with ``trainable_name`` and ``trial_id``. Can be overridden with a custom string creator. """ if self.custom_trial_name: return self.custom_trial_name if "env" in self.config: env = self.config["env"] if isinstance(env, type): env = env.__name__ identifier = "{}_{}".format(self.trainable_name, env) else: identifier = self.trainable_name if include_trial_id: identifier += "_" + self.trial_id return identifier.replace("/", "_") def _generate_dirname(self): if self.custom_dirname: generated_dirname = self.custom_dirname else: MAX_LEN_IDENTIFIER = int(os.environ.get("TUNE_MAX_LEN_IDENTIFIER", "130")) generated_dirname = f"{str(self)}_{self.experiment_tag}" generated_dirname = generated_dirname[:MAX_LEN_IDENTIFIER] generated_dirname += f"_{date_str()}" # This is the file path used by rsync. ['/', '(', ')'] are not allowed. 
return re.sub("[/()]", "_", generated_dirname) def invalidate_json_state(self): self._state_json = None def get_json_state(self) -> Tuple[str, str]: if self._state_json is None: state = self.__getstate__() state.pop("run_metadata", None) self._state_json = json.dumps(state, indent=2, cls=TuneFunctionEncoder) runtime_metadata_json = self.run_metadata.get_json_state() return self._state_json, runtime_metadata_json @classmethod def from_json_state(cls, json_state: str, stub: bool = False) -> "Trial": state = json.loads(json_state, cls=TuneFunctionDecoder) new_trial = Trial( state["trainable_name"], stub=stub, _setup_default_resource=False, ) new_trial.__setstate__(state) return new_trial def restore_run_metadata(self, run_metadata: str): self.run_metadata = _TrainingRunMetadata.from_json_state(run_metadata) @classmethod def from_directory( cls, path: Union[str, os.PathLike], stub: bool = False ) -> "Trial": metadata_path = Path(path, TRIAL_STATE_FILENAME) if not metadata_path.exists(): raise FileNotFoundError( f"Can't restore trial from path: File `{metadata_path}` not found." ) json_state = metadata_path.read_text() return cls.from_json_state(json_state, stub=stub) def __getstate__(self): """Memento generator for Trial. Sets RUNNING trials to PENDING. Note this can only occur if the trial holds a PERSISTENT checkpoint. """ state = self.__dict__.copy() for key in self._nonjson_fields: state[key] = binary_to_hex(cloudpickle.dumps(state.get(key))) state.pop("temporary_state", None) state["_state_json"] = None state["_default_result_or_future"] = None return state def __setstate__(self, state): if state["status"] == Trial.RUNNING: state["status"] = Trial.PENDING for key in self._nonjson_fields: if key in state: state[key] = cloudpickle.loads(hex_to_binary(state[key])) # Ensure that stub doesn't get overriden stub = state.pop("stub", True) self.__dict__.update(state) self.stub = stub or getattr(self, "stub", False) if not self.stub: validate_trainable(self.trainable_name) self.temporary_state = _TemporaryTrialState() assert self.placement_group_factory
Trial
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/constructors.py
{ "start": 1282, "end": 1471 }
class ____(ParentWithInit): def __new__(cls, input): _test_sink(input) return object.__new__(cls) def test_new_thing(): c = ChildWithNew(_test_source())
ChildWithNew
python
google__python-fire
examples/widget/collector.py
{ "start": 708, "end": 1127 }
class ____(object): """A Collector has one Widget, but wants more.""" def __init__(self): self.widget = widget.Widget() self.desired_widget_count = 10 def collect_widgets(self): """Returns all the widgets the Collector wants.""" return [widget.Widget() for _ in range(self.desired_widget_count)] def main(): fire.Fire(Collector(), name='collector') if __name__ == '__main__': main()
Collector
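Since Fire exposes an instance's public attributes and methods as subcommands, the component above should be drivable from a shell roughly as sketched below (exact help/trace formatting is Fire's own):

# python collector.py                       -> inspects the Collector component
# python collector.py desired_widget_count  -> 10
# python collector.py collect_widgets       -> the list of 10 Widget objects
# python collector.py widget                -> drills into the contained Widget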
python
huggingface__transformers
src/transformers/models/altclip/modeling_altclip.py
{ "start": 40216, "end": 44481 }
class ____(AltCLIPPreTrainedModel): config: AltCLIPTextConfig # Copied from transformers.models.clap.modeling_clap.ClapTextModel.__init__ with ClapText->AltRoberta def __init__(self, config, add_pooling_layer=True): r""" add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer """ super().__init__(config) self.config = config self.embeddings = AltRobertaEmbeddings(config) self.encoder = AltRobertaEncoder(config) self.pooler = AltRobertaPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value @auto_docstring # Copied from transformers.models.clap.modeling_clap.ClapTextModel.forward def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, )
AltRobertaModel
python
getsentry__sentry
src/sentry/auth/providers/github/views.py
{ "start": 4539, "end": 4857 }
class ____(forms.Form): org = forms.ChoiceField(label="Organization") def __init__(self, org_list: list[dict[str, Any]], *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) set_field_choices(self.fields["org"], [(o["id"], o["login"]) for o in org_list])
SelectOrganizationForm
python
lazyprogrammer__machine_learning_examples
supervised_class2/rf_vs_bag2.py
{ "start": 1102, "end": 2854 }
class ____: def __init__(self, n_estimators): self.B = n_estimators def fit(self, X, Y, M=None): N, D = X.shape if M is None: M = int(np.sqrt(D)) self.models = [] self.features = [] for b in range(self.B): tree = DecisionTreeClassifier() # sample features features = np.random.choice(D, size=M, replace=False) # sample training samples idx = np.random.choice(N, size=N, replace=True) Xb = X[idx] Yb = Y[idx] tree.fit(Xb[:, features], Yb) self.features.append(features) self.models.append(tree) def predict(self, X): N = len(X) P = np.zeros(N) for features, tree in zip(self.features, self.models): P += tree.predict(X[:, features]) return np.round(P / self.B) def score(self, X, Y): P = self.predict(X) return np.mean(P == Y) T = 500 test_error_prf = np.empty(T) test_error_rf = np.empty(T) test_error_bag = np.empty(T) for num_trees in range(T): if num_trees == 0: test_error_prf[num_trees] = None test_error_rf[num_trees] = None test_error_bag[num_trees] = None else: rf = RandomForestClassifier(n_estimators=num_trees) rf.fit(Xtrain, Ytrain) test_error_rf[num_trees] = rf.score(Xtest, Ytest) bg = BaggedTreeClassifier(n_estimators=num_trees) bg.fit(Xtrain, Ytrain) test_error_bag[num_trees] = bg.score(Xtest, Ytest) prf = NotAsRandomForest(n_estimators=num_trees) prf.fit(Xtrain, Ytrain) test_error_prf[num_trees] = prf.score(Xtest, Ytest) if num_trees % 10 == 0: print("num_trees:", num_trees) plt.plot(test_error_rf, label='rf') plt.plot(test_error_prf, label='pseudo rf') plt.plot(test_error_bag, label='bag') plt.legend() plt.show()
NotAsRandomForest
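A small smoke test for the class above on synthetic data; it assumes NotAsRandomForest and its DecisionTreeClassifier import are in scope.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

X, Y = make_classification(n_samples=500, n_features=20, random_state=0)
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, random_state=0)

model = NotAsRandomForest(n_estimators=50)
model.fit(Xtrain, Ytrain)  # M defaults to sqrt(D) features per tree
print("accuracy:", model.score(Xtest, Ytest))

Note that despite the test_error_* names in the plotting loop, score() returns accuracy, so higher values are better.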
python
fastapi__sqlmodel
docs_src/tutorial/connect/select/tutorial001.py
{ "start": 254, "end": 2194 }
class ____(SQLModel, table=True): id: Optional[int] = Field(default=None, primary_key=True) name: str = Field(index=True) secret_name: str age: Optional[int] = Field(default=None, index=True) team_id: Optional[int] = Field(default=None, foreign_key="team.id") sqlite_file_name = "database.db" sqlite_url = f"sqlite:///{sqlite_file_name}" engine = create_engine(sqlite_url, echo=True) def create_db_and_tables(): SQLModel.metadata.create_all(engine) def create_heroes(): with Session(engine) as session: team_preventers = Team(name="Preventers", headquarters="Sharp Tower") team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar") session.add(team_preventers) session.add(team_z_force) session.commit() hero_deadpond = Hero( name="Deadpond", secret_name="Dive Wilson", team_id=team_z_force.id ) hero_rusty_man = Hero( name="Rusty-Man", secret_name="Tommy Sharp", age=48, team_id=team_preventers.id, ) hero_spider_boy = Hero(name="Spider-Boy", secret_name="Pedro Parqueador") session.add(hero_deadpond) session.add(hero_rusty_man) session.add(hero_spider_boy) session.commit() session.refresh(hero_deadpond) session.refresh(hero_rusty_man) session.refresh(hero_spider_boy) print("Created hero:", hero_deadpond) print("Created hero:", hero_rusty_man) print("Created hero:", hero_spider_boy) def select_heroes(): with Session(engine) as session: statement = select(Hero, Team).where(Hero.team_id == Team.id) results = session.exec(statement) for hero, team in results: print("Hero:", hero, "Team:", team) def main(): create_db_and_tables() create_heroes() select_heroes() if __name__ == "__main__": main()
Hero
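The WHERE-based join in select_heroes can equivalently be written with an explicit JOIN; a sketch assuming the Hero and Team models and the engine defined above:

from sqlmodel import Session, select

def select_heroes_with_join():
    with Session(engine) as session:
        # The ON clause is inferred from the team_id foreign key.
        statement = select(Hero, Team).join(Team)
        for hero, team in session.exec(statement):
            print("Hero:", hero, "Team:", team)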
python
getsentry__sentry
src/sentry/management/commands/generate_reset_password_link.py
{ "start": 231, "end": 1190 }
class ____(BaseCommand): help = "Generate a link for a user to reset their password" def add_arguments(self, parser): parser.add_argument( "--noinput", dest="noinput", action="store_true", default=False, help="Dont ask for confirmation before merging accounts.", ) def handle(self, username, **options): users = find_users(username, with_valid_password=False) if not users: sys.stdout.write("No account found with given username.\n") return for user in users: password_hash, created = LostPasswordHash.objects.get_or_create(user=user) if not password_hash.is_valid(): password_hash.date_added = timezone.now() password_hash.set_hash() password_hash.save() echo(f"{user.username} ({user.email}) - {password_hash.get_absolute_url()}")
Command
python
marshmallow-code__marshmallow
src/marshmallow/fields.py
{ "start": 67283, "end": 69322 }
class ____(Field): """A field that takes the value returned by a `Schema <marshmallow.Schema>` method. :param serialize: The name of the Schema method from which to retrieve the value. The method must take an argument ``obj`` (in addition to self) that is the object to be serialized. :param deserialize: Optional name of the Schema method for deserializing a value The method must take a single argument ``value``, which is the value to deserialize. .. versionchanged:: 3.0.0 Removed ``method_name`` parameter. """ _CHECK_ATTRIBUTE = False def __init__( self, serialize: str | None = None, deserialize: str | None = None, **kwargs: Unpack[_BaseFieldKwargs], # FIXME: Omit dump_only and load_only ): # Set dump_only and load_only based on arguments kwargs["dump_only"] = bool(serialize) and not bool(deserialize) kwargs["load_only"] = bool(deserialize) and not bool(serialize) super().__init__(**kwargs) self.serialize_method_name = serialize self.deserialize_method_name = deserialize self._serialize_method = None self._deserialize_method = None def _bind_to_schema(self, field_name, parent): if self.serialize_method_name: self._serialize_method = utils.callable_or_raise( getattr(parent, self.serialize_method_name) ) if self.deserialize_method_name: self._deserialize_method = utils.callable_or_raise( getattr(parent, self.deserialize_method_name) ) super()._bind_to_schema(field_name, parent) def _serialize(self, value, attr, obj, **kwargs): if self._serialize_method is not None: return self._serialize_method(obj) return missing_ def _deserialize(self, value, attr, data, **kwargs): if self._deserialize_method is not None: return self._deserialize_method(value) return value
Method
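A minimal end-to-end use of this field via the standard marshmallow API; the schema and data are illustrative:

from marshmallow import Schema, fields

class UserSchema(Schema):
    name = fields.Str()
    greeting = fields.Method(serialize="get_greeting")  # dump-only, per the ctor logic above

    def get_greeting(self, obj):
        return "Hello, {}!".format(obj["name"])

print(UserSchema().dump({"name": "Ada"}))
# {'name': 'Ada', 'greeting': 'Hello, Ada!'}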
python
Pylons__pyramid
src/pyramid/csrf.py
{ "start": 1557, "end": 2862 }
class ____: """A CSRF storage policy that persists the CSRF token in the session. Note that using this CSRF implementation requires that a :term:`session factory` is configured. ``key`` The session key where the CSRF token will be stored. Default: `_csrft_`. .. versionadded:: 1.9 """ _token_factory = staticmethod(lambda: text_(uuid.uuid4().hex)) def __init__(self, key='_csrft_'): self.key = key def new_csrf_token(self, request): """Sets a new CSRF token into the session and returns it.""" token = self._token_factory() request.session[self.key] = token return token def get_csrf_token(self, request): """Returns the currently active CSRF token from the session, generating a new one if needed.""" token = request.session.get(self.key, None) if not token: token = self.new_csrf_token(request) return token def check_csrf_token(self, request, supplied_token): """Returns ``True`` if the ``supplied_token`` is valid.""" expected_token = self.get_csrf_token(request) return not strings_differ( bytes_(expected_token), bytes_(supplied_token) ) @implementer(ICSRFStoragePolicy)
SessionCSRFStoragePolicy
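A sketch of the token lifecycle, using a bare object with a dict session in place of a real Pyramid request (assumes pyramid is installed so the class and its helpers import):

from types import SimpleNamespace

from pyramid.csrf import SessionCSRFStoragePolicy

policy = SessionCSRFStoragePolicy()
request = SimpleNamespace(session={})

token = policy.get_csrf_token(request)  # lazily generates and stores a token
assert policy.check_csrf_token(request, token)
assert not policy.check_csrf_token(request, "not-the-token")
assert policy.new_csrf_token(request) != token  # rotation replaces the stored value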
python
econchick__interrogate
tests/functional/sample/full.py
{ "start": 1706, "end": 1879 }
class ____: """Bar class""" def method_bar(self): """a method that does bar""" class InnerBar: """an inner class""" pass
Bar
python
pytorch__pytorch
benchmarks/instruction_counts/core/api.py
{ "start": 900, "end": 1055 }
class ____(enum.Enum): FORWARD = "Forward" FORWARD_BACKWARD = "Forward + Backward" EXPLICIT = "" @dataclasses.dataclass(frozen=True)
AutogradMode
python
pydantic__pydantic
pydantic-core/python/pydantic_core/core_schema.py
{ "start": 137686, "end": 139996 }
class ____(TypedDict, total=False): type: Required[Literal['url']] max_length: int allowed_schemes: list[str] host_required: bool # default False default_host: str default_port: int default_path: str strict: bool ref: str metadata: dict[str, Any] serialization: SerSchema def url_schema( *, max_length: int | None = None, allowed_schemes: list[str] | None = None, host_required: bool | None = None, default_host: str | None = None, default_port: int | None = None, default_path: str | None = None, preserve_empty_path: bool | None = None, strict: bool | None = None, ref: str | None = None, metadata: dict[str, Any] | None = None, serialization: SerSchema | None = None, ) -> UrlSchema: """ Returns a schema that matches a URL value, e.g.: ```py from pydantic_core import SchemaValidator, core_schema schema = core_schema.url_schema() v = SchemaValidator(schema) print(v.validate_python('https://example.com')) #> https://example.com/ ``` Args: max_length: The maximum length of the URL allowed_schemes: The allowed URL schemes host_required: Whether the URL must have a host default_host: The default host to use if the URL does not have a host default_port: The default port to use if the URL does not have a port default_path: The default path to use if the URL does not have a path preserve_empty_path: Whether to preserve an empty path or convert it to '/', default False strict: Whether to use strict URL parsing ref: optional unique identifier of the schema, used to reference the schema in other places metadata: Any other information you want to include with the schema, not used by pydantic-core serialization: Custom serialization schema """ return _dict_not_none( type='url', max_length=max_length, allowed_schemes=allowed_schemes, host_required=host_required, default_host=default_host, default_port=default_port, default_path=default_path, preserve_empty_path=preserve_empty_path, strict=strict, ref=ref, metadata=metadata, serialization=serialization, )
UrlSchema
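Building on the docstring example, a short sketch exercising the allowed_schemes constraint:

from pydantic_core import SchemaValidator, ValidationError, core_schema

v = SchemaValidator(core_schema.url_schema(allowed_schemes=["https"]))
print(v.validate_python("https://example.com"))  # https://example.com/
try:
    v.validate_python("ftp://example.com")
except ValidationError as exc:
    print(exc.error_count(), "error")  # scheme not permitted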
python
Unity-Technologies__ml-agents
utils/validate_inits.py
{ "start": 106, "end": 1701 }
class ____(PEP420PackageFinder): """ The PEP420PackageFinder (used by find_namespace_packages) thinks everything looks like a package, even if there are no python files in it. This is a little stricter and only considers directories with python files in it. """ @staticmethod def _looks_like_package(path, package_name=None): glob_path = os.path.join(path, "*.py") return any(glob.iglob(glob_path)) def validate_packages(root_dir): """ Makes sure that all python files are discoverable by find_packages(), which is what we use in setup.py. We could potentially use find_namespace_packages instead, but depending on PEP420 has been flaky in the past (particularly with regards to mypy). """ exclude = ["*.tests", "*.tests.*", "tests.*", "tests"] found_packages = find_packages(root_dir, exclude=exclude) found_ns_packages = NonTrivialPEP420PackageFinder.find(root_dir, exclude=exclude) assert found_packages, f"Couldn't find anything in directory {root_dir}" if set(found_packages) != set(found_ns_packages): raise RuntimeError( "The following packages are not discoverable using found_packages():\n" f"{set(found_ns_packages) - set(found_packages)}\n" "Make sure you have an __init__.py file in the directories." ) else: print(f"__init__.py files for {root_dir} are OK.") def main(): for root_dir in ["ml-agents", "ml-agents-envs"]: validate_packages(root_dir) if __name__ == "__main__": main()
NonTrivialPEP420PackageFinder
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/dml.py
{ "start": 64211, "end": 64520 }
class ____(Update, TypedReturnsRows[Unpack[_Ts]]): """Typing-only class that establishes a generic type form of :class:`.Update` which tracks returned column types. This datatype is delivered when calling the :meth:`.Update.returning` method. .. versionadded:: 2.0 """
ReturningUpdate
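A sketch of the call that produces this typed construct; the table definition is illustrative:

from sqlalchemy import Column, Integer, MetaData, String, Table, update

metadata = MetaData()
user = Table(
    "user",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("name", String),
)

# .returning() hands back the typed form, so fetched row tuples carry column types.
stmt = update(user).values(name="spongebob").returning(user.c.id)
print(stmt)  # UPDATE "user" SET name=:name RETURNING "user".id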
python
tensorflow__tensorflow
tensorflow/python/data/util/nest_test.py
{ "start": 1711, "end": 23793 }
class ____(test_base.DatasetTestBase, parameterized.TestCase): @combinations.generate(test_base.default_test_combinations()) def testFlattenAndPack(self): structure = ((3, 4), 5, (6, 7, (9, 10), 8)) flat = ["a", "b", "c", "d", "e", "f", "g", "h"] self.assertEqual(nest.flatten(structure), [3, 4, 5, 6, 7, 9, 10, 8]) self.assertEqual( nest.pack_sequence_as(structure, flat), (("a", "b"), "c", ("d", "e", ("f", "g"), "h"))) point = collections.namedtuple("Point", ["x", "y"]) structure = (point(x=4, y=2), ((point(x=1, y=0),),)) flat = [4, 2, 1, 0] self.assertEqual(nest.flatten(structure), flat) restructured_from_flat = nest.pack_sequence_as(structure, flat) self.assertEqual(restructured_from_flat, structure) self.assertEqual(restructured_from_flat[0].x, 4) self.assertEqual(restructured_from_flat[0].y, 2) self.assertEqual(restructured_from_flat[1][0][0].x, 1) self.assertEqual(restructured_from_flat[1][0][0].y, 0) self.assertEqual([5], nest.flatten(5)) self.assertEqual([np.array([5])], nest.flatten(np.array([5]))) self.assertEqual("a", nest.pack_sequence_as(5, ["a"])) self.assertEqual( np.array([5]), nest.pack_sequence_as("scalar", [np.array([5])])) with self.assertRaisesRegex(ValueError, "Argument `structure` is a scalar"): nest.pack_sequence_as("scalar", [4, 5]) with self.assertRaisesRegex(TypeError, "flat_sequence"): nest.pack_sequence_as([4, 5], "bad_sequence") with self.assertRaises(ValueError): nest.pack_sequence_as([5, 6, [7, 8]], ["a", "b", "c"]) @combinations.generate(test_base.default_test_combinations()) def testDataclassIsNested(self): mt = MaskedTensor(mask=True, value=constant_op.constant([1])) self.assertTrue(nest.is_nested(mt)) @combinations.generate(test_base.default_test_combinations()) def testFlattenDataclass(self): mt = MaskedTensor(mask=True, value=constant_op.constant([1])) leaves = nest.flatten(mt) self.assertLen(leaves, 1) self.assertAllEqual(leaves[0], [1]) @combinations.generate(test_base.default_test_combinations()) def testPackDataclass(self): mt = MaskedTensor(mask=True, value=constant_op.constant([1])) leaves = nest.flatten(mt) reconstructed_mt = nest.pack_sequence_as(mt, leaves) self.assertIsInstance(reconstructed_mt, MaskedTensor) self.assertEqual(reconstructed_mt.mask, mt.mask) self.assertAllEqual(reconstructed_mt.value, mt.value) mt2 = MaskedTensor(mask=False, value=constant_op.constant([2])) reconstructed_mt = nest.pack_sequence_as(mt2, leaves) self.assertIsInstance(reconstructed_mt, MaskedTensor) self.assertFalse(reconstructed_mt.mask) self.assertAllEqual(reconstructed_mt.value, [1]) @combinations.generate(test_base.default_test_combinations()) def testDataclassMapStructure(self): mt = MaskedTensor(mask=True, value=constant_op.constant([1])) mt_doubled = nest.map_structure(lambda x: x * 2, mt) self.assertIsInstance(mt_doubled, MaskedTensor) self.assertEqual(mt_doubled.mask, True) self.assertAllEqual(mt_doubled.value, [2]) @combinations.generate(test_base.default_test_combinations()) def testDataclassAssertSameStructure(self): mt1 = MaskedTensor(mask=True, value=constant_op.constant([1])) mt2 = MaskedTensor(mask=False, value=constant_op.constant([2])) nest.assert_same_structure(mt1, mt2) mt3 = (1, 2) with self.assertRaisesRegex( # pylint: disable=g-error-prone-assert-raises TypeError, "don't have the same nested structure", ): nest.assert_same_structure(mt1, mt3) class SubMaskedTensor(MaskedTensor): pass mt_subclass = SubMaskedTensor(mask=True, value=constant_op.constant([1])) nest.assert_same_structure(mt1, mt_subclass, check_types=False) with 
self.assertRaisesRegex( # pylint: disable=g-error-prone-assert-raises TypeError, "don't have the same sequence type", ): nest.assert_same_structure(mt1, mt_subclass) @combinations.generate(test_base.default_test_combinations()) def testDataclassAssertShallowStructure(self): mt = MaskedTensor(mask=True, value=constant_op.constant([1])) structure1 = ("a", "b") structure2 = (mt, "c") nest.assert_shallow_structure(structure1, structure2) structure3 = (mt, "d", "e") with self.assertRaisesRegex( # pylint: disable=g-error-prone-assert-raises ValueError, "don't have the same sequence length", ): nest.assert_shallow_structure(structure1, structure3) structure4 = {"a": mt, "b": "c"} nest.assert_shallow_structure(structure1, structure4, check_types=False) with self.assertRaisesRegex( # pylint: disable=g-error-prone-assert-raises TypeError, "don't have the same sequence type", ): nest.assert_shallow_structure(structure1, structure4) @combinations.generate(test_base.default_test_combinations()) def testFlattenDictOrder(self): """`flatten` orders dicts by key, including OrderedDicts.""" ordered = collections.OrderedDict([("d", 3), ("b", 1), ("a", 0), ("c", 2)]) plain = {"d": 3, "b": 1, "a": 0, "c": 2} ordered_flat = nest.flatten(ordered) plain_flat = nest.flatten(plain) self.assertEqual([0, 1, 2, 3], ordered_flat) self.assertEqual([0, 1, 2, 3], plain_flat) @combinations.generate(test_base.default_test_combinations()) def testPackDictOrder(self): """Packing orders dicts by key, including OrderedDicts.""" ordered = collections.OrderedDict([("d", 0), ("b", 0), ("a", 0), ("c", 0)]) plain = {"d": 0, "b": 0, "a": 0, "c": 0} seq = [0, 1, 2, 3] ordered_reconstruction = nest.pack_sequence_as(ordered, seq) plain_reconstruction = nest.pack_sequence_as(plain, seq) self.assertEqual( collections.OrderedDict([("d", 3), ("b", 1), ("a", 0), ("c", 2)]), ordered_reconstruction) self.assertEqual({"d": 3, "b": 1, "a": 0, "c": 2}, plain_reconstruction) @combinations.generate(test_base.default_test_combinations()) def testFlattenAndPackWithDicts(self): # A nice messy mix of tuples, lists, dicts, and `OrderedDict`s. named_tuple = collections.namedtuple("A", ("b", "c")) mess = ( "z", named_tuple(3, 4), { "c": ( 1, collections.OrderedDict([ ("b", 3), ("a", 2), ]), ), "b": 5 }, 17 ) flattened = nest.flatten(mess) self.assertEqual(flattened, ["z", 3, 4, 5, 1, 2, 3, 17]) structure_of_mess = ( 14, named_tuple("a", True), { "c": ( 0, collections.OrderedDict([ ("b", 9), ("a", 8), ]), ), "b": 3 }, "hi everybody", ) unflattened = nest.pack_sequence_as(structure_of_mess, flattened) self.assertEqual(unflattened, mess) # Check also that the OrderedDict was created, with the correct key order. 
unflattened_ordered_dict = unflattened[2]["c"][1] self.assertIsInstance(unflattened_ordered_dict, collections.OrderedDict) self.assertEqual(list(unflattened_ordered_dict.keys()), ["b", "a"]) @combinations.generate(test_base.default_test_combinations()) def testFlattenSparseValue(self): st = sparse_tensor.SparseTensorValue([[0]], [0], [1]) single_value = st list_of_values = [st, st, st] nest_of_values = ((st), ((st), (st))) dict_of_values = {"foo": st, "bar": st, "baz": st} self.assertEqual([st], nest.flatten(single_value)) self.assertEqual([[st, st, st]], nest.flatten(list_of_values)) self.assertEqual([st, st, st], nest.flatten(nest_of_values)) self.assertEqual([st, st, st], nest.flatten(dict_of_values)) @combinations.generate(test_base.default_test_combinations()) def testFlattenRaggedValue(self): rt = ragged_factory_ops.constant_value([[[0]], [[1]]]) single_value = rt list_of_values = [rt, rt, rt] nest_of_values = ((rt), ((rt), (rt))) dict_of_values = {"foo": rt, "bar": rt, "baz": rt} self.assertEqual([rt], nest.flatten(single_value)) self.assertEqual([[rt, rt, rt]], nest.flatten(list_of_values)) self.assertEqual([rt, rt, rt], nest.flatten(nest_of_values)) self.assertEqual([rt, rt, rt], nest.flatten(dict_of_values)) @combinations.generate(test_base.default_test_combinations()) def testIsNested(self): self.assertFalse(nest.is_nested("1234")) self.assertFalse(nest.is_nested([1, 3, [4, 5]])) self.assertTrue(nest.is_nested(((7, 8), (5, 6)))) self.assertFalse(nest.is_nested([])) self.assertFalse(nest.is_nested(set([1, 2]))) ones = array_ops.ones([2, 3]) self.assertFalse(nest.is_nested(ones)) self.assertFalse(nest.is_nested(math_ops.tanh(ones))) self.assertFalse(nest.is_nested(np.ones((4, 5)))) self.assertTrue(nest.is_nested({"foo": 1, "bar": 2})) self.assertFalse( nest.is_nested(sparse_tensor.SparseTensorValue([[0]], [0], [1]))) self.assertFalse( nest.is_nested(ragged_factory_ops.constant_value([[[0]], [[1]]]))) @combinations.generate(test_base.default_test_combinations()) def testAssertSameStructure(self): structure1 = (((1, 2), 3), 4, (5, 6)) structure2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6")) structure_different_num_elements = ("spam", "eggs") structure_different_nesting = (((1, 2), 3), 4, 5, (6,)) structure_dictionary = {"foo": 2, "bar": 4, "baz": {"foo": 5, "bar": 6}} structure_dictionary_diff_nested = { "foo": 2, "bar": 4, "baz": { "foo": 5, "baz": 6 } } nest.assert_same_structure(structure1, structure2) nest.assert_same_structure("abc", 1.0) nest.assert_same_structure("abc", np.array([0, 1])) nest.assert_same_structure("abc", constant_op.constant([0, 1])) with self.assertRaisesRegex(ValueError, "don't have the same nested structure"): nest.assert_same_structure(structure1, structure_different_num_elements) with self.assertRaisesRegex(ValueError, "don't have the same nested structure"): nest.assert_same_structure((0, 1), np.array([0, 1])) with self.assertRaisesRegex(ValueError, "don't have the same nested structure"): nest.assert_same_structure(0, (0, 1)) with self.assertRaisesRegex(ValueError, "don't have the same nested structure"): nest.assert_same_structure(structure1, structure_different_nesting) named_type_0 = collections.namedtuple("named_0", ("a", "b")) named_type_1 = collections.namedtuple("named_1", ("a", "b")) self.assertRaises(TypeError, nest.assert_same_structure, (0, 1), named_type_0("a", "b")) nest.assert_same_structure(named_type_0(3, 4), named_type_0("a", "b")) self.assertRaises(TypeError, nest.assert_same_structure, named_type_0(3, 4), named_type_1(3, 
4)) with self.assertRaisesRegex(ValueError, "don't have the same nested structure"): nest.assert_same_structure(named_type_0(3, 4), named_type_0((3,), 4)) with self.assertRaisesRegex(ValueError, "don't have the same nested structure"): nest.assert_same_structure(((3,), 4), (3, (4,))) structure1_list = {"a": ((1, 2), 3), "b": 4, "c": (5, 6)} structure2_list = {"a": ((1, 2), 3), "b": 4, "d": (5, 6)} with self.assertRaisesRegex(TypeError, "don't have the same sequence type"): nest.assert_same_structure(structure1, structure1_list) nest.assert_same_structure(structure1, structure2, check_types=False) nest.assert_same_structure(structure1, structure1_list, check_types=False) with self.assertRaisesRegex(ValueError, "don't have the same set of keys"): nest.assert_same_structure(structure1_list, structure2_list) with self.assertRaisesRegex(ValueError, "don't have the same set of keys"): nest.assert_same_structure(structure_dictionary, structure_dictionary_diff_nested) nest.assert_same_structure( structure_dictionary, structure_dictionary_diff_nested, check_types=False) nest.assert_same_structure( structure1_list, structure2_list, check_types=False) @combinations.generate(test_base.default_test_combinations()) def testMapStructure(self): structure1 = (((1, 2), 3), 4, (5, 6)) structure2 = (((7, 8), 9), 10, (11, 12)) structure1_plus1 = nest.map_structure(lambda x: x + 1, structure1) nest.assert_same_structure(structure1, structure1_plus1) self.assertAllEqual( [2, 3, 4, 5, 6, 7], nest.flatten(structure1_plus1)) structure1_plus_structure2 = nest.map_structure( lambda x, y: x + y, structure1, structure2) self.assertEqual( (((1 + 7, 2 + 8), 3 + 9), 4 + 10, (5 + 11, 6 + 12)), structure1_plus_structure2) self.assertEqual(3, nest.map_structure(lambda x: x - 1, 4)) self.assertEqual(7, nest.map_structure(lambda x, y: x + y, 3, 4)) with self.assertRaisesRegex(TypeError, "callable"): nest.map_structure("bad", structure1_plus1) with self.assertRaisesRegex(ValueError, "same nested structure"): nest.map_structure(lambda x, y: None, 3, (3,)) with self.assertRaisesRegex(TypeError, "same sequence type"): nest.map_structure(lambda x, y: None, ((3, 4), 5), {"a": (3, 4), "b": 5}) with self.assertRaisesRegex(ValueError, "same nested structure"): nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5))) with self.assertRaisesRegex(ValueError, "same nested structure"): nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)), check_types=False) with self.assertRaisesRegex(ValueError, "Only valid keyword argument"): nest.map_structure(lambda x: None, structure1, foo="a") with self.assertRaisesRegex(ValueError, "Only valid keyword argument"): nest.map_structure(lambda x: None, structure1, check_types=False, foo="a") @combinations.generate(test_base.default_test_combinations()) def testAssertShallowStructure(self): inp_ab = ("a", "b") inp_abc = ("a", "b", "c") expected_message = ( "The two structures don't have the same sequence length. Input " "structure has length 2, while shallow structure has length 3.") with self.assertRaisesRegex(ValueError, expected_message): nest.assert_shallow_structure(inp_abc, inp_ab) inp_ab1 = ((1, 1), (2, 2)) inp_ab2 = {"a": (1, 1), "b": (2, 2)} expected_message = ( "The two structures don't have the same sequence type. 
Input structure " "has type 'tuple', while shallow structure has type " "'dict'.") with self.assertRaisesRegex(TypeError, expected_message): nest.assert_shallow_structure(inp_ab2, inp_ab1) nest.assert_shallow_structure(inp_ab2, inp_ab1, check_types=False) inp_ab1 = {"a": (1, 1), "b": {"c": (2, 2)}} inp_ab2 = {"a": (1, 1), "b": {"d": (2, 2)}} expected_message = ( r"The two structures don't have the same keys. Input " r"structure has keys \['c'\], while shallow structure has " r"keys \['d'\].") with self.assertRaisesRegex(ValueError, expected_message): nest.assert_shallow_structure(inp_ab2, inp_ab1) inp_ab = collections.OrderedDict([("a", 1), ("b", (2, 3))]) inp_ba = collections.OrderedDict([("b", (2, 3)), ("a", 1)]) nest.assert_shallow_structure(inp_ab, inp_ba) @combinations.generate(test_base.default_test_combinations()) def testFlattenUpTo(self): input_tree = (((2, 2), (3, 3)), ((4, 9), (5, 5))) shallow_tree = ((True, True), (False, True)) flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) self.assertEqual(flattened_input_tree, [(2, 2), (3, 3), (4, 9), (5, 5)]) self.assertEqual(flattened_shallow_tree, [True, True, False, True]) input_tree = ((("a", 1), (("b", 2), (("c", 3), (("d", 4)))))) shallow_tree = (("level_1", ("level_2", ("level_3", ("level_4"))))) input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree, input_tree) input_tree_flattened = nest.flatten(input_tree) self.assertEqual(input_tree_flattened_as_shallow_tree, [("a", 1), ("b", 2), ("c", 3), ("d", 4)]) self.assertEqual(input_tree_flattened, ["a", 1, "b", 2, "c", 3, "d", 4]) ## Shallow non-list edge-case. # Using iterable elements. input_tree = ["input_tree"] shallow_tree = "shallow_tree" flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) self.assertEqual(flattened_input_tree, [input_tree]) self.assertEqual(flattened_shallow_tree, [shallow_tree]) input_tree = ("input_tree_0", "input_tree_1") shallow_tree = "shallow_tree" flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) self.assertEqual(flattened_input_tree, [input_tree]) self.assertEqual(flattened_shallow_tree, [shallow_tree]) # Using non-iterable elements. input_tree = (0,) shallow_tree = 9 flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) self.assertEqual(flattened_input_tree, [input_tree]) self.assertEqual(flattened_shallow_tree, [shallow_tree]) input_tree = (0, 1) shallow_tree = 9 flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) self.assertEqual(flattened_input_tree, [input_tree]) self.assertEqual(flattened_shallow_tree, [shallow_tree]) ## Both non-list edge-case. # Using iterable elements. input_tree = "input_tree" shallow_tree = "shallow_tree" flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) self.assertEqual(flattened_input_tree, [input_tree]) self.assertEqual(flattened_shallow_tree, [shallow_tree]) # Using non-iterable elements. 
input_tree = 0 shallow_tree = 0 flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) self.assertEqual(flattened_input_tree, [input_tree]) self.assertEqual(flattened_shallow_tree, [shallow_tree]) ## Input non-list edge-case. # Using iterable elements. input_tree = "input_tree" shallow_tree = ("shallow_tree",) expected_message = ("If shallow structure is a sequence, input must also " "be a sequence. Input has type: 'str'.") with self.assertRaisesRegex(TypeError, expected_message): flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) self.assertEqual(flattened_shallow_tree, list(shallow_tree)) input_tree = "input_tree" shallow_tree = ("shallow_tree_9", "shallow_tree_8") with self.assertRaisesRegex(TypeError, expected_message): flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) self.assertEqual(flattened_shallow_tree, list(shallow_tree)) # Using non-iterable elements. input_tree = 0 shallow_tree = (9,) expected_message = ("If shallow structure is a sequence, input must also " "be a sequence. Input has type: 'int'.") with self.assertRaisesRegex(TypeError, expected_message): flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) self.assertEqual(flattened_shallow_tree, list(shallow_tree)) input_tree = 0 shallow_tree = (9, 8) with self.assertRaisesRegex(TypeError, expected_message): flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) self.assertEqual(flattened_shallow_tree, list(shallow_tree)) # Using dict. input_tree = {"a": ((2, 2), (3, 3)), "b": ((4, 9), (5, 5))} shallow_tree = {"a": (True, True), "b": (False, True)} flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree) flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree) self.assertEqual(flattened_input_tree, [(2, 2), (3, 3), (4, 9), (5, 5)]) self.assertEqual(flattened_shallow_tree, [True, True, False, True]) @combinations.generate(test_base.default_test_combinations()) def testMapStructureUpTo(self): ab_tuple = collections.namedtuple("ab_tuple", "a, b") op_tuple = collections.namedtuple("op_tuple", "add, mul") inp_val = ab_tuple(a=2, b=3) inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3)) out = nest.map_structure_up_to( inp_val, lambda val, ops: (val + ops.add) * ops.mul, inp_val, inp_ops) self.assertEqual(out.a, 6) self.assertEqual(out.b, 15) data_list = ((2, 4, 6, 8), ((1, 3, 5, 7, 9), (3, 5, 7))) name_list = ("evens", ("odds", "primes")) out = nest.map_structure_up_to( name_list, lambda name, sec: "first_{}_{}".format(len(sec), name), name_list, data_list) self.assertEqual(out, ("first_4_evens", ("first_5_odds", "first_3_primes"))) if __name__ == "__main__": test.main()
NestTest
python
scikit-learn__scikit-learn
sklearn/tree/_classes.py
{ "start": 2398, "end": 25113 }
class ____(MultiOutputMixin, BaseEstimator, metaclass=ABCMeta): """Base class for decision trees. Warning: This class should not be used directly. Use derived classes instead. """ # "check_input" is used for optimisation and isn't something to be passed # around in a pipeline. __metadata_request__predict = {"check_input": metadata_routing.UNUSED} _parameter_constraints: dict = { "splitter": [StrOptions({"best", "random"})], "max_depth": [Interval(Integral, 1, None, closed="left"), None], "min_samples_split": [ Interval(Integral, 2, None, closed="left"), Interval(RealNotInt, 0.0, 1.0, closed="right"), ], "min_samples_leaf": [ Interval(Integral, 1, None, closed="left"), Interval(RealNotInt, 0.0, 1.0, closed="neither"), ], "min_weight_fraction_leaf": [Interval(Real, 0.0, 0.5, closed="both")], "max_features": [ Interval(Integral, 1, None, closed="left"), Interval(RealNotInt, 0.0, 1.0, closed="right"), StrOptions({"sqrt", "log2"}), None, ], "random_state": ["random_state"], "max_leaf_nodes": [Interval(Integral, 2, None, closed="left"), None], "min_impurity_decrease": [Interval(Real, 0.0, None, closed="left")], "ccp_alpha": [Interval(Real, 0.0, None, closed="left")], "monotonic_cst": ["array-like", None], } @abstractmethod def __init__( self, *, criterion, splitter, max_depth, min_samples_split, min_samples_leaf, min_weight_fraction_leaf, max_features, max_leaf_nodes, random_state, min_impurity_decrease, class_weight=None, ccp_alpha=0.0, monotonic_cst=None, ): self.criterion = criterion self.splitter = splitter self.max_depth = max_depth self.min_samples_split = min_samples_split self.min_samples_leaf = min_samples_leaf self.min_weight_fraction_leaf = min_weight_fraction_leaf self.max_features = max_features self.max_leaf_nodes = max_leaf_nodes self.random_state = random_state self.min_impurity_decrease = min_impurity_decrease self.class_weight = class_weight self.ccp_alpha = ccp_alpha self.monotonic_cst = monotonic_cst def get_depth(self): """Return the depth of the decision tree. The depth of a tree is the maximum distance between the root and any leaf. Returns ------- self.tree_.max_depth : int The maximum depth of the tree. """ check_is_fitted(self) return self.tree_.max_depth def get_n_leaves(self): """Return the number of leaves of the decision tree. Returns ------- self.tree_.n_leaves : int Number of leaves. """ check_is_fitted(self) return self.tree_.n_leaves def _support_missing_values(self, X): return ( not issparse(X) and self.__sklearn_tags__().input_tags.allow_nan and self.monotonic_cst is None ) def _compute_missing_values_in_feature_mask(self, X, estimator_name=None): """Return boolean mask denoting if there are missing values for each feature. This method also ensures that X is finite. Parameter --------- X : array-like of shape (n_samples, n_features), dtype=DOUBLE Input data. estimator_name : str or None, default=None Name to use when raising an error. Defaults to the class name. Returns ------- missing_values_in_feature_mask : ndarray of shape (n_features,), or None Missing value mask. If missing values are not supported or there are no missing values, return None. """ estimator_name = estimator_name or self.__class__.__name__ common_kwargs = dict(estimator_name=estimator_name, input_name="X") if not self._support_missing_values(X): assert_all_finite(X, **common_kwargs) return None with np.errstate(over="ignore"): overall_sum = np.sum(X) if not np.isfinite(overall_sum): # Raise a ValueError in case of the presence of an infinite element. 
_assert_all_finite_element_wise(X, xp=np, allow_nan=True, **common_kwargs) # If the sum is not nan, then there are no missing values if not np.isnan(overall_sum): return None missing_values_in_feature_mask = _any_isnan_axis0(X) return missing_values_in_feature_mask def _fit( self, X, y, sample_weight=None, check_input=True, missing_values_in_feature_mask=None, ): random_state = check_random_state(self.random_state) if check_input: # Need to validate separately here. # We can't pass multi_output=True because that would allow y to be # csr. # _compute_missing_values_in_feature_mask will check for finite values and # compute the missing mask if the tree supports missing values check_X_params = dict( dtype=DTYPE, accept_sparse="csc", ensure_all_finite=False ) check_y_params = dict(ensure_2d=False, dtype=None) X, y = validate_data( self, X, y, validate_separately=(check_X_params, check_y_params) ) missing_values_in_feature_mask = ( self._compute_missing_values_in_feature_mask(X) ) if issparse(X): X.sort_indices() if X.indices.dtype != np.intc or X.indptr.dtype != np.intc: raise ValueError( "No support for np.int64 index based sparse matrices" ) if self.criterion == "poisson": if np.any(y < 0): raise ValueError( "Some value(s) of y are negative which is" " not allowed for Poisson regression." ) if np.sum(y) <= 0: raise ValueError( "Sum of y is not positive which is " "necessary for Poisson regression." ) # Determine output settings n_samples, self.n_features_in_ = X.shape is_classification = is_classifier(self) y = np.atleast_1d(y) expanded_class_weight = None if y.ndim == 1: # reshape is necessary to preserve the data contiguity against vs # [:, np.newaxis] that does not. y = np.reshape(y, (-1, 1)) self.n_outputs_ = y.shape[1] if is_classification: check_classification_targets(y) y = np.copy(y) self.classes_ = [] self.n_classes_ = [] if self.class_weight is not None: y_original = np.copy(y) y_encoded = np.zeros(y.shape, dtype=int) for k in range(self.n_outputs_): classes_k, y_encoded[:, k] = np.unique(y[:, k], return_inverse=True) self.classes_.append(classes_k) self.n_classes_.append(classes_k.shape[0]) y = y_encoded if self.class_weight is not None: expanded_class_weight = compute_sample_weight( self.class_weight, y_original ) self.n_classes_ = np.array(self.n_classes_, dtype=np.intp) if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous: y = np.ascontiguousarray(y, dtype=DOUBLE) max_depth = np.iinfo(np.int32).max if self.max_depth is None else self.max_depth if isinstance(self.min_samples_leaf, numbers.Integral): min_samples_leaf = self.min_samples_leaf else: # float min_samples_leaf = ceil(self.min_samples_leaf * n_samples) if isinstance(self.min_samples_split, numbers.Integral): min_samples_split = self.min_samples_split else: # float min_samples_split = ceil(self.min_samples_split * n_samples) min_samples_split = max(2, min_samples_split) min_samples_split = max(min_samples_split, 2 * min_samples_leaf) if isinstance(self.max_features, str): if self.max_features == "sqrt": max_features = max(1, int(np.sqrt(self.n_features_in_))) elif self.max_features == "log2": max_features = max(1, int(np.log2(self.n_features_in_))) elif self.max_features is None: max_features = self.n_features_in_ elif isinstance(self.max_features, numbers.Integral): max_features = self.max_features else: # float if self.max_features > 0.0: max_features = max(1, int(self.max_features * self.n_features_in_)) else: max_features = 0 self.max_features_ = max_features max_leaf_nodes = -1 if self.max_leaf_nodes is 
None else self.max_leaf_nodes if len(y) != n_samples: raise ValueError( "Number of labels=%d does not match number of samples=%d" % (len(y), n_samples) ) if sample_weight is not None: sample_weight = _check_sample_weight(sample_weight, X, dtype=DOUBLE) if expanded_class_weight is not None: if sample_weight is not None: sample_weight = sample_weight * expanded_class_weight else: sample_weight = expanded_class_weight # Set min_weight_leaf from min_weight_fraction_leaf if sample_weight is None: min_weight_leaf = self.min_weight_fraction_leaf * n_samples else: min_weight_leaf = self.min_weight_fraction_leaf * np.sum(sample_weight) # Build tree criterion = self.criterion if not isinstance(criterion, Criterion): if is_classification: criterion = CRITERIA_CLF[self.criterion]( self.n_outputs_, self.n_classes_ ) else: criterion = CRITERIA_REG[self.criterion](self.n_outputs_, n_samples) else: # Make a deepcopy in case the criterion has mutable attributes that # might be shared and modified concurrently during parallel fitting criterion = copy.deepcopy(criterion) SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS splitter = self.splitter if self.monotonic_cst is None: monotonic_cst = None else: if self.n_outputs_ > 1: raise ValueError( "Monotonicity constraints are not supported with multiple outputs." ) # Check to correct monotonicity constraint' specification, # by applying element-wise logical conjunction # Note: we do not cast `np.asarray(self.monotonic_cst, dtype=np.int8)` # straight away here so as to generate error messages for invalid # values using the original values prior to any dtype related conversion. monotonic_cst = np.asarray(self.monotonic_cst) if monotonic_cst.shape[0] != X.shape[1]: raise ValueError( "monotonic_cst has shape {} but the input data " "X has {} features.".format(monotonic_cst.shape[0], X.shape[1]) ) valid_constraints = np.isin(monotonic_cst, (-1, 0, 1)) if not np.all(valid_constraints): unique_constaints_value = np.unique(monotonic_cst) raise ValueError( "monotonic_cst must be None or an array-like of -1, 0 or 1, but" f" got {unique_constaints_value}" ) monotonic_cst = np.asarray(monotonic_cst, dtype=np.int8) if is_classifier(self): if self.n_classes_[0] > 2: raise ValueError( "Monotonicity constraints are not supported with multiclass " "classification" ) # Binary classification trees are built by constraining probabilities # of the *negative class* in order to make the implementation similar # to regression trees. # Since self.monotonic_cst encodes constraints on probabilities of the # *positive class*, all signs must be flipped. 
monotonic_cst *= -1 if not isinstance(self.splitter, Splitter): splitter = SPLITTERS[self.splitter]( criterion, self.max_features_, min_samples_leaf, min_weight_leaf, random_state, monotonic_cst, ) if is_classifier(self): self.tree_ = Tree(self.n_features_in_, self.n_classes_, self.n_outputs_) else: self.tree_ = Tree( self.n_features_in_, # TODO: tree shouldn't need this in this case np.array([1] * self.n_outputs_, dtype=np.intp), self.n_outputs_, ) # Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise if max_leaf_nodes < 0: builder = DepthFirstTreeBuilder( splitter, min_samples_split, min_samples_leaf, min_weight_leaf, max_depth, self.min_impurity_decrease, ) else: builder = BestFirstTreeBuilder( splitter, min_samples_split, min_samples_leaf, min_weight_leaf, max_depth, max_leaf_nodes, self.min_impurity_decrease, ) builder.build(self.tree_, X, y, sample_weight, missing_values_in_feature_mask) if self.n_outputs_ == 1 and is_classifier(self): self.n_classes_ = self.n_classes_[0] self.classes_ = self.classes_[0] self._prune_tree() return self def _validate_X_predict(self, X, check_input): """Validate the training data on predict (probabilities).""" if check_input: if self._support_missing_values(X): ensure_all_finite = "allow-nan" else: ensure_all_finite = True X = validate_data( self, X, dtype=DTYPE, accept_sparse="csr", reset=False, ensure_all_finite=ensure_all_finite, ) if issparse(X) and ( X.indices.dtype != np.intc or X.indptr.dtype != np.intc ): raise ValueError("No support for np.int64 index based sparse matrices") else: # The number of features is checked regardless of `check_input` _check_n_features(self, X, reset=False) return X def predict(self, X, check_input=True): """Predict class or regression value for X. For a classification model, the predicted class for each sample in X is returned. For a regression model, the predicted value based on X is returned. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. check_input : bool, default=True Allow to bypass several input checking. Don't use this parameter unless you know what you're doing. Returns ------- y : array-like of shape (n_samples,) or (n_samples, n_outputs) The predicted classes, or the predict values. """ check_is_fitted(self) X = self._validate_X_predict(X, check_input) proba = self.tree_.predict(X) n_samples = X.shape[0] # Classification if is_classifier(self): if self.n_outputs_ == 1: return self.classes_.take(np.argmax(proba, axis=1), axis=0) else: class_type = self.classes_[0].dtype predictions = np.zeros((n_samples, self.n_outputs_), dtype=class_type) for k in range(self.n_outputs_): predictions[:, k] = self.classes_[k].take( np.argmax(proba[:, k], axis=1), axis=0 ) return predictions # Regression else: if self.n_outputs_ == 1: return proba[:, 0] else: return proba[:, :, 0] def apply(self, X, check_input=True): """Return the index of the leaf that each sample is predicted as. .. versionadded:: 0.17 Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. check_input : bool, default=True Allow to bypass several input checking. Don't use this parameter unless you know what you're doing. 
Returns ------- X_leaves : array-like of shape (n_samples,) For each datapoint x in X, return the index of the leaf x ends up in. Leaves are numbered within ``[0; self.tree_.node_count)``, possibly with gaps in the numbering. """ check_is_fitted(self) X = self._validate_X_predict(X, check_input) return self.tree_.apply(X) def decision_path(self, X, check_input=True): """Return the decision path in the tree. .. versionadded:: 0.18 Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. check_input : bool, default=True Allow to bypass several input checking. Don't use this parameter unless you know what you're doing. Returns ------- indicator : sparse matrix of shape (n_samples, n_nodes) Return a node indicator CSR matrix where non zero elements indicates that the samples goes through the nodes. """ X = self._validate_X_predict(X, check_input) return self.tree_.decision_path(X) def _prune_tree(self): """Prune tree using Minimal Cost-Complexity Pruning.""" check_is_fitted(self) if self.ccp_alpha == 0.0: return # build pruned tree if is_classifier(self): n_classes = np.atleast_1d(self.n_classes_) pruned_tree = Tree(self.n_features_in_, n_classes, self.n_outputs_) else: pruned_tree = Tree( self.n_features_in_, # TODO: the tree shouldn't need this param np.array([1] * self.n_outputs_, dtype=np.intp), self.n_outputs_, ) _build_pruned_tree_ccp(pruned_tree, self.tree_, self.ccp_alpha) self.tree_ = pruned_tree def cost_complexity_pruning_path(self, X, y, sample_weight=None): """Compute the pruning path during Minimal Cost-Complexity Pruning. See :ref:`minimal_cost_complexity_pruning` for details on the pruning process. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csc_matrix``. y : array-like of shape (n_samples,) or (n_samples, n_outputs) The target values (class labels) as integers or strings. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. Splits are also ignored if they would result in any single class carrying a negative weight in either child node. Returns ------- ccp_path : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. ccp_alphas : ndarray Effective alphas of subtree during pruning. impurities : ndarray Sum of the impurities of the subtree leaves for the corresponding alpha value in ``ccp_alphas``. """ est = clone(self).set_params(ccp_alpha=0.0) est.fit(X, y, sample_weight=sample_weight) return Bunch(**ccp_pruning_path(est.tree_)) @property def feature_importances_(self): """Return the feature importances. The importance of a feature is computed as the (normalized) total reduction of the criterion brought by that feature. It is also known as the Gini importance. Warning: impurity-based feature importances can be misleading for high cardinality features (many unique values). See :func:`sklearn.inspection.permutation_importance` as an alternative. Returns ------- feature_importances_ : ndarray of shape (n_features,) Normalized total reduction of criteria by feature (Gini importance). 
""" check_is_fitted(self) return self.tree_.compute_feature_importances() def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.sparse = True return tags # ============================================================================= # Public estimators # =============================================================================
BaseDecisionTree
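BaseDecisionTree is abstract, so its shared machinery (get_depth, get_n_leaves, predict, apply, cost_complexity_pruning_path) is easiest to exercise through a concrete subclass. A minimal sketch, assuming scikit-learn's public DecisionTreeClassifier and the bundled iris dataset:

from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
clf = DecisionTreeClassifier(max_depth=3, random_state=0).fit(X, y)
print(clf.get_depth(), clf.get_n_leaves())     # helpers inherited from the base class
print(clf.predict(X[:2]))                      # predicted class labels
print(clf.apply(X[:2]))                        # leaf index each sample lands in
path = clf.cost_complexity_pruning_path(X, y)  # Bunch with ccp_alphas and impurities
print(path.ccp_alphas[:3])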
python
Textualize__textual
src/textual/_event_broker.py
{ "start": 73, "end": 158 }
class ____(Exception): """Raised when a handler isn't found in the meta."""
NoHandler
python
huggingface__transformers
src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py
{ "start": 123514, "end": 123606 }
class ____(Qwen3OmniMoeThinkerTextRotaryEmbedding): pass
Qwen3OmniMoeTalkerRotaryEmbedding
python
kamyu104__LeetCode-Solutions
Python/shortest-word-distance-iii.py
{ "start": 29, "end": 739 }
class ____(object): # @param {string[]} words # @param {string} word1 # @param {string} word2 # @return {integer} def shortestWordDistance(self, words, word1, word2): dist = float("inf") is_same = (word1 == word2) i, index1, index2 = 0, None, None while i < len(words): if words[i] == word1: if is_same and index1 is not None: dist = min(dist, abs(index1 - i)) index1 = i elif words[i] == word2: index2 = i if index1 is not None and index2 is not None: dist = min(dist, abs(index1 - index2)) i += 1 return dist
Solution
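A quick usage sketch for the class above, with an illustrative word list (values worked by hand):

words = ["practice", "makes", "perfect", "coding", "makes"]
sol = Solution()
print(sol.shortestWordDistance(words, "coding", "makes"))  # 1 (indices 3 and 4)
print(sol.shortestWordDistance(words, "makes", "makes"))   # 3 (indices 1 and 4, same word)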
python
pytorch__pytorch
torch/fx/graph_module.py
{ "start": 1155, "end": 5385 }
class ____: def __init__(self): self.eval_cache = {} self.next_id = 0 def cache(self, src: str, globals: dict[str, Any], co_fields=None): """Store the source in a private cache, and add a lazy entry in linecache that allows the source to be retrieved by 'filename'. Args: src (str): The module source to cache globals (dict): The module globals Returns: str: The cache key (and dummy filename) generated for src. """ key = self._get_key() if co_fields: if "co_filename" in co_fields: # If only co_filename is provided, use it directly as the key if "co_firstlineno" not in co_fields or "co_name" not in co_fields: key = co_fields["co_filename"] else: # Full co_fields with all three components key += f" from {co_fields['co_filename']}:{co_fields['co_firstlineno']} in {co_fields['co_name']}" self.eval_cache[key] = src # Don't mutate globals so that this loader is only used # to populate linecache, and doesn't interact with other modules # that might check `__loader__` globals_copy = globals.copy() globals_copy["__file__"] = key globals_copy["__name__"] = key globals_copy["__loader__"] = self linecache.lazycache(key, globals_copy) return key # Part of the loader protocol (PEP 302) # linecache will use this method when trying to find source code def get_source(self, module_name) -> Optional[str]: if module_name in self.eval_cache: return self.eval_cache[module_name] return None def _get_key(self): key = f"<eval_with_key>.{self.next_id}" self.next_id += 1 return key _loader = _EvalCacheLoader() def _exec_with_source(src: str, globals: dict[str, Any], co_fields=None): key = _loader.cache(src, globals, co_fields) exec(compile(src, key, "exec"), globals) def _forward_from_src(src: str, globals: dict[str, Any], co_fields=None): return _method_from_src( method_name="forward", src=src, globals=globals, co_fields=co_fields ) def _method_from_src( method_name: str, src: str, globals: dict[str, Any], co_fields=None ) -> Callable: # avoid mutating the passed in dict globals_copy = globals.copy() _exec_with_source(src, globals_copy, co_fields) fn = globals_copy[method_name] del globals_copy[method_name] return fn def _format_import_statement(name: str, obj: Any, importer: Importer) -> str: if name in _custom_builtins: return _custom_builtins[name].import_str if _is_from_torch(name): return "import torch" module_name, attr_name = importer.get_name(obj) return f"from {module_name} import {attr_name} as {name}" def _format_import_block(globals: dict[str, Any], importer: Importer): import_strs: set[str] = { _format_import_statement(name, obj, importer) for name, obj in globals.items() } # Sort the imports so we have a stable import block that allows us to # hash the graph module and get a consistent key for use in a cache. 
return "\n".join(sorted(import_strs)) @compatibility(is_backward_compatible=True) def reduce_graph_module(body: dict[Any, Any], import_block: str) -> torch.nn.Module: # BC: attribute name was changed from `code` to `_code` to facilitate # making `code` into a property and adding a docstring to it fn_src = body.get("_code") or body["code"] forward = _forward_from_src(import_block + fn_src, {}) return _deserialize_graph_module(forward, body) @compatibility(is_backward_compatible=True) def reduce_package_graph_module( importer: PackageImporter, body: dict[Any, Any], generated_module_name: str ) -> torch.nn.Module: forward = importer.import_module(generated_module_name).forward return _deserialize_graph_module(forward, body) # We create a dummy class here because symbolic_trace pulls the forward() # function off of the class, rather than the instance. This class is used # in _deserialize_graph_module() below.
_EvalCacheLoader
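The non-obvious part of _EvalCacheLoader is the linecache handshake: lazycache stores a closure over the loader's get_source, so a traceback into generated code can resolve source lines on demand. A standalone sketch of that protocol under illustrative names (not the torch internals):

import linecache

class MiniLoader:
    """Illustrative PEP 302-style loader that serves generated source."""

    def __init__(self):
        self.sources = {}

    def cache(self, src, globals_dict, key="<generated>.0"):
        self.sources[key] = src
        g = globals_dict.copy()      # keep the loader out of the caller's globals
        g.update(__file__=key, __name__=key, __loader__=self)
        linecache.lazycache(key, g)  # defers to get_source() on first lookup
        return key

    def get_source(self, module_name):  # called lazily by linecache
        return self.sources.get(module_name)

loader = MiniLoader()
src = "def forward(x):\n    return x + 1\n"
scope = {}
key = loader.cache(src, scope)
exec(compile(src, key, "exec"), scope)
print(scope["forward"](1))        # 2
print(linecache.getline(key, 1))  # 'def forward(x):' resolved through the loader

Note the key ends in a digit: linecache ignores names that both start with '<' and end with '>', which is presumably why the real keys take the form '<eval_with_key>.{n}'.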
python
ansible__ansible
lib/ansible/module_utils/facts/system/distribution.py
{ "start": 33010, "end": 33610 }
class ____(BaseFactCollector): name = 'distribution' _fact_ids = set(['distribution_version', 'distribution_release', 'distribution_major_version', 'os_family']) # type: t.Set[str] def collect(self, module=None, collected_facts=None): collected_facts = collected_facts or {} facts_dict = {} if not module: return facts_dict distribution = Distribution(module=module) distro_facts = distribution.get_distribution_facts() return distro_facts
DistributionFactCollector
python
kubernetes-client__python
kubernetes/client/models/v1_resource_slice_spec.py
{ "start": 383, "end": 12578 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'all_nodes': 'bool', 'devices': 'list[V1Device]', 'driver': 'str', 'node_name': 'str', 'node_selector': 'V1NodeSelector', 'per_device_node_selection': 'bool', 'pool': 'V1ResourcePool', 'shared_counters': 'list[V1CounterSet]' } attribute_map = { 'all_nodes': 'allNodes', 'devices': 'devices', 'driver': 'driver', 'node_name': 'nodeName', 'node_selector': 'nodeSelector', 'per_device_node_selection': 'perDeviceNodeSelection', 'pool': 'pool', 'shared_counters': 'sharedCounters' } def __init__(self, all_nodes=None, devices=None, driver=None, node_name=None, node_selector=None, per_device_node_selection=None, pool=None, shared_counters=None, local_vars_configuration=None): # noqa: E501 """V1ResourceSliceSpec - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._all_nodes = None self._devices = None self._driver = None self._node_name = None self._node_selector = None self._per_device_node_selection = None self._pool = None self._shared_counters = None self.discriminator = None if all_nodes is not None: self.all_nodes = all_nodes if devices is not None: self.devices = devices self.driver = driver if node_name is not None: self.node_name = node_name if node_selector is not None: self.node_selector = node_selector if per_device_node_selection is not None: self.per_device_node_selection = per_device_node_selection self.pool = pool if shared_counters is not None: self.shared_counters = shared_counters @property def all_nodes(self): """Gets the all_nodes of this V1ResourceSliceSpec. # noqa: E501 AllNodes indicates that all nodes have access to the resources in the pool. Exactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set. # noqa: E501 :return: The all_nodes of this V1ResourceSliceSpec. # noqa: E501 :rtype: bool """ return self._all_nodes @all_nodes.setter def all_nodes(self, all_nodes): """Sets the all_nodes of this V1ResourceSliceSpec. AllNodes indicates that all nodes have access to the resources in the pool. Exactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set. # noqa: E501 :param all_nodes: The all_nodes of this V1ResourceSliceSpec. # noqa: E501 :type: bool """ self._all_nodes = all_nodes @property def devices(self): """Gets the devices of this V1ResourceSliceSpec. # noqa: E501 Devices lists some or all of the devices in this pool. Must not have more than 128 entries. # noqa: E501 :return: The devices of this V1ResourceSliceSpec. # noqa: E501 :rtype: list[V1Device] """ return self._devices @devices.setter def devices(self, devices): """Sets the devices of this V1ResourceSliceSpec. Devices lists some or all of the devices in this pool. Must not have more than 128 entries. # noqa: E501 :param devices: The devices of this V1ResourceSliceSpec. # noqa: E501 :type: list[V1Device] """ self._devices = devices @property def driver(self): """Gets the driver of this V1ResourceSliceSpec. # noqa: E501 Driver identifies the DRA driver providing the capacity information. 
A field selector can be used to list only ResourceSlice objects with a certain driver name. Must be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. This field is immutable. # noqa: E501 :return: The driver of this V1ResourceSliceSpec. # noqa: E501 :rtype: str """ return self._driver @driver.setter def driver(self, driver): """Sets the driver of this V1ResourceSliceSpec. Driver identifies the DRA driver providing the capacity information. A field selector can be used to list only ResourceSlice objects with a certain driver name. Must be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. This field is immutable. # noqa: E501 :param driver: The driver of this V1ResourceSliceSpec. # noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and driver is None: # noqa: E501 raise ValueError("Invalid value for `driver`, must not be `None`") # noqa: E501 self._driver = driver @property def node_name(self): """Gets the node_name of this V1ResourceSliceSpec. # noqa: E501 NodeName identifies the node which provides the resources in this pool. A field selector can be used to list only ResourceSlice objects belonging to a certain node. This field can be used to limit access from nodes to ResourceSlices with the same node name. It also indicates to autoscalers that adding new nodes of the same type as some old node might also make new resources available. Exactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set. This field is immutable. # noqa: E501 :return: The node_name of this V1ResourceSliceSpec. # noqa: E501 :rtype: str """ return self._node_name @node_name.setter def node_name(self, node_name): """Sets the node_name of this V1ResourceSliceSpec. NodeName identifies the node which provides the resources in this pool. A field selector can be used to list only ResourceSlice objects belonging to a certain node. This field can be used to limit access from nodes to ResourceSlices with the same node name. It also indicates to autoscalers that adding new nodes of the same type as some old node might also make new resources available. Exactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set. This field is immutable. # noqa: E501 :param node_name: The node_name of this V1ResourceSliceSpec. # noqa: E501 :type: str """ self._node_name = node_name @property def node_selector(self): """Gets the node_selector of this V1ResourceSliceSpec. # noqa: E501 :return: The node_selector of this V1ResourceSliceSpec. # noqa: E501 :rtype: V1NodeSelector """ return self._node_selector @node_selector.setter def node_selector(self, node_selector): """Sets the node_selector of this V1ResourceSliceSpec. :param node_selector: The node_selector of this V1ResourceSliceSpec. # noqa: E501 :type: V1NodeSelector """ self._node_selector = node_selector @property def per_device_node_selection(self): """Gets the per_device_node_selection of this V1ResourceSliceSpec. # noqa: E501 PerDeviceNodeSelection defines whether the access from nodes to resources in the pool is set on the ResourceSlice level or on each device. If it is set to true, every device defined the ResourceSlice must specify this individually. Exactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set. # noqa: E501 :return: The per_device_node_selection of this V1ResourceSliceSpec. 
# noqa: E501 :rtype: bool """ return self._per_device_node_selection @per_device_node_selection.setter def per_device_node_selection(self, per_device_node_selection): """Sets the per_device_node_selection of this V1ResourceSliceSpec. PerDeviceNodeSelection defines whether the access from nodes to resources in the pool is set on the ResourceSlice level or on each device. If it is set to true, every device defined the ResourceSlice must specify this individually. Exactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set. # noqa: E501 :param per_device_node_selection: The per_device_node_selection of this V1ResourceSliceSpec. # noqa: E501 :type: bool """ self._per_device_node_selection = per_device_node_selection @property def pool(self): """Gets the pool of this V1ResourceSliceSpec. # noqa: E501 :return: The pool of this V1ResourceSliceSpec. # noqa: E501 :rtype: V1ResourcePool """ return self._pool @pool.setter def pool(self, pool): """Sets the pool of this V1ResourceSliceSpec. :param pool: The pool of this V1ResourceSliceSpec. # noqa: E501 :type: V1ResourcePool """ if self.local_vars_configuration.client_side_validation and pool is None: # noqa: E501 raise ValueError("Invalid value for `pool`, must not be `None`") # noqa: E501 self._pool = pool @property def shared_counters(self): """Gets the shared_counters of this V1ResourceSliceSpec. # noqa: E501 SharedCounters defines a list of counter sets, each of which has a name and a list of counters available. The names of the SharedCounters must be unique in the ResourceSlice. The maximum number of counters in all sets is 32. # noqa: E501 :return: The shared_counters of this V1ResourceSliceSpec. # noqa: E501 :rtype: list[V1CounterSet] """ return self._shared_counters @shared_counters.setter def shared_counters(self, shared_counters): """Sets the shared_counters of this V1ResourceSliceSpec. SharedCounters defines a list of counter sets, each of which has a name and a list of counters available. The names of the SharedCounters must be unique in the ResourceSlice. The maximum number of counters in all sets is 32. # noqa: E501 :param shared_counters: The shared_counters of this V1ResourceSliceSpec. # noqa: E501 :type: list[V1CounterSet] """ self._shared_counters = shared_counters def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1ResourceSliceSpec): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1ResourceSliceSpec): return True return self.to_dict() != other.to_dict()
V1ResourceSliceSpec
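A hedged construction sketch for the generated model above. driver and pool are the two required fields (their setters reject None); the V1ResourcePool field names used here are my assumption from the matching generated client:

from kubernetes.client import V1ResourcePool, V1ResourceSliceSpec

pool = V1ResourcePool(name="gpu-pool", generation=1, resource_slice_count=1)  # assumed fields
spec = V1ResourceSliceSpec(driver="dra.example.com", pool=pool, all_nodes=True)
print(spec.driver)                  # dra.example.com
print(spec.to_dict()["all_nodes"])  # True (to_dict keys use the python attribute names)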
python
aimacode__aima-python
deep_learning4e.py
{ "start": 1870, "end": 2111 }
class ____(Activation): def __init__(self, alpha=0.01): self.alpha = alpha def function(self, x): return max(x, self.alpha * x) def derivative(self, value): return 1 if value > 0 else self.alpha
LeakyReLU
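Behavior check for the activation above; values are worked by hand, and the import assumes the repo-root module layout shown in the record header:

from deep_learning4e import LeakyReLU

act = LeakyReLU(alpha=0.1)
print(act.function(-2))    # max(-2, -0.2) = -0.2
print(act.function(3))     # 3 (identity on the positive side)
print(act.derivative(-1))  # 0.1 (the leaky slope)
print(act.derivative(2))   # 1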
python
pytorch__pytorch
test/dynamo/test_higher_order_ops.py
{ "start": 173965, "end": 176446 }
class ____(torch.nn.Module): def forward(self, L_x_: "f32[3, 3, 3]"): l_x_ = L_x_ _saved_tensors_hooks_disable = torch._C._autograd._saved_tensors_hooks_disable("torch.func.{grad, vjp, jacrev, hessian} don't yet support saved tensor hooks. Please open an issue with your use case."); _saved_tensors_hooks_disable = None _grad_increment_nesting = torch._C._functorch._grad_increment_nesting(); _grad_increment_nesting = None diff_args: "f32[3, 3, 3]" = torch._C._functorch._wrap_for_grad(l_x_, 1); l_x_ = None set_inplace_requires_grad_allowed = torch._C._functorch.set_inplace_requires_grad_allowed(True); set_inplace_requires_grad_allowed = None _set_tensor_requires_grad: "f32[3, 3, 3]" = torch._functorch.eager_transforms._set_tensor_requires_grad(diff_args); _set_tensor_requires_grad = None set_inplace_requires_grad_allowed_1 = torch._C._functorch.set_inplace_requires_grad_allowed(False); set_inplace_requires_grad_allowed_1 = None sin: "f32[3, 3, 3]" = diff_args.sin() add: "f32[3, 3, 3]" = sin + 3.14; sin = None output: "f32[]" = add.sum(); add = None _autograd_grad = torch._functorch.eager_transforms._autograd_grad((output,), [diff_args], create_graph = True); diff_args = None grad_input: "f32[3, 3, 3]" = _autograd_grad[0]; _autograd_grad = None grad_input_1: "f32[3, 3, 3]" = torch._C._functorch._unwrap_for_grad(grad_input, 1); grad_input = None output_1: "f32[]" = torch._C._functorch._unwrap_for_grad(output, 1); output = output_1 = None _grad_decrement_nesting = torch._C._functorch._grad_decrement_nesting(); _grad_decrement_nesting = None _saved_tensors_hooks_enable = torch._C._autograd._saved_tensors_hooks_enable(); _saved_tensors_hooks_enable = None return (grad_input_1,) """, ) def test_grad_has_aux(self): counters.clear() y = 3.14 def fn(x): return ((x.sin() + y).sum(), x.cos()) def wrapper_fn(x): return torch.func.grad(fn, has_aux=True)(x) x = torch.randn(3, 3, 3) wrapped_gm = self._compile_check(wrapper_fn, (x,)) # Dynamic shapes produce a slightly different graph. if check_dynamic_shape_capture(): return actual = normalize_gm(wrapped_gm.print_readable(print_output=False)) self.assertExpectedInline( actual, """\
GraphModule
python
apache__airflow
providers/google/tests/unit/google/cloud/operators/test_dlp.py
{ "start": 13179, "end": 14062 }
class ____: @mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook") def test_get_deidentify_template(self, mock_hook): mock_hook.return_value.get_deidentify_template.return_value = DeidentifyTemplate() operator = CloudDLPGetDeidentifyTemplateOperator( template_id=TEMPLATE_ID, organization_id=ORGANIZATION_ID, task_id="id" ) operator.execute(context=mock.MagicMock()) mock_hook.assert_called_once_with( gcp_conn_id=GCP_CONN_ID, impersonation_chain=None, ) mock_hook.return_value.get_deidentify_template.assert_called_once_with( template_id=TEMPLATE_ID, organization_id=ORGANIZATION_ID, project_id=None, retry=DEFAULT, timeout=None, metadata=(), )
TestCloudDLPGetDeidentifyTemplateOperator
python
airbytehq__airbyte
airbyte-integrations/bases/connector-acceptance-test/connector_acceptance_test/config.py
{ "start": 13339, "end": 13727 }
class ____(GenericModel, Generic[TestConfigT]): bypass_reason: Optional[str] tests: Optional[List[TestConfigT]] @validator("tests", always=True) def no_bypass_reason_when_tests_is_set(cls, tests, values): if tests and values.get("bypass_reason"): raise ValueError("You can't set a bypass_reason if tests are set.") return tests
GenericTestConfig
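The validator above forbids combining a bypass_reason with concrete tests. A hedged sketch of that behavior, parametrizing the generic with a made-up stand-in for TestConfigT (pydantic v1 style, matching GenericModel):

from pydantic import BaseModel, ValidationError

from connector_acceptance_test.config import GenericTestConfig

class DummyTestConfig(BaseModel):  # illustrative stand-in for TestConfigT
    name: str

GenericTestConfig[DummyTestConfig](tests=[DummyTestConfig(name="t1")])  # ok
GenericTestConfig[DummyTestConfig](bypass_reason="not applicable")      # ok
try:
    GenericTestConfig[DummyTestConfig](bypass_reason="x", tests=[DummyTestConfig(name="t1")])
except ValidationError as err:
    print(err)  # You can't set a bypass_reason if tests are set.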
python
PrefectHQ__prefect
tests/test_flows.py
{ "start": 114052, "end": 121736 }
class ____: def test_noniterable_hook_raises(self): def crashed_hook(): pass with pytest.raises( TypeError, match=re.escape( "Expected iterable for 'on_crashed'; got function instead. Please" " provide a list of hooks to 'on_crashed':\n\n" "@flow(on_crashed=[hook1, hook2])\ndef my_flow():\n\tpass" ), ): @flow(on_crashed=crashed_hook) def flow1(): pass def test_noncallable_hook_raises(self): with pytest.raises( TypeError, match=re.escape( "Expected callables in 'on_crashed'; got str instead. Please provide a" " list of hooks to 'on_crashed':\n\n" "@flow(on_crashed=[hook1, hook2])\ndef my_flow():\n\tpass" ), ): @flow(on_crashed=["test"]) def flow1(): pass def test_callable_noncallable_hook_raises(self): def crashed_hook(): pass with pytest.raises( TypeError, match=re.escape( "Expected callables in 'on_crashed'; got str instead. Please provide a" " list of hooks to 'on_crashed':\n\n" "@flow(on_crashed=[hook1, hook2])\ndef my_flow():\n\tpass" ), ): @flow(on_crashed=[crashed_hook, "test"]) def flow2(): pass def test_decorated_on_crashed_hooks_run_on_crashed_state(self): my_mock = MagicMock() @flow def my_flow(): return State(type=StateType.CRASHED) @my_flow.on_crashed def crashed_hook1(flow, flow_run, state): my_mock("crashed_hook1") @my_flow.on_crashed def crashed_hook2(flow, flow_run, state): my_mock("crashed_hook2") my_flow(return_state=True) assert my_mock.mock_calls == [call("crashed_hook1"), call("crashed_hook2")] def test_on_crashed_hooks_run_on_crashed_state(self): my_mock = MagicMock() def crashed_hook1(flow, flow_run, state): my_mock("crashed_hook1") def crashed_hook2(flow, flow_run, state): my_mock("crashed_hook2") @flow(on_crashed=[crashed_hook1, crashed_hook2]) def my_flow(): return State(type=StateType.CRASHED) my_flow(return_state=True) assert my_mock.mock_calls == [call("crashed_hook1"), call("crashed_hook2")] def test_on_crashed_hooks_are_ignored_if_terminal_state_completed(self): my_mock = MagicMock() def crashed_hook1(flow, flow_run, state): my_mock("crashed_hook1") def crashed_hook2(flow, flow_run, state): my_mock("crashed_hook2") @flow(on_crashed=[crashed_hook1, crashed_hook2]) def my_passing_flow(): pass state = my_passing_flow(return_state=True) assert state.type == StateType.COMPLETED my_mock.assert_not_called() def test_on_crashed_hooks_are_ignored_if_terminal_state_failed(self): my_mock = MagicMock() def crashed_hook1(flow, flow_run, state): my_mock("crashed_hook1") def crashed_hook2(flow, flow_run, state): my_mock("crashed_hook2") @flow(on_crashed=[crashed_hook1, crashed_hook2]) def my_failing_flow(): raise Exception("Failing flow") state = my_failing_flow(return_state=True) assert state.type == StateType.FAILED my_mock.assert_not_called() def test_other_crashed_hooks_run_if_one_hook_fails(self): my_mock = MagicMock() def crashed1(flow, flow_run, state): my_mock("crashed1") def crashed2(flow, flow_run, state): raise Exception("Failing flow") def crashed3(flow, flow_run, state): my_mock("crashed3") @flow(on_crashed=[crashed1, crashed2, crashed3]) def my_flow(): return State(type=StateType.CRASHED) my_flow(return_state=True) assert my_mock.mock_calls == [call("crashed1"), call("crashed3")] @pytest.mark.parametrize( "hook1, hook2", [ (create_hook, create_hook), (create_hook, create_async_hook), (create_async_hook, create_hook), (create_async_hook, create_async_hook), ], ) def test_on_crashed_hooks_work_with_sync_and_async(self, hook1, hook2): my_mock = MagicMock() hook1_with_mock = hook1(my_mock) hook2_with_mock = hook2(my_mock) @flow(on_crashed=[hook1_with_mock, 
hook2_with_mock]) def my_flow(): return State(type=StateType.CRASHED) my_flow(return_state=True) assert my_mock.mock_calls == [call(), call()] def test_on_crashed_hook_on_subflow_succeeds(self): my_mock = MagicMock() def crashed1(flow, flow_run, state): my_mock("crashed1") def failed1(flow, flow_run, state): my_mock("failed1") @flow(on_crashed=[crashed1]) def subflow(): return State(type=StateType.CRASHED) @flow(on_failure=[failed1]) def my_flow(): subflow() my_flow(return_state=True) assert my_mock.mock_calls == [call("crashed1"), call("failed1")] # runner handles running on crashed hooks by monitoring the process the flow is running in @pytest.mark.skip(reason="Fails with new engine, passed on old engine") async def test_on_crashed_hook_called_on_sigterm_from_flow_without_cancelling_state( self, mock_sigterm_handler ): my_mock = MagicMock() def crashed(flow, flow_run, state): my_mock("crashed") @flow(on_crashed=[crashed]) def my_flow(): # terminate process with SIGTERM os.kill(os.getpid(), signal.SIGTERM) with pytest.raises(prefect.exceptions.TerminationSignal): await my_flow(return_state=True) assert my_mock.mock_calls == [call("crashed")] async def test_on_crashed_hook_called_on_sigterm_from_flow_with_cancelling_state( self, mock_sigterm_handler ): my_mock = MagicMock() def crashed(flow, flow_run, state): my_mock("crashed") @task async def cancel_parent(): async with get_client() as client: await client.set_flow_run_state( runtime.flow_run.id, State(type=StateType.CANCELLING), force=True ) @flow(on_crashed=[crashed]) async def my_flow(): # simulate user cancelling flow run from UI await cancel_parent() # simulate worker cancellation of flow run os.kill(os.getpid(), signal.SIGTERM) with pytest.raises(prefect.exceptions.TerminationSignal): await my_flow(return_state=True) my_mock.assert_called_once() def test_on_crashed_hooks_respect_env_var(self, monkeypatch): my_mock = MagicMock() monkeypatch.setenv("PREFECT__ENABLE_CANCELLATION_AND_CRASHED_HOOKS", "false") def crashed_hook1(flow, flow_run, state): my_mock("crashed_hook1") def crashed_hook2(flow, flow_run, state): my_mock("crashed_hook2") @flow(on_crashed=[crashed_hook1, crashed_hook2]) def my_flow(): return State(type=StateType.CRASHED) state = my_flow(return_state=True) assert state.type == StateType.CRASHED my_mock.assert_not_called()
TestFlowHooksOnCrashed
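Distilled from the tests above, the runtime pattern they exercise is small; a sketch, assuming State and StateType are importable from prefect.states:

from prefect import flow
from prefect.states import State, StateType

def crashed_hook(flow, flow_run, state):
    print("crashed hook fired:", state.type)

@flow(on_crashed=[crashed_hook])
def my_flow():
    return State(type=StateType.CRASHED)  # force a CRASHED terminal state

my_flow(return_state=True)  # the hook runs; a COMPLETED or FAILED state would skip it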
python
huggingface__transformers
src/transformers/models/rag/retrieval_rag.py
{ "start": 8275, "end": 10498 }
class ____(Index): def __init__(self, vector_size, dataset, index_initialized=False): requires_backends(self, ["faiss"]) self.vector_size = vector_size self.dataset = dataset self._index_initialized = index_initialized self._check_dataset_format(with_index=index_initialized) dataset.set_format("numpy", columns=["embeddings"], output_all_columns=True, dtype="float32") def _check_dataset_format(self, with_index: bool): if not isinstance(self.dataset, Dataset): raise TypeError(f"Dataset should be a datasets.Dataset object, but got {type(self.dataset)}") if len({"title", "text", "embeddings"} - set(self.dataset.column_names)) > 0: raise ValueError( "Dataset should be a dataset with the following columns: " "title (str), text (str) and embeddings (arrays of dimension vector_size), " f"but got columns {self.dataset.column_names}" ) if with_index and "embeddings" not in self.dataset.list_indexes(): raise ValueError( "Missing faiss index in the dataset. Make sure you called `dataset.add_faiss_index` to compute it " "or `dataset.load_faiss_index` to load one from the disk." ) def init_index(self): raise NotImplementedError() def is_initialized(self): return self._index_initialized def get_doc_dicts(self, doc_ids: np.ndarray) -> list[dict]: return [self.dataset[doc_ids[i].tolist()] for i in range(doc_ids.shape[0])] def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> tuple[np.ndarray, np.ndarray]: _, ids = self.dataset.search_batch("embeddings", question_hidden_states, n_docs) docs = [self.dataset[[i for i in indices if i >= 0]] for indices in ids] vectors = [doc["embeddings"] for doc in docs] for i in range(len(vectors)): if len(vectors[i]) < n_docs: vectors[i] = np.vstack([vectors[i], np.zeros((n_docs - len(vectors[i]), self.vector_size))]) return np.array(ids), np.array(vectors) # shapes (batch_size, n_docs) and (batch_size, n_docs, d)
HFIndexBase
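A hedged usage sketch, not taken from the transformers repo: the toy data, dimension, and variable names are invented, and it assumes the `datasets` and `faiss` packages are installed.

import numpy as np
from datasets import Dataset

d = 4  # toy vector size
ds = Dataset.from_dict({
    "title": ["a", "b"],
    "text": ["doc a", "doc b"],
    "embeddings": [np.ones(d, dtype="float32"), np.zeros(d, dtype="float32")],
})
ds.add_faiss_index("embeddings")  # required before index_initialized=True

index = HFIndexBase(vector_size=d, dataset=ds, index_initialized=True)
ids, vectors = index.get_top_docs(np.ones((1, d), dtype="float32"), n_docs=2)
print(ids.shape, vectors.shape)  # (1, 2) and (1, 2, 4)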
python
sympy__sympy
sympy/vector/operators.py
{ "start": 719, "end": 1194 }
class ____(Expr):
    """
    Represents unevaluated Gradient.

    Examples
    ========

    >>> from sympy.vector import CoordSys3D, Gradient
    >>> R = CoordSys3D('R')
    >>> s = R.x*R.y*R.z
    >>> Gradient(s)
    Gradient(R.x*R.y*R.z)

    """

    def __new__(cls, expr):
        expr = sympify(expr)
        obj = Expr.__new__(cls, expr)
        obj._expr = expr
        return obj

    def doit(self, **hints):
        return gradient(self._expr, doit=True)
Gradient
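For reference, `doit()` evaluates the deferred gradient through the `gradient` helper, continuing the docstring's own example:

>>> from sympy.vector import CoordSys3D, Gradient
>>> R = CoordSys3D('R')
>>> Gradient(R.x*R.y*R.z).doit()
R.y*R.z*R.i + R.x*R.z*R.j + R.x*R.y*R.k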
python
openai__openai-python
src/openai/types/fine_tuning/dpo_hyperparameters.py
{ "start": 220, "end": 1064 }
class ____(BaseModel):
    batch_size: Union[Literal["auto"], int, None] = None
    """Number of examples in each batch.

    A larger batch size means that model parameters are updated less frequently,
    but with lower variance.
    """

    beta: Union[Literal["auto"], float, None] = None
    """The beta value for the DPO method.

    A higher beta value will increase the weight of the penalty between the policy
    and reference model.
    """

    learning_rate_multiplier: Union[Literal["auto"], float, None] = None
    """Scaling factor for the learning rate.

    A smaller learning rate may be useful to avoid overfitting.
    """

    n_epochs: Union[Literal["auto"], int, None] = None
    """The number of epochs to train the model for.

    An epoch refers to one full cycle through the training dataset.
    """
DpoHyperparameters
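A quick construction sketch (the field values here are arbitrary): every field is optional and accepts either a number, the literal "auto", or nothing.

hp = DpoHyperparameters(batch_size="auto", beta=0.1, n_epochs=3)
print(hp.learning_rate_multiplier)       # None — unset fields stay None
print(hp.model_dump(exclude_none=True))  # {'batch_size': 'auto', 'beta': 0.1, 'n_epochs': 3}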
python
astropy__astropy
astropy/coordinates/angles/errors.py
{ "start": 1153, "end": 1712 }
class ____(AstropyWarning):
    """
    Raised when an hour value is 24.

    Parameters
    ----------
    hour : int, float
    """

    def __init__(self, hour, alternativeactionstr=None):
        self.hour = hour
        self.alternativeactionstr = alternativeactionstr

    def __str__(self):
        message = (
            f"'hour' was found to be '{self.hour}', which is not in range (-24, 24)."
        )
        if self.alternativeactionstr is not None:
            message += " " + self.alternativeactionstr
        return message
IllegalHourWarning
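Illustrative only: as a warning class it is typically passed to `warnings.warn` rather than raised, and `__str__` assembles the message from the two constructor arguments.

import warnings

w = IllegalHourWarning(24, alternativeactionstr="Treating as 0 hours.")
print(str(w))
# 'hour' was found to be '24', which is not in range (-24, 24). Treating as 0 hours.
warnings.warn(w)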
python
pytorch__pytorch
test/dynamo/test_subclasses.py
{ "start": 112712, "end": 113705 }
class ____(torch.nn.Module):
    def forward(
        self,
        primals_1: "f32[24]",  # SubclassGetAttrAOTInput(base=PlainAOTInput(idx=0), attr='a')
        primals_2: "f32[24]",  # SubclassGetAttrAOTInput(base=PlainAOTInput(idx=0), attr='b')
    ):
        clone: "f32[24]" = torch.ops.aten.clone.default(primals_1);  primals_1 = None
        clone_1: "f32[24]" = torch.ops.aten.clone.default(primals_2);  primals_2 = None

        view: "f32[3, 2, 4]" = torch.ops.aten.view.default(clone, [3, 2, 4]);  clone = None
        view_1: "f32[3, 2, 4]" = torch.ops.aten.view.default(clone_1, [3, 2, 4]);  clone_1 = None
        return (
            view,  # SubclassGetAttrAOTOutput(base=PlainAOTOutput(idx=0), attr='a')
            view_1,  # SubclassGetAttrAOTOutput(base=PlainAOTOutput(idx=0), attr='b')
        )
""",  # noqa: B950
        )

        self.assertExpectedInline(
            normalize_gm(bw[0].print_readable(print_output=False, expanded_def=True)),
            """\
GraphModule
python
conda__conda
conda/exceptions.py
{ "start": 19406, "end": 19550 }
class ____(ParseError):
    def __init__(self, reason: str):
        self.reason = reason
        super().__init__(self.args[0])
CouldntParseError
python
mwaskom__seaborn
tests/test_rcmod.py
{ "start": 1087, "end": 2310 }
class ____:

    @pytest.fixture(autouse=True)
    def reset_params(self):
        yield
        rcmod.reset_orig()

    def flatten_list(self, orig_list):

        iter_list = map(np.atleast_1d, orig_list)
        flat_list = [item for sublist in iter_list for item in sublist]
        return flat_list

    def assert_rc_params(self, params):

        for k, v in params.items():
            # Various subtle issues in matplotlib lead to unexpected
            # values for the backend rcParam, which isn't relevant here
            if k == "backend":
                continue
            if isinstance(v, np.ndarray):
                npt.assert_array_equal(mpl.rcParams[k], v)
            else:
                assert mpl.rcParams[k] == v

    def assert_rc_params_equal(self, params1, params2):

        for key, v1 in params1.items():
            # Various subtle issues in matplotlib lead to unexpected
            # values for the backend rcParam, which isn't relevant here
            if key == "backend":
                continue

            v2 = params2[key]
            if isinstance(v1, np.ndarray):
                npt.assert_array_equal(v1, v2)
            else:
                assert v1 == v2
RCParamFixtures
python
ray-project__ray
python/ray/_private/thirdparty/pynvml/pynvml.py
{ "start": 58097, "end": 58387 }
class ____(_PrintableStructure):
    _fields_ = [
        ('version', c_uint),
        ('total', c_ulonglong),
        ('reserved', c_ulonglong),
        ('free', c_ulonglong),
        ('used', c_ulonglong),
    ]
    _fmt_ = {'<default>': "%d B"}

nvmlMemory_v2 = 0x02000028
c_nvmlMemory_v2_t
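The version constant follows NVML's convention of packing the API version into the high byte and the struct size into the low bytes: 0x02000028 is version 2 with size 0x28 = 40 bytes (a 4-byte version field padded to 8, plus four 8-byte counters). A sketch, assuming a typical 64-bit ABI:

import ctypes

size = ctypes.sizeof(c_nvmlMemory_v2_t)  # 40 on common 64-bit platforms
assert nvmlMemory_v2 == (2 << 24) | size

mem = c_nvmlMemory_v2_t()
mem.version = nvmlMemory_v2  # NVML expects the version set before a query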
python
google__jax
jax/_src/test_loader.py
{ "start": 5960, "end": 7048 }
class ____(unittest.TestSuite):
  """Runs tests in parallel using threads if TEST_NUM_THREADS is > 1.

  Caution: this test suite does not run setUpClass or setUpModule methods if
  thread parallelism is enabled.
  """

  def __init__(self, suite: unittest.TestSuite):
    super().__init__(list(suite))

  def run(self, result: unittest.TestResult, debug: bool = False) -> unittest.TestResult:
    if TEST_NUM_THREADS.value <= 0:
      return super().run(result)

    test_warning_util.install_threadsafe_warning_handlers()
    executor = ThreadPoolExecutor(TEST_NUM_THREADS.value)
    lock = threading.Lock()
    futures = []

    def run_test(test):
      """Recursively runs tests in a test suite or test case."""
      if isinstance(test, unittest.TestSuite):
        for subtest in test:
          run_test(subtest)
      else:
        test_result = ThreadSafeTestResult(lock, result)
        futures.append(executor.submit(_run_one_test, test, test_result))

    with executor:
      run_test(self)

    for future in futures:
      future.result()

    return result
JaxTestSuite
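A hedged sketch of how such a suite could be driven; `SomeTest` is a hypothetical test case, and with the TEST_NUM_THREADS flag at its default of 0 the `run` method above simply falls back to the serial `unittest` path.

import unittest

class SomeTest(unittest.TestCase):  # hypothetical test case for illustration
    def test_add(self):
        self.assertEqual(1 + 1, 2)

loader = unittest.TestLoader()
suite = JaxTestSuite(loader.loadTestsFromTestCase(SomeTest))
result = unittest.TestResult()
suite.run(result)  # serial unless TEST_NUM_THREADS > 0
print(result.testsRun, result.wasSuccessful())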
python
python-attrs__attrs
typing-examples/baseline.py
{ "start": 400, "end": 556 }
class ____:
    x: int


ngf = NGFrozen(1)

attrs.fields(NGFrozen).x.evolve(eq=False)
a = attrs.fields(NGFrozen).x
a.evolve(repr=False)


@attrs.define
NGFrozen
python
pallets__itsdangerous
src/itsdangerous/timed.py
{ "start": 5955, "end": 8087 }
class ____(Serializer[_TSerialized]):
    """Uses :class:`TimestampSigner` instead of the default
    :class:`.Signer`.
    """

    default_signer: type[TimestampSigner] = TimestampSigner  # pyright: ignore

    def iter_unsigners(
        self, salt: str | bytes | None = None
    ) -> cabc.Iterator[TimestampSigner]:
        return t.cast("cabc.Iterator[TimestampSigner]", super().iter_unsigners(salt))

    # TODO: Signature is incompatible because parameters were added
    #  before salt.
    def loads(  # type: ignore[override]
        self,
        s: str | bytes,
        max_age: int | None = None,
        return_timestamp: bool = False,
        salt: str | bytes | None = None,
    ) -> t.Any:
        """Reverse of :meth:`dumps`, raises :exc:`.BadSignature` if the
        signature validation fails. If a ``max_age`` is provided it will
        ensure the signature is not older than that time in seconds. In
        case the signature is outdated, :exc:`.SignatureExpired` is
        raised. All arguments are forwarded to the signer's
        :meth:`~TimestampSigner.unsign` method.
        """
        s = want_bytes(s)
        last_exception = None

        for signer in self.iter_unsigners(salt):
            try:
                base64d, timestamp = signer.unsign(
                    s, max_age=max_age, return_timestamp=True
                )
                payload = self.load_payload(base64d)

                if return_timestamp:
                    return payload, timestamp

                return payload
            except SignatureExpired:
                # The signature was unsigned successfully but was
                # expired. Do not try the next signer.
                raise
            except BadSignature as err:
                last_exception = err

        raise t.cast(BadSignature, last_exception)

    def loads_unsafe(  # type: ignore[override]
        self,
        s: str | bytes,
        max_age: int | None = None,
        salt: str | bytes | None = None,
    ) -> tuple[bool, t.Any]:
        return self._loads_unsafe_impl(s, salt, load_kwargs={"max_age": max_age})
TimedSerializer
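The standard round trip through the public itsdangerous API: tokens carry a signing timestamp, and `loads` enforces `max_age`. The key, salt, and payload below are illustrative.

from itsdangerous import TimedSerializer, SignatureExpired

s = TimedSerializer("secret-key", salt="activate")
token = s.dumps({"user_id": 42})

try:
    data, ts = s.loads(token, max_age=3600, return_timestamp=True)
    print(data, ts.isoformat())
except SignatureExpired:
    print("token expired")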