column       type            range / values
language     stringclasses   1 value
repo         stringclasses   346 values
path         stringlengths   6 - 201
class_span   dict
source       stringlengths   21 - 2.38M
target       stringlengths   1 - 96
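To make the column layout above concrete, here is a small illustrative sketch of what a single record could look like as a Python dict. The field names follow the schema; the repo, path, offsets, and values are invented placeholders, not rows taken from the data below.

# Illustrative only: field names follow the schema above; every value is a
# made-up placeholder, not an actual record from this dataset.
example_row = {
    "language": "python",                        # stringclasses, 1 value
    "repo": "example_org__example_repo",         # stringclasses, 346 values
    "path": "src/example/module.py",             # stringlengths, 6-201
    "class_span": {"start": 120, "end": 840},    # dict: character offsets of the class in the file
    "source": "class ____:\n    pass",           # stringlengths, 21-2.38M; class body with its name masked
    "target": "ExampleClass",                    # stringlengths, 1-96; the masked class name
}

# A record's source keeps the mask that the target is meant to fill in:
assert "____" in example_row["source"]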
python
apache__airflow
providers/smtp/tests/unit/smtp/hooks/test_smtp.py
{ "start": 2404, "end": 17914 }
class ____: @pytest.fixture(autouse=True) def setup_connections(self, create_connection_without_db): create_connection_without_db( Connection( conn_id=CONN_ID_DEFAULT, conn_type=CONN_TYPE, host=SMTP_HOST, login=SMTP_LOGIN, password=SMTP_PASSWORD, port=DEFAULT_PORT, extra=json.dumps(dict(from_email=FROM_EMAIL)), ) ) create_connection_without_db( Connection( conn_id=CONN_ID_NONSSL, conn_type=CONN_TYPE, host=SMTP_HOST, login=SMTP_LOGIN, password=SMTP_PASSWORD, port=NONSSL_PORT, extra=json.dumps(dict(disable_ssl=True, from_email=FROM_EMAIL)), ) ) create_connection_without_db( Connection( conn_id=CONN_ID_OAUTH, conn_type=CONN_TYPE, host=SMTP_HOST, login=SMTP_LOGIN, password=SMTP_PASSWORD, port=NONSSL_PORT, extra=json.dumps(dict(disable_ssl=True, from_email=FROM_EMAIL, access_token=ACCESS_TOKEN)), ) ) create_connection_without_db( Connection( conn_id=CONN_ID_SSL_EXTRA, conn_type=CONN_TYPE, host=SMTP_HOST, login=None, password="None", port=DEFAULT_PORT, extra=json.dumps(dict(use_ssl=True, ssl_context="none", from_email=FROM_EMAIL)), ) ) @pytest.mark.parametrize( ("conn_id", "use_ssl", "expected_port", "create_context"), [ pytest.param(CONN_ID_DEFAULT, True, DEFAULT_PORT, True, id="ssl-connection"), pytest.param(CONN_ID_NONSSL, False, NONSSL_PORT, False, id="non-ssl-connection"), ], ) @patch(smtplib_string) @patch("ssl.create_default_context") def test_connect_and_disconnect( self, create_default_context, mock_smtplib, conn_id, use_ssl, expected_port, create_context ): """Test sync connection with different configurations.""" mock_conn = _create_fake_smtp(mock_smtplib, use_ssl=use_ssl) with SmtpHook(smtp_conn_id=conn_id): pass if create_context: assert create_default_context.called mock_smtplib.SMTP_SSL.assert_called_once_with( host=SMTP_HOST, port=expected_port, timeout=DEFAULT_TIMEOUT, context=create_default_context.return_value, ) else: mock_smtplib.SMTP.assert_called_once_with( host=SMTP_HOST, port=expected_port, timeout=DEFAULT_TIMEOUT, ) mock_conn.login.assert_called_once_with(SMTP_LOGIN, SMTP_PASSWORD) assert mock_conn.close.call_count == 1 @patch(smtplib_string) def test_get_email_address_single_email(self, mock_smtplib): with SmtpHook() as smtp_hook: assert smtp_hook._get_email_address_list(FROM_EMAIL) == [FROM_EMAIL] @pytest.mark.parametrize( "email_input", [ pytest.param(f"{FROM_EMAIL}, {TO_EMAIL}", id="comma_separated"), pytest.param(f"{FROM_EMAIL}; {TO_EMAIL}", id="semicolon_separated"), pytest.param([FROM_EMAIL, TO_EMAIL], id="list_input"), pytest.param((FROM_EMAIL, TO_EMAIL), id="tuple_input"), ], ) @patch(smtplib_string) def test_get_email_address_parsing(self, mock_smtplib, email_input): with SmtpHook() as smtp_hook: assert smtp_hook._get_email_address_list(email_input) == TEST_EMAILS @pytest.mark.parametrize( "invalid_input", [ pytest.param(1, id="invalid_scalar_type"), pytest.param([FROM_EMAIL, 2], id="invalid_type_in_list"), ], ) @patch(smtplib_string) def test_get_email_address_invalid_types(self, mock_smtplib, invalid_input): with pytest.raises(TypeError): with SmtpHook() as smtp_hook: smtp_hook._get_email_address_list(invalid_input) @patch(smtplib_string) def test_build_mime_message(self, mock_smtplib): mail_from = FROM_EMAIL mail_to = TO_EMAIL subject = TEST_SUBJECT html_content = TEST_BODY custom_headers = {"Reply-To": "reply_to@example.com"} with SmtpHook() as smtp_hook: msg, recipients = smtp_hook._build_mime_message( mail_from=mail_from, to=mail_to, subject=subject, html_content=html_content, custom_headers=custom_headers, ) assert "From" in msg assert "To" in msg 
assert "Subject" in msg assert "Reply-To" in msg assert [mail_to] == recipients assert msg["To"] == ",".join(recipients) @patch(smtplib_string) def test_send_smtp(self, mock_smtplib): mock_send_mime = mock_smtplib.SMTP_SSL().sendmail with SmtpHook() as smtp_hook, tempfile.NamedTemporaryFile() as attachment: attachment.write(b"attachment") attachment.seek(0) smtp_hook.send_email_smtp( to=TO_EMAIL, subject=TEST_SUBJECT, html_content=TEST_BODY, files=[attachment.name] ) assert mock_send_mime.called _, call_args = mock_send_mime.call_args assert call_args["from_addr"] == FROM_EMAIL assert call_args["to_addrs"] == [TO_EMAIL] msg = call_args["msg"] assert f"Subject: {TEST_SUBJECT}" in msg assert f"From: {FROM_EMAIL}" in msg filename = 'attachment; filename="' + os.path.basename(attachment.name) + '"' assert filename in msg mimeapp = MIMEApplication("attachment") assert mimeapp.get_payload() in msg @patch("airflow.providers.smtp.hooks.smtp.SmtpHook.get_connection") @patch(smtplib_string) def test_hook_conn(self, mock_smtplib, mock_hook_conn): mock_conn = Mock() mock_conn.login = SMTP_LOGIN mock_conn.password = SMTP_PASSWORD mock_conn.extra_dejson = {"disable_ssl": False} mock_hook_conn.return_value = mock_conn smtp_client_mock = mock_smtplib.SMTP_SSL() with SmtpHook() as smtp_hook: smtp_hook.send_email_smtp( to=TO_EMAIL, subject=TEST_SUBJECT, html_content=TEST_BODY, from_email=FROM_EMAIL, ) mock_hook_conn.assert_called_with(CONN_ID_DEFAULT) smtp_client_mock.login.assert_called_once_with(SMTP_LOGIN, SMTP_PASSWORD) smtp_client_mock.sendmail.assert_called_once() assert smtp_client_mock.close.called @pytest.mark.parametrize( ("conn_id", "ssl_context", "create_context_called", "use_default_context"), [ pytest.param(CONN_ID_DEFAULT, "default", True, True, id="default_context"), pytest.param(CONN_ID_SSL_EXTRA, "none", False, False, id="none_context"), pytest.param(CONN_ID_DEFAULT, "default", True, True, id="explicit_default_context"), ], ) @patch("smtplib.SMTP_SSL") @patch("smtplib.SMTP") @patch("ssl.create_default_context") def test_send_mime_ssl_context( self, create_default_context, mock_smtp, mock_smtp_ssl, conn_id, ssl_context, create_context_called, use_default_context, ): mock_smtp_ssl.return_value = Mock() with SmtpHook(conn_id) as smtp_hook: smtp_hook.send_email_smtp( to=TO_EMAIL, subject=TEST_SUBJECT, html_content=TEST_BODY, from_email=FROM_EMAIL ) assert not mock_smtp.called if use_default_context: assert create_default_context.called expected_context = create_default_context.return_value else: create_default_context.assert_not_called() expected_context = None mock_smtp_ssl.assert_called_once_with( host=SMTP_HOST, port=DEFAULT_PORT, timeout=DEFAULT_TIMEOUT, context=expected_context ) @patch("smtplib.SMTP_SSL") @patch("smtplib.SMTP") @patch("ssl.create_default_context") def test_send_mime_ssl(self, create_default_context, mock_smtp, mock_smtp_ssl): mock_smtp_ssl.return_value = Mock() with SmtpHook() as smtp_hook: smtp_hook.send_email_smtp( to=TO_EMAIL, subject=TEST_SUBJECT, html_content=TEST_BODY, from_email=FROM_EMAIL, ) assert not mock_smtp.called assert create_default_context.called mock_smtp_ssl.assert_called_once_with( host=SMTP_HOST, port=DEFAULT_PORT, timeout=DEFAULT_TIMEOUT, context=create_default_context.return_value, ) @patch("smtplib.SMTP_SSL") @patch("smtplib.SMTP") def test_send_mime_nossl(self, mock_smtp, mock_smtp_ssl): mock_smtp.return_value = Mock() with SmtpHook(smtp_conn_id=CONN_ID_NONSSL) as smtp_hook: smtp_hook.send_email_smtp( to=TO_EMAIL, subject=TEST_SUBJECT, 
html_content=TEST_BODY, from_email=FROM_EMAIL ) assert not mock_smtp_ssl.called mock_smtp.assert_called_once_with(host=SMTP_HOST, port=NONSSL_PORT, timeout=DEFAULT_TIMEOUT) @patch("smtplib.SMTP") def test_send_mime_noauth(self, mock_smtp, create_connection_without_db): mock_smtp.return_value = Mock() conn = Connection( conn_id="smtp_noauth", conn_type=CONN_TYPE, host=SMTP_HOST, login=None, password="None", port=NONSSL_PORT, extra=json.dumps(dict(disable_ssl=True, from_email=FROM_EMAIL)), ) create_connection_without_db(conn) with SmtpHook(smtp_conn_id="smtp_noauth") as smtp_hook: smtp_hook.send_email_smtp( to=TO_EMAIL, subject=TEST_SUBJECT, html_content=TEST_BODY, from_email=FROM_EMAIL, ) mock_smtp.assert_called_once_with(host=SMTP_HOST, port=NONSSL_PORT, timeout=DEFAULT_TIMEOUT) assert not mock_smtp.login.called @patch("smtplib.SMTP_SSL") @patch("smtplib.SMTP") def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl): with SmtpHook() as smtp_hook: smtp_hook.send_email_smtp( to=TO_EMAIL, subject=TEST_SUBJECT, html_content=TEST_BODY, dryrun=True, ) assert not mock_smtp.sendmail.called assert not mock_smtp_ssl.sendmail.called @patch("smtplib.SMTP_SSL") def test_send_mime_ssl_complete_failure(self, mock_smtp_ssl): mock_smtp_ssl().sendmail.side_effect = smtplib.SMTPServerDisconnected() with SmtpHook() as smtp_hook: with pytest.raises(smtplib.SMTPServerDisconnected): smtp_hook.send_email_smtp( to=TO_EMAIL, subject=TEST_SUBJECT, html_content=TEST_BODY, ) assert mock_smtp_ssl().sendmail.call_count == DEFAULT_RETRY_LIMIT @patch("email.message.Message.as_string") @patch("smtplib.SMTP_SSL") def test_send_mime_partial_failure(self, mock_smtp_ssl, mime_message_mock): mime_message_mock.return_value = "msg" final_mock = Mock() side_effects = [smtplib.SMTPServerDisconnected(), smtplib.SMTPServerDisconnected(), final_mock] mock_smtp_ssl.side_effect = side_effects with SmtpHook() as smtp_hook: smtp_hook.send_email_smtp( to=TO_EMAIL, subject=TEST_SUBJECT, html_content=TEST_BODY, ) assert mock_smtp_ssl.call_count == side_effects.index(final_mock) + 1 assert final_mock.starttls.called final_mock.sendmail.assert_called_once_with(from_addr=FROM_EMAIL, to_addrs=[TO_EMAIL], msg="msg") assert final_mock.close.called @patch("smtplib.SMTP_SSL") @patch("ssl.create_default_context") def test_send_mime_custom_timeout_retrylimit( self, create_default_context, mock_smtp_ssl, create_connection_without_db ): mock_smtp_ssl().sendmail.side_effect = smtplib.SMTPServerDisconnected() custom_retry_limit = 10 custom_timeout = 60 fake_conn = Connection( conn_id="mock_conn", conn_type=CONN_TYPE, host=SMTP_HOST, login=SMTP_LOGIN, password=SMTP_PASSWORD, port=DEFAULT_PORT, extra=json.dumps( dict(from_email=FROM_EMAIL, timeout=custom_timeout, retry_limit=custom_retry_limit) ), ) create_connection_without_db(fake_conn) with SmtpHook(smtp_conn_id="mock_conn") as smtp_hook: with pytest.raises(smtplib.SMTPServerDisconnected): smtp_hook.send_email_smtp(to=TO_EMAIL, subject=TEST_SUBJECT, html_content=TEST_BODY) expected_call = call( host=fake_conn.host, port=fake_conn.port, timeout=fake_conn.extra_dejson["timeout"], context=create_default_context.return_value, ) assert expected_call in mock_smtp_ssl.call_args_list assert create_default_context.called assert mock_smtp_ssl().sendmail.call_count == 10 @patch(smtplib_string) def test_oauth2_auth_called(self, mock_smtplib): mock_conn = _create_fake_smtp(mock_smtplib, use_ssl=False) with SmtpHook(smtp_conn_id=CONN_ID_OAUTH, auth_type="oauth2") as smtp_hook: smtp_hook.send_email_smtp( to=TO_EMAIL, 
subject=TEST_SUBJECT, html_content=TEST_BODY, from_email=FROM_EMAIL, ) assert mock_conn.auth.called args, _ = mock_conn.auth.call_args assert args[0] == "XOAUTH2" assert build_xoauth2_string(SMTP_LOGIN, ACCESS_TOKEN) == args[1]() @patch(smtplib_string) def test_oauth2_missing_token_raises(self, mock_smtplib, create_connection_without_db): mock_conn = _create_fake_smtp(mock_smtplib, use_ssl=False) create_connection_without_db( Connection( conn_id="smtp_oauth2_empty", conn_type=CONN_TYPE, host=SMTP_HOST, login=SMTP_LOGIN, password=SMTP_PASSWORD, port=NONSSL_PORT, extra=json.dumps(dict(disable_ssl=True, from_email=FROM_EMAIL)), ) ) with pytest.raises(AirflowException): with SmtpHook(smtp_conn_id="smtp_oauth2_empty", auth_type="oauth2") as h: h.send_email_smtp( to=TO_EMAIL, subject=TEST_SUBJECT, html_content=TEST_BODY, from_email=FROM_EMAIL, ) assert not mock_conn.auth.called @pytest.mark.asyncio @pytest.mark.skipif(not AIRFLOW_V_3_1_PLUS, reason="Async support was added to BaseNotifier in 3.1.0")
TestSmtpHook
python
kamyu104__LeetCode-Solutions
Python/maximize-profit-from-task-assignment.py
{ "start": 85, "end": 642 }
class ____(object):
    def maxProfit(self, workers, tasks):
        """
        :type workers: List[int]
        :type tasks: List[List[int]]
        :rtype: int
        """
        cnt = collections.defaultdict(int)
        for x in workers:
            cnt[x] += 1
        tasks.sort(key=lambda x: x[1], reverse=True)
        result = 0
        k = 1
        for s, p in tasks:
            if cnt[s]:
                cnt[s] -= 1
                result += p
            elif k:
                k -= 1
                result += p
        return result
Solution
python
walkccc__LeetCode
solutions/3378. Count Connected Components in LCM Graph/3378.py
{ "start": 554, "end": 829 }
class ____:
    def countComponents(self, nums: list[int], threshold: int) -> int:
        uf = UnionFind()
        for num in nums:
            for multiple in range(2 * num, threshold + 1, num):
                uf.unionByRank(num, multiple)
        return len(set(uf.find(num) for num in nums))
Solution
python
great-expectations__great_expectations
great_expectations/expectations/metrics/query_metrics/query_table/query_table.py
{ "start": 610, "end": 2553 }
class ____(QueryMetricProvider):
    metric_name = "query.table"
    value_keys = ("query",)

    @metric_value(engine=SqlAlchemyExecutionEngine)
    def _sqlalchemy(
        cls,
        execution_engine: SqlAlchemyExecutionEngine,
        metric_domain_kwargs: dict,
        metric_value_kwargs: dict,
        metrics: Dict[str, Any],
        runtime_configuration: dict,
    ) -> list[dict]:
        batch_selectable, _, _ = execution_engine.get_compute_domain(
            metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE
        )
        query = cls._get_query_from_metric_value_kwargs(metric_value_kwargs)
        substituted_batch_subquery = (
            cls._get_substituted_batch_subquery_from_query_and_batch_selectable(
                query=query,
                batch_selectable=batch_selectable,
                execution_engine=execution_engine,
            )
        )
        return cls._get_sqlalchemy_records_from_substituted_batch_subquery(
            substituted_batch_subquery=substituted_batch_subquery,
            execution_engine=execution_engine,
        )

    @metric_value(engine=SparkDFExecutionEngine)
    def _spark(
        cls,
        execution_engine: SparkDFExecutionEngine,
        metric_domain_kwargs: dict,
        metric_value_kwargs: dict,
        metrics: Dict[str, Any],
        runtime_configuration: dict,
    ) -> list[dict]:
        query = cls._get_query_from_metric_value_kwargs(metric_value_kwargs)

        df: pyspark.DataFrame
        df, _, _ = execution_engine.get_compute_domain(
            metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE
        )
        df.createOrReplaceTempView("tmp_view")
        query = query.format(batch="tmp_view")

        engine: pyspark.SparkSession = execution_engine.spark
        result: List[pyspark.Row] = engine.sql(query).limit(MAX_RESULT_RECORDS).collect()

        return [element.asDict() for element in result]
QueryTable
python
getsentry__sentry
src/sentry/grouping/enhancer/actions.py
{ "start": 6138, "end": 7805 }
class ____(EnhancementAction):
    _VALUE_PARSERS: dict[str, Callable[[Any], Any]] = {
        "max-frames": int,
        "min-frames": int,
        "category": lambda x: x,
    }
    _FRAME_VARIABLES = {"category"}

    def __init__(self, var: str, value: str) -> None:
        self.var = var
        self.is_classifier = self.var == "category"
        self.sets_contributes = self.var in ["min-frames", "max-frames"]

        try:
            self.value = VarAction._VALUE_PARSERS[var](value)
        except (ValueError, TypeError):
            raise InvalidEnhancerConfig(f"Invalid value '{value}' for '{var}'")
        except KeyError:
            raise InvalidEnhancerConfig(f"Unknown variable '{var}'")

        self._encoded_value = (
            self.value.encode("utf-8") if isinstance(self.value, str) else self.value
        )

    def __str__(self) -> str:
        return f"{self.var}={self.value}"

    def _to_config_structure(self, version: int) -> list[str | int]:
        # TODO: Can we switch this to a tuple so we can type it more exactly?
        return [self.var, self.value]

    def modify_stacktrace_state(self, state, rule) -> None:
        if self.var not in VarAction._FRAME_VARIABLES:
            state.set(self.var, self.value, rule)

    def apply_modifications_to_frame(
        self,
        frames: Sequence[dict[str, Any]],
        match_frames: list[MatchFrame],
        idx: int,
        rule: Any | None = None,
    ) -> None:
        if self.var == "category":
            frame = frames[idx]
            set_path(frame, "data", "category", value=self.value)
            match_frames[idx]["category"] = self._encoded_value
VarAction
python
scikit-learn__scikit-learn
sklearn/tree/_classes.py
{ "start": 55420, "end": 67361 }
class ____(DecisionTreeClassifier): """An extremely randomized tree classifier. Extra-trees differ from classic decision trees in the way they are built. When looking for the best split to separate the samples of a node into two groups, random splits are drawn for each of the `max_features` randomly selected features and the best split among those is chosen. When `max_features` is set 1, this amounts to building a totally random decision tree. Warning: Extra-trees should only be used within ensemble methods. Read more in the :ref:`User Guide <tree>`. Parameters ---------- criterion : {"gini", "entropy", "log_loss"}, default="gini" The function to measure the quality of a split. Supported criteria are "gini" for the Gini impurity and "log_loss" and "entropy" both for the Shannon information gain, see :ref:`tree_mathematical_formulation`. splitter : {"random", "best"}, default="random" The strategy used to choose the split at each node. Supported strategies are "best" to choose the best split and "random" to choose the best random split. max_depth : int, default=None The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples. min_samples_split : int or float, default=2 The minimum number of samples required to split an internal node: - If int, then consider `min_samples_split` as the minimum number. - If float, then `min_samples_split` is a fraction and `ceil(min_samples_split * n_samples)` are the minimum number of samples for each split. .. versionchanged:: 0.18 Added float values for fractions. min_samples_leaf : int or float, default=1 The minimum number of samples required to be at a leaf node. A split point at any depth will only be considered if it leaves at least ``min_samples_leaf`` training samples in each of the left and right branches. This may have the effect of smoothing the model, especially in regression. - If int, then consider `min_samples_leaf` as the minimum number. - If float, then `min_samples_leaf` is a fraction and `ceil(min_samples_leaf * n_samples)` are the minimum number of samples for each node. .. versionchanged:: 0.18 Added float values for fractions. min_weight_fraction_leaf : float, default=0.0 The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided. max_features : int, float, {"sqrt", "log2"} or None, default="sqrt" The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split. - If float, then `max_features` is a fraction and `max(1, int(max_features * n_features_in_))` features are considered at each split. - If "sqrt", then `max_features=sqrt(n_features)`. - If "log2", then `max_features=log2(n_features)`. - If None, then `max_features=n_features`. .. versionchanged:: 1.1 The default of `max_features` changed from `"auto"` to `"sqrt"`. Note: the search for a split does not stop until at least one valid partition of the node samples is found, even if it requires to effectively inspect more than ``max_features`` features. random_state : int, RandomState instance or None, default=None Used to pick randomly the `max_features` used at each split. See :term:`Glossary <random_state>` for details. max_leaf_nodes : int, default=None Grow a tree with ``max_leaf_nodes`` in best-first fashion. Best nodes are defined as relative reduction in impurity. 
If None then unlimited number of leaf nodes. min_impurity_decrease : float, default=0.0 A node will be split if this split induces a decrease of the impurity greater than or equal to this value. The weighted impurity decrease equation is the following:: N_t / N * (impurity - N_t_R / N_t * right_impurity - N_t_L / N_t * left_impurity) where ``N`` is the total number of samples, ``N_t`` is the number of samples at the current node, ``N_t_L`` is the number of samples in the left child, and ``N_t_R`` is the number of samples in the right child. ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, if ``sample_weight`` is passed. .. versionadded:: 0.19 class_weight : dict, list of dict or "balanced", default=None Weights associated with classes in the form ``{class_label: weight}``. If None, all classes are supposed to have weight one. For multi-output problems, a list of dicts can be provided in the same order as the columns of y. Note that for multioutput (including multilabel) weights should be defined for each class of every column in its own dict. For example, for four-class multilabel classification weights should be [{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of [{1:1}, {2:5}, {3:1}, {4:1}]. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` For multi-output, the weights of each column of y will be multiplied. Note that these weights will be multiplied with sample_weight (passed through the fit method) if sample_weight is specified. ccp_alpha : non-negative float, default=0.0 Complexity parameter used for Minimal Cost-Complexity Pruning. The subtree with the largest cost complexity that is smaller than ``ccp_alpha`` will be chosen. By default, no pruning is performed. See :ref:`minimal_cost_complexity_pruning` for details. See :ref:`sphx_glr_auto_examples_tree_plot_cost_complexity_pruning.py` for an example of such pruning. .. versionadded:: 0.22 monotonic_cst : array-like of int of shape (n_features), default=None Indicates the monotonicity constraint to enforce on each feature. - 1: monotonic increase - 0: no constraint - -1: monotonic decrease If monotonic_cst is None, no constraints are applied. Monotonicity constraints are not supported for: - multiclass classifications (i.e. when `n_classes > 2`), - multioutput classifications (i.e. when `n_outputs_ > 1`), - classifications trained on data with missing values. The constraints hold over the probability of the positive class. Read more in the :ref:`User Guide <monotonic_cst_gbdt>`. .. versionadded:: 1.4 Attributes ---------- classes_ : ndarray of shape (n_classes,) or list of ndarray The classes labels (single output problem), or a list of arrays of class labels (multi-output problem). max_features_ : int The inferred value of max_features. n_classes_ : int or list of int The number of classes (for single output problems), or a list containing the number of classes for each output (for multi-output problems). feature_importances_ : ndarray of shape (n_features,) The impurity-based feature importances. The higher, the more important the feature. The importance of a feature is computed as the (normalized) total reduction of the criterion brought by that feature. It is also known as the Gini importance. Warning: impurity-based feature importances can be misleading for high cardinality features (many unique values). 
See :func:`sklearn.inspection.permutation_importance` as an alternative. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 n_outputs_ : int The number of outputs when ``fit`` is performed. tree_ : Tree instance The underlying Tree object. Please refer to ``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and :ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py` for basic usage of these attributes. See Also -------- ExtraTreeRegressor : An extremely randomized tree regressor. sklearn.ensemble.ExtraTreesClassifier : An extra-trees classifier. sklearn.ensemble.ExtraTreesRegressor : An extra-trees regressor. sklearn.ensemble.RandomForestClassifier : A random forest classifier. sklearn.ensemble.RandomForestRegressor : A random forest regressor. sklearn.ensemble.RandomTreesEmbedding : An ensemble of totally random trees. Notes ----- The default values for the parameters controlling the size of the trees (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and unpruned trees which can potentially be very large on some data sets. To reduce memory consumption, the complexity and size of the trees should be controlled by setting those parameter values. References ---------- .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees", Machine Learning, 63(1), 3-42, 2006. Examples -------- >>> from sklearn.datasets import load_iris >>> from sklearn.model_selection import train_test_split >>> from sklearn.ensemble import BaggingClassifier >>> from sklearn.tree import ExtraTreeClassifier >>> X, y = load_iris(return_X_y=True) >>> X_train, X_test, y_train, y_test = train_test_split( ... X, y, random_state=0) >>> extra_tree = ExtraTreeClassifier(random_state=0) >>> cls = BaggingClassifier(extra_tree, random_state=0).fit( ... X_train, y_train) >>> cls.score(X_test, y_test) 0.8947 """ def __init__( self, *, criterion="gini", splitter="random", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features="sqrt", random_state=None, max_leaf_nodes=None, min_impurity_decrease=0.0, class_weight=None, ccp_alpha=0.0, monotonic_cst=None, ): super().__init__( criterion=criterion, splitter=splitter, max_depth=max_depth, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_features=max_features, max_leaf_nodes=max_leaf_nodes, class_weight=class_weight, min_impurity_decrease=min_impurity_decrease, random_state=random_state, ccp_alpha=ccp_alpha, monotonic_cst=monotonic_cst, ) def __sklearn_tags__(self): tags = super().__sklearn_tags__() # XXX: nan is only supported for dense arrays, but we set this for the # common test to pass, specifically: check_estimators_nan_inf allow_nan = self.splitter == "random" and self.criterion in { "gini", "log_loss", "entropy", } tags.classifier_tags.multi_label = True tags.input_tags.allow_nan = allow_nan return tags
ExtraTreeClassifier
python
numpy__numpy
numpy/distutils/system_info.py
{ "start": 86428, "end": 86520 }
class ____(openblas_lapack_info):
    _lib_names = ['openblas', 'lapack']
openblas_clapack_info
python
joke2k__faker
faker/providers/person/ja_JP/__init__.py
{ "start": 138, "end": 9763 }
class ____(PersonProvider): # link: http://dic.nicovideo.jp/a/日本人の名前一覧 # link: http://www.meijiyasuda.co.jp/enjoy/ranking/ first_name_female_pairs = ( ("明美", "アケミ", "Akemi"), ("あすか", "アスカ", "Asuka"), ("香織", "カオリ", "Kaori"), ("加奈", "カナ", "Kana"), ("くみ子", "クミコ", "Kumiko"), ("さゆり", "サユリ", "Sayuri"), ("知実", "サトミ", "Satomi"), ("千代", "チヨ", "Chiyo"), ("直子", "ナオコ", "Naoko"), ("七夏", "ナナミ", "Nanami"), ("花子", "ハナコ", "Hanako"), ("春香", "ハルカ", "Haruka"), ("真綾", "マアヤ", "Maaya"), ("舞", "マイ", "Mai"), ("美加子", "ミカコ", "Mikako"), ("幹", "ミキ", "Miki"), ("桃子", "モモコ", "Momoko"), ("結衣", "ユイ", "Yui"), ("裕美子", "ユミコ", "Yumiko"), ("陽子", "ヨウコ", "Yoko"), ("里佳", "リカ", "Rika"), ) # for backwards compatibility first_names_female = tuple(map(itemgetter(0), first_name_female_pairs)) first_kana_names_female = tuple(map(itemgetter(1), first_name_female_pairs)) first_romanized_names_female = tuple(map(itemgetter(2), first_name_female_pairs)) first_name_male_pairs = ( ("晃", "アキラ", "Akira"), ("篤司", "アツシ", "Atsushi"), ("治", "オサム", "Osamu"), ("和也", "カズヤ", "Kazuya"), ("京助", "キョウスケ", "Kyosuke"), ("健一", "ケンイチ", "Kenichi"), ("修平", "シュウヘイ", "Shohei"), ("翔太", "ショウタ", "Shota"), ("淳", "ジュン", "Jun"), ("聡太郎", "ソウタロウ", "Sotaro"), ("太一", "タイチ", "Taichi"), ("太郎", "タロウ", "Taro"), ("拓真", "タクマ", "Takuma"), ("翼", "ツバサ", "Tsubasa"), ("智也", "トモヤ", "Tomoya"), ("直樹", "ナオキ", "Naoki"), ("直人", "ナオト", "Naoto"), ("英樹", "ヒデキ", "Hideki"), ("浩", "ヒロシ", "Hiroshi"), ("学", "マナブ", "Manabu"), ("充", "ミツル", "Mituru"), ("稔", "ミノル", "Minoru"), ("裕樹", "ユウキ", "Yuki"), ("裕太", "ユウタ", "Yuta"), ("康弘", "ヤスヒロ", "Yasuhiro"), ("陽一", "ヨウイチ", "Yoichi"), ("洋介", "ヨウスケ", "Yosuke"), ("亮介", "リョウスケ", "Ryosuke"), ("涼平", "リョウヘイ", "Ryohei"), ("零", "レイ", "Rei"), ) # for backwards compatibility first_names_male = tuple(map(itemgetter(0), first_name_male_pairs)) first_kana_names_male = tuple(map(itemgetter(1), first_name_male_pairs)) first_romanized_names_male = tuple(map(itemgetter(2), first_name_male_pairs)) # for backwards compatibility first_names = first_names_male + first_names_female first_kana_names = first_kana_names_male + first_kana_names_female first_romanized_names = first_romanized_names_male + first_romanized_names_female first_name_pairs = first_name_male_pairs + first_name_female_pairs last_name_pairs = OrderedDict( ( (("佐藤", "サトウ", "Sato"), 366803.0), (("鈴木", "スズキ", "Suzuki"), 321135), (("高橋", "タカハシ", "Takahashi"), 266782), (("田中", "タナカ", "Tanaka"), 245821), (("伊藤", "イトウ", "Ito"), 203357), (("渡辺", "ワタナベ", "Watanabe"), 200504), (("山本", "ヤマモト", "Yamamoto"), 200134), (("中村", "ナカムラ", "Nakamura"), 195219), (("小林", "コバヤシ", "Kobayashi"), 191819), (("加藤", "カトウ", "Kato"), 160283), (("吉田", "ヨシダ", "Yoshida"), 154461), (("山田", "ヤマダ", "Yamada"), 151675), (("佐々木", "ササキ", "Sasaki"), 135927), (("山口", "ヤマグチ", "Yamaguchi"), 119501), (("松本", "マツモト", "Matsumoto"), 116490), (("井上", "イノウエ", "Inoue"), 111287), (("木村", "キムラ", "Kimura"), 107446), (("林", "ハヤシ", "Hayashi"), 101826), (("斎藤", "サイトウ", "Saito"), 101774), (("清水", "シミズ", "Shimizu"), 97826), (("山崎", "ヤマザキ", "Yamazaki"), 90781), (("阿部", "アベ", "Abe"), 86833), (("森", "モリ", "Mori"), 86507), (("池田", "イケダ", "Ikeda"), 84860), (("橋本", "ハシモト", "Hashimoto"), 82836), (("山下", "ヤマシタ", "Yamashita"), 80588), (("石川", "イシカワ", "Ishikawa"), 77471), (("中島", "ナカジマ", "Nakajima"), 74106), (("前田", "マエダ", "Maeda"), 72930), (("藤田", "フジタ", "Fujita"), 72375), (("後藤", "ゴトウ", "Goto"), 71629), (("小川", "オガワ", "Ogawa"), 71179), (("岡田", "オカダ", "Okada"), 70347), (("長谷川", "ハセガワ", "Hasegawa"), 69201), (("村上", "ムラカミ", "Murakami"), 68606), (("近藤", "コンドウ", "Kondo"), 68297), (("石井", 
"イシイ", "Ishii"), 67079), (("遠藤", "エンドウ", "Endo"), 62620), (("斉藤", "サイトウ", "Saito"), 62540), (("坂本", "サカモト", "Sakamoto"), 62308), (("青木", "アオキ", "Aoki"), 59516), (("藤井", "フジイ", "Fujii"), 59204), (("西村", "ニシムラ", "Nishimura"), 58821), (("福田", "フクダ", "Fukuda"), 58714), (("太田", "オオタ", "Ota"), 58439), (("三浦", "ミウラ", "Miura"), 58006), (("藤原", "フジワラ", "Fujiwara"), 57742), (("松田", "マツダ", "Matsuda"), 55883), (("岡本", "オカモト", "Okamoto"), 55539), (("中川", "ナカガワ", "Nakagawa"), 55221), ) ) # for backwards compatibility only. use the pairs instead last_names = tuple(map(itemgetter(0), last_name_pairs)) last_kana_names = tuple(map(itemgetter(1), last_name_pairs)) last_romanized_names = tuple(map(itemgetter(2), last_name_pairs)) formats_male = ("{{last_name}} {{first_name_male}}",) formats_female = ("{{last_name}} {{first_name_female}}",) formats = formats_male + formats_female kana_formats_male = ("{{last_kana_name}} {{first_kana_name_male}}",) kana_formats_female = ("{{last_kana_name}} {{first_kana_name_female}}",) kana_formats = kana_formats_male + kana_formats_female romanized_formats_male = ("{{first_romanized_name_male}} {{last_romanized_name}}",) romanized_formats_female = ("{{first_romanized_name_female}} {{last_romanized_name}}",) romanized_formats = romanized_formats_male + romanized_formats_female def first_name_pair(self) -> Tuple[str, str, str]: """ :example: ('明美', 'アケミ', 'Akemi') """ return self.random_element(self.first_name_pairs) def first_name_male_pair(self) -> Tuple[str, str, str]: """ :example: ('晃', 'アキラ', 'Akira') """ return self.random_element(self.first_name_male_pairs) def first_name_female_pair(self) -> Tuple[str, str, str]: """ :example: ('明美', 'アケミ', 'Akemi') """ return self.random_element(self.first_name_female_pairs) def last_name_pair(self) -> Tuple[str, str, str]: """ :example: ('佐藤', 'サトウ', 'Sato') """ return self.random_element(self.last_name_pairs) def first_name(self) -> str: """ :example: '明美' """ return self.first_name_pair()[0] def first_name_male(self) -> str: """ :example: '晃' """ return self.first_name_male_pair()[0] def first_name_female(self) -> str: """ :example: '明美' """ return self.first_name_female_pair()[0] def last_name(self) -> str: """ :example: '佐藤' """ return self.last_name_pair()[0] def first_kana_name(self) -> str: """ :example: 'アケミ' """ return self.first_name_pair()[1] def first_kana_name_male(self) -> str: """ :example: 'アキラ' """ return self.first_name_male_pair()[1] def first_kana_name_female(self) -> str: """ :example: 'アケミ' """ return self.first_name_female_pair()[1] def last_kana_name(self) -> str: """ :example: 'サトウ' """ return self.last_name_pair()[1] def first_romanized_name(self) -> str: """ :example: 'Akemi' """ return self.first_name_pair()[2] def first_romanized_name_male(self) -> str: """ :example: 'Akira' """ return self.first_name_male_pair()[2] def first_romanized_name_female(self) -> str: """ :example: 'Akemi' """ return self.first_name_female_pair()[2] def last_romanized_name(self) -> str: """ :example: 'Sato' """ return self.last_name_pair()[2] def kana_name(self) -> str: """ :example: 'サトウ アケミ' """ pattern: str = self.random_element(self.kana_formats) return self.generator.parse(pattern) def kana_name_male(self) -> str: """ :example: 'サトウ アキラ' """ pattern: str = self.random_element(self.kana_formats_male) return self.generator.parse(pattern) def kana_name_female(self) -> str: """ :example: 'サトウ アケミ' """ pattern: str = self.random_element(self.kana_formats_female) return self.generator.parse(pattern) def romanized_name(self) -> 
str: """ :example: 'Akemi Sato' """ pattern: str = self.random_element(self.romanized_formats) return self.generator.parse(pattern) def romanized_name_male(self) -> str: """ :example: 'Akira Sato' """ pattern: str = self.random_element(self.romanized_formats_male) return self.generator.parse(pattern) def romanized_name_female(self) -> str: """ :example: 'Akemi Sato' """ pattern: str = self.random_element(self.romanized_formats_female) return self.generator.parse(pattern)
Provider
python
pola-rs__polars
py-polars/src/polars/datatypes/classes.py
{ "start": 9174, "end": 9252 }
class ____(DataType):
    """Base class for temporal data types."""
TemporalType
python
bokeh__bokeh
src/bokeh/models/tickers.py
{ "start": 5648, "end": 6179 }
class ____(ContinuousTicker):
    ''' Generate ticks at fixed, explicitly supplied locations.

    .. note::
        The ``desired_num_ticks`` property is ignored by this Ticker.

    '''

    # explicit __init__ to support Init signatures
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)

    ticks = Seq(Float, default=[], help="""
    List of major tick locations.
    """)

    minor_ticks = Seq(Float, default=[], help="""
    List of minor tick locations.
    """)
FixedTicker
python
HypothesisWorks__hypothesis
hypothesis-python/tests/codemods/test_codemods.py
{ "start": 5399, "end": 6193 }
class ____(CodemodTest):
    TRANSFORM = codemods.HypothesisFixCharactersArguments

    def test_substitution(self) -> None:
        for in_, out in codemods.HypothesisFixCharactersArguments._replacements.items():
            before = f"""
                import hypothesis.strategies as st
                st.characters({in_}=...)
            """
            self.assertCodemod(before=before, after=before.replace(in_, out))

    def test_remove_redundant_exclude_categories(self) -> None:
        args = "blacklist_categories=OUT, whitelist_categories=IN"
        before = f"""
            import hypothesis.strategies as st
            st.characters({args})
        """
        self.assertCodemod(before=before, after=before.replace(args, "categories=IN"))
TestFixCharactersArguments
python
lepture__mistune
tests/test_hooks.py
{ "start": 131, "end": 888 }
class ____(BaseTestCase):
    @staticmethod
    def parse(text):
        md = create_markdown(escape=False)
        add_toc_hook(md)
        html, state = md.parse(text)
        result = html + render_toc_ul(state.env["toc_items"])
        return result

    def test_customize_heading_id_func(self):
        def heading_id(token, i):
            return "t-" + str(i + 1)

        md = create_markdown(escape=False)
        add_toc_hook(md, heading_id=heading_id)
        html = md("# h1")
        self.assertEqual(html, '<h1 id="t-1">h1</h1>\n')

    def test_render_empty_toc(self):
        self.assertEqual(render_toc_ul([]), "")
        self.assertEqual(render_toc_ul(filter(lambda _: False, [])), "")


TestTocHook.load_fixtures("hook_toc.txt")
TestTocHook
python
pytest-dev__pytest
testing/test_pluginmanager.py
{ "start": 14846, "end": 17294 }
class ____:
    def test_preparse_args(self, pytestpm: PytestPluginManager) -> None:
        pytest.raises(
            ImportError, lambda: pytestpm.consider_preparse(["xyz", "-p", "hello123"])
        )
        # Handles -p without space (#3532).
        with pytest.raises(ImportError) as excinfo:
            pytestpm.consider_preparse(["-phello123"])
        assert '"hello123"' in excinfo.value.args[0]
        pytestpm.consider_preparse(["-pno:hello123"])
        # Handles -p without following arg (when used without argparse).
        pytestpm.consider_preparse(["-p"])
        with pytest.raises(UsageError, match=r"^plugin main cannot be disabled$"):
            pytestpm.consider_preparse(["-p", "no:main"])

    def test_plugin_prevent_register(self, pytestpm: PytestPluginManager) -> None:
        pytestpm.consider_preparse(["xyz", "-p", "no:abc"])
        l1 = pytestpm.get_plugins()
        pytestpm.register(42, name="abc")
        l2 = pytestpm.get_plugins()
        assert len(l2) == len(l1)
        assert 42 not in l2

    def test_plugin_prevent_register_unregistered_already_registered(
        self, pytestpm: PytestPluginManager
    ) -> None:
        pytestpm.register(42, name="abc")
        l1 = pytestpm.get_plugins()
        assert 42 in l1
        pytestpm.consider_preparse(["xyz", "-p", "no:abc"])
        l2 = pytestpm.get_plugins()
        assert 42 not in l2

    def test_plugin_prevent_register_stepwise_on_cacheprovider_unregister(
        self, pytestpm: PytestPluginManager
    ) -> None:
        """From PR #4304: The only way to unregister a module is documented at
        the end of https://docs.pytest.org/en/stable/how-to/plugins.html.

        When unregister cacheprovider, then unregister stepwise too.
        """
        pytestpm.register(42, name="cacheprovider")
        pytestpm.register(43, name="stepwise")
        l1 = pytestpm.get_plugins()
        assert 42 in l1
        assert 43 in l1
        pytestpm.consider_preparse(["xyz", "-p", "no:cacheprovider"])
        l2 = pytestpm.get_plugins()
        assert 42 not in l2
        assert 43 not in l2

    def test_blocked_plugin_can_be_used(self, pytestpm: PytestPluginManager) -> None:
        pytestpm.consider_preparse(["xyz", "-p", "no:abc", "-p", "abc"])
        assert pytestpm.has_plugin("abc")
        assert not pytestpm.is_blocked("abc")
        assert not pytestpm.is_blocked("pytest_abc")
TestPytestPluginManagerBootstrapping
python
numba__numba
numba/tests/test_sets.py
{ "start": 20450, "end": 22692 }
class ____(BaseTest):
    """
    Test reflection of native Numba sets on Python set objects.
    """

    def check_reflection(self, pyfunc):
        cfunc = jit(nopython=True)(pyfunc)
        samples = [(set([1., 2., 3., 4.]), set([0.])),
                   (set([1., 2., 3., 4.]), set([5., 6., 7., 8., 9.])),
                   ]
        for dest, src in samples:
            expected = set(dest)
            got = set(dest)
            pyres = pyfunc(expected, src)
            with self.assertRefCount(got, src):
                cres = cfunc(got, src)
                self.assertPreciseEqual(cres, pyres)
                self.assertPreciseEqual(expected, got)
                self.assertEqual(pyres[0] is expected, cres[0] is got)
                del pyres, cres

    def test_reflect_simple(self):
        self.check_reflection(reflect_simple)

    def test_reflect_conditional(self):
        self.check_reflection(reflect_conditional)

    def test_reflect_exception(self):
        """
        When the function exits with an exception, sets should still be reflected.
        """
        pyfunc = reflect_exception
        cfunc = jit(nopython=True)(pyfunc)
        s = set([1, 2, 3])
        with self.assertRefCount(s):
            with self.assertRaises(ZeroDivisionError):
                cfunc(s)
            self.assertPreciseEqual(s, set([1, 2, 3, 42]))

    def test_reflect_same_set(self):
        """
        When the same set object is reflected twice, behaviour should be consistent.
        """
        pyfunc = reflect_dual
        cfunc = jit(nopython=True)(pyfunc)
        pyset = set([1, 2, 3])
        cset = pyset.copy()
        expected = pyfunc(pyset, pyset)
        got = cfunc(cset, cset)
        self.assertPreciseEqual(expected, got)
        self.assertPreciseEqual(pyset, cset)
        self.assertRefCountEqual(pyset, cset)

    def test_reflect_clean(self):
        """
        When the set wasn't mutated, no reflection should take place.
        """
        cfunc = jit(nopython=True)(noop)
        # Use a complex, as Python integers can be cached
        s = set([12.5j])
        ids = [id(x) for x in s]
        cfunc(s)
        self.assertEqual([id(x) for x in s], ids)
TestSetReflection
python
apache__airflow
task-sdk/src/airflow/sdk/api/client.py
{ "start": 29374, "end": 31405 }
class ____(httpx.Auth):
    def __init__(self, token: str):
        self.token: str = token

    def auth_flow(self, request: httpx.Request):
        if self.token:
            request.headers["Authorization"] = "Bearer " + self.token
        yield request


# This exists as an aid for debugging or local running via the `dry_run` argument to Client. It doesn't make
# sense for returning connections etc.
def noop_handler(request: httpx.Request) -> httpx.Response:
    path = request.url.path
    log.debug("Dry-run request", method=request.method, path=path)

    if path.startswith("/task-instances/") and path.endswith("/run"):
        # Return a fake context
        return httpx.Response(
            200,
            json={
                "dag_run": {
                    "dag_id": "test_dag",
                    "run_id": "test_run",
                    "logical_date": "2021-01-01T00:00:00Z",
                    "start_date": "2021-01-01T00:00:00Z",
                    "run_type": DagRunType.MANUAL,
                    "run_after": "2021-01-01T00:00:00Z",
                    "consumed_asset_events": [],
                },
                "max_tries": 0,
                "should_retry": False,
            },
        )

    return httpx.Response(200, json={"text": "Hello, world!"})


# Note: Given defaults make attempts after 1, 3, 7, 15 and fails after 31seconds
API_RETRIES = conf.getint("workers", "execution_api_retries")
API_RETRY_WAIT_MIN = conf.getfloat("workers", "execution_api_retry_wait_min")
API_RETRY_WAIT_MAX = conf.getfloat("workers", "execution_api_retry_wait_max")
API_SSL_CERT_PATH = conf.get("api", "ssl_cert")
API_TIMEOUT = conf.getfloat("workers", "execution_api_timeout")


def _should_retry_api_request(exception: BaseException) -> bool:
    """Determine if an API request should be retried based on the exception type."""
    if isinstance(exception, httpx.HTTPStatusError):
        return exception.response.status_code >= 500
    return isinstance(exception, httpx.RequestError)
BearerAuth
python
great-expectations__great_expectations
great_expectations/types/fonts.py
{ "start": 188, "end": 440 }
class ____(Enum):
    MONTSERRAT = "https://fonts.googleapis.com/css2?family=Montserrat"
    ROBOTO_MONO = "https://fonts.googleapis.com/css2?family=Roboto+Mono"
    SOURCE_SANS_PRO = "https://fonts.googleapis.com/css2?family=Source+Sans+Pro"
FontFamilyURL
python
sqlalchemy__sqlalchemy
test/orm/test_eager_relations.py
{ "start": 137637, "end": 144530 }
class ____(_fixtures.FixtureTest): """test that loaders from a base Query fully populate.""" run_inserts = "once" run_deletes = None def _collection_to_scalar_fixture(self): User, Address, Dingaling = ( self.classes.User, self.classes.Address, self.classes.Dingaling, ) self.mapper_registry.map_imperatively( User, self.tables.users, properties={"addresses": relationship(Address)}, ) self.mapper_registry.map_imperatively( Address, self.tables.addresses, properties={"dingaling": relationship(Dingaling)}, ) self.mapper_registry.map_imperatively( Dingaling, self.tables.dingalings ) sess = fixture_session(autoflush=False) return User, Address, Dingaling, sess def _collection_to_collection_fixture(self): User, Order, Item = ( self.classes.User, self.classes.Order, self.classes.Item, ) self.mapper_registry.map_imperatively( User, self.tables.users, properties={"orders": relationship(Order)} ) self.mapper_registry.map_imperatively( Order, self.tables.orders, properties={ "items": relationship(Item, secondary=self.tables.order_items) }, ) self.mapper_registry.map_imperatively(Item, self.tables.items) sess = fixture_session(autoflush=False) return User, Order, Item, sess def _eager_config_fixture(self): User, Address = self.classes.User, self.classes.Address self.mapper_registry.map_imperatively( User, self.tables.users, properties={"addresses": relationship(Address, lazy="joined")}, ) self.mapper_registry.map_imperatively(Address, self.tables.addresses) sess = fixture_session(autoflush=False) return User, Address, sess def test_runs_query_on_refresh(self): User, Address, sess = self._eager_config_fixture() u1 = sess.get(User, 8) assert "addresses" in u1.__dict__ sess.expire(u1) def go(): eq_(u1.id, 8) self.assert_sql_count(testing.db, go, 1) assert "addresses" in u1.__dict__ @testing.combinations( ("selectin",), ("subquery",), ("immediate",), ) def test_refresh_no_recursion(self, strat): User, Address = self.classes.User, self.classes.Address self.mapper_registry.map_imperatively( User, self.tables.users, properties={ "addresses": relationship( Address, lazy="joined", back_populates="user" ) }, ) self.mapper_registry.map_imperatively( Address, self.tables.addresses, properties={ "user": relationship( User, lazy=strat, back_populates="addresses" ) }, ) sess = fixture_session(autoflush=False) u1 = sess.get(User, 8) assert "addresses" in u1.__dict__ sess.expire(u1) def go(): eq_(u1.id, 8) self.assert_sql_count(testing.db, go, 1) assert "addresses" in u1.__dict__ # immediateload would be used here for all 3 strategies assert "user" in u1.addresses[0].__dict__ def test_populate_existing_propagate(self): # both SelectInLoader and SubqueryLoader receive the loaded collection # at once and use attributes.set_committed_value(). However # joinedloader receives the collection per-row, so has an initial # step where it invokes init_state_collection(). This has to clear # out an existing collection to function correctly with # populate_existing. 
User, Address, sess = self._eager_config_fixture() u1 = sess.get(User, 8) u1.addresses[2].email_address = "foofoo" del u1.addresses[1] u1 = sess.query(User).populate_existing().filter_by(id=8).one() # collection is reverted eq_(len(u1.addresses), 3) # attributes on related items reverted eq_(u1.addresses[2].email_address, "ed@lala.com") def test_no_crash_on_existing(self): User, Address, sess = self._eager_config_fixture() u1 = User(id=12, name="u", addresses=[]) sess.add(u1) sess.commit() sess.query(User).filter(User.id == 12).options( joinedload(User.addresses) ).first() def test_loads_second_level_collection_to_scalar(self): User, Address, Dingaling, sess = self._collection_to_scalar_fixture() u1 = sess.get(User, 8) a1 = Address() u1.addresses.append(a1) a2 = u1.addresses[0] a2.email_address = "foo" sess.query(User).options( joinedload(User.addresses).joinedload(Address.dingaling) ).filter_by(id=8).all() assert u1.addresses[-1] is a1 for a in u1.addresses: if a is not a1: assert "dingaling" in a.__dict__ else: assert "dingaling" not in a.__dict__ if a is a2: eq_(a2.email_address, "foo") def test_loads_second_level_collection_to_collection(self): User, Order, Item, sess = self._collection_to_collection_fixture() u1 = sess.get(User, 7) u1.orders o1 = Order() u1.orders.append(o1) sess.query(User).options( joinedload(User.orders).joinedload(Order.items) ).filter_by(id=7).all() for o in u1.orders: if o is not o1: assert "items" in o.__dict__ else: assert "items" not in o.__dict__ def test_load_two_levels_collection_to_scalar(self): User, Address, Dingaling, sess = self._collection_to_scalar_fixture() u1 = ( sess.query(User) .filter_by(id=8) .options(joinedload(User.addresses)) .one() ) sess.query(User).filter_by(id=8).options( joinedload(User.addresses).joinedload(Address.dingaling) ).first() assert "dingaling" in u1.addresses[0].__dict__ def test_load_two_levels_collection_to_collection(self): User, Order, Item, sess = self._collection_to_collection_fixture() u1 = ( sess.query(User) .filter_by(id=7) .options(joinedload(User.orders)) .one() ) sess.query(User).filter_by(id=7).options( joinedload(User.orders).joinedload(Order.items) ).first() assert "items" in u1.orders[0].__dict__
LoadOnExistingTest
python
django__django
django/db/models/fields/related_descriptors.py
{ "start": 40322, "end": 66942 }
class ____(ReverseManyToOneDescriptor): """ Accessor to the related objects manager on the forward and reverse sides of a many-to-many relation. In the example:: class Pizza(Model): toppings = ManyToManyField(Topping, related_name='pizzas') ``Pizza.toppings`` and ``Topping.pizzas`` are ``ManyToManyDescriptor`` instances. Most of the implementation is delegated to a dynamically defined manager class built by ``create_forward_many_to_many_manager()`` defined below. """ def __init__(self, rel, reverse=False): super().__init__(rel) self.reverse = reverse @property def through(self): # through is provided so that you have easy access to the through # model (Book.authors.through) for inlines, etc. This is done as # a property to ensure that the fully resolved value is returned. return self.rel.through @cached_property def related_manager_cls(self): related_model = self.rel.related_model if self.reverse else self.rel.model return create_forward_many_to_many_manager( related_model._default_manager.__class__, self.rel, reverse=self.reverse, ) def _get_set_deprecation_msg_params(self): return ( "%s side of a many-to-many set" % ("reverse" if self.reverse else "forward"), self.rel.accessor_name if self.reverse else self.field.name, ) def create_forward_many_to_many_manager(superclass, rel, reverse): """ Create a manager for the either side of a many-to-many relation. This manager subclasses another manager, generally the default manager of the related model, and adds behaviors specific to many-to-many relations. """ class ManyRelatedManager(superclass, AltersData): def __init__(self, instance=None): super().__init__() self.instance = instance if not reverse: self.model = rel.model self.query_field_name = rel.field.related_query_name() self.prefetch_cache_name = rel.field.name self.source_field_name = rel.field.m2m_field_name() self.target_field_name = rel.field.m2m_reverse_field_name() self.symmetrical = rel.symmetrical else: self.model = rel.related_model self.query_field_name = rel.field.name self.prefetch_cache_name = rel.field.related_query_name() self.source_field_name = rel.field.m2m_reverse_field_name() self.target_field_name = rel.field.m2m_field_name() self.symmetrical = False self.through = rel.through self.reverse = reverse self.source_field = self.through._meta.get_field(self.source_field_name) self.target_field = self.through._meta.get_field(self.target_field_name) self.core_filters = {} self.pk_field_names = {} for lh_field, rh_field in self.source_field.related_fields: core_filter_key = "%s__%s" % (self.query_field_name, rh_field.name) self.core_filters[core_filter_key] = getattr(instance, rh_field.attname) self.pk_field_names[lh_field.name] = rh_field.name self.related_val = self.source_field.get_foreign_related_value(instance) if None in self.related_val: raise ValueError( '"%r" needs to have a value for field "%s" before ' "this many-to-many relationship can be used." % (instance, self.pk_field_names[self.source_field_name]) ) # Even if this relation is not to pk, we require still pk value. # The wish is that the instance has been already saved to DB, # although having a pk value isn't a guarantee of that. if not instance._is_pk_set(): raise ValueError( "%r instance needs to have a primary key value before " "a many-to-many relationship can be used." 
% instance.__class__.__name__ ) def __call__(self, *, manager): manager = getattr(self.model, manager) manager_class = create_forward_many_to_many_manager( manager.__class__, rel, reverse ) return manager_class(instance=self.instance) do_not_call_in_templates = True def _build_remove_filters(self, removed_vals): filters = Q.create([(self.source_field_name, self.related_val)]) # No need to add a subquery condition if removed_vals is a QuerySet # without filters. removed_vals_filters = ( not isinstance(removed_vals, QuerySet) or removed_vals._has_filters() ) if removed_vals_filters: filters &= Q.create([(f"{self.target_field_name}__in", removed_vals)]) if self.symmetrical: symmetrical_filters = Q.create( [(self.target_field_name, self.related_val)] ) if removed_vals_filters: symmetrical_filters &= Q.create( [(f"{self.source_field_name}__in", removed_vals)] ) filters |= symmetrical_filters return filters def _apply_rel_filters(self, queryset): """ Filter the queryset for the instance this manager is bound to. """ queryset._add_hints(instance=self.instance) if self._db: queryset = queryset.using(self._db) queryset._fetch_mode = self.instance._state.fetch_mode queryset._defer_next_filter = True return queryset._next_is_sticky().filter(**self.core_filters) def get_prefetch_cache(self): # Walk up the ancestor-chain (if cached) to try and find a prefetch # in an ancestor. for instance, _ in _traverse_ancestors(rel.field.model, self.instance): try: return instance._prefetched_objects_cache[self.prefetch_cache_name] except (AttributeError, KeyError): pass return None def _remove_prefetched_objects(self): # Walk up the ancestor-chain (if cached) to try and find a prefetch # in an ancestor. for instance, _ in _traverse_ancestors(rel.field.model, self.instance): try: instance._prefetched_objects_cache.pop(self.prefetch_cache_name) except (AttributeError, KeyError): pass # nothing to clear from cache def get_queryset(self): if (cache := self.get_prefetch_cache()) is not None: return cache else: queryset = super().get_queryset() return self._apply_rel_filters(queryset) def get_prefetch_querysets(self, instances, querysets=None): if querysets and len(querysets) != 1: raise ValueError( "querysets argument of get_prefetch_querysets() should have a " "length of 1." ) queryset = querysets[0] if querysets else super().get_queryset() queryset._add_hints(instance=instances[0]) queryset = queryset.using(queryset._db or self._db) queryset = _filter_prefetch_queryset( queryset, self.query_field_name, instances ) # M2M: need to annotate the query in order to get the primary model # that the secondary model was actually related to. We know that # there will already be a join on the join table, so we can just # add the select. # For non-autocreated 'through' models, can't assume we are # dealing with PK values. 
fk = self.through._meta.get_field(self.source_field_name) join_table = fk.model._meta.db_table connection = connections[queryset.db] qn = connection.ops.quote_name queryset = queryset.extra( select={ "_prefetch_related_val_%s" % f.attname: "%s.%s" % (qn(join_table), qn(f.column)) for f in fk.local_related_fields } ) return ( queryset, lambda result: tuple( f.get_db_prep_value( getattr(result, f"_prefetch_related_val_{f.attname}"), connection, ) for f in fk.local_related_fields ), lambda inst: tuple( f.get_db_prep_value(getattr(inst, f.attname), connection) for f in fk.foreign_related_fields ), False, self.prefetch_cache_name, False, ) @property def constrained_target(self): # If the through relation's target field's foreign integrity is # enforced, the query can be performed solely against the through # table as the INNER JOIN'ing against target table is unnecessary. if not self.target_field.db_constraint: return None db = router.db_for_read(self.through, instance=self.instance) if not connections[db].features.supports_foreign_keys: return None hints = {"instance": self.instance} manager = self.through._base_manager.db_manager(db, hints=hints) filters = {self.source_field_name: self.related_val[0]} # Nullable target rows must be excluded as well as they would have # been filtered out from an INNER JOIN. if self.target_field.null: filters["%s__isnull" % self.target_field_name] = False return manager.filter(**filters) def exists(self): if ( superclass is Manager and self.get_prefetch_cache() is None and (constrained_target := self.constrained_target) is not None ): return constrained_target.exists() else: return super().exists() def count(self): if ( superclass is Manager and self.get_prefetch_cache() is None and (constrained_target := self.constrained_target) is not None ): return constrained_target.count() else: return super().count() def add(self, *objs, through_defaults=None): self._remove_prefetched_objects() db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): self._add_items( self.source_field_name, self.target_field_name, *objs, through_defaults=through_defaults, ) # If this is a symmetrical m2m relation to self, add the mirror # entry in the m2m table. 
if self.symmetrical: self._add_items( self.target_field_name, self.source_field_name, *objs, through_defaults=through_defaults, ) add.alters_data = True async def aadd(self, *objs, through_defaults=None): return await sync_to_async(self.add)( *objs, through_defaults=through_defaults ) aadd.alters_data = True def remove(self, *objs): self._remove_prefetched_objects() self._remove_items(self.source_field_name, self.target_field_name, *objs) remove.alters_data = True async def aremove(self, *objs): return await sync_to_async(self.remove)(*objs) aremove.alters_data = True def clear(self): db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): signals.m2m_changed.send( sender=self.through, action="pre_clear", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db, ) self._remove_prefetched_objects() filters = self._build_remove_filters(super().get_queryset().using(db)) self.through._default_manager.using(db).filter(filters).delete() signals.m2m_changed.send( sender=self.through, action="post_clear", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db, ) clear.alters_data = True async def aclear(self): return await sync_to_async(self.clear)() aclear.alters_data = True def set(self, objs, *, clear=False, through_defaults=None): # Force evaluation of `objs` in case it's a queryset whose value # could be affected by `manager.clear()`. Refs #19816. objs = tuple(objs) db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): if clear: self.clear() self.add(*objs, through_defaults=through_defaults) else: old_ids = set( self.using(db).values_list( self.target_field.target_field.attname, flat=True ) ) new_objs = [] for obj in objs: fk_val = ( self.target_field.get_foreign_related_value(obj)[0] if isinstance(obj, self.model) else self.target_field.get_prep_value(obj) ) if fk_val in old_ids: old_ids.remove(fk_val) else: new_objs.append(obj) self.remove(*old_ids) self.add(*new_objs, through_defaults=through_defaults) set.alters_data = True async def aset(self, objs, *, clear=False, through_defaults=None): return await sync_to_async(self.set)( objs=objs, clear=clear, through_defaults=through_defaults ) aset.alters_data = True def create(self, *, through_defaults=None, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs) self.add(new_obj, through_defaults=through_defaults) return new_obj create.alters_data = True async def acreate(self, *, through_defaults=None, **kwargs): return await sync_to_async(self.create)( through_defaults=through_defaults, **kwargs ) acreate.alters_data = True def get_or_create(self, *, through_defaults=None, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create( **kwargs ) # We only need to add() if created because if we got an object back # from get() then the relationship already exists. 
if created: self.add(obj, through_defaults=through_defaults) return obj, created get_or_create.alters_data = True async def aget_or_create(self, *, through_defaults=None, **kwargs): return await sync_to_async(self.get_or_create)( through_defaults=through_defaults, **kwargs ) aget_or_create.alters_data = True def update_or_create(self, *, through_defaults=None, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) obj, created = super( ManyRelatedManager, self.db_manager(db) ).update_or_create(**kwargs) # We only need to add() if created because if we got an object back # from get() then the relationship already exists. if created: self.add(obj, through_defaults=through_defaults) return obj, created update_or_create.alters_data = True async def aupdate_or_create(self, *, through_defaults=None, **kwargs): return await sync_to_async(self.update_or_create)( through_defaults=through_defaults, **kwargs ) aupdate_or_create.alters_data = True def _get_target_ids(self, target_field_name, objs): """ Return the set of ids of `objs` that the target field references. """ from django.db.models import Model target_ids = set() target_field = self.through._meta.get_field(target_field_name) for obj in objs: if isinstance(obj, self.model): if not router.allow_relation(obj, self.instance): raise ValueError( 'Cannot add "%r": instance is on database "%s", ' 'value is on database "%s"' % (obj, self.instance._state.db, obj._state.db) ) target_id = target_field.get_foreign_related_value(obj)[0] if target_id is None: raise ValueError( 'Cannot add "%r": the value for field "%s" is None' % (obj, target_field_name) ) target_ids.add(target_id) elif isinstance(obj, Model): raise TypeError( "'%s' instance expected, got %r" % (self.model._meta.object_name, obj) ) else: target_ids.add(target_field.get_prep_value(obj)) return target_ids def _get_missing_target_ids( self, source_field_name, target_field_name, db, target_ids ): """ Return the subset of ids of `objs` that aren't already assigned to this relationship. """ vals = ( self.through._default_manager.using(db) .values_list(target_field_name, flat=True) .filter( **{ source_field_name: self.related_val[0], "%s__in" % target_field_name: target_ids, } ) ) return target_ids.difference(vals) def _get_add_plan(self, db, source_field_name): """ Return a boolean triple of the way the add should be performed. The first element is whether or not bulk_create(ignore_conflicts) can be used, the second whether or not signals must be sent, and the third element is whether or not the immediate bulk insertion with conflicts ignored can be performed. """ # Conflicts can be ignored when the intermediary model is # auto-created as the only possible collision is on the # (source_id, target_id) tuple. The same assertion doesn't hold for # user-defined intermediary models as they could have other fields # causing conflicts which must be surfaced. can_ignore_conflicts = ( self.through._meta.auto_created is not False and connections[db].features.supports_ignore_conflicts ) # Don't send the signal when inserting duplicate data row # for symmetrical reverse entries. must_send_signals = ( self.reverse or source_field_name == self.source_field_name ) and (signals.m2m_changed.has_listeners(self.through)) # Fast addition through bulk insertion can only be performed # if no m2m_changed listeners are connected for self.through # as they require the added set of ids to be provided via # pk_set. 
return ( can_ignore_conflicts, must_send_signals, (can_ignore_conflicts and not must_send_signals), ) def _add_items( self, source_field_name, target_field_name, *objs, through_defaults=None ): # source_field_name: the PK fieldname in join table for the source # object target_field_name: the PK fieldname in join table for the # target object *objs - objects to add. Either object instances, or # primary keys of object instances. if not objs: return through_defaults = dict(resolve_callables(through_defaults or {})) target_ids = self._get_target_ids(target_field_name, objs) db = router.db_for_write(self.through, instance=self.instance) can_ignore_conflicts, must_send_signals, can_fast_add = self._get_add_plan( db, source_field_name ) if can_fast_add: self.through._default_manager.using(db).bulk_create( [ self.through( **{ "%s_id" % source_field_name: self.related_val[0], "%s_id" % target_field_name: target_id, } ) for target_id in target_ids ], ignore_conflicts=True, ) return missing_target_ids = self._get_missing_target_ids( source_field_name, target_field_name, db, target_ids ) with transaction.atomic(using=db, savepoint=False): if must_send_signals: signals.m2m_changed.send( sender=self.through, action="pre_add", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=missing_target_ids, using=db, ) # Add the ones that aren't there already. self.through._default_manager.using(db).bulk_create( [ self.through( **through_defaults, **{ "%s_id" % source_field_name: self.related_val[0], "%s_id" % target_field_name: target_id, }, ) for target_id in missing_target_ids ], ignore_conflicts=can_ignore_conflicts, ) if must_send_signals: signals.m2m_changed.send( sender=self.through, action="post_add", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=missing_target_ids, using=db, ) def _remove_items(self, source_field_name, target_field_name, *objs): # source_field_name: the PK colname in join table for the source # object target_field_name: the PK colname in join table for the # target object *objs - objects to remove. Either object instances, # or primary keys of object instances. if not objs: return # Check that all the objects are of the right type old_ids = set() for obj in objs: if isinstance(obj, self.model): fk_val = self.target_field.get_foreign_related_value(obj)[0] old_ids.add(fk_val) else: old_ids.add(obj) db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): # Send a signal to the other end if need be. signals.m2m_changed.send( sender=self.through, action="pre_remove", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db, ) target_model_qs = super().get_queryset() if target_model_qs._has_filters(): old_vals = target_model_qs.using(db).filter( **{"%s__in" % self.target_field.target_field.attname: old_ids} ) else: old_vals = old_ids filters = self._build_remove_filters(old_vals) self.through._default_manager.using(db).filter(filters).delete() signals.m2m_changed.send( sender=self.through, action="post_remove", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db, ) return ManyRelatedManager
ManyToManyDescriptor
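The many-to-many manager code in the record above centres on `add()` honouring `through_defaults` and on the bulk "add plan". Below is a minimal, self-contained sketch, not taken from Django's sources or tests, of that behaviour; the model names, the "demo" app label, and the in-memory SQLite configuration are illustrative assumptions.

```python
# Minimal sketch (not Django's own code): add() with through_defaults on a
# ManyToManyField that uses an explicit "through" model. Model names, the
# "demo" app label, and the in-memory SQLite setup are illustrative.
import django
from django.conf import settings

settings.configure(
    INSTALLED_APPS=[],
    DATABASES={"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}},
    DEFAULT_AUTO_FIELD="django.db.models.BigAutoField",
)
django.setup()

from django.db import connection, models


class Person(models.Model):
    name = models.CharField(max_length=50)

    class Meta:
        app_label = "demo"


class Group(models.Model):
    name = models.CharField(max_length=50)
    members = models.ManyToManyField(Person, through="Membership")

    class Meta:
        app_label = "demo"


class Membership(models.Model):
    person = models.ForeignKey(Person, on_delete=models.CASCADE)
    group = models.ForeignKey(Group, on_delete=models.CASCADE)
    invite_reason = models.CharField(max_length=64, default="")

    class Meta:
        app_label = "demo"


# Create the tables directly; a real project would use migrations.
with connection.schema_editor() as editor:
    editor.create_model(Person)
    editor.create_model(Group)
    editor.create_model(Membership)

ada = Person.objects.create(name="Ada")
maths = Group.objects.create(name="Maths")

# through_defaults is forwarded to _add_items() and lands on the row that is
# inserted into the intermediary table.
maths.members.add(ada, through_defaults={"invite_reason": "founder"})
print(maths.members.count())                   # 1
print(Membership.objects.get().invite_reason)  # "founder"
```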
python
run-llama__llama_index
llama-index-integrations/postprocessor/llama-index-postprocessor-alibabacloud-aisearch-rerank/llama_index/postprocessor/alibabacloud_aisearch_rerank/base.py
{ "start": 1544, "end": 5234 }
class ____(BaseNodePostprocessor): """ For further details, please visit `https://help.aliyun.com/zh/open-search/search-platform/developer-reference/ranker-api-details`. """ _client: Client = PrivateAttr() aisearch_api_key: str = Field(default=None, exclude=True) endpoint: str = None service_id: str = "ops-bge-reranker-larger" workspace_name: str = "default" top_n: int = 3 batch_size: int = 16 def __init__( self, endpoint: str = None, aisearch_api_key: str = None, **kwargs: Any ) -> None: super().__init__(**kwargs) self.aisearch_api_key = get_from_param_or_env( "aisearch_api_key", aisearch_api_key, "AISEARCH_API_KEY" ) self.endpoint = get_from_param_or_env("endpoint", endpoint, "AISEARCH_ENDPOINT") config = AISearchConfig( bearer_token=self.aisearch_api_key, endpoint=self.endpoint, protocol="http", ) self._client = Client(config=config) @classmethod def class_name(cls) -> str: return "AlibabaCloudAISearchRerank" @retry_decorator def _rerank_one_batch( self, query: str, texts: List[str] ) -> List[GetDocumentRankResponseBodyResultScores]: request = GetDocumentRankRequest(docs=texts, query=query) response: GetDocumentRankResponse = self._client.get_document_rank( workspace_name=self.workspace_name, service_id=self.service_id, request=request, ) return response.body.result.scores def _rerank( self, query: str, texts: List[str], top_n: int ) -> List[GetDocumentRankResponseBodyResultScores]: scores = [] for i in range(0, len(texts), self.batch_size): batch_scores = self._rerank_one_batch(query, texts[i : i + self.batch_size]) for score in batch_scores: score.index = i + score.index scores.extend(batch_scores) scores.sort(key=lambda x: x.score, reverse=True) return scores[:top_n] def _postprocess_nodes( self, nodes: List[NodeWithScore], query_bundle: Optional[QueryBundle] = None, ) -> List[NodeWithScore]: dispatcher.event( ReRankStartEvent( query=query_bundle, nodes=nodes, top_n=self.top_n, model_name=self.service_id, ) ) if query_bundle is None: raise ValueError("Missing query bundle in extra info.") if len(nodes) == 0: return [] with self.callback_manager.event( CBEventType.RERANKING, payload={ EventPayload.NODES: nodes, EventPayload.MODEL_NAME: self.service_id, EventPayload.QUERY_STR: query_bundle.query_str, EventPayload.TOP_K: self.top_n, }, ) as event: texts = [ node.node.get_content(metadata_mode=MetadataMode.EMBED) for node in nodes ] results = self._rerank( query=query_bundle.query_str, texts=texts, top_n=self.top_n, ) new_nodes = [] for result in results: new_node_with_score = NodeWithScore( node=nodes[result.index].node, score=result.score ) new_nodes.append(new_node_with_score) event.on_end(payload={EventPayload.NODES: new_nodes}) dispatcher.event(ReRankEndEvent(nodes=new_nodes)) return new_nodes
AlibabaCloudAISearchRerank
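A hedged usage sketch for the reranker record above. It assumes the integration package re-exports `AlibabaCloudAISearchRerank` from `llama_index.postprocessor.alibabacloud_aisearch_rerank` and that `llama-index-core`'s `TextNode`, `NodeWithScore`, and `QueryBundle` are available; the endpoint, API key, node texts, and query string are placeholders, and a real AISearch endpoint plus credentials are needed for the call to succeed.

```python
# Hedged usage sketch; endpoint/API key/node texts are placeholders and a
# real AISearch endpoint plus credentials are required for the call to work.
from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
from llama_index.postprocessor.alibabacloud_aisearch_rerank import (
    AlibabaCloudAISearchRerank,
)

reranker = AlibabaCloudAISearchRerank(
    endpoint="<your-endpoint>",          # or set AISEARCH_ENDPOINT
    aisearch_api_key="<your-api-key>",   # or set AISEARCH_API_KEY
    top_n=2,
)

nodes = [
    NodeWithScore(node=TextNode(text="Paris is the capital of France.")),
    NodeWithScore(node=TextNode(text="The Eiffel Tower is in Paris.")),
    NodeWithScore(node=TextNode(text="Bananas are rich in potassium.")),
]

reranked = reranker.postprocess_nodes(
    nodes, query_bundle=QueryBundle("landmarks in France")
)
for n in reranked:
    print(n.score, n.node.get_content())
```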
python
google__jax
tests/fused_attention_stablehlo_test.py
{ "start": 35703, "end": 41228 }
class ____(jtu.JaxTestCase): def setUp(self): super().setUp() try: cudnn_version = check_cudnn_version() except RuntimeError as e: self.skipTest(str(e)) return if cudnn_version == 91000: self.skipTest("cuDNN 9.10.0 does not support SDPA FP8") if not jtu.is_cuda_compute_capability_at_least("9.0"): self.skipTest("Requires at least Hopper arch") if jtu.is_cuda_compute_capability_equal("12.0"): self.skipTest("cuDNN does not support FP8 with compute capability 12.0") if jtu.is_cuda_version_at_least(13, 0): self.skipTest("cuDNN creates no execution plans on CUDA 13.0.") @jtu.sample_product( batch_size=[2, 4], seq_len=[128, 256], num_heads=[4, 8], head_dim=[128], mask_type=[MaskType.NO_MASK], scale=[1.0, 0.75], dtype=[jnp.bfloat16, jnp.float16], ) @jtu.run_on_devices("cuda") def test_sdpa_fp8( self, batch_size: int, seq_len: int, num_heads: int, head_dim: int, mask_type: MaskType, scale: float, dtype: jnp.dtype, ): k1, k2, k3, k4 = jax.random.split(jax.random.key(0), 4) input_shape = ( batch_size, seq_len, num_heads, head_dim, ) # only test the default BTNH query_h = jax.random.normal(k1, input_shape, dtype=dtype) key_h = jax.random.normal(k2, input_shape, dtype=dtype) value_h = jax.random.normal(k3, input_shape, dtype=dtype) grad_h = jax.random.normal(k4, input_shape, dtype=dtype) query = cast_to_representable(query_h, jnp.float8_e4m3fn) key = cast_to_representable(key_h, jnp.float8_e4m3fn) value = cast_to_representable(value_h, jnp.float8_e4m3fn) grad = cast_to_representable(grad_h, jnp.float8_e4m3fn) query_quantized = quantize(query, jnp.float8_e4m3fn, jnp.float32) key_quantized = quantize(key, jnp.float8_e4m3fn, jnp.float32) value_quantized = quantize(value, jnp.float8_e4m3fn, jnp.float32) grad_quantized = quantize(grad, jnp.float8_e4m3fn, jnp.float32) sdpa_train_fp8_p = partial(sdpa_train_fp8, scale=scale, mask_type=mask_type) jitted_sdpa_train_fp8 = jax.jit(sdpa_train_fp8_p) jitted_sdpa_train_ref = jax.jit( partial( sdpa_train_ref, scale=scale, mask_type=mask_type, dropout_rate=0.0 ), ) fp8_metas = { name: jnp.ones((1, 1, 1, 1), dtype=jnp.float32) for name in fp8_meta_names } out, (query_grad, key_grad, value_grad) = jitted_sdpa_train_fp8( query_quantized, key_quantized, value_quantized, grad_quantized, fp8_metas, ) out_ref, (query_grad_ref, key_grad_ref, value_grad_ref) = ( jitted_sdpa_train_ref(query, key, value, grad) ) self.assertArraysAllClose(out_ref, out.astype(dtype), rtol=5e-1, atol=5e-1) self.assertArraysAllClose( query_grad_ref, query_grad.astype(dtype), rtol=5e-1, atol=3e0 ) self.assertArraysAllClose( key_grad_ref, key_grad.astype(dtype), rtol=5e-1, atol=3e0 ) self.assertArraysAllClose( value_grad_ref, value_grad.astype(dtype), rtol=5e-1, atol=5e-1 ) @jtu.sample_product( batch_size=[4, 2], seq_len=[4, 16], num_heads=[4, 16], head_dim=[16, 32], mask_type=[MaskType.NO_MASK], qkv_layout=["BNTH", "BTNH"], scale=[1.0, 0.75], dtype=[jnp.bfloat16, jnp.float16], ) @jtu.run_on_devices("cuda") def test_sdpa_fp8_inference( self, batch_size: int, seq_len: int, num_heads: int, head_dim: int, mask_type: MaskType, qkv_layout: str, scale: float, dtype: jnp.dtype, ): k1, k2, k3 = jax.random.split(jax.random.key(0), 3) if qkv_layout == "BNTH": input_shape = (batch_size, num_heads, seq_len, head_dim) else: input_shape = (batch_size, seq_len, num_heads, head_dim) query_h = jax.random.normal(k1, input_shape, dtype=dtype) key_h = jax.random.normal(k2, input_shape, dtype=dtype) value_h = jax.random.normal(k3, input_shape, dtype=dtype) query = cast_to_representable(query_h, jnp.float8_e4m3fn) key = 
cast_to_representable(key_h, jnp.float8_e4m3fn) value = cast_to_representable(value_h, jnp.float8_e4m3fn) query_quantized = quantize(query, jnp.float8_e4m3fn, jnp.float32) key_quantized = quantize(key, jnp.float8_e4m3fn, jnp.float32) value_quantized = quantize(value, jnp.float8_e4m3fn, jnp.float32) def dot_product_attention_fp8(query, key, value, fp8_metas): f_p = partial( dot_product_attention, scale=scale, mask_type=mask_type, qkv_layout=qkv_layout, use_fp8=True, ) return f_p(query, key, value, fp8_params=fp8_metas) jitted_sdpa_inference = jax.jit( dot_product_attention_fp8, ) jitted_sdpa_inference_ref = jax.jit( partial( dot_product_attention, scale=scale, mask_type=mask_type, qkv_layout=qkv_layout, ), ) fp8_metas = { name: jnp.ones((1, 1, 1, 1), dtype=jnp.float32) for name in fp8_meta_names } out, _, _ = jitted_sdpa_inference( query_quantized, key_quantized, value_quantized, fp8_metas ) out_ref = jitted_sdpa_inference_ref(query, key, value) self.assertArraysAllClose(out_ref, out.astype(dtype), rtol=6e-2, atol=6e-2) if __name__ == "__main__": absltest.main(testLoader=jtu.JaxTestLoader())
DotProductAttentionF8Test
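The FP8 test above relies on `cast_to_representable` and `quantize` helpers defined elsewhere in the test module. The sketch below is a simplified stand-in for those helpers, not the cuDNN FP8 recipe: it casts to `float8_e4m3fn` with a single per-tensor scale, and the 448 max-representable value and scaling scheme are assumptions for illustration only.

```python
# Simplified stand-in for the test's cast_to_representable/quantize helpers
# (not shown above): naive per-tensor scaling into float8_e4m3fn.
import jax
import jax.numpy as jnp

E4M3_MAX = 448.0  # largest finite float8_e4m3fn magnitude


def naive_quantize(x: jax.Array) -> tuple[jax.Array, jax.Array]:
    scale = jnp.max(jnp.abs(x)) / E4M3_MAX
    return (x / scale).astype(jnp.float8_e4m3fn), scale


def naive_dequantize(q: jax.Array, scale: jax.Array) -> jax.Array:
    return q.astype(jnp.float32) * scale


x = jax.random.normal(jax.random.key(0), (4, 8), dtype=jnp.float32)
q, scale = naive_quantize(x)
print(q.dtype)                                           # float8_e4m3fn
print(jnp.max(jnp.abs(naive_dequantize(q, scale) - x)))  # quantization error
```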
python
tox-dev__tox
src/tox/session/cmd/run/common.py
{ "start": 948, "end": 1473 }
class ____(Action): def __call__( self, parser: ArgumentParser, # noqa: ARG002 namespace: Namespace, values: str | Sequence[Any] | None, option_string: str | None = None, # noqa: ARG002 ) -> None: value = "true" if values is None else values if value not in {"config", "true", "false"}: raise ArgumentError(self, f"value must be 'config', 'true', or 'false' (got {value!r})") setattr(namespace, self.dest, value)
SkipMissingInterpreterAction
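A short sketch of how an argparse `Action` like the one above behaves once attached to a parser. The option name, `nargs` handling, and default are assumptions made for illustration; tox wires up the real flag elsewhere in its CLI setup.

```python
# Illustrative wiring of the Action above; the option name, nargs handling,
# and default are assumptions, since tox registers its real flag elsewhere.
from __future__ import annotations

from argparse import Action, ArgumentError, ArgumentParser, Namespace
from typing import Any, Sequence


class SkipMissingInterpreterAction(Action):
    def __call__(
        self,
        parser: ArgumentParser,
        namespace: Namespace,
        values: str | Sequence[Any] | None,
        option_string: str | None = None,
    ) -> None:
        value = "true" if values is None else values
        if value not in {"config", "true", "false"}:
            raise ArgumentError(self, f"value must be 'config', 'true', or 'false' (got {value!r})")
        setattr(namespace, self.dest, value)


parser = ArgumentParser()
parser.add_argument(
    "-s",
    "--skip-missing-interpreters",
    action=SkipMissingInterpreterAction,
    nargs="?",  # a bare "-s" passes values=None, which the action maps to "true"
    default="config",
)

print(parser.parse_args([]).skip_missing_interpreters)               # config
print(parser.parse_args(["-s"]).skip_missing_interpreters)           # true
print(parser.parse_args(["-s", "false"]).skip_missing_interpreters)  # false
```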
python
fastapi__sqlmodel
docs_src/tutorial/fastapi/limit_and_offset/tutorial001_py310.py
{ "start": 392, "end": 1618 }
class ____(HeroBase): id: int sqlite_file_name = "database.db" sqlite_url = f"sqlite:///{sqlite_file_name}" connect_args = {"check_same_thread": False} engine = create_engine(sqlite_url, echo=True, connect_args=connect_args) def create_db_and_tables(): SQLModel.metadata.create_all(engine) app = FastAPI() @app.on_event("startup") def on_startup(): create_db_and_tables() @app.post("/heroes/", response_model=HeroPublic) def create_hero(hero: HeroCreate): with Session(engine) as session: db_hero = Hero.model_validate(hero) session.add(db_hero) session.commit() session.refresh(db_hero) return db_hero @app.get("/heroes/", response_model=list[HeroPublic]) def read_heroes(offset: int = 0, limit: int = Query(default=100, le=100)): with Session(engine) as session: heroes = session.exec(select(Hero).offset(offset).limit(limit)).all() return heroes @app.get("/heroes/{hero_id}", response_model=HeroPublic) def read_hero(hero_id: int): with Session(engine) as session: hero = session.get(Hero, hero_id) if not hero: raise HTTPException(status_code=404, detail="Hero not found") return hero
HeroPublic
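A small client-side sketch (not part of the tutorial file above) exercising the `offset`/`limit` query parameters, assuming the app is served locally with uvicorn on the default port 8000 and that httpx is installed. Note that the endpoint caps `limit` at 100 via `Query(default=100, le=100)`.

```python
# Client-side sketch only; assumes the tutorial app is running locally, e.g.
# `uvicorn tutorial001_py310:app`, and that httpx is installed.
import httpx

resp = httpx.get("http://127.0.0.1:8000/heroes/", params={"offset": 0, "limit": 5})
resp.raise_for_status()
for hero in resp.json():
    print(hero)  # at most 5 HeroPublic payloads, starting at offset 0
```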
python
getsentry__sentry
src/sentry/explore/endpoints/serializers.py
{ "start": 568, "end": 657 }
class ____(serializers.Serializer): groupBy = serializers.CharField()
GroupBySerializer
python
numba__numba
numba/core/types/containers.py
{ "start": 15956, "end": 16073 }
class ____(BaseContainerIterator): """ Type class for set iterators. """ container_class = Set
SetIter
python
mlflow__mlflow
tests/resources/mlflow-test-plugin/mlflow_test_plugin/request_auth_provider.py
{ "start": 94, "end": 356 }
class ____(RequestAuthProvider): """RequestAuthProvider provided through plugin system""" def get_name(self): return "test_auth_provider_name" def get_auth(self): return {"auth_name": "test_auth_provider_name"}
PluginRequestAuthProvider
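A tiny sketch exercising the plugin class above directly, assuming the mlflow test plugin package is installed so the module path from the record resolves. In a real deployment the provider is discovered through MLflow's plugin entry-point mechanism rather than imported by hand.

```python
# Direct instantiation for illustration; assumes the mlflow test plugin
# package is installed so this import path resolves.
from mlflow_test_plugin.request_auth_provider import PluginRequestAuthProvider

provider = PluginRequestAuthProvider()
print(provider.get_name())  # "test_auth_provider_name"
print(provider.get_auth())  # {"auth_name": "test_auth_provider_name"}
```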
python
langchain-ai__langchain
libs/langchain/langchain_classic/agents/output_parsers/tools.py
{ "start": 3053, "end": 4034 }
class ____(MultiActionAgentOutputParser): """Parses a message into agent actions/finish. If a tool_calls parameter is passed, then that is used to get the tool names and tool inputs. If one is not passed, then the AIMessage is assumed to be the final output. """ @property def _type(self) -> str: return "tools-agent-output-parser" @override def parse_result( self, result: list[Generation], *, partial: bool = False, ) -> list[AgentAction] | AgentFinish: if not isinstance(result[0], ChatGeneration): msg = "This output parser only works on ChatGeneration output" raise ValueError(msg) # noqa: TRY004 message = result[0].message return parse_ai_message_to_tool_action(message) @override def parse(self, text: str) -> list[AgentAction] | AgentFinish: msg = "Can only parse messages" raise ValueError(msg)
ToolsAgentOutputParser
python
ray-project__ray
rllib/algorithms/iql/torch/iql_torch_learner.py
{ "start": 788, "end": 9268 }
class ____(TorchLearner, IQLLearner): """Implements the IQL loss on top of `IQLLearner`. This Learner implements configure_optimizers_for_module to define separate optimizers for the policy, Q-, and value networks. When using a twin-Q network architecture, each Q-network is assigned its own optimizer—consistent with the SAC algorithm. The IQL loss is defined in compute_loss_for_module and consists of three components: value loss, Q-loss (TD error), and actor (policy) loss. Note that the original IQL implementation performs separate backward passes for each network. However, due to RLlib's reliance on TorchDDP, all backward passes must be executed within a single update step. This constraint can lead to parameter lag and cyclical loss behavior, though it does not hinder convergence. """ @override(TorchLearner) def configure_optimizers_for_module( self, module_id: ModuleID, config: AlgorithmConfig = None ) -> None: # Note, we could have derived directly from SACTorchLearner to # inherit the setup of optimizers, but that learner comes with # additional parameters which we do not need. # Receive the module. module = self._module[module_id] # Define the optimizer for the critic. # TODO (sven): Maybe we change here naming to `qf` for unification. params_critic = self.get_parameters(module.qf_encoder) + self.get_parameters( module.qf ) optim_critic = torch.optim.Adam(params_critic, eps=1e-7) self.register_optimizer( module_id=module_id, optimizer_name="qf", optimizer=optim_critic, params=params_critic, lr_or_lr_schedule=config.critic_lr, ) # If necessary register also an optimizer for a twin Q network. if config.twin_q: params_twin_critic = self.get_parameters( module.qf_twin_encoder ) + self.get_parameters(module.qf_twin) optim_twin_critic = torch.optim.Adam(params_twin_critic, eps=1e-7) self.register_optimizer( module_id=module_id, optimizer_name="qf_twin", optimizer=optim_twin_critic, params=params_twin_critic, lr_or_lr_schedule=config.critic_lr, ) # Define the optimizer for the actor. params_actor = self.get_parameters(module.pi_encoder) + self.get_parameters( module.pi ) optim_actor = torch.optim.Adam(params_actor, eps=1e-7) self.register_optimizer( module_id=module_id, optimizer_name="policy", optimizer=optim_actor, params=params_actor, lr_or_lr_schedule=config.actor_lr, ) # Define the optimizer for the value function. params_value = self.get_parameters(module.vf_encoder) + self.get_parameters( module.vf ) optim_value = torch.optim.Adam(params_value, eps=1e-7) self.register_optimizer( module_id=module_id, optimizer_name="value", optimizer=optim_value, params=params_value, lr_or_lr_schedule=config.value_lr, ) @override(TorchLearner) def compute_loss_for_module( self, *, module_id: ModuleID, config: AlgorithmConfig, batch: Dict, fwd_out: Dict ): # Get the module and hyperparameters. module = self._module[module_id] expectile = self.expectile[module_id] temperature = self.temperature[module_id] # Get the action distribution for the actor loss. action_train_dist_class = module.get_train_action_dist_cls() action_train_dist = action_train_dist_class.from_logits( fwd_out[Columns.ACTION_DIST_INPUTS] ) # First, compute the value loss via the target Q-network and current observations. value_loss = torch.mean( self._expectile_loss( fwd_out[QF_TARGET_PREDS] - fwd_out[Columns.VF_PREDS], expectile ) ) # Second, compute the actor loss using the target-Q network and values. 
exp_advantages = torch.minimum( torch.exp( temperature * (fwd_out[QF_TARGET_PREDS] - fwd_out[Columns.VF_PREDS]) ), torch.Tensor([100.0]).to(self.device), ) # Note, we are using here the actions from the data sample. action_logps = action_train_dist.logp(batch[Columns.ACTIONS]) # Compute the actor loss. actor_loss = -torch.mean(exp_advantages.detach() * action_logps) # Third, compute the critic loss. target_critic = ( batch[Columns.REWARDS] + config.gamma * (1 - batch[Columns.TERMINATEDS].float()) * fwd_out[VF_PREDS_NEXT].detach() ) critic_loss = torch.mean( torch.nn.MSELoss(reduction="none")(target_critic, fwd_out[QF_PREDS]) ) # If we have a twin-Q architecture, calculate the its loss, too. if config.twin_q: critic_twin_loss = ( torch.mean( torch.nn.MSELoss(reduction="none")( target_critic, fwd_out[QF_TWIN_PREDS] ) ) * 0.5 ) critic_loss *= 0.5 # Compute the total loss. total_loss = value_loss + actor_loss + critic_loss # If we have a twin-Q architecture, add its loss. if config.twin_q: total_loss += critic_twin_loss # Log metrics. self.metrics.log_dict( { POLICY_LOSS_KEY: actor_loss, QF_LOSS_KEY: critic_loss, }, key=module_id, window=1, # <- single items (should not be mean/ema-reduced over time). ) # Log the losses also in the temporary containers for gradient computation. self._temp_losses[(module_id, POLICY_LOSS_KEY)] = actor_loss self._temp_losses[(module_id, QF_LOSS_KEY)] = critic_loss self._temp_losses[(module_id, VF_LOSS)] = value_loss # If a twin-Q architecture is used add metrics and loss. if config.twin_q: self.metrics.log_value( key=(module_id, QF_TWIN_LOSS_KEY), value=critic_twin_loss, window=1, # <- single items (should not be mean/ema-reduced over time). ) self._temp_losses[(module_id, QF_TWIN_LOSS_KEY)] = critic_twin_loss return total_loss @override(TorchLearner) def compute_gradients( self, loss_per_module: Dict[ModuleID, TensorType], **kwargs ) -> ParamDict: grads = {} for module_id in set(loss_per_module.keys()) - {ALL_MODULES}: # Loop through optimizers registered for this module. for optim_name, optim in self.get_optimizers_for_module(module_id): # Zero the gradients. Note, we need to reset the gradients b/c # each component for a module operates on the same graph. optim.zero_grad(set_to_none=True) # Compute the gradients for the component and module. loss_tensor = self._temp_losses.pop((module_id, optim_name + "_loss")) loss_tensor.backward(retain_graph=True) # Store the gradients for the component and module. grads.update( { pid: p.grad for pid, p in self.filter_param_dict_for_optimizer( self._params, optim ).items() } ) # Make sure we updated on all loss terms. assert not self._temp_losses return grads def _expectile_loss(self, diff: TensorType, expectile: TensorType) -> TensorType: """Computes the expectile loss. Args: diff: A tensor containing a difference loss. expectile: The expectile to use for the expectile loss. Returns: The expectile loss of `diff` using `expectile`. """ weight = torch.where(diff > 0, expectile, 1 - expectile) return weight * torch.pow(diff, 2)
IQLTorchLearner
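A standalone restatement of the expectile loss implemented by `_expectile_loss()` above, with made-up numbers: an expectile of 0.5 gives a symmetric (0.5-weighted) squared error, while larger expectiles down-weight negative differences, i.e. value estimates that sit above the Q target.

```python
# Standalone re-statement of _expectile_loss() with made-up numbers.
import torch


def expectile_loss(diff: torch.Tensor, expectile: float) -> torch.Tensor:
    weight = torch.where(diff > 0, expectile, 1.0 - expectile)
    return weight * diff.pow(2)


diff = torch.tensor([-1.0, 0.5, 2.0])
print(expectile_loss(diff, 0.5))  # tensor([0.5000, 0.1250, 2.0000])
print(expectile_loss(diff, 0.7))  # tensor([0.3000, 0.1750, 2.8000])
```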
python
openai__openai-python
src/openai/types/image_gen_completed_event.py
{ "start": 871, "end": 1745 }
class ____(BaseModel): b64_json: str """Base64-encoded image data, suitable for rendering as an image.""" background: Literal["transparent", "opaque", "auto"] """The background setting for the generated image.""" created_at: int """The Unix timestamp when the event was created.""" output_format: Literal["png", "webp", "jpeg"] """The output format for the generated image.""" quality: Literal["low", "medium", "high", "auto"] """The quality setting for the generated image.""" size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"] """The size of the generated image.""" type: Literal["image_generation.completed"] """The type of the event. Always `image_generation.completed`.""" usage: Usage """For `gpt-image-1` only, the token usage information for the image generation."""
ImageGenCompletedEvent
python
mlflow__mlflow
dev/set_matrix.py
{ "start": 3957, "end": 4410 }
class ____(BaseModel): model_config = ConfigDict(extra="forbid") package_info: PackageInfo models: TestConfig | None = None autologging: TestConfig | None = None @property def categories(self) -> list[tuple[str, TestConfig]]: cs = [] if self.models: cs.append(("models", self.models)) if self.autologging: cs.append(("autologging", self.autologging)) return cs
FlavorConfig
python
pytorch__pytorch
torch/_inductor/template_heuristics/triton.py
{ "start": 3406, "end": 3634 }
class ____(ConvConfig): """ ROCm subclass for Conv, with AMD backend specific tuneable kernargs """ matrix_instr_nonkdim: int = 16 waves_per_eu: int = 0 kpack: int = 2 @dataclasses.dataclass
ROCmConvConfig
python
scikit-learn__scikit-learn
sklearn/utils/_metadata_requests.py
{ "start": 19381, "end": 26465 }
class ____: """Contains the metadata request info of a consumer. Instances of `MethodMetadataRequest` are used in this class for each available method under `metadatarequest.{method}`. Consumer-only classes such as simple estimators return a serialized version of this class as the output of `get_metadata_routing()`. .. versionadded:: 1.3 Parameters ---------- owner : object The object to which these requests belong. """ # this is here for us to use this attribute's value instead of doing # `isinstance` in our checks, so that we avoid issues when people vendor # this file instead of using it directly from scikit-learn. _type = "metadata_request" def __init__(self, owner): self.owner = owner for method in SIMPLE_METHODS: setattr( self, method, MethodMetadataRequest(owner=owner, method=method), ) def consumes(self, method, params): """Return params consumed as metadata in a :term:`consumer`. This method returns the subset of given `params` that are consumed by the given `method`. It can be used to check if parameters are used as metadata in the specified method of the :term:`consumer` that owns this `MetadataRequest` instance. .. versionadded:: 1.4 Parameters ---------- method : str The name of the method for which to determine consumed parameters. params : iterable of str An iterable of parameter names to test for consumption. Returns ------- consumed_params : set of str A subset of parameters from `params` which are consumed by the given method. """ return getattr(self, method)._consumes(params=params) def __getattr__(self, name): # Called when the default attribute access fails with an AttributeError # (either __getattribute__() raises an AttributeError because name is # not an instance attribute or an attribute in the class tree for self; # or __get__() of a name property raises AttributeError). This method # should either return the (computed) attribute value or raise an # AttributeError exception. # https://docs.python.org/3/reference/datamodel.html#object.__getattr__ if name not in COMPOSITE_METHODS: raise AttributeError( f"'{self.__class__.__name__}' object has no attribute '{name}'" ) requests = {} for method in COMPOSITE_METHODS[name]: mmr = getattr(self, method) existing = set(requests.keys()) upcoming = set(mmr.requests.keys()) common = existing & upcoming conflicts = [key for key in common if requests[key] != mmr._requests[key]] if conflicts: raise ValueError( f"Conflicting metadata requests for {', '.join(conflicts)} while" f" composing the requests for {name}. Metadata with the same name" f" for methods {', '.join(COMPOSITE_METHODS[name])} should have the" " same request value." ) requests.update(mmr._requests) return MethodMetadataRequest(owner=self.owner, method=name, requests=requests) def _get_param_names(self, method, return_alias, ignore_self_request=None): """Get names of all metadata that can be consumed or routed by specified \ method. This method returns the names of all metadata, even the ``False`` ones. Parameters ---------- method : str The name of the method for which metadata names are requested. return_alias : bool Controls whether original or aliased names should be returned. If ``False``, aliases are ignored and original names are returned. ignore_self_request : bool Ignored. Present for API compatibility. Returns ------- names : set of str A set of strings with the names of all metadata. 
""" return getattr(self, method)._get_param_names(return_alias=return_alias) def _route_params(self, *, params, method, parent, caller): """Prepare the given parameters to be passed to the method. The output of this method can be used directly as the input to the corresponding method as extra keyword arguments to pass metadata. Parameters ---------- params : dict A dictionary of provided metadata. method : str The name of the method for which the parameters are requested and routed. parent : object Parent class object, that routes the metadata. caller : str Method from the parent class object, where the metadata is routed from. Returns ------- params : Bunch A :class:`~sklearn.utils.Bunch` of {metadata: value} which can be given to the corresponding method. """ return getattr(self, method)._route_params( params=params, parent=parent, caller=caller ) def _check_warnings(self, *, method, params): """Check whether metadata is passed which is marked as WARN. If any metadata is passed which is marked as WARN, a warning is raised. Parameters ---------- method : str The name of the method for which the warnings should be checked. params : dict The metadata passed to a method. """ getattr(self, method)._check_warnings(params=params) def _serialize(self): """Serialize the object. Returns ------- obj : dict A serialized version of the instance in the form of a dictionary. """ output = dict() for method in SIMPLE_METHODS: mmr = getattr(self, method) if len(mmr.requests): output[method] = mmr._serialize() return output def __repr__(self): return str(self._serialize()) def __str__(self): return str(repr(self)) # Metadata Request for Routers # ============================ # This section includes all objects required for MetadataRouter which is used # in routers, returned by their ``get_metadata_routing``. # `RouterMappingPair` is used to store a (mapping, router) tuple where `mapping` is a # `MethodMapping` object and `router` is the output of `get_metadata_routing`. # `MetadataRouter` stores a collection of `RouterMappingPair` objects in its # `_route_mappings` attribute. RouterMappingPair = namedtuple("RouterMappingPair", ["mapping", "router"]) # `MethodPair` is used to store a single method routing. `MethodMapping` stores a list # of `MethodPair` objects in its `_routes` attribute. MethodPair = namedtuple("MethodPair", ["caller", "callee"])
MetadataRequest
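A short sketch (assuming scikit-learn >= 1.3 with metadata routing enabled) of the `MetadataRequest` object a plain consumer returns from `get_metadata_routing()`, including the `consumes()` helper shown above. The estimator choice and parameter names are just examples.

```python
# Sketch only; requires metadata routing to be switched on globally.
import sklearn
from sklearn.linear_model import LogisticRegression

sklearn.set_config(enable_metadata_routing=True)

# Request that sample_weight be routed to fit; the estimator is a consumer,
# so get_metadata_routing() returns a MetadataRequest.
est = LogisticRegression().set_fit_request(sample_weight=True)
routing = est.get_metadata_routing()

print(routing.consumes("fit", ["sample_weight", "unused_param"]))  # {'sample_weight'}
print(routing)  # serialized per-method request dict
```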
python
PrefectHQ__prefect
src/prefect/blocks/core.py
{ "start": 11388, "end": 67055 }
class ____(BaseModel, ABC): """ A base class for implementing a block that wraps an external service. This class can be defined with an arbitrary set of fields and methods, and couples business logic with data contained in an block document. `_block_document_name`, `_block_document_id`, `_block_schema_id`, and `_block_type_id` are reserved by Prefect as Block metadata fields, but otherwise a Block can implement arbitrary logic. Blocks can be instantiated without populating these metadata fields, but can only be used interactively, not with the Prefect API. Instead of the __init__ method, a block implementation allows the definition of a `block_initialization` method that is called after initialization. """ model_config: ClassVar[ConfigDict] = ConfigDict( extra="allow", json_schema_extra=schema_extra, ) def __new__(cls: type[Self], **kwargs: Any) -> Self: """ Create an instance of the Block subclass type if a `block_type_slug` is present in the data payload. """ if block_type_slug := kwargs.pop("block_type_slug", None): subcls = cls.get_block_class_from_key(block_type_slug) if cls is not subcls and issubclass(subcls, cls): return super().__new__(subcls) return super().__new__(cls) def __init__(self, *args: Any, **kwargs: Any): super().__init__(*args, **kwargs) self.block_initialization() def __str__(self) -> str: return self.__repr__() def __repr_args__(self) -> list[tuple[str | None, Any]]: repr_args = super().__repr_args__() data_keys = self.model_json_schema()["properties"].keys() return [ (key, value) for key, value in repr_args if key is None or key in data_keys ] @model_validator(mode="before") @classmethod def validate_block_type_slug(cls, values: Any) -> Any: """ Validates that the `block_type_slug` in the input values matches the expected block type slug for the class. This helps pydantic to correctly discriminate between different Block subclasses when validating Union types of Blocks. """ if isinstance(values, dict): if "block_type_slug" in values: expected_slug = cls.get_block_type_slug() slug = values["block_type_slug"] if slug and slug != expected_slug: raise ValueError( f"Invalid block_type_slug: expected '{expected_slug}', got '{values['block_type_slug']}'" ) return values def block_initialization(self) -> None: pass # -- private class variables # set by the class itself # Attribute to customize the name of the block type created # when the block is registered with the API. If not set, block # type name will default to the class name. _block_type_name: ClassVar[Optional[str]] = None _block_type_slug: ClassVar[Optional[str]] = None # Attributes used to set properties on a block type when registered # with the API. _logo_url: ClassVar[Optional[HttpUrl]] = None _documentation_url: ClassVar[Optional[HttpUrl]] = None _description: ClassVar[Optional[str]] = None _code_example: ClassVar[Optional[str]] = None _block_type_id: ClassVar[Optional[UUID]] = None _block_schema_id: ClassVar[Optional[UUID]] = None _block_schema_capabilities: ClassVar[Optional[list[str]]] = None _block_schema_version: ClassVar[Optional[str]] = None # -- private instance variables # these are set when blocks are loaded from the API _block_document_id: Optional[UUID] = PrivateAttr(None) _block_document_name: Optional[str] = PrivateAttr(None) _is_anonymous: Optional[bool] = PrivateAttr(None) # Exclude `save` as it uses the `sync_compatible` decorator and needs to be # decorated directly. 
_events_excluded_methods: ClassVar[list[str]] = PrivateAttr( default=["block_initialization", "save", "dict"] ) @classmethod def __dispatch_key__(cls) -> str | None: if cls.__name__ == "Block": return None # The base class is abstract return block_schema_to_key(cls._to_block_schema()) @model_serializer(mode="wrap") def ser_model( self, handler: SerializerFunctionWrapHandler, info: SerializationInfo ) -> Any: jsonable_self = handler(self) if (ctx := info.context) and ctx.get("include_secrets") is True: # Add serialization mode to context so handle_secret_render knows how to process nested models ctx["serialization_mode"] = info.mode for field_name in type(self).model_fields: field_value = getattr(self, field_name) # In JSON mode, skip fields that don't contain secrets # as they're already properly serialized by the handler if ( info.mode == "json" and field_name in jsonable_self and not self._field_has_secrets(field_name) ): continue # For all other fields, use visit_collection with handle_secret_render jsonable_self[field_name] = visit_collection( expr=field_value, visit_fn=partial(handle_secret_render, context=ctx), return_data=True, ) extra_fields = { "block_type_slug": self.get_block_type_slug(), "_block_document_id": self._block_document_id, "_block_document_name": self._block_document_name, "_is_anonymous": self._is_anonymous, } jsonable_self |= { key: value for key, value in extra_fields.items() if value is not None } return jsonable_self @classmethod def get_block_type_name(cls) -> str: return cls._block_type_name or cls.__name__ @classmethod def get_block_type_slug(cls) -> str: return slugify(cls._block_type_slug or cls.get_block_type_name()) @classmethod def get_block_capabilities(cls) -> FrozenSet[str]: """ Returns the block capabilities for this Block. Recursively collects all block capabilities of all parent classes into a single frozenset. """ return frozenset( { c for base in (cls,) + cls.__mro__ for c in getattr(base, "_block_schema_capabilities", []) or [] } ) @classmethod def _get_current_package_version(cls): current_module = inspect.getmodule(cls) if current_module: top_level_module = sys.modules[ current_module.__name__.split(".")[0] or "__main__" ] try: version = Version(top_level_module.__version__) # Strips off any local version information return version.base_version except (AttributeError, InvalidVersion): # Module does not have a __version__ attribute or is not a parsable format pass return DEFAULT_BLOCK_SCHEMA_VERSION @classmethod def get_block_schema_version(cls) -> str: return cls._block_schema_version or cls._get_current_package_version() @classmethod def _to_block_schema_reference_dict(cls): return dict( block_type_slug=cls.get_block_type_slug(), block_schema_checksum=cls._calculate_schema_checksum(), ) @classmethod def _calculate_schema_checksum( cls, block_schema_fields: dict[str, Any] | None = None ): """ Generates a unique hash for the underlying schema of block. Args: block_schema_fields: Dictionary detailing block schema fields to generate a checksum for. The fields of the current class is used if this parameter is not provided. Returns: str: The calculated checksum prefixed with the hashing algorithm used. 
""" block_schema_fields = ( cls.model_json_schema() if block_schema_fields is None else block_schema_fields ) fields_for_checksum = remove_nested_keys(["secret_fields"], block_schema_fields) if fields_for_checksum.get("definitions"): non_block_definitions = _get_non_block_reference_definitions( fields_for_checksum, fields_for_checksum["definitions"] ) if non_block_definitions: fields_for_checksum["definitions"] = non_block_definitions else: # Pop off definitions entirely instead of empty dict for consistency # with the OpenAPI specification fields_for_checksum.pop("definitions") checksum = hash_objects(fields_for_checksum, hash_algo=hashlib.sha256) if checksum is None: raise ValueError("Unable to compute checksum for block schema") else: return f"sha256:{checksum}" def _field_has_secrets(self, field_name: str) -> bool: """Check if a field contains secrets based on the schema's secret_fields.""" secret_fields = self.model_json_schema().get("secret_fields", []) # Check if field_name matches any secret field pattern for secret_field in secret_fields: if secret_field == field_name: return True elif secret_field.startswith(f"{field_name}."): # This field contains nested secrets return True elif secret_field.endswith(".*"): # Handle wildcard patterns like "field.*" prefix = secret_field[:-2] # Remove .* if field_name == prefix: return True return False def _to_block_document( self, name: Optional[str] = None, block_schema_id: Optional[UUID] = None, block_type_id: Optional[UUID] = None, is_anonymous: Optional[bool] = None, include_secrets: bool = False, ) -> BlockDocument: """ Creates the corresponding block document based on the data stored in a block. The corresponding block document name, block type ID, and block schema ID must either be passed into the method or configured on the block. Args: name: The name of the created block document. Not required if anonymous. block_schema_id: UUID of the corresponding block schema. block_type_id: UUID of the corresponding block type. is_anonymous: if True, an anonymous block is created. Anonymous blocks are not displayed in the UI and used primarily for system operations and features that need to automatically generate blocks. Returns: BlockDocument: Corresponding block document populated with the block's configured data. """ if is_anonymous is None: is_anonymous = self._is_anonymous or False # name must be present if not anonymous if not is_anonymous and not name and not self._block_document_name: raise ValueError("No name provided, either as an argument or on the block.") if not block_schema_id and not self._block_schema_id: raise ValueError( "No block schema ID provided, either as an argument or on the block." ) if not block_type_id and not self._block_type_id: raise ValueError( "No block type ID provided, either as an argument or on the block." ) # The keys passed to `include` must NOT be aliases, else some items will be missed # i.e. 
must do `self.schema_` vs `self.schema` to get a `schema_ = Field(alias="schema")` # reported from https://github.com/PrefectHQ/prefect-dbt/issues/54 data_keys = self.model_json_schema(by_alias=False)["properties"].keys() # `block_document_data`` must return the aliased version for it to show in the UI block_document_data = self.model_dump( by_alias=True, include=data_keys, context={"include_secrets": include_secrets}, ) # Ensure non-secret fields are JSON-serializable to avoid issues with types # like SemanticVersion when the BlockDocument is later serialized try: json_data = self.model_dump( mode="json", by_alias=True, include=data_keys, context={"include_secrets": include_secrets}, ) # Replace non-secret, non-Block fields with their JSON representation # We need to check the original field to determine if it's a secret or Block for key in data_keys: if key in block_document_data and key in json_data: field_value = getattr(self, key) # Only replace if the field doesn't contain secrets and is not a Block if not self._field_has_secrets(key) and not isinstance( field_value, Block ): block_document_data[key] = json_data[key] except Exception: # If JSON serialization fails, we'll handle it later pass # Iterate through and find blocks that already have saved block documents to # create references to those saved block documents. for key in data_keys: field_value = getattr(self, key) if ( isinstance(field_value, Block) and field_value._block_document_id is not None ): block_document_data[key] = { "$ref": {"block_document_id": field_value._block_document_id} } block_schema_id = block_schema_id or self._block_schema_id block_type_id = block_type_id or self._block_type_id if block_schema_id is None: raise ValueError( "No block schema ID provided, either as an argument or on the block." ) if block_type_id is None: raise ValueError( "No block type ID provided, either as an argument or on the block." ) return BlockDocument( id=self._block_document_id or uuid4(), name=(name or self._block_document_name) if not is_anonymous else None, block_schema_id=block_schema_id, block_type_id=block_type_id, block_type_name=self._block_type_name, data=block_document_data, block_schema=self._to_block_schema( block_type_id=block_type_id or self._block_type_id, ), block_type=self._to_block_type(), is_anonymous=is_anonymous, ) @classmethod def _to_block_schema(cls, block_type_id: Optional[UUID] = None) -> BlockSchema: """ Creates the corresponding block schema of the block. The corresponding block_type_id must either be passed into the method or configured on the block. Args: block_type_id: UUID of the corresponding block type. Returns: BlockSchema: The corresponding block schema. """ fields = cls.model_json_schema() return BlockSchema( id=cls._block_schema_id if cls._block_schema_id is not None else uuid4(), checksum=cls._calculate_schema_checksum(), fields=fields, block_type_id=block_type_id or cls._block_type_id, block_type=cls._to_block_type(), capabilities=list(cls.get_block_capabilities()), version=cls.get_block_schema_version(), ) @classmethod def _parse_docstring(cls) -> list[DocstringSection]: """ Parses the docstring into list of DocstringSection objects. Helper method used primarily to suppress irrelevant logs, e.g. `<module>:11: No type or annotation for parameter 'write_json'` because griffe is unable to parse the types from pydantic.BaseModel. 
""" if cls.__doc__ is None: return [] with disable_logger("griffe"): docstring = Docstring(cls.__doc__) parsed = parse(docstring, Parser.google) return parsed @classmethod def get_description(cls) -> Optional[str]: """ Returns the description for the current block. Attempts to parse description from class docstring if an override is not defined. """ description = cls._description # If no description override has been provided, find the first text section # and use that as the description if description is None and cls.__doc__ is not None: parsed = cls._parse_docstring() parsed_description = next( ( section.as_dict().get("value") for section in parsed if section.kind == DocstringSectionKind.text ), None, ) if isinstance(parsed_description, str): description = parsed_description.strip() return description @classmethod def get_code_example(cls) -> Optional[str]: """ Returns the code example for the given block. Attempts to parse code example from the class docstring if an override is not provided. """ code_example = ( dedent(text=cls._code_example) if cls._code_example is not None else None ) # If no code example override has been provided, attempt to find a examples # section or an admonition with the annotation "example" and use that as the # code example if code_example is None and cls.__doc__ is not None: parsed = cls._parse_docstring() for section in parsed: # Section kind will be "examples" if Examples section heading is used. if section.kind == DocstringSectionKind.examples: # Examples sections are made up of smaller sections that need to be # joined with newlines. Smaller sections are represented as tuples # with shape (DocstringSectionKind, str) code_example = "\n".join( (part[1] for part in section.as_dict().get("value", [])) ) break # Section kind will be "admonition" if Example section heading is used. if section.kind == DocstringSectionKind.admonition: value = section.as_dict().get("value", {}) if value.get("annotation") == "example": code_example = value.get("description") break if code_example is None: # If no code example has been specified or extracted from the class # docstring, generate a sensible default code_example = cls._generate_code_example() return code_example @classmethod def _generate_code_example(cls) -> str: """Generates a default code example for the current class""" qualified_name = to_qualified_name(cls) module_str = ".".join(qualified_name.split(".")[:-1]) origin = cls.__pydantic_generic_metadata__.get("origin") or cls class_name = origin.__name__ block_variable_name = f"{cls.get_block_type_slug().replace('-', '_')}_block" return dedent( f"""\ ```python from {module_str} import {class_name} {block_variable_name} = {class_name}.load("BLOCK_NAME") ```""" ) @classmethod def _to_block_type(cls) -> BlockType: """ Creates the corresponding block type of the block. Returns: BlockType: The corresponding block type. """ return BlockType( id=cls._block_type_id or uuid4(), slug=cls.get_block_type_slug(), name=cls.get_block_type_name(), logo_url=cls._logo_url, documentation_url=cls._documentation_url, description=cls.get_description(), code_example=cls.get_code_example(), ) @classmethod def _from_block_document(cls, block_document: BlockDocument) -> Self: """ Instantiates a block from a given block document. The corresponding block class will be looked up in the block registry based on the corresponding block schema of the provided block document. Args: block_document: The block document used to instantiate a block. 
Raises: ValueError: If the provided block document doesn't have a corresponding block schema. Returns: Block: Hydrated block with data from block document. """ if block_document.block_schema is None: raise ValueError( "Unable to determine block schema for provided block document" ) block_cls = ( cls if cls.__name__ != "Block" # Look up the block class by dispatch else cls.get_block_class_from_schema(block_document.block_schema) ) block = block_cls.model_validate(block_document.data) block._block_document_id = block_document.id block.__class__._block_schema_id = block_document.block_schema_id block.__class__._block_type_id = block_document.block_type_id block._block_document_name = block_document.name block._is_anonymous = block_document.is_anonymous block._define_metadata_on_nested_blocks( block_document.block_document_references ) resources: Optional[ResourceTuple] = block._event_method_called_resources() if resources: kind = block._event_kind() resource, related = resources emit_event(event=f"{kind}.loaded", resource=resource, related=related) return block def _event_kind(self) -> str: return f"prefect.block.{self.get_block_type_slug()}" def _event_method_called_resources(self) -> ResourceTuple | None: if not (self._block_document_id and self._block_document_name): return None return ( { "prefect.resource.id": ( f"prefect.block-document.{self._block_document_id}" ), "prefect.resource.name": self._block_document_name, }, [ { "prefect.resource.id": ( f"prefect.block-type.{self.get_block_type_slug()}" ), "prefect.resource.role": "block-type", } ], ) @classmethod def get_block_class_from_schema(cls: type[Self], schema: BlockSchema) -> type[Self]: """ Retrieve the block class implementation given a schema. """ return cls.get_block_class_from_key(block_schema_to_key(schema)) @classmethod def get_block_class_from_key(cls: type[Self], key: str) -> type[Self]: """ Retrieve the block class implementation given a key. """ # Ensure collections are imported and have the opportunity to register types # before looking up the block class, but only do this once load_prefect_collections() try: return lookup_type(cls, key) except KeyError: message = f"No block class found for slug {key!r}." # Handle common blocks types used for storage, which is the primary use case for looking up blocks by key if key == "s3-bucket": message += " Please ensure that `prefect-aws` is installed." elif key == "gcs-bucket": message += " Please ensure that `prefect-gcp` is installed." elif key == "azure-blob-storage-container": message += " Please ensure that `prefect-azure` is installed." else: message += " Please ensure that the block class is available in the current environment." raise UnknownBlockType(message) def _define_metadata_on_nested_blocks( self, block_document_references: dict[str, dict[str, Any]] ): """ Recursively populates metadata fields on nested blocks based on the provided block document references. 
""" for item in block_document_references.items(): field_name, block_document_reference = item nested_block = getattr(self, field_name) if isinstance(nested_block, Block): nested_block_document_info = block_document_reference.get( "block_document", {} ) nested_block._define_metadata_on_nested_blocks( nested_block_document_info.get("block_document_references", {}) ) nested_block_document_id = nested_block_document_info.get("id") nested_block._block_document_id = ( UUID(nested_block_document_id) if nested_block_document_id else None ) nested_block._block_document_name = nested_block_document_info.get( "name" ) nested_block._is_anonymous = nested_block_document_info.get( "is_anonymous" ) @classmethod async def _aget_block_document( cls, name: str, client: "PrefectClient", ) -> tuple[BlockDocument, str]: if cls.__name__ == "Block": block_type_slug, block_document_name = name.split("/", 1) else: block_type_slug = cls.get_block_type_slug() block_document_name = name try: block_document = await client.read_block_document_by_name( name=block_document_name, block_type_slug=block_type_slug ) except prefect.exceptions.ObjectNotFound as e: raise ValueError( f"Unable to find block document named {block_document_name} for block" f" type {block_type_slug}" ) from e return block_document, block_document_name @classmethod def _get_block_document( cls, name: str, client: "SyncPrefectClient", ) -> tuple[BlockDocument, str]: if cls.__name__ == "Block": block_type_slug, block_document_name = name.split("/", 1) else: block_type_slug = cls.get_block_type_slug() block_document_name = name try: block_document = client.read_block_document_by_name( name=block_document_name, block_type_slug=block_type_slug ) except prefect.exceptions.ObjectNotFound as e: raise ValueError( f"Unable to find block document named {block_document_name} for block" f" type {block_type_slug}" ) from e return block_document, block_document_name @classmethod @inject_client async def _get_block_document_by_id( cls, block_document_id: Union[str, uuid.UUID], client: "PrefectClient | None" = None, ): if TYPE_CHECKING: assert isinstance(client, PrefectClient) if isinstance(block_document_id, str): try: block_document_id = UUID(block_document_id) except ValueError: raise ValueError( f"Block document ID {block_document_id!r} is not a valid UUID" ) try: block_document = await client.read_block_document( block_document_id=block_document_id ) except prefect.exceptions.ObjectNotFound: raise ValueError( f"Unable to find block document with ID {block_document_id!r}" ) return block_document, block_document.name @classmethod @inject_client async def aload( cls, name: str, validate: bool = True, client: Optional["PrefectClient"] = None, ) -> "Self": """ Retrieves data from the block document with the given name for the block type that corresponds with the current class and returns an instantiated version of the current class with the data stored in the block document. If a block document for a given block type is saved with a different schema than the current class calling `aload`, a warning will be raised. If the current class schema is a subset of the block document schema, the block can be loaded as normal using the default `validate = True`. If the current class schema is a superset of the block document schema, `aload` must be called with `validate` set to False to prevent a validation error. In this case, the block attributes will default to `None` and must be set manually and saved to a new block document before the block can be used as expected. 
Args: name: The name or slug of the block document. A block document slug is a string with the format `<block_type_slug>/<block_document_name>` validate: If False, the block document will be loaded without Pydantic validating the block schema. This is useful if the block schema has changed client-side since the block document referred to by `name` was saved. client: The client to use to load the block document. If not provided, the default client will be injected. Raises: ValueError: If the requested block document is not found. Returns: An instance of the current class hydrated with the data stored in the block document with the specified name. Examples: Load from a Block subclass with a block document name: ```python class Custom(Block): message: str Custom(message="Hello!").save("my-custom-message") loaded_block = await Custom.aload("my-custom-message") ``` Load from Block with a block document slug: ```python class Custom(Block): message: str Custom(message="Hello!").save("my-custom-message") loaded_block = await Block.aload("custom/my-custom-message") ``` Migrate a block document to a new schema: ```python # original class class Custom(Block): message: str Custom(message="Hello!").save("my-custom-message") # Updated class with new required field class Custom(Block): message: str number_of_ducks: int loaded_block = await Custom.aload("my-custom-message", validate=False) # Prints UserWarning about schema mismatch loaded_block.number_of_ducks = 42 loaded_block.save("my-custom-message", overwrite=True) ``` """ if TYPE_CHECKING: assert isinstance(client, PrefectClient) block_document, _ = await cls._aget_block_document(name, client=client) return cls._load_from_block_document(block_document, validate=validate) @classmethod @async_dispatch(aload) def load( cls, name: str, validate: bool = True, client: Optional["PrefectClient"] = None, ) -> "Self": """ Retrieves data from the block document with the given name for the block type that corresponds with the current class and returns an instantiated version of the current class with the data stored in the block document. If a block document for a given block type is saved with a different schema than the current class calling `load`, a warning will be raised. If the current class schema is a subset of the block document schema, the block can be loaded as normal using the default `validate = True`. If the current class schema is a superset of the block document schema, `load` must be called with `validate` set to False to prevent a validation error. In this case, the block attributes will default to `None` and must be set manually and saved to a new block document before the block can be used as expected. Args: name: The name or slug of the block document. A block document slug is a string with the format `<block_type_slug>/<block_document_name>` validate: If False, the block document will be loaded without Pydantic validating the block schema. This is useful if the block schema has changed client-side since the block document referred to by `name` was saved. client: The client to use to load the block document. If not provided, the default client will be injected. Raises: ValueError: If the requested block document is not found. Returns: An instance of the current class hydrated with the data stored in the block document with the specified name. 
Examples: Load from a Block subclass with a block document name: ```python class Custom(Block): message: str Custom(message="Hello!").save("my-custom-message") loaded_block = Custom.load("my-custom-message") ``` Load from Block with a block document slug: ```python class Custom(Block): message: str Custom(message="Hello!").save("my-custom-message") loaded_block = Block.load("custom/my-custom-message") ``` Migrate a block document to a new schema: ```python # original class class Custom(Block): message: str Custom(message="Hello!").save("my-custom-message") # Updated class with new required field class Custom(Block): message: str number_of_ducks: int loaded_block = Custom.load("my-custom-message", validate=False) # Prints UserWarning about schema mismatch loaded_block.number_of_ducks = 42 loaded_block.save("my-custom-message", overwrite=True) ``` """ # Need to use a `PrefectClient` here to ensure `Block.load` and `Block.aload` signatures match # TODO: replace with only sync client once all internal calls are updated to use `Block.aload` and `@async_dispatch` is removed if client is None: # If a client wasn't provided, we get to use a sync client from prefect.client.orchestration import get_client with get_client(sync_client=True) as sync_client: block_document, _ = cls._get_block_document(name, client=sync_client) else: # If a client was provided, reuse it, even though it's async, to avoid excessive client creation block_document, _ = run_coro_as_sync( cls._aget_block_document(name, client=client) ) return cls._load_from_block_document(block_document, validate=validate) @classmethod @sync_compatible @inject_client async def load_from_ref( cls, ref: Union[str, UUID, dict[str, Any]], validate: bool = True, client: "PrefectClient | None" = None, ) -> Self: """ Retrieves data from the block document by given reference for the block type that corresponds with the current class and returns an instantiated version of the current class with the data stored in the block document. Provided reference can be a block document ID, or a reference data in dictionary format. Supported dictionary reference formats are: - `{"block_document_id": <block_document_id>}` - `{"block_document_slug": <block_document_slug>}` If a block document for a given block type is saved with a different schema than the current class calling `load`, a warning will be raised. If the current class schema is a subset of the block document schema, the block can be loaded as normal using the default `validate = True`. If the current class schema is a superset of the block document schema, `load` must be called with `validate` set to False to prevent a validation error. In this case, the block attributes will default to `None` and must be set manually and saved to a new block document before the block can be used as expected. Args: ref: The reference to the block document. This can be a block document ID, or one of supported dictionary reference formats. validate: If False, the block document will be loaded without Pydantic validating the block schema. This is useful if the block schema has changed client-side since the block document referred to by `name` was saved. client: The client to use to load the block document. If not provided, the default client will be injected. Raises: ValueError: If invalid reference format is provided. ValueError: If the requested block document is not found. Returns: An instance of the current class hydrated with the data stored in the block document with the specified name. 
""" if TYPE_CHECKING: assert isinstance(client, PrefectClient) block_document = None if isinstance(ref, (str, UUID)): block_document, _ = await cls._get_block_document_by_id(ref, client=client) else: if block_document_id := ref.get("block_document_id"): block_document, _ = await cls._get_block_document_by_id( block_document_id, client=client ) elif block_document_slug := ref.get("block_document_slug"): block_document, _ = await cls._aget_block_document( block_document_slug, client=client ) if not block_document: raise ValueError(f"Invalid reference format {ref!r}.") return cls._load_from_block_document(block_document, validate=validate) @classmethod def _load_from_block_document( cls, block_document: BlockDocument, validate: bool = True ) -> Self: """ Loads a block from a given block document. If a block document for a given block type is saved with a different schema than the current class calling `load`, a warning will be raised. If the current class schema is a subset of the block document schema, the block can be loaded as normal using the default `validate = True`. If the current class schema is a superset of the block document schema, `load` must be called with `validate` set to False to prevent a validation error. In this case, the block attributes will default to `None` and must be set manually and saved to a new block document before the block can be used as expected. Args: block_document: The block document used to instantiate a block. validate: If False, the block document will be loaded without Pydantic validating the block schema. This is useful if the block schema has changed client-side since the block document referred to by `name` was saved. Raises: ValueError: If the requested block document is not found. Returns: An instance of the current class hydrated with the data stored in the block document with the specified name. """ try: return cls._from_block_document(block_document) except ValidationError as e: if not validate: missing_fields = tuple(err["loc"][0] for err in e.errors()) missing_block_data: dict[str, None] = { field: None for field in missing_fields if isinstance(field, str) } warnings.warn( f"Could not fully load {block_document.name!r} of block type" f" {cls.get_block_type_slug()!r} - this is likely because one or more" " required fields were added to the schema for" f" {cls.__name__!r} that did not exist on the class when this block" " was last saved. Please specify values for new field(s):" f" {listrepr(missing_fields)}, then run" f' `{cls.__name__}.save("{block_document.name}", overwrite=True)`,' " and load this block again before attempting to use it." ) return cls.model_construct(**block_document.data, **missing_block_data) raise RuntimeError( f"Unable to load {block_document.name!r} of block type" f" {cls.get_block_type_slug()!r} due to failed validation. To load without" " validation, try loading again with `validate=False`." ) from e @staticmethod def is_block_class(block: Any) -> TypeGuard[type["Block"]]: return _is_subclass(block, Block) @staticmethod def annotation_refers_to_block_class(annotation: Any) -> bool: if Block.is_block_class(annotation): return True if get_origin(annotation) in UnionTypes: for annotation in get_args(annotation): if Block.is_block_class(annotation): return True return False @classmethod @sync_compatible @inject_client async def register_type_and_schema(cls, client: Optional["PrefectClient"] = None): """ Makes block available for configuration with current Prefect API. Recursively registers all nested blocks. 
Registration is idempotent. Args: client: Optional client to use for registering type and schema with the Prefect API. A new client will be created and used if one is not provided. """ if TYPE_CHECKING: assert isinstance(client, PrefectClient) if cls.__name__ == "Block": raise InvalidBlockRegistration( "`register_type_and_schema` should be called on a Block " "subclass and not on the Block class directly." ) if ABC in getattr(cls, "__bases__", []): raise InvalidBlockRegistration( "`register_type_and_schema` should be called on a Block " "subclass and not on a Block interface class directly." ) async def register_blocks_in_annotation(annotation: type) -> None: """Walk through the annotation and register any nested blocks.""" if Block.is_block_class(annotation): coro = annotation.register_type_and_schema(client=client) if TYPE_CHECKING: assert isinstance(coro, Coroutine) await coro elif get_origin(annotation) in NestedTypes: for inner_annotation in get_args(annotation): await register_blocks_in_annotation(inner_annotation) for field in cls.model_fields.values(): if field.annotation is not None: await register_blocks_in_annotation(field.annotation) try: block_type = await client.read_block_type_by_slug( slug=cls.get_block_type_slug() ) cls._block_type_id = block_type.id local_block_type = cls._to_block_type() if _should_update_block_type( local_block_type=local_block_type, server_block_type=block_type ): await client.update_block_type( block_type_id=block_type.id, block_type=BlockTypeUpdate( **local_block_type.model_dump( include={ "logo_url", "documentation_url", "description", "code_example", } ) ), ) except prefect.exceptions.ObjectNotFound: block_type_create = BlockTypeCreate( **cls._to_block_type().model_dump( include={ "name", "slug", "logo_url", "documentation_url", "description", "code_example", } ) ) block_type = await client.create_block_type(block_type=block_type_create) cls._block_type_id = block_type.id try: block_schema = await client.read_block_schema_by_checksum( checksum=cls._calculate_schema_checksum(), version=cls.get_block_schema_version(), ) except prefect.exceptions.ObjectNotFound: block_schema_create = BlockSchemaCreate( **cls._to_block_schema(block_type_id=block_type.id).model_dump( include={"fields", "block_type_id", "capabilities", "version"} ) ) block_schema = await client.create_block_schema( block_schema=block_schema_create ) cls._block_schema_id = block_schema.id @inject_client async def _save( self, name: Optional[str] = None, is_anonymous: bool = False, overwrite: bool = False, client: Optional["PrefectClient"] = None, ) -> UUID: """ Saves the values of a block as a block document with an option to save as an anonymous block document. Args: name: User specified name to give saved block document which can later be used to load the block document. is_anonymous: Boolean value specifying whether the block document is anonymous. Anonymous blocks are intended for system use and are not shown in the UI. Anonymous blocks do not require a user-supplied name. overwrite: Boolean value specifying if values should be overwritten if a block document with the specified name already exists. Raises: ValueError: If a name is not given and `is_anonymous` is `False` or a name is given and `is_anonymous` is `True`. """ if TYPE_CHECKING: assert isinstance(client, PrefectClient) if name is None and not is_anonymous: if self._block_document_name is None: raise ValueError( "You're attempting to save a block document without a name." 
" Please either call `save` with a `name` or pass" " `is_anonymous=True` to save an anonymous block." ) else: name = self._block_document_name self._is_anonymous = is_anonymous # Ensure block type and schema are registered before saving block document. coro = self.register_type_and_schema(client=client) if TYPE_CHECKING: assert isinstance(coro, Coroutine) await coro block_document = None try: block_document_create = BlockDocumentCreate( **self._to_block_document(name=name, include_secrets=True).model_dump( include={ "name", "block_schema_id", "block_type_id", "data", "is_anonymous", } ) ) block_document = await client.create_block_document( block_document=block_document_create ) except prefect.exceptions.ObjectAlreadyExists as err: if overwrite: block_document_id = self._block_document_id if block_document_id is None and name is not None: existing_block_document = await client.read_block_document_by_name( name=name, block_type_slug=self.get_block_type_slug() ) block_document_id = existing_block_document.id if TYPE_CHECKING: # We know that the block document id is not None here because we # only get here if the block document already exists assert isinstance(block_document_id, UUID) block_document_update = BlockDocumentUpdate( **self._to_block_document( name=name, include_secrets=True ).model_dump(include={"block_schema_id", "data"}) ) await client.update_block_document( block_document_id=block_document_id, block_document=block_document_update, ) block_document = await client.read_block_document( block_document_id=block_document_id ) else: raise ValueError( "You are attempting to save values with a name that is already in" " use for this block type. If you would like to overwrite the" " values that are saved, then save with `overwrite=True`." ) from err # Update metadata on block instance for later use. self._block_document_name = block_document.name self._block_document_id = block_document.id return self._block_document_id @sync_compatible async def save( self, name: Optional[str] = None, overwrite: bool = False, client: Optional["PrefectClient"] = None, ): """ Saves the values of a block as a block document. Args: name: User specified name to give saved block document which can later be used to load the block document. overwrite: Boolean value specifying if values should be overwritten if a block document with the specified name already exists. """ document_id = await self._save(name=name, overwrite=overwrite, client=client) return document_id @classmethod @sync_compatible @inject_client async def delete( cls, name: str, client: Optional["PrefectClient"] = None, ): if TYPE_CHECKING: assert isinstance(client, PrefectClient) block_document, _ = await cls._aget_block_document(name, client=client) await client.delete_block_document(block_document.id) def get_block_placeholder(self) -> str: """ Returns the block placeholder for the current block which can be used for templating. Returns: str: The block placeholder for the current block in the format `prefect.blocks.{block_type_name}.{block_document_name}` Raises: BlockNotSavedError: Raised if the block has not been saved. If a block has not been saved, the return value will be `None`. """ block_document_name = self._block_document_name if not block_document_name: raise BlockNotSavedError( "Could not generate block placeholder for unsaved block." 
) return f"prefect.blocks.{self.get_block_type_slug()}.{block_document_name}" @classmethod def model_json_schema( cls, by_alias: bool = True, ref_template: str = "#/definitions/{model}", schema_generator: type[GenerateJsonSchema] = GenerateJsonSchema, mode: Literal["validation", "serialization"] = "validation", ) -> dict[str, Any]: """TODO: stop overriding this method - use GenerateSchema in ConfigDict instead?""" schema = super().model_json_schema( by_alias, ref_template, schema_generator, mode ) # ensure backwards compatibility by copying $defs into definitions if "$defs" in schema: schema["definitions"] = schema.pop("$defs") schema = remove_nested_keys(["additionalProperties"], schema) return schema @classmethod def model_validate( cls: type[Self], obj: dict[str, Any] | Any, *, strict: bool | None = None, from_attributes: bool | None = None, context: dict[str, Any] | None = None, ) -> Self: if isinstance(obj, dict): obj = cast(dict[str, Any], obj) extra_serializer_fields = { "_block_document_id", "_block_document_name", "_is_anonymous", }.intersection(obj.keys()) for field in extra_serializer_fields: obj.pop(field, None) return super().model_validate( obj, strict=strict, from_attributes=from_attributes, context=context, ) def model_dump( self, *, mode: Literal["json", "python"] | str = "python", include: "IncEx | None" = None, exclude: "IncEx | None" = None, context: dict[str, Any] | None = None, by_alias: bool = False, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, round_trip: bool = False, warnings: bool | Literal["none", "warn", "error"] = True, serialize_as_any: bool = False, ) -> dict[str, Any]: d = super().model_dump( mode=mode, include=include, exclude=exclude, context=context, by_alias=by_alias, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, round_trip=round_trip, warnings=warnings, serialize_as_any=serialize_as_any, ) extra_serializer_fields = { "block_type_slug", "_block_document_id", "_block_document_name", "_is_anonymous", }.intersection(d.keys()) for field in extra_serializer_fields: if (include and field not in include) or (exclude and field in exclude): d.pop(field) return d
Block
python
scipy__scipy
scipy/stats/tests/test_distributions.py
{ "start": 191318, "end": 191752 }
class ____: def test_sf_isf(self): # reference values were computed via the reference distribution, e.g. # mp.dps = 50; TruncExpon(b=b).sf(x) b = [20, 100] x = [19.999999, 99.999999] ref = [2.0611546593828472e-15, 3.7200778266671455e-50] assert_allclose(stats.truncexpon.sf(x, b), ref, rtol=1.5e-10) assert_allclose(stats.truncexpon.isf(ref, b), x, rtol=1e-12)
TestTruncexpon
python
django__django
tests/serializers/models/data.py
{ "start": 7293, "end": 7456 }
class ____(BaseModel): parent = models.OneToOneField(BaseModel, models.CASCADE, parent_link=True) child_data = models.IntegerField()
ExplicitInheritBaseModel
python
pytorch__pytorch
test/distributed/test_overlap_bucketing_unit.py
{ "start": 3690, "end": 25750 }
class ____(InductorTestCase): """ Unit tests for overlap-preserving bucketing pass. """ @classmethod def setUpClass(cls): super().setUpClass() from torch.testing._internal.distributed.fake_pg import FakeStore store = FakeStore() dist.init_process_group(backend="fake", rank=0, world_size=2, store=store) cls.device = "cuda" @classmethod def tearDownClass(cls): super().tearDownClass() dist.destroy_process_group() def test_can_bucket_independent_collectives(self): """ Test that independent collectives with separate hiding nodes CAN bucket. Graph structure: ag1_start -> ag2_start -> mm1 (hides ag1) -> mm2 (hides ag2) -> ag1_wait -> ag2_wait """ def func(a, b): group_name = "0" group_size = 1 # Start both collectives ag1 = torch.ops._c10d_functional.all_gather_into_tensor( a, group_size, group_name ) ag2 = torch.ops._c10d_functional.all_gather_into_tensor( b, group_size, group_name ) # Independent compute that can hide both mm1 = torch.mm(a, a) mm2 = torch.mm(b, b) # Wait for both ag1_out = torch.ops._c10d_functional.wait_tensor(ag1) ag2_out = torch.ops._c10d_functional.wait_tensor(ag2) return ag1_out.sum() + ag2_out.sum() + mm1.sum() + mm2.sum() # Use fake mode to trace without executing with FakeTensorMode(): a = torch.ones(4, 4, device=self.device) b = torch.ones(4, 4, device=self.device) * 2 # Trace with make_fx traced = make_fx(func)(a, b) # Find nodes using find_nodes ag1, ag2 = traced.graph.find_nodes( op="call_function", target=torch.ops._c10d_functional.all_gather_into_tensor.default, ) mm1, mm2 = traced.graph.find_nodes( op="call_function", target=torch.ops.aten.mm.default ) # Manually annotate hiding relationships hiding_annotations = { ag1: mm1, # mm1 hides ag1 ag2: mm2, # mm2 hides ag2 } # Build collective info and ancestors collective_info = build_collective_info(traced.graph, hiding_annotations) node_ancestors = compute_ancestors(traced.graph) scheduled = OrderedSet(traced.graph.nodes) # Run bucketing from torch._inductor.fx_passes.overlap_preserving_bucketer import ( OverlapPreservingBucketer, ) bucketer = OverlapPreservingBucketer( traced.graph, collective_info, node_ancestors, scheduled, ) bucketer.bucket_collectives() # Verify: should have 1 bucketed collective (all_gather_into_tensor_out) graph_str = str(traced.graph) FileCheck().check_count("all_gather_into_tensor_out", 1, exactly=False).run( graph_str ) def test_cant_bucket_nested_hiding_intervals(self): """ Test that nested hiding intervals prevent bucketing. Graph structure: ag1_start -> ag2_start -> mm2 (hides ag2) -> ag2_wait -> mm1 (hides ag1) -> ag1_wait ag2's hiding interval is nested inside ag1's hiding interval. 
""" def func(a, b): group_name = "0" group_size = 1 # ag1 starts first ag1 = torch.ops._c10d_functional.all_gather_into_tensor( a, group_size, group_name ) # ag2 starts (inside ag1's interval) ag2 = torch.ops._c10d_functional.all_gather_into_tensor( b, group_size, group_name ) # mm2 hides ag2 mm2 = torch.mm(b[:2, :2], b[:2, :2]) # ag2 waits (still inside ag1's interval) ag2_out = torch.ops._c10d_functional.wait_tensor(ag2) # mm1 uses ag2's result and hides ag1 mm1 = torch.mm(a + ag2_out[:4, :4], a) # ag1 waits last ag1_out = torch.ops._c10d_functional.wait_tensor(ag1) return ag1_out.sum() + ag2_out.sum() + mm1.sum() + mm2.sum() # Use fake mode to trace without executing with FakeTensorMode(): a = torch.ones(4, 4, device=self.device) b = torch.ones(4, 4, device=self.device) * 2 # Trace with make_fx traced = make_fx(func)(a, b) # Find nodes using find_nodes ag1, ag2 = traced.graph.find_nodes( op="call_function", target=torch.ops._c10d_functional.all_gather_into_tensor.default, ) mm_nodes = traced.graph.find_nodes( op="call_function", target=torch.ops.aten.mm.default ) # mm2 is the first mm, mm1 is the second (based on graph order) mm2 = mm_nodes[0] mm1 = mm_nodes[1] # Manually annotate hiding relationships hiding_annotations = { ag1: mm1, # mm1 hides ag1 ag2: mm2, # mm2 hides ag2 } # Build collective info and ancestors collective_info = build_collective_info(traced.graph, hiding_annotations) node_ancestors = compute_ancestors(traced.graph) scheduled = OrderedSet(traced.graph.nodes) # Run bucketing from torch._inductor.fx_passes.overlap_preserving_bucketer import ( OverlapPreservingBucketer, ) bucketer = OverlapPreservingBucketer( traced.graph, collective_info, node_ancestors, scheduled, ) bucketer.bucket_collectives() # Verify: nested hiding intervals should prevent bucketing # Should have 2 separate all_gathers, not 1 bucketed one graph_str = str(traced.graph) FileCheck().check_count("all_gather_into_tensor", 2, exactly=False).run( graph_str ) @parametrize("final_mm_hidden", (True, False)) def test_cant_bucket_ag_with_rs_hiding_interval_between(self, final_mm_hidden): """ Test that all_gathers can't bucket when a reduce_scatter's hiding interval is between them. 
Graph structure: ag1_start -> mm1 (hides ag1) -> ag1_wait -> rs_start -> mm2 (hides rs) -> rs_wait -> if final_mm_hidden: ag2_start -> mm3 (hides ag2) -> ag2_wait if final_mm_hidden: Bucketing ag1 and ag2 would require moving one of them, which would break hiding relationships: - Moving ag2 earlier would break ag2's hiding by mm3 - Moving ag1 later would break ag1's hiding by mm1 - The rs hiding interval creates an obstacle between them otherwise, we can bucket """ def func(a, b, c): group_name = dist.distributed_c10d._get_default_group().group_name group_size = 1 # First all_gather ag1 = torch.ops._c10d_functional.all_gather_into_tensor( a, group_size, group_name ) mm1 = torch.mm(a, a) # hides ag1 ag1_out = torch.ops._c10d_functional.wait_tensor(ag1) # Reduce scatter in between rs = torch.ops._c10d_functional.reduce_scatter_tensor( b, "sum", group_size, group_name ) mm2 = torch.mm(b[:4, :4], b[:4, :4]) # hides rs rs_out = torch.ops._c10d_functional.wait_tensor(rs) # Second all_gather ag2 = torch.ops._c10d_functional.all_gather_into_tensor( c, group_size, group_name ) mm3 = torch.mm(c, c) # hides ag2 ag2_out = torch.ops._c10d_functional.wait_tensor(ag2) return ag1_out.sum() + rs_out.sum() + ag2_out.sum(), mm1, mm2, mm3 # Use fake mode to trace without executing with FakeTensorMode(): a = torch.ones(4, 4, device=self.device) b = torch.ones(8, 4, device=self.device) c = torch.ones(4, 4, device=self.device) # Trace with make_fx traced = make_fx(func)(a, b, c) ag1, ag2 = traced.graph.find_nodes( op="call_function", target=torch.ops._c10d_functional.all_gather_into_tensor.default, ) (rs,) = traced.graph.find_nodes( op="call_function", target=torch.ops._c10d_functional.reduce_scatter_tensor.default, ) mm1, mm2, mm3 = traced.graph.find_nodes( op="call_function", target=torch.ops.aten.mm.default ) # Manually annotate hiding relationships hiding_annotations = { ag1: mm1, # mm1 hides ag1 # rs: mm2, # mm2 hides rs ag2: mm3, } if final_mm_hidden: hiding_annotations[rs] = mm2 # Build collective info and ancestors collective_info = build_collective_info(traced.graph, hiding_annotations) node_ancestors = compute_ancestors(traced.graph) scheduled = OrderedSet(traced.graph.nodes) # Run bucketing logic to find buckets (without applying them, which would require process groups) from torch._inductor.fx_passes.overlap_preserving_bucketer import ( OverlapPreservingBucketer, ) bucketer = OverlapPreservingBucketer( traced.graph, collective_info, node_ancestors, scheduled, ) bucketer.bucket_collectives() graph_str = str(traced.graph) # check order of mms preserved FileCheck().check("%mm").check("%mm_1").check("%mm_2").run(graph_str) if final_mm_hidden: # Should NOT bucket - 2 separate all_gathers # Count all_gather node names (works even when wrapped in control_deps) FileCheck().check_count("%all_gather_into_tensor", 2, exactly=False).run( graph_str ) else: # Should bucket - 1 bucketed all_gather (all_gather_into_tensor_out) FileCheck().check_count( "%all_gather_into_tensor_out", 1, exactly=False ).run(graph_str) def test_can_bucket_all_reduce(self): """ Test that all_reduce operations CAN bucket together. 
Graph structure: ar1_start -> ar2_start -> mm1 (hides ar1) -> mm2 (hides ar2) -> ar1_wait -> ar2_wait """ def func(a, b): group_name = "0" # Start both all_reduce operations ar1 = torch.ops._c10d_functional.all_reduce(a, "sum", group_name) ar2 = torch.ops._c10d_functional.all_reduce(b, "sum", group_name) # Independent compute that can hide both mm1 = torch.mm(a, a) mm2 = torch.mm(b, b) # Wait for both ar1_out = torch.ops._c10d_functional.wait_tensor(ar1) ar2_out = torch.ops._c10d_functional.wait_tensor(ar2) return ar1_out.sum() + ar2_out.sum() + mm1.sum() + mm2.sum() # Use fake mode to trace without executing with FakeTensorMode(): a = torch.ones(4, 4, device=self.device) b = torch.ones(4, 4, device=self.device) * 2 # Trace with make_fx traced = make_fx(func)(a, b) # Find nodes ar1, ar2 = traced.graph.find_nodes( op="call_function", target=torch.ops._c10d_functional.all_reduce.default, ) mm1, mm2 = traced.graph.find_nodes( op="call_function", target=torch.ops.aten.mm.default ) # For all_reduce, start_node == wait_node (no separate wait) hiding_annotations = { ar1: mm1, ar2: mm2, } # Build collective info collective_info = build_collective_info(traced.graph, hiding_annotations) node_ancestors = compute_ancestors(traced.graph) scheduled = OrderedSet(traced.graph.nodes) # Run bucketing from torch._inductor.fx_passes.overlap_preserving_bucketer import ( OverlapPreservingBucketer, ) bucketer = OverlapPreservingBucketer( traced.graph, collective_info, node_ancestors, scheduled, ) bucketer.bucket_collectives() # Verify: should have 1 bucketed all_reduce # After bucketing, there should be only one all_reduce node (the bucketed one) graph_str = str(traced.graph) FileCheck().check_count("%all_reduce", 1, exactly=True).check_count( "%mm", 2 ).run(graph_str) def test_can_bucket_multidtype_collectives(self): """ Test that all_gathers with different dtypes CAN bucket together. 
Graph structure: ag1_float32 -> mm1 (hides ag1) -> ag1_wait ag2_bfloat16 -> mm2 (hides ag2) -> ag2_wait """ def func(a, b): group_name = "0" group_size = 1 # Start both collectives with different dtypes ag1 = torch.ops._c10d_functional.all_gather_into_tensor( a, group_size, group_name, # float32 ) ag2 = torch.ops._c10d_functional.all_gather_into_tensor( b, group_size, group_name, # bfloat16 ) # Independent compute that can hide both mm1 = torch.mm(a, a) mm2 = torch.mm(b.float(), b.float()) # Wait for both ag1_out = torch.ops._c10d_functional.wait_tensor(ag1) ag2_out = torch.ops._c10d_functional.wait_tensor(ag2) return ag1_out.sum() + ag2_out.sum() + mm1.sum() + mm2.sum() # Use fake mode to trace without executing with FakeTensorMode(): a = torch.ones(4, 4, device=self.device, dtype=torch.float32) b = torch.ones(4, 4, device=self.device, dtype=torch.bfloat16) # Trace with make_fx traced = make_fx(func)(a, b) # Find nodes using find_nodes ag1, ag2 = traced.graph.find_nodes( op="call_function", target=torch.ops._c10d_functional.all_gather_into_tensor.default, ) mm_nodes = traced.graph.find_nodes( op="call_function", target=torch.ops.aten.mm.default ) mm1 = mm_nodes[0] mm2 = mm_nodes[1] # Manually annotate hiding relationships hiding_annotations = { ag1: mm1, # mm1 hides ag1 ag2: mm2, # mm2 hides ag2 } # Build collective info and ancestors collective_info = build_collective_info(traced.graph, hiding_annotations) node_ancestors = compute_ancestors(traced.graph) scheduled = OrderedSet(traced.graph.nodes) # Run bucketing with multidtype mode from torch._inductor.fx_passes.overlap_preserving_bucketer import ( OverlapPreservingBucketer, ) bucketer = OverlapPreservingBucketer( traced.graph, collective_info, node_ancestors, scheduled, bucket_mode="custom_ops_multidtype", ) bucketer.bucket_collectives() # Verify: should have 1 bucketed collective (all_gather_into_tensor_out) # even though dtypes are different graph_str = str(traced.graph) FileCheck().check_count("all_gather_into_tensor_out", 1, exactly=False).run( graph_str ) def test_can_bucket_with_multiple_hiding_nodes(self): """ Test that collectives with multiple hiding nodes CAN bucket. 
Graph structure: ag1_start -> ag2_start -> mm1 -> mm2 -> mm3 -> ag1_wait -> ag2_wait Where: - ag1 is hidden by mm1 and mm2 - ag2 is hidden by mm2 and mm3 - Both collectives share mm2 as a hiding node """ def func(a, b): group_name = "0" group_size = 1 # Start both collectives ag1 = torch.ops._c10d_functional.all_gather_into_tensor( a, group_size, group_name ) ag2 = torch.ops._c10d_functional.all_gather_into_tensor( b, group_size, group_name ) # Three compute operations that hide the collectives mm1 = torch.mm(a, a) mm2 = torch.mm(b, b) mm3 = torch.mm(a + b, a + b) # Wait for both ag1_out = torch.ops._c10d_functional.wait_tensor(ag1) ag2_out = torch.ops._c10d_functional.wait_tensor(ag2) return ag1_out.sum() + ag2_out.sum() + mm1.sum() + mm2.sum() + mm3.sum() # Use fake mode to trace without executing with FakeTensorMode(): a = torch.ones(4, 4, device=self.device) b = torch.ones(4, 4, device=self.device) * 2 # Trace with make_fx traced = make_fx(func)(a, b) # Find nodes using find_nodes ag1, ag2 = traced.graph.find_nodes( op="call_function", target=torch.ops._c10d_functional.all_gather_into_tensor.default, ) mm1, mm2, mm3 = traced.graph.find_nodes( op="call_function", target=torch.ops.aten.mm.default ) # Manually annotate hiding relationships with multiple hiding nodes hiding_annotations = { ag1: [mm1, mm2], # ag1 is hidden by mm1 and mm2 ag2: [mm2, mm3], # ag2 is hidden by mm2 and mm3 } # Build collective info and ancestors collective_info = build_collective_info(traced.graph, hiding_annotations) node_ancestors = compute_ancestors(traced.graph) scheduled = OrderedSet(traced.graph.nodes) # Verify hiding_nodes are correctly set self.assertEqual(len(collective_info[ag1].hiding_nodes), 2) self.assertIn(mm1, collective_info[ag1].hiding_nodes) self.assertIn(mm2, collective_info[ag1].hiding_nodes) self.assertEqual(len(collective_info[ag2].hiding_nodes), 2) self.assertIn(mm2, collective_info[ag2].hiding_nodes) self.assertIn(mm3, collective_info[ag2].hiding_nodes) # Run bucketing from torch._inductor.fx_passes.overlap_preserving_bucketer import ( OverlapPreservingBucketer, ) bucketer = OverlapPreservingBucketer( traced.graph, collective_info, node_ancestors, scheduled, ) bucketer.bucket_collectives() FileCheck().check_count( "all_gather_into_tensor_out", 1, exactly=False ).check_count("torch.ops.aten.mm.default", 3, exactly=True).run( str(traced.graph) ) def test_can_bucket_with_convert_dtype_as_hiding_nodes(self): """ Test that all_gathers can bucket when convert_element_type ops ARE the hiding nodes. Graph structure: ag1_start -> convert1 (hides ag1) -> ag1_wait -> ag2_start -> convert2 (hides ag2) -> ag2_wait The convert_element_type ops ARE hiding nodes - no matmuls. This tests that dependencies are transferred correctly when convert nodes are erased. 
""" def func(a, b, c): group_name = "0" group_size = 1 ag1 = torch.ops._c10d_functional.all_gather_into_tensor( a, group_size, group_name ) b = torch.ops.prims.convert_element_type.default(b, torch.float16) ag1_out = torch.ops._c10d_functional.wait_tensor(ag1) ag2 = torch.ops._c10d_functional.all_gather_into_tensor( b, group_size, group_name ) ag3 = torch.ops._c10d_functional.all_gather_into_tensor( c, group_size, group_name ) mm = ag1_out @ ag1_out ag2_out = torch.ops._c10d_functional.wait_tensor(ag2) ag3_out = torch.ops._c10d_functional.wait_tensor(ag3) return ag1_out, ag2_out, ag3_out, mm with FakeTensorMode(): a = torch.ones(4, 4, device=self.device, dtype=torch.float32) b = torch.ones(4, 4, device=self.device, dtype=torch.float32) c = torch.ones(4, 4, device=self.device, dtype=torch.float32) traced = make_fx(func)(a, b, c) # Find nodes ag1, ag2, ag3 = traced.graph.find_nodes( op="call_function", target=torch.ops._c10d_functional.all_gather_into_tensor.default, ) convert1 = traced.graph.find_nodes( op="call_function", target=torch.ops.prims.convert_element_type.default, )[0] mm = traced.graph.find_nodes( op="call_function", target=torch.ops.aten.mm.default, )[0] hiding_annotations = { ag1: convert1, ag2: mm, ag3: mm, } # Build collective info and ancestors collective_info = build_collective_info(traced.graph, hiding_annotations) node_ancestors = compute_ancestors(traced.graph) scheduled = OrderedSet(traced.graph.nodes) # Run bucketing from torch._inductor.fx_passes.overlap_preserving_bucketer import ( OverlapPreservingBucketer, ) bucketer = OverlapPreservingBucketer( traced.graph, collective_info, node_ancestors, scheduled, ) bucketer.bucket_collectives() graph_str = str(traced.graph) f = FileCheck() f.check_count("%all_gather_into_tensor", 1, exactly=True) f.check("pre_bucket_all_gather").check("wait_tensor").check( "%all_gather_into_tensor_out" ).run(graph_str) if __name__ == "__main__": run_tests()
TestOverlapPreservingBucketing
python
pennersr__django-allauth
allauth/headless/account/inputs.py
{ "start": 7054, "end": 7114 }
class ____(AddEmailForm, inputs.Input): pass
AddEmailInput
python
huggingface__transformers
src/transformers/models/wavlm/modeling_wavlm.py
{ "start": 52018, "end": 56900 }
class ____(WavLMPreTrainedModel): def __init__(self, config): super().__init__(config) if hasattr(config, "add_adapter") and config.add_adapter: raise ValueError( "Sequence classification does not support the use of WavLM adapters (config.add_adapter=True)" ) self.wavlm = WavLMModel(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) # Initialize weights and apply final processing self.post_init() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.wavlm.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.wavlm.parameters(): param.requires_grad = False @auto_docstring def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[tuple, SequenceClassifierOutput]: r""" input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. See [`WavLMProcessor.__call__`] for details. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.wavlm( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) if attention_mask is None: pooled_output = hidden_states.mean(dim=1) else: padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask) expand_padding_mask = padding_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2]) hidden_states[~expand_padding_mask] = 0.0 pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1) logits = self.classifier(pooled_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring
WavLMForSequenceClassification
python
numba__numba
numba/tests/test_datamodel.py
{ "start": 450, "end": 511 }
class ____(test_factory()): fe_type = types.int16
TestInt16
python
doocs__leetcode
solution/1500-1599/1508.Range Sum of Sorted Subarray Sums/Solution.py
{ "start": 0, "end": 341 }
class ____: def rangeSum(self, nums: List[int], n: int, left: int, right: int) -> int: arr = [] for i in range(n): s = 0 for j in range(i, n): s += nums[j] arr.append(s) arr.sort() mod = 10**9 + 7 return sum(arr[left - 1 : right]) % mod
Solution
python
PyCQA__pylint
tests/functional/u/useless/useless_parent_delegation.py
{ "start": 12741, "end": 12822 }
class ____(Super): def __init__(self, a, b): super().__init__(a, b)
Sub
python
jupyterlab__jupyterlab
jupyterlab/labextensions.py
{ "start": 15822, "end": 16399 }
class ____(BaseExtensionApp): description = "Lock labextension(s) by name" aliases = lock_aliases level = Unicode("sys_prefix", help="Level at which to lock: sys_prefix, user, system").tag( config=True ) def run_task(self): app_options = AppOptions( app_dir=self.app_dir, logger=self.log, core_config=self.core_config, labextensions_path=self.labextensions_path, ) [lock_extension(arg, app_options=app_options, level=self.level) for arg in self.extra_args]
LockLabExtensionsApp
python
celery__celery
t/unit/tasks/test_trace.py
{ "start": 1705, "end": 19969 }
class ____(TraceCase): def test_trace_successful(self): retval, info = self.trace(self.add, (2, 2), {}) assert info is None assert retval == 4 def test_trace_before_start(self): @self.app.task(shared=False, before_start=Mock()) def add_with_before_start(x, y): return x + y self.trace(add_with_before_start, (2, 2), {}) add_with_before_start.before_start.assert_called() def test_trace_on_success(self): @self.app.task(shared=False, on_success=Mock()) def add_with_success(x, y): return x + y self.trace(add_with_success, (2, 2), {}) add_with_success.on_success.assert_called() def test_get_log_policy(self): einfo = Mock(name='einfo') einfo.internal = False assert get_log_policy(self.add, einfo, Reject()) is log_policy_reject assert get_log_policy(self.add, einfo, Ignore()) is log_policy_ignore self.add.throws = (TypeError,) assert get_log_policy(self.add, einfo, KeyError()) is log_policy_unexpected assert get_log_policy(self.add, einfo, TypeError()) is log_policy_expected einfo2 = Mock(name='einfo2') einfo2.internal = True assert get_log_policy(self.add, einfo2, KeyError()) is log_policy_internal def test_get_task_name(self): assert get_task_name(Context({}), 'default') == 'default' assert get_task_name(Context({'shadow': None}), 'default') == 'default' assert get_task_name(Context({'shadow': ''}), 'default') == 'default' assert get_task_name(Context({'shadow': 'test'}), 'default') == 'test' def test_trace_after_return(self): @self.app.task(shared=False, after_return=Mock()) def add_with_after_return(x, y): return x + y self.trace(add_with_after_return, (2, 2), {}) add_with_after_return.after_return.assert_called() def test_with_prerun_receivers(self): on_prerun = Mock() signals.task_prerun.connect(on_prerun) try: self.trace(self.add, (2, 2), {}) on_prerun.assert_called() finally: signals.task_prerun.receivers[:] = [] def test_with_postrun_receivers(self): on_postrun = Mock() signals.task_postrun.connect(on_postrun) try: self.trace(self.add, (2, 2), {}) on_postrun.assert_called() finally: signals.task_postrun.receivers[:] = [] def test_with_success_receivers(self): on_success = Mock() signals.task_success.connect(on_success) try: self.trace(self.add, (2, 2), {}) on_success.assert_called() finally: signals.task_success.receivers[:] = [] def test_when_chord_part(self): @self.app.task(shared=False) def add(x, y): return x + y add.backend = Mock() request = {'chord': uuid()} self.trace(add, (2, 2), {}, request=request) add.backend.mark_as_done.assert_called() args, kwargs = add.backend.mark_as_done.call_args assert args[0] == 'id-1' assert args[1] == 4 assert args[2].chord == request['chord'] assert not args[3] def test_when_backend_cleanup_raises(self): @self.app.task(shared=False) def add(x, y): return x + y add.backend = Mock(name='backend') add.backend.process_cleanup.side_effect = KeyError() self.trace(add, (2, 2), {}, eager=False) add.backend.process_cleanup.assert_called_with() add.backend.process_cleanup.side_effect = MemoryError() with pytest.raises(MemoryError): self.trace(add, (2, 2), {}, eager=False) def test_eager_task_does_not_store_result_even_if_not_ignore_result(self): @self.app.task(shared=False) def add(x, y): return x + y add.backend = Mock(name='backend') add.ignore_result = False self.trace(add, (2, 2), {}, eager=True) add.backend.mark_as_done.assert_called_once_with( 'id-1', # task_id 4, # result ANY, # request False # store_result ) def test_eager_task_does_not_call_store_result(self): @self.app.task(shared=False) def add(x, y): return x + y backend = 
BaseDictBackend(app=self.app) backend.store_result = Mock() add.backend = backend add.ignore_result = False self.trace(add, (2, 2), {}, eager=True) add.backend.store_result.assert_not_called() def test_eager_task_will_store_result_if_proper_setting_is_set(self): @self.app.task(shared=False) def add(x, y): return x + y add.backend = Mock(name='backend') add.store_eager_result = True add.ignore_result = False self.trace(add, (2, 2), {}, eager=True) add.backend.mark_as_done.assert_called_once_with( 'id-1', # task_id 4, # result ANY, # request True # store_result ) def test_eager_task_with_setting_will_call_store_result(self): @self.app.task(shared=False) def add(x, y): return x + y backend = BaseDictBackend(app=self.app) backend.store_result = Mock() add.backend = backend add.store_eager_result = True add.ignore_result = False self.trace(add, (2, 2), {}, eager=True) add.backend.store_result.assert_called_once_with( 'id-1', 4, states.SUCCESS, request=ANY ) def test_when_backend_raises_exception(self): @self.app.task(shared=False) def add(x, y): return x + y add.backend = Mock(name='backend') add.backend.mark_as_done.side_effect = Exception() add.backend.mark_as_failure.side_effect = Exception("failed mark_as_failure") with pytest.raises(Exception): self.trace(add, (2, 2), {}, eager=False) def test_traceback_clear(self): import inspect import sys sys.exc_clear = Mock() frame_list = [] def raise_dummy(): frame_str_temp = str(inspect.currentframe().__repr__) frame_list.append(frame_str_temp) raise KeyError('foo') try: raise_dummy() except KeyError as exc: traceback_clear(exc) tb_ = exc.__traceback__ while tb_ is not None: if str(tb_.tb_frame.__repr__) == frame_list[0]: assert len(tb_.tb_frame.f_locals) == 0 tb_ = tb_.tb_next try: raise_dummy() except KeyError as exc: traceback_clear() tb_ = exc.__traceback__ while tb_ is not None: if str(tb_.tb_frame.__repr__) == frame_list[0]: assert len(tb_.tb_frame.f_locals) == 0 tb_ = tb_.tb_next try: raise_dummy() except KeyError as exc: traceback_clear(str(exc)) tb_ = exc.__traceback__ while tb_ is not None: if str(tb_.tb_frame.__repr__) == frame_list[0]: assert len(tb_.tb_frame.f_locals) == 0 tb_ = tb_.tb_next @patch('celery.app.trace.traceback_clear') def test_when_Ignore(self, mock_traceback_clear): @self.app.task(shared=False) def ignored(): raise Ignore() retval, info = self.trace(ignored, (), {}) assert info.state == states.IGNORED mock_traceback_clear.assert_called() @patch('celery.app.trace.traceback_clear') def test_when_Reject(self, mock_traceback_clear): @self.app.task(shared=False) def rejecting(): raise Reject() retval, info = self.trace(rejecting, (), {}) assert info.state == states.REJECTED mock_traceback_clear.assert_called() def test_backend_cleanup_raises(self): self.add.backend.process_cleanup = Mock() self.add.backend.process_cleanup.side_effect = RuntimeError() self.trace(self.add, (2, 2), {}) @patch('celery.canvas.maybe_signature') def test_callbacks__scalar(self, maybe_signature): sig = Mock(name='sig') request = {'callbacks': [sig], 'root_id': 'root'} maybe_signature.return_value = sig retval, _ = self.trace(self.add, (2, 2), {}, request=request) sig.apply_async.assert_called_with( (4,), parent_id='id-1', root_id='root', priority=None ) @patch('celery.canvas.maybe_signature') def test_chain_proto2(self, maybe_signature): sig = Mock(name='sig') sig2 = Mock(name='sig2') request = {'chain': [sig2, sig], 'root_id': 'root'} maybe_signature.return_value = sig retval, _ = self.trace(self.add, (2, 2), {}, request=request) 
sig.apply_async.assert_called_with( (4,), parent_id='id-1', root_id='root', chain=[sig2], priority=None ) @patch('celery.canvas.maybe_signature') def test_chain_inherit_parent_priority(self, maybe_signature): self.app.conf.task_inherit_parent_priority = True sig = Mock(name='sig') sig2 = Mock(name='sig2') request = { 'chain': [sig2, sig], 'root_id': 'root', 'delivery_info': {'priority': 42}, } maybe_signature.return_value = sig retval, _ = self.trace(self.add, (2, 2), {}, request=request) sig.apply_async.assert_called_with( (4,), parent_id='id-1', root_id='root', chain=[sig2], priority=42 ) @patch('celery.canvas.maybe_signature') def test_callbacks__EncodeError(self, maybe_signature): sig = Mock(name='sig') request = {'callbacks': [sig], 'root_id': 'root'} maybe_signature.return_value = sig sig.apply_async.side_effect = EncodeError() retval, einfo = self.trace(self.add, (2, 2), {}, request=request) assert einfo.state == states.FAILURE @patch('celery.canvas.maybe_signature') @patch('celery.app.trace.group.apply_async') def test_callbacks__sigs(self, group_, maybe_signature): sig1 = Mock(name='sig') sig2 = Mock(name='sig2') sig3 = group([Mock(name='g1'), Mock(name='g2')], app=self.app) sig3.apply_async = Mock(name='gapply') request = {'callbacks': [sig1, sig3, sig2], 'root_id': 'root'} def pass_value(s, *args, **kwargs): return s maybe_signature.side_effect = pass_value retval, _ = self.trace(self.add, (2, 2), {}, request=request) group_.assert_called_with((4,), parent_id='id-1', root_id='root', priority=None) sig3.apply_async.assert_called_with( (4,), parent_id='id-1', root_id='root', priority=None ) @patch('celery.canvas.maybe_signature') @patch('celery.app.trace.group.apply_async') def test_callbacks__only_groups(self, group_, maybe_signature): sig1 = group([Mock(name='g1'), Mock(name='g2')], app=self.app) sig2 = group([Mock(name='g3'), Mock(name='g4')], app=self.app) sig1.apply_async = Mock(name='gapply') sig2.apply_async = Mock(name='gapply') request = {'callbacks': [sig1, sig2], 'root_id': 'root'} def pass_value(s, *args, **kwargs): return s maybe_signature.side_effect = pass_value retval, _ = self.trace(self.add, (2, 2), {}, request=request) sig1.apply_async.assert_called_with( (4,), parent_id='id-1', root_id='root', priority=None ) sig2.apply_async.assert_called_with( (4,), parent_id='id-1', root_id='root', priority=None ) def test_trace_SystemExit(self): with pytest.raises(SystemExit): self.trace(self.raises, (SystemExit(),), {}) @patch('celery.app.trace.traceback_clear') def test_trace_Retry(self, mock_traceback_clear): exc = Retry('foo', 'bar') _, info = self.trace(self.raises, (exc,), {}) assert info.state == states.RETRY assert info.retval is exc mock_traceback_clear.assert_called() @patch('celery.app.trace.traceback_clear') def test_trace_exception(self, mock_traceback_clear): exc = KeyError('foo') _, info = self.trace(self.raises, (exc,), {}) assert info.state == states.FAILURE assert info.retval is exc mock_traceback_clear.assert_called() def test_trace_task_ret__no_content_type(self): trace_task_ret( self.add.name, 'id1', {}, ((2, 2), {}, {}), None, None, app=self.app, ) def test_fast_trace_task__no_content_type(self): self.app.tasks[self.add.name].__trace__ = build_tracer( self.add.name, self.add, app=self.app, ) fast_trace_task( self.add.name, 'id1', {}, ((2, 2), {}, {}), None, None, app=self.app, _loc=[self.app.tasks, {}, 'hostname'], ) def test_trace_exception_propagate(self): with pytest.raises(KeyError): self.trace(self.raises, (KeyError('foo'),), {}, propagate=True) 
@patch('celery.app.trace.signals.task_internal_error.send') @patch('celery.app.trace.build_tracer') @patch('celery.app.trace.report_internal_error') def test_outside_body_error(self, report_internal_error, build_tracer, send): tracer = Mock() tracer.side_effect = KeyError('foo') build_tracer.return_value = tracer @self.app.task(shared=False) def xtask(): pass trace_task(xtask, 'uuid', (), {}) assert report_internal_error.call_count assert send.call_count assert xtask.__trace__ is tracer def test_backend_error_should_report_failure(self): """check internal error is reported as failure. In case of backend error, an exception may bubble up from trace and be caught by trace_task. """ @self.app.task(shared=False) def xtask(): pass xtask.backend = BaseDictBackend(app=self.app) xtask.backend.mark_as_done = Mock() xtask.backend.mark_as_done.side_effect = Exception() xtask.backend.mark_as_failure = Mock() xtask.backend.mark_as_failure.side_effect = Exception() ret, info, _, _ = trace_task(xtask, 'uuid', (), {}, app=self.app) assert info is not None assert isinstance(ret, ExceptionInfo) def test_deduplicate_successful_tasks__deduplication(self): @self.app.task(shared=False) def add(x, y): return x + y backend = CacheBackend(app=self.app, backend='memory') add.backend = backend add.store_eager_result = True add.ignore_result = False add.acks_late = True self.app.conf.worker_deduplicate_successful_tasks = True task_id = str(uuid4()) request = {'id': task_id, 'delivery_info': {'redelivered': True}} assert trace(self.app, add, (1, 1), task_id=task_id, request=request) == (2, None) assert trace(self.app, add, (1, 1), task_id=task_id, request=request) == (None, None) self.app.conf.worker_deduplicate_successful_tasks = False def test_deduplicate_successful_tasks__no_deduplication(self): @self.app.task(shared=False) def add(x, y): return x + y backend = CacheBackend(app=self.app, backend='memory') add.backend = backend add.store_eager_result = True add.ignore_result = False add.acks_late = True self.app.conf.worker_deduplicate_successful_tasks = True task_id = str(uuid4()) request = {'id': task_id, 'delivery_info': {'redelivered': True}} with patch('celery.app.trace.AsyncResult') as async_result_mock: async_result_mock().state.return_value = PENDING assert trace(self.app, add, (1, 1), task_id=task_id, request=request) == (2, None) assert trace(self.app, add, (1, 1), task_id=task_id, request=request) == (2, None) self.app.conf.worker_deduplicate_successful_tasks = False def test_deduplicate_successful_tasks__result_not_found(self): @self.app.task(shared=False) def add(x, y): return x + y backend = CacheBackend(app=self.app, backend='memory') add.backend = backend add.store_eager_result = True add.ignore_result = False add.acks_late = True self.app.conf.worker_deduplicate_successful_tasks = True task_id = str(uuid4()) request = {'id': task_id, 'delivery_info': {'redelivered': True}} with patch('celery.app.trace.AsyncResult') as async_result_mock: assert trace(self.app, add, (1, 1), task_id=task_id, request=request) == (2, None) state_property = PropertyMock(side_effect=BackendGetMetaError) type(async_result_mock()).state = state_property assert trace(self.app, add, (1, 1), task_id=task_id, request=request) == (2, None) self.app.conf.worker_deduplicate_successful_tasks = False def test_deduplicate_successful_tasks__cached_request(self): @self.app.task(shared=False) def add(x, y): return x + y backend = CacheBackend(app=self.app, backend='memory') add.backend = backend add.store_eager_result = True 
add.ignore_result = False add.acks_late = True self.app.conf.worker_deduplicate_successful_tasks = True task_id = str(uuid4()) request = {'id': task_id, 'delivery_info': {'redelivered': True}} successful_requests.add(task_id) assert trace(self.app, add, (1, 1), task_id=task_id, request=request) == (None, None) successful_requests.clear() self.app.conf.worker_deduplicate_successful_tasks = False
test_trace
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 542048, "end": 542507 }
class ____(sgqlc.types.Type): """Autogenerated return type of CreateRepository""" __schema__ = github_schema __field_names__ = ("client_mutation_id", "repository") client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation.""" repository = sgqlc.types.Field("Repository", graphql_name="repository") """The new repository."""
CreateRepositoryPayload
python
run-llama__llama_index
llama-index-core/llama_index/core/storage/kvstore/types.py
{ "start": 2193, "end": 2686 }
class ____(BaseKVStore): """Base in-memory key-value store.""" @abstractmethod def persist( self, persist_path: str, fs: Optional[fsspec.AbstractFileSystem] = None ) -> None: pass @classmethod @abstractmethod def from_persist_path(cls, persist_path: str) -> "BaseInMemoryKVStore": """Create a BaseInMemoryKVStore from a persist directory.""" MutableMappingT = TypeVar("MutableMappingT", bound=MutableMapping[str, dict])
BaseInMemoryKVStore
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/errors.py
{ "start": 10639, "end": 11237 }
class ____(graphene.ObjectType): class Meta: interfaces = (GrapheneError,) name = "NoModeProvidedError" pipeline_name = graphene.NonNull(graphene.String) def __init__(self, pipeline_name, mode_list): super().__init__() mode_list = check.list_param(mode_list, "mode_list", of_type=str) pipeline_name = check.str_param(pipeline_name, "pipeline_name") self.message = ( f"No mode provided for pipeline '{pipeline_name}', which has multiple modes. Available" f" modes: {mode_list}" )
GrapheneNoModeProvidedError
python
tensorflow__tensorflow
tensorflow/python/ops/init_ops.py
{ "start": 56939, "end": 58634 }
class ____(Initializer): """Initializer that generates the identity matrix. Only use for 2D matrices. Args: gain: Multiplicative factor to apply to the identity matrix. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported. """ @deprecated_args(None, "Call initializer instance with the dtype argument instead " "of passing it to the constructor", "dtype") def __init__(self, gain=1.0, dtype=dtypes.float32): self.gain = gain self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype)) def __call__(self, shape, dtype=None, partition_info=None): full_shape = shape if partition_info is None else partition_info.full_shape if len(full_shape) != 2: raise ValueError("The tensor to initialize, specified by argument `shape`" " must be at least two-dimensional. Received shape=" f"{shape}") if dtype is None: dtype = self.dtype if isinstance(full_shape, tensor_shape.TensorShape): full_shape = full_shape.as_list() initializer = linalg_ops_impl.eye(*full_shape, dtype=dtype) if partition_info is not None: initializer = array_ops.slice(initializer, partition_info.var_offset, shape) return self.gain * initializer def get_config(self): return {"gain": self.gain, "dtype": self.dtype.name} @tf_export(v1=["glorot_uniform_initializer", "initializers.glorot_uniform"]) @deprecation.deprecated_endpoints("glorot_uniform_initializer", "initializers.glorot_uniform")
Identity
python
great-expectations__great_expectations
tests/datasource/fluent/test_config_str.py
{ "start": 595, "end": 6545 }
class ____(FluentBaseModel): normal_field: str secret_field: SecretStr config_field: ConfigStr config_field_w_default: ConfigStr = r"hey-${MY_SECRET}" # type: ignore[assignment] # FIXME CoP @pytest.fixture def env_config_provider() -> _ConfigurationProvider: config_provider = _ConfigurationProvider() config_provider.register_provider(_EnvironmentConfigurationProvider()) return config_provider def test_config_provider_substitution( monkeypatch: MonkeyPatch, env_config_provider: _ConfigurationProvider ): monkeypatch.setenv("MY_CONFIG", "bar") my_dict = {"my_key": r"foo${MY_CONFIG}", "another_key": r"${MY_CONFIG}"} subbed = env_config_provider.substitute_config(my_dict) assert subbed == {"my_key": "foobar", "another_key": "bar"} assert subbed != my_dict def test_config_provider_substitution_raises_error( monkeypatch: MonkeyPatch, env_config_provider: _ConfigurationProvider ): monkeypatch.setenv("NOT_MY_CONFIG", "bar") my_dict = {"my_key": r"foo${MY_CONFIG}", "another_key": r"${MY_CONFIG}"} with pytest.raises(MissingConfigVariableError): env_config_provider.substitute_config(my_dict) def test_config_str_validation(): with pytest.raises(pydantic.ValidationError, match="ConfigStr"): m = MyClass(normal_field="normal", secret_field="secret", config_field="invalid config") print(m) @pytest.mark.parametrize( ["input_value", "expected"], [ (r"${VALID_CONFIG_STR}", r"${VALID_CONFIG_STR}"), ( r"postgres://user:${VALID_CONFIG_STR}@host/dbname", r"postgres://user:${VALID_CONFIG_STR}@host/dbname", ), ("postgres://userName@hostname", "postgres://userName@hostname"), ], ) def test_as_union_file_type(input_value, expected): class MyClass(FluentBaseModel): my_field: Union[ConfigStr, pydantic.networks.PostgresDsn] m = MyClass(my_field=input_value) print(m) assert str(m.my_field) == expected def test_config_substitution(monkeypatch: MonkeyPatch, env_config_provider: _ConfigurationProvider): monkeypatch.setenv("MY_ENV_VAR", "success") m = MyClass( normal_field="normal", secret_field="secret", # type: ignore[arg-type] # FIXME CoP config_field=r"${MY_ENV_VAR}", # type: ignore[arg-type] # FIXME CoP config_field_w_default=r"hello-${MY_ENV_VAR}", # type: ignore[arg-type] # FIXME CoP ) assert m.config_field.get_config_value(env_config_provider) == "success" assert m.config_field_w_default.get_config_value(env_config_provider) == "hello-success" def test_config_substitution_dict( monkeypatch: MonkeyPatch, env_config_provider: _ConfigurationProvider ): monkeypatch.setenv("MY_ENV_VAR", "success") m = MyClass( normal_field="normal", secret_field="secret", # type: ignore[arg-type] # FIXME CoP config_field=r"${MY_ENV_VAR}", # type: ignore[arg-type] # FIXME CoP ) d = m.dict(config_provider=env_config_provider) assert d["config_field"] == "success" def test_config_nested_substitution_dict( monkeypatch: MonkeyPatch, env_config_provider: _ConfigurationProvider ): monkeypatch.setenv("MY_ENV_VAR", "success") class MyCollection(FluentBaseModel): my_classes: List[MyClass] = [] MyCollection.update_forward_refs(MyClass=MyClass) m = MyCollection( my_classes=[ MyClass( normal_field="normal", secret_field="secret", # type: ignore[arg-type] # FIXME CoP config_field=r"${MY_ENV_VAR}", # type: ignore[arg-type] # FIXME CoP ) ] ) d = m.dict(config_provider=env_config_provider) assert d["my_classes"][0]["config_field"] == "success" def test_config_nested_substitution_dict_raises_error_for_missing_config_var( monkeypatch: MonkeyPatch, env_config_provider: _ConfigurationProvider ): monkeypatch.setenv("NOT_MY_ENV_VAR", "failure") class 
MyCollection(FluentBaseModel): my_classes: List[MyClass] = [] MyCollection.update_forward_refs(MyClass=MyClass) m = MyCollection( my_classes=[ MyClass( normal_field="normal", secret_field="secret", # type: ignore[arg-type] # FIXME CoP config_field=r"${MY_ENV_VAR}", # type: ignore[arg-type] # FIXME CoP ) ] ) with pytest.raises(MissingConfigVariableError): m.dict(config_provider=env_config_provider) @pytest.mark.parametrize("method", ["yaml", "dict", "json"]) def test_serialization_returns_original(monkeypatch: MonkeyPatch, method: str): monkeypatch.setenv("MY_ENV_VAR", "dont_serialize_me") m = MyClass( normal_field="normal", secret_field="secret", # type: ignore[arg-type] # FIXME CoP config_field=r"${MY_ENV_VAR}", # type: ignore[arg-type] # FIXME CoP ) serialize_method: Callable = getattr(m, method) dumped = str(serialize_method()) assert "dont_serialize_me" not in dumped assert r"${MY_ENV_VAR}" in dumped @pytest.mark.parametrize("method", ["yaml", "dict", "json"]) def test_nested_serialization_returns_original(monkeypatch: MonkeyPatch, method: str): # TODO: fix this class MyCollection(FluentBaseModel): my_classes: List[MyClass] = [] MyCollection.update_forward_refs(MyClass=MyClass) monkeypatch.setenv("MY_ENV_VAR", "dont_serialize_me") m = MyCollection( my_classes=[ MyClass( normal_field="normal", secret_field="secret", # type: ignore[arg-type] # FIXME CoP config_field=r"${MY_ENV_VAR}", # type: ignore[arg-type] # FIXME CoP ) ] ) serialize_method: Callable = getattr(m, method) dumped = str(serialize_method()) assert "dont_serialize_me" not in dumped assert r"${MY_ENV_VAR}" in dumped
MyClass
python
coleifer__peewee
tests/libs/mock.py
{ "start": 51663, "end": 59275 }
class ____(object): """ Patch a dictionary, or dictionary like object, and restore the dictionary to its original state after the test. `in_dict` can be a dictionary or a mapping like container. If it is a mapping then it must at least support getting, setting and deleting items plus iterating over keys. `in_dict` can also be a string specifying the name of the dictionary, which will then be fetched by importing it. `values` can be a dictionary of values to set in the dictionary. `values` can also be an iterable of `(key, value)` pairs. If `clear` is True then the dictionary will be cleared before the new values are set. `patch.dict` can also be called with arbitrary keyword arguments to set values in the dictionary:: with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()): ... `patch.dict` can be used as a context manager, decorator or class decorator. When used as a class decorator `patch.dict` honours `patch.TEST_PREFIX` for choosing which methods to wrap. """ def __init__(self, in_dict, values=(), clear=False, **kwargs): if isinstance(in_dict, basestring): in_dict = _importer(in_dict) self.in_dict = in_dict # support any argument supported by dict(...) constructor self.values = dict(values) self.values.update(kwargs) self.clear = clear self._original = None def __call__(self, f): if isinstance(f, ClassTypes): return self.decorate_class(f) @wraps(f) def _inner(*args, **kw): self._patch_dict() try: return f(*args, **kw) finally: self._unpatch_dict() return _inner def decorate_class(self, klass): for attr in dir(klass): attr_value = getattr(klass, attr) if (attr.startswith(patch.TEST_PREFIX) and hasattr(attr_value, "__call__")): decorator = _patch_dict(self.in_dict, self.values, self.clear) decorated = decorator(attr_value) setattr(klass, attr, decorated) return klass def __enter__(self): """Patch the dict.""" self._patch_dict() def _patch_dict(self): values = self.values in_dict = self.in_dict clear = self.clear try: original = in_dict.copy() except AttributeError: # dict like object with no copy method # must support iteration over keys original = {} for key in in_dict: original[key] = in_dict[key] self._original = original if clear: _clear_dict(in_dict) try: in_dict.update(values) except AttributeError: # dict like object with no update method for key in values: in_dict[key] = values[key] def _unpatch_dict(self): in_dict = self.in_dict original = self._original _clear_dict(in_dict) try: in_dict.update(original) except AttributeError: for key in original: in_dict[key] = original[key] def __exit__(self, *args): """Unpatch the dict.""" self._unpatch_dict() return False start = __enter__ stop = __exit__ def _clear_dict(in_dict): try: in_dict.clear() except AttributeError: keys = list(in_dict) for key in keys: del in_dict[key] def _patch_stopall(): """Stop all active patches.""" for patch in list(_patch._active_patches): patch.stop() patch.object = _patch_object patch.dict = _patch_dict patch.multiple = _patch_multiple patch.stopall = _patch_stopall patch.TEST_PREFIX = 'test' magic_methods = ( "lt le gt ge eq ne " "getitem setitem delitem " "len contains iter " "hash str sizeof " "enter exit " "divmod neg pos abs invert " "complex int float index " "trunc floor ceil " ) numerics = "add sub mul div floordiv mod lshift rshift and xor or pow " inplace = ' '.join('i%s' % n for n in numerics.split()) right = ' '.join('r%s' % n for n in numerics.split()) extra = '' if inPy3k: extra = 'bool next ' else: extra = 'unicode long nonzero oct hex truediv rtruediv ' # not including 
__prepare__, __instancecheck__, __subclasscheck__ # (as they are metaclass methods) # __del__ is not supported at all as it causes problems if it exists _non_defaults = set('__%s__' % method for method in [ 'cmp', 'getslice', 'setslice', 'coerce', 'subclasses', 'format', 'get', 'set', 'delete', 'reversed', 'missing', 'reduce', 'reduce_ex', 'getinitargs', 'getnewargs', 'getstate', 'setstate', 'getformat', 'setformat', 'repr', 'dir' ]) def _get_method(name, func): "Turns a callable object (like a mock) into a real function" def method(self, *args, **kw): return func(self, *args, **kw) method.__name__ = name return method _magics = set( '__%s__' % method for method in ' '.join([magic_methods, numerics, inplace, right, extra]).split() ) _all_magics = _magics | _non_defaults _unsupported_magics = set([ '__getattr__', '__setattr__', '__init__', '__new__', '__prepare__' '__instancecheck__', '__subclasscheck__', '__del__' ]) _calculate_return_value = { '__hash__': lambda self: object.__hash__(self), '__str__': lambda self: object.__str__(self), '__sizeof__': lambda self: object.__sizeof__(self), '__unicode__': lambda self: unicode(object.__str__(self)), } _return_values = { '__lt__': NotImplemented, '__gt__': NotImplemented, '__le__': NotImplemented, '__ge__': NotImplemented, '__int__': 1, '__contains__': False, '__len__': 0, '__exit__': False, '__complex__': 1j, '__float__': 1.0, '__bool__': True, '__nonzero__': True, '__oct__': '1', '__hex__': '0x1', '__long__': long(1), '__index__': 1, } def _get_eq(self): def __eq__(other): ret_val = self.__eq__._mock_return_value if ret_val is not DEFAULT: return ret_val return self is other return __eq__ def _get_ne(self): def __ne__(other): if self.__ne__._mock_return_value is not DEFAULT: return DEFAULT return self is not other return __ne__ def _get_iter(self): def __iter__(): ret_val = self.__iter__._mock_return_value if ret_val is DEFAULT: return iter([]) # if ret_val was already an iterator, then calling iter on it should # return the iterator unchanged return iter(ret_val) return __iter__ _side_effect_methods = { '__eq__': _get_eq, '__ne__': _get_ne, '__iter__': _get_iter, } def _set_return_value(mock, method, name): fixed = _return_values.get(name, DEFAULT) if fixed is not DEFAULT: method.return_value = fixed return return_calulator = _calculate_return_value.get(name) if return_calulator is not None: try: return_value = return_calulator(mock) except AttributeError: # XXXX why do we return AttributeError here? # set it as a side_effect instead? return_value = AttributeError(name) method.return_value = return_value return side_effector = _side_effect_methods.get(name) if side_effector is not None: method.side_effect = side_effector(mock)
_patch_dict
python
pytorch__pytorch
torch/distributed/_tools/sac_estimator.py
{ "start": 5879, "end": 6594 }
class ____: """ Stores metadata for Greedy-order SAC. Attributes: recomputed_ops (set[int]): Set of operator indices to be recomputed. stored_ops (set[int]): Set of operator indices to be stored. inplace_op_groups (dict[int, set[int]]): Dictionary of inplace operator groups from group-head to operators. random_ops_group (dict[int, set[int]]): Dictionary of random op group head to random ops. msps_meta (list[MSPS]): List of Memory and Runtime Statistics for operators. """ recomputed_ops: set[int] stored_ops: set[int] inplace_op_groups: dict[int, set[int]] random_ops_group: dict[int, set[int]] msps_meta: list[MSPS]
SACGreedyOrderMeta
python
celery__celery
t/integration/tasks.py
{ "start": 12241, "end": 13623 }
class ____(BaseModel): result: int @shared_task(pydantic=True) def add_pydantic(data: AddParameterModel) -> AddResultModel: """Add two numbers, but with parameters and results using Pydantic model serialization.""" value = data.x + data.y return AddResultModel(result=value) @shared_task(pydantic=True) def add_pydantic_string_annotations(data: "AddParameterModel") -> "AddResultModel": """Add two numbers, but with string-annotated Pydantic models (__future__.annotations bug).""" value = data.x + data.y return AddResultModel(result=value) if LEGACY_TASKS_DISABLED: class StampOnReplace(StampingVisitor): stamp = {"StampOnReplace": "This is the replaced task"} def on_signature(self, sig, **headers) -> dict: return self.stamp class StampedTaskOnReplace(Task): """Custom task for stamping on replace""" def on_replace(self, sig): sig.stamp(StampOnReplace()) return super().on_replace(sig) @shared_task(bind=True, base=StampedTaskOnReplace) def replace_with_stamped_task(self: StampedTaskOnReplace, replace_with=None): if replace_with is None: replace_with = replaced_with_me.s() self.replace(signature(replace_with)) @shared_task(soft_time_limit=2, time_limit=1) def soft_time_limit_must_exceed_time_limit(): pass
AddResultModel
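The row above depends on Celery's pydantic=True task option to validate arguments and results. A minimal standalone sketch of the same round trip, assuming Pydantic v2 and no Celery worker involved (the payload values are illustrative):

from pydantic import BaseModel

class AddParameterModel(BaseModel):
    x: int
    y: int

class AddResultModel(BaseModel):
    result: int

# Validate an incoming payload and build the result model, mirroring what the
# pydantic-aware task wrapper does around add_pydantic().
data = AddParameterModel.model_validate({"x": 2, "y": 3})
print(AddResultModel(result=data.x + data.y).model_dump())  # {'result': 5}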
python
PyCQA__pylint
tests/functional/a/arguments_differ.py
{ "start": 4795, "end": 4881 }
class ____: def func(self, user_input: Dict[str, int]) -> None: pass
ParentT1
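This fixture comes from pylint's arguments-differ functional tests. A small sketch of the override pattern that check targets; the child class below is illustrative and not part of the test file:

from typing import Dict

class ParentT1:
    def func(self, user_input: Dict[str, int]) -> None:
        pass

class ChildT1(ParentT1):
    # Renaming the parameter (user_input -> data) is the kind of signature
    # drift that pylint flags as arguments-renamed / arguments-differ.
    def func(self, data: Dict[str, int]) -> None:
        pass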
python
matplotlib__matplotlib
lib/mpl_toolkits/axisartist/grid_helper_curvelinear.py
{ "start": 3402, "end": 4975 }
class ____(_FixedAxisArtistHelperBase): """ Helper class for a fixed axis. """ def __init__(self, grid_helper, side, nth_coord_ticks=None): """ nth_coord = along which coordinate value varies. nth_coord = 0 -> x axis, nth_coord = 1 -> y axis """ super().__init__(loc=side) self.grid_helper = grid_helper if nth_coord_ticks is None: nth_coord_ticks = self.nth_coord self.nth_coord_ticks = nth_coord_ticks self.side = side def update_lim(self, axes): self.grid_helper.update_lim(axes) def get_tick_transform(self, axes): return axes.transData def get_tick_iterators(self, axes): """tick_loc, tick_angle, tick_label""" v1, v2 = axes.get_ylim() if self.nth_coord == 0 else axes.get_xlim() if v1 > v2: # Inverted limits. side = {"left": "right", "right": "left", "top": "bottom", "bottom": "top"}[self.side] else: side = self.side angle_tangent = dict(left=90, right=90, bottom=0, top=0)[side] def iter_major(): for nth_coord, show_labels in [ (self.nth_coord_ticks, True), (1 - self.nth_coord_ticks, False)]: gi = self.grid_helper._grid_info[["lon", "lat"][nth_coord]] for tick in gi["ticks"][side]: yield (*tick["loc"], angle_tangent, (tick["label"] if show_labels else "")) return iter_major(), iter([])
FixedAxisArtistHelper
python
mlflow__mlflow
mlflow/genai/agent_server/server.py
{ "start": 2253, "end": 9605 }
class ____: """FastAPI-based server for hosting agents. Args: agent_type: An optional parameter to specify the type of agent to serve. If provided, input/output validation and streaming tracing aggregation will be done automatically. Currently only "ResponsesAgent" is supported. If ``None``, no input/output validation and streaming tracing aggregation will be done. Default to ``None``. See https://mlflow.org/docs/latest/genai/serving/agent-server for more information. """ def __init__(self, agent_type: AgentType | None = None): self.agent_type = agent_type if agent_type == "ResponsesAgent": self.validator = ResponsesAgentValidator() else: self.validator = BaseAgentValidator() self.app = FastAPI(title="Agent Server") self._setup_routes() def _setup_routes(self) -> None: @self.app.post("/invocations") async def invocations_endpoint(request: Request): # Capture headers such as x-forwarded-access-token # https://docs.databricks.com/aws/en/dev-tools/databricks-apps/auth?language=Streamlit#retrieve-user-authorization-credentials set_request_headers(dict(request.headers)) try: data = await request.json() except Exception as e: raise HTTPException(status_code=400, detail=f"Invalid JSON in request body: {e!s}") logger.debug( "Request received", extra={ "agent_type": self.agent_type, "request_size": len(json.dumps(data)), "stream_requested": data.get(STREAM_KEY, False), }, ) is_streaming = data.pop(STREAM_KEY, False) try: request_data = self.validator.validate_and_convert_request(data) except ValueError as e: raise HTTPException( status_code=400, detail=f"Invalid parameters for {self.agent_type}: {e}", ) if is_streaming: return await self._handle_stream_request(request_data) else: return await self._handle_invoke_request(request_data) @self.app.get("/health") async def health_check() -> dict[str, str]: return {"status": "healthy"} async def _handle_invoke_request(self, request: dict[str, Any]) -> dict[str, Any]: if _invoke_function is None: raise HTTPException(status_code=500, detail="No invoke function registered") func = _invoke_function func_name = func.__name__ try: with mlflow.start_span(name=f"{func_name}") as span: span.set_inputs(request) if inspect.iscoroutinefunction(func): result = await func(request) else: result = func(request) result = self.validator.validate_and_convert_result(result) if self.agent_type == "ResponsesAgent": span.set_attribute(SpanAttributeKey.MESSAGE_FORMAT, "openai") span.set_outputs(result) logger.debug( "Response sent", extra={ "endpoint": "invoke", "response_size": len(json.dumps(result)), "function_name": func_name, }, ) return result except Exception as e: logger.debug( "Error response sent", extra={ "endpoint": "invoke", "error": str(e), "function_name": func_name, }, ) raise HTTPException(status_code=500, detail=str(e)) async def _generate( self, func: Callable[..., Any], request: dict[str, Any], ) -> AsyncGenerator[str, None]: func_name = func.__name__ all_chunks: list[dict[str, Any]] = [] try: with mlflow.start_span(name=f"{func_name}") as span: span.set_inputs(request) if inspect.iscoroutinefunction(func) or inspect.isasyncgenfunction(func): async for chunk in func(request): chunk = self.validator.validate_and_convert_result(chunk, stream=True) all_chunks.append(chunk) yield f"data: {json.dumps(chunk)}\n\n" else: for chunk in func(request): chunk = self.validator.validate_and_convert_result(chunk, stream=True) all_chunks.append(chunk) yield f"data: {json.dumps(chunk)}\n\n" if self.agent_type == "ResponsesAgent": 
span.set_attribute(SpanAttributeKey.MESSAGE_FORMAT, "openai") span.set_outputs(ResponsesAgent.responses_agent_output_reducer(all_chunks)) else: span.set_outputs(all_chunks) yield "data: [DONE]\n\n" logger.debug( "Streaming response completed", extra={ "endpoint": "stream", "total_chunks": len(all_chunks), "function_name": func_name, }, ) except Exception as e: logger.debug( "Streaming response error", extra={ "endpoint": "stream", "error": str(e), "function_name": func_name, "chunks_sent": len(all_chunks), }, ) yield f"data: {json.dumps({'error': str(e)})}\n\n" yield "data: [DONE]\n\n" async def _handle_stream_request(self, request: dict[str, Any]) -> StreamingResponse: if _stream_function is None: raise HTTPException(status_code=500, detail="No stream function registered") return StreamingResponse( self._generate(_stream_function, request), media_type="text/event-stream" ) @staticmethod def _parse_server_args(): """Parse command line arguments for the agent server""" parser = argparse.ArgumentParser(description="Start the agent server") parser.add_argument( "--port", type=int, default=8000, help="Port to run the server on (default: 8000)" ) parser.add_argument( "--workers", type=int, default=1, help="Number of workers to run the server on (default: 1)", ) parser.add_argument( "--reload", action="store_true", help="Reload the server on code changes (default: False)", ) return parser.parse_args() def run( self, app_import_string: str, host: str = "0.0.0.0", ) -> None: """Run the agent server with command line argument parsing.""" args = self._parse_server_args() uvicorn.run( app_import_string, host=host, port=args.port, workers=args.workers, reload=args.reload )
AgentServer
python
wandb__wandb
wandb/old/settings.py
{ "start": 240, "end": 6620 }
class ____: """Global W&B settings stored under $WANDB_CONFIG_DIR/settings.""" DEFAULT_SECTION = "default" _UNSET = object() def __init__( self, load_settings: bool = True, root_dir: Optional[str] = None ) -> None: self._global_settings = Settings._settings() self._local_settings = Settings._settings() self.root_dir = root_dir if load_settings: global_path = Settings._global_path() if global_path is not None: self._global_settings.read([global_path]) # Only attempt to read if there is a directory existing if os.path.isdir(core.wandb_dir(self.root_dir)): self._local_settings.read([Settings._local_path(self.root_dir)]) def get(self, section: str, key: str, fallback: Any = _UNSET) -> Any: # Try the local settings first. If we can't find the key, then try the global settings. # If a fallback is provided, return it if we can't find the key in either the local or global # settings. try: return self._local_settings.get(section, key) except configparser.NoOptionError: try: return self._global_settings.get(section, key) except configparser.NoOptionError: if fallback is not Settings._UNSET: return fallback else: raise def _persist_settings(self, settings, settings_path) -> None: # write a temp file and then move it to the settings path target_dir = os.path.dirname(settings_path) with tempfile.NamedTemporaryFile( mode="w", suffix=".tmp", delete=False, dir=target_dir ) as fp: settings.write(fp) temp_path = fp.name try: os.replace(temp_path, settings_path) except AttributeError: os.rename(temp_path, settings_path) def set(self, section, key, value, globally=False, persist=False) -> None: """Persist settings to disk if persist = True""" def write_setting(settings, settings_path, persist): if not settings.has_section(section): Settings._safe_add_section(settings, Settings.DEFAULT_SECTION) settings.set(section, key, str(value)) if persist: self._persist_settings(settings, settings_path) if globally: global_path = Settings._global_path() if global_path is not None: write_setting(self._global_settings, global_path, persist) else: write_setting( self._local_settings, Settings._local_path(self.root_dir), persist ) def clear(self, section, key, globally=False, persist=False) -> None: def clear_setting(settings, settings_path, persist): settings.remove_option(section, key) if persist: self._persist_settings(settings, settings_path) if globally: global_path = Settings._global_path() if global_path is not None: clear_setting(self._global_settings, global_path, persist) else: clear_setting( self._local_settings, Settings._local_path(self.root_dir), persist ) def items(self, section: str = DEFAULT_SECTION): result = {"section": section} try: if section in self._global_settings.sections(): for option in self._global_settings.options(section): result[option] = self._global_settings.get(section, option) if section in self._local_settings.sections(): for option in self._local_settings.options(section): result[option] = self._local_settings.get(section, option) except configparser.InterpolationSyntaxError: core.termwarn("Unable to parse settings file") return result @staticmethod def _safe_add_section(settings, section): if not settings.has_section(section): settings.add_section(section) @staticmethod def _settings(default_settings={}): settings = configparser.ConfigParser() Settings._safe_add_section(settings, Settings.DEFAULT_SECTION) for key, value in default_settings.items(): settings.set(Settings.DEFAULT_SECTION, key, str(value)) return settings @staticmethod def _global_path() -> Optional[str]: def 
try_create_dir(path) -> bool: try: os.makedirs(path, exist_ok=True) if os.access(path, os.W_OK): return True except OSError: pass return False def get_username() -> str: try: return getpass.getuser() except (ImportError, KeyError): return generate_id() try: home_config_dir = os.path.join(os.path.expanduser("~"), ".config", "wandb") if os.getenv(env.CONFIG_DIR): try_create_dir(os.getenv(env.CONFIG_DIR)) return os.path.join(os.getenv(env.CONFIG_DIR), "settings") if not try_create_dir(home_config_dir): wandb.termwarn( f"Failed to create global config settings in: {home_config_dir}." " Settings will not be persisted.", repeat=False, ) temp_config_dir = os.path.join( tempfile.gettempdir(), ".config", "wandb" ) if not try_create_dir(temp_config_dir): username = get_username() config_dir = os.path.join( tempfile.gettempdir(), username, ".config", "wandb" ) try_create_dir(config_dir) else: config_dir = temp_config_dir else: config_dir = home_config_dir return os.path.join(config_dir, "settings") except Exception: return None @staticmethod def _local_path(root_dir=None): filesystem.mkdir_exists_ok(core.wandb_dir(root_dir)) return os.path.join(core.wandb_dir(root_dir), "settings")
Settings
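Settings._persist_settings above writes through a temporary file and then renames it over the target. A self-contained sketch of that atomic-write idea with configparser; the paths and section contents here are illustrative:

import configparser
import os
import tempfile

settings = configparser.ConfigParser()
settings["default"] = {"entity": "my-team"}

target = os.path.join(tempfile.gettempdir(), "settings")
# Write to a temp file in the same directory, then atomically replace the target.
with tempfile.NamedTemporaryFile(
    mode="w", suffix=".tmp", delete=False, dir=os.path.dirname(target)
) as fp:
    settings.write(fp)
    temp_path = fp.name
os.replace(temp_path, target)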
python
matplotlib__matplotlib
lib/matplotlib/backends/backend_pdf.py
{ "start": 16181, "end": 21733 }
class ____: """ PDF stream object. This has no pdfRepr method. Instead, call begin(), then output the contents of the stream by calling write(), and finally call end(). """ __slots__ = ('id', 'len', 'pdfFile', 'file', 'compressobj', 'extra', 'pos') def __init__(self, id, len, file, extra=None, png=None): """ Parameters ---------- id : int Object id of the stream. len : Reference or None An unused Reference object for the length of the stream; None means to use a memory buffer so the length can be inlined. file : PdfFile The underlying object to write the stream to. extra : dict from Name to anything, or None Extra key-value pairs to include in the stream header. png : dict or None If the data is already png encoded, the decode parameters. """ self.id = id # object id self.len = len # id of length object self.pdfFile = file self.file = file.fh # file to which the stream is written self.compressobj = None # compression object if extra is None: self.extra = dict() else: self.extra = extra.copy() if png is not None: self.extra.update({'Filter': Name('FlateDecode'), 'DecodeParms': png}) self.pdfFile.recordXref(self.id) if mpl.rcParams['pdf.compression'] and not png: self.compressobj = zlib.compressobj( mpl.rcParams['pdf.compression']) if self.len is None: self.file = BytesIO() else: self._writeHeader() self.pos = self.file.tell() def _writeHeader(self): write = self.file.write write(b"%d 0 obj\n" % self.id) dict = self.extra dict['Length'] = self.len if mpl.rcParams['pdf.compression']: dict['Filter'] = Name('FlateDecode') write(pdfRepr(dict)) write(b"\nstream\n") def end(self): """Finalize stream.""" self._flush() if self.len is None: contents = self.file.getvalue() self.len = len(contents) self.file = self.pdfFile.fh self._writeHeader() self.file.write(contents) self.file.write(b"\nendstream\nendobj\n") else: length = self.file.tell() - self.pos self.file.write(b"\nendstream\nendobj\n") self.pdfFile.writeObject(self.len, length) def write(self, data): """Write some data on the stream.""" if self.compressobj is None: self.file.write(data) else: compressed = self.compressobj.compress(data) self.file.write(compressed) def _flush(self): """Flush the compression object.""" if self.compressobj is not None: compressed = self.compressobj.flush() self.file.write(compressed) self.compressobj = None def _get_pdf_charprocs(font_path, glyph_ids): font = get_font(font_path, hinting_factor=1) conv = 1000 / font.units_per_EM # Conversion to PS units (1/1000's). procs = {} for glyph_id in glyph_ids: g = font.load_glyph(glyph_id, LoadFlags.NO_SCALE) # NOTE: We should be using round(), but instead use # "(x+.5).astype(int)" to keep backcompat with the old ttconv code # (this is different for negative x's). d1 = (np.array([g.horiAdvance, 0, *g.bbox]) * conv + .5).astype(int) v, c = font.get_path() v = (v * 64).astype(int) # Back to TrueType's internal units (1/64's). # Backcompat with old ttconv code: control points between two quads are # omitted if they are exactly at the midpoint between the control of # the quad before and the quad after, but ttconv used to interpolate # *after* conversion to PS units, causing floating point errors. Here # we reproduce ttconv's logic, detecting these "implicit" points and # re-interpolating them. Note that occasionally (e.g. with DejaVu Sans # glyph "0") a point detected as "implicit" is actually explicit, and # will thus be shifted by 1. 
quads, = np.nonzero(c == 3) quads_on = quads[1::2] quads_mid_on = np.array( sorted({*quads_on} & {*(quads - 1)} & {*(quads + 1)}), int) implicit = quads_mid_on[ (v[quads_mid_on] # As above, use astype(int), not // division == ((v[quads_mid_on - 1] + v[quads_mid_on + 1]) / 2).astype(int)) .all(axis=1)] if (font.postscript_name, glyph_id) in [ ("DejaVuSerif-Italic", 77), # j ("DejaVuSerif-Italic", 135), # \AA ]: v[:, 0] -= 1 # Hard-coded backcompat (FreeType shifts glyph by 1). v = (v * conv + .5).astype(int) # As above re: truncation vs rounding. v[implicit] = (( # Fix implicit points; again, truncate. (v[implicit - 1] + v[implicit + 1]) / 2).astype(int)) procs[font.get_glyph_name(glyph_id)] = ( " ".join(map(str, d1)).encode("ascii") + b" d1\n" + _path.convert_to_string( Path(v, c), None, None, False, None, -1, # no code for quad Beziers triggers auto-conversion to cubics. [b"m", b"l", b"", b"c", b"h"], True) + b"f") return procs
Stream
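The Stream class above compresses data incrementally with zlib.compressobj and flushes the compressor when the stream ends. A minimal standalone sketch of that streaming pattern (the chunks are illustrative):

import io
import zlib

buf = io.BytesIO()
comp = zlib.compressobj(6)  # compression level, as pdf.compression would supply
for chunk in (b"BT /F1 12 Tf ", b"(Hello) Tj ", b"ET"):
    buf.write(comp.compress(chunk))
buf.write(comp.flush())  # emit any buffered bytes, like Stream._flush()
assert zlib.decompress(buf.getvalue()) == b"BT /F1 12 Tf (Hello) Tj ET"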
python
matplotlib__matplotlib
lib/matplotlib/testing/jpl_units/EpochConverter.py
{ "start": 169, "end": 2944 }
class ____(units.ConversionInterface): """ Provides Matplotlib conversion functionality for Monte Epoch and Duration classes. """ jdRef = 1721425.5 @staticmethod def axisinfo(unit, axis): # docstring inherited majloc = date_ticker.AutoDateLocator() majfmt = date_ticker.AutoDateFormatter(majloc) return units.AxisInfo(majloc=majloc, majfmt=majfmt, label=unit) @staticmethod def float2epoch(value, unit): """ Convert a Matplotlib floating-point date into an Epoch of the specified units. = INPUT VARIABLES - value The Matplotlib floating-point date. - unit The unit system to use for the Epoch. = RETURN VALUE - Returns the value converted to an Epoch in the specified time system. """ # Delay-load due to circular dependencies. import matplotlib.testing.jpl_units as U secPastRef = value * 86400.0 * U.UnitDbl(1.0, 'sec') return U.Epoch(unit, secPastRef, EpochConverter.jdRef) @staticmethod def epoch2float(value, unit): """ Convert an Epoch value to a float suitable for plotting as a python datetime object. = INPUT VARIABLES - value An Epoch or list of Epochs that need to be converted. - unit The units to use for an axis with Epoch data. = RETURN VALUE - Returns the value parameter converted to floats. """ return value.julianDate(unit) - EpochConverter.jdRef @staticmethod def duration2float(value): """ Convert a Duration value to a float suitable for plotting as a python datetime object. = INPUT VARIABLES - value A Duration or list of Durations that need to be converted. = RETURN VALUE - Returns the value parameter converted to floats. """ return value.seconds() / 86400.0 @staticmethod def convert(value, unit, axis): # docstring inherited # Delay-load due to circular dependencies. import matplotlib.testing.jpl_units as U if not cbook.is_scalar_or_string(value): return [EpochConverter.convert(x, unit, axis) for x in value] if unit is None: unit = EpochConverter.default_units(value, axis) if isinstance(value, U.Duration): return EpochConverter.duration2float(value) else: return EpochConverter.epoch2float(value, unit) @staticmethod def default_units(value, axis): # docstring inherited if cbook.is_scalar_or_string(value): return value.frame() else: return EpochConverter.default_units(value[0], axis)
EpochConverter
python
python-pillow__Pillow
Tests/helper.py
{ "start": 10124, "end": 10406 }
class ____: def __init__(self, func: Callable[[Any], Any]) -> None: self.func = func def __get__(self, instance: Any, cls: type[Any] | None = None) -> Any: result = instance.__dict__[self.func.__name__] = self.func(instance) return result
CachedProperty
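The helper above mirrors functools.cached_property from the standard library: the wrapped method runs once and the result is stored in the instance dict, so later lookups bypass the descriptor. A runnable sketch using the stdlib equivalent (Widget is illustrative, Python 3.8+):

from functools import cached_property

class Widget:
    def __init__(self):
        self.calls = 0

    @cached_property
    def answer(self):
        self.calls += 1
        return 42

w = Widget()
assert (w.answer, w.answer) == (42, 42)
assert w.calls == 1  # the property body ran once; the value is cached on the instance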
python
networkx__networkx
networkx/algorithms/tests/test_cycles.py
{ "start": 33821, "end": 34851 }
class ____: @pytest.mark.parametrize( ("G", "expected"), ( (nx.chvatal_graph(), 4), (nx.tutte_graph(), 4), (nx.petersen_graph(), 5), (nx.heawood_graph(), 6), (nx.pappus_graph(), 6), (nx.random_labeled_tree(10, seed=42), inf), (nx.empty_graph(10), inf), (nx.Graph(chain(cycle_edges(range(5)), cycle_edges(range(6, 10)))), 4), ( nx.Graph( [ (0, 6), (0, 8), (0, 9), (1, 8), (2, 8), (2, 9), (4, 9), (5, 9), (6, 8), (6, 9), (7, 8), ] ), 3, ), ), ) def test_girth(self, G, expected): assert nx.girth(G) == expected
TestGirth
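A one-line usage sketch of the API under test, assuming a networkx version that provides nx.girth; the expected value is taken from the parametrized cases above:

import networkx as nx

# The Petersen graph's shortest cycle has length 5.
assert nx.girth(nx.petersen_graph()) == 5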
python
getsentry__sentry
src/sentry/auth/providers/saml2/auth0/apps.py
{ "start": 36, "end": 267 }
class ____(AppConfig): name = "sentry.auth.providers.saml2.auth0" def ready(self) -> None: from sentry.auth import register from .provider import Auth0SAML2Provider register(Auth0SAML2Provider)
Config
python
kamyu104__LeetCode-Solutions
Python/html-entity-parser.py
{ "start": 3255, "end": 4063 }
class ____(object): def entityParser(self, text): """ :type text: str :rtype: str """ patterns = ["&quot;", "&apos;", "&amp;", "&gt;", "&lt;", "&frasl;"] chars = ["\"", "'", "&", ">", "<", "/"] result = [] i, j = 0, 0 while i != len(text): if text[i] != '&': result.append(text[i]) i += 1 else: for j, pattern in enumerate(patterns): if pattern == text[i:i+len(pattern)]: result.append(chars[j]) i += len(pattern) break else: result.append(text[i]) i += 1 return "".join(result)
Solution2
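An alternative minimal sketch of the same parsing, not the row's scan-and-match loop: build one regex over the six entities and substitute through a lookup table (the sample input is illustrative):

import re

ENTITIES = {
    "&quot;": '"', "&apos;": "'", "&amp;": "&",
    "&gt;": ">", "&lt;": "<", "&frasl;": "/",
}
pattern = re.compile("|".join(re.escape(k) for k in ENTITIES))

def entity_parser(text: str) -> str:
    # re.sub calls the lambda once per match and splices in its return value.
    return pattern.sub(lambda m: ENTITIES[m.group(0)], text)

assert entity_parser("x &gt; y &amp;&amp; y &lt; z") == "x > y && y < z"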
python
django-import-export__django-import-export
tests/core/tests/test_resources/test_modelresource/test_m2m.py
{ "start": 196, "end": 5052 }
class ____(TestCase): def setUp(self): self.resource = BookResource() self.book = Book.objects.create(name="Some book") self.dataset = tablib.Dataset(headers=["id", "name", "author_email", "price"]) row = [self.book.pk, "Some book", "test@example.com", "10.25"] self.dataset.append(row) def test_foreign_keys_export(self): author1 = Author.objects.create(name="Foo") self.book.author = author1 self.book.save() dataset = self.resource.export(Book.objects.all()) self.assertEqual(dataset.dict[0]["author"], author1.pk) def test_foreign_keys_import(self): author2 = Author.objects.create(name="Bar") headers = ["id", "name", "author"] row = [None, "FooBook", author2.pk] dataset = tablib.Dataset(row, headers=headers) self.resource.import_data(dataset, raise_errors=True) book = Book.objects.get(name="FooBook") self.assertEqual(book.author, author2) def test_m2m_export(self): cat1 = Category.objects.create(name="Cat 1") cat2 = Category.objects.create(name="Cat 2") self.book.categories.add(cat1) self.book.categories.add(cat2) dataset = self.resource.export(Book.objects.all()) self.assertEqual(dataset.dict[0]["categories"], "%d,%d" % (cat1.pk, cat2.pk)) def test_m2m_import(self): cat1 = Category.objects.create(name="Cat 1") headers = ["id", "name", "categories"] row = [None, "FooBook", str(cat1.pk)] dataset = tablib.Dataset(row, headers=headers) self.resource.import_data(dataset, raise_errors=True) book = Book.objects.get(name="FooBook") self.assertIn(cat1, book.categories.all()) def test_m2m_import_clear(self): cat1 = Category.objects.create(name="Cat 1") self.book.categories.add(cat1) self.assertEqual(1, self.book.categories.count()) headers = ["id", "name", "categories"] row = [self.book.pk, "FooBook", ""] dataset = tablib.Dataset(row, headers=headers) self.resource.import_data(dataset, raise_errors=True) book = Book.objects.get(name="FooBook") self.assertEqual(0, book.categories.count()) def test_m2m_options_import(self): cat1 = Category.objects.create(name="Cat 1") cat2 = Category.objects.create(name="Cat 2") headers = ["id", "name", "categories"] row = [None, "FooBook", "Cat 1|Cat 2"] dataset = tablib.Dataset(row, headers=headers) class BookM2MResource(resources.ModelResource): categories = fields.Field( attribute="categories", widget=widgets.ManyToManyWidget(Category, field="name", separator="|"), ) class Meta: model = Book resource = BookM2MResource() resource.import_data(dataset, raise_errors=True) book = Book.objects.get(name="FooBook") self.assertIn(cat1, book.categories.all()) self.assertIn(cat2, book.categories.all()) def test_m2m_add(self): cat1 = Category.objects.create(name="Cat 1") cat2 = Category.objects.create(name="Cat 2") cat3 = Category.objects.create(name="Cat 3") cat4 = Category.objects.create(name="Cat 4") headers = ["id", "name", "categories"] row = [None, "FooBook", "Cat 1|Cat 2"] dataset = tablib.Dataset(row, headers=headers) class BookM2MResource(resources.ModelResource): categories = fields.Field( attribute="categories", m2m_add=True, widget=widgets.ManyToManyWidget(Category, field="name", separator="|"), ) class Meta: model = Book resource = BookM2MResource() resource.import_data(dataset, raise_errors=True) book = Book.objects.get(name="FooBook") self.assertIn(cat1, book.categories.all()) self.assertIn(cat2, book.categories.all()) self.assertNotIn(cat3, book.categories.all()) self.assertNotIn(cat4, book.categories.all()) row1 = [ book.id, "FooBook", "Cat 1|Cat 2", ] # This should have no effect, since Cat 1 and Cat 2 already exist row2 = [book.id, "FooBook", "Cat 3|Cat 4"] 
dataset = tablib.Dataset(row1, row2, headers=headers) resource.import_data(dataset, raise_errors=True) book2 = Book.objects.get(name="FooBook") self.assertEqual(book.id, book2.id) self.assertEqual(book.categories.count(), 4) self.assertIn(cat1, book2.categories.all()) self.assertIn(cat2, book2.categories.all()) self.assertIn(cat3, book2.categories.all()) self.assertIn(cat4, book2.categories.all())
ForeignKeyM2MTest
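The tests above build their import payloads with tablib. A tiny standalone sketch of constructing such a dataset (the row values are illustrative):

import tablib

dataset = tablib.Dataset(headers=["id", "name", "categories"])
dataset.append([None, "FooBook", "Cat 1|Cat 2"])
# .dict yields one mapping per row, keyed by the headers.
print(dataset.dict[0]["categories"])  # Cat 1|Cat 2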
python
html5lib__html5lib-python
html5lib/tests/test_serializer.py
{ "start": 580, "end": 8357 }
class ____(TreeWalker): def __iter__(self): for token in self.tree: type = token[0] if type == "StartTag": if len(token) == 4: namespace, name, attrib = token[1:4] else: namespace = default_namespace name, attrib = token[1:3] yield self.startTag(namespace, name, self._convertAttrib(attrib)) elif type == "EndTag": if len(token) == 3: namespace, name = token[1:3] else: namespace = default_namespace name = token[1] yield self.endTag(namespace, name) elif type == "EmptyTag": if len(token) == 4: namespace, name, attrib = token[1:] else: namespace = default_namespace name, attrib = token[1:] for token in self.emptyTag(namespace, name, self._convertAttrib(attrib)): yield token elif type == "Comment": yield self.comment(token[1]) elif type in ("Characters", "SpaceCharacters"): for token in self.text(token[1]): yield token elif type == "Doctype": if len(token) == 4: yield self.doctype(token[1], token[2], token[3]) elif len(token) == 3: yield self.doctype(token[1], token[2]) else: yield self.doctype(token[1]) else: raise ValueError("Unknown token type: " + type) def _convertAttrib(self, attribs): """html5lib tree-walkers use a dict of (namespace, name): value for attributes, but JSON cannot represent this. Convert from the format in the serializer tests (a list of dicts with "namespace", "name", and "value" as keys) to html5lib's tree-walker format.""" attrs = {} for attrib in attribs: name = (attrib["namespace"], attrib["name"]) assert name not in attrs attrs[name] = attrib["value"] return attrs def serialize_html(input, options): options = {str(k): v for k, v in options.items()} encoding = options.get("encoding", None) if "encoding" in options: del options["encoding"] stream = Lint(JsonWalker(input), False) serializer = HTMLSerializer(alphabetical_attributes=True, **options) return serializer.render(stream, encoding) def throwsWithLatin1(input): with pytest.raises(UnicodeEncodeError): serialize_html(input, {"encoding": "iso-8859-1"}) def testDoctypeName(): throwsWithLatin1([["Doctype", "\u0101"]]) def testDoctypePublicId(): throwsWithLatin1([["Doctype", "potato", "\u0101"]]) def testDoctypeSystemId(): throwsWithLatin1([["Doctype", "potato", "potato", "\u0101"]]) def testCdataCharacters(): test_serializer([["StartTag", "http://www.w3.org/1999/xhtml", "style", {}], ["Characters", "\u0101"]], ["<style>&amacr;"], {"encoding": "iso-8859-1"}) def testCharacters(): test_serializer([["Characters", "\u0101"]], ["&amacr;"], {"encoding": "iso-8859-1"}) def testStartTagName(): throwsWithLatin1([["StartTag", "http://www.w3.org/1999/xhtml", "\u0101", []]]) def testAttributeName(): throwsWithLatin1([["StartTag", "http://www.w3.org/1999/xhtml", "span", [{"namespace": None, "name": "\u0101", "value": "potato"}]]]) def testAttributeValue(): test_serializer([["StartTag", "http://www.w3.org/1999/xhtml", "span", [{"namespace": None, "name": "potato", "value": "\u0101"}]]], ["<span potato=&amacr;>"], {"encoding": "iso-8859-1"}) def testEndTagName(): throwsWithLatin1([["EndTag", "http://www.w3.org/1999/xhtml", "\u0101"]]) def testComment(): throwsWithLatin1([["Comment", "\u0101"]]) def testThrowsUnknownOption(): with pytest.raises(TypeError): HTMLSerializer(foobar=None) @pytest.mark.parametrize("c", list("\t\n\u000C\x20\r\"'=<>`")) def testSpecQuoteAttribute(c): input_ = [["StartTag", "http://www.w3.org/1999/xhtml", "span", [{"namespace": None, "name": "foo", "value": c}]]] if c == '"': output_ = ["<span foo='%s'>" % c] else: output_ = ['<span foo="%s">' % c] options_ = {"quote_attr_values": "spec"} 
test_serializer(input_, output_, options_) @pytest.mark.parametrize("c", list("\t\n\u000C\x20\r\"'=<>`" "\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n" "\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15" "\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" "\x20\x2f\x60\xa0\u1680\u180e\u180f\u2000" "\u2001\u2002\u2003\u2004\u2005\u2006\u2007" "\u2008\u2009\u200a\u2028\u2029\u202f\u205f" "\u3000")) def testLegacyQuoteAttribute(c): input_ = [["StartTag", "http://www.w3.org/1999/xhtml", "span", [{"namespace": None, "name": "foo", "value": c}]]] if c == '"': output_ = ["<span foo='%s'>" % c] else: output_ = ['<span foo="%s">' % c] options_ = {"quote_attr_values": "legacy"} test_serializer(input_, output_, options_) @pytest.fixture def lxml_parser(): return etree.XMLParser(resolve_entities=False) @pytest.mark.skipif("lxml" not in optionals_loaded, reason="lxml not importable") def testEntityReplacement(lxml_parser): doc = '<!DOCTYPE html SYSTEM "about:legacy-compat"><html>&beta;</html>' tree = etree.fromstring(doc, parser=lxml_parser).getroottree() result = serialize(tree, tree="lxml", omit_optional_tags=False) assert result == '<!DOCTYPE html SYSTEM "about:legacy-compat"><html>\u03B2</html>' @pytest.mark.skipif("lxml" not in optionals_loaded, reason="lxml not importable") def testEntityXML(lxml_parser): doc = '<!DOCTYPE html SYSTEM "about:legacy-compat"><html>&gt;</html>' tree = etree.fromstring(doc, parser=lxml_parser).getroottree() result = serialize(tree, tree="lxml", omit_optional_tags=False) assert result == '<!DOCTYPE html SYSTEM "about:legacy-compat"><html>&gt;</html>' @pytest.mark.skipif("lxml" not in optionals_loaded, reason="lxml not importable") def testEntityNoResolve(lxml_parser): doc = '<!DOCTYPE html SYSTEM "about:legacy-compat"><html>&beta;</html>' tree = etree.fromstring(doc, parser=lxml_parser).getroottree() result = serialize(tree, tree="lxml", omit_optional_tags=False, resolve_entities=False) assert result == '<!DOCTYPE html SYSTEM "about:legacy-compat"><html>&beta;</html>' def param_serializer(): for filename in get_data_files('serializer-testdata', '*.test', os.path.dirname(__file__)): with open(filename) as fp: tests = json.load(fp) for test in tests['tests']: yield test["input"], test["expected"], test.get("options", {}) @pytest.mark.parametrize("input, expected, options", param_serializer()) def test_serializer(input, expected, options): encoding = options.get("encoding", None) if encoding: expected = list(map(lambda x: x.encode(encoding), expected)) result = serialize_html(input, options) if len(expected) == 1: assert expected[0] == result, "Expected:\n%s\nActual:\n%s\nOptions:\n%s" % (expected[0], result, str(options)) elif result not in expected: assert False, "Expected: %s, Received: %s" % (expected, result)
JsonWalker
python
pytorch__pytorch
torch/_dynamo/output_graph.py
{ "start": 7342, "end": 7954 }
class ____: """Stores why a given output graph was compiled; i.e. what caused the graph break.""" reason: str user_stack: list[traceback.FrameSummary] # Indicates if this was a graph break reason due to graph break. graph_break: bool = True def __post_init__(self) -> None: if self.graph_break: graph_break_reasons.append(self) def _get_gen_rand_values_fn(random_calls: Any) -> Callable[[], list[Any]]: def _gen_rand_values() -> list[Any]: return [fn(*args, **kwargs) for fn, args, kwargs in random_calls] return _gen_rand_values
GraphCompileReason
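GraphCompileReason appears to be a dataclass (the decorator sits outside the quoted span) whose __post_init__ registers each instance in a module-level list. A generic standalone sketch of that self-registering pattern; the names below are illustrative, not torch internals (Python 3.9+ for the builtin generic annotation):

from dataclasses import dataclass, field
import traceback

compile_reasons = []

@dataclass
class CompileReason:
    reason: str
    user_stack: list[traceback.FrameSummary] = field(default_factory=list)
    graph_break: bool = True

    def __post_init__(self) -> None:
        if self.graph_break:
            compile_reasons.append(self)

CompileReason("data-dependent control flow")
assert len(compile_reasons) == 1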
python
scikit-learn__scikit-learn
sklearn/metrics/_pairwise_distances_reduction/_dispatcher.py
{ "start": 16559, "end": 23217 }
class ____(BaseDistancesReductionDispatcher): """Compute the argkmin of row vectors of X on the ones of Y with labels. For each row vector of X, computes the indices of k first the rows vectors of Y with the smallest distances. Computes weighted mode of labels. ArgKminClassMode is typically used to perform bruteforce k-nearest neighbors queries when the weighted mode of the labels for the k-nearest neighbors are required, such as in `predict` methods. This class is not meant to be instantiated, one should only use its :meth:`compute` classmethod which handles allocation and deallocation consistently. """ @classmethod def valid_metrics(cls) -> List[str]: excluded = { # Euclidean is technically usable for ArgKminClassMode # but its current implementation would not be competitive. # TODO: implement Euclidean specialization using GEMM. "euclidean", "sqeuclidean", } return list(set(BaseDistancesReductionDispatcher.valid_metrics()) - excluded) @classmethod def compute( cls, X, Y, k, weights, Y_labels, unique_Y_labels, metric="euclidean", chunk_size=None, metric_kwargs=None, strategy=None, ): """Compute the argkmin reduction. Parameters ---------- X : ndarray of shape (n_samples_X, n_features) The input array to be labelled. Y : ndarray of shape (n_samples_Y, n_features) The input array whose class membership are provided through the `Y_labels` parameter. k : int The number of nearest neighbors to consider. weights : ndarray The weights applied over the `Y_labels` of `Y` when computing the weighted mode of the labels. Y_labels : ndarray An array containing the index of the class membership of the associated samples in `Y`. This is used in labeling `X`. unique_Y_labels : ndarray An array containing all unique indices contained in the corresponding `Y_labels` array. metric : str, default='euclidean' The distance metric to use. For a list of available metrics, see the documentation of :class:`~sklearn.metrics.DistanceMetric`. Currently does not support `'precomputed'`. chunk_size : int, default=None, The number of vectors per chunk. If None (default) looks-up in scikit-learn configuration for `pairwise_dist_chunk_size`, and use 256 if it is not set. metric_kwargs : dict, default=None Keyword arguments to pass to specified metric function. strategy : str, {'auto', 'parallel_on_X', 'parallel_on_Y'}, default=None The chunking strategy defining which dataset parallelization are made on. For both strategies the computations happens with two nested loops, respectively on chunks of X and chunks of Y. Strategies differs on which loop (outer or inner) is made to run in parallel with the Cython `prange` construct: - 'parallel_on_X' dispatches chunks of X uniformly on threads. Each thread then iterates on all the chunks of Y. This strategy is embarrassingly parallel and comes with no datastructures synchronisation. - 'parallel_on_Y' dispatches chunks of Y uniformly on threads. Each thread processes all the chunks of X in turn. This strategy is a sequence of embarrassingly parallel subtasks (the inner loop on Y chunks) with intermediate datastructures synchronisation at each iteration of the sequential outer loop on X chunks. - 'auto' relies on a simple heuristic to choose between 'parallel_on_X' and 'parallel_on_Y': when `X.shape[0]` is large enough, 'parallel_on_X' is usually the most efficient strategy. 
When `X.shape[0]` is small but `Y.shape[0]` is large, 'parallel_on_Y' brings more opportunity for parallelism and is therefore more efficient despite the synchronization step at each iteration of the outer loop on chunks of `X`. - None (default) looks-up in scikit-learn configuration for `pairwise_dist_parallel_strategy`, and use 'auto' if it is not set. Returns ------- probabilities : ndarray of shape (n_samples_X, n_classes) An array containing the class probabilities for each sample. Notes ----- This classmethod is responsible for introspecting the arguments values to dispatch to the most appropriate implementation of :class:`PairwiseDistancesArgKmin`. This allows decoupling the API entirely from the implementation details whilst maintaining RAII: all temporarily allocated datastructures necessary for the concrete implementation are therefore freed when this classmethod returns. """ if weights not in {"uniform", "distance"}: raise ValueError( "Only the 'uniform' or 'distance' weights options are supported" f" at this time. Got: {weights=}." ) if X.dtype == Y.dtype == np.float64: return ArgKminClassMode64.compute( X=X, Y=Y, k=k, weights=weights, Y_labels=np.array(Y_labels, dtype=np.intp), unique_Y_labels=np.array(unique_Y_labels, dtype=np.intp), metric=metric, chunk_size=chunk_size, metric_kwargs=metric_kwargs, strategy=strategy, ) if X.dtype == Y.dtype == np.float32: return ArgKminClassMode32.compute( X=X, Y=Y, k=k, weights=weights, Y_labels=np.array(Y_labels, dtype=np.intp), unique_Y_labels=np.array(unique_Y_labels, dtype=np.intp), metric=metric, chunk_size=chunk_size, metric_kwargs=metric_kwargs, strategy=strategy, ) raise ValueError( "Only float64 or float32 datasets pairs are supported at this time, " f"got: X.dtype={X.dtype} and Y.dtype={Y.dtype}." )
ArgKminClassMode
python
kubernetes-client__python
kubernetes/client/models/discovery_v1_endpoint_port.py
{ "start": 383, "end": 8898 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'app_protocol': 'str', 'name': 'str', 'port': 'int', 'protocol': 'str' } attribute_map = { 'app_protocol': 'appProtocol', 'name': 'name', 'port': 'port', 'protocol': 'protocol' } def __init__(self, app_protocol=None, name=None, port=None, protocol=None, local_vars_configuration=None): # noqa: E501 """DiscoveryV1EndpointPort - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._app_protocol = None self._name = None self._port = None self._protocol = None self.discriminator = None if app_protocol is not None: self.app_protocol = app_protocol if name is not None: self.name = name if port is not None: self.port = port if protocol is not None: self.protocol = protocol @property def app_protocol(self): """Gets the app_protocol of this DiscoveryV1EndpointPort. # noqa: E501 The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either: * Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 * Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol. # noqa: E501 :return: The app_protocol of this DiscoveryV1EndpointPort. # noqa: E501 :rtype: str """ return self._app_protocol @app_protocol.setter def app_protocol(self, app_protocol): """Sets the app_protocol of this DiscoveryV1EndpointPort. The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either: * Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 * Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol. # noqa: E501 :param app_protocol: The app_protocol of this DiscoveryV1EndpointPort. # noqa: E501 :type: str """ self._app_protocol = app_protocol @property def name(self): """Gets the name of this DiscoveryV1EndpointPort. # noqa: E501 name represents the name of this port. 
All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string. # noqa: E501 :return: The name of this DiscoveryV1EndpointPort. # noqa: E501 :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this DiscoveryV1EndpointPort. name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string. # noqa: E501 :param name: The name of this DiscoveryV1EndpointPort. # noqa: E501 :type: str """ self._name = name @property def port(self): """Gets the port of this DiscoveryV1EndpointPort. # noqa: E501 port represents the port number of the endpoint. If the EndpointSlice is derived from a Kubernetes service, this must be set to the service's target port. EndpointSlices used for other purposes may have a nil port. # noqa: E501 :return: The port of this DiscoveryV1EndpointPort. # noqa: E501 :rtype: int """ return self._port @port.setter def port(self, port): """Sets the port of this DiscoveryV1EndpointPort. port represents the port number of the endpoint. If the EndpointSlice is derived from a Kubernetes service, this must be set to the service's target port. EndpointSlices used for other purposes may have a nil port. # noqa: E501 :param port: The port of this DiscoveryV1EndpointPort. # noqa: E501 :type: int """ self._port = port @property def protocol(self): """Gets the protocol of this DiscoveryV1EndpointPort. # noqa: E501 protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP. # noqa: E501 :return: The protocol of this DiscoveryV1EndpointPort. # noqa: E501 :rtype: str """ return self._protocol @protocol.setter def protocol(self, protocol): """Sets the protocol of this DiscoveryV1EndpointPort. protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP. # noqa: E501 :param protocol: The protocol of this DiscoveryV1EndpointPort. 
# noqa: E501 :type: str """ self._protocol = protocol def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, DiscoveryV1EndpointPort): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, DiscoveryV1EndpointPort): return True return self.to_dict() != other.to_dict()
DiscoveryV1EndpointPort
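A minimal usage sketch for the generated model above (illustrative only, not part of the record). It assumes the official kubernetes Python client, where this class is exposed as kubernetes.client.DiscoveryV1EndpointPort and accepts its fields as keyword arguments:

from kubernetes.client import DiscoveryV1EndpointPort  # assumed import path

# Build an endpoint port and round-trip it through to_dict(), which walks
# openapi_types and recursively serializes nested models, lists, and dicts.
port = DiscoveryV1EndpointPort(name="http", port=8080, protocol="TCP")
assert port.to_dict()["port"] == 8080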
python
python__mypy
mypy/build.py
{ "start": 12452, "end": 19280 }
class ____(TypedDict): path: str mtime: int # Priorities used for imports. (Here, top-level includes inside a class.) # These are used to determine a more predictable order in which the # nodes in an import cycle are processed. PRI_HIGH: Final = 5 # top-level "from X import blah" PRI_MED: Final = 10 # top-level "import X" PRI_LOW: Final = 20 # either form inside a function PRI_MYPY: Final = 25 # inside "if MYPY" or "if TYPE_CHECKING" PRI_INDIRECT: Final = 30 # an indirect dependency PRI_ALL: Final = 99 # include all priorities def import_priority(imp: ImportBase, toplevel_priority: int) -> int: """Compute import priority from an import node.""" if not imp.is_top_level: # Inside a function return PRI_LOW if imp.is_mypy_only: # Inside "if MYPY" or "if typing.TYPE_CHECKING" return max(PRI_MYPY, toplevel_priority) # A regular import; priority determined by argument. return toplevel_priority def load_plugins_from_config( options: Options, errors: Errors, stdout: TextIO ) -> tuple[list[Plugin], dict[str, str]]: """Load all configured plugins. Return a list of all the loaded plugins from the config file. The second return value is a snapshot of versions/hashes of loaded user plugins (for cache validation). """ import importlib snapshot: dict[str, str] = {} if not options.config_file: return [], snapshot line = find_config_file_line_number(options.config_file, "mypy", "plugins") if line == -1: line = 1 # We need to pick some line number that doesn't look too confusing def plugin_error(message: str) -> NoReturn: errors.report(line, 0, message) errors.raise_error(use_stdout=False) custom_plugins: list[Plugin] = [] errors.set_file(options.config_file, None, options) for plugin_path in options.plugins: func_name = "plugin" plugin_dir: str | None = None if ":" in os.path.basename(plugin_path): plugin_path, func_name = plugin_path.rsplit(":", 1) if plugin_path.endswith(".py"): # Plugin paths can be relative to the config file location. plugin_path = os.path.join(os.path.dirname(options.config_file), plugin_path) if not os.path.isfile(plugin_path): plugin_error(f'Can\'t find plugin "{plugin_path}"') # Use an absolute path to avoid populating the cache entry # for 'tmp' during tests, since it will be different in # different tests. 
plugin_dir = os.path.abspath(os.path.dirname(plugin_path)) fnam = os.path.basename(plugin_path) module_name = fnam[:-3] sys.path.insert(0, plugin_dir) elif re.search(r"[\\/]", plugin_path): fnam = os.path.basename(plugin_path) plugin_error(f'Plugin "{fnam}" does not have a .py extension') else: module_name = plugin_path try: module = importlib.import_module(module_name) except Exception as exc: plugin_error(f'Error importing plugin "{plugin_path}": {exc}') finally: if plugin_dir is not None: assert sys.path[0] == plugin_dir del sys.path[0] if not hasattr(module, func_name): plugin_error( 'Plugin "{}" does not define entry point function "{}"'.format( plugin_path, func_name ) ) try: plugin_type = getattr(module, func_name)(__version__) except Exception: print(f"Error calling the plugin(version) entry point of {plugin_path}\n", file=stdout) raise # Propagate to display traceback if not isinstance(plugin_type, type): plugin_error( 'Type object expected as the return value of "plugin"; got {!r} (in {})'.format( plugin_type, plugin_path ) ) if not issubclass(plugin_type, Plugin): plugin_error( 'Return value of "plugin" must be a subclass of "mypy.plugin.Plugin" ' "(in {})".format(plugin_path) ) try: custom_plugins.append(plugin_type(options)) snapshot[module_name] = take_module_snapshot(module) except Exception: print(f"Error constructing plugin instance of {plugin_type.__name__}\n", file=stdout) raise # Propagate to display traceback return custom_plugins, snapshot def load_plugins( options: Options, errors: Errors, stdout: TextIO, extra_plugins: Sequence[Plugin] ) -> tuple[Plugin, dict[str, str]]: """Load all configured plugins. Return a plugin that encapsulates all plugins chained together. Always at least include the default plugin (it's last in the chain). The second return value is a snapshot of versions/hashes of loaded user plugins (for cache validation). """ custom_plugins, snapshot = load_plugins_from_config(options, errors, stdout) custom_plugins += extra_plugins default_plugin: Plugin = DefaultPlugin(options) if not custom_plugins: return default_plugin, snapshot # Custom plugins take precedence over the default plugin. return ChainedPlugin(options, custom_plugins + [default_plugin]), snapshot def take_module_snapshot(module: types.ModuleType) -> str: """Take plugin module snapshot by recording its version and hash. We record _both_ hash and the version to detect more possible changes (e.g. if there is a change in modules imported by a plugin). """ if hasattr(module, "__file__"): assert module.__file__ is not None with open(module.__file__, "rb") as f: digest = hash_digest(f.read()) else: digest = "unknown" ver = getattr(module, "__version__", "none") return f"{ver}:{digest}" def find_config_file_line_number(path: str, section: str, setting_name: str) -> int: """Return the approximate location of setting_name within mypy config file. Return -1 if can't determine the line unambiguously. """ in_desired_section = False try: results = [] with open(path, encoding="UTF-8") as f: for i, line in enumerate(f): line = line.strip() if line.startswith("[") and line.endswith("]"): current_section = line[1:-1].strip() in_desired_section = current_section == section elif in_desired_section and re.match(rf"{setting_name}\s*=", line): results.append(i + 1) if len(results) == 1: return results[0] except OSError: pass return -1
FgDepMeta
python
run-llama__llama_index
llama-index-packs/llama-index-packs-llama-guard-moderator/llama_index/packs/llama_guard_moderator/base.py
{ "start": 3442, "end": 6445 }
class ____(BaseLlamaPack): def __init__( self, custom_taxonomy: str = DEFAULT_TAXONOMY, ) -> None: """Init params.""" try: import torch from transformers import AutoModelForCausalLM, AutoTokenizer except ImportError: raise ImportError( "Dependencies missing, run `pip install torch transformers`" ) import os hf_access_token = os.environ.get("HUGGINGFACE_ACCESS_TOKEN") if not os.environ.get("HUGGINGFACE_ACCESS_TOKEN", None): raise ValueError("Hugging Face access token is missing or invalid.") from huggingface_hub import login login(token=hf_access_token) model_id = "meta-llama/LlamaGuard-7b" self.device = "cuda" dtype = torch.bfloat16 self.tokenizer = AutoTokenizer.from_pretrained(model_id) self.model = AutoModelForCausalLM.from_pretrained( model_id, torch_dtype=dtype, device_map=self.device ) self.custom_taxonomy = custom_taxonomy def get_modules(self) -> Dict[str, Any]: """Get modules.""" return { "tokenizer": self.tokenizer, "model": self.model, "device": self.device, "custom_taxonomy": self.custom_taxonomy, } def run(self, message: str, **kwargs: Any) -> Any: """Run the pipeline.""" # tailored for query engine input/output, using "user" role chat = [{"role": "user", "content": message}] prompt = self._moderation_prompt_for_chat(chat) inputs = self.tokenizer([prompt], return_tensors="pt").to(self.device) output = self.model.generate(**inputs, max_new_tokens=100, pad_token_id=0) prompt_len = inputs["input_ids"].shape[-1] return self.tokenizer.decode(output[0][prompt_len:], skip_special_tokens=True) def _moderation_prompt_for_chat(self, chat): # For simplicity, we assume the chat dictionary correctly alternates "user" and "assistant" roles conversation = [turn["content"] for turn in chat] role = "Agent" if len(conversation) % 2 == 0 else "User" prompt = f"""[INST] Task: Check if there is unsafe content in '{role}' messages in conversations according our safety policy with the below categories. <BEGIN UNSAFE CONTENT CATEGORIES> {self.custom_taxonomy} <END UNSAFE CONTENT CATEGORIES> <BEGIN CONVERSATION> """ # Alternate User/Agent turns, inserting 2 newlines between each for i, m in enumerate(conversation): role = "User" if i % 2 == 0 else "Agent" prompt += f"{role}: {m}\n\n" prompt += f"""<END CONVERSATION> Provide your safety assessment for {role} in the above conversation: - First line must read 'safe' or 'unsafe'. - If unsafe, a second line must include a comma-separated list of violated categories. [/INST]""" return prompt
LlamaGuardModeratorPack
python
pytorch__pytorch
torch/utils/data/datapipes/dataframe/dataframes.py
{ "start": 940, "end": 1667 }
class ____(DFIterDataPipe): def __init__(self, source_datapipe, output_var) -> None: self.source_datapipe = source_datapipe self.output_var = output_var def __iter__(self): for item in self.source_datapipe: yield self.output_var.apply_ops(item) # TODO(VitalyFedyunin): Extract this list from the DFIterDataPipe registered functions DATAPIPES_OPS = [ "_dataframes_as_tuples", "groupby", "_dataframes_filter", "map", "to_datapipe", "shuffle", "concat", "batch", "_dataframes_per_row", "_dataframes_concat", "_dataframes_shuffle", ] UNIMPLEMENTED_ATTR = ["__deepcopy__", "__setstate__", "is_shardable", "apply_sharding"]
DataFrameTracedOps
python
pytorch__pytorch
test/torch_np/numpy_tests/core/test_einsum.py
{ "start": 728, "end": 46617 }
class ____(TestCase): def test_einsum_errors(self): for do_opt in [True, False]: # Need enough arguments assert_raises( (TypeError, IndexError, ValueError), np.einsum, optimize=do_opt ) assert_raises((IndexError, ValueError), np.einsum, "", optimize=do_opt) # subscripts must be a string assert_raises((AttributeError, TypeError), np.einsum, 0, 0, optimize=do_opt) # out parameter must be an array assert_raises(TypeError, np.einsum, "", 0, out="test", optimize=do_opt) # order parameter must be a valid order assert_raises( (NotImplementedError, ValueError), np.einsum, "", 0, order="W", optimize=do_opt, ) # casting parameter must be a valid casting assert_raises(ValueError, np.einsum, "", 0, casting="blah", optimize=do_opt) # dtype parameter must be a valid dtype assert_raises( TypeError, np.einsum, "", 0, dtype="bad_data_type", optimize=do_opt ) # other keyword arguments are rejected assert_raises(TypeError, np.einsum, "", 0, bad_arg=0, optimize=do_opt) # issue 4528 revealed a segfault with this call assert_raises( (RuntimeError, TypeError), np.einsum, *(None,) * 63, optimize=do_opt ) # number of operands must match count in subscripts string assert_raises( (RuntimeError, ValueError), np.einsum, "", 0, 0, optimize=do_opt ) assert_raises( (RuntimeError, ValueError), np.einsum, ",", 0, [0], [0], optimize=do_opt ) assert_raises( (RuntimeError, ValueError), np.einsum, ",", [0], optimize=do_opt ) # can't have more subscripts than dimensions in the operand assert_raises( (RuntimeError, ValueError), np.einsum, "i", 0, optimize=do_opt ) assert_raises( (RuntimeError, ValueError), np.einsum, "ij", [0, 0], optimize=do_opt ) assert_raises( (RuntimeError, ValueError), np.einsum, "...i", 0, optimize=do_opt ) assert_raises( (RuntimeError, ValueError), np.einsum, "i...j", [0, 0], optimize=do_opt ) assert_raises( (RuntimeError, ValueError), np.einsum, "i...", 0, optimize=do_opt ) assert_raises( (RuntimeError, ValueError), np.einsum, "ij...", [0, 0], optimize=do_opt ) # invalid ellipsis assert_raises( (RuntimeError, ValueError), np.einsum, "i..", [0, 0], optimize=do_opt ) assert_raises( (RuntimeError, ValueError), np.einsum, ".i...", [0, 0], optimize=do_opt ) assert_raises( (RuntimeError, ValueError), np.einsum, "j->..j", [0, 0], optimize=do_opt ) assert_raises( (RuntimeError, ValueError), np.einsum, "j->.j...", [0, 0], optimize=do_opt, ) # invalid subscript character assert_raises( (RuntimeError, ValueError), np.einsum, "i%...", [0, 0], optimize=do_opt ) assert_raises( (RuntimeError, ValueError), np.einsum, "...j$", [0, 0], optimize=do_opt ) assert_raises( (RuntimeError, ValueError), np.einsum, "i->&", [0, 0], optimize=do_opt ) # output subscripts must appear in input assert_raises( (RuntimeError, ValueError), np.einsum, "i->ij", [0, 0], optimize=do_opt ) # output subscripts may only be specified once assert_raises( (RuntimeError, ValueError), np.einsum, "ij->jij", [[0, 0], [0, 0]], optimize=do_opt, ) # dimensions much match when being collapsed assert_raises( (RuntimeError, ValueError), np.einsum, "ii", np.arange(6).reshape(2, 3), optimize=do_opt, ) assert_raises( (RuntimeError, ValueError), np.einsum, "ii->i", np.arange(6).reshape(2, 3), optimize=do_opt, ) # broadcasting to new dimensions must be enabled explicitly assert_raises( (RuntimeError, ValueError), np.einsum, "i", np.arange(6).reshape(2, 3), optimize=do_opt, ) assert_raises( (RuntimeError, ValueError), np.einsum, "i->i", [[0, 1], [0, 1]], out=np.arange(4).reshape(2, 2), optimize=do_opt, ) with assert_raises((RuntimeError, ValueError)): # , 
match="'b'"): # gh-11221 - 'c' erroneously appeared in the error message a = np.ones((3, 3, 4, 5, 6)) b = np.ones((3, 4, 5)) np.einsum("aabcb,abc", a, b) # Check order kwarg, asanyarray allows 1d to pass through assert_raises( (NotImplementedError, ValueError), np.einsum, "i->i", np.arange(6).reshape(-1, 1), optimize=do_opt, order="d", ) @xfail # (reason="a view into smth else") def test_einsum_views(self): # pass-through for do_opt in [True, False]: a = np.arange(6) a = a.reshape(2, 3) b = np.einsum("...", a, optimize=do_opt) assert_(b.tensor._base is a.tensor) b = np.einsum(a, [Ellipsis], optimize=do_opt) assert_(b.base is a) b = np.einsum("ij", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, a) b = np.einsum(a, [0, 1], optimize=do_opt) assert_(b.base is a) assert_equal(b, a) # output is writeable whenever input is writeable b = np.einsum("...", a, optimize=do_opt) assert_(b.flags["WRITEABLE"]) a.flags["WRITEABLE"] = False b = np.einsum("...", a, optimize=do_opt) assert_(not b.flags["WRITEABLE"]) # transpose a = np.arange(6) a.shape = (2, 3) b = np.einsum("ji", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, a.T) b = np.einsum(a, [1, 0], optimize=do_opt) assert_(b.base is a) assert_equal(b, a.T) # diagonal a = np.arange(9) a.shape = (3, 3) b = np.einsum("ii->i", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [a[i, i] for i in range(3)]) b = np.einsum(a, [0, 0], [0], optimize=do_opt) assert_(b.base is a) assert_equal(b, [a[i, i] for i in range(3)]) # diagonal with various ways of broadcasting an additional dimension a = np.arange(27) a.shape = (3, 3, 3) b = np.einsum("...ii->...i", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt) assert_(b.base is a) assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) b = np.einsum("ii...->...i", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(2, 0, 1)]) b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt) assert_(b.base is a) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(2, 0, 1)]) b = np.einsum("...ii->i...", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt) assert_(b.base is a) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum("jii->ij", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt) assert_(b.base is a) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum("ii...->i...", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt) assert_(b.base is a) assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) b = np.einsum("i...i->i...", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis], optimize=do_opt) assert_(b.base is a) assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) b = np.einsum("i...i->...i", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(1, 0, 2)]) b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt) assert_(b.base is a) assert_equal(b, [[x[i, i] for i in range(3)] for x 
in a.transpose(1, 0, 2)]) # triple diagonal a = np.arange(27) a.shape = (3, 3, 3) b = np.einsum("iii->i", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [a[i, i, i] for i in range(3)]) b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt) assert_(b.base is a) assert_equal(b, [a[i, i, i] for i in range(3)]) # swap axes a = np.arange(24) a.shape = (2, 3, 4) b = np.einsum("ijk->jik", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, a.swapaxes(0, 1)) b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt) assert_(b.base is a) assert_equal(b, a.swapaxes(0, 1)) # @np._no_nep50_warning() def check_einsum_sums(self, dtype, do_opt=False): dtype = np.dtype(dtype) # Check various sums. Does many sizes to exercise unrolled loops. # sum(a, axis=-1) for n in range(1, 17): a = np.arange(n, dtype=dtype) assert_equal( np.einsum("i->", a, optimize=do_opt), np.sum(a, axis=-1).astype(dtype) ) assert_equal( np.einsum(a, [0], [], optimize=do_opt), np.sum(a, axis=-1).astype(dtype) ) for n in range(1, 17): a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) assert_equal( np.einsum("...i->...", a, optimize=do_opt), np.sum(a, axis=-1).astype(dtype), ) assert_equal( np.einsum(a, [Ellipsis, 0], [Ellipsis], optimize=do_opt), np.sum(a, axis=-1).astype(dtype), ) # sum(a, axis=0) for n in range(1, 17): a = np.arange(2 * n, dtype=dtype).reshape(2, n) assert_equal( np.einsum("i...->...", a, optimize=do_opt), np.sum(a, axis=0).astype(dtype), ) assert_equal( np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), np.sum(a, axis=0).astype(dtype), ) for n in range(1, 17): a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) assert_equal( np.einsum("i...->...", a, optimize=do_opt), np.sum(a, axis=0).astype(dtype), ) assert_equal( np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), np.sum(a, axis=0).astype(dtype), ) # trace(a) for n in range(1, 17): a = np.arange(n * n, dtype=dtype).reshape(n, n) assert_equal(np.einsum("ii", a, optimize=do_opt), np.trace(a).astype(dtype)) assert_equal( np.einsum(a, [0, 0], optimize=do_opt), # torch? 
np.trace(a).astype(dtype), ) # gh-15961: should accept numpy int64 type in subscript list # np_array = np.asarray([0, 0]) # assert_equal(np.einsum(a, np_array, optimize=do_opt), # np.trace(a).astype(dtype)) # assert_equal(np.einsum(a, list(np_array), optimize=do_opt), # np.trace(a).astype(dtype)) # multiply(a, b) assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case for n in range(1, 17): a = np.arange(3 * n, dtype=dtype).reshape(3, n) b = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) assert_equal( np.einsum("..., ...", a, b, optimize=do_opt), np.multiply(a, b) ) assert_equal( np.einsum(a, [Ellipsis], b, [Ellipsis], optimize=do_opt), np.multiply(a, b), ) # inner(a,b) for n in range(1, 17): a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) b = np.arange(n, dtype=dtype) assert_equal(np.einsum("...i, ...i", a, b, optimize=do_opt), np.inner(a, b)) assert_equal( np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0], optimize=do_opt), np.inner(a, b), ) for n in range(1, 11): a = np.arange(n * 3 * 2, dtype=dtype).reshape(n, 3, 2) b = np.arange(n, dtype=dtype) assert_equal( np.einsum("i..., i...", a, b, optimize=do_opt), np.inner(a.T, b.T).T ) assert_equal( np.einsum(a, [0, Ellipsis], b, [0, Ellipsis], optimize=do_opt), np.inner(a.T, b.T).T, ) # outer(a,b) for n in range(1, 17): a = np.arange(3, dtype=dtype) + 1 b = np.arange(n, dtype=dtype) + 1 assert_equal(np.einsum("i,j", a, b, optimize=do_opt), np.outer(a, b)) assert_equal(np.einsum(a, [0], b, [1], optimize=do_opt), np.outer(a, b)) # Suppress the complex warnings for the 'as f8' tests with suppress_warnings(): # matvec(a,b) / a.dot(b) where a is matrix, b is vector for n in range(1, 17): a = np.arange(4 * n, dtype=dtype).reshape(4, n) b = np.arange(n, dtype=dtype) assert_equal(np.einsum("ij, j", a, b, optimize=do_opt), np.dot(a, b)) assert_equal( np.einsum(a, [0, 1], b, [1], optimize=do_opt), np.dot(a, b) ) c = np.arange(4, dtype=dtype) np.einsum( "ij,j", a, b, out=c, dtype="f8", casting="unsafe", optimize=do_opt ) assert_equal(c, np.dot(a.astype("f8"), b.astype("f8")).astype(dtype)) c[...] = 0 np.einsum( a, [0, 1], b, [1], out=c, dtype="f8", casting="unsafe", optimize=do_opt, ) assert_equal(c, np.dot(a.astype("f8"), b.astype("f8")).astype(dtype)) for n in range(1, 17): a = np.arange(4 * n, dtype=dtype).reshape(4, n) b = np.arange(n, dtype=dtype) assert_equal( np.einsum("ji,j", a.T, b.T, optimize=do_opt), np.dot(b.T, a.T) ) assert_equal( np.einsum(a.T, [1, 0], b.T, [1], optimize=do_opt), np.dot(b.T, a.T) ) c = np.arange(4, dtype=dtype) np.einsum( "ji,j", a.T, b.T, out=c, dtype="f8", casting="unsafe", optimize=do_opt, ) assert_equal( c, np.dot(b.T.astype("f8"), a.T.astype("f8")).astype(dtype) ) c[...] = 0 np.einsum( a.T, [1, 0], b.T, [1], out=c, dtype="f8", casting="unsafe", optimize=do_opt, ) assert_equal( c, np.dot(b.T.astype("f8"), a.T.astype("f8")).astype(dtype) ) # matmat(a,b) / a.dot(b) where a is matrix, b is matrix for n in range(1, 17): if n < 8 or dtype != "f2": a = np.arange(4 * n, dtype=dtype).reshape(4, n) b = np.arange(n * 6, dtype=dtype).reshape(n, 6) assert_equal( np.einsum("ij,jk", a, b, optimize=do_opt), np.dot(a, b) ) assert_equal( np.einsum(a, [0, 1], b, [1, 2], optimize=do_opt), np.dot(a, b) ) for n in range(1, 17): a = np.arange(4 * n, dtype=dtype).reshape(4, n) b = np.arange(n * 6, dtype=dtype).reshape(n, 6) c = np.arange(24, dtype=dtype).reshape(4, 6) np.einsum( "ij,jk", a, b, out=c, dtype="f8", casting="unsafe", optimize=do_opt ) assert_equal(c, np.dot(a.astype("f8"), b.astype("f8")).astype(dtype)) c[...] 
= 0 np.einsum( a, [0, 1], b, [1, 2], out=c, dtype="f8", casting="unsafe", optimize=do_opt, ) assert_equal(c, np.dot(a.astype("f8"), b.astype("f8")).astype(dtype)) # matrix triple product (note this is not currently an efficient # way to multiply 3 matrices) a = np.arange(12, dtype=dtype).reshape(3, 4) b = np.arange(20, dtype=dtype).reshape(4, 5) c = np.arange(30, dtype=dtype).reshape(5, 6) if dtype != "f2": assert_equal( np.einsum("ij,jk,kl", a, b, c, optimize=do_opt), a.dot(b).dot(c) ) assert_equal( np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], optimize=do_opt), a.dot(b).dot(c), ) d = np.arange(18, dtype=dtype).reshape(3, 6) np.einsum( "ij,jk,kl", a, b, c, out=d, dtype="f8", casting="unsafe", optimize=do_opt, ) tgt = a.astype("f8").dot(b.astype("f8")) tgt = tgt.dot(c.astype("f8")).astype(dtype) assert_equal(d, tgt) d[...] = 0 np.einsum( a, [0, 1], b, [1, 2], c, [2, 3], out=d, dtype="f8", casting="unsafe", optimize=do_opt, ) tgt = a.astype("f8").dot(b.astype("f8")) tgt = tgt.dot(c.astype("f8")).astype(dtype) assert_equal(d, tgt) # tensordot(a, b) if np.dtype(dtype) != np.dtype("f2"): a = np.arange(60, dtype=dtype).reshape(3, 4, 5) b = np.arange(24, dtype=dtype).reshape(4, 3, 2) assert_equal( np.einsum("ijk, jil -> kl", a, b), np.tensordot(a, b, axes=([1, 0], [0, 1])), ) assert_equal( np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3]), np.tensordot(a, b, axes=([1, 0], [0, 1])), ) c = np.arange(10, dtype=dtype).reshape(5, 2) np.einsum( "ijk,jil->kl", a, b, out=c, dtype="f8", casting="unsafe", optimize=do_opt, ) assert_equal( c, np.tensordot( a.astype("f8"), b.astype("f8"), axes=([1, 0], [0, 1]) ).astype(dtype), ) c[...] = 0 np.einsum( a, [0, 1, 2], b, [1, 0, 3], [2, 3], out=c, dtype="f8", casting="unsafe", optimize=do_opt, ) assert_equal( c, np.tensordot( a.astype("f8"), b.astype("f8"), axes=([1, 0], [0, 1]) ).astype(dtype), ) # logical_and(logical_and(a!=0, b!=0), c!=0) neg_val = -2 if dtype.kind != "u" else np.iinfo(dtype).max - 1 a = np.array([1, 3, neg_val, 0, 12, 13, 0, 1], dtype=dtype) b = np.array([0, 3.5, 0.0, neg_val, 0, 1, 3, 12], dtype=dtype) c = np.array([True, True, False, True, True, False, True, True]) assert_equal( np.einsum( "i,i,i->i", a, b, c, dtype="?", casting="unsafe", optimize=do_opt ), np.logical_and(np.logical_and(a != 0, b != 0), c != 0), ) assert_equal( np.einsum(a, [0], b, [0], c, [0], [0], dtype="?", casting="unsafe"), np.logical_and(np.logical_and(a != 0, b != 0), c != 0), ) a = np.arange(9, dtype=dtype) assert_equal(np.einsum(",i->", 3, a), 3 * np.sum(a)) assert_equal(np.einsum(3, [], a, [0], []), 3 * np.sum(a)) assert_equal(np.einsum("i,->", a, 3), 3 * np.sum(a)) assert_equal(np.einsum(a, [0], 3, [], []), 3 * np.sum(a)) # Various stride0, contiguous, and SSE aligned variants for n in range(1, 25): a = np.arange(n, dtype=dtype) if np.dtype(dtype).itemsize > 1: assert_equal( np.einsum("...,...", a, a, optimize=do_opt), np.multiply(a, a) ) assert_equal(np.einsum("i,i", a, a, optimize=do_opt), np.dot(a, a)) assert_equal(np.einsum("i,->i", a, 2, optimize=do_opt), 2 * a) assert_equal(np.einsum(",i->i", 2, a, optimize=do_opt), 2 * a) assert_equal(np.einsum("i,->", a, 2, optimize=do_opt), 2 * np.sum(a)) assert_equal(np.einsum(",i->", 2, a, optimize=do_opt), 2 * np.sum(a)) assert_equal( np.einsum("...,...", a[1:], a[:-1], optimize=do_opt), np.multiply(a[1:], a[:-1]), ) assert_equal( np.einsum("i,i", a[1:], a[:-1], optimize=do_opt), np.dot(a[1:], a[:-1]), ) assert_equal(np.einsum("i,->i", a[1:], 2, optimize=do_opt), 2 * a[1:]) assert_equal(np.einsum(",i->i", 2, a[1:], 
optimize=do_opt), 2 * a[1:]) assert_equal( np.einsum("i,->", a[1:], 2, optimize=do_opt), 2 * np.sum(a[1:]) ) assert_equal( np.einsum(",i->", 2, a[1:], optimize=do_opt), 2 * np.sum(a[1:]) ) # An object array, summed as the data type # a = np.arange(9, dtype=object) # # b = np.einsum("i->", a, dtype=dtype, casting='unsafe') # assert_equal(b, np.sum(a)) # assert_equal(b.dtype, np.dtype(dtype)) # # b = np.einsum(a, [0], [], dtype=dtype, casting='unsafe') # assert_equal(b, np.sum(a)) # assert_equal(b.dtype, np.dtype(dtype)) # A case which was failing (ticket #1885) p = np.arange(2) + 1 q = np.arange(4).reshape(2, 2) + 3 r = np.arange(4).reshape(2, 2) + 7 assert_equal(np.einsum("z,mz,zm->", p, q, r), 253) # singleton dimensions broadcast (gh-10343) p = np.ones((10, 2)) q = np.ones((1, 2)) assert_array_equal( np.einsum("ij,ij->j", p, q, optimize=True), np.einsum("ij,ij->j", p, q, optimize=False), ) assert_array_equal(np.einsum("ij,ij->j", p, q, optimize=True), [10.0] * 2) # a blas-compatible contraction broadcasting case which was failing # for optimize=True (ticket #10930) x = np.array([2.0, 3.0]) y = np.array([4.0]) assert_array_equal(np.einsum("i, i", x, y, optimize=False), 20.0) assert_array_equal(np.einsum("i, i", x, y, optimize=True), 20.0) # all-ones array was bypassing bug (ticket #10930) p = np.ones((1, 5)) / 2 q = np.ones((5, 5)) / 2 for optimize in (True, False): assert_array_equal( np.einsum("...ij,...jk->...ik", p, p, optimize=optimize), np.einsum("...ij,...jk->...ik", p, q, optimize=optimize), ) assert_array_equal( np.einsum("...ij,...jk->...ik", p, q, optimize=optimize), np.full((1, 5), 1.25), ) # Cases which were failing (gh-10899) x = np.eye(2, dtype=dtype) y = np.ones(2, dtype=dtype) assert_array_equal( np.einsum("ji,i->", x, y, optimize=optimize), [2.0] ) # contig_contig_outstride0_two assert_array_equal( np.einsum("i,ij->", y, x, optimize=optimize), [2.0] ) # stride0_contig_outstride0_two assert_array_equal( np.einsum("ij,i->", x, y, optimize=optimize), [2.0] ) # contig_stride0_outstride0_two @xfail # (reason="int overflow differs in numpy and pytorch") def test_einsum_sums_int8(self): self.check_einsum_sums("i1") @xfail # (reason="int overflow differs in numpy and pytorch") def test_einsum_sums_uint8(self): self.check_einsum_sums("u1") @xfail # (reason="int overflow differs in numpy and pytorch") def test_einsum_sums_int16(self): self.check_einsum_sums("i2") def test_einsum_sums_int32(self): self.check_einsum_sums("i4") self.check_einsum_sums("i4", True) def test_einsum_sums_int64(self): self.check_einsum_sums("i8") @xfail # (reason="np.float16(4641) == 4640.0") def test_einsum_sums_float16(self): self.check_einsum_sums("f2") def test_einsum_sums_float32(self): self.check_einsum_sums("f4") def test_einsum_sums_float64(self): self.check_einsum_sums("f8") self.check_einsum_sums("f8", True) def test_einsum_sums_cfloat64(self): self.check_einsum_sums("c8") self.check_einsum_sums("c8", True) def test_einsum_sums_cfloat128(self): self.check_einsum_sums("c16") def test_einsum_misc(self): # This call used to crash because of a bug in # PyArray_AssignZero a = np.ones((1, 2)) b = np.ones((2, 2, 1)) assert_equal(np.einsum("ij...,j...->i...", a, b), [[[2], [2]]]) assert_equal(np.einsum("ij...,j...->i...", a, b, optimize=True), [[[2], [2]]]) # Regression test for issue #10369 (test unicode inputs with Python 2) assert_equal(np.einsum("ij...,j...->i...", a, b), [[[2], [2]]]) assert_equal(np.einsum("...i,...i", [1, 2, 3], [2, 3, 4]), 20) assert_equal( np.einsum("...i,...i", [1, 2, 3], [2, 
3, 4], optimize="greedy"), 20 ) # The iterator had an issue with buffering this reduction a = np.ones((5, 12, 4, 2, 3), np.int64) b = np.ones((5, 12, 11), np.int64) assert_equal( np.einsum("ijklm,ijn,ijn->", a, b, b), np.einsum("ijklm,ijn->", a, b) ) assert_equal( np.einsum("ijklm,ijn,ijn->", a, b, b, optimize=True), np.einsum("ijklm,ijn->", a, b, optimize=True), ) # Issue #2027, was a problem in the contiguous 3-argument # inner loop implementation a = np.arange(1, 3) b = np.arange(1, 5).reshape(2, 2) c = np.arange(1, 9).reshape(4, 2) assert_equal( np.einsum("x,yx,zx->xzy", a, b, c), [ [[1, 3], [3, 9], [5, 15], [7, 21]], [[8, 16], [16, 32], [24, 48], [32, 64]], ], ) assert_equal( np.einsum("x,yx,zx->xzy", a, b, c, optimize=True), [ [[1, 3], [3, 9], [5, 15], [7, 21]], [[8, 16], [16, 32], [24, 48], [32, 64]], ], ) # Ensure explicitly setting out=None does not cause an error # see issue gh-15776 and issue gh-15256 assert_equal(np.einsum("i,j", [1], [2], out=None), [[2]]) def test_subscript_range(self): # Issue #7741, make sure that all letters of Latin alphabet (both uppercase & lowercase) can be used # when creating a subscript from arrays a = np.ones((2, 3)) b = np.ones((3, 4)) np.einsum(a, [0, 20], b, [20, 2], [0, 2], optimize=False) np.einsum(a, [0, 27], b, [27, 2], [0, 2], optimize=False) np.einsum(a, [0, 51], b, [51, 2], [0, 2], optimize=False) assert_raises( ValueError, lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False), ) assert_raises( ValueError, lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False), ) def test_einsum_broadcast(self): # Issue #2455 change in handling ellipsis # remove the 'middle broadcast' error # only use the 'RIGHT' iteration in prepare_op_axes # adds auto broadcast on left where it belongs # broadcast on right has to be explicit # We need to test the optimized parsing as well A = np.arange(2 * 3 * 4).reshape(2, 3, 4) B = np.arange(3) ref = np.einsum("ijk,j->ijk", A, B, optimize=False) for opt in [True, False]: assert_equal(np.einsum("ij...,j...->ij...", A, B, optimize=opt), ref) assert_equal(np.einsum("ij...,...j->ij...", A, B, optimize=opt), ref) assert_equal( np.einsum("ij...,j->ij...", A, B, optimize=opt), ref ) # used to raise error A = np.arange(12).reshape((4, 3)) B = np.arange(6).reshape((3, 2)) ref = np.einsum("ik,kj->ij", A, B, optimize=False) for opt in [True, False]: assert_equal(np.einsum("ik...,k...->i...", A, B, optimize=opt), ref) assert_equal(np.einsum("ik...,...kj->i...j", A, B, optimize=opt), ref) assert_equal( np.einsum("...k,kj", A, B, optimize=opt), ref ) # used to raise error assert_equal( np.einsum("ik,k...->i...", A, B, optimize=opt), ref ) # used to raise error dims = [2, 3, 4, 5] a = np.arange(np.prod(dims)).reshape(dims) v = np.arange(dims[2]) ref = np.einsum("ijkl,k->ijl", a, v, optimize=False) for opt in [True, False]: assert_equal(np.einsum("ijkl,k", a, v, optimize=opt), ref) assert_equal( np.einsum("...kl,k", a, v, optimize=opt), ref ) # used to raise error assert_equal(np.einsum("...kl,k...", a, v, optimize=opt), ref) J, K, M = 160, 160, 120 A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M) B = np.arange(J * K * M * 3).reshape(J, K, M, 3) ref = np.einsum("...lmn,...lmno->...o", A, B, optimize=False) for opt in [True, False]: assert_equal( np.einsum("...lmn,lmno->...o", A, B, optimize=opt), ref ) # used to raise error def test_einsum_fixedstridebug(self): # Issue #4485 obscure einsum bug # This case revealed a bug in nditer where it reported a stride # as 'fixed' (0) when it was in fact not fixed during 
processing # (0 or 4). The reason for the bug was that the check for a fixed # stride was using the information from the 2D inner loop reuse # to restrict the iteration dimensions it had to validate to be # the same, but that 2D inner loop reuse logic is only triggered # during the buffer copying step, and hence it was invalid to # rely on those values. The fix is to check all the dimensions # of the stride in question, which in the test case reveals that # the stride is not fixed. # # NOTE: This test is triggered by the fact that the default buffersize, # used by einsum, is 8192, and 3*2731 = 8193, is larger than that # and results in a mismatch between the buffering and the # striding for operand A. A = np.arange(2 * 3).reshape(2, 3).astype(np.float32) B = np.arange(2 * 3 * 2731).reshape(2, 3, 2731).astype(np.int16) es = np.einsum("cl, cpx->lpx", A, B) tp = np.tensordot(A, B, axes=(0, 0)) assert_equal(es, tp) # The following is the original test case from the bug report, # made repeatable by changing random arrays to aranges. # codespell:ignore aranges A = np.arange(3 * 3).reshape(3, 3).astype(np.float64) B = np.arange(3 * 3 * 64 * 64).reshape(3, 3, 64, 64).astype(np.float32) es = np.einsum("cl, cpxy->lpxy", A, B) tp = np.tensordot(A, B, axes=(0, 0)) assert_equal(es, tp) def test_einsum_fixed_collapsingbug(self): # Issue #5147. # The bug only occurred when output argument of einssum was used. x = np.random.normal(0, 1, (5, 5, 5, 5)) y1 = np.zeros((5, 5)) np.einsum("aabb->ab", x, out=y1) idx = np.arange(5) y2 = x[idx[:, None], idx[:, None], idx, idx] assert_equal(y1, y2) def test_einsum_failed_on_p9_and_s390x(self): # Issues gh-14692 and gh-12689 # Bug with signed vs unsigned char errored on power9 and s390x Linux tensor = np.random.random_sample((10, 10, 10, 10)) x = np.einsum("ijij->", tensor) y = tensor.trace(axis1=0, axis2=2).trace() assert_allclose(x, y) @xfail # (reason="no base") def test_einsum_all_contig_non_contig_output(self): # Issue gh-5907, tests that the all contiguous special case # actually checks the contiguity of the output x = np.ones((5, 5)) out = np.ones(10)[::2] correct_base = np.ones(10) correct_base[::2] = 5 # Always worked (inner iteration is done with 0-stride): np.einsum("mi,mi,mi->m", x, x, x, out=out) assert_array_equal(out.base, correct_base) # Example 1: out = np.ones(10)[::2] np.einsum("im,im,im->m", x, x, x, out=out) assert_array_equal(out.base, correct_base) # Example 2, buffering causes x to be contiguous but # special cases do not catch the operation before: out = np.ones((2, 2, 2))[..., 0] correct_base = np.ones((2, 2, 2)) correct_base[..., 0] = 2 x = np.ones((2, 2), np.float32) np.einsum("ij,jk->ik", x, x, out=out) assert_array_equal(out.base, correct_base) @parametrize("dtype", np.typecodes["AllFloat"] + np.typecodes["AllInteger"]) def test_different_paths(self, dtype): # Test originally added to cover broken float16 path: gh-20305 # Likely most are covered elsewhere, at least partially. dtype = np.dtype(dtype) # Simple test, designed to exercise most specialized code paths, # note the +0.5 for floats. This makes sure we use a float value # where the results must be exact. 
arr = (np.arange(7) + 0.5).astype(dtype) scalar = np.array(2, dtype=dtype) # contig -> scalar: res = np.einsum("i->", arr) assert res == arr.sum() # contig, contig -> contig: res = np.einsum("i,i->i", arr, arr) assert_array_equal(res, arr * arr) # noncontig, noncontig -> contig: res = np.einsum("i,i->i", arr.repeat(2)[::2], arr.repeat(2)[::2]) assert_array_equal(res, arr * arr) # contig + contig -> scalar assert np.einsum("i,i->", arr, arr) == (arr * arr).sum() # contig + scalar -> contig (with out) out = np.ones(7, dtype=dtype) res = np.einsum("i,->i", arr, dtype.type(2), out=out) assert_array_equal(res, arr * dtype.type(2)) # scalar + contig -> contig (with out) res = np.einsum(",i->i", scalar, arr) assert_array_equal(res, arr * dtype.type(2)) # scalar + contig -> scalar res = np.einsum(",i->", scalar, arr) # Use einsum to compare to not have difference due to sum round-offs: assert res == np.einsum("i->", scalar * arr) # contig + scalar -> scalar res = np.einsum("i,->", arr, scalar) # Use einsum to compare to not have difference due to sum round-offs: assert res == np.einsum("i->", scalar * arr) # contig + contig + contig -> scalar if dtype in ["e", "B", "b"]: # FIXME make xfail raise SkipTest("overflow differs in pytorch and numpy") arr = np.array([0.5, 0.5, 0.25, 4.5, 3.0], dtype=dtype) res = np.einsum("i,i,i->", arr, arr, arr) assert_array_equal(res, (arr * arr * arr).sum()) # four arrays: res = np.einsum("i,i,i,i->", arr, arr, arr, arr) assert_array_equal(res, (arr * arr * arr * arr).sum()) def test_small_boolean_arrays(self): # See gh-5946. # Use array of True embedded in False. a = np.zeros((16, 1, 1), dtype=np.bool_)[:2] a[...] = True out = np.zeros((16, 1, 1), dtype=np.bool_)[:2] tgt = np.ones((2, 1, 1), dtype=np.bool_) res = np.einsum("...ij,...jk->...ik", a, a, out=out) assert_equal(res, tgt) def test_out_is_res(self): a = np.arange(9).reshape(3, 3) res = np.einsum("...ij,...jk->...ik", a, a, out=a) assert res is a def optimize_compare(self, subscripts, operands=None): # Tests all paths of the optimization function against # conventional einsum if operands is None: args = [subscripts] terms = subscripts.split("->")[0].split(",") for term in terms: dims = [global_size_dict[x] for x in term] args.append(np.random.rand(*dims)) else: args = [subscripts] + operands noopt = np.einsum(*args, optimize=False) opt = np.einsum(*args, optimize="greedy") assert_almost_equal(opt, noopt) opt = np.einsum(*args, optimize="optimal") assert_almost_equal(opt, noopt) def test_hadamard_like_products(self): # Hadamard outer products self.optimize_compare("a,ab,abc->abc") self.optimize_compare("a,b,ab->ab") def test_index_transformations(self): # Simple index transformation cases self.optimize_compare("ea,fb,gc,hd,abcd->efgh") self.optimize_compare("ea,fb,abcd,gc,hd->efgh") self.optimize_compare("abcd,ea,fb,gc,hd->efgh") def test_complex(self): # Long test cases self.optimize_compare("acdf,jbje,gihb,hfac,gfac,gifabc,hfac") self.optimize_compare("acdf,jbje,gihb,hfac,gfac,gifabc,hfac") self.optimize_compare("cd,bdhe,aidb,hgca,gc,hgibcd,hgac") self.optimize_compare("abhe,hidj,jgba,hiab,gab") self.optimize_compare("bde,cdh,agdb,hica,ibd,hgicd,hiac") self.optimize_compare("chd,bde,agbc,hiad,hgc,hgi,hiad") self.optimize_compare("chd,bde,agbc,hiad,bdi,cgh,agdb") self.optimize_compare("bdhe,acad,hiab,agac,hibd") def test_collapse(self): # Inner products self.optimize_compare("ab,ab,c->") self.optimize_compare("ab,ab,c->c") self.optimize_compare("ab,ab,cd,cd->") self.optimize_compare("ab,ab,cd,cd->ac") 
self.optimize_compare("ab,ab,cd,cd->cd") self.optimize_compare("ab,ab,cd,cd,ef,ef->") def test_expand(self): # Outer products self.optimize_compare("ab,cd,ef->abcdef") self.optimize_compare("ab,cd,ef->acdf") self.optimize_compare("ab,cd,de->abcde") self.optimize_compare("ab,cd,de->be") self.optimize_compare("ab,bcd,cd->abcd") self.optimize_compare("ab,bcd,cd->abd") # codespell:ignore def test_edge_cases(self): # Difficult edge cases for optimization self.optimize_compare("eb,cb,fb->cef") self.optimize_compare("dd,fb,be,cdb->cef") self.optimize_compare("bca,cdb,dbf,afc->") self.optimize_compare("dcc,fce,ea,dbf->ab") self.optimize_compare("fdf,cdd,ccd,afe->ae") self.optimize_compare("abcd,ad") self.optimize_compare("ed,fcd,ff,bcf->be") self.optimize_compare("baa,dcf,af,cde->be") self.optimize_compare("bd,db,eac->ace") self.optimize_compare("fff,fae,bef,def->abd") # codespell:ignore self.optimize_compare("efc,dbc,acf,fd->abe") self.optimize_compare("ba,ac,da->bcd") def test_inner_product(self): # Inner products self.optimize_compare("ab,ab") self.optimize_compare("ab,ba") self.optimize_compare("abc,abc") self.optimize_compare("abc,bac") self.optimize_compare("abc,cba") def test_random_cases(self): # Randomly built test cases self.optimize_compare("aab,fa,df,ecc->bde") self.optimize_compare("ecb,fef,bad,ed->ac") self.optimize_compare("bcf,bbb,fbf,fc->") self.optimize_compare("bb,ff,be->e") self.optimize_compare("bcb,bb,fc,fff->") self.optimize_compare("fbb,dfd,fc,fc->") self.optimize_compare("afd,ba,cc,dc->bf") self.optimize_compare("adb,bc,fa,cfc->d") self.optimize_compare("bbd,bda,fc,db->acf") self.optimize_compare("dba,ead,cad->bce") self.optimize_compare("aef,fbc,dca->bde") def test_combined_views_mapping(self): # gh-10792 a = np.arange(9).reshape(1, 1, 3, 1, 3) b = np.einsum("bbcdc->d", a) assert_equal(b, [12]) def test_broadcasting_dot_cases(self): # Ensures broadcasting cases are not mistaken for GEMM a = np.random.rand(1, 5, 4) b = np.random.rand(4, 6) c = np.random.rand(5, 6) d = np.random.rand(10) self.optimize_compare("ijk,kl,jl", operands=[a, b, c]) self.optimize_compare("ijk,kl,jl,i->i", operands=[a, b, c, d]) e = np.random.rand(1, 1, 5, 4) f = np.random.rand(7, 7) self.optimize_compare("abjk,kl,jl", operands=[e, b, c]) self.optimize_compare("abjk,kl,jl,ab->ab", operands=[e, b, c, f]) # Edge case found in gh-11308 g = np.arange(64).reshape(2, 4, 8) self.optimize_compare("obk,ijk->ioj", operands=[g, g]) @xfail # (reason="order='F' not supported") def test_output_order(self): # Ensure output order is respected for optimize cases, the below # contraction should yield a reshaped tensor view # gh-16415 a = np.ones((2, 3, 5), order="F") b = np.ones((4, 3), order="F") for opt in [True, False]: tmp = np.einsum("...ft,mf->...mt", a, b, order="a", optimize=opt) assert_(tmp.flags.f_contiguous) tmp = np.einsum("...ft,mf->...mt", a, b, order="f", optimize=opt) assert_(tmp.flags.f_contiguous) tmp = np.einsum("...ft,mf->...mt", a, b, order="c", optimize=opt) assert_(tmp.flags.c_contiguous) tmp = np.einsum("...ft,mf->...mt", a, b, order="k", optimize=opt) assert_(tmp.flags.c_contiguous is False) assert_(tmp.flags.f_contiguous is False) tmp = np.einsum("...ft,mf->...mt", a, b, optimize=opt) assert_(tmp.flags.c_contiguous is False) assert_(tmp.flags.f_contiguous is False) c = np.ones((4, 3), order="C") for opt in [True, False]: tmp = np.einsum("...ft,mf->...mt", a, c, order="a", optimize=opt) assert_(tmp.flags.c_contiguous) d = np.ones((2, 3, 5), order="C") for opt in [True, False]: tmp = 
np.einsum("...ft,mf->...mt", d, c, order="a", optimize=opt) assert_(tmp.flags.c_contiguous) @skip(reason="no pytorch analog")
TestEinsum
python
pytorch__pytorch
torch/_vendor/packaging/version.py
{ "start": 4491, "end": 16236 }
class ____(_BaseVersion): """This class abstracts handling of a project's versions. A :class:`Version` instance is comparison aware and can be compared and sorted using the standard Python interfaces. >>> v1 = Version("1.0a5") >>> v2 = Version("1.0") >>> v1 <Version('1.0a5')> >>> v2 <Version('1.0')> >>> v1 < v2 True >>> v1 == v2 False >>> v1 > v2 False >>> v1 >= v2 False >>> v1 <= v2 True """ _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE) _key: CmpKey def __init__(self, version: str) -> None: """Initialize a Version object. :param version: The string representation of a version which will be parsed and normalized before use. :raises InvalidVersion: If the ``version`` does not conform to PEP 440 in any way then this exception will be raised. """ # Validate the version and parse it into pieces match = self._regex.search(version) if not match: raise InvalidVersion(f"Invalid version: '{version}'") # Store the parsed out pieces of the version self._version = _Version( epoch=int(match.group("epoch")) if match.group("epoch") else 0, release=tuple(int(i) for i in match.group("release").split(".")), pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")), post=_parse_letter_version( match.group("post_l"), match.group("post_n1") or match.group("post_n2") ), dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")), local=_parse_local_version(match.group("local")), ) # Generate a key which will be used for sorting self._key = _cmpkey( self._version.epoch, self._version.release, self._version.pre, self._version.post, self._version.dev, self._version.local, ) def __repr__(self) -> str: """A representation of the Version that shows all internal state. >>> Version('1.0.0') <Version('1.0.0')> """ return f"<Version('{self}')>" def __str__(self) -> str: """A string representation of the version that can be rounded-tripped. >>> str(Version("1.0a5")) '1.0a5' """ parts = [] # Epoch if self.epoch != 0: parts.append(f"{self.epoch}!") # Release segment parts.append(".".join(str(x) for x in self.release)) # Pre-release if self.pre is not None: parts.append("".join(str(x) for x in self.pre)) # Post-release if self.post is not None: parts.append(f".post{self.post}") # Development release if self.dev is not None: parts.append(f".dev{self.dev}") # Local version segment if self.local is not None: parts.append(f"+{self.local}") return "".join(parts) @property def epoch(self) -> int: """The epoch of the version. >>> Version("2.0.0").epoch 0 >>> Version("1!2.0.0").epoch 1 """ return self._version.epoch @property def release(self) -> Tuple[int, ...]: """The components of the "release" segment of the version. >>> Version("1.2.3").release (1, 2, 3) >>> Version("2.0.0").release (2, 0, 0) >>> Version("1!2.0.0.post0").release (2, 0, 0) Includes trailing zeroes but not the epoch or any pre-release / development / post-release suffixes. """ return self._version.release @property def pre(self) -> Optional[Tuple[str, int]]: """The pre-release segment of the version. >>> print(Version("1.2.3").pre) None >>> Version("1.2.3a1").pre ('a', 1) >>> Version("1.2.3b1").pre ('b', 1) >>> Version("1.2.3rc1").pre ('rc', 1) """ return self._version.pre @property def post(self) -> Optional[int]: """The post-release number of the version. >>> print(Version("1.2.3").post) None >>> Version("1.2.3.post1").post 1 """ return self._version.post[1] if self._version.post else None @property def dev(self) -> Optional[int]: """The development number of the version. 
>>> print(Version("1.2.3").dev) None >>> Version("1.2.3.dev1").dev 1 """ return self._version.dev[1] if self._version.dev else None @property def local(self) -> Optional[str]: """The local version segment of the version. >>> print(Version("1.2.3").local) None >>> Version("1.2.3+abc").local 'abc' """ if self._version.local: return ".".join(str(x) for x in self._version.local) else: return None @property def public(self) -> str: """The public portion of the version. >>> Version("1.2.3").public '1.2.3' >>> Version("1.2.3+abc").public '1.2.3' >>> Version("1.2.3+abc.dev1").public '1.2.3' """ return str(self).split("+", 1)[0] @property def base_version(self) -> str: """The "base version" of the version. >>> Version("1.2.3").base_version '1.2.3' >>> Version("1.2.3+abc").base_version '1.2.3' >>> Version("1!1.2.3+abc.dev1").base_version '1!1.2.3' The "base version" is the public version of the project without any pre or post release markers. """ parts = [] # Epoch if self.epoch != 0: parts.append(f"{self.epoch}!") # Release segment parts.append(".".join(str(x) for x in self.release)) return "".join(parts) @property def is_prerelease(self) -> bool: """Whether this version is a pre-release. >>> Version("1.2.3").is_prerelease False >>> Version("1.2.3a1").is_prerelease True >>> Version("1.2.3b1").is_prerelease True >>> Version("1.2.3rc1").is_prerelease True >>> Version("1.2.3dev1").is_prerelease True """ return self.dev is not None or self.pre is not None @property def is_postrelease(self) -> bool: """Whether this version is a post-release. >>> Version("1.2.3").is_postrelease False >>> Version("1.2.3.post1").is_postrelease True """ return self.post is not None @property def is_devrelease(self) -> bool: """Whether this version is a development release. >>> Version("1.2.3").is_devrelease False >>> Version("1.2.3.dev1").is_devrelease True """ return self.dev is not None @property def major(self) -> int: """The first item of :attr:`release` or ``0`` if unavailable. >>> Version("1.2.3").major 1 """ return self.release[0] if len(self.release) >= 1 else 0 @property def minor(self) -> int: """The second item of :attr:`release` or ``0`` if unavailable. >>> Version("1.2.3").minor 2 >>> Version("1").minor 0 """ return self.release[1] if len(self.release) >= 2 else 0 @property def micro(self) -> int: """The third item of :attr:`release` or ``0`` if unavailable. >>> Version("1.2.3").micro 3 >>> Version("1").micro 0 """ return self.release[2] if len(self.release) >= 3 else 0 def _parse_letter_version( letter: Optional[str], number: Union[str, bytes, SupportsInt, None] ) -> Optional[Tuple[str, int]]: if letter: # We consider there to be an implicit 0 in a pre-release if there is # not a numeral associated with it. if number is None: number = 0 # We normalize any letters to their lower case form letter = letter.lower() # We consider some words to be alternate spellings of other words and # in those cases we want to normalize the spellings to our preferred # spelling. if letter == "alpha": letter = "a" elif letter == "beta": letter = "b" elif letter in ["c", "pre", "preview"]: letter = "rc" elif letter in ["rev", "r"]: letter = "post" return letter, int(number) if not letter and number: # We assume if we are given a number, but we are not given a letter # then this is using the implicit post release syntax (e.g. 
1.0-1) letter = "post" return letter, int(number) return None _local_version_separators = re.compile(r"[\._-]") def _parse_local_version(local: Optional[str]) -> Optional[LocalType]: """ Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve"). """ if local is not None: return tuple( part.lower() if not part.isdigit() else int(part) for part in _local_version_separators.split(local) ) return None def _cmpkey( epoch: int, release: Tuple[int, ...], pre: Optional[Tuple[str, int]], post: Optional[Tuple[str, int]], dev: Optional[Tuple[str, int]], local: Optional[LocalType], ) -> CmpKey: # When we compare a release version, we want to compare it with all of the # trailing zeros removed. So we'll use a reverse the list, drop all the now # leading zeros until we come to something non zero, then take the rest # re-reverse it back into the correct order and make it a tuple and use # that for our sorting key. _release = tuple( reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release)))) ) # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0. # We'll do this by abusing the pre segment, but we _only_ want to do this # if there is not a pre or a post segment. If we have one of those then # the normal sorting rules will handle this case correctly. if pre is None and post is None and dev is not None: _pre: CmpPrePostDevType = NegativeInfinity # Versions without a pre-release (except as noted above) should sort after # those with one. elif pre is None: _pre = Infinity else: _pre = pre # Versions without a post segment should sort before those with one. if post is None: _post: CmpPrePostDevType = NegativeInfinity else: _post = post # Versions without a development segment should sort after those with one. if dev is None: _dev: CmpPrePostDevType = Infinity else: _dev = dev if local is None: # Versions without a local segment should sort before those with one. _local: CmpLocalType = NegativeInfinity else: # Versions with a local segment need that segment parsed to implement # the sorting rules in PEP440. # - Alpha numeric segments sort before numeric segments # - Alpha numeric segments sort lexicographically # - Numeric segments sort numerically # - Shorter versions sort before longer versions when the prefixes # match exactly _local = tuple( (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local ) return epoch, _release, _pre, _post, _dev, _local
Version
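A short sketch of how the Version class above orders releases (illustrative; it imports the standalone packaging distribution, which this vendored module mirrors):

from packaging.version import Version  # assumed stand-in for the vendored module

versions = ["1.0", "1.0a5", "1.0.post1", "1.0.dev3", "1!0.5"]
print(sorted(versions, key=Version))
# ['1.0.dev3', '1.0a5', '1.0', '1.0.post1', '1!0.5']
# dev releases sort before pre-releases, pre-releases before the final release,
# post-releases after it, and a nonzero epoch outranks every other component.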
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_embed_image05.py
{ "start": 315, "end": 930 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("embed_image05.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file with image(s).""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() worksheet.write_dynamic_array_formula(0, 0, 2, 0, "=LEN(B1:B3)", None, 0) worksheet.embed_image(8, 4, self.image_dir + "red.png") workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
PrefectHQ__prefect
src/integrations/prefect-databricks/prefect_databricks/models/jobs.py
{ "start": 167434, "end": 168473 }
class ____(BaseModel): """ See source code for the fields' description. """ model_config = ConfigDict(extra="allow", frozen=True) created_time: Optional[int] = Field( None, description=( "The time at which this job was created in epoch milliseconds (milliseconds" " since 1/1/1970 UTC)." ), examples=[1601370337343], ) creator_user_name: Optional[str] = Field( None, description=( "The creator user name. This field won’t be included in the response if the" " user has already been deleted." ), examples=["user.name@databricks.com"], ) job_id: Optional[int] = Field( None, description="The canonical identifier for this job.", examples=[11223344] ) settings: Optional[JobSettings] = Field( None, description=( "Settings for this job and all of its runs. These settings can be updated" " using the `resetJob` method." ), )
Job
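Illustrative construction of the model above, with hypothetical values. Because the model is configured with extra="allow" and frozen=True, unknown keys returned by the Databricks API are preserved on the instance while the instance itself stays immutable:

job = Job(
    job_id=11223344,
    creator_user_name="user.name@databricks.com",
    created_time=1601370337343,
)
assert job.job_id == 11223344
job.model_dump(exclude_none=True)  # pydantic v2 serialization; extra keys would be kept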
python
pypa__warehouse
tests/unit/organizations/test_tasks.py
{ "start": 3496, "end": 5375 }
class ____: def test_delete_declined_organization_applications(self, db_request): # Create an organization_application that's ready for cleanup organization_application = OrganizationApplicationFactory.create() organization_application.is_active = False organization_application.status = OrganizationApplicationStatus.Declined organization_application.updated = datetime.datetime.now() - datetime.timedelta( days=31 ) # Create an organization_application that's not ready to be cleaned up yet organization_application2 = OrganizationApplicationFactory.create() organization_application2.is_active = False organization_application2.status = OrganizationApplicationStatus.Declined organization_application2.updated = datetime.datetime.now() assert ( db_request.db.query(OrganizationApplication.id) .filter(OrganizationApplication.id == organization_application.id) .count() == 1 ) assert ( db_request.db.query(OrganizationApplication.id) .filter(OrganizationApplication.id == organization_application2.id) .count() == 1 ) assert db_request.db.query(OrganizationApplication).count() == 2 delete_declined_organization_applications(db_request) assert not ( db_request.db.query(OrganizationApplication.id) .filter(OrganizationApplication.id == organization_application.id) .count() ) assert ( db_request.db.query(OrganizationApplication.id) .filter(OrganizationApplication.id == organization_application2.id) .count() ) assert db_request.db.query(OrganizationApplication).count() == 1
TestDeleteOrganizationApplications
python
getsentry__sentry
tests/sentry/deletions/tasks/test_scheduled.py
{ "start": 8333, "end": 9300 }
class ____(RegionalRunScheduleDeletionTest): @property def ScheduledDeletion(self) -> type[BaseScheduledDeletion]: return ScheduledDeletion def run_scheduled_deletions(self) -> None: return run_scheduled_deletions_control() def reattempt_deletions(self) -> None: return reattempt_deletions_control() def create_simple_deletion(self) -> QuerySet[ApiApplication]: app = ApiApplication.objects.create(owner_id=self.user.id, allowed_origins="example.com") app.status = ApiApplicationStatus.pending_deletion app.save() return ApiApplication.objects.filter(id=app.id) def create_does_not_proceed_deletion(self) -> QuerySet[ApiApplication]: app = ApiApplication.objects.create(owner_id=self.user.id, allowed_origins="example.com") app.status = ApiApplicationStatus.active app.save() return ApiApplication.objects.filter(id=app.id)
RunControlScheduledDeletionTest
python
run-llama__llama_index
llama-index-integrations/postprocessor/llama-index-postprocessor-bedrock-rerank/tests/test_postprocessor_bedrock_rerank.py
{ "start": 263, "end": 3084 }
class ____(TestCase): def test_class(self): names_of_base_classes = [b.__name__ for b in BedrockRerank.__mro__] self.assertIn(BaseNodePostprocessor.__name__, names_of_base_classes) def test_bedrock_rerank(self): exp_rerank_response = { "results": [ { "index": 2, "relevanceScore": 0.9, }, { "index": 3, "relevanceScore": 0.8, }, ] } input_nodes = [ NodeWithScore(node=TextNode(id_="1", text="first 1")), NodeWithScore(node=TextNode(id_="2", text="first 2")), NodeWithScore(node=TextNode(id_="3", text="last 1")), NodeWithScore(node=TextNode(id_="4", text="last 2")), ] expected_nodes = [ NodeWithScore(node=TextNode(id_="3", text="last 1"), score=0.9), NodeWithScore(node=TextNode(id_="4", text="last 2"), score=0.8), ] bedrock_client = boto3.client("bedrock-agent-runtime", region_name="us-west-2") reranker = BedrockRerank(client=bedrock_client, top_n=2) with mock.patch.object( bedrock_client, "rerank", return_value=exp_rerank_response ): query_bundle = QueryBundle(query_str="last") actual_nodes = reranker.postprocess_nodes( input_nodes, query_bundle=query_bundle ) self.assertEqual(len(actual_nodes), len(expected_nodes)) for actual_node_with_score, expected_node_with_score in zip( actual_nodes, expected_nodes ): self.assertEqual( actual_node_with_score.node.get_content(), expected_node_with_score.node.get_content(), ) self.assertAlmostEqual( actual_node_with_score.score, expected_node_with_score.score ) def test_bedrock_rerank_consistent_top_n(self): input_nodes = [NodeWithScore(node=TextNode(id_="4", text="last 1"))] bedrock_client = boto3.client("bedrock-agent-runtime", region_name="us-west-2") reranker = BedrockRerank(client=bedrock_client, top_n=4) self.assertEqual(reranker.top_n, 4) with mock.patch.object(bedrock_client, "rerank") as patched_rerank: reranker.postprocess_nodes(input_nodes, query_str="last") self.assertTrue(patched_rerank.called) num_results = patched_rerank.call_args.kwargs["rerankingConfiguration"][ "bedrockRerankingConfiguration" ]["numberOfResults"] self.assertEqual(num_results, len(input_nodes)) self.assertEqual(reranker.top_n, 4)
TestBedrockRerank
python
dagster-io__dagster
python_modules/dagster/dagster/_core/errors.py
{ "start": 5795, "end": 7852 }
class ____(DagsterError): """Indicates that you have attempted to construct a config with an invalid value. Acceptable values for config types are any of: 1. A Python primitive type that resolves to a Dagster config type (:py:class:`~python:int`, :py:class:`~python:float`, :py:class:`~python:bool`, :py:class:`~python:str`, or :py:class:`~python:list`). 2. A Dagster config type: :py:data:`~dagster.Int`, :py:data:`~dagster.Float`, :py:data:`~dagster.Bool`, :py:data:`~dagster.String`, :py:data:`~dagster.StringSource`, :py:data:`~dagster.Any`, :py:class:`~dagster.Array`, :py:data:`~dagster.Noneable`, :py:data:`~dagster.Enum`, :py:class:`~dagster.Selector`, :py:class:`~dagster.Shape`, or :py:class:`~dagster.Permissive`. 3. A bare python dictionary, which will be automatically wrapped in :py:class:`~dagster.Shape`. Values of the dictionary are resolved recursively according to the same rules. 4. A bare python list of length one which itself is config type. Becomes :py:class:`Array` with list element as an argument. 5. An instance of :py:class:`~dagster.Field`. """ def __init__(self, original_root, current_value, stack, reason=None, **kwargs): self.original_root = original_root self.current_value = current_value self.stack = stack super().__init__( ( "Error defining config. Original value passed: {original_root}. " "{stack_str}{current_value} " "cannot be resolved.{reason_str}" + CONFIG_ERROR_VERBIAGE ).format( original_root=repr(original_root), stack_str="Error at stack path :" + ":".join(stack) + ". " if stack else "", current_value=repr(current_value), reason_str=f" Reason: {reason}." if reason else "", ), **kwargs, )
DagsterInvalidConfigDefinitionError
python
sqlalchemy__sqlalchemy
examples/inheritance/joined.py
{ "start": 957, "end": 1399 }
class ____(Base):
    __tablename__ = "person"

    id: Mapped[intpk]
    company_id: Mapped[int] = mapped_column(ForeignKey("company.id"))
    name: Mapped[str50]
    type: Mapped[str50]

    company: Mapped[Company] = relationship(back_populates="employees")

    __mapper_args__ = {
        "polymorphic_identity": "person",
        "polymorphic_on": "type",
    }

    def __repr__(self):
        return f"Ordinary person {self.name}"
Person
python
apache__airflow
providers/amazon/tests/unit/amazon/aws/executors/ecs/test_utils.py
{ "start": 16771, "end": 18349 }
class ____:
    """Test _recursive_flatten_dict function."""

    def test_flat_dict(self):
        """Test flattening a flat dictionary."""
        input_dict = {"a": "value1", "b": "value2"}
        expected = {"a": "value1", "b": "value2"}
        assert _recursive_flatten_dict(input_dict) == expected

    def test_nested_dict(self):
        """Test flattening a nested dictionary."""
        input_dict = {"a": "value1", "b": {"c": "value2", "d": "value3"}}
        expected = {"a": "value1", "c": "value2", "d": "value3"}
        assert _recursive_flatten_dict(input_dict) == expected

    def test_deeply_nested_dict(self):
        """Test flattening a deeply nested dictionary."""
        input_dict = {"a": {"b": {"c": {"d": "value"}}}}
        expected = {"d": "value"}
        assert _recursive_flatten_dict(input_dict) == expected

    def test_mixed_dict(self):
        """Test flattening a dictionary with mixed nested and flat values."""
        input_dict = {"a": "value1", "b": {"c": "value2"}, "d": "value3"}
        expected = {"a": "value1", "c": "value2", "d": "value3"}
        assert _recursive_flatten_dict(input_dict) == expected

    def test_empty_dict(self):
        """Test flattening an empty dictionary."""
        assert _recursive_flatten_dict({}) == {}

    def test_dict_with_empty_nested_dict(self):
        """Test flattening a dictionary with empty nested dictionaries."""
        input_dict = {"a": "value1", "b": {}}
        expected = {"a": "value1"}
        assert _recursive_flatten_dict(input_dict) == expected
TestRecursiveFlattenDict
python
huggingface__transformers
src/transformers/models/informer/modeling_informer.py
{ "start": 10382, "end": 10684 }
class ____(nn.Module):
    def __init__(self, feature_size, d_model):
        super().__init__()
        self.value_projection = nn.Linear(in_features=feature_size, out_features=d_model, bias=False)

    def forward(self, x):
        return self.value_projection(x)


@auto_docstring
InformerValueEmbedding
python
scrapy__scrapy
tests/mockserver/http_resources.py
{ "start": 5344, "end": 5669 }
class ____(LeafResource):
    def render_GET(self, request):
        request.setHeader(b"Content-Length", b"1024")
        self.deferRequest(request, 0, self._delayedRender, request)
        return NOT_DONE_YET

    def _delayedRender(self, request):
        request.write(b"partial content\n")
        request.finish()
Partial
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/distributions/special_math_test.py
{ "start": 13374, "end": 17571 }
class ____(test.TestCase): # Note that scipy.stats.laplace does not have a stable Log CDF, so we cannot # rely on scipy to cross check the extreme values. # Test will be done differently over different ranges. These are the values # such that when exceeded by x, produce output that causes the naive (scipy) # implementation to have numerical issues. # # If x = log(1 / (2 * eps)), then 0.5 * exp{-x} = eps. # With inserting eps = np.finfo(dtype).eps, we see that log(1 / (2 * eps)) is # the value of x such that any larger value will result in # 1 - 0.5 * exp{-x} = 0, which will cause the log_cdf_laplace code to take a # log # of zero. We therefore choose these as our cutoffs for testing. CUTOFF_FLOAT64_UPPER = np.log(1. / (2. * np.finfo(np.float64).eps)) - 1. CUTOFF_FLOAT32_UPPER = np.log(1. / (2. * np.finfo(np.float32).eps)) - 1. def assertAllTrue(self, x): self.assertAllEqual(np.ones_like(x, dtype=np.bool_), x) def _test_grid_log(self, dtype, scipy_dtype, grid_spec, error_spec): with self.cached_session(): grid = _make_grid(dtype, grid_spec) actual = sm.log_cdf_laplace(grid).eval() # Basic tests. # isfinite checks for NaN and Inf. self.assertAllTrue(np.isfinite(actual)) self.assertAllTrue((actual < 0)) _check_strictly_increasing(actual) # Versus scipy. if not stats: return scipy_dist = stats.laplace(loc=0., scale=1.) expected = scipy_dist.logcdf(grid.astype(scipy_dtype)) self.assertAllClose( expected.astype(np.float64), actual.astype(np.float64), rtol=error_spec.rtol, atol=error_spec.atol) @test_util.run_deprecated_v1 def test_float32_lower_and_mid_segment_scipy_float32_ok(self): # Choose values mild enough that we can use scipy in float32, which will # allow for a high accuracy match to scipy (since we both use float32). self._test_grid_log( np.float32, # dtype np.float32, # scipy_dtype GridSpec(min=-10, max=self.CUTOFF_FLOAT32_UPPER - 5, shape=[100]), ErrorSpec(rtol=5e-4, atol=0)) @test_util.run_deprecated_v1 def test_float32_all_segments_with_scipy_float64_ok(self): # Choose values outside the range where scipy float32 works. # Let scipy use float64. This means we # won't be exactly the same since we are in float32. self._test_grid_log( np.float32, # dtype np.float64, # scipy_dtype GridSpec(min=-50, max=self.CUTOFF_FLOAT32_UPPER + 5, shape=[100]), ErrorSpec(rtol=0.05, atol=0)) @test_util.run_deprecated_v1 def test_float32_extreme_values_result_and_gradient_finite_and_nonzero(self): with self.cached_session() as sess: # On the lower branch, log_cdf_laplace(x) = x, so we know this will be # fine, but test to -200 anyways. grid = _make_grid( np.float32, GridSpec(min=-200, max=80, shape=[20, 100])) grid = ops.convert_to_tensor(grid) actual = sm.log_cdf_laplace(grid) grad = gradients_impl.gradients(actual, grid)[0] actual_, grad_ = self.evaluate([actual, grad]) # isfinite checks for NaN and Inf. self.assertAllTrue(np.isfinite(actual_)) self.assertAllTrue(np.isfinite(grad_)) self.assertFalse(np.any(actual_ == 0)) self.assertFalse(np.any(grad_ == 0)) @test_util.run_deprecated_v1 def test_float64_extreme_values_result_and_gradient_finite_and_nonzero(self): with self.cached_session() as sess: # On the lower branch, log_cdf_laplace(x) = x, so we know this will be # fine, but test to -200 anyways. grid = _make_grid( np.float64, GridSpec(min=-200, max=700, shape=[20, 100])) grid = ops.convert_to_tensor(grid) actual = sm.log_cdf_laplace(grid) grad = gradients_impl.gradients(actual, grid)[0] actual_, grad_ = self.evaluate([actual, grad]) # isfinite checks for NaN and Inf. 
self.assertAllTrue(np.isfinite(actual_)) self.assertAllTrue(np.isfinite(grad_)) self.assertFalse(np.any(actual_ == 0)) self.assertFalse(np.any(grad_ == 0)) if __name__ == "__main__": test.main()
LogCDFLaplaceTest
python
pytorch__pytorch
torch/_inductor/autoheuristic/autoheuristic_utils.py
{ "start": 2899, "end": 11308 }
class ____: def __init__( self, shared_memory: Any, device_capa: tuple[int, int], choices: list[Choice], name: str, ) -> None: # use amount of shared_memory and device_capability to identify GPU # TODO(AlnisM): there might be a better way to do this self.shared_memory = shared_memory self.device_capa = device_capa self.choices = choices self.name = name def to_dict(self) -> dict[str, Value]: return { "shared_memory": self.shared_memory, "device_capa": self.device_capa, "name": self.name, } def get_metadata_str_from_log(log_path: str) -> str: with open(log_path, newline="") as file: json_string = file.readline().strip() return json_string def check_minsize(context: AHContext, minsize: int) -> bool: return ( context.get_value("m") >= minsize and context.get_value("k") >= minsize and context.get_value("n") >= minsize ) def pad_mm_precondition(metadata: AHMetadata, context: AHContext) -> bool: if metadata.shared_memory == 166912 and metadata.device_capa == (8, 0): # A100 precondition return check_minsize(context, 512) elif metadata.shared_memory == 232448 and metadata.device_capa == (9, 0): # H100 precondition return check_minsize(context, 768) return True def get_mixedmm_precondition(metadata: AHMetadata, context: AHContext) -> bool: m = context.get_value("m") k = context.get_value("k") n = context.get_value("n") if m > 128 or k < 1024 or n < 1024: return False mat1_iscontig = context.get_value("mat1_iscontig") mat2_iscontig = context.get_value("mat2_iscontig") return mat1_iscontig and not mat2_iscontig def get_mult_dims_ops() -> list[AHOperation]: m_times_k_op = AHOperation("m*k", lambda data: data["m"] * data["k"]) m_times_n_op = AHOperation("m*n", lambda data: data["m"] * data["n"]) k_times_n_op = AHOperation("k*n", lambda data: data["k"] * data["n"]) return [m_times_k_op, m_times_n_op, k_times_n_op] def get_arith_intensity(data: Any) -> float: m = data["m"] k = data["k"] n = data["n"] if m == 0 or k == 0 or n == 0: return 0.0 return m * k * n / (m * k + k * n + m * n) def pad_mm_operations() -> list[AHOperation]: mult_dims_ops = get_mult_dims_ops() k_div_m_times_n_op = AHOperation( "k/(m*n)", lambda data: data["k"] / (data["m"] * data["n"]) ) def bfloat_perf_hit(data: Any) -> bool: m = data["m"] k = data["k"] n = data["n"] is_bfloat = str(data["mat1_dtype"]) == "torch.bfloat16" return k > (m * 1024) and k > (n * 1024) and is_bfloat bfloat_perf_hit_op = AHOperation( "bfloat_perf_hit", bfloat_perf_hit, is_categorical=True ) arith_intensity_op = AHOperation("arith_intensity", get_arith_intensity) dims_need_padding_ops = get_dims_need_padding_ops() dims_multiple_ops = get_dims_multiple_ops() is_contig_ops = get_is_contig_ops() ah_operations = mult_dims_ops + [ k_div_m_times_n_op, bfloat_perf_hit_op, arith_intensity_op, ] ah_operations.extend(dims_need_padding_ops) ah_operations.extend(dims_multiple_ops) ah_operations.extend(is_contig_ops) return ah_operations def between_op(data: Any, dim: str, lower: int, upper: int) -> bool: return data[dim] >= lower and data[dim] <= upper def between_ops() -> list[AHOperation]: dims = ["m", "k", "n"] limits = [(1, 16), (17, 32), (33, 64), (65, 128), (129, 256)] ah_operations = [] for dim in dims: for lower, upper in limits: between_op_fn = functools.partial( between_op, dim=dim, lower=lower, upper=upper ) # using 'LEQ' instead of '<=' because '<=' cannot be exported to dot between_op_name = f"{lower}LEQ{dim}LEQ{upper}" ah_operations.append( AHOperation(between_op_name, between_op_fn, is_categorical=True) ) return ah_operations def pow2_op(data: Any, dim: 
str, exponent: int) -> bool: return data[dim] == 2**exponent def mm_operations() -> list[AHOperation]: mult_dims_ops = get_mult_dims_ops() arith_intensity_op = AHOperation("arith_intensity", get_arith_intensity) return mult_dims_ops + [arith_intensity_op] def mixed_mm_operations() -> list[AHOperation]: return mm_operations() + between_ops() def is_multiple(data: Any, dim: str, mult: int) -> bool: return data[dim] % mult == 0 def get_dims_multiple_ops() -> list[AHOperation]: multiples = [2, 4, 8, 16, 32] dims = ["m", "k", "n"] dims_multiple_ops = [] for dim in dims: for mult in multiples: is_multiple_fn = functools.partial(is_multiple, dim=dim, mult=mult) dims_multiple_op = AHOperation( f"{dim}_multiple_{mult}", is_multiple_fn, is_categorical=True ) dims_multiple_ops.append(dims_multiple_op) return dims_multiple_ops def get_dims_need_padding_ops() -> list[AHOperation]: def mat1_innermost_needs_padding_fn(data: Any) -> bool: mat1_stride_0 = data["mat1_stride_0"] mat1_stride_1 = data["mat1_stride_1"] m_padded_length = data["m_padded_length"] k_padded_length = data["k_padded_length"] mat1_innermost_needs_padding = False if mat1_stride_0 == 1 and m_padded_length != 0: mat1_innermost_needs_padding = True if mat1_stride_1 == 1 and k_padded_length != 0: mat1_innermost_needs_padding = True return mat1_innermost_needs_padding mat1_innermost_op = AHOperation( "mat1_innermost_needs_padding", mat1_innermost_needs_padding_fn, is_categorical=True, ) def mat2_innermost_needs_padding_fn(data: Any) -> bool: mat2_stride_0 = data["mat2_stride_0"] mat2_stride_1 = data["mat2_stride_1"] k_padded_length = data["k_padded_length"] n_padded_length = data["n_padded_length"] mat2_innermost_needs_padding = False if mat2_stride_0 == 1 and k_padded_length != 0: mat2_innermost_needs_padding = True if mat2_stride_1 == 1 and n_padded_length != 0: mat2_innermost_needs_padding = True return mat2_innermost_needs_padding mat2_innermost_op = AHOperation( "mat2_innermost_needs_padding", mat2_innermost_needs_padding_fn, is_categorical=True, ) def num_dims_needs_padding_fn(data: Any) -> int: m_padded_length = data["m_padded_length"] k_padded_length = data["k_padded_length"] n_padded_length = data["n_padded_length"] num_dims_needs_padding = 0 if m_padded_length != 0: num_dims_needs_padding += 1 if k_padded_length != 0: num_dims_needs_padding += 1 if n_padded_length != 0: num_dims_needs_padding += 1 return num_dims_needs_padding num_dims_op = AHOperation("num_dims_needs_padding", num_dims_needs_padding_fn) return [mat1_innermost_op, mat2_innermost_op, num_dims_op] def get_is_contig_ops() -> list[AHOperation]: def mat1_is_contig_fn(data: Any) -> bool: stride_0 = data["mat1_stride_0"] stride_1 = data["mat1_stride_1"] k = data["k"] return stride_0 == k and stride_1 == 1 mat1_is_contig_op = AHOperation( "mat1_iscontig", mat1_is_contig_fn, is_categorical=True ) def mat2_is_contig_fn(data: Any) -> bool: stride_0 = data["mat2_stride_0"] stride_1 = data["mat2_stride_1"] n = data["n"] return stride_0 == n and stride_1 == 1 mat2_is_contig_op = AHOperation( "mat2_iscontig", mat2_is_contig_fn, is_categorical=True ) return [mat1_is_contig_op, mat2_is_contig_op] def context_add_strides(context: AHContext, name: str, stride: tuple[int, ...]) -> None: for i, s in enumerate(stride): context.add_feature(f"{name}_stride_{i}", s) def context_add_using_tf32(context: AHContext, dtype: torch.dtype) -> None: using_tf32 = "not_float_32" if dtype == torch.float32: using_tf32 = torch.backends.cuda.matmul.allow_tf32 context.add_feature("using_tf32", using_tf32, 
is_categorical=True)
AHMetadata
python
sqlalchemy__sqlalchemy
test/orm/test_relationships.py
{ "start": 10982, "end": 13444 }
class ____(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table( "a", metadata, Column("id", Integer, primary_key=True), Column("bid", ForeignKey("b.id")), ) Table("b", metadata, Column("id", Integer, primary_key=True)) def _fixture(self, uselist=False): a, b = self.tables.a, self.tables.b class A(BasicEntity): pass class B(BasicEntity): pass self.mapper_registry.map_imperatively( A, a, properties={"b": relationship(B, uselist=uselist)} ) self.mapper_registry.map_imperatively(B, b) return A, B def test_joinedload_doesnt_produce_bogus_event(self): A, B = self._fixture() sess = fixture_session() b1 = B() sess.add(b1) sess.flush() a1 = A() sess.add(a1) sess.commit() # test that was broken by #3060 a1 = sess.query(A).options(joinedload(A.b)).first() a1.bid = b1.id sess.flush() eq_(a1.bid, b1.id) def test_init_doesnt_produce_scalar_event(self): A, B = self._fixture() sess = fixture_session() b1 = B() sess.add(b1) sess.flush() a1 = A() assert a1.b is None a1.bid = b1.id sess.add(a1) sess.flush() assert a1.bid is not None def test_init_doesnt_produce_collection_event(self): A, B = self._fixture(uselist=True) sess = fixture_session() b1 = B() sess.add(b1) sess.flush() a1 = A() assert a1.b == [] a1.bid = b1.id sess.add(a1) sess.flush() assert a1.bid is not None def test_scalar_relationship_overrides_fk(self): A, B = self._fixture() sess = fixture_session() b1 = B() sess.add(b1) sess.flush() a1 = A() a1.bid = b1.id a1.b = None sess.add(a1) sess.flush() assert a1.bid is None def test_collection_relationship_overrides_fk(self): A, B = self._fixture(uselist=True) sess = fixture_session() b1 = B() sess.add(b1) sess.flush() a1 = A() a1.bid = b1.id a1.b = [] sess.add(a1) sess.flush() # this is weird assert a1.bid is not None
M2ODontOverwriteFKTest
python
tensorflow__tensorflow
third_party/xla/xla/codegen/testlib/kernel_runner_test.py
{ "start": 1728, "end": 1955 }
class ____(absltest.TestCase):
    def test_output_same_as_input(self):
        array = np.array([1, 2, 3, 4], dtype=np.int32)
        got = create_literal(array)
        np.testing.assert_array_equal(np.asarray(got), array)
LiteralFromNpTest
python
mlflow__mlflow
mlflow/models/evaluation/artifacts.py
{ "start": 3568, "end": 6763 }
class ____(NamedTuple): from_path: bool type: type[EvaluationArtifact] ext: str def _infer_artifact_type_and_ext(artifact_name, raw_artifact, custom_metric_tuple): """ This function performs type and file extension inference on the provided artifact Args: artifact_name: The name of the provided artifact raw_artifact: The artifact object custom_metric_tuple: Containing a user provided function and its index in the ``custom_metrics`` parameter of ``mlflow.evaluate`` Returns: InferredArtifactProperties namedtuple """ exception_header = ( f"Custom metric function '{custom_metric_tuple.name}' at index " f"{custom_metric_tuple.index} in the `custom_metrics` parameter produced an " f"artifact '{artifact_name}'" ) # Given a string, first see if it is a path. Otherwise, check if it is a JsonEvaluationArtifact if isinstance(raw_artifact, str): potential_path = pathlib.Path(raw_artifact) if potential_path.exists(): raw_artifact = potential_path else: try: json.loads(raw_artifact) return _InferredArtifactProperties( from_path=False, type=JsonEvaluationArtifact, ext=".json" ) except JSONDecodeError: raise MlflowException( f"{exception_header} with string representation '{raw_artifact}' that is " f"neither a valid path to a file nor a JSON string." ) # Type inference based on the file extension if isinstance(raw_artifact, pathlib.Path): if not raw_artifact.exists(): raise MlflowException(f"{exception_header} with path '{raw_artifact}' does not exist.") if not raw_artifact.is_file(): raise MlflowException(f"{exception_header} with path '{raw_artifact}' is not a file.") if raw_artifact.suffix not in _EXT_TO_ARTIFACT_MAP: raise MlflowException( f"{exception_header} with path '{raw_artifact}' does not match any of the supported" f" file extensions: {', '.join(_EXT_TO_ARTIFACT_MAP.keys())}." ) return _InferredArtifactProperties( from_path=True, type=_EXT_TO_ARTIFACT_MAP[raw_artifact.suffix], ext=raw_artifact.suffix ) # Type inference based on object type if type(raw_artifact) in _TYPE_TO_ARTIFACT_MAP: return _InferredArtifactProperties( from_path=False, type=_TYPE_TO_ARTIFACT_MAP[type(raw_artifact)], ext=_TYPE_TO_EXT_MAP[type(raw_artifact)], ) # Given as other python object, we first attempt to infer as JsonEvaluationArtifact. If that # fails, we store it as PickleEvaluationArtifact try: json.dumps(raw_artifact, cls=NumpyEncoder) return _InferredArtifactProperties( from_path=False, type=JsonEvaluationArtifact, ext=".json" ) except TypeError: return _InferredArtifactProperties( from_path=False, type=PickleEvaluationArtifact, ext=".pickle" )
_InferredArtifactProperties
python
google__pytype
pytype/tools/environment_test.py
{ "start": 200, "end": 2092 }
class ____(unittest.TestCase): """Tests for environment.compute_pythonpath.""" def test_script_path(self): with test_utils.Tempdir() as d: f = d.create_file('foo.py') self.assertSequenceEqual(environment.compute_pythonpath([f]), [d.path]) def test_module_path(self): with test_utils.Tempdir() as d: d.create_file('__init__.py') f = d.create_file('foo.py') self.assertSequenceEqual( environment.compute_pythonpath([f]), [path_utils.dirname(d.path)] ) def test_subpackage(self): with test_utils.Tempdir() as d: d.create_file('__init__.py') d.create_file(file_utils.replace_separator('d/__init__.py')) f = d.create_file(file_utils.replace_separator('d/foo.py')) self.assertSequenceEqual( environment.compute_pythonpath([f]), [path_utils.dirname(d.path)] ) def test_multiple_paths(self): with test_utils.Tempdir() as d: f1 = d.create_file(file_utils.replace_separator('d1/foo.py')) f2 = d.create_file(file_utils.replace_separator('d2/foo.py')) self.assertSequenceEqual( environment.compute_pythonpath([f1, f2]), [path_utils.join(d.path, 'd2'), path_utils.join(d.path, 'd1')], ) def test_sort(self): with test_utils.Tempdir() as d: f1 = d.create_file(file_utils.replace_separator('d1/foo.py')) f2 = d.create_file(file_utils.replace_separator('d1/d2/foo.py')) f3 = d.create_file(file_utils.replace_separator('d1/d2/d3/foo.py')) path = [ path_utils.join(d.path, 'd1', 'd2', 'd3'), path_utils.join(d.path, 'd1', 'd2'), path_utils.join(d.path, 'd1'), ] self.assertSequenceEqual( environment.compute_pythonpath([f1, f2, f3]), path ) self.assertSequenceEqual( environment.compute_pythonpath([f3, f2, f1]), path )
TestComputePythonPath
python
langchain-ai__langchain
libs/core/langchain_core/messages/content.py
{ "start": 9712, "end": 11061 }
class ____(TypedDict):
    """A chunk of a tool call (yielded when streaming).

    When merging `ToolCallChunks` (e.g., via `AIMessageChunk.__add__`), all string
    attributes are concatenated. Chunks are only merged if their values of `index`
    are equal and not `None`.

    Example:
    ```python
    left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
    right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]

    (
        AIMessageChunk(content="", tool_call_chunks=left_chunks)
        + AIMessageChunk(content="", tool_call_chunks=right_chunks)
    ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]
    ```
    """

    # TODO: Consider making fields NotRequired[str] in the future.
    type: Literal["tool_call_chunk"]
    """Used for serialization."""

    id: str | None
    """An identifier associated with the tool call.

    An identifier is needed to associate a tool call request with a tool call
    result in events when multiple concurrent tool calls are made.
    """

    name: str | None
    """The name of the tool to be called."""

    args: str | None
    """The arguments to the tool call."""

    index: NotRequired[int | str]
    """The index of the tool call in a sequence."""

    extras: NotRequired[dict[str, Any]]
    """Provider-specific metadata."""
ToolCallChunk
python
kevin1024__vcrpy
vcr/stubs/boto3_stubs.py
{ "start": 230, "end": 332 }
class ____(VCRHTTPConnection, HTTPConnection):
    _baseclass = HTTPConnection
VCRRequestsHTTPConnection
python
anthropics__anthropic-sdk-python
src/anthropic/types/beta/beta_tool_reference_block_param.py
{ "start": 335, "end": 598 }
class ____(TypedDict, total=False):
    tool_name: Required[str]

    type: Required[Literal["tool_reference"]]

    cache_control: Optional[BetaCacheControlEphemeralParam]
    """Create a cache control breakpoint at this content block."""
BetaToolReferenceBlockParam
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typeNarrowingIsinstance21.py
{ "start": 871, "end": 1337 }
class ____:
    pass


def guard3(t: type[Any]) -> TypeIs[type[A]]:
    return True


def func3(t: type[B]):
    if guard3(t):
        reveal_type(t, expected_text="type[<subclass of B and A>]")
    else:
        reveal_type(t, expected_text="type[B]")


def guard4(t: Any) -> TypeIs[type[A]]:
    return True


def func4(t: B):
    if guard4(t):
        reveal_type(t, expected_text="<subclass of B and type[A]>")
    else:
        reveal_type(t, expected_text="B")
B
python
huggingface__transformers
src/transformers/models/glm4v_moe/modeling_glm4v_moe.py
{ "start": 40459, "end": 41509 }
class ____(GradientCheckpointingLayer):
    def __init__(self, config) -> None:
        super().__init__()
        self.norm1 = Glm4vMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.norm2 = Glm4vMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.attn = Glm4vMoeVisionAttention(config)
        self.mlp = Glm4vMoeisionMlp(config, bias=False)

    def forward(
        self,
        hidden_states: torch.Tensor,
        cu_seqlens: torch.Tensor,
        rotary_pos_emb: Optional[torch.Tensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs,
    ) -> torch.Tensor:
        hidden_states = hidden_states + self.attn(
            self.norm1(hidden_states),
            cu_seqlens=cu_seqlens,
            rotary_pos_emb=rotary_pos_emb,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = hidden_states + self.mlp(self.norm2(hidden_states))
        return hidden_states


@auto_docstring
Glm4vMoeVisionBlock
python
django__django
tests/aggregation_regress/models.py
{ "start": 2602, "end": 2796 }
class ____(models.Model):
    name = models.CharField(max_length=50)
    parent = models.ForeignKey(
        "self", models.SET_NULL, null=True, blank=True, related_name="children"
    )
SelfRefFK
python
pytest-dev__pytest
testing/test_collection.py
{ "start": 10902, "end": 13197 }
class ____: def test_custom_repr_failure(self, pytester: Pytester) -> None: p = pytester.makepyfile( """ import not_exists """ ) pytester.makeconftest( """ import pytest def pytest_collect_file(file_path, parent): return MyFile.from_parent(path=file_path, parent=parent) class MyError(Exception): pass class MyFile(pytest.File): def collect(self): raise MyError() def repr_failure(self, excinfo): if isinstance(excinfo.value, MyError): return "hello world" return pytest.File.repr_failure(self, excinfo) """ ) result = pytester.runpytest(p) result.stdout.fnmatch_lines(["*ERROR collecting*", "*hello world*"]) @pytest.mark.xfail(reason="other mechanism for adding to reporting needed") def test_collect_report_postprocessing(self, pytester: Pytester) -> None: p = pytester.makepyfile( """ import not_exists """ ) pytester.makeconftest( """ import pytest @pytest.hookimpl(wrapper=True) def pytest_make_collect_report(): rep = yield rep.headerlines += ["header1"] return rep """ ) result = pytester.runpytest(p) result.stdout.fnmatch_lines(["*ERROR collecting*", "*header1*"]) def test_collection_error_traceback_is_clean(self, pytester: Pytester) -> None: """When a collection error occurs, the report traceback doesn't contain internal pytest stack entries. Issue #11710. """ pytester.makepyfile( """ raise Exception("LOUSY") """ ) result = pytester.runpytest() result.stdout.fnmatch_lines( [ "*ERROR collecting*", "test_*.py:1: in <module>", ' raise Exception("LOUSY")', "E Exception: LOUSY", "*= short test summary info =*", ], consecutive=True, )
TestPrunetraceback
python
django__django
django/contrib/postgres/fields/ranges.py
{ "start": 10291, "end": 10507 }
class ____(models.Transform):
    lookup_name = "startswith"
    function = "lower"

    @property
    def output_field(self):
        return self.lhs.output_field.base_field


@RangeField.register_lookup
RangeStartsWith
python
streamlit__streamlit
e2e_playwright/conftest.py
{ "start": 21387, "end": 41123 }
class ____(Protocol): def __call__( self, element: ElementHandle | Locator | Page, *, image_threshold: float = 0.002, pixel_threshold: float = 0.05, name: str | None = None, fail_fast: bool = False, style: str | None = None, ) -> None: """Compare a screenshot with screenshot from a past run. Parameters ---------- element : ElementHandle or Locator The element to take a screenshot of. image_threshold : float, optional The allowed percentage of different pixels in the image. pixel_threshold : float, optional The allowed percentage of difference for a single pixel. name : str | None, optional The name of the screenshot without an extension. If not provided, the name of the test function will be used. fail_fast : bool, optional If True, the comparison will stop at the first pixel mismatch. """ @pytest.fixture(scope="session", autouse=True) def delete_output_dir(pytestconfig: Any) -> None: # Overwriting the default delete_output_dir fixture from pytest-playwright: # There seems to be a bug with the combination of pytest-playwright, xdist, # and pytest-rerunfailures where the output dir is deleted when it shouldn't be. # To prevent this issue, we are not deleting the output dir when running with # reruns and xdist. uses_xdist = ( pytestconfig.getoption("workerinput", None) or os.getenv("PYTEST_XDIST_WORKER"), ) uses_reruns = pytestconfig.getoption("reruns", None) if not (uses_xdist and uses_reruns): # Delete the output folder. Uses the same logic as the default # delete_output_dir fixture from pytest-playwright: # https://github.com/microsoft/playwright-pytest/blob/fb51327390ccbd3561c1777499934eb88296f1bf/pytest-playwright/pytest_playwright/pytest_playwright.py#L68 output_dir = pytestconfig.getoption("--output") if os.path.exists(output_dir): try: shutil.rmtree(output_dir) except FileNotFoundError: # When running in parallel, another thread may have already deleted the # files pass except OSError as error: if error.errno != 16: raise # We failed to remove folder, might be due to the whole folder being # mounted inside a container: # https://github.com/microsoft/playwright/issues/12106 # https://github.com/microsoft/playwright-python/issues/1781 # Do a best-effort to remove all files inside of it instead. entries = os.listdir(output_dir) for entry in entries: shutil.rmtree(entry) @pytest.fixture(scope="session") def output_folder(pytestconfig: Any) -> Path: """Fixture returning the directory that is used for all test failures information. This includes: - snapshot-tests-failures: This directory contains all the snapshots that did not match with the snapshots from past runs. The folder structure is based on the folder structure used in the main snapshots folder. - snapshot-updates: This directory contains all the snapshots that got updated in the current run based on folder structure used in the main snapshots folder. 
""" return Path( get_git_root() / "e2e_playwright" / pytestconfig.getoption("--output") ).resolve() @pytest.fixture def assert_snapshot( request: pytest.FixtureRequest, output_folder: Path, pytestconfig: Any, ) -> Generator[ImageCompareFunction, None, None]: """Fixture that compares a screenshot with screenshot from a past run.""" # Check if reruns are enabled for this test run flaky_marker = request.node.get_closest_marker("flaky") if flaky_marker and "reruns" in flaky_marker.kwargs: configured_reruns = flaky_marker.kwargs["reruns"] else: configured_reruns = pytestconfig.getoption("reruns", 0) # Get the current execution count: execution_count = getattr(request.node, "execution_count", 1) # True if this is the last rerun (or the only test run) is_last_rerun = execution_count - 1 == configured_reruns root_path = get_git_root() platform = str(sys.platform) module_name = request.module.__name__.split(".")[-1] test_function_name = request.node.originalname snapshot_dir: Path = ( root_path / "e2e_playwright" / "__snapshots__" / platform / module_name ) module_snapshot_failures_dir: Path = ( output_folder / "snapshot-tests-failures" / platform / module_name ) module_snapshot_updates_dir: Path = ( output_folder / "snapshot-updates" / platform / module_name ) snapshot_file_suffix = "" # Extract the parameter ids if they exist match = re.search(r"\[(.*?)\]", request.node.name) if match: snapshot_file_suffix = f"[{match.group(1)}]" snapshot_default_file_name: str = test_function_name + snapshot_file_suffix test_failure_messages: list[str] = [] def compare( element: ElementHandle | Locator | Page, *, image_threshold: float = 0.002, pixel_threshold: float = 0.05, name: str | None = None, fail_fast: bool = False, file_type: Literal["png", "jpg"] = "png", style: str | None = None, show_app_header: bool | None = None, ) -> None: """Compare a screenshot with screenshot from a past run. Parameters ---------- element : ElementHandle or Locator The element to take a screenshot of. image_threshold : float, optional The allowed percentage of different pixels in the image. pixel_threshold : float, optional The allowed percentage of difference for a single pixel to be considered different. name : str | None, optional The name of the screenshot without an extension. If not provided, the name of the test function will be used. fail_fast : bool, optional If True, the comparison will stop at the first pixel mismatch. file_type: "png" or "jpg" The file type of the screenshot. Defaults to "png". show_app_header : bool or None Whether to make the app header background transparent before taking the screenshot. If None (default), the app header will be shown based on the element type (page will always show the app header, other elements will hide it). 
""" nonlocal test_failure_messages nonlocal snapshot_default_file_name nonlocal module_snapshot_updates_dir nonlocal module_snapshot_failures_dir nonlocal snapshot_file_suffix if show_app_header is False or ( show_app_header is None and not isinstance(element, Page) ): # Make the app header background transparent: if style is None: style = "" style += " .stAppHeader { background: transparent; }" if file_type == "jpg": file_extension = ".jpg" img_bytes = element.screenshot( type="jpeg", quality=90, animations="disabled", style=style ) else: file_extension = ".png" img_bytes = element.screenshot( type="png", animations="disabled", style=style ) snapshot_file_name: str = snapshot_default_file_name if name: snapshot_file_name = name + snapshot_file_suffix snapshot_file_path: Path = ( snapshot_dir / f"{snapshot_file_name}{file_extension}" ) snapshot_updates_file_path: Path = ( module_snapshot_updates_dir / f"{snapshot_file_name}{file_extension}" ) snapshot_file_path.parent.mkdir(parents=True, exist_ok=True) test_failures_dir = module_snapshot_failures_dir / snapshot_file_name if test_failures_dir.exists(): # Remove the past runs failure dir for this specific screenshot shutil.rmtree(test_failures_dir) if not snapshot_file_path.exists(): snapshot_file_path.write_bytes(img_bytes) # Update this in updates folder: snapshot_updates_file_path.parent.mkdir(parents=True, exist_ok=True) snapshot_updates_file_path.write_bytes(img_bytes) # For missing snapshots, we don't want to directly fail in order to generate # all missing snapshots in one run. test_failure_messages.append(f"Missing snapshot for {snapshot_file_name}") return from pixelmatch.contrib.PIL import pixelmatch # Compare the new screenshot with the screenshot from past runs: img_a = Image.open(BytesIO(img_bytes)) img_b = Image.open(snapshot_file_path) img_diff = Image.new("RGBA", img_a.size) error_msg: str = "Unknown error" try: mismatch = pixelmatch( img_a, img_b, img_diff, threshold=pixel_threshold, fail_fast=fail_fast, alpha=0, ) total_pixels = img_a.size[0] * img_a.size[1] max_diff_pixels = int(image_threshold * total_pixels) if mismatch < max_diff_pixels: return error_msg = ( f"Snapshot mismatch for {snapshot_file_name} ({mismatch} pixels difference;" f" {mismatch / total_pixels * 100:.2f}%)" ) # Create new failures folder for this test: test_failures_dir.mkdir(parents=True, exist_ok=True) img_diff.save( f"{test_failures_dir}/diff_{snapshot_file_name}{file_extension}" ) img_a.save( f"{test_failures_dir}/actual_{snapshot_file_name}{file_extension}" ) img_b.save( f"{test_failures_dir}/expected_{snapshot_file_name}{file_extension}" ) except ValueError as ex: # Create new failures folder for this test: test_failures_dir.mkdir(parents=True, exist_ok=True) img_a.save( f"{test_failures_dir}/actual_{snapshot_file_name}{file_extension}" ) img_b.save( f"{test_failures_dir}/expected_{snapshot_file_name}{file_extension}" ) # ValueError is thrown when the images have different sizes # Calculate the relative difference in total pixels expected_pixels = img_b.size[0] * img_b.size[1] actual_pixels = img_a.size[0] * img_a.size[1] pixel_diff = abs(expected_pixels - actual_pixels) error_msg = ( f"Snapshot mismatch for {snapshot_file_name}. " f"Wrong size: expected={img_b.size}, actual={img_a.size} " f"({pixel_diff} pixels difference; " f"{pixel_diff / expected_pixels * 100:.2f}%). 
" f"Error: {ex}" ) if is_last_rerun: # If its the last rerun (or the only test run), update snapshots # and fail after all the other snapshots have been updated in the given # test. snapshot_updates_file_path.parent.mkdir(parents=True, exist_ok=True) snapshot_updates_file_path.write_bytes(img_bytes) # Add error to the list of test failures: test_failure_messages.append(error_msg) else: # If there are other test reruns that will follow, fail immediately # and avoid updating the snapshot. Failing here will correctly show a # test error in the Github UI, which enables our flaky test tracking # tool to work correctly. pytest.fail(error_msg) yield compare if test_failure_messages: pytest.fail( "Missing or mismatched snapshots: \n" + "\n".join(test_failure_messages) ) @pytest.fixture(autouse=True) def playwright_profiling( request: pytest.FixtureRequest, page: Page ) -> Generator[None, None, None]: if request.node.get_closest_marker("no_perf") or not is_supported_browser(page): yield return with measure_performance(page, test_name=request.node.name): yield # endregion # region Public utility methods def wait_for_app_run( page_or_locator: Page | Locator | FrameLocator, wait_delay: int = 100, initial_wait: int = 210, ) -> None: """Wait for the given page to finish running. Parameters ---------- page_or_locator : Page | Locator | FrameLocator The page or locator to wait for. wait_delay : int, optional The delay to wait for the rerun to finish. initial_wait : int, optional The initial wait before checking for the rerun to finish. This is needed for some widgets that have a debounce timeout. For example, pydeck charts have a debounce timeout of 200ms. """ page: Page if isinstance(page_or_locator, Locator): page = page_or_locator.page elif isinstance(page_or_locator, FrameLocator): page = page_or_locator.owner.page else: page = page_or_locator page.wait_for_timeout(initial_wait) if isinstance(page_or_locator, StaticPage): # Check that static connection established. page_or_locator.locator( "[data-testid='stApp'][data-test-connection-state='STATIC_CONNECTED']" ).wait_for( timeout=25000, state="attached", ) else: # Make sure that the websocket connection is established. page_or_locator.locator( "[data-testid='stApp'][data-test-connection-state='CONNECTED']" ).wait_for( timeout=25000, state="attached", ) # Wait until we know the script has started. We determine this by checking # whether the app is in notRunning state. (The data-test-connection-state attribute # goes through the sequence "initial" -> "running" -> "notRunning"). page_or_locator.locator( "[data-testid='stApp'][data-test-script-state='notRunning']" ).wait_for( timeout=25000, state="attached", ) # Wait for all element skeletons to be removed. # This is useful to make sure that all elements have been rendered. 
expect(page_or_locator.get_by_test_id("stSkeleton")).to_have_count(0, timeout=25000) if wait_delay > 0: # Give the app a little more time to render everything page.wait_for_timeout(wait_delay) def wait_for_app_loaded(page: Page) -> None: """Wait for the app to fully load.""" # Wait for the app view container to appear: page.wait_for_selector( "[data-testid='stAppViewContainer']", timeout=30000, state="attached" ) wait_for_app_run(page) def rerun_app(page: Page) -> None: """Triggers an app rerun and waits for the run to be finished.""" # Click somewhere to clear the focus from elements: page.get_by_test_id("stApp").click(position={"x": 0, "y": 0}) # Press "r" to rerun the app: page.keyboard.press("r") wait_for_app_run(page) def wait_until( page: Page, fn: Callable[[], None | bool], timeout: int = 5000, interval: int = 100 ) -> None: """Run a test function in a loop until it evaluates to True or times out. For example: >>> wait_until(lambda: x.values() == ["x"], page) Parameters ---------- page : playwright.sync_api.Page Playwright page fn : Callable Callback timeout : int, optional Total timeout in milliseconds, by default 5000 interval : int, optional Waiting interval, by default 100 Adapted from panel. """ # Hide this function traceback from the pytest output if the test fails __tracebackhide__ = True start = time.time() def timed_out() -> bool: elapsed = time.time() - start elapsed_ms = elapsed * 1000 return elapsed_ms > timeout timeout_msg = f"wait_until timed out in {timeout} milliseconds" while True: try: result = fn() except AssertionError as e: if timed_out(): raise TimeoutError(timeout_msg) from e else: if result not in (None, True, False): raise ValueError( "`wait_until` callback must return None, True or " f"False, returned {result!r}" ) # Stop is result is True or None # None is returned when the function has an assert if result is None or result: return if timed_out(): raise TimeoutError(timeout_msg) page.wait_for_timeout(interval) def start_app_server( app_port: int, request_module: ModuleType, *, extra_env: dict[str, str] | None = None, extra_args: list[str] | None = None, ) -> AsyncSubprocess: """Start a Streamlit app server for the given *test module*. This helper centralizes the logic for spinning up a Streamlit subprocess so it can be reused by different pytest fixtures (for example, tests that require per-test environment variables). Parameters ---------- app_port : int Port on which the server should listen. request_module : ModuleType The pytest *module object* that triggered the server start. This is needed to resolve the Streamlit script that belongs to the test. extra_env : dict[str, str] | None, optional Additional environment variables to set for the subprocess. extra_args : list[str] | None, optional Additional command-line arguments to pass to *streamlit run*. Returns ------- AsyncSubprocess The running Streamlit subprocess wrapper. *Call ``terminate()`` on the returned object to stop the server and obtain the captured output.* """ env = {**os.environ.copy(), **(extra_env or {})} args = [ "streamlit", "run", resolve_test_to_script(request_module), "--server.headless", "true", "--global.developmentMode", "false", "--global.e2eTest", "true", "--server.port", str(app_port), "--browser.gatherUsageStats", "false", "--server.fileWatcherType", "none", "--server.enableStaticServing", "true", ] app_server_start_retries = 3 app_server_start_retry_delay_seconds = 20 # Append any caller-supplied extra args at the end so they can override # defaults when necessary. 
if extra_args: args.extend(extra_args) for i in range(app_server_start_retries): proc = AsyncSubprocess(args, cwd=".", env=env) proc.start() if wait_for_app_server_to_start(app_port): return proc stdout = proc.terminate() print(stdout, flush=True) if i < app_server_start_retries - 1: print( f"Retrying to start app server in {app_server_start_retry_delay_seconds} seconds... " f"(Attempt {i + 1}/{app_server_start_retries})", flush=True, ) time.sleep(app_server_start_retry_delay_seconds) raise RuntimeError("Unable to start Streamlit app") # endregion
ImageCompareFunction
python
ray-project__ray
python/ray/experimental/channel/shared_memory_channel.py
{ "start": 5180, "end": 20624 }
class ____(ChannelInterface): """ A wrapper type for ray.ObjectRef. Currently supports ray.get but not ray.wait. """ def __init__( self, writer: Optional[ray.actor.ActorHandle], reader_and_node_list: List[Tuple["ray.actor.ActorHandle", str]], typ: Optional[Union[int, SharedMemoryType]] = None, _writer_node_id: Optional["ray.NodeID"] = None, _writer_ref: Optional["ray.ObjectRef"] = None, _node_id_to_reader_ref_info: Optional[Dict[str, ReaderRefInfo]] = None, _writer_registered: bool = False, _reader_registered: bool = False, ): """ Create a channel that can be read and written by co-located Ray processes. Anyone may write to or read from the channel. The channel has no buffer, so the writer will block until reader(s) have read the previous value. Args: writer: The actor that may write to the channel. None signifies the driver. reader_and_node_list: A list of tuples, where each tuple contains a reader actor handle and the node ID where the actor is located. typ: Type information about the values passed through the channel. Either an integer representing the max buffer size in bytes allowed, or a SharedMemoryType. Returns: Channel: A wrapper around ray.ObjectRef. """ assert len(reader_and_node_list) > 0 for reader, _ in reader_and_node_list: assert isinstance(reader, ray.actor.ActorHandle) if typ is None: typ = SharedMemoryType() elif isinstance(typ, int): typ = SharedMemoryType(buffer_size_bytes=typ) # The min buffer size must be large enough to at least fit an instance of the # _ResizeChannel class along with any metadata. MIN_BUFFER_SIZE = int(1000) # 1000 bytes if typ.buffer_size_bytes < MIN_BUFFER_SIZE: raise ValueError( "typ.buffer_size_bytes must be at least MIN_BUFFER_SIZE " f"({MIN_BUFFER_SIZE} bytes)" ) self._writer = writer self._reader_and_node_list = reader_and_node_list self._typ = typ self._worker = ray._private.worker.global_worker self._worker.check_connected() self._writer_registered = _writer_registered self._reader_registered = _reader_registered # NodeID -> ReaderRefInfo on that node. Note that there's only 1 # reader ref per node. self._node_id_to_reader_ref_info: Dict[str, ReaderRefInfo] = ( _node_id_to_reader_ref_info or {} ) # Node ID -> a list of reader actors. self._node_id_to_readers: Dict[str, "ray.actor.ActorHandle"] = defaultdict(list) for reader, node_id in self._reader_and_node_list: self._node_id_to_readers[node_id].append(reader) # Number of readers in a local node. self._num_local_readers = 0 if _writer_ref is None: # We are the writer. Check that the passed handle matches the # current actor (or it is the driver). # TODO(swang): Channels must be initially constructed by the writer # actor, so we shouldn't need to include `writer` in the # constructor args. Either support Channels being constructed by # someone other than the writer or remove it from the args. self_actor = get_self_actor() assert writer == self_actor self._writer_node_id = ( ray.runtime_context.get_runtime_context().get_node_id() ) self._writer_ref = _create_channel_ref(self, typ.buffer_size_bytes) self._create_reader_refs(typ.buffer_size_bytes) else: assert ( _writer_node_id is not None ), "_writer_node_id must also be passed to the constructor when " "_writer_ref is." assert _node_id_to_reader_ref_info is not None, ( "_node_id_to_reader_ref_info must also be passed to the constructor " "when _writer_ref is." 
) self._writer_ref = _writer_ref self._writer_node_id = _writer_node_id self._node_id_to_reader_ref_info = _node_id_to_reader_ref_info assert self._num_local_readers == 0 remote_node_exists = False for node_id, readers in self._node_id_to_readers.items(): if self.is_local_node(node_id): self._num_local_readers += len(readers) else: remote_node_exists = True # If remote node exists, we have 1 additional reader that listens # to object changes and push them to remote nodes. if remote_node_exists: self._num_local_readers += 1 # There must be at least 1 local reader assert self._num_local_readers > 0 self._local_reader_ref: Optional["ray.ObjectRef"] = self._get_local_reader_ref( self._node_id_to_reader_ref_info ) def _get_local_reader_ref( self, _node_id_to_reader_ref_info: Dict[str, ReaderRefInfo] ) -> Optional["ray.ObjectRef"]: for node_id, reader_ref_info in _node_id_to_reader_ref_info.items(): if self.is_local_node(node_id): return reader_ref_info.reader_ref return None def _create_reader_refs( self, buffer_size_bytes: int, ): # TODO(jhumphri): Free the current reader ref once the reference to it is # destroyed below. for node_id, readers in self._node_id_to_readers.items(): if not self.is_local_node(node_id): # Find 1 reader in a remote node to create a reference that's # shared by all readers. When a new value is written to a reference, # it is sent to this reference. reader = readers[0] fn = reader.__ray_call__ self._node_id_to_reader_ref_info[node_id] = ReaderRefInfo( reader_ref=ray.get( fn.remote(_create_channel_ref, buffer_size_bytes) ), ref_owner_actor_id=reader._actor_id, num_reader_actors=len(readers), ) else: writer_id = ray.ActorID.nil() if self._writer is not None: writer_id = self._writer._actor_id self._node_id_to_reader_ref_info[node_id] = ReaderRefInfo( reader_ref=self._writer_ref, ref_owner_actor_id=writer_id, num_reader_actors=len(readers), ) # There must be only 1 node reader reference per node. assert len(self._node_id_to_reader_ref_info) == len(self._node_id_to_readers) # We need to register the new writer_ref. self._writer_registered = False self.ensure_registered_as_writer() @staticmethod def is_local_node(node_id): return ray.runtime_context.get_runtime_context().get_node_id() == node_id def ensure_registered_as_writer(self) -> None: if self._writer_registered: return if not self.is_local_node(self._writer_node_id): raise ValueError( "`ensure_registered_as_writer()` must only be called on the node that " "the writer is on." 
) remote_reader_ref_info: Dict[str, ReaderRefInfo] = {} for node_id, reader_ref_info in self._node_id_to_reader_ref_info.items(): if self.is_local_node(node_id): continue remote_reader_ref_info[node_id] = reader_ref_info self._worker.core_worker.experimental_channel_register_writer( self._writer_ref, remote_reader_ref_info, ) self._writer_registered = True def ensure_registered_as_reader(self) -> None: if self._reader_registered: return for node_id, reader_ref_info in self._node_id_to_reader_ref_info.items(): if self.is_local_node(node_id): self._worker.core_worker.experimental_channel_register_reader( reader_ref_info.reader_ref, ) self._reader_registered = True @staticmethod def _deserialize_reader_channel( writer: ray.actor.ActorHandle, reader_and_node_list: List[Tuple["ray.actor.ActorHandle", str]], typ: int, writer_node_id, writer_ref: "ray.ObjectRef", node_id_to_reader_ref_info: Dict[str, ReaderRefInfo], writer_registered: bool, reader_registered: bool, ) -> "Channel": chan = Channel( writer, reader_and_node_list, typ, _writer_node_id=writer_node_id, _writer_ref=writer_ref, _node_id_to_reader_ref_info=node_id_to_reader_ref_info, _writer_registered=writer_registered, _reader_registered=reader_registered, ) return chan def __reduce__(self): assert self._node_id_to_reader_ref_info is not None return self._deserialize_reader_channel, ( self._writer, self._reader_and_node_list, self._typ, self._writer_node_id, self._writer_ref, self._node_id_to_reader_ref_info, self._writer_registered, self._reader_registered, ) def __str__(self) -> str: return ( f"Channel(_node_id_to_reader_ref_info={self._node_id_to_reader_ref_info}, " f"_writer_ref={self._writer_ref})" ) def _resize_channel_if_needed(self, serialized_value: str, timeout_ms: int): # serialized_value.total_bytes *only* includes the size of the data. It does not # include the size of the metadata, so we must account for the size of the # metadata explicitly. size = serialized_value.total_bytes + len(serialized_value.metadata) if size > self._typ.buffer_size_bytes: # Now make the channel backing store larger. self._typ.buffer_size_bytes = size # TODO(jhumphri): Free the current writer ref once the reference to it is # destroyed below. # TODO(sang): Support different policies such as 2X buffer size. prev_writer_ref = self._writer_ref self._writer_ref = _create_channel_ref(self, self._typ.buffer_size_bytes) self._create_reader_refs(self._typ.buffer_size_bytes) self._local_reader_ref = self._get_local_reader_ref( self._node_id_to_reader_ref_info ) # Write a special message to the channel so that the readers know to # stop using the current reader_ref. special_message = _ResizeChannel(self._node_id_to_reader_ref_info) special_message_serialized = ( self._worker.get_serialization_context().serialize(special_message) ) self._worker.core_worker.experimental_channel_put_serialized( special_message_serialized, prev_writer_ref, self._num_local_readers, timeout_ms, ) # TODO(sang): Clean the previous ref that won't be used. # Right now, if we just close it here, it will not work because # of race conditions. # self._worker.core_worker.experimental_channel_set_error( # prev_writer_ref # ) def write(self, value: Any, timeout: Optional[float] = None) -> None: self.ensure_registered_as_writer() assert ( timeout is None or timeout >= 0 or timeout == -1 ), "Timeout must be non-negative or -1." 
# -1 means no timeout (block indefinitely) timeout_ms = int(timeout * 1000) if timeout is not None else -1 if not isinstance(value, SerializedObject): try: serialized_value = self._worker.get_serialization_context().serialize( value ) except TypeError as e: sio = io.StringIO() ray.util.inspect_serializability(value, print_file=sio) msg = ( "Could not serialize the put value " f"{repr(value)}:\n" f"{sio.getvalue()}" ) raise TypeError(msg) from e else: serialized_value = value start_time = time.monotonic() self._resize_channel_if_needed(serialized_value, timeout_ms) if timeout is not None: timeout_ms -= int((time.monotonic() - start_time) * 1000) timeout_ms = max(timeout_ms, 0) self._worker.core_worker.experimental_channel_put_serialized( serialized_value, self._writer_ref, self._num_local_readers, timeout_ms, ) def read(self, timeout: Optional[float] = None) -> Any: assert ( timeout is None or timeout >= 0 or timeout == -1 ), "Timeout must be non-negative or -1." self.ensure_registered_as_reader() start_time = time.monotonic() ret = self._worker.get_objects( [self._local_reader_ref], timeout=timeout, return_exceptions=True )[0][0] if isinstance(ret, _ResizeChannel): self._node_id_to_reader_ref_info = ret._node_id_to_reader_ref_info self._local_reader_ref = self._get_local_reader_ref( self._node_id_to_reader_ref_info ) # We need to register the new reader_ref. self._reader_registered = False self.ensure_registered_as_reader() if timeout is not None: timeout -= time.monotonic() - start_time timeout = max(timeout, 0) ret = self._worker.get_objects( [self._local_reader_ref], timeout=timeout, return_exceptions=True )[0][0] return ret def release_buffer(self, timeout: Optional[float] = None) -> None: assert ( timeout is None or timeout >= 0 or timeout == -1 ), "Timeout must be non-negative or -1." self.ensure_registered_as_reader() self._worker.get_objects( [self._local_reader_ref], timeout=timeout, return_exceptions=True, skip_deserialization=True, ) def close(self) -> None: """ Close this channel by setting the error bit on both the writer_ref and the reader_ref. """ self._worker.core_worker.experimental_channel_set_error(self._writer_ref) is_local_node_reader = False for node_id in self._node_id_to_readers.keys(): if self.is_local_node(node_id): is_local_node_reader = True if is_local_node_reader: self.ensure_registered_as_reader() for reader_ref_info in self._node_id_to_reader_ref_info.values(): self._worker.core_worker.experimental_channel_set_error( reader_ref_info.reader_ref ) @DeveloperAPI
Channel
python
redis__redis-py
tests/test_scenario/maint_notifications_helpers.py
{ "start": 279, "end": 2004 }
class ____: @staticmethod def wait_push_notification( redis_client: Redis, timeout: int = 120, fail_on_timeout: bool = True, connection: Optional[Connection] = None, ): """Wait for a push notification to be received.""" start_time = time.time() check_interval = 0.2 # Check more frequently during operations test_conn = ( connection if connection else redis_client.connection_pool.get_connection() ) try: while time.time() - start_time < timeout: try: if test_conn.can_read(timeout=0.2): # reading is important, it triggers the push notification push_response = test_conn.read_response(push_request=True) logging.debug( f"Push notification has been received. Response: {push_response}" ) if test_conn.should_reconnect(): logging.debug("Connection is marked for reconnect") return except Exception as e: logging.error(f"Error reading push notification: {e}") break time.sleep(check_interval) if fail_on_timeout: pytest.fail("Timeout waiting for push notification") finally: # Release the connection back to the pool try: if not connection: redis_client.connection_pool.release(test_conn) except Exception as e: logging.error(f"Error releasing connection: {e}")
ClientValidations
python
allegroai__clearml
clearml/backend_api/services/v2_13/tasks.py
{ "start": 315915, "end": 318642 }
class ____(Response):
    """
    Response of tasks.get_hyper_params endpoint.

    :param params: Hyper parameters (keyed by task ID)
    :type params: Sequence[dict]
    """

    _service = "tasks"
    _action = "get_hyper_params"
    _version = "2.13"
    _schema = {
        "definitions": {
            "params_item": {
                "properties": {
                    "description": {
                        "description": "The parameter description. Optional",
                        "type": ["string", "null"],
                    },
                    "name": {
                        "description": "Name of the parameter. The combination of section and name should be unique",
                        "type": ["string", "null"],
                    },
                    "section": {
                        "description": "Section that the parameter belongs to",
                        "type": ["string", "null"],
                    },
                    "type": {
                        "description": "Type of the parameter. Optional",
                        "type": ["string", "null"],
                    },
                    "value": {
                        "description": "Value of the parameter",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            }
        },
        "properties": {
            "params": {
                "description": "Hyper parameters (keyed by task ID)",
                "items": {
                    "properties": {
                        "hyperparams": {
                            "description": "Hyper parameters",
                            "items": {"$ref": "#/definitions/params_item"},
                            "type": "array",
                        },
                        "task": {"description": "Task ID", "type": "string"},
                    },
                    "type": "object",
                },
                "type": ["array", "null"],
            }
        },
        "type": "object",
    }

    def __init__(self, params: Optional[List[dict]] = None, **kwargs: Any) -> None:
        super(GetHyperParamsResponse, self).__init__(**kwargs)
        self.params = params

    @schema_property("params")
    def params(self) -> Optional[List[dict]]:
        return self._property_params

    @params.setter
    def params(self, value: Optional[List[dict]]) -> None:
        if value is None:
            self._property_params = None
            return

        self.assert_isinstance(value, "params", (list, tuple))

        self.assert_isinstance(value, "params", (dict,), is_array=True)
        self._property_params = value
GetHyperParamsResponse
python
huggingface__transformers
tests/models/instructblipvideo/test_modeling_instructblipvideo.py
{ "start": 18253, "end": 25930 }
class ____(
    ModelTesterMixin, GenerationTesterMixin, unittest.TestCase
):
    all_model_classes = (
        (InstructBlipVideoForConditionalGeneration, InstructBlipVideoModel) if is_torch_available() else ()
    )
    additional_model_inputs = ["qformer_input_ids", "input_ids"]
    test_resize_embeddings = True
    test_attention_outputs = False
    _is_composite = True

    def setUp(self):
        self.model_tester = InstructBlipVideoForConditionalGenerationDecoderOnlyModelTester(self)
        common_properties = ["num_query_tokens", "video_token_index"]
        self.config_tester = ConfigTester(
            self, config_class=InstructBlipVideoConfig, has_text_modality=False, common_properties=common_properties
        )

    def test_for_conditional_generation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_conditional_generation(*config_and_inputs)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(
        reason="InstructBlipVideoQFormerModel does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet."
    )
    def test_eager_matches_sdpa_generate(self):
        pass

    @unittest.skip(reason="Hidden_states is tested in individual model tests")
    def test_hidden_states_output(self):
        pass

    @unittest.skip(reason="InstructBlipVideoForConditionalGeneration doesn't support inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Tied weights are tested in individual model tests")
    def test_tied_weights_keys(self):
        pass

    @unittest.skip(reason="Retain_grad is tested in individual model tests")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip(reason="InstructBlipVideoModel does not have input/output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="InstructBLIP has no separate base model without a head.")
    def test_model_base_model_prefix(self):
        pass

    def test_forward_signature(self):
        for model_class in self.all_model_classes:
            config, _ = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_load_vision_qformer_text_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # Save InstructBlipVideoConfig and check if we can load InstructBlipVideoVisionConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            vision_config = InstructBlipVideoVisionConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())

        # Save InstructBlipVideoConfig and check if we can load InstructBlipVideoQFormerConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            qformer_config = InstructBlipVideoQFormerConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.qformer_config.to_dict(), qformer_config.to_dict())

    @slow
    def test_model_from_pretrained(self):
        model_name = "Salesforce/instructblip-vicuna-7b"
        model = InstructBlipVideoForConditionalGeneration.from_pretrained(model_name)
        self.assertIsNotNone(model)

    # overwrite because InstructBLIPVideo internally calls LM.generate() with embeds thus it cannot operate in no cache format
    def _check_generate_outputs(self, output, config, use_cache=False, num_return_sequences=1, num_beams=1):
        use_cache = True  # force this to be True in case False is passed
        super()._check_generate_outputs(
            output, config, use_cache=use_cache, num_return_sequences=num_return_sequences, num_beams=num_beams
        )

    def test_sdpa_can_dispatch_composite_models(self):
        """
        Tests if composite models dispatch correctly on SDPA/eager when requested so when loading the model.
        This tests only by looking at layer names, as usually SDPA layers call "SDPAAttention".
        In contrast to the above test, this one checks if the "config._attn_implementation" is a dict after the model
        is loaded, because we manually replicate requested attn implementation on each sub-config when loading.
        See https://github.com/huggingface/transformers/pull/32238 for more info

        The test tries to cover most general cases of composite models, VLMs with vision and text configs. Any model
        that has a different set of sub-configs has to overwrite this test.
        """
        if not self.has_attentions:
            self.skipTest(reason="Model architecture does not support attentions")

        if not self._is_composite:
            self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA")

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model_sdpa = model_class.from_pretrained(tmpdirname)
                model_sdpa = model_sdpa.eval().to(torch_device)

                # `None` as it is the requested one which will be assigned to each sub-config
                # Sub-model will dispatch to SDPA if it can (checked below that `SDPA` layers are present)
                self.assertTrue(model.language_model.config._attn_implementation == "sdpa")
                self.assertTrue(model.vision_model.config._attn_implementation == "sdpa")
                self.assertTrue(model.qformer.config._attn_implementation == "eager")

                model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager")
                model_eager = model_eager.eval().to(torch_device)
                self.assertTrue(model_eager.config._attn_implementation == "eager")
                self.assertTrue(model_eager.language_model.config._attn_implementation == "eager")
                self.assertTrue(model_eager.vision_model.config._attn_implementation == "eager")
                self.assertTrue(model_eager.qformer.config._attn_implementation == "eager")

                for name, submodule in model_eager.named_modules():
                    class_name = submodule.__class__.__name__
                    if (
                        class_name.endswith("Attention")
                        and getattr(submodule, "config", None)
                        and submodule.config._attn_implementation == "sdpa"
                    ):
                        raise ValueError("The eager model should not have SDPA attention layers")


# We will verify our results on an image of cute cats
def prepare_video():
    video_file = hf_hub_download(
        repo_id="raushan-testing-hf/videos-test", filename="video_demo.npy", repo_type="dataset"
    )
    video = np.load(video_file)[::2]  # sample every 2nd frame to get 4 frames total
    return video


@require_vision
@require_torch
@require_bitsandbytes
@require_accelerate
@slow
InstructBlipVideoForConditionalGenerationDecoderOnlyTest
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/paramSpec32.py
{ "start": 391, "end": 894 }
class ____(Generic[P, T2]):
    def __init__(self, fn: Callable[P, T2], *args: P.args, **kwargs: P.kwargs) -> None:
        self.fn = fn
        self.args = args
        self.kwargs = kwargs

    def __call__(self) -> T2:
        return self.fn(*self.args, **self.kwargs)


# This should generate an error because arguments x and k are missing.
Class1(add_k)

# This should generate an error because arguments x has the wrong type.
Class1(add_k, "3", 2)

Class1(add_k, 3, 2)

Class1(add_k, x=3, k=2)
Class1