| language | repo | path | class_span | source | target |
|---|---|---|---|---|---|
python | sqlalchemy__sqlalchemy | test/orm/test_relationships.py | {
"start": 58380,
"end": 60073
} | class ____(fixtures.MappedTest):
"""test a relationship based on a primary
join against a unique non-pk column"""
@classmethod
def define_tables(cls, metadata):
Table(
"table_a",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("ident", String(10), nullable=False, unique=True),
)
Table(
"table_b",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column(
"a_ident",
String(10),
ForeignKey("table_a.ident"),
nullable=False,
),
)
@classmethod
def setup_classes(cls):
class A(cls.Comparable):
pass
class B(cls.Comparable):
pass
def test_switch_parent(self):
A, B, table_b, table_a = (
self.classes.A,
self.classes.B,
self.tables.table_b,
self.tables.table_a,
)
self.mapper_registry.map_imperatively(A, table_a)
self.mapper_registry.map_imperatively(
B, table_b, properties={"a": relationship(A, backref="bs")}
)
session = fixture_session()
a1, a2 = A(ident="uuid1"), A(ident="uuid2")
session.add_all([a1, a2])
a1.bs = [B(), B()]
session.flush()
session.expire_all()
a1, a2 = session.query(A).all()
for b in list(a1.bs):
b.a = a2
session.delete(a1)
session.flush()
| UniqueColReferenceSwitchTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zoho-crm/source_zoho_crm/types.py | {
"start": 2739,
"end": 7968
} | class ____(FromDictMixin):
json_type: str
length: Optional[int]
api_name: str
data_type: str
decimal_place: Optional[int]
system_mandatory: bool
display_label: str
pick_list_values: Optional[List[ZohoPickListItem]]
auto_number: Optional[AutoNumberDict] = AutoNumberDict(prefix="", suffix="")
def _default_type_kwargs(self) -> Dict[str, str]:
return {"title": self.display_label}
def _picklist_items(self) -> Iterable[Union[str, None]]:
default_list = [None]
if not self.pick_list_values:
return default_list
return default_list + [pick_item.display_value for pick_item in self.pick_list_values]
def _boolean_field(self) -> FieldType:
return {"type": ["null", "boolean"], **self._default_type_kwargs()}
def _integer_field(self) -> FieldType:
return {"type": ["null", "integer"], **self._default_type_kwargs()}
def _double_field(self) -> FieldType:
typedef = {"type": ["null", "number"], **self._default_type_kwargs()}
if self.decimal_place:
typedef["multipleOf"] = float(Decimal("0.1") ** self.decimal_place)
return typedef
def _string_field(self) -> FieldType:
if self.api_name == "Reminder":
# this is a special case. although datatype = `picklist`,
# actual values do not correspond to the values in the list
return {"type": ["null", "string"], "format": "date-time", **self._default_type_kwargs()}
typedef = {"type": ["null", "string"], "maxLength": self.length, **self._default_type_kwargs()}
if self.data_type == ZohoDataType.website:
typedef["format"] = "uri"
elif self.data_type == ZohoDataType.email:
typedef["format"] = "email"
elif self.data_type == ZohoDataType.date:
typedef["format"] = "date"
elif self.data_type == ZohoDataType.datetime:
typedef["format"] = "date-time"
elif self.data_type == ZohoDataType.bigint:
typedef["airbyte_type"] = "big_integer"
elif self.data_type == ZohoDataType.autonumber:
print(self.auto_number)
if self.auto_number.get("prefix") or self.auto_number.get("suffix"):
typedef["format"] = "string"
else:
typedef["airbyte_type"] = "big_integer"
elif self.data_type == ZohoDataType.picklist and self.pick_list_values:
typedef["enum"] = self._picklist_items()
return typedef
def _jsonarray_field(self) -> FieldType:
typedef = {"type": "array", **self._default_type_kwargs()}
if self.api_name in ("Product_Details", "Pricing_Details"):
# these two fields are said to be text, but are actually complex objects
typedef["items"] = {"type": "object"}
return typedef
if self.api_name == "Tag":
# `Tag` is defined as string, but is actually an object
typedef["items"] = {
"type": "object",
"additionalProperties": True,
"required": ["name", "id"],
"properties": {"name": {"type": "string"}, "id": {"type": "string"}},
}
return typedef
if self.data_type in (ZohoDataType.text, *ZohoDataType.numeric_string_types()):
typedef["items"] = {"type": "string"}
if self.data_type == ZohoDataType.autonumber:
if self.auto_number.get("prefix") or self.auto_number.get("suffix"):
typedef["items"]["format"] = "string"
else:
typedef["items"]["airbyte_type"] = "big_integer"
else:
typedef["items"]["airbyte_type"] = "big_integer"
if self.data_type == ZohoDataType.multiselectpicklist:
typedef["minItems"] = 1
typedef["uniqueItems"] = True
items = {"type": ["null", "string"]}
if self.pick_list_values:
items["enum"] = self._picklist_items()
typedef["items"] = items
return typedef
def _jsonobject_field(self) -> FieldType:
lookup_typedef = {
"type": ["null", "object"],
"additionalProperties": True,
"required": ["name", "id"],
"properties": {"name": {"type": ["null", "string"]}, "id": {"type": "string"}},
**self._default_type_kwargs(),
}
if self.data_type == ZohoDataType.lookup:
return lookup_typedef
if self.data_type == ZohoDataType.ownerlookup:
owner_lookup_typedef = copy.deepcopy(lookup_typedef)
owner_lookup_typedef["required"] += ["email"]
owner_lookup_typedef["properties"]["email"] = {"type": "string", "format": "email"}
return owner_lookup_typedef
# exact specification unknown
return {"type": ["null", "object"]}
@property
def schema(self) -> FieldType:
if self.json_type in ZohoJsonType.all():
return getattr(self, f"_{self.json_type}_field")()
raise UnknownDataTypeException(f"JSON type: {self.json_type}, data type:{self.data_type}")
@dataclasses.dataclass
| FieldMeta |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/transfers/test_dynamodb_to_s3.py | {
"start": 1480,
"end": 1919
} | class ____:
@pytest.mark.parametrize("value", ["102938.3043847474", 1.010001, 10, "100", "1E-128", 1e-128])
def test_jsonencoder_with_decimal(self, value):
"""Test JSONEncoder correctly encodes and decodes decimal values."""
org = Decimal(value)
encoded = json.dumps(org, cls=JSONEncoder)
decoded = json.loads(encoded, parse_float=Decimal)
assert org == pytest.approx(decoded)
| TestJSONEncoder |
python | psf__requests | tests/test_utils.py | {
"start": 5531,
"end": 6032
} | class ____:
@pytest.mark.parametrize(
"value, expected",
(
(None, None),
("Test", "Test"),
('"Test"', "Test"),
('"Test\\\\"', "Test\\"),
('"\\\\Comp\\Res"', "\\Comp\\Res"),
),
)
def test_valid(self, value, expected):
assert unquote_header_value(value) == expected
def test_is_filename(self):
assert unquote_header_value('"\\\\Comp\\Res"', True) == "\\\\Comp\\Res"
| TestUnquoteHeaderValue |
python | dagster-io__dagster | python_modules/libraries/dagster-pandas/dagster_pandas/constraints.py | {
"start": 25012,
"end": 36201
} | class ____(MultiColumnConstraintWithMetadata):
"""This class is similar to multicolumn, but takes in functions that operate on the whole column at once
rather than ones that operate on each value --
consider this similar to the difference between apply-map and apply aggregate.
Args:
description (str): description of the overall set of validations (TODO: support multiple descriptions)
fn_and_columns_dict (Dict[str, List[Callable[[pd.Series], Tuple[bool, dict[str, Union[dict,list, str, set]]]]]):
while this is a relatively complex type,
what it amounts to is a dict mapping columns to the functions to
run on them'
resulting_exception (type): the response to generate if validation fails. Subclass of
ConstraintWithMetadataException
raise_or_typecheck (Optional[bool]): whether to raise an exception (true) or a failed typecheck (false)
type_for_internal (Optional[type]): what type to use for internal validators. Subclass of
ConstraintWithMetadata
name (Optional[str]): what to call the constraint, defaults to the class name.
"""
def __init__(
self,
description,
fn_and_columns_dict,
resulting_exception,
raise_or_typecheck=True,
name=None,
):
super().__init__(
description,
fn_and_columns_dict,
resulting_exception,
raise_or_typecheck=raise_or_typecheck,
type_for_internal=ColumnAggregateConstraintWithMetadata, # pyright: ignore[reportArgumentType]
name=name,
)
@beta
def non_null_validation(x):
"""Validates that a particular value in a column is not null.
Usage:
pass this as a column validator to
:py:class:'~dagster_pandas.constraints.ColumnConstraintWithMetadata'
or :py:class:'~dagster_pandas.constraints.MultiColumnConstraintWithMetadata'
Generally, you should prefer to use nonnull as a decorator/wrapper rather than using this
directly.
"""
return not pd.isnull(x), {}
@beta
def all_unique_validator(column, ignore_missing_vals=False):
"""Validates that all values in an iterable are unique.
Returns duplicated values as metadata.
Usage:
As a validation function for a
:py:class:'~dagster_pandas.constraints.ColumnAggregateConstraintWithMetadata'
or :py:class:'~dagster_pandas.constraints.MultiAggregateConstraintWithMetadata'
Example:
.. code-block:: python
aggregate_validator = MultiAggregateConstraintWithMetadata(
"confirms all values are unique",
{'bar': [all_unique_validator]},
ConstraintWithMetadataException,
raise_or_typecheck=False,
)
ntype = create_structured_dataframe_type(
"NumericType",
columns_aggregate_validator=aggregate_validator
)
@op(out={'basic_dataframe': Out(dagster_type=ntype)})
def create_dataframe(_):
yield Output(
DataFrame({'foo': [1, 2, 3], 'bar': [9, 10, 10]}),
output_name='basic_dataframe',
)
#will fail with
metadata['offending'] == {'bar': {'all_unique_validator': 'a violation'}}
metadata['actual'] == {'bar': {'all_unique_validator': [10.0]}}
"""
column = pd.Series(column)
duplicated = column.duplicated()
if ignore_missing_vals:
duplicated = apply_ignore_missing_data_to_mask(duplicated, column)
return not duplicated.any(), {"actual": column[duplicated]}
@beta
def nonnull(func):
"""Decorator for column validation functions to make them error on nulls.
Usage:
pass decorated functions as column validators to
:py:class:'~dagster_pandas.constraints.ColumnConstraintWithMetadata'
or :py:class:'~dagster_pandas.constraints.MultiColumnConstraintWithMetadata'
Args:
func (Callable[[Any], Tuple[bool, dict[str, Union[dict,list, str, set]]]]]):
the column validator you want to error on nulls.
"""
@wraps(func)
def nvalidator(val):
origval = func(val)
nval = non_null_validation(val)
return origval[0] and nval[0], {}
nvalidator.__doc__ += " and ensures no values are null" # pyright: ignore[reportOperatorIssue]
return nvalidator
@beta
def column_range_validation_factory(minim=None, maxim=None, ignore_missing_vals=False):
"""Factory for validators testing if column values are within a range.
Args:
minim(Optional[Comparable]): the low end of the range
maxim(Optional[Comparable]): the high end of the range
ignore_missing_vals(Optional[bool]): whether to ignore nulls.
Returns: a validation function for this constraint
Usage:
pass returned functions as column validators to
:py:class:'~dagster_pandas.constraints.ColumnConstraintWithMetadata'
or :py:class:'~dagster_pandas.constraints.MultiColumnConstraintWithMetadata'
Examples:
.. code-block:: python
in_range_validator = column_range_validation_factory(1, 3, ignore_missing_vals=True)
column_validator = MultiColumnConstraintWithMetadata(
"confirms values are numbers in a range",
{'foo': [in_range_validator]},
ColumnWithMetadataException,
raise_or_typecheck=False,
)
ntype = create_structured_dataframe_type(
"NumericType",
columns_validator=column_validator
)
@op(out={'basic_dataframe': Out(dagster_type=ntype)})
def create_dataframe(_):
yield Output(
DataFrame({'foo': [1, 2, 7], 'bar': [9, 10, 10]}),
output_name='basic_dataframe',
)
#will fail with
metadata['offending'] == {'foo': {'in_range_validation_fn': ['row 2']}}
metadata['actual'] == {'foo': {'in_range_validation_fn': [7]}}
"""
if minim is None:
if isinstance(maxim, datetime):
minim = datetime.min
else:
minim = -1 * (sys.maxsize - 1)
if maxim is None:
if isinstance(minim, datetime):
maxim = datetime.max
else:
maxim = sys.maxsize
def in_range_validation_fn(x):
if ignore_missing_vals and pd.isnull(x):
return True, {}
return (isinstance(x, (type(minim), type(maxim)))) and (x <= maxim) and (x >= minim), {}
in_range_validation_fn.__doc__ = f"checks whether values are between {minim} and {maxim}"
if ignore_missing_vals:
in_range_validation_fn.__doc__ += ", ignoring nulls"
return in_range_validation_fn
@beta
def categorical_column_validator_factory(categories, ignore_missing_vals=False):
"""Factory for validators testing if all values are in some set.
Args:
categories(Union[Sequence, set]): the set of allowed values
ignore_missing_vals(Optional[bool]): whether to ignore nulls.
Returns: a validation function for this constraint
Usage:
pass returned functions as column validators to
:py:class:'~dagster_pandas.constraints.ColumnConstraintWithMetadata'
or :py:class:'~dagster_pandas.constraints.MultiColumnConstraintWithMetadata'
Example:
.. code-block:: python
categorical_validation_fn = categorical_column_validator_factory([1, 2])
column_validator = MultiColumnConstraintWithMetadata(
"confirms values are numbers in a range",
{'foo': [categorical_validation_fn]},
ColumnWithMetadataException,
raise_or_typecheck=False,
)
ntype = create_structured_dataframe_type(
"NumericType",
columns_validator=column_validator
)
@op(out={'basic_dataframe': Out(dagster_type=ntype)})
def create_dataframe(_):
yield Output(
DataFrame({'foo': [1, 2, 7], 'bar': [9, 10, 10]}),
output_name='basic_dataframe',
)
#will fail with
metadata['offending'] == {'foo': {'categorical_validation_fn': ['row 2']}}
metadata['actual'] == {'foo': {'categorical_validation_fn': [7]}}
"""
categories = set(categories)
def categorical_validation_fn(x):
if ignore_missing_vals and pd.isnull(x):
return True, {}
return (x in categories), {}
categorical_validation_fn.__doc__ = (
f"checks whether values are within this set of values: {categories}"
)
if ignore_missing_vals:
categorical_validation_fn.__doc__ += ", ignoring nulls"
return categorical_validation_fn
@beta
def dtype_in_set_validation_factory(datatypes, ignore_missing_vals=False):
"""Factory for testing if the dtype of a val falls within some allowed set.
Args:
datatypes(Union[set[type], type]): which datatype/datatypes are allowed
ignore_missing_vals(Optional[bool]): whether to ignore nulls
Returns: a validation function for this constraint
Usage:
pass returned functions as column validators to
:py:class:'~dagster_pandas.constraints.ColumnConstraintWithMetadata'
or :py:class:'~dagster_pandas.constraints.MultiColumnConstraintWithMetadata'
Examples:
.. code-block:: python
dtype_is_num_validator = dtype_in_set_validation_factory((int, float, int64, float64))
column_validator = MultiColumnConstraintWithMetadata(
"confirms values are numbers in a range",
{'foo': [dtype_is_num_validator]},
ColumnWithMetadataException,
raise_or_typecheck=False,
)
ntype = create_structured_dataframe_type(
"NumericType",
columns_validator=column_validator
)
@op(out={'basic_dataframe': Out(dagster_type=ntype)})
def create_dataframe(_):
yield Output(
DataFrame({'foo': [1, 'a', 7], 'bar': [9, 10, 10]}),
output_name='basic_dataframe',
)
#will fail with
metadata['offending'] == {'foo': {'categorical_validation_fn': ['row 1']}}
metadata['actual'] == {'foo': {'categorical_validation_fn': ['a']}}
"""
def dtype_in_set_validation_fn(x):
if ignore_missing_vals and pd.isnull(x):
return True, {}
return isinstance(x, datatypes), {}
dtype_in_set_validation_fn.__doc__ = f"checks whether values are this type/types: {datatypes}"
if ignore_missing_vals:
dtype_in_set_validation_fn.__doc__ += ", ignoring nulls"
return dtype_in_set_validation_fn
| MultiAggregateConstraintWithMetadata |
python | pandas-dev__pandas | pandas/tests/extension/base/groupby.py | {
"start": 305,
"end": 6247
} | class ____:
"""Groupby-specific tests."""
def test_grouping_grouper(self, data_for_grouping):
df = pd.DataFrame(
{
"A": pd.Series(
["B", "B", None, None, "A", "A", "B", "C"], dtype=object
),
"B": data_for_grouping,
}
)
gr1 = df.groupby("A")._grouper.groupings[0]
gr2 = df.groupby("B")._grouper.groupings[0]
tm.assert_numpy_array_equal(gr1.grouping_vector, df.A.values)
tm.assert_extension_array_equal(gr2.grouping_vector, data_for_grouping)
@pytest.mark.parametrize("as_index", [True, False])
def test_groupby_extension_agg(self, as_index, data_for_grouping):
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
is_bool = data_for_grouping.dtype._is_boolean
if is_bool:
# only 2 unique values, and the final entry has c==b
# (see data_for_grouping docstring)
df = df.iloc[:-1]
result = df.groupby("B", as_index=as_index).A.mean()
_, uniques = pd.factorize(data_for_grouping, sort=True)
exp_vals = [3.0, 1.0, 4.0]
if is_bool:
exp_vals = exp_vals[:-1]
if as_index:
index = pd.Index(uniques, name="B")
expected = pd.Series(exp_vals, index=index, name="A")
tm.assert_series_equal(result, expected)
else:
expected = pd.DataFrame({"B": uniques, "A": exp_vals})
tm.assert_frame_equal(result, expected)
def test_groupby_agg_extension(self, data_for_grouping):
# GH#38980 groupby agg on extension type fails for non-numeric types
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
expected = df.iloc[[0, 2, 4, 7]]
expected = expected.set_index("A")
result = df.groupby("A").agg({"B": "first"})
tm.assert_frame_equal(result, expected)
result = df.groupby("A").agg("first")
tm.assert_frame_equal(result, expected)
result = df.groupby("A").first()
tm.assert_frame_equal(result, expected)
def test_groupby_extension_no_sort(self, data_for_grouping):
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
is_bool = data_for_grouping.dtype._is_boolean
if is_bool:
# only 2 unique values, and the final entry has c==b
# (see data_for_grouping docstring)
df = df.iloc[:-1]
result = df.groupby("B", sort=False).A.mean()
_, index = pd.factorize(data_for_grouping, sort=False)
index = pd.Index(index, name="B")
exp_vals = [1.0, 3.0, 4.0]
if is_bool:
exp_vals = exp_vals[:-1]
expected = pd.Series(exp_vals, index=index, name="A")
tm.assert_series_equal(result, expected)
def test_groupby_extension_transform(self, data_for_grouping):
is_bool = data_for_grouping.dtype._is_boolean
valid = data_for_grouping[~data_for_grouping.isna()]
df = pd.DataFrame({"A": [1, 1, 3, 3, 1, 4], "B": valid})
is_bool = data_for_grouping.dtype._is_boolean
if is_bool:
# only 2 unique values, and the final entry has c==b
# (see data_for_grouping docstring)
df = df.iloc[:-1]
result = df.groupby("B").A.transform(len)
expected = pd.Series([3, 3, 2, 2, 3, 1], name="A")
if is_bool:
expected = expected[:-1]
tm.assert_series_equal(result, expected)
def test_groupby_extension_apply(self, data_for_grouping, groupby_apply_op):
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
df.groupby("B", group_keys=False, observed=False).apply(groupby_apply_op)
df.groupby("B", group_keys=False, observed=False).A.apply(groupby_apply_op)
df.groupby("A", group_keys=False, observed=False).apply(groupby_apply_op)
df.groupby("A", group_keys=False, observed=False).B.apply(groupby_apply_op)
def test_groupby_apply_identity(self, data_for_grouping):
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
result = df.groupby("A").B.apply(lambda x: x.array)
expected = pd.Series(
[
df.B.iloc[[0, 1, 6]].array,
df.B.iloc[[2, 3]].array,
df.B.iloc[[4, 5]].array,
df.B.iloc[[7]].array,
],
index=pd.Index([1, 2, 3, 4], name="A"),
name="B",
)
tm.assert_series_equal(result, expected)
def test_in_numeric_groupby(self, data_for_grouping):
df = pd.DataFrame(
{
"A": [1, 1, 2, 2, 3, 3, 1, 4],
"B": data_for_grouping,
"C": [1, 1, 1, 1, 1, 1, 1, 1],
}
)
dtype = data_for_grouping.dtype
if (
is_numeric_dtype(dtype)
or is_bool_dtype(dtype)
or dtype.name == "decimal"
or is_string_dtype(dtype)
or is_object_dtype(dtype)
or dtype.kind == "m" # in particular duration[*][pyarrow]
):
expected = pd.Index(["B", "C"])
result = df.groupby("A").sum().columns
else:
expected = pd.Index(["C"])
msg = "|".join(
[
# period
"does not support sum operations",
# datetime
"does not support operation 'sum'",
# all others
re.escape(f"agg function failed [how->sum,dtype->{dtype}"),
]
)
with pytest.raises(TypeError, match=msg):
df.groupby("A").sum()
result = df.groupby("A").sum(numeric_only=True).columns
tm.assert_index_equal(result, expected)
| BaseGroupbyTests |
python | huggingface__transformers | src/transformers/models/layoutlm/modeling_layoutlm.py | {
"start": 12678,
"end": 14345
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([LayoutLMLayer(config) for i in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
@can_return_tuple
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
**kwargs,
) -> Union[tuple[torch.Tensor], BaseModelOutput]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
**kwargs,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPooler
| LayoutLMEncoder |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_ordered_dict.py | {
"start": 31989,
"end": 35580
} | class ____:
def check_runtime_error_issue119004(self, dict1, dict2):
msg = re.escape("OrderedDict mutated during iteration")
self.assertRaisesRegex(RuntimeError, msg, operator.eq, dict1, dict2)
def test_issue119004_change_size_by_clear(self):
with torch._dynamo.error_on_graph_break(False):
class Key(_TriggerSideEffectOnEqual):
def side_effect(self):
dict1.clear()
dict1 = self.OrderedDict(dict.fromkeys((0, Key(), 4.2)))
dict2 = self.OrderedDict(dict.fromkeys((0, Key(), 4.2)))
self.check_runtime_error_issue119004(dict1, dict2)
self.assertEqual(Key.count, 2)
self.assertDictEqual(dict1, {})
self.assertDictEqual(dict2, dict.fromkeys((0, Key(), 4.2)))
def test_issue119004_change_size_by_delete_key(self):
with torch._dynamo.error_on_graph_break(False):
class Key(_TriggerSideEffectOnEqual):
def side_effect(self):
del dict1[TODEL]
TODEL = Key()
dict1 = self.OrderedDict(dict.fromkeys((0, TODEL, 4.2)))
dict2 = self.OrderedDict(dict.fromkeys((0, Key(), 4.2)))
self.check_runtime_error_issue119004(dict1, dict2)
self.assertEqual(Key.count, 2)
self.assertDictEqual(dict1, dict.fromkeys((0, 4.2)))
self.assertDictEqual(dict2, dict.fromkeys((0, Key(), 4.2)))
def test_issue119004_change_linked_list_by_clear(self):
with torch._dynamo.error_on_graph_break(False):
class Key(_TriggerSideEffectOnEqual):
def side_effect(self):
dict1.clear()
dict1['a'] = dict1['b'] = 'c'
dict1 = self.OrderedDict(dict.fromkeys((0, Key(), 4.2)))
dict2 = self.OrderedDict(dict.fromkeys((0, Key(), 4.2)))
self.check_runtime_error_issue119004(dict1, dict2)
self.assertEqual(Key.count, 2)
self.assertDictEqual(dict1, dict.fromkeys(('a', 'b'), 'c'))
self.assertDictEqual(dict2, dict.fromkeys((0, Key(), 4.2)))
def test_issue119004_change_linked_list_by_delete_key(self):
with torch._dynamo.error_on_graph_break(False):
class Key(_TriggerSideEffectOnEqual):
def side_effect(self):
del dict1[TODEL]
dict1['a'] = 'c'
TODEL = Key()
dict1 = self.OrderedDict(dict.fromkeys((0, TODEL, 4.2)))
dict2 = self.OrderedDict(dict.fromkeys((0, Key(), 4.2)))
self.check_runtime_error_issue119004(dict1, dict2)
self.assertEqual(Key.count, 2)
self.assertDictEqual(dict1, {0: None, 'a': 'c', 4.2: None})
self.assertDictEqual(dict2, dict.fromkeys((0, Key(), 4.2)))
def test_issue119004_change_size_by_delete_key_in_dict_eq(self):
with torch._dynamo.error_on_graph_break(False):
class Key(_TriggerSideEffectOnEqual):
trigger = 0
def side_effect(self):
del dict1[TODEL]
TODEL = Key()
dict1 = self.OrderedDict(dict.fromkeys((0, TODEL, 4.2)))
dict2 = self.OrderedDict(dict.fromkeys((0, Key(), 4.2)))
self.assertEqual(Key.count, 0)
# the side effect is in dict.__eq__ and modifies the length
self.assertNotEqual(dict1, dict2)
self.assertEqual(Key.count, 2)
self.assertDictEqual(dict1, dict.fromkeys((0, 4.2)))
self.assertDictEqual(dict2, dict.fromkeys((0, Key(), 4.2)))
@unittest.skipUnless(c_coll, 'requires the C version of the collections module')
| CPythonOrderedDictSideEffects |
python | PrefectHQ__prefect | tests/server/utilities/test_text_search_parser.py | {
"start": 463,
"end": 2087
} | class ____:
"""Test basic query parsing functionality"""
def test_empty_string(self):
result = parse_text_search_query("")
assert result == TextSearchQuery(include=[], exclude=[], required=[])
def test_whitespace_only(self):
result = parse_text_search_query(" \t\n ")
assert result == TextSearchQuery(include=[], exclude=[], required=[])
def test_single_term(self):
result = parse_text_search_query("error")
assert result == TextSearchQuery(include=["error"], exclude=[], required=[])
def test_multiple_terms_or_logic(self):
result = parse_text_search_query("error warning timeout")
assert result == TextSearchQuery(
include=["error", "warning", "timeout"], exclude=[], required=[]
)
def test_multiple_spaces_between_terms(self):
result = parse_text_search_query("error warning\t\ttimeout")
assert result == TextSearchQuery(
include=["error", "warning", "timeout"], exclude=[], required=[]
)
def test_leading_trailing_whitespace(self):
result = parse_text_search_query(" error warning ")
assert result == TextSearchQuery(
include=["error", "warning"], exclude=[], required=[]
)
def test_whitespace_preserved_in_quotes_only(self):
# Multiple spaces between terms should be collapsed, but preserved in quotes
result = parse_text_search_query('hello "world again "')
assert result == TextSearchQuery(
include=["hello", "world again "], exclude=[], required=[]
)
| TestBasicParsing |
python | plotly__plotly.py | plotly/graph_objs/carpet/baxis/_title.py | {
"start": 233,
"end": 3564
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "carpet.baxis"
_path_str = "carpet.baxis.title"
_valid_props = {"font", "offset", "text"}
@property
def font(self):
"""
Sets this axis' title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.carpet.baxis.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.carpet.baxis.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def offset(self):
"""
An additional amount by which to offset the title from the tick
labels, given in pixels.
The 'offset' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["offset"]
@offset.setter
def offset(self, val):
self["offset"] = val
@property
def text(self):
"""
Sets the title of this axis.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this axis' title font.
offset
An additional amount by which to offset the title from
the tick labels, given in pixels.
text
Sets the title of this axis.
"""
def __init__(self, arg=None, font=None, offset=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.carpet.baxis.Title`
font
Sets this axis' title font.
offset
An additional amount by which to offset the title from
the tick labels, given in pixels.
text
Sets the title of this axis.
Returns
-------
Title
"""
super().__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.carpet.baxis.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.carpet.baxis.Title`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("offset", arg, offset)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Title |
python | mlflow__mlflow | mlflow/store/artifact/databricks_logged_model_artifact_repo.py | {
"start": 127,
"end": 1453
} | class ____(DatabricksTrackingArtifactRepository):
"""
Artifact repository for interacting with logged model artifacts in a Databricks workspace.
If operations using the Databricks SDK fail for any reason, this repository automatically
falls back to using the `DatabricksArtifactRepository`, ensuring operational resilience.
"""
# Matches URIs of the form:
# databricks/mlflow-tracking/<experiment_id>/logged_models/<model_id>/<relative_path>
_URI_REGEX = re.compile(
r"databricks/mlflow-tracking/(?P<experiment_id>[^/]+)/logged_models/(?P<model_id>[^/]+)(?P<relative_path>/.*)?$"
)
def _get_uri_regex(self) -> re.Pattern[str]:
return self._URI_REGEX
def _get_expected_uri_format(self) -> str:
return "databricks/mlflow-tracking/<EXP_ID>/logged_models/<MODEL_ID>"
def _build_root_path(self, experiment_id: str, match: re.Match, relative_path: str) -> str:
model_id = match.group("model_id")
return (
f"/WorkspaceInternal/Mlflow/Artifacts/{experiment_id}/LoggedModels/{model_id}"
f"{relative_path}"
)
@staticmethod
def is_logged_model_uri(artifact_uri: str) -> bool:
return bool(DatabricksLoggedModelArtifactRepository._URI_REGEX.search(artifact_uri))
| DatabricksLoggedModelArtifactRepository |
python | realpython__materials | python-maze-solver/source_code_final/src/maze_solver/models/role.py | {
"start": 33,
"end": 186
} | class ____(IntEnum):
NONE = 0
ENEMY = auto()
ENTRANCE = auto()
EXIT = auto()
EXTERIOR = auto()
REWARD = auto()
WALL = auto()
| Role |
python | pypa__warehouse | tests/common/db/ses.py | {
"start": 818,
"end": 1256
} | class ____(WarehouseFactory):
class Meta:
model = Event
created = factory.Faker(
"date_time_between_dates",
datetime_start=datetime.datetime.now(datetime.UTC)
- datetime.timedelta(days=14),
)
email = factory.SubFactory(EmailMessageFactory)
event_id = factory.Faker("pystr", max_chars=12)
event_type = factory.Faker("random_element", elements=[e.value for e in EventTypes])
| EventFactory |
python | huggingface__transformers | src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py | {
"start": 127390,
"end": 128609
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.gate = Qwen3OmniMoeTalkerTextTopKRouter(config)
self.experts = Qwen3OmniMoeTalkerTextExperts(config)
self.shared_expert = Qwen3OmniMoeTalkerTextMLP(
config, intermediate_size=config.shared_expert_intermediate_size
)
self.shared_expert_gate = torch.nn.Linear(config.hidden_size, 1, bias=False)
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
batch_size, sequence_length, hidden_dim = hidden_states.shape
hidden_states_reshaped = hidden_states.view(-1, hidden_dim)
shared_expert_output = self.shared_expert(hidden_states_reshaped)
routing_weights, selected_experts = self.gate(hidden_states_reshaped)
expert_output = self.experts(hidden_states_reshaped, selected_experts, routing_weights)
shared_expert_output = F.sigmoid(self.shared_expert_gate(hidden_states_reshaped)) * shared_expert_output
expert_output += shared_expert_output
expert_output = expert_output.reshape(batch_size, sequence_length, hidden_dim)
return expert_output
| Qwen3OmniMoeTalkerTextSparseMoeBlock |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/components_tests/test_component_scaffolding.py | {
"start": 662,
"end": 873
} | class ____(dg.Scaffolder[TestParamsModelWithDefaults]):
@classmethod
def get_scaffold_params(cls) -> type[TestParamsModelWithDefaults]:
return TestParamsModelWithDefaults
| TestScaffolderWithDefaults |
python | numpy__numpy | numpy/lib/tests/test_recfunctions.py | {
"start": 16876,
"end": 17869
} | class ____:
# Test recursive_fill_fields.
def test_simple_flexible(self):
# Test recursive_fill_fields on flexible-array
a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
b = np.zeros((3,), dtype=a.dtype)
test = recursive_fill_fields(a, b)
control = np.array([(1, 10.), (2, 20.), (0, 0.)],
dtype=[('A', int), ('B', float)])
assert_equal(test, control)
def test_masked_flexible(self):
# Test recursive_fill_fields on masked flexible-array
a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)],
dtype=[('A', int), ('B', float)])
b = ma.zeros((3,), dtype=a.dtype)
test = recursive_fill_fields(a, b)
control = ma.array([(1, 10.), (2, 20.), (0, 0.)],
mask=[(0, 1), (1, 0), (0, 0)],
dtype=[('A', int), ('B', float)])
assert_equal(test, control)
| TestRecursiveFillFields |
python | redis__redis-py | tests/test_maint_notifications.py | {
"start": 31731,
"end": 37542
} | class ____:
"""Test MaintNotificationsConfig endpoint type functionality."""
def setup_method(self):
"""Set up common mock classes for all tests."""
class MockSocket:
def __init__(self, resolved_ip):
self.resolved_ip = resolved_ip
def getpeername(self):
return (self.resolved_ip, 6379)
class MockConnection(MaintNotificationsAbstractConnection, ConnectionInterface):
def __init__(self, host, resolved_ip=None, is_ssl=False):
self.host = host
self.port = 6379
self._sock = MockSocket(resolved_ip) if resolved_ip else None
self.__class__.__name__ = "SSLConnection" if is_ssl else "Connection"
def _get_socket(self):
return self._sock
def get_resolved_ip(self):
# Call the actual method from AbstractConnection
from redis.connection import AbstractConnection
return AbstractConnection.get_resolved_ip(self) # type: ignore
self.MockSocket = MockSocket
self.MockConnection = MockConnection
def test_config_validation_valid_endpoint_types(self):
"""Test that MaintNotificationsConfig accepts valid endpoint types."""
for endpoint_type in EndpointType:
config = MaintNotificationsConfig(endpoint_type=endpoint_type)
assert config.endpoint_type == endpoint_type
def test_config_validation_none_endpoint_type(self):
"""Test that MaintNotificationsConfig accepts None as endpoint type."""
config = MaintNotificationsConfig(endpoint_type=None)
assert config.endpoint_type is None
def test_endpoint_type_detection_ip_addresses(self):
"""Test endpoint type detection for IP addresses."""
config = MaintNotificationsConfig()
# Test private IPv4 addresses
conn1 = self.MockConnection("192.168.1.1", resolved_ip="192.168.1.1")
assert (
config.get_endpoint_type("192.168.1.1", conn1) == EndpointType.INTERNAL_IP
)
# Test public IPv4 addresses
conn2 = self.MockConnection("8.8.8.8", resolved_ip="8.8.8.8")
assert config.get_endpoint_type("8.8.8.8", conn2) == EndpointType.EXTERNAL_IP
# Test IPv6 loopback
conn3 = self.MockConnection("::1")
assert config.get_endpoint_type("::1", conn3) == EndpointType.INTERNAL_IP
# Test IPv6 public address
conn4 = self.MockConnection("2001:4860:4860::8888")
assert (
config.get_endpoint_type("2001:4860:4860::8888", conn4)
== EndpointType.EXTERNAL_IP
)
def test_endpoint_type_detection_fqdn_with_resolved_ip(self):
"""Test endpoint type detection for FQDNs with resolved IP addresses."""
config = MaintNotificationsConfig()
# Test FQDN resolving to private IP
conn1 = self.MockConnection(
"redis.internal.company.com", resolved_ip="192.168.1.1"
)
assert (
config.get_endpoint_type("redis.internal.company.com", conn1)
== EndpointType.INTERNAL_FQDN
)
# Test FQDN resolving to public IP
conn2 = self.MockConnection("db123.redis.com", resolved_ip="8.8.8.8")
assert (
config.get_endpoint_type("db123.redis.com", conn2)
== EndpointType.EXTERNAL_FQDN
)
# Test internal FQDN resolving to public IP (should use resolved IP)
conn3 = self.MockConnection(
"redis.internal.company.com", resolved_ip="10.8.8.8"
)
assert (
config.get_endpoint_type("redis.internal.company.com", conn3)
== EndpointType.INTERNAL_FQDN
)
# Test FQDN with TLS
conn4 = self.MockConnection(
"redis.internal.company.com", resolved_ip="192.168.1.1", is_ssl=True
)
assert (
config.get_endpoint_type("redis.internal.company.com", conn4)
== EndpointType.INTERNAL_FQDN
)
conn5 = self.MockConnection(
"db123.redis.com", resolved_ip="8.8.8.8", is_ssl=True
)
assert (
config.get_endpoint_type("db123.redis.com", conn5)
== EndpointType.EXTERNAL_FQDN
)
def test_endpoint_type_detection_fqdn_heuristics(self):
"""Test endpoint type detection using FQDN heuristics when no resolved IP is available."""
config = MaintNotificationsConfig()
# Test localhost (should be internal)
conn1 = self.MockConnection("localhost")
assert (
config.get_endpoint_type("localhost", conn1) == EndpointType.INTERNAL_FQDN
)
# Test .local domain (should be internal)
conn2 = self.MockConnection("server.local")
assert (
config.get_endpoint_type("server.local", conn2)
== EndpointType.INTERNAL_FQDN
)
# Test public domain (should be external)
conn3 = self.MockConnection("example.com")
assert (
config.get_endpoint_type("example.com", conn3) == EndpointType.EXTERNAL_FQDN
)
def test_endpoint_type_override(self):
"""Test that configured endpoint_type overrides detection."""
# Test with endpoint_type set to NONE
config = MaintNotificationsConfig(endpoint_type=EndpointType.NONE)
conn = self.MockConnection("localhost")
assert config.get_endpoint_type("localhost", conn) == EndpointType.NONE
# Test with endpoint_type set to EXTERNAL_IP
config = MaintNotificationsConfig(endpoint_type=EndpointType.EXTERNAL_IP)
assert config.get_endpoint_type("localhost", conn) == EndpointType.EXTERNAL_IP
| TestMaintNotificationsConfigEndpointType |
python | docker__docker-py | tests/unit/api_network_test.py | {
"start": 150,
"end": 5664
} | class ____(BaseAPIClientTest):
def test_list_networks(self):
networks = [
{
"name": "none",
"id": "8e4e55c6863ef424",
"type": "null",
"endpoints": []
},
{
"name": "host",
"id": "062b6d9ea7913fde",
"type": "host",
"endpoints": []
},
]
get = mock.Mock(return_value=response(
status_code=200, content=json.dumps(networks).encode('utf-8')))
with mock.patch('docker.api.client.APIClient.get', get):
assert self.client.networks() == networks
assert get.call_args[0][0] == f"{url_prefix}networks"
filters = json.loads(get.call_args[1]['params']['filters'])
assert not filters
self.client.networks(names=['foo'])
filters = json.loads(get.call_args[1]['params']['filters'])
assert filters == {'name': ['foo']}
self.client.networks(ids=['123'])
filters = json.loads(get.call_args[1]['params']['filters'])
assert filters == {'id': ['123']}
def test_create_network(self):
network_data = {
"id": 'abc12345',
"warning": "",
}
network_response = response(status_code=200, content=network_data)
post = mock.Mock(return_value=network_response)
with mock.patch('docker.api.client.APIClient.post', post):
result = self.client.create_network('foo')
assert result == network_data
assert post.call_args[0][0] == f"{url_prefix}networks/create"
assert json.loads(post.call_args[1]['data']) == {"Name": "foo"}
opts = {
'com.docker.network.bridge.enable_icc': False,
'com.docker.network.bridge.enable_ip_masquerade': False,
}
self.client.create_network('foo', 'bridge', opts)
assert json.loads(post.call_args[1]['data']) == {
"Name": "foo", "Driver": "bridge", "Options": opts
}
ipam_pool_config = IPAMPool(subnet="192.168.52.0/24",
gateway="192.168.52.254")
ipam_config = IPAMConfig(pool_configs=[ipam_pool_config])
self.client.create_network("bar", driver="bridge",
ipam=ipam_config)
assert json.loads(post.call_args[1]['data']) == {
"Name": "bar",
"Driver": "bridge",
"IPAM": {
"Driver": "default",
"Config": [{
"IPRange": None,
"Gateway": "192.168.52.254",
"Subnet": "192.168.52.0/24",
"AuxiliaryAddresses": None,
}],
}
}
def test_remove_network(self):
network_id = 'abc12345'
delete = mock.Mock(return_value=response(status_code=200))
with mock.patch('docker.api.client.APIClient.delete', delete):
self.client.remove_network(network_id)
args = delete.call_args
assert args[0][0] == f"{url_prefix}networks/{network_id}"
def test_inspect_network(self):
network_id = 'abc12345'
network_name = 'foo'
network_data = {
'name': network_name,
'id': network_id,
'driver': 'bridge',
'containers': {},
}
network_response = response(status_code=200, content=network_data)
get = mock.Mock(return_value=network_response)
with mock.patch('docker.api.client.APIClient.get', get):
result = self.client.inspect_network(network_id)
assert result == network_data
args = get.call_args
assert args[0][0] == f"{url_prefix}networks/{network_id}"
def test_connect_container_to_network(self):
network_id = 'abc12345'
container_id = 'def45678'
post = mock.Mock(return_value=response(status_code=201))
with mock.patch('docker.api.client.APIClient.post', post):
self.client.connect_container_to_network(
container={'Id': container_id},
net_id=network_id,
aliases=['foo', 'bar'],
links=[('baz', 'quux')],
driver_opt={'com.docker-py.setting': 'yes'},
)
assert post.call_args[0][0] == (
f"{url_prefix}networks/{network_id}/connect"
)
assert json.loads(post.call_args[1]['data']) == {
'Container': container_id,
'EndpointConfig': {
'Aliases': ['foo', 'bar'],
'Links': ['baz:quux'],
'DriverOpts': {'com.docker-py.setting': 'yes'},
},
}
def test_disconnect_container_from_network(self):
network_id = 'abc12345'
container_id = 'def45678'
post = mock.Mock(return_value=response(status_code=201))
with mock.patch('docker.api.client.APIClient.post', post):
self.client.disconnect_container_from_network(
container={'Id': container_id}, net_id=network_id)
assert post.call_args[0][0] == (
f"{url_prefix}networks/{network_id}/disconnect"
)
assert json.loads(post.call_args[1]['data']) == {
'Container': container_id
}
| NetworkTest |
python | apache__airflow | providers/dingding/src/airflow/providers/dingding/hooks/dingding.py | {
"start": 1010,
"end": 5046
} | class ____(HttpHook):
"""
Send message using a DingTalk Custom Robot API.
.. seealso::
`How to get webhook token <https://open.dingtalk.com/document/robots/custom-robot-access>`__
:param dingding_conn_id: Dingding connection id that has access token in the password field,
and optional host name in host field, if host not set than default
``https://oapi.dingtalk.com`` will use.
:param message_type: Message type you want to send to Dingding, support five type so far
including ``text``, ``link``, ``markdown``, ``actionCard``, ``feedCard``.
:param message: The message send to chat group
:param at_mobiles: Remind specific users with this message
:param at_all: Remind all people in group or not. If True, will overwrite ``at_mobiles``
"""
conn_name_attr = "dingding_conn_id"
default_conn_name = "dingding_default"
conn_type = "dingding"
hook_name = "DingTalk Custom Robot (Dingding)"
def __init__(
self,
dingding_conn_id="dingding_default",
message_type: str = "text",
message: str | dict | None = None,
at_mobiles: list[str] | None = None,
at_all: bool = False,
*args,
**kwargs,
) -> None:
super().__init__(http_conn_id=dingding_conn_id, *args, **kwargs) # type: ignore[misc]
self.message_type = message_type
self.message = message
self.at_mobiles = at_mobiles
self.at_all = at_all
def _get_endpoint(self) -> str:
"""Get DingTalk Custom Robot endpoint for sending message."""
conn = self.get_connection(self.http_conn_id)
token = conn.password
if not token:
raise AirflowException(
"Dingding token is requests but get nothing, check you conn_id configuration."
)
return f"robot/send?access_token={token}"
def _build_message(self) -> str:
"""Build different type of DingTalk custom robot messages."""
if self.message_type in ["text", "markdown"]:
data = {
"msgtype": self.message_type,
self.message_type: {"content": self.message} if self.message_type == "text" else self.message,
"at": {"atMobiles": self.at_mobiles, "isAtAll": self.at_all},
}
else:
data = {"msgtype": self.message_type, self.message_type: self.message}
return json.dumps(data)
def get_conn(
self, headers: dict[Any, Any] | None = None, extra_options: dict[str, Any] | None = None
) -> Session:
"""
Overwrite HttpHook get_conn.
We just need base_url and headers, and not don't need generic params.
:param headers: additional headers to be passed through as a dictionary
:param extra_options: extra options to pass to the connection (ignored)
"""
conn = self.get_connection(self.http_conn_id)
self.base_url = conn.host if conn.host else "https://oapi.dingtalk.com"
session = requests.Session()
if headers:
session.headers.update(headers)
return session
def send(self) -> None:
"""Send DingTalk Custom Robot message."""
support_type = ["text", "link", "markdown", "actionCard", "feedCard"]
if self.message_type not in support_type:
raise ValueError(
f"DingdingWebhookHook only support {support_type} so far, but receive {self.message_type}"
)
data = self._build_message()
self.log.info("Sending Dingding type %s message %s", self.message_type, data)
resp = self.run(
endpoint=self._get_endpoint(), data=data, headers={"Content-Type": "application/json"}
)
# Success send message will return errcode = 0
if int(resp.json().get("errcode")) != 0:
raise AirflowException(f"Send Dingding message failed, receive error message {resp.text}")
self.log.info("Success Send Dingding message")
| DingdingHook |
python | OmkarPathak__pygorithm | tests/test_sorting.py | {
"start": 287,
"end": 2278
} | class ____:
def test_test_setup(self):
self.assertIsNotNone(getattr(self, 'sort', None))
self.assertIsNotNone(getattr(self, 'inplace', None))
self.assertIsNotNone(getattr(self, 'alph_support', None))
def _check_sort_list(self, arr, expected):
cp_arr = list(arr)
sarr = self.sort(cp_arr)
self.assertTrue(
isinstance(sarr, list), 'weird result type: ' + str(type(sarr)))
self.assertEqual(len(sarr), len(arr))
self.assertEqual(sarr, expected)
if self.inplace:
self.assertTrue(cp_arr is sarr, 'was not inplace')
else:
self.assertTrue(cp_arr is not sarr, 'was inplace')
self.assertEqual(cp_arr, arr, 'inplace modified list')
def _check_sort_alph(self, inp, expected):
if not self.alph_support:
return
self._check_sort_list(list(inp), list(expected))
def test_sort_empty(self):
self._check_sort_list([], [])
def test_sort_single(self):
self._check_sort_list([5], [5])
def test_sort_single_alph(self):
self._check_sort_alph('a', 'a')
def test_sort_two_inorder(self):
self._check_sort_list([1, 2], [1, 2])
def test_sort_two_outoforder(self):
self._check_sort_list([2, 1], [1, 2])
def test_sort_5_random_numeric(self):
arr = list(range(5))
random.shuffle(arr)
self._check_sort_list(arr, list(range(5)))
def test_sort_15_random_numeric(self):
arr = list(range(15))
random.shuffle(arr)
self._check_sort_list(arr, list(range(15)))
def test_sort_5_random_alph(self):
arr = ['a', 'b', 'c', 'd', 'e']
random.shuffle(arr)
self._check_sort_alph(''.join(arr), 'abcde')
def test_sort_15_random_alph(self):
arr = [chr(ord('a') + i) for i in range(15)]
exp = ''.join(arr)
random.shuffle(arr)
self._check_sort_alph(''.join(arr), exp)
| TestSortingAlgorithm |
python | jazzband__django-model-utils | model_utils/managers.py | {
"start": 2086,
"end": 7301
} | class ____(Generic[ModelT]):
model: type[ModelT]
subclasses: Sequence[str]
def __init__(self, *args: object, **kwargs: object):
super().__init__(*args, **kwargs)
self._iterable_class: type[BaseIterable[ModelT]] = InheritanceIterable
def select_subclasses(self, *subclasses: str | type[models.Model]) -> InheritanceQuerySet[ModelT]:
model: type[ModelT] = self.model
calculated_subclasses = self._get_subclasses_recurse(model)
# if none were passed in, we can just short circuit and select all
if not subclasses:
selected_subclasses = calculated_subclasses
else:
verified_subclasses: list[str] = []
for subclass in subclasses:
# special case for passing in the same model as the queryset
# is bound against. Rather than raise an error later, we know
# we can allow this through.
if subclass is model:
continue
if not isinstance(subclass, str):
subclass = self._get_ancestors_path(subclass)
if subclass in calculated_subclasses:
verified_subclasses.append(subclass)
else:
raise ValueError(
'{!r} is not in the discovered subclasses, tried: {}'.format(
subclass, ', '.join(calculated_subclasses))
)
selected_subclasses = verified_subclasses
new_qs = cast('InheritanceQuerySet[ModelT]', self)
if selected_subclasses:
new_qs = new_qs.select_related(*selected_subclasses)
new_qs.subclasses = selected_subclasses
return new_qs
def _chain(self, **kwargs: object) -> InheritanceQuerySet[ModelT]:
update = {}
for name in ['subclasses', '_annotated']:
if hasattr(self, name):
update[name] = getattr(self, name)
# django-stubs doesn't include this private API.
chained = super()._chain(**kwargs) # type: ignore[misc]
chained.__dict__.update(update)
return chained
def _clone(self) -> InheritanceQuerySet[ModelT]:
# django-stubs doesn't include this private API.
qs = super()._clone() # type: ignore[misc]
for name in ['subclasses', '_annotated']:
if hasattr(self, name):
setattr(qs, name, getattr(self, name))
return qs
def annotate(self, *args: Any, **kwargs: Any) -> InheritanceQuerySet[ModelT]:
qset = cast(QuerySet[ModelT], super()).annotate(*args, **kwargs)
qset._annotated = [a.default_alias for a in args] + list(kwargs.keys())
return qset
def _get_subclasses_recurse(self, model: type[models.Model]) -> list[str]:
"""
Given a Model class, find all related objects, exploring children
recursively, returning a `list` of strings representing the
relations for select_related
"""
related_objects = [
f for f in model._meta.get_fields()
if isinstance(f, OneToOneRel)]
rels = [
rel for rel in related_objects
if isinstance(rel.field, OneToOneField)
and issubclass(rel.field.model, model)
and model is not rel.field.model
and rel.parent_link
]
subclasses = []
for rel in rels:
for subclass in self._get_subclasses_recurse(rel.field.model):
subclasses.append(rel.get_accessor_name() + LOOKUP_SEP + subclass)
subclasses.append(rel.get_accessor_name())
return subclasses
def _get_ancestors_path(self, model: type[models.Model]) -> str:
"""
Serves as an opposite to _get_subclasses_recurse, instead walking from
the Model class up the Model's ancestry and constructing the desired
select_related string backwards.
"""
if not issubclass(model, self.model):
raise ValueError(
f"{model!r} is not a subclass of {self.model!r}")
ancestry: list[str] = []
# should be a OneToOneField or None
parent_link = model._meta.get_ancestor_link(self.model)
while parent_link is not None:
related = parent_link.remote_field
ancestry.insert(0, related.get_accessor_name())
parent_model = related.model
parent_link = parent_model._meta.get_ancestor_link(self.model)
return LOOKUP_SEP.join(ancestry)
def _get_sub_obj_recurse(self, obj: models.Model, s: str) -> ModelT | None:
rel, _, s = s.partition(LOOKUP_SEP)
try:
node = getattr(obj, rel)
except ObjectDoesNotExist:
return None
if s:
child = self._get_sub_obj_recurse(node, s)
return child
else:
return node
def get_subclass(self, *args: object, **kwargs: object) -> ModelT:
return self.select_subclasses().get(*args, **kwargs)
# Defining the 'model' attribute using a generic type triggers a bug in mypy:
# https://github.com/python/mypy/issues/9031
| InheritanceQuerySetMixin |
python | tensorflow__tensorflow | tensorflow/dtensor/python/tests/layout_test.py | {
"start": 12479,
"end": 16710
} | class ____(test_util.DTensorBaseTest, parameterized.TestCase):
def test_empty_sharding_spec_different_from_single_unsharded(self):
layout_str_single_unsharded = (
'sharding_specs:unsharded, mesh:' + _MESH_2D_STRING
)
layout_str_empty_sharding_spec = 'sharding_specs: mesh:' + _MESH_2D_STRING
self.assertNotEqual(
layout.Layout.from_string(layout_str_single_unsharded).to_string(),
layout.Layout.from_string(layout_str_empty_sharding_spec).to_string(),
)
@parameterized.named_parameters(
dict(
testcase_name='sharded_batch_and_x',
test_layout_str='sharding_specs:batch,x, mesh:' + _MESH_2D_STRING,
),
dict(
testcase_name='unsharded_explicit',
test_layout_str='sharding_specs:'
+ UNSHARDED
+ ','
+ UNSHARDED
+ ','
+ ' mesh:'
+ _MESH_2D_STRING,
),
)
def test_layout_reciprocal_string_rep(self, test_layout_str):
new_layout_str = layout.Layout.from_string(test_layout_str).to_string()
self.assertEqual(test_layout_str, new_layout_str)
def test_layout_pickle(self):
replicated = layout.Layout.replicated(_2D_MESH, rank=3)
pickled = pickle.dumps(replicated)
unpickled = pickle.loads(pickled)
self.assertEqual(replicated, unpickled)
def test_layout_repr(self):
tensor_layout = layout.Layout.batch_sharded(
_2D_MESH, _MESH_DIM_BATCH, rank=2)
self.assertIn('batch,unsharded', repr(tensor_layout))
def test_throws_for_non_mesh(self):
with self.assertRaisesRegex(ValueError, 'mesh is not a valid Mesh object'):
layout.Layout([_MESH_DIM_BATCH, _MESH_DIM_X], 'string_mesh')
def test_throws_for_repeated_dimension(self):
with self.assertRaisesRegex(ValueError, 'Mesh dimensions must be unique.'):
layout.Layout([_MESH_DIM_BATCH, _MESH_DIM_BATCH], _2D_MESH)
def test_throws_for_invalid_sharding_spec(self):
with self.assertRaisesRegex(
ValueError,
'A dimension sharding must either be a valid mesh dimension or ' +
'UNSHARDED.'):
layout.Layout(['WRONG_SHARDING_SPEC', 'UNSHARDED'], _2D_MESH)
def test_data_parallel_layout(self):
tensor_layout = layout.Layout.batch_sharded(
_2D_MESH, _MESH_DIM_BATCH, rank=2)
self.assertEqual(
tensor_layout.num_shards(0), _2D_MESH.dim_size(_MESH_DIM_BATCH))
self.assertEqual(tensor_layout.num_shards(1), 1)
def test_global_shape_from_local_shape(self):
tensor_layout = layout.Layout(
[_MESH_DIM_BATCH, _MESH_DIM_X, layout.UNSHARDED],
mesh=_2D_MESH,
)
self.assertEqual(
tensor_layout.global_shape_from_local_shape(
tensor_shape.TensorShape((1, 3, 5))
),
(2, 6, 5),
)
def test_local_shape_from_global_shape(self):
tensor_layout = layout.Layout(
[_MESH_DIM_BATCH, _MESH_DIM_X, layout.UNSHARDED],
mesh=_2D_MESH,
)
self.assertEqual(
tensor_layout.local_shape_from_global_shape(
tensor_shape.TensorShape((2, 6, 5))
),
(1, 3, 5),
)
def test_single_device_layout(self):
tensor_layout = layout.Layout.from_single_device_mesh(_SINGLE_DEVICE_MESH)
tensor_layout2 = layout.Layout.from_device(
_SINGLE_DEVICE_MESH.single_device
)
self.assertTrue(tensor_layout.is_single_device())
self.assertEqual(tensor_layout.mesh, _SINGLE_DEVICE_MESH)
self.assertEqual(tensor_layout, tensor_layout2)
def test_single_device_layout_from_string(self):
tensor_layout = layout.Layout.from_single_device_mesh(_SINGLE_DEVICE_MESH)
roundtrip = layout.Layout.from_string(tensor_layout.to_string())
self.assertEqual(roundtrip, tensor_layout)
def test_single_device_layout_from_proto(self):
tensor_layout = layout.Layout.from_single_device_mesh(_SINGLE_DEVICE_MESH)
roundtrip = layout.Layout.from_proto(tensor_layout.as_proto())
self.assertEqual(roundtrip, tensor_layout)
def test_parted_layout(self):
tensor_layout = layout.Layout.batch_sharded(
_2D_MESH, _MESH_DIM_BATCH, rank=2
)
parted_layout = tensor_layout.to_parted()
self.assertEqual(parted_layout.type, layout.LayoutType.PARTED)
| LayoutTest |
python | pyparsing__pyparsing | examples/shapes.py | {
"start": 633,
"end": 1737
} | class ____(Shape):
def area(self):
return 3.14159 * self.radius ** 2
import pyparsing as pp
ppc = pp.pyparsing_common
# use pyparsing-defined numeric expression that converts all parsed
# numeric values as floats
number = ppc.fnumber()
# Shape expressions:
# square : S <centerx> <centery> <side>
# rectangle: R <centerx> <centery> <width> <height>
# circle : C <centerx> <centery> <diameter>
squareDefn = "S" + number("centerx") + number("centery") + number("side")
rectDefn = (
"R" + number("centerx") + number("centery") + number("width") + number("height")
)
circleDefn = "C" + number("centerx") + number("centery") + number("diameter")
squareDefn.set_parse_action(Square)
rectDefn.set_parse_action(Rectangle)
def computeRadius(tokens):
tokens["radius"] = tokens.diameter / 2.0
circleDefn.set_parse_action(computeRadius, Circle)
shapeExpr = squareDefn | rectDefn | circleDefn
tests = """\
C 0 0 100
R 10 10 20 50
S -1 5 10""".splitlines()
for t in tests:
shape = shapeExpr.parse_string(t)[0]
print(shape)
print("Area:", shape.area())
print()
| Circle |
python | streamlit__streamlit | lib/tests/streamlit/elements/arrow_table_test.py | {
"start": 1310,
"end": 5228
} | class ____(DeltaGeneratorTestCase):
"""Test ability to marshall arrow protos."""
def test_dataframe_data(self):
df = mock_data_frame()
st.table(df)
proto = self.get_delta_from_queue().new_element.arrow_table
pd.testing.assert_frame_equal(convert_arrow_bytes_to_pandas_df(proto.data), df)
def test_pyarrow_table_data(self):
df = mock_data_frame()
table = pa.Table.from_pandas(df)
st.table(table)
proto = self.get_delta_from_queue().new_element.arrow_table
assert proto.data == convert_arrow_table_to_arrow_bytes(table)
def test_uuid(self):
df = mock_data_frame()
styler = df.style
styler.set_uuid("FAKE_UUID")
st.table(styler)
proto = self.get_delta_from_queue().new_element.arrow_table
assert proto.styler.uuid == "FAKE_UUID"
def test_caption(self):
df = mock_data_frame()
styler = df.style
styler.set_caption("FAKE_CAPTION")
st.table(styler)
proto = self.get_delta_from_queue().new_element.arrow_table
assert proto.styler.caption == "FAKE_CAPTION"
def test_table_styles(self):
df = mock_data_frame()
styler = df.style
# NOTE: If UUID is not set - a random UUID will be generated.
styler.set_uuid("FAKE_UUID")
styler.set_table_styles(
[{"selector": ".blank", "props": [("background-color", "red")]}]
)
st.table(styler)
proto = self.get_delta_from_queue().new_element.arrow_table
assert proto.styler.styles == "#T_FAKE_UUID .blank { background-color: red }"
def test_cell_styles(self):
df = mock_data_frame()
styler = df.style
# NOTE: If UUID is not set - a random UUID will be generated.
styler.set_uuid("FAKE_UUID")
styler.highlight_max(axis=None)
st.table(styler)
proto = self.get_delta_from_queue().new_element.arrow_table
assert (
proto.styler.styles == "#T_FAKE_UUID_row1_col2 { background-color: yellow }"
)
def test_display_values(self):
df = pd.DataFrame(
[[1, 2, 3], [4, 5, 6]],
)
styler = df.style.format("{:.2%}")
st.table(styler)
expected = pd.DataFrame(
[["100.00%", "200.00%", "300.00%"], ["400.00%", "500.00%", "600.00%"]],
)
proto = self.get_delta_from_queue().new_element.arrow_table
pd.testing.assert_frame_equal(
convert_arrow_bytes_to_pandas_df(proto.styler.display_values), expected
)
def test_table_uses_convert_anything_to_df(self):
"""Test that st.table uses convert_anything_to_df to convert input data."""
df = mock_data_frame()
with patch(
"streamlit.dataframe_util.convert_anything_to_pandas_df"
) as convert_anything_to_df:
convert_anything_to_df.return_value = df
st.table(df)
convert_anything_to_df.assert_called_once()
@parameterized.expand(
[
(True, ArrowProto.BorderMode.ALL),
(False, ArrowProto.BorderMode.NONE),
("horizontal", ArrowProto.BorderMode.HORIZONTAL),
]
)
def test_table_border_parameter(self, border, expected):
"""Test that st.table border parameter converts values correctly."""
df = mock_data_frame()
st.table(df, border=border)
proto = self.get_delta_from_queue().new_element.arrow_table
assert proto.border_mode == expected
def test_table_border_invalid_value(self):
"""Test that st.table raises StreamlitValueError for invalid border values."""
df = mock_data_frame()
with pytest.raises(
StreamlitValueError,
match=r"Invalid `border` value.*True, False, 'horizontal'",
):
st.table(df, border="invalid")
| ArrowTest |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/links/emr.py | {
"start": 1268,
"end": 1524
} | class ____(BaseAwsLink):
"""Helper class for constructing Amazon EMR Cluster Link."""
name = "EMR Cluster"
key = "emr_cluster"
format_str = BASE_AWS_CONSOLE_LINK + "/emr/home?region={region_name}#/clusterDetails/{job_flow_id}"
| EmrClusterLink |
python | django__django | tests/generic_views/test_base.py | {
"start": 1045,
"end": 1236
} | class ____(TemplateView):
def get(self, request):
return self.render_to_response({})
def get_template_names(self):
return ["generic_views/about.html"]
| AboutTemplateView |
python | scipy__scipy | scipy/interpolate/_bary_rational.py | {
"start": 8098,
"end": 25085
} | class ____(_BarycentricRational):
r"""
AAA real or complex rational approximation.
As described in [1]_, the AAA algorithm is a greedy algorithm for approximation by
rational functions on a real or complex set of points. The rational approximation is
represented in a barycentric form from which the roots (zeros), poles, and residues
can be computed.
Parameters
----------
x : 1D array_like, shape (n,)
1-D array containing values of the independent variable. Values may be real or
complex but must be finite.
y : 1D array_like, shape (n,)
Function values ``f(x)``. Infinite and NaN entries of `y`, and the
corresponding entries of `x`, will be discarded.
rtol : float, optional
Relative tolerance, defaults to ``eps**0.75``. If a small subset of the entries
in `y` is much larger than the rest, the default tolerance may be too
loose. If the tolerance is too tight then the approximation may contain
Froissart doublets or the algorithm may fail to converge entirely.
max_terms : int, optional
Maximum number of terms in the barycentric representation, defaults to ``100``.
Must be greater than or equal to one.
clean_up : bool, optional
Automatic removal of Froissart doublets, defaults to ``True``. See notes for
more details.
clean_up_tol : float, optional
Poles with residues less than this number times the geometric mean
of `values` times the minimum distance to `points` are deemed spurious by the
cleanup procedure, defaults to 1e-13. See notes for more details.
Attributes
----------
support_points : array
Support points of the approximation. These are a subset of the provided `x` at
which the approximation strictly interpolates `y`.
See notes for more details.
support_values : array
Value of the approximation at the `support_points`.
weights : array
Weights of the barycentric approximation.
errors : array
Error :math:`|f(z) - r(z)|_\infty` over `points` in the successive iterations
of AAA.
Warns
-----
RuntimeWarning
If `rtol` is not achieved in `max_terms` iterations.
See Also
--------
FloaterHormannInterpolator : Floater-Hormann barycentric rational interpolation.
pade : Padé approximation.
Notes
-----
At iteration :math:`m` (at which point there are :math:`m` terms in the both the
numerator and denominator of the approximation), the
rational approximation in the AAA algorithm takes the barycentric form
.. math::
r(z) = n(z)/d(z) =
\frac{\sum_{j=1}^m\ w_j f_j / (z - z_j)}{\sum_{j=1}^m w_j / (z - z_j)},
where :math:`z_1,\dots,z_m` are real or complex support points selected from
`x`, :math:`f_1,\dots,f_m` are the corresponding real or complex data values
from `y`, and :math:`w_1,\dots,w_m` are real or complex weights.
Each iteration of the algorithm has two parts: the greedy selection the next support
point and the computation of the weights. The first part of each iteration is to
select the next support point to be added :math:`z_{m+1}` from the remaining
unselected `x`, such that the nonlinear residual
:math:`|f(z_{m+1}) - n(z_{m+1})/d(z_{m+1})|` is maximised. The algorithm terminates
when this maximum is less than ``rtol * np.linalg.norm(f, ord=np.inf)``. This means
the interpolation property is only satisfied up to a tolerance, except at the
support points, where the approximation exactly interpolates the supplied data.
In the second part of each iteration, the weights :math:`w_j` are selected to solve
the least-squares problem
.. math::
\text{minimise}_{w_j}|fd - n| \quad \text{subject to} \quad
\sum_{j=1}^{m+1} w_j = 1,
over the unselected elements of `x`.
One of the challenges with working with rational approximations is the presence of
Froissart doublets, which are either poles with vanishingly small residues or
pole-zero pairs that are close enough together to nearly cancel, see [2]_. The
greedy nature of the AAA algorithm means Froissart doublets are rare. However, if
`rtol` is set too tight then the approximation will stagnate and many Froissart
doublets will appear. Froissart doublets can usually be removed by removing support
points and then resolving the least squares problem. The support point :math:`z_j`,
which is the closest support point to the pole :math:`a` with residue
:math:`\alpha`, is removed if the following is satisfied
.. math::
|\alpha| / |z_j - a| < \verb|clean_up_tol| \cdot \tilde{f},
where :math:`\tilde{f}` is the geometric mean of `support_values`.
References
----------
.. [1] Y. Nakatsukasa, O. Sete, and L. N. Trefethen, "The AAA algorithm for
rational approximation", SIAM J. Sci. Comp. 40 (2018), A1494-A1522.
:doi:`10.1137/16M1106122`
.. [2] J. Gilewicz and M. Pindor, Pade approximants and noise: rational functions,
J. Comp. Appl. Math. 105 (1999), pp. 285-297.
:doi:`10.1016/S0377-0427(02)00674-X`
Examples
--------
Here we reproduce a number of the numerical examples from [1]_ as a demonstration
of the functionality offered by this method.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import AAA
>>> import warnings
For the first example we approximate the gamma function on ``[-3.5, 4.5]`` by
extrapolating from 100 samples in ``[-1.5, 1.5]``.
>>> from scipy.special import gamma
>>> sample_points = np.linspace(-1.5, 1.5, num=100)
>>> r = AAA(sample_points, gamma(sample_points))
>>> z = np.linspace(-3.5, 4.5, num=1000)
>>> fig, ax = plt.subplots()
>>> ax.plot(z, gamma(z), label="Gamma")
>>> ax.plot(sample_points, gamma(sample_points), label="Sample points")
>>> ax.plot(z, r(z).real, '--', label="AAA approximation")
>>> ax.set(xlabel="z", ylabel="r(z)", ylim=[-8, 8], xlim=[-3.5, 4.5])
>>> ax.legend()
>>> plt.show()
We can also view the poles of the rational approximation and their residues:
>>> order = np.argsort(r.poles())
>>> r.poles()[order]
array([-3.81591039e+00+0.j , -3.00269049e+00+0.j ,
-1.99999988e+00+0.j , -1.00000000e+00+0.j ,
5.85842812e-17+0.j , 4.77485458e+00-3.06919376j,
4.77485458e+00+3.06919376j, 5.29095868e+00-0.97373072j,
5.29095868e+00+0.97373072j])
>>> r.residues()[order]
array([ 0.03658074 +0.j , -0.16915426 -0.j ,
0.49999915 +0.j , -1. +0.j ,
1. +0.j , -0.81132013 -2.30193429j,
-0.81132013 +2.30193429j, 0.87326839+10.70148546j,
0.87326839-10.70148546j])
For the second example, we call `AAA` with a spiral of 1000 points that wind 7.5
times around the origin in the complex plane.
>>> z = np.exp(np.linspace(-0.5, 0.5 + 15j*np.pi, 1000))
>>> r = AAA(z, np.tan(np.pi*z/2), rtol=1e-13)
We see that AAA takes 12 steps to converge with the following errors:
>>> r.errors.size
12
>>> r.errors
array([2.49261500e+01, 4.28045609e+01, 1.71346935e+01, 8.65055336e-02,
1.27106444e-02, 9.90889874e-04, 5.86910543e-05, 1.28735561e-06,
3.57007424e-08, 6.37007837e-10, 1.67103357e-11, 1.17112299e-13])
We can also plot the computed poles:
>>> fig, ax = plt.subplots()
>>> ax.plot(z.real, z.imag, '.', markersize=2, label="Sample points")
>>> ax.plot(r.poles().real, r.poles().imag, '.', markersize=5,
... label="Computed poles")
>>> ax.set(xlim=[-3.5, 3.5], ylim=[-3.5, 3.5], aspect="equal")
>>> ax.legend()
>>> plt.show()
We now demonstrate the removal of Froissart doublets using the `clean_up` method
using an example from [1]_. Here we approximate the function
:math:`f(z)=\log(2 + z^4)/(1 + 16z^4)` by sampling it at 1000 roots of unity. The
algorithm is run with ``rtol=0`` and ``clean_up=False`` to deliberately cause
Froissart doublets to appear.
>>> z = np.exp(1j*2*np.pi*np.linspace(0,1, num=1000))
>>> def f(z):
... return np.log(2 + z**4)/(1 - 16*z**4)
>>> with warnings.catch_warnings(): # filter convergence warning due to rtol=0
... warnings.simplefilter('ignore', RuntimeWarning)
... r = AAA(z, f(z), rtol=0, max_terms=50, clean_up=False)
>>> mask = np.abs(r.residues()) < 1e-13
>>> fig, axs = plt.subplots(ncols=2)
>>> axs[0].plot(r.poles().real[~mask], r.poles().imag[~mask], '.')
>>> axs[0].plot(r.poles().real[mask], r.poles().imag[mask], 'r.')
Now we call the `clean_up` method to remove Froissart doublets.
>>> with warnings.catch_warnings():
... warnings.simplefilter('ignore', RuntimeWarning)
... r.clean_up()
4 # may vary
>>> mask = np.abs(r.residues()) < 1e-13
>>> axs[1].plot(r.poles().real[~mask], r.poles().imag[~mask], '.')
>>> axs[1].plot(r.poles().real[mask], r.poles().imag[mask], 'r.')
>>> plt.show()
The left image shows the poles of the approximation computed with ``clean_up=False``,
with poles whose residue is less than ``10^-13`` in absolute value shown in red. The
right image shows the same poles after the `clean_up` method has been called.
"""
def __init__(self, x, y, *, rtol=None, max_terms=100, clean_up=True,
clean_up_tol=1e-13):
super().__init__(x, y, rtol=rtol, max_terms=max_terms)
if clean_up:
self.clean_up(clean_up_tol)
def _input_validation(self, x, y, rtol=None, max_terms=100, clean_up=True,
clean_up_tol=1e-13):
max_terms = operator.index(max_terms)
if max_terms < 1:
raise ValueError("`max_terms` must be an integer value greater than or "
"equal to one.")
if y.ndim != 1:
raise ValueError("`y` must be 1-D.")
super()._input_validation(x, y)
@property
def support_points(self):
return self._support_points
@property
def support_values(self):
return self._support_values
def _compute_weights(self, z, f, rtol, max_terms):
# Initialization for AAA iteration
M = np.size(z)
mask = np.ones(M, dtype=np.bool_)
dtype = np.result_type(z, f, 1.0)
rtol = np.finfo(dtype).eps**0.75 if rtol is None else rtol
atol = rtol * np.linalg.norm(f, ord=np.inf)
zj = np.empty(max_terms, dtype=dtype)
fj = np.empty(max_terms, dtype=dtype)
# Cauchy matrix
C = np.empty((M, max_terms), dtype=dtype)
# Loewner matrix
A = np.empty((M, max_terms), dtype=dtype)
errors = np.empty(max_terms, dtype=A.real.dtype)
R = np.repeat(np.mean(f), M)
ill_conditioned = False
ill_conditioned_tol = 1/(3*np.finfo(dtype).eps)
# AAA iteration
for m in range(max_terms):
# Introduce next support point
# Select next support point
jj = np.argmax(np.abs(f[mask] - R[mask]))
# Update support points
zj[m] = z[mask][jj]
# Update data values
fj[m] = f[mask][jj]
# Next column of Cauchy matrix
# Ignore errors as we manually interpolate at support points
with np.errstate(divide="ignore", invalid="ignore"):
C[:, m] = 1 / (z - z[mask][jj])
# Update mask
mask[np.nonzero(mask)[0][jj]] = False
# Update Loewner matrix
# Ignore errors as inf values will be masked out in SVD call
with np.errstate(invalid="ignore"):
A[:, m] = (f - fj[m]) * C[:, m]
# Compute weights
rows = mask.sum()
if rows >= m + 1:
# The usual tall-skinny case
if not ill_conditioned:
_, s, V = scipy.linalg.svd(
A[mask, : m + 1], full_matrices=False, check_finite=False,
)
with np.errstate(invalid="ignore", divide="ignore"):
if s[0]/s[-1] > ill_conditioned_tol:
ill_conditioned = True
if ill_conditioned:
col_norm = np.linalg.norm(A[mask, : m + 1], axis=0)
_, s, V = scipy.linalg.svd(
A[mask, : m + 1]/col_norm, full_matrices=False,
check_finite=False,
)
# Treat case of multiple min singular values
mm = s == np.min(s)
# Aim for non-sparse weight vector
wj = (V.conj()[mm, :].sum(axis=0) / np.sqrt(mm.sum())).astype(dtype)
if ill_conditioned:
wj /= col_norm
else:
# Fewer rows than columns
V = scipy.linalg.null_space(A[mask, : m + 1], check_finite=False)
nm = V.shape[-1]
# Aim for non-sparse wt vector
wj = V.sum(axis=-1) / np.sqrt(nm)
# Compute rational approximant
# Omit columns with `wj == 0`
i0 = wj != 0
# Ignore errors as we manually interpolate at support points
with np.errstate(invalid="ignore"):
# Numerator
N = C[:, : m + 1][:, i0] @ (wj[i0] * fj[: m + 1][i0])
# Denominator
D = C[:, : m + 1][:, i0] @ wj[i0]
# Interpolate at support points with `wj !=0`
D_inf = np.isinf(D) | np.isnan(D)
D[D_inf] = 1
N[D_inf] = f[D_inf]
R = N / D
# Check if converged
max_error = np.linalg.norm(f - R, ord=np.inf)
errors[m] = max_error
if max_error <= atol:
break
if m == max_terms - 1:
warnings.warn(f"AAA failed to converge within {max_terms} iterations.",
RuntimeWarning, stacklevel=2)
# Trim off unused array allocation
zj = zj[: m + 1]
fj = fj[: m + 1]
# Remove support points with zero weight
i_non_zero = wj != 0
self.errors = errors[: m + 1]
self._points = z
self._values = f
return zj[i_non_zero], fj[i_non_zero], wj[i_non_zero]
def clean_up(self, cleanup_tol=1e-13):
"""Automatic removal of Froissart doublets.
Parameters
----------
cleanup_tol : float, optional
Poles with residues less than this number times the geometric mean
of `values` times the minimum distance to `points` are deemed spurious by
the cleanup procedure, defaults to 1e-13.
Returns
-------
int
Number of Froissart doublets detected
"""
# Find negligible residues
geom_mean_abs_f = scipy.stats.gmean(np.abs(self._values))
Z_distances = np.min(
np.abs(np.subtract.outer(self.poles(), self._points)), axis=1
)
with np.errstate(divide="ignore", invalid="ignore"):
ii = np.nonzero(
np.abs(self.residues()) / Z_distances < cleanup_tol * geom_mean_abs_f
)
ni = ii[0].size
if ni == 0:
return ni
warnings.warn(f"{ni} Froissart doublets detected.", RuntimeWarning,
stacklevel=2)
# For each spurious pole find and remove closest support point
closest_spt_point = np.argmin(
np.abs(np.subtract.outer(self._support_points, self.poles()[ii])), axis=0
)
self._support_points = np.delete(self._support_points, closest_spt_point)
self._support_values = np.delete(self._support_values, closest_spt_point)
# Remove support points z from sample set
mask = np.logical_and.reduce(
np.not_equal.outer(self._points, self._support_points), axis=1
)
f = self._values[mask]
z = self._points[mask]
# Recompute the weights by re-solving the least-squares problem over the
# remaining support points.
m = self._support_points.size
# Cauchy matrix
C = 1 / np.subtract.outer(z, self._support_points)
# Loewner matrix
A = f[:, np.newaxis] * C - C * self._support_values
# Solve least-squares problem to obtain weights
_, _, V = scipy.linalg.svd(A, check_finite=False)
self.weights = np.conj(V[m - 1,:])
# reset roots, poles, residues as cached values will be wrong with new weights
self._poles = None
self._residues = None
self._roots = None
return ni
| AAA |
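The AAA entry above documents the barycentric form r(z) = Σ wj·fj/(z−zj) / Σ wj/(z−zj). As a rough sketch of how that form is evaluated, here is a small helper; `eval_barycentric` is a hypothetical name (it is not part of scipy), and the attribute names in the commented usage are the ones documented above.

```python
import numpy as np

def eval_barycentric(z, zj, fj, wj):
    """Evaluate r(z) = sum(wj*fj/(z - zj)) / sum(wj/(z - zj)) at the points z."""
    z = np.asarray(z)
    with np.errstate(divide="ignore", invalid="ignore"):
        # Cauchy matrix: rows index evaluation points, columns index support points.
        C = 1.0 / (z[..., None] - zj)
        r = (C @ (wj * fj)) / (C @ wj)
    # Patch evaluations that hit a support point exactly (0/0 above).
    for k, point in enumerate(zj):
        r = np.where(z == point, fj[k], r)
    return r

# Assuming `r_aaa = AAA(x, y)` with the attributes documented above:
# vals = eval_barycentric(z_new, r_aaa.support_points,
#                         r_aaa.support_values, r_aaa.weights)
```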
python | django__django | django/views/generic/list.py | {
"start": 7765,
"end": 8001
} | class ____(MultipleObjectTemplateResponseMixin, BaseListView):
"""
Render some list of objects, set by `self.model` or `self.queryset`.
`self.queryset` can actually be any iterable of items, not just a queryset.
"""
| ListView |
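A minimal sketch of how the `ListView` above is typically subclassed; `myapp`, `Article`, and the `published` field are hypothetical names used only for illustration.

```python
from django.views.generic import ListView

from myapp.models import Article  # hypothetical app and model


class ArticleListView(ListView):
    model = Article                            # supplies the default queryset
    paginate_by = 20                           # optional pagination
    template_name = "myapp/article_list.html"  # hypothetical template

    def get_queryset(self):
        # Narrow the default queryset; any iterable also works here.
        return super().get_queryset().filter(published=True)
```

It would be wired up in a URLconf with something like `path("articles/", ArticleListView.as_view())`.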
python | django__django | tests/migrations/test_migrations_squashed_complex/2_auto.py | {
"start": 35,
"end": 188
} | class ____(migrations.Migration):
dependencies = [("migrations", "1_auto")]
operations = [migrations.RunPython(migrations.RunPython.noop)]
| Migration |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/permissions.py | {
"start": 114,
"end": 748
} | class ____(graphene.ObjectType):
class Meta:
name = "Permission"
permission = graphene.NonNull(graphene.String)
value = graphene.NonNull(graphene.Boolean)
disabledReason = graphene.Field(graphene.String)
def __init__(self, permission: str, permission_result: PermissionResult):
check.str_param(permission, "permission")
check.inst_param(permission_result, "permission_result", PermissionResult)
super().__init__(
permission=permission,
value=permission_result.enabled,
disabledReason=permission_result.disabled_reason,
)
| GraphenePermission |
python | lazyprogrammer__machine_learning_examples | hmm_class/hmmd.py | {
"start": 597,
"end": 7291
} | class ____:
def __init__(self, M):
self.M = M # number of hidden states
def fit(self, X, max_iter=30):
t0 = datetime.now()
np.random.seed(123)
# train the HMM model using the Baum-Welch algorithm
# a specific instance of the expectation-maximization algorithm
# determine V, the vocabulary size
# assume observables are already integers from 0..V-1
# X is a jagged array of observed sequences
V = max(max(x) for x in X) + 1
N = len(X)
self.pi = np.ones(self.M) / self.M # initial state distribution
self.A = random_normalized(self.M, self.M) # state transition matrix
self.B = random_normalized(self.M, V) # output distribution
print("initial A:", self.A)
print("initial B:", self.B)
costs = []
for it in range(max_iter):
if it % 10 == 0:
print("it:", it)
alphas = []
betas = []
P = np.zeros(N)
for n in range(N):
x = X[n]
T = len(x)
alpha = np.zeros((T, self.M))
alpha[0] = self.pi*self.B[:,x[0]]
for t in range(1, T):
tmp1 = alpha[t-1].dot(self.A) * self.B[:, x[t]]
# tmp2 = np.zeros(self.M)
# for i in range(self.M):
# for j in range(self.M):
# tmp2[j] += alpha[t-1,i] * self.A[i,j] * self.B[j, x[t]]
# print "diff:", np.abs(tmp1 - tmp2).sum()
alpha[t] = tmp1
P[n] = alpha[-1].sum()
alphas.append(alpha)
beta = np.zeros((T, self.M))
beta[-1] = 1
for t in range(T - 2, -1, -1):
beta[t] = self.A.dot(self.B[:, x[t+1]] * beta[t+1])
betas.append(beta)
# print "P:", P
# break
assert(np.all(P > 0))
cost = np.sum(np.log(P))
costs.append(cost)
# now re-estimate pi, A, B
self.pi = sum((alphas[n][0] * betas[n][0]) / P[n] for n in range(N)) / N  # builtin sum: np.sum over a generator is deprecated
# print "self.pi:", self.pi
# break
den1 = np.zeros((self.M, 1))
den2 = np.zeros((self.M, 1))
a_num = 0
b_num = 0
for n in range(N):
x = X[n]
T = len(x)
# print "den shape:", den.shape
# test = (alphas[n][:-1] * betas[n][:-1]).sum(axis=0, keepdims=True).T
# print "shape (alphas[n][:-1] * betas[n][:-1]).sum(axis=0): ", test.shape
den1 += (alphas[n][:-1] * betas[n][:-1]).sum(axis=0, keepdims=True).T / P[n]
den2 += (alphas[n] * betas[n]).sum(axis=0, keepdims=True).T / P[n]
# tmp2 = np.zeros((self.M, 1))
# for i in range(self.M):
# for t in range(T-1):
# tmp2[i] += alphas[n][t,i] * betas[n][t,i]
# tmp2 /= P[n]
# # print "diff:", np.abs(tmp1 - tmp2).sum()
# den += tmp1
# numerator for A
a_num_n = np.zeros((self.M, self.M))
for i in range(self.M):
for j in range(self.M):
for t in range(T-1):
a_num_n[i,j] += alphas[n][t,i] * self.A[i,j] * self.B[j, x[t+1]] * betas[n][t+1,j]
a_num += a_num_n / P[n]
# numerator for B
# b_num_n = np.zeros((self.M, V))
# for i in range(self.M):
# for j in range(V):
# for t in range(T):
# if x[t] == j:
# b_num_n[i,j] += alphas[n][t][i] * betas[n][t][i]
b_num_n2 = np.zeros((self.M, V))
for i in range(self.M):
for t in range(T):
b_num_n2[i,x[t]] += alphas[n][t,i] * betas[n][t,i]
b_num += b_num_n2 / P[n]
# tmp1 = a_num / den1
# tmp2 = np.zeros(a_num.shape)
# for i in range(self.M):
# for j in range(self.M):
# tmp2[i,j] = a_num[i,j] / den1[i]
# print "diff:", np.abs(tmp1 - tmp2).sum()
# print "tmp1:", tmp1
# print "tmp2:", tmp2
self.A = a_num / den1
self.B = b_num / den2
# print "P:", P
# break
print("A:", self.A)
print("B:", self.B)
print("pi:", self.pi)
print("Fit duration:", (datetime.now() - t0))
plt.plot(costs)
plt.show()
def likelihood(self, x):
# returns log P(x | model)
# using the forward part of the forward-backward algorithm
T = len(x)
alpha = np.zeros((T, self.M))
alpha[0] = self.pi*self.B[:,x[0]]
for t in range(1, T):
alpha[t] = alpha[t-1].dot(self.A) * self.B[:, x[t]]
return alpha[-1].sum()
def likelihood_multi(self, X):
return np.array([self.likelihood(x) for x in X])
def log_likelihood_multi(self, X):
return np.log(self.likelihood_multi(X))
def get_state_sequence(self, x):
# returns the most likely state sequence given observed sequence x
# using the Viterbi algorithm
T = len(x)
delta = np.zeros((T, self.M))
psi = np.zeros((T, self.M))
delta[0] = self.pi*self.B[:,x[0]]
for t in range(1, T):
for j in range(self.M):
delta[t,j] = np.max(delta[t-1]*self.A[:,j]) * self.B[j, x[t]]
psi[t,j] = np.argmax(delta[t-1]*self.A[:,j])
# backtrack
states = np.zeros(T, dtype=np.int32)
states[T-1] = np.argmax(delta[T-1])
for t in range(T-2, -1, -1):
states[t] = psi[t+1, states[t+1]]
return states
def fit_coin():
X = []
for line in open('coin_data.txt'):
# 1 for H, 0 for T
x = [1 if e == 'H' else 0 for e in line.rstrip()]
X.append(x)
hmm = HMM(2)
hmm.fit(X)
L = hmm.log_likelihood_multi(X).sum()
print("LL with fitted params:", L)
# try true values
hmm.pi = np.array([0.5, 0.5])
hmm.A = np.array([[0.1, 0.9], [0.8, 0.2]])
hmm.B = np.array([[0.6, 0.4], [0.3, 0.7]])
L = hmm.log_likelihood_multi(X).sum()
print("LL with true params:", L)
# try viterbi
print("Best state sequence for:", X[0])
print(hmm.get_state_sequence(X[0]))
if __name__ == '__main__':
fit_coin()
| HMM |
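A short, hedged usage sketch for the discrete `HMM` class above. The original script reads `coin_data.txt`; the data below is synthetic and purely illustrative, and `fit` will also pop up a matplotlib plot of the training cost.

```python
import numpy as np

# Synthetic stand-in for coin_data.txt: 50 sequences of observations in {0, 1}.
rng = np.random.RandomState(0)
X = [list(rng.randint(0, 2, size=30)) for _ in range(50)]

model = HMM(2)                 # two hidden states
model.fit(X, max_iter=20)      # Baum-Welch (EM) training

print(model.log_likelihood_multi(X).sum())   # total log-likelihood
print(model.get_state_sequence(X[0]))        # Viterbi decoding of one sequence
```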
python | google__python-fire | fire/test_components.py | {
"start": 6752,
"end": 7054
} | class ____:
"""Test class for supporting callable."""
def __call__(self, **kwargs):
for key, value in kwargs.items():
print('{}: {}'.format(key, value))
def print_msg(self, msg):
print(msg)
CALLABLE_WITH_KEYWORD_ARGUMENT = CallableWithKeywordArgument()
| CallableWithKeywordArgument |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/utils.py | {
"start": 1299,
"end": 1401
} | class ____(Exception):
"""General class for Rate Limits errors"""
@dataclass
| GitHubAPILimitException |
python | great-expectations__great_expectations | tests/integration/cloud/rest_contracts/conftest.py | {
"start": 1322,
"end": 5156
} | class ____(str, enum.Enum):
DELETE = "DELETE"
GET = "GET"
PATCH = "PATCH"
POST = "POST"
PUT = "PUT"
@pytest.fixture
def cloud_base_url() -> str:
try:
return os.environ["GX_CLOUD_BASE_URL"]
except KeyError as e:
raise OSError("GX_CLOUD_BASE_URL is not set in this environment.") from e
@pytest.fixture
def cloud_access_token() -> str:
try:
return os.environ["GX_CLOUD_ACCESS_TOKEN"]
except KeyError as e:
raise OSError("GX_CLOUD_ACCESS_TOKEN is not set in this environment.") from e
@pytest.fixture(scope="module")
def gx_cloud_session() -> Session:
try:
access_token = os.environ["GX_CLOUD_ACCESS_TOKEN"]
except KeyError as e:
raise OSError("GX_CLOUD_ACCESS_TOKEN is not set in this environment.") from e
return create_session(access_token=access_token)
@pytest.fixture
def cloud_data_context(
cloud_base_url: str,
cloud_access_token: str,
pact_test: pact.Pact,
) -> CloudDataContext:
"""This is a real Cloud Data Context that points to the pact mock service instead of the Mercury API.""" # noqa: E501 # FIXME CoP
cloud_data_context = CloudDataContext(
cloud_base_url=cloud_base_url,
cloud_organization_id=EXISTING_ORGANIZATION_ID,
cloud_workspace_id=EXISTING_WORKSPACE_ID,
cloud_access_token=cloud_access_token,
)
# we can't override the base url to use the mock service due to
# reliance on env vars, so instead we override with a real project config
project_config = cloud_data_context.config
with pact_test:
context = CloudDataContext(
cloud_base_url=PACT_MOCK_SERVICE_URL,
cloud_organization_id=EXISTING_ORGANIZATION_ID,
cloud_workspace_id=EXISTING_WORKSPACE_ID,
cloud_access_token=cloud_access_token,
project_config=project_config,
)
project_manager.set_project(cloud_data_context)
return context
def get_git_commit_hash() -> str:
return subprocess.check_output(["git", "rev-parse", "HEAD"]).decode("ascii").strip()
@pytest.fixture(scope="package")
def pact_test(request) -> pact.Pact:
"""
pact_test can be used as a context manager and will:
1. write a new contract to the pact dir
2. verify the contract against the mock service
"""
pact_broker_base_url = "https://greatexpectations.pactflow.io"
broker_token: str
publish_to_broker: bool
if os.environ.get("PACT_BROKER_READ_WRITE_TOKEN"):
broker_token = os.environ.get("PACT_BROKER_READ_WRITE_TOKEN", "")
publish_to_broker = True
elif os.environ.get("PACT_BROKER_READ_ONLY_TOKEN"):
broker_token = os.environ.get("PACT_BROKER_READ_ONLY_TOKEN", "")
publish_to_broker = False
else:
pytest.skip(
"no pact credentials: set PACT_BROKER_READ_ONLY_TOKEN from greatexpectations.pactflow.io" # noqa: E501 # FIXME CoP
)
# Adding random id to the commit hash allows us to run the build
# and publish the contract more than once for a given commit.
# We need this because we have the ability to trigger re-run of tests
# in GH, and we run the release build process on the tagged commit.
version = f"{get_git_commit_hash()}_{str(uuid.uuid4())[:5]}"
_pact: pact.Pact = pact.Consumer(
name=CONSUMER_NAME,
version=version,
tag_with_git_branch=True,
auto_detect_version_properties=True,
).has_pact_with(
pact.Provider(name=PROVIDER_NAME),
broker_base_url=pact_broker_base_url,
broker_token=broker_token,
host_name=PACT_MOCK_HOST,
port=PACT_MOCK_PORT,
pact_dir=str(PACT_DIR),
publish_to_broker=publish_to_broker,
)
_pact.start_service()
yield _pact
_pact.stop_service()
| RequestMethods |
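As a rough sketch of how the `pact_test` fixture above is typically consumed, here is a generic pact-python consumer test. The provider state, path, and response body are hypothetical; `PACT_MOCK_SERVICE_URL` is the constant already referenced in the conftest.

```python
import requests

def test_get_datasource_contract(pact_test):
    expected = {"data": {"id": "some-id", "type": "datasource"}}  # illustrative body
    (
        pact_test.given("a Datasource with this id exists")
        .upon_receiving("a request to get a Datasource")
        .with_request(method="GET", path="/organizations/abc/datasources/some-id")
        .will_respond_with(200, body=expected)
    )
    with pact_test:  # starts the interaction and verifies it on exit
        resp = requests.get(
            f"{PACT_MOCK_SERVICE_URL}/organizations/abc/datasources/some-id"
        )
        assert resp.json() == expected
```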
python | sympy__sympy | sympy/holonomic/holonomicerrors.py | {
"start": 173,
"end": 476
} | class ____(BaseHolonomicError):
def __init__(self, holonomic, x0):
self.holonomic = holonomic
self.x0 = x0
def __str__(self):
s = 'A Power Series does not exist for '
s += str(self.holonomic)
s += ' about %s.' %self.x0
return s
| NotPowerSeriesError |
python | marshmallow-code__apispec | src/apispec/ext/marshmallow/openapi.py | {
"start": 938,
"end": 11671
} | class ____(FieldConverterMixin):
"""Adds methods for generating OpenAPI specification from marshmallow schemas and fields.
:param Version|str openapi_version: The OpenAPI version to use.
Should be in the form '2.x' or '3.x.x' to comply with the OpenAPI standard.
:param callable schema_name_resolver: Callable to generate the schema definition name.
Receives the `Schema` class and returns the name to be used in refs within
the generated spec. When working with circular referencing, this function
must not return `None` for schemas in a circular reference chain.
:param APISpec spec: An initialized spec. Nested schemas will be added to the spec
"""
def __init__(
self,
openapi_version: Version | str,
schema_name_resolver,
spec: APISpec,
) -> None:
self.openapi_version = (
Version(openapi_version)
if isinstance(openapi_version, str)
else openapi_version
)
self.schema_name_resolver = schema_name_resolver
self.spec = spec
self.init_attribute_functions()
self.init_parameter_attribute_functions()
# Schema references
self.refs: dict = {}
def init_parameter_attribute_functions(self) -> None:
self.parameter_attribute_functions = [
self.field2required,
self.list2param,
]
def add_parameter_attribute_function(self, func) -> None:
"""Method to add a field parameter function to the list of field
parameter functions that will be called on a field to convert it to a
field parameter.
:param func func: the field parameter function to add
The attribute function will be bound to the
`OpenAPIConverter <apispec.ext.marshmallow.openapi.OpenAPIConverter>`
instance.
It will be called for each field in a schema with
`self <apispec.ext.marshmallow.openapi.OpenAPIConverter>` and a
`field <marshmallow.fields.Field>` instance
positional arguments and `ret <dict>` keyword argument.
May mutate `ret`.
User added field parameter functions will be called after all built-in
field parameter functions in the order they were added.
"""
bound_func = func.__get__(self)
setattr(self, func.__name__, bound_func)
self.parameter_attribute_functions.append(bound_func)
def resolve_nested_schema(self, schema):
"""Return the OpenAPI representation of a marshmallow Schema.
Adds the schema to the spec if it isn't already present.
Typically will return a dictionary with the reference to the schema's
path in the spec unless the `schema_name_resolver` returns `None`, in
which case the returned dictionary will contain a JSON Schema Object
representation of the schema.
:param schema: schema to add to the spec
"""
try:
schema_instance = resolve_schema_instance(schema)
# If schema is a string and is not found in registry,
# assume it is a schema reference
except marshmallow.exceptions.RegistryError:
return schema
schema_key = make_schema_key(schema_instance)
if schema_key not in self.refs:
name = self.schema_name_resolver(schema)
if not name:
try:
json_schema = self.schema2jsonschema(schema_instance)
except RuntimeError as exc:
raise APISpecError(
f"Name resolver returned None for schema {schema} which is "
"part of a chain of circular referencing schemas. Please"
" ensure that the schema_name_resolver passed to"
" MarshmallowPlugin returns a string for all circular"
" referencing schemas."
) from exc
if getattr(schema, "many", False):
return {"type": "array", "items": json_schema}
return json_schema
name = get_unique_schema_name(self.spec.components, name)
self.spec.components.schema(name, schema=schema)
return self.get_ref_dict(schema_instance)
def schema2parameters(
self,
schema,
*,
location,
name: str = "body",
required: bool = False,
description: str | None = None,
):
"""Return an array of OpenAPI parameters given a given marshmallow
:class:`Schema <marshmallow.Schema>`. If `location` is "body", then return an array
of a single parameter; else return an array of a parameter for each included field in
the :class:`Schema <marshmallow.Schema>`.
In OpenAPI 3, only "query", "header", "path" or "cookie" are allowed for the location
of parameters. "requestBody" is used when fields are in the body.
https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#parameterObject
"""
location = __location_map__.get(location, location)
# OAS 2 body parameter
if location == "body":
param = {
"in": location,
"required": required,
"name": name,
"schema": self.resolve_nested_schema(schema),
}
if description:
param["description"] = description
return [param]
assert not getattr(schema, "many", False), (
"Schemas with many=True are only supported for 'json' location (aka 'in: body')"
)
fields = get_fields(schema, exclude_dump_only=True)
return [
self._field2parameter(
field_obj,
name=field_obj.data_key or field_name,
location=location,
)
for field_name, field_obj in fields.items()
]
def _field2parameter(
self, field: marshmallow.fields.Field, *, name: str, location: str
) -> dict:
"""Return an OpenAPI parameter as a `dict`, given a marshmallow
:class:`Field <marshmallow.Field>`.
https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#parameterObject
"""
ret: dict = {"in": location, "name": name}
prop = self.field2property(field)
if self.openapi_version.major < 3:
ret.update(prop)
else:
if "description" in prop:
ret["description"] = prop.pop("description")
if "deprecated" in prop:
ret["deprecated"] = prop.pop("deprecated")
ret["schema"] = prop
for param_attr_func in self.parameter_attribute_functions:
ret.update(param_attr_func(field, ret=ret))
return ret
def field2required(
self, field: marshmallow.fields.Field, **kwargs: typing.Any
) -> dict:
"""Return the dictionary of OpenAPI parameter attributes for a required field.
:param Field field: A marshmallow field.
:rtype: dict
"""
ret = {}
partial = getattr(field.parent, "partial", False)
ret["required"] = field.required and (
not partial or (is_collection(partial) and field.name not in partial)
)
return ret
def list2param(self, field: marshmallow.fields.Field, **kwargs: typing.Any) -> dict:
"""Return a dictionary of parameter properties from
:class:`List <marshmallow.fields.List>` fields.
:param Field field: A marshmallow field.
:rtype: dict
"""
ret: dict = {}
if isinstance(field, marshmallow.fields.List):
if self.openapi_version.major < 3:
ret["collectionFormat"] = "multi"
else:
ret["explode"] = True
ret["style"] = "form"
return ret
def schema2jsonschema(self, schema):
"""Return the JSON Schema Object for a given marshmallow
:class:`Schema <marshmallow.Schema>`. Schema may optionally
provide the ``title`` and ``description`` class Meta options.
https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#schemaObject
:param Schema schema: A marshmallow Schema instance
:rtype: dict, a JSON Schema Object
"""
fields = get_fields(schema)
Meta = getattr(schema, "Meta", None)
partial = getattr(schema, "partial", None)
jsonschema = self.fields2jsonschema(fields, partial=partial)
schema_instance = resolve_schema_instance(schema)
if hasattr(Meta, "title"):
jsonschema["title"] = Meta.title
if hasattr(Meta, "description"):
jsonschema["description"] = Meta.description
elif schema_instance.unknown != marshmallow.EXCLUDE:
jsonschema["additionalProperties"] = (
schema_instance.unknown == marshmallow.INCLUDE
)
return jsonschema
def fields2jsonschema(self, fields, *, partial=None):
"""Return the JSON Schema Object given a mapping between field names and
:class:`Field <marshmallow.Field>` objects.
:param dict fields: A dictionary of field name field object pairs
:param bool|tuple partial: Whether to override a field's required flag.
If `True` no fields will be set as required. If an iterable fields
in the iterable will not be marked as required.
:rtype: dict, a JSON Schema Object
"""
jsonschema = {"type": "object", "properties": {}}
for field_name, field_obj in fields.items():
observed_field_name = field_obj.data_key or field_name
prop = self.field2property(field_obj)
jsonschema["properties"][observed_field_name] = prop
if field_obj.required:
if not partial or (
is_collection(partial) and field_name not in partial
):
jsonschema.setdefault("required", []).append(observed_field_name)
if "required" in jsonschema:
jsonschema["required"].sort()
return jsonschema
def get_ref_dict(self, schema):
"""Method to create a dictionary containing a JSON reference to the
schema in the spec
"""
schema_key = make_schema_key(schema)
ref_schema = self.spec.components.get_ref("schema", self.refs[schema_key])
if getattr(schema, "many", False):
return {"type": "array", "items": ref_schema}
return ref_schema
| OpenAPIConverter |
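The `OpenAPIConverter` above is normally driven indirectly through apispec's `MarshmallowPlugin` rather than instantiated by hand. A minimal sketch (the `PetSchema` is illustrative):

```python
from apispec import APISpec
from apispec.ext.marshmallow import MarshmallowPlugin
from marshmallow import Schema, fields


class PetSchema(Schema):
    id = fields.Int(dump_only=True)
    name = fields.Str(required=True)


spec = APISpec(
    title="Pet API",
    version="1.0.0",
    openapi_version="3.0.2",
    plugins=[MarshmallowPlugin()],
)
spec.components.schema("Pet", schema=PetSchema)

# The plugin resolves the schema through OpenAPIConverter under the hood.
print(spec.to_dict()["components"]["schemas"]["Pet"])
```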
python | numba__numba | numba/parfors/parfor.py | {
"start": 64851,
"end": 70200
} | class ____:
"""Parfor subpass to convert setitem on Arrays
"""
def __init__(self, pass_states):
"""
Parameters
----------
pass_states : ParforPassStates
"""
self.pass_states = pass_states
self.rewritten = []
def run(self, blocks):
pass_states = self.pass_states
# convert expressions like A += ... where A is an array.
topo_order = find_topo_order(blocks)
# variables available in the program so far (used for finding map
# functions in array_expr lowering)
for label in topo_order:
block = blocks[label]
new_body = []
equiv_set = pass_states.array_analysis.get_equiv_set(label)
for instr in block.body:
if isinstance(instr, ir.Assign):
lhs = instr.target
expr = instr.value
if isinstance(expr, ir.Expr) and expr.op == 'inplace_binop':
loc = expr.loc
target = expr.lhs
value = expr.rhs
target_typ = pass_states.typemap[target.name]
value_typ = pass_states.typemap[value.name]
# Handle A op= ...
if isinstance(target_typ, types.npytypes.Array):
# RHS is an array
if isinstance(value_typ, types.npytypes.Array):
new_instr = self._inplace_binop_to_parfor(equiv_set,
loc, expr.immutable_fn, target, value)
self.rewritten.append(
dict(old=instr, new=new_instr,
reason='inplace_binop'),
)
instr = [new_instr, ir.Assign(target, lhs, loc)]
if isinstance(instr, list):
new_body.extend(instr)
else:
new_body.append(instr)
block.body = new_body
def _inplace_binop_to_parfor(self, equiv_set, loc, op, target, value):
"""generate parfor from setitem node with a boolean or slice array indices.
The value can be either a scalar or an array variable, and if a boolean index
is used for the latter case, the same index must be used for the value too.
"""
pass_states = self.pass_states
scope = target.scope
arr_typ = pass_states.typemap[target.name]
el_typ = arr_typ.dtype
init_block = ir.Block(scope, loc)
value_typ = pass_states.typemap[value.name]
size_vars = equiv_set.get_shape(target)
# generate loopnests and size variables from target correlations
index_vars, loopnests = _mk_parfor_loops(pass_states.typemap, size_vars, scope, loc)
# generate body
body_label = next_label()
body_block = ir.Block(scope, loc)
index_var, index_var_typ = _make_index_var(
pass_states.typemap, scope, index_vars, body_block)
# Read value.
value_var = ir.Var(scope, mk_unique_var("$value_var"), loc)
pass_states.typemap[value_var.name] = value_typ.dtype
getitem_call = ir.Expr.getitem(value, index_var, loc)
pass_states.calltypes[getitem_call] = signature(
value_typ.dtype, value_typ, index_var_typ)
body_block.body.append(ir.Assign(getitem_call, value_var, loc))
# Read target
target_var = ir.Var(scope, mk_unique_var("$target_var"), loc)
pass_states.typemap[target_var.name] = el_typ
getitem_call = ir.Expr.getitem(target, index_var, loc)
pass_states.calltypes[getitem_call] = signature(
el_typ, arr_typ, index_var_typ)
body_block.body.append(ir.Assign(getitem_call, target_var, loc))
# Create temp to hold result.
expr_out_var = ir.Var(scope, mk_unique_var("$expr_out_var"), loc)
pass_states.typemap[expr_out_var.name] = el_typ
# Create binop and assign result to temporary.
binop_expr = ir.Expr.binop(op, target_var, value_var, loc)
body_block.body.append(ir.Assign(binop_expr, expr_out_var, loc))
unified_type = self.pass_states.typingctx.unify_pairs(el_typ, value_typ.dtype)
pass_states.calltypes[binop_expr] = signature(
unified_type, unified_type, unified_type)
# Write to target
setitem_node = ir.SetItem(target, index_var, expr_out_var, loc)
pass_states.calltypes[setitem_node] = signature(
types.none, arr_typ, index_var_typ, el_typ)
body_block.body.append(setitem_node)
parfor = Parfor(loopnests, init_block, {}, loc, index_var, equiv_set,
('inplace_binop', ''), pass_states.flags)
parfor.loop_body = {body_label: body_block}
if config.DEBUG_ARRAY_OPT >= 1:
print("parfor from inplace_binop")
parfor.dump()
return parfor
def _type_getitem(self, args):
fnty = operator.getitem
return self.pass_states.typingctx.resolve_function_type(fnty, tuple(args), {})
def get_index_var(x):
return x.index if isinstance(x, ir.SetItem) else x.index_var
| ConvertInplaceBinop |
python | neetcode-gh__leetcode | python/0424-longest-repeating-character-replacement.py | {
"start": 0,
"end": 381
} | class ____:
def characterReplacement(self, s: str, k: int) -> int:
count = {}
l = 0
maxf = 0
for r in range(len(s)):
count[s[r]] = 1 + count.get(s[r], 0)
maxf = max(maxf, count[s[r]])
if (r - l + 1) - maxf > k:
count[s[l]] -= 1
l += 1
return (r - l + 1)
| Solution |
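The solution above is the classic sliding-window trick: the window `[l, r]` only grows while `(window length - count of its most frequent character) <= k`, so its final size is the answer. Illustrative calls:

```python
s = Solution()
print(s.characterReplacement("ABAB", 2))     # 4 - replace both A's (or both B's)
print(s.characterReplacement("AABABBA", 1))  # 4 - e.g. "AABA" -> "AAAA"
```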
python | scrapy__scrapy | scrapy/mail.py | {
"start": 1250,
"end": 7062
} | class ____:
def __init__(
self,
smtphost: str = "localhost",
mailfrom: str = "scrapy@localhost",
smtpuser: str | None = None,
smtppass: str | None = None,
smtpport: int = 25,
smtptls: bool = False,
smtpssl: bool = False,
debug: bool = False,
):
self.smtphost: str = smtphost
self.smtpport: int = smtpport
self.smtpuser: bytes | None = _to_bytes_or_none(smtpuser)
self.smtppass: bytes | None = _to_bytes_or_none(smtppass)
self.smtptls: bool = smtptls
self.smtpssl: bool = smtpssl
self.mailfrom: str = mailfrom
self.debug: bool = debug
@classmethod
def from_crawler(cls, crawler: Crawler) -> Self:
settings = crawler.settings
return cls(
smtphost=settings["MAIL_HOST"],
mailfrom=settings["MAIL_FROM"],
smtpuser=settings["MAIL_USER"],
smtppass=settings["MAIL_PASS"],
smtpport=settings.getint("MAIL_PORT"),
smtptls=settings.getbool("MAIL_TLS"),
smtpssl=settings.getbool("MAIL_SSL"),
)
def send(
self,
to: str | list[str],
subject: str,
body: str,
cc: str | list[str] | None = None,
attachs: Sequence[tuple[str, str, IO[Any]]] = (),
mimetype: str = "text/plain",
charset: str | None = None,
_callback: Callable[..., None] | None = None,
) -> Deferred[None] | None:
from twisted.internet import reactor
msg: MIMEBase = (
MIMEMultipart() if attachs else MIMENonMultipart(*mimetype.split("/", 1))
)
to = list(arg_to_iter(to))
cc = list(arg_to_iter(cc))
msg["From"] = self.mailfrom
msg["To"] = COMMASPACE.join(to)
msg["Date"] = formatdate(localtime=True)
msg["Subject"] = subject
rcpts = to[:]
if cc:
rcpts.extend(cc)
msg["Cc"] = COMMASPACE.join(cc)
if attachs:
if charset:
msg.set_charset(charset)
msg.attach(MIMEText(body, "plain", charset or "us-ascii"))
for attach_name, attach_mimetype, f in attachs:
part = MIMEBase(*attach_mimetype.split("/"))
part.set_payload(f.read())
Encoders.encode_base64(part)
part.add_header(
"Content-Disposition", "attachment", filename=attach_name
)
msg.attach(part)
else:
msg.set_payload(body, charset)
if _callback:
_callback(to=to, subject=subject, body=body, cc=cc, attach=attachs, msg=msg)
if self.debug:
logger.debug(
"Debug mail sent OK: To=%(mailto)s Cc=%(mailcc)s "
'Subject="%(mailsubject)s" Attachs=%(mailattachs)d',
{
"mailto": to,
"mailcc": cc,
"mailsubject": subject,
"mailattachs": len(attachs),
},
)
return None
dfd: Deferred[Any] = self._sendmail(
rcpts, msg.as_string().encode(charset or "utf-8")
)
dfd.addCallback(self._sent_ok, to, cc, subject, len(attachs))
dfd.addErrback(self._sent_failed, to, cc, subject, len(attachs))
reactor.addSystemEventTrigger("before", "shutdown", lambda: dfd)
return dfd
def _sent_ok(
self, result: Any, to: list[str], cc: list[str], subject: str, nattachs: int
) -> None:
logger.info(
"Mail sent OK: To=%(mailto)s Cc=%(mailcc)s "
'Subject="%(mailsubject)s" Attachs=%(mailattachs)d',
{
"mailto": to,
"mailcc": cc,
"mailsubject": subject,
"mailattachs": nattachs,
},
)
def _sent_failed(
self,
failure: Failure,
to: list[str],
cc: list[str],
subject: str,
nattachs: int,
) -> Failure:
errstr = str(failure.value)
logger.error(
"Unable to send mail: To=%(mailto)s Cc=%(mailcc)s "
'Subject="%(mailsubject)s" Attachs=%(mailattachs)d'
"- %(mailerr)s",
{
"mailto": to,
"mailcc": cc,
"mailsubject": subject,
"mailattachs": nattachs,
"mailerr": errstr,
},
)
return failure
def _sendmail(self, to_addrs: list[str], msg: bytes) -> Deferred[Any]:
from twisted.internet import reactor
msg_io = BytesIO(msg)
d: Deferred[Any] = Deferred()
factory = self._create_sender_factory(to_addrs, msg_io, d)
if self.smtpssl:
reactor.connectSSL(
self.smtphost, self.smtpport, factory, ssl.ClientContextFactory()
)
else:
reactor.connectTCP(self.smtphost, self.smtpport, factory)
return d
def _create_sender_factory(
self, to_addrs: list[str], msg: IO[bytes], d: Deferred[Any]
) -> ESMTPSenderFactory:
# imports twisted.internet.reactor
from twisted.mail.smtp import ESMTPSenderFactory # noqa: PLC0415
factory_keywords: dict[str, Any] = {
"heloFallback": True,
"requireAuthentication": False,
"requireTransportSecurity": self.smtptls,
"hostname": self.smtphost,
}
factory = ESMTPSenderFactory(
self.smtpuser,
self.smtppass,
self.mailfrom,
to_addrs,
msg,
d,
**factory_keywords,
)
factory.noisy = False
return factory
| MailSender |
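A short usage sketch for the `MailSender` above. The host and addresses are placeholders; with `debug=True` the message is only logged, so the sketch runs without a live SMTP server or Twisted reactor, whereas real delivery is normally done inside a running crawl (or via `MailSender.from_crawler(crawler)`).

```python
mailer = MailSender(
    smtphost="smtp.example.com",     # placeholder host
    mailfrom="bot@example.com",      # placeholder sender
    debug=True,                      # log instead of actually sending
)
mailer.send(
    to=["ops@example.com"],
    subject="Crawl finished",
    body="The spider has finished running.",
    cc=["team@example.com"],
)
```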
python | scikit-learn__scikit-learn | sklearn/manifold/_spectral_embedding.py | {
"start": 18481,
"end": 29959
} | class ____(BaseEstimator):
"""Spectral embedding for non-linear dimensionality reduction.
Forms an affinity matrix given by the specified function and
applies spectral decomposition to the corresponding graph laplacian.
The resulting transformation is given by the value of the
eigenvectors for each data point.
Note : Laplacian Eigenmaps is the actual algorithm implemented here.
Read more in the :ref:`User Guide <spectral_embedding>`.
Parameters
----------
n_components : int, default=2
The dimension of the projected subspace.
affinity : {'nearest_neighbors', 'rbf', 'precomputed', \
'precomputed_nearest_neighbors'} or callable, \
default='nearest_neighbors'
How to construct the affinity matrix.
- 'nearest_neighbors' : construct the affinity matrix by computing a
graph of nearest neighbors.
- 'rbf' : construct the affinity matrix by computing a radial basis
function (RBF) kernel.
- 'precomputed' : interpret ``X`` as a precomputed affinity matrix.
- 'precomputed_nearest_neighbors' : interpret ``X`` as a sparse graph
of precomputed nearest neighbors, and constructs the affinity matrix
by selecting the ``n_neighbors`` nearest neighbors.
- callable : use passed in function as affinity
the function takes in data matrix (n_samples, n_features)
and return affinity matrix (n_samples, n_samples).
gamma : float, default=None
Kernel coefficient for rbf kernel. If None, gamma will be set to
1/n_features.
random_state : int, RandomState instance or None, default=None
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when `eigen_solver ==
'amg'`, and for the K-Means initialization. Use an int to make
the results deterministic across calls (See
:term:`Glossary <random_state>`).
.. note::
When using `eigen_solver == 'amg'`,
it is necessary to also fix the global numpy seed with
`np.random.seed(int)` to get deterministic results. See
https://github.com/pyamg/pyamg/issues/139 for further
information.
eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems.
If None, then ``'arpack'`` is used.
eigen_tol : float, default="auto"
Stopping criterion for eigendecomposition of the Laplacian matrix.
If `eigen_tol="auto"` then the passed tolerance will depend on the
`eigen_solver`:
- If `eigen_solver="arpack"`, then `eigen_tol=0.0`;
- If `eigen_solver="lobpcg"` or `eigen_solver="amg"`, then
`eigen_tol=None` which configures the underlying `lobpcg` solver to
automatically resolve the value according to their heuristics. See,
:func:`scipy.sparse.linalg.lobpcg` for details.
Note that when using `eigen_solver="lobpcg"` or `eigen_solver="amg"`
values of `tol<1e-5` may lead to convergence issues and should be
avoided.
.. versionadded:: 1.2
n_neighbors : int, default=None
Number of nearest neighbors for nearest_neighbors graph building.
If None, n_neighbors will be set to max(n_samples/10, 1).
n_jobs : int, default=None
The number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
embedding_ : ndarray of shape (n_samples, n_components)
Spectral embedding of the training matrix.
affinity_matrix_ : ndarray of shape (n_samples, n_samples)
Affinity matrix constructed from samples or precomputed.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_neighbors_ : int
Number of nearest neighbors effectively used.
See Also
--------
Isomap : Non-linear dimensionality reduction through Isometric Mapping.
References
----------
- :doi:`A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
<10.1007/s11222-007-9033-z>`
- `On Spectral Clustering: Analysis and an algorithm, 2001
Andrew Y. Ng, Michael I. Jordan, Yair Weiss
<https://citeseerx.ist.psu.edu/doc_view/pid/796c5d6336fc52aa84db575fb821c78918b65f58>`_
- :doi:`Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
<10.1109/34.868688>`
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.manifold import SpectralEmbedding
>>> X, _ = load_digits(return_X_y=True)
>>> X.shape
(1797, 64)
>>> embedding = SpectralEmbedding(n_components=2)
>>> X_transformed = embedding.fit_transform(X[:100])
>>> X_transformed.shape
(100, 2)
"""
_parameter_constraints: dict = {
"n_components": [Interval(Integral, 1, None, closed="left")],
"affinity": [
StrOptions(
{
"nearest_neighbors",
"rbf",
"precomputed",
"precomputed_nearest_neighbors",
},
),
callable,
],
"gamma": [Interval(Real, 0, None, closed="left"), None],
"random_state": ["random_state"],
"eigen_solver": [StrOptions({"arpack", "lobpcg", "amg"}), None],
"eigen_tol": [Interval(Real, 0, None, closed="left"), StrOptions({"auto"})],
"n_neighbors": [Interval(Integral, 1, None, closed="left"), None],
"n_jobs": [None, Integral],
}
def __init__(
self,
n_components=2,
*,
affinity="nearest_neighbors",
gamma=None,
random_state=None,
eigen_solver=None,
eigen_tol="auto",
n_neighbors=None,
n_jobs=None,
):
self.n_components = n_components
self.affinity = affinity
self.gamma = gamma
self.random_state = random_state
self.eigen_solver = eigen_solver
self.eigen_tol = eigen_tol
self.n_neighbors = n_neighbors
self.n_jobs = n_jobs
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = True
tags.input_tags.pairwise = self.affinity in [
"precomputed",
"precomputed_nearest_neighbors",
]
return tags
def _get_affinity_matrix(self, X, Y=None):
"""Calculate the affinity matrix from data
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
If affinity is "precomputed"
X : array-like of shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Y: Ignored
Returns
-------
affinity_matrix of shape (n_samples, n_samples)
"""
if self.affinity == "precomputed":
self.affinity_matrix_ = X
return self.affinity_matrix_
if self.affinity == "precomputed_nearest_neighbors":
estimator = NearestNeighbors(
n_neighbors=self.n_neighbors, n_jobs=self.n_jobs, metric="precomputed"
).fit(X)
connectivity = estimator.kneighbors_graph(X=X, mode="connectivity")
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
return self.affinity_matrix_
if self.affinity == "nearest_neighbors":
if sparse.issparse(X):
warnings.warn(
"Nearest neighbors affinity currently does "
"not support sparse input, falling back to "
"rbf affinity"
)
self.affinity = "rbf"
else:
self.n_neighbors_ = (
self.n_neighbors
if self.n_neighbors is not None
else max(int(X.shape[0] / 10), 1)
)
self.affinity_matrix_ = kneighbors_graph(
X, self.n_neighbors_, include_self=True, n_jobs=self.n_jobs
)
# currently only symmetric affinity_matrix supported
self.affinity_matrix_ = 0.5 * (
self.affinity_matrix_ + self.affinity_matrix_.T
)
return self.affinity_matrix_
if self.affinity == "rbf":
self.gamma_ = self.gamma if self.gamma is not None else 1.0 / X.shape[1]
self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma_)
return self.affinity_matrix_
self.affinity_matrix_ = self.affinity(X)
return self.affinity_matrix_
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
If affinity is "precomputed"
X : {array-like, sparse matrix}, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
X = validate_data(self, X, accept_sparse="csr", ensure_min_samples=2)
random_state = check_random_state(self.random_state)
affinity_matrix = self._get_affinity_matrix(X)
self.embedding_ = _spectral_embedding(
affinity_matrix,
n_components=self.n_components,
eigen_solver=self.eigen_solver,
eigen_tol=self.eigen_tol,
random_state=random_state,
)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
If affinity is "precomputed"
X : {array-like, sparse matrix} of shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
X_new : array-like of shape (n_samples, n_components)
Spectral embedding of the training matrix.
"""
self.fit(X)
return self.embedding_
| SpectralEmbedding |
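A brief sketch of the `affinity="precomputed"` path described in the docstring above, building the (n_samples, n_samples) affinity matrix by hand with an RBF kernel:

```python
import numpy as np
from sklearn.manifold import SpectralEmbedding
from sklearn.metrics.pairwise import rbf_kernel

rng = np.random.RandomState(0)
X = rng.rand(100, 5)

affinity = rbf_kernel(X, gamma=0.5)     # symmetric (100, 100) affinity matrix

embedding = SpectralEmbedding(n_components=2, affinity="precomputed")
X_2d = embedding.fit_transform(affinity)
print(X_2d.shape)                       # (100, 2)
```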
python | ray-project__ray | python/ray/client_builder.py | {
"start": 798,
"end": 2693
} | class ____(BaseContext):
"""
Basic context manager for a ClientBuilder connection.
"""
dashboard_url: Optional[str]
python_version: str
ray_version: str
ray_commit: str
_num_clients: int
_context_to_restore: Optional[ray.util.client.RayAPIStub]
def __enter__(self) -> "ClientContext":
self._swap_context()
return self
def __exit__(self, *exc) -> None:
self._disconnect_with_context(False)
self._swap_context()
def disconnect(self) -> None:
self._swap_context()
self._disconnect_with_context(True)
self._swap_context()
def _swap_context(self):
if self._context_to_restore is not None:
self._context_to_restore = ray.util.client.ray.set_context(
self._context_to_restore
)
def _disconnect_with_context(self, force_disconnect: bool) -> None:
"""
Disconnect Ray. If it's a ray client and created with `allow_multiple`,
it will do nothing. For other cases this either disconnects from the
remote Client Server or shuts the current driver down.
"""
if ray.util.client.ray.is_connected():
if ray.util.client.ray.is_default() or force_disconnect:
# This is the only client connection
ray.util.client_connect.disconnect()
elif ray._private.worker.global_worker.node is None:
# Already disconnected.
return
elif ray._private.worker.global_worker.node.is_head():
logger.debug(
"The current Ray Cluster is scoped to this process. "
"Disconnecting is not possible as it will shutdown the "
"cluster."
)
else:
# This is only a driver connected to an existing cluster.
ray.shutdown()
@Deprecated
| ClientContext |
python | apache__airflow | providers/cncf/kubernetes/tests/unit/cncf/kubernetes/test_kubernetes_helper_functions.py | {
"start": 3002,
"end": 6549
} | class ____:
@pytest.mark.parametrize(
("val", "expected"),
[
("task-id", "task-id"), # no problem
("task_id", "task-id"), # underscores
("---task.id---", "task-id"), # dots
(".task.id", "task-id"), # leading dot invalid
("**task.id", "task-id"), # leading dot invalid
("-90Abc*&", "90abc"), # invalid ends
("90AçLbˆˆç˙ßߘ˜˙c*a", "90aclb-c-ssss-c-a"), # weird unicode
],
)
def test_create_pod_id_task_only(self, val, expected):
actual = create_unique_id(task_id=val, unique=False)
assert actual == expected
assert re.match(pod_name_regex, actual)
@pytest.mark.parametrize(
("val", "expected"),
[
("dag-id", "dag-id"), # no problem
("dag_id", "dag-id"), # underscores
("---dag.id---", "dag-id"), # dots
(".dag.id", "dag-id"), # leading dot invalid
("**dag.id", "dag-id"), # leading dot invalid
("-90Abc*&", "90abc"), # invalid ends
("90AçLbˆˆç˙ßߘ˜˙c*a", "90aclb-c-ssss-c-a"), # weird unicode
],
)
def test_create_pod_id_dag_only(self, val, expected):
actual = create_unique_id(dag_id=val, unique=False)
assert actual == expected
assert re.match(pod_name_regex, actual)
@pytest.mark.parametrize(
("dag_id", "task_id", "expected"),
[
("dag-id", "task-id", "dag-id-task-id"), # no problem
("dag_id", "task_id", "dag-id-task-id"), # underscores
("dag.id", "task.id", "dag-id-task-id"), # dots
(".dag.id", ".---task.id", "dag-id-task-id"), # leading dot invalid
("**dag.id", "**task.id", "dag-id-task-id"), # leading dot invalid
("-90Abc*&", "-90Abc*&", "90abc-90abc"), # invalid ends
("90AçLbˆˆç˙ßߘ˜˙c*a", "90AçLbˆˆç˙ßߘ˜˙c*a", "90aclb-c-ssss-c-a-90aclb-c-ssss-c-a"), # ugly
],
)
def test_create_pod_id_dag_and_task(self, dag_id, task_id, expected):
actual = create_unique_id(dag_id=dag_id, task_id=task_id, unique=False)
assert actual == expected
assert re.match(pod_name_regex, actual)
def test_create_pod_id_dag_too_long_with_suffix(self):
actual = create_unique_id("0" * 254)
assert len(actual) == 63
assert re.match(r"0{54}-[a-z0-9]{8}", actual)
assert re.match(pod_name_regex, actual)
def test_create_pod_id_dag_too_long_non_unique(self):
actual = create_unique_id("0" * 254, unique=False)
assert len(actual) == 63
assert re.match(r"0{63}", actual)
assert re.match(pod_name_regex, actual)
@pytest.mark.parametrize("unique", [True, False])
@pytest.mark.parametrize("length", [25, 100, 200, 300])
def test_create_pod_id(self, length, unique):
"""Test behavior of max_length and unique."""
dag_id = "dag-dag-dag-dag-dag-dag-dag-dag-dag-dag-dag-dag-dag-dag-dag-dag-"
task_id = "task-task-task-task-task-task-task-task-task-task-task-task-task-task-task-task-task-"
actual = create_unique_id(
dag_id=dag_id,
task_id=task_id,
max_length=length,
unique=unique,
)
base = f"{dag_id}{task_id}".strip("-")
if unique:
assert actual[:-9] == base[: length - 9].strip("-")
assert re.match(r"-[a-z0-9]{8}", actual[-9:])
else:
assert actual == base[:length]
| TestCreateUniqueId |
python | allegroai__clearml | clearml/backend_api/services/v2_9/queues.py | {
"start": 61834,
"end": 63235
} | class ____(Request):
"""
Removes a task entry from the queue.
:param queue: Queue id
:type queue: str
:param task: Task id
:type task: str
"""
_service = "queues"
_action = "remove_task"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"queue": {"description": "Queue id", "type": "string"},
"task": {"description": "Task id", "type": "string"},
},
"required": ["queue", "task"],
"type": "object",
}
def __init__(self, queue: str, task: str, **kwargs: Any) -> None:
super(RemoveTaskRequest, self).__init__(**kwargs)
self.queue = queue
self.task = task
@schema_property("queue")
def queue(self) -> str:
return self._property_queue
@queue.setter
def queue(self, value: str) -> None:
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
| RemoveTaskRequest |
python | plotly__plotly.py | plotly/graph_objs/layout/mapbox/_center.py | {
"start": 235,
"end": 2815
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.mapbox"
_path_str = "layout.mapbox.center"
_valid_props = {"lat", "lon"}
@property
def lat(self):
"""
Sets the latitude of the center of the map (in degrees North).
The 'lat' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["lat"]
@lat.setter
def lat(self, val):
self["lat"] = val
@property
def lon(self):
"""
Sets the longitude of the center of the map (in degrees East).
The 'lon' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["lon"]
@lon.setter
def lon(self, val):
self["lon"] = val
@property
def _prop_descriptions(self):
return """\
lat
Sets the latitude of the center of the map (in degrees
North).
lon
Sets the longitude of the center of the map (in degrees
East).
"""
def __init__(self, arg=None, lat=None, lon=None, **kwargs):
"""
Construct a new Center object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.mapbox.Center`
lat
Sets the latitude of the center of the map (in degrees
North).
lon
Sets the longitude of the center of the map (in degrees
East).
Returns
-------
Center
"""
super().__init__("center")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.mapbox.Center
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.mapbox.Center`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("lat", arg, lat)
self._set_property("lon", arg, lon)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Center |
python | getsentry__sentry | fixtures/safe_migrations_apps/safe_run_sql_app/migrations/0003_add_col.py | {
"start": 155,
"end": 383
} | class ____(CheckedMigration):
dependencies = [
("safe_run_sql_app", "0002_run_sql"),
]
operations = [
migrations.AlterField("testtable", "field", BoundedPositiveIntegerField(null=True)),
]
| Migration |
python | explosion__spaCy | spacy/schemas.py | {
"start": 18311,
"end": 19381
} | class ____(BaseModel):
# fmt: off
vocab_data: Optional[StrictStr] = Field(..., title="Path to JSON-formatted vocabulary file")
lookups: Optional[Lookups] = Field(..., title="Vocabulary lookups, e.g. lexeme normalization")
vectors: Optional[StrictStr] = Field(..., title="Path to vectors")
init_tok2vec: Optional[StrictStr] = Field(..., title="Path to pretrained tok2vec weights")
tokenizer: Dict[StrictStr, Any] = Field(..., help="Arguments to be passed into Tokenizer.initialize")
components: Dict[StrictStr, Dict[StrictStr, Any]] = Field(..., help="Arguments for TrainablePipe.initialize methods of pipeline components, keyed by component")
before_init: Optional[Callable[["Language"], "Language"]] = Field(..., title="Optional callback to modify nlp object before initialization")
after_init: Optional[Callable[["Language"], "Language"]] = Field(..., title="Optional callback to modify nlp object after initialization")
# fmt: on
class Config:
extra = "forbid"
arbitrary_types_allowed = True
| ConfigSchemaInit |
python | huggingface__transformers | src/transformers/trainer_pt_utils.py | {
"start": 18196,
"end": 19826
} | class ____(Sampler):
r"""
Sampler that samples indices in a way that groups together features of the dataset of roughly the same length while
keeping a bit of randomness.
"""
def __init__(
self,
batch_size: int,
dataset: Dataset | None = None,
lengths: list[int] | None = None,
model_input_name: str | None = None,
generator=None,
):
if dataset is None and lengths is None:
raise ValueError("One of dataset and lengths must be provided.")
self.batch_size = batch_size
if lengths is None:
model_input_name = model_input_name if model_input_name is not None else "input_ids"
if not isinstance(dataset[0], (dict, BatchEncoding)) or model_input_name not in dataset[0]:
raise ValueError(
"Can only automatically infer lengths for datasets whose items are dictionaries with an "
f"'{model_input_name}' key."
)
lengths = [len(feature[model_input_name]) for feature in dataset]
elif isinstance(lengths, torch.Tensor):
logger.info(
"If lengths is a torch.Tensor, LengthGroupedSampler will be slow. Converting lengths to list[int]..."
)
lengths = lengths.tolist()
self.lengths = lengths
self.generator = generator
def __len__(self):
return len(self.lengths)
def __iter__(self):
indices = get_length_grouped_indices(self.lengths, self.batch_size, generator=self.generator)
return iter(indices)
| LengthGroupedSampler |
python | sdispater__pendulum | src/pendulum/formatting/difference_formatter.py | {
"start": 495,
"end": 4813
} | class ____:
"""
Handles formatting differences in text.
"""
def __init__(self, locale: str = "en") -> None:
self._locale = Locale.load(locale)
def format(
self,
diff: Duration,
is_now: bool = True,
absolute: bool = False,
locale: str | Locale | None = None,
) -> str:
"""
Formats a difference.
:param diff: The difference to format
:param is_now: Whether the difference includes now
:param absolute: Whether it's an absolute difference or not
:param locale: The locale to use
"""
locale = self._locale if locale is None else Locale.load(locale)
if diff.years > 0:
unit = "year"
count = diff.years
if diff.months > MONTHS_THRESHOLD_FOR_HALF_YEAR:
count += 1
elif (diff.months == MONTHS_IN_NEARLY_A_YEAR) and (
(diff.weeks * DAYS_OF_WEEK + diff.remaining_days)
> DAYS_THRESHOLD_FOR_HALF_MONTH
):
unit = "year"
count = 1
elif diff.months > 0:
unit = "month"
count = diff.months
if (
diff.weeks * DAYS_OF_WEEK + diff.remaining_days
) >= DAYS_IN_NEARLY_A_MONTH:
count += 1
elif diff.weeks > 0:
unit = "week"
count = diff.weeks
if diff.remaining_days > DAYS_THRESHOLD_FOR_HALF_WEEK:
count += 1
elif diff.remaining_days > 0:
unit = "day"
count = diff.remaining_days
if diff.hours >= HOURS_IN_NEARLY_A_DAY:
count += 1
elif diff.hours > 0:
unit = "hour"
count = diff.hours
elif diff.minutes > 0:
unit = "minute"
count = diff.minutes
elif FEW_SECONDS_MAX < diff.remaining_seconds < SECONDS_OF_MINUTE:
unit = "second"
count = diff.remaining_seconds
else:
# We check if the "a few seconds" unit exists
time = locale.get("custom.units.few_second")
if time is not None:
if absolute:
return t.cast("str", time)
key = "custom"
is_future = diff.invert
if is_now:
if is_future:
key += ".from_now"
else:
key += ".ago"
else:
if is_future:
key += KEY_AFTER
else:
key += KEY_BEFORE
return t.cast("str", locale.get(key).format(time))
else:
unit = "second"
count = diff.remaining_seconds
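        # Never report a zero count; clamp to at least one unit below.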
if count == 0:
count = 1
if absolute:
key = f"translations.units.{unit}"
else:
is_future = diff.invert
if is_now:
# Relative to now, so we can use
# the CLDR data
key = f"translations.relative.{unit}"
if is_future:
key += KEY_FUTURE
else:
key += KEY_PAST
else:
# Absolute comparison
# So we have to use the custom locale data
# Checking for special pluralization rules
key = "custom.units_relative"
if is_future:
key += f".{unit}{KEY_FUTURE}"
else:
key += f".{unit}{KEY_PAST}"
trans = locale.get(key)
if not trans:
# No special rule
key = f"translations.units.{unit}.{locale.plural(count)}"
time = locale.get(key).format(count)
else:
time = trans[locale.plural(count)].format(count)
key = "custom"
if is_future:
key += KEY_AFTER
else:
key += KEY_BEFORE
return t.cast("str", locale.get(key).format(time))
key += f".{locale.plural(count)}"
return t.cast("str", locale.get(key).format(count))
| DifferenceFormatter |
python | allegroai__clearml | clearml/backend_api/services/v2_23/models.py | {
"start": 136040,
"end": 144264
} | class ____(Request):
"""
Update a model
:param model: Model id
:type model: str
:param name: Model name Unique within the company.
:type name: str
:param comment: Model comment
:type comment: str
:param tags: User-defined tags list
:type tags: Sequence[str]
:param system_tags: System tags list. This field is reserved for system use,
please don't use it.
:type system_tags: Sequence[str]
:param ready: Indication if the model is final and can be used by other tasks
Default is false.
:type ready: bool
:param created: Model creation time (UTC)
:type created: datetime.datetime
:param ui_cache: UI cache for this model
:type ui_cache: dict
    :param project: Project to which the model belongs
:type project: str
:param task: Associated task ID
:type task: str
:param iteration: Iteration (used to update task statistics if an associated
task is reported)
:type iteration: int
"""
_service = "models"
_action = "update"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"comment": {"description": "Model comment", "type": "string"},
"created": {
"description": "Model creation time (UTC) ",
"format": "date-time",
"type": "string",
},
"iteration": {
"description": "Iteration (used to update task statistics if an associated task is reported)",
"type": "integer",
},
"model": {"description": "Model id", "type": "string"},
"name": {
"description": "Model name Unique within the company.",
"type": "string",
},
"project": {
"description": "Project to which to model belongs",
"type": "string",
},
"ready": {
"default": False,
"description": "Indication if the model is final and can be used by other tasks Default is false.",
"type": "boolean",
},
"system_tags": {
"description": "System tags list. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User-defined tags list",
"items": {"type": "string"},
"type": "array",
},
"task": {"description": "Associated task ID", "type": "string"},
"ui_cache": {
"additionalProperties": True,
"description": "UI cache for this model",
"type": "object",
},
},
"required": ["model"],
"type": "object",
}
def __init__(
self,
model: str,
name: Optional[str] = None,
comment: Optional[str] = None,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
ready: Optional[bool] = False,
created: Optional[str] = None,
ui_cache: Optional[dict] = None,
project: Optional[str] = None,
task: Optional[str] = None,
iteration: Optional[int] = None,
**kwargs: Any
) -> None:
super(UpdateRequest, self).__init__(**kwargs)
self.model = model
self.name = name
self.comment = comment
self.tags = tags
self.system_tags = system_tags
self.ready = ready
self.created = created
self.ui_cache = ui_cache
self.project = project
self.task = task
self.iteration = iteration
@schema_property("model")
def model(self) -> str:
return self._property_model
@model.setter
def model(self, value: str) -> None:
if value is None:
self._property_model = None
return
self.assert_isinstance(value, "model", six.string_types)
self._property_model = value
@schema_property("name")
def name(self) -> Optional[str]:
return self._property_name
@name.setter
def name(self, value: Optional[str]) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("comment")
def comment(self) -> Optional[str]:
return self._property_comment
@comment.setter
def comment(self, value: Optional[str]) -> None:
if value is None:
self._property_comment = None
return
self.assert_isinstance(value, "comment", six.string_types)
self._property_comment = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("ready")
def ready(self) -> Optional[bool]:
return self._property_ready
@ready.setter
def ready(self, value: Optional[bool]) -> None:
if value is None:
self._property_ready = None
return
self.assert_isinstance(value, "ready", (bool,))
self._property_ready = value
@schema_property("created")
def created(self) -> Optional[str]:
return self._property_created
@created.setter
def created(self, value: Optional[str]) -> None:
if value is None:
self._property_created = None
return
self.assert_isinstance(value, "created", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_created = value
@schema_property("ui_cache")
def ui_cache(self) -> Optional[dict]:
return self._property_ui_cache
@ui_cache.setter
def ui_cache(self, value: Optional[dict]) -> None:
if value is None:
self._property_ui_cache = None
return
self.assert_isinstance(value, "ui_cache", (dict,))
self._property_ui_cache = value
@schema_property("project")
def project(self) -> Optional[str]:
return self._property_project
@project.setter
def project(self, value: Optional[str]) -> None:
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
@schema_property("task")
def task(self) -> Optional[str]:
return self._property_task
@task.setter
def task(self, value: Optional[str]) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("iteration")
def iteration(self) -> Optional[int]:
return self._property_iteration
@iteration.setter
def iteration(self, value: Optional[int]) -> None:
if value is None:
self._property_iteration = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "iteration", six.integer_types)
self._property_iteration = value
| UpdateRequest |
python | ansible__ansible | lib/ansible/modules/dnf.py | {
"start": 14066,
"end": 51642
} | class ____(YumDnf):
"""
DNF Ansible module back-end implementation
"""
def __init__(self, module):
# This populates instance vars for all argument spec params
super(DnfModule, self).__init__(module)
self._ensure_dnf()
self.pkg_mgr_name = "dnf"
self.with_modules = dnf.base.WITH_MODULES
def _sanitize_dnf_error_msg_install(self, spec, error):
"""
For unhandled dnf.exceptions.Error scenarios, there are certain error
messages we want to filter in an install scenario. Do that here.
"""
if (
to_text("no package matched") in to_text(error) or
to_text("No match for argument:") in to_text(error)
):
return "No package {0} available.".format(spec)
return error
def _package_dict(self, package):
"""Return a dictionary of information for the package."""
# NOTE: This no longer contains the 'dnfstate' field because it is
# already known based on the query type.
result = {
'name': package.name,
'arch': package.arch,
'epoch': str(package.epoch),
'release': package.release,
'version': package.version,
'repo': package.repoid}
# envra format for backwards compat
result['envra'] = '{epoch}:{name}-{version}-{release}.{arch}'.format(**result)
# keep nevra key for backwards compat as it was previously
# defined with a value in envra format
result['nevra'] = result['envra']
if package.installtime == 0:
result['yumstate'] = 'available'
else:
result['yumstate'] = 'installed'
return result
def _ensure_dnf(self):
locale = get_best_parsable_locale(self.module)
os.environ['LC_ALL'] = os.environ['LC_MESSAGES'] = locale
os.environ['LANGUAGE'] = os.environ['LANG'] = locale
global dnf
try:
import dnf
import dnf.const
import dnf.exceptions
import dnf.package
import dnf.subject
import dnf.util
HAS_DNF = True
except ImportError:
HAS_DNF = False
if HAS_DNF:
return
system_interpreters = ['/usr/libexec/platform-python',
'/usr/bin/python3',
'/usr/bin/python']
if not has_respawned():
# probe well-known system Python locations for accessible bindings, favoring py3
interpreter = probe_interpreters_for_module(system_interpreters, 'dnf')
if interpreter:
# respawn under the interpreter where the bindings should be found
respawn_module(interpreter)
# end of the line for this module, the process will exit here once the respawned module completes
# done all we can do, something is just broken (auto-install isn't useful anymore with respawn, so it was removed)
self.module.fail_json(
msg="Could not import the dnf python module using {0} ({1}). "
"Please install `python3-dnf` package or ensure you have specified the "
"correct ansible_python_interpreter. (attempted {2})"
.format(sys.executable, sys.version.replace('\n', ''), system_interpreters),
results=[]
)
def _configure_base(self, base, conf_file, disable_gpg_check, installroot='/', sslverify=True):
"""Configure the dnf Base object."""
conf = base.conf
# Change the configuration file path if provided, this must be done before conf.read() is called
if conf_file:
# Fail if we can't read the configuration file.
if not os.access(conf_file, os.R_OK):
self.module.fail_json(
msg="cannot read configuration file", conf_file=conf_file,
results=[],
)
else:
conf.config_file_path = conf_file
# Read the configuration file
conf.read()
# Turn off debug messages in the output
conf.debuglevel = 0
# Set whether to check gpg signatures
conf.gpgcheck = not disable_gpg_check
conf.localpkg_gpgcheck = not disable_gpg_check
# Don't prompt for user confirmations
conf.assumeyes = True
# Set certificate validation
conf.sslverify = sslverify
# Set installroot
if not os.path.isdir(installroot):
self.module.fail_json(msg=f"Installroot {installroot} must be a directory")
conf.installroot = installroot
# Load substitutions from the filesystem
conf.substitutions.update_from_etc(installroot)
# Handle different DNF versions immutable mutable datatypes and
# dnf v1/v2/v3
#
# In DNF < 3.0 are lists, and modifying them works
# In DNF >= 3.0 < 3.6 are lists, but modifying them doesn't work
# In DNF >= 3.6 have been turned into tuples, to communicate that modifying them doesn't work
#
# https://www.happyassassin.net/2018/06/27/adams-debugging-adventures-the-immutable-mutable-object/
#
# Set excludes
if self.exclude:
_excludes = list(conf.exclude)
_excludes.extend(self.exclude)
conf.exclude = _excludes
# Set disable_excludes
if self.disable_excludes:
_disable_excludes = list(conf.disable_excludes)
if self.disable_excludes not in _disable_excludes:
_disable_excludes.append(self.disable_excludes)
conf.disable_excludes = _disable_excludes
# Set releasever
if self.releasever is not None:
conf.substitutions['releasever'] = self.releasever
if conf.substitutions.get('releasever') is None:
self.module.warn(
'Unable to detect release version (use "releasever" option to specify release version)'
)
# values of conf.substitutions are expected to be strings
# setting this to an empty string instead of None appears to mimic the DNF CLI behavior
conf.substitutions['releasever'] = ''
# Honor installroot for dnf directories
# This will also perform variable substitutions in the paths
for opt in ('cachedir', 'logdir', 'persistdir'):
conf.prepend_installroot(opt)
# Set skip_broken (in dnf this is strict=0)
if self.skip_broken:
conf.strict = 0
# best and nobest are mutually exclusive
if self.nobest is not None:
conf.best = not self.nobest
elif self.best is not None:
conf.best = self.best
if self.download_only:
conf.downloadonly = True
if self.download_dir:
conf.destdir = self.download_dir
if self.cacheonly:
conf.cacheonly = True
# Default in dnf upstream is true
conf.clean_requirements_on_remove = self.autoremove
# Default in dnf (and module default) is True
conf.install_weak_deps = self.install_weak_deps
def _specify_repositories(self, base, disablerepo, enablerepo):
"""Enable and disable repositories matching the provided patterns."""
base.read_all_repos()
repos = base.repos
# Disable repositories
for repo_pattern in disablerepo:
if repo_pattern:
for repo in repos.get_matching(repo_pattern):
repo.disable()
# Enable repositories
for repo_pattern in enablerepo:
if repo_pattern:
for repo in repos.get_matching(repo_pattern):
repo.enable()
for repo in base.repos.iter_enabled():
if self.disable_gpg_check:
repo.gpgcheck = False
repo.repo_gpgcheck = False
def _base(self, conf_file, disable_gpg_check, disablerepo, enablerepo, installroot, sslverify):
"""Return a fully configured dnf Base object."""
base = dnf.Base()
self._configure_base(base, conf_file, disable_gpg_check, installroot, sslverify)
base.setup_loggers()
base.init_plugins(set(self.disable_plugin), set(self.enable_plugin))
base.pre_configure_plugins()
self._specify_repositories(base, disablerepo, enablerepo)
base.configure_plugins()
try:
if self.update_cache:
try:
base.update_cache()
except dnf.exceptions.RepoError as e:
self.module.fail_json(
msg="{0}".format(to_text(e)),
results=[],
rc=1
)
base.fill_sack(load_system_repo='auto')
except dnf.exceptions.RepoError as e:
self.module.fail_json(
msg="{0}".format(to_text(e)),
results=[],
rc=1
)
add_security_filters = getattr(base, "add_security_filters", None)
if callable(add_security_filters):
filters = {}
if self.bugfix:
filters.setdefault('types', []).append('bugfix')
if self.security:
filters.setdefault('types', []).append('security')
if filters:
add_security_filters('eq', **filters)
else:
filters = []
if self.bugfix:
key = {'advisory_type__eq': 'bugfix'}
filters.append(base.sack.query().upgrades().filter(**key))
if self.security:
key = {'advisory_type__eq': 'security'}
filters.append(base.sack.query().upgrades().filter(**key))
if filters:
base._update_security_filters = filters
return base
def list_items(self, command):
"""List package info based on the command."""
# Rename updates to upgrades
if command == 'updates':
command = 'upgrades'
# Return the corresponding packages
if command in ['installed', 'upgrades', 'available']:
results = [
self._package_dict(package)
for package in getattr(self.base.sack.query(), command)()]
# Return the enabled repository ids
elif command in ['repos', 'repositories']:
results = [
{'repoid': repo.id, 'state': 'enabled'}
for repo in self.base.repos.iter_enabled()]
# Return any matching packages
else:
packages = dnf.subject.Subject(command).get_best_query(self.base.sack)
results = [self._package_dict(package) for package in packages]
self.module.exit_json(msg="", results=results)
def _is_installed(self, pkg):
return bool(dnf.subject.Subject(pkg).get_best_query(sack=self.base.sack).installed())
def _is_newer_version_installed(self, pkg_spec):
# expects a versioned package spec
try:
if isinstance(pkg_spec, dnf.package.Package):
installed = sorted(self.base.sack.query().installed().filter(name=pkg_spec.name, arch=pkg_spec.arch))[-1]
return installed.evr_gt(pkg_spec)
else:
solution = dnf.subject.Subject(pkg_spec).get_best_solution(self.base.sack)
q = solution["query"]
if not q or not solution['nevra'] or solution['nevra'].has_just_name():
return False
installed = self.base.sack.query().installed().filter(name=solution['nevra'].name)
if not installed:
return False
return installed[0].evr_gt(q[0])
except IndexError:
return False
def _mark_package_install(self, pkg_spec, upgrade=False):
"""Mark the package for install."""
msg = ''
try:
if dnf.util.is_glob_pattern(pkg_spec):
# Special case for package specs that contain glob characters.
# For these we skip `is_installed` and `is_newer_version_installed` tests that allow for the
# allow_downgrade feature and pass the package specs to dnf.
# Since allow_downgrade is not available in dnf and while it is relatively easy to implement it for
# package specs that evaluate to a single package, trying to mimic what would the dnf machinery do
# for glob package specs and then filtering those for allow_downgrade appears to always
# result in naive/inferior solution.
# NOTE this has historically never worked even before https://github.com/ansible/ansible/pull/82725
# where our (buggy) custom code ignored wildcards for the installed checks.
                # TODO research how feasible it is to implement the above
if upgrade:
# for upgrade we pass the spec to both upgrade and install, to satisfy both available and installed
# packages evaluated from the glob spec
try:
self.base.upgrade(pkg_spec)
except dnf.exceptions.PackagesNotInstalledError:
pass
self.base.install(pkg_spec, strict=self.base.conf.strict)
elif self._is_newer_version_installed(pkg_spec):
if self.allow_downgrade:
self.base.install(pkg_spec, strict=self.base.conf.strict)
elif self._is_installed(pkg_spec):
if upgrade:
self.base.upgrade(pkg_spec)
else:
self.base.install(pkg_spec, strict=self.base.conf.strict)
except dnf.exceptions.MarkingError as e:
msg = "No package {0} available.".format(pkg_spec)
if self.base.conf.strict:
return {
'failed': True,
'msg': msg,
'failure': " ".join((pkg_spec, to_native(e))),
'rc': 1,
"results": []
}
except dnf.exceptions.DepsolveError as e:
return {
'failed': True,
'msg': "Depsolve Error occurred for package {0}.".format(pkg_spec),
'failure': " ".join((pkg_spec, to_native(e))),
'rc': 1,
"results": []
}
except dnf.exceptions.Error as e:
return {
'failed': True,
'msg': "Unknown Error occurred for package {0}.".format(pkg_spec),
'failure': " ".join((pkg_spec, to_native(e))),
'rc': 1,
"results": []
}
return {'failed': False, 'msg': msg, 'failure': '', 'rc': 0}
def _parse_spec_group_file(self):
pkg_specs, grp_specs, module_specs, filenames = [], [], [], []
already_loaded_comps = False # Only load this if necessary, it's slow
for name in self.names:
if '://' in name:
name = fetch_file(self.module, name)
filenames.append(name)
elif name.endswith(".rpm"):
filenames.append(name)
elif name.startswith('/'):
# dnf install /usr/bin/vi
installed = self.base.sack.query().filter(provides=name, file=name).installed().run()
if installed:
pkg_specs.append(installed[0].name) # should be only one?
elif not self.update_only:
# not installed, pass the filename for dnf to process
pkg_specs.append(name)
elif name.startswith("@") or ('/' in name):
if not already_loaded_comps:
self.base.read_comps()
already_loaded_comps = True
grp_env_mdl_candidate = name[1:].strip()
if self.with_modules:
mdl = self.module_base._get_modules(grp_env_mdl_candidate)
if mdl[0]:
module_specs.append(grp_env_mdl_candidate)
else:
grp_specs.append(grp_env_mdl_candidate)
else:
grp_specs.append(grp_env_mdl_candidate)
else:
pkg_specs.append(name)
return pkg_specs, grp_specs, module_specs, filenames
def _update_only(self, pkgs):
not_installed = []
for pkg in pkgs:
if self._is_installed(
self._package_dict(pkg)["nevra"] if isinstance(pkg, dnf.package.Package) else pkg
):
try:
if isinstance(pkg, dnf.package.Package):
self.base.package_upgrade(pkg)
else:
self.base.upgrade(pkg)
except Exception as e:
self.module.fail_json(
msg="Error occurred attempting update_only operation: {0}".format(to_native(e)),
results=[],
rc=1,
)
else:
not_installed.append(pkg)
return not_installed
def _install_remote_rpms(self, filenames):
try:
pkgs = self.base.add_remote_rpms(filenames)
if self.update_only:
self._update_only(pkgs)
else:
for pkg in pkgs:
if not (self._is_newer_version_installed(pkg) and not self.allow_downgrade):
self.base.package_install(pkg, strict=self.base.conf.strict)
except Exception as e:
self.module.fail_json(
msg="Error occurred attempting remote rpm operation: {0}".format(to_native(e)),
results=[],
rc=1,
)
def _is_module_installed(self, module_spec):
if self.with_modules:
module_spec = module_spec.strip()
module_list, nsv = self.module_base._get_modules(module_spec)
enabled_streams = self.base._moduleContainer.getEnabledStream(nsv.name)
if enabled_streams:
if nsv.stream:
if nsv.stream in enabled_streams:
return True # The provided stream was found
else:
return False # The provided stream was not found
else:
return True # No stream provided, but module found
return False # seems like a logical default
def ensure(self):
response = {
'msg': "",
'changed': False,
'results': [],
'rc': 0
}
# Accumulate failures. Package management modules install what they can
# and fail with a message about what they can't.
failure_response = {
'msg': "",
'failures': [],
'results': [],
'rc': 1
}
# Autoremove is called alone
# Jump to remove path where base.autoremove() is run
if not self.names and self.autoremove:
self.names = []
self.state = 'absent'
if self.names == ['*'] and self.state == 'latest':
try:
self.base.upgrade_all()
except dnf.exceptions.DepsolveError as e:
failure_response['msg'] = "Depsolve Error occurred attempting to upgrade all packages"
self.module.fail_json(**failure_response)
else:
pkg_specs, group_specs, module_specs, filenames = self._parse_spec_group_file()
pkg_specs = [p.strip() for p in pkg_specs]
filenames = [f.strip() for f in filenames]
groups = []
environments = []
for group_spec in (g.strip() for g in group_specs):
group = self.base.comps.group_by_pattern(group_spec)
if group:
groups.append(group.id)
else:
environment = self.base.comps.environment_by_pattern(group_spec)
if environment:
environments.append(environment.id)
else:
self.module.fail_json(
msg="No group {0} available.".format(group_spec),
results=[],
)
if self.state in ['installed', 'present']:
# Install files.
self._install_remote_rpms(filenames)
for filename in filenames:
response['results'].append("Installed {0}".format(filename))
# Install modules
if module_specs and self.with_modules:
for module in module_specs:
try:
if not self._is_module_installed(module):
response['results'].append("Module {0} installed.".format(module))
self.module_base.install([module])
self.module_base.enable([module])
except dnf.exceptions.MarkingErrors as e:
failure_response['failures'].append(' '.join((module, to_native(e))))
# Install groups.
for group in groups:
try:
group_pkg_count_installed = self.base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
if group_pkg_count_installed == 0:
response['results'].append("Group {0} already installed.".format(group))
else:
response['results'].append("Group {0} installed.".format(group))
except dnf.exceptions.DepsolveError as e:
failure_response['msg'] = "Depsolve Error occurred attempting to install group: {0}".format(group)
self.module.fail_json(**failure_response)
except dnf.exceptions.Error as e:
# In dnf 2.0 if all the mandatory packages in a group do
# not install, an error is raised. We want to capture
# this but still install as much as possible.
failure_response['failures'].append(" ".join((group, to_native(e))))
for environment in environments:
try:
self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
except dnf.exceptions.DepsolveError as e:
failure_response['msg'] = "Depsolve Error occurred attempting to install environment: {0}".format(environment)
self.module.fail_json(**failure_response)
except dnf.exceptions.Error as e:
failure_response['failures'].append(" ".join((environment, to_native(e))))
if module_specs and not self.with_modules:
# This means that the group or env wasn't found in comps
self.module.fail_json(
msg="No group {0} available.".format(module_specs[0]),
results=[],
)
# Install packages.
if self.update_only:
not_installed = self._update_only(pkg_specs)
for spec in not_installed:
response['results'].append("Packages providing %s not installed due to update_only specified" % spec)
else:
for pkg_spec in pkg_specs:
install_result = self._mark_package_install(pkg_spec)
if install_result['failed']:
if install_result['msg']:
failure_response['msg'] += install_result['msg']
failure_response['failures'].append(self._sanitize_dnf_error_msg_install(pkg_spec, install_result['failure']))
else:
if install_result['msg']:
response['results'].append(install_result['msg'])
elif self.state == 'latest':
# "latest" is same as "installed" for filenames.
self._install_remote_rpms(filenames)
for filename in filenames:
response['results'].append("Installed {0}".format(filename))
# Upgrade modules
if module_specs and self.with_modules:
for module in module_specs:
try:
if self._is_module_installed(module):
response['results'].append("Module {0} upgraded.".format(module))
self.module_base.upgrade([module])
except dnf.exceptions.MarkingErrors as e:
failure_response['failures'].append(' '.join((module, to_native(e))))
for group in groups:
try:
try:
self.base.group_upgrade(group)
response['results'].append("Group {0} upgraded.".format(group))
except dnf.exceptions.CompsError:
if not self.update_only:
# If not already installed, try to install.
group_pkg_count_installed = self.base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
if group_pkg_count_installed == 0:
response['results'].append("Group {0} already installed.".format(group))
else:
response['results'].append("Group {0} installed.".format(group))
except dnf.exceptions.Error as e:
failure_response['failures'].append(" ".join((group, to_native(e))))
for environment in environments:
try:
try:
self.base.environment_upgrade(environment)
except dnf.exceptions.CompsError:
# If not already installed, try to install.
self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
except dnf.exceptions.DepsolveError as e:
failure_response['msg'] = "Depsolve Error occurred attempting to install environment: {0}".format(environment)
except dnf.exceptions.Error as e:
failure_response['failures'].append(" ".join((environment, to_native(e))))
if self.update_only:
not_installed = self._update_only(pkg_specs)
for spec in not_installed:
response['results'].append("Packages providing %s not installed due to update_only specified" % spec)
else:
for pkg_spec in pkg_specs:
install_result = self._mark_package_install(pkg_spec, upgrade=True)
if install_result['failed']:
if install_result['msg']:
failure_response['msg'] += install_result['msg']
failure_response['failures'].append(self._sanitize_dnf_error_msg_install(pkg_spec, install_result['failure']))
else:
if install_result['msg']:
response['results'].append(install_result['msg'])
else:
# state == absent
if filenames:
self.module.fail_json(
msg="Cannot remove paths -- please specify package name.",
results=[],
)
# Remove modules
if module_specs and self.with_modules:
for module in module_specs:
try:
if self._is_module_installed(module):
response['results'].append("Module {0} removed.".format(module))
self.module_base.remove([module])
self.module_base.disable([module])
self.module_base.reset([module])
except dnf.exceptions.MarkingErrors as e:
failure_response['failures'].append(' '.join((module, to_native(e))))
for group in groups:
try:
self.base.group_remove(group)
except dnf.exceptions.CompsError:
# Group is already uninstalled.
pass
for environment in environments:
try:
self.base.environment_remove(environment)
except dnf.exceptions.CompsError:
# Environment is already uninstalled.
pass
for pkg_spec in pkg_specs:
try:
self.base.remove(pkg_spec)
except dnf.exceptions.MarkingError as e:
response['results'].append(f"{e.value}: {pkg_spec}")
# Like the dnf CLI we want to allow recursive removal of dependent
# packages
self.allowerasing = True
if self.autoremove:
self.base.autoremove()
try:
# NOTE for people who go down the rabbit hole of figuring out why
# resolve() throws DepsolveError here on dep conflict, but not when
# called from the CLI: It's controlled by conf.best. When best is
# set, Hawkey will fail the goal, and resolve() in dnf.base.Base
# will throw. Otherwise if it's not set, the update (install) will
# be (almost silently) removed from the goal, and Hawkey will report
# success. Note that in this case, similar to the CLI, skip_broken
# does nothing to help here, so we don't take it into account at
# all.
if not self.base.resolve(allow_erasing=self.allowerasing):
if failure_response['failures']:
failure_response['msg'] = 'Failed to install some of the specified packages'
self.module.fail_json(**failure_response)
response['msg'] = "Nothing to do"
self.module.exit_json(**response)
else:
response['changed'] = True
# If packages got installed/removed, add them to the results.
# We do this early so we can use it for both check_mode and not.
if self.download_only:
install_action = 'Downloaded'
else:
install_action = 'Installed'
for package in self.base.transaction.install_set:
response['results'].append("{0}: {1}".format(install_action, package))
for package in self.base.transaction.remove_set:
response['results'].append("Removed: {0}".format(package))
if failure_response['failures']:
failure_response['msg'] = 'Failed to install some of the specified packages'
self.module.fail_json(**failure_response)
if self.module.check_mode:
response['msg'] = "Check mode: No changes made, but would have if not in check mode"
self.module.exit_json(**response)
try:
if self.download_only and self.download_dir and self.base.conf.destdir:
dnf.util.ensure_dir(self.base.conf.destdir)
self.base.repos.all().pkgdir = self.base.conf.destdir
self.base.download_packages(self.base.transaction.install_set)
except dnf.exceptions.DownloadError as e:
failure_response['msg'] = "Failed to download packages: {0}".format(to_native(e))
self.module.fail_json(**failure_response)
# Validate GPG. This is NOT done in dnf.Base (it's done in the
# upstream CLI subclass of dnf.Base)
if not self.disable_gpg_check:
for package in self.base.transaction.install_set:
fail = False
gpgres, gpgerr = self.base._sig_check_pkg(package)
if gpgres == 0: # validated successfully
continue
elif gpgres == 1: # validation failed, install cert?
try:
self.base._get_key_for_package(package)
except dnf.exceptions.Error as e:
fail = True
else: # fatal error
fail = True
if fail:
msg = 'Failed to validate GPG signature for {0}: {1}'.format(package, gpgerr)
self.module.fail_json(msg)
if self.download_only:
# No further work left to do, and the results were already updated above.
# Just return them.
self.module.exit_json(**response)
else:
tid = self.base.do_transaction()
if tid is not None:
transaction = self.base.history.old([tid])[0]
if transaction.return_code:
failure_response['failures'].append(transaction.output())
if failure_response['failures']:
failure_response['msg'] = 'Failed to install some of the specified packages'
self.module.fail_json(**failure_response)
self.module.exit_json(**response)
except dnf.exceptions.DepsolveError as e:
failure_response['msg'] = "Depsolve Error occurred: {0}".format(to_native(e))
self.module.fail_json(**failure_response)
except dnf.exceptions.Error as e:
failure_response['msg'] = "Unknown Error occurred: {0}".format(to_native(e))
self.module.fail_json(**failure_response)
def run(self):
if self.update_cache and not self.names and not self.list:
self.base = self._base(
self.conf_file, self.disable_gpg_check, self.disablerepo,
self.enablerepo, self.installroot, self.sslverify
)
self.module.exit_json(
msg="Cache updated",
changed=False,
results=[],
rc=0
)
# Set state as installed by default
# This is not set in AnsibleModule() because the following shouldn't happen
# - dnf: autoremove=yes state=installed
if self.state is None:
self.state = 'installed'
if self.list:
self.base = self._base(
self.conf_file, self.disable_gpg_check, self.disablerepo,
self.enablerepo, self.installroot, self.sslverify
)
self.list_items(self.list)
else:
# Note: base takes a long time to run so we want to check for failure
# before running it.
if not self.download_only and not dnf.util.am_i_root():
self.module.fail_json(
msg="This command has to be run under the root user.",
results=[],
)
self.base = self._base(
self.conf_file, self.disable_gpg_check, self.disablerepo,
self.enablerepo, self.installroot, self.sslverify
)
if self.with_modules:
self.module_base = dnf.module.module_base.ModuleBase(self.base)
try:
self.ensure()
finally:
self.base.close()
def main():
# state=installed name=pkgspec
# state=removed name=pkgspec
# state=latest name=pkgspec
#
# informational commands:
# list=installed
# list=updates
# list=available
# list=repos
# list=pkgspec
yumdnf_argument_spec['argument_spec']['use_backend'] = dict(default='auto', choices=['auto', 'dnf', 'yum', 'yum4', 'dnf4', 'dnf5'])
module = AnsibleModule(
**yumdnf_argument_spec
)
module_implementation = DnfModule(module)
try:
module_implementation.run()
except dnf.exceptions.RepoError as de:
module.fail_json(
msg="Failed to synchronize repodata: {0}".format(to_native(de)),
rc=1,
results=[],
changed=False
)
if __name__ == '__main__':
main()
| DnfModule |
python | pypa__virtualenv | src/virtualenv/activation/python/__init__.py | {
"start": 153,
"end": 830
} | class ____(ViaTemplateActivator):
def templates(self):
yield "activate_this.py"
@staticmethod
def quote(string):
return repr(string)
def replacements(self, creator, dest_folder):
replacements = super().replacements(creator, dest_folder)
lib_folders = OrderedDict((os.path.relpath(str(i), str(dest_folder)), None) for i in creator.libs)
lib_folders = os.pathsep.join(lib_folders.keys())
replacements.update(
{
"__LIB_FOLDERS__": lib_folders,
"__DECODE_PATH__": "",
},
)
return replacements
__all__ = [
"PythonActivator",
]
| PythonActivator |
python | run-llama__llama_index | llama-index-core/llama_index/core/agent/workflow/multi_agent_workflow.py | {
"start": 2918,
"end": 28032
} | class ____(Workflow, PromptMixin, metaclass=AgentWorkflowMeta):
"""A workflow for managing multiple agents with handoffs."""
def __init__(
self,
agents: List[BaseWorkflowAgent],
initial_state: Optional[Dict] = None,
root_agent: Optional[str] = None,
handoff_prompt: Optional[Union[str, BasePromptTemplate]] = None,
handoff_output_prompt: Optional[Union[str, BasePromptTemplate]] = None,
state_prompt: Optional[Union[str, BasePromptTemplate]] = None,
timeout: Optional[float] = None,
output_cls: Optional[Type[BaseModel]] = None,
structured_output_fn: Optional[
Callable[[List[ChatMessage]], Dict[str, Any]]
] = None,
**workflow_kwargs: Any,
):
super().__init__(timeout=timeout, **workflow_kwargs)
if not agents:
raise ValueError("At least one agent must be provided")
# Raise an error if any agent has no name or no description
if len(agents) > 1 and any(
agent.name == DEFAULT_AGENT_NAME for agent in agents
):
raise ValueError("All agents must have a name in a multi-agent workflow")
if len(agents) > 1 and any(
agent.description == DEFAULT_AGENT_DESCRIPTION for agent in agents
):
raise ValueError(
"All agents must have a description in a multi-agent workflow"
)
if any(agent.initial_state for agent in agents):
raise ValueError(
"Initial state is not supported per-agent in AgentWorkflow"
)
self.agents = {cfg.name: cfg for cfg in agents}
if len(agents) == 1:
root_agent = agents[0].name
elif root_agent is None:
raise ValueError("Exactly one root agent must be provided")
else:
root_agent = root_agent
if root_agent not in self.agents:
raise ValueError(f"Root agent {root_agent} not found in provided agents")
self.root_agent = root_agent
self.initial_state = initial_state or {}
handoff_prompt = handoff_prompt or DEFAULT_HANDOFF_PROMPT
if isinstance(handoff_prompt, str):
handoff_prompt = PromptTemplate(handoff_prompt)
if "{agent_info}" not in handoff_prompt.get_template():
raise ValueError("Handoff prompt must contain {agent_info}")
self.handoff_prompt = handoff_prompt
handoff_output_prompt = handoff_output_prompt or DEFAULT_HANDOFF_OUTPUT_PROMPT
if isinstance(handoff_output_prompt, str):
handoff_output_prompt = PromptTemplate(handoff_output_prompt)
if (
"{to_agent}" not in handoff_output_prompt.get_template()
or "{reason}" not in handoff_output_prompt.get_template()
):
raise ValueError(
"Handoff output prompt must contain {to_agent} and {reason}"
)
self.handoff_output_prompt = handoff_output_prompt
state_prompt = state_prompt or DEFAULT_STATE_PROMPT
if isinstance(state_prompt, str):
state_prompt = PromptTemplate(state_prompt)
if (
"{state}" not in state_prompt.get_template()
or "{msg}" not in state_prompt.get_template()
):
raise ValueError("State prompt must contain {state} and {msg}")
self.state_prompt = state_prompt
self.output_cls = output_cls
self.structured_output_fn = structured_output_fn
if output_cls is not None and structured_output_fn is not None:
self.structured_output_fn = None
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {
"handoff_prompt": self.handoff_prompt,
"handoff_output_prompt": self.handoff_output_prompt,
"state_prompt": self.state_prompt,
}
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
return {agent.name: agent for agent in self.agents.values()}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
"""Update prompts."""
if "handoff_prompt" in prompts_dict:
self.handoff_prompt = prompts_dict["handoff_prompt"]
if "handoff_output_prompt" in prompts_dict:
self.handoff_output_prompt = prompts_dict["handoff_output_prompt"]
if "state_prompt" in prompts_dict:
self.state_prompt = prompts_dict["state_prompt"]
def _ensure_tools_are_async(
self, tools: Sequence[BaseTool]
) -> Sequence[AsyncBaseTool]:
"""Ensure all tools are async."""
return [adapt_to_async_tool(tool) for tool in tools]
def _get_handoff_tool(
self, current_agent: BaseWorkflowAgent
) -> Optional[AsyncBaseTool]:
"""Creates a handoff tool for the given agent."""
# Do not create a handoff tool if there is only one agent
if len(self.agents) == 1:
return None
agent_info = {cfg.name: cfg.description for cfg in self.agents.values()}
# Filter out agents that the current agent cannot handoff to
configs_to_remove = []
for name in agent_info:
if name == current_agent.name:
configs_to_remove.append(name)
elif (
current_agent.can_handoff_to is not None
and name not in current_agent.can_handoff_to
):
configs_to_remove.append(name)
for name in configs_to_remove:
agent_info.pop(name)
if not agent_info:
return None
fn_tool_prompt = self.handoff_prompt.format(agent_info=str(agent_info))
return FunctionTool.from_defaults(
async_fn=handoff, description=fn_tool_prompt, return_direct=True
)
async def get_tools(
self, agent_name: str, input_str: Optional[str] = None
) -> Sequence[AsyncBaseTool]:
"""Get tools for the given agent."""
agent_tools = self.agents[agent_name].tools or []
tools = [*agent_tools]
retriever = self.agents[agent_name].tool_retriever
if retriever is not None:
retrieved_tools = await retriever.aretrieve(input_str or "")
tools.extend(retrieved_tools)
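        # can_handoff_to=None means the agent may hand off to any other agent;
        # an empty list disables handoff, so no handoff tool is added then.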
if (
self.agents[agent_name].can_handoff_to
or self.agents[agent_name].can_handoff_to is None
):
handoff_tool = self._get_handoff_tool(self.agents[agent_name])
if handoff_tool:
tools.append(handoff_tool)
return self._ensure_tools_are_async(cast(List[BaseTool], tools))
async def _init_context(self, ctx: Context, ev: StartEvent) -> None:
"""Initialize the context once, if needed."""
if not await ctx.store.get("memory", default=None):
default_memory = ev.get("memory", default=None)
default_memory = default_memory or ChatMemoryBuffer.from_defaults(
llm=self.agents[self.root_agent].llm or Settings.llm
)
await ctx.store.set("memory", default_memory)
if not await ctx.store.get("agents", default=None):
await ctx.store.set("agents", list(self.agents.keys()))
if not await ctx.store.get("can_handoff_to", default=None):
await ctx.store.set(
"can_handoff_to",
{
agent: agent_cfg.can_handoff_to
for agent, agent_cfg in self.agents.items()
},
)
if not await ctx.store.get("state", default=None):
await ctx.store.set("state", self.initial_state)
if not await ctx.store.get("current_agent_name", default=None):
await ctx.store.set("current_agent_name", self.root_agent)
if not await ctx.store.get("handoff_output_prompt", default=None):
await ctx.store.set(
"handoff_output_prompt", self.handoff_output_prompt.get_template()
)
if not await ctx.store.get("max_iterations", default=None):
max_iterations = (
ev.get("max_iterations", default=None) or DEFAULT_MAX_ITERATIONS
)
await ctx.store.set("max_iterations", max_iterations)
# Reset the number of iterations
await ctx.store.set("num_iterations", 0)
# always set to false initially
await ctx.store.set("formatted_input_with_state", False)
async def _call_tool(
self,
ctx: Context,
tool: AsyncBaseTool,
tool_input: dict,
) -> ToolOutput:
"""Call the given tool with the given input."""
try:
if (
isinstance(tool, FunctionTool)
and tool.requires_context
and tool.ctx_param_name is not None
):
new_tool_input = {**tool_input}
new_tool_input[tool.ctx_param_name] = ctx
tool_output = await tool.acall(**new_tool_input)
else:
tool_output = await tool.acall(**tool_input)
except Exception as e:
event_exception = _get_waiting_for_event_exception()
if event_exception and isinstance(e, event_exception):
raise
tool_output = ToolOutput(
content=str(e),
tool_name=tool.metadata.get_name(),
raw_input=tool_input,
raw_output=str(e),
is_error=True,
exception=e,
)
return tool_output
@step
async def init_run(self, ctx: Context, ev: AgentWorkflowStartEvent) -> AgentInput:
"""Sets up the workflow and validates inputs."""
await self._init_context(ctx, ev)
user_msg: Optional[Union[str, ChatMessage]] = ev.get("user_msg")
chat_history: Optional[List[ChatMessage]] = ev.get("chat_history", [])
# Convert string user_msg to ChatMessage
if isinstance(user_msg, str):
user_msg = ChatMessage(role="user", content=user_msg)
# Add messages to memory
memory: BaseMemory = await ctx.store.get("memory")
# First set chat history if it exists
if chat_history:
await memory.aset(chat_history)
# Then add user message if it exists
if user_msg:
await memory.aput(user_msg)
content_str = "\n".join(
[
block.text
for block in user_msg.blocks
if isinstance(block, TextBlock)
]
)
await ctx.store.set("user_msg_str", content_str)
elif chat_history and not all(
message.role == "system" for message in chat_history
):
            # If no user message, use the last user message from chat history as user_msg_str
user_hist: List[ChatMessage] = [
msg for msg in chat_history if msg.role == "user"
]
content_str = "\n".join(
[
block.text
for block in user_hist[-1].blocks
if isinstance(block, TextBlock)
]
)
await ctx.store.set("user_msg_str", content_str)
else:
raise ValueError("Must provide either user_msg or chat_history")
# Get all messages from memory
input_messages = await memory.aget()
# send to the current agent
current_agent_name: str = await ctx.store.get("current_agent_name")
return AgentInput(input=input_messages, current_agent_name=current_agent_name)
@step
async def setup_agent(self, ctx: Context, ev: AgentInput) -> AgentSetup:
"""Main agent handling logic."""
current_agent_name = ev.current_agent_name
agent = self.agents[current_agent_name]
llm_input = [*ev.input]
if agent.system_prompt:
llm_input = [
ChatMessage(role="system", content=agent.system_prompt),
*llm_input,
]
state = await ctx.store.get("state", default=None)
formatted_input_with_state = await ctx.store.get(
"formatted_input_with_state", default=False
)
if state and not formatted_input_with_state:
# update last message with current state
for block in llm_input[-1].blocks[::-1]:
if isinstance(block, TextBlock):
block.text = self.state_prompt.format(state=state, msg=block.text)
break
await ctx.store.set("formatted_input_with_state", True)
return AgentSetup(
input=llm_input,
current_agent_name=ev.current_agent_name,
)
@step
async def run_agent_step(self, ctx: Context, ev: AgentSetup) -> AgentOutput:
"""Run the agent."""
memory: BaseMemory = await ctx.store.get("memory")
agent = self.agents[ev.current_agent_name]
user_msg_str = await ctx.store.get("user_msg_str")
tools = await self.get_tools(ev.current_agent_name, user_msg_str or "")
agent_output = await agent.take_step(
ctx,
ev.input,
tools,
memory,
)
ctx.write_event_to_stream(agent_output)
return agent_output
@step
async def parse_agent_output(
self, ctx: Context, ev: AgentOutput
) -> Union[StopEvent, AgentInput, ToolCall, None]:
max_iterations = await ctx.store.get(
"max_iterations", default=DEFAULT_MAX_ITERATIONS
)
num_iterations = await ctx.store.get("num_iterations", default=0)
num_iterations += 1
await ctx.store.set("num_iterations", num_iterations)
if num_iterations >= max_iterations:
raise WorkflowRuntimeError(
f"Max iterations of {max_iterations} reached! Either something went wrong, or you can "
"increase the max iterations with `.run(.., max_iterations=...)`"
)
memory: BaseMemory = await ctx.store.get("memory")
if ev.retry_messages:
# Retry with the given messages to let the LLM fix potential errors
history = await memory.aget()
user_msg_str = await ctx.store.get("user_msg_str")
agent_name: str = await ctx.store.get("current_agent_name")
return AgentInput(
input=[
*history,
ChatMessage(role="user", content=user_msg_str),
*ev.retry_messages,
],
current_agent_name=agent_name,
)
if not ev.tool_calls:
agent = self.agents[ev.current_agent_name]
memory = await ctx.store.get("memory")
# important: messages should always be fetched after calling finalize, otherwise they do not contain the agent's response
output = await agent.finalize(ctx, ev, memory)
messages = await memory.aget()
cur_tool_calls: List[ToolCallResult] = await ctx.store.get(
"current_tool_calls", default=[]
)
output.tool_calls.extend(cur_tool_calls) # type: ignore
await ctx.store.set("current_tool_calls", [])
if self.structured_output_fn is not None:
try:
if inspect.iscoroutinefunction(self.structured_output_fn):
output.structured_response = await self.structured_output_fn(
messages
)
else:
output.structured_response = cast(
Dict[str, Any], self.structured_output_fn(messages)
)
ctx.write_event_to_stream(
AgentStreamStructuredOutput(output=output.structured_response)
)
except Exception as e:
warnings.warn(
f"There was a problem with the generation of the structured output: {e}"
)
if self.output_cls is not None:
try:
llm_input = [*messages]
if agent.system_prompt:
llm_input = [
ChatMessage(role="system", content=agent.system_prompt),
*llm_input,
]
output.structured_response = await generate_structured_response(
messages=llm_input, llm=agent.llm, output_cls=self.output_cls
)
ctx.write_event_to_stream(
AgentStreamStructuredOutput(output=output.structured_response)
)
except Exception as e:
warnings.warn(
f"There was a problem with the generation of the structured output: {e}"
)
return StopEvent(result=output)
await ctx.store.set("num_tool_calls", len(ev.tool_calls))
for tool_call in ev.tool_calls:
ctx.send_event(
ToolCall(
tool_name=tool_call.tool_name,
tool_kwargs=tool_call.tool_kwargs,
tool_id=tool_call.tool_id,
)
)
return None
@step
async def call_tool(self, ctx: Context, ev: ToolCall) -> ToolCallResult:
"""Calls the tool and handles the result."""
ctx.write_event_to_stream(
ToolCall(
tool_name=ev.tool_name,
tool_kwargs=ev.tool_kwargs,
tool_id=ev.tool_id,
)
)
current_agent_name = await ctx.store.get("current_agent_name")
tools = await self.get_tools(current_agent_name, ev.tool_name)
tools_by_name = {tool.metadata.name: tool for tool in tools}
if ev.tool_name not in tools_by_name:
tool = None
result = ToolOutput(
content=f"Tool {ev.tool_name} not found. Please select a tool that is available.",
tool_name=ev.tool_name,
raw_input=ev.tool_kwargs,
raw_output=None,
is_error=True,
)
else:
tool = tools_by_name[ev.tool_name]
result = await self._call_tool(ctx, tool, ev.tool_kwargs)
result_ev = ToolCallResult(
tool_name=ev.tool_name,
tool_kwargs=ev.tool_kwargs,
tool_id=ev.tool_id,
tool_output=result,
return_direct=tool.metadata.return_direct if tool else False,
)
ctx.write_event_to_stream(result_ev)
return result_ev
@step
async def aggregate_tool_results(
self, ctx: Context, ev: ToolCallResult
) -> Union[AgentInput, StopEvent, None]:
"""Aggregate tool results and return the next agent input."""
num_tool_calls = await ctx.store.get("num_tool_calls", default=0)
if num_tool_calls == 0:
raise ValueError("No tool calls found, cannot aggregate results.")
tool_call_results: list[ToolCallResult] = ctx.collect_events( # type: ignore
ev, expected=[ToolCallResult] * num_tool_calls
)
if not tool_call_results:
return None
memory: BaseMemory = await ctx.store.get("memory")
agent_name: str = await ctx.store.get("current_agent_name")
agent: BaseWorkflowAgent = self.agents[agent_name]
# track tool calls made during a .run() call
cur_tool_calls: List[ToolCallResult] = await ctx.store.get(
"current_tool_calls", default=[]
)
cur_tool_calls.extend(tool_call_results)
await ctx.store.set("current_tool_calls", cur_tool_calls)
await agent.handle_tool_call_results(ctx, tool_call_results, memory)
# set the next agent, if needed
# the handoff tool sets this
next_agent_name = await ctx.store.get("next_agent", default=None)
if next_agent_name:
await ctx.store.set("current_agent_name", next_agent_name)
await ctx.store.set("next_agent", None)
if any(
tool_call_result.return_direct and not tool_call_result.tool_output.is_error
for tool_call_result in tool_call_results
):
# if any non-error tool call requested return_direct, take the first such result
return_direct_tool = next(
tool_call_result
for tool_call_result in tool_call_results
if tool_call_result.return_direct
and not tool_call_result.tool_output.is_error
)
# always finalize the agent, even if we're just handing off
result = AgentOutput(
response=ChatMessage(
role="assistant",
content=return_direct_tool.tool_output.content or "",
),
tool_calls=[
ToolSelection(
tool_id=t.tool_id,
tool_name=t.tool_name,
tool_kwargs=t.tool_kwargs,
)
for t in cur_tool_calls
],
raw=return_direct_tool.tool_output.raw_output,
current_agent_name=agent.name,
)
result = await agent.finalize(ctx, result, memory)
# we don't want to stop the system if we're just handing off
if return_direct_tool.tool_name != "handoff":
await ctx.store.set("current_tool_calls", [])
return StopEvent(result=result)
user_msg_str = await ctx.store.get("user_msg_str")
input_messages = await memory.aget(input=user_msg_str)
# get this again, in case it changed
agent_name = await ctx.store.get("current_agent_name")
agent = self.agents[agent_name]
return AgentInput(input=input_messages, current_agent_name=agent.name)
def run(
self,
user_msg: Optional[Union[str, ChatMessage]] = None,
chat_history: Optional[List[ChatMessage]] = None,
memory: Optional[BaseMemory] = None,
ctx: Optional[Context] = None,
max_iterations: Optional[int] = None,
start_event: Optional[AgentWorkflowStartEvent] = None,
**kwargs: Any,
) -> WorkflowHandler:
# Detect if HITL (human-in-the-loop) resumption is needed: if a running context was passed in, resume it
if ctx is not None and ctx.is_running:
return super().run(
ctx=ctx,
**kwargs,
)
else:
start_event = start_event or AgentWorkflowStartEvent(
user_msg=user_msg,
chat_history=chat_history,
memory=memory,
max_iterations=max_iterations,
**kwargs,
)
return super().run(
start_event=start_event,
ctx=ctx,
)
@classmethod
def from_tools_or_functions(
cls,
tools_or_functions: List[Union[BaseTool, Callable]],
llm: Optional[LLM] = None,
system_prompt: Optional[str] = None,
state_prompt: Optional[Union[str, BasePromptTemplate]] = None,
initial_state: Optional[dict] = None,
output_cls: Optional[Type[BaseModel]] = None,
structured_output_fn: Optional[
Callable[[List[ChatMessage]], Dict[str, Any]]
] = None,
timeout: Optional[float] = None,
verbose: bool = False,
) -> "AgentWorkflow":
"""
Initializes an AgentWorkflow from a list of tools or functions.
The workflow will be initialized with a single agent that uses the provided tools or functions.
If the LLM is a function calling model, the workflow will use the FunctionAgent.
Otherwise, it will use the ReActAgent.
"""
llm = llm or Settings.llm
agent_cls = (
FunctionAgent if llm.metadata.is_function_calling_model else ReActAgent
)
tools = [
FunctionTool.from_defaults(fn=tool)
if not isinstance(tool, BaseTool)
else tool
for tool in tools_or_functions
]
return cls(
agents=[
agent_cls(
name="Agent",
description="A single agent that uses the provided tools or functions.",
tools=tools,
llm=llm,
system_prompt=system_prompt,
)
],
output_cls=output_cls,
structured_output_fn=structured_output_fn,
state_prompt=state_prompt,
initial_state=initial_state,
timeout=timeout,
verbose=verbose,
)
| AgentWorkflow |
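A hedged usage sketch for the AgentWorkflow record above. The import paths and the OpenAI LLM wrapper are assumptions for illustration, not taken from the source; from_tools_or_functions wraps a plain function in a single FunctionAgent (or ReActAgent for non-function-calling models), and run() returns a handler that resolves to the final AgentOutput.

from llama_index.core.agent.workflow import AgentWorkflow  # assumed import path
from llama_index.llms.openai import OpenAI  # assumed LLM integration

def multiply(a: float, b: float) -> float:
    """Multiply two numbers."""
    return a * b

workflow = AgentWorkflow.from_tools_or_functions(
    [multiply],
    llm=OpenAI(model="gpt-4o-mini"),  # hypothetical model choice
    system_prompt="You are a calculator assistant.",
)

# Inside an async context:
# result = await workflow.run(user_msg="What is 3 * 7?")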
python | scipy__scipy | scipy/spatial/tests/test__plotutils.py | {
"start": 486,
"end": 3814
} | class ____:
points = [(0,0), (0,1), (1,0), (1,1)]
def test_delaunay(self):
# Smoke test
fig = plt.figure()
obj = Delaunay(self.points)
s_before = obj.simplices.copy()
r = delaunay_plot_2d(obj, ax=fig.gca())
assert_array_equal(obj.simplices, s_before) # shouldn't modify
assert_(r is fig)
delaunay_plot_2d(obj, ax=fig.gca())
def test_voronoi(self):
# Smoke test
fig = plt.figure()
obj = Voronoi(self.points)
r = voronoi_plot_2d(obj, ax=fig.gca())
assert_(r is fig)
voronoi_plot_2d(obj)
voronoi_plot_2d(obj, show_vertices=False)
def test_convex_hull(self):
# Smoke test
fig = plt.figure()
tri = ConvexHull(self.points)
r = convex_hull_plot_2d(tri, ax=fig.gca())
assert_(r is fig)
convex_hull_plot_2d(tri)
def test_gh_19653(self):
# aspect ratio sensitivity of voronoi_plot_2d
# infinite Voronoi edges
points = np.array([[245.059986986012, 10.971011721360075],
[320.49044143557785, 10.970258360366753],
[239.79023081978914, 13.108487516946218],
[263.38325791238833, 12.93241352743668],
[219.53334398353175, 13.346107628161008]])
vor = Voronoi(points)
fig = voronoi_plot_2d(vor)
ax = fig.gca()
infinite_segments = ax.collections[1].get_segments()
expected_segments = np.array([[[282.77256, -254.76904],
[282.729714, -4544.744698]],
[[282.77256014, -254.76904029],
[430.08561382, 4032.67658742]],
[[229.26733285, -20.39957514],
[-168.17167404, -4291.92545966]],
[[289.93433364, 5151.40412217],
[330.40553385, 9441.18887532]]])
assert_allclose(infinite_segments, expected_segments)
def test_gh_19653_smaller_aspect(self):
# reasonable behavior for less extreme aspect
# ratio
points = np.array([[24.059986986012, 10.971011721360075],
[32.49044143557785, 10.970258360366753],
[23.79023081978914, 13.108487516946218],
[26.38325791238833, 12.93241352743668],
[21.53334398353175, 13.346107628161008]])
vor = Voronoi(points)
fig = voronoi_plot_2d(vor)
ax = fig.gca()
infinite_segments = ax.collections[1].get_segments()
expected_segments = np.array([[[28.274979, 8.335027],
[28.270463, -42.19763338]],
[[28.27497869, 8.33502697],
[43.73223829, 56.44555501]],
[[22.51805823, 11.8621754],
[-12.09266506, -24.95694485]],
[[29.53092448, 78.46952378],
[33.82572726, 128.81934455]]])
assert_allclose(infinite_segments, expected_segments)
| TestPlotting |
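The smoke tests above exercise scipy.spatial's 2-D plotting helpers; a minimal standalone sketch, assuming matplotlib is available (the point values are arbitrary):

import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial import Voronoi, voronoi_plot_2d

points = np.random.rand(10, 2)                    # ten random 2-D sites
vor = Voronoi(points)
fig = voronoi_plot_2d(vor, show_vertices=False)   # returns the matplotlib Figure
plt.show()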
python | pytorch__pytorch | test/inductor/test_graph_transform_observer.py | {
"start": 598,
"end": 2280
} | class ____(TestCase):
def test_sdpa_rewriter(self):
if not (
HAS_CUDA_AND_TRITON
and PLATFORM_SUPPORTS_FUSED_ATTENTION
and HAS_PYDOT
and HAS_DOT
):
return
def dot_prod_attention(
query: torch.Tensor, key: torch.Tensor, value: torch.Tensor
) -> torch.Tensor:
"""Input tensors assumed to have shape (batch_size, n_head, seq_len, embed_dim)"""
return (
torch.matmul(query, key.transpose(-2, -1))
.div(math.sqrt(key.shape[-1]))
.softmax(dim=-1)
.matmul(value)
)
log_url = tempfile.mkdtemp()
inductor_config.trace.log_url_for_graph_xform = log_url
inductor_config.force_disable_caches = True
compiled_fn = torch.compile(dot_prod_attention, fullgraph=True)
tensor_shape = (4, 2, 16, 32)
q = torch.randn(tensor_shape, device="cuda")
k = torch.randn(tensor_shape, device="cuda")
v = torch.randn(tensor_shape, device="cuda")
compiled_fn(q, k, v)
found_input_svg = False
found_output_svg = False
for filepath_object in glob.glob(log_url + "/*"):
if os.path.isfile(filepath_object):
if filepath_object.endswith("input_graph.dot"):
found_input_svg = True
elif filepath_object.endswith("output_graph.dot"):
found_output_svg = True
self.assertTrue(found_input_svg)
self.assertTrue(found_output_svg)
if __name__ == "__main__":
if IS_LINUX:
run_tests()
| TestGraphTransformObserver |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 96840,
"end": 96973
} | class ____(Structure):
_fields_ = [("start", c_uint),
("size", c_uint)
]
| c_nvmlGpuInstancePlacement_t |
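c_nvmlGpuInstancePlacement_t is a plain ctypes Structure (presumably mirroring NVML's nvmlGpuInstancePlacement_t); its fields are read and written as attributes. A tiny sketch, assuming the class above is importable:

placement = c_nvmlGpuInstancePlacement_t()
placement.start, placement.size = 0, 4   # both fields are c_uint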
python | scipy__scipy | scipy/io/tests/test_idl.py | {
"start": 18972,
"end": 20531
} | class ____:
'''Test that sav files with a description tag can be read at all'''
def test_description(self):
s = readsav(path.join(DATA_PATH, 'scalar_byte_descr.sav'), verbose=False)
assert_identical(s.i8u, np.uint8(234))
def test_null_pointer():
# Regression test for null pointers.
s = readsav(path.join(DATA_PATH, 'null_pointer.sav'), verbose=False)
assert_identical(s.point, None)
assert_identical(s.check, np.int16(5))
def test_invalid_pointer():
# Regression test for invalid pointers (gh-4613).
# In some files in the wild, pointers can sometimes refer to a heap
# variable that does not exist. In that case, we now gracefully fail for
# that variable and replace the variable with None and emit a warning.
# Since it's difficult to artificially produce such files, the file used
# here has been edited to force the pointer reference to be invalid.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
s = readsav(path.join(DATA_PATH, 'invalid_pointer.sav'), verbose=False)
assert_(len(w) == 1)
assert_(str(w[0].message) == ("Variable referenced by pointer not found in "
"heap: variable will be set to None"))
assert_identical(s['a'], np.array([None, None]))
def test_attrdict():
d = _idl.AttrDict({'one': 1})
assert d['one'] == 1
assert d.one == 1
with pytest.raises(KeyError):
d['two']
with pytest.raises(AttributeError, match='has no attribute'):
d.two
| TestTags |
python | ray-project__ray | python/ray/autoscaler/v2/instance_manager/subscribers/ray_stopper.py | {
"start": 654,
"end": 5263
} | class ____(InstanceUpdatedSubscriber):
"""RayStopper is responsible for stopping ray on instances.
It will drain the ray node if it's for idle termination.
For other terminations, it will stop the ray node. (e.g. scale down, etc.)
If any failures happen when stopping/draining the node, we will not retry
and rely on the reconciler to handle the failure.
TODO: we could also surface the errors back to the reconciler for
quicker failure detection.
"""
def __init__(self, gcs_client: GcsClient, error_queue: Queue) -> None:
self._gcs_client = gcs_client
self._error_queue = error_queue
self._executor = ThreadPoolExecutor(max_workers=1)
def notify(self, events: List[InstanceUpdateEvent]) -> None:
for event in events:
if event.new_instance_status == Instance.RAY_STOP_REQUESTED:
fut = self._executor.submit(self._stop_or_drain_ray, event)
def _log_on_error(fut):
try:
fut.result()
except Exception:
logger.exception("Error stopping/drain ray.")
fut.add_done_callback(_log_on_error)
def _stop_or_drain_ray(self, event: InstanceUpdateEvent) -> None:
"""
Stops or drains the ray node based on the termination request.
"""
assert event.HasField("termination_request"), "Termination request is required."
termination_request = event.termination_request
ray_node_id = termination_request.ray_node_id
instance_id = event.instance_id
if termination_request.cause == TerminationRequest.Cause.IDLE:
reason = DrainNodeReason.DRAIN_NODE_REASON_IDLE_TERMINATION
reason_str = "Termination of node that's idle for {} seconds.".format(
termination_request.idle_duration_ms / 1000
)
self._drain_ray_node(
self._gcs_client,
self._error_queue,
ray_node_id,
instance_id,
reason,
reason_str,
)
return
# If it's not an idle termination, we stop the ray node.
self._stop_ray_node(
self._gcs_client, self._error_queue, ray_node_id, instance_id
)
@staticmethod
def _drain_ray_node(
gcs_client: GcsClient,
error_queue: Queue,
ray_node_id: str,
instance_id: str,
reason: DrainNodeReason,
reason_str: str,
):
"""
Drains the ray node.
Args:
gcs_client: The gcs client to use.
ray_node_id: The ray node id to drain.
reason: The reason to drain the node.
reason_str: The reason message to drain the node.
"""
try:
accepted, reject_msg_str = gcs_client.drain_node(
node_id=ray_node_id,
reason=reason,
reason_message=reason_str,
# TODO: we could probably add a deadline here that's derived
# from the stuck instance reconciliation configs.
deadline_timestamp_ms=0,
)
logger.info(
f"Drained ray on {ray_node_id}(success={accepted}, "
f"msg={reject_msg_str})"
)
if not accepted:
error_queue.put_nowait(RayStopError(im_instance_id=instance_id))
except Exception:
logger.exception(f"Error draining ray on {ray_node_id}")
error_queue.put_nowait(RayStopError(im_instance_id=instance_id))
@staticmethod
def _stop_ray_node(
gcs_client: GcsClient,
error_queue: Queue,
ray_node_id: str,
instance_id: str,
):
"""
Stops the ray node.
Args:
gcs_client: The gcs client to use.
ray_node_id: The ray node id to stop.
"""
try:
drained = gcs_client.drain_nodes(node_ids=[hex_to_binary(ray_node_id)])
success = len(drained) > 0
logger.info(
f"Stopping ray on {ray_node_id}(instance={instance_id}): "
f"success={success})"
)
if not success:
error_queue.put_nowait(RayStopError(im_instance_id=instance_id))
except Exception:
logger.exception(
f"Error stopping ray on {ray_node_id}(instance={instance_id})"
)
error_queue.put_nowait(RayStopError(im_instance_id=instance_id))
| RayStopper |
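The notify() path above submits stop/drain work to a single-threaded executor and surfaces failures through a done-callback instead of awaiting the future. A minimal standalone sketch of that pattern; the names and the simulated failure are illustrative, not Ray APIs:

import logging
from concurrent.futures import ThreadPoolExecutor

logger = logging.getLogger(__name__)
executor = ThreadPoolExecutor(max_workers=1)

def risky_work() -> None:
    raise RuntimeError("simulated drain failure")

def log_on_error(done_fut) -> None:
    try:
        done_fut.result()  # re-raises any exception raised in the worker thread
    except Exception:
        logger.exception("Background work failed")

fut = executor.submit(risky_work)
fut.add_done_callback(log_on_error)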
python | bokeh__bokeh | src/bokeh/core/property/string.py | {
"start": 1366,
"end": 2663
} | class ____(String):
""" Accept strings that match a given regular expression.
Args:
default (string, optional) :
A default value for attributes created from this property to have.
help (str or None, optional) :
A documentation string for this property. (default: None)
Example:
.. code-block:: python
>>> class RegexModel(HasProps):
... prop = Regex("foo[0-9]+bar")
...
>>> m = RegexModel()
>>> m.prop = "foo123bar"
>>> m.prop = "foo" # ValueError !!
>>> m.prop = [1, 2, 3] # ValueError !!
"""
def __init__(self, regex: str, *, default: Init[str] = Undefined, help: str | None = None) -> None:
self.regex = re.compile(regex)
super().__init__(default=default, help=help)
def __str__(self) -> str:
class_name = self.__class__.__name__
return f"{class_name}({self.regex.pattern!r})"
def validate(self, value: Any, detail: bool = True) -> None:
super().validate(value, detail)
if self.regex.match(value):
return
msg = "" if not detail else f"expected a string matching {self.regex.pattern!r} pattern, got {value!r}"
raise ValueError(msg)
| Regex |
python | doocs__leetcode | solution/1400-1499/1403.Minimum Subsequence in Non-Increasing Order/Solution.py | {
"start": 0,
"end": 278
} | class ____:
def minSubsequence(self, nums: List[int]) -> List[int]:
ans = []
s, t = sum(nums), 0
for x in sorted(nums, reverse=True):
t += x
ans.append(x)
if t > s - t:
break
return ans
| Solution |
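A quick check of the greedy above (LeetCode 1403): sort descending and keep taking elements until the kept sum strictly exceeds the remainder. Assuming the class above with `from typing import List` in scope:

print(Solution().minSubsequence([4, 3, 10, 9, 8]))  # [10, 9] -> 19 > 34 - 19 = 15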
python | aio-libs__aiohttp | tests/test_loop.py | {
"start": 550,
"end": 1629
} | class ____(AioHTTPTestCase):
on_startup_called: bool
async def get_application(self) -> web.Application:
app = web.Application()
app.on_startup.append(self.on_startup_hook)
return app
async def on_startup_hook(self, app: web.Application) -> None:
self.on_startup_called = True
async def test_on_startup_hook(self) -> None:
self.assertTrue(self.on_startup_called)
def test_default_loop(loop: asyncio.AbstractEventLoop) -> None:
assert asyncio.get_event_loop() is loop
def test_setup_loop_non_main_thread() -> None:
child_exc = None
def target() -> None:
try:
with loop_context() as loop:
assert asyncio.get_event_loop() is loop
loop.run_until_complete(test_subprocess_co(loop))
except Exception as exc:
nonlocal child_exc
child_exc = exc
# Ensures setup_test_loop can be called by pytest-xdist in non-main thread.
t = threading.Thread(target=target)
t.start()
t.join()
assert child_exc is None
| TestCase |
python | skorch-dev__skorch | skorch/callbacks/training.py | {
"start": 23307,
"end": 23894
} | class ____(ParamMapper):
"""Apply any function on matching parameters in the first epoch.
Examples
--------
Use ``Initializer`` to initialize all dense layer weights with
values sampled from an uniform distribution on the beginning of
the first epoch:
>>> init_fn = partial(torch.nn.init.uniform_, a=-1e-3, b=1e-3)
>>> cb = Initializer('dense*.weight', fn=init_fn)
>>> net = Net(myModule, callbacks=[cb])
"""
def __init__(self, *args, **kwargs):
kwargs['at'] = kwargs.get('at', 1)
super().__init__(*args, **kwargs)
| Initializer |
python | wandb__wandb | wandb/vendor/pygments/lexers/data.py | {
"start": 18269,
"end": 18771
} | class ____(JsonLexer):
"""
For `JSON-LD <http://json-ld.org/>`_ linked data.
.. versionadded:: 2.0
"""
name = 'JSON-LD'
aliases = ['jsonld', 'json-ld']
filenames = ['*.jsonld']
mimetypes = ['application/ld+json']
tokens = {
'objectvalue': [
(r'"@(context|id|value|language|type|container|list|set|'
r'reverse|index|base|vocab|graph)"', Name.Decorator,
'objectattribute'),
inherit,
],
}
| JsonLdLexer |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor1.py | {
"start": 386,
"end": 657
} | class ____(Generic[S]):
def __init__(self, value: S) -> None:
self._value: Final = value
Result = A[T] | B[S]
def return_ok_none() -> Result[int | None, Exception]:
return A(None)
def return_ok_one() -> Result[int | None, Exception]:
return A(1)
| B |
python | tensorflow__tensorflow | third_party/xla/build_tools/ci/build.py | {
"start": 3201,
"end": 4992
} | class ____(enum.Enum):
"""Enum representing all types of builds.
Should be named as `REPO,OS,HOST_TYPE,BACKEND,GPU_TYPE,CI_TYPE`.
"""
XLA_LINUX_X86_CPU_GITHUB_ACTIONS = enum.auto()
XLA_LINUX_X86_CPU_BZLMOD_GITHUB_ACTIONS = enum.auto()
XLA_LINUX_ARM64_CPU_GITHUB_ACTIONS = enum.auto()
XLA_LINUX_X86_GPU_L4_GITHUB_ACTIONS = enum.auto()
XLA_LINUX_X86_GPU_ONEAPI_GITHUB_ACTIONS = enum.auto()
# Presubmit builds for regression testing.
XLA_LINUX_ARM64_CPU_48_VCPU_PRESUBMIT_GITHUB_ACTIONS = enum.auto()
XLA_LINUX_X86_CPU_128_VCPU_PRESUBMIT_GITHUB_ACTIONS = enum.auto()
XLA_LINUX_X86_GPU_L4_16_VCPU_PRESUBMIT_GITHUB_ACTIONS = enum.auto()
XLA_LINUX_X86_GPU_L4_48_VCPU_PRESUBMIT_GITHUB_ACTIONS = enum.auto()
XLA_LINUX_X86_GPU_A4_224_VCPU_PRESUBMIT_GITHUB_ACTIONS = enum.auto()
XLA_LINUX_X86_GPU_L4_16_VCPU_BENCHMARK_PRESUBMIT_GITHUB_ACTIONS = enum.auto()
XLA_LINUX_X86_GPU_L4_48_VCPU_BENCHMARK_PRESUBMIT_GITHUB_ACTIONS = enum.auto()
XLA_LINUX_X86_GPU_A4_224_VCPU_BENCHMARK_PRESUBMIT_GITHUB_ACTIONS = enum.auto()
XLA_MACOS_X86_CPU_KOKORO = enum.auto()
XLA_MACOS_ARM64_CPU_KOKORO = enum.auto()
JAX_LINUX_X86_CPU_GITHUB_ACTIONS = enum.auto()
JAX_WINDOWS_X86_CPU_GITHUB_ACTIONS = enum.auto()
JAX_LINUX_X86_GPU_L4_GITHUB_ACTIONS = enum.auto()
TENSORFLOW_LINUX_X86_CPU_GITHUB_ACTIONS = enum.auto()
TENSORFLOW_LINUX_X86_GPU_L4_GITHUB_ACTIONS = enum.auto()
@classmethod
def from_str(cls, s):
try:
return cls[s.replace(" ", "_").upper()]
except KeyError:
# Sloppy looking exception handling, but argparse will catch ValueError
# and give a pleasant error message. KeyError would not work here.
raise ValueError # pylint: disable=raise-missing-from
@dataclasses.dataclass(frozen=True, **_KW_ONLY_IF_PYTHON310)
| BuildType |
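from_str normalizes its argument by replacing spaces with underscores and upper-casing before the enum lookup, so for example:

BuildType.from_str("xla linux x86 cpu github actions")
# -> BuildType.XLA_LINUX_X86_CPU_GITHUB_ACTIONS
BuildType.from_str("no such build")  # raises ValueError, which argparse reports cleanly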
python | doocs__leetcode | solution/2600-2699/2652.Sum Multiples/Solution2.py | {
"start": 0,
"end": 237
} | class ____:
def sumOfMultiples(self, n: int) -> int:
def f(x: int) -> int:
m = n // x
return (x + m * x) * m // 2
return f(3) + f(5) + f(7) - f(3 * 5) - f(3 * 7) - f(5 * 7) + f(3 * 5 * 7)
| Solution |
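The helper above sums each arithmetic progression in closed form, f(x) = (x + m*x) * m / 2 with m = n // x, then combines the three divisors by inclusion-exclusion. Worked check for n = 10:

print(Solution().sumOfMultiples(10))  # 40 == 3 + 5 + 6 + 7 + 9 + 10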
python | weaviate__weaviate-python-client | weaviate/collections/classes/filters.py | {
"start": 3779,
"end": 4109
} | class ____(GeoCoordinate):
distance: float
FilterValuesList = Union[
Sequence[str],
Sequence[bool],
Sequence[int],
Sequence[float],
Sequence[datetime],
Sequence[UUID],
]
FilterValues = Union[
int, float, str, bool, datetime, UUID, _GeoCoordinateFilter, None, FilterValuesList
]
| _GeoCoordinateFilter |
python | django__django | tests/gis_tests/relatedapp/models.py | {
"start": 695,
"end": 1094
} | class ____(SimpleModel):
name = models.CharField(max_length=30)
city = models.ForeignKey(City, models.CASCADE)
center1 = models.PointField()
# Throwing a curveball w/`db_column` here.
center2 = models.PointField(srid=2276, db_column="mycenter")
border1 = models.PolygonField()
border2 = models.PolygonField(srid=2276)
def __str__(self):
return self.name
| Parcel |
python | apache__airflow | airflow-core/src/airflow/exceptions.py | {
"start": 9887,
"end": 10019
} | class ____(ValueError):
"""Raised when an attempt is made to load an executor which is not configured."""
| UnknownExecutorException |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_highlight.py | {
"start": 17781,
"end": 20373
} | class ____(util.MdCase):
"""Test extended language cases."""
extension = ['pymdownx.highlight', 'pymdownx.superfences', 'pymdownx.inlinehilite']
extension_configs = {
'pymdownx.highlight': {
'extend_pygments_lang': [
{'name': 'php-inline', 'lang': 'php', 'options': {'startinline': True}}
]
}
}
def test_extended_lang_inlinehilite(self):
"""Test extended language in InlineHilite."""
self.check_markdown(
'''
`#!php-inline $a = array("foo" => 0, "bar" => 1);`
''',
'''
<p><code class="highlight"><span class="nv">$a</span> <span class="o">=</span> <span class="k">array</span><span class="p">(</span><span class="s2">"foo"</span> <span class="o">=></span> <span class="mi">0</span><span class="p">,</span> <span class="s2">"bar"</span> <span class="o">=></span> <span class="mi">1</span><span class="p">);</span></code></p>
''', # noqa: E501
True
)
def test_extended_lang_superfences(self):
"""Test extended language in SuperFences."""
self.check_markdown(
'''
```php-inline
$a = array("foo" => 0, "bar" => 1);
```
''',
'''
<div class="highlight"><pre><span></span><code><span class="nv">$a</span> <span class="o">=</span> <span class="k">array</span><span class="p">(</span><span class="s2">"foo"</span> <span class="o">=></span> <span class="mi">0</span><span class="p">,</span> <span class="s2">"bar"</span> <span class="o">=></span> <span class="mi">1</span><span class="p">);</span>
</code></pre></div>
''', # noqa: E501
True
)
def test_extended_lang_case(self):
"""Test extended language in SuperFences."""
self.check_markdown(
'''
```PHP-Inline
$a = array("foo" => 0, "bar" => 1);
```
''',
'''
<div class="highlight"><pre><span></span><code><span class="nv">$a</span> <span class="o">=</span> <span class="k">array</span><span class="p">(</span><span class="s2">"foo"</span> <span class="o">=></span> <span class="mi">0</span><span class="p">,</span> <span class="s2">"bar"</span> <span class="o">=></span> <span class="mi">1</span><span class="p">);</span>
</code></pre></div>
''', # noqa: E501
True
)
| TestExtendedLang |
python | bottlepy__bottle | bottle.py | {
"start": 143209,
"end": 150183
} | class ____(ServerAdapter):
""" Untested. """
adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer,
CherootServer, WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'waitress': WaitressServer,
'cherrypy': CherryPyServer,
'cheroot': CherootServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'bjoern': BjoernServer,
'aiohttp': AiohttpServer,
'uvloop': AiohttpUVLoopServer,
'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
local variables. Example: ``import_string('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN
NORUN, nr_old = True, NORUN
tmp = default_app.push() # Create a new "default application"
try:
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporary added default application
NORUN = nr_old
_debug = debug
def run(app=None,
server='wsgiref',
host='127.0.0.1',
port=8080,
interval=1,
reloader=False,
quiet=False,
plugins=None,
debug=None,
config=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
:param host: Server address to bind to. Pass ``0.0.0.0`` to listens on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
import subprocess
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
args = [sys.executable] + sys.argv
# If a package was loaded with `python -m`, then `sys.argv` needs to be
# restored to the original value, or imports might break. See #1336
if getattr(sys.modules.get('__main__'), '__package__', None):
args[1:1] = ["-m", sys.modules['__main__'].__package__]
try:
os.close(fd) # We never write to this file
while os.path.exists(lockfile):
p = subprocess.Popen(args, env=environ)
while p.poll() is None:
os.utime(lockfile, None) # Tell child we are still alive
time.sleep(interval)
if p.returncode == 3: # Child wants to be restarted
continue
sys.exit(p.returncode)
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
if debug is not None: _debug(debug)
app = app or default_app()
if isinstance(app, str):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
if isinstance(plugin, str):
plugin = load(plugin)
app.install(plugin)
if config:
app.config.update(config)
if server in server_names:
server = server_names.get(server)
if isinstance(server, str):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)..." %
(__version__, repr(server)))
_stderr("Listening on %s" % server._listen_url)
_stderr("Hit Ctrl-C to quit.\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except: # noqa: E722
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3)
| AutoServer |
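The three target forms accepted by load() map onto modules, attributes, and evaluated expressions; illustrative calls using standard-library targets only:

load('json')                    # the json module object
load('os.path:join')            # the os.path.join function
load('math:sqrt(x)', x=16.0)    # 4.0 -- the expression is evaluated with x bound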
python | tensorflow__tensorflow | tensorflow/python/compiler/tensorrt/trt_convert_test.py | {
"start": 2888,
"end": 50244
} | class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
"""Class to test Tensorflow-TensorRT integration python API."""
# Use a small max_workspace_size for tests so they don't consume too much GPU
# memory.
_TRT_MAX_WORKSPACE_SIZE_BYTES = (
trt_convert.DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES)
def mkdtemp(self):
return tempfile.mkdtemp(dir=self.get_temp_dir())
def testTRTEngineInstanceAvailable(self):
# test if we can access the TRTEngineInstance protobuf
assert hasattr(TRTEngineInstance(), "serialized_engine")
def _GetConfigProto(self, rewriter_config=None):
"""Get ConfigProto for session creation."""
config = config_pb2.ConfigProto(
gpu_options=config_pb2.GPUOptions(allow_growth=True))
if rewriter_config:
config.graph_options.rewrite_options.CopyFrom(rewriter_config)
return config
@classmethod
def _GetGraph(cls, inp1, inp2, var):
"""Get the graph for testing."""
# The graph computes: inp1^2 + inp1*var + inp1 + inp2 + var
add = inp1 + var
mul = inp1 * add
add = mul + add
add = add + inp2
out = array_ops.identity(add, name="output")
return out
def _GetShapeOpModel(self):
class ShapeOpModel(autotrackable.AutoTrackable):
def __init__(self):
self.v = None
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[None, None], dtype=dtypes.float32)
])
def run(self, x):
q = x + 1
q_shape = array_ops.shape(q)
# Add an OP that is not supported by TF-TRT. This allows TF-TRT to build
# two engines. The first engine produces an int32 output and the second
# engine has an int32 input and an int32 output.
q = math_ops.cumsum(q_shape)
q = q * 2
return array_ops.identity(q, name="output")
return ShapeOpModel()
def _GetModelForV2(self):
class SimpleModel(autotrackable.AutoTrackable):
def __init__(self):
self.v = None
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[None, 1, 1], dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=[None, 1, 1], dtype=dtypes.float32)
])
def run(self, inp1, inp2):
if self.v is None:
self.v = variables.Variable([[[1.0]]], dtype=dtypes.float32)
return TrtConvertTest._GetGraph(inp1, inp2, self.v)
return SimpleModel()
def _GetGraphForV1(self, device):
def _GraphFn():
inp1 = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, 1, 1], name="input1")
inp2 = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, 1, 1], name="input2")
var = variables.Variable([[[1.0]]], dtype=dtypes.float32, name="v1")
out = TrtConvertTest._GetGraph(inp1, inp2, var)
return g, var, inp1, inp2, out
g = ops.Graph()
with g.as_default():
if device:
with g.device(device):
return _GraphFn()
return _GraphFn()
def _GetGraphDefForV1(self, device):
"""Get the graph def for testing."""
g, var, _, _, _ = self._GetGraphForV1(device)
with self.session(graph=g, config=self._GetConfigProto()) as sess:
sess.run(var.initializer)
graph_def = convert_to_constants.convert_variables_to_constants(
sess, g.as_graph_def(add_shapes=True), ["output"])
node_name_to_op = {node.name: node.op for node in graph_def.node}
self.assertEqual(
{
"v1": "Const",
"add/ReadVariableOp": "Identity",
"input1": "Placeholder",
"input2": "Placeholder",
"add": "AddV2",
"mul": "Mul",
"add_1": "AddV2",
"add_2": "AddV2",
"output": "Identity"
}, node_name_to_op)
return graph_def
def _WriteInputSavedModelForV1(self, input_saved_model_dir, device):
"""Write the saved model as an input for testing."""
g, var, inp1, inp2, out = self._GetGraphForV1(device)
signature_def = signature_def_utils.build_signature_def(
inputs={
"myinput1": utils.build_tensor_info(inp1),
"myinput2": utils.build_tensor_info(inp2)
},
outputs={"myoutput": utils.build_tensor_info(out)},
method_name=signature_constants.PREDICT_METHOD_NAME)
saved_model_builder = builder.SavedModelBuilder(input_saved_model_dir)
with self.session(graph=g, config=self._GetConfigProto()) as sess:
sess.run(var.initializer)
saved_model_builder.add_meta_graph_and_variables(
sess, [tag_constants.SERVING],
signature_def_map={_SAVED_MODEL_SIGNATURE_KEY: signature_def})
saved_model_builder.save()
def _ConvertGraphV1(self,
output_saved_model_dir=None,
need_calibration=False,
max_batch_size=1,
minimum_segment_size=3,
is_dynamic_op=False,
maximum_cached_engines=1,
device=None):
"""Helper method to convert a GraphDef or SavedModel using TF-TRT."""
input_saved_model_dir = None
if output_saved_model_dir:
input_saved_model_dir = self.mkdtemp()
self._WriteInputSavedModelForV1(input_saved_model_dir, device)
# Calibration requires dynamic_op.
if need_calibration:
is_dynamic_op = True
# For dynamic_op, the converter requires the unused max_batch_size=None.
if is_dynamic_op:
max_batch_size = None
converter = trt_convert.TrtGraphConverter(
input_saved_model_dir=input_saved_model_dir,
input_saved_model_signature_key=_SAVED_MODEL_SIGNATURE_KEY,
input_graph_def=None
if input_saved_model_dir else self._GetGraphDefForV1(device),
nodes_denylist=None if input_saved_model_dir else ["output"],
max_batch_size=max_batch_size,
max_workspace_size_bytes=TrtConvertTest._TRT_MAX_WORKSPACE_SIZE_BYTES,
precision_mode=(trt_convert.TrtPrecisionMode.INT8 if need_calibration
else trt_convert.TrtPrecisionMode.FP32),
minimum_segment_size=minimum_segment_size,
is_dynamic_op=is_dynamic_op,
maximum_cached_engines=maximum_cached_engines)
output_graph_def = converter.convert()
if need_calibration:
class CalibrationData(object):
def __init__(self):
self._data = 0
def next(self):
self._data += 1
return {"input1:0": [[[self._data]]], "input2:0": [[[self._data]]]}
output_graph_def = converter.calibrate(
fetch_names=["output:0"],
num_runs=10,
feed_dict_fn=CalibrationData().next)
if output_saved_model_dir is not None:
converter.save(output_saved_model_dir=output_saved_model_dir)
return output_graph_def
# Remove the graph sequence number prefix from the name only if the name has
# a prefix TRTEngineOp_n_.
def _MayRemoveGraphSequenceNumber(self, name):
prefix = re.search(r"TRTEngineOp_\d{3,}_", name)
if prefix and name.startswith(prefix.group(0)):
parts = name.split("_", maxsplit=2)
assert len(parts) == 3
return parts[0] + "_" + parts[2]
return name
# Return the unique TRTEngineOp in the given graph def.
def _GetUniqueTRTEngineOp(self, graph_def):
trt_engine_nodes = [
node for node in graph_def.node if node.op == "TRTEngineOp"
]
assert len(trt_engine_nodes) == 1
return trt_engine_nodes[0]
def _TestTrtGraphConverter(self,
device,
output_saved_model_dir=None,
need_calibration=False,
is_dynamic_op=False):
"""General method to test trt_convert.TrtGraphConverter()."""
output_graph_def = self._ConvertGraphV1(
output_saved_model_dir=output_saved_model_dir,
need_calibration=need_calibration,
is_dynamic_op=is_dynamic_op,
device=device)
graph_defs_to_verify = [output_graph_def]
if output_saved_model_dir:
saved_model_graph_def = saved_model_utils.get_meta_graph_def(
output_saved_model_dir, tag_constants.SERVING).graph_def
self.assertIsInstance(saved_model_graph_def, graph_pb2.GraphDef)
graph_defs_to_verify.append(saved_model_graph_def)
for graph_def in graph_defs_to_verify:
node_name_to_op = {
self._MayRemoveGraphSequenceNumber(node.name): node.op
for node in graph_def.node
}
if device is not None and device.startswith("/CPU:"):
self.assertEqual(
{
"add": "AddV2",
"v1": "Const",
"add_1": "AddV2",
"add_2": "AddV2",
"input1": "Placeholder",
"input2": "Placeholder",
"mul": "Mul",
"output": "Identity"
}, node_name_to_op)
else:
self.assertEqual(
{
"input1": "Placeholder",
"input2": "Placeholder",
"TRTEngineOp_000": "TRTEngineOp",
"output": "Identity"
}, node_name_to_op)
if need_calibration:
trt_engine_nodes = [
node for node in graph_def.node if node.op == "TRTEngineOp"
]
if device is not None and device.startswith("/CPU:"):
self.assertEmpty(trt_engine_nodes)
return
self.assertNotEmpty(trt_engine_nodes)
for node in trt_engine_nodes:
self.assertTrue(len(node.attr["calibration_data"].s))
# Run the calibrated graph.
# TODO(laigd): consider having some input where the answer is different.
with ops.Graph().as_default():
importer.import_graph_def(graph_def, name="")
with self.session(config=self._GetConfigProto()) as sess:
for test_data in range(10):
self.assertEqual((test_data + 1.0)**2 + test_data,
sess.run(
"output:0",
feed_dict={
"input1:0": [[[test_data]]],
"input2:0": [[[test_data]]]
}))
@parameterized.named_parameters([
("NoDeviceAssignment", None),
("GPU", "/GPU:0"),
("CPU", "/CPU:0"),
])
@test_util.deprecated_graph_mode_only
def testTrtGraphConverter_OfflineConversion(self, device):
"""Test case for trt_convert.TrtGraphConverter()."""
for need_calibration in [False, True]:
# Use GraphDef as input.
self._TestTrtGraphConverter(device)
# Use SavedModel as input.
self._TestTrtGraphConverter(
device,
output_saved_model_dir=self.mkdtemp(),
need_calibration=need_calibration)
@parameterized.named_parameters([
("NoDeviceAssignment", None),
("GPU", "/device:GPU:0"),
("CPU", "/device:CPU:0"),
])
@test_util.deprecated_graph_mode_only
def testTrtGraphConverter_OnlineConversion(self, device):
"""Test case for TF-TRT conversion using Grappler directly."""
conversion_params = trt_convert.DEFAULT_TRT_CONVERSION_PARAMS._replace(
precision_mode=trt_convert.TrtPrecisionMode.FP32)
config = self._GetConfigProto(
rewriter_config=trt_convert.get_tensorrt_rewriter_config(
conversion_params,
is_dynamic_op=False,
max_batch_size=1,
is_v2=False))
with ops.Graph().as_default():
# Online conversion requires a frozen graph, so we reuse inp1 as the var
# argument.
inp1 = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, 1, 1], name="input1")
inp2 = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, 1, 1], name="input2")
if device:
with ops.device(device):
TrtConvertTest._GetGraph(inp1, inp2, inp1)
else:
TrtConvertTest._GetGraph(inp1, inp2, inp1)
with self.session(config=config) as sess:
self._TestRun(sess, batch_size=1)
def _CreateConverterV2(
self,
input_saved_model_dir,
input_saved_model_signature_key=_SAVED_MODEL_SIGNATURE_KEY,
max_workspace_size_bytes=10 << 20, # Use a smaller workspace.
precision_mode=trt_convert.TrtPrecisionMode.FP32,
maximum_cached_engines=2,
allow_build_at_runtime=True):
return trt_convert.TrtGraphConverterV2(
input_saved_model_dir=input_saved_model_dir,
input_saved_model_signature_key=input_saved_model_signature_key,
max_workspace_size_bytes=max_workspace_size_bytes,
precision_mode=precision_mode,
maximum_cached_engines=maximum_cached_engines,
allow_build_at_runtime=allow_build_at_runtime)
def _CheckTrtOps(self, concrete_func, check_fn=None, num_engines=1):
graph_def = concrete_func.graph.as_graph_def()
trt_op_names = []
for node in graph_def.node:
if node.op == "TRTEngineOp":
trt_op_names.append(self._MayRemoveGraphSequenceNumber(node.name))
if check_fn:
check_fn(node)
for func in graph_def.library.function:
for node in func.node_def:
if node.op == "TRTEngineOp":
trt_op_names.append(self._MayRemoveGraphSequenceNumber(node.name))
if check_fn:
check_fn(node)
self.assertLen(trt_op_names, num_engines)
def _RandomInput(self, shape, dtype=np.float32):
inp1 = np.random.random_sample(shape).astype(dtype)
inp2 = np.random.random_sample(shape).astype(dtype)
return inp1, inp2
def _GetAssetFile(self, output_saved_model_dir, trt_engine_name):
asset_file = os.path.join(output_saved_model_dir,
"assets/trt-serialized-engine." + trt_engine_name)
return asset_file
def _BuildGraphWithInputGenerator(self, InputFunc, np_input=None):
# Create the SavedModel.
root = self._GetShapeOpModel()
expected_output = None if np_input is None else root.run(np_input)
input_saved_model_dir = self.mkdtemp()
save.save(root, input_saved_model_dir, signatures=root.run)
# Convert the graph to TF-TRT.
conv_params = trt_convert.TrtConversionParams(minimum_segment_size=2)
converter = trt_convert.TrtGraphConverterV2(
input_saved_model_dir=input_saved_model_dir,
use_dynamic_shape=True,
**conv_params._asdict())
converter.convert()
# Build the graph with the input generator. This runs the TRTEngineOp native
# segment.
converter.build(InputFunc)
output_saved_model_dir = self.mkdtemp()
converter.save(output_saved_model_dir)
del converter
return output_saved_model_dir, expected_output
def _BuildGraphWithInputGeneratorTwoInputs(self, InputFunc, np_input=None):
# Create a model and save it.
input_saved_model_dir = self.mkdtemp()
root = self._GetModelForV2()
if np_input is None:
expected_output = None
else:
expected_output = root.run(np_input[0], np_input[1])
save.save(root, input_saved_model_dir,
{_SAVED_MODEL_SIGNATURE_KEY: root.run})
# Run TRT conversion.
converter = self._CreateConverterV2(input_saved_model_dir)
converter.convert()
# Verify the converted GraphDef and ConcreteFunction.
self._CheckTrtOps(converter._converted_func) # pylint: disable=protected-access
trt_engine_name = self._GetUniqueTRTEngineOp(
converter._converted_graph_def).name
# Save the converted model without any TRT engine cache.
output_saved_model_dir = self.mkdtemp()
converter.save(output_saved_model_dir)
unexpected_asset_file = \
self._GetAssetFile(output_saved_model_dir, trt_engine_name)
self.assertFalse(os.path.exists(unexpected_asset_file))
# Run the converted function to populate the engine cache.
converter.build(input_fn=InputFunc)
# Save the converted model again with serialized engine cache.
output_saved_model_dir = self.mkdtemp()
converter.save(output_saved_model_dir)
expected_asset_file = \
self._GetAssetFile(output_saved_model_dir, trt_engine_name)
self.assertTrue(os.path.exists(expected_asset_file))
self.assertTrue(os.path.getsize(expected_asset_file))
del converter
return output_saved_model_dir, expected_output
@test_util.run_v2_only
def testTrtGraphBuild(self):
"""Testing the construction of a graph with an input data generator
that takes one or two input parameters passed in different formats.
"""
# One input parameter:
np_input = np.random.random_sample([5, 3]).astype(np.float32)
def _Func_1():
yield (np_input,) # tuple with one element
def _Func_2():
yield [np_input] # list with one element
def _Func_3():
yield np_input # array
def _Func_4():
yield {"x": np_input} # dictionary
def _Func_5():
# multiple yields: different types
yield (np_input) # tuple with one element
yield [np_input] # list with one element
yield np_input # array
yield {"x": np_input} # dictionary
def _Func_6():
# multiple yields: all arrays
for shape in [(1, 128), (16, 128), (256, 128)]:
yield np.random.random_sample(shape).astype(np.float32)
for input_fn in [_Func_1, _Func_2, _Func_3, _Func_4, _Func_5, _Func_6]:
self._BuildGraphWithInputGenerator(input_fn)
# Two input parameters:
np_input1, np_input2 = self._RandomInput([4, 1, 1])
def _Func_A():
yield np_input1, np_input2 # tuple
def _Func_B():
yield [np_input1, np_input2] # list
def _Func_C():
yield {"inp1": np_input1, "inp2": np_input2} # dictionary
def _Func_D():
# multiple yields: different types
yield np_input1, np_input2 # tuple
yield [np_input1, np_input2] # list
yield {"inp1": np_input1, "inp2": np_input2} # dictionary
def _Func_E():
# multiple yields: tuples
for shape in [[4, 1, 1], [4, 2, 1], [4, 4, 1]]:
yield self._RandomInput(shape)
for input_fn in [_Func_A, _Func_B, _Func_C, _Func_D, _Func_E]:
self._BuildGraphWithInputGeneratorTwoInputs(input_fn)
@test_util.run_v2_only
def testTrtGraphConverter_DynamicConversion_v2(self):
"""Test case for trt_convert.TrtGraphConverter()."""
np_input1, np_input2 = self._RandomInput([4, 1, 1])
def _InputFn():
yield np_input1, np_input2
np_inputs = [np_input1, np_input2]
output_saved_model_dir, expected_output = \
self._BuildGraphWithInputGeneratorTwoInputs(_InputFn, np_inputs)
gc.collect() # Force GC to destroy the TRT engine cache.
# Load and verify the converted model.
#
# TODO(laigd): the name of the new input_signature of the
# `root_with_trt.run` function is empty string (originally was None),
# investigate why.
root_with_trt = load.load(output_saved_model_dir)
# TODO(laigd): `root_with_trt.run` is still using the original graph without
# trt. Consider changing that.
# self._CheckTrtOps(root_with_trt.run.get_concrete_function())
converted_signature = root_with_trt.signatures[_SAVED_MODEL_SIGNATURE_KEY]
self._CheckTrtOps(converted_signature)
output_with_trt = converted_signature(
inp1=ops.convert_to_tensor(np_input1),
inp2=ops.convert_to_tensor(np_input2))
# The output of running the converted signature is a dict due to
# compatibility reasons with V1 SavedModel signature mechanism.
self.assertAllClose(
expected_output,
list(output_with_trt.values())[0],
atol=1e-6,
rtol=1e-6)
del root_with_trt
gc.collect() # Force GC to destroy the TRT engine cache.
@test_util.run_v2_only
def testTrtGraphConverter_ShapeOp_Int32InputOutput_v2(self):
"""Testing ShapeOp and int32 values as engine input and output."""
np_input = np.random.random_sample([5, 3]).astype(np.float32)
def _InputFunc():
# Passing single input parameter as a tuple with one element
yield (np_input,)
output_saved_model_dir, expected_output = \
self._BuildGraphWithInputGenerator(_InputFunc, np_input)
root_with_trt = load.load(output_saved_model_dir)
converted_signature = root_with_trt.signatures["serving_default"]
# Check that the graph is converted to two TRTEngineOps.
self._CheckTrtOps(converted_signature, num_engines=2)
# Run the graph.
output_with_trt = converted_signature(x=ops.convert_to_tensor(np_input))
# Check the result of the run.
self.assertAllClose(expected_output, list(output_with_trt.values())[0])
@test_util.run_v2_only
def testTrtGraphConverter_Int8Conversion_v2(self):
np_input1, np_input2 = self._RandomInput([4, 1, 1])
# Create a model and save it.
input_saved_model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
root = self._GetModelForV2()
expected_output = root.run(np_input1, np_input2)
save.save(root, input_saved_model_dir,
{_SAVED_MODEL_SIGNATURE_KEY: root.run})
# Run TRT conversion.
converter = self._CreateConverterV2(
input_saved_model_dir,
precision_mode=trt_convert.TrtPrecisionMode.INT8,
maximum_cached_engines=3)
# Convert and perform INT8 calibration
def _CalibrationInputFn():
yield np_input1, np_input2
converter.convert(calibration_input_fn=_CalibrationInputFn)
trt_engine_name = self._GetUniqueTRTEngineOp(
converter._converted_graph_def).name
def _CheckFn(node):
self.assertTrue(len(node.attr["calibration_data"].s), node.name)
# Verify the converted GraphDef.
self._CheckTrtOps(converter._converted_func, _CheckFn) # pylint: disable=protected-access
# Build another engine with different batch size.
def _InputFn():
yield self._RandomInput([5, 1, 1])
converter.build(input_fn=_InputFn)
# Save the converted model.
# TODO(laigd): check that it should contain two engines.
output_saved_model_dir = self.mkdtemp()
converter.save(output_saved_model_dir)
expected_asset_file = \
self._GetAssetFile(output_saved_model_dir, trt_engine_name)
self.assertTrue(os.path.exists(expected_asset_file))
self.assertTrue(os.path.getsize(expected_asset_file))
del converter
gc.collect() # Force GC to destroy the TRT engine cache.
# Load and verify the converted model.
root_with_trt = load.load(output_saved_model_dir)
converted_signature = root_with_trt.signatures[_SAVED_MODEL_SIGNATURE_KEY]
self._CheckTrtOps(converted_signature, _CheckFn)
output_with_trt = converted_signature(
inp1=ops.convert_to_tensor(np_input1),
inp2=ops.convert_to_tensor(np_input2))
self.assertEqual(1, len(output_with_trt))
# The output of running the converted signature is a dict due to
# compatibility reasons with V1 SavedModel signature mechanism.
self.assertAllClose(
expected_output,
list(output_with_trt.values())[0],
atol=1e-6,
rtol=1e-6)
# Run with an input of different batch size. It should build a new engine
# using calibration table.
# TODO(laigd): check that it should contain three engines.
np_input1, np_input2 = self._RandomInput([6, 1, 1])
converted_signature(
inp1=ops.convert_to_tensor(np_input1),
inp2=ops.convert_to_tensor(np_input2))
del root_with_trt
gc.collect() # Force GC to destroy the TRT engine cache.
@test_util.run_v2_only
def testTrtGraphConverter_RemoveNativeSegments(self):
"""Test case for trt_convert._remove_native_segment()."""
np_input = np.random.random_sample([5, 3]).astype(np.float32)
# Create a model and save it.
input_saved_model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
root = self._GetShapeOpModel()
expected_output = root.run(np_input)
save.save(root, input_saved_model_dir, signatures=root.run)
# Run TRT conversion.
converter = trt_convert.TrtGraphConverterV2(
input_saved_model_dir,
precision_mode=trt_convert.TrtPrecisionMode.FP32,
allow_build_at_runtime=False,
minimum_segment_size=1,
)
def _input_fn():
yield (np_input,)
graph_func = converter.convert()
converter.build(_input_fn)
# Load and verify the reduced converted model.
output_saved_model_dir2 = self.mkdtemp()
with test_utils.experimental_feature_scope("remove_native_segments"):
converter.save(output_saved_model_dir2)
saved_model_loaded = load.load(output_saved_model_dir2)
graph_func_after = saved_model_loaded.signatures["serving_default"]
actual_output = graph_func_after(x=np_input)["output_0"]
self.assertAllClose(expected_output, actual_output, atol=1e-6, rtol=1e-6)
del graph_func
del root
gc.collect() # Force GC to destroy the TRT engine cache.
@test_util.run_v2_only
def testTrtGraphConverter_DestroyEngineCache(self):
"""Test case for trt_convert.TrtGraphConverter()."""
np_input1, np_input2 = self._RandomInput([4, 1, 1])
# Create a model and save it.
input_saved_model_dir = self.mkdtemp()
root = self._GetModelForV2()
save.save(root, input_saved_model_dir,
{_SAVED_MODEL_SIGNATURE_KEY: root.run})
# Run TRT conversion.
converter = self._CreateConverterV2(input_saved_model_dir)
converter.convert()
trt_engine_name = self._GetUniqueTRTEngineOp(
converter._converted_graph_def).name
def _InputFn():
yield np_input1, np_input2
converter.build(input_fn=_InputFn) # Populate the TRT engine cache.
output_saved_model_dir = self.mkdtemp()
converter.save(output_saved_model_dir)
def _DestroyCache():
with ops.device("GPU:0"):
handle = gen_trt_ops.create_trt_resource_handle(
resource_name=trt_engine_name)
gen_resource_variable_ops.destroy_resource_op(
handle, ignore_lookup_error=False)
with self.assertRaisesRegex(errors.NotFoundError,
r"Resource .* does not exist."):
_DestroyCache()
# Load the converted model and make sure the engine cache is populated by
# default.
root = load.load(output_saved_model_dir)
_DestroyCache()
with self.assertRaisesRegex(errors.NotFoundError,
r"Resource .* does not exist."):
_DestroyCache()
# Load the converted model again and make sure the engine cache is destroyed
# when the model goes out of scope.
root = load.load(output_saved_model_dir)
del root
gc.collect() # Force GC to destroy the TRT engine cache.
with self.assertRaisesRegex(errors.NotFoundError,
r"Resource .* does not exist."):
_DestroyCache()
def _CompareSavedModel(self, model_class):
signature_key = "serving_default"
def _GetModelPaths(model_class):
input_saved_model_dir = self.mkdtemp()
root = model_class()
save.save(root, input_saved_model_dir)
converter = self._CreateConverterV2(
input_saved_model_dir, input_saved_model_signature_key=signature_key)
converter.convert()
output_saved_model_dir = self.mkdtemp()
converter.save(output_saved_model_dir)
return input_saved_model_dir, output_saved_model_dir
def _GetSignatureDef(export_dir):
saved_model_proto = loader_impl.parse_saved_model(export_dir)
self.assertEqual(1, len(saved_model_proto.meta_graphs))
meta_graph = saved_model_proto.meta_graphs[0]
self.assertIn(signature_key, meta_graph.signature_def)
return meta_graph.signature_def[signature_key]
def _CompareSignatureDef(original_def, converted_def, is_input):
endpoints = original_def.inputs if is_input else original_def.outputs
converted_endpoints = (
converted_def.inputs if is_input else converted_def.outputs)
self.assertEqual(set(endpoints.keys()), set(converted_endpoints.keys()))
for key in endpoints:
original_input = endpoints[key]
converted_input = converted_endpoints[key]
self.assertEqual(original_input.name, converted_input.name)
self.assertEqual(original_input.dtype, converted_input.dtype)
self.assertEqual(
tensor_shape.TensorShape(original_input.tensor_shape).as_list(),
tensor_shape.TensorShape(converted_input.tensor_shape).as_list())
def _GetStructuredOutputs(export_dir):
root = load.load(export_dir)
return root.signatures[signature_key].structured_outputs
saved_model_path, converted_saved_model_path = _GetModelPaths(model_class)
original_def = _GetSignatureDef(saved_model_path)
converted_def = _GetSignatureDef(converted_saved_model_path)
self.assertEqual(original_def.method_name, converted_def.method_name)
_CompareSignatureDef(original_def, converted_def, True)
_CompareSignatureDef(original_def, converted_def, False)
self.assertEqual(
_GetStructuredOutputs(saved_model_path),
_GetStructuredOutputs(converted_saved_model_path))
@test_util.run_v2_only
def testRetainSignatureInfo_NoInputs(self):
class _Model(autotrackable.AutoTrackable):
@def_function.function(input_signature=[])
def run(self):
return array_ops.constant(1.0)
self._CompareSavedModel(_Model)
@test_util.run_v2_only
def testRetainSignatureInfo_OneInput(self):
class _Model(autotrackable.AutoTrackable):
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[None, 1], dtype=dtypes.float32)
])
def run(self, inp):
return inp + inp * inp
self._CompareSavedModel(_Model)
@test_util.run_v2_only
def testRetainSignatureInfo_TwoInputs(self):
class _Model(autotrackable.AutoTrackable):
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[None, 1], dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=[None, 2], dtype=dtypes.float32)
])
def run(self, inp1, inp2):
return inp1 + inp2 * inp2
self._CompareSavedModel(_Model)
@test_util.run_v2_only
def testRetainSignatureInfo_OneOutputSignatureKey(self):
class _Model(autotrackable.AutoTrackable):
@def_function.function(input_signature=[])
def run(self):
return {"my_output": array_ops.constant(1.0)}
self._CompareSavedModel(_Model)
@test_util.run_v2_only
def testRetainSignatureInfo_TwoOutputSignatureKeys(self):
class _Model(autotrackable.AutoTrackable):
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[None, 1], dtype=dtypes.float32)
])
def run(self, inp):
# Here the keys are not ordered lexicographically on purpose.
return {
"output_b": array_ops.constant(1.0),
"output_a": inp + inp * inp
}
self._CompareSavedModel(_Model)
def _TestRun(self, sess, batch_size):
result = sess.run(
"output:0",
feed_dict={
"input1:0": [[[1.0]]] * batch_size,
"input2:0": [[[1.0]]] * batch_size
})
self.assertAllEqual([[[5.0]]] * batch_size, result)
@parameterized.named_parameters([
("LargeSegmentSize", 7),
("NoMainGraphConversionSegmentSize", -1),
])
@test_util.deprecated_graph_mode_only
def testTrtGraphConverter_MinimumSegmentSize(self, minimum_segment_size):
output_graph_def = self._ConvertGraphV1(
minimum_segment_size=minimum_segment_size)
node_name_to_op = {node.name: node.op for node in output_graph_def.node}
self.assertEqual(
{
"v1": "Const",
"input1": "Placeholder",
"input2": "Placeholder",
"add": "AddV2",
"mul": "Mul",
"add_1": "AddV2",
"add_2": "AddV2",
"output": "Identity"
}, node_name_to_op)
@test_util.deprecated_graph_mode_only
def testTrtGraphConverter_DynamicOp(self):
output_saved_model_dir = self.mkdtemp()
output_graph_def = self._ConvertGraphV1(
output_saved_model_dir=output_saved_model_dir,
is_dynamic_op=True,
maximum_cached_engines=2)
# Test the output GraphDef.
with ops.Graph().as_default():
importer.import_graph_def(output_graph_def, name="")
with self.session(config=self._GetConfigProto()) as sess:
# Run with batch size 1, a new engine is created and cached.
self._TestRun(sess, 1)
# Run with batch size 2, a new engine is created and cached.
self._TestRun(sess, 2)
# Run with batch size 3, since the number of cached engines has reached
# the max, it should evict an old engine and create a new one.
self._TestRun(sess, 3)
# Test the output SavedModel
with ops.Graph().as_default():
with self.session(config=self._GetConfigProto()) as sess:
loader.load(sess, [tag_constants.SERVING], output_saved_model_dir)
# Run with batch size 1, a new engine is created and cached.
self._TestRun(sess, 1)
# Run with batch size 2, a new engine is created and cached.
self._TestRun(sess, 2)
# Run with batch size 3, since the number of cached engines has reached
# the max, it should evict an old engine and create a new one.
self._TestRun(sess, 3)
@test_util.deprecated_graph_mode_only
def testTrtGraphConverter_StaticOp(self):
output_saved_model_dir = self.mkdtemp()
output_graph_def = self._ConvertGraphV1(
output_saved_model_dir=output_saved_model_dir, maximum_cached_engines=1)
# Test the output GraphDef.
with ops.Graph().as_default():
importer.import_graph_def(output_graph_def, name="")
with self.session(config=self._GetConfigProto()) as sess:
# Run with batch size 1, the default engine embedded in the graphdef
# will be used.
self._TestRun(sess, 1)
# Run with batch size 2, which exceeds the max_batch_size; it should
# fall back to the native TF function.
self._TestRun(sess, 2)
# Test the output SavedModel
with ops.Graph().as_default():
with self.session(config=self._GetConfigProto()) as sess:
loader.load(sess, [tag_constants.SERVING], output_saved_model_dir)
# Run with batch size 1, the default engine embedded in the graphdef
# will be used.
self._TestRun(sess, 1)
# Run with batch size 2, which exceeds the max_batch_size; it should
# fall back to the native TF function.
self._TestRun(sess, 2)
@test_util.run_v2_only
def testTrtGraphConverter_AllowEngineNativeSegmentExecution(self):
np_input1, np_input2 = self._RandomInput([4, 1, 1])
# Create a model and save it.
input_saved_model_dir = self.mkdtemp()
root = self._GetModelForV2()
save.save(root, input_saved_model_dir,
{_SAVED_MODEL_SIGNATURE_KEY: root.run})
def _InputFn():
yield np_input1, np_input2
# Run TRT conversion
converter = self._CreateConverterV2(
input_saved_model_dir, max_workspace_size_bytes=1 << 20)
converter.convert()
os.environ["TF_TRT_ALLOW_ENGINE_NATIVE_SEGMENT_EXECUTION"] = "False"
os.environ["TF_TRT_ABORT_CUDA_ENGINE_BUILD"] = "True"
with self.assertRaisesRegex(
errors.AbortedError,
r"User disallowed engine native segment execution"):
try:
converter.build(input_fn=_InputFn)
finally:
# Always reset the environment variable.
os.environ["TF_TRT_ALLOW_ENGINE_NATIVE_SEGMENT_EXECUTION"] = "True"
os.environ["TF_TRT_ABORT_CUDA_ENGINE_BUILD"] = "False"
converter.build(input_fn=_InputFn)
@parameterized.parameters((True, True), (True, False), (False, True),
(False, False))
@test_util.run_v2_only
def testTrtGraphConverter_AllowBuildAtRuntime(self, build_offline,
allow_build_at_runtime):
if not is_tensorrt_enabled():
return
# Create a model and save it.
input_saved_model_dir = self.mkdtemp()
root = self._GetModelForV2()
save.save(root, input_saved_model_dir,
{_SAVED_MODEL_SIGNATURE_KEY: root.run})
np_input1 = ops.convert_to_tensor(np.ones([4, 1, 1]).astype(np.float32))
np_input2 = ops.convert_to_tensor(np.ones([4, 1, 1]).astype(np.float32))
def _InputFn():
yield np_input1, np_input2
# Run TRT conversion.
converter = self._CreateConverterV2(
input_saved_model_dir, allow_build_at_runtime=allow_build_at_runtime)
converter.convert()
if build_offline:
converter.build(input_fn=_InputFn)
# Output saved model dir.
output_saved_model_dir = self.mkdtemp()
converter.save(output_saved_model_dir)
saved_model_loaded = load.load(
output_saved_model_dir, tags=[tag_constants.SERVING])
graph_func = saved_model_loaded.signatures[_SAVED_MODEL_SIGNATURE_KEY]
# Checks the TrtEngineOp(s) have the correct attribute(s).
def _CheckFn(node):
self.assertEqual(node.attr["_allow_build_at_runtime"].b,
allow_build_at_runtime)
self._CheckTrtOps(graph_func, _CheckFn)
# If the engine was not built offline and the user disallowed building at
# runtime and running native segments, an error is reported.
if not build_offline and not allow_build_at_runtime:
with self.assertRaisesRegex(
errors.AbortedError,
r"User disallowed engine native segment execution"):
try:
os.environ["TF_TRT_ALLOW_ENGINE_NATIVE_SEGMENT_EXECUTION"] = "False"
graph_func(inp1=np_input1, inp2=np_input2)
finally:
os.environ["TF_TRT_ALLOW_ENGINE_NATIVE_SEGMENT_EXECUTION"] = "True"
else:
output = graph_func(inp1=np_input1, inp2=np_input2)["output_0"]
self.assertEqual(output.shape, (4, 1, 1))
self.assertAllClose(
np.asarray([5.0, 5.0, 5.0, 5.0]).reshape([4, 1, 1]), output)
@test_util.run_v2_only
def testBackwardCompatibility(self):
"""Load and execute a model that was saved in TF2.0."""
model_dir = test.test_src_dir_path(
"python/compiler/tensorrt/test/testdata/tftrt_2.0_saved_model")
saved_model_loaded = load.load(model_dir, tags=[tag_constants.SERVING])
graph_func = saved_model_loaded.signatures[
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
np_input1 = ops.convert_to_tensor(np.ones([4, 1, 1]).astype(np.float32))
np_input2 = ops.convert_to_tensor(np.ones([4, 1, 1]).astype(np.float32))
output = graph_func(input1=np_input1, input2=np_input2)["output_0"]
self.assertEqual(output.shape, (4, 1, 1))
self.assertAllClose(
np.asarray([5.0, 5.0, 5.0, 5.0]).reshape([4, 1, 1]), output)
@parameterized.named_parameters([
("SaveGPUSpecificEngine", True),
("WithoutSaveGPUSpecificEngine", False),
])
@test_util.run_v2_only
def testTrtGraphConverter_SaveGPUSpecificEngine(self, save_engine_flag):
"""Test case for trt_convert.TrtGraphConverter()."""
np_input1, np_input2 = self._RandomInput([4, 1, 1])
# Create a model and save it.
input_saved_model_dir = self.mkdtemp()
root = self._GetModelForV2()
save.save(root, input_saved_model_dir,
{_SAVED_MODEL_SIGNATURE_KEY: root.run})
# Run TRT conversion.
converter = self._CreateConverterV2(
input_saved_model_dir, precision_mode=trt_convert.TrtPrecisionMode.INT8)
# Run the converted function to populate the engine cache.
def CalibrationFn():
yield np_input1, np_input2
converter.convert(calibration_input_fn=CalibrationFn)
# Verify the converted GraphDef and ConcreteFunction.
self._CheckTrtOps(converter._converted_func)
trt_engine_name = self._GetUniqueTRTEngineOp(
converter._converted_graph_def).name
# Save the converted model with or without any TRT engine cache
# based on the value of save_engine_flag.
output_saved_model_dir = self.mkdtemp()
converter.save(
output_saved_model_dir, save_gpu_specific_engines=save_engine_flag)
expected_asset_file = \
self._GetAssetFile(output_saved_model_dir, trt_engine_name)
self.assertTrue(os.path.exists(expected_asset_file))
if save_engine_flag:
# engine is saved so we expect engine data
self.assertTrue(os.path.getsize(expected_asset_file))
else:
# engine is not saved so files should be empty
self.assertFalse(os.path.getsize(expected_asset_file))
del converter
gc.collect() # Force GC to destroy the TRT engine cache.
@test_util.run_v2_only
def testTrtGraphConverterV2_SaveWithOptions(self):
"""Test to make sure that save method respects options kwarg."""
# Create a model and save it.
input_saved_model_dir = self.mkdtemp()
root = self._GetModelForV2()
save.save(root, input_saved_model_dir,
{_SAVED_MODEL_SIGNATURE_KEY: root.run})
# Run TRT conversion.
converter = self._CreateConverterV2(input_saved_model_dir)
converter.convert()
# Patch save function with mock.
with mock.patch.object(trt_convert, "save") as mock_save:
mock_save.save = mock.MagicMock()
# Save converted model with options.
output_saved_model_dir = self.mkdtemp()
options = save_options.SaveOptions(save_debug_info=True)
converter.save(output_saved_model_dir, options=options)
# Assert that the saved_model.save function was called with the given
# save_options by the TrtGraphConverterV2.save method.
mock_save.save.assert_called_once_with(
mock.ANY, mock.ANY, mock.ANY, options=options)
@parameterized.named_parameters([
("NoDeviceAssignment", None),
("GPU1", "GPU:1"),
])
@test_util.run_v2_only
def testTrtGraphConverter_DevicePlacement(self, device_id):
"""Test case for trt_convert.TrtGraphConverter()."""
gpus = config.list_physical_devices("GPU")
if len(gpus) < 2:
self.skipTest("Expected at least 2 GPUs but found {} GPUs".format(
len(gpus)))
np_input1 = ops.convert_to_tensor(np.ones([4, 1, 1]).astype(np.float32))
np_input2 = ops.convert_to_tensor(np.ones([4, 1, 1]).astype(np.float32))
# Create a model and save it.
input_saved_model_dir = self.mkdtemp()
root = self._GetModelForV2()
save.save(root, input_saved_model_dir,
{_SAVED_MODEL_SIGNATURE_KEY: root.run})
converter = self._CreateConverterV2(
input_saved_model_dir, precision_mode=trt_convert.TrtPrecisionMode.FP32)
converted_model = None
# Specify device on which converted model should be placed
with ops.device(device_id):
converted_model = converter.convert()
# Verify that TRT engine op has the correct device.
self._CheckTrtOps(converter._converted_func)
actual_device_id = self._GetUniqueTRTEngineOp(
converter._converted_graph_def).device
expected_device_id = None
if device_id is not None:
expected_device_id = device_id
else:
expected_device_id = "GPU:0"
self.assertTrue(expected_device_id.lower() in actual_device_id.lower())
del converter
gc.collect() # Force GC to destroy the TRT engine cache.
@test_util.run_v2_only
def testTrtGraphConverter_DevicePlacementOnCPU(self):
"""Test case for trt_convert.TrtGraphConverter()."""
np_input1 = ops.convert_to_tensor(np.ones([4, 1, 1]).astype(np.float32))
np_input2 = ops.convert_to_tensor(np.ones([4, 1, 1]).astype(np.float32))
# Create a model and save it.
input_saved_model_dir = self.mkdtemp()
root = self._GetModelForV2()
save.save(root, input_saved_model_dir,
{_SAVED_MODEL_SIGNATURE_KEY: root.run})
# Run TRT conversion.
converter = self._CreateConverterV2(
input_saved_model_dir, precision_mode=trt_convert.TrtPrecisionMode.FP32)
converted_model = None
# Specify device on which converted model should be placed
with self.assertRaisesRegex(ValueError, r"Specified device is not a GPU"):
with ops.device("CPU"):
converted_model = converter.convert()
del converter
gc.collect() # Force GC to destroy the TRT engine cache.
def _TestVariableHelper(self, variable_op, tf_model_name, tftrt_model_name,
output_name):
"""Helper with the common code of variable converter tests."""
model_dir = test.test_src_dir_path(
"python/compiler/tensorrt/test/testdata/" + tf_model_name)
trt_model_dir = os.path.join(self.mkdtemp(), tftrt_model_name)
# Load and convert the TF model.
conv_params = trt_convert.TrtConversionParams(
precision_mode="FP16",
minimum_segment_size=3,
max_workspace_size_bytes=10 << 20,
maximum_cached_engines=1)
with test_utils.experimental_feature_scope("disable_graph_freezing"):
converter = trt_convert.TrtGraphConverterV2(
input_saved_model_dir=model_dir,
conversion_params=conv_params,
use_dynamic_shape=True,
dynamic_shape_profile_strategy="Optimal")
converter.convert()
# Build and save the converted model.
input_shapes = [[(4, 1, 1), (4, 1, 1)]]
def _InputFn():
for shapes in input_shapes:
# return a list of input tensors
yield [np.ones(shape=shape).astype(np.float32) for shape in shapes]
converter.build(_InputFn)
converter.save(trt_model_dir)
# Load the converted model.
saved_model_loaded = load.load(trt_model_dir, tags=[tag_constants.SERVING])
graph_func = saved_model_loaded.signatures[
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
# Check that there is one segment and that the 2 variables are in it.
graph_def = graph_func.graph.as_graph_def()
engines = []
for lib_function in graph_def.library.function:
if re.search(r"TRTEngineOp_\d+_\d+_native_segment",
lib_function.signature.name):
node_ops = [node.op for node in lib_function.node_def]
engines.append(node_ops)
self.assertLen(engines, 1)
self.assertEqual(engines[0].count(variable_op), 2)
# Run the function and check the output.
np_input1 = ops.convert_to_tensor(np.ones([4, 1, 1]).astype(np.float32))
np_input2 = ops.convert_to_tensor(2. *
np.ones([4, 1, 1]).astype(np.float32))
output = graph_func(input1=np_input1, input2=np_input2)[output_name]
self.assertEqual(output.shape, (4, 1, 1))
self.assertAllClose(
np.asarray([42., 42., 42., 42.]).reshape([4, 1, 1]), output)
@test_util.run_v2_only
def testVariableV2(self):
"""Test conversion of VariableV2 nodes."""
self._TestVariableHelper("VariableV2", "tf_variablev2_saved_model",
"tftrt_variablev2_saved_model", "output")
@test_util.run_v2_only
def testReadVariableOp(self):
"""Test conversion of ReadVariableOp nodes."""
self._TestVariableHelper("ReadVariableOp", "tf_readvariableop_saved_model",
"tftrt_readvariableop_saved_model", "output_0")
if __name__ == "__main__" and is_tensorrt_enabled():
test.main()
| TrtConvertTest |
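A minimal sketch of the TF-TRT v2 workflow that the tests in the record above exercise, assuming a SavedModel with two float32 inputs already exists; the directory paths, shapes, and precision choice are illustrative placeholders, not taken from the record.

import numpy as np
from tensorflow.python.compiler.tensorrt import trt_convert as trt

params = trt.TrtConversionParams(precision_mode="FP16", maximum_cached_engines=1)
converter = trt.TrtGraphConverterV2(
    input_saved_model_dir="/tmp/my_saved_model",  # hypothetical path
    conversion_params=params)
converter.convert()

def input_fn():
    # One representative pair of inputs so the engines can be built offline.
    yield np.ones([4, 1, 1], np.float32), np.ones([4, 1, 1], np.float32)

converter.build(input_fn=input_fn)
converter.save("/tmp/my_trt_saved_model")  # hypothetical path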
python | Pylons__pyramid | src/pyramid/response.py | {
"start": 307,
"end": 2197
} | class ____(Response):
"""
A Response object that can be used to serve a static file from disk
simply.
``path`` is a file path on disk.
``request`` must be a Pyramid :term:`request` object. Note
that a request *must* be passed if the response is meant to attempt to
use the ``wsgi.file_wrapper`` feature of the web server that you're using
to serve your Pyramid application.
``cache_max_age`` is the number of seconds that should be used
to HTTP cache this response.
``content_type`` is the content_type of the response.
``content_encoding`` is the content_encoding of the response.
It's generally safe to leave this set to ``None`` if you're serving a
binary file. This argument will be ignored if you also leave
``content-type`` as ``None``.
"""
def __init__(
self,
path,
request=None,
cache_max_age=None,
content_type=None,
content_encoding=None,
):
if content_type is None:
content_type, content_encoding = _guess_type(path)
super().__init__(
conditional_response=True,
content_type=content_type,
content_encoding=content_encoding,
)
self.last_modified = getmtime(path)
content_length = getsize(path)
f = open(path, 'rb')
app_iter = None
if request is not None:
environ = request.environ
if 'wsgi.file_wrapper' in environ:
app_iter = environ['wsgi.file_wrapper'](f, _BLOCK_SIZE)
if app_iter is None:
app_iter = FileIter(f, _BLOCK_SIZE)
self.app_iter = app_iter
# assignment of content_length must come after assignment of app_iter
self.content_length = content_length
if cache_max_age is not None:
self.cache_expires = cache_max_age
| FileResponse |
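A minimal usage sketch for the class in the record above: a Pyramid view callable that serves a file from disk. The route name and file path are hypothetical.

from pyramid.response import FileResponse
from pyramid.view import view_config

@view_config(route_name="report")  # hypothetical route
def report_view(request):
    # Passing the request lets the response use wsgi.file_wrapper when the
    # server offers it; cache_max_age controls the HTTP caching headers.
    return FileResponse("/path/to/report.pdf", request=request,
                        cache_max_age=3600, content_type="application/pdf")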
python | nedbat__coveragepy | tests/test_files.py | {
"start": 13356,
"end": 13499
} | class ____(Protocol):
"""The shape all Matchers have."""
def match(self, s: str) -> bool:
"""Does this string match?"""
| TMatcher |
python | encode__django-rest-framework | tests/test_generics.py | {
"start": 19117,
"end": 19546
} | class ____(TestCase):
def test_guarded_queryset(self):
class QuerysetAccessError(generics.ListAPIView):
queryset = BasicModel.objects.all()
def get(self, request):
return Response(list(self.queryset))
view = QuerysetAccessError.as_view()
request = factory.get('/')
with pytest.raises(RuntimeError):
view(request).render()
| TestGuardedQueryset |
python | huggingface__transformers | tests/utils/test_core_model_loading.py | {
"start": 6452,
"end": 6678
} | class ____(nn.Module):
def __init__(self):
super().__init__()
self.q_proj = DummyParamModule((1, 2))
self.k_proj = DummyParamModule((1, 2))
self.v_proj = DummyParamModule((1, 2))
| DummySelfAttn |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 282254,
"end": 282750
} | class ____(sgqlc.types.Input):
"""Ordering options for repository migrations."""
__schema__ = github_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(sgqlc.types.non_null(RepositoryMigrationOrderField), graphql_name="field")
"""The field to order repository migrations by."""
direction = sgqlc.types.Field(sgqlc.types.non_null(RepositoryMigrationOrderDirection), graphql_name="direction")
"""The ordering direction."""
| RepositoryMigrationOrder |
python | openai__openai-python | src/openai/types/evals/create_eval_completions_run_data_source.py | {
"start": 4453,
"end": 4783
} | class ____(BaseModel):
template: List[InputMessagesTemplateTemplate]
"""A list of chat messages forming the prompt or context.
May include variable references to the `item` namespace, ie {{item.name}}.
"""
type: Literal["template"]
"""The type of input messages. Always `template`."""
| InputMessagesTemplate |
python | huggingface__transformers | src/transformers/models/swiftformer/modeling_swiftformer.py | {
"start": 1188,
"end": 2760
} | class ____(nn.Module):
"""
Patch Embedding Layer constructed of two 2D convolutional layers.
Input: tensor of shape `[batch_size, in_channels, height, width]`
Output: tensor of shape `[batch_size, out_channels, height/4, width/4]`
"""
def __init__(self, config: SwiftFormerConfig):
super().__init__()
in_chs = config.num_channels
out_chs = config.embed_dims[0]
self.patch_embedding = nn.Sequential(
nn.Conv2d(in_chs, out_chs // 2, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(out_chs // 2, eps=config.batch_norm_eps),
nn.ReLU(),
nn.Conv2d(out_chs // 2, out_chs, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(out_chs, eps=config.batch_norm_eps),
nn.ReLU(),
)
def forward(self, x):
return self.patch_embedding(x)
# Copied from transformers.models.beit.modeling_beit.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
random_tensor.floor_() # binarize
output = input.div(keep_prob) * random_tensor
return output
| SwiftFormerPatchEmbedding |
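A small shape-check sketch for the module in the record above, assuming the default SwiftFormerConfig from transformers; the 224x224 input size is illustrative.

import torch
from transformers import SwiftFormerConfig

config = SwiftFormerConfig()
patch_embed = SwiftFormerPatchEmbedding(config)
out = patch_embed(torch.randn(1, config.num_channels, 224, 224))
# Two stride-2 convolutions reduce the spatial size 4x: (1, embed_dims[0], 56, 56).
print(out.shape)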
python | encode__django-rest-framework | tests/schemas/test_openapi.py | {
"start": 1111,
"end": 1656
} | class ____(TestCase):
def dummy_view(request):
pass
def test_filters(self):
classes = [filters.SearchFilter, filters.OrderingFilter]
for c in classes:
f = c()
assert f.get_schema_operation_parameters(self.dummy_view)
def test_pagination(self):
classes = [pagination.PageNumberPagination, pagination.LimitOffsetPagination, pagination.CursorPagination]
for c in classes:
f = c()
assert f.get_schema_operation_parameters(self.dummy_view)
| TestBasics |
python | crytic__slither | slither/solc_parsing/declarations/caller_context.py | {
"start": 228,
"end": 913
} | class ____(metaclass=abc.ABCMeta):
"""
This class is inherited by all declaration classes that can be used in expression/type parsing
as a source of context/scope.
It is used by any declaration class that can be top-level and requires complex parsing.
"""
@property
@abc.abstractmethod
def is_compact_ast(self) -> bool:
pass
@property
@abc.abstractmethod
def compilation_unit(self) -> "SlitherCompilationUnit":
pass
@abc.abstractmethod
def get_key(self) -> str:
pass
@property
@abc.abstractmethod
def slither_parser(self) -> "SlitherCompilationUnitSolc":
pass
| CallerContextExpression |
python | mamba-org__mamba | micromamba/tests/test_config.py | {
"start": 2875,
"end": 3153
} | class ____:
def test_config_empty(self, tmp_home):
assert "Configuration of micromamba" in config()
@pytest.mark.parametrize("quiet_flag", ["-q", "--quiet"])
def test_config_quiet(self, quiet_flag, tmp_home):
assert config(quiet_flag) == ""
| TestConfig |
python | spyder-ide__spyder | spyder/plugins/plots/widgets/figurebrowser.py | {
"start": 11111,
"end": 23154
} | class ____(QScrollArea, SpyderWidgetMixin):
"""
A scroll area that displays a single FigureCanvas, with zooming via
Ctrl + mouse wheel and panning via left-button mouse drag events.
"""
sig_zoom_changed = Signal(int)
"""
This signal is emitted when zoom has changed.
Parameters
----------
zoom_value: int
The new value for the zoom property.
"""
sig_context_menu_requested = Signal(QPoint)
"""
This signal is emitted to request a context menu.
Parameters
----------
point: QPoint
The QPoint in global coordinates where the menu was requested.
"""
sig_figure_loaded = Signal()
"""This signal is emitted when a new figure is loaded."""
def __init__(self, parent=None, background_color=None):
if not PYSIDE2:
super().__init__(parent, class_parent=parent)
else:
QScrollArea.__init__(self, parent)
SpyderWidgetMixin.__init__(self, class_parent=parent)
self.setAlignment(Qt.AlignCenter)
self.viewport().setObjectName("figviewport")
self.viewport().setStyleSheet(
"#figviewport {background-color:" + str(background_color) + "}")
self.setFrameStyle(0)
self.background_color = background_color
self.current_thumbnail = None
self.scalefactor = 0
self._scalestep = 1.2
self._sfmax = 10
self._sfmin = -10
self.setup_figcanvas()
self.auto_fit_plotting = False
# An internal flag that tracks when the figure is being panned.
self._ispanning = False
# To save scrollbar values in the current thumbnail
self.verticalScrollBar().valueChanged.connect(
self._set_vscrollbar_value
)
self.horizontalScrollBar().valueChanged.connect(
self._set_hscrollbar_value
)
@property
def auto_fit_plotting(self):
"""
Return whether to automatically fit the plot to the scroll area size.
"""
return self._auto_fit_plotting
@auto_fit_plotting.setter
def auto_fit_plotting(self, value):
"""
Set whether to automatically fit the plot to the scroll area size.
"""
self._auto_fit_plotting = value
if self.current_thumbnail is not None:
self.current_thumbnail.auto_fit = value
if value:
self.scale_image()
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
else:
self.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
@property
def scalefactor(self):
"""Return the current scale factor."""
return self._scalefactor
@scalefactor.setter
def scalefactor(self, value):
"""Set the scale factor value."""
self._scalefactor = value
if self.current_thumbnail is not None:
self.current_thumbnail.scalefactor = value
def setup_figcanvas(self):
"""Setup the FigureCanvas."""
self.figcanvas = FigureCanvas(parent=self,
background_color=self.background_color)
self.figcanvas.installEventFilter(self)
self.figcanvas.customContextMenuRequested.connect(
self.show_context_menu)
self.setWidget(self.figcanvas)
def show_context_menu(self, qpoint):
"""Only emit context menu signal if there is a figure."""
if self.figcanvas and self.figcanvas.fig is not None:
# Convert to global
point = self.figcanvas.mapToGlobal(qpoint)
self.sig_context_menu_requested.emit(point)
def set_current_thumbnail(self, thumbnail):
"""Set the current thumbnail displayed in the viewer."""
self.current_thumbnail = thumbnail
def load_figure(self, fig, fmt):
"""Set a new figure in the figure canvas."""
self.auto_fit_plotting = self.current_thumbnail.auto_fit
# Let scale_image compute the scale factor for the thumbnail if it
# hasn't one yet.
if self.current_thumbnail.scalefactor is not None:
self.scalefactor = self.current_thumbnail.scalefactor
self.figcanvas.load_figure(fig, fmt)
self.sig_figure_loaded.emit()
self.scale_image()
self.figcanvas.repaint()
# Save the computed scale factor by scale_image in the thumbnail
if self.current_thumbnail.scalefactor is None:
self.current_thumbnail.scalefactor = self.scalefactor
# Restore scrollbar values.
# We need to use timers for this because trying to set those values
# immediately after the figure is loaded doesn't work.
QTimer.singleShot(
20,
self.update_scrollbars_values,
)
def update_scrollbars_values(self):
self.verticalScrollBar().setValue(
self.current_thumbnail.vscrollbar_value
)
self.horizontalScrollBar().setValue(
self.current_thumbnail.hscrollbar_value
)
def eventFilter(self, widget, event):
"""A filter to control the zooming and panning of the figure canvas."""
# ---- Zooming
if event.type() == QEvent.Wheel and not self.auto_fit_plotting:
modifiers = QApplication.keyboardModifiers()
if (
modifiers == Qt.ControlModifier
and not self.get_conf('disable_zoom_mouse', section='main')
):
if event.angleDelta().y() > 0:
self.zoom_in()
else:
self.zoom_out()
return True
else:
return False
# ---- Scaling
elif event.type() == QEvent.Paint and self.auto_fit_plotting:
self.scale_image()
# ---- Panning
# Set ClosedHandCursor:
elif event.type() == QEvent.MouseButtonPress:
if (
event.button() == Qt.LeftButton
and not self.auto_fit_plotting
and (
self.verticalScrollBar().isVisible()
or self.horizontalScrollBar().isVisible()
)
):
self.setCursor(Qt.ClosedHandCursor)
self._ispanning = True
self.xclick = event.globalX()
self.yclick = event.globalY()
# Reset Cursor:
elif event.type() == QEvent.MouseButtonRelease:
self.setCursor(Qt.ArrowCursor)
self._ispanning = False
# Move ScrollBar:
elif event.type() == QEvent.MouseMove:
if self._ispanning:
dx = self.xclick - event.globalX()
self.xclick = event.globalX()
dy = self.yclick - event.globalY()
self.yclick = event.globalY()
scrollBarH = self.horizontalScrollBar()
scrollBarH.setValue(scrollBarH.value() + dx)
scrollBarV = self.verticalScrollBar()
scrollBarV.setValue(scrollBarV.value() + dy)
# Show in full size
elif (
event.type() == QEvent.MouseButtonDblClick
and self.scalefactor != 0
):
self.auto_fit_plotting = False
self.zoom_in(to_full_size=True)
# Necessary to correctly set the state of the fit_action button
self.sig_figure_loaded.emit()
return QWidget.eventFilter(self, widget, event)
# ---- Figure Scaling Handlers
def zoom_in(self, to_full_size=False):
"""Scale the image up by one scale step."""
# This is necessary so that the scale factor becomes zero below
if to_full_size:
self.scalefactor = -1
if self.scalefactor <= self._sfmax:
self.scalefactor += 1
self.scale_image()
self._adjust_scrollbar(self._scalestep)
def zoom_out(self):
"""Scale the image down by one scale step."""
if self.scalefactor >= self._sfmin:
self.scalefactor -= 1
self.scale_image()
self._adjust_scrollbar(1 / self._scalestep)
def scale_image(self):
"""Scale the image size."""
fwidth = self.figcanvas.fwidth
fheight = self.figcanvas.fheight
# Don't auto fit plotting
if not self.auto_fit_plotting:
new_width = int(fwidth * self._scalestep ** self.scalefactor)
new_height = int(fheight * self._scalestep ** self.scalefactor)
# Auto fit plotting
# Scale the image to fit the figviewer size while respecting the ratio.
else:
size = self.size()
style = self.style()
width = (size.width() -
style.pixelMetric(QStyle.PM_LayoutLeftMargin) -
style.pixelMetric(QStyle.PM_LayoutRightMargin))
height = (size.height() -
style.pixelMetric(QStyle.PM_LayoutTopMargin) -
style.pixelMetric(QStyle.PM_LayoutBottomMargin))
self.figcanvas.setToolTip('')
try:
if (fwidth / fheight) > (width / height):
new_width = int(width)
new_height = int(width / fwidth * fheight)
else:
new_height = int(height)
new_width = int(height / fheight * fwidth)
except ZeroDivisionError:
icon = self.create_icon('broken_image')
self.figcanvas._qpix_orig = icon.pixmap(fwidth, fheight)
self.figcanvas.setToolTip(
_('The image is broken, please try to generate it again')
)
new_width = fwidth
new_height = fheight
self.auto_fit_plotting = False
if self.figcanvas.size() != QSize(new_width, new_height):
self.figcanvas.setFixedSize(new_width, new_height)
# Adjust the scale factor according to the scaling of the fitted
# image. This is necessary so that zoom in/out increases/decreases
# the image size in factors of +1/-1 of the one computed below.
if self.auto_fit_plotting:
self.scalefactor = self.get_scale_factor()
self.sig_zoom_changed.emit(self.get_scaling())
def get_scaling(self):
"""Get the current scaling of the figure in percent."""
width = self.figcanvas.width()
fwidth = self.figcanvas.fwidth
if fwidth != 0:
return max(round(width / fwidth * 100), 1)
else:
return 100
def get_scale_factor(self):
"""Get scale factor according to the current scaling."""
return math.log(self.get_scaling() / 100) / math.log(self._scalestep)
def reset_original_image(self):
"""Reset the image to its original size."""
self.scalefactor = 0
self.scale_image()
def _adjust_scrollbar(self, f):
"""
Adjust the scrollbar position to take into account the zooming of
the figure.
"""
# Adjust horizontal scrollbar :
hb = self.horizontalScrollBar()
hb.setValue(int(f * hb.value() + ((f - 1) * hb.pageStep()/2)))
# Adjust the vertical scrollbar :
vb = self.verticalScrollBar()
vb.setValue(int(f * vb.value() + ((f - 1) * vb.pageStep()/2)))
def _set_vscrollbar_value(self, value):
"""Save vertical scrollbar value in current thumbnail."""
if self.current_thumbnail is not None:
self.current_thumbnail.vscrollbar_value = value
def _set_hscrollbar_value(self, value):
"""Save horizontal scrollbar value in current thumbnail."""
if self.current_thumbnail is not None:
self.current_thumbnail.hscrollbar_value = value
| FigureViewer |
python | apache__airflow | providers/discord/src/airflow/providers/discord/notifications/discord.py | {
"start": 1234,
"end": 3815
} | class ____(BaseNotifier):
"""
Discord BaseNotifier.
:param discord_conn_id: Http connection ID with host as "https://discord.com/api/" and
default webhook endpoint in the extra field in the form of
{"webhook_endpoint": "webhooks/{webhook.id}/{webhook.token}"}
:param text: The content of the message
:param username: The username to send the message as. Optional
:param avatar_url: The URL of the avatar to use for the message. Optional
:param tts: Text to speech.
"""
# A property that specifies the attributes that can be templated.
template_fields = ("discord_conn_id", "text", "username", "avatar_url", "tts")
def __init__(
self,
discord_conn_id: str = "discord_webhook_default",
text: str = "This is a default message",
username: str = "Airflow",
avatar_url: str = ICON_URL,
tts: bool = False,
**kwargs,
):
if AIRFLOW_V_3_1_PLUS:
# Support for passing context was added in 3.1.0
super().__init__(**kwargs)
else:
super().__init__()
self.discord_conn_id = discord_conn_id
self.text = text
self.username = username
self.avatar_url = avatar_url
# If you're having problems with tts not being recognized in __init__(),
# you can define that after instantiating the class
self.tts = tts
@cached_property
def hook(self) -> DiscordWebhookHook:
"""Discord Webhook Hook."""
return DiscordWebhookHook(http_conn_id=self.discord_conn_id)
@cached_property
def hook_async(self) -> DiscordWebhookAsyncHook:
"""Discord Webhook Async Hook."""
return DiscordWebhookAsyncHook(
http_conn_id=self.discord_conn_id,
message=self.text,
username=self.username,
avatar_url=self.avatar_url,
tts=self.tts,
)
def notify(self, context):
"""
Send a message to a Discord channel.
:param context: the context object
:return: None
"""
self.hook.username = self.username
self.hook.message = self.text
self.hook.avatar_url = self.avatar_url
self.hook.tts = self.tts
self.hook.execute()
async def async_notify(self, context) -> None:
"""
Send a message to a Discord channel using async HTTP.
:param context: the context object
:return: None
"""
await self.hook_async.execute()
| DiscordNotifier |
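A minimal sketch of how the notifier in the record above is typically wired into a DAG; the message text is a placeholder, and the connection must be set up as the class docstring describes.

from airflow.providers.discord.notifications.discord import DiscordNotifier

notify_on_failure = DiscordNotifier(
    discord_conn_id="discord_webhook_default",
    text="A task in this DAG has failed.",
    username="Airflow",
)
# Typically attached as a callback, e.g. DAG(..., on_failure_callback=notify_on_failure).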
python | django__django | django/contrib/postgres/lookups.py | {
"start": 1733,
"end": 1856
} | class ____(PostgresOperatorLookup):
lookup_name = "trigram_word_similar"
postgres_operator = "%%>"
| TrigramWordSimilar |
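A minimal ORM sketch for the lookup in the record above, assuming django.contrib.postgres is installed and the pg_trgm extension is enabled; the Author model is hypothetical.

from django.db import models

class Author(models.Model):  # hypothetical model for illustration
    name = models.CharField(max_length=100)

# The registered lookup maps onto PostgreSQL's %> word-similarity operator.
queryset = Author.objects.filter(name__trigram_word_similar="Katie Stephens")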
python | sphinx-doc__sphinx | sphinx/domains/cpp/__init__.py | {
"start": 19373,
"end": 20559
} | class ____(SphinxDirective):
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec: ClassVar[OptionSpec] = {}
def run(self) -> list[Node]:
if self.arguments[0].strip() in {'NULL', '0', 'nullptr'}:
return []
parser = DefinitionParser(
self.arguments[0], location=self.get_location(), config=self.config
)
try:
ast = parser.parse_namespace_object()
parser.assert_end()
except DefinitionError as e:
logger.warning(e, location=self.get_location())
name = _make_phony_error_name()
ast = ASTNamespace(name, None)
old_parent = self.env.current_document.cpp_parent_symbol
if not old_parent:
old_parent = self.env.domaindata['cpp']['root_symbol']
symbol = old_parent.add_name(ast.nestedName, ast.templatePrefix)
self.env.current_document.cpp_namespace_stack.append(symbol)
self.env.current_document.cpp_parent_symbol = symbol
self.env.ref_context['cpp:parent_key'] = symbol.get_lookup_key()
return []
| CPPNamespacePushObject |
python | cython__cython | Cython/Compiler/Buffer.py | {
"start": 6615,
"end": 26954
} | class ____:
def __init__(self, entry):
self.entry = entry
self.type = entry.type
self.cname = entry.buffer_aux.buflocal_nd_var.cname
self.buf_ptr = "%s.rcbuffer->pybuffer.buf" % self.cname
self.buf_ptr_type = entry.type.buffer_ptr_type
self.init_attributes()
def init_attributes(self):
self.shape = self.get_buf_shapevars()
self.strides = self.get_buf_stridevars()
self.suboffsets = self.get_buf_suboffsetvars()
def get_buf_suboffsetvars(self):
return self._for_all_ndim("%s.diminfo[%d].suboffsets")
def get_buf_stridevars(self):
return self._for_all_ndim("%s.diminfo[%d].strides")
def get_buf_shapevars(self):
return self._for_all_ndim("%s.diminfo[%d].shape")
def _for_all_ndim(self, s):
return [s % (self.cname, i) for i in range(self.type.ndim)]
def generate_buffer_lookup_code(self, code, index_cnames):
# Create buffer lookup and return it
# This is done via utility macros/inline functions, which vary
# according to the access mode used.
params = []
nd = self.type.ndim
mode = self.type.mode
if mode == 'full':
for i, s, o in zip(index_cnames,
self.get_buf_stridevars(),
self.get_buf_suboffsetvars()):
params.append(i)
params.append(s)
params.append(o)
funcname = "__Pyx_BufPtrFull%dd" % nd
funcgen = buf_lookup_full_code
else:
if mode == 'strided':
funcname = "__Pyx_BufPtrStrided%dd" % nd
funcgen = buf_lookup_strided_code
elif mode == 'c':
funcname = "__Pyx_BufPtrCContig%dd" % nd
funcgen = buf_lookup_c_code
elif mode == 'fortran':
funcname = "__Pyx_BufPtrFortranContig%dd" % nd
funcgen = buf_lookup_fortran_code
else:
assert False
for i, s in zip(index_cnames, self.get_buf_stridevars()):
params.append(i)
params.append(s)
# Make sure the utility code is available
if funcname not in code.globalstate.utility_codes:
code.globalstate.utility_codes.add(funcname)
protocode = code.globalstate['utility_code_proto']
defcode = code.globalstate['utility_code_def']
funcgen(protocode, defcode, name=funcname, nd=nd)
buf_ptr_type_code = self.buf_ptr_type.empty_declaration_code()
ptrcode = "%s(%s, %s, %s)" % (funcname, buf_ptr_type_code, self.buf_ptr,
", ".join(params))
return ptrcode
def get_flags(buffer_aux, buffer_type):
flags = 'PyBUF_FORMAT'
mode = buffer_type.mode
if mode == 'full':
flags += '| PyBUF_INDIRECT'
elif mode == 'strided':
flags += '| PyBUF_STRIDES'
elif mode == 'c':
flags += '| PyBUF_C_CONTIGUOUS'
elif mode == 'fortran':
flags += '| PyBUF_F_CONTIGUOUS'
else:
assert False
if buffer_aux.writable_needed: flags += "| PyBUF_WRITABLE"
return flags
def used_buffer_aux_vars(entry):
buffer_aux = entry.buffer_aux
buffer_aux.buflocal_nd_var.used = True
buffer_aux.rcbuf_var.used = True
def put_unpack_buffer_aux_into_scope(buf_entry, code):
# Generate code to copy the needed struct info into local
# variables.
buffer_aux, mode = buf_entry.buffer_aux, buf_entry.type.mode
pybuffernd_struct = buffer_aux.buflocal_nd_var.cname
fldnames = ['strides', 'shape']
if mode == 'full':
fldnames.append('suboffsets')
ln = []
for i in range(buf_entry.type.ndim):
for fldname in fldnames:
ln.append("%s.diminfo[%d].%s = %s.rcbuffer->pybuffer.%s[%d];" % (
pybuffernd_struct, i, fldname,
pybuffernd_struct, fldname, i,
))
code.putln(' '.join(ln))
def put_init_vars(entry, code):
bufaux = entry.buffer_aux
pybuffernd_struct = bufaux.buflocal_nd_var.cname
pybuffer_struct = bufaux.rcbuf_var.cname
# init pybuffer_struct
code.putln("%s.pybuffer.buf = NULL;" % pybuffer_struct)
code.putln("%s.refcount = 0;" % pybuffer_struct)
# init the buffer object
# code.put_init_var_to_py_none(entry)
# init the pybuffernd_struct
code.putln("%s.data = NULL;" % pybuffernd_struct)
code.putln("%s.rcbuffer = &%s;" % (pybuffernd_struct, pybuffer_struct))
def put_acquire_arg_buffer(entry, code, pos):
buffer_aux = entry.buffer_aux
getbuffer = get_getbuffer_call(code, entry.cname, buffer_aux, entry.type)
# Acquire any new buffer
code.putln("{")
code.putln("__Pyx_BufFmt_StackElem __pyx_stack[%d];" % entry.type.dtype.struct_nesting_depth())
code.putln(code.error_goto_if("%s == -1" % getbuffer, pos))
code.putln("}")
# An exception raised in arg parsing cannot be caught, so no
# need to care about the buffer then.
put_unpack_buffer_aux_into_scope(entry, code)
def put_release_buffer_code(code, entry):
code.globalstate.use_utility_code(acquire_utility_code)
code.putln("__Pyx_SafeReleaseBuffer(&%s.rcbuffer->pybuffer);" % entry.buffer_aux.buflocal_nd_var.cname)
def get_getbuffer_call(code, obj_cname, buffer_aux, buffer_type):
ndim = buffer_type.ndim
cast = int(buffer_type.cast)
flags = get_flags(buffer_aux, buffer_type)
pybuffernd_struct = buffer_aux.buflocal_nd_var.cname
dtype_typeinfo = get_type_information_cname(code, buffer_type.dtype)
code.globalstate.use_utility_code(acquire_utility_code)
return ("__Pyx_GetBufferAndValidate(&%(pybuffernd_struct)s.rcbuffer->pybuffer, "
"(PyObject*)%(obj_cname)s, &%(dtype_typeinfo)s, %(flags)s, %(ndim)d, "
"%(cast)d, __pyx_stack)" % locals())
def put_assign_to_buffer(lhs_cname, rhs_cname, buf_entry,
is_initialized, pos, code):
"""
Generate code for reassigning a buffer variable. This only deals with getting
the buffer auxiliary structure and variables set up correctly; the assignment
itself and refcounting are the responsibility of the caller.
However, the assignment operation may throw an exception so that the reassignment
never happens.
Depending on the circumstances there are two possible outcomes:
- Old buffer released, new acquired, rhs assigned to lhs
- Old buffer released, new acquired which fails, reacquire old lhs buffer
(which may or may not succeed).
"""
buffer_aux, buffer_type = buf_entry.buffer_aux, buf_entry.type
pybuffernd_struct = buffer_aux.buflocal_nd_var.cname
flags = get_flags(buffer_aux, buffer_type)
code.putln("{") # Set up necessary stack for getbuffer
code.putln("__Pyx_BufFmt_StackElem __pyx_stack[%d];" % buffer_type.dtype.struct_nesting_depth())
getbuffer = get_getbuffer_call(code, "%s", buffer_aux, buffer_type) # fill in object below
if is_initialized:
# Release any existing buffer
code.putln('__Pyx_SafeReleaseBuffer(&%s.rcbuffer->pybuffer);' % pybuffernd_struct)
# Acquire
retcode_cname = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
code.putln("%s = %s;" % (retcode_cname, getbuffer % rhs_cname))
code.putln('if (%s) {' % (code.unlikely("%s < 0" % retcode_cname)))
# If acquisition failed, attempt to reacquire the old buffer
# before raising the exception. A failure of reacquisition
# will cause the reacquisition exception to be reported; one
# can consider working around this later.
exc_temps = tuple(code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=False)
for _ in range(3))
code.putln('PyErr_Fetch(&%s, &%s, &%s);' % exc_temps)
code.putln('if (%s) {' % code.unlikely("%s == -1" % (getbuffer % lhs_cname)))
code.putln('Py_XDECREF(%s); Py_XDECREF(%s); Py_XDECREF(%s);' % exc_temps) # Do not refnanny these!
code.globalstate.use_utility_code(raise_buffer_fallback_code)
code.putln('__Pyx_RaiseBufferFallbackError();')
code.putln('} else {')
code.putln('PyErr_Restore(%s, %s, %s);' % exc_temps)
code.putln('}')
code.putln('%s = %s = %s = 0;' % exc_temps)
for t in exc_temps:
code.funcstate.release_temp(t)
code.putln('}')
# Unpack indices
put_unpack_buffer_aux_into_scope(buf_entry, code)
code.putln(code.error_goto_if_neg(retcode_cname, pos))
code.funcstate.release_temp(retcode_cname)
else:
# Our entry had no previous value, so set to None when acquisition fails.
# In this case, auxiliary vars should be set up right in initialization to a zero-buffer,
# so it suffices to set the buf field to NULL.
code.putln('if (%s) {' % code.unlikely("%s == -1" % (getbuffer % rhs_cname)))
code.putln('%s = %s; __Pyx_INCREF(Py_None); %s.rcbuffer->pybuffer.buf = NULL;' %
(lhs_cname,
PyrexTypes.typecast(buffer_type, PyrexTypes.py_object_type, "Py_None"),
pybuffernd_struct))
code.putln(code.error_goto(pos))
code.put('} else {')
# Unpack indices
put_unpack_buffer_aux_into_scope(buf_entry, code)
code.putln('}')
code.putln("}") # Release stack
def put_buffer_lookup_code(entry, index_signeds, index_cnames, directives,
pos, code, negative_indices, in_nogil_context):
"""
Generates code to process indices and calculate an offset into
a buffer. Returns a C string which gives a pointer which can be
read from or written to at will (it is an expression so caller should
store it in a temporary if it is used more than once).
As the bounds checking can have any number of combinations of unsigned
arguments, smart optimizations etc. we insert it directly in the function
body. The lookup, however, is delegated to an inline function that is instantiated
once per ndim (lookup with suboffsets tend to get quite complicated).
entry is a BufferEntry
"""
negative_indices = directives['wraparound'] and negative_indices
if directives['boundscheck']:
# Check bounds and fix negative indices.
# We allocate a temporary which is initialized to -1, meaning OK (!).
# If an error occurs, the temp is set to the index dimension the
# error is occurring at.
failed_dim_temp = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
code.putln("%s = -1;" % failed_dim_temp)
for dim, (signed, cname, shape) in enumerate(zip(index_signeds, index_cnames, entry.get_buf_shapevars())):
if signed != 0:
# not unsigned, deal with negative index
code.putln("if (%s < 0) {" % cname)
if negative_indices:
code.putln("%s += %s;" % (cname, shape))
code.putln("if (%s) %s = %d;" % (
code.unlikely("%s < 0" % cname),
failed_dim_temp, dim))
else:
code.putln("%s = %d;" % (failed_dim_temp, dim))
code.put("} else ")
# check bounds in positive direction
if signed != 0:
cast = ""
else:
cast = "(size_t)"
code.putln("if (%s) %s = %d;" % (
code.unlikely("%s >= %s%s" % (cname, cast, shape)),
failed_dim_temp, dim))
if in_nogil_context:
code.globalstate.use_utility_code(raise_indexerror_nogil)
func = '__Pyx_RaiseBufferIndexErrorNogil'
else:
code.globalstate.use_utility_code(raise_indexerror_code)
func = '__Pyx_RaiseBufferIndexError'
code.putln("if (%s) {" % code.unlikely("%s != -1" % failed_dim_temp))
code.putln('%s(%s);' % (func, failed_dim_temp))
code.putln(code.error_goto(pos))
code.putln('}')
code.funcstate.release_temp(failed_dim_temp)
elif negative_indices:
# Only fix negative indices.
for signed, cname, shape in zip(index_signeds, index_cnames, entry.get_buf_shapevars()):
if signed != 0:
code.putln("if (%s < 0) %s += %s;" % (cname, cname, shape))
return entry.generate_buffer_lookup_code(code, index_cnames)
def use_bufstruct_declare_code(env):
env.use_utility_code(buffer_struct_declare_code)
def buf_lookup_full_code(proto, defin, name, nd):
"""
Generates a buffer lookup function for the right number
of dimensions. The function gives back a void* at the right location.
"""
# _i_ndex, _s_tride, sub_o_ffset
macroargs = ", ".join(["i%d, s%d, o%d" % (i, i, i) for i in range(nd)])
proto.putln("#define %s(type, buf, %s) (type)(%s_imp(buf, %s))" % (name, macroargs, name, macroargs))
funcargs = ", ".join(["Py_ssize_t i%d, Py_ssize_t s%d, Py_ssize_t o%d" % (i, i, i) for i in range(nd)])
proto.putln("static CYTHON_INLINE void* %s_imp(void* buf, %s);" % (name, funcargs))
defin.putln(dedent("""
static CYTHON_INLINE void* %s_imp(void* buf, %s) {
char* ptr = (char*)buf;
""") % (name, funcargs) + "".join([dedent("""\
ptr += s%d * i%d;
if (o%d >= 0) ptr = *((char**)ptr) + o%d;
""") % (i, i, i, i) for i in range(nd)]
) + "\nreturn ptr;\n}")
def buf_lookup_strided_code(proto, defin, name, nd):
"""
Generates a buffer lookup function for the right number
of dimensions. The function gives back a void* at the right location.
"""
# _i_ndex, _s_tride
args = ", ".join(["i%d, s%d" % (i, i) for i in range(nd)])
offset = " + ".join(["i%d * s%d" % (i, i) for i in range(nd)])
proto.putln("#define %s(type, buf, %s) (type)((char*)buf + %s)" % (name, args, offset))
def buf_lookup_c_code(proto, defin, name, nd):
"""
Similar to strided lookup, but can assume that the last dimension
doesn't need a multiplication as long as.
Still we keep the same signature for now.
"""
if nd == 1:
proto.putln("#define %s(type, buf, i0, s0) ((type)buf + i0)" % name)
else:
args = ", ".join(["i%d, s%d" % (i, i) for i in range(nd)])
offset = " + ".join(["i%d * s%d" % (i, i) for i in range(nd - 1)])
proto.putln("#define %s(type, buf, %s) ((type)((char*)buf + %s) + i%d)" % (name, args, offset, nd - 1))
def buf_lookup_fortran_code(proto, defin, name, nd):
"""
Like C lookup, but the first index is optimized instead.
"""
if nd == 1:
proto.putln("#define %s(type, buf, i0, s0) ((type)buf + i0)" % name)
else:
args = ", ".join(["i%d, s%d" % (i, i) for i in range(nd)])
offset = " + ".join(["i%d * s%d" % (i, i) for i in range(1, nd)])
proto.putln("#define %s(type, buf, %s) ((type)((char*)buf + %s) + i%d)" % (name, args, offset, 0))
def mangle_dtype_name(dtype):
# Use prefixes to separate user defined types from builtins
# (consider "typedef float unsigned_int")
if dtype.is_pyobject:
return "object"
elif dtype.is_ptr:
return "ptr"
else:
if dtype.is_typedef or dtype.is_struct_or_union:
prefix = "nn_"
else:
prefix = ""
return prefix + dtype.specialization_name()
def get_type_information_cname(code, dtype, maxdepth=None):
"""
Output the run-time type information (__Pyx_TypeInfo) for given dtype,
and return the name of the type info struct.
Structs with two floats of the same size are encoded as complex numbers.
One can separate between complex numbers declared as struct or with native
encoding by inspecting to see if the fields field of the type is
filled in.
"""
namesuffix = mangle_dtype_name(dtype)
name = "__Pyx_TypeInfo_%s" % namesuffix
structinfo_name = "__Pyx_StructFields_%s" % namesuffix
if dtype.is_error: return "<error>"
# It's critical that walking the type info doesn't use more stack
# depth than dtype.struct_nesting_depth() returns, so use an assertion for this
if maxdepth is None: maxdepth = dtype.struct_nesting_depth()
if maxdepth <= 0:
assert False
if name not in code.globalstate.utility_codes:
code.globalstate.utility_codes.add(name)
typecode = code.globalstate['typeinfo']
arraysizes = []
if dtype.is_array:
while dtype.is_array:
arraysizes.append(dtype.size)
dtype = dtype.base_type
complex_possible = dtype.is_struct_or_union and dtype.can_be_complex()
declcode = dtype.empty_declaration_code()
if dtype.is_simple_buffer_dtype():
structinfo_name = "NULL"
elif dtype.is_struct:
struct_scope = dtype.scope
if dtype.is_cv_qualified:
struct_scope = struct_scope.base_type_scope
# Must pre-call all used types in order not to recurse during utility code writing.
fields = struct_scope.var_entries
assert len(fields) > 0
types = [get_type_information_cname(code, f.type, maxdepth - 1)
for f in fields]
typecode.putln("static const __Pyx_StructField %s[] = {" % structinfo_name, safe=True)
if dtype.is_cv_qualified:
# roughly speaking, remove "const" from struct_type
struct_type = dtype.cv_base_type.empty_declaration_code()
else:
struct_type = dtype.empty_declaration_code()
for f, typeinfo in zip(fields, types):
typecode.putln(' {&%s, "%s", offsetof(%s, %s)},' %
(typeinfo, f.name, struct_type, f.cname), safe=True)
typecode.putln(' {NULL, NULL, 0}', safe=True)
typecode.putln("};", safe=True)
else:
assert False
rep = str(dtype)
flags = "0"
is_unsigned = "0"
if dtype is PyrexTypes.c_char_type:
is_unsigned = "__PYX_IS_UNSIGNED(%s)" % declcode
typegroup = "'H'"
elif dtype.is_int:
is_unsigned = "__PYX_IS_UNSIGNED(%s)" % declcode
typegroup = "%s ? 'U' : 'I'" % is_unsigned
elif complex_possible or dtype.is_complex:
typegroup = "'C'"
elif dtype.is_float:
typegroup = "'R'"
elif dtype.is_struct:
typegroup = "'S'"
if dtype.packed:
flags = "__PYX_BUF_FLAGS_PACKED_STRUCT"
elif dtype.is_pyobject:
typegroup = "'O'"
else:
assert False, dtype
typeinfo = ('static const __Pyx_TypeInfo %s = '
'{ "%s", %s, sizeof(%s), { %s }, %s, %s, %s, %s };')
tup = (name, rep, structinfo_name, declcode,
', '.join([str(x) for x in arraysizes]) or '0', len(arraysizes),
typegroup, is_unsigned, flags)
typecode.putln(typeinfo % tup, safe=True)
return name
def load_buffer_utility(util_code_name, context=None, **kwargs):
if context is None:
return UtilityCode.load(util_code_name, "Buffer.c", **kwargs)
else:
return TempitaUtilityCode.load(util_code_name, "Buffer.c", context=context, **kwargs)
context = dict(max_dims=Options.buffer_max_dims)
buffer_struct_declare_code = load_buffer_utility("BufferStructDeclare", context=context)
buffer_formats_declare_code = load_buffer_utility("BufferFormatStructs")
# Utility function to set the right exception
# The caller should immediately goto_error
raise_indexerror_code = load_buffer_utility("BufferIndexError")
raise_indexerror_nogil = load_buffer_utility("BufferIndexErrorNogil")
raise_buffer_fallback_code = load_buffer_utility("BufferFallbackError")
acquire_utility_code = load_buffer_utility("BufferGetAndValidate", context=context)
buffer_format_check_code = load_buffer_utility("BufferFormatCheck", context=context)
# See utility code BufferFormatFromTypeInfo
_typeinfo_to_format_code = load_buffer_utility("TypeInfoToFormat")
| BufferEntry |
python | scipy__scipy | scipy/ndimage/tests/test_interpolation.py | {
"start": 661,
"end": 3737
} | class ____:
@make_xp_test_case(ndimage.geometric_transform)
@pytest.mark.parametrize(
'mode, expected_value',
[('nearest', [1.5, 2.5, 3.5, 4, 4, 4, 4]),
('wrap', [1.5, 2.5, 3.5, 1.5, 2.5, 3.5, 1.5]),
('grid-wrap', [1.5, 2.5, 3.5, 2.5, 1.5, 2.5, 3.5]),
('mirror', [1.5, 2.5, 3.5, 3.5, 2.5, 1.5, 1.5]),
('reflect', [1.5, 2.5, 3.5, 4, 3.5, 2.5, 1.5]),
('constant', [1.5, 2.5, 3.5, -1, -1, -1, -1]),
('grid-constant', [1.5, 2.5, 3.5, 1.5, -1, -1, -1])]
)
def test_boundaries(self, mode, expected_value, xp):
def shift(x):
return (x[0] + 0.5,)
data = xp.asarray([1, 2, 3, 4.])
xp_assert_equal(
ndimage.geometric_transform(data, shift, cval=-1, mode=mode,
output_shape=(7,), order=1),
xp.asarray(expected_value))
@make_xp_test_case(ndimage.geometric_transform)
@pytest.mark.parametrize(
'mode, expected_value',
[('nearest', [1, 1, 2, 3]),
('wrap', [3, 1, 2, 3]),
('grid-wrap', [4, 1, 2, 3]),
('mirror', [2, 1, 2, 3]),
('reflect', [1, 1, 2, 3]),
('constant', [-1, 1, 2, 3]),
('grid-constant', [-1, 1, 2, 3])]
)
def test_boundaries2(self, mode, expected_value, xp):
def shift(x):
return (x[0] - 0.9,)
data = xp.asarray([1, 2, 3, 4])
xp_assert_equal(
ndimage.geometric_transform(data, shift, cval=-1, mode=mode,
output_shape=(4,)),
xp.asarray(expected_value))
@make_xp_test_case(ndimage.map_coordinates)
@pytest.mark.parametrize('mode', ['mirror', 'reflect', 'grid-mirror',
'grid-wrap', 'grid-constant',
'nearest'])
@pytest.mark.parametrize('order', range(6))
def test_boundary_spline_accuracy(self, mode, order, xp):
"""Tests based on examples from gh-2640"""
if (is_jax(xp) and
(mode not in ['mirror', 'reflect', 'constant', 'wrap', 'nearest']
or order > 1)
):
pytest.xfail("Jax does not support grid- modes or order > 1")
np_data = np.arange(-6, 7, dtype=np.float64)
data = xp.asarray(np_data)
x = xp.asarray(np.linspace(-8, 15, num=1000))
y = ndimage.map_coordinates(data, x[xp.newaxis, ...], order=order, mode=mode)
# compute expected value using explicit padding via np.pad
npad = 32
pad_mode = ndimage_to_numpy_mode.get(mode)
padded = xp.asarray(np.pad(np_data, npad, mode=pad_mode))
coords = xp.asarray(npad + x)[xp.newaxis, ...]
expected = ndimage.map_coordinates(padded, coords, order=order, mode=mode)
atol = 1e-5 if mode == 'grid-constant' else 1e-12
xp_assert_close(y, expected, rtol=1e-7, atol=atol)
@make_xp_test_case(ndimage.spline_filter)
@pytest.mark.parametrize('order', range(2, 6))
@pytest.mark.parametrize('dtype', types)
| TestBoundaries |
python | kamyu104__LeetCode-Solutions | Python/assign-elements-to-groups-with-constraints.py | {
"start": 121,
"end": 632
} | class ____(object):
def assignElements(self, groups, elements):
"""
:type groups: List[int]
:type elements: List[int]
:rtype: List[int]
"""
mx = max(groups)
lookup = [-1]*mx
for i, x in enumerate(elements):
if x > mx or lookup[x-1] != -1:
continue
for y in xrange(x, mx+1, x):
if lookup[y-1] == -1:
lookup[y-1] = i
return [lookup[x-1] for x in groups]
| Solution |
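A short worked example for the method in the record above, using an illustrative input: elements[0] = 4 claims the multiples 4 and 8, elements[1] = 2 then claims the still-free multiples 2 and 6, so the groups map to the element indices shown.

print(Solution().assignElements([8, 4, 3, 2, 4], [4, 2]))  # -> [0, 0, -1, 1, 0]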
python | encode__django-rest-framework | tests/generic_relations/models.py | {
"start": 562,
"end": 792
} | class ____(models.Model):
"""
A URL bookmark that may have multiple tags attached.
"""
url = models.URLField()
tags = GenericRelation(Tag)
def __str__(self):
return 'Bookmark: %s' % self.url
| Bookmark |
python | PrefectHQ__prefect | tests/server/models/test_block_schemas.py | {
"start": 34234,
"end": 36458
} | class ____:
async def test_delete_block_schema(self, session, block_schema):
block_schema_id = block_schema.id
assert await models.block_schemas.delete_block_schema(
session=session, block_schema_id=block_schema_id
)
assert not await models.block_schemas.read_block_schema(
session=session, block_schema_id=block_schema_id
)
async def test_delete_block_schema_fails_gracefully(self, session, block_schema):
block_schema_id = block_schema.id
assert await models.block_schemas.delete_block_schema(
session=session, block_schema_id=block_schema_id
)
assert not await models.block_schemas.delete_block_schema(
session=session, block_schema_id=block_schema_id
)
@pytest.fixture
async def block_schemas_with_capabilities(session):
class CanRun(Block):
_block_schema_capabilities = ["run"]
def run(self):
pass
class CanFly(Block):
_block_schema_capabilities = ["fly"]
def fly(self):
pass
class CanSwim(Block):
_block_schema_capabilities = ["swim"]
def swim(self):
pass
class Duck(CanSwim, CanFly, Block):
a: str
class Bird(CanFly, Block):
b: str
class Cat(CanRun, Block):
c: str
block_type_a = await models.block_types.create_block_type(
session=session, block_type=Duck._to_block_type()
)
await models.block_schemas.create_block_schema(
session=session,
block_schema=Duck._to_block_schema(block_type_id=block_type_a.id),
)
block_type_b = await models.block_types.create_block_type(
session=session, block_type=Bird._to_block_type()
)
await models.block_schemas.create_block_schema(
session=session,
block_schema=Bird._to_block_schema(block_type_id=block_type_b.id),
)
block_type_c = await models.block_types.create_block_type(
session=session, block_type=Cat._to_block_type()
)
await models.block_schemas.create_block_schema(
session=session,
block_schema=Cat._to_block_schema(block_type_id=block_type_c.id),
)
| TestDeleteBlockSchema |
python | pytorch__pytorch | torch/_inductor/metrics.py | {
"start": 2790,
"end": 3674
} | class ____:
"""
A helper class to calculate and apply counter deltas for those
metrics we want to save with cache entries (e.g., FxGraphCache) and
apply on a cache hit.
"""
def __init__(self) -> None:
self.cached_metrics = {}
for metric in get_metric_fields():
self.cached_metrics[metric] = globals()[metric]
def get_deltas(self) -> CachedMetricsDeltas:
delta_metrics = {}
for metric in get_metric_fields():
delta_metrics[metric] = globals()[metric] - self.cached_metrics[metric]
return CachedMetricsDeltas(**delta_metrics)
@staticmethod
def apply_deltas(delta: CachedMetricsDeltas) -> None:
for metric in get_metric_fields():
globals()[metric] += getattr(delta, metric)
REGISTERED_METRIC_TABLES: dict[str, MetricTable] = {}
@dataclass
| CachedMetricsHelper |
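The CachedMetricsHelper entry above follows a generic snapshot/delta pattern: record the module-level counters at construction, report the difference later, and replay that difference when a cached result is reused. A self-contained sketch of that pattern is shown below; the counter name and class names are illustrative stand-ins, not the actual torch._inductor globals.

from dataclasses import dataclass, fields

generated_kernel_count = 0  # stands in for a module-level metric


@dataclass
class MetricsDeltas:
    generated_kernel_count: int


class MetricsSnapshot:
    def __init__(self):
        # remember the counter values at the time the snapshot is taken
        self.start = {f.name: globals()[f.name] for f in fields(MetricsDeltas)}

    def get_deltas(self):
        # difference between the counters now and at snapshot time
        return MetricsDeltas(**{f.name: globals()[f.name] - self.start[f.name]
                                for f in fields(MetricsDeltas)})

    @staticmethod
    def apply_deltas(delta):
        # replay a previously recorded difference (e.g. on a cache hit)
        for f in fields(MetricsDeltas):
            globals()[f.name] += getattr(delta, f.name)


snap = MetricsSnapshot()
generated_kernel_count += 3          # work happens, counters move
saved = snap.get_deltas()            # store alongside the cache entry
MetricsSnapshot.apply_deltas(saved)  # replay later instead of redoing the work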
python | kamyu104__LeetCode-Solutions | Python/count-number-of-trapezoids-i.py | {
"start": 78,
"end": 548
} | class ____(object):
def countTrapezoids(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
MOD = 10**9+7
cnt = collections.defaultdict(int)
for _, y in points:
cnt[y] += 1
result = total = 0
for c in cnt.itervalues():
curr = (c*(c-1)//2)%MOD
result = (result+(total*curr)%MOD)%MOD
total = (total+curr)%MOD
return result
| Solution |
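The countTrapezoids entry above groups points by their y coordinate, counts the horizontal segments on each level as C(c, 2), and combines levels pairwise with a running total so the work stays linear in the number of levels. A Python 3 sketch of the same counting idea, with a made-up example input:

import collections

def count_trapezoids(points, mod=10**9 + 7):
    # number of points per horizontal level
    cnt = collections.Counter(y for _, y in points)
    result = total = 0
    for c in cnt.values():
        curr = c * (c - 1) // 2 % mod           # segments on this level
        result = (result + total * curr) % mod  # pair with all earlier levels
        total = (total + curr) % mod
    return result

# y = 0 contributes 3 segments, y = 1 contributes 1, giving 3 * 1 pairs:
print(count_trapezoids([[0, 0], [1, 0], [2, 0], [0, 1], [5, 1]]))  # 3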
python | skorch-dev__skorch | skorch/tests/test_history.py | {
"start": 251,
"end": 14041
} | class ____:
test_epochs = 3
test_batches = 4
@pytest.fixture(scope='class', params=['single', 'distributed'])
def history_cls(self, request):
# run tests once with default History, once with DistributedHistory
from skorch.history import DistributedHistory, History
from skorch._version import Version
if request.param == 'single':
return History
if request.param == 'distributed':
store = TCPStore("127.0.0.1", port=1234, world_size=1, is_master=True)
return partial(
DistributedHistory, store=store, rank=0, world_size=1
)
raise ValueError("Incorrect pytest request parameter '{request.param}'")
@pytest.fixture
def history(self, history_cls):
"""Return a history filled with epoch and batch data."""
h = history_cls()
for num_epoch in range(self.test_epochs):
h.new_epoch()
h.record('duration', 1)
h.record('total_loss', num_epoch + self.test_batches)
if num_epoch == 2:
h.record('extra', 42)
for num_batch in range(self.test_batches):
h.new_batch()
h.record_batch('loss', num_epoch + num_batch)
if num_batch % 2 == 0 and (num_epoch + 1) != self.test_epochs:
h.record_batch('extra_batch', 23)
return h
@pytest.fixture
def ref(self, history):
return history.to_list()
def test_list_initialization(self, history_cls):
h = history_cls([1, 2, 3])
assert len(h) == 3
def test_history_length(self, history):
assert len(history) == self.test_epochs
# we expect to have the extracted batches for each epoch
assert len(history[:, 'batches']) == self.test_epochs
def test_history_epoch_column(self, history, ref):
total_losses = history[:, 'total_loss']
total_losses_ref = [n['total_loss'] for n in ref]
assert total_losses == total_losses_ref
def test_history_epoch_two_columns(self, history, ref):
duration_with_losses = history[:, ('total_loss', 'duration')]
total_losses_ref = [n['total_loss'] for n in ref]
durations_ref = [n['duration'] for n in ref]
expected = list(zip(total_losses_ref, durations_ref))
assert duration_with_losses == expected
def test_history_epoch_two_columns_different_order(self, history, ref):
duration_with_losses = history[:, ('duration', 'total_loss')]
total_losses_ref = [n['total_loss'] for n in ref]
durations_ref = [n['duration'] for n in ref]
expected = list(zip(durations_ref, total_losses_ref))
assert duration_with_losses == expected
def test_history_partial_index(self, history, ref):
extra = history[:, 'extra']
assert len(extra) == 1
# we retrieve 'extra' from a slice, therefore we expect a list as result
assert extra == [ref[2]['extra']]
def test_history_partial_and_full_index(self, history, ref):
total_loss_with_extra = history[:, ('total_loss', 'extra')]
assert len(total_loss_with_extra) == 1
assert total_loss_with_extra[0][0] == ref[2]['total_loss']
assert total_loss_with_extra[0][1] == ref[2]['extra']
def test_history_partial_join_list(self, history, ref):
total = history[:, ['total_loss', 'extra', 'batches']]
# there's only one epoch with the 'extra' key.
assert len(total) == 1
assert total[0][0] == ref[2]['total_loss']
assert total[0][1] == ref[2]['extra']
assert total[0][2] == ref[2]['batches']
def test_history_retrieve_single_value(self, history, ref):
total_loss_0 = history[0, 'total_loss']
assert total_loss_0 == ref[0]['total_loss']
def test_history_retrieve_multiple_values(self, history, ref):
total_loss_0_to_1 = history[0:1, 'total_loss']
assert total_loss_0_to_1 == [n['total_loss'] for n in ref[0:1]]
def test_history_non_existing_values(self, history):
with pytest.raises(KeyError):
# pylint: disable=pointless-statement
history[:, 'non-existing']
with pytest.raises(KeyError):
# pylint: disable=pointless-statement
history[0, 'extra']
def test_history_non_existing_values_batch(self, history):
with pytest.raises(KeyError):
# pylint: disable=pointless-statement
history[:, 'batches', :, 'non-existing']
with pytest.raises(KeyError):
# pylint: disable=pointless-statement
history[:, 'batches', 1, 'extra_batch']
def test_history_mixed_slicing(self, history, ref):
losses = history[:, 'batches', 0, 'loss']
assert len(losses) == self.test_epochs
assert losses == [epoch['batches'][0]['loss'] for epoch in ref]
losses = history[0, 'batches', :, 'loss']
assert losses == [batch['loss'] for batch in ref[0]['batches']]
def test_history_partial_and_full_index_batches(self, history, ref):
loss_with_extra = history[:, 'batches', :, ('loss', 'extra_batch')]
expected_e0 = [(b['loss'], b['extra_batch']) for b in ref[0]['batches']
if 'extra_batch' in b]
expected_e1 = [(b['loss'], b['extra_batch']) for b in ref[1]['batches']
if 'extra_batch' in b]
assert len(loss_with_extra) == self.test_epochs - 1
assert loss_with_extra[0] == expected_e0
assert loss_with_extra[1] == expected_e1
def test_history_partial_batches_batch_key_3rd(self, history, ref):
extra_batches = history[:, 'batches', :, 'extra_batch']
expected_e0 = [b['extra_batch'] for b in ref[0]['batches']
if 'extra_batch' in b]
expected_e1 = [b['extra_batch'] for b in ref[1]['batches']
if 'extra_batch' in b]
# In every epoch except the last there are 2 batches with the
# 'extra_batch' key, so we expect results for two epochs only.
assert len(extra_batches) == self.test_epochs - 1
assert extra_batches[0] == expected_e0
assert extra_batches[1] == expected_e1
def test_history_partial_batches_batch_key_4th(self, history, ref):
extra_batches = history[:, 'batches', :, 'extra_batch']
expected_e0 = [b['extra_batch'] for b in ref[0]['batches']
if 'extra_batch' in b]
expected_e1 = [b['extra_batch'] for b in ref[1]['batches']
if 'extra_batch' in b]
# In every epoch except the last there are 2 batches with the
# 'extra_batch' key, so we expect results for two epochs only.
assert len(extra_batches) == self.test_epochs - 1
assert extra_batches[0] == expected_e0
assert extra_batches[1] == expected_e1
def test_history_partial_singular_values(self, history):
values = history[-1, ('duration', 'total_loss')]
expected = (history[-1]['duration'], history[-1]['total_loss'])
# pylint: disable=unidiomatic-typecheck
assert type(values) == tuple
assert values == expected
def test_history_slice_beyond_batches_but_key_not_batches(self, history):
with pytest.raises(KeyError) as exc:
# pylint: disable=pointless-statement
history[:, 'not-batches', 0]
msg = exc.value.args[0]
expected = ("History indexing beyond the 2nd level is "
"only possible if key 'batches' is used, "
"found key 'not-batches'.")
assert msg == expected
def test_history_with_invalid_epoch_key(self, history):
key = slice(None), 'not-batches'
with pytest.raises(KeyError) as exc:
# pylint: disable=pointless-statement
history[key]
msg = exc.value.args[0]
expected = "Key 'not-batches' was not found in history."
assert msg == expected
def test_history_too_many_indices(self, history):
with pytest.raises(KeyError) as exc:
# pylint: disable=pointless-statement
history[:, 'batches', :, 'train_loss', :]
msg = exc.value.args[0]
expected = ("Tried to index history with 5 indices but only "
"4 indices are possible.")
assert msg == expected
def test_history_save_load_cycle_file_obj(self, history_cls, history, tmpdir):
if hasattr(history, 'store'):
# DistributedHistory does not support loading from file
pytest.skip()
history_f = tmpdir.mkdir('skorch').join('history.json')
with open(str(history_f), 'w') as f:
history.to_file(f)
with open(str(history_f), 'r') as f:
new_history = history_cls.from_file(f)
assert history == new_history
def test_history_save_load_cycle_file_path(self, history_cls, history, tmpdir):
if hasattr(history, 'store'):
# DistributedHistory does not support loading from file
pytest.skip()
history_f = tmpdir.mkdir('skorch').join('history.json')
history.to_file(str(history_f))
new_history = history_cls.from_file(str(history_f))
assert history == new_history
@pytest.mark.parametrize('type_', [list, tuple])
def test_history_multiple_keys(self, history, type_):
dur_loss = history[-1, type_(['duration', 'total_loss'])]
# pylint: disable=unidiomatic-typecheck
assert type(dur_loss) is type_ and len(dur_loss) == 2
loss_loss = history[-1, 'batches', -1, type_(['loss', 'loss'])]
# pylint: disable=unidiomatic-typecheck
assert type(loss_loss) is type_ and len(loss_loss) == 2
def test_history_key_in_other_epoch(self, history_cls):
h = history_cls()
for has_valid in (True, False):
h.new_epoch()
h.new_batch()
h.record_batch('train_loss', 1)
if has_valid:
h.new_batch()
h.record_batch('valid_loss', 2)
with pytest.raises(KeyError):
# pylint: disable=pointless-statement
h[-1, 'batches', -1, 'valid_loss']
def test_history_no_epochs_index(self, history_cls):
h = history_cls()
with pytest.raises(IndexError):
# pylint: disable=pointless-statement
h[-1, 'batches']
def test_history_jagged_batches(self, history_cls):
h = history_cls()
for num_batch in (1, 2):
h.new_epoch()
for _ in range(num_batch):
h.new_batch()
# Make sure we can access this batch
assert h[-1, 'batches', 1] == {}
@pytest.mark.parametrize('value, check_warn', [
([], False),
(np.array([]), True),
])
def test_history_retrieve_empty_list(self, value, history_cls, check_warn, recwarn):
h = history_cls()
if hasattr(h, 'store') and isinstance(value, np.ndarray):
# DistributedHistory does not support numpy arrays because they
# cannot be json serialized
pytest.skip()
h.new_epoch()
h.record('foo', value)
h.new_batch()
h.record_batch('batch_foo', value)
# Make sure we can access our object
out = h[-1, 'foo']
assert (out is value) or (out == value)
out = h[-1, 'batches', -1, 'batch_foo']
assert (out is value) or (out == value)
# There should be no warning about comparison to an empty ndarray
if check_warn:
assert not recwarn.list
@pytest.mark.parametrize('has_epoch, epoch_slice', [
(False, slice(None)),
(True, slice(1, None)),
])
def test_history_no_epochs_key(self, has_epoch, epoch_slice, history_cls):
h = history_cls()
if has_epoch:
h.new_epoch()
# Expect KeyError since the key was not found in any epochs
with pytest.raises(KeyError):
# pylint: disable=pointless-statement
h[epoch_slice, 'foo']
with pytest.raises(KeyError):
# pylint: disable=pointless-statement
h[epoch_slice, ['foo', 'bar']]
@pytest.mark.parametrize('has_batch, batch_slice', [
(False, slice(None)),
(True, slice(1, None)),
])
def test_history_no_batches_key(self, has_batch, batch_slice, history_cls):
h = history_cls()
h.new_epoch()
if has_batch:
h.new_batch()
# Expect KeyError since the key was not found in any batches
with pytest.raises(KeyError):
# pylint: disable=pointless-statement
h[-1, 'batches', batch_slice, 'foo']
with pytest.raises(KeyError):
# pylint: disable=pointless-statement
h[-1, 'batches', batch_slice, ['foo', 'bar']]
@pytest.mark.parametrize('has_epoch, epoch_slice', [
(False, slice(None)),
(True, slice(1, None)),
])
def test_history_no_epochs_batches(self, has_epoch, epoch_slice, history_cls):
h = history_cls()
if has_epoch:
h.new_epoch()
# Expect a list of zero epochs since 'batches' always exists
assert h[epoch_slice, 'batches'] == []
assert h[epoch_slice, 'batches', -1] == []
def test_pickle(self, history):
loaded = pickle.loads(pickle.dumps(history))
# The store cannot be pickled, so it is set to None
if not hasattr(history, 'store'):
assert history.to_list() == loaded.to_list()
else:
assert loaded.store is None
| TestHistory |
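The TestHistory entry above exercises skorch's History container: epochs and batches are appended with new_epoch()/new_batch(), values are stored with record()/record_batch(), and both levels can be sliced with NumPy-style indexing. A short usage sketch grounded in those tests (it assumes skorch is installed; the recorded values are made up):

from skorch.history import History

h = History()
h.new_epoch()
h.record('duration', 1.0)
h.new_batch()
h.record_batch('loss', 0.5)

print(h[:, 'duration'])              # [1.0]
print(h[-1, 'batches', -1, 'loss'])  # 0.5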
python | chroma-core__chroma | sample_apps/generative_benchmarking/functions/types.py | {
"start": 725,
"end": 791
} | class ____:
results: Dict[str, Dict[str, float]]
| ResultMetricsDict |
python | PrefectHQ__prefect | tests/server/models/test_block_types.py | {
"start": 2636,
"end": 8885
} | class ____:
@pytest.fixture
async def block_types_with_associated_capabilities(self, session):
class CanRun(Block):
_block_schema_capabilities = ["run"]
def run(self):
pass
class CanFly(Block):
_block_schema_capabilities = ["fly"]
def fly(self):
pass
class CanSwim(Block):
_block_schema_capabilities = ["swim"]
def swim(self):
pass
class Duck(CanSwim, CanFly, Block):
a: str
class Bird(CanFly, Block):
b: str
class Cat(CanRun, Block):
c: str
block_type_duck = await models.block_types.create_block_type(
session=session, block_type=Duck._to_block_type()
)
await models.block_schemas.create_block_schema(
session=session,
block_schema=Duck._to_block_schema(block_type_id=block_type_duck.id),
)
block_type_bird = await models.block_types.create_block_type(
session=session, block_type=Bird._to_block_type()
)
await models.block_schemas.create_block_schema(
session=session,
block_schema=Bird._to_block_schema(block_type_id=block_type_bird.id),
)
block_type_cat = await models.block_types.create_block_type(
session=session, block_type=Cat._to_block_type()
)
await models.block_schemas.create_block_schema(
session=session,
block_schema=Cat._to_block_schema(block_type_id=block_type_cat.id),
)
await session.commit()
return block_type_duck, block_type_bird, block_type_cat
async def test_read_block_type_by_id(self, session, block_type_x):
db_block_type = await models.block_types.read_block_type(
session=session, block_type_id=block_type_x.id
)
assert db_block_type.name == block_type_x.name
async def test_read_block_type_by_name(self, session, block_type_x):
db_block_type = await models.block_types.read_block_type_by_slug(
session=session, block_type_slug=block_type_x.slug
)
assert db_block_type.id == block_type_x.id
async def test_read_nonexistant_block_type(self, session):
assert not await models.block_types.read_block_type(
session=session, block_type_id=uuid4()
)
async def test_read_nonexistant_block_type_by_name(self, session):
assert not await models.block_types.read_block_type_by_slug(
session=session, block_type_slug="huh?"
)
async def test_read_all_block_types(
self, session, block_type_x, block_type_y, block_type_z
):
block_types = await models.block_types.read_block_types(session=session)
assert {block_type.id for block_type in block_types} == {
block_type.id for block_type in [block_type_x, block_type_y, block_type_z]
}
assert [block_type.id for block_type in block_types] == [
block_type_x.id,
block_type_y.id,
block_type_z.id,
]
async def test_read_block_types_with_limit_and_offset(
self, session, block_type_x, block_type_y, block_type_z
):
block_types = await models.block_types.read_block_types(
session=session, limit=1
)
assert len(block_types) == 1
assert [block_type.id for block_type in block_types] == [block_type_x.id]
block_types = await models.block_types.read_block_types(
session=session, limit=2, offset=1
)
assert len(block_types) == 2
assert [block_type.id for block_type in block_types] == [
block_type_y.id,
block_type_z.id,
]
block_types = await models.block_types.read_block_types(
session=session, offset=3
)
assert len(block_types) == 0
async def test_read_block_types_filter_by_name(
self, session, block_types_with_associated_capabilities
):
block_types = await models.block_types.read_block_types(
session=session, block_type_filter=BlockTypeFilter(name=dict(like_="duck"))
)
assert len(block_types) == 1
assert block_types == [block_types_with_associated_capabilities[0]]
block_types = await models.block_types.read_block_types(
session=session, block_type_filter=BlockTypeFilter(name=dict(like_="c"))
)
assert len(block_types) == 2
assert block_types == [
block_types_with_associated_capabilities[2],
block_types_with_associated_capabilities[0],
]
block_types = await models.block_types.read_block_types(
session=session, block_type_filter=BlockTypeFilter(name=dict(like_="z"))
)
assert len(block_types) == 0
async def test_read_block_types_filter_by_associated_capability(
self, session, block_types_with_associated_capabilities
):
fly_and_swim_block_types = await models.block_types.read_block_types(
session=session,
block_schema_filter=BlockSchemaFilter(
block_capabilities=dict(all_=["fly", "swim"])
),
)
assert len(fly_and_swim_block_types) == 1
assert fly_and_swim_block_types == [block_types_with_associated_capabilities[0]]
fly_block_types = await models.block_types.read_block_types(
session=session,
block_schema_filter=BlockSchemaFilter(
block_capabilities=dict(all_=["fly"])
),
)
assert len(fly_block_types) == 2
assert fly_block_types == [
block_types_with_associated_capabilities[1],
block_types_with_associated_capabilities[0],
]
swim_block_types = await models.block_types.read_block_types(
session=session,
block_schema_filter=BlockSchemaFilter(
block_capabilities=dict(all_=["swim"])
),
)
assert len(swim_block_types) == 1
assert swim_block_types == [block_types_with_associated_capabilities[0]]
| TestReadBlockTypes |
python | getsentry__sentry | tests/sentry/web/frontend/test_cli.py | {
"start": 150,
"end": 760
} | class ____(TestCase):
def test_cli(self) -> None:
resp = self.client.get(reverse("get_cli_script"))
assert b"https://release-registry.services.sentry.io/apps/sentry-cli" in resp.content
def test_valid_platform_arch(self) -> None:
resp = self.client.get(reverse("get_cli_download_url", args=("Linux", "x86_64")))
assert resp.status_code == 302
assert (
resp["Location"]
== "https://release-registry.services.sentry.io/apps/sentry-cli/latest?response=download&arch=x86_64&platform=Linux&package=sentry-cli"
)
| GetCliDownloadUrlTestCase |
python | getsentry__sentry | src/sentry/integrations/jira_server/integration.py | {
"start": 4560,
"end": 4667
} | class ____(TypedDict):
emptyMessage: str
noResultsMessage: str
items: list[_Project]
| _AddDropDown |
python | airbytehq__airbyte | airbyte-ci/connectors/live-tests/src/live_tests/commons/connector_runner.py | {
"start": 413,
"end": 9469
} | class ____:
DATA_DIR = "/airbyte/data"
IN_CONTAINER_CONFIG_PATH = f"{DATA_DIR}/config.json"
IN_CONTAINER_CONFIGURED_CATALOG_PATH = f"{DATA_DIR}/catalog.json"
IN_CONTAINER_STATE_PATH = f"{DATA_DIR}/state.json"
IN_CONTAINER_OUTPUT_PATH = f"{DATA_DIR}/output.txt"
IN_CONTAINER_OBFUSCATOR_PATH = "/user/local/bin/record_obfuscator.py"
def __init__(
self,
dagger_client: dagger.Client,
execution_inputs: ExecutionInputs,
is_airbyte_ci: bool,
http_proxy: Proxy | None = None,
):
self.connector_under_test = execution_inputs.connector_under_test
self.command = execution_inputs.command
self.output_dir = execution_inputs.output_dir
self.config = execution_inputs.config
self.configured_catalog = execution_inputs.configured_catalog
self.state = execution_inputs.state
self.duckdb_path = execution_inputs.duckdb_path
self.actor_id = execution_inputs.actor_id
self.hashed_connection_id = execution_inputs.hashed_connection_id
self.environment_variables = execution_inputs.environment_variables if execution_inputs.environment_variables else {}
self.full_command: list[str] = self._get_full_command(execution_inputs.command)
self.completion_event = anyio.Event()
self.http_proxy = http_proxy
self.logger = logging.getLogger(f"{self.connector_under_test.name}-{self.connector_under_test.version}")
self.dagger_client = dagger_client
if is_airbyte_ci:
self.host_obfuscator_path = "/tmp/record_obfuscator.py"
else:
repo_root = Path(subprocess.check_output(["git", "rev-parse", "--show-toplevel"]).strip().decode())
self.host_obfuscator_path = f"{repo_root}/tools/bin/record_obfuscator.py"
@property
def _connector_under_test_container(self) -> dagger.Container:
return self.connector_under_test.container
@property
def stdout_file_path(self) -> Path:
return (self.output_dir / "stdout.log").resolve()
@property
def stderr_file_path(self) -> Path:
return (self.output_dir / "stderr.log").resolve()
def _get_full_command(self, command: Command) -> list[str]:
"""Returns a list with a full Airbyte command invocation and all it's arguments and options."""
if command is Command.SPEC:
return ["spec"]
elif command is Command.CHECK:
return ["check", "--config", self.IN_CONTAINER_CONFIG_PATH]
elif command is Command.DISCOVER:
return ["discover", "--config", self.IN_CONTAINER_CONFIG_PATH]
elif command is Command.READ:
return [
"read",
"--config",
self.IN_CONTAINER_CONFIG_PATH,
"--catalog",
self.IN_CONTAINER_CONFIGURED_CATALOG_PATH,
]
elif command is Command.READ_WITH_STATE:
return [
"read",
"--config",
self.IN_CONTAINER_CONFIG_PATH,
"--catalog",
self.IN_CONTAINER_CONFIGURED_CATALOG_PATH,
"--state",
self.IN_CONTAINER_STATE_PATH,
]
else:
raise NotImplementedError(f"The connector runner does not support the {command} command")
async def get_container_env_variable_value(self, name: str) -> str | None:
return await self._connector_under_test_container.env_variable(name)
async def get_container_label(self, label: str) -> str | None:
return await self._connector_under_test_container.label(label)
async def get_container_entrypoint(self) -> str:
entrypoint = await self._connector_under_test_container.entrypoint()
assert entrypoint, "The connector container has no entrypoint"
return " ".join(entrypoint)
async def run(self) -> ExecutionResult:
async with asyncer.create_task_group() as task_group:
soon_result = task_group.soonify(self._run)()
task_group.soonify(self._log_progress)()
return soon_result.value
async def _run(
self,
) -> ExecutionResult:
container = self._connector_under_test_container
current_user = (await container.with_exec(["whoami"]).stdout()).strip()
container = container.with_user(current_user)
container = container.with_exec(["mkdir", "-p", self.DATA_DIR])
# Do not cache downstream dagger layers
container = container.with_env_variable("CACHEBUSTER", str(uuid.uuid4()))
# When running locally, it's likely that record_obfuscator is within the user's home directory, so we expand it.
expanded_host_executable_path = os.path.expanduser(self.host_obfuscator_path)
container = container.with_file(
self.IN_CONTAINER_OBFUSCATOR_PATH,
self.dagger_client.host().file(expanded_host_executable_path),
)
for env_var_name, env_var_value in self.environment_variables.items():
container = container.with_env_variable(env_var_name, env_var_value)
if self.config:
container = container.with_new_file(self.IN_CONTAINER_CONFIG_PATH, contents=json.dumps(dict(self.config)), owner=current_user)
if self.state:
container = container.with_new_file(self.IN_CONTAINER_STATE_PATH, contents=json.dumps(self.state), owner=current_user)
if self.configured_catalog:
container = container.with_new_file(
self.IN_CONTAINER_CONFIGURED_CATALOG_PATH,
contents=self.configured_catalog.json(),
owner=current_user,
)
if self.http_proxy:
container = await self.http_proxy.bind_container(container)
self.logger.info(f"⏳ Start running {self.command.value} command")
try:
entrypoint = await container.entrypoint()
assert entrypoint, "The connector container has no entrypoint"
airbyte_command = entrypoint + self.full_command
container = container.with_exec(
[
"sh",
"-c",
" ".join(airbyte_command)
+ f"| {self.IN_CONTAINER_OBFUSCATOR_PATH} > {self.IN_CONTAINER_OUTPUT_PATH} 2>&1 | tee -a {self.IN_CONTAINER_OUTPUT_PATH}",
]
)
executed_container = await container.sync()
# We export to disk because reading .stdout() or awaiting file.contents() might blow up the memory
stdout_exported = await executed_container.file(self.IN_CONTAINER_OUTPUT_PATH).export(str(self.stdout_file_path))
if not stdout_exported:
raise errors.ExportError(f"Failed to export {self.IN_CONTAINER_OUTPUT_PATH}")
stderr = await executed_container.stderr()
self.stderr_file_path.write_text(stderr)
success = True
except dagger.ExecError as e:
self.stderr_file_path.write_text(e.stderr)
self.stdout_file_path.write_text(e.stdout)
executed_container = None
success = False
self.completion_event.set()
if not success:
self.logger.error(f"❌ Failed to run {self.command.value} command")
else:
self.logger.info(f"⌛ Finished running {self.command.value} command")
execution_result = await ExecutionResult.load(
command=self.command,
connector_under_test=self.connector_under_test,
actor_id=self.actor_id,
hashed_connection_id=self.hashed_connection_id,
configured_catalog=self.configured_catalog,
stdout_file_path=self.stdout_file_path,
stderr_file_path=self.stderr_file_path,
success=success,
http_dump=await self.http_proxy.retrieve_http_dump() if self.http_proxy else None,
executed_container=executed_container,
config=self.config,
)
await execution_result.save_artifacts(self.output_dir, self.duckdb_path)
return execution_result
async def _log_progress(self) -> None:
start_time = datetime.datetime.utcnow()
message = f"⏳ Still running {self.command.value} command"
while not self.completion_event.is_set():
duration = datetime.datetime.utcnow() - start_time
elapsed_seconds = duration.total_seconds()
if elapsed_seconds > 10 and round(elapsed_seconds) % 10 == 0:
self.logger.info(f"{message} (duration: {self.format_duration(duration)})")
await anyio.sleep(1)
@staticmethod
def format_duration(time_delta: datetime.timedelta) -> str:
total_seconds = time_delta.total_seconds()
if total_seconds < 60:
return f"{total_seconds:.2f}s"
minutes = int(total_seconds // 60)
seconds = int(total_seconds % 60)
return f"{minutes:02d}mn{seconds:02d}s"
| ConnectorRunner |
python | kubernetes-client__python | kubernetes/client/models/v1_typed_local_object_reference.py | {
"start": 383,
"end": 5769
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_group': 'str',
'kind': 'str',
'name': 'str'
}
attribute_map = {
'api_group': 'apiGroup',
'kind': 'kind',
'name': 'name'
}
def __init__(self, api_group=None, kind=None, name=None, local_vars_configuration=None): # noqa: E501
"""V1TypedLocalObjectReference - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_group = None
self._kind = None
self._name = None
self.discriminator = None
if api_group is not None:
self.api_group = api_group
self.kind = kind
self.name = name
@property
def api_group(self):
"""Gets the api_group of this V1TypedLocalObjectReference. # noqa: E501
APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. # noqa: E501
:return: The api_group of this V1TypedLocalObjectReference. # noqa: E501
:rtype: str
"""
return self._api_group
@api_group.setter
def api_group(self, api_group):
"""Sets the api_group of this V1TypedLocalObjectReference.
APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. # noqa: E501
:param api_group: The api_group of this V1TypedLocalObjectReference. # noqa: E501
:type: str
"""
self._api_group = api_group
@property
def kind(self):
"""Gets the kind of this V1TypedLocalObjectReference. # noqa: E501
Kind is the type of resource being referenced # noqa: E501
:return: The kind of this V1TypedLocalObjectReference. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1TypedLocalObjectReference.
Kind is the type of resource being referenced # noqa: E501
:param kind: The kind of this V1TypedLocalObjectReference. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and kind is None: # noqa: E501
raise ValueError("Invalid value for `kind`, must not be `None`") # noqa: E501
self._kind = kind
@property
def name(self):
"""Gets the name of this V1TypedLocalObjectReference. # noqa: E501
Name is the name of resource being referenced # noqa: E501
:return: The name of this V1TypedLocalObjectReference. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1TypedLocalObjectReference.
Name is the name of resource being referenced # noqa: E501
:param name: The name of this V1TypedLocalObjectReference. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1TypedLocalObjectReference):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1TypedLocalObjectReference):
return True
return self.to_dict() != other.to_dict()
| V1TypedLocalObjectReference |
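The generated model above is used like any other OpenAPI model in the Kubernetes Python client: construct it with keyword arguments and serialize it with to_dict(). A hypothetical usage sketch (it assumes the kubernetes package is installed; the field values are made up):

from kubernetes.client import V1TypedLocalObjectReference

ref = V1TypedLocalObjectReference(
    api_group="snapshot.storage.k8s.io",  # optional; None means the core API group
    kind="VolumeSnapshot",
    name="my-snapshot",
)
print(ref.to_dict())
# {'api_group': 'snapshot.storage.k8s.io', 'kind': 'VolumeSnapshot', 'name': 'my-snapshot'}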
python | scipy__scipy | scipy/stats/tests/test_stats.py | {
"start": 342606,
"end": 351216
} | class ____:
""" Tests for wasserstein_distance_nd() output values.
"""
def test_published_values(self):
# Compare against published values and manually computed results.
# The values and computed result are posted at James D. McCaffrey's blog,
# https://jamesmccaffrey.wordpress.com/2018/03/05/earth-mover-distance
# -wasserstein-metric-example-calculation/
u = [(1,1), (1,1), (1,1), (1,1), (1,1), (1,1), (1,1), (1,1), (1,1), (1,1),
(4,2), (6,1), (6,1)]
v = [(2,1), (2,1), (3,2), (3,2), (3,2), (5,1), (5,1), (5,1), (5,1), (5,1),
(5,1), (5,1), (7,1)]
res = stats.wasserstein_distance_nd(u, v)
# In the original post, the author kept two decimal places for ease of calculation.
# This test uses more precise distances to obtain a precise reference result.
# For comparison, please see the table and figure in the original blog post.
flow = np.array([2., 3., 5., 1., 1., 1.])
dist = np.array([1.00, 5**0.5, 4.00, 2**0.5, 1.00, 1.00])
ref = np.sum(flow * dist)/np.sum(flow)
assert_allclose(res, ref)
@pytest.mark.parametrize('n_value', (4, 15, 35))
@pytest.mark.parametrize('ndim', (3, 4, 7))
@pytest.mark.parametrize('max_repeats', (5, 10))
def test_same_distribution_nD(self, ndim, n_value, max_repeats):
# Any distribution moved to itself should have a Wasserstein distance
# of zero.
rng = np.random.default_rng(363836384995579937222333)
repeats = rng.integers(1, max_repeats, size=n_value, dtype=int)
u_values = rng.random(size=(n_value, ndim))
v_values = np.repeat(u_values, repeats, axis=0)
v_weights = rng.random(np.sum(repeats))
range_repeat = np.repeat(np.arange(len(repeats)), repeats)
u_weights = np.bincount(range_repeat, weights=v_weights)
index = rng.permutation(len(v_weights))
v_values, v_weights = v_values[index], v_weights[index]
res = stats.wasserstein_distance_nd(u_values, v_values, u_weights, v_weights)
assert_allclose(res, 0, atol=1e-15)
@pytest.mark.parametrize('nu', (8, 9, 38))
@pytest.mark.parametrize('nv', (8, 12, 17))
@pytest.mark.parametrize('ndim', (3, 5, 23))
def test_collapse_nD(self, nu, nv, ndim):
# test collapse for n-dimensional values
# Collapsing an n-D distribution to a point distribution at zero
# is equivalent to taking the weighted average of the norms of the data points.
rng = np.random.default_rng(38573488467338826109)
u_values = rng.random(size=(nu, ndim))
v_values = np.zeros((nv, ndim))
u_weights = rng.random(size=nu)
v_weights = rng.random(size=nv)
ref = np.average(np.linalg.norm(u_values, axis=1), weights=u_weights)
res = stats.wasserstein_distance_nd(u_values, v_values, u_weights, v_weights)
assert_allclose(res, ref)
@pytest.mark.parametrize('nu', (8, 16, 32))
@pytest.mark.parametrize('nv', (8, 16, 32))
@pytest.mark.parametrize('ndim', (1, 2, 6))
def test_zero_weight_nD(self, nu, nv, ndim):
# Values with zero weight have no impact on the Wasserstein distance.
rng = np.random.default_rng(38573488467338826109)
u_values = rng.random(size=(nu, ndim))
v_values = rng.random(size=(nv, ndim))
u_weights = rng.random(size=nu)
v_weights = rng.random(size=nv)
ref = stats.wasserstein_distance_nd(u_values, v_values, u_weights, v_weights)
add_row, nrows = rng.integers(0, nu, size=2)
add_value = rng.random(size=(nrows, ndim))
u_values = np.insert(u_values, add_row, add_value, axis=0)
u_weights = np.insert(u_weights, add_row, np.zeros(nrows), axis=0)
res = stats.wasserstein_distance_nd(u_values, v_values, u_weights, v_weights)
assert_allclose(res, ref)
def test_inf_values(self):
# Inf values can lead to an inf distance or trigger a RuntimeWarning
# (and return NaN) if the distance is undefined.
uv, vv, uw = [[1, 1], [2, 1]], [[np.inf, -np.inf]], [1, 1]
distance = stats.wasserstein_distance_nd(uv, vv, uw)
assert_equal(distance, np.inf)
with np.errstate(invalid='ignore'):
uv, vv = [[np.inf, np.inf]], [[np.inf, -np.inf]]
distance = stats.wasserstein_distance_nd(uv, vv)
assert_equal(distance, np.nan)
@pytest.mark.parametrize('nu', (10, 15, 20))
@pytest.mark.parametrize('nv', (10, 15, 20))
@pytest.mark.parametrize('ndim', (1, 3, 5))
def test_multi_dim_nD(self, nu, nv, ndim):
# Adding a dimension with a constant value to both distributions does not affect the result
rng = np.random.default_rng(2736495738494849509)
u_values = rng.random(size=(nu, ndim))
v_values = rng.random(size=(nv, ndim))
u_weights = rng.random(size=nu)
v_weights = rng.random(size=nv)
ref = stats.wasserstein_distance_nd(u_values, v_values, u_weights, v_weights)
add_dim = rng.integers(0, ndim)
add_value = rng.random()
u_values = np.insert(u_values, add_dim, add_value, axis=1)
v_values = np.insert(v_values, add_dim, add_value, axis=1)
res = stats.wasserstein_distance_nd(u_values, v_values, u_weights, v_weights)
assert_allclose(res, ref)
@pytest.mark.parametrize('nu', (7, 13, 19))
@pytest.mark.parametrize('nv', (7, 13, 19))
@pytest.mark.parametrize('ndim', (2, 4, 7))
def test_orthogonal_nD(self, nu, nv, ndim):
# orthogonal transformations do not affect the result of the
# wasserstein_distance
rng = np.random.default_rng(34746837464536)
u_values = rng.random(size=(nu, ndim))
v_values = rng.random(size=(nv, ndim))
u_weights = rng.random(size=nu)
v_weights = rng.random(size=nv)
ref = stats.wasserstein_distance_nd(u_values, v_values, u_weights, v_weights)
dist = stats.ortho_group(ndim)
transform = dist.rvs(random_state=rng)
shift = rng.random(size=ndim)
res = stats.wasserstein_distance_nd(u_values @ transform + shift,
v_values @ transform + shift,
u_weights, v_weights)
assert_allclose(res, ref)
def test_error_code(self):
rng = np.random.default_rng(52473644737485644836320101)
with pytest.raises(ValueError, match='Invalid input values. The inputs'):
u_values = rng.random(size=(4, 10, 15))
v_values = rng.random(size=(6, 2, 7))
_ = stats.wasserstein_distance_nd(u_values, v_values)
with pytest.raises(ValueError, match='Invalid input values. Dimensions'):
u_values = rng.random(size=(15,))
v_values = rng.random(size=(3, 15))
_ = stats.wasserstein_distance_nd(u_values, v_values)
with pytest.raises(ValueError,
match='Invalid input values. If two-dimensional'):
u_values = rng.random(size=(2, 10))
v_values = rng.random(size=(2, 2))
_ = stats.wasserstein_distance_nd(u_values, v_values)
@pytest.mark.parametrize('u_size', [1, 10, 50])
@pytest.mark.parametrize('v_size', [1, 10, 50])
def test_optimization_vs_analytical(self, u_size, v_size):
rng = np.random.default_rng(45634745675)
# Test when u_weights = None, v_weights = None
u_values = rng.random(size=(u_size, 1))
v_values = rng.random(size=(v_size, 1))
u_values_flat = u_values.ravel()
v_values_flat = v_values.ravel()
# These three calculations are done using different backends
# but they must be equal
d1 = stats.wasserstein_distance(u_values_flat, v_values_flat)
d2 = stats.wasserstein_distance_nd(u_values, v_values)
d3 = stats.wasserstein_distance_nd(u_values_flat, v_values_flat)
assert_allclose(d2, d1)
assert_allclose(d3, d1)
# Test with u_weights and v_weights specified.
u_weights = rng.random(size=u_size)
v_weights = rng.random(size=v_size)
d1 = stats.wasserstein_distance(u_values_flat, v_values_flat,
u_weights, v_weights)
d2 = stats.wasserstein_distance_nd(u_values, v_values,
u_weights, v_weights)
d3 = stats.wasserstein_distance_nd(u_values_flat, v_values_flat,
u_weights, v_weights)
assert_allclose(d2, d1)
assert_allclose(d3, d1)
| TestWassersteinDistanceND |
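The published-values test in the entry above builds its reference as the flow-weighted mean of the pairwise distances, sum(flow * dist) / sum(flow). A small stand-alone sanity check mirroring that computation (it assumes SciPy >= 1.13, where wasserstein_distance_nd is available):

import numpy as np
from scipy import stats

u = [(1, 1)] * 10 + [(4, 2), (6, 1), (6, 1)]
v = [(2, 1)] * 2 + [(3, 2)] * 3 + [(5, 1)] * 7 + [(7, 1)]
flow = np.array([2., 3., 5., 1., 1., 1.])
dist = np.array([1.00, 5 ** 0.5, 4.00, 2 ** 0.5, 1.00, 1.00])
ref = np.sum(flow * dist) / np.sum(flow)

print(np.isclose(stats.wasserstein_distance_nd(u, v), ref))  # True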